/*-
 * CAM SCSI interface for the Advanced Systems Inc.
 * Second Generation SCSI controllers.
 *
 * Product specific probe and attach routines can be found in:
 *
 * adw_pci.c	ABP[3]940UW, ABP950UW, ABP3940U2W
 *
 * Copyright (c) 1998, 1999, 2000 Justin Gibbs.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 * Ported from:
 * advansys.c - Linux Host Driver for AdvanSys SCSI Adapters
 *
 * Copyright (c) 1995-1998 Advanced System Products, Inc.
 * All Rights Reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that redistributions of source
 * code retain the above copyright notice and this comment without
 * modification.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: releng/10.0/sys/dev/advansys/adwcam.c 246713 2013-02-12 16:57:20Z kib $");

#include <sys/param.h>
#include <sys/conf.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/lock.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/bus.h>

#include <machine/bus.h>
#include <machine/resource.h>

#include <sys/rman.h>

#include <cam/cam.h>
#include <cam/cam_ccb.h>
#include <cam/cam_sim.h>
#include <cam/cam_xpt_sim.h>
#include <cam/cam_debug.h>

#include <cam/scsi/scsi_message.h>

#include <dev/advansys/adwvar.h>

/* Definitions for our use of the SIM private CCB area */
#define ccb_acb_ptr spriv_ptr0
#define ccb_adw_ptr spriv_ptr1
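
/*
 * These aliases let the driver stash pointers to the active ACB and
 * its softc in each CCB header, e.g.:
 *
 *	acb->ccb = ccb;
 *	ccb->ccb_h.ccb_acb_ptr = acb;
 *	ccb->ccb_h.ccb_adw_ptr = adw;
 */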

static __inline cam_status	adwccbstatus(union ccb*);
static __inline struct acb*	adwgetacb(struct adw_softc *adw);
static __inline void		adwfreeacb(struct adw_softc *adw,
					   struct acb *acb);

static void		adwmapmem(void *arg, bus_dma_segment_t *segs,
				  int nseg, int error);
static struct sg_map_node*
			adwallocsgmap(struct adw_softc *adw);
static int		adwallocacbs(struct adw_softc *adw);

static void		adwexecuteacb(void *arg, bus_dma_segment_t *dm_segs,
				      int nseg, int error);
static void		adw_action(struct cam_sim *sim, union ccb *ccb);
static void		adw_intr_locked(struct adw_softc *adw);
static void		adw_poll(struct cam_sim *sim);
static void		adw_async(void *callback_arg, u_int32_t code,
				  struct cam_path *path, void *arg);
static void		adwprocesserror(struct adw_softc *adw, struct acb *acb);
static void		adwtimeout(void *arg);
static void		adw_handle_device_reset(struct adw_softc *adw,
						u_int target);
static void		adw_handle_bus_reset(struct adw_softc *adw,
					     int initiated);

static __inline cam_status
adwccbstatus(union ccb* ccb)
{
	return (ccb->ccb_h.status & CAM_STATUS_MASK);
}

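/*
 * Pull a free ACB off the free list, growing the pool on demand (up
 * to max_acbs) when the list is empty.  Returns NULL if no ACB can
 * be obtained.  Called with the softc lock held.
 */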
static __inline struct acb*
adwgetacb(struct adw_softc *adw)
{
	struct acb* acb;

	if (!dumping)
		mtx_assert(&adw->lock, MA_OWNED);
	if ((acb = SLIST_FIRST(&adw->free_acb_list)) != NULL) {
		SLIST_REMOVE_HEAD(&adw->free_acb_list, links);
	} else if (adw->num_acbs < adw->max_acbs) {
		adwallocacbs(adw);
		acb = SLIST_FIRST(&adw->free_acb_list);
		if (acb == NULL)
			device_printf(adw->device, "Can't malloc ACB\n");
		else {
			SLIST_REMOVE_HEAD(&adw->free_acb_list, links);
		}
	}

	return (acb);
}

static __inline void
adwfreeacb(struct adw_softc *adw, struct acb *acb)
{

	if (!dumping)
		mtx_assert(&adw->lock, MA_OWNED);
	if ((acb->state & ACB_ACTIVE) != 0)
		LIST_REMOVE(&acb->ccb->ccb_h, sim_links.le);
	if ((acb->state & ACB_RELEASE_SIMQ) != 0)
		acb->ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
	else if ((adw->state & ADW_RESOURCE_SHORTAGE) != 0
	      && (acb->ccb->ccb_h.status & CAM_RELEASE_SIMQ) == 0) {
		acb->ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
		adw->state &= ~ADW_RESOURCE_SHORTAGE;
	}
	acb->state = ACB_FREE;
	SLIST_INSERT_HEAD(&adw->free_acb_list, acb, links);
}

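/*
 * bus_dmamap_load() callback: record the bus address of a mapping
 * that is known to occupy a single DMA segment, so only segs[0] is
 * consulted.
 */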
static void
adwmapmem(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	bus_addr_t *busaddrp;

	busaddrp = (bus_addr_t *)arg;
	*busaddrp = segs->ds_addr;
}

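/*
 * Allocate and map a page of DMA-able memory to hold the S/G blocks
 * for a batch of ACBs.  The node is linked into the softc's sg_maps
 * list so that it can be released later in adw_free().
 */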
static struct sg_map_node *
adwallocsgmap(struct adw_softc *adw)
{
	struct sg_map_node *sg_map;

	sg_map = malloc(sizeof(*sg_map), M_DEVBUF, M_NOWAIT);

	if (sg_map == NULL)
		return (NULL);

	/* Allocate S/G space for the next batch of ACBs */
	if (bus_dmamem_alloc(adw->sg_dmat, (void **)&sg_map->sg_vaddr,
			     BUS_DMA_NOWAIT, &sg_map->sg_dmamap) != 0) {
		free(sg_map, M_DEVBUF);
		return (NULL);
	}

	SLIST_INSERT_HEAD(&adw->sg_maps, sg_map, links);

	bus_dmamap_load(adw->sg_dmat, sg_map->sg_dmamap, sg_map->sg_vaddr,
			PAGE_SIZE, adwmapmem, &sg_map->sg_physaddr, /*flags*/0);

	bzero(sg_map->sg_vaddr, PAGE_SIZE);
	return (sg_map);
}

/*
 * Allocate another chunk of ACBs.  Return the count of entries added.
 */
static int
adwallocacbs(struct adw_softc *adw)
{
	struct acb *next_acb;
	struct sg_map_node *sg_map;
	bus_addr_t busaddr;
	struct adw_sg_block *blocks;
	int newcount;
	int i;

	next_acb = &adw->acbs[adw->num_acbs];
	sg_map = adwallocsgmap(adw);

	if (sg_map == NULL)
		return (0);

	blocks = sg_map->sg_vaddr;
	busaddr = sg_map->sg_physaddr;

	newcount = (PAGE_SIZE / (ADW_SG_BLOCKCNT * sizeof(*blocks)));
	for (i = 0; adw->num_acbs < adw->max_acbs && i < newcount; i++) {
		int error;

		error = bus_dmamap_create(adw->buffer_dmat, /*flags*/0,
					  &next_acb->dmamap);
		if (error != 0)
			break;
		next_acb->queue.scsi_req_baddr = acbvtob(adw, next_acb);
		next_acb->queue.scsi_req_bo = acbvtobo(adw, next_acb);
		next_acb->queue.sense_baddr =
		    acbvtob(adw, next_acb) + offsetof(struct acb, sense_data);
		next_acb->sg_blocks = blocks;
		next_acb->sg_busaddr = busaddr;
		next_acb->state = ACB_FREE;
		callout_init_mtx(&next_acb->timer, &adw->lock, 0);
		SLIST_INSERT_HEAD(&adw->free_acb_list, next_acb, links);
		blocks += ADW_SG_BLOCKCNT;
		busaddr += ADW_SG_BLOCKCNT * sizeof(*blocks);
		next_acb++;
		adw->num_acbs++;
	}
	return (i);
}

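/*
 * bus_dmamap_load_ccb() callback: fill in the ACB's data address and
 * S/G block chain, then hand the request to the firmware.  Requests
 * that were aborted while being mapped, or that failed to map, are
 * completed here instead.
 */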
static void
adwexecuteacb(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
{
	struct acb *acb;
	union ccb *ccb;
	struct adw_softc *adw;

	acb = (struct acb *)arg;
	ccb = acb->ccb;
	adw = (struct adw_softc *)ccb->ccb_h.ccb_adw_ptr;

	if (!dumping)
		mtx_assert(&adw->lock, MA_OWNED);
	if (error != 0) {
		if (error != EFBIG)
			device_printf(adw->device, "Unexpected error 0x%x "
				      "returned from bus_dmamap_load\n", error);
		if (ccb->ccb_h.status == CAM_REQ_INPROG) {
			xpt_freeze_devq(ccb->ccb_h.path, /*count*/1);
			ccb->ccb_h.status = CAM_REQ_TOO_BIG|CAM_DEV_QFRZN;
		}
		adwfreeacb(adw, acb);
		xpt_done(ccb);
		return;
	}

	if (nseg != 0) {
		bus_dmasync_op_t op;

		acb->queue.data_addr = dm_segs[0].ds_addr;
		acb->queue.data_cnt = ccb->csio.dxfer_len;
		if (nseg > 1) {
			struct adw_sg_block *sg_block;
			struct adw_sg_elm *sg;
			bus_addr_t sg_busaddr;
			u_int sg_index;
			bus_dma_segment_t *end_seg;

			end_seg = dm_segs + nseg;

			sg_busaddr = acb->sg_busaddr;
			sg_index = 0;
			/* Copy the segments into our SG list */
			for (sg_block = acb->sg_blocks;; sg_block++) {
				u_int i;

				sg = sg_block->sg_list;
				for (i = 0; i < ADW_NO_OF_SG_PER_BLOCK; i++) {
					if (dm_segs >= end_seg)
						break;

					sg->sg_addr = dm_segs->ds_addr;
					sg->sg_count = dm_segs->ds_len;
					sg++;
					dm_segs++;
				}
				sg_block->sg_cnt = i;
				sg_index += i;
				if (dm_segs == end_seg) {
					sg_block->sg_busaddr_next = 0;
					break;
				} else {
					sg_busaddr +=
					    sizeof(struct adw_sg_block);
					sg_block->sg_busaddr_next = sg_busaddr;
				}
			}
			acb->queue.sg_real_addr = acb->sg_busaddr;
		} else {
			acb->queue.sg_real_addr = 0;
		}

		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN)
			op = BUS_DMASYNC_PREREAD;
		else
			op = BUS_DMASYNC_PREWRITE;

		bus_dmamap_sync(adw->buffer_dmat, acb->dmamap, op);

	} else {
		acb->queue.data_addr = 0;
		acb->queue.data_cnt = 0;
		acb->queue.sg_real_addr = 0;
	}

	/*
	 * Last chance to check whether this CCB needs to be
	 * aborted.
	 */
	if (ccb->ccb_h.status != CAM_REQ_INPROG) {
		if (nseg != 0)
			bus_dmamap_unload(adw->buffer_dmat, acb->dmamap);
		adwfreeacb(adw, acb);
		xpt_done(ccb);
		return;
	}

	acb->state |= ACB_ACTIVE;
	ccb->ccb_h.status |= CAM_SIM_QUEUED;
	LIST_INSERT_HEAD(&adw->pending_ccbs, &ccb->ccb_h, sim_links.le);
	callout_reset(&acb->timer, (ccb->ccb_h.timeout * hz) / 1000,
	    adwtimeout, acb);

	adw_send_acb(adw, acb, acbvtob(adw, acb));
}

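/*
 * CAM SIM action entry point.  Dispatches CCBs handed down by the
 * transport layer: SCSI I/O, device and bus resets, transfer-setting
 * changes, and path inquiries.  Called with the softc lock held.
 */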
static void
adw_action(struct cam_sim *sim, union ccb *ccb)
{
	struct adw_softc *adw;

	CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("adw_action\n"));

	adw = (struct adw_softc *)cam_sim_softc(sim);
	if (!dumping)
		mtx_assert(&adw->lock, MA_OWNED);

	switch (ccb->ccb_h.func_code) {
	/* Common cases first */
	case XPT_SCSI_IO:	/* Execute the requested I/O operation */
	{
		struct ccb_scsiio *csio;
		struct ccb_hdr *ccbh;
		struct acb *acb;
		int error;

		csio = &ccb->csio;
		ccbh = &ccb->ccb_h;

		/* Max supported CDB length is 12 bytes */
		if (csio->cdb_len > 12) {
			ccb->ccb_h.status = CAM_REQ_INVALID;
			xpt_done(ccb);
			return;
		}

		if ((acb = adwgetacb(adw)) == NULL) {
			adw->state |= ADW_RESOURCE_SHORTAGE;
			xpt_freeze_simq(sim, /*count*/1);
			ccb->ccb_h.status = CAM_REQUEUE_REQ;
			xpt_done(ccb);
			return;
		}

		/* Link acb and ccb so we can find one from the other */
		acb->ccb = ccb;
		ccb->ccb_h.ccb_acb_ptr = acb;
		ccb->ccb_h.ccb_adw_ptr = adw;

		acb->queue.cntl = 0;
		acb->queue.target_cmd = 0;
		acb->queue.target_id = ccb->ccb_h.target_id;
		acb->queue.target_lun = ccb->ccb_h.target_lun;

		acb->queue.mflag = 0;
		acb->queue.sense_len =
			MIN(csio->sense_len, sizeof(acb->sense_data));
		acb->queue.cdb_len = csio->cdb_len;
		if ((ccb->ccb_h.flags & CAM_TAG_ACTION_VALID) != 0) {
			switch (csio->tag_action) {
			case MSG_SIMPLE_Q_TAG:
				acb->queue.scsi_cntl = ADW_QSC_SIMPLE_Q_TAG;
				break;
			case MSG_HEAD_OF_Q_TAG:
				acb->queue.scsi_cntl = ADW_QSC_HEAD_OF_Q_TAG;
				break;
			case MSG_ORDERED_Q_TAG:
				acb->queue.scsi_cntl = ADW_QSC_ORDERED_Q_TAG;
				break;
			default:
				acb->queue.scsi_cntl = ADW_QSC_NO_TAGMSG;
				break;
			}
		} else
			acb->queue.scsi_cntl = ADW_QSC_NO_TAGMSG;

		if ((ccb->ccb_h.flags & CAM_DIS_DISCONNECT) != 0)
			acb->queue.scsi_cntl |= ADW_QSC_NO_DISC;

		acb->queue.done_status = 0;
		acb->queue.scsi_status = 0;
		acb->queue.host_status = 0;
		acb->queue.sg_wk_ix = 0;
		if ((ccb->ccb_h.flags & CAM_CDB_POINTER) != 0) {
			if ((ccb->ccb_h.flags & CAM_CDB_PHYS) == 0) {
				bcopy(csio->cdb_io.cdb_ptr,
				      acb->queue.cdb, csio->cdb_len);
			} else {
				/* I guess I could map it in... */
				ccb->ccb_h.status = CAM_REQ_INVALID;
				adwfreeacb(adw, acb);
				xpt_done(ccb);
				return;
			}
		} else {
			bcopy(csio->cdb_io.cdb_bytes,
			      acb->queue.cdb, csio->cdb_len);
		}

		error = bus_dmamap_load_ccb(adw->buffer_dmat,
					    acb->dmamap,
					    ccb,
					    adwexecuteacb,
					    acb, /*flags*/0);
		if (error == EINPROGRESS) {
			/*
			 * So as to maintain ordering, freeze the controller
			 * queue until our mapping is returned.  Flag the ACB
			 * so that adwfreeacb() releases the SIM queue when
			 * this request is freed.
			 */
			xpt_freeze_simq(sim, 1);
			acb->state |= ACB_RELEASE_SIMQ;
		}
		break;
	}
	case XPT_RESET_DEV:	/* Bus Device Reset the specified SCSI device */
	{
		adw_idle_cmd_status_t status;

		status = adw_idle_cmd_send(adw, ADW_IDLE_CMD_DEVICE_RESET,
					   ccb->ccb_h.target_id);
		if (status == ADW_IDLE_CMD_SUCCESS) {
			ccb->ccb_h.status = CAM_REQ_CMP;
			if (bootverbose) {
				xpt_print_path(ccb->ccb_h.path);
				printf("BDR Delivered\n");
			}
		} else
			ccb->ccb_h.status = CAM_REQ_CMP_ERR;
		xpt_done(ccb);
		break;
	}
	case XPT_ABORT:			/* Abort the specified CCB */
		/* XXX Implement */
		ccb->ccb_h.status = CAM_REQ_INVALID;
		xpt_done(ccb);
		break;
	case XPT_SET_TRAN_SETTINGS:
	{
		struct ccb_trans_settings_scsi *scsi;
		struct ccb_trans_settings_spi *spi;
		struct ccb_trans_settings *cts;
		u_int target_mask;

		cts = &ccb->cts;
		target_mask = 0x01 << ccb->ccb_h.target_id;

		scsi = &cts->proto_specific.scsi;
		spi = &cts->xport_specific.spi;
		if (cts->type == CTS_TYPE_CURRENT_SETTINGS) {
			u_int sdtrdone;

			sdtrdone = adw_lram_read_16(adw, ADW_MC_SDTR_DONE);
			if ((spi->valid & CTS_SPI_VALID_DISC) != 0) {
				u_int discenb;

				discenb =
				    adw_lram_read_16(adw, ADW_MC_DISC_ENABLE);

				if ((spi->flags & CTS_SPI_FLAGS_DISC_ENB) != 0)
					discenb |= target_mask;
				else
					discenb &= ~target_mask;

				adw_lram_write_16(adw, ADW_MC_DISC_ENABLE,
						  discenb);
			}

			if ((scsi->valid & CTS_SCSI_VALID_TQ) != 0) {
				if ((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0)
					adw->tagenb |= target_mask;
				else
					adw->tagenb &= ~target_mask;
			}

			if ((spi->valid & CTS_SPI_VALID_BUS_WIDTH) != 0) {
				u_int wdtrenb_orig;
				u_int wdtrenb;
				u_int wdtrdone;

				wdtrenb_orig =
				    adw_lram_read_16(adw, ADW_MC_WDTR_ABLE);
				wdtrenb = wdtrenb_orig;
				wdtrdone = adw_lram_read_16(adw,
							    ADW_MC_WDTR_DONE);
				switch (spi->bus_width) {
				case MSG_EXT_WDTR_BUS_32_BIT:
				case MSG_EXT_WDTR_BUS_16_BIT:
					wdtrenb |= target_mask;
					break;
				case MSG_EXT_WDTR_BUS_8_BIT:
				default:
					wdtrenb &= ~target_mask;
					break;
				}
				if (wdtrenb != wdtrenb_orig) {
					adw_lram_write_16(adw,
							  ADW_MC_WDTR_ABLE,
							  wdtrenb);
					wdtrdone &= ~target_mask;
					adw_lram_write_16(adw,
							  ADW_MC_WDTR_DONE,
							  wdtrdone);
					/* Wide negotiation forces async */
					sdtrdone &= ~target_mask;
					adw_lram_write_16(adw,
							  ADW_MC_SDTR_DONE,
							  sdtrdone);
				}
			}

			if (((spi->valid & CTS_SPI_VALID_SYNC_RATE) != 0)
			 || ((spi->valid & CTS_SPI_VALID_SYNC_OFFSET) != 0)) {
				u_int sdtr_orig;
				u_int sdtr;
				u_int sdtrable_orig;
				u_int sdtrable;

				sdtr = adw_get_chip_sdtr(adw,
							 ccb->ccb_h.target_id);
				sdtr_orig = sdtr;
				sdtrable = adw_lram_read_16(adw,
							    ADW_MC_SDTR_ABLE);
				sdtrable_orig = sdtrable;

				if ((spi->valid
				   & CTS_SPI_VALID_SYNC_RATE) != 0) {
					sdtr =
					    adw_find_sdtr(adw,
							  spi->sync_period);
				}

				if ((spi->valid
				   & CTS_SPI_VALID_SYNC_OFFSET) != 0) {
					if (spi->sync_offset == 0)
						sdtr = ADW_MC_SDTR_ASYNC;
				}

				if (sdtr == ADW_MC_SDTR_ASYNC)
					sdtrable &= ~target_mask;
				else
					sdtrable |= target_mask;
				if (sdtr != sdtr_orig
				 || sdtrable != sdtrable_orig) {
					adw_set_chip_sdtr(adw,
							  ccb->ccb_h.target_id,
							  sdtr);
					sdtrdone &= ~target_mask;
					adw_lram_write_16(adw, ADW_MC_SDTR_ABLE,
							  sdtrable);
					adw_lram_write_16(adw, ADW_MC_SDTR_DONE,
							  sdtrdone);
				}
			}
		}
		ccb->ccb_h.status = CAM_REQ_CMP;
		xpt_done(ccb);
		break;
	}
	case XPT_GET_TRAN_SETTINGS:
	/* Get default/user set transfer settings for the target */
	{
		struct ccb_trans_settings_scsi *scsi;
		struct ccb_trans_settings_spi *spi;
		struct ccb_trans_settings *cts;
		u_int target_mask;

		cts = &ccb->cts;
		target_mask = 0x01 << ccb->ccb_h.target_id;
		cts->protocol = PROTO_SCSI;
		cts->protocol_version = SCSI_REV_2;
		cts->transport = XPORT_SPI;
		cts->transport_version = 2;

		scsi = &cts->proto_specific.scsi;
		spi = &cts->xport_specific.spi;
		if (cts->type == CTS_TYPE_USER_SETTINGS) {
			u_int mc_sdtr;

			spi->flags = 0;
			if ((adw->user_discenb & target_mask) != 0)
				spi->flags |= CTS_SPI_FLAGS_DISC_ENB;

			if ((adw->user_tagenb & target_mask) != 0)
				scsi->flags |= CTS_SCSI_FLAGS_TAG_ENB;

			if ((adw->user_wdtr & target_mask) != 0)
				spi->bus_width = MSG_EXT_WDTR_BUS_16_BIT;
			else
				spi->bus_width = MSG_EXT_WDTR_BUS_8_BIT;

			mc_sdtr = adw_get_user_sdtr(adw, ccb->ccb_h.target_id);
			spi->sync_period = adw_find_period(adw, mc_sdtr);
			if (spi->sync_period != 0)
				spi->sync_offset = 15; /* XXX ??? */
			else
				spi->sync_offset = 0;

		} else {
			u_int targ_tinfo;

			spi->flags = 0;
			if ((adw_lram_read_16(adw, ADW_MC_DISC_ENABLE)
			  & target_mask) != 0)
				spi->flags |= CTS_SPI_FLAGS_DISC_ENB;

			if ((adw->tagenb & target_mask) != 0)
				scsi->flags |= CTS_SCSI_FLAGS_TAG_ENB;

			targ_tinfo =
			    adw_lram_read_16(adw,
					     ADW_MC_DEVICE_HSHK_CFG_TABLE
					     + (2 * ccb->ccb_h.target_id));

			if ((targ_tinfo & ADW_HSHK_CFG_WIDE_XFR) != 0)
				spi->bus_width = MSG_EXT_WDTR_BUS_16_BIT;
			else
				spi->bus_width = MSG_EXT_WDTR_BUS_8_BIT;

			spi->sync_period =
			    adw_hshk_cfg_period_factor(targ_tinfo);

			spi->sync_offset = targ_tinfo & ADW_HSHK_CFG_OFFSET;
			if (spi->sync_period == 0)
				spi->sync_offset = 0;

			if (spi->sync_offset == 0)
				spi->sync_period = 0;
		}

		spi->valid = CTS_SPI_VALID_SYNC_RATE
			   | CTS_SPI_VALID_SYNC_OFFSET
			   | CTS_SPI_VALID_BUS_WIDTH
			   | CTS_SPI_VALID_DISC;
		scsi->valid = CTS_SCSI_VALID_TQ;
		ccb->ccb_h.status = CAM_REQ_CMP;
		xpt_done(ccb);
		break;
	}
	case XPT_CALC_GEOMETRY:
	{
		/*
		 * XXX Use Adaptec translation until I find out how to
		 *     get this information from the card.
		 */
		cam_calc_geometry(&ccb->ccg, /*extended*/1);
		xpt_done(ccb);
		break;
	}
	case XPT_RESET_BUS:		/* Reset the specified SCSI bus */
	{
		int failure;

		failure = adw_reset_bus(adw);
		if (failure != 0) {
			ccb->ccb_h.status = CAM_REQ_CMP_ERR;
		} else {
			if (bootverbose) {
				xpt_print_path(adw->path);
				printf("Bus Reset Delivered\n");
			}
			ccb->ccb_h.status = CAM_REQ_CMP;
		}
		xpt_done(ccb);
		break;
	}
	case XPT_TERM_IO:		/* Terminate the I/O process */
		/* XXX Implement */
		ccb->ccb_h.status = CAM_REQ_INVALID;
		xpt_done(ccb);
		break;
	case XPT_PATH_INQ:		/* Path routing inquiry */
	{
		struct ccb_pathinq *cpi = &ccb->cpi;

		cpi->version_num = 1;
		cpi->hba_inquiry = PI_WIDE_16|PI_SDTR_ABLE|PI_TAG_ABLE;
		cpi->target_sprt = 0;
		cpi->hba_misc = 0;
		cpi->hba_eng_cnt = 0;
		cpi->max_target = ADW_MAX_TID;
		cpi->max_lun = ADW_MAX_LUN;
		cpi->initiator_id = adw->initiator_id;
		cpi->bus_id = cam_sim_bus(sim);
		cpi->base_transfer_speed = 3300;
		strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
		strncpy(cpi->hba_vid, "AdvanSys", HBA_IDLEN);
		strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
		cpi->unit_number = cam_sim_unit(sim);
		cpi->transport = XPORT_SPI;
		cpi->transport_version = 2;
		cpi->protocol = PROTO_SCSI;
		cpi->protocol_version = SCSI_REV_2;
		cpi->ccb_h.status = CAM_REQ_CMP;
		xpt_done(ccb);
		break;
	}
	default:
		ccb->ccb_h.status = CAM_REQ_INVALID;
		xpt_done(ccb);
		break;
	}
}

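/*
 * Polled interrupt service for CAM, used when interrupts cannot be
 * relied upon (e.g. while the kernel is dumping).
 */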
static void
adw_poll(struct cam_sim *sim)
{
	adw_intr_locked(cam_sim_softc(sim));
}

static void
adw_async(void *callback_arg, u_int32_t code, struct cam_path *path, void *arg)
{
}

struct adw_softc *
adw_alloc(device_t dev, struct resource *regs, int regs_type, int regs_id)
{
	struct adw_softc *adw;

	adw = device_get_softc(dev);
	LIST_INIT(&adw->pending_ccbs);
	SLIST_INIT(&adw->sg_maps);
	mtx_init(&adw->lock, "adw", NULL, MTX_DEF);
	adw->device = dev;
	adw->regs_res_type = regs_type;
	adw->regs_res_id = regs_id;
	adw->regs = regs;
	return(adw);
}

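/*
 * Tear down driver resources.  init_level records how far adw_init()
 * progressed, and the switch intentionally falls through from the
 * highest level reached so each stage is undone in reverse order.
 */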
void
adw_free(struct adw_softc *adw)
{
	switch (adw->init_level) {
	case 9:
	{
		struct sg_map_node *sg_map;

		while ((sg_map = SLIST_FIRST(&adw->sg_maps)) != NULL) {
			SLIST_REMOVE_HEAD(&adw->sg_maps, links);
			bus_dmamap_unload(adw->sg_dmat,
					  sg_map->sg_dmamap);
			bus_dmamem_free(adw->sg_dmat, sg_map->sg_vaddr,
					sg_map->sg_dmamap);
			free(sg_map, M_DEVBUF);
		}
		bus_dma_tag_destroy(adw->sg_dmat);
	}
	case 8:
		bus_dmamap_unload(adw->acb_dmat, adw->acb_dmamap);
	case 7:
		bus_dmamem_free(adw->acb_dmat, adw->acbs,
				adw->acb_dmamap);
		bus_dmamap_destroy(adw->acb_dmat, adw->acb_dmamap);
	case 6:
		bus_dma_tag_destroy(adw->acb_dmat);
	case 5:
		bus_dmamap_unload(adw->carrier_dmat, adw->carrier_dmamap);
	case 4:
		bus_dmamem_free(adw->carrier_dmat, adw->carriers,
				adw->carrier_dmamap);
		bus_dmamap_destroy(adw->carrier_dmat, adw->carrier_dmamap);
	case 3:
		bus_dma_tag_destroy(adw->carrier_dmat);
	case 2:
		bus_dma_tag_destroy(adw->buffer_dmat);
	case 1:
		bus_dma_tag_destroy(adw->parent_dmat);
	case 0:
		break;
	}

	if (adw->regs != NULL)
		bus_release_resource(adw->device,
				     adw->regs_res_type,
				     adw->regs_res_id,
				     adw->regs);

	if (adw->irq != NULL)
		bus_release_resource(adw->device,
				     adw->irq_res_type,
				     0, adw->irq);

	if (adw->sim != NULL) {
		if (adw->path != NULL) {
			xpt_async(AC_LOST_DEVICE, adw->path, NULL);
			xpt_free_path(adw->path);
		}
		xpt_bus_deregister(cam_sim_path(adw->sim));
		cam_sim_free(adw->sim, /*free_devq*/TRUE);
	}
	mtx_destroy(&adw->lock);
}

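/*
 * Read the controller EEPROM, falling back to the default settings
 * if its checksum is bad, then size the ACB pool and create the DMA
 * tags and permanently mapped data structures (carriers, ACBs, and
 * S/G block pages) that the firmware interface requires.
 */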
int
adw_init(struct adw_softc *adw)
{
	struct adw_eeprom eep_config;
	u_int	  tid;
	u_int	  i;
	u_int16_t checksum;
	u_int16_t scsicfg1;

	checksum = adw_eeprom_read(adw, &eep_config);
	bcopy(eep_config.serial_number, adw->serial_number,
	      sizeof(adw->serial_number));
	if (checksum != eep_config.checksum) {
		u_int16_t serial_number[3];

		adw->flags |= ADW_EEPROM_FAILED;
		device_printf(adw->device,
		    "EEPROM checksum failed.  Restoring Defaults\n");

		/*
		 * Restore the default EEPROM settings.
		 * Assume the 6 byte board serial number that was read
		 * from EEPROM is correct even if the EEPROM checksum
		 * failed.
		 */
		bcopy(adw->default_eeprom, &eep_config, sizeof(eep_config));
		bcopy(adw->serial_number, eep_config.serial_number,
		      sizeof(serial_number));
		adw_eeprom_write(adw, &eep_config);
	}

	/* Pull eeprom information into our softc. */
	adw->bios_ctrl = eep_config.bios_ctrl;
	adw->user_wdtr = eep_config.wdtr_able;
	for (tid = 0; tid < ADW_MAX_TID; tid++) {
		u_int	  mc_sdtr;
		u_int16_t tid_mask;

		tid_mask = 0x1 << tid;
		if ((adw->features & ADW_ULTRA) != 0) {
			/*
			 * Ultra chips store sdtr and ultraenb
			 * bits in their seeprom, so we must
			 * construct valid mc_sdtr entries for
			 * them indirectly.
			 */
			if (eep_config.sync1.sync_enable & tid_mask) {
				if (eep_config.sync2.ultra_enable & tid_mask)
					mc_sdtr = ADW_MC_SDTR_20;
				else
					mc_sdtr = ADW_MC_SDTR_10;
			} else
				mc_sdtr = ADW_MC_SDTR_ASYNC;
		} else {
			switch (ADW_TARGET_GROUP(tid)) {
			case 3:
				mc_sdtr = eep_config.sync4.sdtr4;
				break;
			case 2:
				mc_sdtr = eep_config.sync3.sdtr3;
				break;
			case 1:
				mc_sdtr = eep_config.sync2.sdtr2;
				break;
			default: /* Shut up compiler */
			case 0:
				mc_sdtr = eep_config.sync1.sdtr1;
				break;
			}
			mc_sdtr >>= ADW_TARGET_GROUP_SHIFT(tid);
			mc_sdtr &= 0xFF;
		}
		adw_set_user_sdtr(adw, tid, mc_sdtr);
	}
	adw->user_tagenb = eep_config.tagqng_able;
	adw->user_discenb = eep_config.disc_enable;
	adw->max_acbs = eep_config.max_host_qng;
	adw->initiator_id = (eep_config.adapter_scsi_id & ADW_MAX_TID);

	/*
	 * Sanity check the number of host openings.
	 */
	if (adw->max_acbs > ADW_DEF_MAX_HOST_QNG)
		adw->max_acbs = ADW_DEF_MAX_HOST_QNG;
	else if (adw->max_acbs < ADW_DEF_MIN_HOST_QNG) {
		/* If the value is zero, assume it is uninitialized. */
		if (adw->max_acbs == 0)
			adw->max_acbs = ADW_DEF_MAX_HOST_QNG;
		else
			adw->max_acbs = ADW_DEF_MIN_HOST_QNG;
	}

	scsicfg1 = 0;
	if ((adw->features & ADW_ULTRA2) != 0) {
		switch (eep_config.termination_lvd) {
		default:
			device_printf(adw->device,
			    "Invalid EEPROM LVD Termination Settings.\n");
			device_printf(adw->device,
			    "Reverting to Automatic LVD Termination\n");
			/* FALLTHROUGH */
		case ADW_EEPROM_TERM_AUTO:
			break;
		case ADW_EEPROM_TERM_BOTH_ON:
			scsicfg1 |= ADW2_SCSI_CFG1_TERM_LVD_LO;
			/* FALLTHROUGH */
		case ADW_EEPROM_TERM_HIGH_ON:
			scsicfg1 |= ADW2_SCSI_CFG1_TERM_LVD_HI;
			/* FALLTHROUGH */
		case ADW_EEPROM_TERM_OFF:
			scsicfg1 |= ADW2_SCSI_CFG1_DIS_TERM_DRV;
			break;
		}
	}

	switch (eep_config.termination_se) {
	default:
		device_printf(adw->device,
		    "Invalid SE EEPROM Termination Settings.\n");
		device_printf(adw->device,
		    "Reverting to Automatic SE Termination\n");
		/* FALLTHROUGH */
	case ADW_EEPROM_TERM_AUTO:
		break;
	case ADW_EEPROM_TERM_BOTH_ON:
		scsicfg1 |= ADW_SCSI_CFG1_TERM_CTL_L;
		/* FALLTHROUGH */
	case ADW_EEPROM_TERM_HIGH_ON:
		scsicfg1 |= ADW_SCSI_CFG1_TERM_CTL_H;
		/* FALLTHROUGH */
	case ADW_EEPROM_TERM_OFF:
		scsicfg1 |= ADW_SCSI_CFG1_TERM_CTL_MANUAL;
		break;
	}
	device_printf(adw->device, "SCSI ID %d, ", adw->initiator_id);

	/* DMA tag for mapping buffers into device visible space. */
	if (bus_dma_tag_create(
			/* parent	*/ adw->parent_dmat,
			/* alignment	*/ 1,
			/* boundary	*/ 0,
			/* lowaddr	*/ BUS_SPACE_MAXADDR_32BIT,
			/* highaddr	*/ BUS_SPACE_MAXADDR,
			/* filter	*/ NULL,
			/* filterarg	*/ NULL,
			/* maxsize	*/ MAXBSIZE,
			/* nsegments	*/ ADW_SGSIZE,
			/* maxsegsz	*/ BUS_SPACE_MAXSIZE_32BIT,
			/* flags	*/ BUS_DMA_ALLOCNOW,
			/* lockfunc	*/ busdma_lock_mutex,
			/* lockarg	*/ &adw->lock,
			&adw->buffer_dmat) != 0) {
		return (ENOMEM);
	}

	adw->init_level++;

	/* DMA tag for our ccb carrier structures */
	if (bus_dma_tag_create(
			/* parent	*/ adw->parent_dmat,
			/* alignment	*/ 0x10,
			/* boundary	*/ 0,
			/* lowaddr	*/ BUS_SPACE_MAXADDR_32BIT,
			/* highaddr	*/ BUS_SPACE_MAXADDR,
			/* filter	*/ NULL,
			/* filterarg	*/ NULL,
			/* maxsize	*/ (adw->max_acbs +
					    ADW_NUM_CARRIER_QUEUES + 1) *
					    sizeof(struct adw_carrier),
			/* nsegments	*/ 1,
			/* maxsegsz	*/ BUS_SPACE_MAXSIZE_32BIT,
			/* flags	*/ 0,
			/* lockfunc	*/ NULL,
			/* lockarg	*/ NULL,
			&adw->carrier_dmat) != 0) {
		return (ENOMEM);
	}

	adw->init_level++;

	/* Allocation for our ccb carrier structures */
	if (bus_dmamem_alloc(adw->carrier_dmat, (void **)&adw->carriers,
			     BUS_DMA_NOWAIT, &adw->carrier_dmamap) != 0) {
		return (ENOMEM);
	}

	adw->init_level++;

	/* And permanently map them */
	bus_dmamap_load(adw->carrier_dmat, adw->carrier_dmamap,
			adw->carriers,
			(adw->max_acbs + ADW_NUM_CARRIER_QUEUES + 1)
			 * sizeof(struct adw_carrier),
			adwmapmem, &adw->carrier_busbase, /*flags*/0);

	/* Clear them out. */
	bzero(adw->carriers, (adw->max_acbs + ADW_NUM_CARRIER_QUEUES + 1)
			     * sizeof(struct adw_carrier));

	/* Setup our free carrier list */
	adw->free_carriers = adw->carriers;
	for (i = 0; i < adw->max_acbs + ADW_NUM_CARRIER_QUEUES; i++) {
		adw->carriers[i].carr_offset =
			carriervtobo(adw, &adw->carriers[i]);
		adw->carriers[i].carr_ba =
			carriervtob(adw, &adw->carriers[i]);
		adw->carriers[i].areq_ba = 0;
		adw->carriers[i].next_ba =
			carriervtobo(adw, &adw->carriers[i+1]);
	}
	/* Terminal carrier.  Never leaves the freelist */
	adw->carriers[i].carr_offset =
		carriervtobo(adw, &adw->carriers[i]);
	adw->carriers[i].carr_ba =
		carriervtob(adw, &adw->carriers[i]);
	adw->carriers[i].areq_ba = 0;
	adw->carriers[i].next_ba = ~0;

	adw->init_level++;

	/* DMA tag for our acb structures */
	if (bus_dma_tag_create(
			/* parent	*/ adw->parent_dmat,
			/* alignment	*/ 1,
			/* boundary	*/ 0,
			/* lowaddr	*/ BUS_SPACE_MAXADDR,
			/* highaddr	*/ BUS_SPACE_MAXADDR,
			/* filter	*/ NULL,
			/* filterarg	*/ NULL,
			/* maxsize	*/ adw->max_acbs * sizeof(struct acb),
			/* nsegments	*/ 1,
			/* maxsegsz	*/ BUS_SPACE_MAXSIZE_32BIT,
			/* flags	*/ 0,
			/* lockfunc	*/ NULL,
			/* lockarg	*/ NULL,
			&adw->acb_dmat) != 0) {
		return (ENOMEM);
	}

	adw->init_level++;

	/* Allocation for our ccbs */
	if (bus_dmamem_alloc(adw->acb_dmat, (void **)&adw->acbs,
			     BUS_DMA_NOWAIT, &adw->acb_dmamap) != 0)
		return (ENOMEM);

	adw->init_level++;

	/* And permanently map them */
	bus_dmamap_load(adw->acb_dmat, adw->acb_dmamap,
			adw->acbs,
			adw->max_acbs * sizeof(struct acb),
			adwmapmem, &adw->acb_busbase, /*flags*/0);

	/* Clear them out. */
	bzero(adw->acbs, adw->max_acbs * sizeof(struct acb));

	/* DMA tag for our S/G structures.  We allocate in page sized chunks */
	if (bus_dma_tag_create(
			/* parent	*/ adw->parent_dmat,
			/* alignment	*/ 1,
			/* boundary	*/ 0,
			/* lowaddr	*/ BUS_SPACE_MAXADDR,
			/* highaddr	*/ BUS_SPACE_MAXADDR,
			/* filter	*/ NULL,
			/* filterarg	*/ NULL,
			/* maxsize	*/ PAGE_SIZE,
			/* nsegments	*/ 1,
			/* maxsegsz	*/ BUS_SPACE_MAXSIZE_32BIT,
			/* flags	*/ 0,
			/* lockfunc	*/ NULL,
			/* lockarg	*/ NULL,
			&adw->sg_dmat) != 0) {
		return (ENOMEM);
	}

	adw->init_level++;

	/* Allocate our first batch of ccbs */
	mtx_lock(&adw->lock);
	if (adwallocacbs(adw) == 0) {
		mtx_unlock(&adw->lock);
		return (ENOMEM);
	}

	if (adw_init_chip(adw, scsicfg1) != 0) {
		mtx_unlock(&adw->lock);
		return (ENXIO);
	}

	printf("Queue Depth %d\n", adw->max_acbs);
	mtx_unlock(&adw->lock);

	return (0);
}

/*
 * Attach all the sub-devices we can find
 */
int
adw_attach(struct adw_softc *adw)
{
	struct ccb_setasync csa;
	struct cam_devq *devq;
	int error;

	/* Hook up our interrupt handler */
	error = bus_setup_intr(adw->device, adw->irq,
	    INTR_TYPE_CAM | INTR_ENTROPY | INTR_MPSAFE, NULL, adw_intr, adw,
	    &adw->ih);
	if (error != 0) {
		device_printf(adw->device, "bus_setup_intr() failed: %d\n",
			      error);
		return (error);
	}

	/* Start the Risc processor now that we are fully configured. */
	adw_outw(adw, ADW_RISC_CSR, ADW_RISC_CSR_RUN);

	/*
	 * Create the device queue for our SIM.
	 */
	devq = cam_simq_alloc(adw->max_acbs);
	if (devq == NULL)
		return (ENOMEM);

	/*
	 * Construct our SIM entry.
	 */
	adw->sim = cam_sim_alloc(adw_action, adw_poll, "adw", adw,
	    device_get_unit(adw->device), &adw->lock, 1, adw->max_acbs, devq);
	if (adw->sim == NULL)
		return (ENOMEM);

	/*
	 * Register the bus.
	 */
	mtx_lock(&adw->lock);
	if (xpt_bus_register(adw->sim, adw->device, 0) != CAM_SUCCESS) {
		cam_sim_free(adw->sim, /*free devq*/TRUE);
		error = ENOMEM;
		goto fail;
	}

	if (xpt_create_path(&adw->path, /*periph*/NULL, cam_sim_path(adw->sim),
			    CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD)
	   == CAM_REQ_CMP) {
		xpt_setup_ccb(&csa.ccb_h, adw->path, /*priority*/5);
		csa.ccb_h.func_code = XPT_SASYNC_CB;
		csa.event_enable = AC_LOST_DEVICE;
		csa.callback = adw_async;
		csa.callback_arg = adw;
		xpt_action((union ccb *)&csa);
	}

fail:
	mtx_unlock(&adw->lock);
	return (error);
}

void
adw_intr(void *arg)
{
	struct adw_softc *adw;

	adw = arg;
	mtx_lock(&adw->lock);
	adw_intr_locked(adw);
	mtx_unlock(&adw->lock);
}

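/*
 * Interrupt handler proper.  Acknowledges the chip interrupt,
 * services any asynchronous microcode event, and then completes
 * every request the firmware has posted to the response queue.
 */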
void
adw_intr_locked(struct adw_softc *adw)
{
	u_int int_stat;

	if ((adw_inw(adw, ADW_CTRL_REG) & ADW_CTRL_REG_HOST_INTR) == 0)
		return;

	/* Reading the register clears the interrupt. */
	int_stat = adw_inb(adw, ADW_INTR_STATUS_REG);

	if ((int_stat & ADW_INTR_STATUS_INTRB) != 0) {
		u_int intrb_code;

		/* Async Microcode Event */
		intrb_code = adw_lram_read_8(adw, ADW_MC_INTRB_CODE);
		switch (intrb_code) {
		case ADW_ASYNC_CARRIER_READY_FAILURE:
			/*
			 * The RISC missed our update of
			 * the commandq.
			 */
			if (LIST_FIRST(&adw->pending_ccbs) != NULL)
				adw_tickle_risc(adw, ADW_TICKLE_A);
			break;
		case ADW_ASYNC_SCSI_BUS_RESET_DET:
			/*
			 * The firmware detected a SCSI Bus reset.
			 */
			device_printf(adw->device, "Someone Reset the Bus\n");
			adw_handle_bus_reset(adw, /*initiated*/FALSE);
			break;
		case ADW_ASYNC_RDMA_FAILURE:
			/*
			 * Handle RDMA failure by resetting the
			 * SCSI Bus and chip.
			 */
#if 0 /* XXX */
			AdvResetChipAndSB(adv_dvc_varp);
#endif
			break;

		case ADW_ASYNC_HOST_SCSI_BUS_RESET:
			/*
			 * Host generated SCSI bus reset occurred.
			 */
			adw_handle_bus_reset(adw, /*initiated*/TRUE);
			break;
		default:
			printf("adw_intr: unknown async code 0x%x\n",
			       intrb_code);
			break;
		}
	}

	/*
	 * Run down the ResponseQ.
	 */
	while ((adw->responseq->next_ba & ADW_RQ_DONE) != 0) {
		struct adw_carrier *free_carrier;
		struct acb *acb;
		union ccb *ccb;

#if 0
		printf("0x%x, 0x%x, 0x%x, 0x%x\n",
		       adw->responseq->carr_offset,
		       adw->responseq->carr_ba,
		       adw->responseq->areq_ba,
		       adw->responseq->next_ba);
#endif
		/*
		 * The firmware copies the adw_scsi_req_q.acb_baddr
		 * field into the areq_ba field of the carrier.
		 */
		acb = acbbotov(adw, adw->responseq->areq_ba);

		/*
		 * The least significant four bits of the next_ba
		 * field are used as flags.  Mask them out and then
		 * advance through the list.
		 */
		free_carrier = adw->responseq;
		adw->responseq =
		    carrierbotov(adw, free_carrier->next_ba & ADW_NEXT_BA_MASK);
		free_carrier->next_ba = adw->free_carriers->carr_offset;
		adw->free_carriers = free_carrier;

		/* Process CCB */
		ccb = acb->ccb;
		callout_stop(&acb->timer);
		if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
			bus_dmasync_op_t op;

			if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN)
				op = BUS_DMASYNC_POSTREAD;
			else
				op = BUS_DMASYNC_POSTWRITE;
			bus_dmamap_sync(adw->buffer_dmat, acb->dmamap, op);
			bus_dmamap_unload(adw->buffer_dmat, acb->dmamap);
			ccb->csio.resid = acb->queue.data_cnt;
		} else
			ccb->csio.resid = 0;

		/* Common Cases inline... */
		if (acb->queue.host_status == QHSTA_NO_ERROR
		 && (acb->queue.done_status == QD_NO_ERROR
		  || acb->queue.done_status == QD_WITH_ERROR)) {
			ccb->csio.scsi_status = acb->queue.scsi_status;
			ccb->ccb_h.status = 0;
			switch (ccb->csio.scsi_status) {
			case SCSI_STATUS_OK:
				ccb->ccb_h.status |= CAM_REQ_CMP;
				break;
			case SCSI_STATUS_CHECK_COND:
			case SCSI_STATUS_CMD_TERMINATED:
				bcopy(&acb->sense_data, &ccb->csio.sense_data,
				      ccb->csio.sense_len);
				ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
				ccb->csio.sense_resid = acb->queue.sense_len;
				/* FALLTHROUGH */
			default:
				ccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR
						  |  CAM_DEV_QFRZN;
				xpt_freeze_devq(ccb->ccb_h.path, /*count*/1);
				break;
			}
			adwfreeacb(adw, acb);
			xpt_done(ccb);
		} else {
			adwprocesserror(adw, acb);
		}
	}
}

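/*
 * Translate a failed request's host adapter status into the
 * corresponding CAM status and complete the CCB, freezing the
 * device queue for any non-successful completion.
 */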
static void
adwprocesserror(struct adw_softc *adw, struct acb *acb)
{
	union ccb *ccb;

	ccb = acb->ccb;
	if (acb->queue.done_status == QD_ABORTED_BY_HOST) {
		ccb->ccb_h.status = CAM_REQ_ABORTED;
	} else {
		switch (acb->queue.host_status) {
		case QHSTA_M_SEL_TIMEOUT:
			ccb->ccb_h.status = CAM_SEL_TIMEOUT;
			break;
		case QHSTA_M_SXFR_OFF_UFLW:
		case QHSTA_M_SXFR_OFF_OFLW:
		case QHSTA_M_DATA_OVER_RUN:
			ccb->ccb_h.status = CAM_DATA_RUN_ERR;
			break;
		case QHSTA_M_SXFR_DESELECTED:
		case QHSTA_M_UNEXPECTED_BUS_FREE:
			ccb->ccb_h.status = CAM_UNEXP_BUSFREE;
			break;
		case QHSTA_M_SCSI_BUS_RESET:
		case QHSTA_M_SCSI_BUS_RESET_UNSOL:
			ccb->ccb_h.status = CAM_SCSI_BUS_RESET;
			break;
		case QHSTA_M_BUS_DEVICE_RESET:
			ccb->ccb_h.status = CAM_BDR_SENT;
			break;
		case QHSTA_M_QUEUE_ABORTED:
			/* BDR or Bus Reset */
			xpt_print_path(adw->path);
			printf("Saw Queue Aborted\n");
			ccb->ccb_h.status = adw->last_reset;
			break;
		case QHSTA_M_SXFR_SDMA_ERR:
		case QHSTA_M_SXFR_SXFR_PERR:
		case QHSTA_M_RDMA_PERR:
			ccb->ccb_h.status = CAM_UNCOR_PARITY;
			break;
		case QHSTA_M_WTM_TIMEOUT:
		case QHSTA_M_SXFR_WD_TMO:
		{
			/* The SCSI bus hung in a phase */
			xpt_print_path(adw->path);
			printf("Watch Dog timer expired.  Resetting bus\n");
			adw_reset_bus(adw);
			break;
		}
		case QHSTA_M_SXFR_XFR_PH_ERR:
			ccb->ccb_h.status = CAM_SEQUENCE_FAIL;
			break;
		case QHSTA_M_SXFR_UNKNOWN_ERROR:
			break;
		case QHSTA_M_BAD_CMPL_STATUS_IN:
			/* No command complete after a status message */
			ccb->ccb_h.status = CAM_SEQUENCE_FAIL;
			break;
		case QHSTA_M_AUTO_REQ_SENSE_FAIL:
			ccb->ccb_h.status = CAM_AUTOSENSE_FAIL;
			break;
		case QHSTA_M_INVALID_DEVICE:
			ccb->ccb_h.status = CAM_PATH_INVALID;
			break;
		case QHSTA_M_NO_AUTO_REQ_SENSE:
			/*
			 * User didn't request sense, but we got a
			 * check condition.
			 */
			ccb->csio.scsi_status = acb->queue.scsi_status;
			ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
			break;
		default:
			panic("%s: Unhandled Host status error %x",
			      device_get_nameunit(adw->device),
			      acb->queue.host_status);
			/* NOTREACHED */
		}
	}
	if ((acb->state & ACB_RECOVERY_ACB) != 0) {
		if (ccb->ccb_h.status == CAM_SCSI_BUS_RESET
		 || ccb->ccb_h.status == CAM_BDR_SENT)
			ccb->ccb_h.status = CAM_CMD_TIMEOUT;
	}
	if (ccb->ccb_h.status != CAM_REQ_CMP) {
		xpt_freeze_devq(ccb->ccb_h.path, /*count*/1);
		ccb->ccb_h.status |= CAM_DEV_QFRZN;
	}
	adwfreeacb(adw, acb);
	xpt_done(ccb);
}

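/*
 * Request timeout handler.  Recovery is attempted with a Bus Device
 * Reset to the offending target; if that idle command fails, the
 * whole SCSI bus is reset instead.
 */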
static void
adwtimeout(void *arg)
{
	struct acb	     *acb;
	union  ccb	     *ccb;
	struct adw_softc     *adw;
	adw_idle_cmd_status_t status;
	int		      target_id;

	acb = (struct acb *)arg;
	ccb = acb->ccb;
	adw = (struct adw_softc *)ccb->ccb_h.ccb_adw_ptr;
	xpt_print_path(ccb->ccb_h.path);
	printf("ACB %p - timed out\n", (void *)acb);

	mtx_assert(&adw->lock, MA_OWNED);

	if ((acb->state & ACB_ACTIVE) == 0) {
		xpt_print_path(ccb->ccb_h.path);
		printf("ACB %p - timed out CCB already completed\n",
		       (void *)acb);
		return;
	}

	acb->state |= ACB_RECOVERY_ACB;
	target_id = ccb->ccb_h.target_id;

	/* Attempt a BDR first */
	status = adw_idle_cmd_send(adw, ADW_IDLE_CMD_DEVICE_RESET,
				   ccb->ccb_h.target_id);
	if (status == ADW_IDLE_CMD_SUCCESS) {
		device_printf(adw->device,
		    "BDR Delivered.  No longer in timeout\n");
		adw_handle_device_reset(adw, target_id);
	} else {
		adw_reset_bus(adw);
		xpt_print_path(adw->path);
		printf("Bus Reset Delivered.  No longer in timeout\n");
	}
}

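/*
 * Notify CAM that a Bus Device Reset was delivered to a target so
 * outstanding transactions for that device can be cleaned up.
 */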
static void
adw_handle_device_reset(struct adw_softc *adw, u_int target)
{
	struct cam_path *path;
	cam_status error;

	error = xpt_create_path(&path, /*periph*/NULL, cam_sim_path(adw->sim),
				target, CAM_LUN_WILDCARD);

	if (error == CAM_REQ_CMP) {
		xpt_async(AC_SENT_BDR, path, NULL);
		xpt_free_path(path);
	}
	adw->last_reset = CAM_BDR_SENT;
}

static void
adw_handle_bus_reset(struct adw_softc *adw, int initiated)
{
	if (initiated) {
		/*
		 * The microcode currently sets the SCSI Bus Reset signal
		 * while handling the AscSendIdleCmd() IDLE_CMD_SCSI_RESET
		 * command above.  But the SCSI Bus Reset Hold Time in the
		 * microcode is not deterministic (it may in fact be for less
		 * than the SCSI Spec. minimum of 25 us).  Therefore on return
		 * the Adv Library sets the SCSI Bus Reset signal for
		 * ADW_SCSI_RESET_HOLD_TIME_US, which is defined to be greater
		 * than 25 us.
		 */
		u_int scsi_ctrl;

		scsi_ctrl = adw_inw(adw, ADW_SCSI_CTRL) & ~ADW_SCSI_CTRL_RSTOUT;
		adw_outw(adw, ADW_SCSI_CTRL, scsi_ctrl | ADW_SCSI_CTRL_RSTOUT);
		DELAY(ADW_SCSI_RESET_HOLD_TIME_US);
		adw_outw(adw, ADW_SCSI_CTRL, scsi_ctrl);

		/*
		 * We will perform the async notification when the
		 * SCSI Reset interrupt occurs.
		 */
	} else
		xpt_async(AC_BUS_RESET, adw->path, NULL);
	adw->last_reset = CAM_SCSI_BUS_RESET;
}
MODULE_DEPEND(adw, cam, 1, 1, 1);