FreeBSD/Linux Kernel Cross Reference
sys/dev/ahb/ahb.c
1 /*-
2 * CAM SCSI device driver for the Adaptec 174X SCSI Host adapter
3 *
4 * Copyright (c) 1998 Justin T. Gibbs
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice immediately at the beginning of the file, without modification,
12 * this list of conditions, and the following disclaimer.
13 * 2. The name of the author may not be used to endorse or promote products
14 * derived from this software without specific prior written permission.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
20 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26 * SUCH DAMAGE.
27 *
28 * $FreeBSD$
29 */
30
31 #include <sys/param.h>
32 #include <sys/systm.h>
33 #include <sys/kernel.h>
34 #include <sys/malloc.h>
35 #include <sys/module.h>
36 #include <sys/lock.h>
37 #include <sys/mutex.h>
38 #include <sys/bus.h>
39
40 #include <machine/bus_pio.h>
41 #include <machine/bus.h>
42 #include <machine/resource.h>
43 #include <sys/rman.h>
44
45 #include <cam/cam.h>
46 #include <cam/cam_ccb.h>
47 #include <cam/cam_sim.h>
48 #include <cam/cam_xpt_sim.h>
49 #include <cam/cam_debug.h>
50
51 #include <cam/scsi/scsi_message.h>
52
53 #include <dev/eisa/eisaconf.h>
54
55 #include <dev/ahb/ahbreg.h>
56
57 #define ccb_ecb_ptr spriv_ptr0
58 #define ccb_ahb_ptr spriv_ptr1
59
60 #define ahb_inb(ahb, port) \
61 bus_space_read_1((ahb)->tag, (ahb)->bsh, port)
62
63 #define ahb_inl(ahb, port) \
64 bus_space_read_4((ahb)->tag, (ahb)->bsh, port)
65
66 #define ahb_outb(ahb, port, value) \
67 bus_space_write_1((ahb)->tag, (ahb)->bsh, port, value)
68
69 #define ahb_outl(ahb, port, value) \
70 bus_space_write_4((ahb)->tag, (ahb)->bsh, port, value)
71
72 static const char *ahbmatch(eisa_id_t type);
73 static struct ahb_softc *ahballoc(u_long unit, struct resource *res);
74 static void ahbfree(struct ahb_softc *ahb);
75 static int ahbreset(struct ahb_softc *ahb);
76 static void ahbmapecbs(void *arg, bus_dma_segment_t *segs,
77 int nseg, int error);
78 static int ahbxptattach(struct ahb_softc *ahb);
79 static void ahbhandleimmed(struct ahb_softc *ahb,
80 u_int32_t mbox, u_int intstat);
81 static void ahbcalcresid(struct ahb_softc *ahb,
82 struct ecb *ecb, union ccb *ccb);
83 static __inline void ahbdone(struct ahb_softc *ahb, u_int32_t mbox,
84 u_int intstat);
85 static void ahbintr(void *arg);
86 static bus_dmamap_callback_t ahbexecuteecb;
87 static void ahbaction(struct cam_sim *sim, union ccb *ccb);
88 static void ahbpoll(struct cam_sim *sim);
89
90 /* Our timeout handler */
91 static timeout_t ahbtimeout;
92
93 static __inline struct ecb* ahbecbget(struct ahb_softc *ahb);
94 static __inline void ahbecbfree(struct ahb_softc* ahb,
95 struct ecb* ecb);
96 static __inline u_int32_t ahbecbvtop(struct ahb_softc *ahb,
97 struct ecb *ecb);
98 static __inline struct ecb* ahbecbptov(struct ahb_softc *ahb,
99 u_int32_t ecb_addr);
100 static __inline u_int32_t ahbstatuspaddr(u_int32_t ecb_paddr);
101 static __inline u_int32_t ahbsensepaddr(u_int32_t ecb_paddr);
102 static __inline u_int32_t ahbsgpaddr(u_int32_t ecb_paddr);
103 static __inline void ahbqueuembox(struct ahb_softc *ahb,
104 u_int32_t mboxval,
105 u_int attn_code);
106
107 static __inline struct ecb*
108 ahbecbget(struct ahb_softc *ahb)
109 {
110 struct ecb* ecb;
111 int s;
112
113 s = splcam();
114 if ((ecb = SLIST_FIRST(&ahb->free_ecbs)) != NULL)
115 SLIST_REMOVE_HEAD(&ahb->free_ecbs, links);
116 splx(s);
117
118 return (ecb);
119 }
120
121 static __inline void
122 ahbecbfree(struct ahb_softc* ahb, struct ecb* ecb)
123 {
124 int s;
125
126 s = splcam();
127 ecb->state = ECB_FREE;
128 SLIST_INSERT_HEAD(&ahb->free_ecbs, ecb, links);
129 splx(s);
130 }
131
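/*
 * ECB address translation helpers.  The ECB array lives in a single
 * physically contiguous, DMA-visible allocation, so converting between
 * a kernel virtual ECB pointer and the 32-bit bus address handed to the
 * adapter (and back) is plain offset arithmetic against ecb_physbase.
 */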
132 static __inline u_int32_t
133 ahbecbvtop(struct ahb_softc *ahb, struct ecb *ecb)
134 {
135 return (ahb->ecb_physbase
136 + (u_int32_t)((caddr_t)ecb - (caddr_t)ahb->ecb_array));
137 }
138
139 static __inline struct ecb*
140 ahbecbptov(struct ahb_softc *ahb, u_int32_t ecb_addr)
141 {
142 return (ahb->ecb_array
143 + ((struct ecb*)(uintptr_t)ecb_addr
144 - (struct ecb*)(uintptr_t)ahb->ecb_physbase));
145 }
146
147 static __inline u_int32_t
148 ahbstatuspaddr(u_int32_t ecb_paddr)
149 {
150 return (ecb_paddr + offsetof(struct ecb, status));
151 }
152
153 static __inline u_int32_t
154 ahbsensepaddr(u_int32_t ecb_paddr)
155 {
156 return (ecb_paddr + offsetof(struct ecb, sense));
157 }
158
159 static __inline u_int32_t
160 ahbsgpaddr(u_int32_t ecb_paddr)
161 {
162 return (ecb_paddr + offsetof(struct ecb, sg_list));
163 }
164
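/*
 * Hand one command to the adapter: spin briefly until the host status
 * register reports the outgoing mailbox empty and the adapter not busy,
 * then write the 32-bit mailbox value and strobe the attention register
 * with the command/target code.
 */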
165 static __inline void
166 ahbqueuembox(struct ahb_softc *ahb, u_int32_t mboxval, u_int attn_code)
167 {
168 u_int loopmax = 300;
169 while (--loopmax) {
170 u_int status;
171
172 status = ahb_inb(ahb, HOSTSTAT);
173 if ((status & (HOSTSTAT_MBOX_EMPTY|HOSTSTAT_BUSY))
174 == HOSTSTAT_MBOX_EMPTY)
175 break;
176 DELAY(20);
177 }
178 if (loopmax == 0)
179 panic("ahb%ld: adapter not taking commands\n", ahb->unit);
180
181 ahb_outl(ahb, MBOXOUT0, mboxval);
182 ahb_outb(ahb, ATTN, attn_code);
183 }
184
185 static const char *
186 ahbmatch(eisa_id_t type)
187 {
188 switch(type & 0xfffffe00) {
189 case EISA_DEVICE_ID_ADAPTEC_1740:
190 return ("Adaptec 174x SCSI host adapter");
191 break;
192 default:
193 break;
194 }
195 return (NULL);
196 }
197
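/*
 * EISA probe: match the board by its EISA ID, register the slot's I/O
 * range, and decode the IRQ number and trigger mode from the INTDEF
 * register.
 */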
198 static int
199 ahbprobe(device_t dev)
200 {
201 const char *desc;
202 u_int32_t iobase;
203 u_int32_t irq;
204 u_int8_t intdef;
205 int shared;
206
207 desc = ahbmatch(eisa_get_id(dev));
208 if (!desc)
209 return (ENXIO);
210 device_set_desc(dev, desc);
211
212 iobase = (eisa_get_slot(dev) * EISA_SLOT_SIZE) +
213 AHB_EISA_SLOT_OFFSET;
214
215 eisa_add_iospace(dev, iobase, AHB_EISA_IOSIZE, RESVADDR_NONE);
216
217 intdef = inb(INTDEF + iobase);
218 switch (intdef & 0x7) {
219 case INT9:
220 irq = 9;
221 break;
222 case INT10:
223 irq = 10;
224 break;
225 case INT11:
226 irq = 11;
227 break;
228 case INT12:
229 irq = 12;
230 break;
231 case INT14:
232 irq = 14;
233 break;
234 case INT15:
235 irq = 15;
236 break;
237 default:
238 printf("Adaptec 174X at slot %d: illegal "
239 "irq setting %d\n", eisa_get_slot(dev),
240 (intdef & 0x7));
241 irq = 0;
242 break;
243 }
244 if (irq == 0)
245 return ENXIO;
246
247 shared = (inb(INTDEF + iobase) & INTLEVEL) ?
248 EISA_TRIGGER_LEVEL : EISA_TRIGGER_EDGE;
249
250 eisa_add_intr(dev, irq, shared);
251
252 return 0;
253 }
254
255 static int
256 ahbattach(device_t dev)
257 {
258 /*
259          * Allocate and set up all resources for this controller instance
260 */
261 struct ahb_softc *ahb;
262 struct ecb* next_ecb;
263 struct resource *io = 0;
264 struct resource *irq = 0;
265 int rid;
266 void *ih;
267
268 rid = 0;
269 io = bus_alloc_resource_any(dev, SYS_RES_IOPORT, &rid, RF_ACTIVE);
270 if (!io) {
271 device_printf(dev, "No I/O space?!\n");
272 return ENOMEM;
273 }
274
275 if ((ahb = ahballoc(device_get_unit(dev), io)) == NULL) {
276 goto error_exit2;
277 }
278
279 if (ahbreset(ahb) != 0)
280 goto error_exit;
281
282 rid = 0;
283 irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_ACTIVE);
284 if (!irq) {
285 device_printf(dev, "Can't allocate interrupt\n");
286 goto error_exit;
287 }
288
289 /*
290 * Create our DMA tags. These tags define the kinds of device
291 * accessible memory allocations and memory mappings we will
292 * need to perform during normal operation.
293 */
294 /* DMA tag for mapping buffers into device visible space. */
295 /* XXX Should be a child of the EISA bus dma tag */
296 if (bus_dma_tag_create( /* parent */ NULL,
297 /* alignment */ 1,
298 /* boundary */ 0,
299 /* lowaddr */ BUS_SPACE_MAXADDR_32BIT,
300 /* highaddr */ BUS_SPACE_MAXADDR,
301 /* filter */ NULL,
302 /* filterarg */ NULL,
303 /* maxsize */ MAXBSIZE,
304 /* nsegments */ AHB_NSEG,
305 /* maxsegsz */ BUS_SPACE_MAXSIZE_32BIT,
306 /* flags */ BUS_DMA_ALLOCNOW,
307 /* lockfunc */ busdma_lock_mutex,
308 /* lockarg */ &Giant,
309 &ahb->buffer_dmat) != 0)
310 goto error_exit;
311
312 ahb->init_level++;
313
314         /* DMA tag for our ecb structures and ha inquiry data */
315 if (bus_dma_tag_create( /* parent */ NULL,
316 /* alignment */ 1,
317 /* boundary */ 0,
318 /* lowaddr */ BUS_SPACE_MAXADDR_32BIT,
319 /* highaddr */ BUS_SPACE_MAXADDR,
320 /* filter */ NULL,
321 /* filterarg */ NULL,
322 /* maxsize */ (AHB_NECB *
323 sizeof(struct ecb))
324 + sizeof(*ahb->ha_inq_data),
325 /* nsegments */ 1,
326 /* maxsegsz */ BUS_SPACE_MAXSIZE_32BIT,
327 /* flags */ 0,
328 /* lockfunc */ busdma_lock_mutex,
329 /* lockarg */ &Giant,
330 &ahb->ecb_dmat) != 0)
331 goto error_exit;
332
333 ahb->init_level++;
334
335         /* Allocation for our ecbs */
336 if (bus_dmamem_alloc(ahb->ecb_dmat, (void **)&ahb->ecb_array,
337 BUS_DMA_NOWAIT, &ahb->ecb_dmamap) != 0)
338 goto error_exit;
339
340 ahb->ha_inq_data = (struct ha_inquiry_data *)&ahb->ecb_array[AHB_NECB];
341
342 ahb->init_level++;
343
344 /* And permanently map them */
345 bus_dmamap_load(ahb->ecb_dmat, ahb->ecb_dmamap,
346                         ahb->ecb_array, (AHB_NECB * sizeof(struct ecb)) + sizeof(*ahb->ha_inq_data),
347 ahbmapecbs, ahb, /*flags*/0);
348
349 ahb->init_level++;
350
351 /* Allocate the buffer dmamaps for each of our ECBs */
352 bzero(ahb->ecb_array, (AHB_NECB * sizeof(struct ecb))
353 + sizeof(*ahb->ha_inq_data));
354 next_ecb = ahb->ecb_array;
355 while (ahb->num_ecbs < AHB_NECB) {
356 u_int32_t ecb_paddr;
357
358 if (bus_dmamap_create(ahb->buffer_dmat, /*flags*/0,
359 &next_ecb->dmamap))
360 break;
361 ecb_paddr = ahbecbvtop(ahb, next_ecb);
362 next_ecb->hecb.status_ptr = ahbstatuspaddr(ecb_paddr);
363 next_ecb->hecb.sense_ptr = ahbsensepaddr(ecb_paddr);
364 ahb->num_ecbs++;
365 ahbecbfree(ahb, next_ecb);
366 next_ecb++;
367 }
368
369 if (ahb->num_ecbs == 0)
370 goto error_exit;
371
372 ahb->init_level++;
373
374 /*
375 * Now that we know we own the resources we need, register
376 * our bus with the XPT.
377 */
378 if (ahbxptattach(ahb))
379 goto error_exit;
380
381 /* Enable our interrupt */
382 bus_setup_intr(dev, irq, INTR_TYPE_CAM|INTR_ENTROPY, ahbintr, ahb, &ih);
383 return (0);
384
385 error_exit:
386 /*
387 * The board's IRQ line will not be left enabled
388          * if we can't initialize correctly, so it's safe
389 * to release the irq.
390 */
391 ahbfree(ahb);
392 error_exit2:
393 if (io)
394 bus_release_resource(dev, SYS_RES_IOPORT, 0, io);
395 if (irq)
396 bus_release_resource(dev, SYS_RES_IRQ, 0, irq);
397 return (-1);
398 }
399
400 static struct ahb_softc *
401 ahballoc(u_long unit, struct resource *res)
402 {
403 struct ahb_softc *ahb;
404
405 /*
406 * Allocate a storage area for us
407 */
408 ahb = malloc(sizeof(struct ahb_softc), M_DEVBUF, M_NOWAIT | M_ZERO);
409 if (!ahb) {
410 printf("ahb%ld: cannot malloc!\n", unit);
411 return (NULL);
412 }
413 SLIST_INIT(&ahb->free_ecbs);
414 LIST_INIT(&ahb->pending_ccbs);
415 ahb->unit = unit;
416 ahb->tag = rman_get_bustag(res);
417 ahb->bsh = rman_get_bushandle(res);
418 ahb->disc_permitted = ~0;
419 ahb->tags_permitted = ~0;
420
421 return (ahb);
422 }
423
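/*
 * Release resources in the reverse order of ahbattach().  init_level
 * records how far attach progressed; the case labels intentionally fall
 * through so only what was actually set up gets torn down.
 */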
424 static void
425 ahbfree(struct ahb_softc *ahb)
426 {
427 switch (ahb->init_level) {
428 default:
429 case 4:
430 bus_dmamap_unload(ahb->ecb_dmat, ahb->ecb_dmamap);
431 case 3:
432 bus_dmamem_free(ahb->ecb_dmat, ahb->ecb_array,
433 ahb->ecb_dmamap);
434 bus_dmamap_destroy(ahb->ecb_dmat, ahb->ecb_dmamap);
435 case 2:
436 bus_dma_tag_destroy(ahb->ecb_dmat);
437 case 1:
438 bus_dma_tag_destroy(ahb->buffer_dmat);
439 case 0:
440 break;
441 }
442 free(ahb, M_DEVBUF);
443 }
444
445 /*
446  * Reset the board.  If it doesn't respond, return failure.
447 */
448 static int
449 ahbreset(struct ahb_softc *ahb)
450 {
451 int wait = 1000; /* 1 sec enough? */
452 int test;
453
454 if ((ahb_inb(ahb, PORTADDR) & PORTADDR_ENHANCED) == 0) {
455 printf("ahb_reset: Controller not in enhanced mode\n");
456 return (-1);
457 }
458
459 ahb_outb(ahb, CONTROL, CNTRL_HARD_RST);
460 DELAY(1000);
461 ahb_outb(ahb, CONTROL, 0);
462 while (--wait) {
463 DELAY(1000);
464 if ((ahb_inb(ahb, HOSTSTAT) & HOSTSTAT_BUSY) == 0)
465 break;
466 }
467
468 if (wait == 0) {
469 printf("ahbreset: No answer from aha1742 board\n");
470 return (-1);
471 }
472 if ((test = ahb_inb(ahb, MBOXIN0)) != 0) {
473 printf("ahb_reset: self test failed, val = 0x%x\n", test);
474 return (-1);
475 }
476 while (ahb_inb(ahb, HOSTSTAT) & HOSTSTAT_INTPEND) {
477 ahb_outb(ahb, CONTROL, CNTRL_CLRINT);
478 DELAY(10000);
479 }
480 return (0);
481 }
482
483 static void
484 ahbmapecbs(void *arg, bus_dma_segment_t *segs, int nseg, int error)
485 {
486 struct ahb_softc* ahb;
487
488 ahb = (struct ahb_softc*)arg;
489 ahb->ecb_physbase = segs->ds_addr;
490 /*
491 * Space for adapter inquiry information is on the
492 * tail of the ecb array.
493 */
494 ahb->ha_inq_physbase = ahbecbvtop(ahb, &ahb->ecb_array[AHB_NECB]);
495 }
496
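/*
 * Finish controller bring-up and register with CAM: fetch the host
 * adapter inquiry data by polling a hand-built ECB, size the ECB pool
 * from the adapter's reply, allocate the device queue and SIM, register
 * the bus and a wildcard path, and finally enable board interrupts.
 */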
497 static int
498 ahbxptattach(struct ahb_softc *ahb)
499 {
500 struct cam_devq *devq;
501 struct ecb *ecb;
502 u_int i;
503
504         /* Remember who we are on the SCSI bus */
505 ahb->scsi_id = ahb_inb(ahb, SCSIDEF) & HSCSIID;
506
507 /* Use extended translation?? */
508 ahb->extended_trans = ahb_inb(ahb, RESV1) & EXTENDED_TRANS;
509
510 /* Fetch adapter inquiry data */
511 ecb = ahbecbget(ahb); /* Always succeeds - no outstanding commands */
512 ecb->hecb.opcode = ECBOP_READ_HA_INQDATA;
513 ecb->hecb.flag_word1 = FW1_SUPPRESS_URUN_ERR|FW1_ERR_STATUS_BLK_ONLY;
514 ecb->hecb.data_ptr = ahb->ha_inq_physbase;
515 ecb->hecb.data_len = sizeof(struct ha_inquiry_data);
516 ecb->hecb.sense_ptr = 0;
517 ecb->state = ECB_ACTIVE;
518
519 /* Tell the adapter about this command */
520 ahbqueuembox(ahb, ahbecbvtop(ahb, ecb),
521 ATTN_STARTECB|ahb->scsi_id);
522
523 /* Poll for interrupt completion */
524 for (i = 1000; ecb->state != ECB_FREE && i != 0; i--) {
525 ahbintr(ahb);
526 DELAY(1000);
527 }
528
529 ahb->num_ecbs = MIN(ahb->num_ecbs,
530 ahb->ha_inq_data->scsi_data.reserved[1]);
531 printf("ahb%ld: %.8s %s SCSI Adapter, FW Rev. %.4s, ID=%d, %d ECBs\n",
532 ahb->unit, ahb->ha_inq_data->scsi_data.product,
533 (ahb->ha_inq_data->scsi_data.flags & 0x4) ? "Differential"
534 : "Single Ended",
535 ahb->ha_inq_data->scsi_data.revision,
536 ahb->scsi_id, ahb->num_ecbs);
537
538 /* Restore sense paddr for future CCB clients */
539 ecb->hecb.sense_ptr = ahbsensepaddr(ahbecbvtop(ahb, ecb));
540
541 ahbecbfree(ahb, ecb);
542
543 /*
544 * Create the device queue for our SIM.
545 */
546 devq = cam_simq_alloc(ahb->num_ecbs);
547 if (devq == NULL)
548 return (ENOMEM);
549
550 /*
551 * Construct our SIM entry
552 */
553 ahb->sim = cam_sim_alloc(ahbaction, ahbpoll, "ahb", ahb, ahb->unit,
554 2, ahb->num_ecbs, devq);
555 if (ahb->sim == NULL) {
556 cam_simq_free(devq);
557 return (ENOMEM);
558 }
559
560 if (xpt_bus_register(ahb->sim, 0) != CAM_SUCCESS) {
561 cam_sim_free(ahb->sim, /*free_devq*/TRUE);
562 return (ENXIO);
563 }
564
565 if (xpt_create_path(&ahb->path, /*periph*/NULL,
566 cam_sim_path(ahb->sim), CAM_TARGET_WILDCARD,
567 CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
568 xpt_bus_deregister(cam_sim_path(ahb->sim));
569 cam_sim_free(ahb->sim, /*free_devq*/TRUE);
570 return (ENXIO);
571 }
572
573 /*
574 * Allow the board to generate interrupts.
575 */
576 ahb_outb(ahb, INTDEF, ahb_inb(ahb, INTDEF) | INTEN);
577
578 return (0);
579 }
580
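/*
 * Completion handler for immediate commands (bus device reset or SCSI
 * bus reset).  Walk the pending CCB list and complete every command
 * addressed to the reset target (or every command, for a full bus
 * reset), then report which reset was delivered.
 */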
581 static void
582 ahbhandleimmed(struct ahb_softc *ahb, u_int32_t mbox, u_int intstat)
583 {
584 struct ccb_hdr *ccb_h;
585 u_int target_id;
586
587 if (ahb->immed_cmd == 0) {
588 printf("ahb%ld: Immediate Command complete with no "
589                        "pending command\n", ahb->unit);
590 return;
591 }
592
593 target_id = intstat & INTSTAT_TARGET_MASK;
594
595 ccb_h = LIST_FIRST(&ahb->pending_ccbs);
596 while (ccb_h != NULL) {
597 struct ecb *pending_ecb;
598 union ccb *ccb;
599
600 pending_ecb = (struct ecb *)ccb_h->ccb_ecb_ptr;
601 ccb = pending_ecb->ccb;
602 ccb_h = LIST_NEXT(ccb_h, sim_links.le);
603 if (ccb->ccb_h.target_id == target_id
604 || target_id == ahb->scsi_id) {
605 untimeout(ahbtimeout, pending_ecb,
606 ccb->ccb_h.timeout_ch);
607 LIST_REMOVE(&ccb->ccb_h, sim_links.le);
608 if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE)
609 bus_dmamap_unload(ahb->buffer_dmat,
610 pending_ecb->dmamap);
611 if (pending_ecb == ahb->immed_ecb)
612 ccb->ccb_h.status =
613 CAM_CMD_TIMEOUT|CAM_RELEASE_SIMQ;
614 else if (target_id == ahb->scsi_id)
615 ccb->ccb_h.status = CAM_SCSI_BUS_RESET;
616 else
617 ccb->ccb_h.status = CAM_BDR_SENT;
618 ahbecbfree(ahb, pending_ecb);
619 xpt_done(ccb);
620 } else if (ahb->immed_ecb != NULL) {
621 /* Re-instate timeout */
622 ccb->ccb_h.timeout_ch =
623 timeout(ahbtimeout, (caddr_t)pending_ecb,
624 (ccb->ccb_h.timeout * hz) / 1000);
625 }
626 }
627
628 if (ahb->immed_ecb != NULL) {
629 ahb->immed_ecb = NULL;
630 printf("ahb%ld: No longer in timeout\n", ahb->unit);
631 } else if (target_id == ahb->scsi_id)
632 printf("ahb%ld: SCSI Bus Reset Delivered\n", ahb->unit);
633 else
634                 printf("ahb%ld: Bus Device Reset Delivered to target %d\n",
635 ahb->unit, target_id);
636
637 ahb->immed_cmd = 0;
638 }
639
640 static void
641 ahbcalcresid(struct ahb_softc *ahb, struct ecb *ecb, union ccb *ccb)
642 {
643 if (ecb->status.data_overrun != 0) {
644 /*
645 * Overrun Condition. The hardware doesn't
646 * provide a meaningful byte count in this case
647 * (the residual is always 0). Tell the XPT
648 * layer about the error.
649 */
650 ccb->ccb_h.status = CAM_DATA_RUN_ERR;
651 } else {
652 ccb->csio.resid = ecb->status.resid_count;
653
654 if ((ecb->hecb.flag_word1 & FW1_SG_ECB) != 0) {
655 /*
656 * For S/G transfers, the adapter provides a pointer
657 * to the address in the last S/G element used and a
658 * residual for that element. So, we need to sum up
659 * the elements that follow it in order to get a real
660 * residual number. If we have an overrun, the residual
661 * reported will be 0 and we already know that all S/G
662 * segments have been exhausted, so we can skip this
663 * step.
664 */
665 ahb_sg_t *sg;
666 int num_sg;
667
668 num_sg = ecb->hecb.data_len / sizeof(ahb_sg_t);
669
670 /* Find the S/G the adapter was working on */
671 for (sg = ecb->sg_list;
672 num_sg != 0 && sg->addr != ecb->status.resid_addr;
673 num_sg--, sg++)
674 ;
675
676 /* Skip it */
677 num_sg--;
678 sg++;
679
680 /* Sum the rest */
681 for (; num_sg != 0; num_sg--, sg++)
682 ccb->csio.resid += sg->len;
683 }
684 /* Underruns are not errors */
685 ccb->ccb_h.status = CAM_REQ_CMP;
686 }
687 }
688
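/*
 * Translate the adapter's host status for a failed ECB into the
 * corresponding CAM status, copying out auto-sense data when it was
 * stored.  Any error also freezes the device queue so the peripheral
 * driver can control recovery.
 */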
689 static void
690 ahbprocesserror(struct ahb_softc *ahb, struct ecb *ecb, union ccb *ccb)
691 {
692 struct hardware_ecb *hecb;
693 struct ecb_status *status;
694
695 hecb = &ecb->hecb;
696 status = &ecb->status;
697 switch (status->ha_status) {
698 case HS_OK:
699 ccb->csio.scsi_status = status->scsi_status;
700 if (status->scsi_status != 0) {
701 ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
702 if (status->sense_stored) {
703 ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
704 ccb->csio.sense_resid =
705 ccb->csio.sense_len - status->sense_len;
706 bcopy(&ecb->sense, &ccb->csio.sense_data,
707 status->sense_len);
708 }
709 }
710 break;
711 case HS_TARGET_NOT_ASSIGNED:
712 ccb->ccb_h.status = CAM_PATH_INVALID;
713 break;
714 case HS_SEL_TIMEOUT:
715 ccb->ccb_h.status = CAM_SEL_TIMEOUT;
716 break;
717 case HS_DATA_RUN_ERR:
718 ahbcalcresid(ahb, ecb, ccb);
719 break;
720 case HS_UNEXPECTED_BUSFREE:
721 ccb->ccb_h.status = CAM_UNEXP_BUSFREE;
722 break;
723 case HS_INVALID_PHASE:
724 ccb->ccb_h.status = CAM_SEQUENCE_FAIL;
725 break;
726 case HS_REQUEST_SENSE_FAILED:
727 ccb->ccb_h.status = CAM_AUTOSENSE_FAIL;
728 break;
729 case HS_TAG_MSG_REJECTED:
730 {
731 struct ccb_trans_settings neg;
732
733 xpt_print_path(ccb->ccb_h.path);
734 printf("refuses tagged commands. Performing "
735 "non-tagged I/O\n");
736 neg.flags = 0;
737 neg.valid = CCB_TRANS_TQ_VALID;
738 xpt_setup_ccb(&neg.ccb_h, ccb->ccb_h.path, /*priority*/1);
739 xpt_async(AC_TRANSFER_NEG, ccb->ccb_h.path, &neg);
740 ahb->tags_permitted &= ~(0x01 << ccb->ccb_h.target_id);
741 ccb->ccb_h.status = CAM_MSG_REJECT_REC;
742 break;
743 }
744 case HS_FIRMWARE_LOAD_REQ:
745 case HS_HARDWARE_ERR:
746 /*
747 * Tell the system that the Adapter
748 * is no longer functional.
749 */
750 ccb->ccb_h.status = CAM_NO_HBA;
751 break;
752 case HS_CMD_ABORTED_HOST:
753 case HS_CMD_ABORTED_ADAPTER:
754 case HS_ATN_TARGET_FAILED:
755 case HS_SCSI_RESET_ADAPTER:
756 case HS_SCSI_RESET_INCOMING:
757 ccb->ccb_h.status = CAM_SCSI_BUS_RESET;
758 break;
759 case HS_INVALID_ECB_PARAM:
760 printf("ahb%ld: opcode 0x%02x, flag_word1 0x%02x, flag_word2 0x%02x\n",
761 ahb->unit, hecb->opcode, hecb->flag_word1, hecb->flag_word2);
762 ccb->ccb_h.status = CAM_SCSI_BUS_RESET;
763 break;
764 case HS_DUP_TCB_RECEIVED:
765 case HS_INVALID_OPCODE:
766 case HS_INVALID_CMD_LINK:
767 case HS_PROGRAM_CKSUM_ERROR:
768 panic("ahb%ld: Can't happen host status %x occurred",
769 ahb->unit, status->ha_status);
770 break;
771 }
772 if (ccb->ccb_h.status != CAM_REQ_CMP) {
773 xpt_freeze_devq(ccb->ccb_h.path, /*count*/1);
774 ccb->ccb_h.status |= CAM_DEV_QFRZN;
775 }
776 }
777
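/*
 * Handle a completed ECB.  The mailbox value is the ECB's bus address;
 * convert it back to a virtual pointer, cancel its timeout, sync and
 * unload any data map, and pass the CCB back to CAM.  ECBs without a
 * CCB (such as the attach-time inquiry) are left for their owner to
 * release.
 */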
778 static void
779 ahbdone(struct ahb_softc *ahb, u_int32_t mbox, u_int intstat)
780 {
781 struct ecb *ecb;
782 union ccb *ccb;
783
784 ecb = ahbecbptov(ahb, mbox);
785
786 if ((ecb->state & ECB_ACTIVE) == 0)
787 panic("ecb not active");
788
789 ccb = ecb->ccb;
790
791 if (ccb != NULL) {
792 untimeout(ahbtimeout, ecb, ccb->ccb_h.timeout_ch);
793 LIST_REMOVE(&ccb->ccb_h, sim_links.le);
794
795 if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
796 bus_dmasync_op_t op;
797
798 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN)
799 op = BUS_DMASYNC_POSTREAD;
800 else
801 op = BUS_DMASYNC_POSTWRITE;
802 bus_dmamap_sync(ahb->buffer_dmat, ecb->dmamap, op);
803 bus_dmamap_unload(ahb->buffer_dmat, ecb->dmamap);
804 }
805
806 if ((intstat & INTSTAT_MASK) == INTSTAT_ECB_OK) {
807 ccb->ccb_h.status = CAM_REQ_CMP;
808 ccb->csio.resid = 0;
809 } else {
810 ahbprocesserror(ahb, ecb, ccb);
811 }
812 ahbecbfree(ahb, ecb);
813 xpt_done(ccb);
814 } else {
815 /* Non CCB Command */
816 if ((intstat & INTSTAT_MASK) != INTSTAT_ECB_OK) {
817                         printf("ahb%ld: Command 0x%x Failed %x:%x:%x\n",
818 ahb->unit, ecb->hecb.opcode,
819 *((u_int16_t*)&ecb->status),
820 ecb->status.ha_status, ecb->status.resid_count);
821 }
822 /* Client owns this ECB and will release it. */
823 }
824 }
825
826 /*
827  * Catch an interrupt from the adapter
828 */
829 static void
830 ahbintr(void *arg)
831 {
832 struct ahb_softc *ahb;
833 u_int intstat;
834 u_int32_t mbox;
835
836 ahb = (struct ahb_softc *)arg;
837
838 while (ahb_inb(ahb, HOSTSTAT) & HOSTSTAT_INTPEND) {
839 /*
840 * Fetch information about this interrupt.
841 */
842 intstat = ahb_inb(ahb, INTSTAT);
843 mbox = ahb_inl(ahb, MBOXIN0);
844
845 /*
846 * Reset interrupt latch.
847 */
848 ahb_outb(ahb, CONTROL, CNTRL_CLRINT);
849
850 /*
851 * Process the completed operation
852 */
853 switch (intstat & INTSTAT_MASK) {
854 case INTSTAT_ECB_OK:
855 case INTSTAT_ECB_CMPWRETRY:
856 case INTSTAT_ECB_CMPWERR:
857 ahbdone(ahb, mbox, intstat);
858 break;
859 case INTSTAT_AEN_OCCURED:
860 if ((intstat & INTSTAT_TARGET_MASK) == ahb->scsi_id) {
861 /* Bus Reset */
862 xpt_print_path(ahb->path);
863 switch (mbox) {
864 case HS_SCSI_RESET_ADAPTER:
865 printf("Host Adapter Initiated "
866 "Bus Reset occurred\n");
867 break;
868 case HS_SCSI_RESET_INCOMING:
869 printf("Bus Reset Initiated "
870 "by another device occurred\n");
871 break;
872 }
873 /* Notify the XPT */
874 xpt_async(AC_BUS_RESET, ahb->path, NULL);
875 break;
876 }
877                 printf("Unsupported initiator selection AEN occurred\n");
878 break;
879 case INTSTAT_IMMED_OK:
880 case INTSTAT_IMMED_ERR:
881 ahbhandleimmed(ahb, mbox, intstat);
882 break;
883 case INTSTAT_HW_ERR:
884 panic("Unrecoverable hardware Error Occurred\n");
885 }
886 }
887 }
888
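/*
 * bus_dmamap_load() callback: copy the returned DMA segments into the
 * ECB's scatter/gather list (or use a direct pointer for a single
 * segment), then queue the ECB to the adapter and start the CCB's
 * timeout.  A CCB aborted while its mapping was in flight is completed
 * here instead.
 */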
889 static void
890 ahbexecuteecb(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
891 {
892 struct ecb *ecb;
893 union ccb *ccb;
894 struct ahb_softc *ahb;
895 u_int32_t ecb_paddr;
896 int s;
897
898 ecb = (struct ecb *)arg;
899 ccb = ecb->ccb;
900 ahb = (struct ahb_softc *)ccb->ccb_h.ccb_ahb_ptr;
901
902 if (error != 0) {
903 if (error != EFBIG)
904                         printf("ahb%ld: Unexpected error 0x%x returned from "
905 "bus_dmamap_load\n", ahb->unit, error);
906 if (ccb->ccb_h.status == CAM_REQ_INPROG) {
907 xpt_freeze_devq(ccb->ccb_h.path, /*count*/1);
908 ccb->ccb_h.status = CAM_REQ_TOO_BIG|CAM_DEV_QFRZN;
909 }
910 ahbecbfree(ahb, ecb);
911 xpt_done(ccb);
912 return;
913 }
914
915 ecb_paddr = ahbecbvtop(ahb, ecb);
916
917 if (nseg != 0) {
918 ahb_sg_t *sg;
919 bus_dma_segment_t *end_seg;
920 bus_dmasync_op_t op;
921
922 end_seg = dm_segs + nseg;
923
924 /* Copy the segments into our SG list */
925 sg = ecb->sg_list;
926 while (dm_segs < end_seg) {
927 sg->addr = dm_segs->ds_addr;
928 sg->len = dm_segs->ds_len;
929 sg++;
930 dm_segs++;
931 }
932
933 if (nseg > 1) {
934 ecb->hecb.flag_word1 |= FW1_SG_ECB;
935 ecb->hecb.data_ptr = ahbsgpaddr(ecb_paddr);
936 ecb->hecb.data_len = sizeof(ahb_sg_t) * nseg;
937 } else {
938 ecb->hecb.data_ptr = ecb->sg_list->addr;
939 ecb->hecb.data_len = ecb->sg_list->len;
940 }
941
942 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
943 /* ecb->hecb.flag_word2 |= FW2_DATA_DIR_IN; */
944 op = BUS_DMASYNC_PREREAD;
945 } else {
946 op = BUS_DMASYNC_PREWRITE;
947 }
948 /* ecb->hecb.flag_word2 |= FW2_CHECK_DATA_DIR; */
949
950 bus_dmamap_sync(ahb->buffer_dmat, ecb->dmamap, op);
951
952 } else {
953 ecb->hecb.data_ptr = 0;
954 ecb->hecb.data_len = 0;
955 }
956
957 s = splcam();
958
959 /*
960          * Last chance to check whether this CCB needs to
961 * be aborted.
962 */
963 if (ccb->ccb_h.status != CAM_REQ_INPROG) {
964 if (nseg != 0)
965 bus_dmamap_unload(ahb->buffer_dmat, ecb->dmamap);
966 ahbecbfree(ahb, ecb);
967 xpt_done(ccb);
968 splx(s);
969 return;
970 }
971
972 ecb->state = ECB_ACTIVE;
973 ccb->ccb_h.status |= CAM_SIM_QUEUED;
974 LIST_INSERT_HEAD(&ahb->pending_ccbs, &ccb->ccb_h, sim_links.le);
975
976 /* Tell the adapter about this command */
977 ahbqueuembox(ahb, ecb_paddr, ATTN_STARTECB|ccb->ccb_h.target_id);
978
979 ccb->ccb_h.timeout_ch = timeout(ahbtimeout, (caddr_t)ecb,
980 (ccb->ccb_h.timeout * hz) / 1000);
981 splx(s);
982 }
983
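/*
 * CAM SIM action entry point: dispatch on the CCB function code.
 * XPT_SCSI_IO builds a hardware ECB, maps any data and queues it to the
 * adapter; the remaining cases cover resets, transfer-setting queries,
 * path inquiry and geometry calculation.
 */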
984 static void
985 ahbaction(struct cam_sim *sim, union ccb *ccb)
986 {
987 struct ahb_softc *ahb;
988
989 CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("ahbaction\n"));
990
991 ahb = (struct ahb_softc *)cam_sim_softc(sim);
992
993 switch (ccb->ccb_h.func_code) {
994 /* Common cases first */
995 case XPT_SCSI_IO: /* Execute the requested I/O operation */
996 {
997 struct ecb *ecb;
998 struct hardware_ecb *hecb;
999
1000 /*
1001 * get an ecb to use.
1002 */
1003 if ((ecb = ahbecbget(ahb)) == NULL) {
1004 /* Should never occur */
1005 panic("Failed to get an ecb");
1006 }
1007
1008 /*
1009 * So we can find the ECB when an abort is requested
1010 */
1011 ecb->ccb = ccb;
1012 ccb->ccb_h.ccb_ecb_ptr = ecb;
1013 ccb->ccb_h.ccb_ahb_ptr = ahb;
1014
1015 /*
1016 * Put all the arguments for the xfer in the ecb
1017 */
1018 hecb = &ecb->hecb;
1019 hecb->opcode = ECBOP_INITIATOR_SCSI_CMD;
1020 hecb->flag_word1 = FW1_AUTO_REQUEST_SENSE
1021 | FW1_ERR_STATUS_BLK_ONLY;
1022 hecb->flag_word2 = ccb->ccb_h.target_lun
1023 | FW2_NO_RETRY_ON_BUSY;
1024 if ((ccb->ccb_h.flags & CAM_TAG_ACTION_VALID) != 0) {
1025 hecb->flag_word2 |= FW2_TAG_ENB
1026 | ((ccb->csio.tag_action & 0x3)
1027 << FW2_TAG_TYPE_SHIFT);
1028 }
1029 if ((ccb->ccb_h.flags & CAM_DIS_DISCONNECT) != 0)
1030 hecb->flag_word2 |= FW2_DISABLE_DISC;
1031 hecb->sense_len = ccb->csio.sense_len;
1032 hecb->cdb_len = ccb->csio.cdb_len;
1033 if ((ccb->ccb_h.flags & CAM_CDB_POINTER) != 0) {
1034 if ((ccb->ccb_h.flags & CAM_CDB_PHYS) == 0) {
1035 bcopy(ccb->csio.cdb_io.cdb_ptr,
1036 hecb->cdb, hecb->cdb_len);
1037 } else {
1038 /* I guess I could map it in... */
1039 ccb->ccb_h.status = CAM_REQ_INVALID;
1040 ahbecbfree(ahb, ecb);
1041 xpt_done(ccb);
1042 return;
1043 }
1044 } else {
1045 bcopy(ccb->csio.cdb_io.cdb_bytes,
1046 hecb->cdb, hecb->cdb_len);
1047 }
1048
1049 /*
1050 * If we have any data to send with this command,
1051 * map it into bus space.
1052 */
1053 if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
1054 if ((ccb->ccb_h.flags & CAM_SCATTER_VALID) == 0) {
1055 /*
1056 * We've been given a pointer
1057 * to a single buffer.
1058 */
1059 if ((ccb->ccb_h.flags & CAM_DATA_PHYS)==0) {
1060 int s;
1061 int error;
1062
1063 s = splsoftvm();
1064 error = bus_dmamap_load(
1065 ahb->buffer_dmat,
1066 ecb->dmamap,
1067 ccb->csio.data_ptr,
1068 ccb->csio.dxfer_len,
1069 ahbexecuteecb,
1070 ecb, /*flags*/0);
1071 if (error == EINPROGRESS) {
1072 /*
1073 * So as to maintain ordering,
1074 * freeze the controller queue
1075 * until our mapping is
1076 * returned.
1077 */
1078 xpt_freeze_simq(ahb->sim, 1);
1079 ccb->ccb_h.status |=
1080 CAM_RELEASE_SIMQ;
1081 }
1082 splx(s);
1083 } else {
1084 struct bus_dma_segment seg;
1085
1086 /* Pointer to physical buffer */
1087 seg.ds_addr =
1088 (bus_addr_t)ccb->csio.data_ptr;
1089 seg.ds_len = ccb->csio.dxfer_len;
1090 ahbexecuteecb(ecb, &seg, 1, 0);
1091 }
1092 } else {
1093 struct bus_dma_segment *segs;
1094
1095 if ((ccb->ccb_h.flags & CAM_DATA_PHYS) != 0)
1096 panic("ahbaction - Physical segment "
1097 "pointers unsupported");
1098
1099 if ((ccb->ccb_h.flags & CAM_SG_LIST_PHYS) == 0)
1100                                 panic("ahbaction - Virtual segment "
1101 "addresses unsupported");
1102
1103 /* Just use the segments provided */
1104 segs = (struct bus_dma_segment *)
1105 ccb->csio.data_ptr;
1106 ahbexecuteecb(ecb, segs, ccb->csio.sglist_cnt,
1107 0);
1108 }
1109 } else {
1110 ahbexecuteecb(ecb, NULL, 0, 0);
1111 }
1112 break;
1113 }
1114 case XPT_EN_LUN: /* Enable LUN as a target */
1115 case XPT_TARGET_IO: /* Execute target I/O request */
1116 case XPT_ACCEPT_TARGET_IO: /* Accept Host Target Mode CDB */
1117 case XPT_CONT_TARGET_IO: /* Continue Host Target I/O Connection*/
1118 case XPT_ABORT: /* Abort the specified CCB */
1119 /* XXX Implement */
1120 ccb->ccb_h.status = CAM_REQ_INVALID;
1121 xpt_done(ccb);
1122 break;
1123 case XPT_SET_TRAN_SETTINGS:
1124 {
1125 ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
1126 xpt_done(ccb);
1127 break;
1128 }
1129 case XPT_GET_TRAN_SETTINGS:
1130 /* Get default/user set transfer settings for the target */
1131 {
1132 struct ccb_trans_settings *cts;
1133 u_int target_mask;
1134
1135 cts = &ccb->cts;
1136 target_mask = 0x01 << ccb->ccb_h.target_id;
1137 if ((cts->flags & CCB_TRANS_USER_SETTINGS) != 0) {
1138 cts->flags = 0;
1139 if ((ahb->disc_permitted & target_mask) != 0)
1140 cts->flags |= CCB_TRANS_DISC_ENB;
1141 if ((ahb->tags_permitted & target_mask) != 0)
1142 cts->flags |= CCB_TRANS_TAG_ENB;
1143 cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
1144 cts->sync_period = 25; /* 10MHz */
1145
1146 if (cts->sync_period != 0)
1147 cts->sync_offset = 15;
1148
1149 cts->valid = CCB_TRANS_SYNC_RATE_VALID
1150 | CCB_TRANS_SYNC_OFFSET_VALID
1151 | CCB_TRANS_BUS_WIDTH_VALID
1152 | CCB_TRANS_DISC_VALID
1153 | CCB_TRANS_TQ_VALID;
1154 ccb->ccb_h.status = CAM_REQ_CMP;
1155 } else {
1156 ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
1157 }
1158 xpt_done(ccb);
1159 break;
1160 }
1161 case XPT_RESET_DEV: /* Bus Device Reset the specified SCSI device */
1162 {
1163 int i;
1164 int s;
1165
1166 s = splcam();
1167 ahb->immed_cmd = IMMED_RESET;
1168 ahbqueuembox(ahb, IMMED_RESET, ATTN_IMMED|ccb->ccb_h.target_id);
1169 /* Poll for interrupt completion */
1170 for (i = 1000; ahb->immed_cmd != 0 && i != 0; i--) {
1171 DELAY(1000);
1172 ahbintr(cam_sim_softc(sim));
1173 }
1174 splx(s);
1175 break;
1176 }
1177 case XPT_CALC_GEOMETRY:
1178 {
1179 cam_calc_geometry(&ccb->ccg, ahb->extended_trans);
1180 xpt_done(ccb);
1181 break;
1182 }
1183 case XPT_RESET_BUS: /* Reset the specified SCSI bus */
1184 {
1185 int i;
1186
1187 ahb->immed_cmd = IMMED_RESET;
1188 ahbqueuembox(ahb, IMMED_RESET, ATTN_IMMED|ahb->scsi_id);
1189 /* Poll for interrupt completion */
1190 for (i = 1000; ahb->immed_cmd != 0 && i != 0; i--)
1191 DELAY(1000);
1192 ccb->ccb_h.status = CAM_REQ_CMP;
1193 xpt_done(ccb);
1194 break;
1195 }
1196 case XPT_TERM_IO: /* Terminate the I/O process */
1197 /* XXX Implement */
1198 ccb->ccb_h.status = CAM_REQ_INVALID;
1199 xpt_done(ccb);
1200 break;
1201 case XPT_PATH_INQ: /* Path routing inquiry */
1202 {
1203 struct ccb_pathinq *cpi = &ccb->cpi;
1204
1205 cpi->version_num = 1; /* XXX??? */
1206 cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE;
1207 cpi->target_sprt = 0;
1208 cpi->hba_misc = 0;
1209 cpi->hba_eng_cnt = 0;
1210 cpi->max_target = 7;
1211 cpi->max_lun = 7;
1212 cpi->initiator_id = ahb->scsi_id;
1213 cpi->bus_id = cam_sim_bus(sim);
1214 cpi->base_transfer_speed = 3300;
1215 strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
1216 strncpy(cpi->hba_vid, "Adaptec", HBA_IDLEN);
1217 strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
1218 cpi->unit_number = cam_sim_unit(sim);
1219 cpi->ccb_h.status = CAM_REQ_CMP;
1220 xpt_done(ccb);
1221 break;
1222 }
1223 #if 0
1224 /* Need these??? */
1225 case XPT_IMMED_NOTIFY: /* Notify Host Target driver of event */
1226 case XPT_NOTIFY_ACK: /* Acknowledgement of event */
1227 #endif
1228 default:
1229 ccb->ccb_h.status = CAM_REQ_INVALID;
1230 xpt_done(ccb);
1231 break;
1232 }
1233 }
1234
1235 static void
1236 ahbpoll(struct cam_sim *sim)
1237 {
1238 ahbintr(cam_sim_softc(sim));
1239 }
1240
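/*
 * Command timeout handler.  Recovery escalates on successive timeouts
 * of the same ECB: first queue a bus device reset to the target, then
 * a SCSI bus reset, and finally reset the adapter itself.
 */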
1241 static void
1242 ahbtimeout(void *arg)
1243 {
1244 struct ecb *ecb;
1245 union ccb *ccb;
1246 struct ahb_softc *ahb;
1247 int s;
1248
1249 ecb = (struct ecb *)arg;
1250 ccb = ecb->ccb;
1251 ahb = (struct ahb_softc *)ccb->ccb_h.ccb_ahb_ptr;
1252 xpt_print_path(ccb->ccb_h.path);
1253 printf("ECB %p - timed out\n", (void *)ecb);
1254
1255 s = splcam();
1256
1257 if ((ecb->state & ECB_ACTIVE) == 0) {
1258 xpt_print_path(ccb->ccb_h.path);
1259                 printf("ECB %p - timed out, but has already completed\n",
1260 (void *)ecb);
1261 splx(s);
1262 return;
1263 }
1264 /*
1265 * In order to simplify the recovery process, we ask the XPT
1266 * layer to halt the queue of new transactions and we traverse
1267 * the list of pending CCBs and remove their timeouts. This
1268 * means that the driver attempts to clear only one error
1269 * condition at a time. In general, timeouts that occur
1270 * close together are related anyway, so there is no benefit
1271  * in attempting to handle errors in parallel.  Timeouts will
1272 * be reinstated when the recovery process ends.
1273 */
1274 if ((ecb->state & ECB_DEVICE_RESET) == 0) {
1275 struct ccb_hdr *ccb_h;
1276
1277 if ((ecb->state & ECB_RELEASE_SIMQ) == 0) {
1278 xpt_freeze_simq(ahb->sim, /*count*/1);
1279 ecb->state |= ECB_RELEASE_SIMQ;
1280 }
1281
1282 ccb_h = LIST_FIRST(&ahb->pending_ccbs);
1283 while (ccb_h != NULL) {
1284 struct ecb *pending_ecb;
1285
1286 pending_ecb = (struct ecb *)ccb_h->ccb_ecb_ptr;
1287 untimeout(ahbtimeout, pending_ecb, ccb_h->timeout_ch);
1288 ccb_h = LIST_NEXT(ccb_h, sim_links.le);
1289 }
1290
1291 /* Store for our interrupt handler */
1292 ahb->immed_ecb = ecb;
1293
1294 /*
1295 * Send a Bus Device Reset message:
1296 * The target that is holding up the bus may not
1297 * be the same as the one that triggered this timeout
1298 * (different commands have different timeout lengths),
1299 * but we have no way of determining this from our
1300 * timeout handler. Our strategy here is to queue a
1301 * BDR message to the target of the timed out command.
1302 * If this fails, we'll get another timeout 2 seconds
1303 * later which will attempt a bus reset.
1304 */
1305 xpt_print_path(ccb->ccb_h.path);
1306 printf("Queuing BDR\n");
1307 ecb->state |= ECB_DEVICE_RESET;
1308 ccb->ccb_h.timeout_ch =
1309 timeout(ahbtimeout, (caddr_t)ecb, 2 * hz);
1310
1311 ahb->immed_cmd = IMMED_RESET;
1312 ahbqueuembox(ahb, IMMED_RESET, ATTN_IMMED|ccb->ccb_h.target_id);
1313         } else if ((ecb->state & ECB_SCSIBUS_RESET) == 0) {
1314 /*
1315 * Try a SCSI bus reset. We do this only if we
1316 * have already attempted to clear the condition with a BDR.
1317 */
1318 xpt_print_path(ccb->ccb_h.path);
1319 printf("Attempting SCSI Bus reset\n");
1320 ecb->state |= ECB_SCSIBUS_RESET;
1321 ccb->ccb_h.timeout_ch =
1322 timeout(ahbtimeout, (caddr_t)ecb, 2 * hz);
1323 ahb->immed_cmd = IMMED_RESET;
1324 ahbqueuembox(ahb, IMMED_RESET, ATTN_IMMED|ahb->scsi_id);
1325 } else {
1326 /* Bring out the hammer... */
1327 ahbreset(ahb);
1328
1329 /* Simulate the reset complete interrupt */
1330 ahbhandleimmed(ahb, 0, ahb->scsi_id|INTSTAT_IMMED_OK);
1331 }
1332
1333 splx(s);
1334 }
1335
1336 static device_method_t ahb_eisa_methods[] = {
1337 /* Device interface */
1338 DEVMETHOD(device_probe, ahbprobe),
1339 DEVMETHOD(device_attach, ahbattach),
1340
1341 { 0, 0 }
1342 };
1343
1344 static driver_t ahb_eisa_driver = {
1345 "ahb",
1346 ahb_eisa_methods,
1347 1, /* unused */
1348 };
1349
1350 static devclass_t ahb_devclass;
1351
1352 DRIVER_MODULE(ahb, eisa, ahb_eisa_driver, ahb_devclass, 0, 0);