sys/dev/ahb/ahb.c
1 /*-
2 * CAM SCSI device driver for the Adaptec 174X SCSI Host adapter
3 *
4 * Copyright (c) 1998 Justin T. Gibbs
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice immediately at the beginning of the file, without modification,
12 * this list of conditions, and the following disclaimer.
13 * 2. The name of the author may not be used to endorse or promote products
14 * derived from this software without specific prior written permission.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
20 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26 * SUCH DAMAGE.
27 *
28 * $FreeBSD: releng/8.0/sys/dev/ahb/ahb.c 170883 2007-06-17 15:21:09Z scottl $
29 */
30
31 #include <sys/param.h>
32 #include <sys/systm.h>
33 #include <sys/kernel.h>
34 #include <sys/malloc.h>
35 #include <sys/module.h>
36 #include <sys/lock.h>
37 #include <sys/mutex.h>
38 #include <sys/bus.h>
39
40 #include <machine/bus.h>
41 #include <machine/resource.h>
42 #include <sys/rman.h>
43
44 #include <cam/cam.h>
45 #include <cam/cam_ccb.h>
46 #include <cam/cam_sim.h>
47 #include <cam/cam_xpt_sim.h>
48 #include <cam/cam_debug.h>
49
50 #include <cam/scsi/scsi_message.h>
51
52 #include <dev/eisa/eisaconf.h>
53
54 #include <dev/ahb/ahbreg.h>
55
56 #define ccb_ecb_ptr spriv_ptr0
57 #define ccb_ahb_ptr spriv_ptr1
58
59 #define ahb_inb(ahb, port) \
60 bus_space_read_1((ahb)->tag, (ahb)->bsh, port)
61
62 #define ahb_inl(ahb, port) \
63 bus_space_read_4((ahb)->tag, (ahb)->bsh, port)
64
65 #define ahb_outb(ahb, port, value) \
66 bus_space_write_1((ahb)->tag, (ahb)->bsh, port, value)
67
68 #define ahb_outl(ahb, port, value) \
69 bus_space_write_4((ahb)->tag, (ahb)->bsh, port, value)
70
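/*
 * The macros above simply hide the bus_space tag/handle pair stored in the
 * softc.  For example, fetching the interrupt status and clearing the
 * interrupt latch, as ahbintr() does below, expand to:
 *
 *	intstat = bus_space_read_1(ahb->tag, ahb->bsh, INTSTAT);
 *	bus_space_write_1(ahb->tag, ahb->bsh, CONTROL, CNTRL_CLRINT);
 */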
71 static const char *ahbmatch(eisa_id_t type);
72 static struct ahb_softc *ahballoc(device_t dev, struct resource *res);
73 static void ahbfree(struct ahb_softc *ahb);
74 static int ahbreset(struct ahb_softc *ahb);
75 static void ahbmapecbs(void *arg, bus_dma_segment_t *segs,
76 int nseg, int error);
77 static int ahbxptattach(struct ahb_softc *ahb);
78 static void ahbhandleimmed(struct ahb_softc *ahb,
79 u_int32_t mbox, u_int intstat);
80 static void ahbcalcresid(struct ahb_softc *ahb,
81 struct ecb *ecb, union ccb *ccb);
82 static __inline void ahbdone(struct ahb_softc *ahb, u_int32_t mbox,
83 u_int intstat);
84 static void ahbintr(void *arg);
85 static bus_dmamap_callback_t ahbexecuteecb;
86 static void ahbaction(struct cam_sim *sim, union ccb *ccb);
87 static void ahbpoll(struct cam_sim *sim);
88
89 /* Our timeout handler */
90 static timeout_t ahbtimeout;
91
92 static __inline struct ecb* ahbecbget(struct ahb_softc *ahb);
93 static __inline void ahbecbfree(struct ahb_softc* ahb,
94 struct ecb* ecb);
95 static __inline u_int32_t ahbecbvtop(struct ahb_softc *ahb,
96 struct ecb *ecb);
97 static __inline struct ecb* ahbecbptov(struct ahb_softc *ahb,
98 u_int32_t ecb_addr);
99 static __inline u_int32_t ahbstatuspaddr(u_int32_t ecb_paddr);
100 static __inline u_int32_t ahbsensepaddr(u_int32_t ecb_paddr);
101 static __inline u_int32_t ahbsgpaddr(u_int32_t ecb_paddr);
102 static __inline void ahbqueuembox(struct ahb_softc *ahb,
103 u_int32_t mboxval,
104 u_int attn_code);
105
106 static __inline struct ecb*
107 ahbecbget(struct ahb_softc *ahb)
108 {
109 struct ecb* ecb;
110 int s;
111
112 s = splcam();
113 if ((ecb = SLIST_FIRST(&ahb->free_ecbs)) != NULL)
114 SLIST_REMOVE_HEAD(&ahb->free_ecbs, links);
115 splx(s);
116
117 return (ecb);
118 }
119
120 static __inline void
121 ahbecbfree(struct ahb_softc* ahb, struct ecb* ecb)
122 {
123 int s;
124
125 s = splcam();
126 ecb->state = ECB_FREE;
127 SLIST_INSERT_HEAD(&ahb->free_ecbs, ecb, links);
128 splx(s);
129 }
130
131 static __inline u_int32_t
132 ahbecbvtop(struct ahb_softc *ahb, struct ecb *ecb)
133 {
134 return (ahb->ecb_physbase
135 + (u_int32_t)((caddr_t)ecb - (caddr_t)ahb->ecb_array));
136 }
137
138 static __inline struct ecb*
139 ahbecbptov(struct ahb_softc *ahb, u_int32_t ecb_addr)
140 {
141 return (ahb->ecb_array
142 + ((struct ecb*)(uintptr_t)ecb_addr
143 - (struct ecb*)(uintptr_t)ahb->ecb_physbase));
144 }
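/*
 * The two translations above rely on the ECB array having been loaded as a
 * single physically contiguous DMA segment (ecb_dmat is created with
 * nsegments == 1 in ahbattach()), so virtual and physical ECB addresses
 * differ by a constant offset:
 *
 *	paddr = ecb_physbase + (ecb - ecb_array) * sizeof(struct ecb)
 *	ecb   = ecb_array + (paddr - ecb_physbase) / sizeof(struct ecb)
 */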
145
146 static __inline u_int32_t
147 ahbstatuspaddr(u_int32_t ecb_paddr)
148 {
149 return (ecb_paddr + offsetof(struct ecb, status));
150 }
151
152 static __inline u_int32_t
153 ahbsensepaddr(u_int32_t ecb_paddr)
154 {
155 return (ecb_paddr + offsetof(struct ecb, sense));
156 }
157
158 static __inline u_int32_t
159 ahbsgpaddr(u_int32_t ecb_paddr)
160 {
161 return (ecb_paddr + offsetof(struct ecb, sg_list));
162 }
163
164 static __inline void
165 ahbqueuembox(struct ahb_softc *ahb, u_int32_t mboxval, u_int attn_code)
166 {
167 u_int loopmax = 300;
168 while (--loopmax) {
169 u_int status;
170
171 status = ahb_inb(ahb, HOSTSTAT);
172 if ((status & (HOSTSTAT_MBOX_EMPTY|HOSTSTAT_BUSY))
173 == HOSTSTAT_MBOX_EMPTY)
174 break;
175 DELAY(20);
176 }
177 if (loopmax == 0)
178 panic("ahb%ld: adapter not taking commands\n", ahb->unit);
179
180 ahb_outl(ahb, MBOXOUT0, mboxval);
181 ahb_outb(ahb, ATTN, attn_code);
182 }
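/*
 * Mailbox handshake: a new mailbox value may only be posted once the adapter
 * reports HOSTSTAT_MBOX_EMPTY with HOSTSTAT_BUSY clear.  The loop above
 * polls HOSTSTAT for roughly 300 * 20us (~6ms) before giving up, then writes
 * the 32-bit value to MBOXOUT0 and strobes the attention register with the
 * command code (e.g. ATTN_STARTECB | target id).
 */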
183
184 static const char *
185 ahbmatch(eisa_id_t type)
186 {
187 switch(type & 0xfffffe00) {
188 case EISA_DEVICE_ID_ADAPTEC_1740:
189 return ("Adaptec 174x SCSI host adapter");
190 break;
191 default:
192 break;
193 }
194 return (NULL);
195 }
196
197 static int
198 ahbprobe(device_t dev)
199 {
200 const char *desc;
201 u_int32_t iobase;
202 u_int32_t irq;
203 u_int8_t intdef;
204 int shared;
205
206 desc = ahbmatch(eisa_get_id(dev));
207 if (!desc)
208 return (ENXIO);
209 device_set_desc(dev, desc);
210
211 iobase = (eisa_get_slot(dev) * EISA_SLOT_SIZE) +
212 AHB_EISA_SLOT_OFFSET;
213
214 eisa_add_iospace(dev, iobase, AHB_EISA_IOSIZE, RESVADDR_NONE);
215
216 intdef = inb(INTDEF + iobase);
217 switch (intdef & 0x7) {
218 case INT9:
219 irq = 9;
220 break;
221 case INT10:
222 irq = 10;
223 break;
224 case INT11:
225 irq = 11;
226 break;
227 case INT12:
228 irq = 12;
229 break;
230 case INT14:
231 irq = 14;
232 break;
233 case INT15:
234 irq = 15;
235 break;
236 default:
237 printf("Adaptec 174X at slot %d: illegal "
238 "irq setting %d\n", eisa_get_slot(dev),
239 (intdef & 0x7));
240 irq = 0;
241 break;
242 }
243 if (irq == 0)
244 return ENXIO;
245
246 shared = (inb(INTDEF + iobase) & INTLEVEL) ?
247 EISA_TRIGGER_LEVEL : EISA_TRIGGER_EDGE;
248
249 eisa_add_intr(dev, irq, shared);
250
251 return 0;
252 }
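/*
 * Probe notes: the EISA slot number selects the adapter's fixed per-slot I/O
 * window (slot * EISA_SLOT_SIZE + AHB_EISA_SLOT_OFFSET), while the INTDEF
 * register supplies both the IRQ selection (low three bits) and the trigger
 * mode (INTLEVEL set means level triggered, otherwise edge triggered).
 */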
253
254 static int
255 ahbattach(device_t dev)
256 {
 257 	/*
 258 	 * Allocate resources, set up our ECB pool, and register with CAM.
 259 	 */
260 struct ahb_softc *ahb;
261 struct ecb* next_ecb;
262 struct resource *io = 0;
263 struct resource *irq = 0;
264 int rid;
265 void *ih;
266
267 rid = 0;
268 io = bus_alloc_resource_any(dev, SYS_RES_IOPORT, &rid, RF_ACTIVE);
269 if (!io) {
270 device_printf(dev, "No I/O space?!\n");
271 return ENOMEM;
272 }
273
274 if ((ahb = ahballoc(dev, io)) == NULL) {
275 goto error_exit2;
276 }
277
278 if (ahbreset(ahb) != 0)
279 goto error_exit;
280
281 rid = 0;
282 irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_ACTIVE);
283 if (!irq) {
284 device_printf(dev, "Can't allocate interrupt\n");
285 goto error_exit;
286 }
287
288 /*
289 * Create our DMA tags. These tags define the kinds of device
290 * accessible memory allocations and memory mappings we will
291 * need to perform during normal operation.
292 */
293 /* DMA tag for mapping buffers into device visible space. */
294 /* XXX Should be a child of the EISA bus dma tag */
295 if (bus_dma_tag_create( /* parent */ NULL,
296 /* alignment */ 1,
297 /* boundary */ 0,
298 /* lowaddr */ BUS_SPACE_MAXADDR_32BIT,
299 /* highaddr */ BUS_SPACE_MAXADDR,
300 /* filter */ NULL,
301 /* filterarg */ NULL,
302 /* maxsize */ MAXBSIZE,
303 /* nsegments */ AHB_NSEG,
304 /* maxsegsz */ BUS_SPACE_MAXSIZE_32BIT,
305 /* flags */ BUS_DMA_ALLOCNOW,
306 /* lockfunc */ busdma_lock_mutex,
307 /* lockarg */ &Giant,
308 &ahb->buffer_dmat) != 0)
309 goto error_exit;
310
311 ahb->init_level++;
312
313 /* DMA tag for our ccb structures and ha inquiry data */
314 if (bus_dma_tag_create( /* parent */ NULL,
315 /* alignment */ 1,
316 /* boundary */ 0,
317 /* lowaddr */ BUS_SPACE_MAXADDR_32BIT,
318 /* highaddr */ BUS_SPACE_MAXADDR,
319 /* filter */ NULL,
320 /* filterarg */ NULL,
321 /* maxsize */ (AHB_NECB *
322 sizeof(struct ecb))
323 + sizeof(*ahb->ha_inq_data),
324 /* nsegments */ 1,
325 /* maxsegsz */ BUS_SPACE_MAXSIZE_32BIT,
326 /* flags */ 0,
327 /* lockfunc */ busdma_lock_mutex,
328 /* lockarg */ &Giant,
329 &ahb->ecb_dmat) != 0)
330 goto error_exit;
331
332 ahb->init_level++;
333
334 /* Allocation for our ccbs */
335 if (bus_dmamem_alloc(ahb->ecb_dmat, (void **)&ahb->ecb_array,
336 BUS_DMA_NOWAIT, &ahb->ecb_dmamap) != 0)
337 goto error_exit;
338
339 ahb->ha_inq_data = (struct ha_inquiry_data *)&ahb->ecb_array[AHB_NECB];
340
341 ahb->init_level++;
342
343 /* And permanently map them */
344 bus_dmamap_load(ahb->ecb_dmat, ahb->ecb_dmamap,
345 ahb->ecb_array, AHB_NSEG * sizeof(struct ecb),
346 ahbmapecbs, ahb, /*flags*/0);
347
348 ahb->init_level++;
349
350 /* Allocate the buffer dmamaps for each of our ECBs */
351 bzero(ahb->ecb_array, (AHB_NECB * sizeof(struct ecb))
352 + sizeof(*ahb->ha_inq_data));
353 next_ecb = ahb->ecb_array;
354 while (ahb->num_ecbs < AHB_NECB) {
355 u_int32_t ecb_paddr;
356
357 if (bus_dmamap_create(ahb->buffer_dmat, /*flags*/0,
358 &next_ecb->dmamap))
359 break;
360 ecb_paddr = ahbecbvtop(ahb, next_ecb);
361 next_ecb->hecb.status_ptr = ahbstatuspaddr(ecb_paddr);
362 next_ecb->hecb.sense_ptr = ahbsensepaddr(ecb_paddr);
363 ahb->num_ecbs++;
364 ahbecbfree(ahb, next_ecb);
365 next_ecb++;
366 }
367
368 if (ahb->num_ecbs == 0)
369 goto error_exit;
370
371 ahb->init_level++;
372
373 /*
374 * Now that we know we own the resources we need, register
375 * our bus with the XPT.
376 */
377 if (ahbxptattach(ahb))
378 goto error_exit;
379
380 /* Enable our interrupt */
381 if (bus_setup_intr(dev, irq, INTR_TYPE_CAM|INTR_ENTROPY, NULL, ahbintr,
382 ahb, &ih) != 0)
383 goto error_exit;
384
385 return (0);
386
387 error_exit:
388 /*
389 * The board's IRQ line will not be left enabled
 390 	 * if we can't initialize correctly, so it's safe
 391 	 * to release the irq.
392 */
393 ahbfree(ahb);
394 error_exit2:
395 if (io)
396 bus_release_resource(dev, SYS_RES_IOPORT, 0, io);
397 if (irq)
398 bus_release_resource(dev, SYS_RES_IRQ, 0, irq);
399 return (-1);
400 }
401
402 static struct ahb_softc *
403 ahballoc(device_t dev, struct resource *res)
404 {
405 struct ahb_softc *ahb;
406
407 /*
408 * Allocate a storage area for us
409 */
410 ahb = malloc(sizeof(struct ahb_softc), M_DEVBUF, M_NOWAIT | M_ZERO);
411 if (!ahb) {
412 device_printf(dev, "cannot malloc!\n");
413 return (NULL);
414 }
415 SLIST_INIT(&ahb->free_ecbs);
416 LIST_INIT(&ahb->pending_ccbs);
417 ahb->unit = device_get_unit(dev);
418 ahb->tag = rman_get_bustag(res);
419 ahb->bsh = rman_get_bushandle(res);
420 ahb->disc_permitted = ~0;
421 ahb->tags_permitted = ~0;
422 ahb->dev = dev;
423
424 return (ahb);
425 }
426
427 static void
428 ahbfree(struct ahb_softc *ahb)
429 {
430 switch (ahb->init_level) {
431 default:
432 case 4:
433 bus_dmamap_unload(ahb->ecb_dmat, ahb->ecb_dmamap);
434 case 3:
435 bus_dmamem_free(ahb->ecb_dmat, ahb->ecb_array,
436 ahb->ecb_dmamap);
437 bus_dmamap_destroy(ahb->ecb_dmat, ahb->ecb_dmamap);
438 case 2:
439 bus_dma_tag_destroy(ahb->ecb_dmat);
440 case 1:
441 bus_dma_tag_destroy(ahb->buffer_dmat);
442 case 0:
443 break;
444 }
445 free(ahb, M_DEVBUF);
446 }
447
448 /*
 449  * Reset the board.  If it doesn't respond, return failure.
450 */
451 static int
452 ahbreset(struct ahb_softc *ahb)
453 {
454 int wait = 1000; /* 1 sec enough? */
455 int test;
456
457 if ((ahb_inb(ahb, PORTADDR) & PORTADDR_ENHANCED) == 0) {
458 printf("ahb_reset: Controller not in enhanced mode\n");
459 return (-1);
460 }
461
462 ahb_outb(ahb, CONTROL, CNTRL_HARD_RST);
463 DELAY(1000);
464 ahb_outb(ahb, CONTROL, 0);
465 while (--wait) {
466 DELAY(1000);
467 if ((ahb_inb(ahb, HOSTSTAT) & HOSTSTAT_BUSY) == 0)
468 break;
469 }
470
471 if (wait == 0) {
472 printf("ahbreset: No answer from aha1742 board\n");
473 return (-1);
474 }
475 if ((test = ahb_inb(ahb, MBOXIN0)) != 0) {
476 printf("ahb_reset: self test failed, val = 0x%x\n", test);
477 return (-1);
478 }
479 while (ahb_inb(ahb, HOSTSTAT) & HOSTSTAT_INTPEND) {
480 ahb_outb(ahb, CONTROL, CNTRL_CLRINT);
481 DELAY(10000);
482 }
483 return (0);
484 }
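/*
 * Reset sequence used above: pulse CNTRL_HARD_RST, wait up to about a second
 * for HOSTSTAT_BUSY to clear, read the self-test result from MBOXIN0 (any
 * non-zero value is a failure), and finally acknowledge interrupts that were
 * latched while the reset was in progress.
 */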
485
486 static void
487 ahbmapecbs(void *arg, bus_dma_segment_t *segs, int nseg, int error)
488 {
489 struct ahb_softc* ahb;
490
491 ahb = (struct ahb_softc*)arg;
492 ahb->ecb_physbase = segs->ds_addr;
493 /*
494 * Space for adapter inquiry information is on the
495 * tail of the ecb array.
496 */
497 ahb->ha_inq_physbase = ahbecbvtop(ahb, &ahb->ecb_array[AHB_NECB]);
498 }
499
500 static int
501 ahbxptattach(struct ahb_softc *ahb)
502 {
503 struct cam_devq *devq;
504 struct ecb *ecb;
505 u_int i;
506
 507 	/* Remember who we are on the SCSI bus */
508 ahb->scsi_id = ahb_inb(ahb, SCSIDEF) & HSCSIID;
509
510 /* Use extended translation?? */
511 ahb->extended_trans = ahb_inb(ahb, RESV1) & EXTENDED_TRANS;
512
513 /* Fetch adapter inquiry data */
514 ecb = ahbecbget(ahb); /* Always succeeds - no outstanding commands */
515 ecb->hecb.opcode = ECBOP_READ_HA_INQDATA;
516 ecb->hecb.flag_word1 = FW1_SUPPRESS_URUN_ERR|FW1_ERR_STATUS_BLK_ONLY;
517 ecb->hecb.data_ptr = ahb->ha_inq_physbase;
518 ecb->hecb.data_len = sizeof(struct ha_inquiry_data);
519 ecb->hecb.sense_ptr = 0;
520 ecb->state = ECB_ACTIVE;
521
522 /* Tell the adapter about this command */
523 ahbqueuembox(ahb, ahbecbvtop(ahb, ecb),
524 ATTN_STARTECB|ahb->scsi_id);
525
526 /* Poll for interrupt completion */
527 for (i = 1000; ecb->state != ECB_FREE && i != 0; i--) {
528 ahbintr(ahb);
529 DELAY(1000);
530 }
531
532 ahb->num_ecbs = MIN(ahb->num_ecbs,
533 ahb->ha_inq_data->scsi_data.spc2_flags);
534 printf("ahb%ld: %.8s %s SCSI Adapter, FW Rev. %.4s, ID=%d, %d ECBs\n",
535 ahb->unit, ahb->ha_inq_data->scsi_data.product,
536 (ahb->ha_inq_data->scsi_data.flags & 0x4) ? "Differential"
537 : "Single Ended",
538 ahb->ha_inq_data->scsi_data.revision,
539 ahb->scsi_id, ahb->num_ecbs);
540
541 /* Restore sense paddr for future CCB clients */
542 ecb->hecb.sense_ptr = ahbsensepaddr(ahbecbvtop(ahb, ecb));
543
544 ahbecbfree(ahb, ecb);
545
546 /*
547 * Create the device queue for our SIM.
548 */
549 devq = cam_simq_alloc(ahb->num_ecbs);
550 if (devq == NULL)
551 return (ENOMEM);
552
553 /*
554 * Construct our SIM entry
555 */
556 ahb->sim = cam_sim_alloc(ahbaction, ahbpoll, "ahb", ahb, ahb->unit,
557 &Giant, 2, ahb->num_ecbs, devq);
558 if (ahb->sim == NULL) {
559 cam_simq_free(devq);
560 return (ENOMEM);
561 }
562
563 if (xpt_bus_register(ahb->sim, ahb->dev, 0) != CAM_SUCCESS) {
564 cam_sim_free(ahb->sim, /*free_devq*/TRUE);
565 return (ENXIO);
566 }
567
568 if (xpt_create_path(&ahb->path, /*periph*/NULL,
569 cam_sim_path(ahb->sim), CAM_TARGET_WILDCARD,
570 CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
571 xpt_bus_deregister(cam_sim_path(ahb->sim));
572 cam_sim_free(ahb->sim, /*free_devq*/TRUE);
573 return (ENXIO);
574 }
575
576 /*
577 * Allow the board to generate interrupts.
578 */
579 ahb_outb(ahb, INTDEF, ahb_inb(ahb, INTDEF) | INTEN);
580
581 return (0);
582 }
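/*
 * Attach ordering: the adapter inquiry is fetched with a polled ECB before
 * interrupts are enabled, the usable ECB count is clamped using the inquiry
 * data, a CAM device queue and SIM sized to that count are allocated, the
 * bus and a wildcard path are registered with the XPT, and only then is
 * interrupt generation enabled through INTDEF/INTEN.
 */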
583
584 static void
585 ahbhandleimmed(struct ahb_softc *ahb, u_int32_t mbox, u_int intstat)
586 {
587 struct ccb_hdr *ccb_h;
588 u_int target_id;
589
590 if (ahb->immed_cmd == 0) {
591 printf("ahb%ld: Immediate Command complete with no "
592 " pending command\n", ahb->unit);
593 return;
594 }
595
596 target_id = intstat & INTSTAT_TARGET_MASK;
597
598 ccb_h = LIST_FIRST(&ahb->pending_ccbs);
599 while (ccb_h != NULL) {
600 struct ecb *pending_ecb;
601 union ccb *ccb;
602
603 pending_ecb = (struct ecb *)ccb_h->ccb_ecb_ptr;
604 ccb = pending_ecb->ccb;
605 ccb_h = LIST_NEXT(ccb_h, sim_links.le);
606 if (ccb->ccb_h.target_id == target_id
607 || target_id == ahb->scsi_id) {
608 untimeout(ahbtimeout, pending_ecb,
609 ccb->ccb_h.timeout_ch);
610 LIST_REMOVE(&ccb->ccb_h, sim_links.le);
611 if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE)
612 bus_dmamap_unload(ahb->buffer_dmat,
613 pending_ecb->dmamap);
614 if (pending_ecb == ahb->immed_ecb)
615 ccb->ccb_h.status =
616 CAM_CMD_TIMEOUT|CAM_RELEASE_SIMQ;
617 else if (target_id == ahb->scsi_id)
618 ccb->ccb_h.status = CAM_SCSI_BUS_RESET;
619 else
620 ccb->ccb_h.status = CAM_BDR_SENT;
621 ahbecbfree(ahb, pending_ecb);
622 xpt_done(ccb);
623 } else if (ahb->immed_ecb != NULL) {
624 /* Re-instate timeout */
625 ccb->ccb_h.timeout_ch =
626 timeout(ahbtimeout, (caddr_t)pending_ecb,
627 (ccb->ccb_h.timeout * hz) / 1000);
628 }
629 }
630
631 if (ahb->immed_ecb != NULL) {
632 ahb->immed_ecb = NULL;
633 printf("ahb%ld: No longer in timeout\n", ahb->unit);
634 } else if (target_id == ahb->scsi_id)
635 printf("ahb%ld: SCSI Bus Reset Delivered\n", ahb->unit);
636 else
637 printf("ahb%ld: Bus Device Reset Delibered to target %d\n",
638 ahb->unit, target_id);
639
640 ahb->immed_cmd = 0;
641 }
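/*
 * Immediate-command completion (above) makes one pass over the pending CCB
 * list: commands addressed to the reset target (or all of them, when the
 * reset was directed at our own ID and therefore reset the whole bus) are
 * unloaded, completed with an appropriate status and returned to the XPT,
 * while during timeout recovery the remaining commands simply have their
 * timeouts re-armed.
 */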
642
643 static void
644 ahbcalcresid(struct ahb_softc *ahb, struct ecb *ecb, union ccb *ccb)
645 {
646 if (ecb->status.data_overrun != 0) {
647 /*
648 * Overrun Condition. The hardware doesn't
649 * provide a meaningful byte count in this case
650 * (the residual is always 0). Tell the XPT
651 * layer about the error.
652 */
653 ccb->ccb_h.status = CAM_DATA_RUN_ERR;
654 } else {
655 ccb->csio.resid = ecb->status.resid_count;
656
657 if ((ecb->hecb.flag_word1 & FW1_SG_ECB) != 0) {
658 /*
659 * For S/G transfers, the adapter provides a pointer
660 * to the address in the last S/G element used and a
661 * residual for that element. So, we need to sum up
662 * the elements that follow it in order to get a real
663 * residual number. If we have an overrun, the residual
664 * reported will be 0 and we already know that all S/G
665 * segments have been exhausted, so we can skip this
666 * step.
667 */
668 ahb_sg_t *sg;
669 int num_sg;
670
671 num_sg = ecb->hecb.data_len / sizeof(ahb_sg_t);
672
673 /* Find the S/G the adapter was working on */
674 for (sg = ecb->sg_list;
675 num_sg != 0 && sg->addr != ecb->status.resid_addr;
676 num_sg--, sg++)
677 ;
678
679 /* Skip it */
680 num_sg--;
681 sg++;
682
683 /* Sum the rest */
684 for (; num_sg != 0; num_sg--, sg++)
685 ccb->csio.resid += sg->len;
686 }
687 /* Underruns are not errors */
688 ccb->ccb_h.status = CAM_REQ_CMP;
689 }
690 }
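/*
 * Residual example for the scatter/gather case above: if the adapter stopped
 * inside the third of five segments, resid_count covers what remained of
 * segment three and the summation loop adds the full lengths of segments
 * four and five, yielding the total untransferred byte count in csio.resid.
 */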
691
692 static void
693 ahbprocesserror(struct ahb_softc *ahb, struct ecb *ecb, union ccb *ccb)
694 {
695 struct hardware_ecb *hecb;
696 struct ecb_status *status;
697
698 hecb = &ecb->hecb;
699 status = &ecb->status;
700 switch (status->ha_status) {
701 case HS_OK:
702 ccb->csio.scsi_status = status->scsi_status;
703 if (status->scsi_status != 0) {
704 ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
705 if (status->sense_stored) {
706 ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
707 ccb->csio.sense_resid =
708 ccb->csio.sense_len - status->sense_len;
709 bcopy(&ecb->sense, &ccb->csio.sense_data,
710 status->sense_len);
711 }
712 }
713 break;
714 case HS_TARGET_NOT_ASSIGNED:
715 ccb->ccb_h.status = CAM_PATH_INVALID;
716 break;
717 case HS_SEL_TIMEOUT:
718 ccb->ccb_h.status = CAM_SEL_TIMEOUT;
719 break;
720 case HS_DATA_RUN_ERR:
721 ahbcalcresid(ahb, ecb, ccb);
722 break;
723 case HS_UNEXPECTED_BUSFREE:
724 ccb->ccb_h.status = CAM_UNEXP_BUSFREE;
725 break;
726 case HS_INVALID_PHASE:
727 ccb->ccb_h.status = CAM_SEQUENCE_FAIL;
728 break;
729 case HS_REQUEST_SENSE_FAILED:
730 ccb->ccb_h.status = CAM_AUTOSENSE_FAIL;
731 break;
732 case HS_TAG_MSG_REJECTED:
733 {
734 struct ccb_trans_settings neg;
735 struct ccb_trans_settings_scsi *scsi = &neg.proto_specific.scsi;
736
737 xpt_print_path(ccb->ccb_h.path);
738 printf("refuses tagged commands. Performing "
739 "non-tagged I/O\n");
740 memset(&neg, 0, sizeof (neg));
741 neg.protocol = PROTO_SCSI;
742 neg.protocol_version = SCSI_REV_2;
743 neg.transport = XPORT_SPI;
744 neg.transport_version = 2;
745 scsi->flags = CTS_SCSI_VALID_TQ;
746 xpt_setup_ccb(&neg.ccb_h, ccb->ccb_h.path, /*priority*/1);
747 xpt_async(AC_TRANSFER_NEG, ccb->ccb_h.path, &neg);
748 ahb->tags_permitted &= ~(0x01 << ccb->ccb_h.target_id);
749 ccb->ccb_h.status = CAM_MSG_REJECT_REC;
750 break;
751 }
752 case HS_FIRMWARE_LOAD_REQ:
753 case HS_HARDWARE_ERR:
754 /*
755 * Tell the system that the Adapter
756 * is no longer functional.
757 */
758 ccb->ccb_h.status = CAM_NO_HBA;
759 break;
760 case HS_CMD_ABORTED_HOST:
761 case HS_CMD_ABORTED_ADAPTER:
762 case HS_ATN_TARGET_FAILED:
763 case HS_SCSI_RESET_ADAPTER:
764 case HS_SCSI_RESET_INCOMING:
765 ccb->ccb_h.status = CAM_SCSI_BUS_RESET;
766 break;
767 case HS_INVALID_ECB_PARAM:
768 printf("ahb%ld: opcode 0x%02x, flag_word1 0x%02x, flag_word2 0x%02x\n",
769 ahb->unit, hecb->opcode, hecb->flag_word1, hecb->flag_word2);
770 ccb->ccb_h.status = CAM_SCSI_BUS_RESET;
771 break;
772 case HS_DUP_TCB_RECEIVED:
773 case HS_INVALID_OPCODE:
774 case HS_INVALID_CMD_LINK:
775 case HS_PROGRAM_CKSUM_ERROR:
776 panic("ahb%ld: Can't happen host status %x occurred",
777 ahb->unit, status->ha_status);
778 break;
779 }
780 if (ccb->ccb_h.status != CAM_REQ_CMP) {
781 xpt_freeze_devq(ccb->ccb_h.path, /*count*/1);
782 ccb->ccb_h.status |= CAM_DEV_QFRZN;
783 }
784 }
785
786 static void
787 ahbdone(struct ahb_softc *ahb, u_int32_t mbox, u_int intstat)
788 {
789 struct ecb *ecb;
790 union ccb *ccb;
791
792 ecb = ahbecbptov(ahb, mbox);
793
794 if ((ecb->state & ECB_ACTIVE) == 0)
795 panic("ecb not active");
796
797 ccb = ecb->ccb;
798
799 if (ccb != NULL) {
800 untimeout(ahbtimeout, ecb, ccb->ccb_h.timeout_ch);
801 LIST_REMOVE(&ccb->ccb_h, sim_links.le);
802
803 if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
804 bus_dmasync_op_t op;
805
806 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN)
807 op = BUS_DMASYNC_POSTREAD;
808 else
809 op = BUS_DMASYNC_POSTWRITE;
810 bus_dmamap_sync(ahb->buffer_dmat, ecb->dmamap, op);
811 bus_dmamap_unload(ahb->buffer_dmat, ecb->dmamap);
812 }
813
814 if ((intstat & INTSTAT_MASK) == INTSTAT_ECB_OK) {
815 ccb->ccb_h.status = CAM_REQ_CMP;
816 ccb->csio.resid = 0;
817 } else {
818 ahbprocesserror(ahb, ecb, ccb);
819 }
820 ahbecbfree(ahb, ecb);
821 xpt_done(ccb);
822 } else {
823 /* Non CCB Command */
824 if ((intstat & INTSTAT_MASK) != INTSTAT_ECB_OK) {
825 printf("ahb%ld: Command 0%x Failed %x:%x:%x\n",
826 ahb->unit, ecb->hecb.opcode,
827 *((u_int16_t*)&ecb->status),
828 ecb->status.ha_status, ecb->status.resid_count);
829 }
830 /* Client owns this ECB and will release it. */
831 }
832 }
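/*
 * Completion note: the mailbox value delivered with the interrupt is the
 * physical address of the finished ECB, which ahbecbptov() maps back to its
 * virtual counterpart.  ECBs with no attached CCB (such as the polled
 * adapter inquiry issued from ahbxptattach()) are left for their issuer to
 * release.
 */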
833
834 /*
 835  * Catch an interrupt from the adapter
836 */
837 static void
838 ahbintr(void *arg)
839 {
840 struct ahb_softc *ahb;
841 u_int intstat;
842 u_int32_t mbox;
843
844 ahb = (struct ahb_softc *)arg;
845
846 while (ahb_inb(ahb, HOSTSTAT) & HOSTSTAT_INTPEND) {
847 /*
848 * Fetch information about this interrupt.
849 */
850 intstat = ahb_inb(ahb, INTSTAT);
851 mbox = ahb_inl(ahb, MBOXIN0);
852
853 /*
854 * Reset interrupt latch.
855 */
856 ahb_outb(ahb, CONTROL, CNTRL_CLRINT);
857
858 /*
859 * Process the completed operation
860 */
861 switch (intstat & INTSTAT_MASK) {
862 case INTSTAT_ECB_OK:
863 case INTSTAT_ECB_CMPWRETRY:
864 case INTSTAT_ECB_CMPWERR:
865 ahbdone(ahb, mbox, intstat);
866 break;
867 case INTSTAT_AEN_OCCURED:
868 if ((intstat & INTSTAT_TARGET_MASK) == ahb->scsi_id) {
869 /* Bus Reset */
870 xpt_print_path(ahb->path);
871 switch (mbox) {
872 case HS_SCSI_RESET_ADAPTER:
873 printf("Host Adapter Initiated "
874 "Bus Reset occurred\n");
875 break;
876 case HS_SCSI_RESET_INCOMING:
877 printf("Bus Reset Initiated "
878 "by another device occurred\n");
879 break;
880 }
881 /* Notify the XPT */
882 xpt_async(AC_BUS_RESET, ahb->path, NULL);
883 break;
884 }
885 printf("Unsupported initiator selection AEN occured\n");
886 break;
887 case INTSTAT_IMMED_OK:
888 case INTSTAT_IMMED_ERR:
889 ahbhandleimmed(ahb, mbox, intstat);
890 break;
891 case INTSTAT_HW_ERR:
892 panic("Unrecoverable hardware Error Occurred\n");
893 }
894 }
895 }
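/*
 * The handler above loops while HOSTSTAT_INTPEND is set, latching INTSTAT
 * and MBOXIN0 before clearing the interrupt with CNTRL_CLRINT, and then
 * dispatches on the status class: ECB completions, asynchronous event
 * notifications (bus resets), immediate-command completions, or a fatal
 * hardware error.
 */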
896
897 static void
898 ahbexecuteecb(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
899 {
900 struct ecb *ecb;
901 union ccb *ccb;
902 struct ahb_softc *ahb;
903 u_int32_t ecb_paddr;
904 int s;
905
906 ecb = (struct ecb *)arg;
907 ccb = ecb->ccb;
908 ahb = (struct ahb_softc *)ccb->ccb_h.ccb_ahb_ptr;
909
910 if (error != 0) {
911 if (error != EFBIG)
912 printf("ahb%ld: Unexepected error 0x%x returned from "
913 "bus_dmamap_load\n", ahb->unit, error);
914 if (ccb->ccb_h.status == CAM_REQ_INPROG) {
915 xpt_freeze_devq(ccb->ccb_h.path, /*count*/1);
916 ccb->ccb_h.status = CAM_REQ_TOO_BIG|CAM_DEV_QFRZN;
917 }
918 ahbecbfree(ahb, ecb);
919 xpt_done(ccb);
920 return;
921 }
922
923 ecb_paddr = ahbecbvtop(ahb, ecb);
924
925 if (nseg != 0) {
926 ahb_sg_t *sg;
927 bus_dma_segment_t *end_seg;
928 bus_dmasync_op_t op;
929
930 end_seg = dm_segs + nseg;
931
932 /* Copy the segments into our SG list */
933 sg = ecb->sg_list;
934 while (dm_segs < end_seg) {
935 sg->addr = dm_segs->ds_addr;
936 sg->len = dm_segs->ds_len;
937 sg++;
938 dm_segs++;
939 }
940
941 if (nseg > 1) {
942 ecb->hecb.flag_word1 |= FW1_SG_ECB;
943 ecb->hecb.data_ptr = ahbsgpaddr(ecb_paddr);
944 ecb->hecb.data_len = sizeof(ahb_sg_t) * nseg;
945 } else {
946 ecb->hecb.data_ptr = ecb->sg_list->addr;
947 ecb->hecb.data_len = ecb->sg_list->len;
948 }
949
950 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
951 /* ecb->hecb.flag_word2 |= FW2_DATA_DIR_IN; */
952 op = BUS_DMASYNC_PREREAD;
953 } else {
954 op = BUS_DMASYNC_PREWRITE;
955 }
956 /* ecb->hecb.flag_word2 |= FW2_CHECK_DATA_DIR; */
957
958 bus_dmamap_sync(ahb->buffer_dmat, ecb->dmamap, op);
959
960 } else {
961 ecb->hecb.data_ptr = 0;
962 ecb->hecb.data_len = 0;
963 }
964
965 s = splcam();
966
967 /*
968 * Last time we need to check if this CCB needs to
969 * be aborted.
970 */
971 if (ccb->ccb_h.status != CAM_REQ_INPROG) {
972 if (nseg != 0)
973 bus_dmamap_unload(ahb->buffer_dmat, ecb->dmamap);
974 ahbecbfree(ahb, ecb);
975 xpt_done(ccb);
976 splx(s);
977 return;
978 }
979
980 ecb->state = ECB_ACTIVE;
981 ccb->ccb_h.status |= CAM_SIM_QUEUED;
982 LIST_INSERT_HEAD(&ahb->pending_ccbs, &ccb->ccb_h, sim_links.le);
983
984 /* Tell the adapter about this command */
985 ahbqueuembox(ahb, ecb_paddr, ATTN_STARTECB|ccb->ccb_h.target_id);
986
987 ccb->ccb_h.timeout_ch = timeout(ahbtimeout, (caddr_t)ecb,
988 (ccb->ccb_h.timeout * hz) / 1000);
989 splx(s);
990 }
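/*
 * DMA callback summary: a single segment is described directly by the
 * hardware ECB's data_ptr/data_len, while multiple segments set FW1_SG_ECB
 * and point data_ptr at the scatter/gather list embedded in the ECB
 * (ahbsgpaddr()).  The command is only queued to the adapter if the CCB was
 * not aborted while the mapping was in flight, and a timeout is armed for
 * ccb_h.timeout milliseconds.
 */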
991
992 static void
993 ahbaction(struct cam_sim *sim, union ccb *ccb)
994 {
995 struct ahb_softc *ahb;
996
997 CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("ahbaction\n"));
998
999 ahb = (struct ahb_softc *)cam_sim_softc(sim);
1000
1001 switch (ccb->ccb_h.func_code) {
1002 /* Common cases first */
1003 case XPT_SCSI_IO: /* Execute the requested I/O operation */
1004 {
1005 struct ecb *ecb;
1006 struct hardware_ecb *hecb;
1007
1008 /*
1009 * get an ecb to use.
1010 */
1011 if ((ecb = ahbecbget(ahb)) == NULL) {
1012 /* Should never occur */
1013 panic("Failed to get an ecb");
1014 }
1015
1016 /*
1017 * So we can find the ECB when an abort is requested
1018 */
1019 ecb->ccb = ccb;
1020 ccb->ccb_h.ccb_ecb_ptr = ecb;
1021 ccb->ccb_h.ccb_ahb_ptr = ahb;
1022
1023 /*
1024 * Put all the arguments for the xfer in the ecb
1025 */
1026 hecb = &ecb->hecb;
1027 hecb->opcode = ECBOP_INITIATOR_SCSI_CMD;
1028 hecb->flag_word1 = FW1_AUTO_REQUEST_SENSE
1029 | FW1_ERR_STATUS_BLK_ONLY;
1030 hecb->flag_word2 = ccb->ccb_h.target_lun
1031 | FW2_NO_RETRY_ON_BUSY;
1032 if ((ccb->ccb_h.flags & CAM_TAG_ACTION_VALID) != 0) {
1033 hecb->flag_word2 |= FW2_TAG_ENB
1034 | ((ccb->csio.tag_action & 0x3)
1035 << FW2_TAG_TYPE_SHIFT);
1036 }
1037 if ((ccb->ccb_h.flags & CAM_DIS_DISCONNECT) != 0)
1038 hecb->flag_word2 |= FW2_DISABLE_DISC;
1039 hecb->sense_len = ccb->csio.sense_len;
1040 hecb->cdb_len = ccb->csio.cdb_len;
1041 if ((ccb->ccb_h.flags & CAM_CDB_POINTER) != 0) {
1042 if ((ccb->ccb_h.flags & CAM_CDB_PHYS) == 0) {
1043 bcopy(ccb->csio.cdb_io.cdb_ptr,
1044 hecb->cdb, hecb->cdb_len);
1045 } else {
1046 /* I guess I could map it in... */
1047 ccb->ccb_h.status = CAM_REQ_INVALID;
1048 ahbecbfree(ahb, ecb);
1049 xpt_done(ccb);
1050 return;
1051 }
1052 } else {
1053 bcopy(ccb->csio.cdb_io.cdb_bytes,
1054 hecb->cdb, hecb->cdb_len);
1055 }
1056
1057 /*
1058 * If we have any data to send with this command,
1059 * map it into bus space.
1060 */
1061 if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
1062 if ((ccb->ccb_h.flags & CAM_SCATTER_VALID) == 0) {
1063 /*
1064 * We've been given a pointer
1065 * to a single buffer.
1066 */
1067 if ((ccb->ccb_h.flags & CAM_DATA_PHYS)==0) {
1068 int s;
1069 int error;
1070
1071 s = splsoftvm();
1072 error = bus_dmamap_load(
1073 ahb->buffer_dmat,
1074 ecb->dmamap,
1075 ccb->csio.data_ptr,
1076 ccb->csio.dxfer_len,
1077 ahbexecuteecb,
1078 ecb, /*flags*/0);
1079 if (error == EINPROGRESS) {
1080 /*
1081 * So as to maintain ordering,
1082 * freeze the controller queue
1083 * until our mapping is
1084 * returned.
1085 */
1086 xpt_freeze_simq(ahb->sim, 1);
1087 ccb->ccb_h.status |=
1088 CAM_RELEASE_SIMQ;
1089 }
1090 splx(s);
1091 } else {
1092 struct bus_dma_segment seg;
1093
1094 /* Pointer to physical buffer */
1095 seg.ds_addr =
1096 (bus_addr_t)ccb->csio.data_ptr;
1097 seg.ds_len = ccb->csio.dxfer_len;
1098 ahbexecuteecb(ecb, &seg, 1, 0);
1099 }
1100 } else {
1101 struct bus_dma_segment *segs;
1102
1103 if ((ccb->ccb_h.flags & CAM_DATA_PHYS) != 0)
1104 panic("ahbaction - Physical segment "
1105 "pointers unsupported");
1106
1107 if ((ccb->ccb_h.flags & CAM_SG_LIST_PHYS) == 0)
1108 panic("btaction - Virtual segment "
1109 "addresses unsupported");
1110
1111 /* Just use the segments provided */
1112 segs = (struct bus_dma_segment *)
1113 ccb->csio.data_ptr;
1114 ahbexecuteecb(ecb, segs, ccb->csio.sglist_cnt,
1115 0);
1116 }
1117 } else {
1118 ahbexecuteecb(ecb, NULL, 0, 0);
1119 }
1120 break;
1121 }
1122 case XPT_EN_LUN: /* Enable LUN as a target */
1123 case XPT_TARGET_IO: /* Execute target I/O request */
1124 case XPT_ACCEPT_TARGET_IO: /* Accept Host Target Mode CDB */
1125 case XPT_CONT_TARGET_IO: /* Continue Host Target I/O Connection*/
1126 case XPT_ABORT: /* Abort the specified CCB */
1127 /* XXX Implement */
1128 ccb->ccb_h.status = CAM_REQ_INVALID;
1129 xpt_done(ccb);
1130 break;
1131 case XPT_SET_TRAN_SETTINGS:
1132 {
1133 ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
1134 xpt_done(ccb);
1135 break;
1136 }
1137 case XPT_GET_TRAN_SETTINGS:
1138 /* Get default/user set transfer settings for the target */
1139 {
1140 struct ccb_trans_settings *cts = &ccb->cts;
1141 u_int target_mask = 0x01 << ccb->ccb_h.target_id;
1142 struct ccb_trans_settings_scsi *scsi =
1143 &cts->proto_specific.scsi;
1144 struct ccb_trans_settings_spi *spi =
1145 &cts->xport_specific.spi;
1146
1147 if (cts->type == CTS_TYPE_USER_SETTINGS) {
1148 cts->protocol = PROTO_SCSI;
1149 cts->protocol_version = SCSI_REV_2;
1150 cts->transport = XPORT_SPI;
1151 cts->transport_version = 2;
1152
1153 scsi->flags &= ~CTS_SCSI_FLAGS_TAG_ENB;
1154 spi->flags &= ~CTS_SPI_FLAGS_DISC_ENB;
1155 if ((ahb->disc_permitted & target_mask) != 0)
1156 spi->flags |= CTS_SPI_FLAGS_DISC_ENB;
1157 if ((ahb->tags_permitted & target_mask) != 0)
1158 scsi->flags |= CTS_SCSI_FLAGS_TAG_ENB;
1159 spi->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
1160 spi->sync_period = 25; /* 10MHz */
1161
1162 if (spi->sync_period != 0)
1163 spi->sync_offset = 15;
1164
1165 spi->valid = CTS_SPI_VALID_SYNC_RATE
1166 | CTS_SPI_VALID_SYNC_OFFSET
1167 | CTS_SPI_VALID_BUS_WIDTH
1168 | CTS_SPI_VALID_DISC;
1169 scsi->valid = CTS_SCSI_VALID_TQ;
1170 ccb->ccb_h.status = CAM_REQ_CMP;
1171 } else {
1172 ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
1173 }
1174 xpt_done(ccb);
1175 break;
1176 }
1177 case XPT_RESET_DEV: /* Bus Device Reset the specified SCSI device */
1178 {
1179 int i;
1180 int s;
1181
1182 s = splcam();
1183 ahb->immed_cmd = IMMED_RESET;
1184 ahbqueuembox(ahb, IMMED_RESET, ATTN_IMMED|ccb->ccb_h.target_id);
1185 /* Poll for interrupt completion */
1186 for (i = 1000; ahb->immed_cmd != 0 && i != 0; i--) {
1187 DELAY(1000);
1188 ahbintr(cam_sim_softc(sim));
1189 }
1190 splx(s);
1191 break;
1192 }
1193 case XPT_CALC_GEOMETRY:
1194 {
1195 cam_calc_geometry(&ccb->ccg, ahb->extended_trans);
1196 xpt_done(ccb);
1197 break;
1198 }
1199 case XPT_RESET_BUS: /* Reset the specified SCSI bus */
1200 {
1201 int i;
1202
1203 ahb->immed_cmd = IMMED_RESET;
1204 ahbqueuembox(ahb, IMMED_RESET, ATTN_IMMED|ahb->scsi_id);
1205 /* Poll for interrupt completion */
1206 for (i = 1000; ahb->immed_cmd != 0 && i != 0; i--)
1207 DELAY(1000);
1208 ccb->ccb_h.status = CAM_REQ_CMP;
1209 xpt_done(ccb);
1210 break;
1211 }
1212 case XPT_TERM_IO: /* Terminate the I/O process */
1213 /* XXX Implement */
1214 ccb->ccb_h.status = CAM_REQ_INVALID;
1215 xpt_done(ccb);
1216 break;
1217 case XPT_PATH_INQ: /* Path routing inquiry */
1218 {
1219 struct ccb_pathinq *cpi = &ccb->cpi;
1220
1221 cpi->version_num = 1; /* XXX??? */
1222 cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE;
1223 cpi->target_sprt = 0;
1224 cpi->hba_misc = 0;
1225 cpi->hba_eng_cnt = 0;
1226 cpi->max_target = 7;
1227 cpi->max_lun = 7;
1228 cpi->initiator_id = ahb->scsi_id;
1229 cpi->bus_id = cam_sim_bus(sim);
1230 cpi->base_transfer_speed = 3300;
1231 strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
1232 strncpy(cpi->hba_vid, "Adaptec", HBA_IDLEN);
1233 strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
1234 cpi->unit_number = cam_sim_unit(sim);
1235 cpi->transport = XPORT_SPI;
1236 cpi->transport_version = 2;
1237 cpi->protocol = PROTO_SCSI;
1238 cpi->protocol_version = SCSI_REV_2;
1239 cpi->ccb_h.status = CAM_REQ_CMP;
1240 xpt_done(ccb);
1241 break;
1242 }
1243 #if 0
1244 /* Need these??? */
1245 case XPT_IMMED_NOTIFY: /* Notify Host Target driver of event */
1246 case XPT_NOTIFY_ACK: /* Acknowledgement of event */
1247 #endif
1248 default:
1249 ccb->ccb_h.status = CAM_REQ_INVALID;
1250 xpt_done(ccb);
1251 break;
1252 }
1253 }
1254
1255 static void
1256 ahbpoll(struct cam_sim *sim)
1257 {
1258 ahbintr(cam_sim_softc(sim));
1259 }
1260
1261 static void
1262 ahbtimeout(void *arg)
1263 {
1264 struct ecb *ecb;
1265 union ccb *ccb;
1266 struct ahb_softc *ahb;
1267 int s;
1268
1269 ecb = (struct ecb *)arg;
1270 ccb = ecb->ccb;
1271 ahb = (struct ahb_softc *)ccb->ccb_h.ccb_ahb_ptr;
1272 xpt_print_path(ccb->ccb_h.path);
1273 printf("ECB %p - timed out\n", (void *)ecb);
1274
1275 s = splcam();
1276
1277 if ((ecb->state & ECB_ACTIVE) == 0) {
1278 xpt_print_path(ccb->ccb_h.path);
1279 printf("ECB %p - timed out ECB already completed\n",
1280 (void *)ecb);
1281 splx(s);
1282 return;
1283 }
1284 /*
1285 * In order to simplify the recovery process, we ask the XPT
1286 * layer to halt the queue of new transactions and we traverse
1287 * the list of pending CCBs and remove their timeouts. This
1288 * means that the driver attempts to clear only one error
1289 * condition at a time. In general, timeouts that occur
1290 * close together are related anyway, so there is no benefit
1291 	 * in attempting to handle errors in parallel. Timeouts will
1292 * be reinstated when the recovery process ends.
1293 */
1294 if ((ecb->state & ECB_DEVICE_RESET) == 0) {
1295 struct ccb_hdr *ccb_h;
1296
1297 if ((ecb->state & ECB_RELEASE_SIMQ) == 0) {
1298 xpt_freeze_simq(ahb->sim, /*count*/1);
1299 ecb->state |= ECB_RELEASE_SIMQ;
1300 }
1301
1302 ccb_h = LIST_FIRST(&ahb->pending_ccbs);
1303 while (ccb_h != NULL) {
1304 struct ecb *pending_ecb;
1305
1306 pending_ecb = (struct ecb *)ccb_h->ccb_ecb_ptr;
1307 untimeout(ahbtimeout, pending_ecb, ccb_h->timeout_ch);
1308 ccb_h = LIST_NEXT(ccb_h, sim_links.le);
1309 }
1310
1311 /* Store for our interrupt handler */
1312 ahb->immed_ecb = ecb;
1313
1314 /*
1315 * Send a Bus Device Reset message:
1316 * The target that is holding up the bus may not
1317 * be the same as the one that triggered this timeout
1318 * (different commands have different timeout lengths),
1319 * but we have no way of determining this from our
1320 * timeout handler. Our strategy here is to queue a
1321 * BDR message to the target of the timed out command.
1322 * If this fails, we'll get another timeout 2 seconds
1323 * later which will attempt a bus reset.
1324 */
1325 xpt_print_path(ccb->ccb_h.path);
1326 printf("Queuing BDR\n");
1327 ecb->state |= ECB_DEVICE_RESET;
1328 ccb->ccb_h.timeout_ch =
1329 timeout(ahbtimeout, (caddr_t)ecb, 2 * hz);
1330
1331 ahb->immed_cmd = IMMED_RESET;
1332 ahbqueuembox(ahb, IMMED_RESET, ATTN_IMMED|ccb->ccb_h.target_id);
1333 	} else if ((ecb->state & ECB_SCSIBUS_RESET) == 0) {
1334 		/*
1335 		 * Try a SCSI bus reset, but only if we have already
1336 		 * attempted to clear the condition with a BDR.
1337 		 */
1338 xpt_print_path(ccb->ccb_h.path);
1339 printf("Attempting SCSI Bus reset\n");
1340 ecb->state |= ECB_SCSIBUS_RESET;
1341 ccb->ccb_h.timeout_ch =
1342 timeout(ahbtimeout, (caddr_t)ecb, 2 * hz);
1343 ahb->immed_cmd = IMMED_RESET;
1344 ahbqueuembox(ahb, IMMED_RESET, ATTN_IMMED|ahb->scsi_id);
1345 } else {
1346 /* Bring out the hammer... */
1347 ahbreset(ahb);
1348
1349 /* Simulate the reset complete interrupt */
1350 ahbhandleimmed(ahb, 0, ahb->scsi_id|INTSTAT_IMMED_OK);
1351 }
1352
1353 splx(s);
1354 }
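/*
 * Timeout escalation implemented above: first a Bus Device Reset is queued
 * to the target of the timed-out command; if that 2 second window also
 * expires, a full SCSI bus reset is attempted; and as a last resort the
 * adapter is hard reset and the completion interrupt is simulated.
 */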
1355
1356 static device_method_t ahb_eisa_methods[] = {
1357 /* Device interface */
1358 DEVMETHOD(device_probe, ahbprobe),
1359 DEVMETHOD(device_attach, ahbattach),
1360
1361 { 0, 0 }
1362 };
1363
1364 static driver_t ahb_eisa_driver = {
1365 "ahb",
1366 ahb_eisa_methods,
1367 1, /* unused */
1368 };
1369
1370 static devclass_t ahb_devclass;
1371
1372 DRIVER_MODULE(ahb, eisa, ahb_eisa_driver, ahb_devclass, 0, 0);
1373 MODULE_DEPEND(ahb, eisa, 1, 1, 1);
1374 MODULE_DEPEND(ahb, cam, 1, 1, 1);