FreeBSD/Linux Kernel Cross Reference
sys/dev/ic/adw.c
1 /* $NetBSD: adw.c,v 1.49 2008/04/08 12:07:25 cegger Exp $ */
2
3 /*
4 * Generic driver for the Advanced Systems Inc. SCSI controllers
5 *
6 * Copyright (c) 1998, 1999, 2000 The NetBSD Foundation, Inc.
7 * All rights reserved.
8 *
9 * Author: Baldassare Dante Profeta <dante@mclink.it>
10 *
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
13 * are met:
14 * 1. Redistributions of source code must retain the above copyright
15 * notice, this list of conditions and the following disclaimer.
16 * 2. Redistributions in binary form must reproduce the above copyright
17 * notice, this list of conditions and the following disclaimer in the
18 * documentation and/or other materials provided with the distribution.
19 * 3. All advertising materials mentioning features or use of this software
20 * must display the following acknowledgement:
21 * This product includes software developed by the NetBSD
22 * Foundation, Inc. and its contributors.
23 * 4. Neither the name of The NetBSD Foundation nor the names of its
24 * contributors may be used to endorse or promote products derived
25 * from this software without specific prior written permission.
26 *
27 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
28 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
29 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
30 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
31 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
32 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
33 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
34 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
35 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
36 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
37 * POSSIBILITY OF SUCH DAMAGE.
38 */
39
40 #include <sys/cdefs.h>
41 __KERNEL_RCSID(0, "$NetBSD: adw.c,v 1.49 2008/04/08 12:07:25 cegger Exp $");
42
43 #include <sys/param.h>
44 #include <sys/systm.h>
45 #include <sys/callout.h>
46 #include <sys/kernel.h>
47 #include <sys/errno.h>
48 #include <sys/ioctl.h>
49 #include <sys/device.h>
50 #include <sys/malloc.h>
51 #include <sys/buf.h>
52 #include <sys/proc.h>
53 #include <sys/user.h>
54
55 #include <sys/bus.h>
56 #include <sys/intr.h>
57
58 #include <uvm/uvm_extern.h>
59
60 #include <dev/scsipi/scsi_all.h>
61 #include <dev/scsipi/scsipi_all.h>
62 #include <dev/scsipi/scsiconf.h>
63
64 #include <dev/ic/adwlib.h>
65 #include <dev/ic/adwmcode.h>
66 #include <dev/ic/adw.h>
67
68 #ifndef DDB
69 #define Debugger() panic("should call debugger here (adw.c)")
70 #endif /* ! DDB */
71
72 /******************************************************************************/
73
74
75 static int adw_alloc_controls(ADW_SOFTC *);
76 static int adw_alloc_carriers(ADW_SOFTC *);
77 static int adw_create_ccbs(ADW_SOFTC *, ADW_CCB *, int);
78 static void adw_free_ccb(ADW_SOFTC *, ADW_CCB *);
79 static void adw_reset_ccb(ADW_CCB *);
80 static int adw_init_ccb(ADW_SOFTC *, ADW_CCB *);
81 static ADW_CCB *adw_get_ccb(ADW_SOFTC *);
82 static int adw_queue_ccb(ADW_SOFTC *, ADW_CCB *);
83
84 static void adw_scsipi_request(struct scsipi_channel *,
85 scsipi_adapter_req_t, void *);
86 static int adw_build_req(ADW_SOFTC *, ADW_CCB *);
87 static void adw_build_sglist(ADW_CCB *, ADW_SCSI_REQ_Q *, ADW_SG_BLOCK *);
88 static void adwminphys(struct buf *);
89 static void adw_isr_callback(ADW_SOFTC *, ADW_SCSI_REQ_Q *);
90 static void adw_async_callback(ADW_SOFTC *, u_int8_t);
91
92 static void adw_print_info(ADW_SOFTC *, int);
93
94 static int adw_poll(ADW_SOFTC *, struct scsipi_xfer *, int);
95 static void adw_timeout(void *);
96 static void adw_reset_bus(ADW_SOFTC *);
97
98
99 /******************************************************************************/
100 /* DMA Mapping for Control Blocks */
101 /******************************************************************************/
102
103
/*
 * adw_alloc_controls():
 *	Allocate, map, and DMA-load the single `struct adw_control' that
 *	holds this adapter's CCBs and carriers.  Called once at attach
 *	time.  Returns 0 on success or the bus_dma(9) error code.
 *
 *	NOTE(review): on a failure partway through, earlier DMA resources
 *	(segment, mapping, map) are not released here; attach aborts, so
 *	they are simply lost -- confirm this matches driver policy.
 */
static int
adw_alloc_controls(ADW_SOFTC *sc)
{
	bus_dma_segment_t seg;
	int error, rseg;

	/*
	 * Allocate the control structure.
	 */
	if ((error = bus_dmamem_alloc(sc->sc_dmat, sizeof(struct adw_control),
	    PAGE_SIZE, 0, &seg, 1, &rseg,
	    BUS_DMA_NOWAIT)) != 0) {
		aprint_error_dev(&sc->sc_dev, "unable to allocate control structures,"
		    " error = %d\n", error);
		return (error);
	}
	/* Map the segment into kernel VA; COHERENT so CPU/device views agree. */
	if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
	    sizeof(struct adw_control), (void **) & sc->sc_control,
	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT)) != 0) {
		aprint_error_dev(&sc->sc_dev, "unable to map control structures, error = %d\n",
		    error);
		return (error);
	}

	/*
	 * Create and load the DMA map used for the control blocks.
	 * One segment covering the entire structure.
	 */
	if ((error = bus_dmamap_create(sc->sc_dmat, sizeof(struct adw_control),
	    1, sizeof(struct adw_control), 0, BUS_DMA_NOWAIT,
	    &sc->sc_dmamap_control)) != 0) {
		aprint_error_dev(&sc->sc_dev, "unable to create control DMA map, error = %d\n",
		    error);
		return (error);
	}
	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_dmamap_control,
	    sc->sc_control, sizeof(struct adw_control), NULL,
	    BUS_DMA_NOWAIT)) != 0) {
		aprint_error_dev(&sc->sc_dev, "unable to load control DMA map, error = %d\n",
		    error);
		return (error);
	}

	return (0);
}
148
149
150 static int
151 adw_alloc_carriers(ADW_SOFTC *sc)
152 {
153 bus_dma_segment_t seg;
154 int error, rseg;
155
156 /*
157 * Allocate the control structure.
158 */
159 sc->sc_control->carriers = malloc(sizeof(ADW_CARRIER) * ADW_MAX_CARRIER,
160 M_DEVBUF, M_WAITOK);
161 if(!sc->sc_control->carriers) {
162 aprint_error_dev(&sc->sc_dev,
163 "malloc() failed in allocating carrier structures\n");
164 return (ENOMEM);
165 }
166
167 if ((error = bus_dmamem_alloc(sc->sc_dmat,
168 sizeof(ADW_CARRIER) * ADW_MAX_CARRIER,
169 0x10, 0, &seg, 1, &rseg, BUS_DMA_NOWAIT)) != 0) {
170 aprint_error_dev(&sc->sc_dev, "unable to allocate carrier structures,"
171 " error = %d\n", error);
172 return (error);
173 }
174 if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
175 sizeof(ADW_CARRIER) * ADW_MAX_CARRIER,
176 (void **) &sc->sc_control->carriers,
177 BUS_DMA_NOWAIT | BUS_DMA_COHERENT)) != 0) {
178 aprint_error_dev(&sc->sc_dev, "unable to map carrier structures,"
179 " error = %d\n", error);
180 return (error);
181 }
182
183 /*
184 * Create and load the DMA map used for the control blocks.
185 */
186 if ((error = bus_dmamap_create(sc->sc_dmat,
187 sizeof(ADW_CARRIER) * ADW_MAX_CARRIER, 1,
188 sizeof(ADW_CARRIER) * ADW_MAX_CARRIER, 0,BUS_DMA_NOWAIT,
189 &sc->sc_dmamap_carrier)) != 0) {
190 aprint_error_dev(&sc->sc_dev, "unable to create carriers DMA map,"
191 " error = %d\n", error);
192 return (error);
193 }
194 if ((error = bus_dmamap_load(sc->sc_dmat,
195 sc->sc_dmamap_carrier, sc->sc_control->carriers,
196 sizeof(ADW_CARRIER) * ADW_MAX_CARRIER, NULL,
197 BUS_DMA_NOWAIT)) != 0) {
198 aprint_error_dev(&sc->sc_dev, "unable to load carriers DMA map,"
199 " error = %d\n", error);
200 return (error);
201 }
202
203 return (0);
204 }
205
206
207 /******************************************************************************/
208 /* Control Blocks routines */
209 /******************************************************************************/
210
211
212 /*
213 * Create a set of ccbs and add them to the free list. Called once
214 * by adw_init(). We return the number of CCBs successfully created.
215 */
216 static int
217 adw_create_ccbs(ADW_SOFTC *sc, ADW_CCB *ccbstore, int count)
218 {
219 ADW_CCB *ccb;
220 int i, error;
221
222 for (i = 0; i < count; i++) {
223 ccb = &ccbstore[i];
224 if ((error = adw_init_ccb(sc, ccb)) != 0) {
225 aprint_error_dev(&sc->sc_dev, "unable to initialize ccb, error = %d\n",
226 error);
227 return (i);
228 }
229 TAILQ_INSERT_TAIL(&sc->sc_free_ccb, ccb, chain);
230 }
231
232 return (i);
233 }
234
235
236 /*
237 * A ccb is put onto the free list.
238 */
239 static void
240 adw_free_ccb(ADW_SOFTC *sc, ADW_CCB *ccb)
241 {
242 int s;
243
244 s = splbio();
245
246 adw_reset_ccb(ccb);
247 TAILQ_INSERT_HEAD(&sc->sc_free_ccb, ccb, chain);
248
249 splx(s);
250 }
251
252
/*
 * Reset a CCB to its pristine state: clear all state flags
 * (CCB_ALLOC, CCB_ABORTING, CCB_ABORTED) before reuse.
 */
static void
adw_reset_ccb(ADW_CCB *ccb)
{

	ccb->flags = 0;
}
259
260
/*
 * adw_init_ccb():
 *	One-time setup of a CCB: create its transfer DMA map and enter it
 *	into the phys-to-kernel-VA hash so interrupt completion can find
 *	it from the physical address handed back by the microcode.
 *	Returns 0 on success or the bus_dmamap_create() error.
 */
static int
adw_init_ccb(ADW_SOFTC *sc, ADW_CCB *ccb)
{
	int hashnum, error;

	/*
	 * Create the DMA map for this CCB.
	 * Max transfer is (ADW_MAX_SG_LIST - 1) pages across up to
	 * ADW_MAX_SG_LIST segments; ALLOCNOW so loads cannot fail later
	 * for lack of resources.
	 */
	error = bus_dmamap_create(sc->sc_dmat,
	    (ADW_MAX_SG_LIST - 1) * PAGE_SIZE,
	    ADW_MAX_SG_LIST, (ADW_MAX_SG_LIST - 1) * PAGE_SIZE,
	    0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &ccb->dmamap_xfer);
	if (error) {
		aprint_error_dev(&sc->sc_dev, "unable to create CCB DMA map, error = %d\n",
		    error);
		return (error);
	}

	/*
	 * put in the phystokv hash table
	 * Never gets taken out.
	 *
	 * hashkey is the little-endian bus address of this CCB inside
	 * the control structure; it doubles as the scsiq ccb_ptr value.
	 */
	ccb->hashkey = htole32(sc->sc_dmamap_control->dm_segs[0].ds_addr +
	    ADW_CCB_OFF(ccb));
	hashnum = CCB_HASH(ccb->hashkey);
	/* Push onto the head of the hash chain. */
	ccb->nexthash = sc->sc_ccbhash[hashnum];
	sc->sc_ccbhash[hashnum] = ccb;
	adw_reset_ccb(ccb);
	return (0);
}
291
292
293 /*
294 * Get a free ccb
295 *
296 * If there are none, see if we can allocate a new one
297 */
298 static ADW_CCB *
299 adw_get_ccb(ADW_SOFTC *sc)
300 {
301 ADW_CCB *ccb = 0;
302 int s;
303
304 s = splbio();
305
306 ccb = sc->sc_free_ccb.tqh_first;
307 if (ccb != NULL) {
308 TAILQ_REMOVE(&sc->sc_free_ccb, ccb, chain);
309 ccb->flags |= CCB_ALLOC;
310 }
311 splx(s);
312 return (ccb);
313 }
314
315
316 /*
317 * Given a physical address, find the ccb that it corresponds to.
318 */
319 ADW_CCB *
320 adw_ccb_phys_kv(ADW_SOFTC *sc, u_int32_t ccb_phys)
321 {
322 int hashnum = CCB_HASH(ccb_phys);
323 ADW_CCB *ccb = sc->sc_ccbhash[hashnum];
324
325 while (ccb) {
326 if (ccb->hashkey == ccb_phys)
327 break;
328 ccb = ccb->nexthash;
329 }
330 return (ccb);
331 }
332
333
/*
 * Queue a CCB to be sent to the controller, and send it if possible.
 *
 * The new CCB goes on the tail of the waiting queue, then the whole
 * waiting queue is drained into the microcode via AdwExeScsiQueue().
 * Successfully submitted CCBs move to the pending queue and (unless
 * polling) get their per-xfer timeout callout armed.
 *
 * NOTE(review): on ADW_BUSY/ADW_ERROR the CCB has already been removed
 * from the waiting queue and is neither requeued nor freed here -- the
 * caller (adw_scsipi_request) frees it, but CCBs queued by other paths
 * would be lost.  Confirm all callers handle the non-ADW_SUCCESS return.
 */
static int
adw_queue_ccb(ADW_SOFTC *sc, ADW_CCB *ccb)
{
	int errcode = ADW_SUCCESS;

	TAILQ_INSERT_TAIL(&sc->sc_waiting_ccb, ccb, chain);

	/* Drain every waiting CCB, not just the one we were handed. */
	while ((ccb = sc->sc_waiting_ccb.tqh_first) != NULL) {

		TAILQ_REMOVE(&sc->sc_waiting_ccb, ccb, chain);
		errcode = AdwExeScsiQueue(sc, &ccb->scsiq);
		switch(errcode) {
		case ADW_SUCCESS:
			break;

		case ADW_BUSY:
			printf("ADW_BUSY\n");
			return(ADW_BUSY);

		case ADW_ERROR:
			printf("ADW_ERROR\n");
			return(ADW_ERROR);
		}

		TAILQ_INSERT_TAIL(&sc->sc_pending_ccb, ccb, chain);

		/* Polled commands are timed by adw_poll(), not the callout. */
		if ((ccb->xs->xs_control & XS_CTL_POLL) == 0)
			callout_reset(&ccb->xs->xs_callout,
			    mstohz(ccb->timeout), adw_timeout, ccb);
	}

	return(errcode);
}
370
371
372 /******************************************************************************/
373 /* SCSI layer interfacing routines */
374 /******************************************************************************/
375
376
377 int
378 adw_init(ADW_SOFTC *sc)
379 {
380 u_int16_t warn_code;
381
382
383 sc->cfg.lib_version = (ADW_LIB_VERSION_MAJOR << 8) |
384 ADW_LIB_VERSION_MINOR;
385 sc->cfg.chip_version =
386 ADW_GET_CHIP_VERSION(sc->sc_iot, sc->sc_ioh, sc->bus_type);
387
388 /*
389 * Reset the chip to start and allow register writes.
390 */
391 if (ADW_FIND_SIGNATURE(sc->sc_iot, sc->sc_ioh) == 0) {
392 panic("adw_init: adw_find_signature failed");
393 } else {
394 AdwResetChip(sc->sc_iot, sc->sc_ioh);
395
396 warn_code = AdwInitFromEEPROM(sc);
397
398 if (warn_code & ADW_WARN_EEPROM_CHKSUM)
399 aprint_error_dev(&sc->sc_dev, "Bad checksum found. "
400 "Setting default values\n");
401 if (warn_code & ADW_WARN_EEPROM_TERMINATION)
402 aprint_error_dev(&sc->sc_dev, "Bad bus termination setting."
403 "Using automatic termination.\n");
404 }
405
406 sc->isr_callback = (ADW_CALLBACK) adw_isr_callback;
407 sc->async_callback = (ADW_CALLBACK) adw_async_callback;
408
409 return 0;
410 }
411
412
/*
 * adw_attach():
 *	Second-stage attach: allocate and initialize CCBs and carriers,
 *	run AdwInitDriver() (panicking on unrecoverable hardware/cabling
 *	errors), then register the scsipi adapter/channel and probe the
 *	bus via config_found().  Errors before registration abort the
 *	attach silently (returns void).
 */
void
adw_attach(ADW_SOFTC *sc)
{
	struct scsipi_adapter *adapt = &sc->sc_adapter;
	struct scsipi_channel *chan = &sc->sc_channel;
	int ncontrols, error;

	TAILQ_INIT(&sc->sc_free_ccb);
	TAILQ_INIT(&sc->sc_waiting_ccb);
	TAILQ_INIT(&sc->sc_pending_ccb);

	/*
	 * Allocate the Control Blocks.
	 */
	error = adw_alloc_controls(sc);
	if (error)
		return; /* (error) */ ;

	memset(sc->sc_control, 0, sizeof(struct adw_control));

	/*
	 * Create and initialize the Control Blocks.
	 * A partial set is usable; only zero CCBs is fatal to the attach.
	 */
	ncontrols = adw_create_ccbs(sc, sc->sc_control->ccbs, ADW_MAX_CCB);
	if (ncontrols == 0) {
		aprint_error_dev(&sc->sc_dev, "unable to create Control Blocks\n");
		return; /* (ENOMEM) */ ;
	} else if (ncontrols != ADW_MAX_CCB) {
		aprint_error_dev(&sc->sc_dev, "WARNING: only %d of %d Control Blocks"
		    " created\n",
		    ncontrols, ADW_MAX_CCB);
	}

	/*
	 * Create and initialize the Carriers.
	 */
	error = adw_alloc_carriers(sc);
	if (error)
		return; /* (error) */ ;

	/*
	 * Zero the per-target freeze_device status.
	 */
	memset(sc->sc_freeze_dev, 0, sizeof(sc->sc_freeze_dev));

	/*
	 * Initialize the adapter.  All of these errors indicate broken
	 * hardware or cabling that the driver cannot work around.
	 */
	switch (AdwInitDriver(sc)) {
	case ADW_IERR_BIST_PRE_TEST:
		panic("%s: BIST pre-test error",
		    device_xname(&sc->sc_dev));
		break;

	case ADW_IERR_BIST_RAM_TEST:
		panic("%s: BIST RAM test error",
		    device_xname(&sc->sc_dev));
		break;

	case ADW_IERR_MCODE_CHKSUM:
		panic("%s: Microcode checksum error",
		    device_xname(&sc->sc_dev));
		break;

	case ADW_IERR_ILLEGAL_CONNECTION:
		panic("%s: All three connectors are in use",
		    device_xname(&sc->sc_dev));
		break;

	case ADW_IERR_REVERSED_CABLE:
		panic("%s: Cable is reversed",
		    device_xname(&sc->sc_dev));
		break;

	case ADW_IERR_HVD_DEVICE:
		panic("%s: HVD attached to LVD connector",
		    device_xname(&sc->sc_dev));
		break;

	case ADW_IERR_SINGLE_END_DEVICE:
		panic("%s: single-ended device is attached to"
		    " one of the connectors",
		    device_xname(&sc->sc_dev));
		break;

	case ADW_IERR_NO_CARRIER:
		panic("%s: unable to create Carriers",
		    device_xname(&sc->sc_dev));
		break;

	case ADW_WARN_BUSRESET_ERROR:
		/* Non-fatal: continue the attach. */
		aprint_error_dev(&sc->sc_dev, "WARNING: Bus Reset Error\n");
		break;
	}

	/*
	 * Fill in the scsipi_adapter.
	 * Openings are bounded by how many CCBs were actually created.
	 */
	memset(adapt, 0, sizeof(*adapt));
	adapt->adapt_dev = &sc->sc_dev;
	adapt->adapt_nchannels = 1;
	adapt->adapt_openings = ncontrols;
	adapt->adapt_max_periph = adapt->adapt_openings;
	adapt->adapt_request = adw_scsipi_request;
	adapt->adapt_minphys = adwminphys;

	/*
	 * Fill in the scsipi_channel.
	 */
	memset(chan, 0, sizeof(*chan));
	chan->chan_adapter = adapt;
	chan->chan_bustype = &scsi_bustype;
	chan->chan_channel = 0;
	chan->chan_ntargets = ADW_MAX_TID + 1;
	chan->chan_nluns = 8;
	chan->chan_id = sc->chip_scsi_id;

	config_found(&sc->sc_dev, &sc->sc_channel, scsiprint);
}
532
533
534 static void
535 adwminphys(struct buf *bp)
536 {
537
538 if (bp->b_bcount > ((ADW_MAX_SG_LIST - 1) * PAGE_SIZE))
539 bp->b_bcount = ((ADW_MAX_SG_LIST - 1) * PAGE_SIZE);
540 minphys(bp);
541 }
542
543
/*
 * adw_scsipi_request():
 *	scsipi entry point: start a SCSI operation given the command and
 *	the data address (also carries the unit, target and lun).
 *	For RUN_XFER: grab a CCB, build the request, queue it, and either
 *	return (interrupt-driven) or poll to completion (XS_CTL_POLL).
 *	GROW_RESOURCES and SET_XFER_MODE are currently not implemented.
 */
static void
adw_scsipi_request(struct scsipi_channel *chan, scsipi_adapter_req_t req,
    void *arg)
{
	struct scsipi_xfer *xs;
	ADW_SOFTC *sc = (void *)chan->chan_adapter->adapt_dev;
	ADW_CCB *ccb;
	int s, retry;

	switch (req) {
	case ADAPTER_REQ_RUN_XFER:
		xs = arg;

		/*
		 * get a ccb to use. If the transfer
		 * is from a buf (possibly from interrupt time)
		 * then we can't allow it to sleep
		 */

		ccb = adw_get_ccb(sc);
#ifdef DIAGNOSTIC
		/*
		 * This should never happen as we track the resources
		 * in the mid-layer (adapt_openings == number of CCBs).
		 */
		if (ccb == NULL) {
			scsipi_printaddr(xs->xs_periph);
			printf("unable to allocate ccb\n");
			panic("adw_scsipi_request");
		}
#endif

		ccb->xs = xs;
		ccb->timeout = xs->timeout;

		/*
		 * adw_build_req() returns 0 only when it already failed
		 * the xfer (DMA load error) and called scsipi_done().
		 */
		if (adw_build_req(sc, ccb)) {
			s = splbio();
			retry = adw_queue_ccb(sc, ccb);
			splx(s);

			switch(retry) {
			case ADW_BUSY:
				xs->error = XS_RESOURCE_SHORTAGE;
				adw_free_ccb(sc, ccb);
				scsipi_done(xs);
				return;

			case ADW_ERROR:
				xs->error = XS_DRIVER_STUFFUP;
				adw_free_ccb(sc, ccb);
				scsipi_done(xs);
				return;
			}
			if ((xs->xs_control & XS_CTL_POLL) == 0)
				return;
			/*
			 * Not allowed to use interrupts, poll for completion.
			 * On timeout, adw_timeout() escalates (abort, then
			 * bus reset); poll once more to let that finish.
			 */
			if (adw_poll(sc, xs, ccb->timeout)) {
				adw_timeout(ccb);
				if (adw_poll(sc, xs, ccb->timeout))
					adw_timeout(ccb);
			}
		}
		return;

	case ADAPTER_REQ_GROW_RESOURCES:
		/* XXX Not supported. */
		return;

	case ADAPTER_REQ_SET_XFER_MODE:
		/* XXX XXX XXX */
		return;
	}
}
623
624
/*
 * Build a request structure for the Wide Boards.
 *
 * Fills ccb->scsiq from the scsipi_xfer: CDB (split across cdb/cdb16
 * for commands longer than 12 bytes), target/lun, auto-sense buffer
 * address, and -- for data transfers -- loads the DMA map and builds
 * the scatter/gather block list.
 *
 * Returns 1 on success.  Returns 0 only after a DMA load failure, in
 * which case the xfer has already been failed (ccb freed, scsipi_done
 * called) -- the caller must not touch it again.
 */
static int
adw_build_req(ADW_SOFTC *sc, ADW_CCB *ccb)
{
	struct scsipi_xfer *xs = ccb->xs;
	struct scsipi_periph *periph = xs->xs_periph;
	bus_dma_tag_t dmat = sc->sc_dmat;
	ADW_SCSI_REQ_Q *scsiqp;
	int error;

	scsiqp = &ccb->scsiq;
	memset(scsiqp, 0, sizeof(ADW_SCSI_REQ_Q));

	/*
	 * Set the ADW_SCSI_REQ_Q 'ccb_ptr' to point to the
	 * physical CCB structure (the microcode hands it back at
	 * completion; adw_isr_callback reverses it via the hash).
	 */
	scsiqp->ccb_ptr = ccb->hashkey;

	/*
	 * Build the ADW_SCSI_REQ_Q request.
	 */

	/*
	 * Set CDB length and copy it to the request structure.
	 * For wide boards a CDB length maximum of 16 bytes
	 * is supported: the first 12 bytes go in cdb, the rest in cdb16.
	 */
	memcpy(&scsiqp->cdb, xs->cmd, ((scsiqp->cdb_len = xs->cmdlen) <= 12)?
	    xs->cmdlen : 12 );
	if(xs->cmdlen > 12)
		memcpy(&scsiqp->cdb16, &(xs->cmd[12]), xs->cmdlen - 12);

	scsiqp->target_id = periph->periph_target;
	scsiqp->target_lun = periph->periph_lun;

	/* Auto-request-sense buffer lives inside the CCB itself. */
	scsiqp->vsense_addr = &ccb->scsi_sense;
	scsiqp->sense_addr = htole32(sc->sc_dmamap_control->dm_segs[0].ds_addr +
	    ADW_CCB_OFF(ccb) + offsetof(struct adw_ccb, scsi_sense));
	scsiqp->sense_len = sizeof(struct scsi_sense_data);

	/*
	 * Build ADW_SCSI_REQ_Q for a scatter-gather buffer command.
	 */
	if (xs->datalen) {
		/*
		 * Map the DMA transfer.
		 */
#ifdef TFS
		/*
		 * NOTE(review): this branch references `flags' and
		 * SCSI_DATA_UIO which are not defined in this function;
		 * it cannot compile unless TFS provides them.
		 */
		if (xs->xs_control & SCSI_DATA_UIO) {
			error = bus_dmamap_load_uio(dmat,
			    ccb->dmamap_xfer, (struct uio *) xs->data,
			    ((flags & XS_CTL_NOSLEEP) ? BUS_DMA_NOWAIT :
			    BUS_DMA_WAITOK) | BUS_DMA_STREAMING |
			    ((flags & XS_CTL_DATA_IN) ? BUS_DMA_READ :
			    BUS_DMA_WRITE));
		} else
#endif /* TFS */
		{
			error = bus_dmamap_load(dmat,
			    ccb->dmamap_xfer, xs->data, xs->datalen, NULL,
			    ((xs->xs_control & XS_CTL_NOSLEEP) ?
			    BUS_DMA_NOWAIT : BUS_DMA_WAITOK) |
			    BUS_DMA_STREAMING |
			    ((xs->xs_control & XS_CTL_DATA_IN) ?
			    BUS_DMA_READ : BUS_DMA_WRITE));
		}

		switch (error) {
		case 0:
			break;
		case ENOMEM:
		case EAGAIN:
			/* Transient shortage: let the mid-layer retry. */
			xs->error = XS_RESOURCE_SHORTAGE;
			goto out_bad;

		default:
			xs->error = XS_DRIVER_STUFFUP;
			aprint_error_dev(&sc->sc_dev, "error %d loading DMA map\n",
			    error);
out_bad:
			adw_free_ccb(sc, ccb);
			scsipi_done(xs);
			return(0);
		}

		bus_dmamap_sync(dmat, ccb->dmamap_xfer, 0,
		    ccb->dmamap_xfer->dm_mapsize,
		    (xs->xs_control & XS_CTL_DATA_IN) ?
		    BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);

		/*
		 * Build scatter-gather list.
		 */
		scsiqp->data_cnt = htole32(xs->datalen);
		scsiqp->vdata_addr = xs->data;
		scsiqp->data_addr = htole32(ccb->dmamap_xfer->dm_segs[0].ds_addr);
		memset(ccb->sg_block, 0,
		    sizeof(ADW_SG_BLOCK) * ADW_NUM_SG_BLOCK);
		adw_build_sglist(ccb, scsiqp, ccb->sg_block);
	} else {
		/*
		 * No data xfer, use non S/G values.
		 */
		scsiqp->data_cnt = 0;
		scsiqp->vdata_addr = 0;
		scsiqp->data_addr = 0;
	}

	return (1);
}
738
739
/*
 * Build scatter-gather list for Wide Boards.
 *
 * Translates the bus_dma segment list in ccb->dmamap_xfer into the
 * hardware's chained ADW_SG_BLOCK format inside the CCB.  Each block
 * holds NO_OF_SG_PER_BLOCK entries; blocks are linked by their bus
 * addresses (sg_ptr), with a NULL link terminating the chain.  The
 * caller guarantees dm_nsegs >= 1 (only called when xs->datalen != 0)
 * and that the segment count fits in ADW_NUM_SG_BLOCK blocks.
 */
static void
adw_build_sglist(ADW_CCB *ccb, ADW_SCSI_REQ_Q *scsiqp, ADW_SG_BLOCK *sg_block)
{
	u_long sg_block_next_addr;	/* block and its next */
	u_int32_t sg_block_physical_addr;
	int i;			/* how many SG entries */
	bus_dma_segment_t *sg_list = &ccb->dmamap_xfer->dm_segs[0];
	int sg_elem_cnt = ccb->dmamap_xfer->dm_nsegs;


	sg_block_next_addr = (u_long) sg_block;	/* allow math operation */
	/* Bus address of sg_block[0]: CCB bus address + offset within CCB. */
	sg_block_physical_addr = le32toh(ccb->hashkey) +
	    offsetof(struct adw_ccb, sg_block[0]);
	scsiqp->sg_real_addr = htole32(sg_block_physical_addr);

	/*
	 * If there are more than NO_OF_SG_PER_BLOCK DMA segments (hw sg-list)
	 * then split the request into multiple sg-list blocks.
	 */

	do {
		for (i = 0; i < NO_OF_SG_PER_BLOCK; i++) {
			sg_block->sg_list[i].sg_addr = htole32(sg_list->ds_addr);
			sg_block->sg_list[i].sg_count = htole32(sg_list->ds_len);

			if (--sg_elem_cnt == 0) {
				/* last entry, get out */
				sg_block->sg_cnt = i + 1;
				sg_block->sg_ptr = 0;	/* next link = NULL */
				return;
			}
			sg_list++;
		}
		/* Current block is full: chain to the next one. */
		sg_block_next_addr += sizeof(ADW_SG_BLOCK);
		sg_block_physical_addr += sizeof(ADW_SG_BLOCK);

		sg_block->sg_cnt = NO_OF_SG_PER_BLOCK;
		sg_block->sg_ptr = htole32(sg_block_physical_addr);
		sg_block = (ADW_SG_BLOCK *) sg_block_next_addr;	/* virt. addr */
	} while (1);
}
784
785
786 /******************************************************************************/
787 /* Interrupts and TimeOut routines */
788 /******************************************************************************/
789
790
791 int
792 adw_intr(void *arg)
793 {
794 ADW_SOFTC *sc = arg;
795
796
797 if(AdwISR(sc) != ADW_FALSE) {
798 return (1);
799 }
800
801 return (0);
802 }
803
804
805 /*
806 * Poll a particular unit, looking for a particular xs
807 */
808 static int
809 adw_poll(ADW_SOFTC *sc, struct scsipi_xfer *xs, int count)
810 {
811
812 /* timeouts are in msec, so we loop in 1000 usec cycles */
813 while (count) {
814 adw_intr(sc);
815 if (xs->xs_status & XS_STS_DONE)
816 return (0);
817 delay(1000); /* only happens in boot so ok */
818 count--;
819 }
820 return (1);
821 }
822
823
/*
 * adw_timeout():
 *	Per-command timeout handler (callout), escalating in three
 *	stages tracked in ccb->flags:
 *	  1st fire:  mark CCB_ABORTING, rearm the callout;
 *	  2nd fire:  mark CCB_ABORTED, rearm the callout;
 *	  3rd fire:  give up and reset the SCSI bus.
 *	The actual ADW_ABORT_CCB() calls are compiled out (#if 0) because
 *	the 3.3a microcode cannot abort a CCB -- so stages 1 and 2 only
 *	give the command extra time to complete on its own.
 */
static void
adw_timeout(void *arg)
{
	ADW_CCB *ccb = arg;
	struct scsipi_xfer *xs = ccb->xs;
	struct scsipi_periph *periph = xs->xs_periph;
	ADW_SOFTC *sc =
	    (void *)periph->periph_channel->chan_adapter->adapt_dev;
	int s;

	scsipi_printaddr(periph);
	printf("timed out");

	s = splbio();

	if (ccb->flags & CCB_ABORTED) {
	/*
	 * Abort Timed Out
	 *
	 * No more opportunities. Lets try resetting the bus and
	 * reinitialize the host adapter.
	 */
		callout_stop(&xs->xs_callout);
		printf(" AGAIN. Resetting SCSI Bus\n");
		adw_reset_bus(sc);
		splx(s);
		return;
	} else if (ccb->flags & CCB_ABORTING) {
	/*
	 * Abort the operation that has timed out.
	 *
	 * Second opportunity.
	 */
		printf("\n");
		xs->error = XS_TIMEOUT;
		ccb->flags |= CCB_ABORTED;
#if 0
		/*
		 * - XXX - 3.3a microcode is BROKEN!!!
		 *
		 * We cannot abort a CCB, so we can only hope the command
		 * get completed before the next timeout, otherwise a
		 * Bus Reset will arrive inexorably.
		 */
		/*
		 * ADW_ABORT_CCB() makes the board to generate an interrupt
		 *
		 * - XXX - The above assertion MUST be verified (and this
		 *         code changed as well [callout_*()]), when the
		 *         ADW_ABORT_CCB will be working again
		 */
		ADW_ABORT_CCB(sc, ccb);
#endif
		/*
		 * waiting for multishot callout_reset() let's restart it
		 * by hand so the next time a timeout event will occur
		 * we will reset the bus.
		 */
		callout_reset(&xs->xs_callout,
			    mstohz(ccb->timeout), adw_timeout, ccb);
	} else {
	/*
	 * Abort the operation that has timed out.
	 *
	 * First opportunity.
	 */
		printf("\n");
		xs->error = XS_TIMEOUT;
		ccb->flags |= CCB_ABORTING;
#if 0
		/*
		 * - XXX - 3.3a microcode is BROKEN!!!
		 *
		 * We cannot abort a CCB, so we can only hope the command
		 * get completed before the next 2 timeout, otherwise a
		 * Bus Reset will arrive inexorably.
		 */
		/*
		 * ADW_ABORT_CCB() makes the board to generate an interrupt
		 *
		 * - XXX - The above assertion MUST be verified (and this
		 *         code changed as well [callout_*()]), when the
		 *         ADW_ABORT_CCB will be working again
		 */
		ADW_ABORT_CCB(sc, ccb);
#endif
		/*
		 * waiting for multishot callout_reset() let's restart it
		 * by hand so to give a second opportunity to the command
		 * which timed-out.
		 */
		callout_reset(&xs->xs_callout,
			    mstohz(ccb->timeout), adw_timeout, ccb);
	}

	splx(s);
}
921
922
923 static void
924 adw_reset_bus(ADW_SOFTC *sc)
925 {
926 ADW_CCB *ccb;
927 int s;
928 struct scsipi_xfer *xs;
929
930 s = splbio();
931 AdwResetSCSIBus(sc);
932 while((ccb = TAILQ_LAST(&sc->sc_pending_ccb,
933 adw_pending_ccb)) != NULL) {
934 callout_stop(&ccb->xs->xs_callout);
935 TAILQ_REMOVE(&sc->sc_pending_ccb, ccb, chain);
936 xs = ccb->xs;
937 adw_free_ccb(sc, ccb);
938 xs->error = XS_RESOURCE_SHORTAGE;
939 scsipi_done(xs);
940 }
941 splx(s);
942 }
943
944
945 /******************************************************************************/
946 /* Host Adapter and Peripherals Information Routines */
947 /******************************************************************************/
948
949
950 static void
951 adw_print_info(ADW_SOFTC *sc, int tid)
952 {
953 bus_space_tag_t iot = sc->sc_iot;
954 bus_space_handle_t ioh = sc->sc_ioh;
955 u_int16_t wdtr_able, wdtr_done, wdtr;
956 u_int16_t sdtr_able, sdtr_done, sdtr, period;
957 static int wdtr_reneg = 0, sdtr_reneg = 0;
958
959 if (tid == 0){
960 wdtr_reneg = sdtr_reneg = 0;
961 }
962
963 printf("%s: target %d ", device_xname(&sc->sc_dev), tid);
964
965 ADW_READ_WORD_LRAM(iot, ioh, ADW_MC_SDTR_ABLE, wdtr_able);
966 if(wdtr_able & ADW_TID_TO_TIDMASK(tid)) {
967 ADW_READ_WORD_LRAM(iot, ioh, ADW_MC_SDTR_DONE, wdtr_done);
968 ADW_READ_WORD_LRAM(iot, ioh, ADW_MC_DEVICE_HSHK_CFG_TABLE +
969 (2 * tid), wdtr);
970 printf("using %d-bits wide, ", (wdtr & 0x8000)? 16 : 8);
971 if((wdtr_done & ADW_TID_TO_TIDMASK(tid)) == 0)
972 wdtr_reneg = 1;
973 } else {
974 printf("wide transfers disabled, ");
975 }
976
977 ADW_READ_WORD_LRAM(iot, ioh, ADW_MC_SDTR_ABLE, sdtr_able);
978 if(sdtr_able & ADW_TID_TO_TIDMASK(tid)) {
979 ADW_READ_WORD_LRAM(iot, ioh, ADW_MC_SDTR_DONE, sdtr_done);
980 ADW_READ_WORD_LRAM(iot, ioh, ADW_MC_DEVICE_HSHK_CFG_TABLE +
981 (2 * tid), sdtr);
982 sdtr &= ~0x8000;
983 if((sdtr & 0x1F) != 0) {
984 if((sdtr & 0x1F00) == 0x1100){
985 printf("80.0 MHz");
986 } else if((sdtr & 0x1F00) == 0x1000){
987 printf("40.0 MHz");
988 } else {
989 /* <= 20.0 MHz */
990 period = (((sdtr >> 8) * 25) + 50)/4;
991 if(period == 0) {
992 /* Should never happen. */
993 printf("? MHz");
994 } else {
995 printf("%d.%d MHz", 250/period,
996 ADW_TENTHS(250, period));
997 }
998 }
999 printf(" synchronous transfers\n");
1000 } else {
1001 printf("asynchronous transfers\n");
1002 }
1003 if((sdtr_done & ADW_TID_TO_TIDMASK(tid)) == 0)
1004 sdtr_reneg = 1;
1005 } else {
1006 printf("synchronous transfers disabled\n");
1007 }
1008
1009 if(wdtr_reneg || sdtr_reneg) {
1010 printf("%s: target %d %s", device_xname(&sc->sc_dev), tid,
1011 (wdtr_reneg)? ((sdtr_reneg)? "wide/sync" : "wide") :
1012 ((sdtr_reneg)? "sync" : "") );
1013 printf(" renegotiation pending before next command.\n");
1014 }
1015 }
1016
1017
1018 /******************************************************************************/
1019 /* WIDE boards Interrupt callbacks */
1020 /******************************************************************************/
1021
1022
/*
 * adw_isr_callback() - Second Level Interrupt Handler called by AdwISR()
 *
 * Interrupt callback function for the Wide SCSI Adv Library: translates
 * the completed ADW_SCSI_REQ_Q back to its CCB/xfer, unloads the data
 * DMA map, decodes done/host/scsi status into an xs->error value, and
 * completes the xfer via scsipi_done().
 *
 * Notice:
 * 	Interrupts are disabled by the caller (AdwISR() function), and will be
 * 	enabled at the end of the caller.
 */
static void
adw_isr_callback(ADW_SOFTC *sc, ADW_SCSI_REQ_Q *scsiq)
{
	bus_dma_tag_t dmat = sc->sc_dmat;
	ADW_CCB *ccb;
	struct scsipi_xfer *xs;
	struct scsi_sense_data *s1, *s2;


	/* Map the microcode's physical ccb_ptr back to our CCB. */
	ccb = adw_ccb_phys_kv(sc, scsiq->ccb_ptr);

	callout_stop(&ccb->xs->xs_callout);

	xs = ccb->xs;

	/*
	 * If we were a data transfer, unload the map that described
	 * the data buffer.
	 */
	if (xs->datalen) {
		bus_dmamap_sync(dmat, ccb->dmamap_xfer, 0,
		    ccb->dmamap_xfer->dm_mapsize,
		    (xs->xs_control & XS_CTL_DATA_IN) ?
		    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(dmat, ccb->dmamap_xfer);
	}

	/* A completion for a CCB we never handed out indicates corruption. */
	if ((ccb->flags & CCB_ALLOC) == 0) {
		aprint_error_dev(&sc->sc_dev, "exiting ccb not allocated!\n");
		Debugger();
		return;
	}

	/*
	 * 'done_status' contains the command's ending status.
	 * 'host_status' contains the host adapter status.
	 * 'scsi_status' contains the scsi peripheral status.
	 */
	if ((scsiq->host_status == QHSTA_NO_ERROR) &&
	    ((scsiq->done_status == QD_NO_ERROR) ||
	    (scsiq->done_status == QD_WITH_ERROR))) {
		switch (scsiq->scsi_status) {
		case SCSI_STATUS_GOOD:
			/*
			 * Report negotiated transfer parameters once per
			 * target, after its LUN-0 INQUIRY completes.
			 */
			if ((scsiq->cdb[0] == INQUIRY) &&
			    (scsiq->target_lun == 0)) {
				adw_print_info(sc, scsiq->target_id);
			}
			xs->error = XS_NOERROR;
			/* data_cnt holds the residual at completion. */
			xs->resid = le32toh(scsiq->data_cnt);
			sc->sc_freeze_dev[scsiq->target_id] = 0;
			break;

		case SCSI_STATUS_CHECK_CONDITION:
		case SCSI_STATUS_CMD_TERMINATED:
			/* Copy the auto-sense data gathered by the board. */
			s1 = &ccb->scsi_sense;
			s2 = &xs->sense.scsi_sense;
			*s2 = *s1;
			xs->error = XS_SENSE;
			sc->sc_freeze_dev[scsiq->target_id] = 1;
			break;

		default:
			xs->error = XS_BUSY;
			sc->sc_freeze_dev[scsiq->target_id] = 1;
			break;
		}
	} else if (scsiq->done_status == QD_ABORTED_BY_HOST) {
		xs->error = XS_DRIVER_STUFFUP;
	} else {
		/* Host adapter level failure: decode host_status. */
		switch (scsiq->host_status) {
		case QHSTA_M_SEL_TIMEOUT:
			xs->error = XS_SELTIMEOUT;
			break;

		case QHSTA_M_SXFR_OFF_UFLW:
		case QHSTA_M_SXFR_OFF_OFLW:
		case QHSTA_M_DATA_OVER_RUN:
			aprint_error_dev(&sc->sc_dev, "Overrun/Overflow/Underflow condition\n");
			xs->error = XS_DRIVER_STUFFUP;
			break;

		case QHSTA_M_SXFR_DESELECTED:
		case QHSTA_M_UNEXPECTED_BUS_FREE:
			aprint_error_dev(&sc->sc_dev, "Unexpected BUS free\n");
			xs->error = XS_DRIVER_STUFFUP;
			break;

		case QHSTA_M_SCSI_BUS_RESET:
		case QHSTA_M_SCSI_BUS_RESET_UNSOL:
			aprint_error_dev(&sc->sc_dev, "BUS Reset\n");
			xs->error = XS_DRIVER_STUFFUP;
			break;

		case QHSTA_M_BUS_DEVICE_RESET:
			aprint_error_dev(&sc->sc_dev, "Device Reset\n");
			xs->error = XS_DRIVER_STUFFUP;
			break;

		case QHSTA_M_QUEUE_ABORTED:
			aprint_error_dev(&sc->sc_dev, "Queue Aborted\n");
			xs->error = XS_DRIVER_STUFFUP;
			break;

		case QHSTA_M_SXFR_SDMA_ERR:
		case QHSTA_M_SXFR_SXFR_PERR:
		case QHSTA_M_RDMA_PERR:
			/*
			 * DMA Error. This should *NEVER* happen!
			 *
			 * Lets try resetting the bus and reinitialize
			 * the host adapter.
			 */
			aprint_error_dev(&sc->sc_dev, "DMA Error. Reseting bus\n");
			/* adw_reset_bus() drains sc_pending_ccb; pull
			 * this CCB off first so it isn't completed twice. */
			TAILQ_REMOVE(&sc->sc_pending_ccb, ccb, chain);
			adw_reset_bus(sc);
			xs->error = XS_BUSY;
			goto done;

		case QHSTA_M_WTM_TIMEOUT:
		case QHSTA_M_SXFR_WD_TMO:
			/* The SCSI bus hung in a phase */
			printf("%s: Watch Dog timer expired. Reseting bus\n",
			    device_xname(&sc->sc_dev));
			TAILQ_REMOVE(&sc->sc_pending_ccb, ccb, chain);
			adw_reset_bus(sc);
			xs->error = XS_BUSY;
			goto done;

		case QHSTA_M_SXFR_XFR_PH_ERR:
			aprint_error_dev(&sc->sc_dev, "Transfer Error\n");
			xs->error = XS_DRIVER_STUFFUP;
			break;

		case QHSTA_M_BAD_CMPL_STATUS_IN:
			/* No command complete after a status message */
			printf("%s: Bad Completion Status\n",
			    device_xname(&sc->sc_dev));
			xs->error = XS_DRIVER_STUFFUP;
			break;

		case QHSTA_M_AUTO_REQ_SENSE_FAIL:
			aprint_error_dev(&sc->sc_dev, "Auto Sense Failed\n");
			xs->error = XS_DRIVER_STUFFUP;
			break;

		case QHSTA_M_INVALID_DEVICE:
			aprint_error_dev(&sc->sc_dev, "Invalid Device\n");
			xs->error = XS_DRIVER_STUFFUP;
			break;

		case QHSTA_M_NO_AUTO_REQ_SENSE:
			/*
			 * User didn't request sense, but we got a
			 * check condition.
			 */
			aprint_error_dev(&sc->sc_dev, "Unexpected Check Condition\n");
			xs->error = XS_DRIVER_STUFFUP;
			break;

		case QHSTA_M_SXFR_UNKNOWN_ERROR:
			aprint_error_dev(&sc->sc_dev, "Unknown Error\n");
			xs->error = XS_DRIVER_STUFFUP;
			break;

		default:
			panic("%s: Unhandled Host Status Error %x",
			    device_xname(&sc->sc_dev), scsiq->host_status);
		}
	}

	TAILQ_REMOVE(&sc->sc_pending_ccb, ccb, chain);
done:	adw_free_ccb(sc, ccb);
	scsipi_done(xs);
}
1206
1207
1208 /*
1209 * adw_async_callback() - Adv Library asynchronous event callback function.
1210 */
1211 static void
1212 adw_async_callback(ADW_SOFTC *sc, u_int8_t code)
1213 {
1214 switch (code) {
1215 case ADV_ASYNC_SCSI_BUS_RESET_DET:
1216 /* The firmware detected a SCSI Bus reset. */
1217 printf("%s: SCSI Bus reset detected\n", device_xname(&sc->sc_dev));
1218 break;
1219
1220 case ADV_ASYNC_RDMA_FAILURE:
1221 /*
1222 * Handle RDMA failure by resetting the SCSI Bus and
1223 * possibly the chip if it is unresponsive.
1224 */
1225 printf("%s: RDMA failure. Resetting the SCSI Bus and"
1226 " the adapter\n", device_xname(&sc->sc_dev));
1227 AdwResetSCSIBus(sc);
1228 break;
1229
1230 case ADV_HOST_SCSI_BUS_RESET:
1231 /* Host generated SCSI bus reset occurred. */
1232 printf("%s: Host generated SCSI bus reset occurred\n",
1233 device_xname(&sc->sc_dev));
1234 break;
1235
1236 case ADV_ASYNC_CARRIER_READY_FAILURE:
1237 /* Carrier Ready failure. */
1238 printf("%s: Carrier Ready failure!\n", device_xname(&sc->sc_dev));
1239 break;
1240
1241 default:
1242 break;
1243 }
1244 }
Cache object: e9bf2660f9903969b0ae8995c8131843
|