FreeBSD/Linux Kernel Cross Reference
sys/dev/ic/bha.c
1 /* $NetBSD: bha.c,v 1.71 2008/04/28 20:23:49 martin Exp $ */
2
3 /*-
4 * Copyright (c) 1997, 1998, 1999 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Charles M. Hannum and by Jason R. Thorpe of the Numerical Aerospace
9 * Simulation Facility, NASA Ames Research Center.
10 *
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
13 * are met:
14 * 1. Redistributions of source code must retain the above copyright
15 * notice, this list of conditions and the following disclaimer.
16 * 2. Redistributions in binary form must reproduce the above copyright
17 * notice, this list of conditions and the following disclaimer in the
18 * documentation and/or other materials provided with the distribution.
19 *
20 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
21 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
22 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
23 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
24 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 * POSSIBILITY OF SUCH DAMAGE.
31 */
32
33 /*
34 * Originally written by Julian Elischer (julian@tfs.com)
35 * for TRW Financial Systems for use under the MACH(2.5) operating system.
36 *
37 * TRW Financial Systems, in accordance with their agreement with Carnegie
38 * Mellon University, makes this software available to CMU to distribute
39 * or use in any manner that they see fit as long as this message is kept with
40 * the software. For this reason TFS also grants any other persons or
41 * organisations permission to use or modify this software.
42 *
43 * TFS supplies this software to be publicly redistributed
44 * on the understanding that TFS is not responsible for the correct
45 * functioning of this software in any circumstances.
46 */
47
48 #include <sys/cdefs.h>
49 __KERNEL_RCSID(0, "$NetBSD: bha.c,v 1.71 2008/04/28 20:23:49 martin Exp $");
50
51 #include "opt_ddb.h"
52
53 #include <sys/param.h>
54 #include <sys/systm.h>
55 #include <sys/callout.h>
56 #include <sys/kernel.h>
57 #include <sys/errno.h>
58 #include <sys/ioctl.h>
59 #include <sys/device.h>
60 #include <sys/malloc.h>
61 #include <sys/buf.h>
62 #include <sys/proc.h>
63 #include <sys/user.h>
64
65 #include <uvm/uvm_extern.h>
66
67 #include <sys/bus.h>
68 #include <sys/intr.h>
69
70 #include <dev/scsipi/scsi_all.h>
71 #include <dev/scsipi/scsipi_all.h>
72 #include <dev/scsipi/scsiconf.h>
73
74 #include <dev/ic/bhareg.h>
75 #include <dev/ic/bhavar.h>
76
77 #ifndef DDB
78 #define Debugger() panic("should call debugger here (bha.c)")
79 #endif /* ! DDB */
80
81 #define BHA_MAXXFER ((BHA_NSEG - 1) << PGSHIFT)
82
83 #ifdef BHADEBUG
84 int bha_debug = 0;
85 #endif /* BHADEBUG */
86
87 static int bha_cmd(bus_space_tag_t, bus_space_handle_t, const char *, int,
88 u_char *, int, u_char *);
89
90 static void bha_scsipi_request(struct scsipi_channel *,
91 scsipi_adapter_req_t, void *);
92 static void bha_minphys(struct buf *);
93
94 static void bha_get_xfer_mode(struct bha_softc *,
95 struct scsipi_xfer_mode *);
96
97 static void bha_done(struct bha_softc *, struct bha_ccb *);
98 static int bha_poll(struct bha_softc *, struct scsipi_xfer *, int);
99 static void bha_timeout(void *arg);
100
101 static int bha_init(struct bha_softc *);
102
103 static int bha_create_mailbox(struct bha_softc *);
104 static void bha_collect_mbo(struct bha_softc *);
105
106 static void bha_queue_ccb(struct bha_softc *, struct bha_ccb *);
107 static void bha_start_ccbs(struct bha_softc *);
108 static void bha_finish_ccbs(struct bha_softc *);
109
110 static struct bha_ccb *bha_ccb_phys_kv(struct bha_softc *, bus_addr_t);
111 static void bha_create_ccbs(struct bha_softc *, int);
112 static int bha_init_ccb(struct bha_softc *, struct bha_ccb *);
113 static struct bha_ccb *bha_get_ccb(struct bha_softc *);
114 static void bha_free_ccb(struct bha_softc *, struct bha_ccb *);
115
116 #define BHA_RESET_TIMEOUT 2000 /* time to wait for reset (mSec) */
117 #define BHA_ABORT_TIMEOUT 2000 /* time to wait for abort (mSec) */
118
119 /*
120 * Number of CCBs in an allocation group; must be computed at run-time.
121 */
122 static int bha_ccbs_per_group;
123
124 static inline struct bha_mbx_out *
125 bha_nextmbo(struct bha_softc *sc, struct bha_mbx_out *mbo)
126 {
127
128 if (mbo == &sc->sc_mbo[sc->sc_mbox_count - 1])
129 return (&sc->sc_mbo[0]);
130 return (mbo + 1);
131 }
132
133 static inline struct bha_mbx_in *
134 bha_nextmbi(struct bha_softc *sc, struct bha_mbx_in *mbi)
135 {
136 if (mbi == &sc->sc_mbi[sc->sc_mbox_count - 1])
137 return (&sc->sc_mbi[0]);
138 return (mbi + 1);
139 }
140
141 /*
142 * bha_attach:
143 *
144 * Finish attaching a Buslogic controller, and configure children.
145 */
void
bha_attach(struct bha_softc *sc)
{
	struct scsipi_adapter *adapt = &sc->sc_adapter;
	struct scsipi_channel *chan = &sc->sc_channel;
	int initial_ccbs;

	/*
	 * Initialize the number of CCBs per group.
	 */
	if (bha_ccbs_per_group == 0)
		bha_ccbs_per_group = BHA_CCBS_PER_GROUP;

	/*
	 * Query the board; bha_info() returns the initial CCB count,
	 * or 0 if the adapter could not be interrogated.
	 */
	initial_ccbs = bha_info(sc);
	if (initial_ccbs == 0) {
		aprint_error_dev(&sc->sc_dev, "unable to get adapter info\n");
		return;
	}

	/*
	 * Fill in the scsipi_adapter.
	 */
	memset(adapt, 0, sizeof(*adapt));
	adapt->adapt_dev = &sc->sc_dev;
	adapt->adapt_nchannels = 1;
	/* adapt_openings initialized below */
	adapt->adapt_max_periph = sc->sc_mbox_count;
	adapt->adapt_request = bha_scsipi_request;
	adapt->adapt_minphys = bha_minphys;

	/*
	 * Fill in the scsipi_channel.
	 */
	memset(chan, 0, sizeof(*chan));
	chan->chan_adapter = adapt;
	chan->chan_bustype = &scsi_bustype;
	chan->chan_channel = 0;
	chan->chan_flags = SCSIPI_CHAN_CANGROW;
	/* Wide boards address 16 targets; wide-LUN boards, 32 LUNs. */
	chan->chan_ntargets = (sc->sc_flags & BHAF_WIDE) ? 16 : 8;
	chan->chan_nluns = (sc->sc_flags & BHAF_WIDE_LUN) ? 32 : 8;
	chan->chan_id = sc->sc_scsi_id;

	/* CCB lists must be initialized before any CCBs are created. */
	TAILQ_INIT(&sc->sc_free_ccb);
	TAILQ_INIT(&sc->sc_waiting_ccb);
	TAILQ_INIT(&sc->sc_allocating_ccbs);

	if (bha_create_mailbox(sc) != 0)
		return;

	bha_create_ccbs(sc, initial_ccbs);
	if (sc->sc_cur_ccbs < 2) {
		aprint_error_dev(&sc->sc_dev, "not enough CCBs to run\n");
		return;
	}

	/* One opening per CCB actually allocated. */
	adapt->adapt_openings = sc->sc_cur_ccbs;

	if (bha_init(sc) != 0)
		return;

	/* Attach and configure the SCSI bus below us. */
	(void) config_found(&sc->sc_dev, &sc->sc_channel, scsiprint);
}
208
209 /*
210 * bha_intr:
211 *
212 * Interrupt service routine.
213 */
int
bha_intr(void *arg)
{
	struct bha_softc *sc = arg;
	bus_space_tag_t iot = sc->sc_iot;
	bus_space_handle_t ioh = sc->sc_ioh;
	u_char sts;

#ifdef BHADEBUG
	printf("%s: bha_intr ", device_xname(&sc->sc_dev));
#endif /* BHADEBUG */

	/*
	 * First acknowledge the interrupt, Then if it's not telling about
	 * a completed operation just return.
	 */
	sts = bus_space_read_1(iot, ioh, BHA_INTR_PORT);
	if ((sts & BHA_INTR_ANYINTR) == 0)
		return (0);		/* not our interrupt */
	bus_space_write_1(iot, ioh, BHA_CTRL_PORT, BHA_CTRL_IRST);

#ifdef BHADIAG
	/* Make sure we clear CCB_SENDING before finishing a CCB. */
	bha_collect_mbo(sc);
#endif

	/* Mail box out empty? */
	if (sts & BHA_INTR_MBOA) {
		struct bha_toggle toggle;

		/*
		 * Disable the mailbox-out-available interrupt until we
		 * need it again, then feed the board more waiting CCBs.
		 */
		toggle.cmd.opcode = BHA_MBO_INTR_EN;
		toggle.cmd.enable = 0;
		bha_cmd(iot, ioh, device_xname(&sc->sc_dev),
		    sizeof(toggle.cmd), (u_char *)&toggle.cmd,
		    0, (u_char *)0);
		bha_start_ccbs(sc);
	}

	/* Mail box in full? */
	if (sts & BHA_INTR_MBIF)
		bha_finish_ccbs(sc);

	return (1);		/* interrupt was handled */
}
258
259 /*****************************************************************************
260 * SCSI interface routines
261 *****************************************************************************/
262
263 /*
264 * bha_scsipi_request:
265 *
266 * Perform a request for the SCSIPI layer.
267 */
static void
bha_scsipi_request(struct scsipi_channel *chan, scsipi_adapter_req_t req,
    void *arg)
{
	struct scsipi_adapter *adapt = chan->chan_adapter;
	struct bha_softc *sc = (void *)adapt->adapt_dev;
	struct scsipi_xfer *xs;
	struct scsipi_periph *periph;
	bus_dma_tag_t dmat = sc->sc_dmat;
	struct bha_ccb *ccb;
	int error, seg, flags, s;

	switch (req) {
	case ADAPTER_REQ_RUN_XFER:
		xs = arg;
		periph = xs->xs_periph;
		flags = xs->xs_control;

		SC_DEBUG(periph, SCSIPI_DB2, ("bha_scsipi_request\n"));

		/* Get a CCB to use. */
		ccb = bha_get_ccb(sc);
#ifdef DIAGNOSTIC
		/*
		 * This should never happen as we track the resources
		 * in the mid-layer.
		 */
		if (ccb == NULL) {
			scsipi_printaddr(periph);
			printf("unable to allocate ccb\n");
			panic("bha_scsipi_request");
		}
#endif

		ccb->xs = xs;
		ccb->timeout = xs->timeout;

		/*
		 * Put all the arguments for the xfer in the ccb
		 */
		if (flags & XS_CTL_RESET) {
			/* Bus-reset request carries no CDB bytes. */
			ccb->opcode = BHA_RESET_CCB;
			ccb->scsi_cmd_length = 0;
		} else {
			/* The CDB must fit in the CCB's fixed-size field. */
			if (xs->cmdlen > sizeof(ccb->scsi_cmd)) {
				printf("%s: cmdlen %d too large for CCB\n",
				    device_xname(&sc->sc_dev), xs->cmdlen);
				xs->error = XS_DRIVER_STUFFUP;
				goto out_bad;
			}
			/* can't use S/G if zero length */
			ccb->opcode = (xs->datalen ? BHA_INIT_SCAT_GATH_CCB
			    : BHA_INITIATOR_CCB);
			memcpy(&ccb->scsi_cmd, xs->cmd,
			    ccb->scsi_cmd_length = xs->cmdlen);
		}

		if (xs->datalen) {
			/*
			 * Map the DMA transfer.
			 */
#ifdef TFS
			if (flags & XS_CTL_DATA_UIO) {
				error = bus_dmamap_load_uio(dmat,
				    ccb->dmamap_xfer, (struct uio *)xs->data,
				    ((flags & XS_CTL_NOSLEEP) ? BUS_DMA_NOWAIT :
				     BUS_DMA_WAITOK) | BUS_DMA_STREAMING |
				    ((flags & XS_CTL_DATA_IN) ? BUS_DMA_READ :
				     BUS_DMA_WRITE));
			} else
#endif /* TFS */
			{
				error = bus_dmamap_load(dmat,
				    ccb->dmamap_xfer, xs->data, xs->datalen,
				    NULL,
				    ((flags & XS_CTL_NOSLEEP) ? BUS_DMA_NOWAIT :
				     BUS_DMA_WAITOK) | BUS_DMA_STREAMING |
				    ((flags & XS_CTL_DATA_IN) ? BUS_DMA_READ :
				     BUS_DMA_WRITE));
			}

			switch (error) {
			case 0:
				break;

			case ENOMEM:
			case EAGAIN:
				/* Transient shortage; mid-layer will retry. */
				xs->error = XS_RESOURCE_SHORTAGE;
				goto out_bad;

			default:
				xs->error = XS_DRIVER_STUFFUP;
				aprint_error_dev(&sc->sc_dev, "error %d loading DMA map\n", error);
 out_bad:
				/* Common failure exit: release CCB, finish xfer. */
				bha_free_ccb(sc, ccb);
				scsipi_done(xs);
				return;
			}

			/* Make the buffer visible to the device before I/O. */
			bus_dmamap_sync(dmat, ccb->dmamap_xfer, 0,
			    ccb->dmamap_xfer->dm_mapsize,
			    (flags & XS_CTL_DATA_IN) ? BUS_DMASYNC_PREREAD :
			    BUS_DMASYNC_PREWRITE);

			/*
			 * Load the hardware scatter/gather map with the
			 * contents of the DMA map.
			 */
			for (seg = 0; seg < ccb->dmamap_xfer->dm_nsegs; seg++) {
				ltophys(ccb->dmamap_xfer->dm_segs[seg].ds_addr,
				    ccb->scat_gath[seg].seg_addr);
				ltophys(ccb->dmamap_xfer->dm_segs[seg].ds_len,
				    ccb->scat_gath[seg].seg_len);
			}

			/* Point the CCB at its own embedded S/G list. */
			ltophys(ccb->hashkey + offsetof(struct bha_ccb,
			    scat_gath), ccb->data_addr);
			ltophys(ccb->dmamap_xfer->dm_nsegs *
			    sizeof(struct bha_scat_gath), ccb->data_length);
		} else {
			/*
			 * No data xfer, use non S/G values.
			 */
			ltophys(0, ccb->data_addr);
			ltophys(0, ccb->data_length);
		}

		if (XS_CTL_TAGTYPE(xs) != 0) {
			ccb->tag_enable = 1;
			ccb->tag_type = xs->xs_tag_type & 0x03;
		} else {
			ccb->tag_enable = 0;
			ccb->tag_type = 0;
		}

		ccb->data_out = 0;
		ccb->data_in = 0;
		ccb->target = periph->periph_target;
		ccb->lun = periph->periph_lun;
		/* Sense data is DMA'd by the board directly into the CCB. */
		ltophys(ccb->hashkey + offsetof(struct bha_ccb, scsi_sense),
		    ccb->sense_ptr);
		ccb->req_sense_length = sizeof(ccb->scsi_sense);
		ccb->host_stat = 0x00;
		ccb->target_stat = 0x00;
		ccb->link_id = 0;
		ltophys(0, ccb->link_addr);

		/* Flush the CCB itself before handing it to the board. */
		BHA_CCB_SYNC(sc, ccb, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		s = splbio();
		bha_queue_ccb(sc, ccb);
		splx(s);

		SC_DEBUG(periph, SCSIPI_DB3, ("cmd_sent\n"));
		if ((flags & XS_CTL_POLL) == 0)
			return;

		/*
		 * If we can't use interrupts, poll on completion
		 */
		if (bha_poll(sc, xs, ccb->timeout)) {
			/* First poll timed out: abort, then poll the abort. */
			bha_timeout(ccb);
			if (bha_poll(sc, xs, ccb->timeout))
				bha_timeout(ccb);
		}
		return;

	case ADAPTER_REQ_GROW_RESOURCES:
		if (sc->sc_cur_ccbs == sc->sc_max_ccbs) {
			/* At the hardware limit; stop asking us to grow. */
			chan->chan_flags &= ~SCSIPI_CHAN_CANGROW;
			return;
		}
		seg = sc->sc_cur_ccbs;
		bha_create_ccbs(sc, bha_ccbs_per_group);
		/* Report only the CCBs actually added. */
		adapt->adapt_openings += sc->sc_cur_ccbs - seg;
		return;

	case ADAPTER_REQ_SET_XFER_MODE:
		/*
		 * Can't really do this on the Buslogic. It has its
		 * own setup info. But we do know how to query what
		 * the settings are.
		 */
		bha_get_xfer_mode(sc, (struct scsipi_xfer_mode *)arg);
		return;
	}
}
455
456 /*
457 * bha_minphys:
458 *
459 * Limit a transfer to our maximum transfer size.
460 */
461 void
462 bha_minphys(struct buf *bp)
463 {
464
465 if (bp->b_bcount > BHA_MAXXFER)
466 bp->b_bcount = BHA_MAXXFER;
467 minphys(bp);
468 }
469
470 /*****************************************************************************
471 * SCSI job execution helper routines
472 *****************************************************************************/
473
474 /*
475 * bha_get_xfer_mode;
476 *
477 * Negotiate the xfer mode for the specified periph, and report
478 * back the mode to the midlayer.
479 *
480 * NOTE: we must be called at splbio().
481 */
static void
bha_get_xfer_mode(struct bha_softc *sc, struct scsipi_xfer_mode *xm)
{
	struct bha_setup hwsetup;
	struct bha_period hwperiod;
	struct bha_sync *bs;
	/* toff/tmask index the target within an 8-entry per-bus-half table. */
	int toff = xm->xm_target & 7, tmask = (1 << toff);
	int wide, period, offset, rlen;

	/*
	 * Issue an Inquire Setup Information. We can extract
	 * sync and wide information from here.
	 */
	rlen = sizeof(hwsetup.reply) +
	    ((sc->sc_flags & BHAF_WIDE) ? sizeof(hwsetup.reply_w) : 0);
	hwsetup.cmd.opcode = BHA_INQUIRE_SETUP;
	hwsetup.cmd.len = rlen;
	bha_cmd(sc->sc_iot, sc->sc_ioh, device_xname(&sc->sc_dev),
	    sizeof(hwsetup.cmd), (u_char *)&hwsetup.cmd,
	    rlen, (u_char *)&hwsetup.reply);

	/* Start with no capabilities; OR them in as they are discovered. */
	xm->xm_mode = 0;
	xm->xm_period = 0;
	xm->xm_offset = 0;

	/*
	 * First check for wide. On later boards, we can check
	 * directly in the setup info if wide is currently active.
	 *
	 * On earlier boards, we have to make an educated guess.
	 */
	if (sc->sc_flags & BHAF_WIDE) {
		if (strcmp(sc->sc_firmware, "5.06L") >= 0) {
			if (xm->xm_target > 7) {
				wide =
				    hwsetup.reply_w.high_wide_active & tmask;
			} else {
				wide =
				    hwsetup.reply_w.low_wide_active & tmask;
			}
			if (wide)
				xm->xm_mode |= PERIPH_CAP_WIDE16;
		} else {
			/* XXX Check `wide permitted' in the config info. */
			xm->xm_mode |= PERIPH_CAP_WIDE16;
		}
	}

	/*
	 * Now get basic sync info.
	 */
	bs = (xm->xm_target > 7) ?
	     &hwsetup.reply_w.sync_high[toff] :
	     &hwsetup.reply.sync_low[toff];

	if (bs->valid) {
		xm->xm_mode |= PERIPH_CAP_SYNC;
		/* NOTE(review): assumes 50ns period units + 20ns base — confirm
		 * against the BusLogic programming manual. */
		period = (bs->period * 50) + 20;
		offset = bs->offset;

		/*
		 * On boards that can do Fast and Ultra, use the Inquire Period
		 * command to get the period.
		 */
		if (sc->sc_firmware[0] >= '3') {
			rlen = sizeof(hwperiod.reply) +
			    ((sc->sc_flags & BHAF_WIDE) ?
			     sizeof(hwperiod.reply_w) : 0);
			hwperiod.cmd.opcode = BHA_INQUIRE_PERIOD;
			hwperiod.cmd.len = rlen;
			bha_cmd(sc->sc_iot, sc->sc_ioh, device_xname(&sc->sc_dev),
			    sizeof(hwperiod.cmd), (u_char *)&hwperiod.cmd,
			    rlen, (u_char *)&hwperiod.reply);

			if (xm->xm_target > 7)
				period = hwperiod.reply_w.period[toff];
			else
				period = hwperiod.reply.period[toff];

			/* presumably the reply is in 10ns units — verify */
			period *= 10;
		}

		xm->xm_period =
		    scsipi_sync_period_to_factor(period * 100);
		xm->xm_offset = offset;
	}

	/*
	 * Now check for tagged queueing support.
	 *
	 * XXX Check `tags permitted' in the config info.
	 */
	if (sc->sc_flags & BHAF_TAGGED_QUEUEING)
		xm->xm_mode |= PERIPH_CAP_TQING;

	/* Report the discovered mode back to the mid-layer. */
	scsipi_async_event(&sc->sc_channel, ASYNC_EVENT_XFER_MODE, xm);
}
579
580 /*
581 * bha_done:
582 *
583 * A CCB has completed execution. Pass the status back to the
584 * upper layer.
585 */
static void
bha_done(struct bha_softc *sc, struct bha_ccb *ccb)
{
	bus_dma_tag_t dmat = sc->sc_dmat;
	struct scsipi_xfer *xs = ccb->xs;

	SC_DEBUG(xs->xs_periph, SCSIPI_DB2, ("bha_done\n"));

#ifdef BHADIAG
	if (ccb->flags & CCB_SENDING) {
		printf("%s: exiting ccb still in transit!\n",
		    device_xname(&sc->sc_dev));
		Debugger();
		return;
	}
#endif
	if ((ccb->flags & CCB_ALLOC) == 0) {
		aprint_error_dev(&sc->sc_dev, "exiting ccb not allocated!\n");
		Debugger();
		return;
	}

	/*
	 * If we were a data transfer, unload the map that described
	 * the data buffer.
	 */
	if (xs->datalen) {
		bus_dmamap_sync(dmat, ccb->dmamap_xfer, 0,
		    ccb->dmamap_xfer->dm_mapsize,
		    (xs->xs_control & XS_CTL_DATA_IN) ? BUS_DMASYNC_POSTREAD :
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(dmat, ccb->dmamap_xfer);
	}

	/* Map adapter/target status bytes to a scsipi error code. */
	if (xs->error == XS_NOERROR) {
		if (ccb->host_stat != BHA_OK) {
			switch (ccb->host_stat) {
			case BHA_SEL_TIMEOUT:	/* No response */
				xs->error = XS_SELTIMEOUT;
				break;
			default:	/* Other scsi protocol messes */
				printf("%s: host_stat %x\n",
				    device_xname(&sc->sc_dev), ccb->host_stat);
				xs->error = XS_DRIVER_STUFFUP;
				break;
			}
		} else if (ccb->target_stat != SCSI_OK) {
			switch (ccb->target_stat) {
			case SCSI_CHECK:
				/* Sense data was DMA'd into the CCB. */
				memcpy(&xs->sense.scsi_sense,
				    &ccb->scsi_sense,
				    sizeof(xs->sense.scsi_sense));
				xs->error = XS_SENSE;
				break;
			case SCSI_BUSY:
				xs->error = XS_BUSY;
				break;
			default:
				printf("%s: target_stat %x\n",
				    device_xname(&sc->sc_dev), ccb->target_stat);
				xs->error = XS_DRIVER_STUFFUP;
				break;
			}
		} else
			/* Full success: everything was transferred. */
			xs->resid = 0;
	}

	bha_free_ccb(sc, ccb);
	scsipi_done(xs);
}
656
657 /*
658 * bha_poll:
659 *
660 * Poll for completion of the specified job.
661 */
662 static int
663 bha_poll(struct bha_softc *sc, struct scsipi_xfer *xs, int count)
664 {
665 bus_space_tag_t iot = sc->sc_iot;
666 bus_space_handle_t ioh = sc->sc_ioh;
667
668 /* timeouts are in msec, so we loop in 1000 usec cycles */
669 while (count) {
670 /*
671 * If we had interrupts enabled, would we
672 * have got an interrupt?
673 */
674 if (bus_space_read_1(iot, ioh, BHA_INTR_PORT) &
675 BHA_INTR_ANYINTR)
676 bha_intr(sc);
677 if (xs->xs_status & XS_STS_DONE)
678 return (0);
679 delay(1000); /* only happens in boot so ok */
680 count--;
681 }
682 return (1);
683 }
684
685 /*
686 * bha_timeout:
687 *
688 * CCB timeout handler.
689 */
static void
bha_timeout(void *arg)
{
	struct bha_ccb *ccb = arg;
	struct scsipi_xfer *xs = ccb->xs;
	struct scsipi_periph *periph = xs->xs_periph;
	struct bha_softc *sc =
	    (void *)periph->periph_channel->chan_adapter->adapt_dev;
	int s;

	scsipi_printaddr(periph);
	printf("timed out");	/* message is completed below */

	s = splbio();

#ifdef BHADIAG
	/*
	 * If the ccb's mbx is not free, then the board has gone Far East?
	 */
	bha_collect_mbo(sc);
	if (ccb->flags & CCB_SENDING) {
		aprint_error_dev(&sc->sc_dev, "not taking commands!\n");
		Debugger();
	}
#endif

	/*
	 * If it has been through before, then
	 * a previous abort has failed, don't
	 * try abort again
	 */
	if (ccb->flags & CCB_ABORT) {
		/* abort timed out */
		printf(" AGAIN\n");
		/* XXX Must reset! */
	} else {
		/* abort the operation that has timed out */
		printf("\n");
		ccb->xs->error = XS_TIMEOUT;
		/* Requeue the CCB as an abort request with a short timeout. */
		ccb->timeout = BHA_ABORT_TIMEOUT;
		ccb->flags |= CCB_ABORT;
		bha_queue_ccb(sc, ccb);
	}

	splx(s);
}
736
737 /*****************************************************************************
738 * Misc. subroutines.
739 *****************************************************************************/
740
741 /*
742 * bha_cmd:
743 *
744 * Send a command to the Buglogic controller.
745 */
static int
bha_cmd(bus_space_tag_t iot, bus_space_handle_t ioh, const char *name, int icnt,
    u_char *ibuf, int ocnt, u_char *obuf)
{
	int i;
	int wait;
	u_char sts;
	u_char opcode = ibuf[0];	/* first input byte is the opcode */

	/*
	 * Calculate a reasonable timeout for the command.
	 */
	switch (opcode) {
	case BHA_INQUIRE_DEVICES:
	case BHA_INQUIRE_DEVICES_2:
		/* Device inquiry scans the bus and takes much longer. */
		wait = 90 * 20000;
		break;
	default:
		wait = 1 * 20000;
		break;
	}

	/*
	 * Wait for the adapter to go idle, unless it's one of
	 * the commands which don't need this
	 */
	if (opcode != BHA_MBO_INTR_EN) {
		for (i = 20000; i; i--) {	/* 1 sec? */
			sts = bus_space_read_1(iot, ioh, BHA_STAT_PORT);
			if (sts & BHA_STAT_IDLE)
				break;
			delay(50);
		}
		if (!i) {
			printf("%s: bha_cmd, host not idle(0x%x)\n",
			    name, sts);
			return (1);
		}
	}

	/*
	 * Now that it is idle, if we expect output, preflush the
	 * queue feeding to us.
	 */
	if (ocnt) {
		while ((bus_space_read_1(iot, ioh, BHA_STAT_PORT)) &
		    BHA_STAT_DF)
			(void)bus_space_read_1(iot, ioh, BHA_DATA_PORT);
	}

	/*
	 * Output the command and the number of arguments given
	 * for each byte, first check the port is empty.
	 */
	while (icnt--) {
		/* Wait for the command/data-out port to drain. */
		for (i = wait; i; i--) {
			sts = bus_space_read_1(iot, ioh, BHA_STAT_PORT);
			if (!(sts & BHA_STAT_CDF))
				break;
			delay(50);
		}
		if (!i) {
			if (opcode != BHA_INQUIRE_REVISION)
				printf("%s: bha_cmd, cmd/data port full\n",
				    name);
			goto bad;
		}
		bus_space_write_1(iot, ioh, BHA_CMD_PORT, *ibuf++);
	}

	/*
	 * If we expect input, loop that many times, each time,
	 * looking for the data register to have valid data
	 */
	while (ocnt--) {
		for (i = wait; i; i--) {
			sts = bus_space_read_1(iot, ioh, BHA_STAT_PORT);
			if (sts & BHA_STAT_DF)
				break;
			delay(50);
		}
		if (!i) {
#ifdef BHADEBUG
			if (opcode != BHA_INQUIRE_REVISION)
				printf("%s: bha_cmd, cmd/data port empty %d\n",
				    name, ocnt);
#endif /* BHADEBUG */
			goto bad;
		}
		*obuf++ = bus_space_read_1(iot, ioh, BHA_DATA_PORT);
	}

	/*
	 * Wait for the board to report a finished instruction.
	 * We may get an extra interrupt for the HACC signal, but this is
	 * unimportant.
	 */
	if (opcode != BHA_MBO_INTR_EN && opcode != BHA_MODIFY_IOPORT) {
		for (i = 20000; i; i--) {	/* 1 sec? */
			sts = bus_space_read_1(iot, ioh, BHA_INTR_PORT);
			/* XXX Need to save this in the interrupt handler? */
			if (sts & BHA_INTR_HACC)
				break;
			delay(50);
		}
		if (!i) {
			printf("%s: bha_cmd, host not finished(0x%x)\n",
			    name, sts);
			return (1);
		}
	}
	/* Acknowledge the command-complete (HACC) interrupt. */
	bus_space_write_1(iot, ioh, BHA_CTRL_PORT, BHA_CTRL_IRST);
	return (0);

bad:
	/* Soft-reset the board to recover from the stuck handshake. */
	bus_space_write_1(iot, ioh, BHA_CTRL_PORT, BHA_CTRL_SRST);
	return (1);
}
864
865 /*
866 * bha_find:
867 *
868 * Find the board.
869 */
870 int
871 bha_find(bus_space_tag_t iot, bus_space_handle_t ioh)
872 {
873 int i;
874 u_char sts;
875 struct bha_extended_inquire inquire;
876
877 /* Check something is at the ports we need to access */
878 sts = bus_space_read_1(iot, ioh, BHA_STAT_PORT);
879 if (sts == 0xFF)
880 return (0);
881
882 /*
883 * Reset board, If it doesn't respond, assume
884 * that it's not there.. good for the probe
885 */
886
887 bus_space_write_1(iot, ioh, BHA_CTRL_PORT,
888 BHA_CTRL_HRST | BHA_CTRL_SRST);
889
890 delay(100);
891 for (i = BHA_RESET_TIMEOUT; i; i--) {
892 sts = bus_space_read_1(iot, ioh, BHA_STAT_PORT);
893 if (sts == (BHA_STAT_IDLE | BHA_STAT_INIT))
894 break;
895 delay(1000);
896 }
897 if (!i) {
898 #ifdef BHADEBUG
899 if (bha_debug)
900 printf("bha_find: No answer from buslogic board\n");
901 #endif /* BHADEBUG */
902 return (0);
903 }
904
905 /*
906 * The BusLogic cards implement an Adaptec 1542 (aha)-compatible
907 * interface. The native bha interface is not compatible with
908 * an aha. 1542. We need to ensure that we never match an
909 * Adaptec 1542. We must also avoid sending Adaptec-compatible
910 * commands to a real bha, lest it go into 1542 emulation mode.
911 * (On an indirect bus like ISA, we should always probe for BusLogic
912 * interfaces before Adaptec interfaces).
913 */
914
915 /*
916 * Make sure we don't match an AHA-1542A or AHA-1542B, by checking
917 * for an extended-geometry register. The 1542[AB] don't have one.
918 */
919 sts = bus_space_read_1(iot, ioh, BHA_EXTGEOM_PORT);
920 if (sts == 0xFF)
921 return (0);
922
923 /*
924 * Check that we actually know how to use this board.
925 */
926 delay(1000);
927 inquire.cmd.opcode = BHA_INQUIRE_EXTENDED;
928 inquire.cmd.len = sizeof(inquire.reply);
929 i = bha_cmd(iot, ioh, "(bha_find)",
930 sizeof(inquire.cmd), (u_char *)&inquire.cmd,
931 sizeof(inquire.reply), (u_char *)&inquire.reply);
932
933 /*
934 * Some 1542Cs (CP, perhaps not CF, may depend on firmware rev)
935 * have the extended-geometry register and also respond to
936 * BHA_INQUIRE_EXTENDED. Make sure we never match such cards,
937 * by checking the size of the reply is what a BusLogic card returns.
938 */
939 if (i) {
940 #ifdef BHADEBUG
941 printf("bha_find: board returned %d instead of %d to %s\n",
942 i, sizeof(inquire.reply), "INQUIRE_EXTENDED");
943 #endif
944 return (0);
945 }
946
947 /* OK, we know we've found a buslogic adaptor. */
948
949 switch (inquire.reply.bus_type) {
950 case BHA_BUS_TYPE_24BIT:
951 case BHA_BUS_TYPE_32BIT:
952 break;
953 case BHA_BUS_TYPE_MCA:
954 /* We don't grok MicroChannel (yet). */
955 return (0);
956 default:
957 printf("bha_find: illegal bus type %c\n",
958 inquire.reply.bus_type);
959 return (0);
960 }
961
962 return (1);
963 }
964
965
966 /*
967 * bha_inquire_config:
968 *
969 * Determine irq/drq.
970 */
971 int
972 bha_inquire_config(bus_space_tag_t iot, bus_space_handle_t ioh,
973 struct bha_probe_data *sc)
974 {
975 int irq, drq;
976 struct bha_config config;
977
978 /*
979 * Assume we have a board at this stage setup DMA channel from
980 * jumpers and save int level
981 */
982 delay(1000);
983 config.cmd.opcode = BHA_INQUIRE_CONFIG;
984 bha_cmd(iot, ioh, "(bha_inquire_config)",
985 sizeof(config.cmd), (u_char *)&config.cmd,
986 sizeof(config.reply), (u_char *)&config.reply);
987 switch (config.reply.chan) {
988 case EISADMA:
989 drq = -1;
990 break;
991 case CHAN0:
992 drq = 0;
993 break;
994 case CHAN5:
995 drq = 5;
996 break;
997 case CHAN6:
998 drq = 6;
999 break;
1000 case CHAN7:
1001 drq = 7;
1002 break;
1003 default:
1004 printf("bha: illegal drq setting %x\n",
1005 config.reply.chan);
1006 return (0);
1007 }
1008
1009 switch (config.reply.intr) {
1010 case INT9:
1011 irq = 9;
1012 break;
1013 case INT10:
1014 irq = 10;
1015 break;
1016 case INT11:
1017 irq = 11;
1018 break;
1019 case INT12:
1020 irq = 12;
1021 break;
1022 case INT14:
1023 irq = 14;
1024 break;
1025 case INT15:
1026 irq = 15;
1027 break;
1028 default:
1029 printf("bha: illegal irq setting %x\n",
1030 config.reply.intr);
1031 return (0);
1032 }
1033
1034 /* if we want to fill in softc, do so now */
1035 if (sc != NULL) {
1036 sc->sc_irq = irq;
1037 sc->sc_drq = drq;
1038 }
1039
1040 return (1);
1041 }
1042
1043 int
1044 bha_probe_inquiry(bus_space_tag_t iot, bus_space_handle_t ioh,
1045 struct bha_probe_data *bpd)
1046 {
1047 return bha_find(iot, ioh) && bha_inquire_config(iot, ioh, bpd);
1048 }
1049
1050 /*
1051 * bha_disable_isacompat:
1052 *
1053 * Disable the ISA-compatibility ioports on PCI bha devices,
1054 * to ensure they're not autoconfigured a second time as an ISA bha.
1055 */
1056 int
1057 bha_disable_isacompat(struct bha_softc *sc)
1058 {
1059 struct bha_isadisable isa_disable;
1060
1061 isa_disable.cmd.opcode = BHA_MODIFY_IOPORT;
1062 isa_disable.cmd.modifier = BHA_IOMODIFY_DISABLE1;
1063 bha_cmd(sc->sc_iot, sc->sc_ioh, device_xname(&sc->sc_dev),
1064 sizeof(isa_disable.cmd), (u_char*)&isa_disable.cmd,
1065 0, (u_char *)0);
1066 return (0);
1067 }
1068
1069 /*
1070 * bha_info:
1071 *
1072 * Get information about the board, and report it. We
1073 * return the initial number of CCBs, 0 if we failed.
1074 */
1075 int
1076 bha_info(struct bha_softc *sc)
1077 {
1078 bus_space_tag_t iot = sc->sc_iot;
1079 bus_space_handle_t ioh = sc->sc_ioh;
1080 struct bha_extended_inquire inquire;
1081 struct bha_config config;
1082 struct bha_devices devices;
1083 struct bha_setup setup;
1084 struct bha_model model;
1085 struct bha_revision revision;
1086 struct bha_digit digit;
1087 int i, j, initial_ccbs, rlen;
1088 const char *name = device_xname(&sc->sc_dev);
1089 char *p;
1090
1091 /*
1092 * Fetch the extended inquire information.
1093 */
1094 inquire.cmd.opcode = BHA_INQUIRE_EXTENDED;
1095 inquire.cmd.len = sizeof(inquire.reply);
1096 bha_cmd(iot, ioh, name,
1097 sizeof(inquire.cmd), (u_char *)&inquire.cmd,
1098 sizeof(inquire.reply), (u_char *)&inquire.reply);
1099
1100 /*
1101 * Fetch the configuration information.
1102 */
1103 config.cmd.opcode = BHA_INQUIRE_CONFIG;
1104 bha_cmd(iot, ioh, name,
1105 sizeof(config.cmd), (u_char *)&config.cmd,
1106 sizeof(config.reply), (u_char *)&config.reply);
1107
1108 sc->sc_scsi_id = config.reply.scsi_dev;
1109
1110 /*
1111 * Get the firmware revision.
1112 */
1113 p = sc->sc_firmware;
1114 revision.cmd.opcode = BHA_INQUIRE_REVISION;
1115 bha_cmd(iot, ioh, name,
1116 sizeof(revision.cmd), (u_char *)&revision.cmd,
1117 sizeof(revision.reply), (u_char *)&revision.reply);
1118 *p++ = revision.reply.firm_revision;
1119 *p++ = '.';
1120 *p++ = revision.reply.firm_version;
1121 digit.cmd.opcode = BHA_INQUIRE_REVISION_3;
1122 bha_cmd(iot, ioh, name,
1123 sizeof(digit.cmd), (u_char *)&digit.cmd,
1124 sizeof(digit.reply), (u_char *)&digit.reply);
1125 *p++ = digit.reply.digit;
1126 if (revision.reply.firm_revision >= '3' ||
1127 (revision.reply.firm_revision == '3' &&
1128 revision.reply.firm_version >= '3')) {
1129 digit.cmd.opcode = BHA_INQUIRE_REVISION_4;
1130 bha_cmd(iot, ioh, name,
1131 sizeof(digit.cmd), (u_char *)&digit.cmd,
1132 sizeof(digit.reply), (u_char *)&digit.reply);
1133 *p++ = digit.reply.digit;
1134 }
1135 while (p > sc->sc_firmware && (p[-1] == ' ' || p[-1] == '\0'))
1136 p--;
1137 *p = '\0';
1138
1139 /*
1140 * Get the model number.
1141 *
1142 * Some boards do not handle the Inquire Board Model Number
1143 * command correctly, or don't give correct information.
1144 *
1145 * So, we use the Firmware Revision and Extended Setup
1146 * information to fixup the model number in these cases.
1147 *
1148 * The firmware version indicates:
1149 *
1150 * 5.xx BusLogic "W" Series Host Adapters
1151 * BT-948/958/958D
1152 *
1153 * 4.xx BusLogic "C" Series Host Adapters
1154 * BT-946C/956C/956CD/747C/757C/757CD/445C/545C/540CF
1155 *
1156 * 3.xx BusLogic "S" Series Host Adapters
1157 * BT-747S/747D/757S/757D/445S/545S/542D
1158 * BT-542B/742A (revision H)
1159 *
1160 * 2.xx BusLogic "A" Series Host Adapters
1161 * BT-542B/742A (revision G and below)
1162 *
1163 * 0.xx AMI FastDisk VLB/EISA BusLogic Clone Host Adapter
1164 */
1165 if (inquire.reply.bus_type == BHA_BUS_TYPE_24BIT &&
1166 sc->sc_firmware[0] < '3')
1167 snprintf(sc->sc_model, sizeof(sc->sc_model), "542B");
1168 else if (inquire.reply.bus_type == BHA_BUS_TYPE_32BIT &&
1169 sc->sc_firmware[0] == '2' &&
1170 (sc->sc_firmware[2] == '1' ||
1171 (sc->sc_firmware[2] == '2' && sc->sc_firmware[3] == '')))
1172 snprintf(sc->sc_model, sizeof(sc->sc_model), "742A");
1173 else if (inquire.reply.bus_type == BHA_BUS_TYPE_32BIT &&
1174 sc->sc_firmware[0] == '')
1175 snprintf(sc->sc_model, sizeof(sc->sc_model), "747A");
1176 else {
1177 p = sc->sc_model;
1178 model.cmd.opcode = BHA_INQUIRE_MODEL;
1179 model.cmd.len = sizeof(model.reply);
1180 bha_cmd(iot, ioh, name,
1181 sizeof(model.cmd), (u_char *)&model.cmd,
1182 sizeof(model.reply), (u_char *)&model.reply);
1183 *p++ = model.reply.id[0];
1184 *p++ = model.reply.id[1];
1185 *p++ = model.reply.id[2];
1186 *p++ = model.reply.id[3];
1187 while (p > sc->sc_model && (p[-1] == ' ' || p[-1] == '\0'))
1188 p--;
1189 *p++ = model.reply.version[0];
1190 *p++ = model.reply.version[1];
1191 while (p > sc->sc_model && (p[-1] == ' ' || p[-1] == '\0'))
1192 p--;
1193 *p = '\0';
1194 }
1195
1196 /* Enable round-robin scheme - appeared at firmware rev. 3.31. */
1197 if (strcmp(sc->sc_firmware, "3.31") >= 0)
1198 sc->sc_flags |= BHAF_STRICT_ROUND_ROBIN;
1199
1200 /*
1201 * Determine some characteristics about our bus.
1202 */
1203 if (inquire.reply.scsi_flags & BHA_SCSI_WIDE)
1204 sc->sc_flags |= BHAF_WIDE;
1205 if (inquire.reply.scsi_flags & BHA_SCSI_DIFFERENTIAL)
1206 sc->sc_flags |= BHAF_DIFFERENTIAL;
1207 if (inquire.reply.scsi_flags & BHA_SCSI_ULTRA)
1208 sc->sc_flags |= BHAF_ULTRA;
1209
1210 /*
1211 * Determine some characterists of the board.
1212 */
1213 sc->sc_max_dmaseg = inquire.reply.sg_limit;
1214
1215 /*
1216 * Determine the maximum CCB count and whether or not
1217 * tagged queueing is available on this host adapter.
1218 *
1219 * Tagged queueing works on:
1220 *
1221 * "W" Series adapters
1222 * "C" Series adapters with firmware >= 4.22
1223 * "S" Series adapters with firmware >= 3.35
1224 *
1225 * The internal CCB counts are:
1226 *
1227 * 192 BT-948/958/958D
1228 * 100 BT-946C/956C/956CD/747C/757C/757CD/445C
1229 * 50 BT-545C/540CF
1230 * 30 BT-747S/747D/757S/757D/445S/545S/542D/542B/742A
1231 */
1232 switch (sc->sc_firmware[0]) {
1233 case '5':
1234 sc->sc_max_ccbs = 192;
1235 sc->sc_flags |= BHAF_TAGGED_QUEUEING;
1236 break;
1237
1238 case '4':
1239 if (sc->sc_model[0] == '5')
1240 sc->sc_max_ccbs = 50;
1241 else
1242 sc->sc_max_ccbs = 100;
1243 if (strcmp(sc->sc_firmware, "4.22") >= 0)
1244 sc->sc_flags |= BHAF_TAGGED_QUEUEING;
1245 break;
1246
1247 case '3':
1248 if (strcmp(sc->sc_firmware, "3.35") >= 0)
1249 sc->sc_flags |= BHAF_TAGGED_QUEUEING;
1250 /* FALLTHROUGH */
1251
1252 default:
1253 sc->sc_max_ccbs = 30;
1254 }
1255
1256 /*
1257 * Set the mailbox count to precisely the number of HW CCBs
1258 * available. A mailbox isn't required while a CCB is executing,
1259 * but this allows us to actually enqueue up to our resource
1260 * limit.
1261 *
1262 * This will keep the mailbox count small on boards which don't
1263 * have strict round-robin (they have to scan the entire set of
1264 * mailboxes each time they run a command).
1265 */
1266 sc->sc_mbox_count = sc->sc_max_ccbs;
1267
1268 /*
1269 * Obtain setup information.
1270 */
1271 rlen = sizeof(setup.reply) +
1272 ((sc->sc_flags & BHAF_WIDE) ? sizeof(setup.reply_w) : 0);
1273 setup.cmd.opcode = BHA_INQUIRE_SETUP;
1274 setup.cmd.len = rlen;
1275 bha_cmd(iot, ioh, name,
1276 sizeof(setup.cmd), (u_char *)&setup.cmd,
1277 rlen, (u_char *)&setup.reply);
1278
1279 aprint_normal_dev(&sc->sc_dev, "model BT-%s, firmware %s\n",
1280 sc->sc_model, sc->sc_firmware);
1281
1282 aprint_normal_dev(&sc->sc_dev, "%d H/W CCBs", sc->sc_max_ccbs);
1283 if (setup.reply.sync_neg)
1284 aprint_normal(", sync");
1285 if (setup.reply.parity)
1286 aprint_normal(", parity");
1287 if (sc->sc_flags & BHAF_TAGGED_QUEUEING)
1288 aprint_normal(", tagged queueing");
1289 if (sc->sc_flags & BHAF_WIDE_LUN)
1290 aprint_normal(", wide LUN support");
1291 aprint_normal("\n");
1292
1293 /*
1294 * Poll targets 0 - 7.
1295 */
1296 devices.cmd.opcode = BHA_INQUIRE_DEVICES;
1297 bha_cmd(iot, ioh, name,
1298 sizeof(devices.cmd), (u_char *)&devices.cmd,
1299 sizeof(devices.reply), (u_char *)&devices.reply);
1300
1301 /* Count installed units. */
1302 initial_ccbs = 0;
1303 for (i = 0; i < 8; i++) {
1304 for (j = 0; j < 8; j++) {
1305 if (((devices.reply.lun_map[i] >> j) & 1) == 1)
1306 initial_ccbs++;
1307 }
1308 }
1309
1310 /*
1311 * Poll targets 8 - 15 if we have a wide bus.
1312 */
1313 if (sc->sc_flags & BHAF_WIDE) {
1314 devices.cmd.opcode = BHA_INQUIRE_DEVICES_2;
1315 bha_cmd(iot, ioh, name,
1316 sizeof(devices.cmd), (u_char *)&devices.cmd,
1317 sizeof(devices.reply), (u_char *)&devices.reply);
1318
1319 for (i = 0; i < 8; i++) {
1320 for (j = 0; j < 8; j++) {
1321 if (((devices.reply.lun_map[i] >> j) & 1) == 1)
1322 initial_ccbs++;
1323 }
1324 }
1325 }
1326
1327 /*
1328 * Double the initial CCB count, for good measure.
1329 */
1330 initial_ccbs *= 2;
1331
1332 /*
1333 * Sanity check the initial CCB count; don't create more than
1334 * we can enqueue (sc_max_ccbs), and make sure there are some
1335 * at all.
1336 */
1337 if (initial_ccbs > sc->sc_max_ccbs)
1338 initial_ccbs = sc->sc_max_ccbs;
1339 if (initial_ccbs == 0)
1340 initial_ccbs = 2;
1341
1342 return (initial_ccbs);
1343 }
1344
1345 /*
1346 * bha_init:
1347 *
1348 * Initialize the board.
1349 */
1350 static int
1351 bha_init(struct bha_softc *sc)
1352 {
1353 const char *name = device_xname(&sc->sc_dev);
1354 struct bha_toggle toggle;
1355 struct bha_mailbox mailbox;
1356 struct bha_mbx_out *mbo;
1357 struct bha_mbx_in *mbi;
1358 int i;
1359
1360 /*
1361 * Set up the mailbox. We always run the mailbox in round-robin.
1362 */
1363 for (i = 0; i < sc->sc_mbox_count; i++) {
1364 mbo = &sc->sc_mbo[i];
1365 mbi = &sc->sc_mbi[i];
1366
1367 mbo->cmd = BHA_MBO_FREE;
1368 BHA_MBO_SYNC(sc, mbo, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
1369
1370 mbi->comp_stat = BHA_MBI_FREE;
1371 BHA_MBI_SYNC(sc, mbi, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
1372 }
1373
1374 sc->sc_cmbo = sc->sc_tmbo = &sc->sc_mbo[0];
1375 sc->sc_tmbi = &sc->sc_mbi[0];
1376
1377 sc->sc_mbofull = 0;
1378
1379 /*
1380 * If the board supports strict round-robin, enable that.
1381 */
1382 if (sc->sc_flags & BHAF_STRICT_ROUND_ROBIN) {
1383 toggle.cmd.opcode = BHA_ROUND_ROBIN;
1384 toggle.cmd.enable = 1;
1385 bha_cmd(sc->sc_iot, sc->sc_ioh, name,
1386 sizeof(toggle.cmd), (u_char *)&toggle.cmd,
1387 0, NULL);
1388 }
1389
1390 /*
1391 * Give the mailbox to the board.
1392 */
1393 mailbox.cmd.opcode = BHA_MBX_INIT_EXTENDED;
1394 mailbox.cmd.nmbx = sc->sc_mbox_count;
1395 ltophys(sc->sc_dmamap_mbox->dm_segs[0].ds_addr, mailbox.cmd.addr);
1396 bha_cmd(sc->sc_iot, sc->sc_ioh, name,
1397 sizeof(mailbox.cmd), (u_char *)&mailbox.cmd,
1398 0, (u_char *)0);
1399
1400 return (0);
1401 }
1402
1403 /*****************************************************************************
1404 * CCB execution engine
1405 *****************************************************************************/
1406
/*
 * bha_queue_ccb:
 *
 *	Queue a CCB to be sent to the controller, and send it if possible.
 *
 *	The CCB goes on the tail of the software wait queue; actual
 *	dispatch into a hardware mailbox (if one is free) is done by
 *	bha_start_ccbs().
 */
static void
bha_queue_ccb(struct bha_softc *sc, struct bha_ccb *ccb)
{

	TAILQ_INSERT_TAIL(&sc->sc_waiting_ccb, ccb, chain);
	bha_start_ccbs(sc);
}
1419
/*
 * bha_start_ccbs:
 *
 *	Send as many CCBs as we have empty mailboxes for.
 *
 *	Drains sc_waiting_ccb into the outgoing mailbox ring starting at
 *	sc_tmbo.  If the ring fills up, first tries to garbage-collect
 *	completed mailboxes, and failing that asks the controller to
 *	interrupt us when an outgoing mailbox becomes free.
 */
static void
bha_start_ccbs(struct bha_softc *sc)
{
	bus_space_tag_t iot = sc->sc_iot;
	bus_space_handle_t ioh = sc->sc_ioh;
	struct bha_ccb_group *bcg;
	struct bha_mbx_out *mbo;
	struct bha_ccb *ccb;

	/* Next mailbox slot to transmit into. */
	mbo = sc->sc_tmbo;

	while ((ccb = TAILQ_FIRST(&sc->sc_waiting_ccb)) != NULL) {
		if (sc->sc_mbofull >= sc->sc_mbox_count) {
#ifdef DIAGNOSTIC
			if (sc->sc_mbofull > sc->sc_mbox_count)
				panic("bha_start_ccbs: mbofull > mbox_count");
#endif
			/*
			 * No mailboxes available; attempt to collect ones
			 * that have already been used.
			 */
			bha_collect_mbo(sc);
			if (sc->sc_mbofull == sc->sc_mbox_count) {
				/*
				 * Still no more available; have the
				 * controller interrupt us when it
				 * frees one.
				 */
				struct bha_toggle toggle;

				toggle.cmd.opcode = BHA_MBO_INTR_EN;
				toggle.cmd.enable = 1;
				bha_cmd(iot, ioh, device_xname(&sc->sc_dev),
				    sizeof(toggle.cmd), (u_char *)&toggle.cmd,
				    0, (u_char *)0);
				break;
			}
		}

		TAILQ_REMOVE(&sc->sc_waiting_ccb, ccb, chain);
#ifdef BHADIAG
		ccb->flags |= CCB_SENDING;
#endif

		/*
		 * Put the CCB in the mailbox.  The mailbox carries the
		 * CCB's DMA address and either a start or abort command.
		 */
		bcg = BHA_CCB_GROUP(ccb);
		ltophys(bcg->bcg_dmamap->dm_segs[0].ds_addr +
		    BHA_CCB_OFFSET(ccb), mbo->ccb_addr);
		if (ccb->flags & CCB_ABORT)
			mbo->cmd = BHA_MBO_ABORT;
		else
			mbo->cmd = BHA_MBO_START;

		/* Flush the mailbox update before poking the hardware. */
		BHA_MBO_SYNC(sc, mbo,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		/* Tell the card to poll immediately. */
		bus_space_write_1(iot, ioh, BHA_CMD_PORT, BHA_START_SCSI);

		/* Arm the command timeout unless the caller is polling. */
		if ((ccb->xs->xs_control & XS_CTL_POLL) == 0)
			callout_reset(&ccb->xs->xs_callout,
			    mstohz(ccb->timeout), bha_timeout, ccb);

		++sc->sc_mbofull;
		mbo = bha_nextmbo(sc, mbo);
	}

	/* Remember where the next transmission should go. */
	sc->sc_tmbo = mbo;
}
1496
1497 /*
1498 * bha_finish_ccbs:
1499 *
1500 * Finalize the execution of CCBs in our incoming mailbox.
1501 */
1502 static void
1503 bha_finish_ccbs(struct bha_softc *sc)
1504 {
1505 struct bha_mbx_in *mbi;
1506 struct bha_ccb *ccb;
1507 int i;
1508
1509 mbi = sc->sc_tmbi;
1510
1511 BHA_MBI_SYNC(sc, mbi, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
1512
1513 if (mbi->comp_stat == BHA_MBI_FREE) {
1514 for (i = 0; i < sc->sc_mbox_count; i++) {
1515 if (mbi->comp_stat != BHA_MBI_FREE) {
1516 #ifdef BHADIAG
1517 /*
1518 * This can happen in normal operation if
1519 * we use all mailbox slots.
1520 */
1521 printf("%s: mbi not in round-robin order\n",
1522 device_xname(&sc->sc_dev));
1523 #endif
1524 goto again;
1525 }
1526 mbi = bha_nextmbi(sc, mbi);
1527 BHA_MBI_SYNC(sc, mbi,
1528 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
1529 }
1530 #ifdef BHADIAGnot
1531 printf("%s: mbi interrupt with no full mailboxes\n",
1532 device_xname(&sc->sc_dev));
1533 #endif
1534 return;
1535 }
1536
1537 again:
1538 do {
1539 ccb = bha_ccb_phys_kv(sc, phystol(mbi->ccb_addr));
1540 if (ccb == NULL) {
1541 aprint_error_dev(&sc->sc_dev, "bad mbi ccb pointer 0x%08x; skipping\n",
1542 phystol(mbi->ccb_addr));
1543 goto next;
1544 }
1545
1546 BHA_CCB_SYNC(sc, ccb,
1547 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
1548
1549 #ifdef BHADEBUG
1550 if (bha_debug) {
1551 u_char *cp = ccb->scsi_cmd;
1552 printf("op=%x %x %x %x %x %x\n",
1553 cp[0], cp[1], cp[2], cp[3], cp[4], cp[5]);
1554 printf("comp_stat %x for mbi addr = %p, ",
1555 mbi->comp_stat, mbi);
1556 printf("ccb addr = %p\n", ccb);
1557 }
1558 #endif /* BHADEBUG */
1559
1560 switch (mbi->comp_stat) {
1561 case BHA_MBI_OK:
1562 case BHA_MBI_ERROR:
1563 if ((ccb->flags & CCB_ABORT) != 0) {
1564 /*
1565 * If we already started an abort, wait for it
1566 * to complete before clearing the CCB. We
1567 * could instead just clear CCB_SENDING, but
1568 * what if the mailbox was already received?
1569 * The worst that happens here is that we clear
1570 * the CCB a bit later than we need to. BFD.
1571 */
1572 goto next;
1573 }
1574 break;
1575
1576 case BHA_MBI_ABORT:
1577 case BHA_MBI_UNKNOWN:
1578 /*
1579 * Even if the CCB wasn't found, we clear it anyway.
1580 * See preceding comment.
1581 */
1582 break;
1583
1584 default:
1585 aprint_error_dev(&sc->sc_dev, "bad mbi comp_stat %02x; skipping\n",
1586 mbi->comp_stat);
1587 goto next;
1588 }
1589
1590 callout_stop(&ccb->xs->xs_callout);
1591 bha_done(sc, ccb);
1592
1593 next:
1594 mbi->comp_stat = BHA_MBI_FREE;
1595 BHA_CCB_SYNC(sc, ccb,
1596 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
1597
1598 mbi = bha_nextmbi(sc, mbi);
1599 BHA_MBI_SYNC(sc, mbi,
1600 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
1601 } while (mbi->comp_stat != BHA_MBI_FREE);
1602
1603 sc->sc_tmbi = mbi;
1604 }
1605
1606 /*****************************************************************************
1607 * Mailbox management functions.
1608 *****************************************************************************/
1609
/*
 * bha_create_mailbox:
 *
 *	Create the mailbox structures.  Helper function for bha_attach().
 *
 *	NOTE: The Buslogic hardware only gets one DMA address for the
 *	mailbox!  It expects:
 *
 *		mailbox_out[mailbox_size]
 *		mailbox_in[mailbox_size]
 *
 *	i.e. both rings live in one contiguous DMA-safe allocation, with
 *	the incoming ring immediately following the outgoing one.
 *
 *	Returns 0 on success, or a bus_dma error code on failure (all
 *	partially-acquired resources are released on the error paths).
 */
static int
bha_create_mailbox(struct bha_softc *sc)
{
	bus_dma_segment_t seg;
	size_t size;
	int error, rseg;

	/* Both rings (out + in) share a single allocation. */
	size = (sizeof(struct bha_mbx_out) * sc->sc_mbox_count) +
	    (sizeof(struct bha_mbx_in) * sc->sc_mbox_count);

	error = bus_dmamem_alloc(sc->sc_dmat, size, PAGE_SIZE, 0, &seg,
	    1, &rseg, sc->sc_dmaflags);
	if (error) {
		aprint_error_dev(&sc->sc_dev, "unable to allocate mailboxes, error = %d\n",
		    error);
		goto bad_0;
	}

	/* COHERENT: mailboxes are polled by both CPU and adapter. */
	error = bus_dmamem_map(sc->sc_dmat, &seg, rseg, size,
	    (void **)&sc->sc_mbo, sc->sc_dmaflags | BUS_DMA_COHERENT);
	if (error) {
		aprint_error_dev(&sc->sc_dev, "unable to map mailboxes, error = %d\n",
		    error);
		goto bad_1;
	}

	memset(sc->sc_mbo, 0, size);

	error = bus_dmamap_create(sc->sc_dmat, size, 1, size, 0,
	    sc->sc_dmaflags, &sc->sc_dmamap_mbox);
	if (error) {
		aprint_error_dev(&sc->sc_dev,
		    "unable to create mailbox DMA map, error = %d\n",
		    error);
		goto bad_2;
	}

	error = bus_dmamap_load(sc->sc_dmat, sc->sc_dmamap_mbox,
	    sc->sc_mbo, size, NULL, 0);
	if (error) {
		aprint_error_dev(&sc->sc_dev, "unable to load mailbox DMA map, error = %d\n",
		    error);
		goto bad_3;
	}

	/* The incoming ring starts right after the outgoing ring. */
	sc->sc_mbi = (struct bha_mbx_in *)(sc->sc_mbo + sc->sc_mbox_count);

	return (0);

 bad_3:
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_dmamap_mbox);
 bad_2:
	bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_mbo, size);
 bad_1:
	bus_dmamem_free(sc->sc_dmat, &seg, rseg);
 bad_0:
	return (error);
}
1679
1680 /*
1681 * bha_collect_mbo:
1682 *
1683 * Garbage collect mailboxes that are no longer in use.
1684 */
1685 static void
1686 bha_collect_mbo(struct bha_softc *sc)
1687 {
1688 struct bha_mbx_out *mbo;
1689 #ifdef BHADIAG
1690 struct bha_ccb *ccb;
1691 #endif
1692
1693 mbo = sc->sc_cmbo;
1694
1695 while (sc->sc_mbofull > 0) {
1696 BHA_MBO_SYNC(sc, mbo,
1697 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
1698 if (mbo->cmd != BHA_MBO_FREE)
1699 break;
1700
1701 #ifdef BHADIAG
1702 ccb = bha_ccb_phys_kv(sc, phystol(mbo->ccb_addr));
1703 ccb->flags &= ~CCB_SENDING;
1704 #endif
1705
1706 --sc->sc_mbofull;
1707 mbo = bha_nextmbo(sc, mbo);
1708 }
1709
1710 sc->sc_cmbo = mbo;
1711 }
1712
1713 /*****************************************************************************
1714 * CCB management functions
1715 *****************************************************************************/
1716
/*
 * bha_reset_ccb:
 *
 *	Return a CCB to its pristine state (no flags set).
 */
static inline void
bha_reset_ccb(struct bha_ccb *ccb)
{

	ccb->flags = 0;
}
1723
/*
 * bha_create_ccbs:
 *
 *	Create a set of CCBs.
 *
 *	We determine the target CCB count, and then keep creating them
 *	until we reach the target, or fail.  CCBs that are allocated
 *	but not "created" are left on the allocating list.
 *
 *	Each CCB group is one PAGE_SIZE DMA allocation holding
 *	bha_ccbs_per_group CCBs; groups are allocated lazily as the
 *	allocating list runs dry.
 *
 *	XXX AB_QUIET/AB_SILENT lossage here; this is called during
 *	boot as well as at run-time.
 */
static void
bha_create_ccbs(struct bha_softc *sc, int count)
{
	struct bha_ccb_group *bcg;
	struct bha_ccb *ccb;
	bus_dma_segment_t seg;
	bus_dmamap_t ccbmap;
	int target, i, error, rseg;

	/*
	 * If the current CCB count is already the max number we're
	 * allowed to have, bail out now.
	 */
	if (sc->sc_cur_ccbs == sc->sc_max_ccbs)
		return;

	/*
	 * Compute our target count, and clamp it down to the max
	 * number we're allowed to have.
	 */
	target = sc->sc_cur_ccbs + count;
	if (target > sc->sc_max_ccbs)
		target = sc->sc_max_ccbs;

	/*
	 * If there are CCBs on the allocating list, don't allocate a
	 * CCB group yet.
	 */
	if (TAILQ_FIRST(&sc->sc_allocating_ccbs) != NULL)
		goto have_allocating_ccbs;

 allocate_group:
	/* One page per group, page-aligned so a group never crosses pages. */
	error = bus_dmamem_alloc(sc->sc_dmat, PAGE_SIZE,
	    PAGE_SIZE, 0, &seg, 1, &rseg, sc->sc_dmaflags | BUS_DMA_NOWAIT);
	if (error) {
		aprint_error_dev(&sc->sc_dev, "unable to allocate CCB group, error = %d\n",
		    error);
		goto bad_0;
	}

	error = bus_dmamem_map(sc->sc_dmat, &seg, rseg, PAGE_SIZE,
	    (void *)&bcg,
	    sc->sc_dmaflags | BUS_DMA_NOWAIT | BUS_DMA_COHERENT);
	if (error) {
		aprint_error_dev(&sc->sc_dev, "unable to map CCB group, error = %d\n",
		    error);
		goto bad_1;
	}

	memset(bcg, 0, PAGE_SIZE);

	error = bus_dmamap_create(sc->sc_dmat, PAGE_SIZE,
	    1, PAGE_SIZE, 0, sc->sc_dmaflags | BUS_DMA_NOWAIT, &ccbmap);
	if (error) {
		aprint_error_dev(&sc->sc_dev, "unable to create CCB group DMA map, error = %d\n",
		    error);
		goto bad_2;
	}

	error = bus_dmamap_load(sc->sc_dmat, ccbmap, bcg, PAGE_SIZE, NULL,
	    sc->sc_dmaflags | BUS_DMA_NOWAIT);
	if (error) {
		aprint_error_dev(&sc->sc_dev, "unable to load CCB group DMA map, error = %d\n",
		    error);
		goto bad_3;
	}

	bcg->bcg_dmamap = ccbmap;

#ifdef DIAGNOSTIC
	/* First and last CCB in the page must map to the same group. */
	if (BHA_CCB_GROUP(&bcg->bcg_ccbs[0]) !=
	    BHA_CCB_GROUP(&bcg->bcg_ccbs[bha_ccbs_per_group - 1]))
		panic("bha_create_ccbs: CCB group size botch");
#endif

	/*
	 * Add all of the CCBs in this group to the allocating list.
	 */
	for (i = 0; i < bha_ccbs_per_group; i++) {
		ccb = &bcg->bcg_ccbs[i];
		TAILQ_INSERT_TAIL(&sc->sc_allocating_ccbs, ccb, chain);
	}

 have_allocating_ccbs:
	/*
	 * Loop over the allocating list until we reach our CCB target.
	 * If we run out on the list, we'll allocate another group's
	 * worth.
	 */
	while (sc->sc_cur_ccbs < target) {
		ccb = TAILQ_FIRST(&sc->sc_allocating_ccbs);
		if (ccb == NULL)
			goto allocate_group;
		if (bha_init_ccb(sc, ccb) != 0) {
			/*
			 * We were unable to initialize the CCB.
			 * This is likely due to a resource shortage,
			 * so bail out now.
			 */
			return;
		}
	}

	/*
	 * If we got here, we've reached our target!
	 */
	return;

 bad_3:
	bus_dmamap_destroy(sc->sc_dmat, ccbmap);
 bad_2:
	bus_dmamem_unmap(sc->sc_dmat, (void *)bcg, PAGE_SIZE);
 bad_1:
	bus_dmamem_free(sc->sc_dmat, &seg, rseg);
 bad_0:
	return;
}
1853
1854 /*
1855 * bha_init_ccb:
1856 *
1857 * Initialize a CCB; helper function for bha_create_ccbs().
1858 */
1859 static int
1860 bha_init_ccb(struct bha_softc *sc, struct bha_ccb *ccb)
1861 {
1862 struct bha_ccb_group *bcg = BHA_CCB_GROUP(ccb);
1863 int hashnum, error;
1864
1865 /*
1866 * Create the DMA map for this CCB.
1867 *
1868 * XXX ALLOCNOW is a hack to prevent bounce buffer shortages
1869 * XXX in the ISA case. A better solution is needed.
1870 */
1871 error = bus_dmamap_create(sc->sc_dmat, BHA_MAXXFER, BHA_NSEG,
1872 BHA_MAXXFER, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW | sc->sc_dmaflags,
1873 &ccb->dmamap_xfer);
1874 if (error) {
1875 aprint_error_dev(&sc->sc_dev, "unable to create CCB DMA map, error = %d\n",
1876 error);
1877 return (error);
1878 }
1879
1880 TAILQ_REMOVE(&sc->sc_allocating_ccbs, ccb, chain);
1881
1882 /*
1883 * Put the CCB into the phystokv hash table.
1884 */
1885 ccb->hashkey = bcg->bcg_dmamap->dm_segs[0].ds_addr +
1886 BHA_CCB_OFFSET(ccb);
1887 hashnum = CCB_HASH(ccb->hashkey);
1888 ccb->nexthash = sc->sc_ccbhash[hashnum];
1889 sc->sc_ccbhash[hashnum] = ccb;
1890 bha_reset_ccb(ccb);
1891
1892 TAILQ_INSERT_HEAD(&sc->sc_free_ccb, ccb, chain);
1893 sc->sc_cur_ccbs++;
1894
1895 return (0);
1896 }
1897
1898 /*
1899 * bha_get_ccb:
1900 *
1901 * Get a CCB for the SCSI operation. If there are none left,
1902 * wait until one becomes available, if we can.
1903 */
1904 static struct bha_ccb *
1905 bha_get_ccb(struct bha_softc *sc)
1906 {
1907 struct bha_ccb *ccb;
1908 int s;
1909
1910 s = splbio();
1911 ccb = TAILQ_FIRST(&sc->sc_free_ccb);
1912 if (ccb != NULL) {
1913 TAILQ_REMOVE(&sc->sc_free_ccb, ccb, chain);
1914 ccb->flags |= CCB_ALLOC;
1915 }
1916 splx(s);
1917 return (ccb);
1918 }
1919
/*
 * bha_free_ccb:
 *
 *	Put a CCB back onto the free list.
 *
 *	Resets the CCB's flags (clearing CCB_ALLOC) before making it
 *	available again; protected by splbio().
 */
static void
bha_free_ccb(struct bha_softc *sc, struct bha_ccb *ccb)
{
	int s;

	s = splbio();
	bha_reset_ccb(ccb);
	TAILQ_INSERT_HEAD(&sc->sc_free_ccb, ccb, chain);
	splx(s);
}
1935
1936 /*
1937 * bha_ccb_phys_kv:
1938 *
1939 * Given a CCB DMA address, locate the CCB in kernel virtual space.
1940 */
1941 static struct bha_ccb *
1942 bha_ccb_phys_kv(struct bha_softc *sc, bus_addr_t ccb_phys)
1943 {
1944 int hashnum = CCB_HASH(ccb_phys);
1945 struct bha_ccb *ccb = sc->sc_ccbhash[hashnum];
1946
1947 while (ccb) {
1948 if (ccb->hashkey == ccb_phys)
1949 break;
1950 ccb = ccb->nexthash;
1951 }
1952 return (ccb);
1953 }
Cache object: 7c41f9e0b4aa61dde570fb51e739d31d
|