/* FreeBSD/Linux Kernel Cross Reference: sys/dev/vme/si.c */
1 /* $NetBSD: si.c,v 1.13 2003/05/03 18:11:42 wiz Exp $ */
2
3 /*-
4 * Copyright (c) 1996,2000 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Adam Glass, David Jones, Gordon W. Ross, Jason R. Thorpe and
9 * Paul Kranenburg.
10 *
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
13 * are met:
14 * 1. Redistributions of source code must retain the above copyright
15 * notice, this list of conditions and the following disclaimer.
16 * 2. Redistributions in binary form must reproduce the above copyright
17 * notice, this list of conditions and the following disclaimer in the
18 * documentation and/or other materials provided with the distribution.
19 * 3. All advertising materials mentioning features or use of this software
20 * must display the following acknowledgement:
21 * This product includes software developed by the NetBSD
22 * Foundation, Inc. and its contributors.
23 * 4. Neither the name of The NetBSD Foundation nor the names of its
24 * contributors may be used to endorse or promote products derived
25 * from this software without specific prior written permission.
26 *
27 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
28 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
29 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
30 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
31 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
32 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
33 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
34 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
35 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
36 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
37 * POSSIBILITY OF SUCH DAMAGE.
38 */
39
40 /*
41 * This file contains the VME bus-dependent portions of the `si' SCSI adapter.
42 * This hardware is frequently found on Sun 3 and Sun 4 machines.
43 *
44 * The SCSI machinery on this adapter is implemented by an NCR5380,
45 * which is taken care of by the chipset driver in /sys/dev/ic/ncr5380sbc.c
46 *
47 * The logic has a bit to enable or disable the DMA engine,
48 * but that bit also gates the interrupt line from the NCR5380!
49 * Therefore, in order to get any interrupt from the 5380, (i.e.
50 * for reselect) one must clear the DMA engine transfer count and
51 * then enable DMA. This has the further complication that you
52 * CAN NOT touch the NCR5380 while the DMA enable bit is set, so
53 * we have to turn DMA back off before we even look at the 5380.
54 *
55 * What wonderfully whacky hardware this is!
56 *
57 */
58
59 /*
60 * This driver originated as an MD implementation for the sun3 and sun4
61 * ports. The notes pertaining to that history are included below.
62 *
63 * David Jones wrote the initial version of this module for NetBSD/sun3,
64 * which included support for the VME adapter only. (no reselection).
65 *
66 * Gordon Ross added support for the Sun 3 OBIO adapter, and re-worked
67 * both the VME and OBIO code to support disconnect/reselect.
68 * (Required figuring out the hardware "features" noted above.)
69 *
70 * The autoconfiguration boilerplate came from Adam Glass.
71 *
72 * Jason R. Thorpe ported the autoconfiguration and VME portions to
73 * NetBSD/sparc, and added initial support for the 4/100 "SCSI Weird",
74 * a wacky OBIO variant of the VME SCSI-3. Many thanks to Chuck Cranor
75 * for lots of helpful tips and suggestions. Thanks also to Paul Kranenburg
76 * and Chris Torek for bits of insight needed along the way. Thanks to
77 * David Gilbert and Andrew Gillham who risked filesystem life-and-limb
78 * for the sake of testing. Andrew Gillham helped work out the bugs in
79 * the 4/100 DMA code.
80 */
81
82 #include <sys/cdefs.h>
83 __KERNEL_RCSID(0, "$NetBSD: si.c,v 1.13 2003/05/03 18:11:42 wiz Exp $");
84
85 #include "opt_ddb.h"
86
87 #include <sys/param.h>
88 #include <sys/systm.h>
89 #include <sys/kernel.h>
90 #include <sys/malloc.h>
91 #include <sys/errno.h>
92 #include <sys/device.h>
93 #include <sys/buf.h>
94
95 #include <machine/bus.h>
96 #include <machine/intr.h>
97
98 #include <dev/vme/vmereg.h>
99 #include <dev/vme/vmevar.h>
100
101 #include <dev/scsipi/scsi_all.h>
102 #include <dev/scsipi/scsipi_all.h>
103 #include <dev/scsipi/scsipi_debug.h>
104 #include <dev/scsipi/scsiconf.h>
105
106 #ifndef Debugger
107 #define Debugger()
108 #endif
109
110 #ifndef DEBUG
111 #define DEBUG XXX
112 #endif
113
114 #include <dev/ic/ncr5380reg.h>
115 #include <dev/ic/ncr5380var.h>
116
117 #include <dev/vme/sireg.h>
118
119 /*
120 * Transfers smaller than this are done using PIO
121 * (on assumption they're not worth DMA overhead)
122 */
123 #define MIN_DMA_LEN 128
124
125 #ifdef DEBUG
126 int si_debug = 0;
127 #endif
128
/*
 * This structure is used to keep track of mapped DMA requests.
 * One handle is allocated per SCSI opening (see si_attach()); a
 * handle is claimed in si_dma_alloc() and released in si_dma_free().
 */
struct si_dma_handle {
	int 		dh_flags;	/* SIDH_* state bits below */
#define	SIDH_BUSY	0x01		/* This DH is in use */
#define	SIDH_OUT	0x02		/* DMA does data out (write) */
	int 		dh_maplen;	/* Original data length */
	bus_dmamap_t	dh_dmamap;	/* bus_dma map backing this handle */
#define dh_dvma	dh_dmamap->dm_segs[0].ds_addr /* VA of buffer in DVMA space */
};
140
/*
 * The first structure member has to be the ncr5380_softc
 * so we can just cast to go back and forth between them.
 */
struct si_softc {
	struct ncr5380_softc	ncr_sc;		/* MI 5380 state; MUST be first */
	bus_space_tag_t		sc_bustag;	/* bus tags */
	bus_dma_tag_t		sc_dmatag;	/* DMA tag (from vme_attach_args) */
	vme_chipset_tag_t	sc_vctag;	/* VME chipset tag */

	int		sc_adapter_iv_am; /* int. vec + address modifier */
	struct si_dma_handle *sc_dma;	/* array of SCI_OPENINGS DMA handles */
	int		sc_xlen;	/* length of current DMA segment. */
	int		sc_options;	/* options for this instance. */
};
156
157 /*
158 * Options. By default, DMA is enabled and DMA completion interrupts
159 * and reselect are disabled. You may enable additional features with
160 * the `flags' directive in your kernel's configuration file.
161 *
162 * Alternatively, you can patch your kernel with DDB or some other
163 * mechanism. The sc_options member of the softc is OR'd with
164 * the value in si_options.
165 *
166 * Note, there's a separate sw_options to make life easier.
167 */
#define	SI_ENABLE_DMA	0x01	/* Use DMA (maybe polled) */
#define	SI_DMA_INTR	0x02	/* DMA completion interrupts */
#define	SI_DO_RESELECT	0x04	/* Allow disconnect/reselect */
#define	SI_OPTIONS_MASK	(SI_ENABLE_DMA|SI_DMA_INTR|SI_DO_RESELECT)
/*
 * Bit-name string for bitmask_snprintf(9): the first byte selects the
 * output base (\10 = octal); each entry is <bit-number><name>.
 * (Was "\1\3..." -- \1 is not a valid base byte.)
 */
#define	SI_OPTIONS_BITS	"\10\3RESELECT\2DMA_INTR\1DMA"
/* Default options for every instance; may be overridden per-device. */
int si_options = SI_ENABLE_DMA|SI_DMA_INTR|SI_DO_RESELECT;
174
175 static int si_match __P((struct device *, struct cfdata *, void *));
176 static void si_attach __P((struct device *, struct device *, void *));
177 static int si_intr __P((void *));
178 static void si_reset_adapter __P((struct ncr5380_softc *));
179
180 void si_dma_alloc __P((struct ncr5380_softc *));
181 void si_dma_free __P((struct ncr5380_softc *));
182 void si_dma_poll __P((struct ncr5380_softc *));
183
184 void si_dma_setup __P((struct ncr5380_softc *));
185 void si_dma_start __P((struct ncr5380_softc *));
186 void si_dma_eop __P((struct ncr5380_softc *));
187 void si_dma_stop __P((struct ncr5380_softc *));
188
189 void si_intr_on __P((struct ncr5380_softc *));
190 void si_intr_off __P((struct ncr5380_softc *));
191
192 /*
193 * Shorthand bus space access
194 * XXX - must look into endian issues here.
195 */
196 #define SIREG_READ(sc, index) \
197 bus_space_read_2((sc)->sc_regt, (sc)->sc_regh, index)
198 #define SIREG_WRITE(sc, index, v) \
199 bus_space_write_2((sc)->sc_regt, (sc)->sc_regh, index, v)
200
201
202 /* Auto-configuration glue. */
203 CFATTACH_DECL(si, sizeof(struct si_softc),
204 si_match, si_attach, NULL, NULL);
205
206 static int
207 si_match(parent, cf, aux)
208 struct device *parent;
209 struct cfdata *cf;
210 void *aux;
211 {
212 struct vme_attach_args *va = aux;
213 vme_chipset_tag_t ct = va->va_vct;
214 vme_am_t mod;
215 vme_addr_t vme_addr;
216
217 /* Make sure there is something there... */
218 mod = VME_AM_A24 | VME_AM_MBO | VME_AM_SUPER | VME_AM_DATA;
219 vme_addr = va->r[0].offset;
220
221 if (vme_probe(ct, vme_addr, 1, mod, VME_D8, NULL, 0) != 0)
222 return (0);
223
224 /*
225 * If this is a VME SCSI board, we have to determine whether
226 * it is an "sc" (Sun2) or "si" (Sun3) SCSI board. This can
227 * be determined using the fact that the "sc" board occupies
228 * 4K bytes in VME space but the "si" board occupies 2K bytes.
229 */
230 return (vme_probe(ct, vme_addr + 0x801, 1, mod, VME_D8, NULL, 0) != 0);
231 }
232
/*
 * Attach: map the board registers, install the MD hooks used by the
 * MI ncr5380 code, create per-opening DMA maps, then reset the board
 * and hand off to ncr5380_attach().
 */
static void
si_attach(parent, self, aux)
	struct device *parent, *self;
	void *aux;
{
	struct si_softc *sc = (struct si_softc *) self;
	struct ncr5380_softc *ncr_sc = &sc->ncr_sc;
	struct vme_attach_args *va = aux;
	vme_chipset_tag_t ct = va->va_vct;
	bus_space_tag_t bt;
	bus_space_handle_t bh;
	vme_mapresc_t resc;
	vme_intr_handle_t ih;
	vme_am_t mod;
	char bits[64];
	int i;

	sc->sc_dmatag = va->va_bdt;
	sc->sc_vctag = ct;

	/* A24 supervisor data access, same as used in si_match(). */
	mod = VME_AM_A24 | VME_AM_MBO | VME_AM_SUPER | VME_AM_DATA;

	if (vme_space_map(ct, va->r[0].offset, SIREG_BANK_SZ,
	    mod, VME_D8, 0, &bt, &bh, &resc) != 0)
		panic("%s: vme_space_map", ncr_sc->sc_dev.dv_xname);

	ncr_sc->sc_regt = bt;
	ncr_sc->sc_regh = bh;

	/* Start with the compiled-in defaults; may be overridden below. */
	sc->sc_options = si_options;

	/*
	 * NOTE(review): sc_dma_eop is pointed at si_dma_stop rather than
	 * the (empty) si_dma_eop stub defined below -- confirm intent.
	 */
	ncr_sc->sc_dma_setup = si_dma_setup;
	ncr_sc->sc_dma_start = si_dma_start;
	ncr_sc->sc_dma_eop = si_dma_stop;
	ncr_sc->sc_dma_stop = si_dma_stop;

	vme_intr_map(ct, va->ilevel, va->ivector, &ih);
	vme_intr_establish(ct, ih, IPL_BIO, si_intr, sc);

	printf("\n");

	/* Value later loaded into SIREG_IV_AM by si_reset_adapter(). */
	sc->sc_adapter_iv_am = (mod << 8) | (va->ivector & 0xFF);

	/*
	 * Pull in the options flags.  Allow the user to completely
	 * override the default values.
	 */
	if ((ncr_sc->sc_dev.dv_cfdata->cf_flags & SI_OPTIONS_MASK) != 0)
		sc->sc_options =
		    (ncr_sc->sc_dev.dv_cfdata->cf_flags & SI_OPTIONS_MASK);

	/*
	 * Initialize fields used by the MI code
	 */

	/* NCR5380 register bank offsets */
	ncr_sc->sci_r0 = 0;
	ncr_sc->sci_r1 = 1;
	ncr_sc->sci_r2 = 2;
	ncr_sc->sci_r3 = 3;
	ncr_sc->sci_r4 = 4;
	ncr_sc->sci_r5 = 5;
	ncr_sc->sci_r6 = 6;
	ncr_sc->sci_r7 = 7;

	ncr_sc->sc_rev = NCR_VARIANT_NCR5380;

	/*
	 * MD function pointers used by the MI code.
	 */
	ncr_sc->sc_pio_out = ncr5380_pio_out;
	ncr_sc->sc_pio_in = ncr5380_pio_in;
	ncr_sc->sc_dma_alloc = si_dma_alloc;
	ncr_sc->sc_dma_free = si_dma_free;
	ncr_sc->sc_dma_poll = si_dma_poll;

	/* Translate our option bits into MI ncr5380 flags. */
	ncr_sc->sc_flags = 0;
	if ((sc->sc_options & SI_DO_RESELECT) == 0)
		ncr_sc->sc_no_disconnect = 0xFF;
	if ((sc->sc_options & SI_DMA_INTR) == 0)
		ncr_sc->sc_flags |= NCR5380_FORCE_POLLING;
	ncr_sc->sc_min_dma_len = MIN_DMA_LEN;

	/*
	 * Allocate DMA handles, one per SCSI opening.
	 */
	i = SCI_OPENINGS * sizeof(struct si_dma_handle);
	sc->sc_dma = (struct si_dma_handle *)malloc(i, M_DEVBUF, M_NOWAIT);
	if (sc->sc_dma == NULL)
		panic("si: DMA handle malloc failed");

	for (i = 0; i < SCI_OPENINGS; i++) {
		sc->sc_dma[i].dh_flags = 0;

		/* Allocate a DMA handle */
		if (vme_dmamap_create(
		    sc->sc_vctag,	/* VME chip tag */
		    MAXPHYS,		/* size */
		    VME_AM_A24,		/* address modifier */
		    VME_D16,		/* data size */
		    0,			/* swap */
		    1,			/* nsegments */
		    MAXPHYS,		/* maxsegsz */
		    0,			/* boundary */
		    BUS_DMA_NOWAIT,
		    &sc->sc_dma[i].dh_dmamap) != 0) {

			printf("%s: DMA buffer map create error\n",
			    ncr_sc->sc_dev.dv_xname);
			return;
		}
	}

	/* Report any non-default option bits. */
	if (sc->sc_options) {
		printf("%s: options=%s\n", ncr_sc->sc_dev.dv_xname,
		    bitmask_snprintf(sc->sc_options, SI_OPTIONS_BITS,
		    bits, sizeof(bits)));
	}

	/* Host adapter is SCSI ID 7. */
	ncr_sc->sc_channel.chan_id = 7;
	ncr_sc->sc_adapter.adapt_minphys = minphys;

	/*
	 * Initialize si board itself.
	 */
	si_reset_adapter(ncr_sc);
	ncr5380_attach(ncr_sc);

	if (sc->sc_options & SI_DO_RESELECT) {
		/*
		 * Need to enable interrupts (and DMA!)
		 * on this H/W for reselect to work.
		 */
		ncr_sc->sc_intr_on = si_intr_on;
		ncr_sc->sc_intr_off = si_intr_off;
	}
}
370
/*
 * CSR bits of interest: SBC and DMA interrupt-pending, plus the two
 * DMA error conditions.
 * NOTE(review): this macro appears unused in this file; si_dma_poll()
 * builds the same mask by hand -- consider using it there.
 */
#define CSR_WANT (SI_CSR_SBC_IP | SI_CSR_DMA_IP | \
	SI_CSR_DMA_CONFLICT | SI_CSR_DMA_BUS_ERR )

/*
 * VME interrupt handler.  Reads the board CSR, flags DMA errors
 * (aborting any transfer in progress), and dispatches pending SBC/DMA
 * interrupts to the MI ncr5380_intr().  Returns nonzero if claimed.
 */
static int
si_intr(void *arg)
{
	struct si_softc *sc = arg;
	struct ncr5380_softc *ncr_sc = (struct ncr5380_softc *)arg;
	int dma_error, claimed;
	u_short csr;

	claimed = 0;
	dma_error = 0;

	/* SBC interrupt? DMA interrupt? */
	csr = SIREG_READ(ncr_sc, SIREG_CSR);

	NCR_TRACE("si_intr: csr=0x%x\n", csr);

	if (csr & SI_CSR_DMA_CONFLICT) {
		dma_error |= SI_CSR_DMA_CONFLICT;
		printf("si_intr: DMA conflict\n");
	}
	if (csr & SI_CSR_DMA_BUS_ERR) {
		dma_error |= SI_CSR_DMA_BUS_ERR;
		printf("si_intr: DMA bus error\n");
	}
	if (dma_error) {
		/* Abort the in-flight transfer, if any. */
		if (sc->ncr_sc.sc_state & NCR_DOINGDMA)
			sc->ncr_sc.sc_state |= NCR_ABORTING;
		/* Make sure we will call the main isr. */
		csr |= SI_CSR_DMA_IP;
	}

	if (csr & (SI_CSR_SBC_IP | SI_CSR_DMA_IP)) {
		claimed = ncr5380_intr(&sc->ncr_sc);
#ifdef DEBUG
		if (!claimed) {
			printf("si_intr: spurious from SBC\n");
			if (si_debug & 4) {
				Debugger();	/* XXX */
			}
		}
#endif
	}

	return (claimed);
}
419
420
/*
 * Reset the si board: pulse the (active-low) FIFO and SCSI reset
 * bits, re-enable board interrupts, clear all DMA/FIFO counters, and
 * reload the interrupt vector + address modifier register.
 */
static void
si_reset_adapter(struct ncr5380_softc *ncr_sc)
{
	struct si_softc *sc = (struct si_softc *)ncr_sc;

#ifdef DEBUG
	if (si_debug) {
		printf("si_reset_adapter\n");
	}
#endif

	/*
	 * The SCSI3 controller has an 8K FIFO to buffer data between the
	 * 5380 and the DMA.  Make sure it starts out empty.
	 *
	 * The reset bits in the CSR are active low.
	 */
	SIREG_WRITE(ncr_sc, SIREG_CSR, 0);
	delay(10);
	SIREG_WRITE(ncr_sc, SIREG_CSR,
	    SI_CSR_FIFO_RES | SI_CSR_SCSI_RES | SI_CSR_INTR_EN);
	delay(10);

	/* Clear any residual DMA state and load the interrupt vector. */
	SIREG_WRITE(ncr_sc, SIREG_FIFO_CNT, 0);
	SIREG_WRITE(ncr_sc, SIREG_DMA_ADDRH, 0);
	SIREG_WRITE(ncr_sc, SIREG_DMA_ADDRL, 0);
	SIREG_WRITE(ncr_sc, SIREG_DMA_CNTH, 0);
	SIREG_WRITE(ncr_sc, SIREG_DMA_CNTL, 0);
	SIREG_WRITE(ncr_sc, SIREG_IV_AM, sc->sc_adapter_iv_am);
	SIREG_WRITE(ncr_sc, SIREG_FIFO_CNTH, 0);

	/* Clear any pending 5380 interrupt. */
	SCI_CLR_INTR(ncr_sc);
}
454
455 /*****************************************************************
456 * Common functions for DMA
457 ****************************************************************/
458
/*
 * Allocate a DMA handle and put it in sc->sc_dma.  Prepare
 * for DMA transfer.
 *
 * Falls back to PIO (by leaving sr->sr_dma_hand NULL) when DMA is
 * disabled, the buffer is misaligned, or the DVMA map load fails.
 */
void
si_dma_alloc(ncr_sc)
	struct ncr5380_softc *ncr_sc;
{
	struct si_softc *sc = (struct si_softc *)ncr_sc;
	struct sci_req *sr = ncr_sc->sc_current;
	struct scsipi_xfer *xs = sr->sr_xs;
	struct si_dma_handle *dh;
	int i, xlen;
	u_long addr;

#ifdef DIAGNOSTIC
	if (sr->sr_dma_hand != NULL)
		panic("si_dma_alloc: already have DMA handle");
#endif

#if 1	/* XXX - Temporary */
	/* XXX - In case we think DMA is completely broken... */
	if ((sc->sc_options & SI_ENABLE_DMA) == 0)
		return;
#endif

	addr = (u_long) ncr_sc->sc_dataptr;
	xlen = ncr_sc->sc_datalen;

	/* If the DMA start addr is misaligned then do PIO */
	if ((addr & 1) || (xlen & 1)) {
		printf("si_dma_alloc: misaligned.\n");
		return;
	}

	/* Make sure our caller checked sc_min_dma_len. */
	if (xlen < MIN_DMA_LEN)
		panic("si_dma_alloc: xlen=0x%x", xlen);

	/* Find free DMA handle.  Guaranteed to find one since we have
	   as many DMA handles as the driver has processes. */
	for (i = 0; i < SCI_OPENINGS; i++) {
		if ((sc->sc_dma[i].dh_flags & SIDH_BUSY) == 0)
			goto found;
	}
	panic("si: no free DMA handles.");

found:
	dh = &sc->sc_dma[i];
	dh->dh_flags = SIDH_BUSY;
	dh->dh_maplen = xlen;

	/* Copy the "write" flag for convenience. */
	if ((xs->xs_control & XS_CTL_DATA_OUT) != 0)
		dh->dh_flags |= SIDH_OUT;

	/*
	 * Double-map the buffer into DVMA space.  If we can't re-map
	 * the buffer, we print a warning and fall back to PIO mode.
	 *
	 * NOTE: it is not safe to sleep here!
	 */
	if (bus_dmamap_load(sc->sc_dmatag, dh->dh_dmamap,
	    (caddr_t)addr, xlen, NULL, BUS_DMA_NOWAIT) != 0) {
		/* Can't remap segment */
		printf("si_dma_alloc: can't remap 0x%lx/0x%x, doing PIO\n",
		    addr, dh->dh_maplen);
		dh->dh_flags = 0;
		return;
	}
	/*
	 * NOTE(review): the kernel VA `addr' is passed as the map-offset
	 * argument of bus_dmamap_sync(); offset 0 would be conventional.
	 * Some ports ignore this argument -- confirm before changing.
	 */
	bus_dmamap_sync(sc->sc_dmatag, dh->dh_dmamap, addr, xlen,
	    (dh->dh_flags & SIDH_OUT)
		? BUS_DMASYNC_PREWRITE
		: BUS_DMASYNC_PREREAD);

	/* success */
	sr->sr_dma_hand = dh;

	return;
}
539
540
/*
 * Release the DMA handle attached to the current request: sync and
 * unload its DVMA mapping and mark the handle free for reuse.
 * Must not be called while a DMA transfer is in progress.
 */
void
si_dma_free(ncr_sc)
	struct ncr5380_softc *ncr_sc;
{
	struct si_softc *sc = (struct si_softc *)ncr_sc;
	struct sci_req *sr = ncr_sc->sc_current;
	struct si_dma_handle *dh = sr->sr_dma_hand;

#ifdef DIAGNOSTIC
	if (dh == NULL)
		panic("si_dma_free: no DMA handle");
#endif

	if (ncr_sc->sc_state & NCR_DOINGDMA)
		panic("si_dma_free: free while in progress");

	if (dh->dh_flags & SIDH_BUSY) {
		/*
		 * Give back the DVMA space.
		 * NOTE(review): dh_dvma is passed as the map-offset
		 * argument of bus_dmamap_sync(), mirroring the usage in
		 * si_dma_alloc() -- confirm the port ignores it.
		 */
		bus_dmamap_sync(sc->sc_dmatag, dh->dh_dmamap,
		    dh->dh_dvma, dh->dh_maplen,
		    (dh->dh_flags & SIDH_OUT)
			? BUS_DMASYNC_POSTWRITE
			: BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dmatag, dh->dh_dmamap);
		dh->dh_flags = 0;
	}
	sr->sr_dma_hand = NULL;
}
569
570
571 /*
572 * Poll (spin-wait) for DMA completion.
573 * Called right after xx_dma_start(), and
574 * xx_dma_stop() will be called next.
575 * Same for either VME or OBIO.
576 */
577 void
578 si_dma_poll(ncr_sc)
579 struct ncr5380_softc *ncr_sc;
580 {
581 struct sci_req *sr = ncr_sc->sc_current;
582 int tmo, csr_mask, csr;
583
584 /* Make sure DMA started successfully. */
585 if (ncr_sc->sc_state & NCR_ABORTING)
586 return;
587
588 csr_mask = SI_CSR_SBC_IP | SI_CSR_DMA_IP |
589 SI_CSR_DMA_CONFLICT | SI_CSR_DMA_BUS_ERR;
590
591 tmo = 50000; /* X100 = 5 sec. */
592 for (;;) {
593 csr = SIREG_READ(ncr_sc, SIREG_CSR);
594 if (csr & csr_mask)
595 break;
596 if (--tmo <= 0) {
597 printf("%s: DMA timeout (while polling)\n",
598 ncr_sc->sc_dev.dv_xname);
599 /* Indicate timeout as MI code would. */
600 sr->sr_flags |= SR_OVERDUE;
601 break;
602 }
603 delay(100);
604 }
605
606 #ifdef DEBUG
607 if (si_debug) {
608 printf("si_dma_poll: done, csr=0x%x\n", csr);
609 }
610 #endif
611 }
612
613
614 /*****************************************************************
615 * VME functions for DMA
616 ****************************************************************/
617
618
619 /*
620 * This is called when the bus is going idle,
621 * so we want to enable the SBC interrupts.
622 * That is controlled by the DMA enable!
623 * Who would have guessed!
624 * What a NASTY trick!
625 */
626 void
627 si_intr_on(ncr_sc)
628 struct ncr5380_softc *ncr_sc;
629 {
630 u_int16_t csr;
631
632 /* Clear DMA start address and counters */
633 SIREG_WRITE(ncr_sc, SIREG_DMA_ADDRH, 0);
634 SIREG_WRITE(ncr_sc, SIREG_DMA_ADDRL, 0);
635 SIREG_WRITE(ncr_sc, SIREG_DMA_CNTH, 0);
636 SIREG_WRITE(ncr_sc, SIREG_DMA_CNTL, 0);
637
638 /* Enter receive mode (for safety) and enable DMA engine */
639 csr = SIREG_READ(ncr_sc, SIREG_CSR);
640 csr &= ~SI_CSR_SEND;
641 csr |= SI_CSR_DMA_EN;
642 SIREG_WRITE(ncr_sc, SIREG_CSR, csr);
643 }
644
645 /*
646 * This is called when the bus is idle and we are
647 * about to start playing with the SBC chip.
648 */
649 void
650 si_intr_off(ncr_sc)
651 struct ncr5380_softc *ncr_sc;
652 {
653 u_int16_t csr;
654
655 csr = SIREG_READ(ncr_sc, SIREG_CSR);
656 csr &= ~SI_CSR_DMA_EN;
657 SIREG_WRITE(ncr_sc, SIREG_CSR, csr);
658 }
659
/*
 * This function is called during the COMMAND or MSG_IN phase
 * that precedes a DATA_IN or DATA_OUT phase, in case we need
 * to setup the DMA engine before the bus enters a DATA phase.
 *
 * XXX: The VME adapter appears to suppress SBC interrupts
 * when the FIFO is not empty or the FIFO count is non-zero!
 *
 * On the VME version we just clear the DMA count and address
 * here (to make sure it stays idle) and do the real setup
 * later, in dma_start.
 */
void
si_dma_setup(ncr_sc)
	struct ncr5380_softc *ncr_sc;
{
	struct si_softc *sc = (struct si_softc *)ncr_sc;
	struct sci_req *sr = ncr_sc->sc_current;
	struct si_dma_handle *dh = sr->sr_dma_hand;
	u_int16_t csr;
	u_long dva;
	int xlen;

	/*
	 * Set up the DMA controller.
	 * Note that (dh->dh_len < sc_datalen)
	 */

	csr = SIREG_READ(ncr_sc, SIREG_CSR);

	/* Disable DMA while we're setting up the transfer */
	csr &= ~SI_CSR_DMA_EN;

	/* Reset the FIFO */
	csr &= ~SI_CSR_FIFO_RES;	/* active low */
	SIREG_WRITE(ncr_sc, SIREG_CSR, csr);
	csr |= SI_CSR_FIFO_RES;
	SIREG_WRITE(ncr_sc, SIREG_CSR, csr);

	/*
	 * Get the DVMA mapping for this segment.
	 */
	dva = (u_long)(dh->dh_dvma);
	if (dva & 1)
		panic("si_dma_setup: bad dmaaddr=0x%lx", dva);
	/* Even length only; remember it for si_dma_start/si_dma_stop. */
	xlen = ncr_sc->sc_datalen;
	xlen &= ~1;
	sc->sc_xlen = xlen;	/* XXX: or less... */

#ifdef DEBUG
	if (si_debug & 2) {
		/* NOTE(review): tag says "si_dma_start" but this is si_dma_setup. */
		printf("si_dma_start: dh=%p, dmaaddr=0x%lx, xlen=%d\n",
		    dh, dva, xlen);
	}
#endif
	/* Set direction (send/recv) */
	if (dh->dh_flags & SIDH_OUT) {
		csr |= SI_CSR_SEND;
	} else {
		csr &= ~SI_CSR_SEND;
	}

	/* Set byte-packing control */
	if (dva & 2) {
		csr |= SI_CSR_BPCON;
	} else {
		csr &= ~SI_CSR_BPCON;
	}

	SIREG_WRITE(ncr_sc, SIREG_CSR, csr);

	/* Load start address */
	SIREG_WRITE(ncr_sc, SIREG_DMA_ADDRH, (u_int16_t)(dva >> 16));
	SIREG_WRITE(ncr_sc, SIREG_DMA_ADDRL, (u_int16_t)(dva & 0xFFFF));

	/* Clear DMA counters; these will be set in si_dma_start() */
	SIREG_WRITE(ncr_sc, SIREG_DMA_CNTH, 0);
	SIREG_WRITE(ncr_sc, SIREG_DMA_CNTL, 0);

	/* Clear FIFO counter. (also hits dma_count) */
	SIREG_WRITE(ncr_sc, SIREG_FIFO_CNTH, 0);
	SIREG_WRITE(ncr_sc, SIREG_FIFO_CNT, 0);
}
743
744
/*
 * Kick off the transfer prepared by si_dma_setup(): load the length
 * registers, put the 5380 into DMA mode for the proper direction,
 * then enable the board's DMA engine.
 */
void
si_dma_start(ncr_sc)
	struct ncr5380_softc *ncr_sc;
{
	struct si_softc *sc = (struct si_softc *)ncr_sc;
	struct sci_req *sr = ncr_sc->sc_current;
	struct si_dma_handle *dh = sr->sr_dma_hand;
	int xlen;
	u_int mode;
	u_int16_t csr;

	/* Length computed (and truncated to even) in si_dma_setup(). */
	xlen = sc->sc_xlen;

	/* Load transfer length */
	SIREG_WRITE(ncr_sc, SIREG_DMA_CNTH, (u_int16_t)(xlen >> 16));
	SIREG_WRITE(ncr_sc, SIREG_DMA_CNTL, (u_int16_t)(xlen & 0xFFFF));
	SIREG_WRITE(ncr_sc, SIREG_FIFO_CNTH, (u_int16_t)(xlen >> 16));
	SIREG_WRITE(ncr_sc, SIREG_FIFO_CNT, (u_int16_t)(xlen & 0xFFFF));

	/*
	 * Acknowledge the phase change.  (After DMA setup!)
	 * Put the SBIC into DMA mode, and start the transfer.
	 */
	if (dh->dh_flags & SIDH_OUT) {
		NCR5380_WRITE(ncr_sc, sci_tcmd, PHASE_DATA_OUT);
		SCI_CLR_INTR(ncr_sc);
		NCR5380_WRITE(ncr_sc, sci_icmd, SCI_ICMD_DATA);

		mode = NCR5380_READ(ncr_sc, sci_mode);
		mode |= (SCI_MODE_DMA | SCI_MODE_DMA_IE);
		NCR5380_WRITE(ncr_sc, sci_mode, mode);

		NCR5380_WRITE(ncr_sc, sci_dma_send, 0);	/* start it */
	} else {
		NCR5380_WRITE(ncr_sc, sci_tcmd, PHASE_DATA_IN);
		SCI_CLR_INTR(ncr_sc);
		NCR5380_WRITE(ncr_sc, sci_icmd, 0);

		mode = NCR5380_READ(ncr_sc, sci_mode);
		mode |= (SCI_MODE_DMA | SCI_MODE_DMA_IE);
		NCR5380_WRITE(ncr_sc, sci_mode, mode);

		NCR5380_WRITE(ncr_sc, sci_irecv, 0);	/* start it */
	}

	/* Tell si_dma_stop() / si_dma_free() a transfer is running. */
	ncr_sc->sc_state |= NCR_DOINGDMA;

	/* Enable DMA engine */
	csr = SIREG_READ(ncr_sc, SIREG_CSR);
	csr |= SI_CSR_DMA_EN;
	SIREG_WRITE(ncr_sc, SIREG_CSR, csr);

#ifdef DEBUG
	if (si_debug & 2) {
		printf("si_dma_start: started, flags=0x%x\n",
		    ncr_sc->sc_state);
	}
#endif
}
804
805
/*
 * "End of phase" DMA hook.  Intentionally empty on this adapter: the
 * DMA engine is already halted before the MI code examines sci_csr.
 * NOTE(review): si_attach() installs si_dma_stop (not this stub) as
 * sc_dma_eop, so this function is never called -- confirm intent.
 */
void
si_dma_eop(ncr_sc)
	struct ncr5380_softc *ncr_sc;
{

	/* Not needed - DMA was stopped prior to examining sci_csr */
}
813
814
/*
 * Halt the DMA engine, figure out how many bytes actually moved,
 * advance the MI data pointer/length, recover any "left-over" bytes
 * stuck in the byte-pack register after a read, and finally take the
 * 5380 back out of DMA mode.
 */
void
si_dma_stop(ncr_sc)
	struct ncr5380_softc *ncr_sc;
{
	struct si_softc *sc = (struct si_softc *)ncr_sc;
	struct sci_req *sr = ncr_sc->sc_current;
	struct si_dma_handle *dh = sr->sr_dma_hand;
	int resid, ntrans;
	u_int16_t csr;
	u_int mode;

	if ((ncr_sc->sc_state & NCR_DOINGDMA) == 0) {
#ifdef DEBUG
		printf("si_dma_stop: DMA not running\n");
#endif
		return;
	}

	ncr_sc->sc_state &= ~NCR_DOINGDMA;

	csr = SIREG_READ(ncr_sc, SIREG_CSR);

	/* First, halt the DMA engine. */
	csr &= ~SI_CSR_DMA_EN;
	SIREG_WRITE(ncr_sc, SIREG_CSR, csr);

	/* A DMA error forces an abort and a full adapter reset. */
	if (csr & (SI_CSR_DMA_CONFLICT | SI_CSR_DMA_BUS_ERR)) {
		printf("si: DMA error, csr=0x%x, reset\n", csr);
		sr->sr_xs->error = XS_DRIVER_STUFFUP;
		ncr_sc->sc_state |= NCR_ABORTING;
		si_reset_adapter(ncr_sc);
	}

	/* Note that timeout may have set the error flag. */
	if (ncr_sc->sc_state & NCR_ABORTING)
		goto out;

	/*
	 * Now try to figure out how much actually transferred
	 *
	 * The fifo_count does not reflect how many bytes were
	 * actually transferred for VME.
	 *
	 * SCSI-3 VME interface is a little funny on writes:
	 * if we have a disconnect, the DMA has overshot by
	 * one byte and the resid needs to be incremented.
	 * Only happens for partial transfers.
	 * (Thanks to Matt Jacob)
	 */

	/* 32-bit residual from the two 16-bit FIFO count registers. */
	resid = SIREG_READ(ncr_sc, SIREG_FIFO_CNTH) << 16;
	resid |= SIREG_READ(ncr_sc, SIREG_FIFO_CNT) & 0xFFFF;
	if (dh->dh_flags & SIDH_OUT)
		if ((resid > 0) && (resid < sc->sc_xlen))
			resid++;
	ntrans = sc->sc_xlen - resid;

#ifdef DEBUG
	if (si_debug & 2) {
		printf("si_dma_stop: resid=0x%x ntrans=0x%x\n",
		    resid, ntrans);
	}
#endif

	if (ntrans > ncr_sc->sc_datalen)
		panic("si_dma_stop: excess transfer");

	/* Adjust data pointer */
	ncr_sc->sc_dataptr += ntrans;
	ncr_sc->sc_datalen -= ntrans;

#ifdef DEBUG
	if (si_debug & 2) {
		printf("si_dma_stop: ntrans=0x%x\n", ntrans);
	}
#endif

	/*
	 * After a read, we may need to clean-up
	 * "Left-over bytes" (yuck!)  The byte-pack registers hold the
	 * last 1-3 bytes that never made it out of the FIFO; patch
	 * them into the tail of the data already copied.
	 */
	if (((dh->dh_flags & SIDH_OUT) == 0) &&
	    ((csr & SI_CSR_LOB) != 0))
	{
		char *cp = ncr_sc->sc_dataptr;
		u_int16_t bprh, bprl;

		bprh = SIREG_READ(ncr_sc, SIREG_BPRH);
		bprl = SIREG_READ(ncr_sc, SIREG_BPRL);

#ifdef DEBUG
		printf("si: got left-over bytes: bprh=%x, bprl=%x, csr=%x\n",
		    bprh, bprl, csr);
#endif

		if (csr & SI_CSR_BPCON) {
			/* have SI_CSR_BPCON */
			cp[-1] = (bprl & 0xff00) >> 8;
		} else {
			switch (csr & SI_CSR_LOB) {
			case SI_CSR_LOB_THREE:
				cp[-3] = (bprh & 0xff00) >> 8;
				cp[-2] = (bprh & 0x00ff);
				cp[-1] = (bprl & 0xff00) >> 8;
				break;
			case SI_CSR_LOB_TWO:
				cp[-2] = (bprh & 0xff00) >> 8;
				cp[-1] = (bprh & 0x00ff);
				break;
			case SI_CSR_LOB_ONE:
				cp[-1] = (bprh & 0xff00) >> 8;
				break;
			}
		}
	}

out:
	/* Leave the DMA engine idle: zero address and counters. */
	SIREG_WRITE(ncr_sc, SIREG_DMA_ADDRH, 0);
	SIREG_WRITE(ncr_sc, SIREG_DMA_ADDRL, 0);

	SIREG_WRITE(ncr_sc, SIREG_DMA_CNTH, 0);
	SIREG_WRITE(ncr_sc, SIREG_DMA_CNTL, 0);

	SIREG_WRITE(ncr_sc, SIREG_FIFO_CNTH, 0);
	SIREG_WRITE(ncr_sc, SIREG_FIFO_CNT, 0);

	mode = NCR5380_READ(ncr_sc, sci_mode);
	/* Put SBIC back in PIO mode. */
	mode &= ~(SCI_MODE_DMA | SCI_MODE_DMA_IE);
	NCR5380_WRITE(ncr_sc, sci_mode, mode);
	NCR5380_WRITE(ncr_sc, sci_icmd, 0);
}
/* Cache object: 3bc62aaddb869fd6ca9b7ff87af64ecc */