FreeBSD/Linux Kernel Cross Reference
sys/dev/ic/dpt.c
1 /* $NetBSD: dpt.c,v 1.54.2.1 2006/12/04 18:34:15 tron Exp $ */
2
3 /*-
4 * Copyright (c) 1997, 1998, 1999, 2000, 2001 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Andrew Doran, Charles M. Hannum and by Jason R. Thorpe of the Numerical
9 * Aerospace Simulation Facility, NASA Ames Research Center.
10 *
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
13 * are met:
14 * 1. Redistributions of source code must retain the above copyright
15 * notice, this list of conditions and the following disclaimer.
16 * 2. Redistributions in binary form must reproduce the above copyright
17 * notice, this list of conditions and the following disclaimer in the
18 * documentation and/or other materials provided with the distribution.
19 * 3. All advertising materials mentioning features or use of this software
20 * must display the following acknowledgement:
21 * This product includes software developed by the NetBSD
22 * Foundation, Inc. and its contributors.
23 * 4. Neither the name of The NetBSD Foundation nor the names of its
24 * contributors may be used to endorse or promote products derived
25 * from this software without specific prior written permission.
26 *
27 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
28 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
29 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
30 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
31 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
32 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
33 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
34 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
35 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
36 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
37 * POSSIBILITY OF SUCH DAMAGE.
38 */
39
40 /*
41 * Copyright (c) 1996-2000 Distributed Processing Technology Corporation
42 * Copyright (c) 2000 Adaptec Corporation
43 * All rights reserved.
44 *
45 * TERMS AND CONDITIONS OF USE
46 *
47 * Redistribution and use in source form, with or without modification, are
48 * permitted provided that redistributions of source code must retain the
49 * above copyright notice, this list of conditions and the following disclaimer.
50 *
51 * This software is provided `as is' by Adaptec and any express or implied
52 * warranties, including, but not limited to, the implied warranties of
53 * merchantability and fitness for a particular purpose, are disclaimed. In no
54 * event shall Adaptec be liable for any direct, indirect, incidental, special,
55 * exemplary or consequential damages (including, but not limited to,
56 * procurement of substitute goods or services; loss of use, data, or profits;
57 * or business interruptions) however caused and on any theory of liability,
58 * whether in contract, strict liability, or tort (including negligence or
59 * otherwise) arising in any way out of the use of this driver software, even
60 * if advised of the possibility of such damage.
61 */
62
63 /*
64 * Portions of this code fall under the following copyright:
65 *
66 * Originally written by Julian Elischer (julian@tfs.com)
67 * for TRW Financial Systems for use under the MACH(2.5) operating system.
68 *
69 * TRW Financial Systems, in accordance with their agreement with Carnegie
70 * Mellon University, makes this software available to CMU to distribute
71 * or use in any manner that they see fit as long as this message is kept with
72 * the software. For this reason TFS also grants any other persons or
73 * organisations permission to use or modify this software.
74 *
75 * TFS supplies this software to be publicly redistributed
76 * on the understanding that TFS is not responsible for the correct
77 * functioning of this software in any circumstances.
78 */
79
80 #include <sys/cdefs.h>
81 __KERNEL_RCSID(0, "$NetBSD: dpt.c,v 1.54.2.1 2006/12/04 18:34:15 tron Exp $");
82
83 #include <sys/param.h>
84 #include <sys/systm.h>
85 #include <sys/device.h>
86 #include <sys/queue.h>
87 #include <sys/buf.h>
88 #include <sys/endian.h>
89 #include <sys/conf.h>
90 #include <sys/kauth.h>
91
92 #include <uvm/uvm_extern.h>
93
94 #include <machine/bus.h>
95 #ifdef i386
96 #include <machine/pio.h>
97 #endif
98
99 #include <dev/scsipi/scsi_all.h>
100 #include <dev/scsipi/scsipi_all.h>
101 #include <dev/scsipi/scsiconf.h>
102
103 #include <dev/ic/dptreg.h>
104 #include <dev/ic/dptvar.h>
105
106 #include <dev/i2o/dptivar.h>
107
108 #ifdef DEBUG
109 #define DPRINTF(x) printf x
110 #else
111 #define DPRINTF(x)
112 #endif
113
114 #define dpt_inb(x, o) \
115 bus_space_read_1((x)->sc_iot, (x)->sc_ioh, (o))
116 #define dpt_outb(x, o, d) \
117 bus_space_write_1((x)->sc_iot, (x)->sc_ioh, (o), (d))
118
/*
 * Map of four-character board model codes to marketing names.  The table
 * is scanned in pairs (code, name); the trailing NULL/"<unknown>" pair
 * terminates the scan and doubles as the fallback name.
 */
119 static const char * const dpt_cname[] = {
120 "3334", "SmartRAID IV",
121 "3332", "SmartRAID IV",
122 "2144", "SmartCache IV",
123 "2044", "SmartCache IV",
124 "2142", "SmartCache IV",
125 "2042", "SmartCache IV",
126 "2041", "SmartCache IV",
127 "3224", "SmartRAID III",
128 "3222", "SmartRAID III",
129 "3021", "SmartRAID III",
130 "2124", "SmartCache III",
131 "2024", "SmartCache III",
132 "2122", "SmartCache III",
133 "2022", "SmartCache III",
134 "2021", "SmartCache III",
135 "2012", "SmartCache Plus",
136 "2011", "SmartCache Plus",
137 NULL, "<unknown>",
138 };
139
/* Shutdown hook handle; established once for all units (see dpt_init()). */
140 static void *dpt_sdh;
141 
142 dev_type_open(dptopen);
143 dev_type_ioctl(dptioctl);
144 
/* Character-device switch for the management/control device. */
145 const struct cdevsw dpt_cdevsw = {
146 dptopen, nullclose, noread, nowrite, dptioctl,
147 nostop, notty, nopoll, nommap, nokqfilter, D_OTHER,
148 };
149
150 extern struct cfdriver dpt_cd;
151
/*
 * DPT "signature" structure, handed to management tools through the
 * DPT_SIGNATURE ioctl (see dptioctl()).  The processor family and
 * feature fields are chosen at compile time; dsDescription is filled
 * in at attach time by dpt_init().
 */
152 static struct dpt_sig dpt_sig = {
153 { 'd', 'P', 't', 'S', 'i', 'G'},
154 SIG_VERSION,
155 #if defined(i386)
156 PROC_INTEL,
157 #elif defined(powerpc)
158 PROC_POWERPC,
159 #elif defined(alpha)
160 PROC_ALPHA,
161 #elif defined(__mips__)
162 PROC_MIPS,
163 #elif defined(sparc64)
164 PROC_ULTRASPARC,
165 #else
166 0xff,
167 #endif
168 #if defined(i386)
169 PROC_386 | PROC_486 | PROC_PENTIUM | PROC_SEXIUM,
170 #else
171 0,
172 #endif
173 FT_HBADRVR,
174 0,
175 OEM_DPT,
176 OS_FREE_BSD, /* XXX */
177 CAP_ABOVE16MB,
178 DEV_ALL,
179 ADF_ALL_EATA,
180 0,
181 0,
182 DPT_VERSION,
183 DPT_REVISION,
184 DPT_SUBREVISION,
185 DPT_MONTH,
186 DPT_DAY,
187 DPT_YEAR,
188 "" /* Will be filled later */
189 };
190
191 static void dpt_ccb_abort(struct dpt_softc *, struct dpt_ccb *);
192 static void dpt_ccb_done(struct dpt_softc *, struct dpt_ccb *);
193 static int dpt_ccb_map(struct dpt_softc *, struct dpt_ccb *);
194 static int dpt_ccb_poll(struct dpt_softc *, struct dpt_ccb *);
195 static void dpt_ccb_unmap(struct dpt_softc *, struct dpt_ccb *);
196 static int dpt_cmd(struct dpt_softc *, struct dpt_ccb *, int, int);
197 static void dpt_ctlrinfo(struct dpt_softc *, struct dpt_eata_ctlrinfo *);
198 static void dpt_hba_inquire(struct dpt_softc *, struct eata_inquiry_data **);
199 static void dpt_minphys(struct buf *);
200 static int dpt_passthrough(struct dpt_softc *, struct eata_ucp *,
201 struct lwp *);
202 static void dpt_scsipi_request(struct scsipi_channel *,
203 scsipi_adapter_req_t, void *);
204 static void dpt_shutdown(void *);
205 static void dpt_sysinfo(struct dpt_softc *, struct dpt_sysinfo *);
206 static int dpt_wait(struct dpt_softc *, u_int8_t, u_int8_t, int);
207
208 static inline struct dpt_ccb *dpt_ccb_alloc(struct dpt_softc *);
209 static inline void dpt_ccb_free(struct dpt_softc *, struct dpt_ccb *);
210
211 static inline struct dpt_ccb *
212 dpt_ccb_alloc(struct dpt_softc *sc)
213 {
214 struct dpt_ccb *ccb;
215 int s;
216
217 s = splbio();
218 ccb = SLIST_FIRST(&sc->sc_ccb_free);
219 SLIST_REMOVE_HEAD(&sc->sc_ccb_free, ccb_chain);
220 splx(s);
221
222 return (ccb);
223 }
224
225 static inline void
226 dpt_ccb_free(struct dpt_softc *sc, struct dpt_ccb *ccb)
227 {
228 int s;
229
230 ccb->ccb_flg = 0;
231 ccb->ccb_savesp = NULL;
232 s = splbio();
233 SLIST_INSERT_HEAD(&sc->sc_ccb_free, ccb, ccb_chain);
234 splx(s);
235 }
236
237 /*
238 * Handle an interrupt from the HBA.
239 */
240 int
241 dpt_intr(void *cookie)
242 {
243 struct dpt_softc *sc;
244 struct dpt_ccb *ccb;
245 struct eata_sp *sp;
/*
 * volatile so the compiler cannot discard the HA_STATUS reads below;
 * reading the status register is what acknowledges the interrupt.
 */
246 volatile int junk;
247 int forus;
248 
249 sc = cookie;
250 sp = sc->sc_stp;
251 forus = 0;
252 
253 for (;;) {
254 /*
255 * HBA might have interrupted while we were dealing with the
256 * last completed command, since we ACK before we deal; keep
257 * polling.
258 */
259 if ((dpt_inb(sc, HA_AUX_STATUS) & HA_AUX_INTR) == 0)
260 break;
261 forus = 1;
262 
/* Pull the freshly DMA'd status packet into view. */
263 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, sc->sc_stpoff,
264 sizeof(struct eata_sp), BUS_DMASYNC_POSTREAD);
265 
266 /* Might have looped before HBA can reset HBA_AUX_INTR. */
267 if (sp->sp_ccbid == -1) {
268 DELAY(50);
269 
270 if ((dpt_inb(sc, HA_AUX_STATUS) & HA_AUX_INTR) == 0)
271 return (0);
272 
273 printf("%s: no status\n", sc->sc_dv.dv_xname);
274 
275 /* Re-sync DMA map */
276 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
277 sc->sc_stpoff, sizeof(struct eata_sp),
278 BUS_DMASYNC_POSTREAD);
279 }
280 
281 /* Make sure CCB ID from status packet is realistic. */
282 if ((u_int)sp->sp_ccbid >= sc->sc_nccbs) {
283 printf("%s: bogus status (returned CCB id %d)\n",
284 sc->sc_dv.dv_xname, sp->sp_ccbid);
285 
286 /* Ack the interrupt */
287 sp->sp_ccbid = -1;
288 junk = dpt_inb(sc, HA_STATUS);
289 continue;
290 }
291 
292 /* Sync up DMA map and cache cmd status. */
293 ccb = sc->sc_ccbs + sp->sp_ccbid;
294 
295 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, CCB_OFF(sc, ccb),
296 sizeof(struct dpt_ccb), BUS_DMASYNC_POSTWRITE);
297 
/* Keep only the low seven bits of the HBA status byte. */
298 ccb->ccb_hba_status = sp->sp_hba_status & 0x7f;
299 ccb->ccb_scsi_status = sp->sp_scsi_status;
/*
 * NOTE(review): some requesters ask for a copy of the raw status
 * packet via ccb_savesp — presumably the ioctl passthrough path;
 * confirm against dpt_passthrough().
 */
300 if (ccb->ccb_savesp != NULL)
301 memcpy(ccb->ccb_savesp, sp, sizeof(*sp));
302 
303 /*
304 * Ack the interrupt and process the CCB. If this
305 * is a private CCB it's up to dpt_ccb_poll() to
306 * notice.
307 */
308 sp->sp_ccbid = -1;
309 ccb->ccb_flg |= CCB_INTR;
310 junk = dpt_inb(sc, HA_STATUS);
311 if ((ccb->ccb_flg & CCB_PRIVATE) == 0)
312 dpt_ccb_done(sc, ccb);
313 else if ((ccb->ccb_flg & CCB_WAIT) != 0)
314 wakeup(ccb);
315 }
316 
317 return (forus);
318 }
319
320 /*
321 * Initialize and attach the HBA. This is the entry point from bus
322 * specific probe-and-attach code.
323 */
324 void
325 dpt_init(struct dpt_softc *sc, const char *intrstr)
326 {
327 struct scsipi_adapter *adapt;
328 struct scsipi_channel *chan;
329 struct eata_inquiry_data *ei;
330 int i, j, rv, rseg, maxchannel, maxtarget, mapsize;
331 bus_dma_segment_t seg;
332 struct eata_cfg *ec;
333 struct dpt_ccb *ccb;
334 char model[__arraycount(ei->ei_model) + __arraycount(ei->ei_suffix) + 1];
335 char vendor[__arraycount(ei->ei_vendor) + 1];
336
337 ec = &sc->sc_ec;
338 snprintf(dpt_sig.dsDescription, sizeof(dpt_sig.dsDescription),
339 "NetBSD %s DPT driver", osrelease);
340
341 /*
342 * Allocate the CCB/status packet/scratch DMA map and load.
343 */
344 sc->sc_nccbs =
345 min(be16toh(*(int16_t *)ec->ec_queuedepth), DPT_MAX_CCBS);
346 sc->sc_stpoff = sc->sc_nccbs * sizeof(struct dpt_ccb);
347 sc->sc_scroff = sc->sc_stpoff + sizeof(struct eata_sp);
348 mapsize = sc->sc_nccbs * sizeof(struct dpt_ccb) +
349 DPT_SCRATCH_SIZE + sizeof(struct eata_sp);
350
351 if ((rv = bus_dmamem_alloc(sc->sc_dmat, mapsize,
352 PAGE_SIZE, 0, &seg, 1, &rseg, BUS_DMA_NOWAIT)) != 0) {
353 aprint_error("%s: unable to allocate CCBs, rv = %d\n",
354 sc->sc_dv.dv_xname, rv);
355 return;
356 }
357
358 if ((rv = bus_dmamem_map(sc->sc_dmat, &seg, rseg, mapsize,
359 (caddr_t *)&sc->sc_ccbs, BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0) {
360 aprint_error("%s: unable to map CCBs, rv = %d\n",
361 sc->sc_dv.dv_xname, rv);
362 return;
363 }
364
365 if ((rv = bus_dmamap_create(sc->sc_dmat, mapsize,
366 mapsize, 1, 0, BUS_DMA_NOWAIT, &sc->sc_dmamap)) != 0) {
367 aprint_error("%s: unable to create CCB DMA map, rv = %d\n",
368 sc->sc_dv.dv_xname, rv);
369 return;
370 }
371
372 if ((rv = bus_dmamap_load(sc->sc_dmat, sc->sc_dmamap,
373 sc->sc_ccbs, mapsize, NULL, BUS_DMA_NOWAIT)) != 0) {
374 aprint_error("%s: unable to load CCB DMA map, rv = %d\n",
375 sc->sc_dv.dv_xname, rv);
376 return;
377 }
378
379 sc->sc_stp = (struct eata_sp *)((caddr_t)sc->sc_ccbs + sc->sc_stpoff);
380 sc->sc_stppa = sc->sc_dmamap->dm_segs[0].ds_addr + sc->sc_stpoff;
381 sc->sc_scr = (caddr_t)sc->sc_ccbs + sc->sc_scroff;
382 sc->sc_scrpa = sc->sc_dmamap->dm_segs[0].ds_addr + sc->sc_scroff;
383 sc->sc_stp->sp_ccbid = -1;
384
385 /*
386 * Create the CCBs.
387 */
388 SLIST_INIT(&sc->sc_ccb_free);
389 memset(sc->sc_ccbs, 0, sizeof(struct dpt_ccb) * sc->sc_nccbs);
390
391 for (i = 0, ccb = sc->sc_ccbs; i < sc->sc_nccbs; i++, ccb++) {
392 rv = bus_dmamap_create(sc->sc_dmat, DPT_MAX_XFER,
393 DPT_SG_SIZE, DPT_MAX_XFER, 0,
394 BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
395 &ccb->ccb_dmamap_xfer);
396 if (rv) {
397 aprint_error("%s: can't create ccb dmamap (%d)\n",
398 sc->sc_dv.dv_xname, rv);
399 break;
400 }
401
402 ccb->ccb_id = i;
403 ccb->ccb_ccbpa = sc->sc_dmamap->dm_segs[0].ds_addr +
404 CCB_OFF(sc, ccb);
405 SLIST_INSERT_HEAD(&sc->sc_ccb_free, ccb, ccb_chain);
406 }
407
408 if (i == 0) {
409 aprint_error("%s: unable to create CCBs\n", sc->sc_dv.dv_xname);
410 return;
411 } else if (i != sc->sc_nccbs) {
412 aprint_error("%s: %d/%d CCBs created!\n", sc->sc_dv.dv_xname,
413 i, sc->sc_nccbs);
414 sc->sc_nccbs = i;
415 }
416
417 /* Set shutdownhook before we start any device activity. */
418 if (dpt_sdh == NULL)
419 dpt_sdh = shutdownhook_establish(dpt_shutdown, NULL);
420
421 /* Get the inquiry data from the HBA. */
422 dpt_hba_inquire(sc, &ei);
423
424 /*
425 * dpt0 at pci0 dev 12 function 0: DPT SmartRAID III (PM3224A/9X-R)
426 * dpt0: interrupting at irq 10
427 * dpt0: 64 queued commands, 1 channel(s), adapter on ID(s) 7
428 */
429 for (i = 0; ei->ei_vendor[i] != ' ' && i < __arraycount(ei->ei_vendor);
430 i++)
431 vendor[i] = ei->ei_vendor[i];
432 vendor[i] = '\0';
433
434 for (i = 0; ei->ei_model[i] != ' ' && i < __arraycount(ei->ei_model);
435 i++)
436 model[i] = ei->ei_model[i];
437 for (j = 0; ei->ei_suffix[j] != ' ' && j < __arraycount(ei->ei_suffix);
438 i++, j++)
439 model[i] = ei->ei_suffix[j];
440 model[i] = '\0';
441
442 /* Find the marketing name for the board. */
443 for (i = 0; dpt_cname[i] != NULL; i += 2)
444 if (memcmp(ei->ei_model + 2, dpt_cname[i], 4) == 0)
445 break;
446
447 aprint_normal("%s %s (%s)\n", vendor, dpt_cname[i + 1], model);
448
449 if (intrstr != NULL)
450 aprint_normal("%s: interrupting at %s\n", sc->sc_dv.dv_xname,
451 intrstr);
452
453 maxchannel = (ec->ec_feat3 & EC_F3_MAX_CHANNEL_MASK) >>
454 EC_F3_MAX_CHANNEL_SHIFT;
455 maxtarget = (ec->ec_feat3 & EC_F3_MAX_TARGET_MASK) >>
456 EC_F3_MAX_TARGET_SHIFT;
457
458 aprint_normal("%s: %d queued commands, %d channel(s), adapter on ID(s)",
459 sc->sc_dv.dv_xname, sc->sc_nccbs, maxchannel + 1);
460
461 for (i = 0; i <= maxchannel; i++) {
462 sc->sc_hbaid[i] = ec->ec_hba[3 - i];
463 aprint_normal(" %d", sc->sc_hbaid[i]);
464 }
465 aprint_normal("\n");
466
467 /*
468 * Reset the SCSI controller chip(s) and bus. XXX Do we need to do
469 * this for each bus?
470 */
471 if (dpt_cmd(sc, NULL, CP_IMMEDIATE, CPI_BUS_RESET))
472 panic("%s: dpt_cmd failed", sc->sc_dv.dv_xname);
473
474 /* Fill in the scsipi_adapter. */
475 adapt = &sc->sc_adapt;
476 memset(adapt, 0, sizeof(*adapt));
477 adapt->adapt_dev = &sc->sc_dv;
478 adapt->adapt_nchannels = maxchannel + 1;
479 adapt->adapt_openings = sc->sc_nccbs - 1;
480 adapt->adapt_max_periph = sc->sc_nccbs - 1;
481 adapt->adapt_request = dpt_scsipi_request;
482 adapt->adapt_minphys = dpt_minphys;
483
484 for (i = 0; i <= maxchannel; i++) {
485 /* Fill in the scsipi_channel. */
486 chan = &sc->sc_chans[i];
487 memset(chan, 0, sizeof(*chan));
488 chan->chan_adapter = adapt;
489 chan->chan_bustype = &scsi_bustype;
490 chan->chan_channel = i;
491 chan->chan_ntargets = maxtarget + 1;
492 chan->chan_nluns = ec->ec_maxlun + 1;
493 chan->chan_id = sc->sc_hbaid[i];
494 config_found(&sc->sc_dv, chan, scsiprint);
495 }
496 }
497
498 /*
499 * Read the EATA configuration from the HBA and perform some sanity checks.
500 */
501 int
502 dpt_readcfg(struct dpt_softc *sc)
503 {
504 struct eata_cfg *ec;
505 int i, j, stat;
506 u_int16_t *p;
507 
508 ec = &sc->sc_ec;
509 
510 /* Older firmware may puke if we talk to it too soon after reset. */
511 dpt_outb(sc, HA_COMMAND, CP_RESET);
512 DELAY(750000);
513 
/* Poll for the ready bit for up to ~2 seconds (1000 * 2ms). */
514 for (i = 1000; i; i--) {
515 if ((dpt_inb(sc, HA_STATUS) & HA_ST_READY) != 0)
516 break;
517 DELAY(2000);
518 }
519 
520 if (i == 0) {
521 printf("%s: HBA not ready after reset (hba status:%02x)\n",
522 sc->sc_dv.dv_xname, dpt_inb(sc, HA_STATUS));
523 return (-1);
524 }
525 
/*
 * Accept READY|SEEK_COMPLETE, optionally with ERROR and DRQ also set;
 * any other status means the board is still settling.  While waiting,
 * the error registers should spell "DPT" -- if not, give up.
 */
526 while((((stat = dpt_inb(sc, HA_STATUS))
527 != (HA_ST_READY|HA_ST_SEEK_COMPLETE))
528 && (stat != (HA_ST_READY|HA_ST_SEEK_COMPLETE|HA_ST_ERROR))
529 && (stat != (HA_ST_READY|HA_ST_SEEK_COMPLETE|HA_ST_ERROR|HA_ST_DRQ)))
530 || (dpt_wait(sc, HA_ST_BUSY, 0, 2000))) {
531 /* RAID drives still spinning up? */
532 if(dpt_inb(sc, HA_ERROR) != 'D' ||
533 dpt_inb(sc, HA_ERROR + 1) != 'P' ||
534 dpt_inb(sc, HA_ERROR + 2) != 'T') {
535 printf("%s: HBA not ready\n", sc->sc_dv.dv_xname);
536 return (-1);
537 }
538 }
539 
540 /*
541 * Issue the read-config command and wait for the data to appear.
542 *
543 * Apparently certain firmware revisions won't DMA later on if we
544 * request the config data using PIO, but it makes it a lot easier
545 * as no DMA setup is required.
546 */
547 dpt_outb(sc, HA_COMMAND, CP_PIO_GETCFG);
548 memset(ec, 0, sizeof(*ec));
/*
 * i = number of 16-bit words in the fixed leading portion of the
 * structure, up to and including ec_cfglen (hand-rolled offsetof).
 */
549 i = ((int)&((struct eata_cfg *)0)->ec_cfglen +
550 sizeof(ec->ec_cfglen)) >> 1;
551 p = (u_int16_t *)ec;
552 
553 if (dpt_wait(sc, 0xFF, HA_ST_DATA_RDY, 2000)) {
554 printf("%s: cfg data didn't appear (hba status:%02x)\n",
555 sc->sc_dv.dv_xname, dpt_inb(sc, HA_STATUS));
556 return (-1);
557 }
558 
559 /* Begin reading. */
560 while (i--)
561 *p++ = bus_space_read_stream_2(sc->sc_iot, sc->sc_ioh, HA_DATA);
562 
/* Clamp the variable-length remainder to what the structure can hold. */
563 if ((i = ec->ec_cfglen) > (sizeof(struct eata_cfg)
564 - (int)(&(((struct eata_cfg *)0L)->ec_cfglen))
565 - sizeof(ec->ec_cfglen)))
566 i = sizeof(struct eata_cfg)
567 - (int)(&(((struct eata_cfg *)0L)->ec_cfglen))
568 - sizeof(ec->ec_cfglen);
569 
/* j = total bytes of config data that will have been consumed. */
570 j = i + (int)(&(((struct eata_cfg *)0L)->ec_cfglen)) +
571 sizeof(ec->ec_cfglen);
572 i >>= 1;
573 
574 while (i--)
575 *p++ = bus_space_read_stream_2(sc->sc_iot, sc->sc_ioh, HA_DATA);
576 
577 /* Flush until we have read 512 bytes. */
578 i = (512 - j + 1) >> 1;
579 while (i--)
580 (void)bus_space_read_stream_2(sc->sc_iot, sc->sc_ioh, HA_DATA);
581 
582 /* Defaults for older firmware... */
583 if (p <= (u_short *)&ec->ec_hba[DPT_MAX_CHANNELS - 1])
584 ec->ec_hba[DPT_MAX_CHANNELS - 1] = 7;
585 
586 if ((dpt_inb(sc, HA_STATUS) & HA_ST_ERROR) != 0) {
587 printf("%s: HBA error\n", sc->sc_dv.dv_xname);
588 return (-1);
589 }
590 
591 if (memcmp(ec->ec_eatasig, "EATA", 4) != 0) {
592 printf("%s: EATA signature mismatch\n", sc->sc_dv.dv_xname);
593 return (-1);
594 }
595 
596 if ((ec->ec_feat0 & EC_F0_HBA_VALID) == 0) {
597 printf("%s: ec_hba field invalid\n", sc->sc_dv.dv_xname);
598 return (-1);
599 }
600 
601 if ((ec->ec_feat0 & EC_F0_DMA_SUPPORTED) == 0) {
602 printf("%s: DMA not supported\n", sc->sc_dv.dv_xname);
603 return (-1);
604 }
605 
606 return (0);
607 }
608
609 /*
610 * Our `shutdownhook' to cleanly shut down the HBA. The HBA must flush all
611 * data from it's cache and mark array groups as clean.
612 *
613 * XXX This doesn't always work (i.e., the HBA may still be flushing after
614 * we tell root that it's safe to power off).
615 */
616 static void
617 dpt_shutdown(void *cookie)
618 {
619 extern struct cfdriver dpt_cd;
620 struct dpt_softc *sc;
621 int i;
622
623 printf("shutting down dpt devices...");
624
625 for (i = 0; i < dpt_cd.cd_ndevs; i++) {
626 if ((sc = device_lookup(&dpt_cd, i)) == NULL)
627 continue;
628 dpt_cmd(sc, NULL, CP_IMMEDIATE, CPI_POWEROFF_WARN);
629 }
630
631 delay(10000*1000);
632 printf(" done\n");
633 }
634
635 /*
636 * Send an EATA command to the HBA.
637 */
638 static int
639 dpt_cmd(struct dpt_softc *sc, struct dpt_ccb *ccb, int eatacmd, int icmd)
640 {
641 u_int32_t pa;
642 int i, s;
643
644 s = splbio();
645
646 for (i = 20000; i != 0; i--) {
647 if ((dpt_inb(sc, HA_AUX_STATUS) & HA_AUX_BUSY) == 0)
648 break;
649 DELAY(50);
650 }
651 if (i == 0) {
652 splx(s);
653 return (-1);
654 }
655
656 pa = (ccb != NULL ? ccb->ccb_ccbpa : 0);
657 dpt_outb(sc, HA_DMA_BASE + 0, (pa ) & 0xff);
658 dpt_outb(sc, HA_DMA_BASE + 1, (pa >> 8) & 0xff);
659 dpt_outb(sc, HA_DMA_BASE + 2, (pa >> 16) & 0xff);
660 dpt_outb(sc, HA_DMA_BASE + 3, (pa >> 24) & 0xff);
661
662 if (eatacmd == CP_IMMEDIATE)
663 dpt_outb(sc, HA_ICMD, icmd);
664
665 dpt_outb(sc, HA_COMMAND, eatacmd);
666
667 splx(s);
668 return (0);
669 }
670
671 /*
672 * Wait for the HBA status register to reach a specific state.
673 */
674 static int
675 dpt_wait(struct dpt_softc *sc, u_int8_t mask, u_int8_t state, int ms)
676 {
677
678 for (ms *= 10; ms != 0; ms--) {
679 if ((dpt_inb(sc, HA_STATUS) & mask) == state)
680 return (0);
681 DELAY(100);
682 }
683
684 return (-1);
685 }
686
687 /*
688 * Spin waiting for a command to finish. The timeout value from the CCB is
689 * used. The CCB must be marked with CCB_PRIVATE, otherwise it'll will get
690 * recycled before we get a look at it.
691 */
692 static int
693 dpt_ccb_poll(struct dpt_softc *sc, struct dpt_ccb *ccb)
694 {
695 int i, s;
696
697 #ifdef DEBUG
698 if ((ccb->ccb_flg & CCB_PRIVATE) == 0)
699 panic("dpt_ccb_poll: called for non-CCB_PRIVATE request");
700 #endif
701
702 s = splbio();
703
704 if ((ccb->ccb_flg & CCB_INTR) != 0) {
705 splx(s);
706 return (0);
707 }
708
709 for (i = ccb->ccb_timeout * 20; i != 0; i--) {
710 if ((dpt_inb(sc, HA_AUX_STATUS) & HA_AUX_INTR) != 0)
711 dpt_intr(sc);
712 if ((ccb->ccb_flg & CCB_INTR) != 0)
713 break;
714 DELAY(50);
715 }
716
717 splx(s);
718 return (i == 0);
719 }
720
721 /*
722 * We have a command which has been processed by the HBA, so now we look to
723 * see how the operation went. CCBs marked CCB_PRIVATE are not passed here
724 * by dpt_intr().
725 */
726 static void
727 dpt_ccb_done(struct dpt_softc *sc, struct dpt_ccb *ccb)
728 {
729 struct scsipi_xfer *xs;
730
731 xs = ccb->ccb_xs;
732
733 SC_DEBUG(xs->xs_periph, SCSIPI_DB2, ("dpt_ccb_done\n"));
734
735 /*
736 * If we were a data transfer, unload the map that described the
737 * data buffer.
738 */
739 if (xs->datalen != 0)
740 dpt_ccb_unmap(sc, ccb);
741
742 if (xs->error == XS_NOERROR) {
743 if (ccb->ccb_hba_status != SP_HBA_NO_ERROR) {
744 switch (ccb->ccb_hba_status) {
745 case SP_HBA_ERROR_SEL_TO:
746 xs->error = XS_SELTIMEOUT;
747 break;
748 case SP_HBA_ERROR_RESET:
749 xs->error = XS_RESET;
750 break;
751 default:
752 printf("%s: HBA status %x\n",
753 sc->sc_dv.dv_xname, ccb->ccb_hba_status);
754 xs->error = XS_DRIVER_STUFFUP;
755 break;
756 }
757 } else if (ccb->ccb_scsi_status != SCSI_OK) {
758 switch (ccb->ccb_scsi_status) {
759 case SCSI_CHECK:
760 memcpy(&xs->sense.scsi_sense, &ccb->ccb_sense,
761 sizeof(xs->sense.scsi_sense));
762 xs->error = XS_SENSE;
763 break;
764 case SCSI_BUSY:
765 case SCSI_QUEUE_FULL:
766 xs->error = XS_BUSY;
767 break;
768 default:
769 scsipi_printaddr(xs->xs_periph);
770 printf("SCSI status %x\n",
771 ccb->ccb_scsi_status);
772 xs->error = XS_DRIVER_STUFFUP;
773 break;
774 }
775 } else
776 xs->resid = 0;
777
778 xs->status = ccb->ccb_scsi_status;
779 }
780
781 /* Free up the CCB and mark the command as done. */
782 dpt_ccb_free(sc, ccb);
783 scsipi_done(xs);
784 }
785
786 /*
787 * Specified CCB has timed out, abort it.
788 */
789 static void
790 dpt_ccb_abort(struct dpt_softc *sc, struct dpt_ccb *ccb)
791 {
792 struct scsipi_periph *periph;
793 struct scsipi_xfer *xs;
794 int s;
795
796 xs = ccb->ccb_xs;
797 periph = xs->xs_periph;
798
799 scsipi_printaddr(periph);
800 printf("timed out (status:%02x aux status:%02x)",
801 dpt_inb(sc, HA_STATUS), dpt_inb(sc, HA_AUX_STATUS));
802
803 s = splbio();
804
805 if ((ccb->ccb_flg & CCB_ABORT) != 0) {
806 /* Abort timed out, reset the HBA */
807 printf(" AGAIN, resetting HBA\n");
808 dpt_outb(sc, HA_COMMAND, CP_RESET);
809 DELAY(750000);
810 } else {
811 /* Abort the operation that has timed out */
812 printf("\n");
813 xs->error = XS_TIMEOUT;
814 ccb->ccb_timeout = DPT_ABORT_TIMEOUT;
815 ccb->ccb_flg |= CCB_ABORT;
816 /* Start the abort */
817 if (dpt_cmd(sc, ccb, CP_IMMEDIATE, CPI_SPEC_ABORT))
818 printf("%s: dpt_cmd failed\n", sc->sc_dv.dv_xname);
819 }
820
821 splx(s);
822 }
823
824 /*
825 * Map a data transfer.
826 */
827 static int
828 dpt_ccb_map(struct dpt_softc *sc, struct dpt_ccb *ccb)
829 {
830 struct scsipi_xfer *xs;
831 bus_dmamap_t xfer;
832 bus_dma_segment_t *ds;
833 struct eata_sg *sg;
834 struct eata_cp *cp;
835 int rv, i;
836
837 xs = ccb->ccb_xs;
838 xfer = ccb->ccb_dmamap_xfer;
839 cp = &ccb->ccb_eata_cp;
840
841 rv = bus_dmamap_load(sc->sc_dmat, xfer, xs->data, xs->datalen, NULL,
842 ((xs->xs_control & XS_CTL_NOSLEEP) != 0 ?
843 BUS_DMA_NOWAIT : BUS_DMA_WAITOK) | BUS_DMA_STREAMING |
844 ((xs->xs_control & XS_CTL_DATA_IN) ? BUS_DMA_READ : BUS_DMA_WRITE));
845
846 switch (rv) {
847 case 0:
848 break;
849 case ENOMEM:
850 case EAGAIN:
851 xs->error = XS_RESOURCE_SHORTAGE;
852 break;
853 default:
854 xs->error = XS_DRIVER_STUFFUP;
855 printf("%s: error %d loading map\n", sc->sc_dv.dv_xname, rv);
856 break;
857 }
858
859 if (xs->error != XS_NOERROR) {
860 dpt_ccb_free(sc, ccb);
861 scsipi_done(xs);
862 return (-1);
863 }
864
865 bus_dmamap_sync(sc->sc_dmat, xfer, 0, xfer->dm_mapsize,
866 (xs->xs_control & XS_CTL_DATA_IN) != 0 ? BUS_DMASYNC_PREREAD :
867 BUS_DMASYNC_PREWRITE);
868
869 /* Don't bother using scatter/gather for just 1 seg */
870 if (xfer->dm_nsegs == 1) {
871 cp->cp_dataaddr = htobe32(xfer->dm_segs[0].ds_addr);
872 cp->cp_datalen = htobe32(xfer->dm_segs[0].ds_len);
873 } else {
874 /*
875 * Load the hardware scatter/gather map with
876 * the contents of the DMA map.
877 */
878 sg = ccb->ccb_sg;
879 ds = xfer->dm_segs;
880 for (i = 0; i < xfer->dm_nsegs; i++, sg++, ds++) {
881 sg->sg_addr = htobe32(ds->ds_addr);
882 sg->sg_len = htobe32(ds->ds_len);
883 }
884 cp->cp_dataaddr = htobe32(CCB_OFF(sc, ccb) +
885 sc->sc_dmamap->dm_segs[0].ds_addr +
886 offsetof(struct dpt_ccb, ccb_sg));
887 cp->cp_datalen = htobe32(i * sizeof(struct eata_sg));
888 cp->cp_ctl0 |= CP_C0_SCATTER;
889 }
890
891 return (0);
892 }
893
894 /*
895 * Unmap a transfer.
896 */
897 static void
898 dpt_ccb_unmap(struct dpt_softc *sc, struct dpt_ccb *ccb)
899 {
900
901 bus_dmamap_sync(sc->sc_dmat, ccb->ccb_dmamap_xfer, 0,
902 ccb->ccb_dmamap_xfer->dm_mapsize,
903 (ccb->ccb_eata_cp.cp_ctl0 & CP_C0_DATA_IN) != 0 ?
904 BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
905 bus_dmamap_unload(sc->sc_dmat, ccb->ccb_dmamap_xfer);
906 }
907
908 /*
909 * Adjust the size of each I/O before it passes to the SCSI layer.
910 */
911 static void
912 dpt_minphys(struct buf *bp)
913 {
914
915 if (bp->b_bcount > DPT_MAX_XFER)
916 bp->b_bcount = DPT_MAX_XFER;
917 minphys(bp);
918 }
919
920 /*
921 * Start a SCSI command.
922 */
923 static void
924 dpt_scsipi_request(struct scsipi_channel *chan, scsipi_adapter_req_t req,
925 void *arg)
926 {
927 struct dpt_softc *sc;
928 struct scsipi_xfer *xs;
929 int flags;
930 struct scsipi_periph *periph;
931 struct dpt_ccb *ccb;
932 struct eata_cp *cp;
933 
934 sc = (struct dpt_softc *)chan->chan_adapter->adapt_dev;
935 
936 switch (req) {
937 case ADAPTER_REQ_RUN_XFER:
938 xs = arg;
939 periph = xs->xs_periph;
940 flags = xs->xs_control;
941 
942 #ifdef DIAGNOSTIC
943 /* Cmds must be no more than 12 bytes for us. */
944 if (xs->cmdlen > 12) {
945 xs->error = XS_DRIVER_STUFFUP;
946 scsipi_done(xs);
947 break;
948 }
949 #endif
950 /*
951 * XXX We can't reset devices just yet. Apparently some
952 * older firmware revisions don't even support it.
953 */
954 if ((flags & XS_CTL_RESET) != 0) {
955 xs->error = XS_DRIVER_STUFFUP;
956 scsipi_done(xs);
957 break;
958 }
959 
960 /*
961 * Get a CCB and fill it.
962 */
963 ccb = dpt_ccb_alloc(sc);
964 ccb->ccb_xs = xs;
965 ccb->ccb_timeout = xs->timeout;
966 
/* Describe the request in the EATA command packet. */
967 cp = &ccb->ccb_eata_cp;
968 memcpy(&cp->cp_cdb_cmd, xs->cmd, xs->cmdlen);
969 cp->cp_ccbid = ccb->ccb_id;
970 cp->cp_senselen = sizeof(ccb->ccb_sense);
971 cp->cp_stataddr = htobe32(sc->sc_stppa);
972 cp->cp_ctl0 = CP_C0_AUTO_SENSE;
973 cp->cp_ctl1 = 0;
974 cp->cp_ctl2 = 0;
975 cp->cp_ctl3 = periph->periph_target << CP_C3_ID_SHIFT;
976 cp->cp_ctl3 |= chan->chan_channel << CP_C3_CHANNEL_SHIFT;
977 cp->cp_ctl4 = periph->periph_lun << CP_C4_LUN_SHIFT;
978 cp->cp_ctl4 |= CP_C4_DIS_PRI | CP_C4_IDENTIFY;
979 
980 if ((flags & XS_CTL_DATA_IN) != 0)
981 cp->cp_ctl0 |= CP_C0_DATA_IN;
982 if ((flags & XS_CTL_DATA_OUT) != 0)
983 cp->cp_ctl0 |= CP_C0_DATA_OUT;
/* Commands aimed at the HBA's own ID are interpreted on-board. */
984 if (sc->sc_hbaid[chan->chan_channel] == periph->periph_target)
985 cp->cp_ctl0 |= CP_C0_INTERPRET;
986 
987 /* Synchronous xfers mustn't write-back through the cache. */
988 if (xs->bp != NULL)
989 if ((xs->bp->b_flags & (B_ASYNC | B_READ)) == 0)
990 cp->cp_ctl2 |= CP_C2_NO_CACHE;
991 
/* Physical address of this CCB's sense buffer, for auto-sense. */
992 cp->cp_senseaddr =
993 htobe32(sc->sc_dmamap->dm_segs[0].ds_addr +
994 CCB_OFF(sc, ccb) + offsetof(struct dpt_ccb, ccb_sense));
995 
/* dpt_ccb_map() completes the xfer itself on failure. */
996 if (xs->datalen != 0) {
997 if (dpt_ccb_map(sc, ccb))
998 break;
999 } else {
1000 cp->cp_dataaddr = 0;
1001 cp->cp_datalen = 0;
1002 }
1003 
1004 /* Sync up CCB and status packet. */
1005 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
1006 CCB_OFF(sc, ccb), sizeof(struct dpt_ccb),
1007 BUS_DMASYNC_PREWRITE);
1008 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, sc->sc_stpoff,
1009 sizeof(struct eata_sp), BUS_DMASYNC_PREREAD);
1010 
1011 /*
1012 * Start the command.
1013 */
1014 if ((xs->xs_control & XS_CTL_POLL) != 0)
1015 ccb->ccb_flg |= CCB_PRIVATE;
1016 
1017 if (dpt_cmd(sc, ccb, CP_DMA_CMD, 0)) {
1018 printf("%s: dpt_cmd failed\n", sc->sc_dv.dv_xname);
1019 xs->error = XS_DRIVER_STUFFUP;
1020 if (xs->datalen != 0)
1021 dpt_ccb_unmap(sc, ccb);
1022 dpt_ccb_free(sc, ccb);
1023 break;
1024 }
1025 
/* Interrupt-driven completion: dpt_intr()/dpt_ccb_done() finish up. */
1026 if ((xs->xs_control & XS_CTL_POLL) == 0)
1027 break;
1028 
/* Polled completion: spin, aborting (twice at most) on timeout. */
1029 if (dpt_ccb_poll(sc, ccb)) {
1030 dpt_ccb_abort(sc, ccb);
1031 /* Wait for abort to complete... */
1032 if (dpt_ccb_poll(sc, ccb))
1033 dpt_ccb_abort(sc, ccb);
1034 }
1035 
1036 dpt_ccb_done(sc, ccb);
1037 break;
1038 
1039 case ADAPTER_REQ_GROW_RESOURCES:
1040 /*
1041 * Not supported, since we allocate the maximum number of
1042 * CCBs up front.
1043 */
1044 break;
1045 
1046 case ADAPTER_REQ_SET_XFER_MODE:
1047 /*
1048 * This will be handled by the HBA itself, and we can't
1049 * modify that (ditto for tagged queueing).
1050 */
1051 break;
1052 }
1053 }
1054
1055 /*
1056 * Get inquiry data from the adapter.
1057 */
1058 static void
1059 dpt_hba_inquire(struct dpt_softc *sc, struct eata_inquiry_data **ei)
1060 {
1061 struct dpt_ccb *ccb;
1062 struct eata_cp *cp;
1063 
/* The inquiry data lands in the shared DMA scratch area; the caller
 * receives a pointer into it. */
1064 *ei = (struct eata_inquiry_data *)sc->sc_scr;
1065 
1066 /* Get a CCB and mark as private */
1067 ccb = dpt_ccb_alloc(sc);
1068 ccb->ccb_flg |= CCB_PRIVATE;
1069 ccb->ccb_timeout = 200;
1070 
1071 /* Put all the arguments into the CCB. */
1072 cp = &ccb->ccb_eata_cp;
1073 cp->cp_ccbid = ccb->ccb_id;
1074 cp->cp_senselen = sizeof(ccb->ccb_sense);
1075 cp->cp_senseaddr = 0;
1076 cp->cp_stataddr = htobe32(sc->sc_stppa);
1077 cp->cp_dataaddr = htobe32(sc->sc_scrpa);
1078 cp->cp_datalen = htobe32(sizeof(struct eata_inquiry_data));
/* INTERPRET: the HBA answers this inquiry itself. */
1079 cp->cp_ctl0 = CP_C0_DATA_IN | CP_C0_INTERPRET;
1080 cp->cp_ctl1 = 0;
1081 cp->cp_ctl2 = 0;
1082 cp->cp_ctl3 = sc->sc_hbaid[0] << CP_C3_ID_SHIFT;
1083 cp->cp_ctl4 = CP_C4_DIS_PRI | CP_C4_IDENTIFY;
1084 
1085 /* Put together the SCSI inquiry command. */
1086 memset(&cp->cp_cdb_cmd, 0, 12);
1087 cp->cp_cdb_cmd = INQUIRY;
/* NOTE(review): cp_cdb_len appears to carry the INQUIRY allocation
 * length here -- presumably; verify against dptreg.h. */
1088 cp->cp_cdb_len = sizeof(struct eata_inquiry_data);
1089 
1090 /* Sync up CCB, status packet and scratch area. */
1091 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, CCB_OFF(sc, ccb),
1092 sizeof(struct dpt_ccb), BUS_DMASYNC_PREWRITE);
1093 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, sc->sc_stpoff,
1094 sizeof(struct eata_sp), BUS_DMASYNC_PREREAD);
1095 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, sc->sc_scroff,
1096 sizeof(struct eata_inquiry_data), BUS_DMASYNC_PREREAD);
1097 
/* This runs at attach time, so any failure is fatal. */
1098 /* Start the command and poll on completion. */
1099 if (dpt_cmd(sc, ccb, CP_DMA_CMD, 0))
1100 panic("%s: dpt_cmd failed", sc->sc_dv.dv_xname);
1101 
1102 if (dpt_ccb_poll(sc, ccb))
1103 panic("%s: inquiry timed out", sc->sc_dv.dv_xname);
1104 
1105 if (ccb->ccb_hba_status != SP_HBA_NO_ERROR ||
1106 ccb->ccb_scsi_status != SCSI_OK)
1107 panic("%s: inquiry failed (hba:%02x scsi:%02x)",
1108 sc->sc_dv.dv_xname, ccb->ccb_hba_status,
1109 ccb->ccb_scsi_status);
1110 
1111 /* Sync up the DMA map and free CCB, returning. */
1112 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, sc->sc_scroff,
1113 sizeof(struct eata_inquiry_data), BUS_DMASYNC_POSTREAD);
1114 dpt_ccb_free(sc, ccb);
1115 }
1116
1117 int
1118 dptopen(dev_t dev, int flag, int mode, struct lwp *l)
1119 {
1120
1121 if (device_lookup(&dpt_cd, minor(dev)) == NULL)
1122 return (ENXIO);
1123
1124 return (0);
1125 }
1126
/*
 * Control-device ioctl entry point: services the DPT management
 * interface (driver signature, controller/system information, blink-LED
 * status, and user-level EATA pass-through commands).
 */
1127 int
1128 dptioctl(dev_t dev, u_long cmd, caddr_t data, int flag, struct lwp *l)
1129 {
1130 	struct dpt_softc *sc;
1131 	int rv;
1132 
	/*
	 * NOTE(review): the result is not NULL-checked; dptopen() only
	 * admits minors with an attached softc, so presumably this cannot
	 * fail here -- confirm.
	 */
1133 	sc = device_lookup(&dpt_cd, minor(dev));
1134 
	/* Only the low 16 bits of the ioctl word select the command. */
1135 	switch (cmd & 0xffff) {
1136 	case DPT_SIGNATURE:
	/* Copy out at most the caller-supplied length of the signature. */
1137 		memcpy(data, &dpt_sig, min(IOCPARM_LEN(cmd), sizeof(dpt_sig)));
1138 		break;
1139 
1140 	case DPT_CTRLINFO:
1141 		dpt_ctlrinfo(sc, (struct dpt_eata_ctlrinfo *)data);
1142 		break;
1143 
1144 	case DPT_SYSINFO:
1145 		dpt_sysinfo(sc, (struct dpt_sysinfo *)data);
1146 		break;
1147 
1148 	case DPT_BLINKLED:
1149 		/*
1150 		 * XXX Don't know how to get this from EATA boards.  I think
1151 		 * it involves waiting for a "DPT" sequence from HA_ERROR
1152 		 * and then reading one of the HA_ICMD registers.
1153 		 */
1154 		*(int *)data = 0;
1155 		break;
1156 
1157 	case DPT_EATAUSRCMD:
	/* Raw pass-through requires device-passthru privilege. */
1158 		rv = kauth_authorize_device_passthru(l->l_cred, dev,
1159 		    KAUTH_REQ_DEVICE_RAWIO_PASSTHRU_ALL, data);
1160 		if (rv)
1161 			return (rv);
1162 
	/* The ioctl must carry a full user command packet. */
1163 		if (IOCPARM_LEN(cmd) < sizeof(struct eata_ucp)) {
1164 			DPRINTF(("%s: ucp %lu vs %lu bytes\n",
1165 			    sc->sc_dv.dv_xname, IOCPARM_LEN(cmd),
1166 			    (unsigned long int)sizeof(struct eata_ucp)));
1167 			return (EINVAL);
1168 		}
1169 
	/*
	 * Serialize pass-through commands: only one may be in flight
	 * per adapter; later arrivals sleep until the wakeup below.
	 */
1170 		if (sc->sc_uactive++)
1171 			tsleep(&sc->sc_uactive, PRIBIO, "dptslp", 0);
1172 
1173 		rv = dpt_passthrough(sc, (struct eata_ucp *)data, l);
1174 
1175 		sc->sc_uactive--;
1176 		wakeup_one(&sc->sc_uactive);
1177 		return (rv);
1178 
1179 	default:
1180 		DPRINTF(("%s: unknown ioctl %lx\n", sc->sc_dv.dv_xname, cmd));
1181 		return (ENOTTY);
1182 	}
1183 
1184 	return (0);
1185 }
1186
1187 void
1188 dpt_ctlrinfo(struct dpt_softc *sc, struct dpt_eata_ctlrinfo *info)
1189 {
1190
1191 memset(info, 0, sizeof(*info));
1192 info->id = sc->sc_hbaid[0];
1193 info->vect = sc->sc_isairq;
1194 info->base = sc->sc_isaport;
1195 info->qdepth = sc->sc_nccbs;
1196 info->sgsize = DPT_SG_SIZE * sizeof(struct eata_sg);
1197 info->heads = 16;
1198 info->sectors = 63;
1199 info->do_drive32 = 1;
1200 info->primary = 1;
1201 info->cpLength = sizeof(struct eata_cp);
1202 info->spLength = sizeof(struct eata_sp);
1203 info->drqNum = sc->sc_isadrq;
1204 }
1205
1206 void
1207 dpt_sysinfo(struct dpt_softc *sc, struct dpt_sysinfo *info)
1208 {
1209 #ifdef i386
1210 int i, j;
1211 #endif
1212
1213 memset(info, 0, sizeof(*info));
1214
1215 #ifdef i386
1216 outb (0x70, 0x12);
1217 i = inb(0x71);
1218 j = i >> 4;
1219 if (i == 0x0f) {
1220 outb (0x70, 0x19);
1221 j = inb (0x71);
1222 }
1223 info->drive0CMOS = j;
1224
1225 j = i & 0x0f;
1226 if (i == 0x0f) {
1227 outb (0x70, 0x1a);
1228 j = inb (0x71);
1229 }
1230 info->drive1CMOS = j;
1231 info->processorFamily = dpt_sig.dsProcessorFamily;
1232
1233 /*
1234 * Get the conventional memory size from CMOS.
1235 */
1236 outb(0x70, 0x16);
1237 j = inb(0x71);
1238 j <<= 8;
1239 outb(0x70, 0x15);
1240 j |= inb(0x71);
1241 info->conventionalMemSize = j;
1242
1243 /*
1244 * Get the extended memory size from CMOS.
1245 */
1246 outb(0x70, 0x31);
1247 j = inb(0x71);
1248 j <<= 8;
1249 outb(0x70, 0x30);
1250 j |= inb(0x71);
1251 info->extendedMemSize = j;
1252
1253 switch (cpu_class) {
1254 case CPUCLASS_386:
1255 info->processorType = PROC_386;
1256 break;
1257 case CPUCLASS_486:
1258 info->processorType = PROC_486;
1259 break;
1260 case CPUCLASS_586:
1261 info->processorType = PROC_PENTIUM;
1262 break;
1263 case CPUCLASS_686:
1264 default:
1265 info->processorType = PROC_SEXIUM;
1266 break;
1267 }
1268
1269 info->flags = SI_CMOS_Valid | SI_BusTypeValid |
1270 SI_MemorySizeValid | SI_NO_SmartROM;
1271 #else
1272 info->flags = SI_BusTypeValid | SI_NO_SmartROM;
1273 #endif
1274
1275 info->busType = sc->sc_bustype;
1276 }
1277
/*
 * Execute a user-supplied EATA command (from the DPT_EATAUSRCMD ioctl).
 * The caller (dptioctl) serializes entry via sc_uactive, so only one
 * pass-through command is in flight per adapter.  Maps the user data
 * buffer for DMA if present, starts the command, sleeps until the
 * completion wakeup, then copies status and sense data back out.
 */
1278 int
1279 dpt_passthrough(struct dpt_softc *sc, struct eata_ucp *ucp, struct lwp *l)
1280 {
1281 	struct dpt_ccb *ccb;
1282 	struct eata_sp sp;
1283 	struct eata_cp *cp;
1284 	struct eata_sg *sg;
1285 	bus_dmamap_t xfer = 0;		/* XXX: gcc */
1286 	bus_dma_segment_t *ds;
1287 	int datain = 0, s, rv = 0, i, uslen;	/* XXX: gcc */
1288 
1289 	/*
1290 	 * Get a CCB and fill.
1291 	 */
1292 	ccb = dpt_ccb_alloc(sc);
1293 	ccb->ccb_flg |= CCB_PRIVATE | CCB_WAIT;
1294 	ccb->ccb_timeout = 0;
	/* Status packet is copied to this stack buffer on completion. */
1295 	ccb->ccb_savesp = &sp;
1296 
1297 	cp = &ccb->ccb_eata_cp;
	/* Start from the user's CP template, then override driver fields. */
1298 	memcpy(cp, ucp->ucp_cp, sizeof(ucp->ucp_cp));
	/* Remember the user's sense length before it is overwritten below. */
1299 	uslen = cp->cp_senselen;
1300 	cp->cp_ccbid = ccb->ccb_id;
1301 	cp->cp_senselen = sizeof(ccb->ccb_sense);
1302 	cp->cp_senseaddr = htobe32(sc->sc_dmamap->dm_segs[0].ds_addr +
1303 	    CCB_OFF(sc, ccb) + offsetof(struct dpt_ccb, ccb_sense));
1304 	cp->cp_stataddr = htobe32(sc->sc_stppa);
1305 
1306 	/*
1307 	 * Map data transfers.
1308 	 */
1309 	if (ucp->ucp_dataaddr && ucp->ucp_datalen) {
1310 		xfer = ccb->ccb_dmamap_xfer;
	/* Direction comes from the user-supplied control bits. */
1311 		datain = ((cp->cp_ctl0 & CP_C0_DATA_IN) != 0);
1312 
1313 		if (ucp->ucp_datalen > DPT_MAX_XFER) {
1314 			DPRINTF(("%s: xfer too big\n", sc->sc_dv.dv_xname));
1315 			dpt_ccb_free(sc, ccb);
1316 			return (EFBIG);
1317 		}
	/* Map the user-space buffer of lwp l's process for DMA. */
1318 		rv = bus_dmamap_load(sc->sc_dmat, xfer,
1319 		    ucp->ucp_dataaddr, ucp->ucp_datalen, l->l_proc,
1320 		    BUS_DMA_WAITOK | BUS_DMA_STREAMING |
1321 		    (datain ? BUS_DMA_READ : BUS_DMA_WRITE));
1322 		if (rv != 0) {
1323 			DPRINTF(("%s: map failed; %d\n", sc->sc_dv.dv_xname,
1324 			    rv));
1325 			dpt_ccb_free(sc, ccb);
1326 			return (rv);
1327 		}
1328 
1329 		bus_dmamap_sync(sc->sc_dmat, xfer, 0, xfer->dm_mapsize,
1330 		    (datain ? BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE));
1331 
	/* Build the scatter/gather list (big-endian, per EATA spec). */
1332 		sg = ccb->ccb_sg;
1333 		ds = xfer->dm_segs;
1334 		for (i = 0; i < xfer->dm_nsegs; i++, sg++, ds++) {
1335 			sg->sg_addr = htobe32(ds->ds_addr);
1336 			sg->sg_len = htobe32(ds->ds_len);
1337 		}
1338 		cp->cp_dataaddr = htobe32(CCB_OFF(sc, ccb) +
1339 		    sc->sc_dmamap->dm_segs[0].ds_addr +
1340 		    offsetof(struct dpt_ccb, ccb_sg));
1341 		cp->cp_datalen = htobe32(i * sizeof(struct eata_sg));
1342 		cp->cp_ctl0 |= CP_C0_SCATTER;
1343 	} else {
1344 		cp->cp_dataaddr = 0;
1345 		cp->cp_datalen = 0;
1346 	}
1347 
1348 	/*
1349 	 * Start the command and sleep on completion.
	 *
	 * PHOLD keeps the lwp resident while the adapter DMAs to/from its
	 * address space.  The tsleep has no timeout; presumably the
	 * interrupt path wakes us via the CCB_WAIT flag -- TODO confirm
	 * against dpt_ccb_done/dpt_intr.
1350 	 */
1351 	PHOLD(curlwp);				/* XXXJRT curlwp */
1352 	bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, CCB_OFF(sc, ccb),
1353 	    sizeof(struct dpt_ccb), BUS_DMASYNC_PREWRITE);
1354 	s = splbio();
1355 	bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, sc->sc_stpoff,
1356 	    sizeof(struct eata_sp), BUS_DMASYNC_PREREAD);
1357 	if (dpt_cmd(sc, ccb, CP_DMA_CMD, 0))
1358 		panic("%s: dpt_cmd failed", sc->sc_dv.dv_xname);
1359 	tsleep(ccb, PWAIT, "dptucmd", 0);
1360 	splx(s);
1361 	PRELE(curlwp);				/* XXXJRT curlwp */
1362 
1363 	/*
1364 	 * Sync up the DMA map and copy out results.
1365 	 */
1366 	bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, CCB_OFF(sc, ccb),
1367 	    sizeof(struct dpt_ccb), BUS_DMASYNC_POSTWRITE);
1368 
1369 	if (cp->cp_datalen != 0) {
1370 		bus_dmamap_sync(sc->sc_dmat, xfer, 0, xfer->dm_mapsize,
1371 		    (datain ? BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE));
1372 		bus_dmamap_unload(sc->sc_dmat, xfer);
1373 	}
1374 
	/* Hand the status packet back to the user, if requested. */
1375 	if (ucp->ucp_stataddr != NULL) {
1376 		rv = copyout(&sp, ucp->ucp_stataddr, sizeof(sp));
1377 		if (rv != 0) {
1378 			DPRINTF(("%s: sp copyout() failed\n",
1379 			    sc->sc_dv.dv_xname));
1380 		}
1381 	}
	/* Sense data is clamped to the length the user originally asked for. */
1382 	if (rv == 0 && ucp->ucp_senseaddr != NULL) {
1383 		i = min(uslen, sizeof(ccb->ccb_sense));
1384 		rv = copyout(&ccb->ccb_sense, ucp->ucp_senseaddr, i);
1385 		if (rv != 0) {
1386 			DPRINTF(("%s: sense copyout() failed\n",
1387 			    sc->sc_dv.dv_xname));
1388 		}
1389 	}
1390 
1391 	ucp->ucp_hstatus = (u_int8_t)ccb->ccb_hba_status;
1392 	ucp->ucp_tstatus = (u_int8_t)ccb->ccb_scsi_status;
1393 	dpt_ccb_free(sc, ccb);
1394 	return (rv);
1395 }
Cache object: db58c5379fca8488a4809b070bc47232
|