1 /* $NetBSD: aic79xx_osm.c,v 1.7.2.1 2005/12/16 20:05:39 jmc Exp $ */
2
3 /*
4 * Bus independent NetBSD shim for the aic7xxx based adaptec SCSI controllers
5 *
6 * Copyright (c) 1994-2002 Justin T. Gibbs.
7 * Copyright (c) 2001-2002 Adaptec Inc.
8 * All rights reserved.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions, and the following disclaimer,
15 * without modification.
16 * 2. The name of the author may not be used to endorse or promote products
17 * derived from this software without specific prior written permission.
18 *
19 * Alternatively, this software may be distributed under the terms of the
20 * GNU Public License ("GPL").
21 *
22 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
23 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
26 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
27 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
28 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
29 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
30 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
31 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32 * SUCH DAMAGE.
33 *
34 * //depot/aic7xxx/freebsd/dev/aic7xxx/aic79xx_osm.c#26 $
35 *
36 * $FreeBSD: src/sys/dev/aic7xxx/aic79xx_osm.c,v 1.11 2003/05/04 00:20:07 gibbs Exp $
37 */
38 /*
39 * Ported from FreeBSD by Pascal Renauld, Network Storage Solutions, Inc.
40 * - April 2003
41 */
42
43 #include <sys/cdefs.h>
44 __KERNEL_RCSID(0, "$NetBSD: aic79xx_osm.c,v 1.7.2.1 2005/12/16 20:05:39 jmc Exp $");
45
46 #include <dev/ic/aic79xx_osm.h>
47 #include <dev/ic/aic7xxx_cam.h>
48 #include <dev/ic/aic79xx_inline.h>
49
50 #ifndef AHD_TMODE_ENABLE
51 #define AHD_TMODE_ENABLE 0
52 #endif
53
54 static int ahd_ioctl(struct scsipi_channel *channel, u_long cmd,
55 caddr_t addr, int flag, struct proc *p);
56 static void ahd_action(struct scsipi_channel *chan,
57 scsipi_adapter_req_t req, void *arg);
58 static void ahd_execute_scb(void *arg, bus_dma_segment_t *dm_segs,
59 int nsegments);
60 static int ahd_poll(struct ahd_softc *ahd, int wait);
61 static void ahd_setup_data(struct ahd_softc *ahd, struct scsipi_xfer *xs,
62 struct scb *scb);
63
64 #if NOT_YET
65 static void ahd_set_recoveryscb(struct ahd_softc *ahd, struct scb *scb);
66 #endif
67
68 /*
69 * Attach all the sub-devices we can find
70 */
71 int
72 ahd_attach(struct ahd_softc *ahd)
73 {
74 int s;
75 char ahd_info[256];
76
77 ahd_controller_info(ahd, ahd_info);
78 printf("%s: %s\n", ahd->sc_dev.dv_xname, ahd_info);
79
80 ahd_lock(ahd, &s);
81
82 ahd->sc_adapter.adapt_dev = &ahd->sc_dev;
83 ahd->sc_adapter.adapt_nchannels = 1;
84
85 ahd->sc_adapter.adapt_openings = ahd->scb_data.numscbs - 1;
86 ahd->sc_adapter.adapt_max_periph = 32;
87
88 ahd->sc_adapter.adapt_ioctl = ahd_ioctl;
89 ahd->sc_adapter.adapt_minphys = ahd_minphys;
90 ahd->sc_adapter.adapt_request = ahd_action;
91
92 ahd->sc_channel.chan_adapter = &ahd->sc_adapter;
93 ahd->sc_channel.chan_bustype = &scsi_bustype;
94 ahd->sc_channel.chan_channel = 0;
95 ahd->sc_channel.chan_ntargets = AHD_NUM_TARGETS;
96 ahd->sc_channel.chan_nluns = 8 /*AHD_NUM_LUNS*/;
97 ahd->sc_channel.chan_id = ahd->our_id;
98 ahd->sc_channel.chan_flags |= SCSIPI_CHAN_CANGROW;
99
100 ahd->sc_child = config_found((void *)ahd, &ahd->sc_channel, scsiprint);
101
102 ahd_intr_enable(ahd, TRUE);
103
104 if (ahd->flags & AHD_RESET_BUS_A)
105 ahd_reset_channel(ahd, 'A', TRUE);
106
107 ahd_unlock(ahd, &s);
108
109 return (1);
110 }
111
112 static int
113 ahd_ioctl(struct scsipi_channel *channel, u_long cmd,
114 caddr_t addr, int flag, struct proc *p)
115 {
116 struct ahd_softc *ahd = (void *)channel->chan_adapter->adapt_dev;
117 int s, ret = ENOTTY;
118
119 switch (cmd) {
120 case SCBUSIORESET:
121 s = splbio();
122 ahd_reset_channel(ahd, channel->chan_channel == 1 ? 'B' : 'A', TRUE);
123 splx(s);
124 ret = 0;
125 break;
126 default:
127 break;
128 }
129
130 return ret;
131 }
132
133 /*
134 * Catch an interrupt from the adapter
135 */
136 void
137 ahd_platform_intr(void *arg)
138 {
139 struct ahd_softc *ahd;
140
141 ahd = (struct ahd_softc *)arg;
142
143 printf("%s; ahd_platform_intr\n", ahd_name(ahd));
144
145 ahd_intr(ahd);
146 }
147
/*
 * We have an scb which has been processed by the
 * adaptor, now we look to see how the operation went.
 */
152 void
153 ahd_done(struct ahd_softc *ahd, struct scb *scb)
154 {
155 struct scsipi_xfer *xs;
156 struct scsipi_periph *periph;
157 int s;
158
159 LIST_REMOVE(scb, pending_links);
160
161 xs = scb->xs;
162 periph = xs->xs_periph;
163
164 callout_stop(&scb->xs->xs_callout);
165
166 if (xs->datalen) {
167 int op;
168
169 if (xs->xs_control & XS_CTL_DATA_IN)
170 op = BUS_DMASYNC_POSTREAD;
171 else
172 op = BUS_DMASYNC_POSTWRITE;
173
174 bus_dmamap_sync(ahd->parent_dmat, scb->dmamap, 0,
175 scb->dmamap->dm_mapsize, op);
176 bus_dmamap_unload(ahd->parent_dmat, scb->dmamap);
177 }
178
179 /*
180 * If the recovery SCB completes, we have to be
181 * out of our timeout.
182 */
183 if ((scb->flags & SCB_RECOVERY_SCB) != 0) {
184 struct scb *list_scb;
185
186 /*
187 * We were able to complete the command successfully,
188 * so reinstate the timeouts for all other pending
189 * commands.
190 */
191 LIST_FOREACH(list_scb, &ahd->pending_scbs, pending_links) {
192 struct scsipi_xfer *txs = list_scb->xs;
193
194 if (!(txs->xs_control & XS_CTL_POLL)) {
195 callout_reset(&txs->xs_callout,
196 (txs->timeout > 1000000) ?
197 (txs->timeout / 1000) * hz :
198 (txs->timeout * hz) / 1000,
199 ahd_timeout, list_scb);
200 }
201 }
202
203 if (ahd_get_transaction_status(scb) != XS_NOERROR)
204 ahd_set_transaction_status(scb, XS_TIMEOUT);
205 scsipi_printaddr(xs->xs_periph);
206 printf("%s: no longer in timeout, status = %x\n",
207 ahd_name(ahd), xs->status);
208 }
209
210 if (xs->error != XS_NOERROR) {
211 /* Don't clobber any existing error state */
212 } else if ((xs->status == SCSI_STATUS_BUSY) ||
213 (xs->status == SCSI_STATUS_QUEUE_FULL)) {
214 ahd_set_transaction_status(scb, XS_BUSY);
215 printf("%s: drive (ID %d, LUN %d) queue full (SCB 0x%x)\n",
216 ahd_name(ahd), SCB_GET_TARGET(ahd,scb), SCB_GET_LUN(scb), SCB_GET_TAG(scb));
217 } else if ((scb->flags & SCB_SENSE) != 0) {
218 /*
219 * We performed autosense retrieval.
220 *
221 * zero the sense data before having
222 * the drive fill it. The SCSI spec mandates
223 * that any untransferred data should be
224 * assumed to be zero. Complete the 'bounce'
225 * of sense information through buffers accessible
226 * via bus-space by copying it into the clients
227 * csio.
228 */
229 memset(&xs->sense.scsi_sense, 0, sizeof(xs->sense.scsi_sense));
230 memcpy(&xs->sense.scsi_sense, ahd_get_sense_buf(ahd, scb),
231 sizeof(struct scsipi_sense_data));
232
233 ahd_set_transaction_status(scb, XS_SENSE);
234 } else if ((scb->flags & SCB_PKT_SENSE) != 0) {
235 struct scsi_status_iu_header *siu;
236 u_int sense_len;
237 int i;
238
239 /*
240 * Copy only the sense data into the provided buffer.
241 */
242 siu = (struct scsi_status_iu_header *)scb->sense_data;
243 sense_len = MIN(scsi_4btoul(siu->sense_length),
244 sizeof(&xs->sense.scsi_sense));
245 memset(&xs->sense.scsi_sense, 0, sizeof(xs->sense.scsi_sense));
246 memcpy(&xs->sense.scsi_sense,
247 scb->sense_data + SIU_SENSE_OFFSET(siu), sense_len);
248 printf("Copied %d bytes of sense data offset %d:", sense_len,
249 SIU_SENSE_OFFSET(siu));
250 for (i = 0; i < sense_len; i++)
251 printf(" 0x%x", ((uint8_t *)&xs->sense.scsi_sense)[i]);
252 printf("\n");
253
254 ahd_set_transaction_status(scb, XS_SENSE);
255 }
256
257 if (scb->flags & SCB_FREEZE_QUEUE) {
258 scsipi_periph_thaw(periph, 1);
259 scb->flags &= ~SCB_FREEZE_QUEUE;
260 }
261
262 if (scb->flags & SCB_REQUEUE)
263 ahd_set_transaction_status(scb, XS_REQUEUE);
264
265 ahd_lock(ahd, &s);
266 ahd_free_scb(ahd, scb);
267 ahd_unlock(ahd, &s);
268
269 scsipi_done(xs);
270 }
271
272 static void
273 ahd_action(struct scsipi_channel *chan, scsipi_adapter_req_t req, void *arg)
274 {
275 struct ahd_softc *ahd;
276 struct ahd_initiator_tinfo *tinfo;
277 struct ahd_tmode_tstate *tstate;
278
279 ahd = (void *)chan->chan_adapter->adapt_dev;
280
281 switch(req) {
282
283 case ADAPTER_REQ_RUN_XFER:
284 {
285 struct scsipi_xfer *xs;
286 struct scsipi_periph *periph;
287 struct scb *scb;
288 struct hardware_scb *hscb;
289 u_int target_id;
290 u_int our_id;
291 u_int col_idx;
292 char channel;
293 int s;
294
295 xs = arg;
296 periph = xs->xs_periph;
297
298 SC_DEBUG(periph, SCSIPI_DB3, ("ahd_action\n"));
299
300 target_id = periph->periph_target;
301 our_id = ahd->our_id;
302 channel = (chan->chan_channel == 1) ? 'B' : 'A';
303
304 /*
305 * get an scb to use.
306 */
307 ahd_lock(ahd, &s);
308 tinfo = ahd_fetch_transinfo(ahd, channel, our_id,
309 target_id, &tstate);
310
311 if (xs->xs_tag_type != 0 ||
312 (tinfo->curr.ppr_options & MSG_EXT_PPR_IU_REQ) != 0)
313 col_idx = AHD_NEVER_COL_IDX;
314 else
315 col_idx = AHD_BUILD_COL_IDX(target_id,
316 periph->periph_lun);
317
318 if ((scb = ahd_get_scb(ahd, col_idx)) == NULL) {
319 xs->error = XS_RESOURCE_SHORTAGE;
320 ahd_unlock(ahd, &s);
321 scsipi_done(xs);
322 return;
323 }
324 ahd_unlock(ahd, &s);
325
326 hscb = scb->hscb;
327
328 SC_DEBUG(periph, SCSIPI_DB3, ("start scb(%p)\n", scb));
329 scb->xs = xs;
330
331 /*
332 * Put all the arguments for the xfer in the scb
333 */
334 hscb->control = 0;
335 hscb->scsiid = BUILD_SCSIID(ahd, sim, target_id, our_id);
336 hscb->lun = periph->periph_lun;
337 if (xs->xs_control & XS_CTL_RESET) {
338 hscb->cdb_len = 0;
339 scb->flags |= SCB_DEVICE_RESET;
340 hscb->control |= MK_MESSAGE;
341 hscb->task_management = SIU_TASKMGMT_LUN_RESET;
342 ahd_execute_scb(scb, NULL, 0);
343 } else {
344 hscb->task_management = 0;
345 }
346
347 ahd_setup_data(ahd, xs, scb);
348 break;
349 }
350
351 case ADAPTER_REQ_GROW_RESOURCES:
352 #ifdef AHC_DEBUG
353 printf("%s: ADAPTER_REQ_GROW_RESOURCES\n", ahd_name(ahd));
354 #endif
355 chan->chan_adapter->adapt_openings += ahd_alloc_scbs(ahd);
356 if (ahd->scb_data.numscbs >= AHD_SCB_MAX_ALLOC)
357 chan->chan_flags &= ~SCSIPI_CHAN_CANGROW;
358 break;
359
360 case ADAPTER_REQ_SET_XFER_MODE:
361 {
362 struct scsipi_xfer_mode *xm = arg;
363 struct ahd_devinfo devinfo;
364 int target_id, our_id, first;
365 u_int width;
366 int s;
367 char channel;
368 u_int ppr_options, period, offset;
369 uint16_t old_autoneg;
370
371 target_id = xm->xm_target;
372 our_id = chan->chan_id;
373 channel = 'A';
374 s = splbio();
375 tinfo = ahd_fetch_transinfo(ahd, channel, our_id, target_id,
376 &tstate);
377 ahd_compile_devinfo(&devinfo, our_id, target_id,
378 0, channel, ROLE_INITIATOR);
379
380 old_autoneg = tstate->auto_negotiate;
381
382 /*
383 * XXX since the period and offset are not provided here,
384 * fake things by forcing a renegotiation using the user
385 * settings if this is called for the first time (i.e.
386 * during probe). Also, cap various values at the user
387 * values, assuming that the user set it up that way.
388 */
389 if (ahd->inited_target[target_id] == 0) {
390 period = tinfo->user.period;
391 offset = tinfo->user.offset;
392 ppr_options = tinfo->user.ppr_options;
393 width = tinfo->user.width;
394 tstate->tagenable |=
395 (ahd->user_tagenable & devinfo.target_mask);
396 tstate->discenable |=
397 (ahd->user_discenable & devinfo.target_mask);
398 ahd->inited_target[target_id] = 1;
399 first = 1;
400 } else
401 first = 0;
402
403 if (xm->xm_mode & (PERIPH_CAP_WIDE16 | PERIPH_CAP_DT))
404 width = MSG_EXT_WDTR_BUS_16_BIT;
405 else
406 width = MSG_EXT_WDTR_BUS_8_BIT;
407
408 ahd_validate_width(ahd, NULL, &width, ROLE_UNKNOWN);
409 if (width > tinfo->user.width)
410 width = tinfo->user.width;
411 ahd_set_width(ahd, &devinfo, width, AHD_TRANS_GOAL, FALSE);
412
413 if (!(xm->xm_mode & (PERIPH_CAP_SYNC | PERIPH_CAP_DT))) {
414 period = 0;
415 offset = 0;
416 ppr_options = 0;
417 }
418
419 if ((xm->xm_mode & PERIPH_CAP_DT) &&
420 (tinfo->user.ppr_options & MSG_EXT_PPR_DT_REQ))
421 ppr_options |= MSG_EXT_PPR_DT_REQ;
422 else
423 ppr_options &= ~MSG_EXT_PPR_DT_REQ;
424
425 if ((tstate->discenable & devinfo.target_mask) == 0 ||
426 (tstate->tagenable & devinfo.target_mask) == 0)
427 ppr_options &= ~MSG_EXT_PPR_IU_REQ;
428
429 if ((xm->xm_mode & PERIPH_CAP_TQING) &&
430 (ahd->user_tagenable & devinfo.target_mask))
431 tstate->tagenable |= devinfo.target_mask;
432 else
433 tstate->tagenable &= ~devinfo.target_mask;
434
435 ahd_find_syncrate(ahd, &period, &ppr_options, AHD_SYNCRATE_MAX);
436 ahd_validate_offset(ahd, NULL, period, &offset,
437 MSG_EXT_WDTR_BUS_8_BIT, ROLE_UNKNOWN);
438 if (offset == 0) {
439 period = 0;
440 ppr_options = 0;
441 }
442 if (ppr_options != 0
443 && tinfo->user.transport_version >= 3) {
444 tinfo->goal.transport_version =
445 tinfo->user.transport_version;
446 tinfo->curr.transport_version =
447 tinfo->user.transport_version;
448 }
449
450 ahd_set_syncrate(ahd, &devinfo, period, offset,
451 ppr_options, AHD_TRANS_GOAL, FALSE);
452
453 /*
454 * If this is the first request, and no negotiation is
455 * needed, just confirm the state to the scsipi layer,
456 * so that it can print a message.
457 */
458 if (old_autoneg == tstate->auto_negotiate && first) {
459 xm->xm_mode = 0;
460 xm->xm_period = tinfo->curr.period;
461 xm->xm_offset = tinfo->curr.offset;
462 if (tinfo->curr.width == MSG_EXT_WDTR_BUS_16_BIT)
463 xm->xm_mode |= PERIPH_CAP_WIDE16;
464 if (tinfo->curr.period)
465 xm->xm_mode |= PERIPH_CAP_SYNC;
466 if (tstate->tagenable & devinfo.target_mask)
467 xm->xm_mode |= PERIPH_CAP_TQING;
468 if (tinfo->curr.ppr_options & MSG_EXT_PPR_DT_REQ)
469 xm->xm_mode |= PERIPH_CAP_DT;
470 scsipi_async_event(chan, ASYNC_EVENT_XFER_MODE, xm);
471 }
472 splx(s);
473 }
474 }
475
476 return;
477 }
478
479 static void
480 ahd_execute_scb(void *arg, bus_dma_segment_t *dm_segs, int nsegments)
481 {
482 struct scb *scb;
483 struct scsipi_xfer *xs;
484 struct ahd_softc *ahd;
485 struct ahd_initiator_tinfo *tinfo;
486 struct ahd_tmode_tstate *tstate;
487 u_int mask;
488 int s;
489
490 scb = (struct scb*)arg;
491 xs = scb->xs;
492 xs->error = 0;
493 xs->status = 0;
494 xs->xs_status = 0;
495 ahd = (void*)xs->xs_periph->periph_channel->chan_adapter->adapt_dev;
496
497 scb->sg_count = 0;
498 if (nsegments != 0) {
499 void *sg;
500 int op;
501 u_int i;
502
503 ahd_setup_data_scb(ahd, scb);
504
505 /* Copy the segments into our SG list */
506 for (i = nsegments, sg = scb->sg_list; i > 0; i--) {
507
508 sg = ahd_sg_setup(ahd, scb, sg, dm_segs->ds_addr,
509 dm_segs->ds_len,
510 /*last*/i == 1);
511 dm_segs++;
512 }
513
514 if (xs->xs_control & XS_CTL_DATA_IN)
515 op = BUS_DMASYNC_PREREAD;
516 else
517 op = BUS_DMASYNC_PREWRITE;
518
519 bus_dmamap_sync(ahd->parent_dmat, scb->dmamap, 0,
520 scb->dmamap->dm_mapsize, op);
521 }
522
523 ahd_lock(ahd, &s);
524
525 /*
526 * Last time we need to check if this SCB needs to
527 * be aborted.
528 */
529 if (ahd_get_scsi_status(scb) == XS_STS_DONE) {
530 if (nsegments != 0)
531 bus_dmamap_unload(ahd->parent_dmat,
532 scb->dmamap);
533 ahd_free_scb(ahd, scb);
534 ahd_unlock(ahd, &s);
535 return;
536 }
537
538 tinfo = ahd_fetch_transinfo(ahd, SCSIID_CHANNEL(ahd, scb->hscb->scsiid),
539 SCSIID_OUR_ID(scb->hscb->scsiid),
540 SCSIID_TARGET(ahd, scb->hscb->scsiid),
541 &tstate);
542
543 mask = SCB_GET_TARGET_MASK(ahd, scb);
544
545 if ((tstate->discenable & mask) != 0)
546 scb->hscb->control |= DISCENB;
547
548 if ((tstate->tagenable & mask) != 0)
549 scb->hscb->control |= xs->xs_tag_type|TAG_ENB;
550
551 if ((tinfo->curr.ppr_options & MSG_EXT_PPR_IU) != 0) {
552 scb->flags |= SCB_PACKETIZED;
553 if (scb->hscb->task_management != 0)
554 scb->hscb->control &= ~MK_MESSAGE;
555 }
556
557 #if 0 /* This looks like it makes sense at first, but it can loop */
558 if ((xs->xs_control & XS_CTL_DISCOVERY) &&
559 (tinfo->goal.width != 0
560 || tinfo->goal.period != 0
561 || tinfo->goal.ppr_options != 0)) {
562 scb->flags |= SCB_NEGOTIATE;
563 scb->hscb->control |= MK_MESSAGE;
564 } else
565 #endif
566 if ((tstate->auto_negotiate & mask) != 0) {
567 scb->flags |= SCB_AUTO_NEGOTIATE;
568 scb->hscb->control |= MK_MESSAGE;
569 }
570
571 LIST_INSERT_HEAD(&ahd->pending_scbs, scb, pending_links);
572
573 scb->flags |= SCB_ACTIVE;
574
575 if (!(xs->xs_control & XS_CTL_POLL)) {
576 callout_reset(&scb->xs->xs_callout, xs->timeout > 1000000 ?
577 (xs->timeout / 1000) * hz : (xs->timeout * hz) / 1000,
578 ahd_timeout, scb);
579 }
580
581 if ((scb->flags & SCB_TARGET_IMMEDIATE) != 0) {
582 /* Define a mapping from our tag to the SCB. */
583 ahd->scb_data.scbindex[SCB_GET_TAG(scb)] = scb;
584 ahd_pause(ahd);
585 ahd_set_scbptr(ahd, SCB_GET_TAG(scb));
586 ahd_outb(ahd, RETURN_1, CONT_MSG_LOOP_TARG);
587 ahd_unpause(ahd);
588 } else {
589 ahd_queue_scb(ahd, scb);
590 }
591
592 if (!(xs->xs_control & XS_CTL_POLL)) {
593 ahd_unlock(ahd, &s);
594 return;
595 }
596 /*
597 * If we can't use interrupts, poll for completion
598 */
599 SC_DEBUG(xs->xs_periph, SCSIPI_DB3, ("cmd_poll\n"));
600 do {
601 if (ahd_poll(ahd, xs->timeout)) {
602 if (!(xs->xs_control & XS_CTL_SILENT))
603 printf("cmd fail\n");
604 ahd_timeout(scb);
605 break;
606 }
607 } while (!(xs->xs_status & XS_STS_DONE));
608
609 ahd_unlock(ahd, &s);
610 }
611
612 static int
613 ahd_poll(struct ahd_softc *ahd, int wait)
614 {
615
616 while (--wait) {
617 DELAY(1000);
618 if (ahd_inb(ahd, INTSTAT) & INT_PEND)
619 break;
620 }
621
622 if (wait == 0) {
623 printf("%s: board is not responding\n", ahd_name(ahd));
624 return (EIO);
625 }
626
627 ahd_intr((void *)ahd);
628 return (0);
629 }
630
631
632 static void
633 ahd_setup_data(struct ahd_softc *ahd, struct scsipi_xfer *xs,
634 struct scb *scb)
635 {
636 struct hardware_scb *hscb;
637
638 hscb = scb->hscb;
639 xs->resid = xs->status = 0;
640
641 hscb->cdb_len = xs->cmdlen;
642 if (hscb->cdb_len > MAX_CDB_LEN) {
643 int s;
644 /*
645 * Should CAM start to support CDB sizes
646 * greater than 16 bytes, we could use
647 * the sense buffer to store the CDB.
648 */
649 ahd_set_transaction_status(scb,
650 XS_DRIVER_STUFFUP);
651
652 ahd_lock(ahd, &s);
653 ahd_free_scb(ahd, scb);
654 ahd_unlock(ahd, &s);
655 scsipi_done(xs);
656 }
657 memcpy(hscb->shared_data.idata.cdb, xs->cmd, hscb->cdb_len);
658
659 /* Only use S/G if there is a transfer */
660 if (xs->datalen) {
661 int error;
662
663 error = bus_dmamap_load(ahd->parent_dmat,
664 scb->dmamap, xs->data,
665 xs->datalen, NULL,
666 ((xs->xs_control & XS_CTL_NOSLEEP) ?
667 BUS_DMA_NOWAIT : BUS_DMA_WAITOK) |
668 BUS_DMA_STREAMING |
669 ((xs->xs_control & XS_CTL_DATA_IN) ?
670 BUS_DMA_READ : BUS_DMA_WRITE));
671 if (error) {
672 #ifdef AHD_DEBUG
673 printf("%s: in ahc_setup_data(): bus_dmamap_load() "
674 "= %d\n",
675 ahd_name(ahd), error);
676 #endif
677 xs->error = XS_RESOURCE_SHORTAGE;
678 scsipi_done(xs);
679 return;
680 }
681 ahd_execute_scb(scb,
682 scb->dmamap->dm_segs,
683 scb->dmamap->dm_nsegs);
684 } else {
685 ahd_execute_scb(scb, NULL, 0);
686 }
687 }
688
689 void
690 ahd_timeout(void *arg)
691 {
692 struct scb *scb;
693 struct ahd_softc *ahd;
694 ahd_mode_state saved_modes;
695 int s;
696
697 scb = (struct scb *)arg;
698 ahd = (struct ahd_softc *)scb->ahd_softc;
699
700 printf("%s: ahd_timeout\n", ahd_name(ahd));
701
702 ahd_lock(ahd, &s);
703
704 ahd_pause_and_flushwork(ahd);
705 saved_modes = ahd_save_modes(ahd);
706 #if 0
707 ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
708 ahd_outb(ahd, SCSISIGO, ACKO);
709 printf("set ACK\n");
710 ahd_outb(ahd, SCSISIGO, 0);
711 printf("clearing Ack\n");
712 ahd_restore_modes(ahd, saved_modes);
713 #endif
714 if ((scb->flags & SCB_ACTIVE) == 0) {
715 /* Previous timeout took care of me already */
716 printf("%s: Timedout SCB already complete. "
717 "Interrupts may not be functioning.\n", ahd_name(ahd));
718 ahd_unpause(ahd);
719 ahd_unlock(ahd, &s);
720 return;
721 }
722
723 ahd_print_path(ahd, scb);
724 printf("SCB 0x%x - timed out\n", SCB_GET_TAG(scb));
725 ahd_dump_card_state(ahd);
726 ahd_reset_channel(ahd, SIM_CHANNEL(ahd, sim),
727 /*initiate reset*/TRUE);
728 ahd_unlock(ahd, &s);
729 return;
730 }
731
732 int
733 ahd_platform_alloc(struct ahd_softc *ahd, void *platform_arg)
734 {
735 ahd->platform_data = malloc(sizeof(struct ahd_platform_data), M_DEVBUF,
736 M_NOWAIT /*| M_ZERO*/);
737 if (ahd->platform_data == NULL)
738 return (ENOMEM);
739
740 memset(ahd->platform_data, 0, sizeof(struct ahd_platform_data));
741
742 return (0);
743 }
744
745 void
746 ahd_platform_free(struct ahd_softc *ahd)
747 {
748 free(ahd->platform_data, M_DEVBUF);
749 }
750
751 int
752 ahd_softc_comp(struct ahd_softc *lahd, struct ahd_softc *rahd)
753 {
754 /* We don't sort softcs under NetBSD so report equal always */
755 return (0);
756 }
757
758 int
759 ahd_detach(struct device *self, int flags)
760 {
761 int rv = 0;
762
763 struct ahd_softc *ahd = (struct ahd_softc*)self;
764
765 if (ahd->sc_child != NULL)
766 rv = config_detach((void *)ahd->sc_child, flags);
767
768 shutdownhook_disestablish(ahd->shutdown_hook);
769
770 ahd_free(ahd);
771
772 return rv;
773 }
774
775 void
776 ahd_platform_set_tags(struct ahd_softc *ahd,
777 struct ahd_devinfo *devinfo, ahd_queue_alg alg)
778 {
779 struct ahd_tmode_tstate *tstate;
780
781 ahd_fetch_transinfo(ahd, devinfo->channel, devinfo->our_scsiid,
782 devinfo->target, &tstate);
783
784 if (alg != AHD_QUEUE_NONE)
785 tstate->tagenable |= devinfo->target_mask;
786 else
787 tstate->tagenable &= ~devinfo->target_mask;
788 }
789
790 void
791 ahd_send_async(struct ahd_softc *ahc, char channel, u_int target, u_int lun,
792 ac_code code, void *opt_arg)
793 {
794 struct ahd_tmode_tstate *tstate;
795 struct ahd_initiator_tinfo *tinfo;
796 struct ahd_devinfo devinfo;
797 struct scsipi_channel *chan;
798 struct scsipi_xfer_mode xm;
799
800 #ifdef DIAGNOSTIC
801 if (channel != 'A')
802 panic("ahd_send_async: not channel A");
803 #endif
804 chan = &ahc->sc_channel;
805 switch (code) {
806 case AC_TRANSFER_NEG:
807 tinfo = ahd_fetch_transinfo(ahc, channel, ahc->our_id, target,
808 &tstate);
809 ahd_compile_devinfo(&devinfo, ahc->our_id, target, lun,
810 channel, ROLE_UNKNOWN);
811 /*
812 * Don't bother if negotiating. XXX?
813 */
814 if (tinfo->curr.period != tinfo->goal.period
815 || tinfo->curr.width != tinfo->goal.width
816 || tinfo->curr.offset != tinfo->goal.offset
817 || tinfo->curr.ppr_options != tinfo->goal.ppr_options)
818 break;
819 xm.xm_target = target;
820 xm.xm_mode = 0;
821 xm.xm_period = tinfo->curr.period;
822 xm.xm_offset = tinfo->curr.offset;
823 if (tinfo->goal.ppr_options & MSG_EXT_PPR_DT_REQ)
824 xm.xm_mode |= PERIPH_CAP_DT;
825 if (tinfo->curr.width == MSG_EXT_WDTR_BUS_16_BIT)
826 xm.xm_mode |= PERIPH_CAP_WIDE16;
827 if (tinfo->curr.period)
828 xm.xm_mode |= PERIPH_CAP_SYNC;
829 if (tstate->tagenable & devinfo.target_mask)
830 xm.xm_mode |= PERIPH_CAP_TQING;
831 scsipi_async_event(chan, ASYNC_EVENT_XFER_MODE, &xm);
832 break;
833 case AC_BUS_RESET:
834 scsipi_async_event(chan, ASYNC_EVENT_RESET, NULL);
835 case AC_SENT_BDR:
836 default:
837 break;
838 }
839 }
Cache object: e7f46946d2ac1b2e5bb41a5ed308a638
|