1 /* $NetBSD: aic7xxx_osm.c,v 1.14.2.1 2005/12/16 20:05:42 jmc Exp $ */
2
3 /*
4 * Bus independent FreeBSD shim for the aic7xxx based adaptec SCSI controllers
5 *
6 * Copyright (c) 1994-2001 Justin T. Gibbs.
7 * All rights reserved.
8 *
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
11 * are met:
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions, and the following disclaimer,
14 * without modification.
15 * 2. The name of the author may not be used to endorse or promote products
16 * derived from this software without specific prior written permission.
17 *
18 * Alternatively, this software may be distributed under the terms of the
19 * GNU Public License ("GPL").
20 *
21 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
22 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
25 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
26 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
27 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
28 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
29 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
30 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
31 * SUCH DAMAGE.
32 *
33 * //depot/aic7xxx/freebsd/dev/aic7xxx/aic7xxx_osm.c#12 $
34 *
35 * $FreeBSD: src/sys/dev/aic7xxx/aic7xxx_osm.c,v 1.31 2002/11/30 19:08:58 scottl Exp $
36 */
37 /*
38 * Ported from FreeBSD by Pascal Renauld, Network Storage Solutions, Inc. - April 2003
39 */
40
41 #include <sys/cdefs.h>
42 __KERNEL_RCSID(0, "$NetBSD: aic7xxx_osm.c,v 1.14.2.1 2005/12/16 20:05:42 jmc Exp $");
43
44 #include <dev/ic/aic7xxx_osm.h>
45 #include <dev/ic/aic7xxx_inline.h>
46
47 #ifndef AHC_TMODE_ENABLE
48 #define AHC_TMODE_ENABLE 0
49 #endif
50
51
52 static void ahc_action(struct scsipi_channel *chan, scsipi_adapter_req_t req, void *arg);
53 static void ahc_execute_scb(void *arg, bus_dma_segment_t *dm_segs, int nsegments);
54 static int ahc_poll(struct ahc_softc *ahc, int wait);
55 static void ahc_setup_data(struct ahc_softc *ahc,
56 struct scsipi_xfer *xs, struct scb *scb);
57 static void ahc_set_recoveryscb(struct ahc_softc *ahc, struct scb *scb);
58 static int ahc_ioctl(struct scsipi_channel *channel, u_long cmd, caddr_t addr, int flag,
59 struct proc *p);
60
61
62
/*
 * Attach all the sub-devices we can find.
 *
 * Registers one scsipi channel (two for twin-channel adapters) with the
 * scsipi midlayer, enables interrupts and optionally resets the bus(es).
 * Returns 1 (the caller treats any non-zero value as success).
 */
int
ahc_attach(struct ahc_softc *ahc)
{
	u_long s;
	int i;
	char ahc_info[256];

	LIST_INIT(&ahc->pending_scbs);
	/* One untagged-transaction queue per possible target. */
	for (i = 0; i < AHC_NUM_TARGETS; i++)
		TAILQ_INIT(&ahc->untagged_queues[i]);

	ahc_lock(ahc, &s);

	ahc->sc_adapter.adapt_dev = &ahc->sc_dev;
	ahc->sc_adapter.adapt_nchannels = (ahc->features & AHC_TWIN) ? 2 : 1;

	/* One SCB is held back for error recovery. */
	ahc->sc_adapter.adapt_openings = ahc->scb_data->numscbs - 1;
	ahc->sc_adapter.adapt_max_periph = 16;

	ahc->sc_adapter.adapt_ioctl = ahc_ioctl;
	ahc->sc_adapter.adapt_minphys = ahc_minphys;
	ahc->sc_adapter.adapt_request = ahc_action;

	ahc->sc_channel.chan_adapter = &ahc->sc_adapter;
	ahc->sc_channel.chan_bustype = &scsi_bustype;
	ahc->sc_channel.chan_channel = 0;
	ahc->sc_channel.chan_ntargets = (ahc->features & AHC_WIDE) ? 16 : 8;
	ahc->sc_channel.chan_nluns = 8 /*AHC_NUM_LUNS*/;
	ahc->sc_channel.chan_id = ahc->our_id;
	/* More SCBs can be allocated on demand (ADAPTER_REQ_GROW_RESOURCES). */
	ahc->sc_channel.chan_flags |= SCSIPI_CHAN_CANGROW;

	if (ahc->features & AHC_TWIN) {
		/* Channel B is a clone of channel A with its own id/index. */
		ahc->sc_channel_b = ahc->sc_channel;
		ahc->sc_channel_b.chan_id = ahc->our_id_b;
		ahc->sc_channel_b.chan_channel = 1;
	}

	ahc_controller_info(ahc, ahc_info);
	printf("%s: %s\n", ahc->sc_dev.dv_xname, ahc_info);

	/*
	 * Attach the channels in "primary first" order; AHC_PRIMARY_CHANNEL
	 * set means channel B is the primary, so it is attached first.
	 */
	if ((ahc->flags & AHC_PRIMARY_CHANNEL) == 0) {
		ahc->sc_child = config_found((void *)&ahc->sc_dev,
		    &ahc->sc_channel, scsiprint);
		if (ahc->features & AHC_TWIN)
			ahc->sc_child_b = config_found((void *)&ahc->sc_dev,
			    &ahc->sc_channel_b, scsiprint);
	} else {
		if (ahc->features & AHC_TWIN)
			ahc->sc_child = config_found((void *)&ahc->sc_dev,
			    &ahc->sc_channel_b, scsiprint);
		ahc->sc_child_b = config_found((void *)&ahc->sc_dev,
		    &ahc->sc_channel, scsiprint);
	}

	ahc_intr_enable(ahc, TRUE);

	if (ahc->flags & AHC_RESET_BUS_A)
		ahc_reset_channel(ahc, 'A', TRUE);
	if ((ahc->features & AHC_TWIN) && ahc->flags & AHC_RESET_BUS_B)
		ahc_reset_channel(ahc, 'B', TRUE);

	ahc_unlock(ahc, &s);
	return (1);
}
130
/*
 * Platform interrupt entry point: hand the interrupt straight to the
 * bus-independent core handler.
 */
void
ahc_platform_intr(void *arg)
{

	ahc_intr((struct ahc_softc *)arg);
}
142
/*
 * We have an scb which has been processed by the
 * adaptor, now we look to see how the operation
 * went.
 *
 * Tears down the transaction: dequeues the SCB, stops its timeout,
 * syncs/unloads its DMA map, propagates autosense data and error
 * status into the scsipi_xfer, frees the SCB and completes the xfer.
 */
void
ahc_done(struct ahc_softc *ahc, struct scb *scb)
{
	struct scsipi_xfer *xs;
	struct scsipi_periph *periph;
	u_long s;

	xs = scb->xs;
	periph = xs->xs_periph;
	LIST_REMOVE(scb, pending_links);
	if ((scb->flags & SCB_UNTAGGEDQ) != 0) {
		struct scb_tailq *untagged_q;
		int target_offset;

		/*
		 * This SCB headed its target's untagged queue; removing it
		 * may allow the next untagged transaction to start.
		 */
		target_offset = SCB_GET_TARGET_OFFSET(ahc, scb);
		untagged_q = &ahc->untagged_queues[target_offset];
		TAILQ_REMOVE(untagged_q, scb, links.tqe);
		scb->flags &= ~SCB_UNTAGGEDQ;
		ahc_run_untagged_queue(ahc, untagged_q);
	}

	callout_stop(&scb->xs->xs_callout);

	if (xs->datalen) {
		int op;

		if (xs->xs_control & XS_CTL_DATA_IN)
			op = BUS_DMASYNC_POSTREAD;
		else
			op = BUS_DMASYNC_POSTWRITE;
		bus_dmamap_sync(ahc->parent_dmat, scb->dmamap, 0,
				scb->dmamap->dm_mapsize, op);
		bus_dmamap_unload(ahc->parent_dmat, scb->dmamap);
	}

	/*
	 * If the recovery SCB completes, we have to be
	 * out of our timeout.
	 */
	if ((scb->flags & SCB_RECOVERY_SCB) != 0) {
		struct scb *list_scb;

		/*
		 * We were able to complete the command successfully,
		 * so reinstate the timeouts for all other pending
		 * commands.
		 */
		LIST_FOREACH(list_scb, &ahc->pending_scbs, pending_links) {
			struct scsipi_xfer *xs = list_scb->xs;

			if (!(xs->xs_control & XS_CTL_POLL)) {
				/*
				 * Convert the per-xfer timeout (ms) to
				 * ticks, dividing first for large values
				 * to avoid integer overflow.
				 */
				callout_reset(&list_scb->xs->xs_callout,
				    (list_scb->xs->timeout > 1000000) ?
				    (list_scb->xs->timeout / 1000) * hz :
				    (list_scb->xs->timeout * hz) / 1000,
				    ahc_timeout, list_scb);
			}
		}

		/*
		 * A BDR or abort status here stems from our own recovery
		 * action; report it to the midlayer as a plain timeout.
		 */
		if (ahc_get_transaction_status(scb) == CAM_BDR_SENT
		 || ahc_get_transaction_status(scb) == CAM_REQ_ABORTED)
			ahc_set_transaction_status(scb, CAM_CMD_TIMEOUT);
		scsipi_printaddr(xs->xs_periph);
		printf("%s: no longer in timeout, status = %x\n",
		       ahc_name(ahc), xs->status);
	}

	if (xs->error != XS_NOERROR) {
		/* Don't clobber any existing error state. */
	} else if ((scb->flags & SCB_SENSE) != 0) {
		/*
		 * We performed autosense retrieval.
		 *
		 * Zero any sense not transferred by the
		 * device.  The SCSI spec mandates that any
		 * untransferred data should be assumed to be
		 * zero.  Complete the 'bounce' of sense information
		 * through buffers accessible via bus-space by
		 * copying it into the clients csio.
		 */
		memset(&xs->sense.scsi_sense, 0, sizeof(xs->sense.scsi_sense));
		memcpy(&xs->sense.scsi_sense,
		       ahc_get_sense_buf(ahc, scb),
		       sizeof(xs->sense.scsi_sense));
		xs->error = XS_SENSE;
	}
	if (scb->flags & SCB_FREEZE_QUEUE) {
		/* Undo the freeze taken when this SCB stalled the queue. */
		scsipi_periph_thaw(periph, 1);
		scb->flags &= ~SCB_FREEZE_QUEUE;
	}

	ahc_lock(ahc, &s);
	ahc_free_scb(ahc, scb);
	ahc_unlock(ahc, &s);

	scsipi_done(xs);
}
246
247 static int
248 ahc_ioctl(struct scsipi_channel *channel, u_long cmd, caddr_t addr, int flag,
249 struct proc *p)
250 {
251 struct ahc_softc *ahc = (void *)channel->chan_adapter->adapt_dev;
252 int s, ret = ENOTTY;
253
254 switch (cmd) {
255 case SCBUSIORESET:
256 s = splbio();
257 ahc_reset_channel(ahc, channel->chan_channel == 1 ? 'B' : 'A',
258 TRUE);
259 splx(s);
260 ret = 0;
261 break;
262 default:
263 break;
264 }
265
266 return ret;
267 }
268
/*
 * scsipi "request" entry point.  Dispatches the three midlayer request
 * types: run a transfer, grow the SCB pool, or set transfer modes
 * (sync/wide/tagged negotiation goals).
 */
static void
ahc_action(struct scsipi_channel *chan, scsipi_adapter_req_t req, void *arg)
{
	struct ahc_softc *ahc;
	int s;
	struct ahc_initiator_tinfo *tinfo;
	struct ahc_tmode_tstate *tstate;

	ahc = (void *)chan->chan_adapter->adapt_dev;

	switch (req) {

	case ADAPTER_REQ_RUN_XFER:
	{
		struct scsipi_xfer *xs;
		struct scsipi_periph *periph;
		struct scb *scb;
		struct hardware_scb *hscb;
		u_int target_id;
		u_int our_id;
		u_long s;

		xs = arg;
		periph = xs->xs_periph;

		target_id = periph->periph_target;
		our_id = ahc->our_id;

		SC_DEBUG(xs->xs_periph, SCSIPI_DB3, ("ahc_action\n"));

		/*
		 * get an scb to use.
		 */
		ahc_lock(ahc, &s);
		if ((scb = ahc_get_scb(ahc)) == NULL) {
			/* Pool exhausted; midlayer will retry later. */
			xs->error = XS_RESOURCE_SHORTAGE;
			ahc_unlock(ahc, &s);
			scsipi_done(xs);
			return;
		}
		ahc_unlock(ahc, &s);

		hscb = scb->hscb;

		SC_DEBUG(periph, SCSIPI_DB3, ("start scb(%p)\n", scb));
		scb->xs = xs;

		/*
		 * Put all the arguments for the xfer in the scb
		 */
		hscb->control = 0;
		hscb->scsiid = BUILD_SCSIID(ahc, 0, target_id, our_id);
		hscb->lun = periph->periph_lun;
		if (xs->xs_control & XS_CTL_RESET) {
			/* Device reset: no CDB, just a BDR message. */
			hscb->cdb_len = 0;
			scb->flags |= SCB_DEVICE_RESET;
			hscb->control |= MK_MESSAGE;
			ahc_execute_scb(scb, NULL, 0);
			/*
			 * NOTE(review): ahc_execute_scb() has already queued
			 * (or completed) this scb, yet control falls through
			 * to ahc_setup_data() below, which appears to build
			 * and execute it a second time — confirm whether a
			 * return/break is missing here.
			 */
		}

		ahc_setup_data(ahc, xs, scb);

		break;
	}
	case ADAPTER_REQ_GROW_RESOURCES:
#ifdef AHC_DEBUG
		printf("%s: ADAPTER_REQ_GROW_RESOURCES\n", ahc_name(ahc));
#endif
		/* Allocate more SCBs and report the new opening count. */
		chan->chan_adapter->adapt_openings += ahc_alloc_scbs(ahc);
		if (ahc->scb_data->numscbs >= AHC_SCB_MAX_ALLOC)
			chan->chan_flags &= ~SCSIPI_CHAN_CANGROW;
		return;

	case ADAPTER_REQ_SET_XFER_MODE:
	{
		struct scsipi_xfer_mode *xm = arg;
		struct ahc_devinfo devinfo;
		int target_id, our_id, first;
		u_int width;
		char channel;
		u_int ppr_options, period, offset;
		struct ahc_syncrate *syncrate;
		uint16_t old_autoneg;

		target_id = xm->xm_target;
		our_id = chan->chan_id;
		channel = (chan->chan_channel == 1) ? 'B' : 'A';
		s = splbio();
		tinfo = ahc_fetch_transinfo(ahc, channel, our_id, target_id,
		    &tstate);
		ahc_compile_devinfo(&devinfo, our_id, target_id,
		    0, channel, ROLE_INITIATOR);

		old_autoneg = tstate->auto_negotiate;

		/*
		 * XXX since the period and offset are not provided here,
		 * fake things by forcing a renegotiation using the user
		 * settings if this is called for the first time (i.e.
		 * during probe). Also, cap various values at the user
		 * values, assuming that the user set it up that way.
		 */
		if (ahc->inited_target[target_id] == 0) {
			period = tinfo->user.period;
			offset = tinfo->user.offset;
			ppr_options = tinfo->user.ppr_options;
			width = tinfo->user.width;
			tstate->tagenable |=
			    (ahc->user_tagenable & devinfo.target_mask);
			tstate->discenable |=
			    (ahc->user_discenable & devinfo.target_mask);
			ahc->inited_target[target_id] = 1;
			first = 1;
		} else
			first = 0;
		/*
		 * NOTE(review): when first == 0 and PERIPH_CAP_SYNC/DT is
		 * set, period/offset/ppr_options are read below without
		 * ever being initialized — confirm intended behavior.
		 */

		if (xm->xm_mode & (PERIPH_CAP_WIDE16 | PERIPH_CAP_DT))
			width = MSG_EXT_WDTR_BUS_16_BIT;
		else
			width = MSG_EXT_WDTR_BUS_8_BIT;

		/* Clamp the width to both chip and user limits. */
		ahc_validate_width(ahc, NULL, &width, ROLE_UNKNOWN);
		if (width > tinfo->user.width)
			width = tinfo->user.width;
		ahc_set_width(ahc, &devinfo, width, AHC_TRANS_GOAL, FALSE);

		if (!(xm->xm_mode & (PERIPH_CAP_SYNC | PERIPH_CAP_DT))) {
			/* Async transfers requested. */
			period = 0;
			offset = 0;
			ppr_options = 0;
		}

		if ((xm->xm_mode & PERIPH_CAP_DT) &&
		    (ppr_options & MSG_EXT_PPR_DT_REQ))
			ppr_options |= MSG_EXT_PPR_DT_REQ;
		else
			ppr_options &= ~MSG_EXT_PPR_DT_REQ;
		/* IU requires both disconnection and tagged queuing. */
		if ((tstate->discenable & devinfo.target_mask) == 0 ||
		    (tstate->tagenable & devinfo.target_mask) == 0)
			ppr_options &= ~MSG_EXT_PPR_IU_REQ;

		if ((xm->xm_mode & PERIPH_CAP_TQING) &&
		    (ahc->user_tagenable & devinfo.target_mask))
			tstate->tagenable |= devinfo.target_mask;
		else
			tstate->tagenable &= ~devinfo.target_mask;

		syncrate = ahc_find_syncrate(ahc, &period, &ppr_options,
		    AHC_SYNCRATE_MAX);
		ahc_validate_offset(ahc, NULL, syncrate, &offset,
		    width, ROLE_UNKNOWN);

		if (offset == 0) {
			/* No offset means asynchronous transfers. */
			period = 0;
			ppr_options = 0;
		}

		if (ppr_options != 0
		    && tinfo->user.transport_version >= 3) {
			tinfo->goal.transport_version =
			    tinfo->user.transport_version;
			tinfo->curr.transport_version =
			    tinfo->user.transport_version;
		}

		ahc_set_syncrate(ahc, &devinfo, syncrate, period, offset,
		    ppr_options, AHC_TRANS_GOAL, FALSE);

		/*
		 * If this is the first request, and no negotiation is
		 * needed, just confirm the state to the scsipi layer,
		 * so that it can print a message.
		 */
		if (old_autoneg == tstate->auto_negotiate && first) {
			xm->xm_mode = 0;
			xm->xm_period = tinfo->curr.period;
			xm->xm_offset = tinfo->curr.offset;
			if (tinfo->curr.width == MSG_EXT_WDTR_BUS_16_BIT)
				xm->xm_mode |= PERIPH_CAP_WIDE16;
			if (tinfo->curr.period)
				xm->xm_mode |= PERIPH_CAP_SYNC;
			if (tstate->tagenable & devinfo.target_mask)
				xm->xm_mode |= PERIPH_CAP_TQING;
			if (tinfo->curr.ppr_options & MSG_EXT_PPR_DT_REQ)
				xm->xm_mode |= PERIPH_CAP_DT;
			scsipi_async_event(chan, ASYNC_EVENT_XFER_MODE, xm);
		}
		splx(s);
	}
	}

	return;
}
462
463 static void
464 ahc_execute_scb(void *arg, bus_dma_segment_t *dm_segs, int nsegments)
465 {
466 struct scb *scb;
467 struct scsipi_xfer *xs;
468 struct ahc_softc *ahc;
469 struct ahc_initiator_tinfo *tinfo;
470 struct ahc_tmode_tstate *tstate;
471
472 u_int mask;
473 long s;
474
475 scb = (struct scb *)arg;
476 xs = scb->xs;
477 xs->error = 0;
478 xs->status = 0;
479 xs->xs_status = 0;
480 ahc = (void *)xs->xs_periph->periph_channel->chan_adapter->adapt_dev;
481
482 if (nsegments != 0) {
483 struct ahc_dma_seg *sg;
484 bus_dma_segment_t *end_seg;
485 int op;
486
487 end_seg = dm_segs + nsegments;
488
489 /* Copy the segments into our SG list */
490 sg = scb->sg_list;
491 while (dm_segs < end_seg) {
492 uint32_t len;
493
494 sg->addr = ahc_htole32(dm_segs->ds_addr);
495 len = dm_segs->ds_len
496 | ((dm_segs->ds_addr >> 8) & 0x7F000000);
497 sg->len = ahc_htole32(len);
498 sg++;
499 dm_segs++;
500 }
501
502 /*
503 * Note where to find the SG entries in bus space.
504 * We also set the full residual flag which the
505 * sequencer will clear as soon as a data transfer
506 * occurs.
507 */
508 scb->hscb->sgptr = ahc_htole32(scb->sg_list_phys|SG_FULL_RESID);
509
510 if (xs->xs_control & XS_CTL_DATA_IN)
511 op = BUS_DMASYNC_PREREAD;
512 else
513 op = BUS_DMASYNC_PREWRITE;
514
515 bus_dmamap_sync(ahc->parent_dmat, scb->dmamap, 0,
516 scb->dmamap->dm_mapsize, op);
517
518 sg--;
519 sg->len |= ahc_htole32(AHC_DMA_LAST_SEG);
520
521 /* Copy the first SG into the "current" data pointer area */
522 scb->hscb->dataptr = scb->sg_list->addr;
523 scb->hscb->datacnt = scb->sg_list->len;
524 } else {
525 scb->hscb->sgptr = ahc_htole32(SG_LIST_NULL);
526 scb->hscb->dataptr = 0;
527 scb->hscb->datacnt = 0;
528 }
529
530 scb->sg_count = nsegments;
531
532 ahc_lock(ahc, &s);
533
534 /*
535 * Last time we need to check if this SCB needs to
536 * be aborted.
537 */
538 if (xs->xs_status & XS_STS_DONE) {
539 if (nsegments != 0)
540 bus_dmamap_unload(ahc->buffer_dmat, scb->dmamap);
541 ahc_free_scb(ahc, scb);
542 ahc_unlock(ahc, &s);
543 scsipi_done(xs);
544 return;
545 }
546
547 tinfo = ahc_fetch_transinfo(ahc, ahc->channel,
548 SCSIID_OUR_ID(scb->hscb->scsiid),
549 SCSIID_TARGET(ahc, scb->hscb->scsiid),
550 &tstate);
551
552 mask = SCB_GET_TARGET_MASK(ahc, scb);
553 scb->hscb->scsirate = tinfo->scsirate;
554 scb->hscb->scsioffset = tinfo->curr.offset;
555
556 if ((tstate->ultraenb & mask) != 0)
557 scb->hscb->control |= ULTRAENB;
558
559 if ((tstate->discenable & mask) != 0)
560 scb->hscb->control |= DISCENB;
561
562 if (xs->xs_tag_type)
563 scb->hscb->control |= xs->xs_tag_type;
564
565 #if 1 /* This looks like it makes sense at first, but it can loop */
566 if ((xs->xs_control & XS_CTL_DISCOVERY) && (tinfo->goal.width == 0
567 && tinfo->goal.offset == 0
568 && tinfo->goal.ppr_options == 0)) {
569 scb->flags |= SCB_NEGOTIATE;
570 scb->hscb->control |= MK_MESSAGE;
571 } else
572 #endif
573 if ((tstate->auto_negotiate & mask) != 0) {
574 scb->flags |= SCB_AUTO_NEGOTIATE;
575 scb->hscb->control |= MK_MESSAGE;
576 }
577
578 LIST_INSERT_HEAD(&ahc->pending_scbs, scb, pending_links);
579
580 if (!(xs->xs_control & XS_CTL_POLL)) {
581 callout_reset(&scb->xs->xs_callout, xs->timeout > 1000000 ?
582 (xs->timeout / 1000) * hz : (xs->timeout * hz) / 1000,
583 ahc_timeout, scb);
584 }
585
586 /*
587 * We only allow one untagged transaction
588 * per target in the initiator role unless
589 * we are storing a full busy target *lun*
590 * table in SCB space.
591 */
592 if ((scb->hscb->control & (TARGET_SCB|TAG_ENB)) == 0
593 && (ahc->flags & AHC_SCB_BTT) == 0) {
594 struct scb_tailq *untagged_q;
595 int target_offset;
596
597 target_offset = SCB_GET_TARGET_OFFSET(ahc, scb);
598 untagged_q = &(ahc->untagged_queues[target_offset]);
599 TAILQ_INSERT_TAIL(untagged_q, scb, links.tqe);
600 scb->flags |= SCB_UNTAGGEDQ;
601 if (TAILQ_FIRST(untagged_q) != scb) {
602 ahc_unlock(ahc, &s);
603 return;
604 }
605 }
606 scb->flags |= SCB_ACTIVE;
607
608 if ((scb->flags & SCB_TARGET_IMMEDIATE) != 0) {
609 /* Define a mapping from our tag to the SCB. */
610 ahc->scb_data->scbindex[scb->hscb->tag] = scb;
611 ahc_pause(ahc);
612 if ((ahc->flags & AHC_PAGESCBS) == 0)
613 ahc_outb(ahc, SCBPTR, scb->hscb->tag);
614 ahc_outb(ahc, TARG_IMMEDIATE_SCB, scb->hscb->tag);
615 ahc_unpause(ahc);
616 } else {
617 ahc_queue_scb(ahc, scb);
618 }
619
620 if (!(xs->xs_control & XS_CTL_POLL)) {
621 ahc_unlock(ahc, &s);
622 return;
623 }
624
625 /*
626 * If we can't use interrupts, poll for completion
627 */
628 SC_DEBUG(xs->xs_periph, SCSIPI_DB3, ("cmd_poll\n"));
629 do {
630 if (ahc_poll(ahc, xs->timeout)) {
631 if (!(xs->xs_control & XS_CTL_SILENT))
632 printf("cmd fail\n");
633 ahc_timeout(scb);
634 break;
635 }
636 } while (!(xs->xs_status & XS_STS_DONE));
637 ahc_unlock(ahc, &s);
638
639 return;
640 }
641
642 static int
643 ahc_poll(struct ahc_softc *ahc, int wait)
644 {
645 while (--wait) {
646 DELAY(1000);
647 if (ahc_inb(ahc, INTSTAT) & INT_PEND)
648 break;
649 }
650
651 if (wait == 0) {
652 printf("%s: board is not responding\n", ahc_name(ahc));
653 return (EIO);
654 }
655
656 ahc_intr((void *)ahc);
657 return (0);
658 }
659
660 static void
661 ahc_setup_data(struct ahc_softc *ahc, struct scsipi_xfer *xs,
662 struct scb *scb)
663 {
664 struct hardware_scb *hscb;
665
666 hscb = scb->hscb;
667 xs->resid = xs->status = 0;
668
669 hscb->cdb_len = xs->cmdlen;
670 if (hscb->cdb_len > sizeof(hscb->cdb32)) {
671 u_long s;
672
673 ahc_set_transaction_status(scb, CAM_REQ_INVALID);
674 ahc_lock(ahc, &s);
675 ahc_free_scb(ahc, scb);
676 ahc_unlock(ahc, &s);
677 scsipi_done(xs);
678 return;
679 }
680
681 if (hscb->cdb_len > 12) {
682 memcpy(hscb->cdb32, xs->cmd, hscb->cdb_len);
683 scb->flags |= SCB_CDB32_PTR;
684 } else {
685 memcpy(hscb->shared_data.cdb, xs->cmd, hscb->cdb_len);
686 }
687
688 /* Only use S/G if there is a transfer */
689 if (xs->datalen) {
690 int error;
691
692 error = bus_dmamap_load(ahc->parent_dmat,
693 scb->dmamap, xs->data,
694 xs->datalen, NULL,
695 ((xs->xs_control & XS_CTL_NOSLEEP) ?
696 BUS_DMA_NOWAIT : BUS_DMA_WAITOK) |
697 BUS_DMA_STREAMING |
698 ((xs->xs_control & XS_CTL_DATA_IN) ?
699 BUS_DMA_READ : BUS_DMA_WRITE));
700 if (error) {
701 #ifdef AHC_DEBUG
702 printf("%s: in ahc_setup_data(): bus_dmamap_load() "
703 "= %d\n",
704 ahc_name(ahc), error);
705 #endif
706 xs->error = XS_RESOURCE_SHORTAGE;
707 scsipi_done(xs);
708 return;
709 }
710 ahc_execute_scb(scb,
711 scb->dmamap->dm_segs,
712 scb->dmamap->dm_nsegs);
713 } else {
714 ahc_execute_scb(scb, NULL, 0);
715 }
716 }
717
718 static void
719 ahc_set_recoveryscb(struct ahc_softc *ahc, struct scb *scb) {
720
721 if ((scb->flags & SCB_RECOVERY_SCB) == 0) {
722 struct scb *list_scb;
723
724 scb->flags |= SCB_RECOVERY_SCB;
725
726 /*
727 * Take all queued, but not sent SCBs out of the equation.
728 * Also ensure that no new CCBs are queued to us while we
729 * try to fix this problem.
730 */
731 scsipi_channel_freeze(&ahc->sc_channel, 1);
732 if (ahc->features & AHC_TWIN)
733 scsipi_channel_freeze(&ahc->sc_channel_b, 1);
734
735 /*
736 * Go through all of our pending SCBs and remove
737 * any scheduled timeouts for them. We will reschedule
738 * them after we've successfully fixed this problem.
739 */
740 LIST_FOREACH(list_scb, &ahc->pending_scbs, pending_links) {
741 callout_stop(&list_scb->xs->xs_callout);
742 }
743 }
744 }
745
/*
 * Per-transaction timeout handler (callout).  Attempts escalating
 * recovery: wait on another active SCB's timeout, send a bus device
 * reset to the stuck target, or as a last resort reset the whole bus.
 */
void
ahc_timeout(void *arg)
{
	struct scb *scb;
	struct ahc_softc *ahc;
	long s;
	int found;
	u_int last_phase;
	int target;
	int lun;
	int i;
	char channel;

	scb = (struct scb *)arg;
	ahc = (struct ahc_softc *)scb->ahc_softc;

	ahc_lock(ahc, &s);

	/* Quiesce the sequencer before poking at controller state. */
	ahc_pause_and_flushwork(ahc);

	if ((scb->flags & SCB_ACTIVE) == 0) {
		/* Previous timeout took care of me already */
		printf("%s: Timedout SCB already complete. "
		       "Interrupts may not be functioning.\n", ahc_name(ahc));
		ahc_unpause(ahc);
		ahc_unlock(ahc, &s);
		return;
	}

	target = SCB_GET_TARGET(ahc, scb);
	channel = SCB_GET_CHANNEL(ahc, scb);
	lun = SCB_GET_LUN(scb);

	ahc_print_path(ahc, scb);
	printf("SCB 0x%x - timed out\n", scb->hscb->tag);
	ahc_dump_card_state(ahc);
	last_phase = ahc_inb(ahc, LASTPHASE);
	if (scb->sg_count > 0) {
		/* Dump the S/G list for post-mortem analysis. */
		for (i = 0; i < scb->sg_count; i++) {
			printf("sg[%d] - Addr 0x%x : Length %d\n",
			       i,
			       scb->sg_list[i].addr,
			       scb->sg_list[i].len & AHC_SG_LEN_MASK);
		}
	}
	if (scb->flags & (SCB_DEVICE_RESET|SCB_ABORT)) {
		/*
		 * Been down this road before.
		 * Do a full bus reset.
		 */
bus_reset:
		ahc_set_transaction_status(scb, CAM_CMD_TIMEOUT);
		found = ahc_reset_channel(ahc, channel, /*Initiate Reset*/TRUE);
		printf("%s: Issued Channel %c Bus Reset. "
		       "%d SCBs aborted\n", ahc_name(ahc), channel, found);
	} else {
		/*
		 * If we are a target, transition to bus free and report
		 * the timeout.
		 *
		 * The target/initiator that is holding up the bus may not
		 * be the same as the one that triggered this timeout
		 * (different commands have different timeout lengths).
		 * If the bus is idle and we are acting as the initiator
		 * for this request, queue a BDR message to the timed out
		 * target.  Otherwise, if the timed out transaction is
		 * active:
		 *   Initiator transaction:
		 *	Stuff the message buffer with a BDR message and assert
		 *	ATN in the hopes that the target will let go of the bus
		 *	and go to the mesgout phase.  If this fails, we'll
		 *	get another timeout 2 seconds later which will attempt
		 *	a bus reset.
		 *
		 *   Target transaction:
		 *	Transition to BUS FREE and report the error.
		 *	It's good to be the target!
		 */
		u_int active_scb_index;
		u_int saved_scbptr;

		saved_scbptr = ahc_inb(ahc, SCBPTR);
		active_scb_index = ahc_inb(ahc, SCB_TAG);

		if ((ahc_inb(ahc, SEQ_FLAGS) & NOT_IDENTIFIED) == 0
		  && (active_scb_index < ahc->scb_data->numscbs)) {
			struct scb *active_scb;

			/*
			 * If the active SCB is not us, assume that
			 * the active SCB has a longer timeout than
			 * the timedout SCB, and wait for the active
			 * SCB to timeout.
			 */
			active_scb = ahc_lookup_scb(ahc, active_scb_index);
			if (active_scb != scb) {
				uint64_t newtimeout;

				ahc_print_path(ahc, scb);
				printf("Other SCB Timeout%s",
				       (scb->flags & SCB_OTHERTCL_TIMEOUT) != 0
				       ? " again\n" : "\n");
				scb->flags |= SCB_OTHERTCL_TIMEOUT;
				/*
				 * Rearm with the longer of the two timeouts,
				 * converted ms -> ticks (divide first for
				 * large values to avoid overflow).
				 */
				newtimeout = MAX(active_scb->xs->timeout,
						 scb->xs->timeout);
				callout_reset(&scb->xs->xs_callout,
				    newtimeout > 1000000 ?
				    (newtimeout / 1000) * hz :
				    (newtimeout * hz) / 1000,
				    ahc_timeout, scb);
				ahc_unpause(ahc);
				ahc_unlock(ahc, &s);
				return;
			}

			/* It's us */
			if ((scb->flags & SCB_TARGET_SCB) != 0) {

				/*
				 * Send back any queued up transactions
				 * and properly record the error condition.
				 */
				ahc_abort_scbs(ahc, SCB_GET_TARGET(ahc, scb),
					       SCB_GET_CHANNEL(ahc, scb),
					       SCB_GET_LUN(scb),
					       scb->hscb->tag,
					       ROLE_TARGET,
					       CAM_CMD_TIMEOUT);

				/* Will clear us from the bus */
				ahc_restart(ahc);
				ahc_unlock(ahc, &s);
				return;
			}

			/*
			 * Initiator transaction still on the bus: queue a
			 * BDR message and assert ATN; retry in 2 seconds.
			 */
			ahc_set_recoveryscb(ahc, active_scb);
			ahc_outb(ahc, MSG_OUT, HOST_MSG);
			ahc_outb(ahc, SCSISIGO, last_phase|ATNO);
			ahc_print_path(ahc, active_scb);
			printf("BDR message in message buffer\n");
			active_scb->flags |= SCB_DEVICE_RESET;
			callout_reset(&active_scb->xs->xs_callout,
			    2 * hz, ahc_timeout, active_scb);
			ahc_unpause(ahc);
		} else {
			int disconnected;

			/* XXX Shouldn't panic.  Just punt instead? */
			if ((scb->flags & SCB_TARGET_SCB) != 0)
				panic("Timed-out target SCB but bus idle");

			if (last_phase != P_BUSFREE
			  && (ahc_inb(ahc, SSTAT0) & TARGET) != 0) {
				/* XXX What happened to the SCB? */
				/* Hung target selection.  Goto busfree */
				printf("%s: Hung target selection\n",
				       ahc_name(ahc));
				ahc_restart(ahc);
				ahc_unlock(ahc, &s);
				return;
			}

			/* Disconnected if no longer sitting in the qinfifo. */
			if (ahc_search_qinfifo(ahc, target, channel, lun,
					       scb->hscb->tag, ROLE_INITIATOR,
					       /*status*/0, SEARCH_COUNT) > 0) {
				disconnected = FALSE;
			} else {
				disconnected = TRUE;
			}

			if (disconnected) {

				ahc_set_recoveryscb(ahc, scb);
				/*
				 * Actually re-queue this SCB in an attempt
				 * to select the device before it reconnects.
				 * In either case (selection or reselection),
				 * we will now issue a target reset to the
				 * timed-out device.
				 *
				 * Set the MK_MESSAGE control bit indicating
				 * that we desire to send a message.  We
				 * also set the disconnected flag since
				 * in the paging case there is no guarantee
				 * that our SCB control byte matches the
				 * version on the card.  We don't want the
				 * sequencer to abort the command thinking
				 * an unsolicited reselection occurred.
				 */
				scb->hscb->control |= MK_MESSAGE|DISCONNECTED;
				scb->flags |= SCB_DEVICE_RESET;

				/*
				 * Remove any cached copy of this SCB in the
				 * disconnected list in preparation for the
				 * queuing of our abort SCB.  We use the
				 * same element in the SCB, SCB_NEXT, for
				 * both the qinfifo and the disconnected list.
				 */
				ahc_search_disc_list(ahc, target, channel,
						     lun, scb->hscb->tag,
						     /*stop_on_first*/TRUE,
						     /*remove*/TRUE,
						     /*save_state*/FALSE);

				/*
				 * In the non-paging case, the sequencer will
				 * never re-reference the in-core SCB.
				 * To make sure we are notified during
				 * reslection, set the MK_MESSAGE flag in
				 * the card's copy of the SCB.
				 */
				if ((ahc->flags & AHC_PAGESCBS) == 0) {
					ahc_outb(ahc, SCBPTR, scb->hscb->tag);
					ahc_outb(ahc, SCB_CONTROL,
						 ahc_inb(ahc, SCB_CONTROL)
						| MK_MESSAGE);
				}

				/*
				 * Clear out any entries in the QINFIFO first
				 * so we are the next SCB for this target
				 * to run.
				 */
				ahc_search_qinfifo(ahc,
						   SCB_GET_TARGET(ahc, scb),
						   channel, SCB_GET_LUN(scb),
						   SCB_LIST_NULL,
						   ROLE_INITIATOR,
						   CAM_REQUEUE_REQ,
						   SEARCH_COMPLETE);
				ahc_print_path(ahc, scb);
				printf("Queuing a BDR SCB\n");
				ahc_qinfifo_requeue_tail(ahc, scb);
				ahc_outb(ahc, SCBPTR, saved_scbptr);
				callout_reset(&scb->xs->xs_callout, 2 * hz,
				    ahc_timeout, scb);
				ahc_unpause(ahc);
			} else {
				/* Go "immediatly" to the bus reset */
				/* This shouldn't happen */
				ahc_set_recoveryscb(ahc, scb);
				ahc_print_path(ahc, scb);
				printf("SCB %d: Immediate reset.  "
				       "Flags = 0x%x\n", scb->hscb->tag,
				       scb->flags);
				goto bus_reset;
			}
		}
	}
	ahc_unlock(ahc, &s);
}
998
999 void
1000 ahc_platform_set_tags(struct ahc_softc *ahc,
1001 struct ahc_devinfo *devinfo, int enable)
1002 {
1003 struct ahc_tmode_tstate *tstate;
1004
1005 ahc_fetch_transinfo(ahc, devinfo->channel, devinfo->our_scsiid,
1006 devinfo->target, &tstate);
1007
1008 if (enable)
1009 tstate->tagenable |= devinfo->target_mask;
1010 else
1011 tstate->tagenable &= ~devinfo->target_mask;
1012 }
1013
1014 int
1015 ahc_platform_alloc(struct ahc_softc *ahc, void *platform_arg)
1016 {
1017 if (sizeof(struct ahc_platform_data) == 0)
1018 return 0;
1019 ahc->platform_data = malloc(sizeof(struct ahc_platform_data), M_DEVBUF,
1020 M_NOWAIT);
1021 if (ahc->platform_data == NULL)
1022 return (ENOMEM);
1023 return (0);
1024 }
1025
1026 void
1027 ahc_platform_free(struct ahc_softc *ahc)
1028 {
1029 if (sizeof(struct ahc_platform_data) == 0)
1030 return;
1031 free(ahc->platform_data, M_DEVBUF);
1032 }
1033
/*
 * Softc ordering hook used by the core when sorting controllers;
 * this platform imposes no ordering, so everything compares equal.
 */
int
ahc_softc_comp(struct ahc_softc *lahc, struct ahc_softc *rahc)
{
	return 0;
}
1039
1040 int
1041 ahc_detach(struct device *self, int flags)
1042 {
1043 int rv = 0;
1044
1045 struct ahc_softc *ahc = (struct ahc_softc*)self;
1046
1047 ahc_intr_enable(ahc, FALSE);
1048 if (ahc->sc_child != NULL)
1049 rv = config_detach(ahc->sc_child, flags);
1050 if (rv == 0 && ahc->sc_child_b != NULL)
1051 rv = config_detach(ahc->sc_child_b, flags);
1052
1053 shutdownhook_disestablish(ahc->shutdown_hook);
1054
1055 ahc_free(ahc);
1056
1057 return (rv);
1058 }
1059
1060
/*
 * Deliver an asynchronous event from the core driver to the scsipi
 * midlayer: completed transfer-mode negotiations become
 * ASYNC_EVENT_XFER_MODE notifications, bus resets become
 * ASYNC_EVENT_RESET.  Other event codes are ignored.
 */
void
ahc_send_async(struct ahc_softc *ahc, char channel, u_int target, u_int lun,
	       ac_code code, void *opt_arg)
{
	struct ahc_tmode_tstate *tstate;
	struct ahc_initiator_tinfo *tinfo;
	struct ahc_devinfo devinfo;
	struct scsipi_channel *chan;
	struct scsipi_xfer_mode xm;

	chan = channel == 'B' ? &ahc->sc_channel_b : &ahc->sc_channel;
	switch (code) {
	case AC_TRANSFER_NEG:
		tinfo = ahc_fetch_transinfo(ahc, channel, ahc->our_id, target,
		    &tstate);
		ahc_compile_devinfo(&devinfo, ahc->our_id, target, lun,
		    channel, ROLE_UNKNOWN);
		/*
		 * Don't bother if negotiating. XXX?
		 */
		if (tinfo->curr.period != tinfo->goal.period
		    || tinfo->curr.width != tinfo->goal.width
		    || tinfo->curr.offset != tinfo->goal.offset
		    || tinfo->curr.ppr_options != tinfo->goal.ppr_options)
			break;
		/* Translate the negotiated settings into scsipi caps. */
		xm.xm_target = target;
		xm.xm_mode = 0;
		xm.xm_period = tinfo->curr.period;
		xm.xm_offset = tinfo->curr.offset;
		if (tinfo->curr.width == MSG_EXT_WDTR_BUS_16_BIT)
			xm.xm_mode |= PERIPH_CAP_WIDE16;
		if (tinfo->curr.period)
			xm.xm_mode |= PERIPH_CAP_SYNC;
		if (tstate->tagenable & devinfo.target_mask)
			xm.xm_mode |= PERIPH_CAP_TQING;
		if (tinfo->curr.ppr_options & MSG_EXT_PPR_DT_REQ)
			xm.xm_mode |= PERIPH_CAP_DT;
		scsipi_async_event(chan, ASYNC_EVENT_XFER_MODE, &xm);
		break;
	case AC_BUS_RESET:
		scsipi_async_event(chan, ASYNC_EVENT_RESET, NULL);
		/* FALLTHROUGH */
	case AC_SENT_BDR:
	default:
		break;
	}
}
Cache object: 0ec05a36f31baf30188f7e262f1689cc
|