/*-
 * Generic driver for the Advanced Systems Inc. SCSI controllers
 * Product specific probe and attach routines can be found in:
 *
 * i386/isa/adv_isa.c	ABP5140, ABP542, ABP5150, ABP842, ABP852
 * i386/eisa/adv_eisa.c	ABP742, ABP752
 * pci/adv_pci.c	ABP920, ABP930, ABP930U, ABP930UA, ABP940, ABP940U,
 *			ABP940UA, ABP950, ABP960, ABP960U, ABP960UA,
 *			ABP970, ABP970U
 *
 * Copyright (c) 1996-2000 Justin Gibbs.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*-
 * Ported from:
 * advansys.c - Linux Host Driver for AdvanSys SCSI Adapters
 *
 * Copyright (c) 1995-1997 Advanced System Products, Inc.
 * All Rights Reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that redistributions of source
 * code retain the above copyright notice and this comment without
 * modification.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: releng/6.0/sys/dev/advansys/advansys.c 146734 2005-05-29 04:42:30Z nyan $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>

#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/bus.h>
#include <sys/rman.h>

#include <cam/cam.h>
#include <cam/cam_ccb.h>
#include <cam/cam_sim.h>
#include <cam/cam_xpt_sim.h>
#include <cam/cam_xpt_periph.h>
#include <cam/cam_debug.h>

#include <cam/scsi/scsi_all.h>
#include <cam/scsi/scsi_message.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>

#include <dev/advansys/advansys.h>

static void	adv_action(struct cam_sim *sim, union ccb *ccb);
static void	adv_execute_ccb(void *arg, bus_dma_segment_t *dm_segs,
				int nsegments, int error);
static void	adv_poll(struct cam_sim *sim);
static void	adv_run_doneq(struct adv_softc *adv);
static struct adv_ccb_info *
		adv_alloc_ccb_info(struct adv_softc *adv);
static void	adv_destroy_ccb_info(struct adv_softc *adv,
				     struct adv_ccb_info *cinfo);
static __inline struct adv_ccb_info *
		adv_get_ccb_info(struct adv_softc *adv);
static __inline void	adv_free_ccb_info(struct adv_softc *adv,
					  struct adv_ccb_info *cinfo);
static __inline void	adv_set_state(struct adv_softc *adv, adv_state state);
static __inline void	adv_clear_state(struct adv_softc *adv, union ccb* ccb);
static void		adv_clear_state_really(struct adv_softc *adv,
					       union ccb* ccb);

static __inline struct adv_ccb_info *
adv_get_ccb_info(struct adv_softc *adv)
{
	struct adv_ccb_info *cinfo;
	int opri;

	opri = splcam();
	if ((cinfo = SLIST_FIRST(&adv->free_ccb_infos)) != NULL) {
		SLIST_REMOVE_HEAD(&adv->free_ccb_infos, links);
	} else {
		cinfo = adv_alloc_ccb_info(adv);
	}
	splx(opri);

	return (cinfo);
}

static __inline void
adv_free_ccb_info(struct adv_softc *adv, struct adv_ccb_info *cinfo)
{
	int opri;

	opri = splcam();
	cinfo->state = ACCB_FREE;
	SLIST_INSERT_HEAD(&adv->free_ccb_infos, cinfo, links);
	splx(opri);
}
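
/*
 * A note on synchronization: this driver predates fine-grained
 * kernel locking and instead serializes with its interrupt handler
 * via spl(9); every path that touches the free ccb_info list or
 * the softc state brackets the access with splcam()/splx(), as the
 * two helpers above do.
 */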

static __inline void
adv_set_state(struct adv_softc *adv, adv_state state)
{
	if (adv->state == 0)
		xpt_freeze_simq(adv->sim, /*count*/1);
	adv->state |= state;
}

static __inline void
adv_clear_state(struct adv_softc *adv, union ccb* ccb)
{
	if (adv->state != 0)
		adv_clear_state_really(adv, ccb);
}

static void
adv_clear_state_really(struct adv_softc *adv, union ccb* ccb)
{
	if ((adv->state & ADV_BUSDMA_BLOCK_CLEARED) != 0)
		adv->state &= ~(ADV_BUSDMA_BLOCK_CLEARED|ADV_BUSDMA_BLOCK);
	if ((adv->state & ADV_RESOURCE_SHORTAGE) != 0) {
		int openings;

		openings = adv->max_openings - adv->cur_active - ADV_MIN_FREE_Q;
		if (openings >= adv->openings_needed) {
			adv->state &= ~ADV_RESOURCE_SHORTAGE;
			adv->openings_needed = 0;
		}
	}

	if ((adv->state & ADV_IN_TIMEOUT) != 0) {
		struct adv_ccb_info *cinfo;

		cinfo = (struct adv_ccb_info *)ccb->ccb_h.ccb_cinfo_ptr;
		if ((cinfo->state & ACCB_RECOVERY_CCB) != 0) {
			struct ccb_hdr *ccb_h;

			/*
			 * We now traverse our list of pending CCBs
			 * and reinstate their timeouts.
			 */
			ccb_h = LIST_FIRST(&adv->pending_ccbs);
			while (ccb_h != NULL) {
				ccb_h->timeout_ch =
				    timeout(adv_timeout, (caddr_t)ccb_h,
					    (ccb_h->timeout * hz) / 1000);
				ccb_h = LIST_NEXT(ccb_h, sim_links.le);
			}
			adv->state &= ~ADV_IN_TIMEOUT;
			printf("%s: No longer in timeout\n", adv_name(adv));
		}
	}
	if (adv->state == 0)
		ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
}
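
/*
 * adv_set_state() and adv_clear_state() implement a simple freeze
 * count: setting the first state flag freezes the SIM queue, and
 * only when the last flag clears is the completing CCB tagged with
 * CAM_RELEASE_SIMQ so that the XPT unfreezes the queue when the
 * CCB is returned.
 */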

void
adv_map(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	bus_addr_t* physaddr;

	physaddr = (bus_addr_t*)arg;
	*physaddr = segs->ds_addr;
}

char *
adv_name(struct adv_softc *adv)
{
	static char name[10];

	snprintf(name, sizeof(name), "adv%d", adv->unit);
	return (name);
}
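
/*
 * Note that adv_name() formats into a static buffer, so the
 * returned string is only valid until the next call; callers use
 * it immediately in diagnostic printf()s.
 */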

static void
adv_action(struct cam_sim *sim, union ccb *ccb)
{
	struct adv_softc *adv;

	CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("adv_action\n"));

	adv = (struct adv_softc *)cam_sim_softc(sim);

	switch (ccb->ccb_h.func_code) {
	/* Common cases first */
	case XPT_SCSI_IO:	/* Execute the requested I/O operation */
	{
		struct ccb_hdr *ccb_h;
		struct ccb_scsiio *csio;
		struct adv_ccb_info *cinfo;

		ccb_h = &ccb->ccb_h;
		csio = &ccb->csio;
		cinfo = adv_get_ccb_info(adv);
		if (cinfo == NULL)
			panic("XXX Handle CCB info error!!!");

		ccb_h->ccb_cinfo_ptr = cinfo;
		cinfo->ccb = ccb;

		/* Only use S/G if there is a transfer */
		if ((ccb_h->flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
			if ((ccb_h->flags & CAM_SCATTER_VALID) == 0) {
				/*
				 * We've been given a pointer
				 * to a single buffer.
				 */
				if ((ccb_h->flags & CAM_DATA_PHYS) == 0) {
					int s;
					int error;

					s = splsoftvm();
					error =
					    bus_dmamap_load(adv->buffer_dmat,
							    cinfo->dmamap,
							    csio->data_ptr,
							    csio->dxfer_len,
							    adv_execute_ccb,
							    csio, /*flags*/0);
					if (error == EINPROGRESS) {
						/*
						 * So as to maintain ordering,
						 * freeze the controller queue
						 * until our mapping is
						 * returned.
						 */
						adv_set_state(adv,
							      ADV_BUSDMA_BLOCK);
					}
					splx(s);
				} else {
					struct bus_dma_segment seg;

					/* Pointer to physical buffer */
					seg.ds_addr =
					    (bus_addr_t)csio->data_ptr;
					seg.ds_len = csio->dxfer_len;
					adv_execute_ccb(csio, &seg, 1, 0);
				}
			} else {
				struct bus_dma_segment *segs;

				if ((ccb_h->flags & CAM_DATA_PHYS) != 0)
					panic("adv_setup_data - Physical "
					      "segment pointers unsupported");

				if ((ccb_h->flags & CAM_SG_LIST_PHYS) == 0)
					panic("adv_setup_data - Virtual "
					      "segment addresses unsupported");

				/* Just use the segments provided */
				segs = (struct bus_dma_segment *)csio->data_ptr;
				adv_execute_ccb(ccb, segs, csio->sglist_cnt, 0);
			}
		} else {
			adv_execute_ccb(ccb, NULL, 0, 0);
		}
		break;
	}
	case XPT_RESET_DEV:	/* Bus Device Reset the specified SCSI device */
	case XPT_TARGET_IO:	/* Execute target I/O request */
	case XPT_ACCEPT_TARGET_IO:	/* Accept Host Target Mode CDB */
	case XPT_CONT_TARGET_IO:	/* Continue Host Target I/O Connection*/
	case XPT_EN_LUN:		/* Enable LUN as a target */
	case XPT_ABORT:			/* Abort the specified CCB */
		/* XXX Implement */
		ccb->ccb_h.status = CAM_REQ_INVALID;
		xpt_done(ccb);
		break;
	case XPT_SET_TRAN_SETTINGS:
	{
		struct ccb_trans_settings *cts;
		target_bit_vector targ_mask;
		struct adv_transinfo *tconf;
		u_int update_type;
		int s;

		cts = &ccb->cts;
		targ_mask = ADV_TID_TO_TARGET_MASK(cts->ccb_h.target_id);
		update_type = 0;

		/*
		 * The user must specify exactly one of the current
		 * or user settings to change.
		 */
		if (((cts->flags & CCB_TRANS_CURRENT_SETTINGS) != 0)
		 && ((cts->flags & CCB_TRANS_USER_SETTINGS) == 0)) {
			tconf = &adv->tinfo[cts->ccb_h.target_id].current;
			update_type |= ADV_TRANS_GOAL;
		} else if (((cts->flags & CCB_TRANS_USER_SETTINGS) != 0)
			&& ((cts->flags & CCB_TRANS_CURRENT_SETTINGS) == 0)) {
			tconf = &adv->tinfo[cts->ccb_h.target_id].user;
			update_type |= ADV_TRANS_USER;
		} else {
			ccb->ccb_h.status = CAM_REQ_INVALID;
			break;
		}

		s = splcam();

		if ((update_type & ADV_TRANS_GOAL) != 0) {
			if ((cts->valid & CCB_TRANS_DISC_VALID) != 0) {
				if ((cts->flags & CCB_TRANS_DISC_ENB) != 0)
					adv->disc_enable |= targ_mask;
				else
					adv->disc_enable &= ~targ_mask;
				adv_write_lram_8(adv, ADVV_DISC_ENABLE_B,
						 adv->disc_enable);
			}

			if ((cts->valid & CCB_TRANS_TQ_VALID) != 0) {
				if ((cts->flags & CCB_TRANS_TAG_ENB) != 0)
					adv->cmd_qng_enabled |= targ_mask;
				else
					adv->cmd_qng_enabled &= ~targ_mask;
			}
		}

		if ((update_type & ADV_TRANS_USER) != 0) {
			if ((cts->valid & CCB_TRANS_DISC_VALID) != 0) {
				if ((cts->flags & CCB_TRANS_DISC_ENB) != 0)
					adv->user_disc_enable |= targ_mask;
				else
					adv->user_disc_enable &= ~targ_mask;
			}

			if ((cts->valid & CCB_TRANS_TQ_VALID) != 0) {
				if ((cts->flags & CCB_TRANS_TAG_ENB) != 0)
					adv->user_cmd_qng_enabled |= targ_mask;
				else
					adv->user_cmd_qng_enabled &= ~targ_mask;
			}
		}

		/*
		 * If the user specifies either the sync rate, or offset,
		 * but not both, the unspecified parameter defaults to its
		 * current value in transfer negotiations.
		 */
		if (((cts->valid & CCB_TRANS_SYNC_RATE_VALID) != 0)
		 || ((cts->valid & CCB_TRANS_SYNC_OFFSET_VALID) != 0)) {
			/*
			 * If the user provided a sync rate but no offset,
			 * use the current offset.
			 */
			if ((cts->valid & CCB_TRANS_SYNC_OFFSET_VALID) == 0)
				cts->sync_offset = tconf->offset;

			/*
			 * If the user provided an offset but no sync rate,
			 * use the current sync rate.
			 */
			if ((cts->valid & CCB_TRANS_SYNC_RATE_VALID) == 0)
				cts->sync_period = tconf->period;

			adv_period_offset_to_sdtr(adv, &cts->sync_period,
						  &cts->sync_offset,
						  cts->ccb_h.target_id);

			adv_set_syncrate(adv, /*struct cam_path */NULL,
					 cts->ccb_h.target_id, cts->sync_period,
					 cts->sync_offset, update_type);
		}

		splx(s);
		ccb->ccb_h.status = CAM_REQ_CMP;
		xpt_done(ccb);
		break;
	}
	case XPT_GET_TRAN_SETTINGS:
	/* Get default/user set transfer settings for the target */
	{
		struct ccb_trans_settings *cts;
		struct adv_transinfo *tconf;
		target_bit_vector target_mask;
		int s;

		cts = &ccb->cts;
		target_mask = ADV_TID_TO_TARGET_MASK(cts->ccb_h.target_id);

		cts->flags &= ~(CCB_TRANS_DISC_ENB|CCB_TRANS_TAG_ENB);

		s = splcam();
		if ((cts->flags & CCB_TRANS_CURRENT_SETTINGS) != 0) {
			tconf = &adv->tinfo[cts->ccb_h.target_id].current;
			if ((adv->disc_enable & target_mask) != 0)
				cts->flags |= CCB_TRANS_DISC_ENB;
			if ((adv->cmd_qng_enabled & target_mask) != 0)
				cts->flags |= CCB_TRANS_TAG_ENB;
		} else {
			tconf = &adv->tinfo[cts->ccb_h.target_id].user;
			if ((adv->user_disc_enable & target_mask) != 0)
				cts->flags |= CCB_TRANS_DISC_ENB;
			if ((adv->user_cmd_qng_enabled & target_mask) != 0)
				cts->flags |= CCB_TRANS_TAG_ENB;
		}

		cts->sync_period = tconf->period;
		cts->sync_offset = tconf->offset;
		splx(s);

		cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
		cts->valid = CCB_TRANS_SYNC_RATE_VALID
			   | CCB_TRANS_SYNC_OFFSET_VALID
			   | CCB_TRANS_BUS_WIDTH_VALID
			   | CCB_TRANS_DISC_VALID
			   | CCB_TRANS_TQ_VALID;
		ccb->ccb_h.status = CAM_REQ_CMP;
		xpt_done(ccb);
		break;
	}
	case XPT_CALC_GEOMETRY:
	{
		int extended;

		extended = (adv->control & ADV_CNTL_BIOS_GT_1GB) != 0;
		cam_calc_geometry(&ccb->ccg, extended);
		xpt_done(ccb);
		break;
	}
	case XPT_RESET_BUS:	/* Reset the specified SCSI bus */
	{
		int s;

		s = splcam();
		adv_stop_execution(adv);
		adv_reset_bus(adv, /*initiate_reset*/TRUE);
		adv_start_execution(adv);
		splx(s);

		ccb->ccb_h.status = CAM_REQ_CMP;
		xpt_done(ccb);
		break;
	}
	case XPT_TERM_IO:	/* Terminate the I/O process */
		/* XXX Implement */
		ccb->ccb_h.status = CAM_REQ_INVALID;
		xpt_done(ccb);
		break;
	case XPT_PATH_INQ:	/* Path routing inquiry */
	{
		struct ccb_pathinq *cpi = &ccb->cpi;

		cpi->version_num = 1; /* XXX??? */
		cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE;
		cpi->target_sprt = 0;
		cpi->hba_misc = 0;
		cpi->hba_eng_cnt = 0;
		cpi->max_target = 7;
		cpi->max_lun = 7;
		cpi->initiator_id = adv->scsi_id;
		cpi->bus_id = cam_sim_bus(sim);
		cpi->base_transfer_speed = 3300;
		strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
		strncpy(cpi->hba_vid, "Advansys", HBA_IDLEN);
		strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
		cpi->unit_number = cam_sim_unit(sim);
		cpi->ccb_h.status = CAM_REQ_CMP;
		xpt_done(ccb);
		break;
	}
	default:
		ccb->ccb_h.status = CAM_REQ_INVALID;
		xpt_done(ccb);
		break;
	}
}

/*
 * Currently, the output of bus_dmamap_load suits our needs just
 * fine, but should it change, we'd need to do something here.
 */
#define adv_fixup_dmasegs(adv, dm_segs) (struct adv_sg_entry *)(dm_segs)

static void
adv_execute_ccb(void *arg, bus_dma_segment_t *dm_segs,
		int nsegments, int error)
{
	struct ccb_scsiio *csio;
	struct ccb_hdr *ccb_h;
	struct cam_sim *sim;
	struct adv_softc *adv;
	struct adv_ccb_info *cinfo;
	struct adv_scsi_q scsiq;
	struct adv_sg_head sghead;
	int s;

	csio = (struct ccb_scsiio *)arg;
	ccb_h = &csio->ccb_h;
	sim = xpt_path_sim(ccb_h->path);
	adv = (struct adv_softc *)cam_sim_softc(sim);
	cinfo = (struct adv_ccb_info *)csio->ccb_h.ccb_cinfo_ptr;

	/*
	 * Setup our done routine to release the simq on
	 * the next ccb that completes.
	 */
	if ((adv->state & ADV_BUSDMA_BLOCK) != 0)
		adv->state |= ADV_BUSDMA_BLOCK_CLEARED;

	if ((ccb_h->flags & CAM_CDB_POINTER) != 0) {
		if ((ccb_h->flags & CAM_CDB_PHYS) == 0) {
			/* XXX Need phystovirt!!!! */
			/* How about pmap_kenter??? */
			scsiq.cdbptr = csio->cdb_io.cdb_ptr;
		} else {
			scsiq.cdbptr = csio->cdb_io.cdb_ptr;
		}
	} else {
		scsiq.cdbptr = csio->cdb_io.cdb_bytes;
	}
	/*
	 * Build up the request
	 */
	scsiq.q1.status = 0;
	scsiq.q1.q_no = 0;
	scsiq.q1.cntl = 0;
	scsiq.q1.sg_queue_cnt = 0;
	scsiq.q1.target_id = ADV_TID_TO_TARGET_MASK(ccb_h->target_id);
	scsiq.q1.target_lun = ccb_h->target_lun;
	scsiq.q1.sense_len = csio->sense_len;
	scsiq.q1.extra_bytes = 0;
	scsiq.q2.ccb_index = cinfo - adv->ccb_infos;
	scsiq.q2.target_ix = ADV_TIDLUN_TO_IX(ccb_h->target_id,
					      ccb_h->target_lun);
	scsiq.q2.flag = 0;
	scsiq.q2.cdb_len = csio->cdb_len;
	if ((ccb_h->flags & CAM_TAG_ACTION_VALID) != 0)
		scsiq.q2.tag_code = csio->tag_action;
	else
		scsiq.q2.tag_code = 0;
	scsiq.q2.vm_id = 0;

	if (nsegments != 0) {
		bus_dmasync_op_t op;

		scsiq.q1.data_addr = dm_segs->ds_addr;
		scsiq.q1.data_cnt = dm_segs->ds_len;
		if (nsegments > 1) {
			scsiq.q1.cntl |= QC_SG_HEAD;
			sghead.entry_cnt
			    = sghead.entry_to_copy
			    = nsegments;
			sghead.res = 0;
			sghead.sg_list = adv_fixup_dmasegs(adv, dm_segs);
			scsiq.sg_head = &sghead;
		} else {
			scsiq.sg_head = NULL;
		}
		if ((ccb_h->flags & CAM_DIR_MASK) == CAM_DIR_IN)
			op = BUS_DMASYNC_PREREAD;
		else
			op = BUS_DMASYNC_PREWRITE;
		bus_dmamap_sync(adv->buffer_dmat, cinfo->dmamap, op);
	} else {
		scsiq.q1.data_addr = 0;
		scsiq.q1.data_cnt = 0;
		scsiq.sg_head = NULL;
	}

	s = splcam();

	/*
	 * Last chance to check whether this CCB needs to
	 * be aborted.
	 */
	if (ccb_h->status != CAM_REQ_INPROG) {
		if (nsegments != 0)
			bus_dmamap_unload(adv->buffer_dmat, cinfo->dmamap);
		adv_clear_state(adv, (union ccb *)csio);
		adv_free_ccb_info(adv, cinfo);
		xpt_done((union ccb *)csio);
		splx(s);
		return;
	}

	if (adv_execute_scsi_queue(adv, &scsiq, csio->dxfer_len) != 0) {
		/* Temporary resource shortage */
		adv_set_state(adv, ADV_RESOURCE_SHORTAGE);
		if (nsegments != 0)
			bus_dmamap_unload(adv->buffer_dmat, cinfo->dmamap);
		csio->ccb_h.status = CAM_REQUEUE_REQ;
		adv_clear_state(adv, (union ccb *)csio);
		adv_free_ccb_info(adv, cinfo);
		xpt_done((union ccb *)csio);
		splx(s);
		return;
	}
	cinfo->state |= ACCB_ACTIVE;
	ccb_h->status |= CAM_SIM_QUEUED;
	LIST_INSERT_HEAD(&adv->pending_ccbs, ccb_h, sim_links.le);
	/* Schedule our timeout */
	ccb_h->timeout_ch =
	    timeout(adv_timeout, csio, (ccb_h->timeout * hz)/1000);
	splx(s);
}
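
/*
 * CCB timeouts are specified in milliseconds while timeout(9)
 * takes ticks, hence the (ccb_h->timeout * hz) / 1000 conversion
 * above; e.g. a 60000ms CCB timeout with hz=100 arms adv_timeout()
 * 6000 ticks out.
 */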

static struct adv_ccb_info *
adv_alloc_ccb_info(struct adv_softc *adv)
{
	int error;
	struct adv_ccb_info *cinfo;

	cinfo = &adv->ccb_infos[adv->ccb_infos_allocated];
	cinfo->state = ACCB_FREE;
	error = bus_dmamap_create(adv->buffer_dmat, /*flags*/0,
				  &cinfo->dmamap);
	if (error != 0) {
		printf("%s: Unable to allocate CCB info "
		       "dmamap - error %d\n", adv_name(adv), error);
		return (NULL);
	}
	adv->ccb_infos_allocated++;
	return (cinfo);
}

static void
adv_destroy_ccb_info(struct adv_softc *adv, struct adv_ccb_info *cinfo)
{
	bus_dmamap_destroy(adv->buffer_dmat, cinfo->dmamap);
}

void
adv_timeout(void *arg)
{
	int s;
	union ccb *ccb;
	struct adv_softc *adv;
	struct adv_ccb_info *cinfo;

	ccb = (union ccb *)arg;
	adv = (struct adv_softc *)xpt_path_sim(ccb->ccb_h.path)->softc;
	cinfo = (struct adv_ccb_info *)ccb->ccb_h.ccb_cinfo_ptr;

	xpt_print_path(ccb->ccb_h.path);
	printf("Timed out\n");

	s = splcam();
	/* Have we been taken care of already?? */
	if (cinfo == NULL || cinfo->state == ACCB_FREE) {
		splx(s);
		return;
	}

	adv_stop_execution(adv);

	if ((cinfo->state & ACCB_ABORT_QUEUED) == 0) {
		struct ccb_hdr *ccb_h;

		/*
		 * In order to simplify the recovery process, we ask the XPT
		 * layer to halt the queue of new transactions and we traverse
		 * the list of pending CCBs and remove their timeouts. This
		 * means that the driver attempts to clear only one error
		 * condition at a time. In general, timeouts that occur
		 * close together are related anyway, so there is no benefit
		 * in attempting to handle errors in parallel. Timeouts will
		 * be reinstated when the recovery process ends.
		 */
		adv_set_state(adv, ADV_IN_TIMEOUT);

		/* This CCB is the CCB representing our recovery actions */
		cinfo->state |= ACCB_RECOVERY_CCB|ACCB_ABORT_QUEUED;

		ccb_h = LIST_FIRST(&adv->pending_ccbs);
		while (ccb_h != NULL) {
			untimeout(adv_timeout, ccb_h, ccb_h->timeout_ch);
			ccb_h = LIST_NEXT(ccb_h, sim_links.le);
		}

		/* XXX Should send a BDR */
		/* Attempt an abort as our first tack */
		xpt_print_path(ccb->ccb_h.path);
		printf("Attempting abort\n");
		adv_abort_ccb(adv, ccb->ccb_h.target_id,
			      ccb->ccb_h.target_lun, ccb,
			      CAM_CMD_TIMEOUT, /*queued_only*/FALSE);
		ccb->ccb_h.timeout_ch =
		    timeout(adv_timeout, ccb, 2 * hz);
	} else {
		/* Our attempt to perform an abort failed, go for a reset */
		xpt_print_path(ccb->ccb_h.path);
		printf("Resetting bus\n");
		ccb->ccb_h.status &= ~CAM_STATUS_MASK;
		ccb->ccb_h.status |= CAM_CMD_TIMEOUT;
		adv_reset_bus(adv, /*initiate_reset*/TRUE);
	}
	adv_start_execution(adv);
	splx(s);
}
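
/*
 * Recovery thus escalates in two steps: the first timeout on a CCB
 * queues an abort and rearms a short two second timer, and if that
 * timer also fires, ACCB_ABORT_QUEUED is already set and we fall
 * through to a full bus reset.
 */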

struct adv_softc *
adv_alloc(device_t dev, bus_space_tag_t tag, bus_space_handle_t bsh)
{
	struct adv_softc *adv = device_get_softc(dev);

	/*
	 * Allocate a storage area for us
	 */
	LIST_INIT(&adv->pending_ccbs);
	SLIST_INIT(&adv->free_ccb_infos);
	adv->dev = dev;
	adv->unit = device_get_unit(dev);
	adv->tag = tag;
	adv->bsh = bsh;

	return(adv);
}

void
adv_free(struct adv_softc *adv)
{
	switch (adv->init_level) {
	case 6:
	{
		struct adv_ccb_info *cinfo;

		while ((cinfo = SLIST_FIRST(&adv->free_ccb_infos)) != NULL) {
			SLIST_REMOVE_HEAD(&adv->free_ccb_infos, links);
			adv_destroy_ccb_info(adv, cinfo);
		}

		bus_dmamap_unload(adv->sense_dmat, adv->sense_dmamap);
	}
	case 5:
		bus_dmamem_free(adv->sense_dmat, adv->sense_buffers,
				adv->sense_dmamap);
	case 4:
		bus_dma_tag_destroy(adv->sense_dmat);
	case 3:
		bus_dma_tag_destroy(adv->buffer_dmat);
	case 2:
		bus_dma_tag_destroy(adv->parent_dmat);
	case 1:
		if (adv->ccb_infos != NULL)
			free(adv->ccb_infos, M_DEVBUF);
	case 0:
		break;
	}
}
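
/*
 * adv_free() relies on the switch above falling through from the
 * recorded init_level: the level counts how far adv_attach() got,
 * so entering at that case releases resources in the reverse order
 * of their allocation.
 */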

int
adv_init(struct adv_softc *adv)
{
	struct adv_eeprom_config eeprom_config;
	int checksum, i;
	int max_sync;
	u_int16_t config_lsw;
	u_int16_t config_msw;

	adv_lib_init(adv);

	/*
	 * Stop script execution.
	 */
	adv_write_lram_16(adv, ADV_HALTCODE_W, 0x00FE);
	adv_stop_execution(adv);
	if (adv_stop_chip(adv) == 0 || adv_is_chip_halted(adv) == 0) {
		printf("adv%d: Unable to halt adapter. Initialization "
		       "failed\n", adv->unit);
		return (1);
	}
	ADV_OUTW(adv, ADV_REG_PROG_COUNTER, ADV_MCODE_START_ADDR);
	if (ADV_INW(adv, ADV_REG_PROG_COUNTER) != ADV_MCODE_START_ADDR) {
		printf("adv%d: Unable to set program counter. Initialization "
		       "failed\n", adv->unit);
		return (1);
	}

	config_msw = ADV_INW(adv, ADV_CONFIG_MSW);
	config_lsw = ADV_INW(adv, ADV_CONFIG_LSW);

	if ((config_msw & ADV_CFG_MSW_CLR_MASK) != 0) {
		config_msw &= ~ADV_CFG_MSW_CLR_MASK;
		/*
		 * XXX The Linux code flags this as an error,
		 * but what should we report to the user???
		 * It seems that clearing the config register
		 * makes this error recoverable.
		 */
		ADV_OUTW(adv, ADV_CONFIG_MSW, config_msw);
	}

	/* Suck in the configuration from the EEProm */
	checksum = adv_get_eeprom_config(adv, &eeprom_config);

	if (ADV_INW(adv, ADV_CHIP_STATUS) & ADV_CSW_AUTO_CONFIG) {
		/*
		 * XXX The Linux code sets a warning level for this
		 * condition, yet nothing of meaning is printed to
		 * the user. What does this mean???
		 */
		if (adv->chip_version == 3) {
			if (eeprom_config.cfg_lsw != config_lsw)
				eeprom_config.cfg_lsw = config_lsw;
			if (eeprom_config.cfg_msw != config_msw) {
				eeprom_config.cfg_msw = config_msw;
			}
		}
	}
	if (checksum == eeprom_config.chksum) {
		/* Range/Sanity checking */
		if (eeprom_config.max_total_qng < ADV_MIN_TOTAL_QNG) {
			eeprom_config.max_total_qng = ADV_MIN_TOTAL_QNG;
		}
		if (eeprom_config.max_total_qng > ADV_MAX_TOTAL_QNG) {
			eeprom_config.max_total_qng = ADV_MAX_TOTAL_QNG;
		}
		if (eeprom_config.max_tag_qng > eeprom_config.max_total_qng) {
			eeprom_config.max_tag_qng = eeprom_config.max_total_qng;
		}
		if (eeprom_config.max_tag_qng < ADV_MIN_TAG_Q_PER_DVC) {
			eeprom_config.max_tag_qng = ADV_MIN_TAG_Q_PER_DVC;
		}
		adv->max_openings = eeprom_config.max_total_qng;
		adv->user_disc_enable = eeprom_config.disc_enable;
		adv->user_cmd_qng_enabled = eeprom_config.use_cmd_qng;
		adv->isa_dma_speed = EEPROM_DMA_SPEED(eeprom_config);
		adv->scsi_id = EEPROM_SCSIID(eeprom_config) & ADV_MAX_TID;
		EEPROM_SET_SCSIID(eeprom_config, adv->scsi_id);
		adv->control = eeprom_config.cntl;
		for (i = 0; i <= ADV_MAX_TID; i++) {
			u_int8_t sync_data;

			if ((eeprom_config.init_sdtr & (0x1 << i)) == 0)
				sync_data = 0;
			else
				sync_data = eeprom_config.sdtr_data[i];
			adv_sdtr_to_period_offset(adv,
						  sync_data,
						  &adv->tinfo[i].user.period,
						  &adv->tinfo[i].user.offset,
						  i);
		}
		config_lsw = eeprom_config.cfg_lsw;
		eeprom_config.cfg_msw = config_msw;
	} else {
		u_int8_t sync_data;

		printf("adv%d: Warning: EEPROM checksum mismatch. "
		       "Using default device parameters\n", adv->unit);

		/* Set reasonable defaults since we can't read the EEPROM */
		adv->isa_dma_speed = /*ADV_DEF_ISA_DMA_SPEED*/1;
		adv->max_openings = ADV_DEF_MAX_TOTAL_QNG;
		adv->disc_enable = TARGET_BIT_VECTOR_SET;
		adv->user_disc_enable = TARGET_BIT_VECTOR_SET;
		adv->cmd_qng_enabled = TARGET_BIT_VECTOR_SET;
		adv->user_cmd_qng_enabled = TARGET_BIT_VECTOR_SET;
		adv->scsi_id = 7;
		adv->control = 0xFFFF;

		if (adv->chip_version == ADV_CHIP_VER_PCI_ULTRA_3050)
			/* Default to no Ultra to support the 3030 */
			adv->control &= ~ADV_CNTL_SDTR_ENABLE_ULTRA;
		sync_data = ADV_DEF_SDTR_OFFSET | (ADV_DEF_SDTR_INDEX << 4);
		for (i = 0; i <= ADV_MAX_TID; i++) {
			adv_sdtr_to_period_offset(adv, sync_data,
						  &adv->tinfo[i].user.period,
						  &adv->tinfo[i].user.offset,
						  i);
		}
		config_lsw |= ADV_CFG_LSW_SCSI_PARITY_ON;
	}
	config_msw &= ~ADV_CFG_MSW_CLR_MASK;
	config_lsw |= ADV_CFG_LSW_HOST_INT_ON;
	if ((adv->type & (ADV_PCI|ADV_ULTRA)) == (ADV_PCI|ADV_ULTRA)
	 && (adv->control & ADV_CNTL_SDTR_ENABLE_ULTRA) == 0)
		/* 25ns or 10MHz */
		max_sync = 25;
	else
		/* Unlimited */
		max_sync = 0;
	for (i = 0; i <= ADV_MAX_TID; i++) {
		if (adv->tinfo[i].user.period < max_sync)
			adv->tinfo[i].user.period = max_sync;
	}

	if (adv_test_external_lram(adv) == 0) {
		if ((adv->type & (ADV_PCI|ADV_ULTRA)) == (ADV_PCI|ADV_ULTRA)) {
			eeprom_config.max_total_qng =
			    ADV_MAX_PCI_ULTRA_INRAM_TOTAL_QNG;
			eeprom_config.max_tag_qng =
			    ADV_MAX_PCI_ULTRA_INRAM_TAG_QNG;
		} else {
			eeprom_config.cfg_msw |= 0x0800;
			config_msw |= 0x0800;
			eeprom_config.max_total_qng =
			    ADV_MAX_PCI_INRAM_TOTAL_QNG;
			eeprom_config.max_tag_qng = ADV_MAX_INRAM_TAG_QNG;
		}
		adv->max_openings = eeprom_config.max_total_qng;
	}
	ADV_OUTW(adv, ADV_CONFIG_MSW, config_msw);
	ADV_OUTW(adv, ADV_CONFIG_LSW, config_lsw);
#if 0
	/*
	 * Don't write the eeprom data back for now.
	 * I'd rather not mess up the user's card. We also don't
	 * fully sanitize the eeprom settings above for the write-back
	 * to be 100% correct.
	 */
	if (adv_set_eeprom_config(adv, &eeprom_config) != 0)
		printf("%s: WARNING! Failure writing to EEPROM.\n",
		       adv_name(adv));
#endif

	adv_set_chip_scsiid(adv, adv->scsi_id);
	if (adv_init_lram_and_mcode(adv))
		return (1);

	adv->disc_enable = adv->user_disc_enable;

	adv_write_lram_8(adv, ADVV_DISC_ENABLE_B, adv->disc_enable);
	for (i = 0; i <= ADV_MAX_TID; i++) {
		/*
		 * Start off in async mode.
		 */
		adv_set_syncrate(adv, /*struct cam_path */NULL,
				 i, /*period*/0, /*offset*/0,
				 ADV_TRANS_CUR);
		/*
		 * Enable the use of tagged commands on all targets.
		 * This allows the kernel driver to make up its own mind
		 * as it sees fit to tag queue instead of having the
		 * firmware try to second guess the tag_code settings.
		 */
		adv_write_lram_8(adv, ADVV_MAX_DVC_QNG_BEG + i,
				 adv->max_openings);
	}
	adv_write_lram_8(adv, ADVV_USE_TAGGED_QNG_B, TARGET_BIT_VECTOR_SET);
	adv_write_lram_8(adv, ADVV_CAN_TAGGED_QNG_B, TARGET_BIT_VECTOR_SET);
	printf("adv%d: AdvanSys %s Host Adapter, SCSI ID %d, queue depth %d\n",
	       adv->unit, (adv->type & ADV_ULTRA) && (max_sync == 0)
			  ? "Ultra SCSI" : "SCSI",
	       adv->scsi_id, adv->max_openings);
	return (0);
}

void
adv_intr(void *arg)
{
	struct adv_softc *adv;
	u_int16_t chipstat;
	u_int16_t saved_ram_addr;
	u_int8_t ctrl_reg;
	u_int8_t saved_ctrl_reg;
	u_int8_t host_flag;

	adv = (struct adv_softc *)arg;

	chipstat = ADV_INW(adv, ADV_CHIP_STATUS);

	/* Is it for us? */
	if ((chipstat & (ADV_CSW_INT_PENDING|ADV_CSW_SCSI_RESET_LATCH)) == 0)
		return;

	ctrl_reg = ADV_INB(adv, ADV_CHIP_CTRL);
	saved_ctrl_reg = ctrl_reg & (~(ADV_CC_SCSI_RESET | ADV_CC_CHIP_RESET |
				       ADV_CC_SINGLE_STEP | ADV_CC_DIAG |
				       ADV_CC_TEST));

	if ((chipstat & (ADV_CSW_SCSI_RESET_LATCH|ADV_CSW_SCSI_RESET_ACTIVE))) {
		printf("Detected Bus Reset\n");
		adv_reset_bus(adv, /*initiate_reset*/FALSE);
		return;
	}

	if ((chipstat & ADV_CSW_INT_PENDING) != 0) {
		saved_ram_addr = ADV_INW(adv, ADV_LRAM_ADDR);
		host_flag = adv_read_lram_8(adv, ADVV_HOST_FLAG_B);
		adv_write_lram_8(adv, ADVV_HOST_FLAG_B,
				 host_flag | ADV_HOST_FLAG_IN_ISR);

		adv_ack_interrupt(adv);

		if ((chipstat & ADV_CSW_HALTED) != 0
		 && (ctrl_reg & ADV_CC_SINGLE_STEP) != 0) {
			adv_isr_chip_halted(adv);
			saved_ctrl_reg &= ~ADV_CC_HALT;
		} else {
			adv_run_doneq(adv);
		}
		ADV_OUTW(adv, ADV_LRAM_ADDR, saved_ram_addr);
#ifdef DIAGNOSTIC
		if (ADV_INW(adv, ADV_LRAM_ADDR) != saved_ram_addr)
			panic("adv_intr: Unable to set LRAM addr");
#endif
		adv_write_lram_8(adv, ADVV_HOST_FLAG_B, host_flag);
	}

	ADV_OUTB(adv, ADV_CHIP_CTRL, saved_ctrl_reg);
}
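
/*
 * The LRAM address register and the ADVV_HOST_FLAG_B byte are
 * shared with the microcode, so the handler above saves both on
 * entry, advertises ADV_HOST_FLAG_IN_ISR while it works, and
 * restores them before writing back the sanitized control
 * register.
 */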

static void
adv_run_doneq(struct adv_softc *adv)
{
	struct adv_q_done_info scsiq;
	u_int doneq_head;
	u_int done_qno;

	doneq_head = adv_read_lram_16(adv, ADVV_DONE_Q_TAIL_W) & 0xFF;
	done_qno = adv_read_lram_8(adv, ADV_QNO_TO_QADDR(doneq_head)
				   + ADV_SCSIQ_B_FWD);
	while (done_qno != ADV_QLINK_END) {
		union ccb* ccb;
		struct adv_ccb_info *cinfo;
		u_int done_qaddr;
		u_int sg_queue_cnt;
		int aborted;

		done_qaddr = ADV_QNO_TO_QADDR(done_qno);

		/* Pull status from this request */
		sg_queue_cnt = adv_copy_lram_doneq(adv, done_qaddr, &scsiq,
						   adv->max_dma_count);

		/* Mark it as free */
		adv_write_lram_8(adv, done_qaddr + ADV_SCSIQ_B_STATUS,
				 scsiq.q_status & ~(QS_READY|QS_ABORTED));

		/* Process request based on retrieved info */
		if ((scsiq.cntl & QC_SG_HEAD) != 0) {
			u_int i;

			/*
			 * S/G based request. Free all of the queue
			 * structures that contained S/G information.
			 */
			for (i = 0; i < sg_queue_cnt; i++) {
				done_qno = adv_read_lram_8(adv, done_qaddr
							   + ADV_SCSIQ_B_FWD);

#ifdef DIAGNOSTIC
				if (done_qno == ADV_QLINK_END) {
					panic("adv_qdone: Corrupted SG "
					      "list encountered");
				}
#endif
				done_qaddr = ADV_QNO_TO_QADDR(done_qno);

				/* Mark SG queue as free */
				adv_write_lram_8(adv, done_qaddr
						 + ADV_SCSIQ_B_STATUS, QS_FREE);
			}
		} else
			sg_queue_cnt = 0;
#ifdef DIAGNOSTIC
		if (adv->cur_active < (sg_queue_cnt + 1))
			panic("adv_qdone: Attempting to free more "
			      "queues than are active");
#endif
		adv->cur_active -= sg_queue_cnt + 1;

		aborted = (scsiq.q_status & QS_ABORTED) != 0;

		if ((scsiq.q_status != QS_DONE)
		 && (scsiq.q_status & QS_ABORTED) == 0)
			panic("adv_qdone: completed scsiq with unknown status");

		scsiq.remain_bytes += scsiq.extra_bytes;

		if ((scsiq.d3.done_stat == QD_WITH_ERROR) &&
		    (scsiq.d3.host_stat == QHSTA_M_DATA_OVER_RUN)) {
			if ((scsiq.cntl & (QC_DATA_IN|QC_DATA_OUT)) == 0) {
				scsiq.d3.done_stat = QD_NO_ERROR;
				scsiq.d3.host_stat = QHSTA_NO_ERROR;
			}
		}

		cinfo = &adv->ccb_infos[scsiq.d2.ccb_index];
		ccb = cinfo->ccb;
		ccb->csio.resid = scsiq.remain_bytes;
		adv_done(adv, ccb,
			 scsiq.d3.done_stat, scsiq.d3.host_stat,
			 scsiq.d3.scsi_stat, scsiq.q_no);

		doneq_head = done_qno;
		done_qno = adv_read_lram_8(adv, done_qaddr + ADV_SCSIQ_B_FWD);
	}
	adv_write_lram_16(adv, ADVV_DONE_Q_TAIL_W, doneq_head);
}
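
/*
 * The done queue is a singly linked ring in board LRAM: the word
 * at ADVV_DONE_Q_TAIL_W names the last queue consumed and each
 * entry's ADV_SCSIQ_B_FWD byte links to the next. The walk above
 * stops when a forward link reads ADV_QLINK_END and then publishes
 * the new tail for the microcode.
 */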

void
adv_done(struct adv_softc *adv, union ccb *ccb, u_int done_stat,
	 u_int host_stat, u_int scsi_status, u_int q_no)
{
	struct adv_ccb_info *cinfo;

	cinfo = (struct adv_ccb_info *)ccb->ccb_h.ccb_cinfo_ptr;
	LIST_REMOVE(&ccb->ccb_h, sim_links.le);
	untimeout(adv_timeout, ccb, ccb->ccb_h.timeout_ch);
	if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
		bus_dmasync_op_t op;

		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN)
			op = BUS_DMASYNC_POSTREAD;
		else
			op = BUS_DMASYNC_POSTWRITE;
		bus_dmamap_sync(adv->buffer_dmat, cinfo->dmamap, op);
		bus_dmamap_unload(adv->buffer_dmat, cinfo->dmamap);
	}

	switch (done_stat) {
	case QD_NO_ERROR:
		if (host_stat == QHSTA_NO_ERROR) {
			ccb->ccb_h.status = CAM_REQ_CMP;
			break;
		}
		xpt_print_path(ccb->ccb_h.path);
		printf("adv_done - queue done without error, "
		       "but host status non-zero(%x)\n", host_stat);
		/*FALLTHROUGH*/
	case QD_WITH_ERROR:
		switch (host_stat) {
		case QHSTA_M_TARGET_STATUS_BUSY:
		case QHSTA_M_BAD_QUEUE_FULL_OR_BUSY:
			/*
			 * Assume that if we were a tagged transaction
			 * the target reported queue full. Otherwise,
			 * report busy. The firmware really should just
			 * pass the original status back up to us even
			 * if it thinks the target was in error for
			 * returning this status as no other transactions
			 * from this initiator are in effect, but this
			 * ignores multi-initiator setups and there is
			 * evidence that the firmware gets its per-device
			 * transaction counts screwed up occasionally.
			 */
			ccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR;
			if ((ccb->ccb_h.flags & CAM_TAG_ACTION_VALID) != 0
			 && host_stat != QHSTA_M_TARGET_STATUS_BUSY)
				scsi_status = SCSI_STATUS_QUEUE_FULL;
			else
				scsi_status = SCSI_STATUS_BUSY;
			adv_abort_ccb(adv, ccb->ccb_h.target_id,
				      ccb->ccb_h.target_lun,
				      /*ccb*/NULL, CAM_REQUEUE_REQ,
				      /*queued_only*/TRUE);
			/*FALLTHROUGH*/
		case QHSTA_M_NO_AUTO_REQ_SENSE:
		case QHSTA_NO_ERROR:
			ccb->csio.scsi_status = scsi_status;
			switch (scsi_status) {
			case SCSI_STATUS_CHECK_COND:
			case SCSI_STATUS_CMD_TERMINATED:
				ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
				/* Structure copy */
				ccb->csio.sense_data =
				    adv->sense_buffers[q_no - 1];
				/* FALLTHROUGH */
			case SCSI_STATUS_BUSY:
			case SCSI_STATUS_RESERV_CONFLICT:
			case SCSI_STATUS_QUEUE_FULL:
			case SCSI_STATUS_COND_MET:
			case SCSI_STATUS_INTERMED:
			case SCSI_STATUS_INTERMED_COND_MET:
				ccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR;
				break;
			case SCSI_STATUS_OK:
				ccb->ccb_h.status |= CAM_REQ_CMP;
				break;
			}
			break;
		case QHSTA_M_SEL_TIMEOUT:
			ccb->ccb_h.status = CAM_SEL_TIMEOUT;
			break;
		case QHSTA_M_DATA_OVER_RUN:
			ccb->ccb_h.status = CAM_DATA_RUN_ERR;
			break;
		case QHSTA_M_UNEXPECTED_BUS_FREE:
			ccb->ccb_h.status = CAM_UNEXP_BUSFREE;
			break;
		case QHSTA_M_BAD_BUS_PHASE_SEQ:
			ccb->ccb_h.status = CAM_SEQUENCE_FAIL;
			break;
		case QHSTA_M_BAD_CMPL_STATUS_IN:
			/* No command complete after a status message */
			ccb->ccb_h.status = CAM_SEQUENCE_FAIL;
			break;
		case QHSTA_D_EXE_SCSI_Q_BUSY_TIMEOUT:
		case QHSTA_M_WTM_TIMEOUT:
		case QHSTA_M_HUNG_REQ_SCSI_BUS_RESET:
			/* The SCSI bus hung in a phase */
			ccb->ccb_h.status = CAM_SEQUENCE_FAIL;
			adv_reset_bus(adv, /*initiate_reset*/TRUE);
			break;
		case QHSTA_M_AUTO_REQ_SENSE_FAIL:
			ccb->ccb_h.status = CAM_AUTOSENSE_FAIL;
			break;
		case QHSTA_D_QDONE_SG_LIST_CORRUPTED:
		case QHSTA_D_ASC_DVC_ERROR_CODE_SET:
		case QHSTA_D_HOST_ABORT_FAILED:
		case QHSTA_D_EXE_SCSI_Q_FAILED:
		case QHSTA_D_ASPI_NO_BUF_POOL:
		case QHSTA_M_BAD_TAG_CODE:
		case QHSTA_D_LRAM_CMP_ERROR:
		case QHSTA_M_MICRO_CODE_ERROR_HALT:
		default:
			panic("%s: Unhandled Host status error %x",
			      adv_name(adv), host_stat);
			/* NOTREACHED */
		}
		break;

	case QD_ABORTED_BY_HOST:
		/* Don't clobber any, more explicit, error codes we've set */
		if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG)
			ccb->ccb_h.status = CAM_REQ_ABORTED;
		break;

	default:
		xpt_print_path(ccb->ccb_h.path);
		printf("adv_done - queue done with unknown status %x:%x\n",
		       done_stat, host_stat);
		ccb->ccb_h.status = CAM_REQ_CMP_ERR;
		break;
	}
	adv_clear_state(adv, ccb);
	if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP
	 && (ccb->ccb_h.status & CAM_DEV_QFRZN) == 0) {
		xpt_freeze_devq(ccb->ccb_h.path, /*count*/1);
		ccb->ccb_h.status |= CAM_DEV_QFRZN;
	}
	adv_free_ccb_info(adv, cinfo);
	/*
	 * Null this out so that we catch driver bugs that cause a
	 * ccb to be completed twice.
	 */
	ccb->ccb_h.ccb_cinfo_ptr = NULL;
	ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
	xpt_done(ccb);
}
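
/*
 * Per CAM convention, any CCB completing with a status other than
 * CAM_REQ_CMP freezes the device queue (CAM_DEV_QFRZN) so that
 * recovery code sees the device quiesced; the XPT or peripheral
 * driver releases the queue when it handles the error.
 */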

/*
 * Function to poll for command completion when
 * interrupts are disabled (crash dumps)
 */
static void
adv_poll(struct cam_sim *sim)
{
	adv_intr(cam_sim_softc(sim));
}

/*
 * Attach all the sub-devices we can find
 */
int
adv_attach(struct adv_softc *adv)
{
	struct ccb_setasync csa;
	struct cam_devq *devq;
	int max_sg;

	/*
	 * Allocate an array of ccb mapping structures. We put the
	 * index of the ccb_info structure into the queue representing
	 * a transaction and use it for mapping the queue to the
	 * upper level SCSI transaction it represents.
	 */
	adv->ccb_infos = malloc(sizeof(*adv->ccb_infos) * adv->max_openings,
				M_DEVBUF, M_NOWAIT);

	if (adv->ccb_infos == NULL)
		return (ENOMEM);

	adv->init_level++;

	/*
	 * Create our DMA tags. These tags define the kinds of device
	 * accessible memory allocations and memory mappings we will
	 * need to perform during normal operation.
	 *
	 * Unless we need to further restrict the allocation, we rely
	 * on the restrictions of the parent dmat, hence the common
	 * use of MAXADDR and MAXSIZE.
	 *
	 * The ASC boards use chains of "queues" (the transactional
	 * resources on the board) to represent long S/G lists.
	 * The first queue represents the command and holds a
	 * single address and data pair. The queues that follow
	 * can each hold ADV_SG_LIST_PER_Q entries. Given the
	 * total number of queues, we can express the largest
	 * transaction we can map. We reserve a few queues for
	 * error recovery. Take those into account as well.
	 *
	 * There is a way to take an interrupt to download the
	 * next batch of S/G entries if there are more than 255
	 * of them (the counter in the queue structure is a u_int8_t).
	 * We don't use this feature, so limit the S/G list size
	 * accordingly.
	 */
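	/*
	 * Worked example (assuming ADV_SG_LIST_PER_Q is 7 and
	 * ADV_MIN_FREE_Q is 2, their values in advlib.h at the time
	 * of this writing): with the maximum of 240 openings this
	 * yields (240 - 2 - 1) * 7 = 1659 potential entries, so the
	 * clamp to 255 below is what actually bounds the list.
	 */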
	max_sg = (adv->max_openings - ADV_MIN_FREE_Q - 1) * ADV_SG_LIST_PER_Q;
	if (max_sg > 255)
		max_sg = 255;

	/* DMA tag for mapping buffers into device visible space. */
	if (bus_dma_tag_create(
			/* parent	*/ adv->parent_dmat,
			/* alignment	*/ 1,
			/* boundary	*/ 0,
			/* lowaddr	*/ BUS_SPACE_MAXADDR,
			/* highaddr	*/ BUS_SPACE_MAXADDR,
			/* filter	*/ NULL,
			/* filterarg	*/ NULL,
			/* maxsize	*/ MAXPHYS,
			/* nsegments	*/ max_sg,
			/* maxsegsz	*/ BUS_SPACE_MAXSIZE_32BIT,
			/* flags	*/ BUS_DMA_ALLOCNOW,
			/* lockfunc	*/ busdma_lock_mutex,
			/* lockarg	*/ &Giant,
			&adv->buffer_dmat) != 0) {
		return (ENXIO);
	}
	adv->init_level++;

	/* DMA tag for our sense buffers */
	if (bus_dma_tag_create(
			/* parent	*/ adv->parent_dmat,
			/* alignment	*/ 1,
			/* boundary	*/ 0,
			/* lowaddr	*/ BUS_SPACE_MAXADDR,
			/* highaddr	*/ BUS_SPACE_MAXADDR,
			/* filter	*/ NULL,
			/* filterarg	*/ NULL,
			/* maxsize	*/ sizeof(struct scsi_sense_data) *
					   adv->max_openings,
			/* nsegments	*/ 1,
			/* maxsegsz	*/ BUS_SPACE_MAXSIZE_32BIT,
			/* flags	*/ 0,
			/* lockfunc	*/ busdma_lock_mutex,
			/* lockarg	*/ &Giant,
			&adv->sense_dmat) != 0) {
		return (ENXIO);
	}

	adv->init_level++;

	/* Allocation for our sense buffers */
	if (bus_dmamem_alloc(adv->sense_dmat, (void **)&adv->sense_buffers,
			     BUS_DMA_NOWAIT, &adv->sense_dmamap) != 0) {
		return (ENOMEM);
	}

	adv->init_level++;

	/* And permanently map them */
	bus_dmamap_load(adv->sense_dmat, adv->sense_dmamap,
			adv->sense_buffers,
			sizeof(struct scsi_sense_data)*adv->max_openings,
			adv_map, &adv->sense_physbase, /*flags*/0);

	adv->init_level++;

	/*
	 * Fire up the chip
	 */
	if (adv_start_chip(adv) != 1) {
		printf("adv%d: Unable to start on board processor. Aborting.\n",
		       adv->unit);
		return (ENXIO);
	}

	/*
	 * Create the device queue for our SIM.
	 */
	devq = cam_simq_alloc(adv->max_openings);
	if (devq == NULL)
		return (ENOMEM);

	/*
	 * Construct our SIM entry.
	 */
	adv->sim = cam_sim_alloc(adv_action, adv_poll, "adv", adv, adv->unit,
				 1, adv->max_openings, devq);
	if (adv->sim == NULL)
		return (ENOMEM);

	/*
	 * Register the bus.
	 *
	 * XXX Twin Channel EISA Cards???
	 */
	if (xpt_bus_register(adv->sim, 0) != CAM_SUCCESS) {
		cam_sim_free(adv->sim, /*free devq*/TRUE);
		return (ENXIO);
	}

	if (xpt_create_path(&adv->path, /*periph*/NULL, cam_sim_path(adv->sim),
			    CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD)
	    != CAM_REQ_CMP) {
		xpt_bus_deregister(cam_sim_path(adv->sim));
		cam_sim_free(adv->sim, /*free devq*/TRUE);
		return (ENXIO);
	}

	xpt_setup_ccb(&csa.ccb_h, adv->path, /*priority*/5);
	csa.ccb_h.func_code = XPT_SASYNC_CB;
	csa.event_enable = AC_FOUND_DEVICE|AC_LOST_DEVICE;
	csa.callback = advasync;
	csa.callback_arg = adv;
	xpt_action((union ccb *)&csa);
	return (0);
}