/*
 * Generic driver for the Advanced Systems Inc. SCSI controllers
 * Product specific probe and attach routines can be found in:
 *
 * i386/isa/adv_isa.c	ABP5140, ABP542, ABP5150, ABP842, ABP852
 * i386/eisa/adv_eisa.c	ABP742, ABP752
 * pci/adv_pci.c	ABP920, ABP930, ABP930U, ABP930UA, ABP940, ABP940U,
 *			ABP940UA, ABP950, ABP960, ABP960U, ABP960UA,
 *			ABP970, ABP970U
 *
 * Copyright (c) 1996-2000 Justin Gibbs.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */
/*
 * Ported from:
 * advansys.c - Linux Host Driver for AdvanSys SCSI Adapters
 *
 * Copyright (c) 1995-1997 Advanced System Products, Inc.
 * All Rights Reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that redistributions of source
 * code retain the above copyright notice and this comment without
 * modification.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/kernel.h>

#include <machine/bus_pio.h>
#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/bus.h>
#include <sys/rman.h>

#include <cam/cam.h>
#include <cam/cam_ccb.h>
#include <cam/cam_sim.h>
#include <cam/cam_xpt_sim.h>
#include <cam/cam_xpt_periph.h>
#include <cam/cam_debug.h>

#include <cam/scsi/scsi_all.h>
#include <cam/scsi/scsi_message.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>

#include <dev/advansys/advansys.h>
static void	adv_action(struct cam_sim *sim, union ccb *ccb);
static void	adv_execute_ccb(void *arg, bus_dma_segment_t *dm_segs,
				int nsegments, int error);
static void	adv_poll(struct cam_sim *sim);
static void	adv_run_doneq(struct adv_softc *adv);
static struct adv_ccb_info *
		adv_alloc_ccb_info(struct adv_softc *adv);
static void	adv_destroy_ccb_info(struct adv_softc *adv,
				     struct adv_ccb_info *cinfo);
static __inline struct adv_ccb_info *
		adv_get_ccb_info(struct adv_softc *adv);
static __inline void adv_free_ccb_info(struct adv_softc *adv,
				       struct adv_ccb_info *cinfo);
static __inline void adv_set_state(struct adv_softc *adv, adv_state state);
static __inline void adv_clear_state(struct adv_softc *adv, union ccb* ccb);
static void adv_clear_state_really(struct adv_softc *adv, union ccb* ccb);

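/*
 * CCB info structures are allocated lazily: adv_get_ccb_info() reuses
 * an entry from the free list when one is available and only creates
 * a new one (along with its DMA map) when the list is empty.
 */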
static __inline struct adv_ccb_info *
adv_get_ccb_info(struct adv_softc *adv)
{
	struct adv_ccb_info *cinfo;
	int opri;

	opri = splcam();
	if ((cinfo = SLIST_FIRST(&adv->free_ccb_infos)) != NULL) {
		SLIST_REMOVE_HEAD(&adv->free_ccb_infos, links);
	} else {
		cinfo = adv_alloc_ccb_info(adv);
	}
	splx(opri);

	return (cinfo);
}

static __inline void
adv_free_ccb_info(struct adv_softc *adv, struct adv_ccb_info *cinfo)
{
	int opri;

	opri = splcam();
	cinfo->state = ACCB_FREE;
	SLIST_INSERT_HEAD(&adv->free_ccb_infos, cinfo, links);
	splx(opri);
}

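/*
 * adv->state is a bit vector of conditions that block new transactions.
 * Setting the first bit freezes the CAM SIM queue; once the last bit
 * has been cleared, adv_clear_state_really() releases the queue again.
 */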
static __inline void
adv_set_state(struct adv_softc *adv, adv_state state)
{
	if (adv->state == 0)
		xpt_freeze_simq(adv->sim, /*count*/1);
	adv->state |= state;
}

static __inline void
adv_clear_state(struct adv_softc *adv, union ccb* ccb)
{
	if (adv->state != 0)
		adv_clear_state_really(adv, ccb);
}

static void
adv_clear_state_really(struct adv_softc *adv, union ccb* ccb)
{
	if ((adv->state & ADV_BUSDMA_BLOCK_CLEARED) != 0)
		adv->state &= ~(ADV_BUSDMA_BLOCK_CLEARED|ADV_BUSDMA_BLOCK);
	if ((adv->state & ADV_RESOURCE_SHORTAGE) != 0) {
		int openings;

		openings = adv->max_openings - adv->cur_active - ADV_MIN_FREE_Q;
		if (openings >= adv->openings_needed) {
			adv->state &= ~ADV_RESOURCE_SHORTAGE;
			adv->openings_needed = 0;
		}
	}

	if ((adv->state & ADV_IN_TIMEOUT) != 0) {
		struct adv_ccb_info *cinfo;

		cinfo = (struct adv_ccb_info *)ccb->ccb_h.ccb_cinfo_ptr;
		if ((cinfo->state & ACCB_RECOVERY_CCB) != 0) {
			struct ccb_hdr *ccb_h;

			/*
			 * We now traverse our list of pending CCBs
			 * and reinstate their timeouts.
			 */
			ccb_h = LIST_FIRST(&adv->pending_ccbs);
			while (ccb_h != NULL) {
				ccb_h->timeout_ch =
				    timeout(adv_timeout, (caddr_t)ccb_h,
					    (ccb_h->timeout * hz) / 1000);
				ccb_h = LIST_NEXT(ccb_h, sim_links.le);
			}
			adv->state &= ~ADV_IN_TIMEOUT;
			printf("%s: No longer in timeout\n", adv_name(adv));
		}
	}
	if (adv->state == 0)
		ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
}

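/*
 * Callback for bus_dmamap_load(): stash the bus address of the single
 * segment that maps a contiguous allocation.
 */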
void
adv_map(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	bus_addr_t* physaddr;

	physaddr = (bus_addr_t*)arg;
	*physaddr = segs->ds_addr;
}

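/*
 * Format this controller's name.  The result lives in a single static
 * buffer, so the routine is not reentrant and the returned string is
 * only valid until the next call.
 */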
char *
adv_name(struct adv_softc *adv)
{
	static char name[10];

	snprintf(name, sizeof(name), "adv%d", adv->unit);
	return (name);
}

static void
adv_action(struct cam_sim *sim, union ccb *ccb)
{
	struct adv_softc *adv;

	CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("adv_action\n"));

	adv = (struct adv_softc *)cam_sim_softc(sim);

	switch (ccb->ccb_h.func_code) {
	/* Common cases first */
	case XPT_SCSI_IO:	/* Execute the requested I/O operation */
	{
		struct ccb_hdr *ccb_h;
		struct ccb_scsiio *csio;
		struct adv_ccb_info *cinfo;

		ccb_h = &ccb->ccb_h;
		csio = &ccb->csio;
		cinfo = adv_get_ccb_info(adv);
		if (cinfo == NULL)
			panic("XXX Handle CCB info error!!!");

		ccb_h->ccb_cinfo_ptr = cinfo;
		cinfo->ccb = ccb;

		/* Only use S/G if there is a transfer */
		if ((ccb_h->flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
			if ((ccb_h->flags & CAM_SCATTER_VALID) == 0) {
				/*
				 * We've been given a pointer
				 * to a single buffer
				 */
				if ((ccb_h->flags & CAM_DATA_PHYS) == 0) {
					int s;
					int error;

					s = splsoftvm();
					error =
					    bus_dmamap_load(adv->buffer_dmat,
							    cinfo->dmamap,
							    csio->data_ptr,
							    csio->dxfer_len,
							    adv_execute_ccb,
							    csio, /*flags*/0);
					if (error == EINPROGRESS) {
						/*
						 * So as to maintain ordering,
						 * freeze the controller queue
						 * until our mapping is
						 * returned.
						 */
						adv_set_state(adv,
							      ADV_BUSDMA_BLOCK);
					}
					splx(s);
				} else {
					struct bus_dma_segment seg;

					/* Pointer to physical buffer */
					seg.ds_addr =
					    (bus_addr_t)csio->data_ptr;
					seg.ds_len = csio->dxfer_len;
					adv_execute_ccb(csio, &seg, 1, 0);
				}
			} else {
				struct bus_dma_segment *segs;
				if ((ccb_h->flags & CAM_DATA_PHYS) != 0)
					panic("adv_setup_data - Physical "
					      "segment pointers unsupported");

				if ((ccb_h->flags & CAM_SG_LIST_PHYS) == 0)
					panic("adv_setup_data - Virtual "
					      "segment addresses unsupported");

				/* Just use the segments provided */
				segs = (struct bus_dma_segment *)csio->data_ptr;
				adv_execute_ccb(ccb, segs, csio->sglist_cnt, 0);
			}
		} else {
			adv_execute_ccb(ccb, NULL, 0, 0);
		}
		break;
	}
	case XPT_RESET_DEV:	/* Bus Device Reset the specified SCSI device */
	case XPT_TARGET_IO:	/* Execute target I/O request */
	case XPT_ACCEPT_TARGET_IO:	/* Accept Host Target Mode CDB */
	case XPT_CONT_TARGET_IO:	/* Continue Host Target I/O Connection*/
	case XPT_EN_LUN:		/* Enable LUN as a target */
	case XPT_ABORT:			/* Abort the specified CCB */
		/* XXX Implement */
		ccb->ccb_h.status = CAM_REQ_INVALID;
		xpt_done(ccb);
		break;
	case XPT_SET_TRAN_SETTINGS:
	{
		struct ccb_trans_settings *cts;
		target_bit_vector targ_mask;
		struct adv_transinfo *tconf;
		u_int update_type;
		int s;

		cts = &ccb->cts;
		targ_mask = ADV_TID_TO_TARGET_MASK(cts->ccb_h.target_id);
		update_type = 0;

		/*
		 * The user must specify which type of settings he wishes
		 * to change.
		 */
		if (((cts->flags & CCB_TRANS_CURRENT_SETTINGS) != 0)
		 && ((cts->flags & CCB_TRANS_USER_SETTINGS) == 0)) {
			tconf = &adv->tinfo[cts->ccb_h.target_id].current;
			update_type |= ADV_TRANS_GOAL;
		} else if (((cts->flags & CCB_TRANS_USER_SETTINGS) != 0)
			&& ((cts->flags & CCB_TRANS_CURRENT_SETTINGS) == 0)) {
			tconf = &adv->tinfo[cts->ccb_h.target_id].user;
			update_type |= ADV_TRANS_USER;
		} else {
			ccb->ccb_h.status = CAM_REQ_INVALID;
			xpt_done(ccb);
			break;
		}

		s = splcam();

		if ((update_type & ADV_TRANS_GOAL) != 0) {
			if ((cts->valid & CCB_TRANS_DISC_VALID) != 0) {
				if ((cts->flags & CCB_TRANS_DISC_ENB) != 0)
					adv->disc_enable |= targ_mask;
				else
					adv->disc_enable &= ~targ_mask;
				adv_write_lram_8(adv, ADVV_DISC_ENABLE_B,
						 adv->disc_enable);
			}

			if ((cts->valid & CCB_TRANS_TQ_VALID) != 0) {
				if ((cts->flags & CCB_TRANS_TAG_ENB) != 0)
					adv->cmd_qng_enabled |= targ_mask;
				else
					adv->cmd_qng_enabled &= ~targ_mask;
			}
		}

		if ((update_type & ADV_TRANS_USER) != 0) {
			if ((cts->valid & CCB_TRANS_DISC_VALID) != 0) {
				if ((cts->flags & CCB_TRANS_DISC_ENB) != 0)
					adv->user_disc_enable |= targ_mask;
				else
					adv->user_disc_enable &= ~targ_mask;
			}

			if ((cts->valid & CCB_TRANS_TQ_VALID) != 0) {
				if ((cts->flags & CCB_TRANS_TAG_ENB) != 0)
					adv->user_cmd_qng_enabled |= targ_mask;
				else
					adv->user_cmd_qng_enabled &= ~targ_mask;
			}
		}

		/*
		 * If the user specifies either the sync rate or offset,
		 * but not both, the unspecified parameter defaults to its
		 * current value in transfer negotiations.
		 */
		if (((cts->valid & CCB_TRANS_SYNC_RATE_VALID) != 0)
		 || ((cts->valid & CCB_TRANS_SYNC_OFFSET_VALID) != 0)) {
			/*
			 * If the user provided a sync rate but no offset,
			 * use the current offset.
			 */
			if ((cts->valid & CCB_TRANS_SYNC_OFFSET_VALID) == 0)
				cts->sync_offset = tconf->offset;

			/*
			 * If the user provided an offset but no sync rate,
			 * use the current sync rate.
			 */
			if ((cts->valid & CCB_TRANS_SYNC_RATE_VALID) == 0)
				cts->sync_period = tconf->period;

			adv_period_offset_to_sdtr(adv, &cts->sync_period,
						  &cts->sync_offset,
						  cts->ccb_h.target_id);

			adv_set_syncrate(adv, /*struct cam_path */NULL,
					 cts->ccb_h.target_id, cts->sync_period,
					 cts->sync_offset, update_type);
		}

		splx(s);
		ccb->ccb_h.status = CAM_REQ_CMP;
		xpt_done(ccb);
		break;
	}
	case XPT_GET_TRAN_SETTINGS:
	/* Get default/user set transfer settings for the target */
	{
		struct ccb_trans_settings *cts;
		struct adv_transinfo *tconf;
		target_bit_vector target_mask;
		int s;

		cts = &ccb->cts;
		target_mask = ADV_TID_TO_TARGET_MASK(cts->ccb_h.target_id);

		cts->flags &= ~(CCB_TRANS_DISC_ENB|CCB_TRANS_TAG_ENB);

		s = splcam();
		if ((cts->flags & CCB_TRANS_CURRENT_SETTINGS) != 0) {
			tconf = &adv->tinfo[cts->ccb_h.target_id].current;
			if ((adv->disc_enable & target_mask) != 0)
				cts->flags |= CCB_TRANS_DISC_ENB;
			if ((adv->cmd_qng_enabled & target_mask) != 0)
				cts->flags |= CCB_TRANS_TAG_ENB;
		} else {
			tconf = &adv->tinfo[cts->ccb_h.target_id].user;
			if ((adv->user_disc_enable & target_mask) != 0)
				cts->flags |= CCB_TRANS_DISC_ENB;
			if ((adv->user_cmd_qng_enabled & target_mask) != 0)
				cts->flags |= CCB_TRANS_TAG_ENB;
		}

		cts->sync_period = tconf->period;
		cts->sync_offset = tconf->offset;
		splx(s);

		cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
		cts->valid = CCB_TRANS_SYNC_RATE_VALID
			   | CCB_TRANS_SYNC_OFFSET_VALID
			   | CCB_TRANS_BUS_WIDTH_VALID
			   | CCB_TRANS_DISC_VALID
			   | CCB_TRANS_TQ_VALID;
		ccb->ccb_h.status = CAM_REQ_CMP;
		xpt_done(ccb);
		break;
	}
	case XPT_CALC_GEOMETRY:
	{
		struct ccb_calc_geometry *ccg;
		u_int32_t size_mb;
		u_int32_t secs_per_cylinder;
		int extended;

		ccg = &ccb->ccg;
		size_mb = ccg->volume_size
			/ ((1024L * 1024L) / ccg->block_size);
		extended = (adv->control & ADV_CNTL_BIOS_GT_1GB) != 0;

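		/*
		 * Worked example (values chosen for illustration): a 4GB
		 * disk with 512-byte blocks has 8388608 blocks, so size_mb
		 * = 8388608 / (1048576 / 512) = 4096.  With the >1GB BIOS
		 * option enabled, that selects 255 heads and 63 sectors,
		 * giving 8388608 / (255 * 63) = 522 cylinders.
		 */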
		if (size_mb > 1024 && extended) {
			ccg->heads = 255;
			ccg->secs_per_track = 63;
		} else {
			ccg->heads = 64;
			ccg->secs_per_track = 32;
		}
		secs_per_cylinder = ccg->heads * ccg->secs_per_track;
		ccg->cylinders = ccg->volume_size / secs_per_cylinder;
		ccb->ccb_h.status = CAM_REQ_CMP;
		xpt_done(ccb);
		break;
	}
	case XPT_RESET_BUS:		/* Reset the specified SCSI bus */
	{
		int s;

		s = splcam();
		adv_stop_execution(adv);
		adv_reset_bus(adv, /*initiate_reset*/TRUE);
		adv_start_execution(adv);
		splx(s);

		ccb->ccb_h.status = CAM_REQ_CMP;
		xpt_done(ccb);
		break;
	}
	case XPT_TERM_IO:		/* Terminate the I/O process */
		/* XXX Implement */
		ccb->ccb_h.status = CAM_REQ_INVALID;
		xpt_done(ccb);
		break;
	case XPT_PATH_INQ:		/* Path routing inquiry */
	{
		struct ccb_pathinq *cpi = &ccb->cpi;

		cpi->version_num = 1; /* XXX??? */
		cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE;
		cpi->target_sprt = 0;
		cpi->hba_misc = 0;
		cpi->hba_eng_cnt = 0;
		cpi->max_target = 7;
		cpi->max_lun = 7;
		cpi->initiator_id = adv->scsi_id;
		cpi->bus_id = cam_sim_bus(sim);
		cpi->base_transfer_speed = 3300;
		strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
		strncpy(cpi->hba_vid, "Advansys", HBA_IDLEN);
		strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
		cpi->unit_number = cam_sim_unit(sim);
		cpi->ccb_h.status = CAM_REQ_CMP;
		xpt_done(ccb);
		break;
	}
	default:
		ccb->ccb_h.status = CAM_REQ_INVALID;
		xpt_done(ccb);
		break;
	}
}

/*
 * Currently, the output of bus_dmamap_load suits our needs just
 * fine, but should it change, we'd need to do something here.
 */
#define adv_fixup_dmasegs(adv, dm_segs) (struct adv_sg_entry *)(dm_segs)

static void
adv_execute_ccb(void *arg, bus_dma_segment_t *dm_segs,
		int nsegments, int error)
{
	struct ccb_scsiio *csio;
	struct ccb_hdr *ccb_h;
	struct cam_sim *sim;
	struct adv_softc *adv;
	struct adv_ccb_info *cinfo;
	struct adv_scsi_q scsiq;
	struct adv_sg_head sghead;
	int s;

	csio = (struct ccb_scsiio *)arg;
	ccb_h = &csio->ccb_h;
	sim = xpt_path_sim(ccb_h->path);
	adv = (struct adv_softc *)cam_sim_softc(sim);
	cinfo = (struct adv_ccb_info *)csio->ccb_h.ccb_cinfo_ptr;

	/*
	 * Setup our done routine to release the simq on
	 * the next ccb that completes.
	 */
	if ((adv->state & ADV_BUSDMA_BLOCK) != 0)
		adv->state |= ADV_BUSDMA_BLOCK_CLEARED;

	if ((ccb_h->flags & CAM_CDB_POINTER) != 0) {
		if ((ccb_h->flags & CAM_CDB_PHYS) == 0) {
			/* XXX Need phystovirt!!!! */
			/* How about pmap_kenter??? */
			scsiq.cdbptr = csio->cdb_io.cdb_ptr;
		} else {
			scsiq.cdbptr = csio->cdb_io.cdb_ptr;
		}
	} else {
		scsiq.cdbptr = csio->cdb_io.cdb_bytes;
	}
	/*
	 * Build up the request.  The ccb_index stored in q2 lets
	 * adv_run_doneq() map the completed queue back to our CCB
	 * info structure.
	 */
	scsiq.q1.status = 0;
	scsiq.q1.q_no = 0;
	scsiq.q1.cntl = 0;
	scsiq.q1.sg_queue_cnt = 0;
	scsiq.q1.target_id = ADV_TID_TO_TARGET_MASK(ccb_h->target_id);
	scsiq.q1.target_lun = ccb_h->target_lun;
	scsiq.q1.sense_len = csio->sense_len;
	scsiq.q1.extra_bytes = 0;
	scsiq.q2.ccb_index = cinfo - adv->ccb_infos;
	scsiq.q2.target_ix = ADV_TIDLUN_TO_IX(ccb_h->target_id,
					      ccb_h->target_lun);
	scsiq.q2.flag = 0;
	scsiq.q2.cdb_len = csio->cdb_len;
	if ((ccb_h->flags & CAM_TAG_ACTION_VALID) != 0)
		scsiq.q2.tag_code = csio->tag_action;
	else
		scsiq.q2.tag_code = 0;
	scsiq.q2.vm_id = 0;

	if (nsegments != 0) {
		bus_dmasync_op_t op;

		scsiq.q1.data_addr = dm_segs->ds_addr;
		scsiq.q1.data_cnt = dm_segs->ds_len;
		if (nsegments > 1) {
			scsiq.q1.cntl |= QC_SG_HEAD;
			sghead.entry_cnt
			    = sghead.entry_to_copy
			    = nsegments;
			sghead.res = 0;
			sghead.sg_list = adv_fixup_dmasegs(adv, dm_segs);
			scsiq.sg_head = &sghead;
		} else {
			scsiq.sg_head = NULL;
		}
		if ((ccb_h->flags & CAM_DIR_MASK) == CAM_DIR_IN)
			op = BUS_DMASYNC_PREREAD;
		else
			op = BUS_DMASYNC_PREWRITE;
		bus_dmamap_sync(adv->buffer_dmat, cinfo->dmamap, op);
	} else {
		scsiq.q1.data_addr = 0;
		scsiq.q1.data_cnt = 0;
		scsiq.sg_head = NULL;
	}

	s = splcam();

	/*
	 * Last chance to check whether this CCB has been
	 * marked for abort.
	 */
	if (ccb_h->status != CAM_REQ_INPROG) {
		if (nsegments != 0)
			bus_dmamap_unload(adv->buffer_dmat, cinfo->dmamap);
		adv_clear_state(adv, (union ccb *)csio);
		adv_free_ccb_info(adv, cinfo);
		xpt_done((union ccb *)csio);
		splx(s);
		return;
	}

	if (adv_execute_scsi_queue(adv, &scsiq, csio->dxfer_len) != 0) {
		/* Temporary resource shortage */
		adv_set_state(adv, ADV_RESOURCE_SHORTAGE);
		if (nsegments != 0)
			bus_dmamap_unload(adv->buffer_dmat, cinfo->dmamap);
		csio->ccb_h.status = CAM_REQUEUE_REQ;
		adv_clear_state(adv, (union ccb *)csio);
		adv_free_ccb_info(adv, cinfo);
		xpt_done((union ccb *)csio);
		splx(s);
		return;
	}
	cinfo->state |= ACCB_ACTIVE;
	ccb_h->status |= CAM_SIM_QUEUED;
	LIST_INSERT_HEAD(&adv->pending_ccbs, ccb_h, sim_links.le);
	/* Schedule our timeout; ccb_h->timeout is in milliseconds */
	ccb_h->timeout_ch =
	    timeout(adv_timeout, csio, (ccb_h->timeout * hz)/1000);
	splx(s);
}

static struct adv_ccb_info *
adv_alloc_ccb_info(struct adv_softc *adv)
{
	int error;
	struct adv_ccb_info *cinfo;

	cinfo = &adv->ccb_infos[adv->ccb_infos_allocated];
	cinfo->state = ACCB_FREE;
	error = bus_dmamap_create(adv->buffer_dmat, /*flags*/0,
				  &cinfo->dmamap);
	if (error != 0) {
		printf("%s: Unable to allocate CCB info "
		       "dmamap - error %d\n", adv_name(adv), error);
		return (NULL);
	}
	adv->ccb_infos_allocated++;
	return (cinfo);
}

static void
adv_destroy_ccb_info(struct adv_softc *adv, struct adv_ccb_info *cinfo)
{
	bus_dmamap_destroy(adv->buffer_dmat, cinfo->dmamap);
}

void
adv_timeout(void *arg)
{
	int s;
	union ccb *ccb;
	struct adv_softc *adv;
	struct adv_ccb_info *cinfo;

	ccb = (union ccb *)arg;
	adv = (struct adv_softc *)xpt_path_sim(ccb->ccb_h.path)->softc;
	cinfo = (struct adv_ccb_info *)ccb->ccb_h.ccb_cinfo_ptr;

	xpt_print_path(ccb->ccb_h.path);
	printf("Timed out\n");

	s = splcam();
	/* Have we been taken care of already?? */
	if (cinfo == NULL || cinfo->state == ACCB_FREE) {
		splx(s);
		return;
	}

	adv_stop_execution(adv);

	if ((cinfo->state & ACCB_ABORT_QUEUED) == 0) {
		struct ccb_hdr *ccb_h;

		/*
		 * In order to simplify the recovery process, we ask the XPT
		 * layer to halt the queue of new transactions and we traverse
		 * the list of pending CCBs and remove their timeouts. This
		 * means that the driver attempts to clear only one error
		 * condition at a time. In general, timeouts that occur
		 * close together are related anyway, so there is no benefit
		 * in attempting to handle errors in parallel. Timeouts will
		 * be reinstated when the recovery process ends.
		 */
		adv_set_state(adv, ADV_IN_TIMEOUT);

		/* This CCB is the CCB representing our recovery actions */
		cinfo->state |= ACCB_RECOVERY_CCB|ACCB_ABORT_QUEUED;

		ccb_h = LIST_FIRST(&adv->pending_ccbs);
		while (ccb_h != NULL) {
			untimeout(adv_timeout, ccb_h, ccb_h->timeout_ch);
			ccb_h = LIST_NEXT(ccb_h, sim_links.le);
		}

		/* XXX Should send a BDR */
		/* Attempt an abort as our first tack */
		xpt_print_path(ccb->ccb_h.path);
		printf("Attempting abort\n");
		adv_abort_ccb(adv, ccb->ccb_h.target_id,
			      ccb->ccb_h.target_lun, ccb,
			      CAM_CMD_TIMEOUT, /*queued_only*/FALSE);
		ccb->ccb_h.timeout_ch =
		    timeout(adv_timeout, ccb, 2 * hz);
	} else {
		/* Our attempt to perform an abort failed, go for a reset */
		xpt_print_path(ccb->ccb_h.path);
		printf("Resetting bus\n");
		ccb->ccb_h.status &= ~CAM_STATUS_MASK;
		ccb->ccb_h.status |= CAM_CMD_TIMEOUT;
		adv_reset_bus(adv, /*initiate_reset*/TRUE);
	}
	adv_start_execution(adv);
	splx(s);
}

struct adv_softc *
adv_alloc(device_t dev, bus_space_tag_t tag, bus_space_handle_t bsh)
{
	struct adv_softc *adv = device_get_softc(dev);

	/*
	 * Initialize the softc the bus code allocated for us.
	 */
	LIST_INIT(&adv->pending_ccbs);
	SLIST_INIT(&adv->free_ccb_infos);
	adv->dev = dev;
	adv->unit = device_get_unit(dev);
	adv->tag = tag;
	adv->bsh = bsh;

	return(adv);
}

void
adv_free(struct adv_softc *adv)
{
	switch (adv->init_level) {
	case 6:
	{
		struct adv_ccb_info *cinfo;

		while ((cinfo = SLIST_FIRST(&adv->free_ccb_infos)) != NULL) {
			SLIST_REMOVE_HEAD(&adv->free_ccb_infos, links);
			adv_destroy_ccb_info(adv, cinfo);
		}

		bus_dmamap_unload(adv->sense_dmat, adv->sense_dmamap);
	}
	case 5:
		bus_dmamem_free(adv->sense_dmat, adv->sense_buffers,
				adv->sense_dmamap);
	case 4:
		bus_dma_tag_destroy(adv->sense_dmat);
	case 3:
		bus_dma_tag_destroy(adv->buffer_dmat);
	case 2:
		bus_dma_tag_destroy(adv->parent_dmat);
	case 1:
		if (adv->ccb_infos != NULL)
			free(adv->ccb_infos, M_DEVBUF);
	case 0:
		break;
	}
}

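/*
 * adv_init - halt the adapter, pull the configuration from the EEPROM
 * (falling back to conservative defaults if the checksum is bad),
 * download the microcode, and set up the per-target negotiation state.
 */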
int
adv_init(struct adv_softc *adv)
{
	struct adv_eeprom_config eeprom_config;
	int checksum, i;
	int max_sync;
	u_int16_t config_lsw;
	u_int16_t config_msw;

	adv_lib_init(adv);

	/*
	 * Stop script execution.
	 */
	adv_write_lram_16(adv, ADV_HALTCODE_W, 0x00FE);
	adv_stop_execution(adv);
	if (adv_stop_chip(adv) == 0 || adv_is_chip_halted(adv) == 0) {
		printf("adv%d: Unable to halt adapter. Initialization "
		       "failed\n", adv->unit);
		return (1);
	}
	ADV_OUTW(adv, ADV_REG_PROG_COUNTER, ADV_MCODE_START_ADDR);
	if (ADV_INW(adv, ADV_REG_PROG_COUNTER) != ADV_MCODE_START_ADDR) {
		printf("adv%d: Unable to set program counter. Initialization "
		       "failed\n", adv->unit);
		return (1);
	}

	config_msw = ADV_INW(adv, ADV_CONFIG_MSW);
	config_lsw = ADV_INW(adv, ADV_CONFIG_LSW);

	if ((config_msw & ADV_CFG_MSW_CLR_MASK) != 0) {
		config_msw &= ~ADV_CFG_MSW_CLR_MASK;
		/*
		 * XXX The Linux code flags this as an error,
		 * but what should we report to the user???
		 * It seems that clearing the config register
		 * makes this error recoverable.
		 */
		ADV_OUTW(adv, ADV_CONFIG_MSW, config_msw);
	}

	/* Suck in the configuration from the EEProm */
	checksum = adv_get_eeprom_config(adv, &eeprom_config);

	if (ADV_INW(adv, ADV_CHIP_STATUS) & ADV_CSW_AUTO_CONFIG) {
		/*
		 * XXX The Linux code sets a warning level for this
		 * condition, yet nothing of meaning is printed to
		 * the user. What does this mean???
		 */
		if (adv->chip_version == 3) {
			if (eeprom_config.cfg_lsw != config_lsw)
				eeprom_config.cfg_lsw = config_lsw;
			if (eeprom_config.cfg_msw != config_msw) {
				eeprom_config.cfg_msw = config_msw;
			}
		}
	}
	if (checksum == eeprom_config.chksum) {

		/* Range/Sanity checking */
		if (eeprom_config.max_total_qng < ADV_MIN_TOTAL_QNG) {
			eeprom_config.max_total_qng = ADV_MIN_TOTAL_QNG;
		}
		if (eeprom_config.max_total_qng > ADV_MAX_TOTAL_QNG) {
			eeprom_config.max_total_qng = ADV_MAX_TOTAL_QNG;
		}
		if (eeprom_config.max_tag_qng > eeprom_config.max_total_qng) {
			eeprom_config.max_tag_qng = eeprom_config.max_total_qng;
		}
		if (eeprom_config.max_tag_qng < ADV_MIN_TAG_Q_PER_DVC) {
			eeprom_config.max_tag_qng = ADV_MIN_TAG_Q_PER_DVC;
		}
		adv->max_openings = eeprom_config.max_total_qng;
		adv->user_disc_enable = eeprom_config.disc_enable;
		adv->user_cmd_qng_enabled = eeprom_config.use_cmd_qng;
		adv->isa_dma_speed = EEPROM_DMA_SPEED(eeprom_config);
		adv->scsi_id = EEPROM_SCSIID(eeprom_config) & ADV_MAX_TID;
		EEPROM_SET_SCSIID(eeprom_config, adv->scsi_id);
		adv->control = eeprom_config.cntl;
		for (i = 0; i <= ADV_MAX_TID; i++) {
			u_int8_t sync_data;

			if ((eeprom_config.init_sdtr & (0x1 << i)) == 0)
				sync_data = 0;
			else
				sync_data = eeprom_config.sdtr_data[i];
			adv_sdtr_to_period_offset(adv,
						  sync_data,
						  &adv->tinfo[i].user.period,
						  &adv->tinfo[i].user.offset,
						  i);
		}
		config_lsw = eeprom_config.cfg_lsw;
		eeprom_config.cfg_msw = config_msw;
	} else {
		u_int8_t sync_data;

		printf("adv%d: Warning: EEPROM checksum mismatch. "
		       "Using default device parameters\n", adv->unit);

		/* Set reasonable defaults since we can't read the EEPROM */
		adv->isa_dma_speed = /*ADV_DEF_ISA_DMA_SPEED*/1;
		adv->max_openings = ADV_DEF_MAX_TOTAL_QNG;
		adv->disc_enable = TARGET_BIT_VECTOR_SET;
		adv->user_disc_enable = TARGET_BIT_VECTOR_SET;
		adv->cmd_qng_enabled = TARGET_BIT_VECTOR_SET;
		adv->user_cmd_qng_enabled = TARGET_BIT_VECTOR_SET;
		adv->scsi_id = 7;
		adv->control = 0xFFFF;

		if (adv->chip_version == ADV_CHIP_VER_PCI_ULTRA_3050)
			/* Default to no Ultra to support the 3030 */
			adv->control &= ~ADV_CNTL_SDTR_ENABLE_ULTRA;
		sync_data = ADV_DEF_SDTR_OFFSET | (ADV_DEF_SDTR_INDEX << 4);
		for (i = 0; i <= ADV_MAX_TID; i++) {
			adv_sdtr_to_period_offset(adv, sync_data,
						  &adv->tinfo[i].user.period,
						  &adv->tinfo[i].user.offset,
						  i);
		}
		config_lsw |= ADV_CFG_LSW_SCSI_PARITY_ON;
	}
	config_msw &= ~ADV_CFG_MSW_CLR_MASK;
	config_lsw |= ADV_CFG_LSW_HOST_INT_ON;
	if ((adv->type & (ADV_PCI|ADV_ULTRA)) == (ADV_PCI|ADV_ULTRA)
	 && (adv->control & ADV_CNTL_SDTR_ENABLE_ULTRA) == 0)
		/* 25ns or 10MHz */
		max_sync = 25;
	else
		/* Unlimited */
		max_sync = 0;
	for (i = 0; i <= ADV_MAX_TID; i++) {
		if (adv->tinfo[i].user.period < max_sync)
			adv->tinfo[i].user.period = max_sync;
	}

	if (adv_test_external_lram(adv) == 0) {
		if ((adv->type & (ADV_PCI|ADV_ULTRA)) == (ADV_PCI|ADV_ULTRA)) {
			eeprom_config.max_total_qng =
			    ADV_MAX_PCI_ULTRA_INRAM_TOTAL_QNG;
			eeprom_config.max_tag_qng =
			    ADV_MAX_PCI_ULTRA_INRAM_TAG_QNG;
		} else {
			eeprom_config.cfg_msw |= 0x0800;
			config_msw |= 0x0800;
			eeprom_config.max_total_qng =
			    ADV_MAX_PCI_INRAM_TOTAL_QNG;
			eeprom_config.max_tag_qng = ADV_MAX_INRAM_TAG_QNG;
		}
		adv->max_openings = eeprom_config.max_total_qng;
	}
	ADV_OUTW(adv, ADV_CONFIG_MSW, config_msw);
	ADV_OUTW(adv, ADV_CONFIG_LSW, config_lsw);
#if 0
	/*
	 * Don't write the eeprom data back for now.
	 * I'd rather not mess up the user's card. We also don't
	 * fully sanitize the eeprom settings above for the write-back
	 * to be 100% correct.
	 */
	if (adv_set_eeprom_config(adv, &eeprom_config) != 0)
		printf("%s: WARNING! Failure writing to EEPROM.\n",
		       adv_name(adv));
#endif

	adv_set_chip_scsiid(adv, adv->scsi_id);
	if (adv_init_lram_and_mcode(adv))
		return (1);

	adv->disc_enable = adv->user_disc_enable;

	adv_write_lram_8(adv, ADVV_DISC_ENABLE_B, adv->disc_enable);
	for (i = 0; i <= ADV_MAX_TID; i++) {
		/*
		 * Start off in async mode.
		 */
		adv_set_syncrate(adv, /*struct cam_path */NULL,
				 i, /*period*/0, /*offset*/0,
				 ADV_TRANS_CUR);
		/*
		 * Enable the use of tagged commands on all targets.
		 * This allows the kernel driver to make up its own mind
		 * as it sees fit to tag queue instead of having the
		 * firmware try to second-guess the tag_code settings.
		 */
		adv_write_lram_8(adv, ADVV_MAX_DVC_QNG_BEG + i,
				 adv->max_openings);
	}
	adv_write_lram_8(adv, ADVV_USE_TAGGED_QNG_B, TARGET_BIT_VECTOR_SET);
	adv_write_lram_8(adv, ADVV_CAN_TAGGED_QNG_B, TARGET_BIT_VECTOR_SET);
	printf("adv%d: AdvanSys %s Host Adapter, SCSI ID %d, queue depth %d\n",
	       adv->unit, (adv->type & ADV_ULTRA) && (max_sync == 0)
			  ? "Ultra SCSI" : "SCSI",
	       adv->scsi_id, adv->max_openings);
	return (0);
}

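/*
 * Interrupt handler.  A latched bus reset triggers bus-reset cleanup;
 * otherwise we save the chip's LRAM address and host flag state, ack
 * the interrupt, either service a microcode halt or run the done
 * queue, and restore the saved state before returning.
 */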
void
adv_intr(void *arg)
{
	struct adv_softc *adv;
	u_int16_t chipstat;
	u_int16_t saved_ram_addr;
	u_int8_t ctrl_reg;
	u_int8_t saved_ctrl_reg;
	u_int8_t host_flag;

	adv = (struct adv_softc *)arg;

	chipstat = ADV_INW(adv, ADV_CHIP_STATUS);

	/* Is it for us? */
	if ((chipstat & (ADV_CSW_INT_PENDING|ADV_CSW_SCSI_RESET_LATCH)) == 0)
		return;

	ctrl_reg = ADV_INB(adv, ADV_CHIP_CTRL);
	saved_ctrl_reg = ctrl_reg & (~(ADV_CC_SCSI_RESET | ADV_CC_CHIP_RESET |
				       ADV_CC_SINGLE_STEP | ADV_CC_DIAG |
				       ADV_CC_TEST));

	if ((chipstat & (ADV_CSW_SCSI_RESET_LATCH|ADV_CSW_SCSI_RESET_ACTIVE))) {
		printf("Detected Bus Reset\n");
		adv_reset_bus(adv, /*initiate_reset*/FALSE);
		return;
	}

	if ((chipstat & ADV_CSW_INT_PENDING) != 0) {

		saved_ram_addr = ADV_INW(adv, ADV_LRAM_ADDR);
		host_flag = adv_read_lram_8(adv, ADVV_HOST_FLAG_B);
		adv_write_lram_8(adv, ADVV_HOST_FLAG_B,
				 host_flag | ADV_HOST_FLAG_IN_ISR);

		adv_ack_interrupt(adv);

		if ((chipstat & ADV_CSW_HALTED) != 0
		 && (ctrl_reg & ADV_CC_SINGLE_STEP) != 0) {
			adv_isr_chip_halted(adv);
			saved_ctrl_reg &= ~ADV_CC_HALT;
		} else {
			adv_run_doneq(adv);
		}
		ADV_OUTW(adv, ADV_LRAM_ADDR, saved_ram_addr);
#ifdef DIAGNOSTIC
		if (ADV_INW(adv, ADV_LRAM_ADDR) != saved_ram_addr)
			panic("adv_intr: Unable to set LRAM addr");
#endif
		adv_write_lram_8(adv, ADVV_HOST_FLAG_B, host_flag);
	}

	ADV_OUTB(adv, ADV_CHIP_CTRL, saved_ctrl_reg);
}

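/*
 * Walk the firmware's done queue in local RAM, retiring each completed
 * queue (and any chained S/G queues) and handing the result to adv_done().
 */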
void
adv_run_doneq(struct adv_softc *adv)
{
	struct adv_q_done_info scsiq;
	u_int doneq_head;
	u_int done_qno;

	doneq_head = adv_read_lram_16(adv, ADVV_DONE_Q_TAIL_W) & 0xFF;
	done_qno = adv_read_lram_8(adv, ADV_QNO_TO_QADDR(doneq_head)
				   + ADV_SCSIQ_B_FWD);
	while (done_qno != ADV_QLINK_END) {
		union ccb* ccb;
		struct adv_ccb_info *cinfo;
		u_int done_qaddr;
		u_int sg_queue_cnt;
		int aborted;

		done_qaddr = ADV_QNO_TO_QADDR(done_qno);

		/* Pull status from this request */
		sg_queue_cnt = adv_copy_lram_doneq(adv, done_qaddr, &scsiq,
						   adv->max_dma_count);

		/* Mark it as free */
		adv_write_lram_8(adv, done_qaddr + ADV_SCSIQ_B_STATUS,
				 scsiq.q_status & ~(QS_READY|QS_ABORTED));

		/* Process request based on retrieved info */
		if ((scsiq.cntl & QC_SG_HEAD) != 0) {
			u_int i;

			/*
			 * S/G based request. Free all of the queue
			 * structures that contained S/G information.
			 */
			for (i = 0; i < sg_queue_cnt; i++) {
				done_qno = adv_read_lram_8(adv, done_qaddr
							   + ADV_SCSIQ_B_FWD);

#ifdef DIAGNOSTIC
				if (done_qno == ADV_QLINK_END) {
					panic("adv_qdone: Corrupted SG "
					      "list encountered");
				}
#endif
				done_qaddr = ADV_QNO_TO_QADDR(done_qno);

				/* Mark SG queue as free */
				adv_write_lram_8(adv, done_qaddr
						 + ADV_SCSIQ_B_STATUS, QS_FREE);
			}
		} else
			sg_queue_cnt = 0;
#ifdef DIAGNOSTIC
		if (adv->cur_active < (sg_queue_cnt + 1))
			panic("adv_qdone: Attempting to free more "
			      "queues than are active");
#endif
		adv->cur_active -= sg_queue_cnt + 1;

		aborted = (scsiq.q_status & QS_ABORTED) != 0;

		if ((scsiq.q_status != QS_DONE)
		 && (scsiq.q_status & QS_ABORTED) == 0)
			panic("adv_qdone: completed scsiq with unknown status");

		scsiq.remain_bytes += scsiq.extra_bytes;

		if ((scsiq.d3.done_stat == QD_WITH_ERROR) &&
		    (scsiq.d3.host_stat == QHSTA_M_DATA_OVER_RUN)) {
			if ((scsiq.cntl & (QC_DATA_IN|QC_DATA_OUT)) == 0) {
				scsiq.d3.done_stat = QD_NO_ERROR;
				scsiq.d3.host_stat = QHSTA_NO_ERROR;
			}
		}

		cinfo = &adv->ccb_infos[scsiq.d2.ccb_index];
		ccb = cinfo->ccb;
		ccb->csio.resid = scsiq.remain_bytes;
		adv_done(adv, ccb,
			 scsiq.d3.done_stat, scsiq.d3.host_stat,
			 scsiq.d3.scsi_stat, scsiq.q_no);

		doneq_head = done_qno;
		done_qno = adv_read_lram_8(adv, done_qaddr + ADV_SCSIQ_B_FWD);
	}
	adv_write_lram_16(adv, ADVV_DONE_Q_TAIL_W, doneq_head);
}


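/*
 * adv_done - translate the firmware's done_stat/host_stat/scsi_status
 * triple into a CAM status, sync and unload any data mapping, and
 * complete the CCB.
 */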
void
adv_done(struct adv_softc *adv, union ccb *ccb, u_int done_stat,
	 u_int host_stat, u_int scsi_status, u_int q_no)
{
	struct adv_ccb_info *cinfo;

	cinfo = (struct adv_ccb_info *)ccb->ccb_h.ccb_cinfo_ptr;
	LIST_REMOVE(&ccb->ccb_h, sim_links.le);
	untimeout(adv_timeout, ccb, ccb->ccb_h.timeout_ch);
	if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
		bus_dmasync_op_t op;

		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN)
			op = BUS_DMASYNC_POSTREAD;
		else
			op = BUS_DMASYNC_POSTWRITE;
		bus_dmamap_sync(adv->buffer_dmat, cinfo->dmamap, op);
		bus_dmamap_unload(adv->buffer_dmat, cinfo->dmamap);
	}

	switch (done_stat) {
	case QD_NO_ERROR:
		if (host_stat == QHSTA_NO_ERROR) {
			ccb->ccb_h.status = CAM_REQ_CMP;
			break;
		}
		xpt_print_path(ccb->ccb_h.path);
		printf("adv_done - queue done without error, "
		       "but host status non-zero(%x)\n", host_stat);
		/*FALLTHROUGH*/
	case QD_WITH_ERROR:
		switch (host_stat) {
		case QHSTA_M_TARGET_STATUS_BUSY:
		case QHSTA_M_BAD_QUEUE_FULL_OR_BUSY:
			/*
			 * Assume that if we were a tagged transaction
			 * the target reported queue full.  Otherwise,
			 * report busy.  The firmware really should just
			 * pass the original status back up to us even
			 * if it thinks the target was in error for
			 * returning this status as no other transactions
			 * from this initiator are in effect, but this
			 * ignores multi-initiator setups and there is
			 * evidence that the firmware gets its per-device
			 * transaction counts screwed up occasionally.
			 */
			ccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR;
			if ((ccb->ccb_h.flags & CAM_TAG_ACTION_VALID) != 0
			 && host_stat != QHSTA_M_TARGET_STATUS_BUSY)
				scsi_status = SCSI_STATUS_QUEUE_FULL;
			else
				scsi_status = SCSI_STATUS_BUSY;
			adv_abort_ccb(adv, ccb->ccb_h.target_id,
				      ccb->ccb_h.target_lun,
				      /*ccb*/NULL, CAM_REQUEUE_REQ,
				      /*queued_only*/TRUE);
			/*FALLTHROUGH*/
		case QHSTA_M_NO_AUTO_REQ_SENSE:
		case QHSTA_NO_ERROR:
			ccb->csio.scsi_status = scsi_status;
			switch (scsi_status) {
			case SCSI_STATUS_CHECK_COND:
			case SCSI_STATUS_CMD_TERMINATED:
				ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
				/* Structure copy */
				ccb->csio.sense_data =
				    adv->sense_buffers[q_no - 1];
				/* FALLTHROUGH */
			case SCSI_STATUS_BUSY:
			case SCSI_STATUS_RESERV_CONFLICT:
			case SCSI_STATUS_QUEUE_FULL:
			case SCSI_STATUS_COND_MET:
			case SCSI_STATUS_INTERMED:
			case SCSI_STATUS_INTERMED_COND_MET:
				ccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR;
				break;
			case SCSI_STATUS_OK:
				ccb->ccb_h.status |= CAM_REQ_CMP;
				break;
			}
			break;
		case QHSTA_M_SEL_TIMEOUT:
			ccb->ccb_h.status = CAM_SEL_TIMEOUT;
			break;
		case QHSTA_M_DATA_OVER_RUN:
			ccb->ccb_h.status = CAM_DATA_RUN_ERR;
			break;
		case QHSTA_M_UNEXPECTED_BUS_FREE:
			ccb->ccb_h.status = CAM_UNEXP_BUSFREE;
			break;
		case QHSTA_M_BAD_BUS_PHASE_SEQ:
			ccb->ccb_h.status = CAM_SEQUENCE_FAIL;
			break;
		case QHSTA_M_BAD_CMPL_STATUS_IN:
			/* No command complete after a status message */
			ccb->ccb_h.status = CAM_SEQUENCE_FAIL;
			break;
		case QHSTA_D_EXE_SCSI_Q_BUSY_TIMEOUT:
		case QHSTA_M_WTM_TIMEOUT:
		case QHSTA_M_HUNG_REQ_SCSI_BUS_RESET:
			/* The SCSI bus hung in a phase */
			ccb->ccb_h.status = CAM_SEQUENCE_FAIL;
			adv_reset_bus(adv, /*initiate_reset*/TRUE);
			break;
		case QHSTA_M_AUTO_REQ_SENSE_FAIL:
			ccb->ccb_h.status = CAM_AUTOSENSE_FAIL;
			break;
		case QHSTA_D_QDONE_SG_LIST_CORRUPTED:
		case QHSTA_D_ASC_DVC_ERROR_CODE_SET:
		case QHSTA_D_HOST_ABORT_FAILED:
		case QHSTA_D_EXE_SCSI_Q_FAILED:
		case QHSTA_D_ASPI_NO_BUF_POOL:
		case QHSTA_M_BAD_TAG_CODE:
		case QHSTA_D_LRAM_CMP_ERROR:
		case QHSTA_M_MICRO_CODE_ERROR_HALT:
		default:
			panic("%s: Unhandled Host status error %x",
			      adv_name(adv), host_stat);
			/* NOTREACHED */
		}
		break;

	case QD_ABORTED_BY_HOST:
		/* Don't clobber any more-explicit error codes we've set */
		if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG)
			ccb->ccb_h.status = CAM_REQ_ABORTED;
		break;

	default:
		xpt_print_path(ccb->ccb_h.path);
		printf("adv_done - queue done with unknown status %x:%x\n",
		       done_stat, host_stat);
		ccb->ccb_h.status = CAM_REQ_CMP_ERR;
		break;
	}
	adv_clear_state(adv, ccb);
	if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP
	 && (ccb->ccb_h.status & CAM_DEV_QFRZN) == 0) {
		xpt_freeze_devq(ccb->ccb_h.path, /*count*/1);
		ccb->ccb_h.status |= CAM_DEV_QFRZN;
	}
	adv_free_ccb_info(adv, cinfo);
	/*
	 * Null this out so that we catch driver bugs that cause a
	 * ccb to be completed twice.
	 */
	ccb->ccb_h.ccb_cinfo_ptr = NULL;
	ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
	xpt_done(ccb);
}

/*
 * Function to poll for command completion when
 * interrupts are disabled (crash dumps)
 */
static void
adv_poll(struct cam_sim *sim)
{
	adv_intr(cam_sim_softc(sim));
}

/*
 * Attach all the sub-devices we can find
 */
int
adv_attach(struct adv_softc *adv)
{
	struct ccb_setasync csa;
	struct cam_devq *devq;
	int max_sg;

	/*
	 * Allocate an array of ccb mapping structures.  We put the
	 * index of the ccb_info structure into the queue representing
	 * a transaction and use it for mapping the queue to the
	 * upper level SCSI transaction it represents.
	 */
	adv->ccb_infos = malloc(sizeof(*adv->ccb_infos) * adv->max_openings,
				M_DEVBUF, M_NOWAIT);

	if (adv->ccb_infos == NULL)
		return (ENOMEM);

	adv->init_level++;

	/*
	 * Create our DMA tags.  These tags define the kinds of device
	 * accessible memory allocations and memory mappings we will
	 * need to perform during normal operation.
	 *
	 * Unless we need to further restrict the allocation, we rely
	 * on the restrictions of the parent dmat, hence the common
	 * use of MAXADDR and MAXSIZE.
	 *
	 * The ASC boards use chains of "queues" (the transactional
	 * resources on the board) to represent long S/G lists.
	 * The first queue represents the command and holds a
	 * single address and data pair.  The queues that follow
	 * can each hold ADV_SG_LIST_PER_Q entries.  Given the
	 * total number of queues, we can express the largest
	 * transaction we can map.  We reserve a few queues for
	 * error recovery.  Take those into account as well.
	 *
	 * There is a way to take an interrupt to download the
	 * next batch of S/G entries if there are more than 255
	 * of them (the counter in the queue structure is a u_int8_t).
	 * We don't use this feature, so limit the S/G list size
	 * accordingly.
	 */
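	/*
	 * As a worked example (constants assumed for illustration only:
	 * ADV_MIN_FREE_Q == 2 and ADV_SG_LIST_PER_Q == 7), a board with
	 * 240 openings could map (240 - 2 - 1) * 7 = 1659 S/G entries,
	 * which the 8-bit counter then clamps to 255 below.
	 */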
	max_sg = (adv->max_openings - ADV_MIN_FREE_Q - 1) * ADV_SG_LIST_PER_Q;
	if (max_sg > 255)
		max_sg = 255;

	/* DMA tag for mapping buffers into device visible space. */
	if (bus_dma_tag_create(adv->parent_dmat, /*alignment*/1, /*boundary*/0,
			       /*lowaddr*/BUS_SPACE_MAXADDR,
			       /*highaddr*/BUS_SPACE_MAXADDR,
			       /*filter*/NULL, /*filterarg*/NULL,
			       /*maxsize*/MAXPHYS,
			       /*nsegments*/max_sg,
			       /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT,
			       /*flags*/BUS_DMA_ALLOCNOW,
			       &adv->buffer_dmat) != 0) {
		return (ENXIO);
	}
	adv->init_level++;

	/* DMA tag for our sense buffers */
	if (bus_dma_tag_create(adv->parent_dmat, /*alignment*/1, /*boundary*/0,
			       /*lowaddr*/BUS_SPACE_MAXADDR,
			       /*highaddr*/BUS_SPACE_MAXADDR,
			       /*filter*/NULL, /*filterarg*/NULL,
			       sizeof(struct scsi_sense_data)*adv->max_openings,
			       /*nsegments*/1,
			       /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT,
			       /*flags*/0, &adv->sense_dmat) != 0) {
		return (ENXIO);
	}

	adv->init_level++;

	/* Allocation for our sense buffers */
	if (bus_dmamem_alloc(adv->sense_dmat, (void **)&adv->sense_buffers,
			     BUS_DMA_NOWAIT, &adv->sense_dmamap) != 0) {
		return (ENOMEM);
	}

	adv->init_level++;

	/* And permanently map them */
	bus_dmamap_load(adv->sense_dmat, adv->sense_dmamap,
			adv->sense_buffers,
			sizeof(struct scsi_sense_data)*adv->max_openings,
			adv_map, &adv->sense_physbase, /*flags*/0);

	adv->init_level++;

	/*
	 * Fire up the chip
	 */
	if (adv_start_chip(adv) != 1) {
		printf("adv%d: Unable to start on board processor. Aborting.\n",
		       adv->unit);
		return (ENXIO);
	}

	/*
	 * Create the device queue for our SIM.
	 */
	devq = cam_simq_alloc(adv->max_openings);
	if (devq == NULL)
		return (ENOMEM);

	/*
	 * Construct our SIM entry.
	 */
	adv->sim = cam_sim_alloc(adv_action, adv_poll, "adv", adv, adv->unit,
				 1, adv->max_openings, devq);
	if (adv->sim == NULL)
		return (ENOMEM);

	/*
	 * Register the bus.
	 *
	 * XXX Twin Channel EISA Cards???
	 */
	if (xpt_bus_register(adv->sim, 0) != CAM_SUCCESS) {
		cam_sim_free(adv->sim, /*free devq*/TRUE);
		return (ENXIO);
	}

	if (xpt_create_path(&adv->path, /*periph*/NULL, cam_sim_path(adv->sim),
			    CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD)
	    != CAM_REQ_CMP) {
		xpt_bus_deregister(cam_sim_path(adv->sim));
		cam_sim_free(adv->sim, /*free devq*/TRUE);
		return (ENXIO);
	}

	xpt_setup_ccb(&csa.ccb_h, adv->path, /*priority*/5);
	csa.ccb_h.func_code = XPT_SASYNC_CB;
	csa.event_enable = AC_FOUND_DEVICE|AC_LOST_DEVICE;
	csa.callback = advasync;
	csa.callback_arg = adv;
	xpt_action((union ccb *)&csa);
	return (0);
}