sys/dev/amd/amd.c
1 /*-
2 *********************************************************************
3 * FILE NAME : amd.c
4 * BY : C.L. Huang (ching@tekram.com.tw)
5 * Erich Chen (erich@tekram.com.tw)
6 * Description: Device Driver for the amd53c974 PCI Bus Master
7 * SCSI Host adapter found on cards such as
8 * the Tekram DC-390(T).
9 * (C)Copyright 1995-1999 Tekram Technology Co., Ltd.
10 *
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
13 * are met:
14 * 1. Redistributions of source code must retain the above copyright
15 * notice, this list of conditions and the following disclaimer.
16 * 2. Redistributions in binary form must reproduce the above copyright
17 * notice, this list of conditions and the following disclaimer in the
18 * documentation and/or other materials provided with the distribution.
19 * 3. The name of the author may not be used to endorse or promote products
20 * derived from this software without specific prior written permission.
21 *
22 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
23 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
24 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
25 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
26 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
27 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
31 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32 *********************************************************************
33 * $FreeBSD$
34 */
35
36 /*
37 *********************************************************************
38 * HISTORY:
39 *
40 * REV# DATE NAME DESCRIPTION
41 * 1.00 07/02/96 CLH First release for RELEASE-2.1.0
42 * 1.01 08/20/96 CLH Update for RELEASE-2.1.5
43 * 1.02 11/06/96 CLH Fixed more than 1 LUN scanning
44 * 1.03 12/20/96 CLH Modify to support 2.2-ALPHA
45 * 1.04 12/26/97 CLH Modify to support RELEASE-2.2.5
46 * 1.05 01/01/99 ERICH CHEN Modify to support RELEASE-3.0.x (CAM)
47 *********************************************************************
48 */
49
50 /* #define AMD_DEBUG0 */
51 /* #define AMD_DEBUG_SCSI_PHASE */
52
53 #include <sys/param.h>
54
55 #include <sys/systm.h>
56 #include <sys/queue.h>
57 #include <sys/kernel.h>
58 #include <sys/module.h>
59 #include <sys/lock.h>
60 #include <sys/mutex.h>
61
62 #include <vm/vm.h>
63 #include <vm/pmap.h>
64
65 #include <machine/bus_pio.h>
66 #include <machine/bus.h>
67 #include <machine/resource.h>
68 #include <sys/bus.h>
69 #include <sys/rman.h>
70
71 #include <cam/cam.h>
72 #include <cam/cam_ccb.h>
73 #include <cam/cam_sim.h>
74 #include <cam/cam_xpt_sim.h>
75 #include <cam/cam_debug.h>
76
77 #include <cam/scsi/scsi_all.h>
78 #include <cam/scsi/scsi_message.h>
79
80 #include <dev/pci/pcivar.h>
81 #include <dev/pci/pcireg.h>
82 #include <dev/amd/amd.h>
83
84 #define PCI_DEVICE_ID_AMD53C974 0x20201022ul
85 #define PCI_BASE_ADDR0 0x10
86
87 typedef u_int (phase_handler_t)(struct amd_softc *, struct amd_srb *, u_int);
88 typedef phase_handler_t *phase_handler_func_t;
89
90 static void amd_intr(void *vamd);
91 static int amdstart(struct amd_softc *amd, struct amd_srb * pSRB);
92 static phase_handler_t amd_NopPhase;
93
94 static phase_handler_t amd_DataOutPhase0;
95 static phase_handler_t amd_DataInPhase0;
96 #define amd_CommandPhase0 amd_NopPhase
97 static phase_handler_t amd_StatusPhase0;
98 static phase_handler_t amd_MsgOutPhase0;
99 static phase_handler_t amd_MsgInPhase0;
100 static phase_handler_t amd_DataOutPhase1;
101 static phase_handler_t amd_DataInPhase1;
102 static phase_handler_t amd_CommandPhase1;
103 static phase_handler_t amd_StatusPhase1;
104 static phase_handler_t amd_MsgOutPhase1;
105 static phase_handler_t amd_MsgInPhase1;
106
107 static void amdsetupcommand(struct amd_softc *amd, struct amd_srb *srb);
108 static int amdparsemsg(struct amd_softc *amd);
109 static int amdhandlemsgreject(struct amd_softc *amd);
110 static void amdconstructsdtr(struct amd_softc *amd,
111 u_int period, u_int offset);
112 static u_int amdfindclockrate(struct amd_softc *amd, u_int *period);
113 static int amdsentmsg(struct amd_softc *amd, u_int msgtype, int full);
114
115 static void DataIO_Comm(struct amd_softc *amd, struct amd_srb *pSRB, u_int dir);
116 static void amd_Disconnect(struct amd_softc *amd);
117 static void amd_Reselect(struct amd_softc *amd);
118 static void SRBdone(struct amd_softc *amd, struct amd_srb *pSRB);
119 static void amd_ScsiRstDetect(struct amd_softc *amd);
120 static void amd_ResetSCSIBus(struct amd_softc *amd);
121 static void RequestSense(struct amd_softc *amd, struct amd_srb *pSRB);
122 static void amd_InvalidCmd(struct amd_softc *amd);
123
124 static void amd_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nsegs,
125 int error);
126
127 #if 0
128 static void amd_timeout(void *arg1);
129 static void amd_reset(struct amd_softc *amd);
130 #endif
131 static u_int8_t * phystovirt(struct amd_srb *pSRB, u_int32_t xferCnt);
132
133 void amd_linkSRB(struct amd_softc *amd);
134 static int amd_init(device_t);
135 static void amd_load_defaults(struct amd_softc *amd);
136 static void amd_load_eeprom_or_defaults(struct amd_softc *amd);
137 static int amd_EEpromInDO(struct amd_softc *amd);
138 static u_int16_t EEpromGetData1(struct amd_softc *amd);
139 static void amd_EnDisableCE(struct amd_softc *amd, int mode, int *regval);
140 static void amd_EEpromOutDI(struct amd_softc *amd, int *regval, int Carry);
141 static void amd_Prepare(struct amd_softc *amd, int *regval, u_int8_t EEpromCmd);
142 static void amd_ReadEEprom(struct amd_softc *amd);
143
144 static int amd_probe(device_t);
145 static int amd_attach(device_t);
146 static void amdcompletematch(struct amd_softc *amd, target_id_t target,
147 lun_id_t lun, u_int tag, struct srb_queue *queue,
148 cam_status status);
149 static void amdsetsync(struct amd_softc *amd, u_int target, u_int clockrate,
150 u_int period, u_int offset, u_int type);
151 static void amdsettags(struct amd_softc *amd, u_int target, int tagenb);
152
153 static __inline void amd_clear_msg_state(struct amd_softc *amd);
154
155 static __inline void
156 amd_clear_msg_state(struct amd_softc *amd)
157 {
158 amd->msgout_len = 0;
159 amd->msgout_index = 0;
160 amd->msgin_index = 0;
161 }
162
163 static __inline uint32_t
164 amd_get_sense_bufaddr(struct amd_softc *amd, struct amd_srb *pSRB)
165 {
166 int offset;
167
168 offset = pSRB->TagNumber;
169 return (amd->sense_busaddr + (offset * sizeof(struct scsi_sense_data)));
170 }
171
172 static __inline struct scsi_sense_data *
173 amd_get_sense_buf(struct amd_softc *amd, struct amd_srb *pSRB)
174 {
175 int offset;
176
177 offset = pSRB->TagNumber;
178 return (&amd->sense_buffers[offset]);
179 }
180
181 static __inline uint32_t
182 amd_get_sense_bufsize(struct amd_softc *amd, struct amd_srb *pSRB)
183 {
184 return (sizeof(struct scsi_sense_data));
185 }
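
/*
 * Worked example for the three helpers above (an annotation, not
 * driver code): SRBs and sense buffers are parallel arrays indexed
 * by TagNumber, so with, e.g., sizeof(struct scsi_sense_data) == 32,
 * the slot for an SRB with TagNumber 3 lives at
 * sense_busaddr + 3 * 32 on the bus and at sense_buffers[3] in
 * kernel virtual memory. RequestSense() below points Segmentx at
 * exactly this pair.
 */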
186
187 /* CAM SIM entry points */
188 #define ccb_srb_ptr spriv_ptr0
189 #define ccb_amd_ptr spriv_ptr1
190 static void amd_action(struct cam_sim *sim, union ccb *ccb);
191 static void amd_poll(struct cam_sim *sim);
192
193 /*
194 * State engine function tables indexed by SCSI phase number
195 */
196 phase_handler_func_t amd_SCSI_phase0[] = {
197 amd_DataOutPhase0,
198 amd_DataInPhase0,
199 amd_CommandPhase0,
200 amd_StatusPhase0,
201 amd_NopPhase,
202 amd_NopPhase,
203 amd_MsgOutPhase0,
204 amd_MsgInPhase0
205 };
206
207 phase_handler_func_t amd_SCSI_phase1[] = {
208 amd_DataOutPhase1,
209 amd_DataInPhase1,
210 amd_CommandPhase1,
211 amd_StatusPhase1,
212 amd_NopPhase,
213 amd_NopPhase,
214 amd_MsgOutPhase1,
215 amd_MsgInPhase1
216 };
217
218 /*
219 * EEProm/BIOS negotiation periods
220 */
221 u_int8_t eeprom_period[] = {
222 25, /* 10.0MHz */
223 32, /* 8.0MHz */
224 38, /* 6.6MHz */
225 44, /* 5.7MHz */
226 50, /* 5.0MHz */
227 63, /* 4.0MHz */
228 83, /* 3.0MHz */
229 125 /* 2.0MHz */
230 };
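
/*
 * Note on units (annotation): each entry is a SCSI transfer period
 * in 4ns steps, so 25 -> 100ns -> 10.0MHz and 125 -> 500ns -> 2.0MHz,
 * matching the frequencies in the comments above.
 */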
231
232 /*
233 * chip clock setting to SCSI specified sync parameter table.
234 */
235 u_int8_t tinfo_sync_period[] = {
236 25, /* 10.0 */
237 32, /* 8.0 */
238 38, /* 6.6 */
239 44, /* 5.7 */
240 50, /* 5.0 */
241 57, /* 4.4 */
242 63, /* 4.0 */
243 70, /* 3.6 */
244 76, /* 3.3 */
245 83 /* 3.0 */
246 };
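
/*
 * Annotation: the index into this table maps directly to the chip
 * clock setting; amdfindclockrate() below returns (index + 4), and
 * the disabled message-in code indexes it as SyncPeriod - 4. So a
 * period of 50 (5.0MHz) sits at index 4 and programs clockrate 8,
 * which also selects the 25ns glitch eater (clockrate > 7).
 */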
247
248 static __inline struct amd_srb *
249 amdgetsrb(struct amd_softc * amd)
250 {
251 int intflag;
252 struct amd_srb * pSRB;
253
254 intflag = splcam();
255 pSRB = TAILQ_FIRST(&amd->free_srbs);
256 if (pSRB)
257 TAILQ_REMOVE(&amd->free_srbs, pSRB, links);
258 splx(intflag);
259 return (pSRB);
260 }
261
262 static void
263 amdsetupcommand(struct amd_softc *amd, struct amd_srb *srb)
264 {
265 struct scsi_request_sense sense_cmd;
266 u_int8_t *cdb;
267 u_int cdb_len;
268
269 if (srb->SRBFlag & AUTO_REQSENSE) {
270 sense_cmd.opcode = REQUEST_SENSE;
271 sense_cmd.byte2 = srb->pccb->ccb_h.target_lun << 5;
272 sense_cmd.unused[0] = 0;
273 sense_cmd.unused[1] = 0;
274 sense_cmd.length = sizeof(struct scsi_sense_data);
275 sense_cmd.control = 0;
276 cdb = &sense_cmd.opcode;
277 cdb_len = sizeof(sense_cmd);
278 } else {
279 cdb = &srb->CmdBlock[0];
280 cdb_len = srb->ScsiCmdLen;
281 }
282 amd_write8_multi(amd, SCSIFIFOREG, cdb, cdb_len);
283 }
284
285 /*
286 * Attempt to start a waiting transaction. Interrupts must be disabled
287 * upon entry to this function.
288 */
289 static void
290 amdrunwaiting(struct amd_softc *amd) {
291 struct amd_srb *srb;
292
293 if (amd->last_phase != SCSI_BUS_FREE)
294 return;
295
296 srb = TAILQ_FIRST(&amd->waiting_srbs);
297 if (srb == NULL)
298 return;
299
300 if (amdstart(amd, srb) == 0) {
301 TAILQ_REMOVE(&amd->waiting_srbs, srb, links);
302 TAILQ_INSERT_HEAD(&amd->running_srbs, srb, links);
303 }
304 }
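
/*
 * Sketch of the SRB queue lifecycle implied by the functions above
 * (illustration only; amd_queue_sketch() is hypothetical):
 * free_srbs -> waiting_srbs -> running_srbs -> free_srbs.
 */
#if 0
static void
amd_queue_sketch(struct amd_softc *amd, struct amd_srb *srb)
{
int s;

s = splcam();
/* amd_action()/amdexecutesrb(): free_srbs -> waiting_srbs */
TAILQ_INSERT_TAIL(&amd->waiting_srbs, srb, links);
/* amdrunwaiting(): waiting_srbs -> running_srbs when amdstart() == 0 */
TAILQ_REMOVE(&amd->waiting_srbs, srb, links);
TAILQ_INSERT_HEAD(&amd->running_srbs, srb, links);
/* SRBdone()/amdcompletematch(): running_srbs -> free_srbs */
TAILQ_REMOVE(&amd->running_srbs, srb, links);
TAILQ_INSERT_HEAD(&amd->free_srbs, srb, links);
splx(s);
}
#endif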
305
306 static void
307 amdexecutesrb(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
308 {
309 struct amd_srb *srb;
310 union ccb *ccb;
311 struct amd_softc *amd;
312 int s;
313
314 srb = (struct amd_srb *)arg;
315 ccb = srb->pccb;
316 amd = (struct amd_softc *)ccb->ccb_h.ccb_amd_ptr;
317
318 if (error != 0) {
319 if (error != EFBIG)
320 printf("amd%d: Unexpected error 0x%x returned from "
321 "bus_dmamap_load\n", amd->unit, error);
322 if (ccb->ccb_h.status == CAM_REQ_INPROG) {
323 xpt_freeze_devq(ccb->ccb_h.path, /*count*/1);
324 ccb->ccb_h.status = CAM_REQ_TOO_BIG|CAM_DEV_QFRZN;
325 }
326 TAILQ_INSERT_HEAD(&amd->free_srbs, srb, links);
327 xpt_done(ccb);
328 return;
329 }
330
331 if (nseg != 0) {
332 struct amd_sg *sg;
333 bus_dma_segment_t *end_seg;
334 bus_dmasync_op_t op;
335
336 end_seg = dm_segs + nseg;
337
338 /* Copy the segments into our SG list */
339 srb->pSGlist = &srb->SGsegment[0];
340 sg = srb->pSGlist;
341 while (dm_segs < end_seg) {
342 sg->SGXLen = dm_segs->ds_len;
343 sg->SGXPtr = dm_segs->ds_addr;
344 sg++;
345 dm_segs++;
346 }
347
348 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN)
349 op = BUS_DMASYNC_PREREAD;
350 else
351 op = BUS_DMASYNC_PREWRITE;
352
353 bus_dmamap_sync(amd->buffer_dmat, srb->dmamap, op);
354
355 }
356 srb->SGcount = nseg;
357 srb->SGIndex = 0;
358 srb->AdaptStatus = 0;
359 srb->TargetStatus = 0;
360 srb->MsgCnt = 0;
361 srb->SRBStatus = 0;
362 srb->SRBFlag = 0;
363 srb->SRBState = 0;
364 srb->TotalXferredLen = 0;
365 srb->SGPhysAddr = 0;
366 srb->SGToBeXferLen = 0;
367 srb->EndMessage = 0;
368
369 s = splcam();
370
371 /*
372 * Last chance to check whether this CCB
373 * needs to be aborted.
374 */
375 if (ccb->ccb_h.status != CAM_REQ_INPROG) {
376 if (nseg != 0)
377 bus_dmamap_unload(amd->buffer_dmat, srb->dmamap);
378 TAILQ_INSERT_HEAD(&amd->free_srbs, srb, links);
379 xpt_done(ccb);
380 splx(s);
381 return;
382 }
383 ccb->ccb_h.status |= CAM_SIM_QUEUED;
384 #if 0
385 /* XXX Need a timeout handler */
386 ccb->ccb_h.timeout_ch =
387 timeout(amdtimeout, (caddr_t)srb,
388 (ccb->ccb_h.timeout * hz) / 1000);
389 #endif
390 TAILQ_INSERT_TAIL(&amd->waiting_srbs, srb, links);
391 amdrunwaiting(amd);
392 splx(s);
393 }
394
395 static void
396 amd_action(struct cam_sim * psim, union ccb * pccb)
397 {
398 struct amd_softc * amd;
399 u_int target_id;
400
401 CAM_DEBUG(pccb->ccb_h.path, CAM_DEBUG_TRACE, ("amd_action\n"));
402
403 amd = (struct amd_softc *) cam_sim_softc(psim);
404 target_id = pccb->ccb_h.target_id;
405
406 switch (pccb->ccb_h.func_code) {
407 case XPT_SCSI_IO:
408 {
409 struct amd_srb * pSRB;
410 struct ccb_scsiio *pcsio;
411
412 pcsio = &pccb->csio;
413
414 /*
415 * Assign an SRB and connect it with this ccb.
416 */
417 pSRB = amdgetsrb(amd);
418
419 if (!pSRB) {
420 /* Freeze SIMQ */
421 pccb->ccb_h.status = CAM_RESRC_UNAVAIL;
422 xpt_done(pccb);
423 return;
424 }
425 pSRB->pccb = pccb;
426 pccb->ccb_h.ccb_srb_ptr = pSRB;
427 pccb->ccb_h.ccb_amd_ptr = amd;
428 pSRB->ScsiCmdLen = pcsio->cdb_len;
429 bcopy(pcsio->cdb_io.cdb_bytes, pSRB->CmdBlock, pcsio->cdb_len);
430 if ((pccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
431 if ((pccb->ccb_h.flags & CAM_SCATTER_VALID) == 0) {
432 /*
433 * We've been given a pointer
434 * to a single buffer.
435 */
436 if ((pccb->ccb_h.flags & CAM_DATA_PHYS) == 0) {
437 int s;
438 int error;
439
440 s = splsoftvm();
441 error =
442 bus_dmamap_load(amd->buffer_dmat,
443 pSRB->dmamap,
444 pcsio->data_ptr,
445 pcsio->dxfer_len,
446 amdexecutesrb,
447 pSRB, /*flags*/0);
448 if (error == EINPROGRESS) {
449 /*
450 * So as to maintain
451 * ordering, freeze the
452 * controller queue
453 * until our mapping is
454 * returned.
455 */
456 xpt_freeze_simq(amd->psim, 1);
457 pccb->ccb_h.status |=
458 CAM_RELEASE_SIMQ;
459 }
460 splx(s);
461 } else {
462 struct bus_dma_segment seg;
463
464 /* Pointer to physical buffer */
465 seg.ds_addr =
466 (bus_addr_t)pcsio->data_ptr;
467 seg.ds_len = pcsio->dxfer_len;
468 amdexecutesrb(pSRB, &seg, 1, 0);
469 }
470 } else {
471 struct bus_dma_segment *segs;
472
473 if ((pccb->ccb_h.flags & CAM_SG_LIST_PHYS) == 0
474 || (pccb->ccb_h.flags & CAM_DATA_PHYS) != 0) {
475 TAILQ_INSERT_HEAD(&amd->free_srbs,
476 pSRB, links);
477 pccb->ccb_h.status = CAM_PROVIDE_FAIL;
478 xpt_done(pccb);
479 return;
480 }
481
482 /* Just use the segments provided */
483 segs =
484 (struct bus_dma_segment *)pcsio->data_ptr;
485 amdexecutesrb(pSRB, segs, pcsio->sglist_cnt, 0);
486 }
487 } else
488 amdexecutesrb(pSRB, NULL, 0, 0);
489 break;
490 }
491 case XPT_PATH_INQ:
492 {
493 struct ccb_pathinq *cpi = &pccb->cpi;
494
495 cpi->version_num = 1;
496 cpi->hba_inquiry = PI_SDTR_ABLE | PI_TAG_ABLE;
497 cpi->target_sprt = 0;
498 cpi->hba_misc = 0;
499 cpi->hba_eng_cnt = 0;
500 cpi->max_target = 7;
501 cpi->max_lun = amd->max_lun; /* 7 or 0 */
502 cpi->initiator_id = amd->AdaptSCSIID;
503 cpi->bus_id = cam_sim_bus(psim);
504 strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
505 strncpy(cpi->hba_vid, "TRM-AMD", HBA_IDLEN);
506 strncpy(cpi->dev_name, cam_sim_name(psim), DEV_IDLEN);
507 cpi->unit_number = cam_sim_unit(psim);
508 cpi->ccb_h.status = CAM_REQ_CMP;
509 xpt_done(pccb);
510 break;
511 }
512 case XPT_ABORT:
513 pccb->ccb_h.status = CAM_REQ_INVALID;
514 xpt_done(pccb);
515 break;
516 case XPT_RESET_BUS:
517 {
518
519 int i;
520
521 amd_ResetSCSIBus(amd);
522 amd->ACBFlag = 0;
523
524 for (i = 0; i < 500; i++) {
525 DELAY(1000); /* Wait until our interrupt
526 * handler sees it */
527 }
528
529 pccb->ccb_h.status = CAM_REQ_CMP;
530 xpt_done(pccb);
531 break;
532 }
533 case XPT_RESET_DEV:
534 pccb->ccb_h.status = CAM_REQ_INVALID;
535 xpt_done(pccb);
536 break;
537 case XPT_TERM_IO:
538 pccb->ccb_h.status = CAM_REQ_INVALID;
539 xpt_done(pccb);
540 break;
541 case XPT_GET_TRAN_SETTINGS:
542 {
543 struct ccb_trans_settings *cts;
544 struct amd_target_info *targ_info;
545 struct amd_transinfo *tinfo;
546 int intflag;
547
548 cts = &pccb->cts;
549 intflag = splcam();
550 targ_info = &amd->tinfo[target_id];
551 if ((cts->flags & CCB_TRANS_CURRENT_SETTINGS) != 0) {
552 /* current transfer settings */
553 if (targ_info->disc_tag & AMD_CUR_DISCENB) {
554 cts->flags = CCB_TRANS_DISC_ENB;
555 } else {
556 cts->flags = 0; /* no tag & disconnect */
557 }
558 if (targ_info->disc_tag & AMD_CUR_TAGENB) {
559 cts->flags |= CCB_TRANS_TAG_ENB;
560 }
561 tinfo = &targ_info->current;
562 } else {
563 /* default(user) transfer settings */
564 if (targ_info->disc_tag & AMD_USR_DISCENB) {
565 cts->flags = CCB_TRANS_DISC_ENB;
566 } else {
567 cts->flags = 0;
568 }
569 if (targ_info->disc_tag & AMD_USR_TAGENB) {
570 cts->flags |= CCB_TRANS_TAG_ENB;
571 }
572 tinfo = &targ_info->user;
573 }
574
575 cts->sync_period = tinfo->period;
576 cts->sync_offset = tinfo->offset;
577 cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
578 splx(intflag);
579 cts->valid = CCB_TRANS_SYNC_RATE_VALID
580 | CCB_TRANS_SYNC_OFFSET_VALID
581 | CCB_TRANS_BUS_WIDTH_VALID
582 | CCB_TRANS_DISC_VALID
583 | CCB_TRANS_TQ_VALID;
584 pccb->ccb_h.status = CAM_REQ_CMP;
585 xpt_done(pccb);
586 break;
587 }
588 case XPT_SET_TRAN_SETTINGS:
589 {
590 struct ccb_trans_settings *cts;
591 struct amd_target_info *targ_info;
592 u_int update_type;
593 int intflag;
594 int last_entry;
595
596 cts = &pccb->cts;
597 update_type = 0;
598 if ((cts->flags & CCB_TRANS_CURRENT_SETTINGS) != 0) {
599 update_type |= AMD_TRANS_GOAL;
600 } else if ((cts->flags & CCB_TRANS_USER_SETTINGS) != 0) {
601 update_type |= AMD_TRANS_USER;
602 }
603 if (update_type == 0
604 || update_type == (AMD_TRANS_USER|AMD_TRANS_GOAL)) {
605 cts->ccb_h.status = CAM_REQ_INVALID;
606 xpt_done(pccb); break;
607 }
608
609 intflag = splcam();
610 targ_info = &amd->tinfo[target_id];
611
612 if ((cts->valid & CCB_TRANS_DISC_VALID) != 0) {
613 if (update_type & AMD_TRANS_GOAL) {
614 if ((cts->flags & CCB_TRANS_DISC_ENB) != 0) {
615 targ_info->disc_tag |= AMD_CUR_DISCENB;
616 } else {
617 targ_info->disc_tag &= ~AMD_CUR_DISCENB;
618 }
619 }
620 if (update_type & AMD_TRANS_USER) {
621 if ((cts->flags & CCB_TRANS_DISC_ENB) != 0) {
622 targ_info->disc_tag |= AMD_USR_DISCENB;
623 } else {
624 targ_info->disc_tag &= ~AMD_USR_DISCENB;
625 }
626 }
627 }
628 if ((cts->valid & CCB_TRANS_TQ_VALID) != 0) {
629 if (update_type & AMD_TRANS_GOAL) {
630 if ((cts->flags & CCB_TRANS_TAG_ENB) != 0) {
631 targ_info->disc_tag |= AMD_CUR_TAGENB;
632 } else {
633 targ_info->disc_tag &= ~AMD_CUR_TAGENB;
634 }
635 }
636 if (update_type & AMD_TRANS_USER) {
637 if ((cts->flags & CCB_TRANS_TAG_ENB) != 0) {
638 targ_info->disc_tag |= AMD_USR_TAGENB;
639 } else {
640 targ_info->disc_tag &= ~AMD_USR_TAGENB;
641 }
642 }
643 }
644
645 if ((cts->valid & CCB_TRANS_SYNC_OFFSET_VALID) == 0) {
646 if (update_type & AMD_TRANS_GOAL)
647 cts->sync_offset = targ_info->goal.offset;
648 else
649 cts->sync_offset = targ_info->user.offset;
650 }
651
652 if (cts->sync_offset > AMD_MAX_SYNC_OFFSET)
653 cts->sync_offset = AMD_MAX_SYNC_OFFSET;
654
655 if ((cts->valid & CCB_TRANS_SYNC_RATE_VALID) == 0) {
656 if (update_type & AMD_TRANS_GOAL)
657 cts->sync_period = targ_info->goal.period;
658 else
659 cts->sync_period = targ_info->user.period;
660 }
661
662 last_entry = sizeof(tinfo_sync_period) - 1;
663 if ((cts->sync_period != 0)
664 && (cts->sync_period < tinfo_sync_period[0]))
665 cts->sync_period = tinfo_sync_period[0];
666 if (cts->sync_period > tinfo_sync_period[last_entry])
667 cts->sync_period = 0;
668 if (cts->sync_offset == 0)
669 cts->sync_period = 0;
670
671 if ((update_type & AMD_TRANS_USER) != 0) {
672 targ_info->user.period = cts->sync_period;
673 targ_info->user.offset = cts->sync_offset;
674 }
675 if ((update_type & AMD_TRANS_GOAL) != 0) {
676 targ_info->goal.period = cts->sync_period;
677 targ_info->goal.offset = cts->sync_offset;
678 }
679 splx(intflag);
680 pccb->ccb_h.status = CAM_REQ_CMP;
681 xpt_done(pccb);
682 break;
683 }
684 case XPT_CALC_GEOMETRY:
685 {
686 int extended;
687
688 extended = (amd->eepromBuf[EE_MODE2] & GREATER_1G) != 0;
689 cam_calc_geometry(&pccb->ccg, extended);
690 xpt_done(pccb);
691 break;
692 }
693 default:
694 pccb->ccb_h.status = CAM_REQ_INVALID;
695 xpt_done(pccb);
696 break;
697 }
698 }
699
700 static void
701 amd_poll(struct cam_sim * psim)
702 {
703 amd_intr(cam_sim_softc(psim));
704 }
705
706 static u_int8_t *
707 phystovirt(struct amd_srb * pSRB, u_int32_t xferCnt)
708 {
709 intptr_t dataPtr;
710 struct ccb_scsiio *pcsio;
711 u_int8_t i;
712 struct amd_sg * pseg;
713
714 dataPtr = 0;
715 pcsio = &pSRB->pccb->csio;
716
717 dataPtr = (intptr_t) pcsio->data_ptr;
718 pseg = pSRB->SGsegment;
719 for (i = 0; i < pSRB->SGIndex; i++) {
720 dataPtr += (int) pseg->SGXLen;
721 pseg++;
722 }
723 dataPtr += (int) xferCnt;
724 return ((u_int8_t *) dataPtr);
725 }
726
727 static void
728 amd_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
729 {
730 bus_addr_t *baddr;
731
732 baddr = (bus_addr_t *)arg;
733 *baddr = segs->ds_addr;
734 }
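
/*
 * Typical use of the callback above (a sketch with hypothetical
 * locals): bus_dmamap_load() on a contiguous buffer invokes the
 * callback with a single segment, whose bus address we capture.
 */
#if 0
bus_addr_t baddr;

bus_dmamap_load(dmat, dmamap, vaddr, size, amd_dmamap_cb,
&baddr, /*flags*/0);
#endif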
735
736 static void
737 ResetDevParam(struct amd_softc * amd)
738 {
739 u_int target;
740
741 for (target = 0; target <= amd->max_id; target++) {
742 if (amd->AdaptSCSIID != target) {
743 amdsetsync(amd, target, /*clockrate*/0,
744 /*period*/0, /*offset*/0, AMD_TRANS_CUR);
745 }
746 }
747 }
748
749 static void
750 amdcompletematch(struct amd_softc *amd, target_id_t target, lun_id_t lun,
751 u_int tag, struct srb_queue *queue, cam_status status)
752 {
753 struct amd_srb *srb;
754 struct amd_srb *next_srb;
755
756 for (srb = TAILQ_FIRST(queue); srb != NULL; srb = next_srb) {
757 union ccb *ccb;
758
759 next_srb = TAILQ_NEXT(srb, links);
760 if (srb->pccb->ccb_h.target_id != target
761 && target != CAM_TARGET_WILDCARD)
762 continue;
763
764 if (srb->pccb->ccb_h.target_lun != lun
765 && lun != CAM_LUN_WILDCARD)
766 continue;
767
768 if (srb->TagNumber != tag
769 && tag != AMD_TAG_WILDCARD)
770 continue;
771
772 ccb = srb->pccb;
773 TAILQ_REMOVE(queue, srb, links);
774 TAILQ_INSERT_HEAD(&amd->free_srbs, srb, links);
775 if ((ccb->ccb_h.status & CAM_DEV_QFRZN) == 0
776 && (status & CAM_DEV_QFRZN) != 0)
777 xpt_freeze_devq(ccb->ccb_h.path, /*count*/1);
778 ccb->ccb_h.status = status;
779 xpt_done(ccb);
780 }
781
782 }
783
784 static void
785 amdsetsync(struct amd_softc *amd, u_int target, u_int clockrate,
786 u_int period, u_int offset, u_int type)
787 {
788 struct amd_target_info *tinfo;
789 u_int old_period;
790 u_int old_offset;
791
792 tinfo = &amd->tinfo[target];
793 old_period = tinfo->current.period;
794 old_offset = tinfo->current.offset;
795 if ((type & AMD_TRANS_CUR) != 0
796 && (old_period != period || old_offset != offset)) {
797 struct cam_path *path;
798
799 tinfo->current.period = period;
800 tinfo->current.offset = offset;
801 tinfo->sync_period_reg = clockrate;
802 tinfo->sync_offset_reg = offset;
803 tinfo->CtrlR3 &= ~FAST_SCSI;
804 tinfo->CtrlR4 &= ~EATER_25NS;
805 if (clockrate > 7)
806 tinfo->CtrlR4 |= EATER_25NS;
807 else
808 tinfo->CtrlR3 |= FAST_SCSI;
809
810 if ((type & AMD_TRANS_ACTIVE) == AMD_TRANS_ACTIVE) {
811 amd_write8(amd, SYNCPERIOREG, tinfo->sync_period_reg);
812 amd_write8(amd, SYNCOFFREG, tinfo->sync_offset_reg);
813 amd_write8(amd, CNTLREG3, tinfo->CtrlR3);
814 amd_write8(amd, CNTLREG4, tinfo->CtrlR4);
815 }
816 /* If possible, update the XPT's notion of our transfer rate */
817 if (xpt_create_path(&path, /*periph*/NULL,
818 cam_sim_path(amd->psim), target,
819 CAM_LUN_WILDCARD) == CAM_REQ_CMP) {
820 struct ccb_trans_settings neg;
821
822 xpt_setup_ccb(&neg.ccb_h, path, /*priority*/1);
823 neg.sync_period = period;
824 neg.sync_offset = offset;
825 neg.valid = CCB_TRANS_SYNC_RATE_VALID
826 | CCB_TRANS_SYNC_OFFSET_VALID;
827 xpt_async(AC_TRANSFER_NEG, path, &neg);
828 xpt_free_path(path);
829 }
830 }
831 if ((type & AMD_TRANS_GOAL) != 0) {
832 tinfo->goal.period = period;
833 tinfo->goal.offset = offset;
834 }
835
836 if ((type & AMD_TRANS_USER) != 0) {
837 tinfo->user.period = period;
838 tinfo->user.offset = offset;
839 }
840 }
841
842 static void
843 amdsettags(struct amd_softc *amd, u_int target, int tagenb)
844 {
845 panic("Implement me!\n");
846 }
847
848
849 #if 0
850 /*
851 **********************************************************************
852 * Function : amd_reset (struct amd_softc * amd)
853 * Purpose : perform a hard reset on the SCSI bus (and AMD chip).
854 * Inputs : amd - softc of the adapter to reset
855 **********************************************************************
856 */
857 static void
858 amd_reset(struct amd_softc * amd)
859 {
860 int intflag;
861 u_int8_t bval;
862 u_int16_t i;
863
864
865 #ifdef AMD_DEBUG0
866 printf("DC390: RESET");
867 #endif
868
869 intflag = splcam();
870 bval = amd_read8(amd, CNTLREG1);
871 bval |= DIS_INT_ON_SCSI_RST;
872 amd_write8(amd, CNTLREG1, bval); /* disable interrupt */
873 amd_ResetSCSIBus(amd);
874
875 for (i = 0; i < 500; i++) {
876 DELAY(1000);
877 }
878
879 bval = amd_read8(amd, CNTLREG1);
880 bval &= ~DIS_INT_ON_SCSI_RST;
881 amd_write8(amd, CNTLREG1, bval); /* re-enable interrupt */
882
883 amd_write8(amd, DMA_Cmd, DMA_IDLE_CMD);
884 amd_write8(amd, SCSICMDREG, CLEAR_FIFO_CMD);
885
886 ResetDevParam(amd);
887 amdcompletematch(amd, CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD,
888 AMD_TAG_WILDCARD, &amd->running_srbs,
889 CAM_DEV_QFRZN|CAM_SCSI_BUS_RESET);
890 amdcompletematch(amd, CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD,
891 AMD_TAG_WILDCARD, &amd->waiting_srbs,
892 CAM_DEV_QFRZN|CAM_SCSI_BUS_RESET);
893 amd->active_srb = NULL;
894 amd->ACBFlag = 0;
895 splx(intflag);
896 return;
897 }
898
899 void
900 amd_timeout(void *arg1)
901 {
902 struct amd_srb * pSRB;
903
904 pSRB = (struct amd_srb *) arg1;
905 }
906 #endif
907
908 static int
909 amdstart(struct amd_softc *amd, struct amd_srb *pSRB)
910 {
911 union ccb *pccb;
912 struct ccb_scsiio *pcsio;
913 struct amd_target_info *targ_info;
914 u_int identify_msg;
915 u_int command;
916 u_int target;
917 u_int lun;
918
919 pccb = pSRB->pccb;
920 pcsio = &pccb->csio;
921 target = pccb->ccb_h.target_id;
922 lun = pccb->ccb_h.target_lun;
923 targ_info = &amd->tinfo[target];
924
925 amd_clear_msg_state(amd);
926 amd_write8(amd, SCSIDESTIDREG, target);
927 amd_write8(amd, SYNCPERIOREG, targ_info->sync_period_reg);
928 amd_write8(amd, SYNCOFFREG, targ_info->sync_offset_reg);
929 amd_write8(amd, CNTLREG1, targ_info->CtrlR1);
930 amd_write8(amd, CNTLREG3, targ_info->CtrlR3);
931 amd_write8(amd, CNTLREG4, targ_info->CtrlR4);
932 amd_write8(amd, SCSICMDREG, CLEAR_FIFO_CMD);
933
934 identify_msg = MSG_IDENTIFYFLAG | lun;
935 if ((targ_info->disc_tag & AMD_CUR_DISCENB) != 0
936 && (pccb->ccb_h.flags & CAM_DIS_DISCONNECT) == 0
937 && (pSRB->CmdBlock[0] != REQUEST_SENSE)
938 && (pSRB->SRBFlag & AUTO_REQSENSE) == 0)
939 identify_msg |= MSG_IDENTIFY_DISCFLAG;
940
941 amd_write8(amd, SCSIFIFOREG, identify_msg);
942 if ((targ_info->disc_tag & AMD_CUR_TAGENB) == 0
943 || (identify_msg & MSG_IDENTIFY_DISCFLAG) == 0)
944 pccb->ccb_h.flags &= ~CAM_TAG_ACTION_VALID;
945 if (targ_info->current.period != targ_info->goal.period
946 || targ_info->current.offset != targ_info->goal.offset) {
947 command = SEL_W_ATN_STOP;
948 amdconstructsdtr(amd, targ_info->goal.period,
949 targ_info->goal.offset);
950 } else if ((pccb->ccb_h.flags & CAM_TAG_ACTION_VALID) != 0) {
951 command = SEL_W_ATN2;
952 pSRB->SRBState = SRB_START;
953 amd_write8(amd, SCSIFIFOREG, pcsio->tag_action);
954 amd_write8(amd, SCSIFIFOREG, pSRB->TagNumber);
955 } else {
956 command = SEL_W_ATN;
957 pSRB->SRBState = SRB_START;
958 }
959 if (command != SEL_W_ATN_STOP)
960 amdsetupcommand(amd, pSRB);
961
962 if (amd_read8(amd, SCSISTATREG) & INTERRUPT) {
963 pSRB->SRBState = SRB_READY;
964 return (1);
965 } else {
966 amd->last_phase = SCSI_ARBITRATING;
967 amd_write8(amd, SCSICMDREG, command);
968 amd->active_srb = pSRB;
969 amd->cur_target = target;
970 amd->cur_lun = lun;
971 return (0);
972 }
973 }
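
/*
 * Annotation: FIFO contents for the three selection commands used
 * in amdstart() above (the identify byte always goes first):
 * SEL_W_ATN - identify + CDB (untagged, no negotiation pending)
 * SEL_W_ATN2 - identify + tag message + tag number + CDB (tagged)
 * SEL_W_ATN_STOP - identify only; the SDTR built by
 * amdconstructsdtr() goes out from msgout_buf during MSG OUT phase.
 */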
974
975 /*
976 * Catch an interrupt from the adapter.
977 * Process pending device interrupts.
978 */
979 static void
980 amd_intr(void *arg)
981 {
982 struct amd_softc *amd;
983 struct amd_srb *pSRB;
984 u_int internstat = 0;
985 u_int scsistat;
986 u_int intstat;
987
988 amd = (struct amd_softc *)arg;
989
990 if (amd == NULL) {
991 #ifdef AMD_DEBUG0
992 printf("amd_intr: amd NULL return......");
993 #endif
994 return;
995 }
996
997 scsistat = amd_read8(amd, SCSISTATREG);
998 if (!(scsistat & INTERRUPT)) {
999 #ifdef AMD_DEBUG0
1000 printf("amd_intr: scsistat = NULL, return......");
1001 #endif
1002 return;
1003 }
1004 #ifdef AMD_DEBUG_SCSI_PHASE
1005 printf("scsistat=%2x,", scsistat);
1006 #endif
1007
1008 internstat = amd_read8(amd, INTERNSTATREG);
1009 intstat = amd_read8(amd, INTSTATREG);
1010
1011 #ifdef AMD_DEBUG_SCSI_PHASE
1012 printf("intstat=%2x,", intstat);
1013 #endif
1014
1015 if (intstat & DISCONNECTED) {
1016 amd_Disconnect(amd);
1017 return;
1018 }
1019 if (intstat & RESELECTED) {
1020 amd_Reselect(amd);
1021 return;
1022 }
1023 if (intstat & INVALID_CMD) {
1024 amd_InvalidCmd(amd);
1025 return;
1026 }
1027 if (intstat & SCSI_RESET_) {
1028 amd_ScsiRstDetect(amd);
1029 return;
1030 }
1031 if (intstat & (SUCCESSFUL_OP + SERVICE_REQUEST)) {
1032 pSRB = amd->active_srb;
1033 /*
1034 * Run our state engine. First perform
1035 * post processing for the last phase we
1036 * were in, followed by any processing
1037 * required to handle the current phase.
1038 */
1039 scsistat =
1040 amd_SCSI_phase0[amd->last_phase](amd, pSRB, scsistat);
1041 amd->last_phase = scsistat & SCSI_PHASE_MASK;
1042 (void)amd_SCSI_phase1[amd->last_phase](amd, pSRB, scsistat);
1043 }
1044 }
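
/*
 * Annotation: the phase0 table performs cleanup for the phase just
 * finished, while the phase1 table programs the chip for the phase
 * the target is entering; amd->last_phase remembers the bus phase
 * (the low bits of scsistat, per SCSI_PHASE_MASK) between interrupts.
 */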
1045
1046 static u_int
1047 amd_DataOutPhase0(struct amd_softc *amd, struct amd_srb *pSRB, u_int scsistat)
1048 {
1049 struct amd_sg *psgl;
1050 u_int32_t ResidCnt, xferCnt;
1051
1052 if (!(pSRB->SRBState & SRB_XFERPAD)) {
1053 if (scsistat & PARITY_ERR) {
1054 pSRB->SRBStatus |= PARITY_ERROR;
1055 }
1056 if (scsistat & COUNT_2_ZERO) {
1057 while ((amd_read8(amd, DMA_Status)&DMA_XFER_DONE) == 0)
1058 ;
1059 pSRB->TotalXferredLen += pSRB->SGToBeXferLen;
1060 pSRB->SGIndex++;
1061 if (pSRB->SGIndex < pSRB->SGcount) {
1062 pSRB->pSGlist++;
1063 psgl = pSRB->pSGlist;
1064 pSRB->SGPhysAddr = psgl->SGXPtr;
1065 pSRB->SGToBeXferLen = psgl->SGXLen;
1066 } else {
1067 pSRB->SGToBeXferLen = 0;
1068 }
1069 } else {
1070 ResidCnt = amd_read8(amd, CURRENTFIFOREG) & 0x1f;
1071 ResidCnt += amd_read8(amd, CTCREG_LOW)
1072 | (amd_read8(amd, CTCREG_MID) << 8)
1073 | (amd_read8(amd, CURTXTCNTREG) << 16);
1074
1075 xferCnt = pSRB->SGToBeXferLen - ResidCnt;
1076 pSRB->SGPhysAddr += xferCnt;
1077 pSRB->TotalXferredLen += xferCnt;
1078 pSRB->SGToBeXferLen = ResidCnt;
1079 }
1080 }
1081 amd_write8(amd, DMA_Cmd, WRITE_DIRECTION | DMA_IDLE_CMD);
1082 return (scsistat);
1083 }
1084
1085 static u_int
1086 amd_DataInPhase0(struct amd_softc *amd, struct amd_srb *pSRB, u_int scsistat)
1087 {
1088 u_int8_t bval;
1089 u_int16_t i, residual;
1090 struct amd_sg *psgl;
1091 u_int32_t ResidCnt, xferCnt;
1092 u_int8_t * ptr;
1093
1094 if (!(pSRB->SRBState & SRB_XFERPAD)) {
1095 if (scsistat & PARITY_ERR) {
1096 pSRB->SRBStatus |= PARITY_ERROR;
1097 }
1098 if (scsistat & COUNT_2_ZERO) {
1099 while (1) {
1100 bval = amd_read8(amd, DMA_Status);
1101 if ((bval & DMA_XFER_DONE) != 0)
1102 break;
1103 }
1104 amd_write8(amd, DMA_Cmd, READ_DIRECTION|DMA_IDLE_CMD);
1105
1106 pSRB->TotalXferredLen += pSRB->SGToBeXferLen;
1107 pSRB->SGIndex++;
1108 if (pSRB->SGIndex < pSRB->SGcount) {
1109 pSRB->pSGlist++;
1110 psgl = pSRB->pSGlist;
1111 pSRB->SGPhysAddr = psgl->SGXPtr;
1112 pSRB->SGToBeXferLen = psgl->SGXLen;
1113 } else {
1114 pSRB->SGToBeXferLen = 0;
1115 }
1116 } else { /* phase changed */
1117 residual = 0;
1118 bval = amd_read8(amd, CURRENTFIFOREG);
1119 while (bval & 0x1f) {
1120 if ((bval & 0x1f) == 1) {
1121 for (i = 0; i < 0x100; i++) {
1122 bval = amd_read8(amd, CURRENTFIFOREG);
1123 if (!(bval & 0x1f)) {
1124 goto din_1;
1125 } else if (i == 0x0ff) {
1126 residual = 1;
1127 goto din_1;
1128 }
1129 }
1130 } else {
1131 bval = amd_read8(amd, CURRENTFIFOREG);
1132 }
1133 }
1134 din_1:
1135 amd_write8(amd, DMA_Cmd, READ_DIRECTION|DMA_BLAST_CMD);
1136 for (i = 0; i < 0x8000; i++) {
1137 if ((amd_read8(amd, DMA_Status)&BLAST_COMPLETE))
1138 break;
1139 }
1140 amd_write8(amd, DMA_Cmd, READ_DIRECTION|DMA_IDLE_CMD);
1141
1142 ResidCnt = amd_read8(amd, CTCREG_LOW)
1143 | (amd_read8(amd, CTCREG_MID) << 8)
1144 | (amd_read8(amd, CURTXTCNTREG) << 16);
1145 xferCnt = pSRB->SGToBeXferLen - ResidCnt;
1146 pSRB->SGPhysAddr += xferCnt;
1147 pSRB->TotalXferredLen += xferCnt;
1148 pSRB->SGToBeXferLen = ResidCnt;
1149 if (residual) {
1150 /* get residual byte */
1151 bval = amd_read8(amd, SCSIFIFOREG);
1152 ptr = phystovirt(pSRB, xferCnt);
1153 *ptr = bval;
1154 pSRB->SGPhysAddr++;
1155 pSRB->TotalXferredLen++;
1156 pSRB->SGToBeXferLen--;
1157 }
1158 }
1159 }
1160 return (scsistat);
1161 }
1162
1163 static u_int
1164 amd_StatusPhase0(struct amd_softc *amd, struct amd_srb *pSRB, u_int scsistat)
1165 {
1166 pSRB->TargetStatus = amd_read8(amd, SCSIFIFOREG);
1167 /* get message */
1168 pSRB->EndMessage = amd_read8(amd, SCSIFIFOREG);
1169 pSRB->SRBState = SRB_COMPLETED;
1170 amd_write8(amd, SCSICMDREG, MSG_ACCEPTED_CMD);
1171 return (SCSI_NOP0);
1172 }
1173
1174 static u_int
1175 amd_MsgOutPhase0(struct amd_softc *amd, struct amd_srb *pSRB, u_int scsistat)
1176 {
1177 if (pSRB->SRBState & (SRB_UNEXPECT_RESEL + SRB_ABORT_SENT)) {
1178 scsistat = SCSI_NOP0;
1179 }
1180 return (scsistat);
1181 }
1182
1183 static u_int
1184 amd_MsgInPhase0(struct amd_softc *amd, struct amd_srb *pSRB, u_int scsistat)
1185 {
1186 int done;
1187
1188 amd->msgin_buf[amd->msgin_index] = amd_read8(amd, SCSIFIFOREG);
1189
1190 done = amdparsemsg(amd);
1191 if (done)
1192 amd->msgin_index = 0;
1193 else
1194 amd->msgin_index++;
1195 return (SCSI_NOP0);
1196 }
1197
1198 static int
1199 amdparsemsg(struct amd_softc *amd)
1200 {
1201 int reject;
1202 int done;
1203 int response;
1204
1205 done = FALSE;
1206 response = FALSE;
1207 reject = FALSE;
1208
1209 /*
1210 * Parse as much of the message as is available,
1211 * rejecting it if we don't support it. When
1212 * the entire message is available and has been
1213 * handled, return TRUE indicating that we have
1214 * parsed an entire message.
1215 */
1216 switch (amd->msgin_buf[0]) {
1217 case MSG_DISCONNECT:
1218 amd->active_srb->SRBState = SRB_DISCONNECT;
1219 amd->disc_count[amd->cur_target][amd->cur_lun]++;
1220 done = TRUE;
1221 break;
1222 case MSG_SIMPLE_Q_TAG:
1223 {
1224 struct amd_srb *disc_srb;
1225
1226 if (amd->msgin_index < 1)
1227 break;
1228 disc_srb = &amd->SRB_array[amd->msgin_buf[1]];
1229 if (amd->active_srb != NULL
1230 || disc_srb->SRBState != SRB_DISCONNECT
1231 || disc_srb->pccb->ccb_h.target_id != amd->cur_target
1232 || disc_srb->pccb->ccb_h.target_lun != amd->cur_lun) {
1233 printf("amd%d: Unexpected tagged reselection "
1234 "for target %d, Issuing Abort\n", amd->unit,
1235 amd->cur_target);
1236 amd->msgout_buf[0] = MSG_ABORT;
1237 amd->msgout_len = 1;
1238 response = TRUE;
1239 break;
1240 }
1241 amd->active_srb = disc_srb;
1242 amd->disc_count[amd->cur_target][amd->cur_lun]--;
1243 done = TRUE;
1244 break;
1245 }
1246 case MSG_MESSAGE_REJECT:
1247 response = amdhandlemsgreject(amd);
1248 if (response == FALSE)
1249 amd_write8(amd, SCSICMDREG, RESET_ATN_CMD);
1250 /* FALLTHROUGH */
1251 case MSG_NOOP:
1252 done = TRUE;
1253 break;
1254 case MSG_EXTENDED:
1255 {
1256 u_int clockrate;
1257 u_int period;
1258 u_int offset;
1259 u_int saved_offset;
1260
1261 /* Wait for enough of the message to begin validation */
1262 if (amd->msgin_index < 1)
1263 break;
1264 if (amd->msgin_buf[1] != MSG_EXT_SDTR_LEN) {
1265 reject = TRUE;
1266 break;
1267 }
1268
1269 /* Wait for opcode */
1270 if (amd->msgin_index < 2)
1271 break;
1272
1273 if (amd->msgin_buf[2] != MSG_EXT_SDTR) {
1274 reject = TRUE;
1275 break;
1276 }
1277
1278 /*
1279 * Wait until we have both args before validating
1280 * and acting on this message.
1281 *
1282 * Add one to MSG_EXT_SDTR_LEN to account for
1283 * the extended message preamble.
1284 */
1285 if (amd->msgin_index < (MSG_EXT_SDTR_LEN + 1))
1286 break;
1287
1288 period = amd->msgin_buf[3];
1289 saved_offset = offset = amd->msgin_buf[4];
1290 clockrate = amdfindclockrate(amd, &period);
1291 if (offset > AMD_MAX_SYNC_OFFSET)
1292 offset = AMD_MAX_SYNC_OFFSET;
1293 if (period == 0 || offset == 0) {
1294 offset = 0;
1295 period = 0;
1296 clockrate = 0;
1297 }
1298 amdsetsync(amd, amd->cur_target, clockrate, period, offset,
1299 AMD_TRANS_ACTIVE|AMD_TRANS_GOAL);
1300
1301 /*
1302 * See if we initiated Sync Negotiation
1303 * and didn't have to fall down to async
1304 * transfers.
1305 */
1306 if (amdsentmsg(amd, MSG_EXT_SDTR, /*full*/TRUE)) {
1307 /* We started it */
1308 if (saved_offset != offset) {
1309 /* Went too low - force async */
1310 reject = TRUE;
1311 }
1312 } else {
1313 /*
1314 * Send our own SDTR in reply
1315 */
1316 if (bootverbose)
1317 printf("Sending SDTR!\n");
1318 amd->msgout_index = 0;
1319 amd->msgout_len = 0;
1320 amdconstructsdtr(amd, period, offset);
1321 amd->msgout_index = 0;
1322 response = TRUE;
1323 }
1324 done = TRUE;
1325 break;
1326 }
1327 case MSG_SAVEDATAPOINTER:
1328 case MSG_RESTOREPOINTERS:
1329 /* XXX Implement!!! */
1330 done = TRUE;
1331 break;
1332 default:
1333 reject = TRUE;
1334 break;
1335 }
1336
1337 if (reject) {
1338 amd->msgout_index = 0;
1339 amd->msgout_len = 1;
1340 amd->msgout_buf[0] = MSG_MESSAGE_REJECT;
1341 done = TRUE;
1342 response = TRUE;
1343 }
1344
1345 if (response)
1346 amd_write8(amd, SCSICMDREG, SET_ATN_CMD);
1347
1348 if (done && !response)
1349 /* Clear the outgoing message buffer */
1350 amd->msgout_len = 0;
1351
1352 /* Drop Ack */
1353 amd_write8(amd, SCSICMDREG, MSG_ACCEPTED_CMD);
1354
1355 return (done);
1356 }
1357
1358 static u_int
1359 amdfindclockrate(struct amd_softc *amd, u_int *period)
1360 {
1361 u_int i;
1362 u_int clockrate;
1363
1364 for (i = 0; i < sizeof(tinfo_sync_period); i++) {
1365 u_int8_t *table_entry;
1366
1367 table_entry = &tinfo_sync_period[i];
1368 if (*period <= *table_entry) {
1369 /*
1370 * When responding to a target that requests
1371 * sync, the requested rate may fall between
1372 * two rates that we can output, but still be
1373 * a rate that we can receive. Because of this,
1374 * we want to respond to the target with
1375 * the same rate that it sent to us even
1376 * if the period we use to send data to it
1377 * is lower. Only lower the response period
1378 * if we must.
1379 */
1380 if (i == 0) {
1381 *period = *table_entry;
1382 }
1383 break;
1384 }
1385 }
1386
1387 if (i == sizeof(tinfo_sync_period)) {
1388 /* Too slow for us. Use async transfers. */
1389 *period = 0;
1390 clockrate = 0;
1391 } else
1392 clockrate = i + 4;
1393
1394 return (clockrate);
1395 }
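
/*
 * Worked example (annotation): a target requesting a 176ns period
 * (value 44) matches tinfo_sync_period[3], so the function returns
 * clockrate 7 and amdsetsync() keeps FAST_SCSI set; a 200ns request
 * (value 50) returns 8 and selects the 25ns glitch eater instead.
 */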
1396
1397 /*
1398 * See if we sent a particular extended message to the target.
1399 * If "full" is true, the target saw the full message.
1400 * If "full" is false, the target saw at least the first
1401 * byte of the message.
1402 */
1403 static int
1404 amdsentmsg(struct amd_softc *amd, u_int msgtype, int full)
1405 {
1406 int found;
1407 int index;
1408
1409 found = FALSE;
1410 index = 0;
1411
1412 while (index < amd->msgout_len) {
1413 if ((amd->msgout_buf[index] & MSG_IDENTIFYFLAG) != 0
1414 || amd->msgout_buf[index] == MSG_MESSAGE_REJECT)
1415 index++;
1416 else if (amd->msgout_buf[index] >= MSG_SIMPLE_Q_TAG
1417 && amd->msgout_buf[index] < MSG_IGN_WIDE_RESIDUE) {
1418 /* Skip tag type and tag id */
1419 index += 2;
1420 } else if (amd->msgout_buf[index] == MSG_EXTENDED) {
1421 /* Found a candidate */
1422 if (amd->msgout_buf[index+2] == msgtype) {
1423 u_int end_index;
1424
1425 end_index = index + 1
1426 + amd->msgout_buf[index + 1];
1427 if (full) {
1428 if (amd->msgout_index > end_index)
1429 found = TRUE;
1430 } else if (amd->msgout_index > index)
1431 found = TRUE;
1432 }
1433 break;
1434 } else {
1435 panic("amdsentmsg: Inconsistent msg buffer");
1436 }
1437 }
1438 return (found);
1439 }
1440
1441 static void
1442 amdconstructsdtr(struct amd_softc *amd, u_int period, u_int offset)
1443 {
1444 amd->msgout_buf[amd->msgout_index++] = MSG_EXTENDED;
1445 amd->msgout_buf[amd->msgout_index++] = MSG_EXT_SDTR_LEN;
1446 amd->msgout_buf[amd->msgout_index++] = MSG_EXT_SDTR;
1447 amd->msgout_buf[amd->msgout_index++] = period;
1448 amd->msgout_buf[amd->msgout_index++] = offset;
1449 amd->msgout_len += 5;
1450 }
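
/*
 * Annotation: the five bytes queued above form a standard SDTR
 * extended message. Requesting a 100ns period (25) at offset 15,
 * for example, yields:
 * 0x01 0x03 0x01 0x19 0x0f
 * (MSG_EXTENDED, length, MSG_EXT_SDTR, period in 4ns units, offset)
 */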
1451
1452 static int
1453 amdhandlemsgreject(struct amd_softc *amd)
1454 {
1455 /*
1456 * If we had an outstanding SDTR for this
1457 * target, this is a signal that the target
1458 * is refusing negotiation. Also watch out
1459 * for rejected tag messages.
1460 */
1461 struct amd_srb *srb;
1462 struct amd_target_info *targ_info;
1463 int response = FALSE;
1464
1465 srb = amd->active_srb;
1466 targ_info = &amd->tinfo[amd->cur_target];
1467 if (amdsentmsg(amd, MSG_EXT_SDTR, /*full*/FALSE)) {
1468 /* note asynch xfers and clear flag */
1469 amdsetsync(amd, amd->cur_target, /*clockrate*/0,
1470 /*period*/0, /*offset*/0,
1471 AMD_TRANS_ACTIVE|AMD_TRANS_GOAL);
1472 printf("amd%d:%d: refuses synchronous negotiation. "
1473 "Using asynchronous transfers\n",
1474 amd->unit, amd->cur_target);
1475 } else if ((srb != NULL)
1476 && (srb->pccb->ccb_h.flags & CAM_TAG_ACTION_VALID) != 0) {
1477 struct ccb_trans_settings neg;
1478
1479 printf("amd%d:%d: refuses tagged commands. Performing "
1480 "non-tagged I/O\n", amd->unit, amd->cur_target);
1481
1482 amdsettags(amd, amd->cur_target, FALSE);
1483 neg.flags = 0;
1484 neg.valid = CCB_TRANS_TQ_VALID;
1485 xpt_setup_ccb(&neg.ccb_h, srb->pccb->ccb_h.path, /*priority*/1);
1486 xpt_async(AC_TRANSFER_NEG, srb->pccb->ccb_h.path, &neg);
1487
1488 /*
1489 * Resend the identify for this CCB as the target
1490 * may believe that the selection is invalid otherwise.
1491 */
1492 if (amd->msgout_len != 0)
1493 bcopy(&amd->msgout_buf[0], &amd->msgout_buf[1],
1494 amd->msgout_len);
1495 amd->msgout_buf[0] = MSG_IDENTIFYFLAG
1496 | srb->pccb->ccb_h.target_lun;
1497 amd->msgout_len++;
1498 if ((targ_info->disc_tag & AMD_CUR_DISCENB) != 0
1499 && (srb->pccb->ccb_h.flags & CAM_DIS_DISCONNECT) == 0)
1500 amd->msgout_buf[0] |= MSG_IDENTIFY_DISCFLAG;
1501
1502 srb->pccb->ccb_h.flags &= ~CAM_TAG_ACTION_VALID;
1503
1504 /*
1505 * Requeue all tagged commands for this target
1506 * currently in our possession so they can be
1507 * converted to untagged commands.
1508 */
1509 amdcompletematch(amd, amd->cur_target, amd->cur_lun,
1510 AMD_TAG_WILDCARD, &amd->waiting_srbs,
1511 CAM_DEV_QFRZN|CAM_REQUEUE_REQ);
1512 } else {
1513 /*
1514 * Otherwise, we ignore it.
1515 */
1516 printf("amd%d:%d: Message reject received -- ignored\n",
1517 amd->unit, amd->cur_target);
1518 }
1519 return (response);
1520 }
1521
1522 #if 0
1523 if (!(pSRB->SRBState & SRB_MSGIN_MULTI)) {
1524 if (bval == MSG_DISCONNECT) {
1525 pSRB->SRBState = SRB_DISCONNECT;
1526 } else if (bval == MSG_SAVEDATAPOINTER) {
1527 goto min6;
1528 } else if ((bval == MSG_EXTENDED)
1529 || ((bval >= MSG_SIMPLE_Q_TAG)
1530 && (bval <= MSG_ORDERED_Q_TAG))) {
1531 pSRB->SRBState |= SRB_MSGIN_MULTI;
1532 pSRB->MsgInBuf[0] = bval;
1533 pSRB->MsgCnt = 1;
1534 pSRB->pMsgPtr = &pSRB->MsgInBuf[1];
1535 } else if (bval == MSG_MESSAGE_REJECT) {
1536 amd_write8(amd, SCSICMDREG, RESET_ATN_CMD);
1537
1538 if (pSRB->SRBState & DO_SYNC_NEGO) {
1539 goto set_async;
1540 }
1541 } else if (bval == MSG_RESTOREPOINTERS) {
1542 goto min6;
1543 } else {
1544 goto min6;
1545 }
1546 } else { /* minx: */
1547 *pSRB->pMsgPtr = bval;
1548 pSRB->MsgCnt++;
1549 pSRB->pMsgPtr++;
1550 if ((pSRB->MsgInBuf[0] >= MSG_SIMPLE_Q_TAG)
1551 && (pSRB->MsgInBuf[0] <= MSG_ORDERED_Q_TAG)) {
1552 if (pSRB->MsgCnt == 2) {
1553 pSRB->SRBState = 0;
1554 pSRB = &amd->SRB_array[pSRB->MsgInBuf[1]];
1555 if ((pSRB->SRBState & SRB_DISCONNECT) == 0) {
1556 pSRB = amd->pTmpSRB;
1557 pSRB->SRBState = SRB_UNEXPECT_RESEL;
1558 pDCB->pActiveSRB = pSRB;
1559 pSRB->MsgOutBuf[0] = MSG_ABORT_TAG;
1560 EnableMsgOut2(amd, pSRB);
1561 } else {
1562 if (pDCB->DCBFlag & ABORT_DEV_) {
1563 pSRB->SRBState = SRB_ABORT_SENT;
1564 EnableMsgOut1(amd, pSRB);
1565 }
1566 pDCB->pActiveSRB = pSRB;
1567 pSRB->SRBState = SRB_DATA_XFER;
1568 }
1569 }
1570 } else if ((pSRB->MsgInBuf[0] == MSG_EXTENDED)
1571 && (pSRB->MsgCnt == 5)) {
1572 pSRB->SRBState &= ~(SRB_MSGIN_MULTI + DO_SYNC_NEGO);
1573 if ((pSRB->MsgInBuf[1] != 3)
1574 || (pSRB->MsgInBuf[2] != 1)) { /* reject_msg: */
1575 pSRB->MsgCnt = 1;
1576 pSRB->MsgInBuf[0] = MSG_MESSAGE_REJECT;
1577 amd_write8(amd, SCSICMDREG, SET_ATN_CMD);
1578 } else if (!(pSRB->MsgInBuf[3])
1579 || !(pSRB->MsgInBuf[4])) {
1580 set_async: /* set async */
1581
1582 pDCB = pSRB->pSRBDCB;
1583 /* disable sync & sync nego */
1584 pDCB->SyncMode &= ~(SYNC_ENABLE|SYNC_NEGO_DONE);
1585 pDCB->SyncPeriod = 0;
1586 pDCB->SyncOffset = 0;
1587
1588 pDCB->tinfo.goal.period = 0;
1589 pDCB->tinfo.goal.offset = 0;
1590
1591 pDCB->tinfo.current.period = 0;
1592 pDCB->tinfo.current.offset = 0;
1593 pDCB->tinfo.current.width =
1594 MSG_EXT_WDTR_BUS_8_BIT;
1595
1596 pDCB->CtrlR3 = FAST_CLK; /* non_fast */
1597 pDCB->CtrlR4 &= 0x3f;
1598 pDCB->CtrlR4 |= EATER_25NS;
1599 goto re_prog;
1600 } else {/* set sync */
1601
1602 pDCB = pSRB->pSRBDCB;
1603 /* enable sync & sync nego */
1604 pDCB->SyncMode |= SYNC_ENABLE|SYNC_NEGO_DONE;
1605
1606 /* set sync offset */
1607 pDCB->SyncOffset &= 0x0f0;
1608 pDCB->SyncOffset |= pSRB->MsgInBuf[4];
1609
1610 /* set sync period */
1611 pDCB->MaxNegoPeriod = pSRB->MsgInBuf[3];
1612
1613 wval = (u_int16_t) pSRB->MsgInBuf[3];
1614 wval = wval << 2;
1615 wval--;
1616 wval1 = wval / 25;
1617 if ((wval1 * 25) != wval) {
1618 wval1++;
1619 }
1620 bval = FAST_CLK|FAST_SCSI;
1621 pDCB->CtrlR4 &= 0x3f;
1622 if (wval1 >= 8) {
1623 /* Fast SCSI */
1624 wval1--;
1625 bval = FAST_CLK;
1626 pDCB->CtrlR4 |= EATER_25NS;
1627 }
1628 pDCB->CtrlR3 = bval;
1629 pDCB->SyncPeriod = (u_int8_t) wval1;
1630
1631 pDCB->tinfo.goal.period =
1632 tinfo_sync_period[pDCB->SyncPeriod - 4];
1633 pDCB->tinfo.goal.offset = pDCB->SyncOffset;
1634 pDCB->tinfo.current.period =
1635 tinfo_sync_period[pDCB->SyncPeriod - 4];
1636 pDCB->tinfo.current.offset = pDCB->SyncOffset;
1637
1638 /*
1639 * program SCSI control register
1640 */
1641 re_prog:
1642 amd_write8(amd, SYNCPERIOREG, pDCB->SyncPeriod);
1643 amd_write8(amd, SYNCOFFREG, pDCB->SyncOffset);
1644 amd_write8(amd, CNTLREG3, pDCB->CtrlR3);
1645 amd_write8(amd, CNTLREG4, pDCB->CtrlR4);
1646 }
1647 }
1648 }
1649 min6:
1650 amd_write8(amd, SCSICMDREG, MSG_ACCEPTED_CMD);
1651 return (SCSI_NOP0);
1652 }
1653 #endif
1654
1655 static u_int
1656 amd_DataOutPhase1(struct amd_softc *amd, struct amd_srb *pSRB, u_int scsistat)
1657 {
1658 DataIO_Comm(amd, pSRB, WRITE_DIRECTION);
1659 return (scsistat);
1660 }
1661
1662 static u_int
1663 amd_DataInPhase1(struct amd_softc *amd, struct amd_srb *pSRB, u_int scsistat)
1664 {
1665 DataIO_Comm(amd, pSRB, READ_DIRECTION);
1666 return (scsistat);
1667 }
1668
1669 static void
1670 DataIO_Comm(struct amd_softc *amd, struct amd_srb *pSRB, u_int ioDir)
1671 {
1672 struct amd_sg * psgl;
1673 u_int32_t lval;
1674
1675 if (pSRB->SGIndex < pSRB->SGcount) {
1676 amd_write8(amd, DMA_Cmd, DMA_IDLE_CMD|ioDir);/* |EN_DMA_INT */
1677
1678 if (!pSRB->SGToBeXferLen) {
1679 psgl = pSRB->pSGlist;
1680 pSRB->SGPhysAddr = psgl->SGXPtr;
1681 pSRB->SGToBeXferLen = psgl->SGXLen;
1682 }
1683 lval = pSRB->SGToBeXferLen;
1684 amd_write8(amd, CTCREG_LOW, lval);
1685 amd_write8(amd, CTCREG_MID, lval >> 8);
1686 amd_write8(amd, CURTXTCNTREG, lval >> 16);
1687
1688 amd_write32(amd, DMA_XferCnt, pSRB->SGToBeXferLen);
1689
1690 amd_write32(amd, DMA_XferAddr, pSRB->SGPhysAddr);
1691
1692 pSRB->SRBState = SRB_DATA_XFER;
1693
1694 amd_write8(amd, SCSICMDREG, DMA_COMMAND|INFO_XFER_CMD);
1695
1696 amd_write8(amd, DMA_Cmd, DMA_IDLE_CMD|ioDir); /* |EN_DMA_INT */
1697
1698 amd_write8(amd, DMA_Cmd, DMA_START_CMD|ioDir);/* |EN_DMA_INT */
1699 } else { /* xfer pad */
1700 if (pSRB->SGcount) {
1701 pSRB->AdaptStatus = H_OVER_UNDER_RUN;
1702 pSRB->SRBStatus |= OVER_RUN;
1703 }
1704 amd_write8(amd, CTCREG_LOW, 0);
1705 amd_write8(amd, CTCREG_MID, 0);
1706 amd_write8(amd, CURTXTCNTREG, 0);
1707
1708 pSRB->SRBState |= SRB_XFERPAD;
1709 amd_write8(amd, SCSICMDREG, DMA_COMMAND|XFER_PAD_BYTE);
1710 }
1711 }
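
/*
 * Annotation: the chip keeps a 24-bit transfer counter split across
 * three registers, loaded above as low/mid/high bytes. A 74,565 byte
 * (0x012345) segment, say, programs CTCREG_LOW = 0x45,
 * CTCREG_MID = 0x23 and CURTXTCNTREG = 0x01; the DMA engine gets the
 * same count through the 32-bit DMA_XferCnt register.
 */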
1712
1713 static u_int
1714 amd_CommandPhase1(struct amd_softc *amd, struct amd_srb *srb, u_int scsistat)
1715 {
1716 amd_write8(amd, SCSICMDREG, RESET_ATN_CMD);
1717 amd_write8(amd, SCSICMDREG, CLEAR_FIFO_CMD);
1718
1719 amdsetupcommand(amd, srb);
1720
1721 srb->SRBState = SRB_COMMAND;
1722 amd_write8(amd, SCSICMDREG, INFO_XFER_CMD);
1723 return (scsistat);
1724 }
1725
1726 static u_int
1727 amd_StatusPhase1(struct amd_softc *amd, struct amd_srb *pSRB, u_int scsistat)
1728 {
1729 amd_write8(amd, SCSICMDREG, CLEAR_FIFO_CMD);
1730 pSRB->SRBState = SRB_STATUS;
1731 amd_write8(amd, SCSICMDREG, INITIATOR_CMD_CMPLTE);
1732 return (scsistat);
1733 }
1734
1735 static u_int
1736 amd_MsgOutPhase1(struct amd_softc *amd, struct amd_srb *pSRB, u_int scsistat)
1737 {
1738 amd_write8(amd, SCSICMDREG, CLEAR_FIFO_CMD);
1739
1740 if (amd->msgout_len == 0) {
1741 amd->msgout_buf[0] = MSG_NOOP;
1742 amd->msgout_len = 1;
1743 }
1744 amd_write8_multi(amd, SCSIFIFOREG, amd->msgout_buf, amd->msgout_len);
1745 amd_write8(amd, SCSICMDREG, INFO_XFER_CMD);
1746 return (scsistat);
1747 }
1748
1749 static u_int
1750 amd_MsgInPhase1(struct amd_softc *amd, struct amd_srb *pSRB, u_int scsistat)
1751 {
1752 amd_write8(amd, SCSICMDREG, CLEAR_FIFO_CMD);
1753 amd_write8(amd, SCSICMDREG, INFO_XFER_CMD);
1754 return (scsistat);
1755 }
1756
1757 static u_int
1758 amd_NopPhase(struct amd_softc *amd, struct amd_srb *pSRB, u_int scsistat)
1759 {
1760 return (scsistat);
1761 }
1762
1763 static void
1764 amd_Disconnect(struct amd_softc * amd)
1765 {
1766 struct amd_srb *srb;
1767 int target;
1768 int lun;
1769
1770 srb = amd->active_srb;
1771 amd->active_srb = NULL;
1772 amd->last_phase = SCSI_BUS_FREE;
1773 amd_write8(amd, SCSICMDREG, EN_SEL_RESEL);
1774 target = amd->cur_target;
1775 lun = amd->cur_lun;
1776
1777 if (srb == NULL) {
1778 /* Invalid reselection */
1779 amdrunwaiting(amd);
1780 } else if (srb->SRBState & SRB_ABORT_SENT) {
1781 /* Clean up and complete this SRB */
1782 #if 0
1783 while ((pSRB = TAILQ_FIRST(&amd->running_srbs)) != NULL) {
1784 /* XXX What about "done'ing" these srbs??? */
1785 if (pSRB->pSRBDCB == pDCB) {
1786 TAILQ_REMOVE(&amd->running_srbs, pSRB, links);
1787 TAILQ_INSERT_HEAD(&amd->free_srbs, pSRB, links);
1788 }
1789 }
1790 amdrunwaiting(amd);
1791 #endif
1792 } else {
1793 if ((srb->SRBState & (SRB_START | SRB_MSGOUT))
1794 || !(srb->SRBState & (SRB_DISCONNECT | SRB_COMPLETED))) {
1795 srb->TargetStatus = AMD_SCSI_STAT_SEL_TIMEOUT;
1796 goto disc1;
1797 } else if (srb->SRBState & SRB_DISCONNECT) {
1798 if (!(srb->pccb->ccb_h.flags & CAM_TAG_ACTION_VALID))
1799 amd->untagged_srbs[target][lun] = srb;
1800 amdrunwaiting(amd);
1801 } else if (srb->SRBState & SRB_COMPLETED) {
1802 disc1:
1803 srb->SRBState = SRB_FREE;
1804 SRBdone(amd, srb);
1805 }
1806 }
1807 return;
1808 }
1809
1810 static void
1811 amd_Reselect(struct amd_softc *amd)
1812 {
1813 struct amd_target_info *tinfo;
1814 u_int16_t disc_count;
1815
1816 amd_clear_msg_state(amd);
1817 if (amd->active_srb != NULL) {
1818 /* Requeue the SRB for our attempted Selection */
1819 TAILQ_REMOVE(&amd->running_srbs, amd->active_srb, links);
1820 TAILQ_INSERT_HEAD(&amd->waiting_srbs, amd->active_srb, links);
1821 amd->active_srb = NULL;
1822 }
1823 /* get ID */
1824 amd->cur_target = amd_read8(amd, SCSIFIFOREG);
1825 amd->cur_target ^= amd->HostID_Bit;
1826 amd->cur_target = ffs(amd->cur_target) - 1;
1827 amd->cur_lun = amd_read8(amd, SCSIFIFOREG) & 7;
1828 tinfo = &amd->tinfo[amd->cur_target];
1829 amd->active_srb = amd->untagged_srbs[amd->cur_target][amd->cur_lun];
1830 disc_count = amd->disc_count[amd->cur_target][amd->cur_lun];
1831 if (disc_count == 0) {
1832 printf("amd%d: Unexpected reselection for target %d, "
1833 "Issuing Abort\n", amd->unit, amd->cur_target);
1834 amd->msgout_buf[0] = MSG_ABORT;
1835 amd->msgout_len = 1;
1836 amd_write8(amd, SCSICMDREG, SET_ATN_CMD);
1837 }
1838 if (amd->active_srb != NULL) {
1839 amd->disc_count[amd->cur_target][amd->cur_lun]--;
1840 amd->untagged_srbs[amd->cur_target][amd->cur_lun] = NULL;
1841 }
1842
1843 amd_write8(amd, SCSIDESTIDREG, amd->cur_target);
1844 amd_write8(amd, SYNCPERIOREG, tinfo->sync_period_reg);
1845 amd_write8(amd, SYNCOFFREG, tinfo->sync_offset_reg);
1846 amd_write8(amd, CNTLREG1, tinfo->CtrlR1);
1847 amd_write8(amd, CNTLREG3, tinfo->CtrlR3);
1848 amd_write8(amd, CNTLREG4, tinfo->CtrlR4);
1849 amd_write8(amd, SCSICMDREG, MSG_ACCEPTED_CMD);/* drop /ACK */
1850 amd->last_phase = SCSI_NOP0;
1851 }
1852
1853 static void
1854 SRBdone(struct amd_softc *amd, struct amd_srb *pSRB)
1855 {
1856 u_int8_t bval, i, status;
1857 union ccb *pccb;
1858 struct ccb_scsiio *pcsio;
1859 int intflag;
1860 struct amd_sg *ptr2;
1861 u_int32_t swlval;
1862
1863 pccb = pSRB->pccb;
1864 pcsio = &pccb->csio;
1865
1866 CAM_DEBUG(pccb->ccb_h.path, CAM_DEBUG_TRACE,
1867 ("SRBdone - TagNumber %d\n", pSRB->TagNumber));
1868
1869 if ((pccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
1870 bus_dmasync_op_t op;
1871
1872 if ((pccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN)
1873 op = BUS_DMASYNC_POSTREAD;
1874 else
1875 op = BUS_DMASYNC_POSTWRITE;
1876 bus_dmamap_sync(amd->buffer_dmat, pSRB->dmamap, op);
1877 bus_dmamap_unload(amd->buffer_dmat, pSRB->dmamap);
1878 }
1879
1880 status = pSRB->TargetStatus;
1881 pccb->ccb_h.status = CAM_REQ_CMP;
1882 if (pSRB->SRBFlag & AUTO_REQSENSE) {
1883 pSRB->SRBFlag &= ~AUTO_REQSENSE;
1884 pSRB->AdaptStatus = 0;
1885 pSRB->TargetStatus = SCSI_STATUS_CHECK_COND;
1886
1887 if (status == SCSI_STATUS_CHECK_COND) {
1888 pccb->ccb_h.status = CAM_AUTOSENSE_FAIL;
1889 goto ckc_e;
1890 }
1891 *((u_int32_t *)&(pSRB->CmdBlock[0])) = pSRB->Segment0[0];
1892
1893 pcsio->sense_resid = pcsio->sense_len
1894 - pSRB->TotalXferredLen;
1895 pSRB->TotalXferredLen = pSRB->Segment1[1];
1896 if (pSRB->TotalXferredLen) {
1897 /* ???? */
1898 pcsio->resid = pcsio->dxfer_len
1899 - pSRB->TotalXferredLen;
1900 /* The resid field contains valid data */
1901 /* Flush resid bytes on complete */
1902 } else {
1903 pcsio->scsi_status = SCSI_STATUS_CHECK_COND;
1904 }
1905 bzero(&pcsio->sense_data, pcsio->sense_len);
1906 bcopy(amd_get_sense_buf(amd, pSRB), &pcsio->sense_data,
1907 pcsio->sense_len);
1908 pccb->ccb_h.status = CAM_AUTOSNS_VALID;
1909 goto ckc_e;
1910 }
1911 if (status) {
1912 if (status == SCSI_STATUS_CHECK_COND) {
1913
1914 if ((pSRB->SGIndex < pSRB->SGcount)
1915 && (pSRB->SGcount) && (pSRB->SGToBeXferLen)) {
1916 bval = pSRB->SGcount;
1917 swlval = pSRB->SGToBeXferLen;
1918 ptr2 = pSRB->pSGlist;
1919 ptr2++;
1920 for (i = pSRB->SGIndex + 1; i < bval; i++) {
1921 swlval += ptr2->SGXLen;
1922 ptr2++;
1923 }
1924 /* ??????? */
1925 pcsio->resid = (u_int32_t) swlval;
1926
1927 #ifdef AMD_DEBUG0
1928 printf("XferredLen=%8x,NotYetXferLen=%8x,",
1929 pSRB->TotalXferredLen, swlval);
1930 #endif
1931 }
1932 if ((pcsio->ccb_h.flags & CAM_DIS_AUTOSENSE) == 0) {
1933 #ifdef AMD_DEBUG0
1934 printf("RequestSense..................\n");
1935 #endif
1936 RequestSense(amd, pSRB);
1937 return;
1938 }
1939 pcsio->scsi_status = SCSI_STATUS_CHECK_COND;
1940 pccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
1941 goto ckc_e;
1942 } else if (status == SCSI_STATUS_QUEUE_FULL) {
1943 pSRB->AdaptStatus = 0;
1944 pSRB->TargetStatus = 0;
1945 pcsio->scsi_status = SCSI_STATUS_QUEUE_FULL;
1946 pccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
1947 goto ckc_e;
1948 } else if (status == AMD_SCSI_STAT_SEL_TIMEOUT) {
1949 pSRB->AdaptStatus = H_SEL_TIMEOUT;
1950 pSRB->TargetStatus = 0;
1951
1952 pcsio->scsi_status = AMD_SCSI_STAT_SEL_TIMEOUT;
1953 pccb->ccb_h.status = CAM_SEL_TIMEOUT;
1954 } else if (status == SCSI_STATUS_BUSY) {
1955 #ifdef AMD_DEBUG0
1956 printf("DC390: target busy at %s %d\n",
1957 __FILE__, __LINE__);
1958 #endif
1959 pcsio->scsi_status = SCSI_STATUS_BUSY;
1960 pccb->ccb_h.status = CAM_SCSI_BUSY;
1961 } else if (status == SCSI_STATUS_RESERV_CONFLICT) {
1962 #ifdef AMD_DEBUG0
1963 printf("DC390: target reserved at %s %d\n",
1964 __FILE__, __LINE__);
1965 #endif
1966 pcsio->scsi_status = SCSI_STATUS_RESERV_CONFLICT;
1967 pccb->ccb_h.status = CAM_SCSI_STATUS_ERROR; /* XXX */
1968 } else {
1969 pSRB->AdaptStatus = 0;
1970 #ifdef AMD_DEBUG0
1971 printf("DC390: driver stuffup at %s %d\n",
1972 __FILE__, __LINE__);
1973 #endif
1974 pccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
1975 }
1976 } else {
1977 status = pSRB->AdaptStatus;
1978 if (status & H_OVER_UNDER_RUN) {
1979 pSRB->TargetStatus = 0;
1980
1981 pccb->ccb_h.status = CAM_DATA_RUN_ERR;
1982 } else if (pSRB->SRBStatus & PARITY_ERROR) {
1983 #ifdef AMD_DEBUG0
1984 printf("DC390: driver stuffup %s %d\n",
1985 __FILE__, __LINE__);
1986 #endif
1987 /* Driver failed to perform operation */
1988 pccb->ccb_h.status = CAM_UNCOR_PARITY;
1989 } else { /* No error */
1990 pSRB->AdaptStatus = 0;
1991 pSRB->TargetStatus = 0;
1992 pcsio->resid = 0;
1993 /* there is no error, (sense is invalid) */
1994 }
1995 }
1996 ckc_e:
1997 intflag = splcam();
1998 if ((pccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
1999 		/* The request did not complete successfully => freeze the device queue */
2000 xpt_freeze_devq(pccb->ccb_h.path, 1);
2001 pccb->ccb_h.status |= CAM_DEV_QFRZN;
2002 }
2003 TAILQ_REMOVE(&amd->running_srbs, pSRB, links);
2004 TAILQ_INSERT_HEAD(&amd->free_srbs, pSRB, links);
2005 amdrunwaiting(amd);
2006 splx(intflag);
2007 xpt_done(pccb);
2008
2009 }
2010
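/*
 * Force a SCSI bus reset.  The DMA engine is idled first, and RESET_DEV
 * is set so that amd_ScsiRstDetect() can tell a reset we initiated
 * ourselves apart from one raised externally.
 */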
2011 static void
2012 amd_ResetSCSIBus(struct amd_softc * amd)
2013 {
2014 int intflag;
2015
2016 intflag = splcam();
2017 amd->ACBFlag |= RESET_DEV;
2018 amd_write8(amd, DMA_Cmd, DMA_IDLE_CMD);
2019 amd_write8(amd, SCSICMDREG, RST_SCSI_BUS_CMD);
2020 splx(intflag);
2021 return;
2022 }
2023
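/*
 * Handle a detected SCSI bus reset.  After letting the bus settle for
 * about a second, a reset we requested (RESET_DEV) is simply marked
 * RESET_DONE; an external reset clears all negotiated device parameters
 * and fails every running and waiting SRB with CAM_SCSI_BUS_RESET.
 */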
2024 static void
2025 amd_ScsiRstDetect(struct amd_softc * amd)
2026 {
2027 int intflag;
2028 u_int32_t wlval;
2029
2030 #ifdef AMD_DEBUG0
2031 printf("amd_ScsiRstDetect \n");
2032 #endif
2033
2034 wlval = 1000;
2035 	while (--wlval) {	/* let the bus settle for ~1 second */
2036 DELAY(1000);
2037 }
2038 intflag = splcam();
2039
2040 amd_write8(amd, DMA_Cmd, DMA_IDLE_CMD);
2041 amd_write8(amd, SCSICMDREG, CLEAR_FIFO_CMD);
2042
2043 if (amd->ACBFlag & RESET_DEV) {
2044 amd->ACBFlag |= RESET_DONE;
2045 } else {
2046 amd->ACBFlag |= RESET_DETECT;
2047 ResetDevParam(amd);
2048 amdcompletematch(amd, CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD,
2049 AMD_TAG_WILDCARD, &amd->running_srbs,
2050 CAM_DEV_QFRZN|CAM_SCSI_BUS_RESET);
2051 amdcompletematch(amd, CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD,
2052 AMD_TAG_WILDCARD, &amd->waiting_srbs,
2053 CAM_DEV_QFRZN|CAM_SCSI_BUS_RESET);
2054 amd->active_srb = NULL;
2055 amd->ACBFlag = 0;
2056 amdrunwaiting(amd);
2057 }
2058 splx(intflag);
2059 return;
2060 }
2061
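/*
 * Re-use the failing SRB to issue an automatic REQUEST SENSE.  The
 * original CDB and transfer counts are stashed in Segment0[]/Segment1[]
 * and restored in the completion path above (XXX only the first four
 * CDB bytes are actually restored there), while the scatter/gather
 * list is pointed at this SRB's sense buffer.
 */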
2062 static void
2063 RequestSense(struct amd_softc *amd, struct amd_srb *pSRB)
2064 {
2065 union ccb *pccb;
2066 struct ccb_scsiio *pcsio;
2067
2068 pccb = pSRB->pccb;
2069 pcsio = &pccb->csio;
2070
2071 pSRB->SRBFlag |= AUTO_REQSENSE;
2072 pSRB->Segment0[0] = *((u_int32_t *) & (pSRB->CmdBlock[0]));
2073 pSRB->Segment0[1] = *((u_int32_t *) & (pSRB->CmdBlock[4]));
2074 pSRB->Segment1[0] = (pSRB->ScsiCmdLen << 8) + pSRB->SGcount;
2075 pSRB->Segment1[1] = pSRB->TotalXferredLen;
2076
2077 pSRB->AdaptStatus = 0;
2078 pSRB->TargetStatus = 0;
2079
2080 pSRB->Segmentx.SGXPtr = amd_get_sense_bufaddr(amd, pSRB);
2081 pSRB->Segmentx.SGXLen = amd_get_sense_bufsize(amd, pSRB);
2082
2083 pSRB->pSGlist = &pSRB->Segmentx;
2084 pSRB->SGcount = 1;
2085 pSRB->SGIndex = 0;
2086
2087 pSRB->CmdBlock[0] = REQUEST_SENSE;
2088 pSRB->CmdBlock[1] = pSRB->pccb->ccb_h.target_lun << 5;
2089 pSRB->CmdBlock[2] = 0;
2090 pSRB->CmdBlock[3] = 0;
2091 pSRB->CmdBlock[4] = pcsio->sense_len;
2092 pSRB->CmdBlock[5] = 0;
2093 pSRB->ScsiCmdLen = 6;
2094
2095 pSRB->TotalXferredLen = 0;
2096 pSRB->SGToBeXferLen = 0;
2097 if (amdstart(amd, pSRB) != 0) {
2098 TAILQ_REMOVE(&amd->running_srbs, pSRB, links);
2099 TAILQ_INSERT_HEAD(&amd->waiting_srbs, pSRB, links);
2100 }
2101 }
2102
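/*
 * The chip rejected a command; clear the FIFO if we were in the middle
 * of a selection or a message-out phase.
 */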
2103 static void
2104 amd_InvalidCmd(struct amd_softc * amd)
2105 {
2106 struct amd_srb *srb;
2107
2108 srb = amd->active_srb;
2109 if (srb->SRBState & (SRB_START|SRB_MSGOUT))
2110 amd_write8(amd, SCSICMDREG, CLEAR_FIFO_CMD);
2111 }
2112
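/*
 * Give each SRB in the pool a tag number and a DMA map, then place
 * it on the free list.
 */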
2113 void
2114 amd_linkSRB(struct amd_softc *amd)
2115 {
2116 u_int16_t count, i;
2117 struct amd_srb *psrb;
2118 int error;
2119
2120 count = amd->SRBCount;
2121
2122 for (i = 0; i < count; i++) {
2123 psrb = (struct amd_srb *)&amd->SRB_array[i];
2124 psrb->TagNumber = i;
2125
2126 /*
2127 * Create the dmamap. This is no longer optional!
2128 *
2129 * XXX Since there is no detach method in this driver,
2130 * this does not get freed!
2131 */
2132 if ((error = bus_dmamap_create(amd->buffer_dmat, 0,
2133 &psrb->dmamap)) != 0) {
2134 device_printf(amd->dev, "Error %d creating buffer "
2135 "dmamap!\n", error);
2136 return;
2137 }
2138 TAILQ_INSERT_TAIL(&amd->free_srbs, psrb, links);
2139 }
2140 }
2141
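/*
 * Serial EEPROM access.  The DC-390's EEPROM is bit-banged through PCI
 * configuration space: register 0x80 (0xc0 while chip enable is
 * asserted) drives the part, with bit 0x40 acting as data-in and bit
 * 0x80 as the clock.  The data-out bit is apparently sensed by reading
 * configuration space back while the interface is enabled (the 0x22
 * compare in amd_EEpromInDO() below); this is inferred from the code,
 * not from documentation.
 */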
2142 static void
2143 amd_EnDisableCE(struct amd_softc *amd, int mode, int *regval)
2144 {
2145 if (mode == ENABLE_CE) {
2146 *regval = 0xc0;
2147 } else {
2148 *regval = 0x80;
2149 }
2150 pci_write_config(amd->dev, *regval, 0, /*bytes*/1);
2151 if (mode == DISABLE_CE) {
2152 pci_write_config(amd->dev, *regval, 0, /*bytes*/1);
2153 }
2154 DELAY(160);
2155 }
2156
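/* Clock a single bit (Carry) out to the EEPROM's data-in line. */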
2157 static void
2158 amd_EEpromOutDI(struct amd_softc *amd, int *regval, int Carry)
2159 {
2160 u_int bval;
2161
2162 bval = 0;
2163 if (Carry) {
2164 bval = 0x40;
2165 *regval = 0x80;
2166 pci_write_config(amd->dev, *regval, bval, /*bytes*/1);
2167 }
2168 DELAY(160);
2169 bval |= 0x80;
2170 pci_write_config(amd->dev, *regval, bval, /*bytes*/1);
2171 DELAY(160);
2172 pci_write_config(amd->dev, *regval, 0, /*bytes*/1);
2173 DELAY(160);
2174 }
2175
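/* Pulse the clock and sample the EEPROM's data-out bit. */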
2176 static int
2177 amd_EEpromInDO(struct amd_softc *amd)
2178 {
2179 pci_write_config(amd->dev, 0x80, 0x80, /*bytes*/1);
2180 DELAY(160);
2181 pci_write_config(amd->dev, 0x80, 0x40, /*bytes*/1);
2182 DELAY(160);
2183 if (pci_read_config(amd->dev, 0, /*bytes*/1) == 0x22)
2184 return (1);
2185 return (0);
2186 }
2187
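/* Shift in one 16-bit data word from the EEPROM, MSB first. */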
2188 static u_int16_t
2189 EEpromGetData1(struct amd_softc *amd)
2190 {
2191 u_int i;
2192 u_int carryFlag;
2193 u_int16_t wval;
2194
2195 wval = 0;
2196 for (i = 0; i < 16; i++) {
2197 wval <<= 1;
2198 carryFlag = amd_EEpromInDO(amd);
2199 wval |= carryFlag;
2200 }
2201 return (wval);
2202 }
2203
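/*
 * Clock out the start bit followed by the 8-bit EEPROM command
 * (opcode plus word address), MSB first.
 */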
2204 static void
2205 amd_Prepare(struct amd_softc *amd, int *regval, u_int8_t EEpromCmd)
2206 {
2207 u_int i, j;
2208 int carryFlag;
2209
2210 carryFlag = 1;
2211 j = 0x80;
2212 for (i = 0; i < 9; i++) {
2213 amd_EEpromOutDI(amd, regval, carryFlag);
2214 carryFlag = (EEpromCmd & j) ? 1 : 0;
2215 j >>= 1;
2216 }
2217 }
2218
2219 static void
2220 amd_ReadEEprom(struct amd_softc *amd)
2221 {
2222 int regval;
2223 u_int i;
2224 u_int16_t *ptr;
2225 u_int8_t cmd;
2226
2227 ptr = (u_int16_t *)&amd->eepromBuf[0];
2228 cmd = EEPROM_READ;
2229 for (i = 0; i < 0x40; i++) {
2230 		amd_EnDisableCE(amd, ENABLE_CE, &regval);
2231 		amd_Prepare(amd, &regval, cmd);
2232 *ptr = EEpromGetData1(amd);
2233 ptr++;
2234 cmd++;
2235 		amd_EnDisableCE(amd, DISABLE_CE, &regval);
2236 }
2237 }
2238
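/*
 * Fall-back configuration: tagged queueing, disconnection, sync
 * negotiation and parity checking for every target, host ID 7.
 */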
2239 static void
2240 amd_load_defaults(struct amd_softc *amd)
2241 {
2242 int target;
2243
2244 bzero(&amd->eepromBuf, sizeof amd->eepromBuf);
2245 for (target = 0; target < MAX_SCSI_ID; target++)
2246 amd->eepromBuf[target << 2] =
2247 (TAG_QUEUING|EN_DISCONNECT|SYNC_NEGO|PARITY_CHK);
2248 amd->eepromBuf[EE_ADAPT_SCSI_ID] = 7;
2249 amd->eepromBuf[EE_MODE2] = ACTIVE_NEGATION|LUN_CHECK|GREATER_1G;
2250 amd->eepromBuf[EE_TAG_CMD_NUM] = 4;
2251 }
2252
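/*
 * Read the SEEPROM and verify its 16-bit word sum against EE_CHECKSUM;
 * on mismatch, fall back to the defaults above.
 */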
2253 static void
2254 amd_load_eeprom_or_defaults(struct amd_softc *amd)
2255 {
2256 u_int16_t wval, *ptr;
2257 u_int8_t i;
2258
2259 amd_ReadEEprom(amd);
2260 wval = 0;
2261 ptr = (u_int16_t *) & amd->eepromBuf[0];
2262 for (i = 0; i < EE_DATA_SIZE; i += 2, ptr++)
2263 wval += *ptr;
2264
2265 if (wval != EE_CHECKSUM) {
2266 if (bootverbose)
2267 printf("amd%d: SEEPROM data unavailable. "
2268 "Using default device parameters.\n",
2269 amd->unit);
2270 amd_load_defaults(amd);
2271 }
2272 }
2273
2274 /*
2275  **********************************************************************
2276  * Function : static int amd_init (device_t dev)
2277  * Purpose : initialize the internal structures for a given SCSI host
2278  * Inputs : dev - the newbus device for this host adapter
2279  **********************************************************************
2280  */
2281 static int
2282 amd_init(device_t dev)
2283 {
2284 struct amd_softc *amd = device_get_softc(dev);
2285 struct resource *iores;
2286 int i, rid;
2287 u_int bval;
2288
2289 rid = PCI_BASE_ADDR0;
2290 iores = bus_alloc_resource_any(dev, SYS_RES_IOPORT, &rid, RF_ACTIVE);
2291 if (iores == NULL) {
2292 if (bootverbose)
2293 printf("amd_init: bus_alloc_resource failure!\n");
2294 return ENXIO;
2295 }
2296 amd->tag = rman_get_bustag(iores);
2297 amd->bsh = rman_get_bushandle(iores);
2298
2299 /* DMA tag for mapping buffers into device visible space. */
2300 if (bus_dma_tag_create(/*parent_dmat*/NULL, /*alignment*/1,
2301 /*boundary*/0,
2302 /*lowaddr*/BUS_SPACE_MAXADDR_32BIT,
2303 /*highaddr*/BUS_SPACE_MAXADDR,
2304 /*filter*/NULL, /*filterarg*/NULL,
2305 /*maxsize*/MAXBSIZE, /*nsegments*/AMD_NSEG,
2306 /*maxsegsz*/AMD_MAXTRANSFER_SIZE,
2307 /*flags*/BUS_DMA_ALLOCNOW,
2308 /*lockfunc*/busdma_lock_mutex,
2309 /*lockarg*/&Giant,
2310 &amd->buffer_dmat) != 0) {
2311 if (bootverbose)
2312 printf("amd_init: bus_dma_tag_create failure!\n");
2313 return ENXIO;
2314 }
2315
2316 /* Create, allocate, and map DMA buffers for autosense data */
2317 if (bus_dma_tag_create(/*parent_dmat*/NULL, /*alignment*/1,
2318 /*boundary*/0,
2319 /*lowaddr*/BUS_SPACE_MAXADDR_32BIT,
2320 /*highaddr*/BUS_SPACE_MAXADDR,
2321 /*filter*/NULL, /*filterarg*/NULL,
2322 sizeof(struct scsi_sense_data) * MAX_SRB_CNT,
2323 /*nsegments*/1,
2324 /*maxsegsz*/AMD_MAXTRANSFER_SIZE,
2325 /*flags*/0,
2326 /*lockfunc*/busdma_lock_mutex,
2327 /*lockarg*/&Giant, &amd->sense_dmat) != 0) {
2328 if (bootverbose)
2329 device_printf(dev, "cannot create sense buffer dmat\n");
2330 return (ENXIO);
2331 }
2332
2333 if (bus_dmamem_alloc(amd->sense_dmat, (void **)&amd->sense_buffers,
2334 BUS_DMA_NOWAIT, &amd->sense_dmamap) != 0)
2335 return (ENOMEM);
2336
2337 bus_dmamap_load(amd->sense_dmat, amd->sense_dmamap,
2338 amd->sense_buffers,
2339 sizeof(struct scsi_sense_data) * MAX_SRB_CNT,
2340 amd_dmamap_cb, &amd->sense_busaddr, /*flags*/0);
2341
2342 TAILQ_INIT(&amd->free_srbs);
2343 TAILQ_INIT(&amd->running_srbs);
2344 TAILQ_INIT(&amd->waiting_srbs);
2345 amd->last_phase = SCSI_BUS_FREE;
2346 amd->dev = dev;
2347 amd->unit = device_get_unit(dev);
2348 amd->SRBCount = MAX_SRB_CNT;
2349 amd->status = 0;
2350 amd_load_eeprom_or_defaults(amd);
2351 amd->max_id = 7;
2352 if (amd->eepromBuf[EE_MODE2] & LUN_CHECK) {
2353 amd->max_lun = 7;
2354 } else {
2355 amd->max_lun = 0;
2356 }
2357 amd->AdaptSCSIID = amd->eepromBuf[EE_ADAPT_SCSI_ID];
2358 amd->HostID_Bit = (1 << amd->AdaptSCSIID);
2359 amd->AdaptSCSILUN = 0;
2360 /* (eepromBuf[EE_TAG_CMD_NUM]) << 2; */
2361 amd->ACBFlag = 0;
2362 amd->Gmode2 = amd->eepromBuf[EE_MODE2];
2363 amd_linkSRB(amd);
2364 for (i = 0; i <= amd->max_id; i++) {
2365
2366 if (amd->AdaptSCSIID != i) {
2367 struct amd_target_info *tinfo;
2368 PEEprom prom;
2369
2370 tinfo = &amd->tinfo[i];
2371 prom = (PEEprom)&amd->eepromBuf[i << 2];
2372 if ((prom->EE_MODE1 & EN_DISCONNECT) != 0) {
2373 tinfo->disc_tag |= AMD_USR_DISCENB;
2374 if ((prom->EE_MODE1 & TAG_QUEUING) != 0)
2375 tinfo->disc_tag |= AMD_USR_TAGENB;
2376 }
2377 if ((prom->EE_MODE1 & SYNC_NEGO) != 0) {
2378 tinfo->user.period =
2379 eeprom_period[prom->EE_SPEED];
2380 tinfo->user.offset = AMD_MAX_SYNC_OFFSET;
2381 }
2382 tinfo->CtrlR1 = amd->AdaptSCSIID;
2383 if ((prom->EE_MODE1 & PARITY_CHK) != 0)
2384 tinfo->CtrlR1 |= PARITY_ERR_REPO;
2385 tinfo->CtrlR3 = FAST_CLK;
2386 tinfo->CtrlR4 = EATER_25NS;
2387 if ((amd->eepromBuf[EE_MODE2] & ACTIVE_NEGATION) != 0)
2388 tinfo->CtrlR4 |= NEGATE_REQACKDATA;
2389 }
2390 }
2391 amd_write8(amd, SCSITIMEOUTREG, 153); /* 250ms selection timeout */
2392 /* Conversion factor = 0 , 40MHz clock */
2393 amd_write8(amd, CLKFACTREG, CLK_FREQ_40MHZ);
2394 /* NOP cmd - clear command register */
2395 amd_write8(amd, SCSICMDREG, NOP_CMD);
2396 amd_write8(amd, CNTLREG2, EN_FEATURE|EN_SCSI2_CMD);
2397 amd_write8(amd, CNTLREG3, FAST_CLK);
2398 bval = EATER_25NS;
2399 if (amd->eepromBuf[EE_MODE2] & ACTIVE_NEGATION) {
2400 bval |= NEGATE_REQACKDATA;
2401 }
2402 amd_write8(amd, CNTLREG4, bval);
2403
2404 /* Disable SCSI bus reset interrupt */
2405 amd_write8(amd, CNTLREG1, DIS_INT_ON_SCSI_RST);
2406
2407 return 0;
2408 }
2409
2410 /*
2411 * attach and init a host adapter
2412 */
2413 static int
2414 amd_attach(device_t dev)
2415 {
2416 struct cam_devq *devq; /* Device Queue to use for this SIM */
2417 u_int8_t intstat;
2418 struct amd_softc *amd = device_get_softc(dev);
2419 int unit = device_get_unit(dev);
2420 int rid;
2421 void *ih;
2422 struct resource *irqres;
2423
2424 if (amd_init(dev)) {
2425 if (bootverbose)
2426 printf("amd_attach: amd_init failure!\n");
2427 return ENXIO;
2428 }
2429
2430 /* Reset Pending INT */
2431 intstat = amd_read8(amd, INTSTATREG);
2432
2433 /* After setting up the adapter, map our interrupt */
2434 rid = 0;
2435 irqres = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
2436 RF_SHAREABLE | RF_ACTIVE);
2437 if (irqres == NULL ||
2438 bus_setup_intr(dev, irqres, INTR_TYPE_CAM | INTR_ENTROPY,
2439 amd_intr, amd, &ih)) {
2440 if (bootverbose)
2441 printf("amd%d: unable to register interrupt handler!\n",
2442 unit);
2443 return ENXIO;
2444 }
2445
2446 /*
2447 * Now let the CAM generic SCSI layer find the SCSI devices on
2448 * the bus * start queue to reset to the idle loop. *
2449 * Create device queue of SIM(s) * (MAX_START_JOB - 1) :
2450 * max_sim_transactions
2451 */
2452 devq = cam_simq_alloc(MAX_START_JOB);
2453 if (devq == NULL) {
2454 if (bootverbose)
2455 printf("amd_attach: cam_simq_alloc failure!\n");
2456 return ENXIO;
2457 }
2458
2459 amd->psim = cam_sim_alloc(amd_action, amd_poll, "amd",
2460 amd, amd->unit, 1, MAX_TAGS_CMD_QUEUE,
2461 devq);
2462 if (amd->psim == NULL) {
2463 cam_simq_free(devq);
2464 if (bootverbose)
2465 printf("amd_attach: cam_sim_alloc failure!\n");
2466 return ENXIO;
2467 }
2468
2469 if (xpt_bus_register(amd->psim, 0) != CAM_SUCCESS) {
2470 cam_sim_free(amd->psim, /*free_devq*/TRUE);
2471 if (bootverbose)
2472 printf("amd_attach: xpt_bus_register failure!\n");
2473 return ENXIO;
2474 }
2475
2476 if (xpt_create_path(&amd->ppath, /* periph */ NULL,
2477 cam_sim_path(amd->psim), CAM_TARGET_WILDCARD,
2478 CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
2479 xpt_bus_deregister(cam_sim_path(amd->psim));
2480 cam_sim_free(amd->psim, /* free_simq */ TRUE);
2481 if (bootverbose)
2482 printf("amd_attach: xpt_create_path failure!\n");
2483 return ENXIO;
2484 }
2485
2486 return 0;
2487 }
2488
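/* Match the AMD53C974 as found on the Tekram DC-390(T). */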
2489 static int
2490 amd_probe(device_t dev)
2491 {
2492 if (pci_get_devid(dev) == PCI_DEVICE_ID_AMD53C974) {
2493 device_set_desc(dev,
2494 "Tekram DC390(T)/AMD53c974 SCSI Host Adapter");
2495 return 0;
2496 }
2497 return ENXIO;
2498 }
2499
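/*
 * Standard newbus glue: hook the driver into the PCI bus and record
 * its dependency on the CAM module.
 */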
2500 static device_method_t amd_methods[] = {
2501 /* Device interface */
2502 DEVMETHOD(device_probe, amd_probe),
2503 DEVMETHOD(device_attach, amd_attach),
2504 { 0, 0 }
2505 };
2506
2507 static driver_t amd_driver = {
2508 "amd", amd_methods, sizeof(struct amd_softc)
2509 };
2510
2511 static devclass_t amd_devclass;
2512 DRIVER_MODULE(amd, pci, amd_driver, amd_devclass, 0, 0);
2513 MODULE_DEPEND(amd, cam, 1, 1, 1);