sys/pci/amd.c
1 /*
2 *********************************************************************
3 * FILE NAME : amd.c
4 * BY : C.L. Huang (ching@tekram.com.tw)
5 * Erich Chen (erich@tekram.com.tw)
6 * Description: Device Driver for the amd53c974 PCI Bus Master
7 * SCSI Host adapter found on cards such as
8 * the Tekram DC-390(T).
9 * (C)Copyright 1995-1999 Tekram Technology Co., Ltd.
10 *
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
13 * are met:
14 * 1. Redistributions of source code must retain the above copyright
15 * notice, this list of conditions and the following disclaimer.
16 * 2. Redistributions in binary form must reproduce the above copyright
17 * notice, this list of conditions and the following disclaimer in the
18 * documentation and/or other materials provided with the distribution.
19 * 3. The name of the author may not be used to endorse or promote products
20 * derived from this software without specific prior written permission.
21 *
22 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
23 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
24 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
25 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
26 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
27 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
31 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32 *********************************************************************
33 * $FreeBSD: releng/5.0/sys/pci/amd.c 106668 2002-11-08 21:30:26Z jhb $
34 */
35
36 /*
37 *********************************************************************
38 * HISTORY:
39 *
40 * REV# DATE NAME DESCRIPTION
41 * 1.00 07/02/96 CLH First release for RELEASE-2.1.0
42 * 1.01 08/20/96 CLH Update for RELEASE-2.1.5
43 * 1.02 11/06/96 CLH Fixed more than 1 LUN scanning
44 * 1.03 12/20/96 CLH Modify to support 2.2-ALPHA
45 * 1.04 12/26/97 CLH Modify to support RELEASE-2.2.5
46 * 1.05 01/01/99 ERICH CHEN Modify to support RELEASE-3.0.x (CAM)
47 *********************************************************************
48 */
49
50 /* #define AMD_DEBUG0 */
51 /* #define AMD_DEBUG_SCSI_PHASE */
52
53 #include <sys/param.h>
54
55 #include <sys/systm.h>
56 #include <sys/queue.h>
57 #include <sys/kernel.h>
58
59 #include <vm/vm.h>
60 #include <vm/pmap.h>
61
62 #include <machine/bus_pio.h>
63 #include <machine/bus.h>
64 #include <machine/resource.h>
65 #include <sys/bus.h>
66 #include <sys/rman.h>
67
68 #include <cam/cam.h>
69 #include <cam/cam_ccb.h>
70 #include <cam/cam_sim.h>
71 #include <cam/cam_xpt_sim.h>
72 #include <cam/cam_debug.h>
73
74 #include <cam/scsi/scsi_all.h>
75 #include <cam/scsi/scsi_message.h>
76
77 #include <pci/pcivar.h>
78 #include <pci/pcireg.h>
79 #include <pci/amd.h>
80
81 #define PCI_DEVICE_ID_AMD53C974 0x20201022ul
82 #define PCI_BASE_ADDR0 0x10
83
84 typedef u_int (phase_handler_t)(struct amd_softc *, struct amd_srb *, u_int);
85 typedef phase_handler_t *phase_handler_func_t;
86
87 static void amd_intr(void *vamd);
88 static int amdstart(struct amd_softc *amd, struct amd_srb * pSRB);
89 static phase_handler_t amd_NopPhase;
90
91 static phase_handler_t amd_DataOutPhase0;
92 static phase_handler_t amd_DataInPhase0;
93 #define amd_CommandPhase0 amd_NopPhase
94 static phase_handler_t amd_StatusPhase0;
95 static phase_handler_t amd_MsgOutPhase0;
96 static phase_handler_t amd_MsgInPhase0;
97 static phase_handler_t amd_DataOutPhase1;
98 static phase_handler_t amd_DataInPhase1;
99 static phase_handler_t amd_CommandPhase1;
100 static phase_handler_t amd_StatusPhase1;
101 static phase_handler_t amd_MsgOutPhase1;
102 static phase_handler_t amd_MsgInPhase1;
103
104 static void amdsetupcommand(struct amd_softc *amd, struct amd_srb *srb);
105 static int amdparsemsg(struct amd_softc *amd);
106 static int amdhandlemsgreject(struct amd_softc *amd);
107 static void amdconstructsdtr(struct amd_softc *amd,
108 u_int period, u_int offset);
109 static u_int amdfindclockrate(struct amd_softc *amd, u_int *period);
110 static int amdsentmsg(struct amd_softc *amd, u_int msgtype, int full);
111
112 static void DataIO_Comm(struct amd_softc *amd, struct amd_srb *pSRB, u_int dir);
113 static void amd_Disconnect(struct amd_softc *amd);
114 static void amd_Reselect(struct amd_softc *amd);
115 static void SRBdone(struct amd_softc *amd, struct amd_srb *pSRB);
116 static void amd_ScsiRstDetect(struct amd_softc *amd);
117 static void amd_ResetSCSIBus(struct amd_softc *amd);
118 static void RequestSense(struct amd_softc *amd, struct amd_srb *pSRB);
119 static void amd_InvalidCmd(struct amd_softc *amd);
120
121 #if 0
122 static void amd_timeout(void *arg1);
123 static void amd_reset(struct amd_softc *amd);
124 #endif
125 static u_int8_t * phystovirt(struct amd_srb *pSRB, u_int32_t xferCnt);
126
127 void amd_linkSRB(struct amd_softc *amd);
128 static int amd_init(device_t);
129 static void amd_load_defaults(struct amd_softc *amd);
130 static void amd_load_eeprom_or_defaults(struct amd_softc *amd);
131 static int amd_EEpromInDO(struct amd_softc *amd);
132 static u_int16_t EEpromGetData1(struct amd_softc *amd);
133 static void amd_EnDisableCE(struct amd_softc *amd, int mode, int *regval);
134 static void amd_EEpromOutDI(struct amd_softc *amd, int *regval, int Carry);
135 static void amd_Prepare(struct amd_softc *amd, int *regval, u_int8_t EEpromCmd);
136 static void amd_ReadEEprom(struct amd_softc *amd);
137
138 static int amd_probe(device_t);
139 static int amd_attach(device_t);
140 static void amdcompletematch(struct amd_softc *amd, target_id_t target,
141 lun_id_t lun, u_int tag, struct srb_queue *queue,
142 cam_status status);
143 static void amdsetsync(struct amd_softc *amd, u_int target, u_int clockrate,
144 u_int period, u_int offset, u_int type);
145 static void amdsettags(struct amd_softc *amd, u_int target, int tagenb);
146
147 static __inline void amd_clear_msg_state(struct amd_softc *amd);
148
149 static __inline void
150 amd_clear_msg_state(struct amd_softc *amd)
151 {
152 amd->msgout_len = 0;
153 amd->msgout_index = 0;
154 amd->msgin_index = 0;
155 }
156
157 /* CAM SIM entry points */
158 #define ccb_srb_ptr spriv_ptr0
159 #define ccb_amd_ptr spriv_ptr1
160 static void amd_action(struct cam_sim *sim, union ccb *ccb);
161 static void amd_poll(struct cam_sim *sim);
162
163 /*
164 * State engine function tables indexed by SCSI phase number
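* Phase0 handlers post-process the phase that just ended; phase1
* handlers set the chip up for the new phase (see amd_intr()).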
165 */
166 phase_handler_func_t amd_SCSI_phase0[] = {
167 amd_DataOutPhase0,
168 amd_DataInPhase0,
169 amd_CommandPhase0,
170 amd_StatusPhase0,
171 amd_NopPhase,
172 amd_NopPhase,
173 amd_MsgOutPhase0,
174 amd_MsgInPhase0
175 };
176
177 phase_handler_func_t amd_SCSI_phase1[] = {
178 amd_DataOutPhase1,
179 amd_DataInPhase1,
180 amd_CommandPhase1,
181 amd_StatusPhase1,
182 amd_NopPhase,
183 amd_NopPhase,
184 amd_MsgOutPhase1,
185 amd_MsgInPhase1
186 };
187
188 /*
189 * EEProm/BIOS negotiation periods
190 */
191 u_int8_t eeprom_period[] = {
192 25, /* 10.0MHz */
193 32, /* 8.0MHz */
194 38, /* 6.6MHz */
195 44, /* 5.7MHz */
196 50, /* 5.0MHz */
197 63, /* 4.0MHz */
198 83, /* 3.0MHz */
199 125 /* 2.0MHz */
200 };
201
202 /*
203  * Table mapping chip clock settings to SCSI-specified sync period parameters.
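* Index i in this table corresponds to chip clock setting i + 4
* (see amdfindclockrate()).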
204 */
205 u_int8_t tinfo_sync_period[] = {
206 25, /* 10.0 */
207 32, /* 8.0 */
208 38, /* 6.6 */
209 44, /* 5.7 */
210 50, /* 5.0 */
211 57, /* 4.4 */
212 63, /* 4.0 */
213 70, /* 3.6 */
214 76, /* 3.3 */
215 83 /* 3.0 */
216 };
217
218 static __inline struct amd_srb *
219 amdgetsrb(struct amd_softc * amd)
220 {
221 int intflag;
222 struct amd_srb * pSRB;
223
224 intflag = splcam();
225 pSRB = TAILQ_FIRST(&amd->free_srbs);
226 if (pSRB)
227 TAILQ_REMOVE(&amd->free_srbs, pSRB, links);
228 splx(intflag);
229 return (pSRB);
230 }
231
232 static void
233 amdsetupcommand(struct amd_softc *amd, struct amd_srb *srb)
234 {
235 struct scsi_request_sense sense_cmd;
236 struct ccb_scsiio *csio;
237 u_int8_t *cdb;
238 u_int cdb_len;
239
240 csio = &srb->pccb->csio;
241
242 if (srb->SRBFlag & AUTO_REQSENSE) {
243 sense_cmd.opcode = REQUEST_SENSE;
244 sense_cmd.byte2 = srb->pccb->ccb_h.target_lun << 5;
245 sense_cmd.unused[0] = 0;
246 sense_cmd.unused[1] = 0;
247 sense_cmd.length = csio->sense_len;
248 sense_cmd.control = 0;
249 cdb = &sense_cmd.opcode;
250 cdb_len = sizeof(sense_cmd);
251 } else {
252 cdb = &srb->CmdBlock[0];
253 cdb_len = srb->ScsiCmdLen;
254 }
255 amd_write8_multi(amd, SCSIFIFOREG, cdb, cdb_len);
256 }
257
258 /*
259 * Attempt to start a waiting transaction. Interrupts must be disabled
260 * upon entry to this function.
261 */
262 static void
263 amdrunwaiting(struct amd_softc *amd) {
264 struct amd_srb *srb;
265
266 if (amd->last_phase != SCSI_BUS_FREE)
267 return;
268
269 srb = TAILQ_FIRST(&amd->waiting_srbs);
270 if (srb == NULL)
271 return;
272
273 if (amdstart(amd, srb) == 0) {
274 TAILQ_REMOVE(&amd->waiting_srbs, srb, links);
275 TAILQ_INSERT_HEAD(&amd->running_srbs, srb, links);
276 }
277 }
278
279 static void
280 amdexecutesrb(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
281 {
282 struct amd_srb *srb;
283 union ccb *ccb;
284 struct amd_softc *amd;
285 int s;
286
287 srb = (struct amd_srb *)arg;
288 ccb = srb->pccb;
289 amd = (struct amd_softc *)ccb->ccb_h.ccb_amd_ptr;
290
291 if (error != 0) {
292 if (error != EFBIG)
293 			printf("amd%d: Unexpected error 0x%x returned from "
294 "bus_dmamap_load\n", amd->unit, error);
295 if (ccb->ccb_h.status == CAM_REQ_INPROG) {
296 xpt_freeze_devq(ccb->ccb_h.path, /*count*/1);
297 ccb->ccb_h.status = CAM_REQ_TOO_BIG|CAM_DEV_QFRZN;
298 }
299 TAILQ_INSERT_HEAD(&amd->free_srbs, srb, links);
300 xpt_done(ccb);
301 return;
302 }
303
304 if (nseg != 0) {
305 struct amd_sg *sg;
306 bus_dma_segment_t *end_seg;
307 bus_dmasync_op_t op;
308
309 end_seg = dm_segs + nseg;
310
311 /* Copy the segments into our SG list */
312 srb->pSGlist = &srb->SGsegment[0];
313 sg = srb->pSGlist;
314 while (dm_segs < end_seg) {
315 sg->SGXLen = dm_segs->ds_len;
316 sg->SGXPtr = dm_segs->ds_addr;
317 sg++;
318 dm_segs++;
319 }
320
321 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN)
322 op = BUS_DMASYNC_PREREAD;
323 else
324 op = BUS_DMASYNC_PREWRITE;
325
326 bus_dmamap_sync(amd->buffer_dmat, srb->dmamap, op);
327
328 }
329 srb->SGcount = nseg;
330 srb->SGIndex = 0;
331 srb->AdaptStatus = 0;
332 srb->TargetStatus = 0;
333 srb->MsgCnt = 0;
334 srb->SRBStatus = 0;
335 srb->SRBFlag = 0;
336 srb->SRBState = 0;
337 srb->TotalXferredLen = 0;
338 srb->SGPhysAddr = 0;
339 srb->SGToBeXferLen = 0;
340 srb->EndMessage = 0;
341
342 s = splcam();
343
344 /*
345 	 * Last chance to check whether this CCB needs to
346 	 * be aborted.
347 */
348 if (ccb->ccb_h.status != CAM_REQ_INPROG) {
349 if (nseg != 0)
350 bus_dmamap_unload(amd->buffer_dmat, srb->dmamap);
351 TAILQ_INSERT_HEAD(&amd->free_srbs, srb, links);
352 xpt_done(ccb);
353 splx(s);
354 return;
355 }
356 ccb->ccb_h.status |= CAM_SIM_QUEUED;
357 #if 0
358 /* XXX Need a timeout handler */
359 ccb->ccb_h.timeout_ch =
360 timeout(amdtimeout, (caddr_t)srb,
361 (ccb->ccb_h.timeout * hz) / 1000);
362 #endif
363 TAILQ_INSERT_TAIL(&amd->waiting_srbs, srb, links);
364 amdrunwaiting(amd);
365 splx(s);
366 }
367
368 static void
369 amd_action(struct cam_sim * psim, union ccb * pccb)
370 {
371 struct amd_softc * amd;
372 u_int target_id, target_lun;
373
374 CAM_DEBUG(pccb->ccb_h.path, CAM_DEBUG_TRACE, ("amd_action\n"));
375
376 amd = (struct amd_softc *) cam_sim_softc(psim);
377 target_id = pccb->ccb_h.target_id;
378 target_lun = pccb->ccb_h.target_lun;
379
380 switch (pccb->ccb_h.func_code) {
381 case XPT_SCSI_IO:
382 {
383 struct amd_srb * pSRB;
384 struct ccb_scsiio *pcsio;
385
386 pcsio = &pccb->csio;
387
388 /*
389 * Assign an SRB and connect it with this ccb.
390 */
391 pSRB = amdgetsrb(amd);
392
393 if (!pSRB) {
394 /* Freeze SIMQ */
395 pccb->ccb_h.status = CAM_RESRC_UNAVAIL;
396 xpt_done(pccb);
397 return;
398 }
399 pSRB->pccb = pccb;
400 pccb->ccb_h.ccb_srb_ptr = pSRB;
401 pccb->ccb_h.ccb_amd_ptr = amd;
402 pSRB->ScsiCmdLen = pcsio->cdb_len;
403 bcopy(pcsio->cdb_io.cdb_bytes, pSRB->CmdBlock, pcsio->cdb_len);
404 if ((pccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
405 if ((pccb->ccb_h.flags & CAM_SCATTER_VALID) == 0) {
406 /*
407 * We've been given a pointer
408 * to a single buffer.
409 */
410 if ((pccb->ccb_h.flags & CAM_DATA_PHYS) == 0) {
411 int s;
412 int error;
413
414 s = splsoftvm();
415 error =
416 bus_dmamap_load(amd->buffer_dmat,
417 pSRB->dmamap,
418 pcsio->data_ptr,
419 pcsio->dxfer_len,
420 amdexecutesrb,
421 pSRB, /*flags*/0);
422 if (error == EINPROGRESS) {
423 /*
424 * So as to maintain
425 * ordering, freeze the
426 * controller queue
427 * until our mapping is
428 * returned.
429 */
430 xpt_freeze_simq(amd->psim, 1);
431 pccb->ccb_h.status |=
432 CAM_RELEASE_SIMQ;
433 }
434 splx(s);
435 } else {
436 struct bus_dma_segment seg;
437
438 /* Pointer to physical buffer */
439 seg.ds_addr =
440 (bus_addr_t)pcsio->data_ptr;
441 seg.ds_len = pcsio->dxfer_len;
442 amdexecutesrb(pSRB, &seg, 1, 0);
443 }
444 } else {
445 struct bus_dma_segment *segs;
446
447 if ((pccb->ccb_h.flags & CAM_SG_LIST_PHYS) == 0
448 || (pccb->ccb_h.flags & CAM_DATA_PHYS) != 0) {
449 TAILQ_INSERT_HEAD(&amd->free_srbs,
450 pSRB, links);
451 pccb->ccb_h.status = CAM_PROVIDE_FAIL;
452 xpt_done(pccb);
453 return;
454 }
455
456 /* Just use the segments provided */
457 segs =
458 (struct bus_dma_segment *)pcsio->data_ptr;
459 amdexecutesrb(pSRB, segs, pcsio->sglist_cnt, 0);
460 }
461 } else
462 amdexecutesrb(pSRB, NULL, 0, 0);
463 break;
464 }
465 case XPT_PATH_INQ:
466 {
467 struct ccb_pathinq *cpi = &pccb->cpi;
468
469 cpi->version_num = 1;
470 cpi->hba_inquiry = PI_SDTR_ABLE | PI_TAG_ABLE;
471 cpi->target_sprt = 0;
472 cpi->hba_misc = 0;
473 cpi->hba_eng_cnt = 0;
474 cpi->max_target = 7;
475 cpi->max_lun = amd->max_lun; /* 7 or 0 */
476 cpi->initiator_id = amd->AdaptSCSIID;
477 cpi->bus_id = cam_sim_bus(psim);
478 strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
479 strncpy(cpi->hba_vid, "TRM-AMD", HBA_IDLEN);
480 strncpy(cpi->dev_name, cam_sim_name(psim), DEV_IDLEN);
481 cpi->unit_number = cam_sim_unit(psim);
482 cpi->ccb_h.status = CAM_REQ_CMP;
483 xpt_done(pccb);
484 break;
485 }
486 case XPT_ABORT:
487 pccb->ccb_h.status = CAM_REQ_INVALID;
488 xpt_done(pccb);
489 break;
490 case XPT_RESET_BUS:
491 {
492
493 int i;
494
495 amd_ResetSCSIBus(amd);
496 amd->ACBFlag = 0;
497
498 for (i = 0; i < 500; i++) {
499 DELAY(1000); /* Wait until our interrupt
500 * handler sees it */
501 }
502
503 pccb->ccb_h.status = CAM_REQ_CMP;
504 xpt_done(pccb);
505 break;
506 }
507 case XPT_RESET_DEV:
508 pccb->ccb_h.status = CAM_REQ_INVALID;
509 xpt_done(pccb);
510 break;
511 case XPT_TERM_IO:
512 pccb->ccb_h.status = CAM_REQ_INVALID;
513 		xpt_done(pccb);
		break;
514 case XPT_GET_TRAN_SETTINGS:
515 {
516 struct ccb_trans_settings *cts;
517 struct amd_target_info *targ_info;
518 struct amd_transinfo *tinfo;
519 int intflag;
520
521 cts = &pccb->cts;
522 intflag = splcam();
523 targ_info = &amd->tinfo[target_id];
524 if ((cts->flags & CCB_TRANS_CURRENT_SETTINGS) != 0) {
525 /* current transfer settings */
526 if (targ_info->disc_tag & AMD_CUR_DISCENB) {
527 cts->flags = CCB_TRANS_DISC_ENB;
528 } else {
529 cts->flags = 0; /* no tag & disconnect */
530 }
531 if (targ_info->disc_tag & AMD_CUR_TAGENB) {
532 cts->flags |= CCB_TRANS_TAG_ENB;
533 }
534 tinfo = &targ_info->current;
535 } else {
536 /* default(user) transfer settings */
537 if (targ_info->disc_tag & AMD_USR_DISCENB) {
538 cts->flags = CCB_TRANS_DISC_ENB;
539 } else {
540 cts->flags = 0;
541 }
542 if (targ_info->disc_tag & AMD_USR_TAGENB) {
543 cts->flags |= CCB_TRANS_TAG_ENB;
544 }
545 tinfo = &targ_info->user;
546 }
547
548 cts->sync_period = tinfo->period;
549 cts->sync_offset = tinfo->offset;
550 cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
551 splx(intflag);
552 cts->valid = CCB_TRANS_SYNC_RATE_VALID
553 | CCB_TRANS_SYNC_OFFSET_VALID
554 | CCB_TRANS_BUS_WIDTH_VALID
555 | CCB_TRANS_DISC_VALID
556 | CCB_TRANS_TQ_VALID;
557 pccb->ccb_h.status = CAM_REQ_CMP;
558 xpt_done(pccb);
559 break;
560 }
561 case XPT_SET_TRAN_SETTINGS:
562 {
563 struct ccb_trans_settings *cts;
564 struct amd_target_info *targ_info;
565 u_int update_type;
566 int intflag;
567 int last_entry;
568
569 cts = &pccb->cts;
570 update_type = 0;
571 if ((cts->flags & CCB_TRANS_CURRENT_SETTINGS) != 0) {
572 update_type |= AMD_TRANS_GOAL;
573 } else if ((cts->flags & CCB_TRANS_USER_SETTINGS) != 0) {
574 update_type |= AMD_TRANS_USER;
575 }
576 if (update_type == 0
577 || update_type == (AMD_TRANS_USER|AMD_TRANS_GOAL)) {
578 cts->ccb_h.status = CAM_REQ_INVALID;
579 			xpt_done(pccb);
			break;
580 }
581
582 intflag = splcam();
583 targ_info = &amd->tinfo[target_id];
584
585 if ((cts->valid & CCB_TRANS_DISC_VALID) != 0) {
586 if (update_type & AMD_TRANS_GOAL) {
587 if ((cts->flags & CCB_TRANS_DISC_ENB) != 0) {
588 targ_info->disc_tag |= AMD_CUR_DISCENB;
589 } else {
590 targ_info->disc_tag &= ~AMD_CUR_DISCENB;
591 }
592 }
593 if (update_type & AMD_TRANS_USER) {
594 if ((cts->flags & CCB_TRANS_DISC_ENB) != 0) {
595 targ_info->disc_tag |= AMD_USR_DISCENB;
596 } else {
597 targ_info->disc_tag &= ~AMD_USR_DISCENB;
598 }
599 }
600 }
601 if ((cts->valid & CCB_TRANS_TQ_VALID) != 0) {
602 if (update_type & AMD_TRANS_GOAL) {
603 if ((cts->flags & CCB_TRANS_TAG_ENB) != 0) {
604 targ_info->disc_tag |= AMD_CUR_TAGENB;
605 } else {
606 targ_info->disc_tag &= ~AMD_CUR_TAGENB;
607 }
608 }
609 if (update_type & AMD_TRANS_USER) {
610 if ((cts->flags & CCB_TRANS_TAG_ENB) != 0) {
611 targ_info->disc_tag |= AMD_USR_TAGENB;
612 } else {
613 targ_info->disc_tag &= ~AMD_USR_TAGENB;
614 }
615 }
616 }
617
618 if ((cts->valid & CCB_TRANS_SYNC_OFFSET_VALID) == 0) {
619 if (update_type & AMD_TRANS_GOAL)
620 cts->sync_offset = targ_info->goal.offset;
621 else
622 cts->sync_offset = targ_info->user.offset;
623 }
624
625 if (cts->sync_offset > AMD_MAX_SYNC_OFFSET)
626 cts->sync_offset = AMD_MAX_SYNC_OFFSET;
627
628 if ((cts->valid & CCB_TRANS_SYNC_RATE_VALID) == 0) {
629 if (update_type & AMD_TRANS_GOAL)
630 cts->sync_period = targ_info->goal.period;
631 else
632 cts->sync_period = targ_info->user.period;
633 }
634
635 last_entry = sizeof(tinfo_sync_period) - 1;
636 if ((cts->sync_period != 0)
637 && (cts->sync_period < tinfo_sync_period[0]))
638 cts->sync_period = tinfo_sync_period[0];
639 if (cts->sync_period > tinfo_sync_period[last_entry])
640 cts->sync_period = 0;
641 if (cts->sync_offset == 0)
642 cts->sync_period = 0;
643
644 if ((update_type & AMD_TRANS_USER) != 0) {
645 targ_info->user.period = cts->sync_period;
646 targ_info->user.offset = cts->sync_offset;
647 }
648 if ((update_type & AMD_TRANS_GOAL) != 0) {
649 targ_info->goal.period = cts->sync_period;
650 targ_info->goal.offset = cts->sync_offset;
651 }
652 splx(intflag);
653 pccb->ccb_h.status = CAM_REQ_CMP;
654 xpt_done(pccb);
655 break;
656 }
657 case XPT_CALC_GEOMETRY:
658 {
659 struct ccb_calc_geometry *ccg;
660 u_int32_t size_mb;
661 u_int32_t secs_per_cylinder;
662 int extended;
663
664 ccg = &pccb->ccg;
665 size_mb = ccg->volume_size/((1024L * 1024L)/ccg->block_size);
666 extended = (amd->eepromBuf[EE_MODE2] & GREATER_1G) != 0;
667
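/*
* Use 255-head/63-sector translation for volumes larger than 1GB when
* the EEPROM "greater than 1G" option is enabled; otherwise use 64
* heads and 32 sectors.
*/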
668 if (size_mb > 1024 && extended) {
669 ccg->heads = 255;
670 ccg->secs_per_track = 63;
671 } else {
672 ccg->heads = 64;
673 ccg->secs_per_track = 32;
674 }
675 secs_per_cylinder = ccg->heads * ccg->secs_per_track;
676 ccg->cylinders = ccg->volume_size / secs_per_cylinder;
677 pccb->ccb_h.status = CAM_REQ_CMP;
678 xpt_done(pccb);
679 break;
680 }
681 default:
682 pccb->ccb_h.status = CAM_REQ_INVALID;
683 xpt_done(pccb);
684 break;
685 }
686 }
687
688 static void
689 amd_poll(struct cam_sim * psim)
690 {
691 amd_intr(cam_sim_softc(psim));
692 }
693
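/*
* Despite its name, this computes the kernel virtual address within the
* CCB's data buffer corresponding to the data already transferred in
* completed S/G segments plus xferCnt into the current segment.  It
* assumes data_ptr is a virtual address.
*/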
694 static u_int8_t *
695 phystovirt(struct amd_srb * pSRB, u_int32_t xferCnt)
696 {
697 intptr_t dataPtr;
698 struct ccb_scsiio *pcsio;
699 u_int8_t i;
700 struct amd_sg * pseg;
701
702 dataPtr = 0;
703 pcsio = &pSRB->pccb->csio;
704
705 dataPtr = (intptr_t) pcsio->data_ptr;
706 pseg = pSRB->SGsegment;
707 for (i = 0; i < pSRB->SGIndex; i++) {
708 dataPtr += (int) pseg->SGXLen;
709 pseg++;
710 }
711 dataPtr += (int) xferCnt;
712 return ((u_int8_t *) dataPtr);
713 }
714
715 static void
716 ResetDevParam(struct amd_softc * amd)
717 {
718 u_int target;
719
720 for (target = 0; target <= amd->max_id; target++) {
721 if (amd->AdaptSCSIID != target) {
722 amdsetsync(amd, target, /*clockrate*/0,
723 /*period*/0, /*offset*/0, AMD_TRANS_CUR);
724 }
725 }
726 }
727
728 static void
729 amdcompletematch(struct amd_softc *amd, target_id_t target, lun_id_t lun,
730 u_int tag, struct srb_queue *queue, cam_status status)
731 {
732 struct amd_srb *srb;
733 struct amd_srb *next_srb;
734
735 for (srb = TAILQ_FIRST(queue); srb != NULL; srb = next_srb) {
736 union ccb *ccb;
737
738 next_srb = TAILQ_NEXT(srb, links);
739 if (srb->pccb->ccb_h.target_id != target
740 && target != CAM_TARGET_WILDCARD)
741 continue;
742
743 if (srb->pccb->ccb_h.target_lun != lun
744 && lun != CAM_LUN_WILDCARD)
745 continue;
746
747 if (srb->TagNumber != tag
748 && tag != AMD_TAG_WILDCARD)
749 continue;
750
751 ccb = srb->pccb;
752 TAILQ_REMOVE(queue, srb, links);
753 TAILQ_INSERT_HEAD(&amd->free_srbs, srb, links);
754 if ((ccb->ccb_h.status & CAM_DEV_QFRZN) == 0
755 && (status & CAM_DEV_QFRZN) != 0)
756 xpt_freeze_devq(ccb->ccb_h.path, /*count*/1);
757 ccb->ccb_h.status = status;
758 xpt_done(ccb);
759 }
760
761 }
762
763 static void
764 amdsetsync(struct amd_softc *amd, u_int target, u_int clockrate,
765 u_int period, u_int offset, u_int type)
766 {
767 struct amd_target_info *tinfo;
768 u_int old_period;
769 u_int old_offset;
770
771 tinfo = &amd->tinfo[target];
772 old_period = tinfo->current.period;
773 old_offset = tinfo->current.offset;
774 if ((type & AMD_TRANS_CUR) != 0
775 && (old_period != period || old_offset != offset)) {
776 struct cam_path *path;
777
778 tinfo->current.period = period;
779 tinfo->current.offset = offset;
780 tinfo->sync_period_reg = clockrate;
781 tinfo->sync_offset_reg = offset;
782 tinfo->CtrlR3 &= ~FAST_SCSI;
783 tinfo->CtrlR4 &= ~EATER_25NS;
784 if (clockrate > 7)
785 tinfo->CtrlR4 |= EATER_25NS;
786 else
787 tinfo->CtrlR3 |= FAST_SCSI;
788
789 if ((type & AMD_TRANS_ACTIVE) == AMD_TRANS_ACTIVE) {
790 amd_write8(amd, SYNCPERIOREG, tinfo->sync_period_reg);
791 amd_write8(amd, SYNCOFFREG, tinfo->sync_offset_reg);
792 amd_write8(amd, CNTLREG3, tinfo->CtrlR3);
793 amd_write8(amd, CNTLREG4, tinfo->CtrlR4);
794 }
795 /* If possible, update the XPT's notion of our transfer rate */
796 if (xpt_create_path(&path, /*periph*/NULL,
797 cam_sim_path(amd->psim), target,
798 CAM_LUN_WILDCARD) == CAM_REQ_CMP) {
799 struct ccb_trans_settings neg;
800
801 xpt_setup_ccb(&neg.ccb_h, path, /*priority*/1);
802 neg.sync_period = period;
803 neg.sync_offset = offset;
804 neg.valid = CCB_TRANS_SYNC_RATE_VALID
805 | CCB_TRANS_SYNC_OFFSET_VALID;
806 xpt_async(AC_TRANSFER_NEG, path, &neg);
807 xpt_free_path(path);
808 }
809 }
810 if ((type & AMD_TRANS_GOAL) != 0) {
811 tinfo->goal.period = period;
812 tinfo->goal.offset = offset;
813 }
814
815 if ((type & AMD_TRANS_USER) != 0) {
816 tinfo->user.period = period;
817 tinfo->user.offset = offset;
818 }
819 }
820
821 static void
822 amdsettags(struct amd_softc *amd, u_int target, int tagenb)
823 {
824 panic("Implement me!\n");
825 }
826
827
828 #if 0
829 /*
830 **********************************************************************
831 * Function : amd_reset (struct amd_softc * amd)
832 * Purpose : perform a hard reset on the SCSI bus( and AMD chip).
833 * Inputs : cmd - command which caused the SCSI RESET
834 **********************************************************************
835 */
836 static void
837 amd_reset(struct amd_softc * amd)
838 {
839 int intflag;
840 u_int8_t bval;
841 u_int16_t i;
842
843
844 #ifdef AMD_DEBUG0
845 printf("DC390: RESET");
846 #endif
847
848 intflag = splcam();
849 bval = amd_read8(amd, CNTLREG1);
850 bval |= DIS_INT_ON_SCSI_RST;
851 amd_write8(amd, CNTLREG1, bval); /* disable interrupt */
852 amd_ResetSCSIBus(amd);
853
854 for (i = 0; i < 500; i++) {
855 DELAY(1000);
856 }
857
858 bval = amd_read8(amd, CNTLREG1);
859 bval &= ~DIS_INT_ON_SCSI_RST;
860 amd_write8(amd, CNTLREG1, bval); /* re-enable interrupt */
861
862 amd_write8(amd, DMA_Cmd, DMA_IDLE_CMD);
863 amd_write8(amd, SCSICMDREG, CLEAR_FIFO_CMD);
864
865 ResetDevParam(amd);
866 amdcompletematch(amd, CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD,
867 AMD_TAG_WILDCARD, &amd->running_srbs,
868 CAM_DEV_QFRZN|CAM_SCSI_BUS_RESET);
869 amdcompletematch(amd, CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD,
870 AMD_TAG_WILDCARD, &amd->waiting_srbs,
871 CAM_DEV_QFRZN|CAM_SCSI_BUS_RESET);
872 amd->active_srb = NULL;
873 amd->ACBFlag = 0;
874 splx(intflag);
875 return;
876 }
877
878 void
879 amd_timeout(void *arg1)
880 {
881 struct amd_srb * pSRB;
882
883 pSRB = (struct amd_srb *) arg1;
884 }
885 #endif
886
887 static int
888 amdstart(struct amd_softc *amd, struct amd_srb *pSRB)
889 {
890 union ccb *pccb;
891 struct ccb_scsiio *pcsio;
892 struct amd_target_info *targ_info;
893 u_int identify_msg;
894 u_int command;
895 u_int target;
896 u_int lun;
897 int tagged;
898
899 pccb = pSRB->pccb;
900 pcsio = &pccb->csio;
901 target = pccb->ccb_h.target_id;
902 lun = pccb->ccb_h.target_lun;
903 targ_info = &amd->tinfo[target];
904
905 amd_clear_msg_state(amd);
906 amd_write8(amd, SCSIDESTIDREG, target);
907 amd_write8(amd, SYNCPERIOREG, targ_info->sync_period_reg);
908 amd_write8(amd, SYNCOFFREG, targ_info->sync_offset_reg);
909 amd_write8(amd, CNTLREG1, targ_info->CtrlR1);
910 amd_write8(amd, CNTLREG3, targ_info->CtrlR3);
911 amd_write8(amd, CNTLREG4, targ_info->CtrlR4);
912 amd_write8(amd, SCSICMDREG, CLEAR_FIFO_CMD);
913
914 identify_msg = MSG_IDENTIFYFLAG | lun;
915 if ((targ_info->disc_tag & AMD_CUR_DISCENB) != 0
916 && (pccb->ccb_h.flags & CAM_DIS_DISCONNECT) == 0
917 && (pSRB->CmdBlock[0] != REQUEST_SENSE)
918 && (pSRB->SRBFlag & AUTO_REQSENSE) == 0)
919 identify_msg |= MSG_IDENTIFY_DISCFLAG;
920
921 amd_write8(amd, SCSIFIFOREG, identify_msg);
922 tagged = 0;
923 if ((targ_info->disc_tag & AMD_CUR_TAGENB) == 0
924 || (identify_msg & MSG_IDENTIFY_DISCFLAG) == 0)
925 pccb->ccb_h.flags &= ~CAM_TAG_ACTION_VALID;
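/*
* Pick the selection command: SEL_W_ATN_STOP when sync parameters still
* need to be negotiated (stop in message-out so the SDTR can be sent),
* SEL_W_ATN2 for tagged commands (IDENTIFY plus two tag message bytes),
* and plain SEL_W_ATN otherwise.
*/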
926 if (targ_info->current.period != targ_info->goal.period
927 || targ_info->current.offset != targ_info->goal.offset) {
928 command = SEL_W_ATN_STOP;
929 amdconstructsdtr(amd, targ_info->goal.period,
930 targ_info->goal.offset);
931 } else if ((pccb->ccb_h.flags & CAM_TAG_ACTION_VALID) != 0) {
932 command = SEL_W_ATN2;
933 pSRB->SRBState = SRB_START;
934 amd_write8(amd, SCSIFIFOREG, pcsio->tag_action);
935 amd_write8(amd, SCSIFIFOREG, pSRB->TagNumber);
936 tagged++;
937 } else {
938 command = SEL_W_ATN;
939 pSRB->SRBState = SRB_START;
940 }
941 if (command != SEL_W_ATN_STOP)
942 amdsetupcommand(amd, pSRB);
943
944 if (amd_read8(amd, SCSISTATREG) & INTERRUPT) {
945 pSRB->SRBState = SRB_READY;
946 return (1);
947 } else {
948 amd->last_phase = SCSI_ARBITRATING;
949 amd_write8(amd, SCSICMDREG, command);
950 amd->active_srb = pSRB;
951 amd->cur_target = target;
952 amd->cur_lun = lun;
953 return (0);
954 }
955 }
956
957 /*
958 * Catch an interrupt from the adapter.
959 * Process pending device interrupts.
960 */
961 static void
962 amd_intr(void *arg)
963 {
964 struct amd_softc *amd;
965 struct amd_srb *pSRB;
966 u_int internstat = 0;
967 u_int scsistat;
968 u_int intstat;
969
970 amd = (struct amd_softc *)arg;
971
972 if (amd == NULL) {
973 #ifdef AMD_DEBUG0
974 printf("amd_intr: amd NULL return......");
975 #endif
976 return;
977 }
978
979 scsistat = amd_read8(amd, SCSISTATREG);
980 if (!(scsistat & INTERRUPT)) {
981 #ifdef AMD_DEBUG0
982 printf("amd_intr: scsistat = NULL ,return......");
983 #endif
984 return;
985 }
986 #ifdef AMD_DEBUG_SCSI_PHASE
987 printf("scsistat=%2x,", scsistat);
988 #endif
989
990 internstat = amd_read8(amd, INTERNSTATREG);
991 intstat = amd_read8(amd, INTSTATREG);
992
993 #ifdef AMD_DEBUG_SCSI_PHASE
994 printf("intstat=%2x,", intstat);
995 #endif
996
997 if (intstat & DISCONNECTED) {
998 amd_Disconnect(amd);
999 return;
1000 }
1001 if (intstat & RESELECTED) {
1002 amd_Reselect(amd);
1003 return;
1004 }
1005 if (intstat & INVALID_CMD) {
1006 amd_InvalidCmd(amd);
1007 return;
1008 }
1009 if (intstat & SCSI_RESET_) {
1010 amd_ScsiRstDetect(amd);
1011 return;
1012 }
1013 if (intstat & (SUCCESSFUL_OP + SERVICE_REQUEST)) {
1014 pSRB = amd->active_srb;
1015 /*
1016 * Run our state engine. First perform
1017 * post processing for the last phase we
1018 * were in, followed by any processing
1019 * required to handle the current phase.
1020 */
1021 scsistat =
1022 amd_SCSI_phase0[amd->last_phase](amd, pSRB, scsistat);
1023 amd->last_phase = scsistat & SCSI_PHASE_MASK;
1024 (void)amd_SCSI_phase1[amd->last_phase](amd, pSRB, scsistat);
1025 }
1026 }
1027
1028 static u_int
1029 amd_DataOutPhase0(struct amd_softc *amd, struct amd_srb *pSRB, u_int scsistat)
1030 {
1031 struct amd_sg *psgl;
1032 u_int32_t ResidCnt, xferCnt;
1033
1034 if (!(pSRB->SRBState & SRB_XFERPAD)) {
1035 if (scsistat & PARITY_ERR) {
1036 pSRB->SRBStatus |= PARITY_ERROR;
1037 }
1038 if (scsistat & COUNT_2_ZERO) {
1039 while ((amd_read8(amd, DMA_Status)&DMA_XFER_DONE) == 0)
1040 ;
1041 pSRB->TotalXferredLen += pSRB->SGToBeXferLen;
1042 pSRB->SGIndex++;
1043 if (pSRB->SGIndex < pSRB->SGcount) {
1044 pSRB->pSGlist++;
1045 psgl = pSRB->pSGlist;
1046 pSRB->SGPhysAddr = psgl->SGXPtr;
1047 pSRB->SGToBeXferLen = psgl->SGXLen;
1048 } else {
1049 pSRB->SGToBeXferLen = 0;
1050 }
1051 } else {
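/*
* The transfer stopped early: the residual is the bytes still in the
* SCSI FIFO plus the 24-bit transfer counter.
*/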
1052 ResidCnt = amd_read8(amd, CURRENTFIFOREG) & 0x1f;
1053 ResidCnt += amd_read8(amd, CTCREG_LOW)
1054 | (amd_read8(amd, CTCREG_MID) << 8)
1055 | (amd_read8(amd, CURTXTCNTREG) << 16);
1056
1057 xferCnt = pSRB->SGToBeXferLen - ResidCnt;
1058 pSRB->SGPhysAddr += xferCnt;
1059 pSRB->TotalXferredLen += xferCnt;
1060 pSRB->SGToBeXferLen = ResidCnt;
1061 }
1062 }
1063 amd_write8(amd, DMA_Cmd, WRITE_DIRECTION | DMA_IDLE_CMD);
1064 return (scsistat);
1065 }
1066
1067 static u_int
1068 amd_DataInPhase0(struct amd_softc *amd, struct amd_srb *pSRB, u_int scsistat)
1069 {
1070 u_int8_t bval;
1071 u_int16_t i, residual;
1072 struct amd_sg *psgl;
1073 u_int32_t ResidCnt, xferCnt;
1074 u_int8_t * ptr;
1075
1076 if (!(pSRB->SRBState & SRB_XFERPAD)) {
1077 if (scsistat & PARITY_ERR) {
1078 pSRB->SRBStatus |= PARITY_ERROR;
1079 }
1080 if (scsistat & COUNT_2_ZERO) {
1081 while (1) {
1082 bval = amd_read8(amd, DMA_Status);
1083 if ((bval & DMA_XFER_DONE) != 0)
1084 break;
1085 }
1086 amd_write8(amd, DMA_Cmd, READ_DIRECTION|DMA_IDLE_CMD);
1087
1088 pSRB->TotalXferredLen += pSRB->SGToBeXferLen;
1089 pSRB->SGIndex++;
1090 if (pSRB->SGIndex < pSRB->SGcount) {
1091 pSRB->pSGlist++;
1092 psgl = pSRB->pSGlist;
1093 pSRB->SGPhysAddr = psgl->SGXPtr;
1094 pSRB->SGToBeXferLen = psgl->SGXLen;
1095 } else {
1096 pSRB->SGToBeXferLen = 0;
1097 }
1098 } else { /* phase changed */
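/*
* The target changed phase mid-segment.  Wait for any data left in the
* SCSI FIFO to drain, flush the DMA engine with a BLAST command, then
* recompute the residual from the transfer counter and pick up any odd
* byte remaining in the FIFO.
*/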
1099 residual = 0;
1100 bval = amd_read8(amd, CURRENTFIFOREG);
1101 while (bval & 0x1f) {
1102 if ((bval & 0x1f) == 1) {
1103 for (i = 0; i < 0x100; i++) {
1104 bval = amd_read8(amd, CURRENTFIFOREG);
1105 if (!(bval & 0x1f)) {
1106 goto din_1;
1107 } else if (i == 0x0ff) {
1108 residual = 1;
1109 goto din_1;
1110 }
1111 }
1112 } else {
1113 bval = amd_read8(amd, CURRENTFIFOREG);
1114 }
1115 }
1116 din_1:
1117 amd_write8(amd, DMA_Cmd, READ_DIRECTION|DMA_BLAST_CMD);
1118 for (i = 0; i < 0x8000; i++) {
1119 if ((amd_read8(amd, DMA_Status)&BLAST_COMPLETE))
1120 break;
1121 }
1122 amd_write8(amd, DMA_Cmd, READ_DIRECTION|DMA_IDLE_CMD);
1123
1124 ResidCnt = amd_read8(amd, CTCREG_LOW)
1125 | (amd_read8(amd, CTCREG_MID) << 8)
1126 | (amd_read8(amd, CURTXTCNTREG) << 16);
1127 xferCnt = pSRB->SGToBeXferLen - ResidCnt;
1128 pSRB->SGPhysAddr += xferCnt;
1129 pSRB->TotalXferredLen += xferCnt;
1130 pSRB->SGToBeXferLen = ResidCnt;
1131 if (residual) {
1132 /* get residual byte */
1133 bval = amd_read8(amd, SCSIFIFOREG);
1134 ptr = phystovirt(pSRB, xferCnt);
1135 *ptr = bval;
1136 pSRB->SGPhysAddr++;
1137 pSRB->TotalXferredLen++;
1138 pSRB->SGToBeXferLen--;
1139 }
1140 }
1141 }
1142 return (scsistat);
1143 }
1144
1145 static u_int
1146 amd_StatusPhase0(struct amd_softc *amd, struct amd_srb *pSRB, u_int scsistat)
1147 {
1148 pSRB->TargetStatus = amd_read8(amd, SCSIFIFOREG);
1149 /* get message */
1150 pSRB->EndMessage = amd_read8(amd, SCSIFIFOREG);
1151 pSRB->SRBState = SRB_COMPLETED;
1152 amd_write8(amd, SCSICMDREG, MSG_ACCEPTED_CMD);
1153 return (SCSI_NOP0);
1154 }
1155
1156 static u_int
1157 amd_MsgOutPhase0(struct amd_softc *amd, struct amd_srb *pSRB, u_int scsistat)
1158 {
1159 if (pSRB->SRBState & (SRB_UNEXPECT_RESEL + SRB_ABORT_SENT)) {
1160 scsistat = SCSI_NOP0;
1161 }
1162 return (scsistat);
1163 }
1164
1165 static u_int
1166 amd_MsgInPhase0(struct amd_softc *amd, struct amd_srb *pSRB, u_int scsistat)
1167 {
1168 int done;
1169
1170 amd->msgin_buf[amd->msgin_index] = amd_read8(amd, SCSIFIFOREG);
1171
1172 done = amdparsemsg(amd);
1173 if (done)
1174 amd->msgin_index = 0;
1175 else
1176 amd->msgin_index++;
1177 return (SCSI_NOP0);
1178 }
1179
1180 static int
1181 amdparsemsg(struct amd_softc *amd)
1182 {
1183 struct amd_target_info *targ_info;
1184 int reject;
1185 int done;
1186 int response;
1187
1188 done = FALSE;
1189 response = FALSE;
1190 reject = FALSE;
1191
1192 targ_info = &amd->tinfo[amd->cur_target];
1193
1194 /*
1195 	 * Parse as much of the message as is available,
1196 	 * rejecting it if we don't support it.  When
1197 	 * the entire message is available and has been
1198 * handled, return TRUE indicating that we have
1199 * parsed an entire message.
1200 */
1201 switch (amd->msgin_buf[0]) {
1202 case MSG_DISCONNECT:
1203 amd->active_srb->SRBState = SRB_DISCONNECT;
1204 amd->disc_count[amd->cur_target][amd->cur_lun]++;
1205 done = TRUE;
1206 break;
1207 case MSG_SIMPLE_Q_TAG:
1208 {
1209 struct amd_srb *disc_srb;
1210
1211 if (amd->msgin_index < 1)
1212 break;
1213 disc_srb = &amd->SRB_array[amd->msgin_buf[1]];
1214 if (amd->active_srb != NULL
1215 || disc_srb->SRBState != SRB_DISCONNECT
1216 || disc_srb->pccb->ccb_h.target_id != amd->cur_target
1217 || disc_srb->pccb->ccb_h.target_lun != amd->cur_lun) {
1218 printf("amd%d: Unexpected tagged reselection "
1219 "for target %d, Issuing Abort\n", amd->unit,
1220 amd->cur_target);
1221 amd->msgout_buf[0] = MSG_ABORT;
1222 amd->msgout_len = 1;
1223 response = TRUE;
1224 break;
1225 }
1226 amd->active_srb = disc_srb;
1227 amd->disc_count[amd->cur_target][amd->cur_lun]--;
1228 done = TRUE;
1229 break;
1230 }
1231 case MSG_MESSAGE_REJECT:
1232 response = amdhandlemsgreject(amd);
1233 if (response == FALSE)
1234 amd_write8(amd, SCSICMDREG, RESET_ATN_CMD);
1235 /* FALLTHROUGH */
1236 case MSG_NOOP:
1237 done = TRUE;
1238 break;
1239 case MSG_EXTENDED:
1240 {
1241 u_int clockrate;
1242 u_int period;
1243 u_int offset;
1244 u_int saved_offset;
1245
1246 /* Wait for enough of the message to begin validation */
1247 if (amd->msgin_index < 1)
1248 break;
1249 if (amd->msgin_buf[1] != MSG_EXT_SDTR_LEN) {
1250 reject = TRUE;
1251 break;
1252 }
1253
1254 /* Wait for opcode */
1255 if (amd->msgin_index < 2)
1256 break;
1257
1258 if (amd->msgin_buf[2] != MSG_EXT_SDTR) {
1259 reject = TRUE;
1260 break;
1261 }
1262
1263 /*
1264 * Wait until we have both args before validating
1265 * and acting on this message.
1266 *
1267 * Add one to MSG_EXT_SDTR_LEN to account for
1268 * the extended message preamble.
1269 */
1270 if (amd->msgin_index < (MSG_EXT_SDTR_LEN + 1))
1271 break;
1272
1273 period = amd->msgin_buf[3];
1274 saved_offset = offset = amd->msgin_buf[4];
1275 clockrate = amdfindclockrate(amd, &period);
1276 if (offset > AMD_MAX_SYNC_OFFSET)
1277 offset = AMD_MAX_SYNC_OFFSET;
1278 if (period == 0 || offset == 0) {
1279 offset = 0;
1280 period = 0;
1281 clockrate = 0;
1282 }
1283 amdsetsync(amd, amd->cur_target, clockrate, period, offset,
1284 AMD_TRANS_ACTIVE|AMD_TRANS_GOAL);
1285
1286 /*
1287 * See if we initiated Sync Negotiation
1288 * and didn't have to fall down to async
1289 * transfers.
1290 */
1291 if (amdsentmsg(amd, MSG_EXT_SDTR, /*full*/TRUE)) {
1292 /* We started it */
1293 if (saved_offset != offset) {
1294 /* Went too low - force async */
1295 reject = TRUE;
1296 }
1297 } else {
1298 /*
1299 * Send our own SDTR in reply
1300 */
1301 if (bootverbose)
1302 printf("Sending SDTR!\n");
1303 amd->msgout_index = 0;
1304 amd->msgout_len = 0;
1305 amdconstructsdtr(amd, period, offset);
1306 amd->msgout_index = 0;
1307 response = TRUE;
1308 }
1309 done = TRUE;
1310 break;
1311 }
1312 case MSG_SAVEDATAPOINTER:
1313 case MSG_RESTOREPOINTERS:
1314 /* XXX Implement!!! */
1315 done = TRUE;
1316 break;
1317 default:
1318 reject = TRUE;
1319 break;
1320 }
1321
1322 if (reject) {
1323 amd->msgout_index = 0;
1324 amd->msgout_len = 1;
1325 amd->msgout_buf[0] = MSG_MESSAGE_REJECT;
1326 done = TRUE;
1327 response = TRUE;
1328 }
1329
1330 if (response)
1331 amd_write8(amd, SCSICMDREG, SET_ATN_CMD);
1332
1333 if (done && !response)
1334 /* Clear the outgoing message buffer */
1335 amd->msgout_len = 0;
1336
1337 /* Drop Ack */
1338 amd_write8(amd, SCSICMDREG, MSG_ACCEPTED_CMD);
1339
1340 return (done);
1341 }
1342
1343 static u_int
1344 amdfindclockrate(struct amd_softc *amd, u_int *period)
1345 {
1346 u_int i;
1347 u_int clockrate;
1348
1349 for (i = 0; i < sizeof(tinfo_sync_period); i++) {
1350 u_int8_t *table_entry;
1351
1352 table_entry = &tinfo_sync_period[i];
1353 if (*period <= *table_entry) {
1354 /*
1355 * When responding to a target that requests
1356 * sync, the requested rate may fall between
1357 * two rates that we can output, but still be
1358 * a rate that we can receive. Because of this,
1359 * we want to respond to the target with
1360 * the same rate that it sent to us even
1361 * if the period we use to send data to it
1362 * is lower. Only lower the response period
1363 * if we must.
1364 */
1365 if (i == 0) {
1366 *period = *table_entry;
1367 }
1368 break;
1369 }
1370 }
1371
1372 if (i == sizeof(tinfo_sync_period)) {
1373 		/* Too slow for us.  Use async transfers. */
1374 *period = 0;
1375 clockrate = 0;
1376 } else
1377 clockrate = i + 4;
1378
1379 return (clockrate);
1380 }
1381
1382 /*
1383 * See if we sent a particular extended message to the target.
1384 * If "full" is true, the target saw the full message.
1385 * If "full" is false, the target saw at least the first
1386 * byte of the message.
1387 */
1388 static int
1389 amdsentmsg(struct amd_softc *amd, u_int msgtype, int full)
1390 {
1391 int found;
1392 int index;
1393
1394 found = FALSE;
1395 index = 0;
1396
1397 while (index < amd->msgout_len) {
1398 if ((amd->msgout_buf[index] & MSG_IDENTIFYFLAG) != 0
1399 || amd->msgout_buf[index] == MSG_MESSAGE_REJECT)
1400 index++;
1401 else if (amd->msgout_buf[index] >= MSG_SIMPLE_Q_TAG
1402 && amd->msgout_buf[index] < MSG_IGN_WIDE_RESIDUE) {
1403 /* Skip tag type and tag id */
1404 index += 2;
1405 } else if (amd->msgout_buf[index] == MSG_EXTENDED) {
1406 /* Found a candidate */
1407 if (amd->msgout_buf[index+2] == msgtype) {
1408 u_int end_index;
1409
1410 end_index = index + 1
1411 + amd->msgout_buf[index + 1];
1412 if (full) {
1413 if (amd->msgout_index > end_index)
1414 found = TRUE;
1415 } else if (amd->msgout_index > index)
1416 found = TRUE;
1417 }
1418 break;
1419 } else {
1420 panic("amdsentmsg: Inconsistent msg buffer");
1421 }
1422 }
1423 return (found);
1424 }
1425
1426 static void
1427 amdconstructsdtr(struct amd_softc *amd, u_int period, u_int offset)
1428 {
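	/* Build the five byte extended SDTR message: MSG_EXTENDED, length (3), SDTR code, period, offset. */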
1429 amd->msgout_buf[amd->msgout_index++] = MSG_EXTENDED;
1430 amd->msgout_buf[amd->msgout_index++] = MSG_EXT_SDTR_LEN;
1431 amd->msgout_buf[amd->msgout_index++] = MSG_EXT_SDTR;
1432 amd->msgout_buf[amd->msgout_index++] = period;
1433 amd->msgout_buf[amd->msgout_index++] = offset;
1434 amd->msgout_len += 5;
1435 }
1436
1437 static int
1438 amdhandlemsgreject(struct amd_softc *amd)
1439 {
1440 /*
1441 * If we had an outstanding SDTR for this
1442 * target, this is a signal that the target
1443 * is refusing negotiation. Also watch out
1444 * for rejected tag messages.
1445 */
1446 struct amd_srb *srb;
1447 struct amd_target_info *targ_info;
1448 int response = FALSE;
1449
1450 srb = amd->active_srb;
1451 targ_info = &amd->tinfo[amd->cur_target];
1452 if (amdsentmsg(amd, MSG_EXT_SDTR, /*full*/FALSE)) {
1453 /* note asynch xfers and clear flag */
1454 amdsetsync(amd, amd->cur_target, /*clockrate*/0,
1455 /*period*/0, /*offset*/0,
1456 AMD_TRANS_ACTIVE|AMD_TRANS_GOAL);
1457 printf("amd%d:%d: refuses synchronous negotiation. "
1458 "Using asynchronous transfers\n",
1459 amd->unit, amd->cur_target);
1460 } else if ((srb != NULL)
1461 && (srb->pccb->ccb_h.flags & CAM_TAG_ACTION_VALID) != 0) {
1462 struct ccb_trans_settings neg;
1463
1464 printf("amd%d:%d: refuses tagged commands. Performing "
1465 "non-tagged I/O\n", amd->unit, amd->cur_target);
1466
1467 amdsettags(amd, amd->cur_target, FALSE);
1468 neg.flags = 0;
1469 neg.valid = CCB_TRANS_TQ_VALID;
1470 xpt_setup_ccb(&neg.ccb_h, srb->pccb->ccb_h.path, /*priority*/1);
1471 xpt_async(AC_TRANSFER_NEG, srb->pccb->ccb_h.path, &neg);
1472
1473 /*
1474 * Resend the identify for this CCB as the target
1475 * may believe that the selection is invalid otherwise.
1476 */
1477 if (amd->msgout_len != 0)
1478 bcopy(&amd->msgout_buf[0], &amd->msgout_buf[1],
1479 amd->msgout_len);
1480 amd->msgout_buf[0] = MSG_IDENTIFYFLAG
1481 | srb->pccb->ccb_h.target_lun;
1482 amd->msgout_len++;
1483 if ((targ_info->disc_tag & AMD_CUR_DISCENB) != 0
1484 && (srb->pccb->ccb_h.flags & CAM_DIS_DISCONNECT) == 0)
1485 amd->msgout_buf[0] |= MSG_IDENTIFY_DISCFLAG;
1486
1487 srb->pccb->ccb_h.flags &= ~CAM_TAG_ACTION_VALID;
1488
1489 /*
1490 * Requeue all tagged commands for this target
1491 		 * currently in our possession so they can be
1492 * converted to untagged commands.
1493 */
1494 amdcompletematch(amd, amd->cur_target, amd->cur_lun,
1495 AMD_TAG_WILDCARD, &amd->waiting_srbs,
1496 CAM_DEV_QFRZN|CAM_REQUEUE_REQ);
1497 } else {
1498 /*
1499 * Otherwise, we ignore it.
1500 */
1501 printf("amd%d:%d: Message reject received -- ignored\n",
1502 amd->unit, amd->cur_target);
1503 }
1504 return (response);
1505 }
1506
1507 #if 0
1508 if (!(pSRB->SRBState & SRB_MSGIN_MULTI)) {
1509 if (bval == MSG_DISCONNECT) {
1510 pSRB->SRBState = SRB_DISCONNECT;
1511 } else if (bval == MSG_SAVEDATAPOINTER) {
1512 goto min6;
1513 } else if ((bval == MSG_EXTENDED)
1514 || ((bval >= MSG_SIMPLE_Q_TAG)
1515 && (bval <= MSG_ORDERED_Q_TAG))) {
1516 pSRB->SRBState |= SRB_MSGIN_MULTI;
1517 pSRB->MsgInBuf[0] = bval;
1518 pSRB->MsgCnt = 1;
1519 pSRB->pMsgPtr = &pSRB->MsgInBuf[1];
1520 } else if (bval == MSG_MESSAGE_REJECT) {
1521 amd_write8(amd, SCSICMDREG, RESET_ATN_CMD);
1522
1523 if (pSRB->SRBState & DO_SYNC_NEGO) {
1524 goto set_async;
1525 }
1526 } else if (bval == MSG_RESTOREPOINTERS) {
1527 goto min6;
1528 } else {
1529 goto min6;
1530 }
1531 } else { /* minx: */
1532 *pSRB->pMsgPtr = bval;
1533 pSRB->MsgCnt++;
1534 pSRB->pMsgPtr++;
1535 if ((pSRB->MsgInBuf[0] >= MSG_SIMPLE_Q_TAG)
1536 && (pSRB->MsgInBuf[0] <= MSG_ORDERED_Q_TAG)) {
1537 if (pSRB->MsgCnt == 2) {
1538 pSRB->SRBState = 0;
1539 pSRB = &amd->SRB_array[pSRB->MsgInBuf[1]];
1540 				if ((pSRB->SRBState & SRB_DISCONNECT) == 0) {
1541 pSRB = amd->pTmpSRB;
1542 pSRB->SRBState = SRB_UNEXPECT_RESEL;
1543 pDCB->pActiveSRB = pSRB;
1544 pSRB->MsgOutBuf[0] = MSG_ABORT_TAG;
1545 EnableMsgOut2(amd, pSRB);
1546 } else {
1547 if (pDCB->DCBFlag & ABORT_DEV_) {
1548 pSRB->SRBState = SRB_ABORT_SENT;
1549 EnableMsgOut1(amd, pSRB);
1550 }
1551 pDCB->pActiveSRB = pSRB;
1552 pSRB->SRBState = SRB_DATA_XFER;
1553 }
1554 }
1555 } else if ((pSRB->MsgInBuf[0] == MSG_EXTENDED)
1556 && (pSRB->MsgCnt == 5)) {
1557 pSRB->SRBState &= ~(SRB_MSGIN_MULTI + DO_SYNC_NEGO);
1558 if ((pSRB->MsgInBuf[1] != 3)
1559 || (pSRB->MsgInBuf[2] != 1)) { /* reject_msg: */
1560 pSRB->MsgCnt = 1;
1561 pSRB->MsgInBuf[0] = MSG_MESSAGE_REJECT;
1562 amd_write8(amd, SCSICMDREG, SET_ATN_CMD);
1563 } else if (!(pSRB->MsgInBuf[3])
1564 || !(pSRB->MsgInBuf[4])) {
1565 set_async: /* set async */
1566
1567 pDCB = pSRB->pSRBDCB;
1568 /* disable sync & sync nego */
1569 pDCB->SyncMode &= ~(SYNC_ENABLE|SYNC_NEGO_DONE);
1570 pDCB->SyncPeriod = 0;
1571 pDCB->SyncOffset = 0;
1572
1573 pDCB->tinfo.goal.period = 0;
1574 pDCB->tinfo.goal.offset = 0;
1575
1576 pDCB->tinfo.current.period = 0;
1577 pDCB->tinfo.current.offset = 0;
1578 pDCB->tinfo.current.width =
1579 MSG_EXT_WDTR_BUS_8_BIT;
1580
1581 pDCB->CtrlR3 = FAST_CLK; /* non_fast */
1582 pDCB->CtrlR4 &= 0x3f;
1583 pDCB->CtrlR4 |= EATER_25NS;
1584 goto re_prog;
1585 } else {/* set sync */
1586
1587 pDCB = pSRB->pSRBDCB;
1588 /* enable sync & sync nego */
1589 pDCB->SyncMode |= SYNC_ENABLE|SYNC_NEGO_DONE;
1590
1591 /* set sync offset */
1592 pDCB->SyncOffset &= 0x0f0;
1593 pDCB->SyncOffset |= pSRB->MsgInBuf[4];
1594
1595 /* set sync period */
1596 pDCB->MaxNegoPeriod = pSRB->MsgInBuf[3];
1597
1598 wval = (u_int16_t) pSRB->MsgInBuf[3];
1599 wval = wval << 2;
1600 wval--;
1601 wval1 = wval / 25;
1602 if ((wval1 * 25) != wval) {
1603 wval1++;
1604 }
1605 bval = FAST_CLK|FAST_SCSI;
1606 pDCB->CtrlR4 &= 0x3f;
1607 if (wval1 >= 8) {
1608 /* Fast SCSI */
1609 wval1--;
1610 bval = FAST_CLK;
1611 pDCB->CtrlR4 |= EATER_25NS;
1612 }
1613 pDCB->CtrlR3 = bval;
1614 pDCB->SyncPeriod = (u_int8_t) wval1;
1615
1616 pDCB->tinfo.goal.period =
1617 tinfo_sync_period[pDCB->SyncPeriod - 4];
1618 pDCB->tinfo.goal.offset = pDCB->SyncOffset;
1619 pDCB->tinfo.current.period =
1620 				    tinfo_sync_period[pDCB->SyncPeriod - 4];
1621 pDCB->tinfo.current.offset = pDCB->SyncOffset;
1622
1623 /*
1624 * program SCSI control register
1625 */
1626 re_prog:
1627 amd_write8(amd, SYNCPERIOREG, pDCB->SyncPeriod);
1628 amd_write8(amd, SYNCOFFREG, pDCB->SyncOffset);
1629 amd_write8(amd, CNTLREG3, pDCB->CtrlR3);
1630 amd_write8(amd, CNTLREG4, pDCB->CtrlR4);
1631 }
1632 }
1633 }
1634 min6:
1635 amd_write8(amd, SCSICMDREG, MSG_ACCEPTED_CMD);
1636 return (SCSI_NOP0);
1637 }
1638 #endif
1639
1640 static u_int
1641 amd_DataOutPhase1(struct amd_softc *amd, struct amd_srb *pSRB, u_int scsistat)
1642 {
1643 DataIO_Comm(amd, pSRB, WRITE_DIRECTION);
1644 return (scsistat);
1645 }
1646
1647 static u_int
1648 amd_DataInPhase1(struct amd_softc *amd, struct amd_srb *pSRB, u_int scsistat)
1649 {
1650 DataIO_Comm(amd, pSRB, READ_DIRECTION);
1651 return (scsistat);
1652 }
1653
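/*
* Program the SCSI transfer counter and the DMA engine for the current
* scatter/gather segment and kick off an information transfer in the
* requested direction.  If no segments remain, the target is asking for
* more data than we have, so note an over/under-run and pad the transfer.
*/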
1654 static void
1655 DataIO_Comm(struct amd_softc *amd, struct amd_srb *pSRB, u_int ioDir)
1656 {
1657 struct amd_sg * psgl;
1658 u_int32_t lval;
1659
1660 if (pSRB->SGIndex < pSRB->SGcount) {
1661 amd_write8(amd, DMA_Cmd, DMA_IDLE_CMD|ioDir);/* |EN_DMA_INT */
1662
1663 if (!pSRB->SGToBeXferLen) {
1664 psgl = pSRB->pSGlist;
1665 pSRB->SGPhysAddr = psgl->SGXPtr;
1666 pSRB->SGToBeXferLen = psgl->SGXLen;
1667 }
1668 lval = pSRB->SGToBeXferLen;
1669 amd_write8(amd, CTCREG_LOW, lval);
1670 amd_write8(amd, CTCREG_MID, lval >> 8);
1671 amd_write8(amd, CURTXTCNTREG, lval >> 16);
1672
1673 amd_write32(amd, DMA_XferCnt, pSRB->SGToBeXferLen);
1674
1675 amd_write32(amd, DMA_XferAddr, pSRB->SGPhysAddr);
1676
1677 pSRB->SRBState = SRB_DATA_XFER;
1678
1679 amd_write8(amd, SCSICMDREG, DMA_COMMAND|INFO_XFER_CMD);
1680
1681 amd_write8(amd, DMA_Cmd, DMA_IDLE_CMD|ioDir); /* |EN_DMA_INT */
1682
1683 amd_write8(amd, DMA_Cmd, DMA_START_CMD|ioDir);/* |EN_DMA_INT */
1684 } else { /* xfer pad */
1685 if (pSRB->SGcount) {
1686 pSRB->AdaptStatus = H_OVER_UNDER_RUN;
1687 pSRB->SRBStatus |= OVER_RUN;
1688 }
1689 amd_write8(amd, CTCREG_LOW, 0);
1690 amd_write8(amd, CTCREG_MID, 0);
1691 amd_write8(amd, CURTXTCNTREG, 0);
1692
1693 pSRB->SRBState |= SRB_XFERPAD;
1694 amd_write8(amd, SCSICMDREG, DMA_COMMAND|XFER_PAD_BYTE);
1695 }
1696 }
1697
1698 static u_int
1699 amd_CommandPhase1(struct amd_softc *amd, struct amd_srb *srb, u_int scsistat)
1700 {
1701 amd_write8(amd, SCSICMDREG, RESET_ATN_CMD);
1702 amd_write8(amd, SCSICMDREG, CLEAR_FIFO_CMD);
1703
1704 amdsetupcommand(amd, srb);
1705
1706 srb->SRBState = SRB_COMMAND;
1707 amd_write8(amd, SCSICMDREG, INFO_XFER_CMD);
1708 return (scsistat);
1709 }
1710
1711 static u_int
1712 amd_StatusPhase1(struct amd_softc *amd, struct amd_srb *pSRB, u_int scsistat)
1713 {
1714 amd_write8(amd, SCSICMDREG, CLEAR_FIFO_CMD);
1715 pSRB->SRBState = SRB_STATUS;
1716 amd_write8(amd, SCSICMDREG, INITIATOR_CMD_CMPLTE);
1717 return (scsistat);
1718 }
1719
1720 static u_int
1721 amd_MsgOutPhase1(struct amd_softc *amd, struct amd_srb *pSRB, u_int scsistat)
1722 {
1723 amd_write8(amd, SCSICMDREG, CLEAR_FIFO_CMD);
1724
1725 if (amd->msgout_len == 0) {
1726 amd->msgout_buf[0] = MSG_NOOP;
1727 amd->msgout_len = 1;
1728 }
1729 amd_write8_multi(amd, SCSIFIFOREG, amd->msgout_buf, amd->msgout_len);
1730 amd_write8(amd, SCSICMDREG, INFO_XFER_CMD);
1731 return (scsistat);
1732 }
1733
1734 static u_int
1735 amd_MsgInPhase1(struct amd_softc *amd, struct amd_srb *pSRB, u_int scsistat)
1736 {
1737 amd_write8(amd, SCSICMDREG, CLEAR_FIFO_CMD);
1738 amd_write8(amd, SCSICMDREG, INFO_XFER_CMD);
1739 return (scsistat);
1740 }
1741
1742 static u_int
1743 amd_NopPhase(struct amd_softc *amd, struct amd_srb *pSRB, u_int scsistat)
1744 {
1745 return (scsistat);
1746 }
1747
1748 static void
1749 amd_Disconnect(struct amd_softc * amd)
1750 {
1751 struct amd_srb *srb;
1752 int target;
1753 int lun;
1754
1755 srb = amd->active_srb;
1756 amd->active_srb = NULL;
1757 amd->last_phase = SCSI_BUS_FREE;
1758 amd_write8(amd, SCSICMDREG, EN_SEL_RESEL);
1759 target = amd->cur_target;
1760 lun = amd->cur_lun;
1761
1762 if (srb == NULL) {
1763 /* Invalid reselection */
1764 amdrunwaiting(amd);
1765 } else if (srb->SRBState & SRB_ABORT_SENT) {
1766 		/* Clean up and complete this srb */
1767 #if 0
1768 		while ((pSRB = TAILQ_FIRST(&amd->running_srbs)) != NULL) {
1769 /* XXX What about "done'ing" these srbs??? */
1770 if (pSRB->pSRBDCB == pDCB) {
1771 TAILQ_REMOVE(&amd->running_srbs, pSRB, links);
1772 TAILQ_INSERT_HEAD(&amd->free_srbs, pSRB, links);
1773 }
1774 }
1775 amdrunwaiting(amd);
1776 #endif
1777 } else {
1778 if ((srb->SRBState & (SRB_START | SRB_MSGOUT))
1779 || !(srb->SRBState & (SRB_DISCONNECT | SRB_COMPLETED))) {
1780 srb->TargetStatus = AMD_SCSI_STAT_SEL_TIMEOUT;
1781 goto disc1;
1782 } else if (srb->SRBState & SRB_DISCONNECT) {
1783 if (!(srb->pccb->ccb_h.flags & CAM_TAG_ACTION_VALID))
1784 amd->untagged_srbs[target][lun] = srb;
1785 amdrunwaiting(amd);
1786 } else if (srb->SRBState & SRB_COMPLETED) {
1787 disc1:
1788 srb->SRBState = SRB_FREE;
1789 SRBdone(amd, srb);
1790 }
1791 }
1792 return;
1793 }
1794
1795 static void
1796 amd_Reselect(struct amd_softc *amd)
1797 {
1798 struct amd_target_info *tinfo;
1799 u_int16_t disc_count;
1800
1801 amd_clear_msg_state(amd);
1802 if (amd->active_srb != NULL) {
1803 /* Requeue the SRB for our attempted Selection */
1804 TAILQ_REMOVE(&amd->running_srbs, amd->active_srb, links);
1805 TAILQ_INSERT_HEAD(&amd->waiting_srbs, amd->active_srb, links);
1806 amd->active_srb = NULL;
1807 }
1808 /* get ID */
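/*
* The FIFO holds the reselection ID byte (a bit mask containing the
* reselecting target's ID bit and our own) followed by its IDENTIFY
* message.  Strip our host ID bit and convert the remaining bit to a
* target number; the low three bits of the IDENTIFY message are the LUN.
*/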
1809 amd->cur_target = amd_read8(amd, SCSIFIFOREG);
1810 amd->cur_target ^= amd->HostID_Bit;
1811 amd->cur_target = ffs(amd->cur_target) - 1;
1812 amd->cur_lun = amd_read8(amd, SCSIFIFOREG) & 7;
1813 tinfo = &amd->tinfo[amd->cur_target];
1814 amd->active_srb = amd->untagged_srbs[amd->cur_target][amd->cur_lun];
1815 disc_count = amd->disc_count[amd->cur_target][amd->cur_lun];
1816 if (disc_count == 0) {
1817 printf("amd%d: Unexpected reselection for target %d, "
1818 "Issuing Abort\n", amd->unit, amd->cur_target);
1819 amd->msgout_buf[0] = MSG_ABORT;
1820 amd->msgout_len = 1;
1821 amd_write8(amd, SCSICMDREG, SET_ATN_CMD);
1822 }
1823 if (amd->active_srb != NULL) {
1824 amd->disc_count[amd->cur_target][amd->cur_lun]--;
1825 amd->untagged_srbs[amd->cur_target][amd->cur_lun] = NULL;
1826 }
1827
1828 amd_write8(amd, SCSIDESTIDREG, amd->cur_target);
1829 amd_write8(amd, SYNCPERIOREG, tinfo->sync_period_reg);
1830 amd_write8(amd, SYNCOFFREG, tinfo->sync_offset_reg);
1831 amd_write8(amd, CNTLREG1, tinfo->CtrlR1);
1832 amd_write8(amd, CNTLREG3, tinfo->CtrlR3);
1833 amd_write8(amd, CNTLREG4, tinfo->CtrlR4);
1834 amd_write8(amd, SCSICMDREG, MSG_ACCEPTED_CMD);/* drop /ACK */
1835 amd->last_phase = SCSI_NOP0;
1836 }
1837
1838 static void
1839 SRBdone(struct amd_softc *amd, struct amd_srb *pSRB)
1840 {
1841 u_int8_t bval, i, status;
1842 union ccb *pccb;
1843 struct ccb_scsiio *pcsio;
1844 int intflag;
1845 struct amd_sg *ptr2;
1846 u_int32_t swlval;
1847 u_int target_id, target_lun;
1848
1849 pccb = pSRB->pccb;
1850 pcsio = &pccb->csio;
1851 target_id = pSRB->pccb->ccb_h.target_id;
1852 target_lun = pSRB->pccb->ccb_h.target_lun;
1853
1854 CAM_DEBUG(pccb->ccb_h.path, CAM_DEBUG_TRACE,
1855 ("SRBdone - TagNumber %d\n", pSRB->TagNumber));
1856
1857 if ((pccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
1858 bus_dmasync_op_t op;
1859
1860 if ((pccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN)
1861 op = BUS_DMASYNC_POSTREAD;
1862 else
1863 op = BUS_DMASYNC_POSTWRITE;
1864 bus_dmamap_sync(amd->buffer_dmat, pSRB->dmamap, op);
1865 bus_dmamap_unload(amd->buffer_dmat, pSRB->dmamap);
1866 }
1867
1868 status = pSRB->TargetStatus;
1869 	pccb->ccb_h.status = CAM_REQ_CMP;
1871 if (pSRB->SRBFlag & AUTO_REQSENSE) {
1872 pSRB->SRBFlag &= ~AUTO_REQSENSE;
1873 pSRB->AdaptStatus = 0;
1874 pSRB->TargetStatus = SCSI_STATUS_CHECK_COND;
1875
1876 if (status == SCSI_STATUS_CHECK_COND) {
1877 pccb->ccb_h.status = CAM_SEL_TIMEOUT;
1878 goto ckc_e;
1879 }
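		/* Restore the original CDB bytes and transfer count saved by RequestSense(). */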
1880 *((u_int32_t *)&(pSRB->CmdBlock[0])) = pSRB->Segment0[0];
1881
1882 pcsio->sense_resid = pcsio->sense_len
1883 - pSRB->TotalXferredLen;
1884 pSRB->TotalXferredLen = pSRB->Segment1[1];
1885 if (pSRB->TotalXferredLen) {
1886 /* ???? */
1887 pcsio->resid = pcsio->dxfer_len
1888 - pSRB->TotalXferredLen;
1889 /* The resid field contains valid data */
1890 /* Flush resid bytes on complete */
1891 } else {
1892 pcsio->scsi_status = SCSI_STATUS_CHECK_COND;
1893 }
1894 pccb->ccb_h.status = CAM_AUTOSNS_VALID|CAM_SCSI_STATUS_ERROR;
1895 goto ckc_e;
1896 }
1897 if (status) {
1898 if (status == SCSI_STATUS_CHECK_COND) {
1899
1900 if ((pSRB->SGIndex < pSRB->SGcount)
1901 && (pSRB->SGcount) && (pSRB->SGToBeXferLen)) {
1902 bval = pSRB->SGcount;
1903 swlval = pSRB->SGToBeXferLen;
1904 ptr2 = pSRB->pSGlist;
1905 ptr2++;
1906 for (i = pSRB->SGIndex + 1; i < bval; i++) {
1907 swlval += ptr2->SGXLen;
1908 ptr2++;
1909 }
1910 /* ??????? */
1911 pcsio->resid = (u_int32_t) swlval;
1912
1913 #ifdef AMD_DEBUG0
1914 printf("XferredLen=%8x,NotYetXferLen=%8x,",
1915 pSRB->TotalXferredLen, swlval);
1916 #endif
1917 }
1918 if ((pcsio->ccb_h.flags & CAM_DIS_AUTOSENSE) == 0) {
1919 #ifdef AMD_DEBUG0
1920 printf("RequestSense..................\n");
1921 #endif
1922 RequestSense(amd, pSRB);
1923 return;
1924 }
1925 pcsio->scsi_status = SCSI_STATUS_CHECK_COND;
1926 pccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
1927 goto ckc_e;
1928 } else if (status == SCSI_STATUS_QUEUE_FULL) {
1929 pSRB->AdaptStatus = 0;
1930 pSRB->TargetStatus = 0;
1931 pcsio->scsi_status = SCSI_STATUS_QUEUE_FULL;
1932 pccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
1933 goto ckc_e;
1934 } else if (status == AMD_SCSI_STAT_SEL_TIMEOUT) {
1935 pSRB->AdaptStatus = H_SEL_TIMEOUT;
1936 pSRB->TargetStatus = 0;
1937
1938 pcsio->scsi_status = AMD_SCSI_STAT_SEL_TIMEOUT;
1939 pccb->ccb_h.status = CAM_SEL_TIMEOUT;
1940 } else if (status == SCSI_STATUS_BUSY) {
1941 #ifdef AMD_DEBUG0
1942 printf("DC390: target busy at %s %d\n",
1943 __FILE__, __LINE__);
1944 #endif
1945 pcsio->scsi_status = SCSI_STATUS_BUSY;
1946 pccb->ccb_h.status = CAM_SCSI_BUSY;
1947 } else if (status == SCSI_STATUS_RESERV_CONFLICT) {
1948 #ifdef AMD_DEBUG0
1949 printf("DC390: target reserved at %s %d\n",
1950 __FILE__, __LINE__);
1951 #endif
1952 pcsio->scsi_status = SCSI_STATUS_RESERV_CONFLICT;
1953 pccb->ccb_h.status = CAM_SCSI_STATUS_ERROR; /* XXX */
1954 } else {
1955 pSRB->AdaptStatus = 0;
1956 #ifdef AMD_DEBUG0
1957 printf("DC390: driver stuffup at %s %d\n",
1958 __FILE__, __LINE__);
1959 #endif
1960 pccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
1961 }
1962 } else {
1963 status = pSRB->AdaptStatus;
1964 if (status & H_OVER_UNDER_RUN) {
1965 pSRB->TargetStatus = 0;
1966
1967 pccb->ccb_h.status = CAM_DATA_RUN_ERR;
1968 } else if (pSRB->SRBStatus & PARITY_ERROR) {
1969 #ifdef AMD_DEBUG0
1970 printf("DC390: driver stuffup %s %d\n",
1971 __FILE__, __LINE__);
1972 #endif
1973 /* Driver failed to perform operation */
1974 pccb->ccb_h.status = CAM_UNCOR_PARITY;
1975 } else { /* No error */
1976 pSRB->AdaptStatus = 0;
1977 pSRB->TargetStatus = 0;
1978 pcsio->resid = 0;
1979 /* there is no error, (sense is invalid) */
1980 }
1981 }
1982 ckc_e:
1983 intflag = splcam();
1984 if ((pccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
1985 /* CAM request not yet complete =>device_Q frozen */
1986 xpt_freeze_devq(pccb->ccb_h.path, 1);
1987 pccb->ccb_h.status |= CAM_DEV_QFRZN;
1988 }
1989 TAILQ_REMOVE(&amd->running_srbs, pSRB, links);
1990 TAILQ_INSERT_HEAD(&amd->free_srbs, pSRB, links);
1991 amdrunwaiting(amd);
1992 splx(intflag);
1993 xpt_done(pccb);
1994
1995 }
1996
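/*
 * amd_ResetSCSIBus appears to force a SCSI bus reset by idling the DMA
 * engine and issuing RST_SCSI_BUS_CMD to the chip; the resulting
 * bus-reset interrupt is then handled in amd_ScsiRstDetect().
 */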
1997 static void
1998 amd_ResetSCSIBus(struct amd_softc * amd)
1999 {
2000 int intflag;
2001
2002 intflag = splcam();
2003 amd->ACBFlag |= RESET_DEV;
2004 amd_write8(amd, DMA_Cmd, DMA_IDLE_CMD);
2005 amd_write8(amd, SCSICMDREG, RST_SCSI_BUS_CMD);
2006 splx(intflag);
2007 return;
2008 }
2009
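/*
 * Handle a detected SCSI bus reset: wait roughly one second for the bus
 * to settle, quiesce the DMA engine and FIFO, and then either note that
 * our own reset completed (RESET_DEV) or fail back all running and
 * waiting SRBs with CAM_SCSI_BUS_RESET and restart the queue.
 */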
2010 static void
2011 amd_ScsiRstDetect(struct amd_softc * amd)
2012 {
2013 int intflag;
2014 u_int32_t wlval;
2015
2016 #ifdef AMD_DEBUG0
2017 printf("amd_ScsiRstDetect \n");
2018 #endif
2019
2020 wlval = 1000;
2021 while (--wlval) { /* delay 1 sec */
2022 DELAY(1000);
2023 }
2024 intflag = splcam();
2025
2026 amd_write8(amd, DMA_Cmd, DMA_IDLE_CMD);
2027 amd_write8(amd, SCSICMDREG, CLEAR_FIFO_CMD);
2028
2029 if (amd->ACBFlag & RESET_DEV) {
2030 amd->ACBFlag |= RESET_DONE;
2031 } else {
2032 amd->ACBFlag |= RESET_DETECT;
2033 ResetDevParam(amd);
2034 amdcompletematch(amd, CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD,
2035 AMD_TAG_WILDCARD, &amd->running_srbs,
2036 CAM_DEV_QFRZN|CAM_SCSI_BUS_RESET);
2037 amdcompletematch(amd, CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD,
2038 AMD_TAG_WILDCARD, &amd->waiting_srbs,
2039 CAM_DEV_QFRZN|CAM_SCSI_BUS_RESET);
2040 amd->active_srb = NULL;
2041 amd->ACBFlag = 0;
2042 amdrunwaiting(amd);
2043 }
2044 splx(intflag);
2045 return;
2046 }
2047
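/*
 * Issue an automatic REQUEST SENSE for a command that returned CHECK
 * CONDITION: the original CDB and transfer counts are stashed in
 * Segment0/Segment1, a 6-byte REQUEST SENSE CDB pointing at the CCB's
 * sense buffer is built in place, and the SRB is restarted.
 */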
2048 static void
2049 RequestSense(struct amd_softc *amd, struct amd_srb *pSRB)
2050 {
2051 union ccb *pccb;
2052 struct ccb_scsiio *pcsio;
2053
2054 pccb = pSRB->pccb;
2055 pcsio = &pccb->csio;
2056
2057 pSRB->SRBFlag |= AUTO_REQSENSE;
2058 pSRB->Segment0[0] = *((u_int32_t *) & (pSRB->CmdBlock[0]));
2059 pSRB->Segment0[1] = *((u_int32_t *) & (pSRB->CmdBlock[4]));
2060 pSRB->Segment1[0] = (pSRB->ScsiCmdLen << 8) + pSRB->SGcount;
2061 pSRB->Segment1[1] = pSRB->TotalXferredLen;
2062
2063 pSRB->AdaptStatus = 0;
2064 pSRB->TargetStatus = 0;
2065
2066 pSRB->Segmentx.SGXPtr = (u_int32_t) vtophys(&pcsio->sense_data);
2067 pSRB->Segmentx.SGXLen = (u_int32_t) pcsio->sense_len;
2068
2069 pSRB->pSGlist = &pSRB->Segmentx;
2070 pSRB->SGcount = 1;
2071 pSRB->SGIndex = 0;
2072
2073 *((u_int32_t *) & (pSRB->CmdBlock[0])) = 0x00000003;
2074 pSRB->CmdBlock[1] = pSRB->pccb->ccb_h.target_lun << 5;
2075 *((u_int16_t *) & (pSRB->CmdBlock[4])) = pcsio->sense_len;
2076 pSRB->ScsiCmdLen = 6;
2077
2078 pSRB->TotalXferredLen = 0;
2079 pSRB->SGToBeXferLen = 0;
2080 if (amdstart(amd, pSRB) != 0) {
2081 TAILQ_REMOVE(&amd->running_srbs, pSRB, links);
2082 TAILQ_INSERT_HEAD(&amd->waiting_srbs, pSRB, links);
2083 }
2084 }
2085
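/*
 * Invalid-command interrupt: if the active SRB was still selecting or
 * sending a message, clear the chip FIFO and let the normal phase
 * handling recover.
 */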
2086 static void
2087 amd_InvalidCmd(struct amd_softc * amd)
2088 {
2089 struct amd_srb *srb;
2090
2091 srb = amd->active_srb;
2092 if (srb->SRBState & (SRB_START|SRB_MSGOUT))
2093 amd_write8(amd, SCSICMDREG, CLEAR_FIFO_CMD);
2094 }
2095
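/*
 * Tag each SRB in the softc's SRB_array with its index and place it on
 * the free list.
 */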
2096 void
2097 amd_linkSRB(struct amd_softc *amd)
2098 {
2099 u_int16_t count, i;
2100 struct amd_srb *psrb;
2101
2102 count = amd->SRBCount;
2103
2104 for (i = 0; i < count; i++) {
2105 psrb = (struct amd_srb *)&amd->SRB_array[i];
2106 psrb->TagNumber = i;
2107 TAILQ_INSERT_TAIL(&amd->free_srbs, psrb, links);
2108 }
2109 }
2110
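/*
 * The routines below bit-bang the board's serial configuration EEPROM
 * through PCI configuration register 0x80.  The exact wiring is Tekram
 * specific, so the bit values used here (0x80 enable, 0x40 data/clock)
 * are taken as-is from the original driver.
 */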
2111 static void
2112 amd_EnDisableCE(struct amd_softc *amd, int mode, int *regval)
2113 {
2114 if (mode == ENABLE_CE) {
2115 *regval = 0xc0;
2116 } else {
2117 *regval = 0x80;
2118 }
2119 pci_write_config(amd->dev, *regval, 0, /*bytes*/1);
2120 if (mode == DISABLE_CE) {
2121 pci_write_config(amd->dev, *regval, 0, /*bytes*/1);
2122 }
2123 DELAY(160);
2124 }
2125
2126 static void
2127 amd_EEpromOutDI(struct amd_softc *amd, int *regval, int Carry)
2128 {
2129 u_int bval;
2130
2131 bval = 0;
2132 if (Carry) {
2133 bval = 0x40;
2134 *regval = 0x80;
2135 pci_write_config(amd->dev, *regval, bval, /*bytes*/1);
2136 }
2137 DELAY(160);
2138 bval |= 0x80;
2139 pci_write_config(amd->dev, *regval, bval, /*bytes*/1);
2140 DELAY(160);
2141 pci_write_config(amd->dev, *regval, 0, /*bytes*/1);
2142 DELAY(160);
2143 }
2144
2145 static int
2146 amd_EEpromInDO(struct amd_softc *amd)
2147 {
2148 pci_write_config(amd->dev, 0x80, 0x80, /*bytes*/1);
2149 DELAY(160);
2150 pci_write_config(amd->dev, 0x80, 0x40, /*bytes*/1);
2151 DELAY(160);
2152 if (pci_read_config(amd->dev, 0, /*bytes*/1) == 0x22)
2153 return (1);
2154 return (0);
2155 }
2156
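/*
 * Clock in one 16-bit EEPROM data word, most significant bit first.
 */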
2157 static u_int16_t
2158 EEpromGetData1(struct amd_softc *amd)
2159 {
2160 u_int i;
2161 u_int carryFlag;
2162 u_int16_t wval;
2163
2164 wval = 0;
2165 for (i = 0; i < 16; i++) {
2166 wval <<= 1;
2167 carryFlag = amd_EEpromInDO(amd);
2168 wval |= carryFlag;
2169 }
2170 return (wval);
2171 }
2172
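/*
 * Shift out the start bit followed by the 8-bit EEPROM opcode/address
 * in EEpromCmd, most significant bit first (nine clocks in total).
 */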
2173 static void
2174 amd_Prepare(struct amd_softc *amd, int *regval, u_int8_t EEpromCmd)
2175 {
2176 u_int i, j;
2177 int carryFlag;
2178
2179 carryFlag = 1;
2180 j = 0x80;
2181 for (i = 0; i < 9; i++) {
2182 amd_EEpromOutDI(amd, regval, carryFlag);
2183 carryFlag = (EEpromCmd & j) ? 1 : 0;
2184 j >>= 1;
2185 }
2186 }
2187
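/*
 * Read all 0x40 16-bit words (128 bytes) of the configuration EEPROM
 * into amd->eepromBuf, issuing one READ command per word.
 */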
2188 static void
2189 amd_ReadEEprom(struct amd_softc *amd)
2190 {
2191 int regval;
2192 u_int i;
2193 u_int16_t *ptr;
2194 u_int8_t cmd;
2195
2196 ptr = (u_int16_t *)&amd->eepromBuf[0];
2197 cmd = EEPROM_READ;
2198 for (i = 0; i < 0x40; i++) {
2199 		amd_EnDisableCE(amd, ENABLE_CE, &regval);
2200 		amd_Prepare(amd, &regval, cmd);
2201 *ptr = EEpromGetData1(amd);
2202 ptr++;
2203 cmd++;
2204 		amd_EnDisableCE(amd, DISABLE_CE, &regval);
2205 }
2206 }
2207
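/*
 * Fall-back configuration used when the EEPROM contents do not pass the
 * checksum test: tagged queueing, disconnection, sync negotiation and
 * parity checking enabled for every target, host SCSI ID 7.
 */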
2208 static void
2209 amd_load_defaults(struct amd_softc *amd)
2210 {
2211 int target;
2212
2213 bzero(&amd->eepromBuf, sizeof amd->eepromBuf);
2214 for (target = 0; target < MAX_SCSI_ID; target++)
2215 amd->eepromBuf[target << 2] =
2216 (TAG_QUEUING|EN_DISCONNECT|SYNC_NEGO|PARITY_CHK);
2217 amd->eepromBuf[EE_ADAPT_SCSI_ID] = 7;
2218 amd->eepromBuf[EE_MODE2] = ACTIVE_NEGATION|LUN_CHECK|GREATER_1G;
2219 amd->eepromBuf[EE_TAG_CMD_NUM] = 4;
2220 }
2221
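/*
 * The EEPROM image is considered valid only if its 16-bit words sum to
 * EE_CHECKSUM; otherwise the defaults above are loaded instead.
 */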
2222 static void
2223 amd_load_eeprom_or_defaults(struct amd_softc *amd)
2224 {
2225 u_int16_t wval, *ptr;
2226 u_int8_t i;
2227
2228 amd_ReadEEprom(amd);
2229 wval = 0;
2230 ptr = (u_int16_t *) & amd->eepromBuf[0];
2231 for (i = 0; i < EE_DATA_SIZE; i += 2, ptr++)
2232 wval += *ptr;
2233
2234 if (wval != EE_CHECKSUM) {
2235 if (bootverbose)
2236 printf("amd%d: SEEPROM data unavailable. "
2237 "Using default device parameters.\n",
2238 amd->unit);
2239 amd_load_defaults(amd);
2240 }
2241 }
2242
2243 /*
2244 **********************************************************************
2245  * Function : static int amd_init (device_t dev)
2246  * Purpose  : initialize the internal structures for a given SCSI host
2247  * Inputs   : dev - the bus/device handle for this host adapter
2248 **********************************************************************
2249 */
2250 static int
2251 amd_init(device_t dev)
2252 {
2253 struct amd_softc *amd = device_get_softc(dev);
2254 struct resource *iores;
2255 int i, rid;
2256 u_int bval;
2257
2258 rid = PCI_BASE_ADDR0;
2259 iores = bus_alloc_resource(dev, SYS_RES_IOPORT, &rid, 0, ~0, 1,
2260 RF_ACTIVE);
2261 if (iores == NULL) {
2262 if (bootverbose)
2263 printf("amd_init: bus_alloc_resource failure!\n");
2264 return ENXIO;
2265 }
2266 amd->tag = rman_get_bustag(iores);
2267 amd->bsh = rman_get_bushandle(iores);
2268
2269 /* DMA tag for mapping buffers into device visible space. */
2270 if (bus_dma_tag_create(/*parent_dmat*/NULL, /*alignment*/1,
2271 /*boundary*/0,
2272 /*lowaddr*/BUS_SPACE_MAXADDR_32BIT,
2273 /*highaddr*/BUS_SPACE_MAXADDR,
2274 /*filter*/NULL, /*filterarg*/NULL,
2275 /*maxsize*/MAXBSIZE, /*nsegments*/AMD_NSEG,
2276 /*maxsegsz*/AMD_MAXTRANSFER_SIZE,
2277 /*flags*/BUS_DMA_ALLOCNOW,
2278 &amd->buffer_dmat) != 0) {
2279 if (bootverbose)
2280 printf("amd_init: bus_dma_tag_create failure!\n");
2281 return ENXIO;
2282 }
2283 TAILQ_INIT(&amd->free_srbs);
2284 TAILQ_INIT(&amd->running_srbs);
2285 TAILQ_INIT(&amd->waiting_srbs);
2286 amd->last_phase = SCSI_BUS_FREE;
2287 amd->dev = dev;
2288 amd->unit = device_get_unit(dev);
2289 amd->SRBCount = MAX_SRB_CNT;
2290 amd->status = 0;
2291 amd_load_eeprom_or_defaults(amd);
2292 amd->max_id = 7;
2293 if (amd->eepromBuf[EE_MODE2] & LUN_CHECK) {
2294 amd->max_lun = 7;
2295 } else {
2296 amd->max_lun = 0;
2297 }
2298 amd->AdaptSCSIID = amd->eepromBuf[EE_ADAPT_SCSI_ID];
2299 amd->HostID_Bit = (1 << amd->AdaptSCSIID);
2300 amd->AdaptSCSILUN = 0;
2301 /* (eepromBuf[EE_TAG_CMD_NUM]) << 2; */
2302 amd->ACBFlag = 0;
2303 amd->Gmode2 = amd->eepromBuf[EE_MODE2];
2304 amd_linkSRB(amd);
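	/*
	 * Seed the per-target transfer settings from the EEPROM image:
	 * disconnection/tagged-queueing flags, the user sync period and
	 * offset, and the per-target control register values.
	 */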
2305 for (i = 0; i <= amd->max_id; i++) {
2306
2307 if (amd->AdaptSCSIID != i) {
2308 struct amd_target_info *tinfo;
2309 PEEprom prom;
2310
2311 tinfo = &amd->tinfo[i];
2312 prom = (PEEprom)&amd->eepromBuf[i << 2];
2313 if ((prom->EE_MODE1 & EN_DISCONNECT) != 0) {
2314 tinfo->disc_tag |= AMD_USR_DISCENB;
2315 if ((prom->EE_MODE1 & TAG_QUEUING) != 0)
2316 tinfo->disc_tag |= AMD_USR_TAGENB;
2317 }
2318 if ((prom->EE_MODE1 & SYNC_NEGO) != 0) {
2319 tinfo->user.period =
2320 eeprom_period[prom->EE_SPEED];
2321 tinfo->user.offset = AMD_MAX_SYNC_OFFSET;
2322 }
2323 tinfo->CtrlR1 = amd->AdaptSCSIID;
2324 if ((prom->EE_MODE1 & PARITY_CHK) != 0)
2325 tinfo->CtrlR1 |= PARITY_ERR_REPO;
2326 tinfo->CtrlR3 = FAST_CLK;
2327 tinfo->CtrlR4 = EATER_25NS;
2328 if ((amd->eepromBuf[EE_MODE2] & ACTIVE_NEGATION) != 0)
2329 tinfo->CtrlR4 |= NEGATE_REQACKDATA;
2330 }
2331 }
2332 amd_write8(amd, SCSITIMEOUTREG, 153); /* 250ms selection timeout */
2333 /* Conversion factor = 0 , 40MHz clock */
2334 amd_write8(amd, CLKFACTREG, CLK_FREQ_40MHZ);
2335 /* NOP cmd - clear command register */
2336 amd_write8(amd, SCSICMDREG, NOP_CMD);
2337 amd_write8(amd, CNTLREG2, EN_FEATURE|EN_SCSI2_CMD);
2338 amd_write8(amd, CNTLREG3, FAST_CLK);
2339 bval = EATER_25NS;
2340 if (amd->eepromBuf[EE_MODE2] & ACTIVE_NEGATION) {
2341 bval |= NEGATE_REQACKDATA;
2342 }
2343 amd_write8(amd, CNTLREG4, bval);
2344
2345 /* Disable SCSI bus reset interrupt */
2346 amd_write8(amd, CNTLREG1, DIS_INT_ON_SCSI_RST);
2347
2348 return 0;
2349 }
2350
2351 /*
2352 * attach and init a host adapter
2353 */
2354 static int
2355 amd_attach(device_t dev)
2356 {
2357 struct cam_devq *devq; /* Device Queue to use for this SIM */
2358 u_int8_t intstat;
2359 struct amd_softc *amd = device_get_softc(dev);
2360 int unit = device_get_unit(dev);
2361 int rid;
2362 void *ih;
2363 struct resource *irqres;
2364
2365 if (amd_init(dev)) {
2366 if (bootverbose)
2367 printf("amd_attach: amd_init failure!\n");
2368 return ENXIO;
2369 }
2370
2371 /* Reset Pending INT */
2372 intstat = amd_read8(amd, INTSTATREG);
2373
2374 /* After setting up the adapter, map our interrupt */
2375 rid = 0;
2376 irqres = bus_alloc_resource(dev, SYS_RES_IRQ, &rid, 0, ~0, 1,
2377 RF_SHAREABLE | RF_ACTIVE);
2378 if (irqres == NULL ||
2379 bus_setup_intr(dev, irqres, INTR_TYPE_CAM | INTR_ENTROPY,
2380 amd_intr, amd, &ih)) {
2381 if (bootverbose)
2382 printf("amd%d: unable to register interrupt handler!\n",
2383 unit);
2384 return ENXIO;
2385 }
2386
2387 /*
2388 	 * Now let the CAM generic SCSI layer find the SCSI devices on
2389 	 * the bus and start the queues.  The device queue allocated for
2390 	 * the SIM below allows up to (MAX_START_JOB - 1) simultaneous
2391 	 * transactions.
2392 */
2393 devq = cam_simq_alloc(MAX_START_JOB);
2394 if (devq == NULL) {
2395 if (bootverbose)
2396 printf("amd_attach: cam_simq_alloc failure!\n");
2397 return ENXIO;
2398 }
2399
2400 amd->psim = cam_sim_alloc(amd_action, amd_poll, "amd",
2401 amd, amd->unit, 1, MAX_TAGS_CMD_QUEUE,
2402 devq);
2403 if (amd->psim == NULL) {
2404 cam_simq_free(devq);
2405 if (bootverbose)
2406 printf("amd_attach: cam_sim_alloc failure!\n");
2407 return ENXIO;
2408 }
2409
2410 if (xpt_bus_register(amd->psim, 0) != CAM_SUCCESS) {
2411 cam_sim_free(amd->psim, /*free_devq*/TRUE);
2412 if (bootverbose)
2413 printf("amd_attach: xpt_bus_register failure!\n");
2414 return ENXIO;
2415 }
2416
2417 if (xpt_create_path(&amd->ppath, /* periph */ NULL,
2418 cam_sim_path(amd->psim), CAM_TARGET_WILDCARD,
2419 CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
2420 xpt_bus_deregister(cam_sim_path(amd->psim));
2421 cam_sim_free(amd->psim, /* free_simq */ TRUE);
2422 if (bootverbose)
2423 printf("amd_attach: xpt_create_path failure!\n");
2424 return ENXIO;
2425 }
2426
2427 return 0;
2428 }
2429
2430 static int
2431 amd_probe(device_t dev)
2432 {
2433 if (pci_get_devid(dev) == PCI_DEVICE_ID_AMD53C974) {
2434 device_set_desc(dev,
2435 "Tekram DC390(T)/AMD53c974 SCSI Host Adapter");
2436 return 0;
2437 }
2438 return ENXIO;
2439 }
2440
2441 static device_method_t amd_methods[] = {
2442 /* Device interface */
2443 DEVMETHOD(device_probe, amd_probe),
2444 DEVMETHOD(device_attach, amd_attach),
2445 { 0, 0 }
2446 };
2447
2448 static driver_t amd_driver = {
2449 "amd", amd_methods, sizeof(struct amd_softc)
2450 };
2451
2452 static devclass_t amd_devclass;
2453 DRIVER_MODULE(amd, pci, amd_driver, amd_devclass, 0, 0);