1 /*
2 *********************************************************************
3 * FILE NAME : amd.c
4 * BY : C.L. Huang (ching@tekram.com.tw)
5 * Erich Chen (erich@tekram.com.tw)
6 * Description: Device Driver for the amd53c974 PCI Bus Master
7 * SCSI Host adapter found on cards such as
8 * the Tekram DC-390(T).
9 * (C)Copyright 1995-1999 Tekram Technology Co., Ltd.
10 *
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
13 * are met:
14 * 1. Redistributions of source code must retain the above copyright
15 * notice, this list of conditions and the following disclaimer.
16 * 2. Redistributions in binary form must reproduce the above copyright
17 * notice, this list of conditions and the following disclaimer in the
18 * documentation and/or other materials provided with the distribution.
19 * 3. The name of the author may not be used to endorse or promote products
20 * derived from this software without specific prior written permission.
21 *
22 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
23 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
24 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
25 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
26 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
27 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
31 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32 *
33 * $FreeBSD$
34 *
35 *********************************************************************
36 */
37
38 /*
39 *********************************************************************
40 * HISTORY:
41 *
42 * REV# DATE NAME DESCRIPTION
43 * 1.00 07/02/96 CLH First release for RELEASE-2.1.0
44 * 1.01 08/20/96 CLH Update for RELEASE-2.1.5
45 * 1.02 11/06/96 CLH Fixed more than 1 LUN scanning
46 * 1.03 12/20/96 CLH Modify to support 2.2-ALPHA
47 * 1.04 12/26/97 CLH Modify to support RELEASE-2.2.5
48 * 1.05 01/01/99 ERICH CHEN Modify to support RELEASE-3.0.x (CAM)
49 *********************************************************************
50 */
51
52 /* #define AMD_DEBUG0 */
53 /* #define AMD_DEBUG_SCSI_PHASE */
54
55 #include <sys/param.h>
56
57 #include <sys/systm.h>
58 #include <sys/malloc.h>
59 #include <sys/queue.h>
60 #include <sys/buf.h>
61 #include <sys/kernel.h>
62
63 #include <vm/vm.h>
64 #include <vm/pmap.h>
65
66 #include <pci/pcivar.h>
67 #include <pci/pcireg.h>
68
69 #include <machine/bus_pio.h>
70 #include <machine/bus.h>
71 #include <machine/clock.h>
72
73 #include <cam/cam.h>
74 #include <cam/cam_ccb.h>
75 #include <cam/cam_sim.h>
76 #include <cam/cam_xpt_sim.h>
77 #include <cam/cam_debug.h>
78
79 #include <cam/scsi/scsi_all.h>
80 #include <cam/scsi/scsi_message.h>
81
82 #include <pci/amd.h>
83
84 #define PCI_DEVICE_ID_AMD53C974 0x20201022ul
85 #define PCI_BASE_ADDR0 0x10
86
87 typedef u_int (phase_handler_t)(struct amd_softc *, struct amd_srb *, u_int);
88 typedef phase_handler_t *phase_handler_func_t;
89
90 static void amd_intr(void *vamd);
91 static int amdstart(struct amd_softc *amd, struct amd_srb * pSRB);
92 static phase_handler_t amd_NopPhase;
93
94 static phase_handler_t amd_DataOutPhase0;
95 static phase_handler_t amd_DataInPhase0;
96 #define amd_CommandPhase0 amd_NopPhase
97 static phase_handler_t amd_StatusPhase0;
98 static phase_handler_t amd_MsgOutPhase0;
99 static phase_handler_t amd_MsgInPhase0;
100 static phase_handler_t amd_DataOutPhase1;
101 static phase_handler_t amd_DataInPhase1;
102 static phase_handler_t amd_CommandPhase1;
103 static phase_handler_t amd_StatusPhase1;
104 static phase_handler_t amd_MsgOutPhase1;
105 static phase_handler_t amd_MsgInPhase1;
106
107 static void amdsetupcommand(struct amd_softc *amd, struct amd_srb *srb);
108 static int amdparsemsg(struct amd_softc *amd);
109 static int amdhandlemsgreject(struct amd_softc *amd);
110 static void amdconstructsdtr(struct amd_softc *amd,
111 u_int period, u_int offset);
112 static u_int amdfindclockrate(struct amd_softc *amd, u_int *period);
113 static int amdsentmsg(struct amd_softc *amd, u_int msgtype, int full);
114
115 static void DataIO_Comm(struct amd_softc *amd, struct amd_srb *pSRB, u_int dir);
116 static void amd_Disconnect(struct amd_softc *amd);
117 static void amd_Reselect(struct amd_softc *amd);
118 static void SRBdone(struct amd_softc *amd, struct amd_srb *pSRB);
119 static void amd_ScsiRstDetect(struct amd_softc *amd);
120 static void amd_ResetSCSIBus(struct amd_softc *amd);
121 static void RequestSense(struct amd_softc *amd, struct amd_srb *pSRB);
122 static void amd_InvalidCmd(struct amd_softc *amd);
123
124 static void amd_timeout(void *arg1);
125 static void amd_reset(struct amd_softc *amd);
126 static u_int8_t * phystovirt(struct amd_srb *pSRB, u_int32_t xferCnt);
127
128 void amd_linkSRB(struct amd_softc *amd);
129 static struct amd_softc *
130 amd_init(int unit, pcici_t config_id);
131 static void amd_load_defaults(struct amd_softc *amd);
132 static void amd_load_eeprom_or_defaults(struct amd_softc *amd);
133 static int amd_EEpromInDO(struct amd_softc *amd);
134 static u_int16_t EEpromGetData1(struct amd_softc *amd);
135 static void amd_EnDisableCE(struct amd_softc *amd, int mode, int *regval);
136 static void amd_EEpromOutDI(struct amd_softc *amd, int *regval, int Carry);
137 static void amd_Prepare(struct amd_softc *amd, int *regval, u_int8_t EEpromCmd);
138 static void amd_ReadEEprom(struct amd_softc *amd);
139
140 static const char *amd_probe(pcici_t tag, pcidi_t type);
141 static void amd_attach(pcici_t tag, int unit);
142 static void amdcompletematch(struct amd_softc *amd, target_id_t target,
143 lun_id_t lun, u_int tag, struct srb_queue *queue,
144 cam_status status);
145 static void amdsetsync(struct amd_softc *amd, u_int target, u_int clockrate,
146 u_int period, u_int offset, u_int type);
147 static void amdsettags(struct amd_softc *amd, u_int target, int tagenb);
148
149 static __inline void amd_clear_msg_state(struct amd_softc *amd);
150
151 static __inline void
152 amd_clear_msg_state(struct amd_softc *amd)
153 {
154 amd->msgout_len = 0;
155 amd->msgout_index = 0;
156 amd->msgin_index = 0;
157 }
158
159 static u_long amd_count;
160
161 /* CAM SIM entry points */
162 #define ccb_srb_ptr spriv_ptr0
163 #define ccb_amd_ptr spriv_ptr1
164 static void amd_action(struct cam_sim *sim, union ccb *ccb);
165 static void amd_poll(struct cam_sim *sim);
166
167 /*
168 * PCI device module setup
169 */
170 static struct pci_device amd_device =
171 {
172 "amd",
173 amd_probe,
174 amd_attach,
175 &amd_count,
176 NULL
177 };
178
179 #ifdef COMPAT_PCI_DRIVER
180 COMPAT_PCI_DRIVER(amd, amd_device);
181 #else
182 DATA_SET(pcidevice_set, amd_device);
183 #endif
184
185 /*
186 * State engine function tables indexed by SCSI phase number
187 */
188 phase_handler_func_t amd_SCSI_phase0[] = {
189 amd_DataOutPhase0,
190 amd_DataInPhase0,
191 amd_CommandPhase0,
192 amd_StatusPhase0,
193 amd_NopPhase,
194 amd_NopPhase,
195 amd_MsgOutPhase0,
196 amd_MsgInPhase0
197 };
198
199 phase_handler_func_t amd_SCSI_phase1[] = {
200 amd_DataOutPhase1,
201 amd_DataInPhase1,
202 amd_CommandPhase1,
203 amd_StatusPhase1,
204 amd_NopPhase,
205 amd_NopPhase,
206 amd_MsgOutPhase1,
207 amd_MsgInPhase1
208 };
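/*
 * A note on indexing, derived from the interrupt handler below: these
 * tables are indexed by the SCSI bus phase code formed from the MSG,
 * C/D and I/O signals: Data-Out = 0, Data-In = 1, Command = 2,
 * Status = 3, codes 4 and 5 are reserved, Message-Out = 6 and
 * Message-In = 7. On each interrupt, amd_intr() runs the phase0
 * handler for the phase just left, then the phase1 handler for the
 * phase being entered.
 */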
209
210 /*
211 * EEProm/BIOS negotiation periods
212 */
213 u_int8_t eeprom_period[] = {
214 25, /* 10.0MHz */
215 32, /* 8.0MHz */
216 38, /* 6.6MHz */
217 44, /* 5.7MHz */
218 50, /* 5.0MHz */
219 63, /* 4.0MHz */
220 83, /* 3.0MHz */
221 125 /* 2.0MHz */
222 };
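/*
 * The entries are SCSI synchronous period factors in units of 4ns,
 * so 25 corresponds to a 100ns period (10.0MHz) and 125 to a 500ns
 * period (2.0MHz), matching the frequencies noted above.
 */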
223
224 /*
225 * chip clock setting to SCSI specified sync parameter table.
226 */
227 u_int8_t tinfo_sync_period[] = {
228 25, /* 10.0 */
229 32, /* 8.0 */
230 38, /* 6.6 */
231 44, /* 5.7 */
232 50, /* 5.0 */
233 57, /* 4.4 */
234 63, /* 4.0 */
235 70, /* 3.6 */
236 76, /* 3.3 */
237 83 /* 3.0 */
238 };
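/*
 * amdfindclockrate() searches this table; the matching index plus 4
 * is the clock rate value programmed into the chip's sync period
 * register, while the entry itself (again a period factor in 4ns
 * units) is the period reported in SDTR messages and to the XPT.
 */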
239
240 static __inline struct amd_srb *
241 amdgetsrb(struct amd_softc * amd)
242 {
243 int intflag;
244 struct amd_srb * pSRB;
245
246 intflag = splcam();
247 pSRB = TAILQ_FIRST(&amd->free_srbs);
248 if (pSRB)
249 TAILQ_REMOVE(&amd->free_srbs, pSRB, links);
250 splx(intflag);
251 return (pSRB);
252 }
253
254 static void
255 amdsetupcommand(struct amd_softc *amd, struct amd_srb *srb)
256 {
257 struct scsi_request_sense sense_cmd;
258 struct ccb_scsiio *csio;
259 u_int8_t *cdb;
260 u_int cdb_len;
261
262 csio = &srb->pccb->csio;
263
264 if (srb->SRBFlag & AUTO_REQSENSE) {
265 sense_cmd.opcode = REQUEST_SENSE;
266 sense_cmd.byte2 = srb->pccb->ccb_h.target_lun << 5;
267 sense_cmd.unused[0] = 0;
268 sense_cmd.unused[1] = 0;
269 sense_cmd.length = csio->sense_len;
270 sense_cmd.control = 0;
271 cdb = &sense_cmd.opcode;
272 cdb_len = sizeof(sense_cmd);
273 } else {
274 cdb = &srb->CmdBlock[0];
275 cdb_len = srb->ScsiCmdLen;
276 }
277 amd_write8_multi(amd, SCSIFIFOREG, cdb, cdb_len);
278 }
279
280 /*
281 * Attempt to start a waiting transaction. Interrupts must be disabled
282 * upon entry to this function.
283 */
284 static void
285 amdrunwaiting(struct amd_softc *amd) {
286 struct amd_srb *srb;
287
288 if (amd->last_phase != SCSI_BUS_FREE)
289 return;
290
291 srb = TAILQ_FIRST(&amd->waiting_srbs);
292 if (srb == NULL)
293 return;
294
295 if (amdstart(amd, srb) == 0) {
296 TAILQ_REMOVE(&amd->waiting_srbs, srb, links);
297 TAILQ_INSERT_HEAD(&amd->running_srbs, srb, links);
298 }
299 }
300
301 static void
302 amdexecutesrb(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
303 {
304 struct amd_srb *srb;
305 union ccb *ccb;
306 struct amd_softc *amd;
307 int s;
308
309 srb = (struct amd_srb *)arg;
310 ccb = srb->pccb;
311 amd = (struct amd_softc *)ccb->ccb_h.ccb_amd_ptr;
312
313 if (error != 0) {
314 if (error != EFBIG)
315 printf("amd%d: Unexepected error 0x%x returned from "
316 "bus_dmamap_load\n", amd->unit, error);
317 if (ccb->ccb_h.status == CAM_REQ_INPROG) {
318 xpt_freeze_devq(ccb->ccb_h.path, /*count*/1);
319 ccb->ccb_h.status = CAM_REQ_TOO_BIG|CAM_DEV_QFRZN;
320 }
321 TAILQ_INSERT_HEAD(&amd->free_srbs, srb, links);
322 xpt_done(ccb);
323 return;
324 }
325
326 if (nseg != 0) {
327 struct amd_sg *sg;
328 bus_dma_segment_t *end_seg;
329 bus_dmasync_op_t op;
330
331 end_seg = dm_segs + nseg;
332
333 /* Copy the segments into our SG list */
334 srb->pSGlist = &srb->SGsegment[0];
335 sg = srb->pSGlist;
336 while (dm_segs < end_seg) {
337 sg->SGXLen = dm_segs->ds_len;
338 sg->SGXPtr = dm_segs->ds_addr;
339 sg++;
340 dm_segs++;
341 }
342
343 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN)
344 op = BUS_DMASYNC_PREREAD;
345 else
346 op = BUS_DMASYNC_PREWRITE;
347
348 bus_dmamap_sync(amd->buffer_dmat, srb->dmamap, op);
349
350 }
351 srb->SGcount = nseg;
352 srb->SGIndex = 0;
353 srb->AdaptStatus = 0;
354 srb->TargetStatus = 0;
355 srb->MsgCnt = 0;
356 srb->SRBStatus = 0;
357 srb->SRBFlag = 0;
358 srb->SRBState = 0;
359 srb->TotalXferredLen = 0;
360 srb->SGPhysAddr = 0;
361 srb->SGToBeXferLen = 0;
362 srb->EndMessage = 0;
363
364 s = splcam();
365
366 /*
367 * Last chance to check whether this CCB needs to
368 * be aborted.
369 */
370 if (ccb->ccb_h.status != CAM_REQ_INPROG) {
371 if (nseg != 0)
372 bus_dmamap_unload(amd->buffer_dmat, srb->dmamap);
373 TAILQ_INSERT_HEAD(&amd->free_srbs, srb, links);
374 xpt_done(ccb);
375 splx(s);
376 return;
377 }
378 ccb->ccb_h.status |= CAM_SIM_QUEUED;
379 #if 0
380 /* XXX Need a timeout handler */
381 ccb->ccb_h.timeout_ch =
382 timeout(amdtimeout, (caddr_t)srb,
383 (ccb->ccb_h.timeout * hz) / 1000);
384 #endif
385 TAILQ_INSERT_TAIL(&amd->waiting_srbs, srb, links);
386 amdrunwaiting(amd);
387 splx(s);
388 }
389
390 static void
391 amd_action(struct cam_sim * psim, union ccb * pccb)
392 {
393 struct amd_softc * amd;
394 u_int target_id, target_lun;
395
396 CAM_DEBUG(pccb->ccb_h.path, CAM_DEBUG_TRACE, ("amd_action\n"));
397
398 amd = (struct amd_softc *) cam_sim_softc(psim);
399 target_id = pccb->ccb_h.target_id;
400 target_lun = pccb->ccb_h.target_lun;
401
402 switch (pccb->ccb_h.func_code) {
403 case XPT_SCSI_IO:
404 {
405 struct amd_srb * pSRB;
406 struct ccb_scsiio *pcsio;
407
408 pcsio = &pccb->csio;
409
410 /*
411 * Assign an SRB and connect it with this ccb.
412 */
413 pSRB = amdgetsrb(amd);
414
415 if (!pSRB) {
416 /* Freeze SIMQ */
417 pccb->ccb_h.status = CAM_RESRC_UNAVAIL;
418 xpt_done(pccb);
419 return;
420 }
421 pSRB->pccb = pccb;
422 pccb->ccb_h.ccb_srb_ptr = pSRB;
423 pccb->ccb_h.ccb_amd_ptr = amd;
424 pSRB->ScsiCmdLen = pcsio->cdb_len;
425 bcopy(pcsio->cdb_io.cdb_bytes, pSRB->CmdBlock, pcsio->cdb_len);
426 if ((pccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
427 if ((pccb->ccb_h.flags & CAM_SCATTER_VALID) == 0) {
428 /*
429 * We've been given a pointer
430 * to a single buffer.
431 */
432 if ((pccb->ccb_h.flags & CAM_DATA_PHYS) == 0) {
433 int s;
434 int error;
435
436 s = splsoftvm();
437 error =
438 bus_dmamap_load(amd->buffer_dmat,
439 pSRB->dmamap,
440 pcsio->data_ptr,
441 pcsio->dxfer_len,
442 amdexecutesrb,
443 pSRB, /*flags*/0);
444 if (error == EINPROGRESS) {
445 /*
446 * So as to maintain
447 * ordering, freeze the
448 * controller queue
449 * until our mapping is
450 * returned.
451 */
452 xpt_freeze_simq(amd->psim, 1);
453 pccb->ccb_h.status |=
454 CAM_RELEASE_SIMQ;
455 }
456 splx(s);
457 } else {
458 struct bus_dma_segment seg;
459
460 /* Pointer to physical buffer */
461 seg.ds_addr =
462 (bus_addr_t)pcsio->data_ptr;
463 seg.ds_len = pcsio->dxfer_len;
464 amdexecutesrb(pSRB, &seg, 1, 0);
465 }
466 } else {
467 struct bus_dma_segment *segs;
468
469 if ((pccb->ccb_h.flags & CAM_SG_LIST_PHYS) == 0
470 || (pccb->ccb_h.flags & CAM_DATA_PHYS) != 0) {
471 TAILQ_INSERT_HEAD(&amd->free_srbs,
472 pSRB, links);
473 pccb->ccb_h.status = CAM_PROVIDE_FAIL;
474 xpt_done(pccb);
475 return;
476 }
477
478 /* Just use the segments provided */
479 segs =
480 (struct bus_dma_segment *)pcsio->data_ptr;
481 amdexecutesrb(pSRB, segs, pcsio->sglist_cnt, 0);
482 }
483 } else
484 amdexecutesrb(pSRB, NULL, 0, 0);
485 break;
486 }
487 case XPT_PATH_INQ:
488 {
489 struct ccb_pathinq *cpi = &pccb->cpi;
490
491 cpi->version_num = 1;
492 cpi->hba_inquiry = PI_SDTR_ABLE | PI_TAG_ABLE;
493 cpi->target_sprt = 0;
494 cpi->hba_misc = 0;
495 cpi->hba_eng_cnt = 0;
496 cpi->max_target = 7;
497 cpi->max_lun = amd->max_lun; /* 7 or 0 */
498 cpi->initiator_id = amd->AdaptSCSIID;
499 cpi->bus_id = cam_sim_bus(psim);
500 strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
501 strncpy(cpi->hba_vid, "TRM-AMD", HBA_IDLEN);
502 strncpy(cpi->dev_name, cam_sim_name(psim), DEV_IDLEN);
503 cpi->unit_number = cam_sim_unit(psim);
504 cpi->ccb_h.status = CAM_REQ_CMP;
505 xpt_done(pccb);
506 break;
507 }
508 case XPT_ABORT:
509 pccb->ccb_h.status = CAM_REQ_INVALID;
510 xpt_done(pccb);
511 break;
512 case XPT_RESET_BUS:
513 {
514
515 int i;
516
517 amd_ResetSCSIBus(amd);
518 amd->ACBFlag = 0;
519
520 for (i = 0; i < 500; i++) {
521 DELAY(1000); /* Wait until our interrupt
522 * handler sees it */
523 }
524
525 pccb->ccb_h.status = CAM_REQ_CMP;
526 xpt_done(pccb);
527 break;
528 }
529 case XPT_RESET_DEV:
530 pccb->ccb_h.status = CAM_REQ_INVALID;
531 xpt_done(pccb);
532 break;
533 case XPT_TERM_IO:
534 pccb->ccb_h.status = CAM_REQ_INVALID;
535 xpt_done(pccb);
break;
536 case XPT_GET_TRAN_SETTINGS:
537 {
538 struct ccb_trans_settings *cts;
539 struct amd_target_info *targ_info;
540 struct amd_transinfo *tinfo;
541 int intflag;
542
543 cts = &pccb->cts;
544 intflag = splcam();
545 targ_info = &amd->tinfo[target_id];
546 if ((cts->flags & CCB_TRANS_CURRENT_SETTINGS) != 0) {
547 /* current transfer settings */
548 if (targ_info->disc_tag & AMD_CUR_DISCENB) {
549 cts->flags = CCB_TRANS_DISC_ENB;
550 } else {
551 cts->flags = 0; /* no tag & disconnect */
552 }
553 if (targ_info->disc_tag & AMD_CUR_TAGENB) {
554 cts->flags |= CCB_TRANS_TAG_ENB;
555 }
556 tinfo = &targ_info->current;
557 } else {
558 /* default(user) transfer settings */
559 if (targ_info->disc_tag & AMD_USR_DISCENB) {
560 cts->flags = CCB_TRANS_DISC_ENB;
561 } else {
562 cts->flags = 0;
563 }
564 if (targ_info->disc_tag & AMD_USR_TAGENB) {
565 cts->flags |= CCB_TRANS_TAG_ENB;
566 }
567 tinfo = &targ_info->user;
568 }
569
570 cts->sync_period = tinfo->period;
571 cts->sync_offset = tinfo->offset;
572 cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
573 splx(intflag);
574 cts->valid = CCB_TRANS_SYNC_RATE_VALID
575 | CCB_TRANS_SYNC_OFFSET_VALID
576 | CCB_TRANS_BUS_WIDTH_VALID
577 | CCB_TRANS_DISC_VALID
578 | CCB_TRANS_TQ_VALID;
579 pccb->ccb_h.status = CAM_REQ_CMP;
580 xpt_done(pccb);
581 break;
582 }
583 case XPT_SET_TRAN_SETTINGS:
584 {
585 struct ccb_trans_settings *cts;
586 struct amd_target_info *targ_info;
587 u_int update_type;
588 int intflag;
589 int last_entry;
590
591 cts = &pccb->cts;
592 update_type = 0;
593 if ((cts->flags & CCB_TRANS_CURRENT_SETTINGS) != 0) {
594 update_type |= AMD_TRANS_GOAL;
595 } else if ((cts->flags & CCB_TRANS_USER_SETTINGS) != 0) {
596 update_type |= AMD_TRANS_USER;
597 }
598 if (update_type == 0
599 || update_type == (AMD_TRANS_USER|AMD_TRANS_GOAL)) {
600 cts->ccb_h.status = CAM_REQ_INVALID;
601 xpt_done(pccb);
break;
602 }
603
604 intflag = splcam();
605 targ_info = &amd->tinfo[target_id];
606
607 if ((cts->valid & CCB_TRANS_DISC_VALID) != 0) {
608 if (update_type & AMD_TRANS_GOAL) {
609 if ((cts->flags & CCB_TRANS_DISC_ENB) != 0) {
610 targ_info->disc_tag |= AMD_CUR_DISCENB;
611 } else {
612 targ_info->disc_tag &= ~AMD_CUR_DISCENB;
613 }
614 }
615 if (update_type & AMD_TRANS_USER) {
616 if ((cts->flags & CCB_TRANS_DISC_ENB) != 0) {
617 targ_info->disc_tag |= AMD_USR_DISCENB;
618 } else {
619 targ_info->disc_tag &= ~AMD_USR_DISCENB;
620 }
621 }
622 }
623 if ((cts->valid & CCB_TRANS_TQ_VALID) != 0) {
624 if (update_type & AMD_TRANS_GOAL) {
625 if ((cts->flags & CCB_TRANS_TAG_ENB) != 0) {
626 targ_info->disc_tag |= AMD_CUR_TAGENB;
627 } else {
628 targ_info->disc_tag &= ~AMD_CUR_TAGENB;
629 }
630 }
631 if (update_type & AMD_TRANS_USER) {
632 if ((cts->flags & CCB_TRANS_TAG_ENB) != 0) {
633 targ_info->disc_tag |= AMD_USR_TAGENB;
634 } else {
635 targ_info->disc_tag &= ~AMD_USR_TAGENB;
636 }
637 }
638 }
639
640 if ((cts->valid & CCB_TRANS_SYNC_OFFSET_VALID) == 0) {
641 if (update_type & AMD_TRANS_GOAL)
642 cts->sync_offset = targ_info->goal.offset;
643 else
644 cts->sync_offset = targ_info->user.offset;
645 }
646
647 if (cts->sync_offset > AMD_MAX_SYNC_OFFSET)
648 cts->sync_offset = AMD_MAX_SYNC_OFFSET;
649
650 if ((cts->valid & CCB_TRANS_SYNC_RATE_VALID) == 0) {
651 if (update_type & AMD_TRANS_GOAL)
652 cts->sync_period = targ_info->goal.period;
653 else
654 cts->sync_period = targ_info->user.period;
655 }
656
657 last_entry = sizeof(tinfo_sync_period) - 1;
658 if ((cts->sync_period != 0)
659 && (cts->sync_period < tinfo_sync_period[0]))
660 cts->sync_period = tinfo_sync_period[0];
661 if (cts->sync_period > tinfo_sync_period[last_entry])
662 cts->sync_period = 0;
663 if (cts->sync_offset == 0)
664 cts->sync_period = 0;
665
666 if ((update_type & AMD_TRANS_USER) != 0) {
667 targ_info->user.period = cts->sync_period;
668 targ_info->user.offset = cts->sync_offset;
669 }
670 if ((update_type & AMD_TRANS_GOAL) != 0) {
671 targ_info->goal.period = cts->sync_period;
672 targ_info->goal.offset = cts->sync_offset;
673 }
674 splx(intflag);
675 pccb->ccb_h.status = CAM_REQ_CMP;
676 xpt_done(pccb);
677 break;
678 }
679 case XPT_CALC_GEOMETRY:
680 {
681 struct ccb_calc_geometry *ccg;
682 u_int32_t size_mb;
683 u_int32_t secs_per_cylinder;
684 int extended;
685
686 ccg = &pccb->ccg;
687 size_mb = ccg->volume_size/((1024L * 1024L)/ccg->block_size);
688 extended = (amd->eepromBuf[EE_MODE2] & GREATER_1G) != 0;
689
690 if (size_mb > 1024 && extended) {
691 ccg->heads = 255;
692 ccg->secs_per_track = 63;
693 } else {
694 ccg->heads = 64;
695 ccg->secs_per_track = 32;
696 }
697 secs_per_cylinder = ccg->heads * ccg->secs_per_track;
698 ccg->cylinders = ccg->volume_size / secs_per_cylinder;
699 pccb->ccb_h.status = CAM_REQ_CMP;
700 xpt_done(pccb);
701 break;
702 }
703 default:
704 pccb->ccb_h.status = CAM_REQ_INVALID;
705 xpt_done(pccb);
706 break;
707 }
708 }
709
710 static void
711 amd_poll(struct cam_sim * psim)
712 {
713 amd_intr(cam_sim_softc(psim));
714 }
715
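/*
 * Translate the current transfer position of an SRB back into a
 * kernel virtual address: start from the CCB's data_ptr, skip over
 * the fully transferred S/G segments, and add xferCnt into the
 * current segment. This assumes the data buffer is virtually
 * contiguous.
 */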
716 static u_int8_t *
717 phystovirt(struct amd_srb * pSRB, u_int32_t xferCnt)
718 {
719 int dataPtr;
720 struct ccb_scsiio *pcsio;
721 u_int8_t i;
722 struct amd_sg * pseg;
723
724 dataPtr = 0;
725 pcsio = &pSRB->pccb->csio;
726
727 dataPtr = (int) pcsio->data_ptr;
728 pseg = pSRB->SGsegment;
729 for (i = 0; i < pSRB->SGIndex; i++) {
730 dataPtr += (int) pseg->SGXLen;
731 pseg++;
732 }
733 dataPtr += (int) xferCnt;
734 return ((u_int8_t *) dataPtr);
735 }
736
737 static void
738 ResetDevParam(struct amd_softc * amd)
739 {
740 u_int target;
741
742 for (target = 0; target <= amd->max_id; target++) {
743 if (amd->AdaptSCSIID != target) {
744 amdsetsync(amd, target, /*clockrate*/0,
745 /*period*/0, /*offset*/0, AMD_TRANS_CUR);
746 }
747 }
748 }
749
750 static void
751 amdcompletematch(struct amd_softc *amd, target_id_t target, lun_id_t lun,
752 u_int tag, struct srb_queue *queue, cam_status status)
753 {
754 struct amd_srb *srb;
755 struct amd_srb *next_srb;
756
757 for (srb = TAILQ_FIRST(queue); srb != NULL; srb = next_srb) {
758 union ccb *ccb;
759
760 next_srb = TAILQ_NEXT(srb, links);
761 if (srb->pccb->ccb_h.target_id != target
762 && target != CAM_TARGET_WILDCARD)
763 continue;
764
765 if (srb->pccb->ccb_h.target_lun != lun
766 && lun != CAM_LUN_WILDCARD)
767 continue;
768
769 if (srb->TagNumber != tag
770 && tag != AMD_TAG_WILDCARD)
771 continue;
772
773 ccb = srb->pccb;
774 TAILQ_REMOVE(queue, srb, links);
775 TAILQ_INSERT_HEAD(&amd->free_srbs, srb, links);
776 if ((ccb->ccb_h.status & CAM_DEV_QFRZN) == 0
777 && (status & CAM_DEV_QFRZN) != 0)
778 xpt_freeze_devq(ccb->ccb_h.path, /*count*/1);
779 ccb->ccb_h.status = status;
780 xpt_done(ccb);
781 }
782
783 }
784
785 static void
786 amdsetsync(struct amd_softc *amd, u_int target, u_int clockrate,
787 u_int period, u_int offset, u_int type)
788 {
789 struct amd_target_info *tinfo;
790 u_int old_period;
791 u_int old_offset;
792
793 tinfo = &amd->tinfo[target];
794 old_period = tinfo->current.period;
795 old_offset = tinfo->current.offset;
796 if ((type & AMD_TRANS_CUR) != 0
797 && (old_period != period || old_offset != offset)) {
798 struct cam_path *path;
799
800 tinfo->current.period = period;
801 tinfo->current.offset = offset;
802 tinfo->sync_period_reg = clockrate;
803 tinfo->sync_offset_reg = offset;
804 tinfo->CtrlR3 &= ~FAST_SCSI;
805 tinfo->CtrlR4 &= ~EATER_25NS;
806 if (clockrate > 7)
807 tinfo->CtrlR4 |= EATER_25NS;
808 else
809 tinfo->CtrlR3 |= FAST_SCSI;
810
811 if ((type & AMD_TRANS_ACTIVE) == AMD_TRANS_ACTIVE) {
812 amd_write8(amd, SYNCPERIOREG, tinfo->sync_period_reg);
813 amd_write8(amd, SYNCOFFREG, tinfo->sync_offset_reg);
814 amd_write8(amd, CNTLREG3, tinfo->CtrlR3);
815 amd_write8(amd, CNTLREG4, tinfo->CtrlR4);
816 }
817 /* If possible, update the XPT's notion of our transfer rate */
818 if (xpt_create_path(&path, /*periph*/NULL,
819 cam_sim_path(amd->psim), target,
820 CAM_LUN_WILDCARD) == CAM_REQ_CMP) {
821 struct ccb_trans_settings neg;
822
823 xpt_setup_ccb(&neg.ccb_h, path, /*priority*/1);
824 neg.sync_period = period;
825 neg.sync_offset = offset;
826 neg.valid = CCB_TRANS_SYNC_RATE_VALID
827 | CCB_TRANS_SYNC_OFFSET_VALID;
828 xpt_async(AC_TRANSFER_NEG, path, &neg);
829 xpt_free_path(path);
830 }
831 }
832 if ((type & AMD_TRANS_GOAL) != 0) {
833 tinfo->goal.period = period;
834 tinfo->goal.offset = offset;
835 }
836
837 if ((type & AMD_TRANS_USER) != 0) {
838 tinfo->user.period = period;
839 tinfo->user.offset = offset;
840 }
841 }
842
843 static void
844 amdsettags(struct amd_softc *amd, u_int target, int tagenb)
845 {
846 panic("Implement me!\n");
847 }
848
849
850 /*
851 **********************************************************************
852 * Function : amd_reset (struct amd_softc * amd)
853 * Purpose : perform a hard reset on the SCSI bus (and AMD chip).
854 * Inputs : amd - softc of the adapter whose bus is reset
855 **********************************************************************
856 */
857 static void
858 amd_reset(struct amd_softc * amd)
859 {
860 int intflag;
861 u_int8_t bval;
862 u_int16_t i;
863
864
865 #ifdef AMD_DEBUG0
866 printf("DC390: RESET");
867 #endif
868
869 intflag = splcam();
870 bval = amd_read8(amd, CNTLREG1);
871 bval |= DIS_INT_ON_SCSI_RST;
872 amd_write8(amd, CNTLREG1, bval); /* disable interrupt */
873 amd_ResetSCSIBus(amd);
874
875 for (i = 0; i < 500; i++) {
876 DELAY(1000);
877 }
878
879 bval = amd_read8(amd, CNTLREG1);
880 bval &= ~DIS_INT_ON_SCSI_RST;
881 amd_write8(amd, CNTLREG1, bval); /* re-enable interrupt */
882
883 amd_write8(amd, DMA_Cmd, DMA_IDLE_CMD);
884 amd_write8(amd, SCSICMDREG, CLEAR_FIFO_CMD);
885
886 ResetDevParam(amd);
887 amdcompletematch(amd, CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD,
888 AMD_TAG_WILDCARD, &amd->running_srbs,
889 CAM_DEV_QFRZN|CAM_SCSI_BUS_RESET);
890 amdcompletematch(amd, CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD,
891 AMD_TAG_WILDCARD, &amd->waiting_srbs,
892 CAM_DEV_QFRZN|CAM_SCSI_BUS_RESET);
893 amd->active_srb = NULL;
894 amd->ACBFlag = 0;
895 splx(intflag);
896 return;
897 }
898
899 static void
900 amd_timeout(void *arg1)
901 {
902 struct amd_srb * pSRB;
903
904 pSRB = (struct amd_srb *) arg1;
905 }
906
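/*
 * Begin selection of the target addressed by pSRB. Returns 0 if the
 * selection command was issued and the SRB is now the active one, or
 * 1 if the chip already had an interrupt pending, in which case the
 * SRB is left in the SRB_READY state and must be retried later via
 * amdrunwaiting().
 */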
907 static int
908 amdstart(struct amd_softc *amd, struct amd_srb *pSRB)
909 {
910 union ccb *pccb;
911 struct ccb_scsiio *pcsio;
912 struct amd_target_info *targ_info;
913 u_int identify_msg;
914 u_int command;
915 u_int target;
916 u_int lun;
917 int tagged;
918
919 pccb = pSRB->pccb;
920 pcsio = &pccb->csio;
921 target = pccb->ccb_h.target_id;
922 lun = pccb->ccb_h.target_lun;
923 targ_info = &amd->tinfo[target];
924
925 amd_clear_msg_state(amd);
926 amd_write8(amd, SCSIDESTIDREG, target);
927 amd_write8(amd, SYNCPERIOREG, targ_info->sync_period_reg);
928 amd_write8(amd, SYNCOFFREG, targ_info->sync_offset_reg);
929 amd_write8(amd, CNTLREG1, targ_info->CtrlR1);
930 amd_write8(amd, CNTLREG3, targ_info->CtrlR3);
931 amd_write8(amd, CNTLREG4, targ_info->CtrlR4);
932 amd_write8(amd, SCSICMDREG, CLEAR_FIFO_CMD);
933
934 identify_msg = MSG_IDENTIFYFLAG | lun;
935 if ((targ_info->disc_tag & AMD_CUR_DISCENB) != 0
936 && (pccb->ccb_h.flags & CAM_DIS_DISCONNECT) == 0
937 && (pSRB->CmdBlock[0] != REQUEST_SENSE)
938 && (pSRB->SRBFlag & AUTO_REQSENSE) == 0)
939 identify_msg |= MSG_IDENTIFY_DISCFLAG;
940
941 amd_write8(amd, SCSIFIFOREG, identify_msg);
942 tagged = 0;
943 if ((targ_info->disc_tag & AMD_CUR_TAGENB) == 0
944 || (identify_msg & MSG_IDENTIFY_DISCFLAG) == 0)
945 pccb->ccb_h.flags &= ~CAM_TAG_ACTION_VALID;
946 if (targ_info->current.period != targ_info->goal.period
947 || targ_info->current.offset != targ_info->goal.offset) {
948 command = SEL_W_ATN_STOP;
949 amdconstructsdtr(amd, targ_info->goal.period,
950 targ_info->goal.offset);
951 } else if ((pccb->ccb_h.flags & CAM_TAG_ACTION_VALID) != 0) {
952 command = SEL_W_ATN2;
953 pSRB->SRBState = SRB_START;
954 amd_write8(amd, SCSIFIFOREG, pcsio->tag_action);
955 amd_write8(amd, SCSIFIFOREG, pSRB->TagNumber);
956 tagged++;
957 } else {
958 command = SEL_W_ATN;
959 pSRB->SRBState = SRB_START;
960 }
961 if (command != SEL_W_ATN_STOP)
962 amdsetupcommand(amd, pSRB);
963
964 if (amd_read8(amd, SCSISTATREG) & INTERRUPT) {
965 pSRB->SRBState = SRB_READY;
966 return (1);
967 } else {
968 amd->last_phase = SCSI_ARBITRATING;
969 amd_write8(amd, SCSICMDREG, command);
970 amd->active_srb = pSRB;
971 amd->cur_target = target;
972 amd->cur_lun = lun;
973 return (0);
974 }
975 }
976
977 /*
978 * Catch an interrupt from the adapter.
979 * Process pending device interrupts.
980 */
981 static void
982 amd_intr(void *arg)
983 {
984 struct amd_softc *amd;
985 struct amd_srb *pSRB;
986 u_int internstat = 0;
987 u_int scsistat;
988 u_int intstat;
989
990 amd = (struct amd_softc *)arg;
991
992 if (amd == NULL) {
993 #ifdef AMD_DEBUG0
994 printf("amd_intr: amd NULL return......");
995 #endif
996 return;
997 }
998
999 scsistat = amd_read8(amd, SCSISTATREG);
1000 if (!(scsistat & INTERRUPT)) {
1001 #ifdef AMD_DEBUG0
1002 printf("amd_intr: scsistat = NULL ,return......");
1003 #endif
1004 return;
1005 }
1006 #ifdef AMD_DEBUG_SCSI_PHASE
1007 printf("scsistat=%2x,", scsistat);
1008 #endif
1009
1010 internstat = amd_read8(amd, INTERNSTATREG);
1011 intstat = amd_read8(amd, INTSTATREG);
1012
1013 #ifdef AMD_DEBUG_SCSI_PHASE
1014 printf("intstat=%2x,", intstat);
1015 #endif
1016
1017 if (intstat & DISCONNECTED) {
1018 amd_Disconnect(amd);
1019 return;
1020 }
1021 if (intstat & RESELECTED) {
1022 amd_Reselect(amd);
1023 return;
1024 }
1025 if (intstat & INVALID_CMD) {
1026 amd_InvalidCmd(amd);
1027 return;
1028 }
1029 if (intstat & SCSI_RESET_) {
1030 amd_ScsiRstDetect(amd);
1031 return;
1032 }
1033 if (intstat & (SUCCESSFUL_OP | SERVICE_REQUEST)) {
1034 pSRB = amd->active_srb;
1035 /*
1036 * Run our state engine. First perform
1037 * post processing for the last phase we
1038 * were in, followed by any processing
1039 * required to handle the current phase.
1040 */
1041 scsistat =
1042 amd_SCSI_phase0[amd->last_phase](amd, pSRB, scsistat);
1043 amd->last_phase = scsistat & SCSI_PHASE_MASK;
1044 (void)amd_SCSI_phase1[amd->last_phase](amd, pSRB, scsistat);
1045 }
1046 }
1047
1048 static u_int
1049 amd_DataOutPhase0(struct amd_softc *amd, struct amd_srb *pSRB, u_int scsistat)
1050 {
1051 struct amd_sg *psgl;
1052 u_int32_t ResidCnt, xferCnt;
1053
1054 if (!(pSRB->SRBState & SRB_XFERPAD)) {
1055 if (scsistat & PARITY_ERR) {
1056 pSRB->SRBStatus |= PARITY_ERROR;
1057 }
1058 if (scsistat & COUNT_2_ZERO) {
1059 while ((amd_read8(amd, DMA_Status)&DMA_XFER_DONE) == 0)
1060 ;
1061 pSRB->TotalXferredLen += pSRB->SGToBeXferLen;
1062 pSRB->SGIndex++;
1063 if (pSRB->SGIndex < pSRB->SGcount) {
1064 pSRB->pSGlist++;
1065 psgl = pSRB->pSGlist;
1066 pSRB->SGPhysAddr = psgl->SGXPtr;
1067 pSRB->SGToBeXferLen = psgl->SGXLen;
1068 } else {
1069 pSRB->SGToBeXferLen = 0;
1070 }
1071 } else {
1072 ResidCnt = amd_read8(amd, CURRENTFIFOREG) & 0x1f;
1073 ResidCnt += amd_read8(amd, CTCREG_LOW)
1074 | (amd_read8(amd, CTCREG_MID) << 8)
1075 | (amd_read8(amd, CURTXTCNTREG) << 16);
1076
1077 xferCnt = pSRB->SGToBeXferLen - ResidCnt;
1078 pSRB->SGPhysAddr += xferCnt;
1079 pSRB->TotalXferredLen += xferCnt;
1080 pSRB->SGToBeXferLen = ResidCnt;
1081 }
1082 }
1083 amd_write8(amd, DMA_Cmd, WRITE_DIRECTION | DMA_IDLE_CMD);
1084 return (scsistat);
1085 }
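/*
 * In the phase-change case above, the residual is the sum of the
 * bytes still sitting in the SCSI FIFO (low five bits of
 * CURRENTFIFOREG) and the 24-bit transfer counter; the difference
 * from SGToBeXferLen is what actually reached the target.
 */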
1086
1087 static u_int
1088 amd_DataInPhase0(struct amd_softc *amd, struct amd_srb *pSRB, u_int scsistat)
1089 {
1090 u_int8_t bval;
1091 u_int16_t i, residual;
1092 struct amd_sg *psgl;
1093 u_int32_t ResidCnt, xferCnt;
1094 u_int8_t * ptr;
1095
1096 if (!(pSRB->SRBState & SRB_XFERPAD)) {
1097 if (scsistat & PARITY_ERR) {
1098 pSRB->SRBStatus |= PARITY_ERROR;
1099 }
1100 if (scsistat & COUNT_2_ZERO) {
1101 while (1) {
1102 bval = amd_read8(amd, DMA_Status);
1103 if ((bval & DMA_XFER_DONE) != 0)
1104 break;
1105 }
1106 amd_write8(amd, DMA_Cmd, READ_DIRECTION|DMA_IDLE_CMD);
1107
1108 pSRB->TotalXferredLen += pSRB->SGToBeXferLen;
1109 pSRB->SGIndex++;
1110 if (pSRB->SGIndex < pSRB->SGcount) {
1111 pSRB->pSGlist++;
1112 psgl = pSRB->pSGlist;
1113 pSRB->SGPhysAddr = psgl->SGXPtr;
1114 pSRB->SGToBeXferLen = psgl->SGXLen;
1115 } else {
1116 pSRB->SGToBeXferLen = 0;
1117 }
1118 } else { /* phase changed */
1119 residual = 0;
1120 bval = amd_read8(amd, CURRENTFIFOREG);
1121 while (bval & 0x1f) {
1122 if ((bval & 0x1f) == 1) {
1123 for (i = 0; i < 0x100; i++) {
1124 bval = amd_read8(amd, CURRENTFIFOREG);
1125 if (!(bval & 0x1f)) {
1126 goto din_1;
1127 } else if (i == 0x0ff) {
1128 residual = 1;
1129 goto din_1;
1130 }
1131 }
1132 } else {
1133 bval = amd_read8(amd, CURRENTFIFOREG);
1134 }
1135 }
1136 din_1:
1137 amd_write8(amd, DMA_Cmd, READ_DIRECTION|DMA_BLAST_CMD);
1138 for (i = 0; i < 0x8000; i++) {
1139 if ((amd_read8(amd, DMA_Status)&BLAST_COMPLETE))
1140 break;
1141 }
1142 amd_write8(amd, DMA_Cmd, READ_DIRECTION|DMA_IDLE_CMD);
1143
1144 ResidCnt = amd_read8(amd, CTCREG_LOW)
1145 | (amd_read8(amd, CTCREG_MID) << 8)
1146 | (amd_read8(amd, CURTXTCNTREG) << 16);
1147 xferCnt = pSRB->SGToBeXferLen - ResidCnt;
1148 pSRB->SGPhysAddr += xferCnt;
1149 pSRB->TotalXferredLen += xferCnt;
1150 pSRB->SGToBeXferLen = ResidCnt;
1151 if (residual) {
1152 /* get residual byte */
1153 bval = amd_read8(amd, SCSIFIFOREG);
1154 ptr = phystovirt(pSRB, xferCnt);
1155 *ptr = bval;
1156 pSRB->SGPhysAddr++;
1157 pSRB->TotalXferredLen++;
1158 pSRB->SGToBeXferLen--;
1159 }
1160 }
1161 }
1162 return (scsistat);
1163 }
1164
1165 static u_int
1166 amd_StatusPhase0(struct amd_softc *amd, struct amd_srb *pSRB, u_int scsistat)
1167 {
1168 pSRB->TargetStatus = amd_read8(amd, SCSIFIFOREG);
1169 /* get message */
1170 pSRB->EndMessage = amd_read8(amd, SCSIFIFOREG);
1171 pSRB->SRBState = SRB_COMPLETED;
1172 amd_write8(amd, SCSICMDREG, MSG_ACCEPTED_CMD);
1173 return (SCSI_NOP0);
1174 }
1175
1176 static u_int
1177 amd_MsgOutPhase0(struct amd_softc *amd, struct amd_srb *pSRB, u_int scsistat)
1178 {
1179 if (pSRB->SRBState & (SRB_UNEXPECT_RESEL | SRB_ABORT_SENT)) {
1180 scsistat = SCSI_NOP0;
1181 }
1182 return (scsistat);
1183 }
1184
1185 static u_int
1186 amd_MsgInPhase0(struct amd_softc *amd, struct amd_srb *pSRB, u_int scsistat)
1187 {
1188 int done;
1189
1190 amd->msgin_buf[amd->msgin_index] = amd_read8(amd, SCSIFIFOREG);
1191
1192 done = amdparsemsg(amd);
1193 if (done)
1194 amd->msgin_index = 0;
1195 else
1196 amd->msgin_index++;
1197 return (SCSI_NOP0);
1198 }
1199
1200 static int
1201 amdparsemsg(struct amd_softc *amd)
1202 {
1203 struct amd_target_info *targ_info;
1204 int reject;
1205 int done;
1206 int response;
1207
1208 done = FALSE;
1209 response = FALSE;
1210 reject = FALSE;
1211
1212 targ_info = &amd->tinfo[amd->cur_target];
1213
1214 /*
1215 * Parse as much of the message as is available,
1216 * rejecting it if we don't support it. When
1217 * the entire message is available and has been
1218 * handled, return TRUE indicating that we have
1219 * parsed an entire message.
1220 */
1221 switch (amd->msgin_buf[0]) {
1222 case MSG_DISCONNECT:
1223 amd->active_srb->SRBState = SRB_DISCONNECT;
1224 amd->disc_count[amd->cur_target][amd->cur_lun]++;
1225 done = TRUE;
1226 break;
1227 case MSG_SIMPLE_Q_TAG:
1228 {
1229 struct amd_srb *disc_srb;
1230
1231 if (amd->msgin_index < 1)
1232 break;
1233 disc_srb = &amd->SRB_array[amd->msgin_buf[1]];
1234 if (amd->active_srb != NULL
1235 || disc_srb->SRBState != SRB_DISCONNECT
1236 || disc_srb->pccb->ccb_h.target_id != amd->cur_target
1237 || disc_srb->pccb->ccb_h.target_lun != amd->cur_lun) {
1238 printf("amd%d: Unexpected tagged reselection "
1239 "for target %d, Issuing Abort\n", amd->unit,
1240 amd->cur_target);
1241 amd->msgout_buf[0] = MSG_ABORT;
1242 amd->msgout_len = 1;
1243 response = TRUE;
1244 break;
1245 }
1246 amd->active_srb = disc_srb;
1247 amd->disc_count[amd->cur_target][amd->cur_lun]--;
1248 done = TRUE;
1249 break;
1250 }
1251 case MSG_MESSAGE_REJECT:
1252 response = amdhandlemsgreject(amd);
1253 if (response == FALSE)
1254 amd_write8(amd, SCSICMDREG, RESET_ATN_CMD);
1255 /* FALLTHROUGH */
1256 case MSG_NOOP:
1257 done = TRUE;
1258 break;
1259 case MSG_EXTENDED:
1260 {
1261 u_int clockrate;
1262 u_int period;
1263 u_int offset;
1264 u_int saved_offset;
1265
1266 /* Wait for enough of the message to begin validation */
1267 if (amd->msgin_index < 1)
1268 break;
1269 if (amd->msgin_buf[1] != MSG_EXT_SDTR_LEN) {
1270 reject = TRUE;
1271 break;
1272 }
1273
1274 /* Wait for opcode */
1275 if (amd->msgin_index < 2)
1276 break;
1277
1278 if (amd->msgin_buf[2] != MSG_EXT_SDTR) {
1279 reject = TRUE;
1280 break;
1281 }
1282
1283 /*
1284 * Wait until we have both args before validating
1285 * and acting on this message.
1286 *
1287 * Add one to MSG_EXT_SDTR_LEN to account for
1288 * the extended message preamble.
1289 */
1290 if (amd->msgin_index < (MSG_EXT_SDTR_LEN + 1))
1291 break;
1292
1293 period = amd->msgin_buf[3];
1294 saved_offset = offset = amd->msgin_buf[4];
1295 clockrate = amdfindclockrate(amd, &period);
1296 if (offset > AMD_MAX_SYNC_OFFSET)
1297 offset = AMD_MAX_SYNC_OFFSET;
1298 if (period == 0 || offset == 0) {
1299 offset = 0;
1300 period = 0;
1301 clockrate = 0;
1302 }
1303 amdsetsync(amd, amd->cur_target, clockrate, period, offset,
1304 AMD_TRANS_ACTIVE|AMD_TRANS_GOAL);
1305
1306 /*
1307 * See if we initiated Sync Negotiation
1308 * and didn't have to fall back to async
1309 * transfers.
1310 */
1311 if (amdsentmsg(amd, MSG_EXT_SDTR, /*full*/TRUE)) {
1312 /* We started it */
1313 if (saved_offset != offset) {
1314 /* Went too low - force async */
1315 reject = TRUE;
1316 }
1317 } else {
1318 /*
1319 * Send our own SDTR in reply
1320 */
1321 if (bootverbose)
1322 printf("Sending SDTR!\n");
1323 amd->msgout_index = 0;
1324 amd->msgout_len = 0;
1325 amdconstructsdtr(amd, period, offset);
1326 amd->msgout_index = 0;
1327 response = TRUE;
1328 }
1329 done = TRUE;
1330 break;
1331 }
1332 case MSG_SAVEDATAPOINTER:
1333 case MSG_RESTOREPOINTERS:
1334 /* XXX Implement!!! */
1335 done = TRUE;
1336 break;
1337 default:
1338 reject = TRUE;
1339 break;
1340 }
1341
1342 if (reject) {
1343 amd->msgout_index = 0;
1344 amd->msgout_len = 1;
1345 amd->msgout_buf[0] = MSG_MESSAGE_REJECT;
1346 done = TRUE;
1347 response = TRUE;
1348 }
1349
1350 if (response)
1351 amd_write8(amd, SCSICMDREG, SET_ATN_CMD);
1352
1353 if (done && !response)
1354 /* Clear the outgoing message buffer */
1355 amd->msgout_len = 0;
1356
1357 /* Drop Ack */
1358 amd_write8(amd, SCSICMDREG, MSG_ACCEPTED_CMD);
1359
1360 return (done);
1361 }
1362
1363 static u_int
1364 amdfindclockrate(struct amd_softc *amd, u_int *period)
1365 {
1366 u_int i;
1367 u_int clockrate;
1368
1369 for (i = 0; i < sizeof(tinfo_sync_period); i++) {
1370 u_int8_t *table_entry;
1371
1372 table_entry = &tinfo_sync_period[i];
1373 if (*period <= *table_entry) {
1374 /*
1375 * When responding to a target that requests
1376 * sync, the requested rate may fall between
1377 * two rates that we can output, but still be
1378 * a rate that we can receive. Because of this,
1379 * we want to respond to the target with
1380 * the same rate that it sent to us even
1381 * if the period we use to send data to it
1382 * is lower. Only lower the response period
1383 * if we must.
1384 */
1385 if (i == 0) {
1386 *period = *table_entry;
1387 }
1388 break;
1389 }
1390 }
1391
1392 if (i == sizeof(tinfo_sync_period)) {
1393 /* Too slow for us. Use async transfers. */
1394 *period = 0;
1395 clockrate = 0;
1396 } else
1397 clockrate = i + 4;
1398
1399 return (clockrate);
1400 }
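/*
 * Worked example of the search above: if a target requests a period
 * factor of 30 (120ns), the first table entry >= 30 is 32 at index 1,
 * so we answer with the requested 30 but program a clock rate of
 * 1 + 4 = 5. Only a request faster than the first entry is rounded
 * up (to 25, i.e. 10MHz).
 */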
1401
1402 /*
1403 * See if we sent a particular extended message to the target.
1404 * If "full" is true, the target saw the full message.
1405 * If "full" is false, the target saw at least the first
1406 * byte of the message.
1407 */
1408 static int
1409 amdsentmsg(struct amd_softc *amd, u_int msgtype, int full)
1410 {
1411 int found;
1412 int index;
1413
1414 found = FALSE;
1415 index = 0;
1416
1417 while (index < amd->msgout_len) {
1418 if ((amd->msgout_buf[index] & MSG_IDENTIFYFLAG) != 0
1419 || amd->msgout_buf[index] == MSG_MESSAGE_REJECT)
1420 index++;
1421 else if (amd->msgout_buf[index] >= MSG_SIMPLE_Q_TAG
1422 && amd->msgout_buf[index] < MSG_IGN_WIDE_RESIDUE) {
1423 /* Skip tag type and tag id */
1424 index += 2;
1425 } else if (amd->msgout_buf[index] == MSG_EXTENDED) {
1426 /* Found a candidate */
1427 if (amd->msgout_buf[index+2] == msgtype) {
1428 u_int end_index;
1429
1430 end_index = index + 1
1431 + amd->msgout_buf[index + 1];
1432 if (full) {
1433 if (amd->msgout_index > end_index)
1434 found = TRUE;
1435 } else if (amd->msgout_index > index)
1436 found = TRUE;
1437 }
1438 break;
1439 } else {
1440 panic("amdsentmsg: Inconsistent msg buffer");
1441 }
1442 }
1443 return (found);
1444 }
1445
1446 static void
1447 amdconstructsdtr(struct amd_softc *amd, u_int period, u_int offset)
1448 {
1449 amd->msgout_buf[amd->msgout_index++] = MSG_EXTENDED;
1450 amd->msgout_buf[amd->msgout_index++] = MSG_EXT_SDTR_LEN;
1451 amd->msgout_buf[amd->msgout_index++] = MSG_EXT_SDTR;
1452 amd->msgout_buf[amd->msgout_index++] = period;
1453 amd->msgout_buf[amd->msgout_index++] = offset;
1454 amd->msgout_len += 5;
1455 }
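/*
 * The five bytes queued above form a standard SCSI-2 extended SDTR
 * message: MSG_EXTENDED, a length byte of MSG_EXT_SDTR_LEN (3), the
 * MSG_EXT_SDTR opcode, the transfer period factor and the offset.
 */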
1456
1457 static int
1458 amdhandlemsgreject(struct amd_softc *amd)
1459 {
1460 /*
1461 * If we had an outstanding SDTR for this
1462 * target, this is a signal that the target
1463 * is refusing negotiation. Also watch out
1464 * for rejected tag messages.
1465 */
1466 struct amd_srb *srb;
1467 struct amd_target_info *targ_info;
1468 int response = FALSE;
1469
1470 srb = amd->active_srb;
1471 targ_info = &amd->tinfo[amd->cur_target];
1472 if (amdsentmsg(amd, MSG_EXT_SDTR, /*full*/FALSE)) {
1473 /* note asynch xfers and clear flag */
1474 amdsetsync(amd, amd->cur_target, /*clockrate*/0,
1475 /*period*/0, /*offset*/0,
1476 AMD_TRANS_ACTIVE|AMD_TRANS_GOAL);
1477 printf("amd%d:%d: refuses synchronous negotiation. "
1478 "Using asynchronous transfers\n",
1479 amd->unit, amd->cur_target);
1480 } else if ((srb != NULL)
1481 && (srb->pccb->ccb_h.flags & CAM_TAG_ACTION_VALID) != 0) {
1482 struct ccb_trans_settings neg;
1483
1484 printf("amd%d:%d: refuses tagged commands. Performing "
1485 "non-tagged I/O\n", amd->unit, amd->cur_target);
1486
1487 amdsettags(amd, amd->cur_target, FALSE);
1488 neg.flags = 0;
1489 neg.valid = CCB_TRANS_TQ_VALID;
1490 xpt_setup_ccb(&neg.ccb_h, srb->pccb->ccb_h.path, /*priority*/1);
1491 xpt_async(AC_TRANSFER_NEG, srb->pccb->ccb_h.path, &neg);
1492
1493 /*
1494 * Resend the identify for this CCB as the target
1495 * may believe that the selection is invalid otherwise.
1496 */
1497 if (amd->msgout_len != 0)
1498 bcopy(&amd->msgout_buf[0], &amd->msgout_buf[1],
1499 amd->msgout_len);
1500 amd->msgout_buf[0] = MSG_IDENTIFYFLAG
1501 | srb->pccb->ccb_h.target_lun;
1502 amd->msgout_len++;
1503 if ((targ_info->disc_tag & AMD_CUR_DISCENB) != 0
1504 && (srb->pccb->ccb_h.flags & CAM_DIS_DISCONNECT) == 0)
1505 amd->msgout_buf[0] |= MSG_IDENTIFY_DISCFLAG;
1506
1507 srb->pccb->ccb_h.flags &= ~CAM_TAG_ACTION_VALID;
response = TRUE; /* a new identify message was queued; keep ATN asserted */
1508
1509 /*
1510 * Requeue all tagged commands for this target
1511 * currently in our possession so they can be
1512 * converted to untagged commands.
1513 */
1514 amdcompletematch(amd, amd->cur_target, amd->cur_lun,
1515 AMD_TAG_WILDCARD, &amd->waiting_srbs,
1516 CAM_DEV_QFRZN|CAM_REQUEUE_REQ);
1517 } else {
1518 /*
1519 * Otherwise, we ignore it.
1520 */
1521 printf("amd%d:%d: Message reject received -- ignored\n",
1522 amd->unit, amd->cur_target);
1523 }
1524 return (response);
1525 }
1526
1527 #if 0
1528 if (!(pSRB->SRBState & SRB_MSGIN_MULTI)) {
1529 if (bval == MSG_DISCONNECT) {
1530 pSRB->SRBState = SRB_DISCONNECT;
1531 } else if (bval == MSG_SAVEDATAPOINTER) {
1532 goto min6;
1533 } else if ((bval == MSG_EXTENDED)
1534 || ((bval >= MSG_SIMPLE_Q_TAG)
1535 && (bval <= MSG_ORDERED_Q_TAG))) {
1536 pSRB->SRBState |= SRB_MSGIN_MULTI;
1537 pSRB->MsgInBuf[0] = bval;
1538 pSRB->MsgCnt = 1;
1539 pSRB->pMsgPtr = &pSRB->MsgInBuf[1];
1540 } else if (bval == MSG_MESSAGE_REJECT) {
1541 amd_write8(amd, SCSICMDREG, RESET_ATN_CMD);
1542
1543 if (pSRB->SRBState & DO_SYNC_NEGO) {
1544 goto set_async;
1545 }
1546 } else if (bval == MSG_RESTOREPOINTERS) {
1547 goto min6;
1548 } else {
1549 goto min6;
1550 }
1551 } else { /* minx: */
1552 *pSRB->pMsgPtr = bval;
1553 pSRB->MsgCnt++;
1554 pSRB->pMsgPtr++;
1555 if ((pSRB->MsgInBuf[0] >= MSG_SIMPLE_Q_TAG)
1556 && (pSRB->MsgInBuf[0] <= MSG_ORDERED_Q_TAG)) {
1557 if (pSRB->MsgCnt == 2) {
1558 pSRB->SRBState = 0;
1559 pSRB = &amd->SRB_array[pSRB->MsgInBuf[1]];
1560 if ((pSRB->SRBState & SRB_DISCONNECT) == 0) {
1561 pSRB = amd->pTmpSRB;
1562 pSRB->SRBState = SRB_UNEXPECT_RESEL;
1563 pDCB->pActiveSRB = pSRB;
1564 pSRB->MsgOutBuf[0] = MSG_ABORT_TAG;
1565 EnableMsgOut2(amd, pSRB);
1566 } else {
1567 if (pDCB->DCBFlag & ABORT_DEV_) {
1568 pSRB->SRBState = SRB_ABORT_SENT;
1569 EnableMsgOut1(amd, pSRB);
1570 }
1571 pDCB->pActiveSRB = pSRB;
1572 pSRB->SRBState = SRB_DATA_XFER;
1573 }
1574 }
1575 } else if ((pSRB->MsgInBuf[0] == MSG_EXTENDED)
1576 && (pSRB->MsgCnt == 5)) {
1577 pSRB->SRBState &= ~(SRB_MSGIN_MULTI + DO_SYNC_NEGO);
1578 if ((pSRB->MsgInBuf[1] != 3)
1579 || (pSRB->MsgInBuf[2] != 1)) { /* reject_msg: */
1580 pSRB->MsgCnt = 1;
1581 pSRB->MsgInBuf[0] = MSG_MESSAGE_REJECT;
1582 amd_write8(amd, SCSICMDREG, SET_ATN_CMD);
1583 } else if (!(pSRB->MsgInBuf[3])
1584 || !(pSRB->MsgInBuf[4])) {
1585 set_async: /* set async */
1586
1587 pDCB = pSRB->pSRBDCB;
1588 /* disable sync & sync nego */
1589 pDCB->SyncMode &= ~(SYNC_ENABLE|SYNC_NEGO_DONE);
1590 pDCB->SyncPeriod = 0;
1591 pDCB->SyncOffset = 0;
1592
1593 pDCB->tinfo.goal.period = 0;
1594 pDCB->tinfo.goal.offset = 0;
1595
1596 pDCB->tinfo.current.period = 0;
1597 pDCB->tinfo.current.offset = 0;
1598 pDCB->tinfo.current.width =
1599 MSG_EXT_WDTR_BUS_8_BIT;
1600
1601 pDCB->CtrlR3 = FAST_CLK; /* non_fast */
1602 pDCB->CtrlR4 &= 0x3f;
1603 pDCB->CtrlR4 |= EATER_25NS;
1604 goto re_prog;
1605 } else {/* set sync */
1606
1607 pDCB = pSRB->pSRBDCB;
1608 /* enable sync & sync nego */
1609 pDCB->SyncMode |= SYNC_ENABLE|SYNC_NEGO_DONE;
1610
1611 /* set sync offset */
1612 pDCB->SyncOffset &= 0x0f0;
1613 pDCB->SyncOffset |= pSRB->MsgInBuf[4];
1614
1615 /* set sync period */
1616 pDCB->MaxNegoPeriod = pSRB->MsgInBuf[3];
1617
1618 wval = (u_int16_t) pSRB->MsgInBuf[3];
1619 wval = wval << 2;
1620 wval--;
1621 wval1 = wval / 25;
1622 if ((wval1 * 25) != wval) {
1623 wval1++;
1624 }
1625 bval = FAST_CLK|FAST_SCSI;
1626 pDCB->CtrlR4 &= 0x3f;
1627 if (wval1 >= 8) {
1628 /* Fast SCSI */
1629 wval1--;
1630 bval = FAST_CLK;
1631 pDCB->CtrlR4 |= EATER_25NS;
1632 }
1633 pDCB->CtrlR3 = bval;
1634 pDCB->SyncPeriod = (u_int8_t) wval1;
1635
1636 pDCB->tinfo.goal.period =
1637 tinfo_sync_period[pDCB->SyncPeriod - 4];
1638 pDCB->tinfo.goal.offset = pDCB->SyncOffset;
1639 pDCB->tinfo.current.period =
1640 tinfo_sync_period[pDCB->SyncPeriod - 4];
1641 pDCB->tinfo.current.offset = pDCB->SyncOffset;
1642
1643 /*
1644 * program SCSI control register
1645 */
1646 re_prog:
1647 amd_write8(amd, SYNCPERIOREG, pDCB->SyncPeriod);
1648 amd_write8(amd, SYNCOFFREG, pDCB->SyncOffset);
1649 amd_write8(amd, CNTLREG3, pDCB->CtrlR3);
1650 amd_write8(amd, CNTLREG4, pDCB->CtrlR4);
1651 }
1652 }
1653 }
1654 min6:
1655 amd_write8(amd, SCSICMDREG, MSG_ACCEPTED_CMD);
1656 return (SCSI_NOP0);
1657 }
1658 #endif
1659
1660 static u_int
1661 amd_DataOutPhase1(struct amd_softc *amd, struct amd_srb *pSRB, u_int scsistat)
1662 {
1663 DataIO_Comm(amd, pSRB, WRITE_DIRECTION);
1664 return (scsistat);
1665 }
1666
1667 static u_int
1668 amd_DataInPhase1(struct amd_softc *amd, struct amd_srb *pSRB, u_int scsistat)
1669 {
1670 DataIO_Comm(amd, pSRB, READ_DIRECTION);
1671 return (scsistat);
1672 }
1673
1674 static void
1675 DataIO_Comm(struct amd_softc *amd, struct amd_srb *pSRB, u_int ioDir)
1676 {
1677 struct amd_sg * psgl;
1678 u_int32_t lval;
1679
1680 if (pSRB->SGIndex < pSRB->SGcount) {
1681 amd_write8(amd, DMA_Cmd, DMA_IDLE_CMD|ioDir);/* |EN_DMA_INT */
1682
1683 if (!pSRB->SGToBeXferLen) {
1684 psgl = pSRB->pSGlist;
1685 pSRB->SGPhysAddr = psgl->SGXPtr;
1686 pSRB->SGToBeXferLen = psgl->SGXLen;
1687 }
1688 lval = pSRB->SGToBeXferLen;
1689 amd_write8(amd, CTCREG_LOW, lval);
1690 amd_write8(amd, CTCREG_MID, lval >> 8);
1691 amd_write8(amd, CURTXTCNTREG, lval >> 16);
1692
1693 amd_write32(amd, DMA_XferCnt, pSRB->SGToBeXferLen);
1694
1695 amd_write32(amd, DMA_XferAddr, pSRB->SGPhysAddr);
1696
1697 pSRB->SRBState = SRB_DATA_XFER;
1698
1699 amd_write8(amd, SCSICMDREG, DMA_COMMAND|INFO_XFER_CMD);
1700
1701 amd_write8(amd, DMA_Cmd, DMA_IDLE_CMD|ioDir); /* |EN_DMA_INT */
1702
1703 amd_write8(amd, DMA_Cmd, DMA_START_CMD|ioDir);/* |EN_DMA_INT */
1704 } else { /* xfer pad */
1705 if (pSRB->SGcount) {
1706 pSRB->AdaptStatus = H_OVER_UNDER_RUN;
1707 pSRB->SRBStatus |= OVER_RUN;
1708 }
1709 amd_write8(amd, CTCREG_LOW, 0);
1710 amd_write8(amd, CTCREG_MID, 0);
1711 amd_write8(amd, CURTXTCNTREG, 0);
1712
1713 pSRB->SRBState |= SRB_XFERPAD;
1714 amd_write8(amd, SCSICMDREG, DMA_COMMAND|XFER_PAD_BYTE);
1715 }
1716 }
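/*
 * DataIO_Comm() loads the same segment length into both the SCSI
 * core's 24-bit transfer counter (CTCREG_LOW/MID/CURTXTCNTREG) and
 * the DMA engine's count register before starting the transfer. Once
 * the S/G list is exhausted it switches to XFER_PAD_BYTE, flagging an
 * over/under-run if the target asked for more data than we had.
 */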
1717
1718 static u_int
1719 amd_CommandPhase1(struct amd_softc *amd, struct amd_srb *srb, u_int scsistat)
1720 {
1721 amd_write8(amd, SCSICMDREG, RESET_ATN_CMD);
1722 amd_write8(amd, SCSICMDREG, CLEAR_FIFO_CMD);
1723
1724 amdsetupcommand(amd, srb);
1725
1726 srb->SRBState = SRB_COMMAND;
1727 amd_write8(amd, SCSICMDREG, INFO_XFER_CMD);
1728 return (scsistat);
1729 }
1730
1731 static u_int
1732 amd_StatusPhase1(struct amd_softc *amd, struct amd_srb *pSRB, u_int scsistat)
1733 {
1734 amd_write8(amd, SCSICMDREG, CLEAR_FIFO_CMD);
1735 pSRB->SRBState = SRB_STATUS;
1736 amd_write8(amd, SCSICMDREG, INITIATOR_CMD_CMPLTE);
1737 return (scsistat);
1738 }
1739
1740 static u_int
1741 amd_MsgOutPhase1(struct amd_softc *amd, struct amd_srb *pSRB, u_int scsistat)
1742 {
1743 amd_write8(amd, SCSICMDREG, CLEAR_FIFO_CMD);
1744
1745 if (amd->msgout_len == 0) {
1746 amd->msgout_buf[0] = MSG_NOOP;
1747 amd->msgout_len = 1;
1748 }
1749 amd_write8_multi(amd, SCSIFIFOREG, amd->msgout_buf, amd->msgout_len);
1750 amd_write8(amd, SCSICMDREG, INFO_XFER_CMD);
1751 return (scsistat);
1752 }
1753
1754 static u_int
1755 amd_MsgInPhase1(struct amd_softc *amd, struct amd_srb *pSRB, u_int scsistat)
1756 {
1757 amd_write8(amd, SCSICMDREG, CLEAR_FIFO_CMD);
1758 amd_write8(amd, SCSICMDREG, INFO_XFER_CMD);
1759 return (scsistat);
1760 }
1761
1762 static u_int
1763 amd_NopPhase(struct amd_softc *amd, struct amd_srb *pSRB, u_int scsistat)
1764 {
1765 return (scsistat);
1766 }
1767
1768 static void
1769 amd_Disconnect(struct amd_softc * amd)
1770 {
1771 struct amd_srb *srb;
1772 int target;
1773 int lun;
1774
1775 srb = amd->active_srb;
1776 amd->active_srb = NULL;
1777 amd->last_phase = SCSI_BUS_FREE;
1778 amd_write8(amd, SCSICMDREG, EN_SEL_RESEL);
1779 target = amd->cur_target;
1780 lun = amd->cur_lun;
1781
1782 if (srb == NULL) {
1783 /* Invalid reselection */
1784 amdrunwaiting(amd);
1785 } else if (srb->SRBState & SRB_ABORT_SENT) {
1786 /* Clean up and done this srb */
1787 #if 0
788 while ((pSRB = TAILQ_FIRST(&amd->running_srbs)) != NULL) {
1789 /* XXX What about "done'ing" these srbs??? */
1790 if (pSRB->pSRBDCB == pDCB) {
1791 TAILQ_REMOVE(&amd->running_srbs, pSRB, links);
1792 TAILQ_INSERT_HEAD(&amd->free_srbs, pSRB, links);
1793 }
1794 }
1795 amdrunwaiting(amd);
1796 #endif
1797 } else {
1798 if ((srb->SRBState & (SRB_START | SRB_MSGOUT))
1799 || !(srb->SRBState & (SRB_DISCONNECT | SRB_COMPLETED))) {
1800 srb->TargetStatus = AMD_SCSI_STAT_SEL_TIMEOUT;
1801 goto disc1;
1802 } else if (srb->SRBState & SRB_DISCONNECT) {
1803 if (!(srb->pccb->ccb_h.flags & CAM_TAG_ACTION_VALID))
1804 amd->untagged_srbs[target][lun] = srb;
1805 amdrunwaiting(amd);
1806 } else if (srb->SRBState & SRB_COMPLETED) {
1807 disc1:
1808 srb->SRBState = SRB_FREE;
1809 SRBdone(amd, srb);
1810 }
1811 }
1812 return;
1813 }
1814
1815 static void
1816 amd_Reselect(struct amd_softc *amd)
1817 {
1818 struct amd_target_info *tinfo;
1819 u_int16_t disc_count;
1820
1821 amd_clear_msg_state(amd);
1822 if (amd->active_srb != NULL) {
1823 /* Requeue the SRB for our attempted Selection */
1824 TAILQ_REMOVE(&amd->running_srbs, amd->active_srb, links);
1825 TAILQ_INSERT_HEAD(&amd->waiting_srbs, amd->active_srb, links);
1826 amd->active_srb = NULL;
1827 }
1828 /* get ID */
1829 amd->cur_target = amd_read8(amd, SCSIFIFOREG);
1830 amd->cur_target ^= amd->HostID_Bit;
1831 amd->cur_target = ffs(amd->cur_target) - 1;
1832 amd->cur_lun = amd_read8(amd, SCSIFIFOREG) & 7;
1833 tinfo = &amd->tinfo[amd->cur_target];
1834 amd->active_srb = amd->untagged_srbs[amd->cur_target][amd->cur_lun];
1835 disc_count = amd->disc_count[amd->cur_target][amd->cur_lun];
1836 if (disc_count == 0) {
1837 printf("amd%d: Unexpected reselection for target %d, "
1838 "Issuing Abort\n", amd->unit, amd->cur_target);
1839 amd->msgout_buf[0] = MSG_ABORT;
1840 amd->msgout_len = 1;
1841 amd_write8(amd, SCSICMDREG, SET_ATN_CMD);
1842 }
1843 if (amd->active_srb != NULL) {
1844 amd->disc_count[amd->cur_target][amd->cur_lun]--;
1845 amd->untagged_srbs[amd->cur_target][amd->cur_lun] = NULL;
1846 }
1847
1848 amd_write8(amd, SCSIDESTIDREG, amd->cur_target);
1849 amd_write8(amd, SYNCPERIOREG, tinfo->sync_period_reg);
1850 amd_write8(amd, SYNCOFFREG, tinfo->sync_offset_reg);
1851 amd_write8(amd, CNTLREG1, tinfo->CtrlR1);
1852 amd_write8(amd, CNTLREG3, tinfo->CtrlR3);
1853 amd_write8(amd, CNTLREG4, tinfo->CtrlR4);
1854 amd_write8(amd, SCSICMDREG, MSG_ACCEPTED_CMD);/* drop /ACK */
1855 amd->last_phase = SCSI_NOP0;
1856 }
1857
1858 static void
1859 SRBdone(struct amd_softc *amd, struct amd_srb *pSRB)
1860 {
1861 u_int8_t bval, i, status;
1862 union ccb *pccb;
1863 struct ccb_scsiio *pcsio;
1864 int intflag;
1865 struct amd_sg *ptr2;
1866 u_int32_t swlval;
1867 u_int target_id, target_lun;
1868
1869 pccb = pSRB->pccb;
1870 pcsio = &pccb->csio;
1871 target_id = pSRB->pccb->ccb_h.target_id;
1872 target_lun = pSRB->pccb->ccb_h.target_lun;
1873
1874 CAM_DEBUG(pccb->ccb_h.path, CAM_DEBUG_TRACE,
1875 ("SRBdone - TagNumber %d\n", pSRB->TagNumber));
1876
1877 if ((pccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
1878 bus_dmasync_op_t op;
1879
1880 if ((pccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN)
1881 op = BUS_DMASYNC_POSTREAD;
1882 else
1883 op = BUS_DMASYNC_POSTWRITE;
1884 bus_dmamap_sync(amd->buffer_dmat, pSRB->dmamap, op);
1885 bus_dmamap_unload(amd->buffer_dmat, pSRB->dmamap);
1886 }
1887
1888 status = pSRB->TargetStatus;
1889 pccb->ccb_h.status = CAM_REQ_CMP;
1890 if (pSRB->SRBFlag & AUTO_REQSENSE) {
1891 pSRB->SRBFlag &= ~AUTO_REQSENSE;
1892 pSRB->AdaptStatus = 0;
1893 pSRB->TargetStatus = SCSI_STATUS_CHECK_COND;
1894
1895 if (status == SCSI_STATUS_CHECK_COND) {
1896 pccb->ccb_h.status = CAM_AUTOSENSE_FAIL;
1897 goto ckc_e;
1898 }
1899 *((u_int32_t *)&(pSRB->CmdBlock[0])) = pSRB->Segment0[0];
1900
1901 pcsio->sense_resid = pcsio->sense_len
1902 - pSRB->TotalXferredLen;
1903 pSRB->TotalXferredLen = pSRB->Segment1[1];
1904 if (pSRB->TotalXferredLen) {
1905 /* ???? */
1906 pcsio->resid = pcsio->dxfer_len
1907 - pSRB->TotalXferredLen;
1908 /* The resid field contains valid data */
1909 /* Flush resid bytes on complete */
1910 } else {
1911 pcsio->scsi_status = SCSI_STATUS_CHECK_COND;
1912 }
1913 pccb->ccb_h.status = CAM_SCSI_STATUS_ERROR|CAM_AUTOSNS_VALID;
1914 goto ckc_e;
1915 }
	if (status) {
		if (status == SCSI_STATUS_CHECK_COND) {

			if ((pSRB->SGIndex < pSRB->SGcount)
			 && (pSRB->SGcount) && (pSRB->SGToBeXferLen)) {
				bval = pSRB->SGcount;
				swlval = pSRB->SGToBeXferLen;
				ptr2 = pSRB->pSGlist;
				ptr2++;
				for (i = pSRB->SGIndex + 1; i < bval; i++) {
					swlval += ptr2->SGXLen;
					ptr2++;
				}
				/*
				 * Residual: what is left of the current
				 * segment plus every segment not started.
				 */
				pcsio->resid = (u_int32_t) swlval;

#ifdef AMD_DEBUG0
				printf("XferredLen=%8x,NotYetXferLen=%8x,",
				       pSRB->TotalXferredLen, swlval);
#endif
			}
			if ((pcsio->ccb_h.flags & CAM_DIS_AUTOSENSE) == 0) {
#ifdef AMD_DEBUG0
				printf("RequestSense..................\n");
#endif
				RequestSense(amd, pSRB);
				return;
			}
			pcsio->scsi_status = SCSI_STATUS_CHECK_COND;
			pccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
			goto ckc_e;
		} else if (status == SCSI_STATUS_QUEUE_FULL) {
			pSRB->AdaptStatus = 0;
			pSRB->TargetStatus = 0;
			pcsio->scsi_status = SCSI_STATUS_QUEUE_FULL;
			pccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
			goto ckc_e;
		} else if (status == AMD_SCSI_STAT_SEL_TIMEOUT) {
			pSRB->AdaptStatus = H_SEL_TIMEOUT;
			pSRB->TargetStatus = 0;

			pcsio->scsi_status = AMD_SCSI_STAT_SEL_TIMEOUT;
			pccb->ccb_h.status = CAM_SEL_TIMEOUT;
		} else if (status == SCSI_STATUS_BUSY) {
#ifdef AMD_DEBUG0
			printf("DC390: target busy at %s %d\n",
			       __FILE__, __LINE__);
#endif
			pcsio->scsi_status = SCSI_STATUS_BUSY;
			pccb->ccb_h.status = CAM_SCSI_BUSY;
		} else if (status == SCSI_STATUS_RESERV_CONFLICT) {
#ifdef AMD_DEBUG0
			printf("DC390: target reserved at %s %d\n",
			       __FILE__, __LINE__);
#endif
			pcsio->scsi_status = SCSI_STATUS_RESERV_CONFLICT;
			pccb->ccb_h.status = CAM_SCSI_STATUS_ERROR; /* XXX */
		} else {
			pSRB->AdaptStatus = 0;
#ifdef AMD_DEBUG0
			printf("DC390: driver stuffup at %s %d\n",
			       __FILE__, __LINE__);
#endif
			pccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
		}
	} else {
		status = pSRB->AdaptStatus;
		if (status & H_OVER_UNDER_RUN) {
			pSRB->TargetStatus = 0;

			pccb->ccb_h.status = CAM_DATA_RUN_ERR;
		} else if (pSRB->SRBStatus & PARITY_ERROR) {
#ifdef AMD_DEBUG0
			printf("DC390: driver stuffup %s %d\n",
			       __FILE__, __LINE__);
#endif
			/* Uncorrectable parity error on the transfer. */
			pccb->ccb_h.status = CAM_UNCOR_PARITY;
		} else {	/* No error */
			pSRB->AdaptStatus = 0;
			pSRB->TargetStatus = 0;
			/* there is no error, (sense is invalid) */
		}
	}
ckc_e:
	intflag = splcam();
	if ((pccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
		/* CAM request not yet complete => freeze the device queue. */
		xpt_freeze_devq(pccb->ccb_h.path, 1);
		pccb->ccb_h.status |= CAM_DEV_QFRZN;
	}
	TAILQ_REMOVE(&amd->running_srbs, pSRB, links);
	TAILQ_INSERT_HEAD(&amd->free_srbs, pSRB, links);
	amdrunwaiting(amd);
	splx(intflag);
	xpt_done(pccb);
}
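
/*
 * Editor's sketch (illustrative, not driver code): the residual computed
 * for the CHECK CONDITION case above is the untransferred remainder of
 * the current scatter/gather segment plus the full length of every
 * segment that was never started.  "struct amd_sg" stands in for
 * whatever amd.h names the S/G element type; the SGXLen field and the
 * SRB fields match their usage in SRBdone().
 */
#if 0
static u_int32_t
amd_sg_residual(struct amd_srb *srb)
{
	struct amd_sg *sg;
	u_int32_t resid;
	u_int i;

	resid = srb->SGToBeXferLen;		/* rest of current segment */
	sg = &srb->pSGlist[srb->SGIndex + 1];	/* first untouched segment */
	for (i = srb->SGIndex + 1; i < srb->SGcount; i++, sg++)
		resid += sg->SGXLen;
	return (resid);
}
#endif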

static void
amd_ResetSCSIBus(struct amd_softc *amd)
{
	int intflag;

	intflag = splcam();
	amd->ACBFlag |= RESET_DEV;
	amd_write8(amd, DMA_Cmd, DMA_IDLE_CMD);
	amd_write8(amd, SCSICMDREG, RST_SCSI_BUS_CMD);
	splx(intflag);
	return;
}

static void
amd_ScsiRstDetect(struct amd_softc *amd)
{
	int intflag;
	u_int32_t wlval;

#ifdef AMD_DEBUG0
	printf("amd_ScsiRstDetect\n");
#endif

	wlval = 1000;
	while (--wlval) {	/* delay ~1 second for the bus to settle */
		DELAY(1000);
	}
	intflag = splcam();

	amd_write8(amd, DMA_Cmd, DMA_IDLE_CMD);
	amd_write8(amd, SCSICMDREG, CLEAR_FIFO_CMD);

	if (amd->ACBFlag & RESET_DEV) {
		amd->ACBFlag |= RESET_DONE;
	} else {
		amd->ACBFlag |= RESET_DETECT;
		ResetDevParam(amd);
		amdcompletematch(amd, CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD,
				 AMD_TAG_WILDCARD, &amd->running_srbs,
				 CAM_DEV_QFRZN|CAM_SCSI_BUS_RESET);
		amdcompletematch(amd, CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD,
				 AMD_TAG_WILDCARD, &amd->waiting_srbs,
				 CAM_DEV_QFRZN|CAM_SCSI_BUS_RESET);
		amd->active_srb = NULL;
		amd->ACBFlag = 0;
		amdrunwaiting(amd);
	}
	splx(intflag);
	return;
}
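
/*
 * Editor's note on amd_ScsiRstDetect() above: when RESET_DEV is set the
 * reset was requested by the driver itself (amd_ResetSCSIBus()), so the
 * routine only marks it done.  Otherwise a third party reset the bus:
 * the per-target transfer parameters are reset and every running and
 * waiting request is completed back to CAM as CAM_SCSI_BUS_RESET.
 */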

static void
RequestSense(struct amd_softc *amd, struct amd_srb *pSRB)
{
	union ccb *pccb;
	struct ccb_scsiio *pcsio;

	pccb = pSRB->pccb;
	pcsio = &pccb->csio;

	pSRB->SRBFlag |= AUTO_REQSENSE;
	/* Stash the original CDB and transfer state for SRBdone(). */
	pSRB->Segment0[0] = *((u_int32_t *)&(pSRB->CmdBlock[0]));
	pSRB->Segment0[1] = *((u_int32_t *)&(pSRB->CmdBlock[4]));
	pSRB->Segment1[0] = (pSRB->ScsiCmdLen << 8) + pSRB->SGcount;
	pSRB->Segment1[1] = pSRB->TotalXferredLen;

	pSRB->AdaptStatus = 0;
	pSRB->TargetStatus = 0;

	pSRB->Segmentx.SGXPtr = (u_int32_t) vtophys(&pcsio->sense_data);
	pSRB->Segmentx.SGXLen = (u_int32_t) pcsio->sense_len;

	pSRB->pSGlist = &pSRB->Segmentx;
	pSRB->SGcount = 1;
	pSRB->SGIndex = 0;

	/* Build a six-byte REQUEST SENSE CDB (little-endian stores). */
	*((u_int32_t *)&(pSRB->CmdBlock[0])) = 0x00000003;
	pSRB->CmdBlock[1] = pSRB->pccb->ccb_h.target_lun << 5;
	*((u_int16_t *)&(pSRB->CmdBlock[4])) = pcsio->sense_len;
	pSRB->ScsiCmdLen = 6;

	pSRB->TotalXferredLen = 0;
	pSRB->SGToBeXferLen = 0;
	if (amdstart(amd, pSRB) != 0) {
		TAILQ_REMOVE(&amd->running_srbs, pSRB, links);
		TAILQ_INSERT_HEAD(&amd->waiting_srbs, pSRB, links);
	}
}
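
/*
 * Editor's sketch (illustrative, not driver code): the 32-bit and 16-bit
 * stores in RequestSense() rely on the i386 being little-endian.  Spelled
 * out byte by byte, the six-byte CDB they build is the following:
 */
#if 0
static void
build_request_sense_cdb(u_int8_t cdb[6], u_int8_t lun, u_int16_t sense_len)
{
	cdb[0] = 0x03;			/* REQUEST SENSE opcode */
	cdb[1] = lun << 5;		/* LUN in bits 7-5 */
	cdb[2] = 0;
	cdb[3] = 0;
	cdb[4] = sense_len & 0xff;	/* allocation length */
	cdb[5] = sense_len >> 8;	/* control byte; zero as long as
					 * sense_len < 256 */
}
#endif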

static void
amd_InvalidCmd(struct amd_softc *amd)
{
	struct amd_srb *srb;

	srb = amd->active_srb;
	if (srb->SRBState & (SRB_START|SRB_MSGOUT))
		amd_write8(amd, SCSICMDREG, CLEAR_FIFO_CMD);
}

void
amd_linkSRB(struct amd_softc *amd)
{
	u_int16_t count, i;
	struct amd_srb *psrb;

	count = amd->SRBCount;

	for (i = 0; i < count; i++) {
		psrb = (struct amd_srb *)&amd->SRB_array[i];
		psrb->TagNumber = i;
		TAILQ_INSERT_TAIL(&amd->free_srbs, psrb, links);
	}
}

void
amd_EnDisableCE(struct amd_softc *amd, int mode, int *regval)
{
	if (mode == ENABLE_CE) {
		*regval = 0xc0;
	} else {
		*regval = 0x80;
	}
	pci_cfgwrite(amd->config_id, *regval, 0, /*bytes*/1);
	if (mode == DISABLE_CE) {
		pci_cfgwrite(amd->config_id, *regval, 0, /*bytes*/1);
	}
	DELAY(160);
}

void
amd_EEpromOutDI(struct amd_softc *amd, int *regval, int Carry)
{
	u_int bval;

	bval = 0;
	if (Carry) {
		bval = 0x40;
		*regval = 0x80;
		pci_cfgwrite(amd->config_id, *regval, bval, /*bytes*/1);
	}
	DELAY(160);
	bval |= 0x80;
	pci_cfgwrite(amd->config_id, *regval, bval, /*bytes*/1);
	DELAY(160);
	pci_cfgwrite(amd->config_id, *regval, 0, /*bytes*/1);
	DELAY(160);
}

static int
amd_EEpromInDO(struct amd_softc *amd)
{
	pci_cfgwrite(amd->config_id, 0x80, 0x80, /*bytes*/1);
	DELAY(160);
	pci_cfgwrite(amd->config_id, 0x80, 0x40, /*bytes*/1);
	DELAY(160);
	if (pci_cfgread(amd->config_id, 0, /*bytes*/1) == 0x22)
		return (1);
	return (0);
}
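
/*
 * Editor's note (hedged): the helpers above bit-bang a serial EEPROM
 * through the chip's PCI configuration space.  amd_EEpromOutDI() pulses
 * configuration register 0x80 to clock one bit out to the part (0x40 is
 * the data bit, 0x80 the clock), and amd_EEpromInDO() clocks once and
 * then samples the part's data-out line by reading configuration offset
 * 0, treating a value of 0x22 - the low byte of AMD's 0x1022 vendor ID -
 * as a 1 bit.  The DELAY(160) calls pace the serial clock.
 */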

static u_int16_t
EEpromGetData1(struct amd_softc *amd)
{
	u_int i;
	u_int carryFlag;
	u_int16_t wval;

	wval = 0;
	for (i = 0; i < 16; i++) {
		wval <<= 1;
		carryFlag = amd_EEpromInDO(amd);
		wval |= carryFlag;
	}
	return (wval);
}

static void
amd_Prepare(struct amd_softc *amd, int *regval, u_int8_t EEpromCmd)
{
	u_int i, j;
	int carryFlag;

	carryFlag = 1;
	j = 0x80;
	for (i = 0; i < 9; i++) {
		amd_EEpromOutDI(amd, regval, carryFlag);
		carryFlag = (EEpromCmd & j) ? 1 : 0;
		j >>= 1;
	}
}
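
/*
 * Editor's sketch (illustrative, not driver code): amd_Prepare() shifts a
 * 9-bit 93C46-style serial-EEPROM command out MSB first: a start bit of
 * one, then the 8-bit command byte (2-bit opcode in bits 7-6, 6-bit word
 * address in bits 5-0).  The loop above is equivalent to:
 */
#if 0
static void
eeprom_clock_out_command(struct amd_softc *amd, int *regval, u_int8_t cmd)
{
	int i;

	amd_EEpromOutDI(amd, regval, 1);	/* start bit */
	for (i = 7; i >= 0; i--)		/* MSB first */
		amd_EEpromOutDI(amd, regval, (cmd >> i) & 1);
}
#endif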

static void
amd_ReadEEprom(struct amd_softc *amd)
{
	int regval;
	u_int i;
	u_int16_t *ptr;
	u_int8_t cmd;

	ptr = (u_int16_t *)&amd->eepromBuf[0];
	cmd = EEPROM_READ;
	for (i = 0; i < 0x40; i++) {
		amd_EnDisableCE(amd, ENABLE_CE, &regval);
		amd_Prepare(amd, &regval, cmd);
		*ptr = EEpromGetData1(amd);
		ptr++;
		cmd++;		/* advance the word address field */
		amd_EnDisableCE(amd, DISABLE_CE, &regval);
	}
}

static void
amd_load_defaults(struct amd_softc *amd)
{
	int target;

	bzero(&amd->eepromBuf, sizeof amd->eepromBuf);
	for (target = 0; target < MAX_SCSI_ID; target++)
		amd->eepromBuf[target << 2] =
		    (TAG_QUEUING|EN_DISCONNECT|SYNC_NEGO|PARITY_CHK);
	amd->eepromBuf[EE_ADAPT_SCSI_ID] = 7;
	amd->eepromBuf[EE_MODE2] = ACTIVE_NEGATION|LUN_CHECK|GREATER_1G;
	amd->eepromBuf[EE_TAG_CMD_NUM] = 4;
}
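
/*
 * Editor's note: each target owns a four-byte record in the EEPROM image
 * starting at eepromBuf[id << 2]; byte 0 is the EE_MODE1 flag byte
 * (TAG_QUEUING, EN_DISCONNECT, SYNC_NEGO, PARITY_CHK) that amd_init()
 * consults below when it builds the per-target settings.
 */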

static void
amd_load_eeprom_or_defaults(struct amd_softc *amd)
{
	u_int16_t wval, *ptr;
	u_int8_t i;

	amd_ReadEEprom(amd);
	wval = 0;
	ptr = (u_int16_t *)&amd->eepromBuf[0];
	for (i = 0; i < EE_DATA_SIZE; i += 2, ptr++)
		wval += *ptr;

	if (wval != EE_CHECKSUM) {
		if (bootverbose)
			printf("amd%d: SEEPROM data unavailable. "
			       "Using default device parameters.\n",
			       amd->unit);
		amd_load_defaults(amd);
	}
}
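
/*
 * Editor's sketch (illustrative, not driver code): the validity test in
 * amd_load_eeprom_or_defaults() is a 16-bit word sum over the whole
 * EE_DATA_SIZE-byte EEPROM image, which must equal EE_CHECKSUM.  Written
 * without the u_int16_t pointer walk, assuming a little-endian image:
 */
#if 0
static int
eeprom_image_valid(const u_int8_t *buf)
{
	u_int16_t sum;
	u_int i;

	sum = 0;
	for (i = 0; i < EE_DATA_SIZE; i += 2)
		sum += buf[i] | (buf[i + 1] << 8);
	return (sum == EE_CHECKSUM);
}
#endif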

/*
 **********************************************************************
 * Function : amd_init
 * Purpose  : initialize the internal structures for a given SCSI host
 * Inputs   : unit      - this adapter's unit number
 *            config_id - PCI configuration handle for this adapter
 **********************************************************************
 */
static struct amd_softc *
amd_init(int unit, pcici_t config_id)
{
	struct amd_softc *amd;
	u_int bval;
	u_int i;

	amd = (struct amd_softc *)malloc(sizeof(struct amd_softc),
					 M_DEVBUF, M_WAITOK);
	if (amd == NULL) {
		printf("DC390%d: cannot allocate ACB !\n", unit);
		return (amd);
	}
	bzero(amd, sizeof(struct amd_softc));
	amd->tag = I386_BUS_SPACE_IO;
	amd->bsh = pci_conf_read(config_id, PCI_MAP_REG_START) & 0xFFFE;
	/* DMA tag for mapping buffers into device visible space. */
	if (bus_dma_tag_create(/*parent_dmat*/NULL, /*alignment*/0,
			       /*boundary*/0,
			       /*lowaddr*/BUS_SPACE_MAXADDR_32BIT,
			       /*highaddr*/BUS_SPACE_MAXADDR,
			       /*filter*/NULL, /*filterarg*/NULL,
			       /*maxsize*/MAXBSIZE, /*nsegments*/AMD_NSEG,
			       /*maxsegsz*/AMD_MAXTRANSFER_SIZE,
			       /*flags*/BUS_DMA_ALLOCNOW,
			       &amd->buffer_dmat) != 0) {
		free(amd, M_DEVBUF);
		return (NULL);
	}
	TAILQ_INIT(&amd->free_srbs);
	TAILQ_INIT(&amd->running_srbs);
	TAILQ_INIT(&amd->waiting_srbs);
	amd->last_phase = SCSI_BUS_FREE;
	amd->config_id = config_id;
	amd->unit = unit;
	amd->SRBCount = MAX_SRB_CNT;
	amd->status = 0;
	amd_load_eeprom_or_defaults(amd);
	amd->max_id = 7;
	if (amd->eepromBuf[EE_MODE2] & LUN_CHECK) {
		amd->max_lun = 7;
	} else {
		amd->max_lun = 0;
	}
	amd->AdaptSCSIID = amd->eepromBuf[EE_ADAPT_SCSI_ID];
	amd->HostID_Bit = (1 << amd->AdaptSCSIID);
	amd->AdaptSCSILUN = 0;
	/* (eepromBuf[EE_TAG_CMD_NUM]) << 2; */
	amd->ACBFlag = 0;
	amd->Gmode2 = amd->eepromBuf[EE_MODE2];
	amd_linkSRB(amd);
	for (i = 0; i <= amd->max_id; i++) {
		if (amd->AdaptSCSIID != i) {
			struct amd_target_info *tinfo;
			PEEprom prom;

			tinfo = &amd->tinfo[i];
			prom = (PEEprom)&amd->eepromBuf[i << 2];
			if ((prom->EE_MODE1 & EN_DISCONNECT) != 0) {
				tinfo->disc_tag |= AMD_USR_DISCENB;
				if ((prom->EE_MODE1 & TAG_QUEUING) != 0)
					tinfo->disc_tag |= AMD_USR_TAGENB;
			}
			if ((prom->EE_MODE1 & SYNC_NEGO) != 0) {
				tinfo->user.period =
				    eeprom_period[prom->EE_SPEED];
				tinfo->user.offset = AMD_MAX_SYNC_OFFSET;
			}
			tinfo->CtrlR1 = amd->AdaptSCSIID;
			if ((prom->EE_MODE1 & PARITY_CHK) != 0)
				tinfo->CtrlR1 |= PARITY_ERR_REPO;
			tinfo->CtrlR3 = FAST_CLK;
			tinfo->CtrlR4 = EATER_25NS;
			if ((amd->eepromBuf[EE_MODE2] & ACTIVE_NEGATION) != 0)
				tinfo->CtrlR4 |= NEGATE_REQACKDATA;
		}
	}
	amd_write8(amd, SCSITIMEOUTREG, 153); /* 250ms selection timeout */
	/* Conversion factor = 0, 40MHz clock */
	amd_write8(amd, CLKFACTREG, CLK_FREQ_40MHZ);
	/* NOP cmd - clear command register */
	amd_write8(amd, SCSICMDREG, NOP_CMD);
	amd_write8(amd, CNTLREG2, EN_FEATURE|EN_SCSI2_CMD);
	amd_write8(amd, CNTLREG3, FAST_CLK);
	bval = EATER_25NS;
	if (amd->eepromBuf[EE_MODE2] & ACTIVE_NEGATION) {
		bval |= NEGATE_REQACKDATA;
	}
	amd_write8(amd, CNTLREG4, bval);

	/* Disable SCSI bus reset interrupt */
	amd_write8(amd, CNTLREG1, DIS_INT_ON_SCSI_RST);
	return (amd);
}
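
/*
 * Editor's note (hedged): the 153 written to SCSITIMEOUTREG in amd_init()
 * matches the usual Am53c974/FAS216 selection-timeout formula (taken from
 * the chip family's documentation, not from this driver):
 *
 *	register = timeout * clock_hz / (8192 * clock_conversion_factor)
 *
 * With a 40MHz clock the conversion factor is 8, so 153 * 8192 * 8 /
 * 40000000 is roughly 0.25s - the 250ms the comment above advertises.
 */
#if 0
static u_int8_t
amd_sel_timeout_reg(u_int timeout_ms, u_int clk_mhz, u_int ccf)
{
	/* e.g. amd_sel_timeout_reg(250, 40, 8) == 153 */
	return ((timeout_ms * clk_mhz * 1000 + (8192 * ccf) / 2)
	    / (8192 * ccf));
}
#endif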

/*
 * Attach and initialize a host adapter.
 */
static void
amd_attach(pcici_t config_id, int unit)
{
	struct cam_devq *devq;	/* Device Queue to use for this SIM */
	u_int8_t intstat;
	u_int32_t wlval;
	struct amd_softc *amd;

	wlval = pci_conf_read(config_id, PCI_ID_REG);
	if (wlval != PCI_DEVICE_ID_AMD53C974)
		return;

	if ((amd = amd_init(unit, config_id)) == NULL)
		return;

	/* Reset any pending interrupt. */
	intstat = amd_read8(amd, INTSTATREG);

	/* After setting up the adapter, map our interrupt. */
	if (!pci_map_int(config_id, amd_intr, amd, &cam_imask)) {
		if (bootverbose)
			printf("amd%d: unable to register interrupt handler!\n",
			       unit);
		free(amd, M_DEVBUF);
		return;
	}

	/*
	 * Now let the CAM generic SCSI layer find the SCSI devices on
	 * the bus.  Create the device queue for our SIM, sized so that
	 * (MAX_START_JOB - 1) transactions can be outstanding at once.
	 */
	devq = cam_simq_alloc(MAX_START_JOB);
	if (devq == NULL) {
		free(amd, M_DEVBUF);
		return;
	}

	amd->psim = cam_sim_alloc(amd_action, amd_poll, "amd",
				  amd, amd->unit, 1, MAX_TAGS_CMD_QUEUE,
				  devq);
	if (amd->psim == NULL) {
		cam_simq_free(devq);
		free(amd, M_DEVBUF);
		return;
	}

	if (xpt_bus_register(amd->psim, 0) != CAM_SUCCESS) {
		cam_sim_free(amd->psim, /*free_devq*/TRUE);
		free(amd, M_DEVBUF);
		return;
	}

	if (xpt_create_path(&amd->ppath, /*periph*/NULL,
			    cam_sim_path(amd->psim), CAM_TARGET_WILDCARD,
			    CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
		xpt_bus_deregister(cam_sim_path(amd->psim));
		cam_sim_free(amd->psim, /*free_devq*/TRUE);
		free(amd, M_DEVBUF);
		return;
	}
}

static const char *
amd_probe(pcici_t tag, pcidi_t type)
{
	if (type == PCI_DEVICE_ID_AMD53C974) {
		return ("Tekram DC390(T)/AMD53c974 SCSI Host Adapter");
	} else {
		return (NULL);
	}
}