sys/pci/isp_pci.c
1 /* $FreeBSD$ */
2 /*
3 * PCI specific probe and attach routines for Qlogic ISP SCSI adapters.
4 * FreeBSD Version.
5 *
6 * Copyright (c) 1997, 1998, 1999, 2000 by Matthew Jacob
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice immediately at the beginning of the file, without modification,
13 * this list of conditions, and the following disclaimer.
14 * 2. The name of the author may not be used to endorse or promote products
15 * derived from this software without specific prior written permission.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
21 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27 * SUCH DAMAGE.
28 */
29 #include <dev/isp/isp_freebsd.h>
30 #include <dev/isp/asm_pci.h>
31 #include <sys/malloc.h>
32 #include <vm/vm.h>
33 #include <vm/pmap.h>
34 #include <vm/vm_extern.h>
35
36
37 #include <pci/pcireg.h>
38 #include <pci/pcivar.h>
39
40 #include <machine/bus_memio.h>
41 #include <machine/bus_pio.h>
42 #include <machine/bus.h>
43 #include <machine/md_var.h>
44
45 static u_int16_t isp_pci_rd_reg __P((struct ispsoftc *, int));
46 static void isp_pci_wr_reg __P((struct ispsoftc *, int, u_int16_t));
47 #if !(defined(ISP_DISABLE_1080_SUPPORT) && defined(ISP_DISABLE_12160_SUPPORT))
48 static u_int16_t isp_pci_rd_reg_1080 __P((struct ispsoftc *, int));
49 static void isp_pci_wr_reg_1080 __P((struct ispsoftc *, int, u_int16_t));
50 #endif
51 static int isp_pci_mbxdma __P((struct ispsoftc *));
52 static int isp_pci_dmasetup __P((struct ispsoftc *, XS_T *,
53 ispreq_t *, u_int16_t *, u_int16_t));
54 static void
55 isp_pci_dmateardown __P((struct ispsoftc *, XS_T *, u_int32_t));
56
57 static void isp_pci_reset1 __P((struct ispsoftc *));
58 static void isp_pci_dumpregs __P((struct ispsoftc *, const char *));
59
60 #ifndef ISP_CODE_ORG
61 #define ISP_CODE_ORG 0x1000
62 #endif
63 #ifndef ISP_1040_RISC_CODE
64 #define ISP_1040_RISC_CODE NULL
65 #endif
66 #ifndef ISP_1080_RISC_CODE
67 #define ISP_1080_RISC_CODE NULL
68 #endif
69 #ifndef ISP_12160_RISC_CODE
70 #define ISP_12160_RISC_CODE NULL
71 #endif
72 #ifndef ISP_2100_RISC_CODE
73 #define ISP_2100_RISC_CODE NULL
74 #endif
75 #ifndef ISP_2200_RISC_CODE
76 #define ISP_2200_RISC_CODE NULL
77 #endif
78
79 #ifndef ISP_DISABLE_1020_SUPPORT
80 static struct ispmdvec mdvec = {
81 isp_pci_rd_reg,
82 isp_pci_wr_reg,
83 isp_pci_mbxdma,
84 isp_pci_dmasetup,
85 isp_pci_dmateardown,
86 NULL,
87 isp_pci_reset1,
88 isp_pci_dumpregs,
89 ISP_1040_RISC_CODE,
90 BIU_BURST_ENABLE|BIU_PCI_CONF1_FIFO_64
91 };
92 #endif
93
94 #ifndef ISP_DISABLE_1080_SUPPORT
95 static struct ispmdvec mdvec_1080 = {
96 isp_pci_rd_reg_1080,
97 isp_pci_wr_reg_1080,
98 isp_pci_mbxdma,
99 isp_pci_dmasetup,
100 isp_pci_dmateardown,
101 NULL,
102 isp_pci_reset1,
103 isp_pci_dumpregs,
104 ISP_1080_RISC_CODE,
105 BIU_BURST_ENABLE|BIU_PCI_CONF1_FIFO_64
106 };
107 #endif
108
109 #ifndef ISP_DISABLE_12160_SUPPORT
110 static struct ispmdvec mdvec_12160 = {
111 isp_pci_rd_reg_1080,
112 isp_pci_wr_reg_1080,
113 isp_pci_mbxdma,
114 isp_pci_dmasetup,
115 isp_pci_dmateardown,
116 NULL,
117 isp_pci_reset1,
118 isp_pci_dumpregs,
119 ISP_12160_RISC_CODE,
120 BIU_BURST_ENABLE|BIU_PCI_CONF1_FIFO_64
121 };
122 #endif
123
124 #ifndef ISP_DISABLE_2100_SUPPORT
125 static struct ispmdvec mdvec_2100 = {
126 isp_pci_rd_reg,
127 isp_pci_wr_reg,
128 isp_pci_mbxdma,
129 isp_pci_dmasetup,
130 isp_pci_dmateardown,
131 NULL,
132 isp_pci_reset1,
133 isp_pci_dumpregs,
134 ISP_2100_RISC_CODE
135 };
136 #endif
137
138 #ifndef ISP_DISABLE_2200_SUPPORT
139 static struct ispmdvec mdvec_2200 = {
140 isp_pci_rd_reg,
141 isp_pci_wr_reg,
142 isp_pci_mbxdma,
143 isp_pci_dmasetup,
144 isp_pci_dmateardown,
145 NULL,
146 isp_pci_reset1,
147 isp_pci_dumpregs,
148 ISP_2200_RISC_CODE
149 };
150 #endif
151
152 #ifndef SCSI_ISP_PREFER_MEM_MAP
153 #define SCSI_ISP_PREFER_MEM_MAP 0
154 #endif
155
156 #ifndef PCIM_CMD_INVEN
157 #define PCIM_CMD_INVEN 0x10
158 #endif
159 #ifndef PCIM_CMD_BUSMASTEREN
160 #define PCIM_CMD_BUSMASTEREN 0x0004
161 #endif
162 #ifndef PCIM_CMD_PERRESPEN
163 #define PCIM_CMD_PERRESPEN 0x0040
164 #endif
165 #ifndef PCIM_CMD_SEREN
166 #define PCIM_CMD_SEREN 0x0100
167 #endif
168
169 #ifndef PCIR_COMMAND
170 #define PCIR_COMMAND 0x04
171 #endif
172
173 #ifndef PCIR_CACHELNSZ
174 #define PCIR_CACHELNSZ 0x0c
175 #endif
176
177 #ifndef PCIR_LATTIMER
178 #define PCIR_LATTIMER 0x0d
179 #endif
180
181 #ifndef PCIR_ROMADDR
182 #define PCIR_ROMADDR 0x30
183 #endif
184
185 #ifndef PCI_VENDOR_QLOGIC
186 #define PCI_VENDOR_QLOGIC 0x1077
187 #endif
188
189 #ifndef PCI_PRODUCT_QLOGIC_ISP1020
190 #define PCI_PRODUCT_QLOGIC_ISP1020 0x1020
191 #endif
192
193 #ifndef PCI_PRODUCT_QLOGIC_ISP1080
194 #define PCI_PRODUCT_QLOGIC_ISP1080 0x1080
195 #endif
196
197 #ifndef PCI_PRODUCT_QLOGIC_ISP12160
198 #define PCI_PRODUCT_QLOGIC_ISP12160 0x1216
199 #endif
200
201 #ifndef PCI_PRODUCT_QLOGIC_ISP1240
202 #define PCI_PRODUCT_QLOGIC_ISP1240 0x1240
203 #endif
204
205 #ifndef PCI_PRODUCT_QLOGIC_ISP1280
206 #define PCI_PRODUCT_QLOGIC_ISP1280 0x1280
207 #endif
208
209 #ifndef PCI_PRODUCT_QLOGIC_ISP2100
210 #define PCI_PRODUCT_QLOGIC_ISP2100 0x2100
211 #endif
212
213 #ifndef PCI_PRODUCT_QLOGIC_ISP2200
214 #define PCI_PRODUCT_QLOGIC_ISP2200 0x2200
215 #endif
216
217 #define PCI_QLOGIC_ISP1020 \
218 ((PCI_PRODUCT_QLOGIC_ISP1020 << 16) | PCI_VENDOR_QLOGIC)
219
220 #define PCI_QLOGIC_ISP1080 \
221 ((PCI_PRODUCT_QLOGIC_ISP1080 << 16) | PCI_VENDOR_QLOGIC)
222
223 #define PCI_QLOGIC_ISP12160 \
224 ((PCI_PRODUCT_QLOGIC_ISP12160 << 16) | PCI_VENDOR_QLOGIC)
225
226 #define PCI_QLOGIC_ISP1240 \
227 ((PCI_PRODUCT_QLOGIC_ISP1240 << 16) | PCI_VENDOR_QLOGIC)
228
229 #define PCI_QLOGIC_ISP1280 \
230 ((PCI_PRODUCT_QLOGIC_ISP1280 << 16) | PCI_VENDOR_QLOGIC)
231
232 #define PCI_QLOGIC_ISP2100 \
233 ((PCI_PRODUCT_QLOGIC_ISP2100 << 16) | PCI_VENDOR_QLOGIC)
234
235 #define PCI_QLOGIC_ISP2200 \
236 ((PCI_PRODUCT_QLOGIC_ISP2200 << 16) | PCI_VENDOR_QLOGIC)
237
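/*
 * Each of the composite IDs above is laid out to match the first
 * 32-bit word of PCI configuration space, which carries the vendor
 * ID in the low 16 bits and the device ID in the high 16 bits.  A
 * worked example for the ISP1020:
 *
 *	(0x1020 << 16) | 0x1077 == 0x10201077
 *
 * This is the value isp_pci_probe() is handed in its 'type' argument
 * and what isp_pci_attach() reads back via pci_conf_read(PCI_ID_REG).
 */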
238 /*
239 * Odd case for some AMI raid cards... We need to *not* attach to this.
240 */
241 #define AMI_RAID_SUBVENDOR_ID 0x101e
242
243 #define IO_MAP_REG 0x10
244 #define MEM_MAP_REG 0x14
245
246 #define PCI_DFLT_LTNCY 0x40
247 #define PCI_DFLT_LNSZ 0x10
248
249 static const char *isp_pci_probe __P((pcici_t tag, pcidi_t type));
250 static void isp_pci_attach __P((pcici_t config_d, int unit));
251
252 /* This distinguishing define is not right, but it does work */
253 #ifdef __alpha__
254 #define IO_SPACE_MAPPING ALPHA_BUS_SPACE_IO
255 #define MEM_SPACE_MAPPING ALPHA_BUS_SPACE_MEM
256 #else
257 #define IO_SPACE_MAPPING I386_BUS_SPACE_IO
258 #define MEM_SPACE_MAPPING I386_BUS_SPACE_MEM
259 #endif
260
261 struct isp_pcisoftc {
262 struct ispsoftc pci_isp;
263 pcici_t pci_id;
264 bus_space_tag_t pci_st;
265 bus_space_handle_t pci_sh;
266 int16_t pci_poff[_NREG_BLKS];
267 bus_dma_tag_t parent_dmat;
268 bus_dma_tag_t cntrol_dmat;
269 bus_dmamap_t cntrol_dmap;
270 bus_dmamap_t *dmaps;
271 };
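/*
 * The common ispsoftc is deliberately the first member, so a pointer
 * to the one is a pointer to the other; the register accessors below
 * rely on this and simply cast, e.g.:
 *
 *	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
 */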
272
273 static u_long ispunit;
274
275 struct pci_device isp_pci_driver = {
276 "isp",
277 isp_pci_probe,
278 isp_pci_attach,
279 &ispunit,
280 NULL
281 };
282 DATA_SET (pcidevice_set, isp_pci_driver);
283
284
285 static const char *
286 isp_pci_probe(pcici_t tag, pcidi_t type)
287 {
288 static int oneshot = 1;
289 char *x;
290
291 switch (type) {
292 #ifndef ISP_DISABLE_1020_SUPPORT
293 case PCI_QLOGIC_ISP1020:
294 x = "Qlogic ISP 1020/1040 PCI SCSI Adapter";
295 break;
296 #endif
297 #ifndef ISP_DISABLE_1080_SUPPORT
298 case PCI_QLOGIC_ISP1080:
299 x = "Qlogic ISP 1080 PCI SCSI Adapter";
300 break;
301 case PCI_QLOGIC_ISP1240:
302 x = "Qlogic ISP 1240 PCI SCSI Adapter";
303 break;
304 case PCI_QLOGIC_ISP1280:
305 x = "Qlogic ISP 1280 PCI SCSI Adapter";
306 break;
307 #endif
308 #ifndef ISP_DISABLE_12160_SUPPORT
309 case PCI_QLOGIC_ISP12160:
310 x = "Qlogic ISP 12160 PCI SCSI Adapter";
311 break;
312 #endif
313 #ifndef ISP_DISABLE_2100_SUPPORT
314 case PCI_QLOGIC_ISP2100:
315 x = "Qlogic ISP 2100 PCI FC-AL Adapter";
316 break;
317 #endif
318 #ifndef ISP_DISABLE_2200_SUPPORT
319 case PCI_QLOGIC_ISP2200:
320 x = "Qlogic ISP 2200 PCI FC-AL Adapter";
321 break;
322 #endif
323 default:
324 return (NULL);
325 }
326 if (oneshot && bootverbose) {
327 oneshot = 0;
328 printf("Qlogic ISP Driver, FreeBSD Version %d.%d, "
329 "Core Version %d.%d\n",
330 ISP_PLATFORM_VERSION_MAJOR, ISP_PLATFORM_VERSION_MINOR,
331 ISP_CORE_VERSION_MAJOR, ISP_CORE_VERSION_MINOR);
332 }
333 return (x);
334 }
335
336 static void
337 isp_pci_attach(pcici_t cfid, int unit)
338 {
339 #ifdef SCSI_ISP_WWN
340 const char *name = SCSI_ISP_WWN;
341 char *vtp = NULL;
342 #endif
343 int mapped, prefer_mem_map, bitmap;
344 pci_port_t io_port;
345 u_int32_t data, rev, linesz, psize, basetype;
346 struct isp_pcisoftc *pcs;
347 struct ispsoftc *isp;
348 vm_offset_t vaddr, paddr;
349 struct ispmdvec *mdvp;
350 bus_size_t lim;
351
352 /*
353 * Figure out if we're supposed to skip this one.
354 */
355 if (getenv_int("isp_disable", &bitmap)) {
356 if (bitmap & (1 << unit)) {
357 printf("isp%d: not configuring\n", unit);
358 return;
359 }
360 }
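	/*
	 * "isp_disable" is a unit bitmap taken from the kernel
	 * environment (typically set at the boot loader).  As an
	 * illustrative example, isp_disable=0x6 would skip units 1
	 * and 2 ((1 << 1) | (1 << 2)) while letting unit 0 attach.
	 */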
361
362 pcs = malloc(sizeof (struct isp_pcisoftc), M_DEVBUF, M_NOWAIT);
363 if (pcs == NULL) {
364 printf("isp%d: cannot allocate softc\n", unit);
365 return;
366 }
367 bzero(pcs, sizeof (struct isp_pcisoftc));
368
369 /*
370 * Figure out which to try first: memory mapping or I/O mapping?
371 */
372 #if SCSI_ISP_PREFER_MEM_MAP == 1
373 prefer_mem_map = 1;
374 #else
375 prefer_mem_map = 0;
376 #endif
377 bitmap = 0;
378 if (getenv_int("isp_mem_map", &bitmap)) {
379 if (bitmap & (1 << unit))
380 prefer_mem_map = 1;
381 }
382 bitmap = 0;
383 if (getenv_int("isp_io_map", &bitmap)) {
384 if (bitmap & (1 << unit))
385 prefer_mem_map = 0;
386 }
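	/*
	 * Note the precedence here: the compile-time default comes from
	 * SCSI_ISP_PREFER_MEM_MAP, then "isp_mem_map" can switch a unit
	 * to memory mapping, and "isp_io_map" can switch it back.  A
	 * unit whose bit is set in both tunables therefore prefers I/O
	 * space, because that test runs last.
	 */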
387
388 vaddr = paddr = NULL;
389 mapped = 0;
390 linesz = PCI_DFLT_LNSZ;
391 /*
392 * Note that pci_conf_read does 32-bit, word-aligned accesses.
393 */
394 data = pci_conf_read(cfid, PCIR_COMMAND);
395 if (prefer_mem_map) {
396 if (data & PCI_COMMAND_MEM_ENABLE) {
397 if (pci_map_mem(cfid, MEM_MAP_REG, &vaddr, &paddr)) {
398 pcs->pci_st = MEM_SPACE_MAPPING;
399 pcs->pci_sh = vaddr;
400 mapped++;
401 }
402 }
403 if (mapped == 0 && (data & PCI_COMMAND_IO_ENABLE)) {
404 if (pci_map_port(cfid, PCI_MAP_REG_START, &io_port)) {
405 pcs->pci_st = IO_SPACE_MAPPING;
406 pcs->pci_sh = io_port;
407 mapped++;
408 }
409 }
410 } else {
411 if (data & PCI_COMMAND_IO_ENABLE) {
412 if (pci_map_port(cfid, PCI_MAP_REG_START, &io_port)) {
413 pcs->pci_st = IO_SPACE_MAPPING;
414 pcs->pci_sh = io_port;
415 mapped++;
416 }
417 }
418 if (mapped == 0 && (data & PCI_COMMAND_MEM_ENABLE)) {
419 if (pci_map_mem(cfid, MEM_MAP_REG, &vaddr, &paddr)) {
420 pcs->pci_st = MEM_SPACE_MAPPING;
421 pcs->pci_sh = vaddr;
422 mapped++;
423 }
424 }
425 }
426 if (mapped == 0) {
427 printf("isp%d: unable to map any ports!\n", unit);
428 free(pcs, M_DEVBUF);
429 return;
430 }
431 if (bootverbose)
432 printf("isp%d: using %s space register mapping\n", unit,
433 pcs->pci_st == IO_SPACE_MAPPING? "I/O" : "Memory");
434
435 data = pci_conf_read(cfid, PCI_ID_REG);
436 rev = pci_conf_read(cfid, PCI_CLASS_REG) & 0xff; /* revision */
437 pcs->pci_poff[BIU_BLOCK >> _BLK_REG_SHFT] = BIU_REGS_OFF;
438 pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = PCI_MBOX_REGS_OFF;
439 pcs->pci_poff[SXP_BLOCK >> _BLK_REG_SHFT] = PCI_SXP_REGS_OFF;
440 pcs->pci_poff[RISC_BLOCK >> _BLK_REG_SHFT] = PCI_RISC_REGS_OFF;
441 pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] = DMA_REGS_OFF;
442 /*
443 * GCC!
444 */
445 mdvp = &mdvec;
446 basetype = ISP_HA_SCSI_UNKNOWN;
447 psize = sizeof (sdparam);
448 lim = BUS_SPACE_MAXSIZE_32BIT;
449 #ifndef ISP_DISABLE_1020_SUPPORT
450 if (data == PCI_QLOGIC_ISP1020) {
451 mdvp = &mdvec;
452 basetype = ISP_HA_SCSI_UNKNOWN;
453 psize = sizeof (sdparam);
454 lim = BUS_SPACE_MAXSIZE_24BIT;
455 }
456 #endif
457 #ifndef ISP_DISABLE_1080_SUPPORT
458 if (data == PCI_QLOGIC_ISP1080) {
459 mdvp = &mdvec_1080;
460 basetype = ISP_HA_SCSI_1080;
461 psize = sizeof (sdparam);
462 pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] =
463 ISP1080_DMA_REGS_OFF;
464 }
465 if (data == PCI_QLOGIC_ISP1240) {
466 mdvp = &mdvec_1080;
467 basetype = ISP_HA_SCSI_1240;
468 psize = 2 * sizeof (sdparam);
469 pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] =
470 ISP1080_DMA_REGS_OFF;
471 }
472 if (data == PCI_QLOGIC_ISP1280) {
473 mdvp = &mdvec_1080;
474 basetype = ISP_HA_SCSI_1280;
475 psize = 2 * sizeof (sdparam);
476 pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] =
477 ISP1080_DMA_REGS_OFF;
478 }
479 #endif
480 #ifndef ISP_DISABLE_12160_SUPPORT
481 if (data == PCI_QLOGIC_ISP12160) {
482 mdvp = &mdvec_12160;
483 basetype = ISP_HA_SCSI_12160;
484 psize = 2 * sizeof (sdparam);
485 pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] =
486 ISP1080_DMA_REGS_OFF;
487 }
488 #endif
489 #ifndef ISP_DISABLE_2100_SUPPORT
490 if (data == PCI_QLOGIC_ISP2100) {
491 mdvp = &mdvec_2100;
492 basetype = ISP_HA_FC_2100;
493 psize = sizeof (fcparam);
494 pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] =
495 PCI_MBOX_REGS2100_OFF;
496 if (rev < 3) {
497 /*
498 * XXX: Need to get the actual revision
499 * XXX: number of the 2100 FB. At any rate,
500 * XXX: lower cache line size for early revision
501 * XXX: boards.
502 */
503 linesz = 1;
504 }
505 }
506 #endif
507 #ifndef ISP_DISABLE_2200_SUPPORT
508 if (data == PCI_QLOGIC_ISP2200) {
509 mdvp = &mdvec_2200;
510 basetype = ISP_HA_FC_2200;
511 psize = sizeof (fcparam);
512 pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] =
513 PCI_MBOX_REGS2100_OFF;
514 }
515 #endif
516 isp = &pcs->pci_isp;
517 isp->isp_param = malloc(psize, M_DEVBUF, M_NOWAIT);
518 if (isp->isp_param == NULL) {
519 printf("isp%d: cannot allocate parameter data\n", unit);
520 free(pcs, M_DEVBUF);
521 return;
522 }
523 bzero(isp->isp_param, psize);
524 isp->isp_mdvec = mdvp;
525 isp->isp_type = basetype;
526 isp->isp_revision = rev;
527 (void) snprintf(isp->isp_name, sizeof (isp->isp_name), "isp%d", unit);
528 isp->isp_osinfo.unit = unit;
529
530 /*
531 * Set up logging levels.
532 */
533 bitmap = 0;
534 (void) getenv_int("isp_debug", &bitmap);
535 if (bitmap) {
536 isp->isp_dblev = bitmap;
537 } else {
538 isp->isp_dblev = ISP_LOGWARN|ISP_LOGERR;
539 }
540 if (bootverbose)
541 isp->isp_dblev |= ISP_LOGCONFIG|ISP_LOGINFO;
542
543 /*
544 * Make sure that SERR, PERR, WRITE INVALIDATE and BUSMASTER
545 * are set.
546 */
547 data = pci_cfgread(cfid, PCIR_COMMAND, 2);
548 data |= PCIM_CMD_SEREN |
549 PCIM_CMD_PERRESPEN |
550 PCIM_CMD_BUSMASTEREN |
551 PCIM_CMD_INVEN;
552 pci_cfgwrite(cfid, PCIR_COMMAND, data, 2);
553
554 /*
555 * Make sure the Cache Line Size register is set sensibly.
556 */
557 data = pci_cfgread(cfid, PCIR_CACHELNSZ, 1);
558 if (data != linesz) {
559 data = PCI_DFLT_LNSZ;
560 isp_prt(isp, ISP_LOGCONFIG, "set PCI line size to %d", data);
561 pci_cfgwrite(cfid, PCIR_CACHELNSZ, data, 1);
562 }
563
564 /*
565 * Make sure the Latency Timer is sane.
566 */
567 data = pci_cfgread(cfid, PCIR_LATTIMER, 1);
568 if (data < PCI_DFLT_LTNCY) {
569 data = PCI_DFLT_LTNCY;
570 isp_prt(isp, ISP_LOGCONFIG,"set PCI latency to %d", data);
571 pci_cfgwrite(cfid, PCIR_LATTIMER, data, 1);
572 }
573
574 /*
575 * Make sure we've disabled the ROM.
576 */
577 data = pci_cfgread(cfid, PCIR_ROMADDR, 4);
578 data &= ~1;
579 pci_cfgwrite(cfid, PCIR_ROMADDR, data, 4);
580
581 if (bus_dma_tag_create(NULL, 1, 0, BUS_SPACE_MAXADDR_32BIT,
582 BUS_SPACE_MAXADDR, NULL, NULL, lim + 1,
583 255, lim, 0, &pcs->parent_dmat) != 0) {
584 DISABLE_INTS(isp);
585 printf("%s: could not create master dma tag\n", isp->isp_name);
586 free(pcs, M_DEVBUF);
587 return;
588 }
589 if (!pci_map_int(cfid, (void (*)(void *))isp_intr, isp, &cam_imask)) {
590 DISABLE_INTS(isp);
591 printf("%s: could not map interrupt\n", isp->isp_name);
592 free(pcs, M_DEVBUF);
593 return;
594 }
595
596 pcs->pci_id = cfid;
597 #ifdef SCSI_ISP_NO_FWLOAD_MASK
598 if (SCSI_ISP_NO_FWLOAD_MASK && (SCSI_ISP_NO_FWLOAD_MASK & (1 << unit)))
599 isp->isp_confopts |= ISP_CFG_NORELOAD;
600 #endif
601 if (getenv_int("isp_no_fwload", &bitmap)) {
602 if (bitmap & (1 << unit))
603 isp->isp_confopts |= ISP_CFG_NORELOAD;
604 }
605 if (getenv_int("isp_fwload", &bitmap)) {
606 if (bitmap & (1 << unit))
607 isp->isp_confopts &= ~ISP_CFG_NORELOAD;
608 }
609
610 #ifdef SCSI_ISP_NO_NVRAM_MASK
611 if (SCSI_ISP_NO_NVRAM_MASK && (SCSI_ISP_NO_NVRAM_MASK & (1 << unit))) {
612 printf("%s: ignoring NVRAM\n", isp->isp_name);
613 isp->isp_confopts |= ISP_CFG_NONVRAM;
614 }
615 #endif
616 if (getenv_int("isp_no_nvram", &bitmap)) {
617 if (bitmap & (1 << unit))
618 isp->isp_confopts |= ISP_CFG_NONVRAM;
619 }
620 if (getenv_int("isp_nvram", &bitmap)) {
621 if (bitmap & (1 << unit))
622 isp->isp_confopts &= ~ISP_CFG_NONVRAM;
623 }
624
625 #ifdef SCSI_ISP_FCDUPLEX
626 if (IS_FC(isp)) {
627 if (SCSI_ISP_FCDUPLEX && (SCSI_ISP_FCDUPLEX & (1 << unit))) {
628 isp->isp_confopts |= ISP_CFG_FULL_DUPLEX;
629 }
630 }
631 #endif
632 if (getenv_int("isp_fcduplex", &bitmap)) {
633 if (bitmap & (1 << unit))
634 isp->isp_confopts |= ISP_CFG_FULL_DUPLEX;
635 }
636 if (getenv_int("isp_no_fcduplex", &bitmap)) {
637 if (bitmap & (1 << unit))
638 isp->isp_confopts &= ~ISP_CFG_FULL_DUPLEX;
639 }
640 if (getenv_int("isp_nport", &bitmap)) {
641 if (bitmap & (1 << unit))
642 isp->isp_confopts |= ISP_CFG_NPORT;
643 }
644 /*
645 * Look for overriding WWN. This is a Node WWN so it binds to
646 * all FC instances. A Port WWN will be constructed from it
647 * as appropriate.
648 */
649 #ifdef SCSI_ISP_WWN
650 isp->isp_osinfo.default_wwn = strtoq(name, &vtp, 16);
651 if (vtp != name && *vtp == 0) {
652 isp->isp_confopts |= ISP_CFG_OWNWWN;
653 } else
654 #endif
655 if (!getenv_quad("isp_wwn", (quad_t *) &isp->isp_osinfo.default_wwn)) {
656 int i;
657 u_int64_t seed = (u_int64_t) (intptr_t) isp;
658
659 seed <<= 16;
660 seed &= ((1LL << 48) - 1LL);
661 /*
662 * This isn't very random, but it's the best we can do for
663 * the real edge case of cards that don't have WWNs. If
664 * you recompile a new vers.c, you'll get a different WWN.
665 */
666 for (i = 0; version[i] != 0; i++) {
667 seed += version[i];
668 }
669 /*
670 * Make sure the top nibble has something vaguely sensible
671 * (NAA == Locally Administered)
672 */
673 isp->isp_osinfo.default_wwn |= (3LL << 60) | seed;
674 } else {
675 isp->isp_confopts |= ISP_CFG_OWNWWN;
676 }
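	/*
	 * To illustrate the synthesized WWN: the seed starts as the
	 * softc pointer shifted into bits 16..47, gets perturbed by
	 * summing the version string, and (3LL << 60) then forces the
	 * top NAA nibble to 3, so the fallback node WWN always lands
	 * in locally-administered space (0x3xxxxxxxxxxxxxxx).
	 */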
677
678 /*
679 * Make sure we're in reset state.
680 */
681 ISP_LOCK(isp);
682 isp_reset(isp);
683 if (isp->isp_state != ISP_RESETSTATE) {
684 ISP_UNLOCK(isp);
685 goto bad;
686 }
687 isp_init(isp);
688 if (isp->isp_state != ISP_INITSTATE) {
689 /* If we're a Fibre Channel Card, we allow deferred attach */
690 if (IS_SCSI(isp)) {
691 isp_uninit(isp);
692 ISP_UNLOCK(isp);
693 goto bad;
694 }
695 }
696 isp_attach(isp);
697 if (isp->isp_state != ISP_RUNSTATE) {
698 /* If we're a Fibre Channel Card, we allow deferred attach */
699 if (IS_SCSI(isp)) {
700 isp_uninit(isp);
701 ISP_UNLOCK(isp);
702 goto bad;
703 }
704 }
705 #ifdef __alpha__
706 /*
707 * THIS SHOULD NOT HAVE TO BE HERE
708 */
709 alpha_register_pci_scsi(cfid->bus, cfid->slot, isp->isp_sim);
710 #endif
711 ISP_UNLOCK(isp);
712 return;
713
714 bad:
715 if (pcs) {
716 if (pcs->pci_isp.isp_param)
717 free(pcs->pci_isp.isp_param, M_DEVBUF);
718 free(pcs, M_DEVBUF);
719 }
720 return;
721 }
722
723 static u_int16_t
724 isp_pci_rd_reg(isp, regoff)
725 struct ispsoftc *isp;
726 int regoff;
727 {
728 u_int16_t rv;
729 struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
730 int offset, oldconf = 0;
731
732 if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
733 /*
734 * We will assume that someone has paused the RISC processor.
735 */
736 oldconf = isp_pci_rd_reg(isp, BIU_CONF1);
737 isp_pci_wr_reg(isp, BIU_CONF1, oldconf | BIU_PCI_CONF1_SXP);
738 }
739 offset = pcs->pci_poff[(regoff & _BLK_REG_MASK) >> _BLK_REG_SHFT];
740 offset += (regoff & 0xff);
741 rv = bus_space_read_2(pcs->pci_st, pcs->pci_sh, offset);
742 if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
743 isp_pci_wr_reg(isp, BIU_CONF1, oldconf);
744 }
745 return (rv);
746 }
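/*
 * A sketch of the offset arithmetic above: regoff carries a register
 * block tag in its _BLK_REG_MASK bits and a byte offset within that
 * block in its low byte.  The block tag indexes pci_poff[], which
 * isp_pci_attach() filled in with each block's PCI-specific base (on
 * a 2100, for instance, the MBOX_BLOCK slot holds
 * PCI_MBOX_REGS2100_OFF), so the bus space offset is simply:
 *
 *	pci_poff[(regoff & _BLK_REG_MASK) >> _BLK_REG_SHFT] + (regoff & 0xff)
 */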
747
748 static void
749 isp_pci_wr_reg(isp, regoff, val)
750 struct ispsoftc *isp;
751 int regoff;
752 u_int16_t val;
753 {
754 struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
755 int offset, oldconf = 0;
756
757 if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
758 /*
759 * We will assume that someone has paused the RISC processor.
760 */
761 oldconf = isp_pci_rd_reg(isp, BIU_CONF1);
762 isp_pci_wr_reg(isp, BIU_CONF1, oldconf | BIU_PCI_CONF1_SXP);
763 }
764 offset = pcs->pci_poff[(regoff & _BLK_REG_MASK) >> _BLK_REG_SHFT];
765 offset += (regoff & 0xff);
766 bus_space_write_2(pcs->pci_st, pcs->pci_sh, offset, val);
767 if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
768 isp_pci_wr_reg(isp, BIU_CONF1, oldconf);
769 }
770 }
771
772 #if !(defined(ISP_DISABLE_1080_SUPPORT) && defined(ISP_DISABLE_12160_SUPPORT))
773 static u_int16_t
774 isp_pci_rd_reg_1080(isp, regoff)
775 struct ispsoftc *isp;
776 int regoff;
777 {
778 u_int16_t rv, oc = 0;
779 struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
780 int offset;
781
782 if ((regoff & _BLK_REG_MASK) == SXP_BLOCK ||
783 (regoff & _BLK_REG_MASK) == (SXP_BLOCK|SXP_BANK1_SELECT)) {
784 u_int16_t tc;
785 /*
786 * We will assume that someone has paused the RISC processor.
787 */
788 oc = isp_pci_rd_reg(isp, BIU_CONF1);
789 tc = oc & ~BIU_PCI1080_CONF1_DMA;
790 if (regoff & SXP_BANK1_SELECT)
791 tc |= BIU_PCI1080_CONF1_SXP1;
792 else
793 tc |= BIU_PCI1080_CONF1_SXP0;
794 isp_pci_wr_reg(isp, BIU_CONF1, tc);
795 } else if ((regoff & _BLK_REG_MASK) == DMA_BLOCK) {
796 oc = isp_pci_rd_reg(isp, BIU_CONF1);
797 isp_pci_wr_reg(isp, BIU_CONF1, oc | BIU_PCI1080_CONF1_DMA);
798 }
799 offset = pcs->pci_poff[(regoff & _BLK_REG_MASK) >> _BLK_REG_SHFT];
800 offset += (regoff & 0xff);
801 rv = bus_space_read_2(pcs->pci_st, pcs->pci_sh, offset);
802 if (oc) {
803 isp_pci_wr_reg(isp, BIU_CONF1, oc);
804 }
805 return (rv);
806 }
807
808 static void
809 isp_pci_wr_reg_1080(isp, regoff, val)
810 struct ispsoftc *isp;
811 int regoff;
812 u_int16_t val;
813 {
814 struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
815 int offset, oc = 0;
816
817 if ((regoff & _BLK_REG_MASK) == SXP_BLOCK ||
818 (regoff & _BLK_REG_MASK) == (SXP_BLOCK|SXP_BANK1_SELECT)) {
819 u_int16_t tc;
820 /*
821 * We will assume that someone has paused the RISC processor.
822 */
823 oc = isp_pci_rd_reg(isp, BIU_CONF1);
824 tc = oc & ~BIU_PCI1080_CONF1_DMA;
825 if (regoff & SXP_BANK1_SELECT)
826 tc |= BIU_PCI1080_CONF1_SXP1;
827 else
828 tc |= BIU_PCI1080_CONF1_SXP0;
829 isp_pci_wr_reg(isp, BIU_CONF1, tc);
830 } else if ((regoff & _BLK_REG_MASK) == DMA_BLOCK) {
831 oc = isp_pci_rd_reg(isp, BIU_CONF1);
832 isp_pci_wr_reg(isp, BIU_CONF1, oc | BIU_PCI1080_CONF1_DMA);
833 }
834 offset = pcs->pci_poff[(regoff & _BLK_REG_MASK) >> _BLK_REG_SHFT];
835 offset += (regoff & 0xff);
836 bus_space_write_2(pcs->pci_st, pcs->pci_sh, offset, val);
837 if (oc) {
838 isp_pci_wr_reg(isp, BIU_CONF1, oc);
839 }
840 }
841 #endif
842
843 static void isp_map_rquest __P((void *, bus_dma_segment_t *, int, int));
844 static void isp_map_result __P((void *, bus_dma_segment_t *, int, int));
845 static void isp_map_fcscrt __P((void *, bus_dma_segment_t *, int, int));
846
847 struct imush {
848 struct ispsoftc *isp;
849 int error;
850 };
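/*
 * bus_dmamap_load() reports its result only through a callback that
 * gets a single opaque argument, so this structure is how the
 * callbacks below hand a loaded bus address (or an error) back to
 * isp_pci_mbxdma().  A minimal sketch of the pattern as used there:
 *
 *	struct imush im;
 *
 *	im.isp = isp;
 *	im.error = 0;
 *	bus_dmamap_load(tag, map, vaddr, len, isp_map_rquest, &im, 0);
 *	if (im.error)
 *		(bail out)
 */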
851
852 static void
853 isp_map_rquest(void *arg, bus_dma_segment_t *segs, int nseg, int error)
854 {
855 struct imush *imushp = (struct imush *) arg;
856 if (error) {
857 imushp->error = error;
858 } else {
859 imushp->isp->isp_rquest_dma = segs->ds_addr;
860 }
861 }
862
863 static void
864 isp_map_result(void *arg, bus_dma_segment_t *segs, int nseg, int error)
865 {
866 struct imush *imushp = (struct imush *) arg;
867 if (error) {
868 imushp->error = error;
869 } else {
870 imushp->isp->isp_result_dma = segs->ds_addr;
871 }
872 }
873
874 static void
875 isp_map_fcscrt(void *arg, bus_dma_segment_t *segs, int nseg, int error)
876 {
877 struct imush *imushp = (struct imush *) arg;
878 if (error) {
879 imushp->error = error;
880 } else {
881 fcparam *fcp = imushp->isp->isp_param;
882 fcp->isp_scdma = segs->ds_addr;
883 }
884 }
885
886 static int
887 isp_pci_mbxdma(struct ispsoftc *isp)
888 {
889 struct isp_pcisoftc *pci = (struct isp_pcisoftc *)isp;
890 caddr_t base;
891 u_int32_t len;
892 int i, error;
893 bus_size_t lim;
894 struct imush im;
895
896
897 /*
898 * Already been here? If so, leave...
899 */
900 if (isp->isp_rquest) {
901 return (0);
902 }
903
904 len = sizeof (XS_T **) * isp->isp_maxcmds;
905 isp->isp_xflist = (XS_T **) malloc(len, M_DEVBUF, M_WAITOK);
906 if (isp->isp_xflist == NULL) {
907 isp_prt(isp, ISP_LOGERR, "cannot alloc xflist array");
908 return (1);
909 }
910 bzero(isp->isp_xflist, len);
911 len = sizeof (bus_dmamap_t) * isp->isp_maxcmds;
912 pci->dmaps = (bus_dmamap_t *) malloc(len, M_DEVBUF, M_WAITOK);
913 if (pci->dmaps == NULL) {
914 isp_prt(isp, ISP_LOGERR, "can't alloc dma maps");
915 free(isp->isp_xflist, M_DEVBUF);
916 return (1);
917 }
918
919 if (IS_FC(isp) || IS_ULTRA2(isp))
920 lim = BUS_SPACE_MAXADDR + 1;
921 else
922 lim = BUS_SPACE_MAXADDR_24BIT + 1;
923
924 /*
925 * Allocate and map the request, result queues, plus FC scratch area.
926 */
927 len = ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(isp));
928 len += ISP_QUEUE_SIZE(RESULT_QUEUE_LEN(isp));
929 if (IS_FC(isp)) {
930 len += ISP2100_SCRLEN;
931 }
932 if (bus_dma_tag_create(pci->parent_dmat, PAGE_SIZE, lim,
933 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, len, 1,
934 BUS_SPACE_MAXSIZE_32BIT, 0, &pci->cntrol_dmat) != 0) {
935 isp_prt(isp, ISP_LOGERR,
936 "cannot create a dma tag for control spaces");
937 free(isp->isp_xflist, M_DEVBUF);
938 free(pci->dmaps, M_DEVBUF);
939 return (1);
940 }
941 if (bus_dmamem_alloc(pci->cntrol_dmat, (void **)&base,
942 BUS_DMA_NOWAIT, &pci->cntrol_dmap) != 0) {
943 isp_prt(isp, ISP_LOGERR,
944 "cannot allocate %d bytes of CCB memory");
945 free(isp->isp_xflist, M_DEVBUF);
946 free(pci->dmaps, M_DEVBUF);
947 return (1);
948 }
949
950 isp->isp_rquest = base;
951 im.isp = isp;
952 im.error = 0;
953 bus_dmamap_load(pci->cntrol_dmat, pci->cntrol_dmap, isp->isp_rquest,
954 ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(isp)), isp_map_rquest, &im, 0);
955 if (im.error) {
956 isp_prt(isp, ISP_LOGERR,
957 "error %d loading dma map for DMA request queue", im.error);
958 free(isp->isp_xflist, M_DEVBUF);
959 free(pci->dmaps, M_DEVBUF);
960 isp->isp_rquest = NULL;
961 return (1);
962 }
963 isp->isp_result = base + ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(isp));
964 im.error = 0;
965 bus_dmamap_load(pci->cntrol_dmat, pci->cntrol_dmap, isp->isp_result,
966 ISP_QUEUE_SIZE(RESULT_QUEUE_LEN(isp)), isp_map_result, &im, 0);
967 if (im.error) {
968 isp_prt(isp, ISP_LOGERR,
969 "error %d loading dma map for DMA result queue", im.error);
970 free(isp->isp_xflist, M_DEVBUF);
971 free(pci->dmaps, M_DEVBUF);
972 isp->isp_rquest = NULL;
973 return (1);
974 }
975
976 for (i = 0; i < isp->isp_maxcmds; i++) {
977 error = bus_dmamap_create(pci->parent_dmat, 0, &pci->dmaps[i]);
978 if (error) {
979 isp_prt(isp, ISP_LOGERR,
980 "error %d creating per-cmd DMA maps", error);
981 free(isp->isp_xflist, M_DEVBUF);
982 free(pci->dmaps, M_DEVBUF);
983 isp->isp_rquest = NULL;
984 return (1);
985 }
986 }
987
988 if (IS_FC(isp)) {
989 fcparam *fcp = (fcparam *) isp->isp_param;
990 fcp->isp_scratch = base +
991 ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(isp)) +
992 ISP_QUEUE_SIZE(RESULT_QUEUE_LEN(isp));
993 im.error = 0;
994 bus_dmamap_load(pci->cntrol_dmat, pci->cntrol_dmap,
995 fcp->isp_scratch, ISP2100_SCRLEN, isp_map_fcscrt, &im, 0);
996 if (im.error) {
997 isp_prt(isp, ISP_LOGERR,
998 "error %d loading FC scratch area", im.error);
999 free(isp->isp_xflist, M_DEVBUF);
1000 free(pci->dmaps, M_DEVBUF);
1001 isp->isp_rquest = NULL;
1002 return (1);
1003 }
1004 }
1005 return (0);
1006 }
1007
1008 typedef struct {
1009 struct ispsoftc *isp;
1010 void *cmd_token;
1011 void *rq;
1012 u_int16_t *iptrp;
1013 u_int16_t optr;
1014 u_int error;
1015 } mush_t;
1016
1017 #define MUSHERR_NOQENTRIES -2
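
/*
 * mush_t plays the same messenger role for the per-command DMA
 * callbacks: it carries the softc, the CCB, the partially built
 * request queue entry, and the queue in/out pointers into the
 * callback.  MUSHERR_NOQENTRIES is negative so that it can never
 * collide with the positive errno values (EINVAL, EFBIG, ...) that
 * isp_pci_dmasetup() also checks mp->error against.
 */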
1018
1019 #ifdef ISP_TARGET_MODE
1020 /*
1021 * We need to handle DMA for target mode differently from initiator mode.
1022 *
1023 * DMA mapping and construction and submission of CTIO Request Entries
1024 * and rendezvous for completion are very tightly coupled because we start
1025 * out by knowing (per platform) how much data we have to move, but we
1026 * don't know, up front, how many DMA mapping segments will have to be used
1027 * to cover that data, so we don't know how many CTIO Request Entries we
1028 * will end up using. Further, for performance reasons we may want to
1029 * (on the last CTIO for Fibre Channel), send status too (if all went well).
1030 *
1031 * The standard vector still goes through isp_pci_dmasetup, but the callback
1032 * for the DMA mapping routines comes here instead with the whole transfer
1033 * mapped and a pointer to a partially filled in already allocated request
1034 * queue entry. We finish the job.
1035 */
1036 static void tdma_mk __P((void *, bus_dma_segment_t *, int, int));
1037 static void tdma_mkfc __P((void *, bus_dma_segment_t *, int, int));
1038
1039 static void
1040 tdma_mk(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
1041 {
1042 mush_t *mp;
1043 struct ccb_scsiio *csio;
1044 struct isp_pcisoftc *pci;
1045 bus_dmamap_t *dp;
1046 u_int8_t scsi_status;
1047 ct_entry_t *cto;
1048 u_int32_t handle, totxfr, sflags;
1049 int nctios, send_status;
1050 int32_t resid;
1051
1052 mp = (mush_t *) arg;
1053 if (error) {
1054 mp->error = error;
1055 return;
1056 }
1057 csio = mp->cmd_token;
1058 cto = mp->rq;
1059
1060 cto->ct_xfrlen = 0;
1061 cto->ct_seg_count = 0;
1062 cto->ct_header.rqs_entry_count = 1;
1063 MEMZERO(cto->ct_dataseg, sizeof(cto->ct_dataseg));
1064
1065 if (nseg == 0) {
1066 cto->ct_header.rqs_seqno = 1;
1067 ISP_TDQE(mp->isp, "tdma_mk[no data]", *mp->iptrp, cto);
1068 isp_prt(mp->isp, ISP_LOGTDEBUG1,
1069 "CTIO lun %d->iid%d flgs 0x%x sts 0x%x ssts 0x%x res %d",
1070 csio->ccb_h.target_lun, cto->ct_iid, cto->ct_flags,
1071 cto->ct_status, cto->ct_scsi_status, cto->ct_resid);
1072 ISP_SWIZ_CTIO(mp->isp, cto, cto);
1073 return;
1074 }
1075
1076 nctios = nseg / ISP_RQDSEG;
1077 if (nseg % ISP_RQDSEG) {
1078 nctios++;
1079 }
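	/*
	 * That is, one CTIO per ISP_RQDSEG segments, rounded up; as an
	 * illustration, if the transfer mapped to 9 segments and
	 * ISP_RQDSEG were 4, we would build three data CTIOs here
	 * (4 + 4 + 1 segments), plus one more below if status has to
	 * be sent separately.
	 */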
1080
1081 /*
1082 * Save handle, and potentially any SCSI status, which we'll reinsert
1083 * on the last CTIO we're going to send.
1084 */
1085 handle = cto->ct_reserved;
1086 cto->ct_reserved = 0;
1087 cto->ct_header.rqs_seqno = 0;
1088 send_status = (cto->ct_flags & CT_SENDSTATUS) != 0;
1089
1090 if (send_status) {
1091 sflags = cto->ct_flags & (CT_SENDSTATUS | CT_CCINCR);
1092 cto->ct_flags &= ~(CT_SENDSTATUS | CT_CCINCR);
1093 /*
1094 * Preserve residual.
1095 */
1096 resid = cto->ct_resid;
1097
1098 /*
1099 * Save actual SCSI status.
1100 */
1101 scsi_status = cto->ct_scsi_status;
1102
1103 /*
1104 * We can't do a status at the same time as a data CTIO, so
1105 * we need to synthesize an extra CTIO at this level.
1106 */
1107 nctios++;
1108 } else {
1109 sflags = scsi_status = resid = 0;
1110 }
1111
1112 totxfr = cto->ct_resid = 0;
1113 cto->ct_scsi_status = 0;
1114
1115 pci = (struct isp_pcisoftc *)mp->isp;
1116 dp = &pci->dmaps[isp_handle_index(handle)];
1117 if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
1118 bus_dmamap_sync(pci->parent_dmat, *dp, BUS_DMASYNC_PREREAD);
1119 } else {
1120 bus_dmamap_sync(pci->parent_dmat, *dp, BUS_DMASYNC_PREWRITE);
1121 }
1122
1123
1124 while (nctios--) {
1125 int seglim;
1126
1127 seglim = nseg;
1128 if (seglim) {
1129 int seg;
1130
1131 if (seglim > ISP_RQDSEG)
1132 seglim = ISP_RQDSEG;
1133
1134 for (seg = 0; seg < seglim; seg++, nseg--) {
1135 /*
1136 * Unlike normal initiator commands, we don't
1137 * do any swizzling here.
1138 */
1139 cto->ct_dataseg[seg].ds_count = dm_segs->ds_len;
1140 cto->ct_dataseg[seg].ds_base = dm_segs->ds_addr;
1141 cto->ct_xfrlen += dm_segs->ds_len;
1142 totxfr += dm_segs->ds_len;
1143 dm_segs++;
1144 }
1145 cto->ct_seg_count = seg;
1146 } else {
1147 /*
1148 * This case should only happen when we're sending an
1149 * extra CTIO with final status.
1150 */
1151 if (send_status == 0) {
1152 isp_prt(mp->isp, ISP_LOGWARN,
1153 "tdma_mk ran out of segments");
1154 mp->error = EINVAL;
1155 return;
1156 }
1157 }
1158
1159 /*
1160 * At this point, the fields ct_lun, ct_iid, ct_tagval,
1161 * ct_tagtype, and ct_timeout have been carried over
1162 * unchanged from what our caller had set.
1163 *
1164 * The dataseg fields and the seg_count fields we just got
1165 * through setting. The data direction we've preserved all
1166 * along and only clear it if we're now sending status.
1167 */
1168
1169 if (nctios == 0) {
1170 /*
1171 * We're the last in a sequence of CTIOs, so mark
1172 * this CTIO and save the handle to the CCB such that
1173 * when this CTIO completes we can free dma resources
1174 * and do whatever else we need to do to finish the
1175 * rest of the command.
1176 */
1177 cto->ct_reserved = handle;
1178 cto->ct_header.rqs_seqno = 1;
1179
1180 if (send_status) {
1181 cto->ct_scsi_status = scsi_status;
1182 cto->ct_flags |= sflags | CT_NO_DATA;
1183 cto->ct_resid = resid;
1184 }
1185 if (send_status) {
1186 isp_prt(mp->isp, ISP_LOGTDEBUG1,
1187 "CTIO lun%d for ID %d ct_flags 0x%x scsi "
1188 "status %x resid %d",
1189 csio->ccb_h.target_lun,
1190 cto->ct_iid, cto->ct_flags,
1191 cto->ct_scsi_status, cto->ct_resid);
1192 } else {
1193 isp_prt(mp->isp, ISP_LOGTDEBUG1,
1194 "CTIO lun%d for ID%d ct_flags 0x%x",
1195 csio->ccb_h.target_lun,
1196 cto->ct_iid, cto->ct_flags);
1197 }
1198 ISP_TDQE(mp->isp, "last tdma_mk", *mp->iptrp, cto);
1199 ISP_SWIZ_CTIO(mp->isp, cto, cto);
1200 } else {
1201 ct_entry_t *octo = cto;
1202
1203 /*
1204 * Make sure handle fields are clean
1205 */
1206 cto->ct_reserved = 0;
1207 cto->ct_header.rqs_seqno = 0;
1208
1209 isp_prt(mp->isp, ISP_LOGTDEBUG1,
1210 "CTIO lun%d for ID%d ct_flags 0x%x",
1211 csio->ccb_h.target_lun, cto->ct_iid, cto->ct_flags);
1212 ISP_TDQE(mp->isp, "tdma_mk", *mp->iptrp, cto);
1213
1214 /*
1215 * Get a new CTIO
1216 */
1217 cto = (ct_entry_t *)
1218 ISP_QUEUE_ENTRY(mp->isp->isp_rquest, *mp->iptrp);
1219 *mp->iptrp =
1220 ISP_NXT_QENTRY(*mp->iptrp, RQUEST_QUEUE_LEN(mp->isp));
1221 if (*mp->iptrp == mp->optr) {
1222 isp_prt(mp->isp, ISP_LOGWARN,
1223 "Queue Overflow in tdma_mk");
1224 mp->error = MUSHERR_NOQENTRIES;
1225 return;
1226 }
1227 /*
1228 * Fill in the new CTIO with info from the old one.
1229 */
1230 cto->ct_header.rqs_entry_type = RQSTYPE_CTIO;
1231 cto->ct_header.rqs_entry_count = 1;
1232 cto->ct_header.rqs_flags = 0;
1233 cto->ct_lun = octo->ct_lun;
1234 cto->ct_iid = octo->ct_iid;
1235 cto->ct_reserved2 = octo->ct_reserved2;
1236 cto->ct_tgt = octo->ct_tgt;
1237 cto->ct_flags = octo->ct_flags;
1238 cto->ct_status = 0;
1239 cto->ct_scsi_status = 0;
1240 cto->ct_tag_val = octo->ct_tag_val;
1241 cto->ct_tag_type = octo->ct_tag_type;
1242 cto->ct_xfrlen = 0;
1243 cto->ct_resid = 0;
1244 cto->ct_timeout = octo->ct_timeout;
1245 cto->ct_seg_count = 0;
1246 MEMZERO(cto->ct_dataseg, sizeof(cto->ct_dataseg));
1247 /*
1248 * Now swizzle the old one for the consumption of the
1249 * chip.
1250 */
1251 ISP_SWIZ_CTIO(mp->isp, octo, octo);
1252 }
1253 }
1254 }
1255
1256 static void
1257 tdma_mkfc(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
1258 {
1259 mush_t *mp;
1260 struct ccb_scsiio *csio;
1261 struct isp_pcisoftc *pci;
1262 bus_dmamap_t *dp;
1263 ct2_entry_t *cto;
1264 u_int16_t scsi_status, send_status, send_sense;
1265 u_int32_t handle, totxfr, datalen;
1266 u_int8_t sense[QLTM_SENSELEN];
1267 int nctios;
1268
1269 mp = (mush_t *) arg;
1270 if (error) {
1271 mp->error = error;
1272 return;
1273 }
1274
1275 csio = mp->cmd_token;
1276 cto = mp->rq;
1277
1278 if (nseg == 0) {
1279 if ((cto->ct_flags & CT2_FLAG_MMASK) != CT2_FLAG_MODE1) {
1280 isp_prt(mp->isp, ISP_LOGWARN,
1281 "dma2_tgt_fc, a status CTIO2 without MODE1 "
1282 "set (0x%x)", cto->ct_flags);
1283 mp->error = EINVAL;
1284 return;
1285 }
1286 cto->ct_header.rqs_entry_count = 1;
1287 cto->ct_header.rqs_seqno = 1;
1288 /* ct_reserved contains the handle set by caller */
1289 /*
1290 * We preserve ct_lun, ct_iid, ct_rxid. We set the data
1291 * flags to NO DATA and clear relative offset flags.
1292 * We preserve the ct_resid and the response area.
1293 */
1294 cto->ct_flags |= CT2_NO_DATA;
1295 if (cto->ct_resid > 0)
1296 cto->ct_flags |= CT2_DATA_UNDER;
1297 else if (cto->ct_resid < 0)
1298 cto->ct_flags |= CT2_DATA_OVER;
1299 cto->ct_seg_count = 0;
1300 cto->ct_reloff = 0;
1301 ISP_TDQE(mp->isp, "dma2_tgt_fc[no data]", *mp->iptrp, cto);
1302 isp_prt(mp->isp, ISP_LOGTDEBUG1,
1303 "CTIO2 RX_ID 0x%x lun %d->iid%d flgs 0x%x sts 0x%x ssts "
1304 "0x%x res %d", cto->ct_rxid, csio->ccb_h.target_lun,
1305 cto->ct_iid, cto->ct_flags, cto->ct_status,
1306 cto->rsp.m1.ct_scsi_status, cto->ct_resid);
1307 ISP_SWIZ_CTIO2(mp->isp, cto, cto);
1308 return;
1309 }
1310
1311 if ((cto->ct_flags & CT2_FLAG_MMASK) != CT2_FLAG_MODE0) {
1312 isp_prt(mp->isp, ISP_LOGWARN,
1313 "dma2_tgt_fc, a data CTIO2 without MODE0 set "
1314 "(0x%x)", cto->ct_flags);
1315 mp->error = EINVAL;
1316 return;
1317 }
1318
1319
1320 nctios = nseg / ISP_RQDSEG_T2;
1321 if (nseg % ISP_RQDSEG_T2) {
1322 nctios++;
1323 }
1324
1325 /*
1326 * Save the handle, status, reloff, and residual. We'll reinsert the
1327 * handle into the last CTIO2 we're going to send, and reinsert status
1328 * and residual (and possibly sense data) if that's to be sent as well.
1329 *
1330 * We preserve ct_reloff and adjust it for each data CTIO2 we send past
1331 * the first one. This is needed so that the FCP DATA IUs being sent
1332 * out have the correct offset (they can arrive at the other end out
1333 * of order).
1334 */
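	/*
	 * Concretely (an illustrative sketch): if the first data CTIO2
	 * carries n0 bytes, the copy-forward code at the bottom of the
	 * loop below adds octo->rsp.m0.ct_xfrlen to the new ct_reloff,
	 * so the second CTIO2 describes data starting at offset n0, the
	 * third at n0 + n1, and so on; each FCP DATA IU then carries the
	 * right relative offset even if frames arrive out of order.
	 */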
1335
1336 handle = cto->ct_reserved;
1337 cto->ct_reserved = 0;
1338
1339 if ((send_status = (cto->ct_flags & CT2_SENDSTATUS)) != 0) {
1340 cto->ct_flags &= ~CT2_SENDSTATUS;
1341
1342 /*
1343 * Preserve residual, which is actually the total count.
1344 */
1345 datalen = cto->ct_resid;
1346
1347 /*
1348 * Save actual SCSI status. We'll reinsert the
1349 * CT2_SNSLEN_VALID later if appropriate.
1350 */
1351 scsi_status = cto->rsp.m0.ct_scsi_status & 0xff;
1352 send_sense = cto->rsp.m0.ct_scsi_status & CT2_SNSLEN_VALID;
1353
1354 /*
1355 * If we're sending status and have a CHECK CONDTION and
1356 * have sense data, we send one more CTIO2 with just the
1357 * status and sense data. The upper layers have stashed
1358 * the sense data in the dataseg structure for us.
1359 */
1360
1361 if ((scsi_status & 0xf) == SCSI_STATUS_CHECK_COND &&
1362 send_sense) {
1363 bcopy(cto->rsp.m0.ct_dataseg, sense, QLTM_SENSELEN);
1364 nctios++;
1365 }
1366 } else {
1367 scsi_status = send_sense = datalen = 0;
1368 }
1369
1370 totxfr = cto->ct_resid = 0;
1371 cto->rsp.m0.ct_scsi_status = 0;
1372 bzero(&cto->rsp, sizeof (cto->rsp));
1373
1374 pci = (struct isp_pcisoftc *)mp->isp;
1375 dp = &pci->dmaps[isp_handle_index(handle)];
1376 if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
1377 bus_dmamap_sync(pci->parent_dmat, *dp, BUS_DMASYNC_PREREAD);
1378 } else {
1379 bus_dmamap_sync(pci->parent_dmat, *dp, BUS_DMASYNC_PREWRITE);
1380 }
1381
1382 while (nctios--) {
1383 int seg, seglim;
1384
1385 seglim = nseg;
1386 if (seglim) {
1387 if (seglim > ISP_RQDSEG_T2)
1388 seglim = ISP_RQDSEG_T2;
1389
1390 for (seg = 0; seg < seglim; seg++) {
1391 cto->rsp.m0.ct_dataseg[seg].ds_base =
1392 dm_segs->ds_addr;
1393 cto->rsp.m0.ct_dataseg[seg].ds_count =
1394 dm_segs->ds_len;
1395 cto->rsp.m0.ct_xfrlen += dm_segs->ds_len;
1396 totxfr += dm_segs->ds_len;
1397 dm_segs++;
1398 }
1399 cto->ct_seg_count = seg;
1400 } else {
1401 /*
1402 * This case should only happen when we're sending a
1403 * synthesized MODE1 final status with sense data.
1404 */
1405 if (send_sense == 0) {
1406 isp_prt(mp->isp, ISP_LOGWARN,
1407 "dma2_tgt_fc ran out of segments, "
1408 "no SENSE DATA");
1409 mp->error = EINVAL;
1410 return;
1411 }
1412 }
1413
1414 /*
1415 * At this point, the fields ct_lun, ct_iid, ct_rxid,
1416 * ct_timeout have been carried over unchanged from what
1417 * our caller had set.
1418 *
1419 * The field ct_reloff is either what the caller set, or
1420 * what we've added to below.
1421 *
1422 * The dataseg fields and the seg_count fields we just got
1423 * through setting. The data direction we've preserved all
1424 * along and only clear it if we're sending a MODE1 status
1425 * as the last CTIO.
1426 *
1427 */
1428
1429 if (nctios == 0) {
1430
1431 /*
1432 * We're the last in a sequence of CTIO2s, so mark this
1433 * CTIO2 and save the handle to the CCB such that when
1434 * this CTIO2 completes we can free dma resources and
1435 * do whatever else we need to do to finish the rest
1436 * of the command.
1437 */
1438
1439 cto->ct_reserved = handle;
1440 cto->ct_header.rqs_seqno = 1;
1441
1442 if (send_status) {
1443 if (send_sense) {
1444 bcopy(sense, cto->rsp.m1.ct_resp,
1445 QLTM_SENSELEN);
1446 cto->rsp.m1.ct_senselen =
1447 QLTM_SENSELEN;
1448 scsi_status |= CT2_SNSLEN_VALID;
1449 cto->rsp.m1.ct_scsi_status =
1450 scsi_status;
1451 cto->ct_flags &= CT2_FLAG_MMASK;
1452 cto->ct_flags |= CT2_FLAG_MODE1 |
1453 CT2_NO_DATA| CT2_SENDSTATUS;
1454 } else {
1455 cto->rsp.m0.ct_scsi_status =
1456 scsi_status;
1457 cto->ct_flags |= CT2_SENDSTATUS;
1458 }
1459 /*
1460 * Get 'real' residual and set flags based
1461 * on it.
1462 */
1463 cto->ct_resid = datalen - totxfr;
1464 if (cto->ct_resid > 0)
1465 cto->ct_flags |= CT2_DATA_UNDER;
1466 else if (cto->ct_resid < 0)
1467 cto->ct_flags |= CT2_DATA_OVER;
1468 }
1469 ISP_TDQE(mp->isp, "last dma2_tgt_fc", *mp->iptrp, cto);
1470 isp_prt(mp->isp, ISP_LOGTDEBUG1,
1471 "CTIO2 RX_ID 0x%x lun %d->iid%d flgs 0x%x sts 0x%x"
1472 " ssts 0x%x res %d", cto->ct_rxid,
1473 csio->ccb_h.target_lun, (int) cto->ct_iid,
1474 cto->ct_flags, cto->ct_status,
1475 cto->rsp.m1.ct_scsi_status, cto->ct_resid);
1476 ISP_SWIZ_CTIO2(mp->isp, cto, cto);
1477 } else {
1478 ct2_entry_t *octo = cto;
1479
1480 /*
1481 * Make sure handle fields are clean
1482 */
1483 cto->ct_reserved = 0;
1484 cto->ct_header.rqs_seqno = 0;
1485
1486 ISP_TDQE(mp->isp, "dma2_tgt_fc", *mp->iptrp, cto);
1487 isp_prt(mp->isp, ISP_LOGTDEBUG1,
1488 "CTIO2 RX_ID 0x%x lun %d->iid%d flgs 0x%x",
1489 cto->ct_rxid, csio->ccb_h.target_lun,
1490 (int) cto->ct_iid, cto->ct_flags);
1491 /*
1492 * Get a new CTIO2
1493 */
1494 cto = (ct2_entry_t *)
1495 ISP_QUEUE_ENTRY(mp->isp->isp_rquest, *mp->iptrp);
1496 *mp->iptrp =
1497 ISP_NXT_QENTRY(*mp->iptrp, RQUEST_QUEUE_LEN(mp->isp));
1498 if (*mp->iptrp == mp->optr) {
1499 isp_prt(mp->isp, ISP_LOGWARN,
1500 "Queue Overflow in dma2_tgt_fc");
1501 mp->error = MUSHERR_NOQENTRIES;
1502 return;
1503 }
1504
1505 /*
1506 * Fill in the new CTIO2 with info from the old one.
1507 */
1508 cto->ct_header.rqs_entry_type = RQSTYPE_CTIO2;
1509 cto->ct_header.rqs_entry_count = 1;
1510 cto->ct_header.rqs_flags = 0;
1511 /* ct_header.rqs_seqno && ct_reserved done later */
1512 cto->ct_lun = octo->ct_lun;
1513 cto->ct_iid = octo->ct_iid;
1514 cto->ct_rxid = octo->ct_rxid;
1515 cto->ct_flags = octo->ct_flags;
1516 cto->ct_status = 0;
1517 cto->ct_resid = 0;
1518 cto->ct_timeout = octo->ct_timeout;
1519 cto->ct_seg_count = 0;
1520 /*
1521 * Adjust the new relative offset by the amount which
1522 * is recorded in the data segment of the old CTIO2 we
1523 * just finished filling out.
1524 */
1525 cto->ct_reloff += octo->rsp.m0.ct_xfrlen;
1526 bzero(&cto->rsp, sizeof (cto->rsp));
1527 ISP_SWIZ_CTIO2(mp->isp, cto, cto);
1528 }
1529 }
1530 }
1531 #endif
1532
1533 static void dma2 __P((void *, bus_dma_segment_t *, int, int));
1534
1535 static void
1536 dma2(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
1537 {
1538 mush_t *mp;
1539 struct ccb_scsiio *csio;
1540 struct isp_pcisoftc *pci;
1541 bus_dmamap_t *dp;
1542 bus_dma_segment_t *eseg;
1543 ispreq_t *rq;
1544 ispcontreq_t *crq;
1545 int seglim, datalen;
1546
1547 mp = (mush_t *) arg;
1548 if (error) {
1549 mp->error = error;
1550 return;
1551 }
1552
1553 if (nseg < 1) {
1554 isp_prt(mp->isp, ISP_LOGERR, "bad segment count (%d)", nseg);
1555 mp->error = EFAULT;
1556 return;
1557 }
1558 csio = mp->cmd_token;
1559 rq = mp->rq;
1560 pci = (struct isp_pcisoftc *)mp->isp;
1561 dp = &pci->dmaps[isp_handle_index(rq->req_handle)];
1562
1563 if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
1564 bus_dmamap_sync(pci->parent_dmat, *dp, BUS_DMASYNC_PREREAD);
1565 } else {
1566 bus_dmamap_sync(pci->parent_dmat, *dp, BUS_DMASYNC_PREWRITE);
1567 }
1568
1569 datalen = XS_XFRLEN(csio);
1570
1571 /*
1572 * We're passed an initial partially filled in entry that
1573 * has most fields filled in except for data transfer
1574 * related values.
1575 *
1576 * Our job is to fill in the initial request queue entry and
1577 * then to start allocating and filling in continuation entries
1578 * until we've covered the entire transfer.
1579 */
1580
1581 if (IS_FC(mp->isp)) {
1582 seglim = ISP_RQDSEG_T2;
1583 ((ispreqt2_t *)rq)->req_totalcnt = datalen;
1584 if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
1585 ((ispreqt2_t *)rq)->req_flags |= REQFLAG_DATA_IN;
1586 } else {
1587 ((ispreqt2_t *)rq)->req_flags |= REQFLAG_DATA_OUT;
1588 }
1589 } else {
1590 if (csio->cdb_len > 12) {
1591 seglim = 0;
1592 } else {
1593 seglim = ISP_RQDSEG;
1594 }
1595 if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
1596 rq->req_flags |= REQFLAG_DATA_IN;
1597 } else {
1598 rq->req_flags |= REQFLAG_DATA_OUT;
1599 }
1600 }
1601
1602 eseg = dm_segs + nseg;
1603
1604 while (datalen != 0 && rq->req_seg_count < seglim && dm_segs != eseg) {
1605 if (IS_FC(mp->isp)) {
1606 ispreqt2_t *rq2 = (ispreqt2_t *)rq;
1607 rq2->req_dataseg[rq2->req_seg_count].ds_base =
1608 dm_segs->ds_addr;
1609 rq2->req_dataseg[rq2->req_seg_count].ds_count =
1610 dm_segs->ds_len;
1611 } else {
1612 rq->req_dataseg[rq->req_seg_count].ds_base =
1613 dm_segs->ds_addr;
1614 rq->req_dataseg[rq->req_seg_count].ds_count =
1615 dm_segs->ds_len;
1616 }
1617 datalen -= dm_segs->ds_len;
1618 #if 0
1619 if (IS_FC(mp->isp)) {
1620 ispreqt2_t *rq2 = (ispreqt2_t *)rq;
1621 printf("%s: seg0[%d] cnt 0x%x paddr 0x%08x\n",
1622 mp->isp->isp_name, rq->req_seg_count,
1623 rq2->req_dataseg[rq2->req_seg_count].ds_count,
1624 rq2->req_dataseg[rq2->req_seg_count].ds_base);
1625 } else {
1626 printf("%s: seg0[%d] cnt 0x%x paddr 0x%08x\n",
1627 mp->isp->isp_name, rq->req_seg_count,
1628 rq->req_dataseg[rq->req_seg_count].ds_count,
1629 rq->req_dataseg[rq->req_seg_count].ds_base);
1630 }
1631 #endif
1632 rq->req_seg_count++;
1633 dm_segs++;
1634 }
1635
1636 while (datalen > 0 && dm_segs != eseg) {
1637 crq = (ispcontreq_t *)
1638 ISP_QUEUE_ENTRY(mp->isp->isp_rquest, *mp->iptrp);
1639 *mp->iptrp = ISP_NXT_QENTRY(*mp->iptrp, RQUEST_QUEUE_LEN(mp->isp));
1640 if (*mp->iptrp == mp->optr) {
1641 #if 0
1642 printf("%s: Request Queue Overflow++\n",
1643 mp->isp->isp_name);
1644 #endif
1645 mp->error = MUSHERR_NOQENTRIES;
1646 return;
1647 }
1648 rq->req_header.rqs_entry_count++;
1649 bzero((void *)crq, sizeof (*crq));
1650 crq->req_header.rqs_entry_count = 1;
1651 crq->req_header.rqs_entry_type = RQSTYPE_DATASEG;
1652
1653 seglim = 0;
1654 while (datalen > 0 && seglim < ISP_CDSEG && dm_segs != eseg) {
1655 crq->req_dataseg[seglim].ds_base =
1656 dm_segs->ds_addr;
1657 crq->req_dataseg[seglim].ds_count =
1658 dm_segs->ds_len;
1659 #if 0
1660 printf("%s: seg%d[%d] cnt 0x%x paddr 0x%08x\n",
1661 mp->isp->isp_name, rq->req_header.rqs_entry_count-1,
1662 seglim, crq->req_dataseg[seglim].ds_count,
1663 crq->req_dataseg[seglim].ds_base);
1664 #endif
1665 rq->req_seg_count++;
1666 datalen -= dm_segs->ds_len;
1667 dm_segs++;
1668 seglim++;
1669 }
1670 }
1671 }
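/*
 * To recap dma2's queue arithmetic with a hypothetical transfer that
 * maps to more segments than one entry can hold: the first seglim
 * (ISP_RQDSEG, or ISP_RQDSEG_T2 for FC) segments ride in the original
 * request entry, and the rest are spread over continuation entries of
 * up to ISP_CDSEG segments each.  Every continuation bumps
 * req_header.rqs_entry_count and consumes a request queue slot, which
 * is why the queue-full check inside the loop can still fail the
 * command with MUSHERR_NOQENTRIES mid-build.
 */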
1672
1673 static int
1674 isp_pci_dmasetup(struct ispsoftc *isp, struct ccb_scsiio *csio, ispreq_t *rq,
1675 u_int16_t *iptrp, u_int16_t optr)
1676 {
1677 struct isp_pcisoftc *pci = (struct isp_pcisoftc *)isp;
1678 bus_dmamap_t *dp = NULL;
1679 mush_t mush, *mp;
1680 void (*eptr) __P((void *, bus_dma_segment_t *, int, int));
1681
1682 #ifdef ISP_TARGET_MODE
1683 if (csio->ccb_h.func_code == XPT_CONT_TARGET_IO) {
1684 if (IS_FC(isp)) {
1685 eptr = tdma_mkfc;
1686 } else {
1687 eptr = tdma_mk;
1688 }
1689 if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE ||
1690 (csio->dxfer_len == 0)) {
1691 rq->req_seg_count = 1;
1692 mp = &mush;
1693 mp->isp = isp;
1694 mp->cmd_token = csio;
1695 mp->rq = rq;
1696 mp->iptrp = iptrp;
1697 mp->optr = optr;
1698 mp->error = 0;
1699 (*eptr)(mp, NULL, 0, 0);
1700 goto exit;
1701 }
1702 } else
1703 #endif
1704 eptr = dma2;
1705
1706 /*
1707 * NB: if we need to do request queue entry swizzling,
1708 * NB: this is where it would need to be done for cmds
1709 * NB: that move no data. For commands that move data,
1710 * NB: swizzling would take place in those functions.
1711 */
1712 if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE ||
1713 (csio->dxfer_len == 0)) {
1714 rq->req_seg_count = 1;
1715 return (CMD_QUEUED);
1716 }
1717
1718 /*
1719 * Do a virtual grapevine step to collect info for
1720 * the callback dma allocation that we have to use...
1721 */
1722 mp = &mush;
1723 mp->isp = isp;
1724 mp->cmd_token = csio;
1725 mp->rq = rq;
1726 mp->iptrp = iptrp;
1727 mp->optr = optr;
1728 mp->error = 0;
1729
1730 if ((csio->ccb_h.flags & CAM_SCATTER_VALID) == 0) {
1731 if ((csio->ccb_h.flags & CAM_DATA_PHYS) == 0) {
1732 int error, s;
1733 dp = &pci->dmaps[isp_handle_index(rq->req_handle)];
1734 s = splsoftvm();
1735 error = bus_dmamap_load(pci->parent_dmat, *dp,
1736 csio->data_ptr, csio->dxfer_len, eptr, mp, 0);
1737 if (error == EINPROGRESS) {
1738 bus_dmamap_unload(pci->parent_dmat, *dp);
1739 mp->error = EINVAL;
1740 isp_prt(isp, ISP_LOGERR,
1741 "deferred dma allocation not supported");
1742 } else if (error && mp->error == 0) {
1743 #ifdef DIAGNOSTIC
1744 printf("%s: error %d in dma mapping code\n",
1745 isp->isp_name, error);
1746 #endif
1747 mp->error = error;
1748 }
1749 splx(s);
1750 } else {
1751 /* Pointer to physical buffer */
1752 struct bus_dma_segment seg;
1753 seg.ds_addr = (bus_addr_t)csio->data_ptr;
1754 seg.ds_len = csio->dxfer_len;
1755 (*eptr)(mp, &seg, 1, 0);
1756 }
1757 } else {
1758 struct bus_dma_segment *segs;
1759
1760 if ((csio->ccb_h.flags & CAM_DATA_PHYS) != 0) {
1761 isp_prt(isp, ISP_LOGERR,
1762 "Physical segment pointers unsupported");
1763 mp->error = EINVAL;
1764 } else if ((csio->ccb_h.flags & CAM_SG_LIST_PHYS) == 0) {
1765 isp_prt(isp, ISP_LOGERR,
1766 "Virtual segment addresses unsupported");
1767 mp->error = EINVAL;
1768 } else {
1769 /* Just use the segments provided */
1770 segs = (struct bus_dma_segment *) csio->data_ptr;
1771 (*eptr)(mp, segs, csio->sglist_cnt, 0);
1772 }
1773 }
1774 #ifdef ISP_TARGET_MODE
1775 exit:
1776 #endif
1777 if (mp->error) {
1778 int retval = CMD_COMPLETE;
1779 if (mp->error == MUSHERR_NOQENTRIES) {
1780 retval = CMD_EAGAIN;
1781 } else if (mp->error == EFBIG) {
1782 XS_SETERR(csio, CAM_REQ_TOO_BIG);
1783 } else if (mp->error == EINVAL) {
1784 XS_SETERR(csio, CAM_REQ_INVALID);
1785 } else {
1786 XS_SETERR(csio, CAM_UNREC_HBA_ERROR);
1787 }
1788 return (retval);
1789 } else {
1790 /*
1791 * Check to see if we weren't cancelled while sleeping on
1792 * getting DMA resources...
1793 */
1794 if ((csio->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_INPROG) {
1795 if (dp) {
1796 bus_dmamap_unload(pci->parent_dmat, *dp);
1797 }
1798 return (CMD_COMPLETE);
1799 }
1800 return (CMD_QUEUED);
1801 }
1802 }
1803
1804 static void
1805 isp_pci_dmateardown(struct ispsoftc *isp, XS_T *xs, u_int32_t handle)
1806 {
1807 struct isp_pcisoftc *pci = (struct isp_pcisoftc *)isp;
1808 bus_dmamap_t *dp = &pci->dmaps[isp_handle_index(handle)];
1809 if ((xs->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
1810 bus_dmamap_sync(pci->parent_dmat, *dp, BUS_DMASYNC_POSTREAD);
1811 } else {
1812 bus_dmamap_sync(pci->parent_dmat, *dp, BUS_DMASYNC_POSTWRITE);
1813 }
1814 bus_dmamap_unload(pci->parent_dmat, *dp);
1815 }
1816
1817
1818 static void
1819 isp_pci_reset1(struct ispsoftc *isp)
1820 {
1821 /* Make sure the BIOS is disabled */
1822 isp_pci_wr_reg(isp, HCCR, PCI_HCCR_CMD_BIOS);
1823 /* and enable interrupts */
1824 ENABLE_INTS(isp);
1825 }
1826
1827 static void
1828 isp_pci_dumpregs(struct ispsoftc *isp, const char *msg)
1829 {
1830 struct isp_pcisoftc *pci = (struct isp_pcisoftc *)isp;
1831 if (msg)
1832 printf("%s: %s\n", isp->isp_name, msg);
1833 if (IS_SCSI(isp))
1834 printf(" biu_conf1=%x", ISP_READ(isp, BIU_CONF1));
1835 else
1836 printf(" biu_csr=%x", ISP_READ(isp, BIU2100_CSR));
1837 printf(" biu_icr=%x biu_isr=%x biu_sema=%x ", ISP_READ(isp, BIU_ICR),
1838 ISP_READ(isp, BIU_ISR), ISP_READ(isp, BIU_SEMA));
1839 printf("risc_hccr=%x\n", ISP_READ(isp, HCCR));
1840
1841
1842 if (IS_SCSI(isp)) {
1843 ISP_WRITE(isp, HCCR, HCCR_CMD_PAUSE);
1844 printf(" cdma_conf=%x cdma_sts=%x cdma_fifostat=%x\n",
1845 ISP_READ(isp, CDMA_CONF), ISP_READ(isp, CDMA_STATUS),
1846 ISP_READ(isp, CDMA_FIFO_STS));
1847 printf(" ddma_conf=%x ddma_sts=%x ddma_fifostat=%x\n",
1848 ISP_READ(isp, DDMA_CONF), ISP_READ(isp, DDMA_STATUS),
1849 ISP_READ(isp, DDMA_FIFO_STS));
1850 printf(" sxp_int=%x sxp_gross=%x sxp(scsi_ctrl)=%x\n",
1851 ISP_READ(isp, SXP_INTERRUPT),
1852 ISP_READ(isp, SXP_GROSS_ERR),
1853 ISP_READ(isp, SXP_PINS_CTRL));
1854 ISP_WRITE(isp, HCCR, HCCR_CMD_RELEASE);
1855 }
1856 printf(" mbox regs: %x %x %x %x %x\n",
1857 ISP_READ(isp, OUTMAILBOX0), ISP_READ(isp, OUTMAILBOX1),
1858 ISP_READ(isp, OUTMAILBOX2), ISP_READ(isp, OUTMAILBOX3),
1859 ISP_READ(isp, OUTMAILBOX4));
1860 printf(" PCI Status Command/Status=%lx\n",
1861 pci_conf_read(pci->pci_id, PCIR_COMMAND));
1862 }