1 /* $NetBSD: siop_common.c,v 1.41 2006/11/16 01:32:52 christos Exp $ */
2
3 /*
4 * Copyright (c) 2000, 2002 Manuel Bouyer.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * 3. All advertising materials mentioning features or use of this software
15 * must display the following acknowledgement:
16 * This product includes software developed by Manuel Bouyer.
17 * 4. The name of the author may not be used to endorse or promote products
18 * derived from this software without specific prior written permission.
19 *
20 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
21 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
22 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
23 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
24 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
25 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
26 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
27 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
28 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
29 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30 *
31 */
32
33 /* SYM53c7/8xx PCI-SCSI I/O Processors driver */
34
35 #include <sys/cdefs.h>
36 __KERNEL_RCSID(0, "$NetBSD: siop_common.c,v 1.41 2006/11/16 01:32:52 christos Exp $");
37
38 #include <sys/param.h>
39 #include <sys/systm.h>
40 #include <sys/device.h>
41 #include <sys/malloc.h>
42 #include <sys/buf.h>
43 #include <sys/kernel.h>
44 #include <sys/scsiio.h>
45
46 #include <uvm/uvm_extern.h>
47
48 #include <machine/endian.h>
49 #include <machine/bus.h>
50
51 #include <dev/scsipi/scsi_all.h>
52 #include <dev/scsipi/scsi_message.h>
53 #include <dev/scsipi/scsipi_all.h>
54
55 #include <dev/scsipi/scsiconf.h>
56
57 #include <dev/ic/siopreg.h>
58 #include <dev/ic/siopvar_common.h>
59
60 #include "opt_siop.h"
61
62 #undef DEBUG
63 #undef DEBUG_DR
64 #undef DEBUG_NEG
65
66 int
67 siop_common_attach(sc)
68 struct siop_common_softc *sc;
69 {
70 int error, i;
71 bus_dma_segment_t seg;
72 int rseg;
73
74 /*
75 * Allocate DMA-safe memory for the script and map it.
76 */
77 if ((sc->features & SF_CHIP_RAM) == 0) {
78 error = bus_dmamem_alloc(sc->sc_dmat, PAGE_SIZE,
79 PAGE_SIZE, 0, &seg, 1, &rseg, BUS_DMA_NOWAIT);
80 if (error) {
81 aprint_error(
82 "%s: unable to allocate script DMA memory, "
83 "error = %d\n", sc->sc_dev.dv_xname, error);
84 return error;
85 }
86 error = bus_dmamem_map(sc->sc_dmat, &seg, rseg, PAGE_SIZE,
87 (caddr_t *)&sc->sc_script,
88 BUS_DMA_NOWAIT|BUS_DMA_COHERENT);
89 if (error) {
90 aprint_error("%s: unable to map script DMA memory, "
91 "error = %d\n", sc->sc_dev.dv_xname, error);
92 return error;
93 }
94 error = bus_dmamap_create(sc->sc_dmat, PAGE_SIZE, 1,
95 PAGE_SIZE, 0, BUS_DMA_NOWAIT, &sc->sc_scriptdma);
96 if (error) {
97 aprint_error("%s: unable to create script DMA map, "
98 "error = %d\n", sc->sc_dev.dv_xname, error);
99 return error;
100 }
101 error = bus_dmamap_load(sc->sc_dmat, sc->sc_scriptdma,
102 sc->sc_script, PAGE_SIZE, NULL, BUS_DMA_NOWAIT);
103 if (error) {
104 aprint_error("%s: unable to load script DMA map, "
105 "error = %d\n", sc->sc_dev.dv_xname, error);
106 return error;
107 }
108 sc->sc_scriptaddr =
109 sc->sc_scriptdma->dm_segs[0].ds_addr;
110 sc->ram_size = PAGE_SIZE;
111 }
112
113 sc->sc_adapt.adapt_dev = &sc->sc_dev;
114 sc->sc_adapt.adapt_nchannels = 1;
115 sc->sc_adapt.adapt_openings = 0;
116 sc->sc_adapt.adapt_ioctl = siop_ioctl;
117 sc->sc_adapt.adapt_minphys = minphys;
118
119 memset(&sc->sc_chan, 0, sizeof(sc->sc_chan));
120 sc->sc_chan.chan_adapter = &sc->sc_adapt;
121 sc->sc_chan.chan_bustype = &scsi_bustype;
122 sc->sc_chan.chan_channel = 0;
123 sc->sc_chan.chan_flags = SCSIPI_CHAN_CANGROW;
124 sc->sc_chan.chan_ntargets =
125 (sc->features & SF_BUS_WIDE) ? 16 : 8;
126 sc->sc_chan.chan_nluns = 8;
127 sc->sc_chan.chan_id =
128 bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_SCID);
129 if (sc->sc_chan.chan_id == 0 ||
130 sc->sc_chan.chan_id >= sc->sc_chan.chan_ntargets)
131 sc->sc_chan.chan_id = SIOP_DEFAULT_TARGET;
132
133 for (i = 0; i < 16; i++)
134 sc->targets[i] = NULL;
135
136 /* find min/max sync period for this chip */
137 sc->st_maxsync = 0;
138 sc->dt_maxsync = 0;
139 sc->st_minsync = 255;
140 sc->dt_minsync = 255;
141 for (i = 0; i < sizeof(scf_period) / sizeof(scf_period[0]); i++) {
142 if (sc->clock_period != scf_period[i].clock)
143 continue;
144 if (sc->st_maxsync < scf_period[i].period)
145 sc->st_maxsync = scf_period[i].period;
146 if (sc->st_minsync > scf_period[i].period)
147 sc->st_minsync = scf_period[i].period;
148 }
149 if (sc->st_maxsync == 255 || sc->st_minsync == 0)
150 panic("siop: can't find my sync parameters");
151 for (i = 0; i < sizeof(dt_scf_period) / sizeof(dt_scf_period[0]); i++) {
152 if (sc->clock_period != dt_scf_period[i].clock)
153 continue;
154 if (sc->dt_maxsync < dt_scf_period[i].period)
155 sc->dt_maxsync = dt_scf_period[i].period;
156 if (sc->dt_minsync > dt_scf_period[i].period)
157 sc->dt_minsync = dt_scf_period[i].period;
158 }
159 if (sc->dt_maxsync == 255 || sc->dt_minsync == 0)
160 panic("siop: can't find my sync parameters");
161 return 0;
162 }
163
/*
 * siop_common_reset: software-reset the chip and (re)program the
 * common register set: arbitration, interrupt enables, timers, our
 * SCSI ID/response ID, clock doubler/quadrupler, FIFO size, LED GPIO
 * and (on Ultra/3 chips) SCNTL4.  Ends by calling the bus front-end's
 * sc_reset() hook for chip-specific setup.
 */
void
siop_common_reset(sc)
	struct siop_common_softc *sc;
{
	u_int32_t stest3;

	/* reset the chip */
	bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_ISTAT, ISTAT_SRST);
	delay(1000);
	bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_ISTAT, 0);

	/* init registers */
	bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SCNTL0,
	    SCNTL0_ARB_MASK | SCNTL0_EPC | SCNTL0_AAP);
	bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SCNTL1, 0);
	bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SCNTL3, sc->clock_div);
	bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SXFER, 0);
	/* enable all DMA interrupts */
	bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_DIEN, 0xff);
	/* SCSI interrupts, except the ones the scripts handle themselves */
	bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SIEN0,
	    0xff & ~(SIEN0_CMP | SIEN0_SEL | SIEN0_RSL));
	bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SIEN1,
	    0xff & ~(SIEN1_HTH | SIEN1_GEN));
	bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_STEST2, 0);
	bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_STEST3, STEST3_TE);
	/* selection timeout */
	bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_STIME0,
	    (0xb << STIME0_SEL_SHIFT));
	bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SCID,
	    sc->sc_chan.chan_id | SCID_RRE);
	/* respond to (re)selections addressed to our ID only */
	bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_RESPID0,
	    1 << sc->sc_chan.chan_id);
	bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_DCNTL,
	    (sc->features & SF_CHIP_PF) ? DCNTL_COM | DCNTL_PFEN : DCNTL_COM);
	if (sc->features & SF_CHIP_AAIP)
		bus_space_write_1(sc->sc_rt, sc->sc_rh,
		    SIOP_AIPCNTL1, AIPCNTL1_DIS);

	/* enable clock doubler or quadrupler if appropriate */
	if (sc->features & (SF_CHIP_DBLR | SF_CHIP_QUAD)) {
		stest3 = bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_STEST3);
		bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_STEST1,
		    STEST1_DBLEN);
		if (sc->features & SF_CHIP_QUAD) {
			/* wait for PLL to lock */
			while ((bus_space_read_1(sc->sc_rt, sc->sc_rh,
			    SIOP_STEST4) & STEST4_LOCK) == 0)
				delay(10);
		} else {
			/* data sheet says 20us - more won't hurt */
			delay(100);
		}
		/* halt scsi clock, select doubler/quad, restart clock */
		bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_STEST3,
		    stest3 | STEST3_HSC);
		bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_STEST1,
		    STEST1_DBLEN | STEST1_DBLSEL);
		bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_STEST3, stest3);
	} else {
		bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_STEST1, 0);
	}
	/* enlarged DMA FIFO where the chip has one */
	if (sc->features & SF_CHIP_FIFO)
		bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_CTEST5,
		    bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_CTEST5) |
		    CTEST5_DFS);
	if (sc->features & SF_CHIP_LED0) {
		/* Set GPIO0 as output if software LED control is required */
		bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_GPCNTL,
		    bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_GPCNTL) & 0xfe);
	}
	if (sc->features & SF_BUS_ULTRA3) {
		/* reset SCNTL4 */
		bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SCNTL4, 0);
	}
	/* remember the current bus mode (SE/LVD/HVD) */
	sc->mode = bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_STEST4) &
	    STEST4_MODE_MASK;

	/*
	 * initialise the RAM. Without this we may get scsi gross errors on
	 * the 1010
	 */
	if (sc->features & SF_CHIP_RAM)
		bus_space_set_region_4(sc->sc_ramt, sc->sc_ramh,
		    0, 0, sc->ram_size / 4);
	sc->sc_reset(sc);
}
248
/*
 * siop_setuptables: prepare the per-command DMA tables before sending
 * a command: identify (and optional tag) messages, invalid status
 * marker, command pointer and data segment pointers.  If the target is
 * still in the TARST_ASYNC state, also queue the first negotiation
 * message (PPR, WDTR or SDTR, in order of preference).
 */
void
siop_setuptables(siop_cmd)
	struct siop_common_cmd *siop_cmd;
{
	int i;
	struct siop_common_softc *sc = siop_cmd->siop_sc;
	struct scsipi_xfer *xs = siop_cmd->xs;
	int target = xs->xs_periph->periph_target;
	int lun = xs->xs_periph->periph_lun;
	int msgoffset = 1;

	siop_cmd->siop_tables->id = htole32(sc->targets[target]->id);
	memset(siop_cmd->siop_tables->msg_out, 0,
	    sizeof(siop_cmd->siop_tables->msg_out));
	/* request sense doesn't disconnect */
	if (xs->xs_control & XS_CTL_REQSENSE)
		siop_cmd->siop_tables->msg_out[0] = MSG_IDENTIFY(lun, 0);
	else if ((sc->features & SF_CHIP_GEBUG) &&
	    (sc->targets[target]->flags & TARF_ISWIDE) == 0)
		/*
		 * 1010 bug: it seems that the 1010 has problems with reselect
		 * when not in wide mode (generate false SCSI gross error).
		 * The FreeBSD sym driver has comments about it but their
		 * workaround (disable SCSI gross error reporting) doesn't
		 * work with my adapter. So disable disconnect when not
		 * wide.
		 */
		siop_cmd->siop_tables->msg_out[0] = MSG_IDENTIFY(lun, 0);
	else
		siop_cmd->siop_tables->msg_out[0] = MSG_IDENTIFY(lun, 1);
	if (xs->xs_tag_type != 0) {
		if ((sc->targets[target]->flags & TARF_TAG) == 0) {
			scsipi_printaddr(xs->xs_periph);
			printf(": tagged command type %d id %d\n",
			    siop_cmd->xs->xs_tag_type, siop_cmd->xs->xs_tag_id);
			panic("tagged command for non-tagging device");
		}
		siop_cmd->flags |= CMDFL_TAG;
		siop_cmd->siop_tables->msg_out[1] = siop_cmd->xs->xs_tag_type;
		/*
		 * use siop_cmd->tag not xs->xs_tag_id, caller may want a
		 * different one
		 */
		siop_cmd->siop_tables->msg_out[2] = siop_cmd->tag;
		msgoffset = 3;
	}
	siop_cmd->siop_tables->t_msgout.count= htole32(msgoffset);
	if (sc->targets[target]->status == TARST_ASYNC) {
		/* no negotiation done yet; queue the first one */
		if ((sc->targets[target]->flags & TARF_DT) &&
		    (sc->mode == STEST4_MODE_LVD)) {
			/* DT transfers require an LVD bus */
			sc->targets[target]->status = TARST_PPR_NEG;
			siop_ppr_msg(siop_cmd, msgoffset, sc->dt_minsync,
			    sc->maxoff);
		} else if (sc->targets[target]->flags & TARF_WIDE) {
			sc->targets[target]->status = TARST_WIDE_NEG;
			siop_wdtr_msg(siop_cmd, msgoffset,
			    MSG_EXT_WDTR_BUS_16_BIT);
		} else if (sc->targets[target]->flags & TARF_SYNC) {
			sc->targets[target]->status = TARST_SYNC_NEG;
			/* SDTR is limited to Ultra/2 offsets (31 max) */
			siop_sdtr_msg(siop_cmd, msgoffset, sc->st_minsync,
			    (sc->maxoff > 31) ? 31 : sc->maxoff);
		} else {
			sc->targets[target]->status = TARST_OK;
			siop_update_xfer_mode(sc, target);
		}
	}
	siop_cmd->siop_tables->status =
	    htole32(SCSI_SIOP_NOSTATUS); /* set invalid status */

	siop_cmd->siop_tables->cmd.count =
	    htole32(siop_cmd->dmamap_cmd->dm_segs[0].ds_len);
	siop_cmd->siop_tables->cmd.addr =
	    htole32(siop_cmd->dmamap_cmd->dm_segs[0].ds_addr);
	if (xs->xs_control & (XS_CTL_DATA_IN | XS_CTL_DATA_OUT)) {
		/* copy DMA segments into the script-visible data table */
		for (i = 0; i < siop_cmd->dmamap_data->dm_nsegs; i++) {
			siop_cmd->siop_tables->data[i].count =
			    htole32(siop_cmd->dmamap_data->dm_segs[i].ds_len);
			siop_cmd->siop_tables->data[i].addr =
			    htole32(siop_cmd->dmamap_data->dm_segs[i].ds_addr);
		}
	}
}
332
/*
 * siop_wdtr_neg: handle a WDTR (wide data transfer request) message,
 * either as the target's answer to a negotiation we initiated
 * (TARST_WIDE_NEG) or as a target-initiated negotiation.  Updates the
 * cached per-target id word (whose top byte shadows SCNTL3) and the
 * SCNTL3 register.  Returns SIOP_NEG_MSGOUT if a reply message has
 * been queued in the tables, or SIOP_NEG_ACK when the exchange is
 * complete.
 */
int
siop_wdtr_neg(siop_cmd)
	struct siop_common_cmd *siop_cmd;
{
	struct siop_common_softc *sc = siop_cmd->siop_sc;
	struct siop_common_target *siop_target = siop_cmd->siop_target;
	int target = siop_cmd->xs->xs_periph->periph_target;
	struct siop_common_xfer *tables = siop_cmd->siop_tables;

	if (siop_target->status == TARST_WIDE_NEG) {
		/* we initiated wide negotiation */
		switch (tables->msg_in[3]) {
		case MSG_EXT_WDTR_BUS_8_BIT:
			siop_target->flags &= ~TARF_ISWIDE;
			sc->targets[target]->id &= ~(SCNTL3_EWS << 24);
			break;
		case MSG_EXT_WDTR_BUS_16_BIT:
			if (siop_target->flags & TARF_WIDE) {
				siop_target->flags |= TARF_ISWIDE;
				sc->targets[target]->id |= (SCNTL3_EWS << 24);
				break;
			}
		/* FALLTHROUGH */
		default:
			/*
			 * hum, we got more than what we can handle, shouldn't
			 * happen. Reject, and stay async
			 */
			siop_target->flags &= ~TARF_ISWIDE;
			siop_target->status = TARST_OK;
			siop_target->offset = siop_target->period = 0;
			siop_update_xfer_mode(sc, target);
			printf("%s: rejecting invalid wide negotiation from "
			    "target %d (%d)\n", sc->sc_dev.dv_xname, target,
			    tables->msg_in[3]);
			tables->t_msgout.count= htole32(1);
			tables->msg_out[0] = MSG_MESSAGE_REJECT;
			return SIOP_NEG_MSGOUT;
		}
		tables->id = htole32(sc->targets[target]->id);
		bus_space_write_1(sc->sc_rt, sc->sc_rh,
		    SIOP_SCNTL3,
		    (sc->targets[target]->id >> 24) & 0xff);
		/* we now need to do sync */
		if (siop_target->flags & TARF_SYNC) {
			siop_target->status = TARST_SYNC_NEG;
			siop_sdtr_msg(siop_cmd, 0, sc->st_minsync,
			    (sc->maxoff > 31) ? 31 : sc->maxoff);
			return SIOP_NEG_MSGOUT;
		} else {
			siop_target->status = TARST_OK;
			siop_update_xfer_mode(sc, target);
			return SIOP_NEG_ACK;
		}
	} else {
		/* target initiated wide negotiation */
		if (tables->msg_in[3] >= MSG_EXT_WDTR_BUS_16_BIT
		    && (siop_target->flags & TARF_WIDE)) {
			siop_target->flags |= TARF_ISWIDE;
			sc->targets[target]->id |= SCNTL3_EWS << 24;
		} else {
			siop_target->flags &= ~TARF_ISWIDE;
			sc->targets[target]->id &= ~(SCNTL3_EWS << 24);
		}
		tables->id = htole32(sc->targets[target]->id);
		bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SCNTL3,
		    (sc->targets[target]->id >> 24) & 0xff);
		/*
		 * we did reset wide parameters, so fall back to async,
		 * but don't schedule a sync neg, target should initiate it
		 */
		siop_target->status = TARST_OK;
		siop_target->offset = siop_target->period = 0;
		siop_update_xfer_mode(sc, target);
		siop_wdtr_msg(siop_cmd, 0, (siop_target->flags & TARF_ISWIDE) ?
		    MSG_EXT_WDTR_BUS_16_BIT : MSG_EXT_WDTR_BUS_8_BIT);
		return SIOP_NEG_MSGOUT;
	}
}
412
413 int
414 siop_ppr_neg(siop_cmd)
415 struct siop_common_cmd *siop_cmd;
416 {
417 struct siop_common_softc *sc = siop_cmd->siop_sc;
418 struct siop_common_target *siop_target = siop_cmd->siop_target;
419 int target = siop_cmd->xs->xs_periph->periph_target;
420 struct siop_common_xfer *tables = siop_cmd->siop_tables;
421 int sync, offset, options, scf = 0;
422 int i;
423
424 #ifdef DEBUG_NEG
425 printf("%s: anserw on ppr negotiation:", sc->sc_dev.dv_xname);
426 for (i = 0; i < 8; i++)
427 printf(" 0x%x", tables->msg_in[i]);
428 printf("\n");
429 #endif
430
431 if (siop_target->status == TARST_PPR_NEG) {
432 /* we initiated PPR negotiation */
433 sync = tables->msg_in[3];
434 offset = tables->msg_in[5];
435 options = tables->msg_in[7];
436 if (options != MSG_EXT_PPR_DT) {
437 /* should't happen */
438 printf("%s: ppr negotiation for target %d: "
439 "no DT option\n", sc->sc_dev.dv_xname, target);
440 siop_target->status = TARST_ASYNC;
441 siop_target->flags &= ~(TARF_DT | TARF_ISDT);
442 siop_target->offset = 0;
443 siop_target->period = 0;
444 goto reject;
445 }
446
447 if (offset > sc->maxoff || sync < sc->dt_minsync ||
448 sync > sc->dt_maxsync) {
449 printf("%s: ppr negotiation for target %d: "
450 "offset (%d) or sync (%d) out of range\n",
451 sc->sc_dev.dv_xname, target, offset, sync);
452 /* should not happen */
453 siop_target->offset = 0;
454 siop_target->period = 0;
455 goto reject;
456 } else {
457 for (i = 0; i <
458 sizeof(dt_scf_period) / sizeof(dt_scf_period[0]);
459 i++) {
460 if (sc->clock_period != dt_scf_period[i].clock)
461 continue;
462 if (dt_scf_period[i].period == sync) {
463 /* ok, found it. we now are sync. */
464 siop_target->offset = offset;
465 siop_target->period = sync;
466 scf = dt_scf_period[i].scf;
467 siop_target->flags |= TARF_ISDT;
468 }
469 }
470 if ((siop_target->flags & TARF_ISDT) == 0) {
471 printf("%s: ppr negotiation for target %d: "
472 "sync (%d) incompatible with adapter\n",
473 sc->sc_dev.dv_xname, target, sync);
474 /*
475 * we didn't find it in our table, do async
476 * send reject msg, start SDTR/WDTR neg
477 */
478 siop_target->status = TARST_ASYNC;
479 siop_target->flags &= ~(TARF_DT | TARF_ISDT);
480 siop_target->offset = 0;
481 siop_target->period = 0;
482 goto reject;
483 }
484 }
485 if (tables->msg_in[6] != 1) {
486 printf("%s: ppr negotiation for target %d: "
487 "transfer width (%d) incompatible with dt\n",
488 sc->sc_dev.dv_xname, target, tables->msg_in[6]);
489 /* DT mode can only be done with wide transfers */
490 siop_target->status = TARST_ASYNC;
491 goto reject;
492 }
493 siop_target->flags |= TARF_ISWIDE;
494 sc->targets[target]->id |= (SCNTL3_EWS << 24);
495 sc->targets[target]->id &= ~(SCNTL3_SCF_MASK << 24);
496 sc->targets[target]->id |= scf << (24 + SCNTL3_SCF_SHIFT);
497 sc->targets[target]->id &= ~(SXFER_MO_MASK << 8);
498 sc->targets[target]->id |=
499 (siop_target->offset & SXFER_MO_MASK) << 8;
500 sc->targets[target]->id &= ~0xff;
501 sc->targets[target]->id |= SCNTL4_U3EN;
502 siop_target->status = TARST_OK;
503 siop_update_xfer_mode(sc, target);
504 bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SCNTL3,
505 (sc->targets[target]->id >> 24) & 0xff);
506 bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SXFER,
507 (sc->targets[target]->id >> 8) & 0xff);
508 bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SCNTL4,
509 sc->targets[target]->id & 0xff);
510 return SIOP_NEG_ACK;
511 } else {
512 /* target initiated PPR negotiation, shouldn't happen */
513 printf("%s: rejecting invalid PPR negotiation from "
514 "target %d\n", sc->sc_dev.dv_xname, target);
515 reject:
516 tables->t_msgout.count= htole32(1);
517 tables->msg_out[0] = MSG_MESSAGE_REJECT;
518 return SIOP_NEG_MSGOUT;
519 }
520 }
521
/*
 * siop_sdtr_neg: handle an SDTR (synchronous data transfer request)
 * message, either as the target's answer to a negotiation we
 * initiated (TARST_SYNC_NEG) or as a target-initiated negotiation.
 * The agreed period is looked up in scf_period to program the SCNTL3
 * clock scale factor and SXFER offset (cached in the target id word);
 * parameters we can't satisfy fall back to async with a reject (our
 * negotiation) or an async SDTR reply (target's negotiation).
 * Returns SIOP_NEG_MSGOUT if a reply is pending, SIOP_NEG_ACK
 * otherwise.
 */
int
siop_sdtr_neg(siop_cmd)
	struct siop_common_cmd *siop_cmd;
{
	struct siop_common_softc *sc = siop_cmd->siop_sc;
	struct siop_common_target *siop_target = siop_cmd->siop_target;
	int target = siop_cmd->xs->xs_periph->periph_target;
	int sync, maxoffset, offset, i;
	int send_msgout = 0;
	struct siop_common_xfer *tables = siop_cmd->siop_tables;

	/* limit to Ultra/2 parameters, need PPR for Ultra/3 */
	maxoffset = (sc->maxoff > 31) ? 31 : sc->maxoff;

	sync = tables->msg_in[3];
	offset = tables->msg_in[4];

	if (siop_target->status == TARST_SYNC_NEG) {
		/* we initiated sync negotiation */
		siop_target->status = TARST_OK;
#ifdef DEBUG
		printf("sdtr: sync %d offset %d\n", sync, offset);
#endif
		if (offset > maxoffset || sync < sc->st_minsync ||
		    sync > sc->st_maxsync)
			goto reject;
		for (i = 0; i < sizeof(scf_period) / sizeof(scf_period[0]);
		    i++) {
			if (sc->clock_period != scf_period[i].clock)
				continue;
			if (scf_period[i].period == sync) {
				/* ok, found it. we now are sync. */
				siop_target->offset = offset;
				siop_target->period = sync;
				sc->targets[target]->id &=
				    ~(SCNTL3_SCF_MASK << 24);
				sc->targets[target]->id |= scf_period[i].scf
				    << (24 + SCNTL3_SCF_SHIFT);
				if (sync < 25 && /* Ultra */
				    (sc->features & SF_BUS_ULTRA3) == 0)
					sc->targets[target]->id |=
					    SCNTL3_ULTRA << 24;
				else
					sc->targets[target]->id &=
					    ~(SCNTL3_ULTRA << 24);
				sc->targets[target]->id &=
				    ~(SXFER_MO_MASK << 8);
				sc->targets[target]->id |=
				    (offset & SXFER_MO_MASK) << 8;
				sc->targets[target]->id &= ~0xff; /* scntl4 */
				goto end;
			}
		}
		/*
		 * we didn't find it in our table, do async and send reject
		 * msg
		 */
reject:
		send_msgout = 1;
		tables->t_msgout.count= htole32(1);
		tables->msg_out[0] = MSG_MESSAGE_REJECT;
		sc->targets[target]->id &= ~(SCNTL3_SCF_MASK << 24);
		sc->targets[target]->id &= ~(SCNTL3_ULTRA << 24);
		sc->targets[target]->id &= ~(SXFER_MO_MASK << 8);
		sc->targets[target]->id &= ~0xff; /* scntl4 */
		siop_target->offset = siop_target->period = 0;
	} else { /* target initiated sync neg */
#ifdef DEBUG
		printf("sdtr (target): sync %d offset %d\n", sync, offset);
#endif
		if (offset == 0 || sync > sc->st_maxsync) { /* async */
			goto async;
		}
		/* clamp the target's request to what we can do */
		if (offset > maxoffset)
			offset = maxoffset;
		if (sync < sc->st_minsync)
			sync = sc->st_minsync;
		/* look for sync period */
		for (i = 0; i < sizeof(scf_period) / sizeof(scf_period[0]);
		    i++) {
			if (sc->clock_period != scf_period[i].clock)
				continue;
			if (scf_period[i].period == sync) {
				/* ok, found it. we now are sync. */
				siop_target->offset = offset;
				siop_target->period = sync;
				sc->targets[target]->id &=
				    ~(SCNTL3_SCF_MASK << 24);
				sc->targets[target]->id |= scf_period[i].scf
				    << (24 + SCNTL3_SCF_SHIFT);
				if (sync < 25 && /* Ultra */
				    (sc->features & SF_BUS_ULTRA3) == 0)
					sc->targets[target]->id |=
					    SCNTL3_ULTRA << 24;
				else
					sc->targets[target]->id &=
					    ~(SCNTL3_ULTRA << 24);
				sc->targets[target]->id &=
				    ~(SXFER_MO_MASK << 8);
				sc->targets[target]->id |=
				    (offset & SXFER_MO_MASK) << 8;
				sc->targets[target]->id &= ~0xff; /* scntl4 */
				siop_sdtr_msg(siop_cmd, 0, sync, offset);
				send_msgout = 1;
				goto end;
			}
		}
async:
		siop_target->offset = siop_target->period = 0;
		sc->targets[target]->id &= ~(SCNTL3_SCF_MASK << 24);
		sc->targets[target]->id &= ~(SCNTL3_ULTRA << 24);
		sc->targets[target]->id &= ~(SXFER_MO_MASK << 8);
		sc->targets[target]->id &= ~0xff; /* scntl4 */
		siop_sdtr_msg(siop_cmd, 0, 0, 0);
		send_msgout = 1;
	}
end:
	if (siop_target->status == TARST_OK)
		siop_update_xfer_mode(sc, target);
#ifdef DEBUG
	printf("id now 0x%x\n", sc->targets[target]->id);
#endif
	tables->id = htole32(sc->targets[target]->id);
	bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SCNTL3,
	    (sc->targets[target]->id >> 24) & 0xff);
	bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SXFER,
	    (sc->targets[target]->id >> 8) & 0xff);
	if (send_msgout) {
		return SIOP_NEG_MSGOUT;
	} else {
		return SIOP_NEG_ACK;
	}
}
655
656 void
657 siop_sdtr_msg(siop_cmd, offset, ssync, soff)
658 struct siop_common_cmd *siop_cmd;
659 int offset;
660 int ssync, soff;
661 {
662 siop_cmd->siop_tables->msg_out[offset + 0] = MSG_EXTENDED;
663 siop_cmd->siop_tables->msg_out[offset + 1] = MSG_EXT_SDTR_LEN;
664 siop_cmd->siop_tables->msg_out[offset + 2] = MSG_EXT_SDTR;
665 siop_cmd->siop_tables->msg_out[offset + 3] = ssync;
666 siop_cmd->siop_tables->msg_out[offset + 4] = soff;
667 siop_cmd->siop_tables->t_msgout.count =
668 htole32(offset + MSG_EXT_SDTR_LEN + 2);
669 }
670
671 void
672 siop_wdtr_msg(siop_cmd, offset, wide)
673 struct siop_common_cmd *siop_cmd;
674 int offset;
675 int wide;
676 {
677 siop_cmd->siop_tables->msg_out[offset + 0] = MSG_EXTENDED;
678 siop_cmd->siop_tables->msg_out[offset + 1] = MSG_EXT_WDTR_LEN;
679 siop_cmd->siop_tables->msg_out[offset + 2] = MSG_EXT_WDTR;
680 siop_cmd->siop_tables->msg_out[offset + 3] = wide;
681 siop_cmd->siop_tables->t_msgout.count =
682 htole32(offset + MSG_EXT_WDTR_LEN + 2);
683 }
684
685 void
686 siop_ppr_msg(siop_cmd, offset, ssync, soff)
687 struct siop_common_cmd *siop_cmd;
688 int offset;
689 int ssync, soff;
690 {
691 siop_cmd->siop_tables->msg_out[offset + 0] = MSG_EXTENDED;
692 siop_cmd->siop_tables->msg_out[offset + 1] = MSG_EXT_PPR_LEN;
693 siop_cmd->siop_tables->msg_out[offset + 2] = MSG_EXT_PPR;
694 siop_cmd->siop_tables->msg_out[offset + 3] = ssync;
695 siop_cmd->siop_tables->msg_out[offset + 4] = 0; /* reserved */
696 siop_cmd->siop_tables->msg_out[offset + 5] = soff;
697 siop_cmd->siop_tables->msg_out[offset + 6] = 1; /* wide */
698 siop_cmd->siop_tables->msg_out[offset + 7] = MSG_EXT_PPR_DT;
699 siop_cmd->siop_tables->t_msgout.count =
700 htole32(offset + MSG_EXT_PPR_LEN + 2);
701 }
702
/*
 * siop_minphys: adapter minphys hook; we impose no limit beyond the
 * machine-independent default, so just defer to minphys().
 */
void
siop_minphys(struct buf *bp)
{

	minphys(bp);
}
709
710 int
711 siop_ioctl(struct scsipi_channel *chan, u_long cmd, caddr_t arg,
712 int flag, struct proc *p)
713 {
714 struct siop_common_softc *sc = (void *)chan->chan_adapter->adapt_dev;
715
716 switch (cmd) {
717 case SCBUSIORESET:
718 /*
719 * abort the script. This will trigger an interrupt, which will
720 * trigger a bus reset.
721 * We can't safely trigger the reset here as we can't access
722 * the required register while the script is running.
723 */
724 bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_ISTAT, ISTAT_ABRT);
725 return (0);
726 default:
727 return (ENOTTY);
728 }
729 }
730
731 void
732 siop_ma(siop_cmd)
733 struct siop_common_cmd *siop_cmd;
734 {
735 int offset, dbc, sstat;
736 struct siop_common_softc *sc = siop_cmd->siop_sc;
737 scr_table_t *table; /* table with partial xfer */
738
739 /*
740 * compute how much of the current table didn't get handled when
741 * a phase mismatch occurs
742 */
743 if ((siop_cmd->xs->xs_control & (XS_CTL_DATA_OUT | XS_CTL_DATA_IN))
744 == 0)
745 return; /* no valid data transfer */
746
747 offset = bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_SCRATCHA + 1);
748 if (offset >= SIOP_NSG) {
749 printf("%s: bad offset in siop_sdp (%d)\n",
750 sc->sc_dev.dv_xname, offset);
751 return;
752 }
753 table = &siop_cmd->siop_tables->data[offset];
754 #ifdef DEBUG_DR
755 printf("siop_ma: offset %d count=%d addr=0x%x ", offset,
756 table->count, table->addr);
757 #endif
758 dbc = bus_space_read_4(sc->sc_rt, sc->sc_rh, SIOP_DBC) & 0x00ffffff;
759 if (siop_cmd->xs->xs_control & XS_CTL_DATA_OUT) {
760 if (sc->features & SF_CHIP_DFBC) {
761 dbc +=
762 bus_space_read_2(sc->sc_rt, sc->sc_rh, SIOP_DFBC);
763 } else {
764 /* need to account stale data in FIFO */
765 int dfifo =
766 bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_DFIFO);
767 if (sc->features & SF_CHIP_FIFO) {
768 dfifo |= (bus_space_read_1(sc->sc_rt, sc->sc_rh,
769 SIOP_CTEST5) & CTEST5_BOMASK) << 8;
770 dbc += (dfifo - (dbc & 0x3ff)) & 0x3ff;
771 } else {
772 dbc += (dfifo - (dbc & 0x7f)) & 0x7f;
773 }
774 }
775 sstat = bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_SSTAT0);
776 if (sstat & SSTAT0_OLF)
777 dbc++;
778 if ((sstat & SSTAT0_ORF) && (sc->features & SF_CHIP_DFBC) == 0)
779 dbc++;
780 if (siop_cmd->siop_target->flags & TARF_ISWIDE) {
781 sstat = bus_space_read_1(sc->sc_rt, sc->sc_rh,
782 SIOP_SSTAT2);
783 if (sstat & SSTAT2_OLF1)
784 dbc++;
785 if ((sstat & SSTAT2_ORF1) &&
786 (sc->features & SF_CHIP_DFBC) == 0)
787 dbc++;
788 }
789 /* clear the FIFO */
790 bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_CTEST3,
791 bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_CTEST3) |
792 CTEST3_CLF);
793 }
794 siop_cmd->flags |= CMDFL_RESID;
795 siop_cmd->resid = dbc;
796 }
797
/*
 * siop_sdp: handle a SAVE DATA POINTER message.  'offset' is the
 * index of the first data table with untransferred data; completed
 * entries are dropped by shifting the remaining entries to the front
 * of the table array, and a partial entry (phase mismatch resid) is
 * trimmed to its untransferred tail first.
 */
void
siop_sdp(siop_cmd, offset)
	struct siop_common_cmd *siop_cmd;
	int offset;
{
	scr_table_t *table;

	if ((siop_cmd->xs->xs_control & (XS_CTL_DATA_OUT | XS_CTL_DATA_IN))
	    == 0)
		return; /* no data pointers to save */

	/*
	 * offset == SIOP_NSG may be a valid condition if we get a Save data
	 * pointer when the xfer is done. Just ignore the Save data pointer
	 * in this case
	 */
	if (offset == SIOP_NSG)
		return;
#ifdef DIAGNOSTIC
	if (offset > SIOP_NSG) {
		scsipi_printaddr(siop_cmd->xs->xs_periph);
		printf(": offset %d > %d\n", offset, SIOP_NSG);
		panic("siop_sdp: offset");
	}
#endif
	/*
	 * Save data pointer. We do this by adjusting the tables to point
	 * at the beginning of the data not yet transferred.
	 * offset points to the first table with untransferred data.
	 */

	/*
	 * before doing that we decrease resid from the amount of data which
	 * has been transferred.
	 */
	siop_update_resid(siop_cmd, offset);

	/*
	 * First let see if we have a resid from a phase mismatch. If so,
	 * we have to adjust the table at offset to remove transferred data.
	 */
	if (siop_cmd->flags & CMDFL_RESID) {
		siop_cmd->flags &= ~CMDFL_RESID;
		table = &siop_cmd->siop_tables->data[offset];
		/* "cut" already transferred data from this table */
		table->addr =
		    htole32(le32toh(table->addr) +
		    le32toh(table->count) - siop_cmd->resid);
		table->count = htole32(siop_cmd->resid);
	}

	/*
	 * now we can remove entries which have been transferred.
	 * We just move the entries with data left at the beginning of the
	 * tables
	 */
	memmove(&siop_cmd->siop_tables->data[0],
	    &siop_cmd->siop_tables->data[offset],
	    (SIOP_NSG - offset) * sizeof(scr_table_t));
}
858
/*
 * siop_update_resid: subtract from xs->resid the data transferred so
 * far.  All data table entries before 'offset' completed in full; if
 * CMDFL_RESID is set the entry at 'offset' only partially completed,
 * with siop_cmd->resid bytes left.
 */
void
siop_update_resid(siop_cmd, offset)
	struct siop_common_cmd *siop_cmd;
	int offset;
{
	scr_table_t *table;
	int i;

	if ((siop_cmd->xs->xs_control & (XS_CTL_DATA_OUT | XS_CTL_DATA_IN))
	    == 0)
		return; /* no data to transfer */

	/*
	 * update resid. First account for the table entries which have
	 * been fully completed.
	 */
	for (i = 0; i < offset; i++)
		siop_cmd->xs->resid -=
		    le32toh(siop_cmd->siop_tables->data[i].count);
	/*
	 * if CMDFL_RESID is set, the last table (pointed by offset) is a
	 * partial transfer. If not, offset points to the entry following
	 * the last full transfer.
	 */
	if (siop_cmd->flags & CMDFL_RESID) {
		table = &siop_cmd->siop_tables->data[offset];
		siop_cmd->xs->resid -= le32toh(table->count) - siop_cmd->resid;
	}
}
888
/*
 * siop_iwr: handle an IGNORE WIDE RESIDUE message from a wide target.
 * If the last data table really ended on an odd boundary the message
 * is simply acknowledged; otherwise the transfer is one byte shorter
 * than the chip thought, and we record/adjust a resid accordingly.
 * Returns SIOP_NEG_ACK, or SIOP_NEG_MSGOUT with a reject queued if
 * the target isn't wide.
 */
int
siop_iwr(siop_cmd)
	struct siop_common_cmd *siop_cmd;
{
	int offset;
	scr_table_t *table; /* table with IWR */
	struct siop_common_softc *sc = siop_cmd->siop_sc;
	/* handle ignore wide residue messages */

	/* if target isn't wide, reject */
	if ((siop_cmd->siop_target->flags & TARF_ISWIDE) == 0) {
		siop_cmd->siop_tables->t_msgout.count= htole32(1);
		siop_cmd->siop_tables->msg_out[0] = MSG_MESSAGE_REJECT;
		return SIOP_NEG_MSGOUT;
	}
	/* get index of current command in table */
	offset = bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_SCRATCHA + 1);
	/*
	 * if the current table did complete, we're now pointing at the
	 * next one. Go back one if we didn't see a phase mismatch.
	 */
	if ((siop_cmd->flags & CMDFL_RESID) == 0)
		offset--;
	table = &siop_cmd->siop_tables->data[offset];

	if ((siop_cmd->flags & CMDFL_RESID) == 0) {
		if (le32toh(table->count) & 1) {
			/* we really got the number of bytes we expected */
			return SIOP_NEG_ACK;
		} else {
			/*
			 * now we really had a short xfer, by one byte.
			 * handle it just as if we had a phase mismatch
			 * (there is a resid of one for this table).
			 * Update scratcha1 to reflect the fact that
			 * this xfer isn't complete.
			 */
			siop_cmd->flags |= CMDFL_RESID;
			siop_cmd->resid = 1;
			bus_space_write_1(sc->sc_rt, sc->sc_rh,
			    SIOP_SCRATCHA + 1, offset);
			return SIOP_NEG_ACK;
		}
	} else {
		/*
		 * we already have a short xfer for this table; it's
		 * just one byte less than we thought it was
		 */
		siop_cmd->resid--;
		return SIOP_NEG_ACK;
	}
}
941
942 void
943 siop_clearfifo(sc)
944 struct siop_common_softc *sc;
945 {
946 int timeout = 0;
947 int ctest3 = bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_CTEST3);
948
949 #ifdef DEBUG_INTR
950 printf("DMA fifo not empty !\n");
951 #endif
952 bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_CTEST3,
953 ctest3 | CTEST3_CLF);
954 while ((bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_CTEST3) &
955 CTEST3_CLF) != 0) {
956 delay(1);
957 if (++timeout > 1000) {
958 printf("clear fifo failed\n");
959 bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_CTEST3,
960 bus_space_read_1(sc->sc_rt, sc->sc_rh,
961 SIOP_CTEST3) & ~CTEST3_CLF);
962 return;
963 }
964 }
965 }
966
/*
 * siop_modechange: called after an SBMC (SCSI bus mode change)
 * interrupt.  Waits for DIFFSENSE to stabilise, then reprograms
 * STEST2 for the new bus mode (differential, single-ended or LVD).
 * Returns 1 on success, 0 if the mode is invalid or DIFFSENSE never
 * stabilised.
 */
int
siop_modechange(sc)
	struct siop_common_softc *sc;
{
	int retry;
	int sist0, sist1, stest2;
	for (retry = 0; retry < 5; retry++) {
		/*
		 * datasheet says to wait 100ms and re-read SIST1,
		 * to check that DIFFSENSE is stable.
		 * We may delay() 5 times for 100ms at interrupt time;
		 * hopefully this will not happen often.
		 */
		delay(100000);
		sist0 = bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_SIST0);
		sist1 = bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_SIST1);
		if (sist1 & SIEN1_SBMC)
			continue; /* we got an irq again */
		sc->mode = bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_STEST4) &
		    STEST4_MODE_MASK;
		stest2 = bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_STEST2);
		switch(sc->mode) {
		case STEST4_MODE_DIF:
			printf("%s: switching to differential mode\n",
			    sc->sc_dev.dv_xname);
			bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_STEST2,
			    stest2 | STEST2_DIF);
			break;
		case STEST4_MODE_SE:
			printf("%s: switching to single-ended mode\n",
			    sc->sc_dev.dv_xname);
			bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_STEST2,
			    stest2 & ~STEST2_DIF);
			break;
		case STEST4_MODE_LVD:
			printf("%s: switching to LVD mode\n",
			    sc->sc_dev.dv_xname);
			bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_STEST2,
			    stest2 & ~STEST2_DIF);
			break;
		default:
			printf("%s: invalid SCSI mode 0x%x\n",
			    sc->sc_dev.dv_xname, sc->mode);
			return 0;
		}
		return 1;
	}
	printf("%s: timeout waiting for DIFFSENSE to stabilise\n",
	    sc->sc_dev.dv_xname);
	return 0;
}
1018
1019 void
1020 siop_resetbus(sc)
1021 struct siop_common_softc *sc;
1022 {
1023 int scntl1;
1024 scntl1 = bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_SCNTL1);
1025 bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SCNTL1,
1026 scntl1 | SCNTL1_RST);
1027 /* minimum 25 us, more time won't hurt */
1028 delay(100);
1029 bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SCNTL1, scntl1);
1030 }
1031
1032 void
1033 siop_update_xfer_mode(sc, target)
1034 struct siop_common_softc *sc;
1035 int target;
1036 {
1037 struct siop_common_target *siop_target = sc->targets[target];
1038 struct scsipi_xfer_mode xm;
1039
1040 xm.xm_target = target;
1041 xm.xm_mode = 0;
1042 xm.xm_period = 0;
1043 xm.xm_offset = 0;
1044
1045
1046 if (siop_target->flags & TARF_ISWIDE)
1047 xm.xm_mode |= PERIPH_CAP_WIDE16;
1048 if (siop_target->period) {
1049 xm.xm_period = siop_target->period;
1050 xm.xm_offset = siop_target->offset;
1051 xm.xm_mode |= PERIPH_CAP_SYNC;
1052 }
1053 if (siop_target->flags & TARF_TAG) {
1054 /* 1010 workaround: can't do disconnect if not wide, so can't do tag */
1055 if ((sc->features & SF_CHIP_GEBUG) == 0 ||
1056 (sc->targets[target]->flags & TARF_ISWIDE))
1057 xm.xm_mode |= PERIPH_CAP_TQING;
1058 }
1059
1060 scsipi_async_event(&sc->sc_chan, ASYNC_EVENT_XFER_MODE, &xm);
1061 }
/* Cache object: a96ccd4b6189850e22f97ef23db3b7a5 (source-viewer extraction residue) */