1 /* $NetBSD: fwohci.c,v 1.84.2.1 2007/09/09 21:32:18 bouyer Exp $ */
2
3 /*-
4 * Copyright (c) 2000 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Matt Thomas of 3am Software Foundry.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 * 3. All advertising materials mentioning features or use of this software
19 * must display the following acknowledgement:
20 * This product includes software developed by the NetBSD
21 * Foundation, Inc. and its contributors.
22 * 4. Neither the name of The NetBSD Foundation nor the names of its
23 * contributors may be used to endorse or promote products derived
24 * from this software without specific prior written permission.
25 *
26 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
27 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
28 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
29 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
30 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
31 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
32 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
33 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
34 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
35 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
36 * POSSIBILITY OF SUCH DAMAGE.
37 */
38
39 /*
40 * IEEE1394 Open Host Controller Interface
41 * based on OHCI Specification 1.1 (January 6, 2000)
 * The first version to support the network interface part was written by
 * Atsushi Onoe <onoe@NetBSD.org>.
44 */
45
/*
 * The first version to support the isochronous acquisition part was
 * written by HAYAKAWA Koichi <haya@NetBSD.org>.
 */
50
51 #include <sys/cdefs.h>
52 __KERNEL_RCSID(0, "$NetBSD: fwohci.c,v 1.84.2.1 2007/09/09 21:32:18 bouyer Exp $");
53
54 #define FWOHCI_WAIT_DEBUG 1
55
56 #define FWOHCI_IT_BUFNUM 4
57
58 #include "opt_inet.h"
59 #include "fwiso.h"
60
61 #include <sys/param.h>
62 #include <sys/systm.h>
63 #include <sys/kthread.h>
64 #include <sys/socket.h>
65 #include <sys/callout.h>
66 #include <sys/device.h>
67 #include <sys/kernel.h>
68 #include <sys/malloc.h>
69 #include <sys/mbuf.h>
70 #include <sys/poll.h>
71 #include <sys/select.h>
72
73 #if __NetBSD_Version__ >= 105010000
74 #include <uvm/uvm_extern.h>
75 #else
76 #include <vm/vm.h>
77 #endif
78
79 #include <machine/bus.h>
80 #include <machine/intr.h>
81
82 #include <dev/ieee1394/ieee1394reg.h>
83 #include <dev/ieee1394/fwohcireg.h>
84
85 #include <dev/ieee1394/ieee1394var.h>
86 #include <dev/ieee1394/fwohcivar.h>
87 #include <dev/ieee1394/fwisovar.h>
88
89 static const char * const ieee1394_speeds[] = { IEEE1394_SPD_STRINGS };
90
91 #if 0
92 static int fwohci_dnamem_alloc(struct fwohci_softc *sc, int size,
93 int alignment, bus_dmamap_t *mapp, caddr_t *kvap, int flags);
94 #endif
95 static void fwohci_create_event_thread(void *);
96 static void fwohci_thread_init(void *);
97
98 static void fwohci_event_thread(struct fwohci_softc *);
99 static void fwohci_hw_init(struct fwohci_softc *);
100 static void fwohci_power(int, void *);
101 static void fwohci_shutdown(void *);
102
103 static int fwohci_desc_alloc(struct fwohci_softc *);
104 static struct fwohci_desc *fwohci_desc_get(struct fwohci_softc *, int);
105 static void fwohci_desc_put(struct fwohci_softc *, struct fwohci_desc *, int);
106
107 static int fwohci_ctx_alloc(struct fwohci_softc *, struct fwohci_ctx **,
108 int, int, int);
109 static void fwohci_ctx_free(struct fwohci_softc *, struct fwohci_ctx *);
110 static void fwohci_ctx_init(struct fwohci_softc *, struct fwohci_ctx *);
111
112 static int fwohci_misc_dmabuf_alloc(bus_dma_tag_t, int, int,
113 bus_dma_segment_t *, bus_dmamap_t *, void **, const char *);
114 static void fwohci_misc_dmabuf_free(bus_dma_tag_t, int, int,
115 bus_dma_segment_t *, bus_dmamap_t *, caddr_t);
116
117 static struct fwohci_ir_ctx *fwohci_ir_ctx_construct(struct fwohci_softc *,
118 int, int, int, int, int, int);
119 static void fwohci_ir_ctx_destruct(struct fwohci_ir_ctx *);
120
121 static int fwohci_ir_buf_setup(struct fwohci_ir_ctx *);
122 static int fwohci_ir_init(struct fwohci_ir_ctx *);
123 static int fwohci_ir_start(struct fwohci_ir_ctx *);
124 static void fwohci_ir_intr(struct fwohci_softc *, struct fwohci_ir_ctx *);
125 static int fwohci_ir_stop(struct fwohci_ir_ctx *);
126 static int fwohci_ir_ctx_packetnum(struct fwohci_ir_ctx *);
127 #ifdef USEDRAIN
128 static int fwohci_ir_ctx_drain(struct fwohci_ir_ctx *);
129 #endif /* USEDRAIN */
130
131 static int fwohci_it_desc_alloc(struct fwohci_it_ctx *);
132 static void fwohci_it_desc_free(struct fwohci_it_ctx *itc);
133 struct fwohci_it_ctx *fwohci_it_ctx_construct(struct fwohci_softc *,
134 int, int, int, int);
135 void fwohci_it_ctx_destruct(struct fwohci_it_ctx *);
136 int fwohci_it_ctx_writedata(ieee1394_it_tag_t, int,
137 struct ieee1394_it_datalist *, int);
138 static void fwohci_it_ctx_run(struct fwohci_it_ctx *);
139 int fwohci_it_ctx_flush(ieee1394_it_tag_t);
140 static void fwohci_it_intr(struct fwohci_softc *, struct fwohci_it_ctx *);
141
142 int fwohci_itd_construct(struct fwohci_it_ctx *, struct fwohci_it_dmabuf *,
143 int, struct fwohci_desc *, bus_addr_t, int, int, paddr_t);
144 void fwohci_itd_destruct(struct fwohci_it_dmabuf *);
145 static int fwohci_itd_dmabuf_alloc(struct fwohci_it_dmabuf *);
146 static void fwohci_itd_dmabuf_free(struct fwohci_it_dmabuf *);
147 int fwohci_itd_link(struct fwohci_it_dmabuf *, struct fwohci_it_dmabuf *);
148 int fwohci_itd_unlink(struct fwohci_it_dmabuf *);
149 int fwohci_itd_writedata(struct fwohci_it_dmabuf *, int,
150 struct ieee1394_it_datalist *);
151 int fwohci_itd_isfilled(struct fwohci_it_dmabuf *);
152
153 static int fwohci_buf_alloc(struct fwohci_softc *, struct fwohci_buf *);
154 static void fwohci_buf_free(struct fwohci_softc *, struct fwohci_buf *);
155 static void fwohci_buf_init_rx(struct fwohci_softc *);
156 static void fwohci_buf_start_rx(struct fwohci_softc *);
157 static void fwohci_buf_stop_tx(struct fwohci_softc *);
158 static void fwohci_buf_stop_rx(struct fwohci_softc *);
159 static void fwohci_buf_next(struct fwohci_softc *, struct fwohci_ctx *);
160 static int fwohci_buf_pktget(struct fwohci_softc *, struct fwohci_buf **,
161 caddr_t *, int);
162 static int fwohci_buf_input(struct fwohci_softc *, struct fwohci_ctx *,
163 struct fwohci_pkt *);
164 static int fwohci_buf_input_ppb(struct fwohci_softc *, struct fwohci_ctx *,
165 struct fwohci_pkt *);
166
167 static u_int8_t fwohci_phy_read(struct fwohci_softc *, u_int8_t);
168 static void fwohci_phy_write(struct fwohci_softc *, u_int8_t, u_int8_t);
169 static void fwohci_phy_busreset(struct fwohci_softc *);
170 static void fwohci_phy_input(struct fwohci_softc *, struct fwohci_pkt *);
171
172 static int fwohci_handler_set(struct fwohci_softc *, int, u_int32_t, u_int32_t,
173 u_int32_t, int (*)(struct fwohci_softc *, void *, struct fwohci_pkt *),
174 void *);
175
176 ieee1394_ir_tag_t fwohci_ir_ctx_set(struct device *, int, int, int, int, int);
177 int fwohci_ir_ctx_clear(struct device *, ieee1394_ir_tag_t);
178 int fwohci_ir_read(struct device *, ieee1394_ir_tag_t, struct uio *,
179 int, int);
180 int fwohci_ir_wait(struct device *, ieee1394_ir_tag_t, void *, char *name);
181 int fwohci_ir_select(struct device *, ieee1394_ir_tag_t, struct proc *);
182
183
184
185 ieee1394_it_tag_t fwohci_it_set(struct ieee1394_softc *, int, int);
186 static ieee1394_it_tag_t fwohci_it_ctx_set(struct fwohci_softc *, int, int, int);
187 int fwohci_it_ctx_clear(ieee1394_it_tag_t *);
188
189 static void fwohci_arrq_input(struct fwohci_softc *, struct fwohci_ctx *);
190 static void fwohci_arrs_input(struct fwohci_softc *, struct fwohci_ctx *);
191 static void fwohci_as_input(struct fwohci_softc *, struct fwohci_ctx *);
192
193 static int fwohci_at_output(struct fwohci_softc *, struct fwohci_ctx *,
194 struct fwohci_pkt *);
195 static void fwohci_at_done(struct fwohci_softc *, struct fwohci_ctx *, int);
196 static void fwohci_atrs_output(struct fwohci_softc *, int, struct fwohci_pkt *,
197 struct fwohci_pkt *);
198
199 static int fwohci_guidrom_init(struct fwohci_softc *);
200 static void fwohci_configrom_init(struct fwohci_softc *);
201 static int fwohci_configrom_input(struct fwohci_softc *, void *,
202 struct fwohci_pkt *);
203 static void fwohci_selfid_init(struct fwohci_softc *);
204 static int fwohci_selfid_input(struct fwohci_softc *);
205
206 static void fwohci_csr_init(struct fwohci_softc *);
207 static int fwohci_csr_input(struct fwohci_softc *, void *,
208 struct fwohci_pkt *);
209
210 static void fwohci_uid_collect(struct fwohci_softc *);
211 static void fwohci_uid_req(struct fwohci_softc *, int);
212 static int fwohci_uid_input(struct fwohci_softc *, void *,
213 struct fwohci_pkt *);
214 static int fwohci_uid_lookup(struct fwohci_softc *, const u_int8_t *);
215 static void fwohci_check_nodes(struct fwohci_softc *);
216
217 static int fwohci_if_inreg(struct device *, u_int32_t, u_int32_t,
218 void (*)(struct device *, struct mbuf *));
219 static int fwohci_if_input(struct fwohci_softc *, void *, struct fwohci_pkt *);
220 static int fwohci_if_input_iso(struct fwohci_softc *, void *, struct fwohci_pkt *);
221
222 static int fwohci_if_output(struct device *, struct mbuf *,
223 void (*)(struct device *, struct mbuf *));
224 static int fwohci_if_setiso(struct device *, u_int32_t, u_int32_t, u_int32_t,
225 void (*)(struct device *, struct mbuf *));
226 static int fwohci_read(struct ieee1394_abuf *);
227 static int fwohci_write(struct ieee1394_abuf *);
228 static int fwohci_read_resp(struct fwohci_softc *, void *, struct fwohci_pkt *);
229 static int fwohci_write_ack(struct fwohci_softc *, void *, struct fwohci_pkt *);
230 static int fwohci_read_multi_resp(struct fwohci_softc *, void *,
231 struct fwohci_pkt *);
232 static int fwohci_inreg(struct ieee1394_abuf *, int);
233 static int fwohci_unreg(struct ieee1394_abuf *, int);
234 static int fwohci_parse_input(struct fwohci_softc *, void *,
235 struct fwohci_pkt *);
236 static int fwohci_submatch(struct device *, struct cfdata *,
237 const locdesc_t *, void *);
238
239 /* XXX */
240 u_int16_t fwohci_cycletimer(struct fwohci_softc *);
241 u_int16_t fwohci_it_cycletimer(ieee1394_it_tag_t);
242
243 #ifdef FW_DEBUG
244 static void fwohci_show_intr(struct fwohci_softc *, u_int32_t);
245 static void fwohci_show_phypkt(struct fwohci_softc *, u_int32_t);
246
247 /* 1 is normal debug, 2 is verbose debug, 3 is complete (packet dumps). */
248
249 #define DPRINTF(x) if (fwdebug) printf x
250 #define DPRINTFN(n,x) if (fwdebug>(n)) printf x
251 int fwdebug = 1;
252 #else
253 #define DPRINTF(x)
254 #define DPRINTFN(n,x)
255 #endif
256
257 #define OHCI_ITHEADER_SPD_MASK 0x00070000
258 #define OHCI_ITHEADER_SPD_BITPOS 16
259 #define OHCI_ITHEADER_TAG_MASK 0x0000c000
260 #define OHCI_ITHEADER_TAG_BITPOS 14
261 #define OHCI_ITHEADER_CHAN_MASK 0x00003f00
262 #define OHCI_ITHEADER_CHAN_BITPOS 8
263 #define OHCI_ITHEADER_TCODE_MASK 0x000000f0
264 #define OHCI_ITHEADER_TCODE_BITPOS 4
265 #define OHCI_ITHEADER_SY_MASK 0x0000000f
266 #define OHCI_ITHEADER_SY_BITPOS 0
267
268 #define OHCI_ITHEADER_VAL(fld, val) \
269 (OHCI_ITHEADER_##fld##_MASK & ((val) << OHCI_ITHEADER_##fld##_BITPOS))
270
/*
 * Attach-time initialization of the OHCI controller.
 *
 * Waits for any pending soft reset, reports the controller's OHCI
 * version, reads the 64-bit GUID from the GUID ROM, determines the
 * link speed and maximum receive size from BusOptions, counts the
 * isochronous receive/transmit contexts, installs the asynchronous
 * read/write/inreg/unreg callbacks, and finally spawns the event
 * thread (fwohci_thread_init) which completes the setup.
 *
 * Returns 0 on success, -1 if the GUID ROM cannot be read.
 */
int
fwohci_init(struct fwohci_softc *sc, const struct evcnt *ev)
{
	int i;
	u_int32_t val;
#if 0
	int error;
#endif

	/* Attach event counters for interrupt and receive statistics. */
	evcnt_attach_dynamic(&sc->sc_intrcnt, EVCNT_TYPE_INTR, ev,
	    sc->sc_sc1394.sc1394_dev.dv_xname, "intr");

	evcnt_attach_dynamic(&sc->sc_isocnt, EVCNT_TYPE_MISC, ev,
	    sc->sc_sc1394.sc1394_dev.dv_xname, "isorcvs");
	evcnt_attach_dynamic(&sc->sc_ascnt, EVCNT_TYPE_MISC, ev,
	    sc->sc_sc1394.sc1394_dev.dv_xname, "asrcvs");
	evcnt_attach_dynamic(&sc->sc_itintrcnt, EVCNT_TYPE_INTR, ev,
	    sc->sc_sc1394.sc1394_dev.dv_xname, "itintr");

	/*
	 * Wait for reset completion (bounded poll, 10us per iteration).
	 */
	for (i = 0; i < OHCI_LOOP; i++) {
		val = OHCI_CSR_READ(sc, OHCI_REG_HCControlClear);
		if ((val & OHCI_HCControl_SoftReset) == 0)
			break;
		DELAY(10);
	}

	/* What dialect of OHCI is this device?
	 */
	val = OHCI_CSR_READ(sc, OHCI_REG_Version);
	aprint_normal("%s: OHCI %u.%u", sc->sc_sc1394.sc1394_dev.dv_xname,
	    OHCI_Version_GET_Version(val), OHCI_Version_GET_Revision(val));

	LIST_INIT(&sc->sc_nodelist);

	/* The GUID is mandatory; without it the bus cannot be addressed. */
	if (fwohci_guidrom_init(sc) != 0) {
		aprint_error("\n%s: fatal: no global UID ROM\n",
		    sc->sc_sc1394.sc1394_dev.dv_xname);
		return -1;
	}

	aprint_normal(", %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x",
	    sc->sc_sc1394.sc1394_guid[0], sc->sc_sc1394.sc1394_guid[1],
	    sc->sc_sc1394.sc1394_guid[2], sc->sc_sc1394.sc1394_guid[3],
	    sc->sc_sc1394.sc1394_guid[4], sc->sc_sc1394.sc1394_guid[5],
	    sc->sc_sc1394.sc1394_guid[6], sc->sc_sc1394.sc1394_guid[7]);

	/* Get the maximum link speed and receive size
	 */
	val = OHCI_CSR_READ(sc, OHCI_REG_BusOptions);
	sc->sc_sc1394.sc1394_link_speed =
	    OHCI_BITVAL(val, OHCI_BusOptions_LinkSpd);
	if (sc->sc_sc1394.sc1394_link_speed < IEEE1394_SPD_MAX) {
		aprint_normal(", %s",
		    ieee1394_speeds[sc->sc_sc1394.sc1394_link_speed]);
	} else {
		aprint_normal(", unknown speed %u",
		    sc->sc_sc1394.sc1394_link_speed);
	}

	/* MaxRec is encoded as log2(max_rec_octets)-1
	 */
	sc->sc_sc1394.sc1394_max_receive =
	    1 << (OHCI_BITVAL(val, OHCI_BusOptions_MaxRec) + 1);
	aprint_normal(", %u max_rec", sc->sc_sc1394.sc1394_max_receive);

	/*
	 * Count how many isochronous receive ctx we have, by setting all
	 * mask bits and counting which ones the hardware implements.
	 */
	OHCI_CSR_WRITE(sc, OHCI_REG_IsoRecvIntMaskSet, ~0);
	val = OHCI_CSR_READ(sc, OHCI_REG_IsoRecvIntMaskClear);
	OHCI_CSR_WRITE(sc, OHCI_REG_IsoRecvIntMaskClear, ~0);
	for (i = 0; val != 0; val >>= 1) {
		if (val & 0x1)
			i++;
	}
	sc->sc_isoctx = i;
	aprint_normal(", %d ir_ctx", sc->sc_isoctx);

	/*
	 * Count how many isochronous transmit ctx we have.
	 */
	OHCI_CSR_WRITE(sc, OHCI_REG_IsoXmitIntMaskSet, ~0);
	val = OHCI_CSR_READ(sc, OHCI_REG_IsoXmitIntMaskClear);
	OHCI_CSR_WRITE(sc, OHCI_REG_IsoXmitIntMaskClear, ~0);
	for (i = 0; val != 0; val >>= 1) {
		if (val & 0x1) {
			i++;
			/*
			 * NOTE(review): CommandPtr is cleared for context
			 * "i" *after* the increment, so context 0 is never
			 * written and the write targets one past the last
			 * counted context.  Looks like an off-by-one —
			 * confirm against the OHCI 1.1 spec and upstream
			 * history before changing.
			 */
			OHCI_SYNC_TX_DMA_WRITE(sc, i, OHCI_SUBREG_CommandPtr, 0);
		}
	}
	sc->sc_itctx = i;

	aprint_normal(", %d it_ctx", sc->sc_itctx);

	aprint_normal("\n");

#if 0
	error = fwohci_dnamem_alloc(sc, OHCI_CONFIG_SIZE,
	    OHCI_CONFIG_ALIGNMENT, &sc->sc_configrom_map,
	    (caddr_t *) &sc->sc_configrom, BUS_DMA_WAITOK|BUS_DMA_COHERENT);
	return error;
#endif

	sc->sc_dying = 0;
	sc->sc_nodeid = 0xffff;		/* invalid */

	/* Hook up the asynchronous transaction entry points. */
	sc->sc_sc1394.sc1394_callback.sc1394_read = fwohci_read;
	sc->sc_sc1394.sc1394_callback.sc1394_write = fwohci_write;
	sc->sc_sc1394.sc1394_callback.sc1394_inreg = fwohci_inreg;
	sc->sc_sc1394.sc1394_callback.sc1394_unreg = fwohci_unreg;

	/* The rest of the setup runs in the event thread. */
	kthread_create(fwohci_create_event_thread, sc);
	return 0;
}
388
389 static int
390 fwohci_if_setiso(struct device *self, u_int32_t channel, u_int32_t tag,
391 u_int32_t direction, void (*handler)(struct device *, struct mbuf *))
392 {
393 struct fwohci_softc *sc = (struct fwohci_softc *)self;
394 int retval;
395 int s;
396
397 if (direction == 1) {
398 return EIO;
399 }
400
401 s = splnet();
402 retval = fwohci_handler_set(sc, IEEE1394_TCODE_STREAM_DATA,
403 channel, 1 << tag, 0, fwohci_if_input_iso, handler);
404 splx(s);
405
406 if (!retval) {
407 printf("%s: dummy iso handler set\n",
408 sc->sc_sc1394.sc1394_dev.dv_xname);
409 } else {
410 printf("%s: dummy iso handler cannot set\n",
411 sc->sc_sc1394.sc1394_dev.dv_xname);
412 }
413
414 return retval;
415 }
416
/*
 * Hardware interrupt handler.
 *
 * Loops acknowledging IntEvent bits until none remain, accumulating
 * them into sc->sc_intmask for the event thread, which is woken via
 * wakeup(fwohci_event_thread) when any progress was made.  Isochronous
 * transmit interrupts are serviced directly here; everything else is
 * deferred.  Returns nonzero iff this device raised the interrupt.
 */
int
fwohci_intr(void *arg)
{
	struct fwohci_softc * const sc = arg;
	int progress = 0;
	u_int32_t intmask, iso;

	for (;;) {
		intmask = OHCI_CSR_READ(sc, OHCI_REG_IntEventClear);

		/*
		 * On a bus reset, everything except bus reset gets
		 * cleared.  That can't get cleared until the selfid
		 * phase completes (which happens outside the
		 * interrupt routines).  So if just a bus reset is left
		 * in the mask and it's already in the sc_intmask,
		 * just return.
		 */
		if ((intmask == 0) ||
		    (progress && (intmask == OHCI_Int_BusReset) &&
		     (sc->sc_intmask & OHCI_Int_BusReset))) {
			if (progress)
				wakeup(fwohci_event_thread);
			return progress;
		}
		/* Ack everything except BusReset (cleared after SelfID). */
		OHCI_CSR_WRITE(sc, OHCI_REG_IntEventClear,
		    intmask & ~OHCI_Int_BusReset);
#ifdef FW_DEBUG
		if (fwdebug > 1)
			fwohci_show_intr(sc, intmask);
#endif

		if (intmask & OHCI_Int_BusReset) {
			/*
			 * According to OHCI spec 6.1.1 "busReset",
			 * All asynchronous transmit must be stopped before
			 * clearing BusReset.  Moreover, the BusReset
			 * interrupt bit should not be cleared during the
			 * SelfID phase.  Thus we turned off interrupt mask
			 * bit of BusReset instead until SelfID completion
			 * or SelfID timeout.
			 */
			intmask &= OHCI_Int_SelfIDComplete;
			OHCI_CSR_WRITE(sc, OHCI_REG_IntMaskClear,
			    OHCI_Int_BusReset);
			sc->sc_intmask = OHCI_Int_BusReset;
		}
		sc->sc_intmask |= intmask;

		if (intmask & OHCI_Int_IsochTx) {
			int i;

			/* Ack and dispatch per-context IT completions. */
			iso = OHCI_CSR_READ(sc, OHCI_REG_IsoXmitIntEventClear);
			OHCI_CSR_WRITE(sc, OHCI_REG_IsoXmitIntEventClear, iso);

			sc->sc_itintrcnt.ev_count++;
			for (i = 0; i < sc->sc_itctx; ++i) {
				if ((iso & (1<<i)) == 0 ||
				    sc->sc_ctx_it[i] == NULL) {
					continue;
				}

				fwohci_it_intr(sc, sc->sc_ctx_it[i]);
			}
		}
		if (intmask & OHCI_Int_IsochRx) {
			int i;

			iso = OHCI_CSR_READ(sc, OHCI_REG_IsoRecvIntEventClear);
			OHCI_CSR_WRITE(sc, OHCI_REG_IsoRecvIntEventClear, iso);

			/*
			 * IR contexts with a dedicated handler are serviced
			 * here; the remaining bits are left in sc_iso for
			 * the event thread (async stream input).
			 */
			for (i = 0; i < sc->sc_isoctx; i++) {
				if ((iso & (1 << i))
				    && sc->sc_ctx_ir[i] != NULL) {
					iso &= ~(1 << i);
					fwohci_ir_intr(sc, sc->sc_ctx_ir[i]);
				}
			}

			if (iso == 0) {
				/* Nothing left for the event thread. */
				sc->sc_intmask &= ~OHCI_Int_IsochRx;
			}
			sc->sc_iso |= iso;
		}

		if (!progress) {
			sc->sc_intrcnt.ev_count++;
			progress = 1;
		}
	}
}
509
510 static void
511 fwohci_create_event_thread(void *arg)
512 {
513 struct fwohci_softc *sc = arg;
514
515 if (kthread_create1(fwohci_thread_init, sc, &sc->sc_event_thread, "%s",
516 sc->sc_sc1394.sc1394_dev.dv_xname)) {
517 printf("%s: unable to create event thread\n",
518 sc->sc_sc1394.sc1394_dev.dv_xname);
519 panic("fwohci_create_event_thread");
520 }
521 }
522
/*
 * Event-thread entry point: finishes controller setup in thread
 * context, then runs the main event loop (fwohci_event_thread) and
 * never returns normally.
 *
 * Allocates the shared descriptor pool and the four asynchronous DMA
 * contexts, the per-context isochronous tables, the configuration-ROM
 * and SelfID buffers, installs the interface/isochronous callbacks,
 * establishes shutdown/power hooks, and attaches the "fw" child.
 */
static void
fwohci_thread_init(void *arg)
{
	struct fwohci_softc *sc = arg;
	int i;

	/*
	 * Allocate descriptors
	 */
	if (fwohci_desc_alloc(sc)) {
		printf("%s: not enabling interrupts\n",
		    sc->sc_sc1394.sc1394_dev.dv_xname);
		kthread_exit(1);
	}

	/*
	 * Enable Link Power
	 */
	OHCI_CSR_WRITE(sc, OHCI_REG_HCControlSet, OHCI_HCControl_LPS);

	/*
	 * Allocate DMA Context: AR request/response (with buffers) and
	 * AT request/response (buffers allocated per-packet).
	 */
	fwohci_ctx_alloc(sc, &sc->sc_ctx_arrq, OHCI_BUF_ARRQ_CNT,
	    OHCI_CTX_ASYNC_RX_REQUEST, FWOHCI_CTX_ASYNC);
	fwohci_ctx_alloc(sc, &sc->sc_ctx_arrs, OHCI_BUF_ARRS_CNT,
	    OHCI_CTX_ASYNC_RX_RESPONSE, FWOHCI_CTX_ASYNC);
	fwohci_ctx_alloc(sc, &sc->sc_ctx_atrq, 0, OHCI_CTX_ASYNC_TX_REQUEST,
	    FWOHCI_CTX_ASYNC);
	fwohci_ctx_alloc(sc, &sc->sc_ctx_atrs, 0, OHCI_CTX_ASYNC_TX_RESPONSE,
	    FWOHCI_CTX_ASYNC);
	/* Per-IR-context tables: async stream, isochronous receive. */
	sc->sc_ctx_as = malloc(sizeof(sc->sc_ctx_as[0]) * sc->sc_isoctx,
	    M_DEVBUF, M_WAITOK);
	if (sc->sc_ctx_as == NULL) {
		printf("no asynchronous stream\n");
	} else {
		for (i = 0; i < sc->sc_isoctx; i++)
			sc->sc_ctx_as[i] = NULL;
	}
	sc->sc_ctx_ir = malloc(sizeof(sc->sc_ctx_ir[0]) * sc->sc_isoctx,
	    M_DEVBUF, M_WAITOK|M_ZERO);
	sc->sc_ctx_it = malloc(sizeof(sc->sc_ctx_it[0]) * sc->sc_itctx,
	    M_DEVBUF, M_WAITOK|M_ZERO);

	/*
	 * Allocate buffer for configuration ROM and SelfID buffer
	 */
	fwohci_buf_alloc(sc, &sc->sc_buf_cnfrom);
	fwohci_buf_alloc(sc, &sc->sc_buf_selfid);

	callout_init(&sc->sc_selfid_callout);

	/* Network-interface hooks. */
	sc->sc_sc1394.sc1394_ifinreg = fwohci_if_inreg;
	sc->sc_sc1394.sc1394_ifoutput = fwohci_if_output;
	sc->sc_sc1394.sc1394_ifsetiso = fwohci_if_setiso;

	/* Isochronous-receive hooks. */
	sc->sc_sc1394.sc1394_ir_open = fwohci_ir_ctx_set;
	sc->sc_sc1394.sc1394_ir_close = fwohci_ir_ctx_clear;
	sc->sc_sc1394.sc1394_ir_read = fwohci_ir_read;
	sc->sc_sc1394.sc1394_ir_wait = fwohci_ir_wait;
	sc->sc_sc1394.sc1394_ir_select = fwohci_ir_select;

#if 0
	sc->sc_sc1394.sc1394_it_open = fwohci_it_open;
	sc->sc_sc1394.sc1394_it_write = fwohci_it_write;
	sc->sc_sc1394.sc1394_it_close = fwohci_it_close;
	/* XXX: need fwohci_it_flush? */
#endif

	/*
	 * establish hooks for shutdown and suspend/resume
	 */
	sc->sc_shutdownhook = shutdownhook_establish(fwohci_shutdown, sc);
	sc->sc_powerhook = powerhook_establish(fwohci_power, sc);

	sc->sc_sc1394.sc1394_if = config_found(&sc->sc_sc1394.sc1394_dev, "fw",
	    fwohci_print);

#if NFWISO > 0
	fwiso_register_if(&sc->sc_sc1394);
#endif

	/* Main loop.  It's not coming back normally. */
	fwohci_event_thread(sc);

	kthread_exit(0);
}
612
/*
 * Main event loop, run in thread context.
 *
 * Initializes the hardware and issues the initial bus reset, then
 * sleeps until fwohci_intr() posts bits into sc->sc_intmask and wakes
 * us.  Handles bus resets (stopping TX, invalidating the UID table,
 * arming the SelfID timeout), SelfID completion (re-enabling the
 * BusReset interrupt and collecting node UIDs), AT completions, AR
 * input, and deferred isochronous-receive (async stream) input.
 * Loops until sc->sc_dying is set.
 */
static void
fwohci_event_thread(struct fwohci_softc *sc)
{
	int i, s;
	u_int32_t intmask, iso;

	s = splbio();

	/*
	 * Initialize hardware registers.
	 */
	fwohci_hw_init(sc);

	/* Initial Bus Reset */
	fwohci_phy_busreset(sc);
	splx(s);

	while (!sc->sc_dying) {
		/* Atomically fetch-and-clear the pending event mask. */
		s = splbio();
		intmask = sc->sc_intmask;
		if (intmask == 0) {
			tsleep(fwohci_event_thread, PZERO, "fwohciev", 0);
			splx(s);
			continue;
		}
		sc->sc_intmask = 0;
		splx(s);

		if (intmask & OHCI_Int_BusReset) {
			fwohci_buf_stop_tx(sc);
			/* Topology changed: old UID table is stale. */
			if (sc->sc_uidtbl != NULL) {
				free(sc->sc_uidtbl, M_DEVBUF);
				sc->sc_uidtbl = NULL;
			}

			/*
			 * If SelfID doesn't complete in time, force
			 * another bus reset.
			 */
			callout_reset(&sc->sc_selfid_callout,
			    OHCI_SELFID_TIMEOUT,
			    (void (*)(void *))fwohci_phy_busreset, sc);
			sc->sc_nodeid = 0xffff;	/* indicate invalid */
			sc->sc_rootid = 0;
			sc->sc_irmid = IEEE1394_BCAST_PHY_ID;
		}
		if (intmask & OHCI_Int_SelfIDComplete) {
			/*
			 * SelfID done: now it is safe to ack BusReset and
			 * re-enable its interrupt (see fwohci_intr()).
			 */
			s = splbio();
			OHCI_CSR_WRITE(sc, OHCI_REG_IntEventClear,
			    OHCI_Int_BusReset);
			OHCI_CSR_WRITE(sc, OHCI_REG_IntMaskSet,
			    OHCI_Int_BusReset);
			splx(s);
			callout_stop(&sc->sc_selfid_callout);
			if (fwohci_selfid_input(sc) == 0) {
				fwohci_buf_start_rx(sc);
				fwohci_uid_collect(sc);
			}
		}
		if (intmask & OHCI_Int_ReqTxComplete)
			fwohci_at_done(sc, sc->sc_ctx_atrq, 0);
		if (intmask & OHCI_Int_RespTxComplete)
			fwohci_at_done(sc, sc->sc_ctx_atrs, 0);
		if (intmask & OHCI_Int_RQPkt)
			fwohci_arrq_input(sc, sc->sc_ctx_arrq);
		if (intmask & OHCI_Int_RSPkt)
			fwohci_arrs_input(sc, sc->sc_ctx_arrs);
		if (intmask & OHCI_Int_IsochRx) {
			if (sc->sc_ctx_as == NULL) {
				continue;
			}
			/* Fetch-and-clear deferred IR context bits. */
			s = splbio();
			iso = sc->sc_iso;
			sc->sc_iso = 0;
			splx(s);
			for (i = 0; i < sc->sc_isoctx; i++) {
				if ((iso & (1 << i)) &&
				    sc->sc_ctx_as[i] != NULL) {
					fwohci_as_input(sc, sc->sc_ctx_as[i]);
					sc->sc_ascnt.ev_count++;
				}
			}
		}
	}
}
695
#if 0
/*
 * Allocate, map, and DMA-load a contiguous buffer of `size' bytes with
 * the given alignment.  On success, *mapp holds the loaded DMA map and
 * *kvap the kernel virtual address; returns 0.  On failure, returns a
 * bus_dma error code and releases every intermediate resource.
 *
 * Fixed: the previous version fell through to its cleanup label even on
 * success (freeing the buffer it had just allocated), and never released
 * the kernel mapping or the DMA map when a later step failed.
 */
static int
fwohci_dnamem_alloc(struct fwohci_softc *sc, int size, int alignment,
    bus_dmamap_t *mapp, caddr_t *kvap, int flags)
{
	bus_dma_segment_t segs[1];
	int error, nsegs;

	error = bus_dmamem_alloc(sc->sc_dmat, size, alignment, alignment,
	    segs, 1, &nsegs, flags);
	if (error)
		return error;

	error = bus_dmamem_map(sc->sc_dmat, segs, nsegs, segs[0].ds_len,
	    kvap, flags);
	if (error)
		goto fail_free;

	error = bus_dmamap_create(sc->sc_dmat, size, 1, alignment,
	    size, flags, mapp);
	if (error)
		goto fail_unmap;

	error = bus_dmamap_load(sc->sc_dmat, *mapp, *kvap, size, NULL,
	    flags);
	if (error)
		goto fail_destroy;

	return 0;

 fail_destroy:
	bus_dmamap_destroy(sc->sc_dmat, *mapp);
 fail_unmap:
	bus_dmamem_unmap(sc->sc_dmat, *kvap, segs[0].ds_len);
 fail_free:
	bus_dmamem_free(sc->sc_dmat, segs, nsegs);
	return error;
}
#endif
736
737 int
738 fwohci_print(void *aux, const char *pnp)
739 {
740 char *name = aux;
741
742 if (pnp)
743 aprint_normal("%s at %s", name, pnp);
744
745 return UNCONF;
746 }
747
/*
 * (Re)initialize the controller: soft reset, link power, BusOptions
 * capabilities, clear all contexts, set up the configuration ROM,
 * SelfID buffer and CSRs, program the receive filters and interrupt
 * masks, enable the link, and start the receivers.  Also used on
 * resume (see fwohci_power()).
 */
static void
fwohci_hw_init(struct fwohci_softc *sc)
{
	int i;
	u_int32_t val;

	/*
	 * Software Reset (bounded poll for completion).
	 */
	OHCI_CSR_WRITE(sc, OHCI_REG_HCControlSet, OHCI_HCControl_SoftReset);
	for (i = 0; i < OHCI_LOOP; i++) {
		val = OHCI_CSR_READ(sc, OHCI_REG_HCControlClear);
		if ((val & OHCI_HCControl_SoftReset) == 0)
			break;
		DELAY(10);
	}

	/* Link Power Status: bring the link side of the PHY up. */
	OHCI_CSR_WRITE(sc, OHCI_REG_HCControlSet, OHCI_HCControl_LPS);

	/*
	 * First, initialize CSRs with undefined value to default settings.
	 * Advertise isochronous and cycle-master capability, but not
	 * bus-manager / IRM capability.
	 */
	val = OHCI_CSR_READ(sc, OHCI_REG_BusOptions);
	val |= OHCI_BusOptions_ISC | OHCI_BusOptions_CMC;
#if 0
	val |= OHCI_BusOptions_BMC | OHCI_BusOptions_IRMC;
#else
	val &= ~(OHCI_BusOptions_BMC | OHCI_BusOptions_IRMC);
#endif
	OHCI_CSR_WRITE(sc, OHCI_REG_BusOptions, val);
	/* Clear the control bits of every IR and IT context. */
	for (i = 0; i < sc->sc_isoctx; i++) {
		OHCI_SYNC_RX_DMA_WRITE(sc, i, OHCI_SUBREG_ContextControlClear,
		    ~0);
	}
	for (i = 0; i < sc->sc_itctx; i++) {
		OHCI_SYNC_TX_DMA_WRITE(sc, i, OHCI_SUBREG_ContextControlClear,
		    ~0);
	}
	OHCI_CSR_WRITE(sc, OHCI_REG_LinkControlClear, ~0);

	fwohci_configrom_init(sc);
	fwohci_selfid_init(sc);
	fwohci_buf_init_rx(sc);
	fwohci_csr_init(sc);

	/*
	 * Final CSR settings.
	 */
	OHCI_CSR_WRITE(sc, OHCI_REG_LinkControlSet,
	    OHCI_LinkControl_CycleTimerEnable |
	    OHCI_LinkControl_RcvSelfID | OHCI_LinkControl_RcvPhyPkt);

	OHCI_CSR_WRITE(sc, OHCI_REG_ATRetries, 0x00000888);	/*XXX*/

	/* clear receive filter */
	OHCI_CSR_WRITE(sc, OHCI_REG_IRMultiChanMaskHiClear, ~0);
	OHCI_CSR_WRITE(sc, OHCI_REG_IRMultiChanMaskLoClear, ~0);
	/* Accept requests from the (not yet known) local bus only. */
	OHCI_CSR_WRITE(sc, OHCI_REG_AsynchronousRequestFilterHiSet, 0x80000000);

	OHCI_CSR_WRITE(sc, OHCI_REG_HCControlClear,
	    OHCI_HCControl_NoByteSwapData | OHCI_HCControl_APhyEnhanceEnable);
#if BYTE_ORDER == BIG_ENDIAN
	OHCI_CSR_WRITE(sc, OHCI_REG_HCControlSet,
	    OHCI_HCControl_NoByteSwapData);
#endif

	/* Enable exactly the interrupts we service, then master enable. */
	OHCI_CSR_WRITE(sc, OHCI_REG_IntMaskClear, ~0);
	OHCI_CSR_WRITE(sc, OHCI_REG_IntMaskSet, OHCI_Int_BusReset |
	    OHCI_Int_SelfIDComplete | OHCI_Int_IsochRx | OHCI_Int_IsochTx |
	    OHCI_Int_RSPkt | OHCI_Int_RQPkt | OHCI_Int_ARRS | OHCI_Int_ARRQ |
	    OHCI_Int_RespTxComplete | OHCI_Int_ReqTxComplete);
	OHCI_CSR_WRITE(sc, OHCI_REG_IntMaskSet, OHCI_Int_CycleTooLong |
	    OHCI_Int_UnrecoverableError | OHCI_Int_CycleInconsistent |
	    OHCI_Int_LockRespErr | OHCI_Int_PostedWriteErr);
	OHCI_CSR_WRITE(sc, OHCI_REG_IsoXmitIntMaskSet, ~0);
	OHCI_CSR_WRITE(sc, OHCI_REG_IsoRecvIntMaskSet, ~0);
	OHCI_CSR_WRITE(sc, OHCI_REG_IntMaskSet, OHCI_Int_MasterEnable);

	OHCI_CSR_WRITE(sc, OHCI_REG_HCControlSet, OHCI_HCControl_LinkEnable);

	/*
	 * Start the receivers
	 */
	fwohci_buf_start_rx(sc);
}
833
834 static void
835 fwohci_power(int why, void *arg)
836 {
837 struct fwohci_softc *sc = arg;
838 int s;
839
840 s = splbio();
841 switch (why) {
842 case PWR_SUSPEND:
843 case PWR_STANDBY:
844 fwohci_shutdown(sc);
845 break;
846 case PWR_RESUME:
847 fwohci_hw_init(sc);
848 fwohci_phy_busreset(sc);
849 break;
850 case PWR_SOFTSUSPEND:
851 case PWR_SOFTSTANDBY:
852 case PWR_SOFTRESUME:
853 break;
854 }
855 splx(s);
856 }
857
/*
 * Shutdown hook (also used on suspend): quiesce the controller by
 * disabling interrupts, stopping DMA, withdrawing our BusOptions
 * capabilities, resetting the bus so peers notice, and finally
 * disabling the link and putting the chip into soft reset.
 */
static void
fwohci_shutdown(void *arg)
{
	struct fwohci_softc *sc = arg;
	u_int32_t val;

	callout_stop(&sc->sc_selfid_callout);
	/* disable all interrupt */
	OHCI_CSR_WRITE(sc, OHCI_REG_IntMaskClear, OHCI_Int_MasterEnable);
	fwohci_buf_stop_tx(sc);
	fwohci_buf_stop_rx(sc);
	/* Stop advertising capabilities before leaving the bus. */
	val = OHCI_CSR_READ(sc, OHCI_REG_BusOptions);
	val &= ~(OHCI_BusOptions_BMC | OHCI_BusOptions_ISC |
	    OHCI_BusOptions_CMC | OHCI_BusOptions_IRMC);
	OHCI_CSR_WRITE(sc, OHCI_REG_BusOptions, val);
	fwohci_phy_busreset(sc);
	OHCI_CSR_WRITE(sc, OHCI_REG_HCControlClear, OHCI_HCControl_LinkEnable);
	OHCI_CSR_WRITE(sc, OHCI_REG_HCControlClear, OHCI_HCControl_LPS);
	OHCI_CSR_WRITE(sc, OHCI_REG_HCControlSet, OHCI_HCControl_SoftReset);
}
878
879 /*
880 * COMMON FUNCTIONS
881 */
882
883 /*
884 * read the PHY Register.
885 */
886 static u_int8_t
887 fwohci_phy_read(struct fwohci_softc *sc, u_int8_t reg)
888 {
889 int i;
890 u_int32_t val;
891
892 OHCI_CSR_WRITE(sc, OHCI_REG_PhyControl,
893 OHCI_PhyControl_RdReg | (reg << OHCI_PhyControl_RegAddr_BITPOS));
894 for (i = 0; i < OHCI_LOOP; i++) {
895 if (OHCI_CSR_READ(sc, OHCI_REG_PhyControl) &
896 OHCI_PhyControl_RdDone)
897 break;
898 DELAY(10);
899 }
900 val = OHCI_CSR_READ(sc, OHCI_REG_PhyControl);
901 return (val & OHCI_PhyControl_RdData) >> OHCI_PhyControl_RdData_BITPOS;
902 }
903
904 /*
905 * write the PHY Register.
906 */
907 static void
908 fwohci_phy_write(struct fwohci_softc *sc, u_int8_t reg, u_int8_t val)
909 {
910 int i;
911
912 OHCI_CSR_WRITE(sc, OHCI_REG_PhyControl, OHCI_PhyControl_WrReg |
913 (reg << OHCI_PhyControl_RegAddr_BITPOS) |
914 (val << OHCI_PhyControl_WrData_BITPOS));
915 for (i = 0; i < OHCI_LOOP; i++) {
916 if (!(OHCI_CSR_READ(sc, OHCI_REG_PhyControl) &
917 OHCI_PhyControl_WrReg))
918 break;
919 DELAY(10);
920 }
921 }
922
923 /*
924 * Initiate Bus Reset
925 */
/*
 * Initiate a bus reset via PHY register 1, preserving the RHB (force
 * root) bit and programming the default gap count.  Clears any pending
 * BusReset/SelfIDComplete events and re-enables the BusReset interrupt
 * first, since it may have been masked by fwohci_intr().
 */
static void
fwohci_phy_busreset(struct fwohci_softc *sc)
{
	int s;
	u_int8_t val;

	s = splbio();
	OHCI_CSR_WRITE(sc, OHCI_REG_IntEventClear,
	    OHCI_Int_BusReset | OHCI_Int_SelfIDComplete);
	OHCI_CSR_WRITE(sc, OHCI_REG_IntMaskSet, OHCI_Int_BusReset);
	/* A reset is being forced now; the pending timeout is moot. */
	callout_stop(&sc->sc_selfid_callout);
	val = fwohci_phy_read(sc, 1);
	val = (val & 0x80) |	/* preserve RHB (force root) */
	    0x40 |		/* Initiate Bus Reset */
	    0x3f;		/* default GAP count */
	fwohci_phy_write(sc, 1, val);
	splx(s);
}
944
945 /*
946 * PHY Packet
947 */
/*
 * Handle a received PHY packet.  A PHY packet carries its first quadlet
 * (fp_hdr[1]) followed by the bitwise complement of that quadlet
 * (fp_hdr[2]); a mismatch means corruption, except for the all-zero
 * quadlet seen together with a BUS_RESET event code in the trailer,
 * which is reported as a bus reset at debug level.
 */
static void
fwohci_phy_input(struct fwohci_softc *sc, struct fwohci_pkt *pkt)
{
	u_int32_t val;

	val = pkt->fp_hdr[1];
	if (val != ~pkt->fp_hdr[2]) {
		/* Trailer bits 16..20 hold the context event code. */
		if (val == 0 && ((*pkt->fp_trail & 0x001f0000) >> 16) ==
		    OHCI_CTXCTL_EVENT_BUS_RESET) {
			DPRINTFN(1, ("fwohci_phy_input: BusReset: 0x%08x\n",
			    pkt->fp_hdr[2]));
		} else {
			printf("%s: phy packet corrupted (0x%08x, 0x%08x)\n",
			    sc->sc_sc1394.sc1394_dev.dv_xname, val,
			    pkt->fp_hdr[2]);
		}
		return;
	}
#ifdef FW_DEBUG
	if (fwdebug > 1)
		fwohci_show_phypkt(sc, val);
#endif
}
971
972 /*
973 * Descriptor for context DMA.
974 */
975 static int
976 fwohci_desc_alloc(struct fwohci_softc *sc)
977 {
978 int error, mapsize, dsize;
979
980 /*
981 * allocate descriptor buffer
982 */
983
984 sc->sc_descsize = OHCI_BUF_ARRQ_CNT + OHCI_BUF_ARRS_CNT +
985 OHCI_BUF_ATRQ_CNT + OHCI_BUF_ATRS_CNT +
986 OHCI_BUF_IR_CNT * sc->sc_isoctx + 2;
987 dsize = sizeof(struct fwohci_desc) * sc->sc_descsize;
988 mapsize = howmany(sc->sc_descsize, NBBY);
989 sc->sc_descmap = malloc(mapsize, M_DEVBUF, M_WAITOK|M_ZERO);
990
991 if (sc->sc_descmap == NULL) {
992 printf("fwohci_desc_alloc: cannot get memory\n");
993 return -1;
994 }
995
996 if ((error = bus_dmamem_alloc(sc->sc_dmat, dsize, PAGE_SIZE, 0,
997 &sc->sc_dseg, 1, &sc->sc_dnseg, 0)) != 0) {
998 printf("%s: unable to allocate descriptor buffer, error = %d\n",
999 sc->sc_sc1394.sc1394_dev.dv_xname, error);
1000 goto fail_0;
1001 }
1002
1003 if ((error = bus_dmamem_map(sc->sc_dmat, &sc->sc_dseg, sc->sc_dnseg,
1004 dsize, (caddr_t *)&sc->sc_desc, BUS_DMA_COHERENT | BUS_DMA_WAITOK))
1005 != 0) {
1006 printf("%s: unable to map descriptor buffer, error = %d\n",
1007 sc->sc_sc1394.sc1394_dev.dv_xname, error);
1008 goto fail_1;
1009 }
1010
1011 if ((error = bus_dmamap_create(sc->sc_dmat, dsize, sc->sc_dnseg,
1012 dsize, 0, BUS_DMA_WAITOK, &sc->sc_ddmamap)) != 0) {
1013 printf("%s: unable to create descriptor buffer DMA map, "
1014 "error = %d\n", sc->sc_sc1394.sc1394_dev.dv_xname, error);
1015 goto fail_2;
1016 }
1017
1018 if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_ddmamap, sc->sc_desc,
1019 dsize, NULL, BUS_DMA_WAITOK)) != 0) {
1020 printf("%s: unable to load descriptor buffer DMA map, "
1021 "error = %d\n", sc->sc_sc1394.sc1394_dev.dv_xname, error);
1022 goto fail_3;
1023 }
1024
1025 return 0;
1026
1027 fail_3:
1028 bus_dmamap_destroy(sc->sc_dmat, sc->sc_ddmamap);
1029 fail_2:
1030 bus_dmamem_unmap(sc->sc_dmat, (caddr_t)sc->sc_desc, dsize);
1031 fail_1:
1032 bus_dmamem_free(sc->sc_dmat, &sc->sc_dseg, sc->sc_dnseg);
1033 fail_0:
1034 return error;
1035 }
1036
1037 static struct fwohci_desc *
1038 fwohci_desc_get(struct fwohci_softc *sc, int ndesc)
1039 {
1040 int i, n;
1041
1042 for (n = 0; n <= sc->sc_descsize - ndesc; n++) {
1043 for (i = 0; ; i++) {
1044 if (i == ndesc) {
1045 for (i = 0; i < ndesc; i++)
1046 setbit(sc->sc_descmap, n + i);
1047 return sc->sc_desc + n;
1048 }
1049 if (isset(sc->sc_descmap, n + i))
1050 break;
1051 }
1052 }
1053 return NULL;
1054 }
1055
1056 static void
1057 fwohci_desc_put(struct fwohci_softc *sc, struct fwohci_desc *fd, int ndesc)
1058 {
1059 int i, n;
1060
1061 n = fd - sc->sc_desc;
1062 for (i = 0; i < ndesc; i++, n++) {
1063 #ifdef DIAGNOSTIC
1064 if (isclr(sc->sc_descmap, n))
1065 panic("fwohci_desc_put: duplicated free");
1066 #endif
1067 clrbit(sc->sc_descmap, n);
1068 }
1069 }
1070
1071 /*
1072 * Asynchronous/Isochronous Transmit/Receive Context
1073 */
1074 static int
1075 fwohci_ctx_alloc(struct fwohci_softc *sc, struct fwohci_ctx **fcp,
1076 int bufcnt, int ctx, int ctxtype)
1077 {
1078 int i, error;
1079 struct fwohci_ctx *fc;
1080 struct fwohci_buf *fb;
1081 struct fwohci_desc *fd;
1082 #if DOUBLEBUF
1083 int buf2cnt;
1084 #endif
1085
1086 fc = malloc(sizeof(*fc), M_DEVBUF, M_WAITOK|M_ZERO);
1087 LIST_INIT(&fc->fc_handler);
1088 TAILQ_INIT(&fc->fc_buf);
1089 fc->fc_ctx = ctx;
1090 fc->fc_buffers = fb = malloc(sizeof(*fb) * bufcnt, M_DEVBUF, M_WAITOK|M_ZERO);
1091 fc->fc_bufcnt = bufcnt;
1092 #if DOUBLEBUF
1093 TAILQ_INIT(&fc->fc_buf2); /* for isochronous */
1094 if (ctxtype == FWOHCI_CTX_ISO_MULTI) {
1095 buf2cnt = bufcnt/2;
1096 bufcnt -= buf2cnt;
1097 if (buf2cnt == 0) {
1098 panic("cannot allocate iso buffer");
1099 }
1100 }
1101 #endif
1102 for (i = 0; i < bufcnt; i++, fb++) {
1103 if ((error = fwohci_buf_alloc(sc, fb)) != 0)
1104 goto fail;
1105 if ((fd = fwohci_desc_get(sc, 1)) == NULL) {
1106 error = ENOBUFS;
1107 goto fail;
1108 }
1109 fb->fb_desc = fd;
1110 fb->fb_daddr = sc->sc_ddmamap->dm_segs[0].ds_addr +
1111 ((caddr_t)fd - (caddr_t)sc->sc_desc);
1112 fd->fd_flags = OHCI_DESC_INPUT | OHCI_DESC_STATUS |
1113 OHCI_DESC_INTR_ALWAYS | OHCI_DESC_BRANCH;
1114 fd->fd_reqcount = fb->fb_dmamap->dm_segs[0].ds_len;
1115 fd->fd_data = fb->fb_dmamap->dm_segs[0].ds_addr;
1116 TAILQ_INSERT_TAIL(&fc->fc_buf, fb, fb_list);
1117 }
1118 #if DOUBLEBUF
1119 if (ctxtype == FWOHCI_CTX_ISO_MULTI) {
1120 for (i = bufcnt; i < bufcnt + buf2cnt; i++, fb++) {
1121 if ((error = fwohci_buf_alloc(sc, fb)) != 0)
1122 goto fail;
1123 if ((fd = fwohci_desc_get(sc, 1)) == NULL) {
1124 error = ENOBUFS;
1125 goto fail;
1126 }
1127 fb->fb_desc = fd;
1128 fb->fb_daddr = sc->sc_ddmamap->dm_segs[0].ds_addr +
1129 ((caddr_t)fd - (caddr_t)sc->sc_desc);
1130 bus_dmamap_sync(sc->sc_dmat, sc->sc_ddmamap,
1131 (caddr_t)fd - (caddr_t)sc->sc_desc, sizeof(struct fwohci_desc),
1132 BUS_DMASYNC_PREWRITE);
1133 fd->fd_flags = OHCI_DESC_INPUT | OHCI_DESC_STATUS |
1134 OHCI_DESC_INTR_ALWAYS | OHCI_DESC_BRANCH;
1135 fd->fd_reqcount = fb->fb_dmamap->dm_segs[0].ds_len;
1136 fd->fd_data = fb->fb_dmamap->dm_segs[0].ds_addr;
1137 TAILQ_INSERT_TAIL(&fc->fc_buf2, fb, fb_list);
1138 bus_dmamap_sync(sc->sc_dmat, sc->sc_ddmamap,
1139 (caddr_t)fd - (caddr_t)sc->sc_desc, sizeof(struct fwohci_desc),
1140 BUS_DMASYNC_POSTWRITE);
1141 }
1142 }
1143 #endif /* DOUBLEBUF */
1144 fc->fc_type = ctxtype;
1145 *fcp = fc;
1146 return 0;
1147
1148 fail:
1149 while (i-- > 0) {
1150 fb--;
1151 if (fb->fb_desc)
1152 fwohci_desc_put(sc, fb->fb_desc, 1);
1153 fwohci_buf_free(sc, fb);
1154 }
1155 free(fc, M_DEVBUF);
1156 return error;
1157 }
1158
/*
 * Tear down a context: deregister all handlers, return every buffer's
 * descriptor to the pool, free the DMA buffers, then the context itself.
 */
static void
fwohci_ctx_free(struct fwohci_softc *sc, struct fwohci_ctx *fc)
{
	struct fwohci_buf *fb;
	struct fwohci_handler *fh;

#if DOUBLEBUF
	/*
	 * fc_buffers was handed out as one contiguous array with fc_buf
	 * entries first; if the lists were swapped by the double-buffer
	 * rotation, swap them back so freeing walks the array in order.
	 * NOTE(review): relies on comparing TAILQ head pointers into the
	 * same array — confirm both lists are always non-empty here.
	 */
	if ((fc->fc_type == FWOHCI_CTX_ISO_MULTI) &&
	    (TAILQ_FIRST(&fc->fc_buf) > TAILQ_FIRST(&fc->fc_buf2))) {
		struct fwohci_buf_s fctmp;

		fctmp = fc->fc_buf;
		fc->fc_buf = fc->fc_buf2;
		fc->fc_buf2 = fctmp;
	}
#endif
	/* Passing a NULL handler to fwohci_handler_set removes the entry. */
	while ((fh = LIST_FIRST(&fc->fc_handler)) != NULL)
		fwohci_handler_set(sc, fh->fh_tcode, fh->fh_key1, fh->fh_key2,
		    fh->fh_key3, NULL, NULL);
	while ((fb = TAILQ_FIRST(&fc->fc_buf)) != NULL) {
		TAILQ_REMOVE(&fc->fc_buf, fb, fb_list);
		if (fb->fb_desc)
			fwohci_desc_put(sc, fb->fb_desc, 1);
		fwohci_buf_free(sc, fb);
	}
#if DOUBLEBUF
	while ((fb = TAILQ_FIRST(&fc->fc_buf2)) != NULL) {
		TAILQ_REMOVE(&fc->fc_buf2, fb, fb_list);
		if (fb->fb_desc)
			fwohci_desc_put(sc, fb->fb_desc, 1);
		fwohci_buf_free(sc, fb);
	}
#endif /* DOUBLEBUF */
	free(fc->fc_buffers, M_DEVBUF);
	free(fc, M_DEVBUF);
}
1195
1196 static void
1197 fwohci_ctx_init(struct fwohci_softc *sc, struct fwohci_ctx *fc)
1198 {
1199 struct fwohci_buf *fb, *nfb;
1200 struct fwohci_desc *fd;
1201 struct fwohci_handler *fh;
1202 int n;
1203
1204 for (fb = TAILQ_FIRST(&fc->fc_buf); fb != NULL; fb = nfb) {
1205 nfb = TAILQ_NEXT(fb, fb_list);
1206 fb->fb_off = 0;
1207 fd = fb->fb_desc;
1208 fd->fd_branch = (nfb != NULL) ? (nfb->fb_daddr | 1) : 0;
1209 fd->fd_rescount = fd->fd_reqcount;
1210 }
1211
1212 #if DOUBLEBUF
1213 for (fb = TAILQ_FIRST(&fc->fc_buf2); fb != NULL; fb = nfb) {
1214 bus_dmamap_sync(sc->sc_dmat, sc->sc_ddmamap,
1215 (caddr_t)fd - (caddr_t)sc->sc_desc, sizeof(struct fwohci_desc),
1216 BUS_DMASYNC_PREWRITE);
1217 nfb = TAILQ_NEXT(fb, fb_list);
1218 fb->fb_off = 0;
1219 fd = fb->fb_desc;
1220 fd->fd_branch = (nfb != NULL) ? (nfb->fb_daddr | 1) : 0;
1221 fd->fd_rescount = fd->fd_reqcount;
1222 bus_dmamap_sync(sc->sc_dmat, sc->sc_ddmamap,
1223 (caddr_t)fd - (caddr_t)sc->sc_desc, sizeof(struct fwohci_desc),
1224 BUS_DMASYNC_POSTWRITE);
1225 }
1226 #endif /* DOUBLEBUF */
1227
1228 n = fc->fc_ctx;
1229 fb = TAILQ_FIRST(&fc->fc_buf);
1230 if (fc->fc_type != FWOHCI_CTX_ASYNC) {
1231 OHCI_SYNC_RX_DMA_WRITE(sc, n, OHCI_SUBREG_CommandPtr,
1232 fb->fb_daddr | 1);
1233 OHCI_SYNC_RX_DMA_WRITE(sc, n, OHCI_SUBREG_ContextControlClear,
1234 OHCI_CTXCTL_RX_BUFFER_FILL |
1235 OHCI_CTXCTL_RX_CYCLE_MATCH_ENABLE |
1236 OHCI_CTXCTL_RX_MULTI_CHAN_MODE |
1237 OHCI_CTXCTL_RX_DUAL_BUFFER_MODE);
1238 OHCI_SYNC_RX_DMA_WRITE(sc, n, OHCI_SUBREG_ContextControlSet,
1239 OHCI_CTXCTL_RX_ISOCH_HEADER);
1240 if (fc->fc_type == FWOHCI_CTX_ISO_MULTI) {
1241 OHCI_SYNC_RX_DMA_WRITE(sc, n,
1242 OHCI_SUBREG_ContextControlSet,
1243 OHCI_CTXCTL_RX_BUFFER_FILL);
1244 }
1245 fh = LIST_FIRST(&fc->fc_handler);
1246
1247 if (fh->fh_key1 == IEEE1394_ISO_CHANNEL_ANY) {
1248 OHCI_SYNC_RX_DMA_WRITE(sc, n,
1249 OHCI_SUBREG_ContextControlSet,
1250 OHCI_CTXCTL_RX_MULTI_CHAN_MODE);
1251
1252 /* Receive all the isochronous channels */
1253 OHCI_CSR_WRITE(sc, OHCI_REG_IRMultiChanMaskHiSet,
1254 0xffffffff);
1255 OHCI_CSR_WRITE(sc, OHCI_REG_IRMultiChanMaskLoSet,
1256 0xffffffff);
1257 DPRINTF(("%s: CTXCTL 0x%08x\n",
1258 sc->sc_sc1394.sc1394_dev.dv_xname,
1259 OHCI_SYNC_RX_DMA_READ(sc, n,
1260 OHCI_SUBREG_ContextControlSet)));
1261 }
1262 OHCI_SYNC_RX_DMA_WRITE(sc, n, OHCI_SUBREG_ContextMatch,
1263 (fh->fh_key2 << OHCI_CTXMATCH_TAG_BITPOS) |
1264 (fh->fh_key1 & IEEE1394_ISO_CHANNEL_MASK));
1265 } else {
1266 OHCI_ASYNC_DMA_WRITE(sc, n, OHCI_SUBREG_CommandPtr,
1267 fb->fb_daddr | 1);
1268 }
1269 }
1270
1271 /*
1272 * DMA data buffer
1273 */
1274 static int
1275 fwohci_buf_alloc(struct fwohci_softc *sc, struct fwohci_buf *fb)
1276 {
1277 int error;
1278
1279 if ((error = bus_dmamem_alloc(sc->sc_dmat, PAGE_SIZE, PAGE_SIZE,
1280 PAGE_SIZE, &fb->fb_seg, 1, &fb->fb_nseg, BUS_DMA_WAITOK)) != 0) {
1281 printf("%s: unable to allocate buffer, error = %d\n",
1282 sc->sc_sc1394.sc1394_dev.dv_xname, error);
1283 goto fail_0;
1284 }
1285
1286 if ((error = bus_dmamem_map(sc->sc_dmat, &fb->fb_seg,
1287 fb->fb_nseg, PAGE_SIZE, &fb->fb_buf, BUS_DMA_WAITOK)) != 0) {
1288 printf("%s: unable to map buffer, error = %d\n",
1289 sc->sc_sc1394.sc1394_dev.dv_xname, error);
1290 goto fail_1;
1291 }
1292
1293 if ((error = bus_dmamap_create(sc->sc_dmat, PAGE_SIZE, fb->fb_nseg,
1294 PAGE_SIZE, 0, BUS_DMA_WAITOK, &fb->fb_dmamap)) != 0) {
1295 printf("%s: unable to create buffer DMA map, "
1296 "error = %d\n", sc->sc_sc1394.sc1394_dev.dv_xname,
1297 error);
1298 goto fail_2;
1299 }
1300
1301 if ((error = bus_dmamap_load(sc->sc_dmat, fb->fb_dmamap,
1302 fb->fb_buf, PAGE_SIZE, NULL, BUS_DMA_WAITOK)) != 0) {
1303 printf("%s: unable to load buffer DMA map, "
1304 "error = %d\n", sc->sc_sc1394.sc1394_dev.dv_xname,
1305 error);
1306 goto fail_3;
1307 }
1308
1309 return 0;
1310
1311 bus_dmamap_unload(sc->sc_dmat, fb->fb_dmamap);
1312 fail_3:
1313 bus_dmamap_destroy(sc->sc_dmat, fb->fb_dmamap);
1314 fail_2:
1315 bus_dmamem_unmap(sc->sc_dmat, fb->fb_buf, PAGE_SIZE);
1316 fail_1:
1317 bus_dmamem_free(sc->sc_dmat, &fb->fb_seg, fb->fb_nseg);
1318 fail_0:
1319 return error;
1320 }
1321
/*
 * Release a DMA data buffer allocated by fwohci_buf_alloc().
 * Teardown mirrors the allocation order in reverse: unload the map,
 * destroy it, unmap the kernel VA, then free the memory segments.
 */
static void
fwohci_buf_free(struct fwohci_softc *sc, struct fwohci_buf *fb)
{

	bus_dmamap_unload(sc->sc_dmat, fb->fb_dmamap);
	bus_dmamap_destroy(sc->sc_dmat, fb->fb_dmamap);
	bus_dmamem_unmap(sc->sc_dmat, fb->fb_buf, PAGE_SIZE);
	bus_dmamem_free(sc->sc_dmat, &fb->fb_seg, fb->fb_nseg);
}
1331
1332 static void
1333 fwohci_buf_init_rx(struct fwohci_softc *sc)
1334 {
1335 int i;
1336
1337 /*
1338 * Initialize for Asynchronous Receive Queue.
1339 */
1340 fwohci_ctx_init(sc, sc->sc_ctx_arrq);
1341 fwohci_ctx_init(sc, sc->sc_ctx_arrs);
1342
1343 /*
1344 * Initialize for Isochronous Receive Queue.
1345 */
1346 if (sc->sc_ctx_as != NULL) {
1347 for (i = 0; i < sc->sc_isoctx; i++) {
1348 if (sc->sc_ctx_as[i] != NULL)
1349 fwohci_ctx_init(sc, sc->sc_ctx_as[i]);
1350 }
1351 }
1352 }
1353
1354 static void
1355 fwohci_buf_start_rx(struct fwohci_softc *sc)
1356 {
1357 int i;
1358
1359 OHCI_ASYNC_DMA_WRITE(sc, OHCI_CTX_ASYNC_RX_REQUEST,
1360 OHCI_SUBREG_ContextControlSet, OHCI_CTXCTL_RUN);
1361 OHCI_ASYNC_DMA_WRITE(sc, OHCI_CTX_ASYNC_RX_RESPONSE,
1362 OHCI_SUBREG_ContextControlSet, OHCI_CTXCTL_RUN);
1363 if (sc->sc_ctx_as != NULL) {
1364 for (i = 0; i < sc->sc_isoctx; i++) {
1365 if (sc->sc_ctx_as[i] != NULL)
1366 OHCI_SYNC_RX_DMA_WRITE(sc, i,
1367 OHCI_SUBREG_ContextControlSet,
1368 OHCI_CTXCTL_RUN);
1369 }
1370 }
1371 }
1372
1373 static void
1374 fwohci_buf_stop_tx(struct fwohci_softc *sc)
1375 {
1376 int i;
1377
1378 OHCI_ASYNC_DMA_WRITE(sc, OHCI_CTX_ASYNC_TX_REQUEST,
1379 OHCI_SUBREG_ContextControlClear, OHCI_CTXCTL_RUN);
1380 OHCI_ASYNC_DMA_WRITE(sc, OHCI_CTX_ASYNC_TX_RESPONSE,
1381 OHCI_SUBREG_ContextControlClear, OHCI_CTXCTL_RUN);
1382
1383 /*
1384 * Make sure the transmitter is stopped.
1385 */
1386 for (i = 0; i < OHCI_LOOP; i++) {
1387 DELAY(10);
1388 if (OHCI_ASYNC_DMA_READ(sc, OHCI_CTX_ASYNC_TX_REQUEST,
1389 OHCI_SUBREG_ContextControlClear) & OHCI_CTXCTL_ACTIVE)
1390 continue;
1391 if (OHCI_ASYNC_DMA_READ(sc, OHCI_CTX_ASYNC_TX_RESPONSE,
1392 OHCI_SUBREG_ContextControlClear) & OHCI_CTXCTL_ACTIVE)
1393 continue;
1394 break;
1395 }
1396
1397 /*
1398 * Initialize for Asynchronous Transmit Queue.
1399 */
1400 fwohci_at_done(sc, sc->sc_ctx_atrq, 1);
1401 fwohci_at_done(sc, sc->sc_ctx_atrs, 1);
1402 }
1403
1404 static void
1405 fwohci_buf_stop_rx(struct fwohci_softc *sc)
1406 {
1407 int i;
1408
1409 OHCI_ASYNC_DMA_WRITE(sc, OHCI_CTX_ASYNC_RX_REQUEST,
1410 OHCI_SUBREG_ContextControlClear, OHCI_CTXCTL_RUN);
1411 OHCI_ASYNC_DMA_WRITE(sc, OHCI_CTX_ASYNC_RX_RESPONSE,
1412 OHCI_SUBREG_ContextControlClear, OHCI_CTXCTL_RUN);
1413 for (i = 0; i < sc->sc_isoctx; i++) {
1414 OHCI_SYNC_RX_DMA_WRITE(sc, i,
1415 OHCI_SUBREG_ContextControlClear, OHCI_CTXCTL_RUN);
1416 }
1417 }
1418
/*
 * Recycle fully-consumed buffers: move each finished buffer from the
 * head of the ring to the tail and re-link the descriptor branch chain
 * so the controller can keep filling.  For double-buffered iso-multi
 * contexts, instead reset every buffer and swap the two buffer lists.
 */
static void
fwohci_buf_next(struct fwohci_softc *sc, struct fwohci_ctx *fc)
{
	struct fwohci_buf *fb, *tfb;

#if DOUBLEBUF
	if (fc->fc_type != FWOHCI_CTX_ISO_MULTI) {
#endif
	while ((fb = TAILQ_FIRST(&fc->fc_buf)) != NULL) {
		/* Stop at the first buffer that is not fully consumed. */
		if (fc->fc_type) {
			/* iso: a buffer is finished once it has been read */
			if (fb->fb_off == 0)
				break;
		} else {
			/* async: finished when reqcount consumed and
			 * nothing left residual */
			if (fb->fb_off != fb->fb_desc->fd_reqcount ||
			    fb->fb_desc->fd_rescount != 0)
				break;
		}
		/* Reset the buffer and append it at the tail of the ring. */
		TAILQ_REMOVE(&fc->fc_buf, fb, fb_list);
		fb->fb_desc->fd_rescount = fb->fb_desc->fd_reqcount;
		fb->fb_off = 0;
		fb->fb_desc->fd_branch = 0;
		/*
		 * Link the old tail to this buffer (| 1 = Z value).
		 * NOTE(review): assumes the list is non-empty after the
		 * removal (tfb != NULL) — holds while fc_bufcnt > 1.
		 */
		tfb = TAILQ_LAST(&fc->fc_buf, fwohci_buf_s);
		tfb->fb_desc->fd_branch = fb->fb_daddr | 1;
		TAILQ_INSERT_TAIL(&fc->fc_buf, fb, fb_list);
	}
#if DOUBLEBUF
	} else {
		struct fwohci_buf_s fctmp;

		/* cleaning buffer */
		for (fb = TAILQ_FIRST(&fc->fc_buf); fb != NULL;
		     fb = TAILQ_NEXT(fb, fb_list)) {
			fb->fb_off = 0;
			fb->fb_desc->fd_rescount = fb->fb_desc->fd_reqcount;
		}

		/* rotating buffer */
		fctmp = fc->fc_buf;
		fc->fc_buf = fc->fc_buf2;
		fc->fc_buf2 = fctmp;
	}
#endif
}
1462
/*
 * Fetch up to `len' bytes of packet data from the buffer chain.
 * Returns the number of bytes made available via *pp (may be less than
 * requested when the current buffer runs out), advancing *fbp to the
 * next buffer when the current one is exhausted.  Returns 0 when no
 * more data has been received yet.  Consumed lengths are rounded up to
 * quadlet (4-byte) boundaries, matching the controller's padding.
 */
static int
fwohci_buf_pktget(struct fwohci_softc *sc, struct fwohci_buf **fbp, caddr_t *pp,
    int len)
{
	struct fwohci_buf *fb;
	struct fwohci_desc *fd;
	int bufend;

	fb = *fbp;
  again:
	fd = fb->fb_desc;
	DPRINTFN(1, ("fwohci_buf_pktget: desc %ld, off %d, req %d, res %d,"
	    " len %d, avail %d\n", (long)(fd - sc->sc_desc), fb->fb_off,
	    fd->fd_reqcount, fd->fd_rescount, len,
	    fd->fd_reqcount - fd->fd_rescount - fb->fb_off));
	/* Bytes the controller has written so far into this buffer. */
	bufend = fd->fd_reqcount - fd->fd_rescount;
	if (fb->fb_off >= bufend) {
		DPRINTFN(5, ("buf %x finish req %d res %d off %d ",
		    fb->fb_desc->fd_data, fd->fd_reqcount, fd->fd_rescount,
		    fb->fb_off));
		/* Buffer fully consumed; move on only if it is also full. */
		if (fd->fd_rescount == 0) {
			*fbp = fb = TAILQ_NEXT(fb, fb_list);
			if (fb != NULL)
				goto again;
		}
		return 0;
	}
	/* Clamp the request to what this buffer actually holds. */
	if (fb->fb_off + len > bufend)
		len = bufend - fb->fb_off;
	bus_dmamap_sync(sc->sc_dmat, fb->fb_dmamap, fb->fb_off, len,
	    BUS_DMASYNC_POSTREAD);
	*pp = fb->fb_buf + fb->fb_off;
	fb->fb_off += roundup(len, 4);
	return len;
}
1498
/*
 * Parse one received packet out of a context's buffer chain into *pkt:
 * first quadlet (determines tcode and thus header length), remaining
 * header quadlets, payload (as an iovec list), and the trailing status
 * quadlet.  Returns 1 when a complete packet was extracted, 0 when no
 * (or a malformed) packet is available.
 */
static int
fwohci_buf_input(struct fwohci_softc *sc, struct fwohci_ctx *fc,
    struct fwohci_pkt *pkt)
{
	caddr_t p;
	struct fwohci_buf *fb;
	int len, count, i;
#ifdef FW_DEBUG
	int tlabel;
#endif

	memset(pkt, 0, sizeof(*pkt));
	pkt->fp_uio.uio_iov = pkt->fp_iov;
	pkt->fp_uio.uio_rw = UIO_WRITE;
	pkt->fp_uio.uio_segflg = UIO_SYSSPACE;

	/* get first quadlet */
	fb = TAILQ_FIRST(&fc->fc_buf);
	count = 4;
	len = fwohci_buf_pktget(sc, &fb, &p, count);
	if (len <= 0) {
		DPRINTFN(1, ("fwohci_buf_input: no input for %d\n",
		    fc->fc_ctx));
		return 0;
	}
	pkt->fp_hdr[0] = *(u_int32_t *)p;
	/* Transaction code selects header/payload sizes below. */
	pkt->fp_tcode = (pkt->fp_hdr[0] & 0x000000f0) >> 4;
	switch (pkt->fp_tcode) {
	case IEEE1394_TCODE_WRITE_REQ_QUAD:
	case IEEE1394_TCODE_READ_RESP_QUAD:
		pkt->fp_hlen = 12;
		pkt->fp_dlen = 4;
		break;
	case IEEE1394_TCODE_READ_REQ_BLOCK:
		pkt->fp_hlen = 16;
		pkt->fp_dlen = 0;
		break;
	case IEEE1394_TCODE_WRITE_REQ_BLOCK:
	case IEEE1394_TCODE_READ_RESP_BLOCK:
	case IEEE1394_TCODE_LOCK_REQ:
	case IEEE1394_TCODE_LOCK_RESP:
		/* dlen comes from the 4th header quadlet, read below. */
		pkt->fp_hlen = 16;
		break;
	case IEEE1394_TCODE_STREAM_DATA:
		/* Only legal on iso-multi contexts; length is in hdr[0]. */
#ifdef DIAGNOSTIC
		if (fc->fc_type == FWOHCI_CTX_ISO_MULTI)
#endif
		{
			pkt->fp_hlen = 4;
			pkt->fp_dlen = pkt->fp_hdr[0] >> 16;
			DPRINTFN(5, ("[%d]", pkt->fp_dlen));
			break;
		}
#ifdef DIAGNOSTIC
		else {
			printf("fwohci_buf_input: bad tcode: STREAM_DATA\n");
			return 0;
		}
#endif
	default:
		pkt->fp_hlen = 12;
		pkt->fp_dlen = 0;
		break;
	}

	/* get header */
	while (count < pkt->fp_hlen) {
		len = fwohci_buf_pktget(sc, &fb, &p, pkt->fp_hlen - count);
		if (len == 0) {
			printf("fwohci_buf_input: malformed input 1: %d\n",
			    pkt->fp_hlen - count);
			return 0;
		}
		memcpy((caddr_t)pkt->fp_hdr + count, p, len);
		count += len;
	}
	/* Block forms carry the data length in the 4th header quadlet. */
	if (pkt->fp_hlen == 16 &&
	    pkt->fp_tcode != IEEE1394_TCODE_READ_REQ_BLOCK)
		pkt->fp_dlen = pkt->fp_hdr[3] >> 16;
#ifdef FW_DEBUG
	tlabel = (pkt->fp_hdr[0] & 0x0000fc00) >> 10;
#endif
	DPRINTFN(1, ("fwohci_buf_input: tcode=0x%x, tlabel=0x%x, hlen=%d, "
	    "dlen=%d\n", pkt->fp_tcode, tlabel, pkt->fp_hlen, pkt->fp_dlen));

	/* get data */
	count = 0;
	i = 0;
	while (count < pkt->fp_dlen) {
		/* Payload may span buffers; collect one iovec per chunk. */
		len = fwohci_buf_pktget(sc, &fb,
		    (caddr_t *)&pkt->fp_iov[i].iov_base,
		    pkt->fp_dlen - count);
		if (len == 0) {
			printf("fwohci_buf_input: malformed input 2: %d\n",
			    pkt->fp_dlen - count);
			return 0;
		}
		pkt->fp_iov[i++].iov_len = len;
		count += len;
	}
	pkt->fp_uio.uio_iovcnt = i;
	pkt->fp_uio.uio_resid = count;

	/* get trailer */
	len = fwohci_buf_pktget(sc, &fb, (caddr_t *)&pkt->fp_trail,
	    sizeof(*pkt->fp_trail));
	if (len <= 0) {
		printf("fwohci_buf_input: malformed input 3: %d\n",
		    pkt->fp_hlen - count);
		return 0;
	}
	return 1;
}
1612
1613 static int
1614 fwohci_buf_input_ppb(struct fwohci_softc *sc, struct fwohci_ctx *fc,
1615 struct fwohci_pkt *pkt)
1616 {
1617 caddr_t p;
1618 int len;
1619 struct fwohci_buf *fb;
1620 struct fwohci_desc *fd;
1621
1622 if (fc->fc_type == FWOHCI_CTX_ISO_MULTI) {
1623 return fwohci_buf_input(sc, fc, pkt);
1624 }
1625
1626 memset(pkt, 0, sizeof(*pkt));
1627 pkt->fp_uio.uio_iov = pkt->fp_iov;
1628 pkt->fp_uio.uio_rw = UIO_WRITE;
1629 pkt->fp_uio.uio_segflg = UIO_SYSSPACE;
1630
1631 for (fb = TAILQ_FIRST(&fc->fc_buf); ; fb = TAILQ_NEXT(fb, fb_list)) {
1632 if (fb == NULL)
1633 return 0;
1634 if (fb->fb_off == 0)
1635 break;
1636 }
1637 fd = fb->fb_desc;
1638 len = fd->fd_reqcount - fd->fd_rescount;
1639 if (len == 0)
1640 return 0;
1641 bus_dmamap_sync(sc->sc_dmat, fb->fb_dmamap, fb->fb_off, len,
1642 BUS_DMASYNC_POSTREAD);
1643
1644 p = fb->fb_buf;
1645 fb->fb_off += roundup(len, 4);
1646 if (len < 8) {
1647 printf("fwohci_buf_input_ppb: malformed input 1: %d\n", len);
1648 return 0;
1649 }
1650
1651 /*
1652 * get trailer first, may be bogus data unless status update
1653 * in descriptor is set.
1654 */
1655 pkt->fp_trail = (u_int32_t *)p;
1656 *pkt->fp_trail = (*pkt->fp_trail & 0xffff) | (fd->fd_status << 16);
1657 pkt->fp_hdr[0] = ((u_int32_t *)p)[1];
1658 pkt->fp_tcode = (pkt->fp_hdr[0] & 0x000000f0) >> 4;
1659 #ifdef DIAGNOSTIC
1660 if (pkt->fp_tcode != IEEE1394_TCODE_STREAM_DATA) {
1661 printf("fwohci_buf_input_ppb: bad tcode: 0x%x\n",
1662 pkt->fp_tcode);
1663 return 0;
1664 }
1665 #endif
1666 pkt->fp_hlen = 4;
1667 pkt->fp_dlen = pkt->fp_hdr[0] >> 16;
1668 p += 8;
1669 len -= 8;
1670 if (pkt->fp_dlen != len) {
1671 printf("fwohci_buf_input_ppb: malformed input 2: %d != %d\n",
1672 pkt->fp_dlen, len);
1673 return 0;
1674 }
1675 DPRINTFN(1, ("fwohci_buf_input_ppb: tcode=0x%x, hlen=%d, dlen=%d\n",
1676 pkt->fp_tcode, pkt->fp_hlen, pkt->fp_dlen));
1677 pkt->fp_iov[0].iov_base = p;
1678 pkt->fp_iov[0].iov_len = len;
1679 pkt->fp_uio.uio_iovcnt = 0;
1680 pkt->fp_uio.uio_resid = len;
1681 return 1;
1682 }
1683
/*
 * Register (handler != NULL) or remove (handler == NULL) a packet
 * handler, keyed by transaction code plus (key1, key2, key3).
 *
 * For stream data (isochronous), key1/key2 select channel/tag; a free
 * isochronous receive context is allocated on first registration and
 * released when its handler is removed.  For asynchronous request and
 * response tcodes, (key1,key2) form a 48-bit CSR address and key3 an
 * address-range length; overlapping registrations are refused.
 *
 * Returns 0 on success, ENOMEM when no iso context is free, EIO for an
 * unsupported tcode, EEXIST for an overlapping address range.
 */
static int
fwohci_handler_set(struct fwohci_softc *sc,
    int tcode, u_int32_t key1, u_int32_t key2, u_int32_t key3,
    int (*handler)(struct fwohci_softc *, void *, struct fwohci_pkt *),
    void *arg)
{
	struct fwohci_ctx *fc;
	struct fwohci_handler *fh;
	u_int64_t addr, naddr;
	u_int32_t off;
	int i, j;

	if (tcode == IEEE1394_TCODE_STREAM_DATA &&
	    (((key1 & OHCI_ASYNC_STREAM) && sc->sc_ctx_as != NULL)
	    || (key1 & OHCI_ASYNC_STREAM) == 0)) {
		int isasync = key1 & OHCI_ASYNC_STREAM;

		/* Normalize key1 to either channel-any or a channel number. */
		key1 = key1 & IEEE1394_ISO_CHANNEL_ANY ?
		    IEEE1394_ISO_CHANNEL_ANY : (key1 & IEEE1394_ISOCH_MASK);
		if (key1 & IEEE1394_ISO_CHANNEL_ANY) {
			printf("%s: key changed to %x\n",
			    sc->sc_sc1394.sc1394_dev.dv_xname, key1);
		}
		/*
		 * Scan the iso contexts for an existing (tcode,key1,key2)
		 * handler; remember the lowest free slot in `j'.
		 */
		j = sc->sc_isoctx;
		fh = NULL;

		for (i = 0; i < sc->sc_isoctx; i++) {
			if ((fc = sc->sc_ctx_as[i]) == NULL) {
				if (j == sc->sc_isoctx)
					j = i;
				continue;
			}
			fh = LIST_FIRST(&fc->fc_handler);
			if (fh->fh_tcode == tcode &&
			    fh->fh_key1 == key1 && fh->fh_key2 == key2)
				break;
			fh = NULL;
		}
		if (fh == NULL) {
			/* No match: removing is a no-op, adding needs a
			 * free context (allocated lazily). */
			if (handler == NULL)
				return 0;
			if (j == sc->sc_isoctx) {
				DPRINTF(("fwohci_handler_set: no more free "
				    "context\n"));
				return ENOMEM;
			}
			if ((fc = sc->sc_ctx_as[j]) == NULL) {
				fwohci_ctx_alloc(sc, &fc, OHCI_BUF_IR_CNT, j,
				    isasync ? FWOHCI_CTX_ISO_SINGLE :
				    FWOHCI_CTX_ISO_MULTI);
				sc->sc_ctx_as[j] = fc;
			}
		}
#ifdef FW_DEBUG
		if (fh == NULL && handler != NULL) {
			printf("use ir context %d\n", j);
		} else if (fh != NULL && handler == NULL) {
			printf("remove ir context %d\n", i);
		}
#endif
	} else {
		/* Asynchronous: requests vs responses use separate
		 * receive contexts. */
		switch (tcode) {
		case IEEE1394_TCODE_WRITE_REQ_QUAD:
		case IEEE1394_TCODE_WRITE_REQ_BLOCK:
		case IEEE1394_TCODE_READ_REQ_QUAD:
		case IEEE1394_TCODE_READ_REQ_BLOCK:
		case IEEE1394_TCODE_LOCK_REQ:
			fc = sc->sc_ctx_arrq;
			break;
		case IEEE1394_TCODE_WRITE_RESP:
		case IEEE1394_TCODE_READ_RESP_QUAD:
		case IEEE1394_TCODE_READ_RESP_BLOCK:
		case IEEE1394_TCODE_LOCK_RESP:
			fc = sc->sc_ctx_arrs;
			break;
		default:
			return EIO;
		}
		naddr = ((u_int64_t)key1 << 32) + key2;

		for (fh = LIST_FIRST(&fc->fc_handler); fh != NULL;
		    fh = LIST_NEXT(fh, fh_list)) {
			if (fh->fh_tcode == tcode) {
				if (fh->fh_key1 == key1 &&
				    fh->fh_key2 == key2 && fh->fh_key3 == key3)
					break;
				/* Make sure it's not within a current range. */
				addr = ((u_int64_t)fh->fh_key1 << 32) +
				    fh->fh_key2;
				off = fh->fh_key3;
				if (key3 &&
				    (((naddr >= addr) &&
				    (naddr < (addr + off))) ||
				    (((naddr + key3) > addr) &&
				    ((naddr + key3) <= (addr + off))) ||
				    ((addr > naddr) &&
				    (addr < (naddr + key3)))))
					if (handler)
						return EEXIST;
			}
		}
	}
	if (handler == NULL) {
		/* Removal: drop the entry; a stream handler also stops
		 * and frees its iso context. */
		if (fh != NULL) {
			LIST_REMOVE(fh, fh_list);
			free(fh, M_DEVBUF);
		}
		if (tcode == IEEE1394_TCODE_STREAM_DATA) {
			OHCI_SYNC_RX_DMA_WRITE(sc, fc->fc_ctx,
			    OHCI_SUBREG_ContextControlClear, OHCI_CTXCTL_RUN);
			sc->sc_ctx_as[fc->fc_ctx] = NULL;
			fwohci_ctx_free(sc, fc);
		}
		return 0;
	}
	if (fh == NULL) {
		fh = malloc(sizeof(*fh), M_DEVBUF, M_WAITOK);
		LIST_INSERT_HEAD(&fc->fc_handler, fh, fh_list);
	}
	fh->fh_tcode = tcode;
	fh->fh_key1 = key1;
	fh->fh_key2 = key2;
	fh->fh_key3 = key3;
	fh->fh_handler = handler;
	fh->fh_handarg = arg;
	DPRINTFN(1, ("fwohci_handler_set: ctx %d, tcode %x, key 0x%x, 0x%x, "
	    "0x%x\n", fc->fc_ctx, tcode, key1, key2, key3));

	if (tcode == IEEE1394_TCODE_STREAM_DATA) {
		/* Stream handler registered: (re)start its iso context. */
		fwohci_ctx_init(sc, fc);
		DPRINTFN(1, ("fwohci_handler_set: SYNC desc %ld\n",
		    (long)(TAILQ_FIRST(&fc->fc_buf)->fb_desc - sc->sc_desc)));
		OHCI_SYNC_RX_DMA_WRITE(sc, fc->fc_ctx,
		    OHCI_SUBREG_ContextControlSet, OHCI_CTXCTL_RUN);
	}
	return 0;
}
1821
1822 /*
1823 * static ieee1394_ir_tag_t
1824 * fwohci_ir_ctx_set(struct device *dev, int channel, int tagbm,
1825 * int bufnum, int maxsize, int flags)
1826 *
1827 * This function will return non-negative value if it succeeds.
1828 * This return value is pointer to the context of isochronous
1829 * transmission. This function will return NULL value if it
1830 * fails.
1831 */
1832 ieee1394_ir_tag_t
1833 fwohci_ir_ctx_set(struct device *dev, int channel, int tagbm,
1834 int bufnum, int maxsize, int flags)
1835 {
1836 int i, openctx;
1837 struct fwohci_ir_ctx *irc;
1838 struct fwohci_softc *sc = (struct fwohci_softc *)dev;
1839 const char *xname = sc->sc_sc1394.sc1394_dev.dv_xname;
1840
1841 printf("%s: ir_ctx_set channel %d tagbm 0x%x maxsize %d bufnum %d\n",
1842 xname, channel, tagbm, maxsize, bufnum);
1843 /*
1844 * This loop will find the smallest vacant context and check
1845 * whether other channel uses the same channel.
1846 */
1847 openctx = sc->sc_isoctx;
1848 for (i = 0; i < sc->sc_isoctx; ++i) {
1849 if (sc->sc_ctx_ir[i] == NULL) {
1850 /*
1851 * Find a vacant contet. If this has the
1852 * smallest context number, register it.
1853 */
1854 if (openctx == sc->sc_isoctx) {
1855 openctx = i;
1856 }
1857 } else {
1858 /*
1859 * This context is used. Check whether this
1860 * context uses the same channel as ours.
1861 */
1862 if (sc->sc_ctx_ir[i]->irc_channel == channel) {
1863 /* Using same channel. */
1864 printf("%s: channel %d occupied by ctx%d\n",
1865 xname, channel, i);
1866 return NULL;
1867 }
1868 }
1869 }
1870
1871 /*
1872 * If there is a vacant context, allocate isochronous transmit
1873 * context for it.
1874 */
1875 if (openctx != sc->sc_isoctx) {
1876 printf("%s using ctx %d for iso receive\n", xname, openctx);
1877 if ((irc = fwohci_ir_ctx_construct(sc, openctx, channel,
1878 tagbm, bufnum, maxsize, flags)) == NULL) {
1879 return NULL;
1880 }
1881 #ifndef IR_CTX_OPENTEST
1882 sc->sc_ctx_ir[openctx] = irc;
1883 #else
1884 fwohci_ir_ctx_destruct(irc);
1885 irc = NULL;
1886 #endif
1887 } else {
1888 printf("%s: cannot find any vacant contexts\n", xname);
1889 irc = NULL;
1890 }
1891
1892 return (ieee1394_ir_tag_t)irc;
1893 }
1894
1895
1896 /*
1897 * int fwohci_ir_ctx_clear(struct device *dev, ieee1394_ir_tag_t *ir)
1898 *
1899 * This function will return 0 if it succeed. Otherwise return
1900 * negative value.
1901 */
1902 int
1903 fwohci_ir_ctx_clear(struct device *dev, ieee1394_ir_tag_t ir)
1904 {
1905 struct fwohci_ir_ctx *irc = (struct fwohci_ir_ctx *)ir;
1906 struct fwohci_softc *sc = irc->irc_sc;
1907 int i;
1908
1909 if (sc->sc_ctx_ir[irc->irc_num] != irc) {
1910 printf("fwohci_ir_ctx_clear: irc differs %p %p\n",
1911 sc->sc_ctx_ir[irc->irc_num], irc);
1912 return -1;
1913 }
1914
1915 i = 0;
1916 while (irc->irc_status & IRC_STATUS_RUN) {
1917 tsleep((void *)irc, PWAIT|PCATCH, "IEEE1394 iso receive", 100);
1918 if (irc->irc_status & IRC_STATUS_RUN) {
1919 if (fwohci_ir_stop(irc) == 0) {
1920 irc->irc_status &= ~IRC_STATUS_RUN;
1921 }
1922
1923 }
1924 if (++i > 20) {
1925 u_int32_t reg
1926 = OHCI_SYNC_RX_DMA_READ(sc, irc->irc_num,
1927 OHCI_SUBREG_ContextControlSet);
1928
1929 printf("fwochi_ir_ctx_clear: "
1930 "Cannot stop iso receive engine\n");
1931 printf("%s: intr IR_CommandPtr 0x%08x "
1932 "ContextCtrl 0x%08x%s%s%s%s\n",
1933 sc->sc_sc1394.sc1394_dev.dv_xname,
1934 OHCI_SYNC_RX_DMA_READ(sc, irc->irc_num,
1935 OHCI_SUBREG_CommandPtr),
1936 reg,
1937 reg & OHCI_CTXCTL_RUN ? " run" : "",
1938 reg & OHCI_CTXCTL_WAKE ? " wake" : "",
1939 reg & OHCI_CTXCTL_DEAD ? " dead" : "",
1940 reg & OHCI_CTXCTL_ACTIVE ? " active" : "");
1941
1942 return EBUSY;
1943 }
1944 }
1945
1946 printf("fwohci_ir_ctx_clear: DMA engine is stopped. get %d frames max queuelen %d pos %d\n",
1947 irc->irc_pktcount, irc->irc_maxqueuelen, irc->irc_maxqueuepos);
1948
1949 fwohci_ir_ctx_destruct(irc);
1950
1951 sc->sc_ctx_ir[irc->irc_num] = NULL;
1952
1953 return 0;
1954 }
1955
1956
1957
1958
1959
1960
1961
1962
1963 ieee1394_it_tag_t
1964 fwohci_it_set(struct ieee1394_softc *isc, int channel, int tagbm)
1965 {
1966 ieee1394_it_tag_t rv;
1967 int tag;
1968
1969 for (tag = 0; tagbm != 0 && (tagbm & 0x01) == 0; tagbm >>= 1, ++tag);
1970
1971 rv = fwohci_it_ctx_set((struct fwohci_softc *)isc, channel, tag, 488);
1972
1973 return rv;
1974 }
1975
1976 /*
1977 * static ieee1394_it_tag_t
1978 * fwohci_it_ctx_set(struct fwohci_softc *sc,
1979 * u_int32_t key1 (channel), u_int32_t key2 (tag), int maxsize)
1980 *
1981 * This function will return non-negative value if it succeeds.
1982 * This return value is pointer to the context of isochronous
1983 * transmission. This function will return NULL value if it
1984 * fails.
1985 */
1986 static ieee1394_it_tag_t
1987 fwohci_it_ctx_set(struct fwohci_softc *sc, int channel, int tag, int maxsize)
1988 {
1989 int i, openctx;
1990 struct fwohci_it_ctx *itc;
1991 const char *xname = sc->sc_sc1394.sc1394_dev.dv_xname;
1992 #ifdef TEST_CHAIN
1993 extern int fwohci_test_chain(struct fwohci_it_ctx *);
1994 #endif /* TEST_CHAIN */
1995 #ifdef TEST_WRITE
1996 extern void fwohci_test_write(struct fwohci_it_ctx *itc);
1997 #endif /* TEST_WRITE */
1998
1999 printf("%s: it_ctx_set channel %d tag %d maxsize %d\n",
2000 xname, channel, tag, maxsize);
2001
2002 /*
2003 * This loop will find the smallest vacant context and check
2004 * whether other channel uses the same channel.
2005 */
2006 openctx = sc->sc_itctx;
2007 for (i = 0; i < sc->sc_itctx; ++i) {
2008 if (sc->sc_ctx_it[i] == NULL) {
2009 /*
2010 * Find a vacant contet. If this has the
2011 * smallest context number, register it.
2012 */
2013 if (openctx == sc->sc_itctx) {
2014 openctx = i;
2015 }
2016 } else {
2017 /*
2018 * This context is used. Check whether this
2019 * context uses the same channel as ours.
2020 */
2021 if (sc->sc_ctx_it[i]->itc_channel == channel) {
2022 /* Using same channel. */
2023 printf("%s: channel %d occupied by ctx%d\n",
2024 xname, channel, i);
2025 return NULL;
2026 }
2027 }
2028 }
2029
2030 /*
2031 * If there is a vacant context, allocate isochronous transmit
2032 * context for it.
2033 */
2034 if (openctx != sc->sc_itctx) {
2035 printf("%s using ctx %d for iso trasmit\n", xname, openctx);
2036 if ((itc = fwohci_it_ctx_construct(sc, openctx, channel,
2037 tag, maxsize)) == NULL) {
2038 return NULL;
2039 }
2040 sc->sc_ctx_it[openctx] = itc;
2041
2042 #ifdef TEST_CHAIN
2043 fwohci_test_chain(itc);
2044 #endif /* TEST_CHAIN */
2045 #ifdef TEST_WRITE
2046 fwohci_test_write(itc);
2047 itc = NULL;
2048 #endif /* TEST_WRITE */
2049
2050 } else {
2051 printf("%s: cannot find any vacant contexts\n", xname);
2052 itc = NULL;
2053 }
2054
2055 return (ieee1394_it_tag_t)itc;
2056 }
2057
2058
2059 /*
2060 * int fwohci_it_ctx_clear(ieee1394_it_tag_t *it)
2061 *
2062 * This function will return 0 if it succeed. Otherwise return
2063 * negative value.
2064 */
2065 int
2066 fwohci_it_ctx_clear(ieee1394_it_tag_t *it)
2067 {
2068 struct fwohci_it_ctx *itc = (struct fwohci_it_ctx *)it;
2069 struct fwohci_softc *sc = itc->itc_sc;
2070 int i;
2071
2072 if (sc->sc_ctx_it[itc->itc_num] != itc) {
2073 printf("fwohci_it_ctx_clear: itc differs %p %p\n",
2074 sc->sc_ctx_it[itc->itc_num], itc);
2075 return -1;
2076 }
2077
2078 fwohci_it_ctx_flush(it);
2079
2080 i = 0;
2081 while (itc->itc_flags & ITC_FLAGS_RUN) {
2082 tsleep((void *)itc, PWAIT|PCATCH, "IEEE1394 iso transmit", 100);
2083 if (itc->itc_flags & ITC_FLAGS_RUN) {
2084 u_int32_t reg;
2085
2086 reg = OHCI_SYNC_TX_DMA_READ(sc, itc->itc_num,
2087 OHCI_SUBREG_ContextControlSet);
2088
2089 if ((reg & OHCI_CTXCTL_WAKE) == 0) {
2090 itc->itc_flags &= ~ITC_FLAGS_RUN;
2091 printf("fwochi_it_ctx_clear: "
2092 "DMA engine stopped without intr\n");
2093 }
2094 printf("%s: %d intr IT_CommandPtr 0x%08x "
2095 "ContextCtrl 0x%08x%s%s%s%s\n",
2096 sc->sc_sc1394.sc1394_dev.dv_xname, i,
2097 OHCI_SYNC_TX_DMA_READ(sc, itc->itc_num,
2098 OHCI_SUBREG_CommandPtr),
2099 reg,
2100 reg & OHCI_CTXCTL_RUN ? " run" : "",
2101 reg & OHCI_CTXCTL_WAKE ? " wake" : "",
2102 reg & OHCI_CTXCTL_DEAD ? " dead" : "",
2103 reg & OHCI_CTXCTL_ACTIVE ? " active" : "");
2104
2105
2106 }
2107 if (++i > 20) {
2108 u_int32_t reg
2109 = OHCI_SYNC_TX_DMA_READ(sc, itc->itc_num,
2110 OHCI_SUBREG_ContextControlSet);
2111
2112 printf("fwochi_it_ctx_clear: "
2113 "Cannot stop iso transmit engine\n");
2114 printf("%s: intr IT_CommandPtr 0x%08x "
2115 "ContextCtrl 0x%08x%s%s%s%s\n",
2116 sc->sc_sc1394.sc1394_dev.dv_xname,
2117 OHCI_SYNC_TX_DMA_READ(sc, itc->itc_num,
2118 OHCI_SUBREG_CommandPtr),
2119 reg,
2120 reg & OHCI_CTXCTL_RUN ? " run" : "",
2121 reg & OHCI_CTXCTL_WAKE ? " wake" : "",
2122 reg & OHCI_CTXCTL_DEAD ? " dead" : "",
2123 reg & OHCI_CTXCTL_ACTIVE ? " active" : "");
2124
2125 return EBUSY;
2126 }
2127 }
2128
2129 printf("fwohci_it_ctx_clear: DMA engine is stopped.\n");
2130
2131 fwohci_it_ctx_destruct(itc);
2132
2133 sc->sc_ctx_it[itc->itc_num] = NULL;
2134
2135
2136 return 0;
2137 }
2138
2139
2140
2141
2142
2143
2144 /*
2145 * Asynchronous Receive Requests input frontend.
2146 */
2147 static void
2148 fwohci_arrq_input(struct fwohci_softc *sc, struct fwohci_ctx *fc)
2149 {
2150 int rcode;
2151 u_int16_t len;
2152 u_int32_t key1, key2, off;
2153 u_int64_t addr, naddr;
2154 struct fwohci_handler *fh;
2155 struct fwohci_pkt pkt, res;
2156
2157 /*
2158 * Do not return if next packet is in the buffer, or the next
2159 * packet cannot be received until the next receive interrupt.
2160 */
2161 while (fwohci_buf_input(sc, fc, &pkt)) {
2162 if (pkt.fp_tcode == OHCI_TCODE_PHY) {
2163 fwohci_phy_input(sc, &pkt);
2164 continue;
2165 }
2166 key1 = pkt.fp_hdr[1] & 0xffff;
2167 key2 = pkt.fp_hdr[2];
2168 if ((pkt.fp_tcode == IEEE1394_TCODE_WRITE_REQ_BLOCK) ||
2169 (pkt.fp_tcode == IEEE1394_TCODE_READ_REQ_BLOCK)) {
2170 len = (pkt.fp_hdr[3] & 0xffff0000) >> 16;
2171 naddr = ((u_int64_t)key1 << 32) + key2;
2172 } else {
2173 len = 0;
2174 naddr = 0; /* XXX: gcc */
2175 }
2176 for (fh = LIST_FIRST(&fc->fc_handler); fh != NULL;
2177 fh = LIST_NEXT(fh, fh_list)) {
2178 if (pkt.fp_tcode == fh->fh_tcode) {
2179 /* Assume length check happens in handler */
2180 if (key1 == fh->fh_key1 &&
2181 key2 == fh->fh_key2) {
2182 rcode = (*fh->fh_handler)(sc,
2183 fh->fh_handarg, &pkt);
2184 break;
2185 }
2186 addr = ((u_int64_t)fh->fh_key1 << 32) +
2187 fh->fh_key2;
2188 off = fh->fh_key3;
2189 /* Check for a range qualifier */
2190 if (len &&
2191 ((naddr >= addr) && (naddr < (addr + off))
2192 && (naddr + len <= (addr + off)))) {
2193 rcode = (*fh->fh_handler)(sc,
2194 fh->fh_handarg, &pkt);
2195 break;
2196 }
2197 }
2198 }
2199 if (fh == NULL) {
2200 rcode = IEEE1394_RCODE_ADDRESS_ERROR;
2201 DPRINTFN(1, ("fwohci_arrq_input: no listener: tcode "
2202 "0x%x, addr=0x%04x %08x\n", pkt.fp_tcode, key1,
2203 key2));
2204 DPRINTFN(2, ("fwohci_arrq_input: no listener: hdr[0]: "
2205 "0x%08x, hdr[1]: 0x%08x, hdr[2]: 0x%08x, hdr[3]: "
2206 "0x%08x\n", pkt.fp_hdr[0], pkt.fp_hdr[1],
2207 pkt.fp_hdr[2], pkt.fp_hdr[3]));
2208 }
2209 if (((*pkt.fp_trail & 0x001f0000) >> 16) !=
2210 OHCI_CTXCTL_EVENT_ACK_PENDING)
2211 continue;
2212 if (rcode != -1) {
2213 memset(&res, 0, sizeof(res));
2214 res.fp_uio.uio_rw = UIO_WRITE;
2215 res.fp_uio.uio_segflg = UIO_SYSSPACE;
2216 fwohci_atrs_output(sc, rcode, &pkt, &res);
2217 }
2218 }
2219 fwohci_buf_next(sc, fc);
2220 OHCI_ASYNC_DMA_WRITE(sc, fc->fc_ctx,
2221 OHCI_SUBREG_ContextControlSet, OHCI_CTXCTL_WAKE);
2222 }
2223
2224
2225 /*
2226 * Asynchronous Receive Response input frontend.
2227 */
2228 static void
2229 fwohci_arrs_input(struct fwohci_softc *sc, struct fwohci_ctx *fc)
2230 {
2231 struct fwohci_pkt pkt;
2232 struct fwohci_handler *fh;
2233 u_int16_t srcid;
2234 int rcode, tlabel;
2235
2236 while (fwohci_buf_input(sc, fc, &pkt)) {
2237 srcid = pkt.fp_hdr[1] >> 16;
2238 rcode = (pkt.fp_hdr[1] & 0x0000f000) >> 12;
2239 tlabel = (pkt.fp_hdr[0] & 0x0000fc00) >> 10;
2240 DPRINTFN(1, ("fwohci_arrs_input: tcode 0x%x, from 0x%04x,"
2241 " tlabel 0x%x, rcode 0x%x, hlen %d, dlen %d\n",
2242 pkt.fp_tcode, srcid, tlabel, rcode, pkt.fp_hlen,
2243 pkt.fp_dlen));
2244 for (fh = LIST_FIRST(&fc->fc_handler); fh != NULL;
2245 fh = LIST_NEXT(fh, fh_list)) {
2246 if (pkt.fp_tcode == fh->fh_tcode &&
2247 (srcid & OHCI_NodeId_NodeNumber) == fh->fh_key1 &&
2248 tlabel == fh->fh_key2) {
2249 (*fh->fh_handler)(sc, fh->fh_handarg, &pkt);
2250 LIST_REMOVE(fh, fh_list);
2251 free(fh, M_DEVBUF);
2252 break;
2253 }
2254 }
2255 if (fh == NULL)
2256 DPRINTFN(1, ("fwohci_arrs_input: no listner\n"));
2257 }
2258 fwohci_buf_next(sc, fc);
2259 OHCI_ASYNC_DMA_WRITE(sc, fc->fc_ctx,
2260 OHCI_SUBREG_ContextControlSet, OHCI_CTXCTL_WAKE);
2261 }
2262
2263 /*
2264 * Isochronous Receive input frontend.
2265 */
2266 static void
2267 fwohci_as_input(struct fwohci_softc *sc, struct fwohci_ctx *fc)
2268 {
2269 int rcode, chan, tag;
2270 struct iovec *iov;
2271 struct fwohci_handler *fh;
2272 struct fwohci_pkt pkt;
2273
2274 #if DOUBLEBUF
2275 if (fc->fc_type == FWOHCI_CTX_ISO_MULTI) {
2276 struct fwohci_buf *fb;
2277 int i;
2278 u_int32_t reg;
2279
2280 /* stop DMA engine before read buffer */
2281 reg = OHCI_SYNC_RX_DMA_READ(sc, fc->fc_ctx,
2282 OHCI_SUBREG_ContextControlClear);
2283 DPRINTFN(5, ("ir_input %08x =>", reg));
2284 if (reg & OHCI_CTXCTL_RUN) {
2285 OHCI_SYNC_RX_DMA_WRITE(sc, fc->fc_ctx,
2286 OHCI_SUBREG_ContextControlClear, OHCI_CTXCTL_RUN);
2287 }
2288 DPRINTFN(5, (" %08x\n", OHCI_SYNC_RX_DMA_READ(sc, fc->fc_ctx, OHCI_SUBREG_ContextControlClear)));
2289
2290 i = 0;
2291 while ((reg = OHCI_SYNC_RX_DMA_READ(sc, fc->fc_ctx, OHCI_SUBREG_ContextControlSet)) & OHCI_CTXCTL_ACTIVE) {
2292 delay(10);
2293 if (++i > 10000) {
2294 printf("cannot stop DMA engine 0x%08x\n", reg);
2295 return;
2296 }
2297 }
2298
2299 /* rotate DMA buffer */
2300 fb = TAILQ_FIRST(&fc->fc_buf2);
2301 OHCI_SYNC_RX_DMA_WRITE(sc, fc->fc_ctx, OHCI_SUBREG_CommandPtr,
2302 fb->fb_daddr | 1);
2303 /* start DMA engine */
2304 OHCI_SYNC_RX_DMA_WRITE(sc, fc->fc_ctx,
2305 OHCI_SUBREG_ContextControlSet, OHCI_CTXCTL_RUN);
2306 OHCI_CSR_WRITE(sc, OHCI_REG_IsoRecvIntEventClear,
2307 (1 << fc->fc_ctx));
2308 }
2309 #endif
2310
2311 while (fwohci_buf_input_ppb(sc, fc, &pkt)) {
2312 chan = (pkt.fp_hdr[0] & 0x00003f00) >> 8;
2313 tag = (pkt.fp_hdr[0] & 0x0000c000) >> 14;
2314 DPRINTFN(1, ("fwohci_as_input: hdr 0x%08x, tcode 0x%0x, hlen %d"
2315 ", dlen %d\n", pkt.fp_hdr[0], pkt.fp_tcode, pkt.fp_hlen,
2316 pkt.fp_dlen));
2317 if (tag == IEEE1394_TAG_GASP &&
2318 fc->fc_type == FWOHCI_CTX_ISO_SINGLE) {
2319 /*
2320 * The pkt with tag=3 is GASP format.
2321 * Move GASP header to header part.
2322 */
2323 if (pkt.fp_dlen < 8)
2324 continue;
2325 iov = pkt.fp_iov;
2326 /* assuming pkt per buffer mode */
2327 pkt.fp_hdr[1] = ntohl(((u_int32_t *)iov->iov_base)[0]);
2328 pkt.fp_hdr[2] = ntohl(((u_int32_t *)iov->iov_base)[1]);
2329 iov->iov_base = (caddr_t)iov->iov_base + 8;
2330 iov->iov_len -= 8;
2331 pkt.fp_hlen += 8;
2332 pkt.fp_dlen -= 8;
2333 }
2334 for (fh = LIST_FIRST(&fc->fc_handler); fh != NULL;
2335 fh = LIST_NEXT(fh, fh_list)) {
2336 if (pkt.fp_tcode == fh->fh_tcode &&
2337 (chan == fh->fh_key1 ||
2338 fh->fh_key1 == IEEE1394_ISO_CHANNEL_ANY) &&
2339 ((1 << tag) & fh->fh_key2) != 0) {
2340 rcode = (*fh->fh_handler)(sc, fh->fh_handarg,
2341 &pkt);
2342 break;
2343 }
2344 }
2345 #ifdef FW_DEBUG
2346 if (fh == NULL) {
2347 DPRINTFN(1, ("fwohci_as_input: no handler\n"));
2348 } else {
2349 DPRINTFN(1, ("fwohci_as_input: rcode %d\n", rcode));
2350 }
2351 #endif
2352 }
2353 fwohci_buf_next(sc, fc);
2354
2355 if (fc->fc_type == FWOHCI_CTX_ISO_SINGLE) {
2356 OHCI_SYNC_RX_DMA_WRITE(sc, fc->fc_ctx,
2357 OHCI_SUBREG_ContextControlSet,
2358 OHCI_CTXCTL_WAKE);
2359 }
2360 }
2361
2362 /*
2363 * Asynchronous Transmit common routine.
2364 */
2365 static int
2366 fwohci_at_output(struct fwohci_softc *sc, struct fwohci_ctx *fc,
2367 struct fwohci_pkt *pkt)
2368 {
2369 struct fwohci_buf *fb;
2370 struct fwohci_desc *fd;
2371 struct mbuf *m, *m0;
2372 int i, ndesc, error, off, len;
2373 u_int32_t val;
2374 #ifdef FW_DEBUG
2375 struct iovec *iov;
2376 int tlabel = (pkt->fp_hdr[0] & 0x0000fc00) >> 10;
2377 #endif
2378
2379 if ((sc->sc_nodeid & OHCI_NodeId_NodeNumber) == IEEE1394_BCAST_PHY_ID)
2380 /* We can't send anything during selfid duration */
2381 return EAGAIN;
2382
2383 #ifdef FW_DEBUG
2384 DPRINTFN(1, ("fwohci_at_output: tcode 0x%x, tlabel 0x%x hlen %d, "
2385 "dlen %d", pkt->fp_tcode, tlabel, pkt->fp_hlen, pkt->fp_dlen));
2386 for (i = 0; i < pkt->fp_hlen/4; i++)
2387 DPRINTFN(2, ("%s%08x", i?" ":"\n ", pkt->fp_hdr[i]));
2388 DPRINTFN(2, ("$"));
2389 for (ndesc = 0, iov = pkt->fp_iov;
2390 ndesc < pkt->fp_uio.uio_iovcnt; ndesc++, iov++) {
2391 for (i = 0; i < iov->iov_len; i++)
2392 DPRINTFN(2, ("%s%02x", (i%32)?((i%4)?"":" "):"\n ",
2393 ((u_int8_t *)iov->iov_base)[i]));
2394 DPRINTFN(2, ("$"));
2395 }
2396 DPRINTFN(1, ("\n"));
2397 #endif
2398
2399 if ((m = pkt->fp_m) != NULL) {
2400 for (ndesc = 2; m != NULL; m = m->m_next)
2401 ndesc++;
2402 if (ndesc > OHCI_DESC_MAX) {
2403 m0 = NULL;
2404 ndesc = 2;
2405 for (off = 0; off < pkt->fp_dlen; off += len) {
2406 if (m0 == NULL) {
2407 MGETHDR(m0, M_DONTWAIT, MT_DATA);
2408 if (m0 != NULL)
2409 M_COPY_PKTHDR(m0, pkt->fp_m);
2410 m = m0;
2411 } else {
2412 MGET(m->m_next, M_DONTWAIT, MT_DATA);
2413 m = m->m_next;
2414 }
2415 if (m != NULL)
2416 MCLGET(m, M_DONTWAIT);
2417 if (m == NULL || (m->m_flags & M_EXT) == 0) {
2418 m_freem(m0);
2419 return ENOMEM;
2420 }
2421 len = pkt->fp_dlen - off;
2422 if (len > m->m_ext.ext_size)
2423 len = m->m_ext.ext_size;
2424 m_copydata(pkt->fp_m, off, len,
2425 mtod(m, caddr_t));
2426 m->m_len = len;
2427 ndesc++;
2428 }
2429 m_freem(pkt->fp_m);
2430 pkt->fp_m = m0;
2431 }
2432 } else
2433 ndesc = 2 + pkt->fp_uio.uio_iovcnt;
2434
2435 if (ndesc > OHCI_DESC_MAX)
2436 return ENOBUFS;
2437
2438 fb = malloc(sizeof(*fb), M_DEVBUF, M_WAITOK);
2439 if (ndesc > 2) {
2440 if ((error = bus_dmamap_create(sc->sc_dmat, pkt->fp_dlen,
2441 OHCI_DESC_MAX - 2, pkt->fp_dlen, 0, BUS_DMA_WAITOK,
2442 &fb->fb_dmamap)) != 0) {
2443 free(fb, M_DEVBUF);
2444 return error;
2445 }
2446
2447 if (pkt->fp_m != NULL)
2448 error = bus_dmamap_load_mbuf(sc->sc_dmat, fb->fb_dmamap,
2449 pkt->fp_m, BUS_DMA_WAITOK);
2450 else
2451 error = bus_dmamap_load_uio(sc->sc_dmat, fb->fb_dmamap,
2452 &pkt->fp_uio, BUS_DMA_WAITOK);
2453 if (error != 0) {
2454 DPRINTFN(1, ("Can't load DMA map: %d\n", error));
2455 bus_dmamap_destroy(sc->sc_dmat, fb->fb_dmamap);
2456 free(fb, M_DEVBUF);
2457 return error;
2458 }
2459 ndesc = fb->fb_dmamap->dm_nsegs + 2;
2460
2461 bus_dmamap_sync(sc->sc_dmat, fb->fb_dmamap, 0, pkt->fp_dlen,
2462 BUS_DMASYNC_PREWRITE);
2463 }
2464
2465 fb->fb_nseg = ndesc;
2466 fb->fb_desc = fwohci_desc_get(sc, ndesc);
2467 if (fb->fb_desc == NULL) {
2468 free(fb, M_DEVBUF);
2469 return ENOBUFS;
2470 }
2471 fb->fb_daddr = sc->sc_ddmamap->dm_segs[0].ds_addr +
2472 ((caddr_t)fb->fb_desc - (caddr_t)sc->sc_desc);
2473 fb->fb_m = pkt->fp_m;
2474 fb->fb_callback = pkt->fp_callback;
2475 fb->fb_statuscb = pkt->fp_statuscb;
2476 fb->fb_statusarg = pkt->fp_statusarg;
2477
2478 fd = fb->fb_desc;
2479 fd->fd_flags = OHCI_DESC_IMMED;
2480 fd->fd_reqcount = pkt->fp_hlen;
2481 fd->fd_data = 0;
2482 fd->fd_branch = 0;
2483 fd->fd_status = 0;
2484 if (fc->fc_ctx == OHCI_CTX_ASYNC_TX_RESPONSE) {
2485 i = 3; /* XXX: 3 sec */
2486 val = OHCI_CSR_READ(sc, OHCI_REG_IsochronousCycleTimer);
2487 fd->fd_timestamp = ((val >> 12) & 0x1fff) |
2488 ((((val >> 25) + i) & 0x7) << 13);
2489 } else
2490 fd->fd_timestamp = 0;
2491 memcpy(fd + 1, pkt->fp_hdr, pkt->fp_hlen);
2492 for (i = 0; i < ndesc - 2; i++) {
2493 fd = fb->fb_desc + 2 + i;
2494 fd->fd_flags = 0;
2495 fd->fd_reqcount = fb->fb_dmamap->dm_segs[i].ds_len;
2496 fd->fd_data = fb->fb_dmamap->dm_segs[i].ds_addr;
2497 fd->fd_branch = 0;
2498 fd->fd_status = 0;
2499 fd->fd_timestamp = 0;
2500 }
2501 fd->fd_flags |= OHCI_DESC_LAST | OHCI_DESC_BRANCH;
2502 fd->fd_flags |= OHCI_DESC_INTR_ALWAYS;
2503
2504 #ifdef FW_DEBUG
2505 DPRINTFN(1, ("fwohci_at_output: desc %ld",
2506 (long)(fb->fb_desc - sc->sc_desc)));
2507 for (i = 0; i < ndesc * 4; i++)
2508 DPRINTFN(2, ("%s%08x", i&7?" ":"\n ",
2509 ((u_int32_t *)fb->fb_desc)[i]));
2510 DPRINTFN(1, ("\n"));
2511 #endif
2512
2513 val = OHCI_ASYNC_DMA_READ(sc, fc->fc_ctx,
2514 OHCI_SUBREG_ContextControlClear);
2515
2516 if (val & OHCI_CTXCTL_RUN) {
2517 if (fc->fc_branch == NULL) {
2518 OHCI_ASYNC_DMA_WRITE(sc, fc->fc_ctx,
2519 OHCI_SUBREG_ContextControlClear, OHCI_CTXCTL_RUN);
2520 goto run;
2521 }
2522 *fc->fc_branch = fb->fb_daddr | ndesc;
2523 OHCI_ASYNC_DMA_WRITE(sc, fc->fc_ctx,
2524 OHCI_SUBREG_ContextControlSet, OHCI_CTXCTL_WAKE);
2525 } else {
2526 run:
2527 OHCI_ASYNC_DMA_WRITE(sc, fc->fc_ctx,
2528 OHCI_SUBREG_CommandPtr, fb->fb_daddr | ndesc);
2529 OHCI_ASYNC_DMA_WRITE(sc, fc->fc_ctx,
2530 OHCI_SUBREG_ContextControlSet, OHCI_CTXCTL_RUN);
2531 }
2532 fc->fc_branch = &fd->fd_branch;
2533
2534 fc->fc_bufcnt++;
2535 TAILQ_INSERT_TAIL(&fc->fc_buf, fb, fb_list);
2536 pkt->fp_m = NULL;
2537 return 0;
2538 }
2539
/*
 * Reap completed asynchronous transmit buffers for context "fc".
 *
 * Walk the queued buffers in order, stopping at the first one the
 * hardware has not finished (unless "force" is set, in which case
 * every buffer is reclaimed, e.g. on bus reset).  For each finished
 * buffer: detach it from the branch chain (stopping the context if
 * it was the tail), deliver the status callback if requested, return
 * the descriptors and DMA map, and invoke the completion callback or
 * free the mbuf chain.
 */
static void
fwohci_at_done(struct fwohci_softc *sc, struct fwohci_ctx *fc, int force)
{
	struct fwohci_buf *fb;
	struct fwohci_desc *fd;
	struct fwohci_pkt pkt;
	int i;

	while ((fb = TAILQ_FIRST(&fc->fc_buf)) != NULL) {
		fd = fb->fb_desc;
#ifdef FW_DEBUG
		DPRINTFN(1, ("fwohci_at_done: %sdesc %ld (%d)",
		    force ? "force " : "", (long)(fd - sc->sc_desc),
		    fb->fb_nseg));
		for (i = 0; i < fb->fb_nseg * 4; i++)
			DPRINTFN(2, ("%s%08x", i&7?" ":"\n    ",
			    ((u_int32_t *)fd)[i]));
		DPRINTFN(1, ("\n"));
#endif
		/* Completion status is written into the last descriptor. */
		if (fb->fb_nseg > 2)
			fd += fb->fb_nseg - 1;
		if (!force && !(fd->fd_status & OHCI_CTXCTL_ACTIVE))
			break;
		TAILQ_REMOVE(&fc->fc_buf, fb, fb_list);
		/*
		 * If this buffer was the chain tail, stop the context
		 * and wait briefly for it to go inactive.
		 */
		if (fc->fc_branch == &fd->fd_branch) {
			OHCI_ASYNC_DMA_WRITE(sc, fc->fc_ctx,
			    OHCI_SUBREG_ContextControlClear, OHCI_CTXCTL_RUN);
			fc->fc_branch = NULL;
			for (i = 0; i < OHCI_LOOP; i++) {
				if (!(OHCI_ASYNC_DMA_READ(sc, fc->fc_ctx,
				    OHCI_SUBREG_ContextControlClear) &
				    OHCI_CTXCTL_ACTIVE))
					break;
				DELAY(10);
			}
		}

		if (fb->fb_statuscb) {
			memset(&pkt, 0, sizeof(pkt));
			pkt.fp_status = fd->fd_status;
			memcpy(pkt.fp_hdr, fd + 1, sizeof(pkt.fp_hdr[0]));

			/* Indicate this is just returning the status bits. */
			pkt.fp_tcode = -1;
			(*fb->fb_statuscb)(sc, fb->fb_statusarg, &pkt);
			fb->fb_statuscb = NULL;
			fb->fb_statusarg = NULL;
		}
		fwohci_desc_put(sc, fb->fb_desc, fb->fb_nseg);
		if (fb->fb_nseg > 2)
			bus_dmamap_destroy(sc->sc_dmat, fb->fb_dmamap);
		fc->fc_bufcnt--;
		if (fb->fb_callback) {
			(*fb->fb_callback)(sc->sc_sc1394.sc1394_if, fb->fb_m);
			fb->fb_callback = NULL;
		} else if (fb->fb_m != NULL)
			m_freem(fb->fb_m);
		free(fb, M_DEVBUF);
	}
}
2600
2601 /*
2602 * Asynchronous Transmit Response -- in response of request packet.
2603 */
2604 static void
2605 fwohci_atrs_output(struct fwohci_softc *sc, int rcode, struct fwohci_pkt *req,
2606 struct fwohci_pkt *res)
2607 {
2608
2609 if (((*req->fp_trail & 0x001f0000) >> 16) !=
2610 OHCI_CTXCTL_EVENT_ACK_PENDING)
2611 return;
2612
2613 res->fp_hdr[0] = (req->fp_hdr[0] & 0x0000fc00) | 0x00000100;
2614 res->fp_hdr[1] = (req->fp_hdr[1] & 0xffff0000) | (rcode << 12);
2615 switch (req->fp_tcode) {
2616 case IEEE1394_TCODE_WRITE_REQ_QUAD:
2617 case IEEE1394_TCODE_WRITE_REQ_BLOCK:
2618 res->fp_tcode = IEEE1394_TCODE_WRITE_RESP;
2619 res->fp_hlen = 12;
2620 break;
2621 case IEEE1394_TCODE_READ_REQ_QUAD:
2622 res->fp_tcode = IEEE1394_TCODE_READ_RESP_QUAD;
2623 res->fp_hlen = 16;
2624 res->fp_dlen = 0;
2625 if (res->fp_uio.uio_iovcnt == 1 && res->fp_iov[0].iov_len == 4)
2626 res->fp_hdr[3] =
2627 *(u_int32_t *)res->fp_iov[0].iov_base;
2628 res->fp_uio.uio_iovcnt = 0;
2629 break;
2630 case IEEE1394_TCODE_READ_REQ_BLOCK:
2631 case IEEE1394_TCODE_LOCK_REQ:
2632 if (req->fp_tcode == IEEE1394_TCODE_LOCK_REQ)
2633 res->fp_tcode = IEEE1394_TCODE_LOCK_RESP;
2634 else
2635 res->fp_tcode = IEEE1394_TCODE_READ_RESP_BLOCK;
2636 res->fp_hlen = 16;
2637 res->fp_dlen = res->fp_uio.uio_resid;
2638 res->fp_hdr[3] = res->fp_dlen << 16;
2639 break;
2640 }
2641 res->fp_hdr[0] |= (res->fp_tcode << 4);
2642 fwohci_at_output(sc, sc->sc_ctx_atrs, res);
2643 }
2644
2645 /*
2646 * APPLICATION LAYER SERVICES
2647 */
2648
2649 /*
2650 * Retrieve Global UID from GUID ROM
2651 */
2652 static int
2653 fwohci_guidrom_init(struct fwohci_softc *sc)
2654 {
2655 int i, n, off;
2656 u_int32_t val1, val2;
2657
2658 /* Extract the Global UID
2659 */
2660 val1 = OHCI_CSR_READ(sc, OHCI_REG_GUIDHi);
2661 val2 = OHCI_CSR_READ(sc, OHCI_REG_GUIDLo);
2662
2663 if (val1 != 0 || val2 != 0) {
2664 sc->sc_sc1394.sc1394_guid[0] = (val1 >> 24) & 0xff;
2665 sc->sc_sc1394.sc1394_guid[1] = (val1 >> 16) & 0xff;
2666 sc->sc_sc1394.sc1394_guid[2] = (val1 >> 8) & 0xff;
2667 sc->sc_sc1394.sc1394_guid[3] = (val1 >> 0) & 0xff;
2668 sc->sc_sc1394.sc1394_guid[4] = (val2 >> 24) & 0xff;
2669 sc->sc_sc1394.sc1394_guid[5] = (val2 >> 16) & 0xff;
2670 sc->sc_sc1394.sc1394_guid[6] = (val2 >> 8) & 0xff;
2671 sc->sc_sc1394.sc1394_guid[7] = (val2 >> 0) & 0xff;
2672 } else {
2673 val1 = OHCI_CSR_READ(sc, OHCI_REG_Version);
2674 if ((val1 & OHCI_Version_GUID_ROM) == 0)
2675 return -1;
2676 OHCI_CSR_WRITE(sc, OHCI_REG_Guid_Rom, OHCI_Guid_AddrReset);
2677 for (i = 0; i < OHCI_LOOP; i++) {
2678 val1 = OHCI_CSR_READ(sc, OHCI_REG_Guid_Rom);
2679 if (!(val1 & OHCI_Guid_AddrReset))
2680 break;
2681 DELAY(10);
2682 }
2683 off = OHCI_BITVAL(val1, OHCI_Guid_MiniROM) + 4;
2684 val2 = 0;
2685 for (n = 0; n < off + sizeof(sc->sc_sc1394.sc1394_guid); n++) {
2686 OHCI_CSR_WRITE(sc, OHCI_REG_Guid_Rom,
2687 OHCI_Guid_RdStart);
2688 for (i = 0; i < OHCI_LOOP; i++) {
2689 val1 = OHCI_CSR_READ(sc, OHCI_REG_Guid_Rom);
2690 if (!(val1 & OHCI_Guid_RdStart))
2691 break;
2692 DELAY(10);
2693 }
2694 if (n < off)
2695 continue;
2696 val1 = OHCI_BITVAL(val1, OHCI_Guid_RdData);
2697 sc->sc_sc1394.sc1394_guid[n - off] = val1;
2698 val2 |= val1;
2699 }
2700 if (val2 == 0)
2701 return -1;
2702 }
2703 return 0;
2704 }
2705
2706 /*
2707 * Initialization for Configuration ROM (no DMA context)
2708 */
2709
#define CFR_MAXUNIT		20

/*
 * State used while building the Configuration ROM image.  "ptr" is
 * the next free quadlet in the ROM buffer.  Each unit[] entry tracks
 * one directory/leaf under construction: its first quadlet ("start",
 * which later receives the length|CRC quadlet), its length in
 * quadlets, and the quadlet in the parent directory that refers to
 * it ("refer", owned by unit "refunit").
 */
struct configromctx {
	u_int32_t *ptr;
	int curunit;
	struct {
		u_int32_t *start;
		int length;
		u_int32_t *refer;
		int refunit;
	} unit[CFR_MAXUNIT];
};

/* Emit one quadlet assembled from four byte-sized values. */
#define CFR_PUT_DATA4(cfr, d1, d2, d3, d4)				\
	(*(cfr)->ptr++ = (((d1)<<24) | ((d2)<<16) | ((d3)<<8) | (d4)))

/* Emit one raw quadlet. */
#define CFR_PUT_DATA1(cfr, d)	(*(cfr)->ptr++ = (d))

/* Emit a key/value quadlet (key in the top byte). */
#define CFR_PUT_VALUE(cfr, key, d)	(*(cfr)->ptr++ = ((key)<<24) | (d))

/*
 * Write unit n's first quadlet: length in the high half, CRC-16 of
 * the following "length" quadlets in the low half.
 */
#define CFR_PUT_CRC(cfr, n)						\
	(*(cfr)->unit[n].start = ((cfr)->unit[n].length << 16) |	\
	    fwohci_crc16((cfr)->unit[n].start + 1, (cfr)->unit[n].length))

/*
 * Begin unit n at the current position.  If a parent directory has a
 * pending reference to this unit, patch in the offset and re-CRC the
 * parent.
 */
#define CFR_START_UNIT(cfr, n)						\
do {									\
	if ((cfr)->unit[n].refer != NULL) {				\
		*(cfr)->unit[n].refer |=				\
		    (cfr)->ptr - (cfr)->unit[n].refer;			\
		CFR_PUT_CRC(cfr, (cfr)->unit[n].refunit);		\
	}								\
	(cfr)->curunit = (n);						\
	(cfr)->unit[n].start = (cfr)->ptr++;				\
} while (0 /* CONSTCOND */)

/*
 * Emit a reference quadlet to unit n from the current unit; its
 * offset field is filled in later by CFR_START_UNIT(cfr, n).
 */
#define CFR_PUT_REFER(cfr, key, n)					\
do {									\
	(cfr)->unit[n].refer = (cfr)->ptr;				\
	(cfr)->unit[n].refunit = (cfr)->curunit;			\
	*(cfr)->ptr++ = (key) << 24;					\
} while (0 /* CONSTCOND */)

/* Close the current unit: record its length and write length|CRC. */
#define CFR_END_UNIT(cfr)						\
do {									\
	(cfr)->unit[(cfr)->curunit].length = (cfr)->ptr -		\
	    ((cfr)->unit[(cfr)->curunit].start + 1);			\
	CFR_PUT_CRC(cfr, (cfr)->curunit);				\
} while (0 /* CONSTCOND */)
2758
/*
 * Compute the IEEE 1212 16-bit CRC over "len" quadlets starting at
 * "ptr", folding in each quadlet four bits at a time from the most
 * significant nibble downward.  Returns the resulting CRC value.
 */
static u_int16_t
fwohci_crc16(u_int32_t *ptr, int len)
{
	u_int32_t accum = 0;
	int n, sh;

	for (n = 0; n < len; n++) {
		u_int32_t quad = ptr[n];

		for (sh = 28; sh >= 0; sh -= 4) {
			u_int32_t nib;

			nib = ((accum >> 12) ^ (quad >> sh)) & 0x000f;
			accum = (accum << 4) ^ (nib << 12) ^ (nib << 5) ^ nib;
		}
		accum &= 0xffff;
	}
	return accum;
}
2776
/*
 * Build the IEEE 1212 Configuration ROM image in sc->sc_buf_cnfrom,
 * program its DMA address into the controller, and register per-quad
 * read handlers so remote quadlet reads of the ROM are answered.
 * The image contains the bus-info block, the root directory, textual
 * descriptor leaves, and (when INET/INET6 are configured) IP-over-1394
 * unit directories.
 */
static void
fwohci_configrom_init(struct fwohci_softc *sc)
{
	int i, val;
	struct fwohci_buf *fb;
	u_int32_t *hdr;
	struct configromctx cfr;

	fb = &sc->sc_buf_cnfrom;
	memset(&cfr, 0, sizeof(cfr));
	cfr.ptr = hdr = (u_int32_t *)fb->fb_buf;

	/* headers */
	CFR_START_UNIT(&cfr, 0);
	CFR_PUT_DATA1(&cfr, OHCI_CSR_READ(sc, OHCI_REG_BusId));
	CFR_PUT_DATA1(&cfr, OHCI_CSR_READ(sc, OHCI_REG_BusOptions));
	CFR_PUT_DATA1(&cfr, OHCI_CSR_READ(sc, OHCI_REG_GUIDHi));
	CFR_PUT_DATA1(&cfr, OHCI_CSR_READ(sc, OHCI_REG_GUIDLo));
	CFR_END_UNIT(&cfr);
	/* copy info_length from crc_length */
	*hdr |= (*hdr & 0x00ff0000) << 8;
	OHCI_CSR_WRITE(sc, OHCI_REG_ConfigROMhdr, *hdr);

	/* root directory */
	CFR_START_UNIT(&cfr, 1);
	CFR_PUT_VALUE(&cfr, 0x03, 0x00005e);	/* vendor id */
	CFR_PUT_REFER(&cfr, 0x81, 2);		/* textual descriptor offset */
	CFR_PUT_VALUE(&cfr, 0x0c, 0x0083c0);	/* node capability */
						/* spt,64,fix,lst,drq */
#ifdef INET
	CFR_PUT_REFER(&cfr, 0xd1, 3);		/* IPv4 unit directory */
#endif /* INET */
#ifdef INET6
	CFR_PUT_REFER(&cfr, 0xd1, 4);		/* IPv6 unit directory */
#endif /* INET6 */
	CFR_END_UNIT(&cfr);

	/* Unit 2: "NetBSD" textual descriptor leaf. */
	CFR_START_UNIT(&cfr, 2);
	CFR_PUT_VALUE(&cfr, 0, 0);		/* textual descriptor */
	CFR_PUT_DATA1(&cfr, 0);			/* minimal ASCII */
	CFR_PUT_DATA4(&cfr, 'N', 'e', 't', 'B');
	CFR_PUT_DATA4(&cfr, 'S', 'D', 0x00, 0x00);
	CFR_END_UNIT(&cfr);

#ifdef INET
	/* IPv4 unit directory */
	CFR_START_UNIT(&cfr, 3);
	CFR_PUT_VALUE(&cfr, 0x12, 0x00005e);	/* unit spec id */
	CFR_PUT_REFER(&cfr, 0x81, 6);		/* textual descriptor offset */
	CFR_PUT_VALUE(&cfr, 0x13, 0x000001);	/* unit sw version */
	CFR_PUT_REFER(&cfr, 0x81, 7);		/* textual descriptor offset */
	CFR_PUT_REFER(&cfr, 0x95, 8);		/* Unit location */
	CFR_END_UNIT(&cfr);

	CFR_START_UNIT(&cfr, 6);
	CFR_PUT_VALUE(&cfr, 0, 0);		/* textual descriptor */
	CFR_PUT_DATA1(&cfr, 0);			/* minimal ASCII */
	CFR_PUT_DATA4(&cfr, 'I', 'A', 'N', 'A');
	CFR_END_UNIT(&cfr);

	CFR_START_UNIT(&cfr, 7);
	CFR_PUT_VALUE(&cfr, 0, 0);		/* textual descriptor */
	CFR_PUT_DATA1(&cfr, 0);			/* minimal ASCII */
	CFR_PUT_DATA4(&cfr, 'I', 'P', 'v', '4');
	CFR_END_UNIT(&cfr);

	CFR_START_UNIT(&cfr, 8);		/* Spec's valid addr range. */
	CFR_PUT_DATA1(&cfr, FW_FIFO_HI);
	CFR_PUT_DATA1(&cfr, (FW_FIFO_LO | 0x1));
	CFR_PUT_DATA1(&cfr, FW_FIFO_HI);
	CFR_PUT_DATA1(&cfr, FW_FIFO_LO);
	CFR_END_UNIT(&cfr);

#endif /* INET */

#ifdef INET6
	/* IPv6 unit directory */
	CFR_START_UNIT(&cfr, 4);
	CFR_PUT_VALUE(&cfr, 0x12, 0x00005e);	/* unit spec id */
	CFR_PUT_REFER(&cfr, 0x81, 9);		/* textual descriptor offset */
	CFR_PUT_VALUE(&cfr, 0x13, 0x000002);	/* unit sw version */
						/* XXX: TBA by IANA */
	CFR_PUT_REFER(&cfr, 0x81, 10);		/* textual descriptor offset */
	CFR_PUT_REFER(&cfr, 0x95, 11);		/* Unit location */
	CFR_END_UNIT(&cfr);

	CFR_START_UNIT(&cfr, 9);
	CFR_PUT_VALUE(&cfr, 0, 0);		/* textual descriptor */
	CFR_PUT_DATA1(&cfr, 0);			/* minimal ASCII */
	CFR_PUT_DATA4(&cfr, 'I', 'A', 'N', 'A');
	CFR_END_UNIT(&cfr);

	CFR_START_UNIT(&cfr, 10);
	CFR_PUT_VALUE(&cfr, 0, 0);		/* textual descriptor */
	CFR_PUT_DATA1(&cfr, 0);
	CFR_PUT_DATA4(&cfr, 'I', 'P', 'v', '6');
	CFR_END_UNIT(&cfr);

	CFR_START_UNIT(&cfr, 11);		/* Spec's valid addr range. */
	CFR_PUT_DATA1(&cfr, FW_FIFO_HI);
	CFR_PUT_DATA1(&cfr, (FW_FIFO_LO | 0x1));
	CFR_PUT_DATA1(&cfr, FW_FIFO_HI);
	CFR_PUT_DATA1(&cfr, FW_FIFO_LO);
	CFR_END_UNIT(&cfr);

#endif /* INET6 */

	/* Total ROM size in quadlets. */
	fb->fb_off = cfr.ptr - hdr;
#ifdef FW_DEBUG
	DPRINTF(("%s: Config ROM:", sc->sc_sc1394.sc1394_dev.dv_xname));
	for (i = 0; i < fb->fb_off; i++)
		DPRINTF(("%s%08x", i&7?" ":"\n    ", hdr[i]));
	DPRINTF(("\n"));
#endif /* FW_DEBUG */

	/*
	 * Make network byte order for DMA
	 */
	for (i = 0; i < fb->fb_off; i++)
		HTONL(hdr[i]);
	bus_dmamap_sync(sc->sc_dmat, fb->fb_dmamap, 0,
	    (caddr_t)cfr.ptr - fb->fb_buf, BUS_DMASYNC_PREWRITE);

	OHCI_CSR_WRITE(sc, OHCI_REG_ConfigROMmap,
	    fb->fb_dmamap->dm_segs[0].ds_addr);

	/* This register is only valid on OHCI 1.1. */
	val = OHCI_CSR_READ(sc, OHCI_REG_Version);
	if ((OHCI_Version_GET_Version(val) == 1) &&
	    (OHCI_Version_GET_Revision(val) == 1))
		OHCI_CSR_WRITE(sc, OHCI_REG_HCControlSet,
		    OHCI_HCControl_BIBImageValid);

	/* Only allow quad reads of the rom. */
	for (i = 0; i < fb->fb_off; i++)
		fwohci_handler_set(sc, IEEE1394_TCODE_READ_REQ_QUAD,
		    CSR_BASE_HI, CSR_BASE_LO + CSR_CONFIG_ROM + (i * 4), 0,
		    fwohci_configrom_input, NULL);
}
2916
/*
 * Handler for remote quadlet reads of the Configuration ROM: reply
 * with the requested quadlet from the in-memory ROM image.  The image
 * is stored in network byte order (see fwohci_configrom_init()), so
 * the quadlet is passed through unconverted.  Returns -1 because the
 * response is sent here rather than by the caller.
 */
static int
fwohci_configrom_input(struct fwohci_softc *sc, void *arg,
    struct fwohci_pkt *pkt)
{
	struct fwohci_pkt res;
	u_int32_t loc, *rom;

	/* This will be used as an array index so size accordingly. */
	loc = pkt->fp_hdr[2] - (CSR_BASE_LO + CSR_CONFIG_ROM);
	if ((loc & 0x03) != 0) {
		/* alignment error */
		return IEEE1394_RCODE_ADDRESS_ERROR;
	}
	else
		loc /= 4;
	/*
	 * NOTE(review): loc is not checked against fb_off here; in
	 * practice handlers are only registered for in-range offsets
	 * by fwohci_configrom_init() -- confirm no other callers.
	 */
	rom = (u_int32_t *)sc->sc_buf_cnfrom.fb_buf;

	DPRINTFN(1, ("fwohci_configrom_input: ConfigRom[0x%04x]: 0x%08x\n", loc,
	    ntohl(rom[loc])));

	memset(&res, 0, sizeof(res));
	res.fp_hdr[3] = rom[loc];
	fwohci_atrs_output(sc, IEEE1394_RCODE_COMPLETE, pkt, &res);
	return -1;
}
2942
2943 /*
2944 * SelfID buffer (no DMA context)
2945 */
2946 static void
2947 fwohci_selfid_init(struct fwohci_softc *sc)
2948 {
2949 struct fwohci_buf *fb;
2950
2951 fb = &sc->sc_buf_selfid;
2952 #ifdef DIAGNOSTIC
2953 if ((fb->fb_dmamap->dm_segs[0].ds_addr & 0x7ff) != 0)
2954 panic("fwohci_selfid_init: not aligned: %ld (%ld) %p",
2955 (unsigned long)fb->fb_dmamap->dm_segs[0].ds_addr,
2956 (unsigned long)fb->fb_dmamap->dm_segs[0].ds_len, fb->fb_buf);
2957 #endif
2958 memset(fb->fb_buf, 0, fb->fb_dmamap->dm_segs[0].ds_len);
2959 bus_dmamap_sync(sc->sc_dmat, fb->fb_dmamap, 0,
2960 fb->fb_dmamap->dm_segs[0].ds_len, BUS_DMASYNC_PREREAD);
2961
2962 OHCI_CSR_WRITE(sc, OHCI_REG_SelfIDBuffer,
2963 fb->fb_dmamap->dm_segs[0].ds_addr);
2964 }
2965
/*
 * Parse the SelfID packets received after a bus reset.
 *
 * Validates the buffer (each SelfID quadlet is followed by its
 * one's complement), derives the root node and isochronous resource
 * manager IDs, verifies the generation did not change while parsing,
 * records our own node ID, and enables/disables cycle-master mode
 * depending on whether we are root.  Returns 0 on success, -1 on any
 * error (SelfID error flag, corruption, generation mismatch, invalid
 * node ID, or our ID exceeding the root's).
 */
static int
fwohci_selfid_input(struct fwohci_softc *sc)
{
	int i;
	u_int32_t count, val, gen;
	u_int32_t *buf;

	buf = (u_int32_t *)sc->sc_buf_selfid.fb_buf;
	val = OHCI_CSR_READ(sc, OHCI_REG_SelfIDCount);
  again:
	if (val & OHCI_SelfID_Error) {
		printf("%s: SelfID Error\n", sc->sc_sc1394.sc1394_dev.dv_xname);
		return -1;
	}
	count = OHCI_BITVAL(val, OHCI_SelfID_Size);

	bus_dmamap_sync(sc->sc_dmat, sc->sc_buf_selfid.fb_dmamap,
	    0, count << 2, BUS_DMASYNC_POSTREAD);
	/* buf[0] holds the generation of the buffered SelfID data. */
	gen = OHCI_BITVAL(buf[0], OHCI_SelfID_Gen);

#ifdef FW_DEBUG
	DPRINTFN(1, ("%s: SelfID: 0x%08x", sc->sc_sc1394.sc1394_dev.dv_xname,
	    val));
	for (i = 0; i < count; i++)
		DPRINTFN(2, ("%s%08x", i&7?" ":"\n    ", buf[i]));
	DPRINTFN(1, ("\n"));
#endif /* FW_DEBUG */

	/*
	 * SelfID data comes in pairs: a quadlet followed by its one's
	 * complement.  Scan for the highest non-extended phy ID; a
	 * node advertising both Link-active and Contender becomes the
	 * isochronous resource manager candidate.
	 */
	for (i = 1; i < count; i += 2) {
		if (buf[i] != ~buf[i + 1])
			break;
		if (buf[i] & 0x00000001)
			continue;	/* more pkt */
		if (buf[i] & 0x00800000)
			continue;	/* external id */
		sc->sc_rootid = (buf[i] & 0x3f000000) >> 24;
		if ((buf[i] & 0x00400800) == 0x00400800)
			sc->sc_irmid = sc->sc_rootid;
	}

	/*
	 * If another bus reset happened while we were reading, the
	 * generation will have moved on; retry once the buffered data
	 * matches the registered generation again.
	 */
	val = OHCI_CSR_READ(sc, OHCI_REG_SelfIDCount);
	if (OHCI_BITVAL(val, OHCI_SelfID_Gen) != gen) {
		if (OHCI_BITVAL(val, OHCI_SelfID_Gen) !=
		    OHCI_BITVAL(buf[0], OHCI_SelfID_Gen))
			goto again;
		DPRINTF(("%s: SelfID Gen mismatch (%d, %d)\n",
		    sc->sc_sc1394.sc1394_dev.dv_xname, gen,
		    OHCI_BITVAL(val, OHCI_SelfID_Gen)));
		return -1;
	}
	if (i != count) {
		printf("%s: SelfID corrupted (%d, 0x%08x, 0x%08x)\n",
		    sc->sc_sc1394.sc1394_dev.dv_xname, i, buf[i], buf[i + 1]);
#if 1
		if (i == 1 && buf[i] == 0 && buf[i + 1] == 0) {
			/*
			 * XXX: CXD3222 sometimes fails to DMA
			 * selfid packet??
			 */
			sc->sc_rootid = (count - 1) / 2 - 1;
			sc->sc_irmid = sc->sc_rootid;
		} else
#endif
			return -1;
	}

	val = OHCI_CSR_READ(sc, OHCI_REG_NodeId);
	if ((val & OHCI_NodeId_IDValid) == 0) {
		sc->sc_nodeid = 0xffff;		/* invalid */
		printf("%s: nodeid is invalid\n",
		    sc->sc_sc1394.sc1394_dev.dv_xname);
		return -1;
	}
	sc->sc_nodeid = val & 0xffff;
	sc->sc_sc1394.sc1394_node_id = sc->sc_nodeid & OHCI_NodeId_NodeNumber;

	DPRINTF(("%s: nodeid=0x%04x(%d), rootid=%d, irmid=%d\n",
	    sc->sc_sc1394.sc1394_dev.dv_xname, sc->sc_nodeid,
	    sc->sc_nodeid & OHCI_NodeId_NodeNumber, sc->sc_rootid,
	    sc->sc_irmid));

	if ((sc->sc_nodeid & OHCI_NodeId_NodeNumber) > sc->sc_rootid)
		return -1;

	/* The root node drives the cycle master. */
	if ((sc->sc_nodeid & OHCI_NodeId_NodeNumber) == sc->sc_rootid)
		OHCI_CSR_WRITE(sc, OHCI_REG_LinkControlSet,
		    OHCI_LinkControl_CycleMaster);
	else
		OHCI_CSR_WRITE(sc, OHCI_REG_LinkControlClear,
		    OHCI_LinkControl_CycleMaster);
	return 0;
}
3058
3059 /*
3060 * some CSRs are handled by driver.
3061 */
3062 static void
3063 fwohci_csr_init(struct fwohci_softc *sc)
3064 {
3065 int i;
3066 static u_int32_t csr[] = {
3067 CSR_STATE_CLEAR, CSR_STATE_SET, CSR_SB_CYCLE_TIME,
3068 CSR_SB_BUS_TIME, CSR_SB_BUSY_TIMEOUT, CSR_SB_BUS_MANAGER_ID,
3069 CSR_SB_CHANNEL_AVAILABLE_HI, CSR_SB_CHANNEL_AVAILABLE_LO,
3070 CSR_SB_BROADCAST_CHANNEL
3071 };
3072
3073 for (i = 0; i < sizeof(csr) / sizeof(csr[0]); i++) {
3074 fwohci_handler_set(sc, IEEE1394_TCODE_WRITE_REQ_QUAD,
3075 CSR_BASE_HI, CSR_BASE_LO + csr[i], 0, fwohci_csr_input,
3076 NULL);
3077 fwohci_handler_set(sc, IEEE1394_TCODE_READ_REQ_QUAD,
3078 CSR_BASE_HI, CSR_BASE_LO + csr[i], 0, fwohci_csr_input,
3079 NULL);
3080 }
3081 sc->sc_csr[CSR_SB_BROADCAST_CHANNEL] = 31; /*XXX*/
3082 }
3083
/*
 * Handler for remote quadlet reads/writes of the software-maintained
 * CSRs.  Writes store the (byte-swapped) quadlet into sc->sc_csr;
 * reads send the current value back themselves and return -1 so the
 * caller does not generate a second response.
 */
static int
fwohci_csr_input(struct fwohci_softc *sc, void *arg, struct fwohci_pkt *pkt)
{
	struct fwohci_pkt res;
	u_int32_t reg;

	/*
	 * XXX need to do special functionality other than just r/w...
	 */
	reg = pkt->fp_hdr[2] - CSR_BASE_LO;

	if ((reg & 0x03) != 0) {
		/* alignment error */
		return IEEE1394_RCODE_ADDRESS_ERROR;
	}
	/*
	 * NOTE(review): reg is not range-checked against sc_csr here;
	 * handlers are only registered for the offsets listed in
	 * fwohci_csr_init() -- confirm no other registration paths.
	 */
	DPRINTFN(1, ("fwohci_csr_input: CSR[0x%04x]: 0x%08x", reg,
	    *(u_int32_t *)(&sc->sc_csr[reg])));
	if (pkt->fp_tcode == IEEE1394_TCODE_WRITE_REQ_QUAD) {
		DPRINTFN(1, (" -> 0x%08x\n",
		    ntohl(*(u_int32_t *)pkt->fp_iov[0].iov_base)));
		*(u_int32_t *)&sc->sc_csr[reg] =
		    ntohl(*(u_int32_t *)pkt->fp_iov[0].iov_base);
	} else {
		DPRINTFN(1, ("\n"));
		/* Read: build and send the quadlet response ourselves. */
		res.fp_hdr[3] = htonl(*(u_int32_t *)&sc->sc_csr[reg]);
		res.fp_iov[0].iov_base = &res.fp_hdr[3];
		res.fp_iov[0].iov_len = 4;
		res.fp_uio.uio_resid = 4;
		res.fp_uio.uio_iovcnt = 1;
		fwohci_atrs_output(sc, IEEE1394_RCODE_COMPLETE, pkt, &res);
		return -1;
	}
	return IEEE1394_RCODE_COMPLETE;
}
3118
3119 /*
3120 * Mapping between nodeid and unique ID (EUI-64).
3121 *
3122 * Track old mappings and simply update their devices with the new id's when
3123 * they match an existing EUI. This allows proper renumeration of the bus.
3124 */
3125 static void
3126 fwohci_uid_collect(struct fwohci_softc *sc)
3127 {
3128 int i;
3129 struct fwohci_uidtbl *fu;
3130 struct ieee1394_softc *iea;
3131
3132 LIST_FOREACH(iea, &sc->sc_nodelist, sc1394_node)
3133 iea->sc1394_node_id = 0xffff;
3134
3135 if (sc->sc_uidtbl != NULL)
3136 free(sc->sc_uidtbl, M_DEVBUF);
3137 sc->sc_uidtbl = malloc(sizeof(*fu) * (sc->sc_rootid + 1), M_DEVBUF,
3138 M_NOWAIT|M_ZERO); /* XXX M_WAITOK requires locks */
3139 if (sc->sc_uidtbl == NULL)
3140 return;
3141
3142 for (i = 0, fu = sc->sc_uidtbl; i <= sc->sc_rootid; i++, fu++) {
3143 if (i == (sc->sc_nodeid & OHCI_NodeId_NodeNumber)) {
3144 memcpy(fu->fu_uid, sc->sc_sc1394.sc1394_guid, 8);
3145 fu->fu_valid = 3;
3146
3147 iea = (struct ieee1394_softc *)sc->sc_sc1394.sc1394_if;
3148 if (iea) {
3149 iea->sc1394_node_id = i;
3150 DPRINTF(("%s: Updating nodeid to %d\n",
3151 iea->sc1394_dev.dv_xname,
3152 iea->sc1394_node_id));
3153 }
3154 } else {
3155 fu->fu_valid = 0;
3156 fwohci_uid_req(sc, i);
3157 }
3158 }
3159 if (sc->sc_rootid == 0)
3160 fwohci_check_nodes(sc);
3161 }
3162
/*
 * Request the EUI-64 of node 'phyid' with two quadlet reads of its config
 * ROM: offset 12 holds the high half and offset 16 the low half (matching
 * the arg 0 / arg 1 handling in fwohci_uid_input()).  Each request uses a
 * fresh transaction label, and the response handler is registered on that
 * label *before* the label counter is advanced and the packet is sent.
 */
static void
fwohci_uid_req(struct fwohci_softc *sc, int phyid)
{
	struct fwohci_pkt pkt;

	memset(&pkt, 0, sizeof(pkt));
	pkt.fp_tcode = IEEE1394_TCODE_READ_REQ_QUAD;
	pkt.fp_hlen = 12;
	pkt.fp_dlen = 0;
	pkt.fp_hdr[0] = 0x00000100 | (sc->sc_tlabel << 10) |
	    (pkt.fp_tcode << 4);
	pkt.fp_hdr[1] = ((0xffc0 | phyid) << 16) | CSR_BASE_HI;
	pkt.fp_hdr[2] = CSR_BASE_LO + CSR_CONFIG_ROM + 12;
	fwohci_handler_set(sc, IEEE1394_TCODE_READ_RESP_QUAD, phyid,
	    sc->sc_tlabel, 0, fwohci_uid_input, (void *)0);
	sc->sc_tlabel = (sc->sc_tlabel + 1) & 0x3f;
	fwohci_at_output(sc, sc->sc_ctx_atrq, &pkt);

	/* Second read: low half of the EUI-64, under a new tlabel. */
	pkt.fp_hdr[0] = 0x00000100 | (sc->sc_tlabel << 10) |
	    (pkt.fp_tcode << 4);
	pkt.fp_hdr[2] = CSR_BASE_LO + CSR_CONFIG_ROM + 16;
	fwohci_handler_set(sc, IEEE1394_TCODE_READ_RESP_QUAD, phyid,
	    sc->sc_tlabel, 0, fwohci_uid_input, (void *)1);
	sc->sc_tlabel = (sc->sc_tlabel + 1) & 0x3f;
	fwohci_at_output(sc, sc->sc_ctx_atrq, &pkt);
}
3189
/*
 * Response handler for the config-ROM reads issued by fwohci_uid_req().
 * 'arg' selects which half of the EUI-64 this quadlet is (0 = high bytes
 * 0-3, non-zero = low bytes 4-7).  When both halves of a node's UID are
 * in (fu_valid == 0x3), either rebind an already-attached child with the
 * matching GUID to the new node number (invoking its reset callback), or
 * attach a new "fwnode" child via autoconf.  Once every entry in the
 * table is complete, fwohci_check_nodes() prunes vanished children.
 */
static int
fwohci_uid_input(struct fwohci_softc *sc, void *arg, struct fwohci_pkt *res)
{
	struct fwohci_uidtbl *fu;
	struct ieee1394_softc *iea;
	struct ieee1394_attach_args fwa;
	int i, n, done, rcode, found;

	found = 0;

	/* Sender's node number and the response code from the header. */
	n = (res->fp_hdr[1] >> 16) & OHCI_NodeId_NodeNumber;
	rcode = (res->fp_hdr[1] & 0x0000f000) >> 12;
	if (rcode != IEEE1394_RCODE_COMPLETE ||
	    sc->sc_uidtbl == NULL ||
	    n > sc->sc_rootid)
		return 0;
	fu = &sc->sc_uidtbl[n];
	if (arg == 0) {
		/* High half of the EUI-64. */
		memcpy(fu->fu_uid, res->fp_iov[0].iov_base, 4);
		fu->fu_valid |= 0x1;
	} else {
		/* Low half of the EUI-64. */
		memcpy(fu->fu_uid + 4, res->fp_iov[0].iov_base, 4);
		fu->fu_valid |= 0x2;
	}
#ifdef FW_DEBUG
	if (fu->fu_valid == 0x3)
		DPRINTFN(1, ("fwohci_uid_input: "
		    "Node %d, UID %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x\n", n,
		    fu->fu_uid[0], fu->fu_uid[1], fu->fu_uid[2], fu->fu_uid[3],
		    fu->fu_uid[4], fu->fu_uid[5], fu->fu_uid[6], fu->fu_uid[7]));
#endif
	if (fu->fu_valid == 0x3) {
		/* Known GUID: just update the node number in place. */
		LIST_FOREACH(iea, &sc->sc_nodelist, sc1394_node)
			if (memcmp(iea->sc1394_guid, fu->fu_uid, 8) == 0) {
				found = 1;
				iea->sc1394_node_id = n;
				DPRINTF(("%s: Updating nodeid to %d\n",
				    iea->sc1394_dev.dv_xname,
				    iea->sc1394_node_id));
				if (iea->sc1394_callback.sc1394_reset)
					iea->sc1394_callback.sc1394_reset(iea,
					    iea->sc1394_callback.sc1394_resetarg);
				break;
			}
		if (!found) {
			/* New GUID: attach a fwnode child for it. */
			strcpy(fwa.name, "fwnode");
			memcpy(fwa.uid, fu->fu_uid, 8);
			fwa.nodeid = n;
			iea = (struct ieee1394_softc *)
			    config_found_sm_loc(&sc->sc_sc1394.sc1394_dev,
			    "fwbus", NULL, &fwa,
			    fwohci_print, fwohci_submatch);
			if (iea != NULL)
				LIST_INSERT_HEAD(&sc->sc_nodelist, iea,
				    sc1394_node);
		}
	}
	done = 1;

	/* Collection is complete once every node has both UID halves. */
	for (i = 0; i < sc->sc_rootid + 1; i++) {
		fu = &sc->sc_uidtbl[i];
		if (fu->fu_valid != 0x3) {
			done = 0;
			break;
		}
	}
	if (done)
		fwohci_check_nodes(sc);

	return 0;
}
3261
/*
 * Detach child devices whose node id is still the invalid marker 0xffff,
 * i.e. nodes that were not seen again during the last UID collection.
 */
static void
fwohci_check_nodes(struct fwohci_softc *sc)
{
	struct device *detach = NULL;
	struct ieee1394_softc *iea;

	LIST_FOREACH(iea, &sc->sc_nodelist, sc1394_node) {

		/*
		 * Have to defer detachment until the next
		 * loop iteration since config_detach
		 * free's the softc and the loop iterator
		 * needs data from the softc to move
		 * forward.
		 */

		if (detach) {
			config_detach(detach, 0);
			detach = NULL;
		}
		if (iea->sc1394_node_id == 0xffff) {
			/* Unlink now; the actual detach happens next pass. */
			detach = (struct device *)iea;
			LIST_REMOVE(iea, sc1394_node);
		}
	}
	/* Handle a device flagged on the final iteration. */
	if (detach)
		config_detach(detach, 0);
}
3290
3291 static int
3292 fwohci_uid_lookup(struct fwohci_softc *sc, const u_int8_t *uid)
3293 {
3294 struct fwohci_uidtbl *fu;
3295 int n;
3296 static const u_int8_t bcast[] =
3297 { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
3298
3299 fu = sc->sc_uidtbl;
3300 if (fu == NULL) {
3301 if (memcmp(uid, bcast, sizeof(bcast)) == 0)
3302 return IEEE1394_BCAST_PHY_ID;
3303 fwohci_uid_collect(sc); /* try to get */
3304 return -1;
3305 }
3306 for (n = 0; n <= sc->sc_rootid; n++, fu++) {
3307 if (fu->fu_valid == 0x3 && memcmp(fu->fu_uid, uid, 8) == 0)
3308 return n;
3309 }
3310 if (memcmp(uid, bcast, sizeof(bcast)) == 0)
3311 return IEEE1394_BCAST_PHY_ID;
3312 for (n = 0, fu = sc->sc_uidtbl; n <= sc->sc_rootid; n++, fu++) {
3313 if (fu->fu_valid != 0x3) {
3314 /*
3315 * XXX: need timer before retransmission
3316 */
3317 fwohci_uid_req(sc, n);
3318 }
3319 }
3320 return -1;
3321 }
3322
3323 /*
3324 * functions to support network interface
3325 */
/*
 * Register (or, when 'handler' is NULL, unregister) the network
 * interface's receive path: asynchronous block writes to (offhi, offlo)
 * and GASP-tagged asynchronous stream data on the broadcast channel are
 * both routed through fwohci_if_input() to 'handler'.
 */
static int
fwohci_if_inreg(struct device *self, u_int32_t offhi, u_int32_t offlo,
    void (*handler)(struct device *, struct mbuf *))
{
	struct fwohci_softc *sc = (struct fwohci_softc *)self;

	fwohci_handler_set(sc, IEEE1394_TCODE_WRITE_REQ_BLOCK, offhi, offlo, 0,
	    handler ? fwohci_if_input : NULL, handler);
	fwohci_handler_set(sc, IEEE1394_TCODE_STREAM_DATA,
	    (sc->sc_csr[CSR_SB_BROADCAST_CHANNEL] & IEEE1394_ISOCH_MASK) |
	    OHCI_ASYNC_STREAM,
	    1 << IEEE1394_TAG_GASP, 0,
	    handler ? fwohci_if_input : NULL, handler);
	return 0;
}
3341
/*
 * Receive path for the network interface: convert an incoming
 * asynchronous packet into an mbuf carrying a 16-byte pseudo-header
 * (sender EUI-64, sender/destination info, node id and speed) followed
 * by the payload, and pass it to the child interface via 'handler'.
 * Packets from nodes without a complete UID entry are dropped (and a UID
 * request is sent so the next packet can be accepted).
 */
static int
fwohci_if_input(struct fwohci_softc *sc, void *arg, struct fwohci_pkt *pkt)
{
	int n, len;
	struct mbuf *m;
	struct iovec *iov;
	void (*handler)(struct device *, struct mbuf *) = arg;

#ifdef FW_DEBUG
	int i;
	DPRINTFN(1, ("fwohci_if_input: tcode=0x%x, dlen=%d", pkt->fp_tcode,
	    pkt->fp_dlen));
	for (i = 0; i < pkt->fp_hlen/4; i++)
		DPRINTFN(2, ("%s%08x", i?" ":"\n ", pkt->fp_hdr[i]));
	DPRINTFN(2, ("$"));
	for (n = 0, len = pkt->fp_dlen; len > 0; len -= i, n++){
		iov = &pkt->fp_iov[n];
		for (i = 0; i < iov->iov_len; i++)
			DPRINTFN(2, ("%s%02x", (i%32)?((i%4)?"":" "):"\n ",
			    ((u_int8_t *)iov->iov_base)[i]));
		DPRINTFN(2, ("$"));
	}
	DPRINTFN(1, ("\n"));
#endif /* FW_DEBUG */
	len = pkt->fp_dlen;
	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return IEEE1394_RCODE_COMPLETE;
	/* Reserve 16 bytes for the pseudo-header built below. */
	m->m_len = 16;
	if (len + m->m_len > MHLEN) {
		MCLGET(m, M_DONTWAIT);
		if ((m->m_flags & M_EXT) == 0) {
			m_freem(m);
			return IEEE1394_RCODE_COMPLETE;
		}
	}
	/* Sender's physical id from the packet header's source field. */
	n = (pkt->fp_hdr[1] >> 16) & OHCI_NodeId_NodeNumber;
	if (sc->sc_uidtbl == NULL || n > sc->sc_rootid ||
	    sc->sc_uidtbl[n].fu_valid != 0x3) {
		printf("%s: packet from unknown node: phy id %d\n",
		    sc->sc_sc1394.sc1394_dev.dv_xname, n);
		m_freem(m);
		/* Try to learn the sender's EUI-64 for next time. */
		fwohci_uid_req(sc, n);
		return IEEE1394_RCODE_COMPLETE;
	}
	/* Pseudo-header bytes 0-7: sender's EUI-64. */
	memcpy(mtod(m, caddr_t), sc->sc_uidtbl[n].fu_uid, 8);
	if (pkt->fp_tcode == IEEE1394_TCODE_STREAM_DATA) {
		m->m_flags |= M_BCAST;
		mtod(m, u_int32_t *)[2] = mtod(m, u_int32_t *)[3] = 0;
	} else {
		mtod(m, u_int32_t *)[2] = htonl(pkt->fp_hdr[1]);
		mtod(m, u_int32_t *)[3] = htonl(pkt->fp_hdr[2]);
	}
	mtod(m, u_int8_t *)[8] = n;	/*XXX: node id for debug */
	/* Byte 9: receive speed extracted from the trailer status word. */
	mtod(m, u_int8_t *)[9] =
	    (*pkt->fp_trail >> (16 + OHCI_CTXCTL_SPD_BITPOS)) &
	    ((1 << OHCI_CTXCTL_SPD_BITLEN) - 1);

	m->m_pkthdr.rcvif = NULL;	/* set in child */
	m->m_pkthdr.len = len + m->m_len;
	/*
	 * We could use the receive buffer as external mbuf storage instead
	 * of copying here.  But the asynchronous receive buffer must
	 * operate in buffer-fill mode, so each receive buffer would be
	 * shared by multiple mbufs.  If the upper layer doesn't free the
	 * mbuf soon, e.g. because the application program is suspended,
	 * the buffer would have to be reallocated.
	 * The isochronous buffer must operate in packet-buffer mode, and
	 * there it is easy to map the receive buffer to an external mbuf.
	 * But it is used for broadcast/multicast only, which is expected
	 * not to be performance sensitive for now.
	 * XXX: The performance may be important for the multicast case,
	 * so we should revisit here later.
	 *						-- onoe
	 */
	n = 0;
	iov = pkt->fp_uio.uio_iov;
	while (len > 0) {
		memcpy(mtod(m, caddr_t) + m->m_len, iov->iov_base,
		    iov->iov_len);
		m->m_len += iov->iov_len;
		len -= iov->iov_len;
		iov++;
	}
	(*handler)(sc->sc_sc1394.sc1394_if, m);
	return IEEE1394_RCODE_COMPLETE;
}
3428
/*
 * Isochronous receive path for the network interface.  Like
 * fwohci_if_input() this builds a 16-byte pseudo-header in front of the
 * payload, but additionally records the channel and tag (bytes 14/15)
 * and only fills in the sender identity for GASP-tagged packets; all
 * packets are marked M_BCAST.
 * NOTE(review): largely duplicates fwohci_if_input() — candidates for a
 * shared helper if this path is ever touched again.
 */
static int
fwohci_if_input_iso(struct fwohci_softc *sc, void *arg, struct fwohci_pkt *pkt)
{
	int n, len;
	int chan, tag;
	struct mbuf *m;
	struct iovec *iov;
	void (*handler)(struct device *, struct mbuf *) = arg;
#ifdef FW_DEBUG
	int i;
#endif

	/* Isochronous channel and tag from the packet header. */
	chan = (pkt->fp_hdr[0] & 0x00003f00) >> 8;
	tag = (pkt->fp_hdr[0] & 0x0000c000) >> 14;
#ifdef FW_DEBUG
	DPRINTFN(1, ("fwohci_if_input_iso: "
	    "tcode=0x%x, chan=%d, tag=%x, dlen=%d",
	    pkt->fp_tcode, chan, tag, pkt->fp_dlen));
	for (i = 0; i < pkt->fp_hlen/4; i++)
		DPRINTFN(2, ("%s%08x", i?" ":"\n\t", pkt->fp_hdr[i]));
	DPRINTFN(2, ("$"));
	for (n = 0, len = pkt->fp_dlen; len > 0; len -= i, n++){
		iov = &pkt->fp_iov[n];
		for (i = 0; i < iov->iov_len; i++)
			DPRINTFN(2, ("%s%02x",
			    (i%32)?((i%4)?"":" "):"\n\t",
			    ((u_int8_t *)iov->iov_base)[i]));
		DPRINTFN(2, ("$"));
	}
	DPRINTFN(2, ("\n"));
#endif /* FW_DEBUG */
	len = pkt->fp_dlen;
	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return IEEE1394_RCODE_COMPLETE;
	/* Reserve 16 bytes for the pseudo-header. */
	m->m_len = 16;
	if (m->m_len + len > MHLEN) {
		MCLGET(m, M_DONTWAIT);
		if ((m->m_flags & M_EXT) == 0) {
			m_freem(m);
			return IEEE1394_RCODE_COMPLETE;
		}
	}

	m->m_flags |= M_BCAST;

	if (tag == IEEE1394_TAG_GASP) {
		/* GASP packets carry a source id we can map to a UID. */
		n = (pkt->fp_hdr[1] >> 16) & OHCI_NodeId_NodeNumber;
		if (sc->sc_uidtbl == NULL || n > sc->sc_rootid ||
		    sc->sc_uidtbl[n].fu_valid != 0x3) {
			printf("%s: packet from unknown node: phy id %d\n",
			    sc->sc_sc1394.sc1394_dev.dv_xname, n);
			m_freem(m);
			return IEEE1394_RCODE_COMPLETE;
		}
		memcpy(mtod(m, caddr_t), sc->sc_uidtbl[n].fu_uid, 8);
		mtod(m, u_int32_t *)[2] = htonl(pkt->fp_hdr[1]);
		mtod(m, u_int32_t *)[3] = htonl(pkt->fp_hdr[2]);
		mtod(m, u_int8_t *)[8] = n;	/*XXX: node id for debug */
		mtod(m, u_int8_t *)[9] =
		    (*pkt->fp_trail >> (16 + OHCI_CTXCTL_SPD_BITPOS)) &
		    ((1 << OHCI_CTXCTL_SPD_BITLEN) - 1);
	}
	/* Bytes 14/15: isochronous channel and tag. */
	mtod(m, u_int8_t *)[14] = chan;
	mtod(m, u_int8_t *)[15] = tag;


	m->m_pkthdr.rcvif = NULL;	/* set in child */
	m->m_pkthdr.len = len + m->m_len;
	/*
	 * We could use the receive buffer as external mbuf storage instead
	 * of copying here; see the identical note in fwohci_if_input().
	 * XXX: The performance may be important for the multicast case,
	 * so we should revisit here later.
	 *						-- onoe
	 */
	n = 0;
	iov = pkt->fp_uio.uio_iov;
	while (len > 0) {
		memcpy(mtod(m, caddr_t) + m->m_len, iov->iov_base,
		    iov->iov_len);
		m->m_len += iov->iov_len;
		len -= iov->iov_len;
		iov++;
	}
	(*handler)(sc->sc_sc1394.sc1394_if, m);
	return IEEE1394_RCODE_COMPLETE;
}
3524
3525
3526
/*
 * Transmit path for the network interface.  The first 16 bytes of m0 are
 * a pseudo-header: bytes 0-7 hold the destination EUI-64 (or the GASP
 * group address for broadcast), byte 8 the max_rec code, byte 9 the
 * speed, and bytes 10-15 the destination offset.  Unicast packets become
 * block write requests; broadcast/multicast packets become GASP
 * asynchronous stream packets on the broadcast channel.
 * 'callback' (if set) receives the mbuf back after transmission or on
 * failure; otherwise the mbuf is freed here.
 */
static int
fwohci_if_output(struct device *self, struct mbuf *m0,
    void (*callback)(struct device *, struct mbuf *))
{
	struct fwohci_softc *sc = (struct fwohci_softc *)self;
	struct fwohci_pkt pkt;
	u_int8_t *p;
	int n = 0, error, spd, hdrlen, maxrec;	/* XXX: gcc */
#ifdef FW_DEBUG
	struct mbuf *m;
#endif

	p = mtod(m0, u_int8_t *);
	if (m0->m_flags & (M_BCAST | M_MCAST)) {
		/* Broadcast: fixed conservative speed/size defaults. */
		spd = IEEE1394_SPD_S100;	/*XXX*/
		maxrec = 512;	/*XXX*/
		hdrlen = 8;
	} else {
		/* Unicast: map destination EUI-64 to a node id. */
		n = fwohci_uid_lookup(sc, p);
		if (n < 0) {
			printf("%s: nodeid unknown:"
			    " %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x\n",
			    sc->sc_sc1394.sc1394_dev.dv_xname,
			    p[0], p[1], p[2], p[3], p[4], p[5], p[6], p[7]);
			error = EHOSTUNREACH;
			goto end;
		}
		if (n == IEEE1394_BCAST_PHY_ID) {
			printf("%s: broadcast with !M_MCAST\n",
			    sc->sc_sc1394.sc1394_dev.dv_xname);
#ifdef FW_DEBUG
			DPRINTFN(2, ("packet:"));
			for (m = m0; m != NULL; m = m->m_next) {
				for (n = 0; n < m->m_len; n++)
					DPRINTFN(2, ("%s%02x", (n%32)?
					    ((n%4)?"":" "):"\n ",
					    mtod(m, u_int8_t *)[n]));
				DPRINTFN(2, ("$"));
			}
			DPRINTFN(2, ("\n"));
#endif
			error = EHOSTUNREACH;
			goto end;
		}
		/* Peer limits from the pseudo-header. */
		maxrec = 2 << p[8];
		spd = p[9];
		hdrlen = 0;
	}
	/* Clamp speed and max record size to what link and peer allow. */
	if (spd > sc->sc_sc1394.sc1394_link_speed) {
		DPRINTF(("fwohci_if_output: spd (%d) is faster than %d\n",
		    spd, sc->sc_sc1394.sc1394_link_speed));
		spd = sc->sc_sc1394.sc1394_link_speed;
	}
	if (maxrec > (512 << spd)) {
		DPRINTF(("fwohci_if_output: maxrec (%d) is larger for spd (%d)"
		    "\n", maxrec, spd));
		maxrec = 512 << spd;
	}
	while (maxrec > sc->sc_sc1394.sc1394_max_receive) {
		DPRINTF(("fwohci_if_output: maxrec (%d) is larger than"
		    " %d\n", maxrec, sc->sc_sc1394.sc1394_max_receive));
		maxrec >>= 1;
	}
	if (maxrec < 512) {
		DPRINTF(("fwohci_if_output: maxrec (%d) is smaller than "
		    "minimum\n", maxrec));
		maxrec = 512;
	}

	/* Strip the pseudo-header, keeping 8 bytes for the GASP header. */
	m_adj(m0, 16 - hdrlen);
	if (m0->m_pkthdr.len > maxrec) {
		DPRINTF(("fwohci_if_output: packet too big: hdr %d, pktlen "
		    "%d, maxrec %d\n", hdrlen, m0->m_pkthdr.len, maxrec));
		error = E2BIG;	/*XXX*/
		goto end;
	}

	memset(&pkt, 0, sizeof(pkt));
	pkt.fp_uio.uio_iov = pkt.fp_iov;
	pkt.fp_uio.uio_segflg = UIO_SYSSPACE;
	pkt.fp_uio.uio_rw = UIO_WRITE;
	if (m0->m_flags & (M_BCAST | M_MCAST)) {
		/* construct GASP header */
		p = mtod(m0, u_int8_t *);
		p[0] = sc->sc_nodeid >> 8;
		p[1] = sc->sc_nodeid & 0xff;
		p[2] = 0x00; p[3] = 0x00; p[4] = 0x5e;
		p[5] = 0x00; p[6] = 0x00; p[7] = 0x01;
		pkt.fp_tcode = IEEE1394_TCODE_STREAM_DATA;
		pkt.fp_hlen = 8;
		pkt.fp_hdr[0] = (spd << 16) | (IEEE1394_TAG_GASP << 14) |
		    ((sc->sc_csr[CSR_SB_BROADCAST_CHANNEL] &
		    OHCI_NodeId_NodeNumber) << 8);
		pkt.fp_hdr[1] = m0->m_pkthdr.len << 16;
	} else {
		/* Block write request addressed by pseudo-header 10-15. */
		pkt.fp_tcode = IEEE1394_TCODE_WRITE_REQ_BLOCK;
		pkt.fp_hlen = 16;
		pkt.fp_hdr[0] = 0x00800100 | (sc->sc_tlabel << 10) |
		    (spd << 16);
		pkt.fp_hdr[1] =
		    (((sc->sc_nodeid & OHCI_NodeId_BusNumber) | n) << 16) |
		    (p[10] << 8) | p[11];
		pkt.fp_hdr[2] = (p[12]<<24) | (p[13]<<16) | (p[14]<<8) | p[15];
		pkt.fp_hdr[3] = m0->m_pkthdr.len << 16;
		sc->sc_tlabel = (sc->sc_tlabel + 1) & 0x3f;
	}
	pkt.fp_hdr[0] |= (pkt.fp_tcode << 4);
	pkt.fp_dlen = m0->m_pkthdr.len;
	pkt.fp_m = m0;
	pkt.fp_callback = callback;
	error = fwohci_at_output(sc, sc->sc_ctx_atrq, &pkt);
	m0 = pkt.fp_m;
end:
	if (m0 != NULL) {
		/* Return or free the mbuf if it wasn't consumed. */
		if (callback)
			(*callback)(sc->sc_sc1394.sc1394_if, m0);
		else
			m_freem(m0);
	}
	return error;
}
3648
3649 /*
3650 * High level routines to provide abstraction to attaching layers to
3651 * send/receive data.
3652 */
3653
3654 /*
3655 * These break down into 4 routines as follows:
3656 *
3657 * int fwohci_read(struct ieee1394_abuf *)
3658 *
3659 * This routine will attempt to read a region from the requested node.
3660 * A callback must be provided which will be called when either the completed
3661 * read is done or an unrecoverable error occurs. This is mainly a convenience
3662 * routine since it will encapsulate retrying a region as quadlet vs. block
3663 * reads and recombining all the returned data. This could also be done with a
3664 * series of write/inreg's for each packet sent.
3665 *
 * int fwohci_write(struct ieee1394_abuf *)
 *
 * The workhorse main entry point for putting packets on the bus. This is the
 * generalized interface for fwnode/etc. code to put packets out onto the bus.
 * It accepts all standard ieee1394 tcodes (XXX: only a few today) and
 * optionally calls back via a function pointer to the calling code with the
 * resulting ACK code from the packet. If the ACK code is to be ignored (i.e.
 * no callback) then the write routine takes care of freeing the abuf, since
 * the fwnode/etc. code has no way of knowing when it is safe to do so. This
 * allows simple one-off packets to be sent from the upper-level code without
 * worrying about a callback for cleanup.
3677 *
3678 * int fwohci_inreg(struct ieee1394_abuf *, int)
3679 *
3680 * This is very simple. It evals the abuf passed in and registers an internal
3681 * handler as the callback for packets received for that operation.
3682 * The integer argument specifies whether on a block read/write operation to
3683 * allow sub-regions to be read/written (in block form) as well.
3684 *
3685 * XXX: This whole structure needs to be redone as a list of regions and
3686 * operations allowed on those regions.
3687 *
3688 * int fwohci_unreg(struct ieee1394_abuf *, int)
3689 *
3690 * This simply unregisters the respective callback done via inreg for items
3691 * which only need to register an area for a one-time operation (like a status
3692 * buffer a remote node will write to when the current operation is done). The
3693 * int argument specifies the same behavior as inreg, except in reverse (i.e.
3694 * it unregisters).
3695 */
3696
3697 static int
3698 fwohci_read(struct ieee1394_abuf *ab)
3699 {
3700 struct fwohci_pkt pkt;
3701 struct ieee1394_softc *sc = ab->ab_req;
3702 struct fwohci_softc *psc =
3703 (struct fwohci_softc *)sc->sc1394_dev.dv_parent;
3704 struct fwohci_cb *fcb;
3705 u_int32_t high, lo;
3706 int rv, tcode;
3707
3708 /* Have to have a callback when reading. */
3709 if (ab->ab_cb == NULL)
3710 return -1;
3711
3712 fcb = malloc(sizeof(struct fwohci_cb), M_DEVBUF, M_WAITOK);
3713 fcb->ab = ab;
3714 fcb->count = 0;
3715 fcb->abuf_valid = 1;
3716
3717 high = ((ab->ab_addr & 0x0000ffff00000000ULL) >> 32);
3718 lo = (ab->ab_addr & 0x00000000ffffffffULL);
3719
3720 memset(&pkt, 0, sizeof(pkt));
3721 pkt.fp_hdr[1] = ((0xffc0 | ab->ab_req->sc1394_node_id) << 16) | high;
3722 pkt.fp_hdr[2] = lo;
3723 pkt.fp_dlen = 0;
3724
3725 if (ab->ab_length == 4) {
3726 pkt.fp_tcode = IEEE1394_TCODE_READ_REQ_QUAD;
3727 tcode = IEEE1394_TCODE_READ_RESP_QUAD;
3728 pkt.fp_hlen = 12;
3729 } else {
3730 pkt.fp_tcode = IEEE1394_TCODE_READ_REQ_BLOCK;
3731 pkt.fp_hlen = 16;
3732 tcode = IEEE1394_TCODE_READ_RESP_BLOCK;
3733 pkt.fp_hdr[3] = (ab->ab_length << 16);
3734 }
3735 pkt.fp_hdr[0] = 0x00000100 | (sc->sc1394_link_speed << 16) |
3736 (psc->sc_tlabel << 10) | (pkt.fp_tcode << 4);
3737
3738 pkt.fp_statusarg = fcb;
3739 pkt.fp_statuscb = fwohci_read_resp;
3740
3741 rv = fwohci_handler_set(psc, tcode, ab->ab_req->sc1394_node_id,
3742 psc->sc_tlabel, 0, fwohci_read_resp, fcb);
3743 if (rv)
3744 return rv;
3745 rv = fwohci_at_output(psc, psc->sc_ctx_atrq, &pkt);
3746 if (rv)
3747 fwohci_handler_set(psc, tcode, ab->ab_req->sc1394_node_id,
3748 psc->sc_tlabel, 0, NULL, NULL);
3749 psc->sc_tlabel = (psc->sc_tlabel + 1) & 0x3f;
3750 fcb->count = 1;
3751 return rv;
3752 }
3753
/*
 * Transmit an asynchronous packet described by an ieee1394_abuf.
 * Handles write requests as well as read/write responses (dispatched to
 * the response context); the transmit ACK is reported to the caller via
 * fwohci_write_ack(), which also frees the abuf when no callback is set.
 * Exactly one of ab_data / ab_uio supplies the payload.
 */
static int
fwohci_write(struct ieee1394_abuf *ab)
{
	struct fwohci_pkt pkt;
	struct ieee1394_softc *sc = ab->ab_req;
	struct fwohci_softc *psc =
	    (struct fwohci_softc *)sc->sc1394_dev.dv_parent;
	u_int32_t high, lo;
	int rv;

	/* Enforce the peer's max record size for block writes. */
	if (ab->ab_tcode == IEEE1394_TCODE_WRITE_REQ_BLOCK) {
		if (ab->ab_length > IEEE1394_MAX_REC(sc->sc1394_max_receive)) {
			DPRINTF(("Packet too large: %d\n", ab->ab_length));
			return E2BIG;
		}
	}

	/* And the absolute asynchronous limit for our link speed. */
	if (ab->ab_length >
	    IEEE1394_MAX_ASYNCH_FOR_SPEED(sc->sc1394_link_speed)) {
		DPRINTF(("Packet too large: %d\n", ab->ab_length));
		return E2BIG;
	}

	if (ab->ab_data && ab->ab_uio)
		panic("Can't call with uio and data set");
	if ((ab->ab_data == NULL) && (ab->ab_uio == NULL))
		panic("One of either ab_data or ab_uio must be set");

	memset(&pkt, 0, sizeof(pkt));

	pkt.fp_tcode = ab->ab_tcode;
	if (ab->ab_data) {
		pkt.fp_uio.uio_iov = pkt.fp_iov;
		pkt.fp_uio.uio_segflg = UIO_SYSSPACE;
		pkt.fp_uio.uio_rw = UIO_WRITE;
	} else
		memcpy(&pkt.fp_uio, ab->ab_uio, sizeof(struct uio));

	pkt.fp_statusarg = ab;
	pkt.fp_statuscb = fwohci_write_ack;

	switch (ab->ab_tcode) {
	case IEEE1394_TCODE_WRITE_RESP:
		pkt.fp_hlen = 12;
		/* FALLTHROUGH */
	case IEEE1394_TCODE_READ_RESP_QUAD:
	case IEEE1394_TCODE_READ_RESP_BLOCK:
		if (!pkt.fp_hlen)
			pkt.fp_hlen = 16;
		/*
		 * NOTE(review): ab_retlen is packed into the low 16 bits
		 * of fp_hdr[1] for responses (and then cleared) —
		 * presumably carrying the rcode field; confirm against
		 * the response-header layout used by the peers.
		 */
		high = ab->ab_retlen;
		ab->ab_retlen = 0;
		lo = 0;
		/* Responses reuse the requester's transaction label. */
		pkt.fp_hdr[0] = 0x00000100 | (sc->sc1394_link_speed << 16) |
		    (ab->ab_tlabel << 10) | (pkt.fp_tcode << 4);
		break;
	default:
		pkt.fp_hlen = 16;
		/* Requests carry the 48-bit destination offset. */
		high = ((ab->ab_addr & 0x0000ffff00000000ULL) >> 32);
		lo = (ab->ab_addr & 0x00000000ffffffffULL);
		pkt.fp_hdr[0] = 0x00000100 | (sc->sc1394_link_speed << 16) |
		    (psc->sc_tlabel << 10) | (pkt.fp_tcode << 4);
		psc->sc_tlabel = (psc->sc_tlabel + 1) & 0x3f;
		break;
	}

	pkt.fp_hdr[1] = ((0xffc0 | ab->ab_req->sc1394_node_id) << 16) | high;
	pkt.fp_hdr[2] = lo;
	if (pkt.fp_hlen == 16) {
		if (ab->ab_length == 4) {
			/*
			 * Quadlet payload rides in the header itself.
			 * NOTE(review): assumes ab_data != NULL here — a
			 * 4-byte ab_uio-only request would dereference
			 * NULL; confirm callers never do that.
			 */
			pkt.fp_hdr[3] = ab->ab_data[0];
			pkt.fp_dlen = 0;
		} else {
			pkt.fp_hdr[3] = (ab->ab_length << 16);
			pkt.fp_dlen = ab->ab_length;
			if (ab->ab_data) {
				pkt.fp_uio.uio_iovcnt = 1;
				pkt.fp_uio.uio_resid = ab->ab_length;
				pkt.fp_iov[0].iov_base = ab->ab_data;
				pkt.fp_iov[0].iov_len = ab->ab_length;
			}
		}
	}
	/* Responses go out the response context, requests the request one. */
	switch (ab->ab_tcode) {
	case IEEE1394_TCODE_WRITE_RESP:
	case IEEE1394_TCODE_READ_RESP_QUAD:
	case IEEE1394_TCODE_READ_RESP_BLOCK:
		rv = fwohci_at_output(psc, psc->sc_ctx_atrs, &pkt);
		break;
	default:
		rv = fwohci_at_output(psc, psc->sc_ctx_atrq, &pkt);
		break;
	}
	return rv;
}
3847
/*
 * Combined ACK-status and response handler for reads started by
 * fwohci_read().  'arg' is the fwohci_cb tracking the abuf; fcb->count
 * counts outstanding status callbacks and fcb->abuf_valid is cleared
 * once the user callback has been invoked, after which the fcb is freed
 * when the count drains to zero.
 */
static int
fwohci_read_resp(struct fwohci_softc *sc, void *arg, struct fwohci_pkt *pkt)
{
	struct fwohci_cb *fcb = arg;
	struct ieee1394_abuf *ab = fcb->ab;
	struct fwohci_pkt newpkt;
	u_int32_t *cur, high, lo;
	int i, tcode, rcode, status, rv;

	/*
	 * Both the ACK handling and normal response callbacks are handled here.
	 * The main reason for this is the various error conditions that can
	 * occur trying to block read some areas and the ways that gets reported
	 * back to calling station. This is a variety of ACK codes, responses,
	 * etc which makes it much more difficult to process if both aren't
	 * handled here.
	 */

	/* Check for status packet (tcode -1 marks a transmit ACK). */

	if (pkt->fp_tcode == -1) {
		status = pkt->fp_status & OHCI_DESC_STATUS_ACK_MASK;
		rcode = -1;
		tcode = (pkt->fp_hdr[0] >> 4) & 0xf;
		if ((status != OHCI_CTXCTL_EVENT_ACK_COMPLETE) &&
		    (status != OHCI_CTXCTL_EVENT_ACK_PENDING))
			DPRINTFN(2, ("Got status packet: 0x%02x\n",
			    (unsigned int)status));
		fcb->count--;

		/*
		 * Got all the ACKs back and the buffer is invalid (i.e. the
		 * callback has been called).  Clean up.
		 */

		if (fcb->abuf_valid == 0) {
			if (fcb->count == 0)
				free(fcb, M_DEVBUF);
			return IEEE1394_RCODE_COMPLETE;
		}
	} else {
		/* Real response packet: pull tcode/rcode from the header. */
		status = -1;
		tcode = pkt->fp_tcode;
		rcode = (pkt->fp_hdr[1] & 0x0000f000) >> 12;
	}

	/*
	 * Some areas (like the config ROM) want to be read as quadlets
	 * only.
	 *
	 * The current ideas to try are:
	 *
	 * Got an ACK_TYPE_ERROR on a block read.
	 *
	 * Got either RCODE_TYPE or RCODE_ADDRESS errors in a block read
	 * response.
	 *
	 * In all cases construct a new packet for a quadlet read and let
	 * fwohci_read_multi_resp() handle the iteration over the space.
	 */

	if (((status == OHCI_CTXCTL_EVENT_ACK_TYPE_ERROR) &&
	    (tcode == IEEE1394_TCODE_READ_REQ_BLOCK)) ||
	    (((rcode == IEEE1394_RCODE_TYPE_ERROR) ||
	    (rcode == IEEE1394_RCODE_ADDRESS_ERROR)) &&
	    (tcode == IEEE1394_TCODE_READ_RESP_BLOCK))) {

		/* Read the area in quadlet chunks (internally track this). */

		memset(&newpkt, 0, sizeof(newpkt));

		high = ((ab->ab_addr & 0x0000ffff00000000ULL) >> 32);
		lo = (ab->ab_addr & 0x00000000ffffffffULL);

		newpkt.fp_tcode = IEEE1394_TCODE_READ_REQ_QUAD;
		newpkt.fp_hlen = 12;
		newpkt.fp_dlen = 0;
		newpkt.fp_hdr[1] =
		    ((0xffc0 | ab->ab_req->sc1394_node_id) << 16) | high;
		newpkt.fp_hdr[2] = lo;
		newpkt.fp_hdr[0] = 0x00000100 | (sc->sc_tlabel << 10) |
		    (newpkt.fp_tcode << 4);

		rv = fwohci_handler_set(sc, IEEE1394_TCODE_READ_RESP_QUAD,
		    ab->ab_req->sc1394_node_id, sc->sc_tlabel, 0,
		    fwohci_read_multi_resp, fcb);
		if (rv) {
			(*ab->ab_cb)(ab, -1);
			goto cleanup;
		}
		newpkt.fp_statusarg = fcb;
		newpkt.fp_statuscb = fwohci_read_resp;
		rv = fwohci_at_output(sc, sc->sc_ctx_atrq, &newpkt);
		if (rv) {
			fwohci_handler_set(sc, IEEE1394_TCODE_READ_RESP_QUAD,
			    ab->ab_req->sc1394_node_id, sc->sc_tlabel, 0, NULL,
			    NULL);
			(*ab->ab_cb)(ab, -1);
			goto cleanup;
		}
		/* Another status callback is now outstanding. */
		fcb->count++;
		sc->sc_tlabel = (sc->sc_tlabel + 1) & 0x3f;
		return IEEE1394_RCODE_COMPLETE;
	} else if ((rcode != -1) || ((status != -1) &&
	    (status != OHCI_CTXCTL_EVENT_ACK_COMPLETE) &&
	    (status != OHCI_CTXCTL_EVENT_ACK_PENDING))) {

		/*
		 * Recombine all the iov data into 1 chunk for higher
		 * level code.
		 */

		if (rcode != -1) {
			cur = ab->ab_data;
			for (i = 0; i < pkt->fp_uio.uio_iovcnt; i++) {
				/*
				 * Make sure and don't exceed the buffer
				 * allocated for return.
				 */
				if ((ab->ab_retlen + pkt->fp_iov[i].iov_len) >
				    ab->ab_length) {
					memcpy(cur, pkt->fp_iov[i].iov_base,
					    (ab->ab_length - ab->ab_retlen));
					ab->ab_retlen = ab->ab_length;
					break;
				}
				memcpy(cur, pkt->fp_iov[i].iov_base,
				    pkt->fp_iov[i].iov_len);
				cur += pkt->fp_iov[i].iov_len;
				ab->ab_retlen += pkt->fp_iov[i].iov_len;
			}
		}
		/* Bad ACK: tear down any handler left on any tlabel. */
		if (status != -1)
			/* XXX: Need a complete tlabel interface. */
			for (i = 0; i < 64; i++)
				fwohci_handler_set(sc,
				    IEEE1394_TCODE_READ_RESP_QUAD,
				    ab->ab_req->sc1394_node_id, i, 0, NULL,
				    NULL);
		(*ab->ab_cb)(ab, rcode);
		goto cleanup;
	} else
		/* Good ack packet. */
		return IEEE1394_RCODE_COMPLETE;

	/* Can't get here unless ab->ab_cb has been called. */

cleanup:
	fcb->abuf_valid = 0;
	if (fcb->count == 0)
		free(fcb, M_DEVBUF);
	return IEEE1394_RCODE_COMPLETE;
}
4000
/*
 * Response handler for the quadlet-at-a-time fallback started by
 * fwohci_read_resp(): accumulate each returned quadlet into ab_data,
 * then either issue the next quadlet read or invoke the user callback
 * when the requested length has been satisfied (or an error occurred).
 * Shares the fcb lifetime rules of fwohci_read_resp().
 */
static int
fwohci_read_multi_resp(struct fwohci_softc *sc, void *arg,
    struct fwohci_pkt *pkt)
{
	struct fwohci_cb *fcb = arg;
	struct ieee1394_abuf *ab = fcb->ab;
	struct fwohci_pkt newpkt;
	u_int32_t high, lo;
	int rcode, rv;

	/* Make sure a response packet didn't arrive after a bad ACK. */
	if (fcb->abuf_valid == 0)
		return IEEE1394_RCODE_COMPLETE;

	rcode = (pkt->fp_hdr[1] & 0x0000f000) >> 12;

	/*
	 * Bad return code from the wire: just hand back whatever has
	 * accumulated in the buffer so far.
	 */
	if (rcode) {
		(*ab->ab_cb)(ab, rcode);
		goto cleanup;
	}

	/* Append this quadlet, clamped to the caller's buffer length. */
	if ((ab->ab_retlen + pkt->fp_iov[0].iov_len) > ab->ab_length) {
		memcpy(((char *)ab->ab_data + ab->ab_retlen),
		    pkt->fp_iov[0].iov_base, (ab->ab_length - ab->ab_retlen));
		ab->ab_retlen = ab->ab_length;
	} else {
		memcpy(((char *)ab->ab_data + ab->ab_retlen),
		    pkt->fp_iov[0].iov_base, 4);
		ab->ab_retlen += 4;
	}
	/* Still more, loop and read 4 more bytes. */
	if (ab->ab_retlen < ab->ab_length) {
		memset(&newpkt, 0, sizeof(newpkt));

		high = ((ab->ab_addr & 0x0000ffff00000000ULL) >> 32);
		/* Advance the target offset by what has been read so far. */
		lo = (ab->ab_addr & 0x00000000ffffffffULL) + ab->ab_retlen;

		newpkt.fp_tcode = IEEE1394_TCODE_READ_REQ_QUAD;
		newpkt.fp_hlen = 12;
		newpkt.fp_dlen = 0;
		newpkt.fp_hdr[1] =
		    ((0xffc0 | ab->ab_req->sc1394_node_id) << 16) | high;
		newpkt.fp_hdr[2] = lo;
		newpkt.fp_hdr[0] = 0x00000100 | (sc->sc_tlabel << 10) |
		    (newpkt.fp_tcode << 4);

		newpkt.fp_statusarg = fcb;
		newpkt.fp_statuscb = fwohci_read_resp;

		/*
		 * On failure to register or transmit, give up and return
		 * what has come in so far.
		 */
		rv = fwohci_handler_set(sc, IEEE1394_TCODE_READ_RESP_QUAD,
		    ab->ab_req->sc1394_node_id, sc->sc_tlabel, 0,
		    fwohci_read_multi_resp, fcb);
		if (rv)
			(*ab->ab_cb)(ab, -1);
		else {
			rv = fwohci_at_output(sc, sc->sc_ctx_atrq, &newpkt);
			if (rv) {
				fwohci_handler_set(sc,
				    IEEE1394_TCODE_READ_RESP_QUAD,
				    ab->ab_req->sc1394_node_id, sc->sc_tlabel,
				    0, NULL, NULL);
				(*ab->ab_cb)(ab, -1);
			} else {
				sc->sc_tlabel = (sc->sc_tlabel + 1) & 0x3f;
				fcb->count++;
				return IEEE1394_RCODE_COMPLETE;
			}
		}
	} else
		/* Whole region read: report success. */
		(*ab->ab_cb)(ab, IEEE1394_RCODE_COMPLETE);

cleanup:
	/* Can't get here unless ab_cb has been called. */
	fcb->abuf_valid = 0;
	if (fcb->count == 0)
		free(fcb, M_DEVBUF);
	return IEEE1394_RCODE_COMPLETE;
}
4088
4089 static int
4090 fwohci_write_ack(struct fwohci_softc *sc, void *arg, struct fwohci_pkt *pkt)
4091 {
4092 struct ieee1394_abuf *ab = arg;
4093 u_int16_t status;
4094
4095
4096 status = pkt->fp_status & OHCI_DESC_STATUS_ACK_MASK;
4097 if ((status != OHCI_CTXCTL_EVENT_ACK_COMPLETE) &&
4098 (status != OHCI_CTXCTL_EVENT_ACK_PENDING))
4099 DPRINTF(("Got status packet: 0x%02x\n",
4100 (unsigned int)status));
4101
4102 /* No callback means this level should free the buffers. */
4103 if (ab->ab_cb)
4104 (*ab->ab_cb)(ab, status);
4105 else {
4106 if (ab->ab_data)
4107 free(ab->ab_data, M_1394DATA);
4108 free(ab, M_1394DATA);
4109 }
4110 return IEEE1394_RCODE_COMPLETE;
4111 }
4112
/*
 * Register (or unregister, when ab->ab_cb is NULL) a handler for
 * incoming requests addressed to ab->ab_addr.  For block tcodes,
 * 'allow' permits sub-region accesses by registering with ab_length as
 * the match length; in that case ab_subok is also set.  Returns 0 on
 * success, the fwohci_handler_set() error, or -1 for an unsupported
 * tcode.
 */
static int
fwohci_inreg(struct ieee1394_abuf *ab, int allow)
{
	struct ieee1394_softc *sc = ab->ab_req;
	struct fwohci_softc *psc =
	    (struct fwohci_softc *)sc->sc1394_dev.dv_parent;
	u_int32_t high, lo;
	int rv;

	/* Split the 48-bit address into the two handler-key halves. */
	high = ((ab->ab_addr & 0x0000ffff00000000ULL) >> 32);
	lo = (ab->ab_addr & 0x00000000ffffffffULL);

	rv = 0;
	switch (ab->ab_tcode) {
	case IEEE1394_TCODE_READ_REQ_QUAD:
	case IEEE1394_TCODE_WRITE_REQ_QUAD:
		if (ab->ab_cb)
			rv = fwohci_handler_set(psc, ab->ab_tcode, high, lo, 0,
			    fwohci_parse_input, ab);
		else
			/* NULL callback: tear the handler down. */
			fwohci_handler_set(psc, ab->ab_tcode, high, lo, 0, NULL,
			    NULL);
		break;
	case IEEE1394_TCODE_READ_REQ_BLOCK:
	case IEEE1394_TCODE_WRITE_REQ_BLOCK:
		if (allow) {
			if (ab->ab_cb) {
				rv = fwohci_handler_set(psc, ab->ab_tcode,
				    high, lo, ab->ab_length,
				    fwohci_parse_input, ab);
				/* On failure, make sure nothing is left. */
				if (rv)
					fwohci_handler_set(psc, ab->ab_tcode,
					    high, lo, ab->ab_length, NULL,
					    NULL);
				ab->ab_subok = 1;
			} else
				fwohci_handler_set(psc, ab->ab_tcode, high, lo,
				    ab->ab_length, NULL, NULL);
		} else {
			if (ab->ab_cb)
				rv = fwohci_handler_set(psc, ab->ab_tcode, high,
				    lo, 0, fwohci_parse_input, ab);
			else
				fwohci_handler_set(psc, ab->ab_tcode, high, lo,
				    0, NULL, NULL);
		}
		break;
	default:
		DPRINTF(("Invalid registration tcode: %d\n", ab->ab_tcode));
		return -1;
		/* NOTREACHED */
		break;
	}
	return rv;
}
4167
4168 static int
4169 fwohci_unreg(struct ieee1394_abuf *ab, int allow)
4170 {
4171 void *save;
4172 int rv;
4173
4174 save = ab->ab_cb;
4175 ab->ab_cb = NULL;
4176 rv = fwohci_inreg(ab, allow);
4177 ab->ab_cb = save;
4178 return rv;
4179 }
4180
/*
 * Handler invoked for inbound requests registered via fwohci_inreg().
 * Parses the packet header, bounds-checks the request against the
 * registered region, copies write payloads into ab_data, and hands the
 * abuf to the upper-layer callback.  Returns an rcode for the caller
 * to respond with, or -1 when the response (if any) is generated by
 * the callback instead.
 */
static int
fwohci_parse_input(struct fwohci_softc *sc, void *arg, struct fwohci_pkt *pkt)
{
	struct ieee1394_abuf *ab = (struct ieee1394_abuf *)arg;
	u_int64_t addr;
	u_int8_t *cur;
	int i, count, ret;

	/* tcode and tlabel live in the first header quadlet. */
	ab->ab_tcode = (pkt->fp_hdr[0] >> 4) & 0xf;
	ab->ab_tlabel = (pkt->fp_hdr[0] >> 10) & 0x3f;
	/* 48-bit destination offset: low 16 bits of hdr[1] + all of hdr[2]. */
	addr = (((u_int64_t)(pkt->fp_hdr[1] & 0xffff) << 32) | pkt->fp_hdr[2]);

	/* Make sure it's always 0 in case this gets reused multiple times. */
	ab->ab_retlen = 0;

	switch (ab->ab_tcode) {
	case IEEE1394_TCODE_READ_REQ_QUAD:
		ab->ab_retlen = 4;
		/* Response's (if required) will come from callback code */
		ret = -1;
		break;
	case IEEE1394_TCODE_READ_REQ_BLOCK:
		/* data_length is the upper half of header quadlet 3. */
		ab->ab_retlen = (pkt->fp_hdr[3] >> 16) & 0xffff;
		if (ab->ab_subok) {
			/* Sub-region access allowed: stay within bounds. */
			if ((addr + ab->ab_retlen) >
			    (ab->ab_addr + ab->ab_length))
				return IEEE1394_RCODE_ADDRESS_ERROR;
		} else
			/* Otherwise the read must match the region exactly. */
			if (ab->ab_retlen != ab->ab_length)
				return IEEE1394_RCODE_ADDRESS_ERROR;
		/* Response's (if required) will come from callback code */
		ret = -1;
		break;
	case IEEE1394_TCODE_WRITE_REQ_QUAD:
		ab->ab_retlen = 4;
		/* Fall through. */

	case IEEE1394_TCODE_WRITE_REQ_BLOCK:
		/* retlen already 4 for quad writes; else take data_length. */
		if (!ab->ab_retlen)
			ab->ab_retlen = (pkt->fp_hdr[3] >> 16) & 0xffff;
		if (ab->ab_subok) {
			if ((addr + ab->ab_retlen) >
			    (ab->ab_addr + ab->ab_length))
				return IEEE1394_RCODE_ADDRESS_ERROR;
		} else
			if (ab->ab_retlen > ab->ab_length)
				return IEEE1394_RCODE_ADDRESS_ERROR;

		if (ab->ab_tcode == IEEE1394_TCODE_WRITE_REQ_QUAD)
			/* Quadlet payload travels in header quadlet 3. */
			ab->ab_data[0] = pkt->fp_hdr[3];
		else {
			/* Gather the block payload from the packet iovecs
			 * into ab_data at the request's region offset. */
			count = 0;
			cur = (u_int8_t *)ab->ab_data + (addr - ab->ab_addr);
			for (i = 0; i < pkt->fp_uio.uio_iovcnt; i++) {
				memcpy(cur, pkt->fp_iov[i].iov_base,
				    pkt->fp_iov[i].iov_len);
				cur += pkt->fp_iov[i].iov_len;
				count += pkt->fp_iov[i].iov_len;
			}
			if (ab->ab_retlen != count)
				panic("Packet claims %d length "
				    "but only %d bytes returned\n",
				    ab->ab_retlen, count);
		}
		ret = IEEE1394_RCODE_COMPLETE;
		break;
	default:
		panic("Got a callback for a tcode that wasn't requested: %d",
		    ab->ab_tcode);
		break;
	}
	if (ab->ab_cb) {
		ab->ab_retaddr = addr;
		ab->ab_cb(ab, IEEE1394_RCODE_COMPLETE);
	}
	return ret;
}
4258
4259 static int
4260 fwohci_submatch(struct device *parent, struct cfdata *cf,
4261 const locdesc_t *ldesc, void *aux)
4262 {
4263 struct ieee1394_attach_args *fwa = aux;
4264
4265 /* Both halves must be filled in for a match. */
4266 if ((cf->fwbuscf_idhi == FWBUS_UNK_IDHI &&
4267 cf->fwbuscf_idlo == FWBUS_UNK_IDLO) ||
4268 (cf->fwbuscf_idhi == ntohl(*((u_int32_t *)&fwa->uid[0])) &&
4269 cf->fwbuscf_idlo == ntohl(*((u_int32_t *)&fwa->uid[4]))))
4270 return (config_match(parent, cf, aux));
4271 return 0;
4272 }
4273
4274 int
4275 fwohci_detach(struct fwohci_softc *sc, int flags)
4276 {
4277 int rv = 0;
4278
4279 if (sc->sc_sc1394.sc1394_if != NULL)
4280 rv = config_detach(sc->sc_sc1394.sc1394_if, flags);
4281 if (rv != 0)
4282 return (rv);
4283
4284 callout_stop(&sc->sc_selfid_callout);
4285
4286 if (sc->sc_powerhook != NULL)
4287 powerhook_disestablish(sc->sc_powerhook);
4288 if (sc->sc_shutdownhook != NULL)
4289 shutdownhook_disestablish(sc->sc_shutdownhook);
4290
4291 return (rv);
4292 }
4293
4294 int
4295 fwohci_activate(struct device *self, enum devact act)
4296 {
4297 struct fwohci_softc *sc = (struct fwohci_softc *)self;
4298 int s, rv = 0;
4299
4300 s = splhigh();
4301 switch (act) {
4302 case DVACT_ACTIVATE:
4303 rv = EOPNOTSUPP;
4304 break;
4305
4306 case DVACT_DEACTIVATE:
4307 if (sc->sc_sc1394.sc1394_if != NULL)
4308 rv = config_deactivate(sc->sc_sc1394.sc1394_if);
4309 break;
4310 }
4311 splx(s);
4312
4313 return (rv);
4314 }
4315
4316 #ifdef FW_DEBUG
/*
 * Debug helper (FW_DEBUG only): print the names of the bits set in an
 * OHCI IntEvent mask, together with the relevant ContextControl or
 * IntEvent register contents for the DMA-related interrupt sources.
 */
static void
fwohci_show_intr(struct fwohci_softc *sc, u_int32_t intmask)
{

	printf("%s: intmask=0x%08x:", sc->sc_sc1394.sc1394_dev.dv_xname,
	    intmask);
	if (intmask & OHCI_Int_CycleTooLong)
		printf(" CycleTooLong");
	if (intmask & OHCI_Int_UnrecoverableError)
		printf(" UnrecoverableError");
	if (intmask & OHCI_Int_CycleInconsistent)
		printf(" CycleInconsistent");
	if (intmask & OHCI_Int_BusReset)
		printf(" BusReset");
	if (intmask & OHCI_Int_SelfIDComplete)
		printf(" SelfIDComplete");
	if (intmask & OHCI_Int_LockRespErr)
		printf(" LockRespErr");
	if (intmask & OHCI_Int_PostedWriteErr)
		printf(" PostedWriteErr");
	if (intmask & OHCI_Int_ReqTxComplete)
		printf(" ReqTxComplete(0x%04x)",
		    OHCI_ASYNC_DMA_READ(sc, OHCI_CTX_ASYNC_TX_REQUEST,
		    OHCI_SUBREG_ContextControlClear));
	if (intmask & OHCI_Int_RespTxComplete)
		printf(" RespTxComplete(0x%04x)",
		    OHCI_ASYNC_DMA_READ(sc, OHCI_CTX_ASYNC_TX_RESPONSE,
		    OHCI_SUBREG_ContextControlClear));
	if (intmask & OHCI_Int_ARRS)
		printf(" ARRS(0x%04x)",
		    OHCI_ASYNC_DMA_READ(sc, OHCI_CTX_ASYNC_RX_RESPONSE,
		    OHCI_SUBREG_ContextControlClear));
	if (intmask & OHCI_Int_ARRQ)
		printf(" ARRQ(0x%04x)",
		    OHCI_ASYNC_DMA_READ(sc, OHCI_CTX_ASYNC_RX_REQUEST,
		    OHCI_SUBREG_ContextControlClear));
	if (intmask & OHCI_Int_IsochRx)
		printf(" IsochRx(0x%08x)",
		    OHCI_CSR_READ(sc, OHCI_REG_IsoRecvIntEventClear));
	if (intmask & OHCI_Int_IsochTx)
		printf(" IsochTx(0x%08x)",
		    OHCI_CSR_READ(sc, OHCI_REG_IsoXmitIntEventClear));
	if (intmask & OHCI_Int_RQPkt)
		printf(" RQPkt(0x%04x)",
		    OHCI_ASYNC_DMA_READ(sc, OHCI_CTX_ASYNC_RX_REQUEST,
		    OHCI_SUBREG_ContextControlClear));
	if (intmask & OHCI_Int_RSPkt)
		printf(" RSPkt(0x%04x)",
		    OHCI_ASYNC_DMA_READ(sc, OHCI_CTX_ASYNC_RX_RESPONSE,
		    OHCI_SUBREG_ContextControlClear));
	printf("\n");
}
4369
/*
 * Debug helper (FW_DEBUG only): decode and print a received PHY
 * packet.  The top two bits select the format: 0 = PHY configuration,
 * 1 = link-on, 2 = self-ID; anything else is printed raw.
 */
static void
fwohci_show_phypkt(struct fwohci_softc *sc, u_int32_t val)
{
	u_int8_t key, phyid;

	/* key: packet type; phyid: originating PHY's node number. */
	key = (val & 0xc0000000) >> 30;
	phyid = (val & 0x3f000000) >> 24;
	printf("%s: PHY packet from %d: ",
	    sc->sc_sc1394.sc1394_dev.dv_xname, phyid);
	switch (key) {
	case 0:
		printf("PHY Config:");
		if (val & 0x00800000)
			printf(" ForceRoot");
		if (val & 0x00400000)
			printf(" Gap=%x", (val & 0x003f0000) >> 16);
		printf("\n");
		break;
	case 1:
		printf("Link-on\n");
		break;
	case 2:
		printf("SelfID:");
		if (val & 0x00800000) {
			/* Extended self-ID packet: print sequence number. */
			printf(" #%d", (val & 0x00700000) >> 20);
		} else {
			if (val & 0x00400000)
				printf(" LinkActive");
			printf(" Gap=%x", (val & 0x003f0000) >> 16);
			printf(" Spd=S%d", 100 << ((val & 0x0000c000) >> 14));
			if (val & 0x00000800)
				printf(" Cont");
			if (val & 0x00000002)
				printf(" InitiateBusReset");
		}
		if (val & 0x00000001)
			printf(" +");
		printf("\n");
		break;
	default:
		printf("unknown: 0x%08x\n", val);
		break;
	}
}
4414 #endif /* FW_DEBUG */
4415
4416 #if 0
4417 void fwohci_dumpreg(struct ieee1394_softc *, struct fwiso_regdump *);
4418
/*
 * Debug helper: snapshot a few controller registers (node ID, cycle
 * timer, interrupt mask, IT context 0 pointers) into *fr.  The whole
 * function is currently compiled out (#if 0), along with its
 * prototype above.
 */
void
fwohci_dumpreg(struct ieee1394_softc *isc, struct fwiso_regdump *fr)
{
	struct fwohci_softc *sc = (struct fwohci_softc *)isc;
#if 0
	u_int32_t val;

	printf("%s: dump reg\n", isc->sc1394_dev.dv_xname);
	printf("\tNodeID reg 0x%08x\n",
	    OHCI_CSR_READ(sc, OHCI_REG_NodeId));
	val = OHCI_CSR_READ(sc, OHCI_REG_IsochronousCycleTimer);
	printf("\tIsoCounter 0x%08x, %d %d %d", val,
	    (val >> 25) & 0xfe, (val >> 12) & 0x1fff, val & 0xfff);
	val = OHCI_CSR_READ(sc, OHCI_REG_IntMaskSet);
	printf(" IntMask   0x%08x, %s\n", val,
	    val & OHCI_Int_IsochTx ? "isoTx" : "");

	val = OHCI_SYNC_TX_DMA_READ(sc, 0, OHCI_SUBREG_ContextControlSet);
	printf("\tIT_CommandPtr 0x%08x ContextCtrl 0x%08x%s%s%s%s\n",
	    OHCI_SYNC_TX_DMA_READ(sc, 0, OHCI_SUBREG_CommandPtr),
	    val,
	    val & OHCI_CTXCTL_RUN ? " run" : "",
	    val & OHCI_CTXCTL_WAKE ? " wake" : "",
	    val & OHCI_CTXCTL_DEAD ? " dead" : "",
	    val & OHCI_CTXCTL_ACTIVE ? " active" : "");
#endif

	fr->fr_nodeid = OHCI_CSR_READ(sc, OHCI_REG_NodeId);
	fr->fr_isocounter = OHCI_CSR_READ(sc, OHCI_REG_IsochronousCycleTimer);
	fr->fr_intmask = OHCI_CSR_READ(sc, OHCI_REG_IntMaskSet);
	fr->fr_it0_commandptr = OHCI_SYNC_TX_DMA_READ(sc, 0, OHCI_SUBREG_CommandPtr);
	fr->fr_it0_contextctrl = OHCI_SYNC_TX_DMA_READ(sc, 0, OHCI_SUBREG_ContextControlSet);


}
4454 #endif
4455
4456
4457 u_int16_t
4458 fwohci_cycletimer(struct fwohci_softc *sc)
4459 {
4460 u_int32_t reg;
4461
4462 reg = OHCI_CSR_READ(sc, OHCI_REG_IsochronousCycleTimer);
4463
4464 return (reg >> 12)&0xffff;
4465 }
4466
4467
4468 u_int16_t
4469 fwohci_it_cycletimer(ieee1394_it_tag_t it)
4470 {
4471 struct fwohci_it_ctx *itc = (struct fwohci_it_ctx *)it;
4472
4473 return fwohci_cycletimer(itc->itc_sc);
4474 }
4475
4476
4477
4478
4479
4480 /*
4481 * return value: if positive value, number of DMA buffer segments. If
4482 * negative value, error happens. Never zero.
4483 */
4484 static int
4485 fwohci_misc_dmabuf_alloc(bus_dma_tag_t dmat, int dsize, int segno,
4486 bus_dma_segment_t *segp, bus_dmamap_t *dmapp, void **mapp,
4487 const char *xname)
4488 {
4489 int nsegs;
4490 int error;
4491
4492 printf("fwohci_misc_desc_alloc: dsize %d segno %d\n", dsize, segno);
4493
4494 if ((error = bus_dmamem_alloc(dmat, dsize, PAGE_SIZE, 0,
4495 segp, segno, &nsegs, 0)) != 0) {
4496 printf("%s: unable to allocate descriptor buffer, error = %d\n",
4497 xname, error);
4498 goto fail_0;
4499 }
4500
4501 DPRINTF(("fwohci_misc_desc_alloc: %d segment[s]\n", nsegs));
4502
4503 if ((error = bus_dmamem_map(dmat, segp, nsegs, dsize, (caddr_t *)mapp,
4504 BUS_DMA_COHERENT | BUS_DMA_WAITOK)) != 0) {
4505 printf("%s: unable to map descriptor buffer, error = %d\n",
4506 xname, error);
4507 goto fail_1;
4508 }
4509
4510 DPRINTF(("fwohci_misc_desc_alloc: %s map ok\n", xname));
4511
4512 #ifdef FWOHCI_DEBUG
4513 {
4514 int loop;
4515
4516 for (loop = 0; loop < nsegs; ++loop) {
4517 printf("\t%.2d: 0x%lx - 0x%lx\n", loop,
4518 (long)segp[loop].ds_addr,
4519 (long)segp[loop].ds_addr + segp[loop].ds_len - 1);
4520 }
4521 }
4522 #endif /* FWOHCI_DEBUG */
4523
4524 if ((error = bus_dmamap_create(dmat, dsize, nsegs, dsize,
4525 0, BUS_DMA_WAITOK, dmapp)) != 0) {
4526 printf("%s: unable to create descriptor buffer DMA map, "
4527 "error = %d\n", xname, error);
4528 goto fail_2;
4529 }
4530
4531 DPRINTF(("fwohci_misc_dmabuf_alloc: bus_dmamem_create success\n"));
4532
4533 if ((error = bus_dmamap_load(dmat, *dmapp, *mapp, dsize, NULL,
4534 BUS_DMA_WAITOK)) != 0) {
4535 printf("%s: unable to load descriptor buffer DMA map, "
4536 "error = %d\n", xname, error);
4537 goto fail_3;
4538 }
4539
4540 DPRINTF(("fwohci_it_desc_alloc: bus_dmamem_load success\n"));
4541
4542 return nsegs;
4543
4544 fail_3:
4545 bus_dmamap_destroy(dmat, *dmapp);
4546 fail_2:
4547 bus_dmamem_unmap(dmat, *mapp, dsize);
4548 fail_1:
4549 bus_dmamem_free(dmat, segp, nsegs);
4550 fail_0:
4551 return error;
4552 }
4553
4554
/*
 * Release resources obtained by fwohci_misc_dmabuf_alloc(), in reverse
 * order of acquisition: destroy the map, unmap, free the memory.
 * NOTE(review): the map is destroyed without an explicit
 * bus_dmamap_unload() — confirm the bus_dma backend tolerates this.
 */
static void
fwohci_misc_dmabuf_free(bus_dma_tag_t dmat, int dsize, int nsegs,
    bus_dma_segment_t *segp, bus_dmamap_t *dmapp, caddr_t map)
{
	bus_dmamap_destroy(dmat, *dmapp);
	bus_dmamem_unmap(dmat, map, dsize);
	bus_dmamem_free(dmat, segp, nsegs);
}
4563
4564
4565
4566
4567 /*
4568 * Isochronous receive service
4569 */
4570
4571 /*
4572 * static struct fwohci_ir_ctx *
4573 * fwohci_ir_ctx_construct(struct fwohci_softc *sc, int no, int ch, int tagbm,
4574 * int bufnum, int maxsize, int flags)
4575 */
4576 static struct fwohci_ir_ctx *
4577 fwohci_ir_ctx_construct(struct fwohci_softc *sc, int no, int ch, int tagbm,
4578 int bufnum, int maxsize, int flags)
4579 {
4580 struct fwohci_ir_ctx *irc;
4581 int i;
4582
4583 printf("fwohci_ir_construct(%s, %d, %d, %x, %d, %d\n",
4584 sc->sc_sc1394.sc1394_dev.dv_xname, no, ch, tagbm, bufnum, maxsize);
4585
4586 if ((irc = malloc(sizeof(*irc), M_DEVBUF, M_WAITOK|M_ZERO)) == NULL) {
4587 return NULL;
4588 }
4589
4590 irc->irc_sc = sc;
4591
4592 irc->irc_num = no;
4593 irc->irc_status = 0;
4594
4595 irc->irc_channel = ch;
4596 irc->irc_tagbm = tagbm;
4597
4598 irc->irc_desc_num = bufnum;
4599
4600 irc->irc_flags = flags;
4601
4602 /* add header */
4603 maxsize += 8;
4604 /* rounding up */
4605 for (i = 32; i < maxsize; i <<= 1);
4606 printf("fwohci_ir_ctx_construct: maxsize %d => %d\n",
4607 maxsize, i);
4608
4609 maxsize = i;
4610
4611 irc->irc_maxsize = maxsize;
4612 irc->irc_buf_totalsize = bufnum * maxsize;
4613
4614 if (fwohci_ir_buf_setup(irc)) {
4615 /* cannot alloc descriptor */
4616 return NULL;
4617 }
4618
4619 irc->irc_readtop = irc->irc_desc_map;
4620 irc->irc_writeend = irc->irc_desc_map + irc->irc_desc_num - 1;
4621 irc->irc_savedbranch = irc->irc_writeend->fd_branch;
4622 irc->irc_writeend->fd_branch = 0;
4623 /* sync */
4624
4625 if (fwohci_ir_stop(irc) || fwohci_ir_init(irc)) {
4626 return NULL;
4627 }
4628
4629 irc->irc_status |= IRC_STATUS_READY;
4630
4631 return irc;
4632 }
4633
4634
4635
4636 /*
4637 * static void fwohci_ir_ctx_destruct(struct fwohci_ir_ctx *irc)
4638 *
4639 * This function release all DMA buffers and itself.
4640 */
4641 static void
4642 fwohci_ir_ctx_destruct(struct fwohci_ir_ctx *irc)
4643 {
4644 fwohci_misc_dmabuf_free(irc->irc_sc->sc_dmat, irc->irc_buf_totalsize,
4645 irc->irc_buf_nsegs, irc->irc_buf_segs,
4646 &irc->irc_buf_dmamap, (caddr_t)irc->irc_buf);
4647 fwohci_misc_dmabuf_free(irc->irc_sc->sc_dmat,
4648 irc->irc_desc_size,
4649 irc->irc_desc_nsegs, &irc->irc_desc_seg,
4650 &irc->irc_desc_dmamap, (caddr_t)irc->irc_desc_map);
4651
4652 free(irc, M_DEVBUF);
4653 }
4654
4655
4656
4657
4658 /*
4659 * static int fwohci_ir_buf_setup(struct fwohci_ir_ctx *irc)
4660 *
4661 * Allocates descriptors for context DMA dedicated for
4662 * isochronous receive.
4663 *
4664 * This function returns 0 (zero) if it succeeds. Otherwise,
4665 * return negative value.
4666 */
4667 static int
4668 fwohci_ir_buf_setup(struct fwohci_ir_ctx *irc)
4669 {
4670 int nsegs;
4671 struct fwohci_desc *fd;
4672 u_int32_t branch;
4673 int bufno = 0; /* DMA segment */
4674 bus_size_t bufused = 0; /* offset in a DMA segment */
4675
4676 irc->irc_desc_size = irc->irc_desc_num * sizeof(struct fwohci_desc);
4677
4678 nsegs = fwohci_misc_dmabuf_alloc(irc->irc_sc->sc_dmat,
4679 irc->irc_desc_size, 1, &irc->irc_desc_seg, &irc->irc_desc_dmamap,
4680 (void **)&irc->irc_desc_map,
4681 irc->irc_sc->sc_sc1394.sc1394_dev.dv_xname);
4682
4683 if (nsegs < 0) {
4684 printf("fwohci_ir_buf_alloc: cannot get descriptor\n");
4685 return -1;
4686 }
4687 irc->irc_desc_nsegs = nsegs;
4688
4689 nsegs = fwohci_misc_dmabuf_alloc(irc->irc_sc->sc_dmat,
4690 irc->irc_buf_totalsize, 16, irc->irc_buf_segs,
4691 &irc->irc_buf_dmamap, (void **)&irc->irc_buf,
4692 irc->irc_sc->sc_sc1394.sc1394_dev.dv_xname);
4693
4694 if (nsegs < 0) {
4695 printf("fwohci_ir_buf_alloc: cannot get DMA buffer\n");
4696 fwohci_misc_dmabuf_free(irc->irc_sc->sc_dmat,
4697 irc->irc_desc_size,
4698 irc->irc_desc_nsegs, &irc->irc_desc_seg,
4699 &irc->irc_desc_dmamap, (caddr_t)irc->irc_desc_map);
4700 return -1;
4701 }
4702 irc->irc_buf_nsegs = nsegs;
4703
4704 branch = irc->irc_desc_dmamap->dm_segs[0].ds_addr
4705 + sizeof(struct fwohci_desc);
4706 bufno = 0;
4707 bufused = 0;
4708
4709 for (fd = irc->irc_desc_map;
4710 fd < irc->irc_desc_map + irc->irc_desc_num; ++fd) {
4711 fd->fd_flags = OHCI_DESC_INPUT | OHCI_DESC_LAST
4712 | OHCI_DESC_STATUS | OHCI_DESC_BRANCH;
4713 if (irc->irc_flags & IEEE1394_IR_SHORTDELAY) {
4714 fd->fd_flags |= OHCI_DESC_INTR_ALWAYS;
4715 }
4716 #if 0
4717 if ((fd - irc->irc_desc_map) % 64 == 0) {
4718 fd->fd_flags |= OHCI_DESC_INTR_ALWAYS;
4719 }
4720 #endif
4721 fd->fd_reqcount = irc->irc_maxsize;
4722 fd->fd_status = fd->fd_rescount = 0;
4723
4724 fd->fd_branch = branch | 0x01;
4725 branch += sizeof(struct fwohci_desc);
4726
4727 /* physical addr to data? */
4728 fd->fd_data =
4729 (u_int32_t)((irc->irc_buf_segs[bufno].ds_addr + bufused));
4730 bufused += irc->irc_maxsize;
4731 if (bufused > irc->irc_buf_segs[bufno].ds_len) {
4732 bufused = 0;
4733 if (++bufno == irc->irc_buf_nsegs) {
4734 /* fail */
4735 printf("fwohci_ir_buf_setup fail\n");
4736
4737 fwohci_misc_dmabuf_free(irc->irc_sc->sc_dmat,
4738 irc->irc_desc_size,
4739 irc->irc_desc_nsegs, &irc->irc_desc_seg,
4740 &irc->irc_desc_dmamap,
4741 (caddr_t)irc->irc_desc_map);
4742 fwohci_misc_dmabuf_free(irc->irc_sc->sc_dmat,
4743 irc->irc_buf_totalsize,
4744 irc->irc_buf_nsegs, irc->irc_buf_segs,
4745 &irc->irc_buf_dmamap,
4746 (caddr_t)irc->irc_buf);
4747 return -1;
4748 }
4749 }
4750
4751 #ifdef FWOHCI_DEBUG
4752 if (fd < irc->irc_desc_map + 4
4753 || (fd > irc->irc_desc_map + irc->irc_desc_num - 4)) {
4754 printf("fwohci_ir_buf_setup: desc %d %p buf %08x"
4755 " size %d branch %08x\n",
4756 fd - irc->irc_desc_map, fd, fd->fd_data,
4757 fd->fd_reqcount, fd->fd_branch);
4758 }
4759 #endif /* FWOHCI_DEBUG */
4760 }
4761
4762 --fd;
4763 fd->fd_branch = irc->irc_desc_dmamap->dm_segs[0].ds_addr | 1;
4764 DPRINTF(("fwohci_ir_buf_setup: desc %d %p buf %08x size %d branch %08x\n",
4765 (int)(fd - irc->irc_desc_map), fd, fd->fd_data, fd->fd_reqcount,
4766 fd->fd_branch));
4767
4768 return 0;
4769 }
4770
4771
4772
4773 /*
4774 * static void fwohci_ir_init(struct fwohci_ir_ctx *irc)
4775 *
4776 * This function initialise DMA engine.
4777 */
static int
fwohci_ir_init(struct fwohci_ir_ctx *irc)
{
	struct fwohci_softc *sc = irc->irc_sc;
	int n = irc->irc_num;
	u_int32_t ctxmatch;

	/* Match on the configured channel number... */
	ctxmatch = irc->irc_channel & IEEE1394_ISO_CHANNEL_MASK;

	if (irc->irc_channel & IEEE1394_ISO_CHANNEL_ANY) {
		/* ...or enable multi-channel mode and open all channels;
		 * the channel field of ContextMatch is then unused. */
		OHCI_SYNC_RX_DMA_WRITE(sc, n,
		    OHCI_SUBREG_ContextControlSet,
		    OHCI_CTXCTL_RX_MULTI_CHAN_MODE);

		/* Receive all the isochronous channels */
		OHCI_CSR_WRITE(sc, OHCI_REG_IRMultiChanMaskHiSet, 0xffffffff);
		OHCI_CSR_WRITE(sc, OHCI_REG_IRMultiChanMaskLoSet, 0xffffffff);
		ctxmatch = 0;
	}

	/* Fold the tag bitmap into the ContextMatch register. */
	ctxmatch |= ((irc->irc_tagbm & 0x0f) << OHCI_CTXMATCH_TAG_BITPOS);
	OHCI_SYNC_RX_DMA_WRITE(sc, n, OHCI_SUBREG_ContextMatch, ctxmatch);

	/* Clear buffer-fill and cycle-match bits; set isoch-header so the
	 * packet header quadlets are stored in the data buffer. */
	OHCI_SYNC_RX_DMA_WRITE(sc, n, OHCI_SUBREG_ContextControlClear,
	    OHCI_CTXCTL_RX_BUFFER_FILL | OHCI_CTXCTL_RX_CYCLE_MATCH_ENABLE);
	OHCI_SYNC_RX_DMA_WRITE(sc, n, OHCI_SUBREG_ContextControlSet,
	    OHCI_CTXCTL_RX_ISOCH_HEADER);

	printf("fwohci_ir_init\n");

	return 0;
}
4810
4811
4812 /*
4813 * static int fwohci_ir_start(struct fwohci_ir_ctx *irc)
4814 *
4815 * This function starts DMA engine. This function must call
4816 * after fwohci_ir_init() and active bit of context control
4817 * register negated. This function will not check it.
4818 */
static int
fwohci_ir_start(struct fwohci_ir_ctx *irc)
{
	struct fwohci_softc *sc = irc->irc_sc;
	int startidx = irc->irc_readtop - irc->irc_desc_map;
	u_int32_t startaddr;

	/* Bus address of the descriptor at the current read position. */
	startaddr = irc->irc_desc_dmamap->dm_segs[0].ds_addr
	    + sizeof(struct fwohci_desc)*startidx;

	/* CommandPtr takes the descriptor address with Z=1 in the low bits. */
	OHCI_SYNC_RX_DMA_WRITE(sc, irc->irc_num, OHCI_SUBREG_CommandPtr,
	    startaddr | 1);
	/* Clear any stale interrupt for this context before running. */
	OHCI_CSR_WRITE(sc, OHCI_REG_IsoRecvIntEventClear,
	    (1 << irc->irc_num));
	OHCI_SYNC_RX_DMA_WRITE(sc, irc->irc_num,
	    OHCI_SUBREG_ContextControlSet, OHCI_CTXCTL_RUN);

	printf("fwohci_ir_start: CmdPtr %08x Ctx %08x startidx %d\n",
	    OHCI_SYNC_RX_DMA_READ(sc, irc->irc_num, OHCI_SUBREG_CommandPtr),
	    OHCI_SYNC_RX_DMA_READ(sc, irc->irc_num, OHCI_SUBREG_ContextControlSet),
	    startidx);

	irc->irc_status &= ~IRC_STATUS_READY;
	irc->irc_status |= IRC_STATUS_RUN;

	/* Without CIP-sync triggering, packets are accepted immediately. */
	if ((irc->irc_flags & IEEE1394_IR_TRIGGER_CIP_SYNC) == 0) {
		irc->irc_status |= IRC_STATUS_RECEIVE;
	}

	return 0;
}
4850
4851
4852
4853 /*
4854 * static int fwohci_ir_stop(struct fwohci_ir_ctx *irc)
4855 *
4856 * This function stops DMA engine.
4857 */
4858 static int
4859 fwohci_ir_stop(struct fwohci_ir_ctx *irc)
4860 {
4861 struct fwohci_softc *sc = irc->irc_sc;
4862 int i;
4863
4864 printf("fwohci_ir_stop\n");
4865
4866 OHCI_SYNC_RX_DMA_WRITE(sc, irc->irc_num,
4867 OHCI_SUBREG_ContextControlClear,
4868 OHCI_CTXCTL_RUN | OHCI_CTXCTL_DEAD);
4869
4870 i = 0;
4871 while (OHCI_SYNC_RX_DMA_READ(sc, irc->irc_num,
4872 OHCI_SUBREG_ContextControlSet) & OHCI_CTXCTL_ACTIVE) {
4873 #if 0
4874 u_int32_t reg = OHCI_SYNC_RX_DMA_READ(sc, irc->irc_num,
4875 OHCI_SUBREG_ContextControlClear);
4876
4877 printf("%s: %d intr IR_CommandPtr 0x%08x "
4878 "ContextCtrl 0x%08x%s%s%s%s\n",
4879 sc->sc_sc1394.sc1394_dev.dv_xname, i,
4880 OHCI_SYNC_RX_DMA_READ(sc, irc->irc_num,
4881 OHCI_SUBREG_CommandPtr),
4882 reg,
4883 reg & OHCI_CTXCTL_RUN ? " run" : "",
4884 reg & OHCI_CTXCTL_WAKE ? " wake" : "",
4885 reg & OHCI_CTXCTL_DEAD ? " dead" : "",
4886 reg & OHCI_CTXCTL_ACTIVE ? " active" : "");
4887 #endif
4888 if (i > 20) {
4889 printf("fwohci_ir_stop: %s does not stop\n",
4890 sc->sc_sc1394.sc1394_dev.dv_xname);
4891 return 1;
4892 }
4893 DELAY(10);
4894 }
4895
4896 irc->irc_status &= ~IRC_STATUS_RUN;
4897
4898 return 0;
4899 }
4900
4901
4902
4903
4904
4905
/*
 * Interrupt handler for an isochronous receive context.  On a healthy
 * context (run+active set, dead clear) it wakes any sleeper/selector;
 * otherwise it logs the context state and the failing descriptor.
 */
static void
fwohci_ir_intr(struct fwohci_softc *sc, struct fwohci_ir_ctx *irc)
{
	const char *xname = sc->sc_sc1394.sc1394_dev.dv_xname;
	u_int32_t cmd, ctx;
	int idx;
	struct fwohci_desc *fd;

	sc->sc_isocnt.ev_count++;

	if (!(irc->irc_status & IRC_STATUS_RUN)) {
		printf("fwohci_ir_intr: not running\n");
		return;
	}

	/* Make descriptor status writes by the device visible to the CPU. */
	bus_dmamap_sync(sc->sc_dmat, irc->irc_desc_dmamap,
	    0, irc->irc_desc_size, BUS_DMASYNC_PREREAD);

	ctx = OHCI_SYNC_RX_DMA_READ(sc, irc->irc_num,
	    OHCI_SUBREG_ContextControlSet);

	cmd = OHCI_SYNC_RX_DMA_READ(sc, irc->irc_num,
	    OHCI_SUBREG_CommandPtr);

#define OHCI_CTXCTL_RUNNING (OHCI_CTXCTL_RUN|OHCI_CTXCTL_ACTIVE)
#define OHCI_CTXCTL_RUNNING_MASK (OHCI_CTXCTL_RUNNING|OHCI_CTXCTL_DEAD)

	/* Convert CommandPtr (low bits masked off) into a descriptor index. */
	idx = (cmd & 0xfffffff8) - (u_int32_t)irc->irc_desc_dmamap->dm_segs[0].ds_addr;
	idx /= sizeof(struct fwohci_desc);

	if ((ctx & OHCI_CTXCTL_RUNNING_MASK) == OHCI_CTXCTL_RUNNING) {
		if (irc->irc_waitchan != NULL) {
			DPRINTF(("fwohci_ir_intr: wakeup "
			    "ctx %d CmdPtr %08x Ctxctl %08x idx %d\n",
			    irc->irc_num, cmd, ctx, idx));
#ifdef FWOHCI_WAIT_DEBUG
			irc->irc_cycle[1] = fwohci_cycletimer(irc->irc_sc);
#endif
			wakeup((void *)irc->irc_waitchan);
		}
		selwakeup(&irc->irc_sel);
		return;
	}

	/* Context stopped or dead: dump diagnostic state. */
	fd = irc->irc_desc_map + idx;

	printf("fwohci_ir_intr: %s error "
	    "ctx %d CmdPtr %08x Ctxctl %08x idx %d\n", xname,
	    irc->irc_num, cmd, ctx, idx);
	printf("\tfd flag %x branch %x stat %x rescnt %x total pkt %d\n",
	    fd->fd_flags, fd->fd_branch, fd->fd_status,fd->fd_rescount,
	    irc->irc_pktcount);
}
4959
4960
4961
4962
4963 /*
4964 * static int fwohci_ir_ctx_packetnum(struct fwohci_ir_ctx *irc)
4965 *
4966 * This function obtains the lenth of descriptors with data.
4967 */
static int
fwohci_ir_ctx_packetnum(struct fwohci_ir_ctx *irc)
{
	struct fwohci_desc *fd = irc->irc_readtop;
	int i = 0;

	/* XXX SYNC */
	/* Walk the ring from the read pointer, counting descriptors whose
	 * status word has been written back (i.e. which hold a packet). */
	while (fd->fd_status != 0) {
		if (fd == irc->irc_readtop && i > 0) {
			/* Wrapped all the way around: the ring is full. */
			printf("descriptor filled %d at %d\n", i,
			    irc->irc_pktcount);
#ifdef FWOHCI_WAIT_DEBUG
			irc->irc_cycle[2] = fwohci_cycletimer(irc->irc_sc);
			printf("cycletimer %d:%d %d:%d %d:%d\n",
			    irc->irc_cycle[0]>>13, irc->irc_cycle[0]&0x1fff,
			    irc->irc_cycle[1]>>13, irc->irc_cycle[1]&0x1fff,
			    irc->irc_cycle[2]>>13, irc->irc_cycle[2]&0x1fff);
#endif

			break;
		}

		++i;
		++fd;
		if (fd == irc->irc_desc_map + irc->irc_desc_num) {
			/* wrap to the start of the ring */
			fd = irc->irc_desc_map;
		}

	}

	return i;
}
5000
5001
5002
5003
5004 /*
5005 * int fwohci_ir_read(struct device *dev, ieee1394_ir_tag_t tag,
5006 * struct uio *uio, int headoffs, int flags)
5007 *
5008 * This function reads data from fwohci's isochronous receive
5009 * buffer.
5010 */
int
fwohci_ir_read(struct device *dev, ieee1394_ir_tag_t tag, struct uio *uio,
    int headoffs, int flags)
{
	struct fwohci_ir_ctx *irc = (struct fwohci_ir_ctx *)tag;
	int packetnum;
	int copylen, hdrshim, fwisohdrsiz;
	struct fwohci_desc *fd, *fdprev = NULL; /* XXX fdprev use is suspect */
	u_int8_t *data;
	int status = 0;
	u_int32_t tmpbranch;
	int pktcount_prev = irc->irc_pktcount;
#ifdef FW_DEBUG
	int totalread = 0;
#endif

	/* First read after construction: start the DMA engine lazily. */
	if (irc->irc_status & IRC_STATUS_READY) {
		printf("fwohci_ir_read: starting iso read engine\n");
		fwohci_ir_start(irc);
	}

	packetnum = fwohci_ir_ctx_packetnum(irc);

	DPRINTF(("fwohci_ir_read resid %lu DMA buf %d\n",
	    (unsigned long)uio->uio_resid, packetnum));

	if (packetnum == 0) {
		return EAGAIN;
	}

#ifdef USEDRAIN
	/* Ring more than 3/4 full: drop queued packets to catch up. */
	if (packetnum > irc->irc_desc_num - irc->irc_desc_num/4) {
		packetnum -= fwohci_ir_ctx_drain(irc);
		if (irc->irc_pktcount != 0) {
			printf("fwohci_ir_read overrun %d\n",
			    irc->irc_pktcount);
		}
	}
#endif /* USEDRAIN */

	fd = irc->irc_readtop;

#if 0
	/* Disabled: skip packets until a CIP sync header is found.
	 * NOTE(review): this block reads `data' before it is set up. */
	if ((irc->irc_status & IRC_STATUS_RECEIVE) == 0
	    && irc->irc_flags & IEEE1394_IR_TRIGGER_CIP_SYNC) {
		unsigned int s;
		int i = 0;

		fdprev = fd;
		while (fd->fd_status != 0) {
			s = data[14] << 8;
			s |= data[15];

			if (s != 0x0000ffffu) {
				DPRINTF(("find header %x at %d\n",
				    s, irc->irc_pktcount));
				irc->irc_status |= IRC_STATUS_RECEIVE;
				break;
			}

			fd->fd_rescount = 0;
			fd->fd_status = 0;

			fdprev = fd;
			if (++fd == irc->irc_desc_map + irc->irc_desc_num) {
				fd = irc->irc_desc_map;
				data = irc->irc_buf;
			}
			++i;
		}

		/* XXX SYNC */
		if (i > 0) {
			tmpbranch = fdprev->fd_branch;
			fdprev->fd_branch = 0;
			irc->irc_writeend->fd_branch = irc->irc_savedbranch;
			irc->irc_writeend = fdprev;
			irc->irc_savedbranch = tmpbranch;
		}
		/* XXX SYNC */

		if (fd->fd_status == 0) {
			return EAGAIN;
		}
	}
#endif

	/* Each packet buffer starts with an 8-byte shim: the timestamp
	 * quadlet (read at data[0]) and the iso header quadlet (data+4). */
	hdrshim = 8;
	fwisohdrsiz = 0;
	data = irc->irc_buf + (fd - irc->irc_desc_map) * irc->irc_maxsize;
	if (irc->irc_flags & IEEE1394_IR_NEEDHEADER) {
		/* A struct fwiso_header is prepended to each packet. */
		fwisohdrsiz = sizeof(struct fwiso_header);
	}

	/* Copy out whole packets while they fit in the remaining resid. */
	while (fd->fd_status != 0 &&
	    (copylen = fd->fd_reqcount - fd->fd_rescount - hdrshim - headoffs)
	    + fwisohdrsiz <= uio->uio_resid) {

		DPRINTF(("pkt %04x:%04x uiomove %p, %d\n",
		    fd->fd_status, fd->fd_rescount,
		    (void *)(data + 8 + headoffs), copylen));
		if ((irc->irc_status & IRC_STATUS_RECEIVE) == 0) {
			DPRINTF(("[%d]", copylen));
			/* Give up if no CIP sync header ever shows up. */
			if (irc->irc_pktcount > 1000) {
				printf("no header found\n");
				status = EIO;
				break;	/* XXX */
			}
		} else {
			DPRINTF(("<%d>", copylen));
		}

		/* Still waiting for a CIP sync header before delivering. */
		if ((irc->irc_status & IRC_STATUS_RECEIVE) == 0
		    && irc->irc_flags & IEEE1394_IR_TRIGGER_CIP_SYNC
		    && copylen > 0) {
			unsigned int s;

			s = data[14] << 8;
			s |= data[15];

			if (s != 0x0000ffffu) {
				DPRINTF(("find header %x at %d\n",
				    s, irc->irc_pktcount));
				irc->irc_status |= IRC_STATUS_RECEIVE;
			}
		}

		if (irc->irc_status & IRC_STATUS_RECEIVE) {
			if (copylen > 0) {
				if (irc->irc_flags & IEEE1394_IR_NEEDHEADER) {
					struct fwiso_header fh;

					/* Build the fwiso header from the
					 * buffer's timestamp and iso header
					 * quadlets plus the packet status. */
					fh.fh_timestamp = htonl((*(u_int32_t *)data) & 0xffff);
					fh.fh_speed = htonl((fd->fd_status >> 5)& 0x00000007);
					fh.fh_capture_size = htonl(copylen + 4);
					fh.fh_iso_header = htonl(*(u_int32_t *)(data + 4));
					status = uiomove((void *)&fh,
					    sizeof(fh), uio);
					if (status != 0) {
						/* An error happens */
						printf("uio error in hdr\n");
						break;
					}
				}
				status = uiomove((void *)(data + 8 + headoffs),
				    copylen, uio);
				if (status != 0) {
					/* An error happens */
					printf("uio error\n");
					break;
				}
#ifdef FW_DEBUG
				totalread += copylen;
#endif
			}
		}

		/* Hand the descriptor back to the DMA engine. */
		fd->fd_rescount = 0;
		fd->fd_status = 0;

#if 0
		/* advance writeend pointer and fill branch */

		tmpbranch = fd->fd_branch;
		fd->fd_branch = 0;
		irc->irc_writeend->fd_branch = irc->irc_savedbranch;
		irc->irc_writeend = fd;
		irc->irc_savedbranch = tmpbranch;
#endif
		fdprev = fd;

		data += irc->irc_maxsize;
		if (++fd == irc->irc_desc_map + irc->irc_desc_num) {
			/* wrap to the start of the ring */
			fd = irc->irc_desc_map;
			data = irc->irc_buf;
		}
		++irc->irc_pktcount;
	}

#if 1
	if (irc->irc_pktcount != pktcount_prev) {
		/* XXX SYNC */
		/* Terminate the ring at the last consumed descriptor and
		 * re-open the branch that was previously terminated. */
		tmpbranch = fdprev->fd_branch;
		fdprev->fd_branch = 0;
		irc->irc_writeend->fd_branch = irc->irc_savedbranch;
		irc->irc_writeend = fdprev;
		irc->irc_savedbranch = tmpbranch;
		/* XXX SYNC */
	}
#endif

	/* Wake the context if it stalled at the terminated branch. */
	if (!(OHCI_SYNC_RX_DMA_READ(irc->irc_sc, irc->irc_num,
	    OHCI_SUBREG_ContextControlClear) & OHCI_CTXCTL_ACTIVE)) {
		/* do wake */
		OHCI_SYNC_RX_DMA_WRITE(irc->irc_sc, irc->irc_num,
		    OHCI_SUBREG_ContextControlSet, OHCI_CTXCTL_WAKE);
	}

	/* Track the deepest queue seen (debug aid). */
	if (packetnum > irc->irc_maxqueuelen) {
		irc->irc_maxqueuelen = packetnum;
		irc->irc_maxqueuepos = irc->irc_pktcount;
	}

	if (irc->irc_pktcount == pktcount_prev) {
#if 0
		printf("fwohci_ir_read: process 0 packet, total %d\n",
		    irc->irc_pktcount);
		if (++pktfail > 30) {
			return 0;
		}
#endif
		return EAGAIN;
	}

	irc->irc_readtop = fd;

	DPRINTF(("fwochi_ir_read: process %d packet, total %d\n",
	    totalread, irc->irc_pktcount));

	return status;
}
5232
5233
5234
5235
5236 /*
5237 * int fwohci_ir_wait(struct device *dev, ieee1394_ir_tag_t tag,
5238 * void *wchan, char *name)
5239 *
5240 * This function waits till new data comes.
5241 */
int
fwohci_ir_wait(struct device *dev, ieee1394_ir_tag_t tag, void *wchan, char *name)
{
	struct fwohci_ir_ctx *irc = (struct fwohci_ir_ctx *)tag;
	struct fwohci_desc *fd;
	int pktnum;
	int stat;

	/* Enough packets already queued: no need to sleep. */
	if ((pktnum = fwohci_ir_ctx_packetnum(irc)) > 4) {
		DPRINTF(("fwohci_ir_wait enough data %d\n", pktnum));
		return 0;
	}

	/* Pick the descriptor 32 slots ahead of the read pointer to
	 * carry the wakeup interrupt (with ring wrap-around). */
	fd = irc->irc_readtop + 32;
	if (fd >= irc->irc_desc_map + irc->irc_desc_num) {
		fd -= irc->irc_desc_num;
	}

	irc->irc_waitchan = wchan;
	if ((irc->irc_flags & IEEE1394_IR_SHORTDELAY) == 0) {
		/* Not interrupting on every packet: request an interrupt
		 * on that one descriptor for the duration of the sleep. */
		fd->fd_flags |= OHCI_DESC_INTR_ALWAYS;
		DPRINTF(("fwohci_ir_wait stops %d set intr %d\n",
		    (int)(irc->irc_readtop - irc->irc_desc_map),
		    (int)(fd - irc->irc_desc_map)));
		/* XXX SYNC */
	}

#ifdef FWOHCI_WAIT_DEBUG
	irc->irc_cycle[0] = fwohci_cycletimer(irc->irc_sc);
#endif

	irc->irc_status |= IRC_STATUS_SLEEPING;
	/* Sleep up to 10 seconds; PCATCH lets signals interrupt us. */
	if ((stat = tsleep(wchan, PCATCH|PRIBIO, name, hz*10)) != 0) {
		/* Timed out or interrupted: withdraw the wakeup request. */
		irc->irc_waitchan = NULL;
		fd->fd_flags &= ~OHCI_DESC_INTR_ALWAYS;
		if (stat == EWOULDBLOCK) {
			printf("fwohci_ir_wait: timeout\n");
			return EIO;
		} else {
			return EINTR;
		}
	}

	irc->irc_waitchan = NULL;
	if ((irc->irc_flags & IEEE1394_IR_SHORTDELAY) == 0) {
		fd->fd_flags &= ~OHCI_DESC_INTR_ALWAYS;
		/* XXX SYNC */
	}

	DPRINTF(("fwohci_ir_wait: wakeup\n"));

	return 0;
}
5295
5296
5297
5298
5299 /*
5300 * int fwohci_ir_select(struct device *dev, ieee1394_ir_tag_t tag,
5301 * struct proc *p)
5302 *
5303 * This function returns the number of packets in queue.
5304 */
5305 int
5306 fwohci_ir_select(struct device *dev, ieee1394_ir_tag_t tag, struct proc *p)
5307 {
5308 struct fwohci_ir_ctx *irc = (struct fwohci_ir_ctx *)tag;
5309 int pktnum;
5310
5311 if (irc->irc_status & IRC_STATUS_READY) {
5312 printf("fwohci_ir_select: starting iso read engine\n");
5313 fwohci_ir_start(irc);
5314 }
5315
5316 if ((pktnum = fwohci_ir_ctx_packetnum(irc)) == 0) {
5317 selrecord(p, &irc->irc_sel);
5318 }
5319
5320 return pktnum;
5321 }
5322
5323
5324
5325 #ifdef USEDRAIN
5326 /*
5327 * int fwohci_ir_ctx_drain(struct fwohci_ir_ctx *irc)
5328 *
5329 * This function will drain all the packets in receive DMA
5330 * buffer.
5331 */
5332 static int
5333 fwohci_ir_ctx_drain(struct fwohci_ir_ctx *irc)
5334 {
5335 struct fwohci_desc *fd = irc->irc_readtop;
5336 u_int32_t reg;
5337 int count = 0;
5338
5339 reg = OHCI_SYNC_RX_DMA_READ(irc->irc_sc, irc->irc_num,
5340 OHCI_SUBREG_ContextControlClear);
5341
5342 printf("fwohci_ir_ctx_drain ctx%s%s%s%s\n",
5343 reg & OHCI_CTXCTL_RUN ? " run" : "",
5344 reg & OHCI_CTXCTL_WAKE ? " wake" : "",
5345 reg & OHCI_CTXCTL_DEAD ? " dead" : "",
5346 reg & OHCI_CTXCTL_ACTIVE ? " active" : "");
5347
5348 if ((reg & OHCI_CTXCTL_RUNNING_MASK) == OHCI_CTXCTL_RUN) {
5349 /* DMA engine is stopped */
5350 u_int32_t startadr;
5351
5352 for (fd = irc->irc_desc_map;
5353 fd < irc->irc_desc_map + irc->irc_desc_num;
5354 ++fd) {
5355 fd->fd_status = 0;
5356 }
5357
5358 /* Restore branch addr of the last descriptor */
5359 irc->irc_writeend->fd_branch = irc->irc_savedbranch;
5360
5361 irc->irc_readtop = irc->irc_desc_map;
5362 irc->irc_writeend = irc->irc_desc_map + irc->irc_desc_num - 1;
5363 irc->irc_savedbranch = irc->irc_writeend->fd_branch;
5364 irc->irc_writeend->fd_branch = 0;
5365
5366 count = irc->irc_desc_num;
5367
5368 OHCI_SYNC_RX_DMA_WRITE(irc->irc_sc, irc->irc_num,
5369 OHCI_SUBREG_ContextControlClear,
5370 OHCI_CTXCTL_RUN | OHCI_CTXCTL_DEAD);
5371
5372 startadr = (u_int32_t)irc->irc_desc_dmamap->dm_segs[0].ds_addr;
5373
5374 printf("fwohci_ir_ctx_drain: remove %d pkts\n", count);
5375
5376 OHCI_SYNC_RX_DMA_WRITE(irc->irc_sc, irc->irc_num,
5377 OHCI_SUBREG_CommandPtr, startadr | 1);
5378
5379 OHCI_SYNC_RX_DMA_WRITE(irc->irc_sc, irc->irc_num,
5380 OHCI_SUBREG_ContextControlSet, OHCI_CTXCTL_RUN);
5381 } else {
5382 const int removecount = irc->irc_desc_num/2;
5383 u_int32_t tmpbranch;
5384
5385 for (count = 0; count < removecount; ++count) {
5386 if (fd->fd_status == 0) {
5387 break;
5388 }
5389
5390 fd->fd_status = 0;
5391
5392 tmpbranch = fd->fd_branch;
5393 fd->fd_branch = 0;
5394 irc->irc_writeend->fd_branch = irc->irc_savedbranch;
5395 irc->irc_writeend = fd;
5396 irc->irc_savedbranch = tmpbranch;
5397
5398 if (++fd == irc->irc_desc_map + irc->irc_desc_num) {
5399 fd = irc->irc_desc_map;
5400 }
5401 ++count;
5402 }
5403
5404 printf("fwohci_ir_ctx_drain: remove %d pkts\n", count);
5405 }
5406
5407 return count;
5408 }
5409 #endif /* USEDRAIN */
5410
5411
5412
5413
5414
5415
5416
5417
5418
5419 /*
5420 * service routines for isochronous transmit
5421 */
5422
5423
struct fwohci_it_ctx *
fwohci_it_ctx_construct(struct fwohci_softc *sc, int no, int ch, int tag, int maxsize)
{
	struct fwohci_it_ctx *itc;
	size_t dmastrsize;
	struct fwohci_it_dmabuf *dmastr;
	struct fwohci_desc *desc;
	bus_addr_t descphys;
	int nodesc;
	int i, j;

	/* Allocate the context structure itself (zeroed). */
	if ((itc = malloc(sizeof(*itc), M_DEVBUF, M_NOWAIT|M_ZERO)) == NULL) {
		return itc;
	}

	itc->itc_num = no;
	itc->itc_flags = 0;
	itc->itc_sc = sc;
	itc->itc_bufnum = FWOHCI_IT_BUFNUM;

	itc->itc_channel = ch;
	itc->itc_tag = tag;
	itc->itc_speed = OHCI_CTXCTL_SPD_100; /* XXX */

	itc->itc_outpkt = 0;

	itc->itc_maxsize = maxsize;

	/* Array of per-DMA-buffer bookkeeping structures. */
	dmastrsize = sizeof(struct fwohci_it_dmabuf)*itc->itc_bufnum;

	if ((dmastr = malloc(dmastrsize, M_DEVBUF, M_NOWAIT|M_ZERO)) == NULL) {
		goto error_1;
	}
	itc->itc_buf = dmastr;

	/*
	 * Get memory for descriptors. One buffer will have 256
	 * packet entry and 1 trailing descriptor for writing scratch.
	 * 4-byte space for scratch.
	 */
	itc->itc_descsize = (256*3 + 1)*itc->itc_bufnum;

	if (fwohci_it_desc_alloc(itc)) {
		printf("%s: cannot get enough memory for descriptor\n",
		    sc->sc_sc1394.sc1394_dev.dv_xname);
		goto error_2;
	}

	/* prepare DMA buffer: split the descriptors evenly per buffer */
	nodesc = itc->itc_descsize/itc->itc_bufnum;
	desc = (struct fwohci_desc *)itc->itc_descmap;
	descphys = itc->itc_dseg.ds_addr;

	for (i = 0; i < itc->itc_bufnum; ++i) {

		if (fwohci_itd_construct(itc, &dmastr[i], i, desc,
		    descphys, nodesc,
		    itc->itc_maxsize, itc->itc_scratch_paddr)) {
			goto error_3;
		}
		desc += nodesc;
		descphys += sizeof(struct fwohci_desc)*nodesc;
	}

#if 1
	/* All ring pointers start at the first buffer. */
	itc->itc_buf_start = itc->itc_buf;
	itc->itc_buf_end = itc->itc_buf;
	itc->itc_buf_linkend = itc->itc_buf;
#else
	itc->itc_bufidx_start = 0;
	itc->itc_bufidx_end = 0;
	itc->itc_bufidx_linkend = 0;
#endif
	itc->itc_buf_cnt = 0;
	itc->itc_waitchan = NULL;
	/* 0xffffffff in the scratch word means "no buffer completed". */
	*itc->itc_scratch = 0xffffffff;

	return itc;

 error_3:
	/* Undo only the buffers constructed so far. */
	for (j = 0; j < i; ++j) {
		fwohci_itd_destruct(&dmastr[j]);
	}
	fwohci_it_desc_free(itc);
 error_2:
	free(itc->itc_buf, M_DEVBUF);
 error_1:
	free(itc, M_DEVBUF);

	return NULL;
}
5515
5516
5517
5518 void
5519 fwohci_it_ctx_destruct(struct fwohci_it_ctx *itc)
5520 {
5521 int i;
5522
5523 for (i = 0; i < itc->itc_bufnum; ++i) {
5524 fwohci_itd_destruct(&itc->itc_buf[i]);
5525 }
5526
5527 fwohci_it_desc_free(itc);
5528 free(itc, M_DEVBUF);
5529 }
5530
5531
5532 /*
5533 * static int fwohci_it_desc_alloc(struct fwohci_it_ctx *itc)
5534 *
5535 * Allocates descriptors for context DMA dedicated for
5536 * isochronous transmit.
5537 *
5538 * This function returns 0 (zero) if it succeeds. Otherwise,
5539 * return negative value.
5540 */
5541 static int
5542 fwohci_it_desc_alloc(struct fwohci_it_ctx *itc)
5543 {
5544 bus_dma_tag_t dmat = itc->itc_sc->sc_dmat;
5545 const char *xname = itc->itc_sc->sc_sc1394.sc1394_dev.dv_xname;
5546 int error, dsize;
5547
5548 /* add for scratch */
5549 itc->itc_descsize++;
5550
5551 /* rounding up to 256 */
5552 if ((itc->itc_descsize & 0x0ff) != 0) {
5553 itc->itc_descsize =
5554 (itc->itc_descsize & ~0x0ff) + 0x100;
5555 }
5556 /* remove for scratch */
5557
5558 itc->itc_descsize--;
5559 printf("%s: fwohci_it_desc_alloc will allocate %d descs\n",
5560 xname, itc->itc_descsize);
5561
5562 /*
5563 * allocate descriptor buffer
5564 */
5565 dsize = sizeof(struct fwohci_desc) * itc->itc_descsize;
5566
5567 printf("%s: fwohci_it_desc_alloc: descriptor %d, dsize %d\n",
5568 xname, itc->itc_descsize, dsize);
5569
5570 if ((error = bus_dmamem_alloc(dmat, dsize, PAGE_SIZE, 0,
5571 &itc->itc_dseg, 1, &itc->itc_dnsegs, 0)) != 0) {
5572 printf("%s: unable to allocate descriptor buffer, error = %d\n",
5573 xname, error);
5574 goto fail_0;
5575 }
5576
5577 printf("fwohci_it_desc_alloc: %d segment[s]\n", itc->itc_dnsegs);
5578
5579 if ((error = bus_dmamem_map(dmat, &itc->itc_dseg,
5580 itc->itc_dnsegs, dsize, (caddr_t *)&itc->itc_descmap,
5581 BUS_DMA_COHERENT | BUS_DMA_WAITOK)) != 0) {
5582 printf("%s: unable to map descriptor buffer, error = %d\n",
5583 xname, error);
5584 goto fail_1;
5585 }
5586
5587 printf("fwohci_it_desc_alloc: bus_dmamem_map success dseg %lx:%lx\n",
5588 (long)itc->itc_dseg.ds_addr, (long)itc->itc_dseg.ds_len);
5589
5590 if ((error = bus_dmamap_create(dmat, dsize, itc->itc_dnsegs,
5591 dsize, 0, BUS_DMA_WAITOK, &itc->itc_ddmamap)) != 0) {
5592 printf("%s: unable to create descriptor buffer DMA map, "
5593 "error = %d\n", xname, error);
5594 goto fail_2;
5595 }
5596
5597 printf("fwohci_it_desc_alloc: bus_dmamem_create success\n");
5598
5599 {
5600 int loop;
5601
5602 for (loop = 0; loop < itc->itc_ddmamap->dm_nsegs; ++loop) {
5603 printf("\t%.2d: 0x%lx - 0x%lx\n", loop,
5604 (long)itc->itc_ddmamap->dm_segs[loop].ds_addr,
5605 (long)itc->itc_ddmamap->dm_segs[loop].ds_addr +
5606 (long)itc->itc_ddmamap->dm_segs[loop].ds_len - 1);
5607 }
5608 }
5609
5610 if ((error = bus_dmamap_load(dmat, itc->itc_ddmamap,
5611 itc->itc_descmap, dsize, NULL, BUS_DMA_WAITOK)) != 0) {
5612 printf("%s: unable to load descriptor buffer DMA map, "
5613 "error = %d\n", xname, error);
5614 goto fail_3;
5615 }
5616
5617 printf("%s: fwohci_it_desc_alloc: get DMA memory phys:0x%08x vm:%p\n",
5618 xname, (int)itc->itc_ddmamap->dm_segs[0].ds_addr, itc->itc_descmap);
5619
5620 itc->itc_scratch = (u_int32_t *)(itc->itc_descmap
5621 + (sizeof(struct fwohci_desc))*itc->itc_descsize);
5622 itc->itc_scratch_paddr =
5623 itc->itc_ddmamap->dm_segs[0].ds_addr
5624 + (sizeof(struct fwohci_desc))*itc->itc_descsize;
5625
5626 printf("%s: scratch %p, 0x%x\n", xname, itc->itc_scratch,
5627 (int)itc->itc_scratch_paddr);
5628
5629 /* itc->itc_scratch_paddr = vtophys(itc->itc_scratch); */
5630
5631 return 0;
5632
5633 fail_3:
5634 bus_dmamap_destroy(dmat, itc->itc_ddmamap);
5635 fail_2:
5636 bus_dmamem_unmap(dmat, (caddr_t)itc->itc_descmap, dsize);
5637 fail_1:
5638 bus_dmamem_free(dmat, &itc->itc_dseg, itc->itc_dnsegs);
5639 fail_0:
5640 itc->itc_dnsegs = 0;
5641 itc->itc_descmap = NULL;
5642 return error;
5643 }
5644
5645
/*
 * Release the descriptor memory allocated by fwohci_it_desc_alloc().
 * The "+ 4" covers the scratch word placed after the last descriptor.
 */
static void
fwohci_it_desc_free(struct fwohci_it_ctx *itc)
{
	bus_dma_tag_t dmat = itc->itc_sc->sc_dmat;
	int dsize = sizeof(struct fwohci_desc) * itc->itc_descsize + 4;

	/* Unload/unmap/free in the reverse order of allocation. */
	bus_dmamap_destroy(dmat, itc->itc_ddmamap);
	bus_dmamem_unmap(dmat, (caddr_t)itc->itc_descmap, dsize);
	bus_dmamem_free(dmat, &itc->itc_dseg, itc->itc_dnsegs);

	itc->itc_dnsegs = 0;
	itc->itc_descmap = NULL;
}
5659
5660
5661
5662 /*
5663 * int fwohci_it_ctx_writedata(ieee1394_it_tag_t it, int ndata,
5664 * struct ieee1394_it_datalist *itdata, int flags)
5665 *
5666 * This function will write packet data to DMA buffer in the
5667 * context. This function will parse ieee1394_it_datalist
5668 * command and fill DMA buffer. This function will return the
5669 * number of written packets, or error code if the return value
5670 * is negative.
5671 *
5672 * When this funtion returns positive value but smaller than
5673 * ndata, it reaches at the ent of DMA buffer.
5674 */
int
fwohci_it_ctx_writedata(ieee1394_it_tag_t it, int ndata,
    struct ieee1394_it_datalist *itdata, int flags)
{
	struct fwohci_it_ctx *itc = (struct fwohci_it_ctx *)it;
	int rv;
	int writepkt = 0;		/* packets written so far */
	struct fwohci_it_dmabuf *itd;
	int i = 0;			/* loop-livelock guard */

	/* Start filling at the current tail buffer of the ring. */
	itd = itc->itc_buf_end;

	while (ndata > 0) {
		int s;

		if (fwohci_itd_isfull(itd) || fwohci_itd_islocked(itd)) {
			if (itc->itc_buf_cnt == itc->itc_bufnum) {
				/* no space to write */
				printf("sleeping: start linkend end %d %d %d "
				    "bufcnt %d\n",
				    itc->itc_buf_start->itd_num,
				    itc->itc_buf_linkend->itd_num,
				    itc->itc_buf_end->itd_num,
				    itc->itc_buf_cnt);

				/*
				 * Wait for the interrupt handler to free
				 * a buffer.
				 * NOTE(review): with timo 0, tsleep()
				 * never returns EWOULDBLOCK; a signal
				 * yields EINTR/ERESTART, so this break
				 * looks unreachable -- confirm intent.
				 */
				itc->itc_waitchan = itc;
				if (tsleep((void *)itc->itc_waitchan,
				    PCATCH, "fwohci it", 0) == EWOULDBLOCK) {
					itc->itc_waitchan = NULL;
					printf("fwohci0 signal\n");
					break;
				}
				printf("waking: start linkend end %d %d %d\n",
				    itc->itc_buf_start->itd_num,
				    itc->itc_buf_linkend->itd_num,
				    itc->itc_buf_end->itd_num);

				itc->itc_waitchan = itc;
				i = 0;
			} else {
				/*
				 * Use next buffer. This DMA buffer is full
				 * or locked.
				 */
				INC_BUF(itc, itd);
			}
		}

		/* Guard against spinning while hunting for a free buffer. */
		if (++i > 10) {
			panic("why loop so much %d", itc->itc_buf_cnt);
			break;	/* NOTE(review): unreachable after panic() */
		}

		s = splbio();

		/* First data into an empty buffer: count it as in use. */
		if (fwohci_itd_hasdata(itd) == 0) {
			++itc->itc_buf_cnt;
			DPRINTF(("<buf cnt %d>\n", itc->itc_buf_cnt));
		}

		rv = fwohci_itd_writedata(itd, ndata, itdata);
		DPRINTF(("fwohci_it_ctx_writedata: buf %d ndata %d rv %d\n",
		    itd->itd_num, ndata, rv));

		/*
		 * If the running DMA engine has caught up with the link
		 * end, chain the next filled buffer immediately so the
		 * engine does not run dry.
		 */
		if (itc->itc_buf_start == itc->itc_buf_linkend
		    && (itc->itc_flags & ITC_FLAGS_RUN) != 0) {

#ifdef DEBUG_USERADD
			printf("fwohci_it_ctx_writedata: emergency!\n");
#endif
			if (itc->itc_buf_linkend != itc->itc_buf_end
			    && fwohci_itd_hasdata(itc->itc_buf_end)) {
				struct fwohci_it_dmabuf *itdn = itc->itc_buf_linkend;

				INC_BUF(itc, itdn);
				printf("connecting %d after %d\n",
				    itdn->itd_num,
				    itc->itc_buf_linkend->itd_num);
				if (fwohci_itd_link(itc->itc_buf_linkend, itdn)) {
					printf("fwohci_it_ctx_writedata:"
					    " cannot link correctly\n");
					splx(s);
					return -1;
				}
				itc->itc_buf_linkend = itdn;
			}
		}

		splx(s);

		if (rv < 0) {
			/* some errors happend */
			break;
		}

		/* Advance past the packets just consumed. */
		writepkt += rv;
		ndata -= rv;
		itdata += rv;
		itc->itc_buf_end = itd;
	}

	/* Start DMA engine if stopped */
	if ((itc->itc_flags & ITC_FLAGS_RUN) == 0) {
		if (itc->itc_buf_cnt > itc->itc_bufnum - 1 || flags) {
			/* run */
			printf("fwohci_itc_ctl_writedata: DMA engine start\n");
			fwohci_it_ctx_run(itc);
		}
	}

	return writepkt;
}
5787
5788
5789
/*
 * static void fwohci_it_ctx_run(struct fwohci_it_ctx *itc)
 *
 * Link the buffers queued so far, stop the transmit context if it is
 * busy, program CommandPtr with the head of the chain and (re)start
 * the DMA engine.  No-op when ITC_FLAGS_RUN is already set.
 */
static void
fwohci_it_ctx_run(struct fwohci_it_ctx *itc)
{
	struct fwohci_softc *sc = itc->itc_sc;
	int ctx = itc->itc_num;
	struct fwohci_it_dmabuf *itd
	    = (struct fwohci_it_dmabuf *)itc->itc_buf_start;
	u_int32_t reg;
	int i;

	if (itc->itc_flags & ITC_FLAGS_RUN) {
		return;
	}
	itc->itc_flags |= ITC_FLAGS_RUN;

	/*
	 * dirty, but I can't imagine better place to save branch addr
	 * of top DMA buffer and substitute 0 to it.
	 */
	itd->itd_savedbranch = itd->itd_lastdesc->fd_branch;
	itd->itd_lastdesc->fd_branch = 0;

	if (itc->itc_buf_cnt > 1) {
		struct fwohci_it_dmabuf *itdn = itd;

#if 0
		INC_BUF(itc, itdn);

		if (fwohci_itd_link(itd, itdn)) {
			printf("fwohci_it_ctx_run: cannot link correctly\n");
			return;
		}
		itc->itc_buf_linkend = itdn;
#else
		/* Chain every buffer up to (not including) buf_end. */
		for (;;) {
			INC_BUF(itc, itdn);

			if (itdn == itc->itc_buf_end) {
				break;
			}
			if (fwohci_itd_link(itd, itdn)) {
				printf("fwohci_it_ctx_run: cannot link\n");
				return;
			}
			itd = itdn;
		}
		itc->itc_buf_linkend = itd;
#endif
	} else {
		/* Single buffer: it is both the link end and locked. */
		itd->itd_lastdesc->fd_flags |= OHCI_DESC_INTR_ALWAYS;
		itc->itc_buf_linkend = itc->itc_buf_end;
		itc->itc_buf_end->itd_flags |= ITD_FLAGS_LOCK;

		/* sanity check */
		if (itc->itc_buf_end != itc->itc_buf_start) {
			printf("buf start & end differs %p %p\n",
			    itc->itc_buf_end, itc->itc_buf_start);
		}
#if 0
		{
			u_int32_t *fdp;
			u_int32_t adr;
			int i;

			printf("fwohci_it_ctx_run: itc_buf_cnt 1, DMA buf %d\n",
			    itd->itd_num);
			printf(" last desc %p npacket %d, %d 0x%04x%04x",
			    itd->itd_lastdesc, itd->itd_npacket,
			    (itd->itd_lastdesc - itd->itd_desc)/3,
			    itd->itd_lastdesc->fd_flags,
			    itd->itd_lastdesc->fd_reqcount);
			fdp = (u_int32_t *)itd->itd_desc;
			adr = (u_int32_t)itd->itd_desc_phys; /* XXX */

			for (i = 0; i < 7*4; ++i) {
				if (i % 4 == 0) {
					printf("\n%x:", adr + 4*i);
				}
				printf(" %08x", fdp[i]);
			}

			if (itd->itd_npacket > 4) {
				printf("\n...");
				i = (itd->itd_npacket - 2)*12 + 4;
			} else {
				i = 2*12 + 4;
			}
			for (;i < itd->itd_npacket*12 + 4; ++i) {
				if (i % 4 == 0) {
					printf("\n%x:", adr + 4*i);
				}
				printf(" %08x", fdp[i]);
			}
			printf("\n");
		}
#endif
	}
	/* Sanity-check the first and link-end buffers' descriptors. */
	{
		struct fwohci_desc *fd;

		printf("fwohci_it_ctx_run: link start linkend end %d %d %d\n",
		    itc->itc_buf_start->itd_num,
		    itc->itc_buf_linkend->itd_num,
		    itc->itc_buf_end->itd_num);

		fd = itc->itc_buf_start->itd_desc;
		if ((fd->fd_flags & 0xff00) != OHCI_DESC_STORE_VALUE) {
			printf("fwohci_it_ctx_run: start buf not with STORE\n");
		}
		fd += 3;
		if ((fd->fd_flags & OHCI_DESC_INTR_ALWAYS) == 0) {
			printf("fwohci_it_ctx_run: start buf does not have intr\n");
		}

		fd = itc->itc_buf_linkend->itd_desc;
		if ((fd->fd_flags & 0xff00) != OHCI_DESC_STORE_VALUE) {
			printf("fwohci_it_ctx_run: linkend buf not with STORE\n");
		}
		fd += 3;
		if ((fd->fd_flags & OHCI_DESC_INTR_ALWAYS) == 0) {
			printf("fwohci_it_ctx_run: linkend buf does not have intr\n");
		}
	}

	/* Reset the completion scratch word before starting. */
	*itc->itc_scratch = 0xffffffff;

	OHCI_SYNC_TX_DMA_WRITE(sc, ctx, OHCI_SUBREG_ContextControlClear,
	    0xffff0000);
	reg = OHCI_SYNC_TX_DMA_READ(sc, ctx, OHCI_SUBREG_ContextControlSet);

	printf("fwohci_it_ctx_run start for ctx %d\n", ctx);
	printf("%s: bfr IT_CommandPtr 0x%08x ContextCtrl 0x%08x%s%s%s%s\n",
	    sc->sc_sc1394.sc1394_dev.dv_xname,
	    OHCI_SYNC_TX_DMA_READ(sc, ctx, OHCI_SUBREG_CommandPtr),
	    reg,
	    reg & OHCI_CTXCTL_RUN ? " run" : "",
	    reg & OHCI_CTXCTL_WAKE ? " wake" : "",
	    reg & OHCI_CTXCTL_DEAD ? " dead" : "",
	    reg & OHCI_CTXCTL_ACTIVE ? " active" : "");

	/* Stop the context, then poll (up to ~100ms) until it is idle. */
	OHCI_SYNC_TX_DMA_WRITE(sc, ctx, OHCI_SUBREG_ContextControlClear,
	    OHCI_CTXCTL_RUN);

	reg = OHCI_SYNC_TX_DMA_READ(sc, ctx, OHCI_SUBREG_ContextControlSet);
	i = 0;
	while (reg & (OHCI_CTXCTL_ACTIVE | OHCI_CTXCTL_RUN)) {
		delay(100);
		if (++i > 1000) {
			printf("%s: cannot stop iso transmit engine\n",
			    sc->sc_sc1394.sc1394_dev.dv_xname);
			break;
		}
		reg = OHCI_SYNC_TX_DMA_READ(sc, ctx,
		    OHCI_SUBREG_ContextControlSet);
	}

	printf("%s: itm IT_CommandPtr 0x%08x ContextCtrl 0x%08x%s%s%s%s\n",
	    sc->sc_sc1394.sc1394_dev.dv_xname,
	    OHCI_SYNC_TX_DMA_READ(sc, ctx, OHCI_SUBREG_CommandPtr),
	    reg,
	    reg & OHCI_CTXCTL_RUN ? " run" : "",
	    reg & OHCI_CTXCTL_WAKE ? " wake" : "",
	    reg & OHCI_CTXCTL_DEAD ? " dead" : "",
	    reg & OHCI_CTXCTL_ACTIVE ? " active" : "");

	/* Program CommandPtr: chain head address | Z value of 4. */
	printf("%s: writing CommandPtr to 0x%08x\n",
	    sc->sc_sc1394.sc1394_dev.dv_xname,
	    (int)itc->itc_buf_start->itd_desc_phys);
	OHCI_SYNC_TX_DMA_WRITE(sc, ctx, OHCI_SUBREG_CommandPtr,
	    fwohci_itd_list_head(itc->itc_buf_start) | 4);

	/* Restart the context. */
	OHCI_SYNC_TX_DMA_WRITE(sc, ctx, OHCI_SUBREG_ContextControlSet,
	    OHCI_CTXCTL_RUN | OHCI_CTXCTL_WAKE);

	reg = OHCI_SYNC_TX_DMA_READ(sc, ctx, OHCI_SUBREG_ContextControlSet);

	printf("%s: aft IT_CommandPtr 0x%08x ContextCtrl 0x%08x%s%s%s%s\n",
	    sc->sc_sc1394.sc1394_dev.dv_xname,
	    OHCI_SYNC_TX_DMA_READ(sc, ctx, OHCI_SUBREG_CommandPtr),
	    reg,
	    reg & OHCI_CTXCTL_RUN ? " run" : "",
	    reg & OHCI_CTXCTL_WAKE ? " wake" : "",
	    reg & OHCI_CTXCTL_DEAD ? " dead" : "",
	    reg & OHCI_CTXCTL_ACTIVE ? " active" : "");
}
5975
5976
5977
5978 int
5979 fwohci_it_ctx_flush(ieee1394_it_tag_t it)
5980 {
5981 struct fwohci_it_ctx *itc = (struct fwohci_it_ctx *)it;
5982 int rv = 0;
5983
5984 if ((itc->itc_flags & ITC_FLAGS_RUN) == 0
5985 && itc->itc_buf_cnt > 0) {
5986 printf("fwohci_it_ctx_flush: %s flushing\n",
5987 itc->itc_sc->sc_sc1394.sc1394_dev.dv_xname);
5988
5989 fwohci_it_ctx_run(itc);
5990 rv = 1;
5991 }
5992
5993 return rv;
5994 }
5995
5996
5997 /*
5998 * static void fwohci_it_intr(struct fwohci_softc *sc,
5999 * struct fwochi_it_ctx *itc)
6000 *
6001 * This function is the interrupt handler for isochronous
6002 * transmit interrupt. This function will 1) unlink used
6003 * (already transmitted) buffers, 2) link new filled buffers, if
6004 * necessary and 3) say some free DMA buffers exist to
6005 * fwiso_write()
6006 */
static void
fwohci_it_intr(struct fwohci_softc *sc, struct fwohci_it_ctx *itc)
{
	struct fwohci_it_dmabuf *itd, *newstartbuf;
	u_int16_t scratchval;
	u_int32_t reg;

	reg = OHCI_SYNC_TX_DMA_READ(sc, itc->itc_num,
	    OHCI_SUBREG_ContextControlSet);

	/* print out debug info */
#ifdef FW_DEBUG
	printf("fwohci_it_intr: CTX %d\n", itc->itc_num);

	printf("fwohci_it_intr: %s: IT_CommandPtr 0x%08x "
	    "ContextCtrl 0x%08x%s%s%s%s\n",
	    sc->sc_sc1394.sc1394_dev.dv_xname,
	    OHCI_SYNC_TX_DMA_READ(sc, itc->itc_num, OHCI_SUBREG_CommandPtr),
	    reg,
	    reg & OHCI_CTXCTL_RUN ? " run" : "",
	    reg & OHCI_CTXCTL_WAKE ? " wake" : "",
	    reg & OHCI_CTXCTL_DEAD ? " dead" : "",
	    reg & OHCI_CTXCTL_ACTIVE ? " active" : "");
	printf("fwohci_it_intr: %s: scratch %x start %d end %d valid %d\n",
	    sc->sc_sc1394.sc1394_dev.dv_xname, *itc->itc_scratch,
	    itc->itc_buf_start->itd_num, itc->itc_buf_end->itd_num,
	    itc->itc_buf_cnt);
	{
		u_int32_t reg
		    = OHCI_CSR_READ(sc, OHCI_REG_IsochronousCycleTimer);
		printf("\t\tIsoCounter 0x%08x, %d %d %d\n", reg,
		    (reg >> 25) & 0xfe, (reg >> 12) & 0x1fff, reg & 0xfff);
	}
#endif /* FW_DEBUG */
	/* end print out debug info */

	/*
	 * The STORE_VALUE descriptor of the most recently completed
	 * buffer wrote that buffer's number into the low 16 bits of
	 * the scratch word; 0xffff means no buffer has completed since
	 * the last interrupt.
	 */
	scratchval = (*itc->itc_scratch) & 0x0000ffff;
	*itc->itc_scratch = 0xffffffff;

	if ((reg & OHCI_CTXCTL_ACTIVE) == 0 && scratchval != 0xffff) {
		/* DMA engine has been stopped */
		printf("DMA engine stopped\n");
		printf("fwohci_it_intr: %s: IT_CommandPtr 0x%08x "
		    "ContextCtrl 0x%08x%s%s%s%s\n",
		    sc->sc_sc1394.sc1394_dev.dv_xname,
		    OHCI_SYNC_TX_DMA_READ(sc, itc->itc_num, OHCI_SUBREG_CommandPtr),
		    reg,
		    reg & OHCI_CTXCTL_RUN ? " run" : "",
		    reg & OHCI_CTXCTL_WAKE ? " wake" : "",
		    reg & OHCI_CTXCTL_DEAD ? " dead" : "",
		    reg & OHCI_CTXCTL_ACTIVE ? " active" : "");
		printf("fwohci_it_intr: %s: scratch %x start %d end %d valid %d\n",
		    sc->sc_sc1394.sc1394_dev.dv_xname, *itc->itc_scratch,
		    itc->itc_buf_start->itd_num, itc->itc_buf_end->itd_num,
		    itc->itc_buf_cnt);
		{
			u_int32_t reg
			    = OHCI_CSR_READ(sc, OHCI_REG_IsochronousCycleTimer);
			printf("\t\tIsoCounter 0x%08x, %d %d %d\n", reg,
			    (reg >> 25) & 0xfe, (reg >> 12) & 0x1fff, reg & 0xfff);
		}
		printf("\t\tbranch of lastdesc 0x%08x\n",
		    itc->itc_buf_start->itd_lastdesc->fd_branch);

		/* Treat this like "no completion" and mark context idle. */
		scratchval = 0xffff;
		itc->itc_flags &= ~ITC_FLAGS_RUN;
	}

	/* unlink old buffers */
	if (scratchval != 0xffff) {
		/* normal path */
		newstartbuf = &itc->itc_buf[scratchval];
	} else {
		/* DMA engine stopped */
		newstartbuf = itc->itc_buf_linkend;
		INC_BUF(itc, newstartbuf);
	}

	/* Recycle every buffer from the old start up to newstartbuf. */
	itd = (struct fwohci_it_dmabuf *)itc->itc_buf_start;
	itc->itc_buf_start = newstartbuf;
	while (itd != newstartbuf) {
		itc->itc_outpkt += itd->itd_npacket;
		fwohci_itd_unlink(itd);
		INC_BUF(itc, itd);
		--itc->itc_buf_cnt;
		DPRINTF(("<buf cnt %d>\n", itc->itc_buf_cnt));
	}

#ifdef DEBUG_USERADD
	if (scratchval != 0xffff) {
		printf("fwohci0: intr start %d dataend %d %d\n", scratchval,
		    itc->itc_buf_end->itd_num, itc->itc_outpkt);
	}
#endif

	if (scratchval == 0xffff) {
		/* no data supplied */
		printf("fwohci_it_intr: no it data. output total %d\n",
		    itc->itc_outpkt);

		if (itc->itc_buf_cnt > 0) {
			printf("fwohci_it_intr: it DMA stops "
			    "w/ valid databuf %d buf %d data %d"
			    " intr reg 0x%08x\n",
			    itc->itc_buf_cnt,
			    itc->itc_buf_end->itd_num,
			    fwohci_itd_hasdata(itc->itc_buf_end),
			    OHCI_CSR_READ(sc, OHCI_REG_IntEventSet));
		} else {
			/* All the data gone */
			itc->itc_buf_start
			    = itc->itc_buf_end
			    = itc->itc_buf_linkend
			    = &itc->itc_buf[0];
			printf("fwohci_it_intr: all packets gone\n");
		}

		itc->itc_flags &= ~ITC_FLAGS_RUN;

		/* Fully stop the context and clear CommandPtr. */
		OHCI_SYNC_TX_DMA_WRITE(sc, itc->itc_num,
		    OHCI_SUBREG_ContextControlClear, 0xffffffff);
		OHCI_SYNC_TX_DMA_WRITE(sc, itc->itc_num,
		    OHCI_SUBREG_CommandPtr, 0);
		OHCI_SYNC_TX_DMA_WRITE(sc, itc->itc_num,
		    OHCI_SUBREG_ContextControlClear, 0x1f);

		/* send message */
		if (itc->itc_waitchan != NULL) {
			wakeup((void *)itc->itc_waitchan);
		}

		return;
	}

#if 0
	/* unlink old buffers */
	newstartbuf = &itc->itc_buf[scratchval];

	itd = (struct fwohci_it_dmabuf *)itc->itc_buf_start;
	itc->itc_buf_start = newstartbuf;
	while (itd != newstartbuf) {
		itc->itc_outpkt += itd->itd_npacket;
		fwohci_itd_unlink(itd);
		INC_BUF(itc, itd);
		--itc->itc_buf_cnt;
		DPRINTF(("<buf cnt %d>\n", itc->itc_buf_cnt));
	}
#endif

	/* sanity check: linkend must lie between start and end (ring order) */
	{
		int startidx, endidx, linkendidx;

		startidx = itc->itc_buf_start->itd_num;
		endidx = itc->itc_buf_end->itd_num;
		linkendidx = itc->itc_buf_linkend->itd_num;

		if (startidx < endidx) {
			if (linkendidx < startidx
			    || endidx < linkendidx) {
				printf("funny, linkend is not between start "
				    "and end [%d, %d]: %d\n",
				    startidx, endidx, linkendidx);
			}
		} else if (startidx > endidx) {
			if (linkendidx < startidx
			    && endidx < linkendidx) {
				printf("funny, linkend is not between start "
				    "and end [%d, %d]: %d\n",
				    startidx, endidx, linkendidx);
			}
		} else {
			if (linkendidx != startidx) {
				printf("funny, linkend is not between start "
				    "and end [%d, %d]: %d\n",
				    startidx, endidx, linkendidx);
			}

		}
	}

	/* link if some valid DMA buffers exist */
	if (itc->itc_buf_cnt > 1
	    && itc->itc_buf_linkend != itc->itc_buf_end) {
		struct fwohci_it_dmabuf *itdprev;
		int i;

		DPRINTF(("CTX %d: start linkend dataend bufs %d, %d, %d, %d\n",
		    itc->itc_num,
		    itc->itc_buf_start->itd_num,
		    itc->itc_buf_linkend->itd_num,
		    itc->itc_buf_end->itd_num,
		    itc->itc_buf_cnt));

		itd = itdprev = itc->itc_buf_linkend;
		INC_BUF(itc, itd);

#if 0
		if (fwohci_itd_isfilled(itd) || itc->itc_buf_cnt == 2) {
			while (itdprev != itc->itc_buf_end) {

				if (fwohci_itd_link(itdprev, itd)) {
					break;
				}

				itdprev = itd;
				INC_BUF(itc, itd);
			}
			itc->itc_buf_linkend = itdprev;
		}
#endif
		/* Chain each filled buffer after the current link end. */
		i = 0;
		while (itdprev != itc->itc_buf_end) {
			if (!fwohci_itd_isfilled(itd) && itc->itc_buf_cnt > 2) {
				break;
			}

			if (fwohci_itd_link(itdprev, itd)) {
				break;
			}

			itdprev = itd;
			INC_BUF(itc, itd);

			itc->itc_buf_linkend = itdprev;
			++i;
		}

		if (i > 0) {
			DPRINTF(("CTX %d: start linkend dataend bufs %d, %d, %d, %d\n",
			    itc->itc_num,
			    itc->itc_buf_start->itd_num,
			    itc->itc_buf_linkend->itd_num,
			    itc->itc_buf_end->itd_num,
			    itc->itc_buf_cnt));
		}
	} else {
		struct fwohci_it_dmabuf *le;

		le = itc->itc_buf_linkend;

		printf("CTX %d: start linkend dataend bufs %d, %d, %d, %d no buffer added\n",
		    itc->itc_num,
		    itc->itc_buf_start->itd_num,
		    itc->itc_buf_linkend->itd_num,
		    itc->itc_buf_end->itd_num,
		    itc->itc_buf_cnt);
		printf("\tlast descriptor %s %04x %08x\n",
		    le->itd_lastdesc->fd_flags & OHCI_DESC_INTR_ALWAYS ? "intr" : "",
		    le->itd_lastdesc->fd_flags,
		    le->itd_lastdesc->fd_branch);
	}

	/* send message */
	if (itc->itc_waitchan != NULL) {
		/* Wake a writer waiting in fwohci_it_ctx_writedata(). */
		wakeup((void *)itc->itc_waitchan);
	}
}
6266
6267
6268
6269 /*
6270 * int fwohci_itd_construct(struct fwohci_it_ctx *itc,
6271 * struct fwohci_it_dmabuf *itd, int num,
6272 * struct fwohci_desc *desc, bus_addr_t phys,
6273 * int descsize, int maxsize, paddr_t scratch)
6274 *
6275 *
6276 *
6277 */
int
fwohci_itd_construct(struct fwohci_it_ctx *itc, struct fwohci_it_dmabuf *itd,
    int num, struct fwohci_desc *desc, bus_addr_t phys, int descsize,
    int maxsize, paddr_t scratch)
{
	const char *xname = itc->itc_sc->sc_sc1394.sc1394_dev.dv_xname;
	struct fwohci_desc *fd;
	struct fwohci_desc *descend;
	int npkt;
	int bufno = 0;		/* DMA segment */
	bus_size_t bufused = 0;	/* offset in a DMA segment */
	int roundsize;
	int tag = itc->itc_tag;
	int ch = itc->itc_channel;

	itd->itd_ctx = itc;
	itd->itd_num = num;

	/* Refuse absurdly large descriptor counts (1024 packets max). */
	if (descsize > 1024*3) {
		printf("%s: fwohci_itd_construct[%d] descsize %d too big\n",
		    xname, num, descsize);
		return -1;
	}

	itd->itd_desc = desc;
	itd->itd_descsize = descsize;
	itd->itd_desc_phys = phys;

	itd->itd_lastdesc = desc;
	itd->itd_npacket = 0;

	printf("%s: fwohci_itd_construct[%d] desc %p descsize %d, maxsize %d\n",
	    xname, itd->itd_num, itd->itd_desc, itd->itd_descsize, maxsize);

	if (descsize < 4) {
		/* too small descriptor array. at least 4 */
		return -1;
	}

	/* count up how many packet can handle */
	itd->itd_maxpacket = (descsize - 1)/3;

	/* rounding up to power of 2. minimum 16 */
	/* (the first assignment is redundant; the for-init sets it too) */
	roundsize = 16;
	for (roundsize = 16; roundsize < maxsize; roundsize <<= 1);
	itd->itd_maxsize = roundsize;

	printf("\t\tdesc%d [%x, %lx]\n", itd->itd_num,
	    (u_int32_t)phys,
	    (unsigned long)((u_int32_t)phys
	    + (itd->itd_maxpacket*3 + 1)*sizeof(struct fwohci_desc)));
	printf("%s: fwohci_itd_construct[%d] npkt %d maxsize round up to %d\n",
	    xname, itd->itd_num, itd->itd_maxpacket, itd->itd_maxsize);

	/* obtain DMA buffer */
	if (fwohci_itd_dmabuf_alloc(itd)) {
		/* cannot allocate memory for DMA buffer */
		return -1;
	}

	/*
	 * make descriptor chain
	 *
	 * First descriptor group has a STORE_VALUE, OUTPUT_IMMEDIATE
	 * and OUTPUT_LAST descriptors Second and after that, a
	 * descriptor group has an OUTPUT_IMMEDIATE and an OUTPUT_LAST
	 * descriptor.
	 */
	descend = desc + descsize;

	/* set store value descriptor for 1st descriptor group */
	desc->fd_flags = OHCI_DESC_STORE_VALUE;
	desc->fd_reqcount = num; /* write number of DMA buffer class */
	desc->fd_data = scratch; /* at physical memory 'scratch' */
	desc->fd_branch = 0;
	desc->fd_status = desc->fd_rescount = 0;

	itd->itd_store = desc;
	itd->itd_store_phys = phys;

	++desc;
	phys += 16;

	npkt = 0;
	/* make OUTPUT_DESC chain for packets */
	for (fd = desc; fd + 2 < descend; fd += 3, ++npkt) {
		struct fwohci_desc *fi = fd;		/* OUTPUT_IMMEDIATE */
		struct fwohci_desc *fl = fd + 2;	/* OUTPUT_LAST */
		u_int32_t *fi_data = (u_int32_t *)(fd + 1);

#if 0
		if (npkt > itd->itd_maxpacket - 3) {
			printf("%s: %3d fi fl %p %p\n", xname, npkt, fi,fl);
		}
#endif

		fi->fd_reqcount = 8; /* data size for OHCI command */
		fi->fd_flags = OHCI_DESC_IMMED;
		fi->fd_data = 0;
		fi->fd_branch = 0; /* branch for error */
		fi->fd_status = fi->fd_rescount = 0;

		/* channel and tag is unchanged */
		*fi_data = OHCI_ITHEADER_VAL(TAG, tag) |
		    OHCI_ITHEADER_VAL(CHAN, ch) |
		    OHCI_ITHEADER_VAL(TCODE, IEEE1394_TCODE_STREAM_DATA);
		*++fi_data = 0;
		*++fi_data = 0;
		*++fi_data = 0;

		/* Branch to the next packet's descriptor group (Z = 3). */
		fl->fd_flags = OHCI_DESC_OUTPUT | OHCI_DESC_LAST |
		    OHCI_DESC_BRANCH;
		fl->fd_branch =
		    (phys + sizeof(struct fwohci_desc)*(npkt + 1)*3) | 0x03;
		fl->fd_status = fl->fd_rescount = 0;

#ifdef FW_DEBUG
		if (npkt > itd->itd_maxpacket - 3) {
			DPRINTF(("%s: %3d fi fl fl branch %p %p 0x%x\n",
			    xname, npkt, fi, fl, (int)fl->fd_branch));
		}
#endif

		/* physical addr to data? */
		fl->fd_data =
		    (u_int32_t)((itd->itd_seg[bufno].ds_addr + bufused));
		bufused += itd->itd_maxsize;
		if (bufused > itd->itd_seg[bufno].ds_len) {
			/* Current segment exhausted: move to the next. */
			bufused = 0;
			if (++bufno == itd->itd_nsegs) {
				/* fail */
				break;
			}
		}
	}

#if 0
	if (itd->itd_num == 0) {
		u_int32_t *fdp;
		u_int32_t adr;
		int i = 0;

		fdp = (u_int32_t *)itd->itd_desc;
		adr = (u_int32_t)itd->itd_desc_phys; /* XXX */

		printf("fwohci_itd_construct: audit DMA desc chain. %d\n",
		    itd->itd_maxpacket);
		for (i = 0; i < itd->itd_maxpacket*12 + 4; ++i) {
			if (i % 4 == 0) {
				printf("\n%x:", adr + 4*i);
			}
			printf(" %08x", fdp[i]);
		}
		printf("\n");

	}
#endif
	/* last branch should be 0 */
	--fd;
	fd->fd_branch = 0;

	printf("%s: pkt %d %d maxdesc %p\n",
	    xname, npkt, itd->itd_maxpacket, descend);

	return 0;
}
6444
6445 void
6446 fwohci_itd_destruct(struct fwohci_it_dmabuf *itd)
6447 {
6448 const char *xname = itd->itd_ctx->itc_sc->sc_sc1394.sc1394_dev.dv_xname;
6449
6450 printf("%s: fwohci_itd_destruct %d\n", xname, itd->itd_num);
6451
6452 fwohci_itd_dmabuf_free(itd);
6453 }
6454
6455
6456 /*
6457 * static int fwohci_itd_dmabuf_alloc(struct fwohci_it_dmabuf *itd)
6458 *
6459 * This function allocates DMA memory for fwohci_it_dmabuf. This
6460 * function will return 0 when it succeeds and return non-zero
6461 * value when it fails.
6462 */
static int
fwohci_itd_dmabuf_alloc(struct fwohci_it_dmabuf *itd)
{
	const char *xname = itd->itd_ctx->itc_sc->sc_sc1394.sc1394_dev.dv_xname;
	bus_dma_tag_t dmat = itd->itd_ctx->itc_sc->sc_dmat;

	/* total buffer size: one itd_maxsize-sized slot per packet */
	int dmasize = itd->itd_maxsize * itd->itd_maxpacket;
	int error;

	DPRINTF(("%s: fwohci_itd_dmabuf_alloc[%d] dmasize %d maxpkt %d\n",
	    xname, itd->itd_num, dmasize, itd->itd_maxpacket));

	/* Step 1: allocate raw DMA-able memory, page-aligned. */
	if ((error = bus_dmamem_alloc(dmat, dmasize, PAGE_SIZE, 0,
	    itd->itd_seg, FWOHCI_MAX_ITDATASEG, &itd->itd_nsegs, 0)) != 0) {
		printf("%s: unable to allocate data buffer, error = %d\n",
		    xname, error);
		goto fail_0;
	}

	/* checking memory range */
#ifdef FW_DEBUG
	{
		int loop;

		for (loop = 0; loop < itd->itd_nsegs; ++loop) {
			DPRINTF(("\t%.2d: 0x%lx - 0x%lx\n", loop,
			    (long)itd->itd_seg[loop].ds_addr,
			    (long)itd->itd_seg[loop].ds_addr
			    + (long)itd->itd_seg[loop].ds_len - 1));
		}
	}
#endif

	/* Step 2: map the segments into kernel virtual address space. */
	if ((error = bus_dmamem_map(dmat, itd->itd_seg, itd->itd_nsegs,
	    dmasize, (caddr_t *)&itd->itd_buf,
	    BUS_DMA_COHERENT | BUS_DMA_WAITOK)) != 0) {
		printf("%s: unable to map data buffer, error = %d\n",
		    xname, error);
		goto fail_1;
	}

	DPRINTF(("fwohci_it_data_alloc[%d]: bus_dmamem_map addr %p\n",
	    itd->itd_num, itd->itd_buf));

	/* Step 3: create a DMA map describing the buffer for the device. */
	if ((error = bus_dmamap_create(dmat, /*chunklen*/dmasize,
	    itd->itd_nsegs, dmasize, 0, BUS_DMA_WAITOK,
	    &itd->itd_dmamap)) != 0) {
		printf("%s: unable to create data buffer DMA map, "
		    "error = %d\n", xname, error);
		goto fail_2;
	}

	DPRINTF(("fwohci_it_data_alloc: bus_dmamem_create\n"));

	/* Step 4: load the kernel-virtual buffer into the DMA map. */
	if ((error = bus_dmamap_load(dmat, itd->itd_dmamap,
	    itd->itd_buf, dmasize, NULL, BUS_DMA_WAITOK)) != 0) {
		printf("%s: unable to load data buffer DMA map, error = %d\n",
		    xname, error);
		goto fail_3;
	}

	DPRINTF(("fwohci_itd_dmabuf_alloc: load DMA memory vm %p\n",
	    itd->itd_buf));
	DPRINTF(("\tmapsize %ld nsegs %d\n",
	    (long)itd->itd_dmamap->dm_mapsize, itd->itd_dmamap->dm_nsegs));

#ifdef FW_DEBUG
	{
		int loop;

		for (loop = 0; loop < itd->itd_dmamap->dm_nsegs; ++loop) {
			DPRINTF(("\t%.2d: 0x%lx - 0x%lx\n", loop,
			    (long)itd->itd_dmamap->dm_segs[loop].ds_addr,
			    (long)itd->itd_dmamap->dm_segs[loop].ds_addr +
			    (long)itd->itd_dmamap->dm_segs[loop].ds_len - 1));
		}
	}
#endif

	return 0;

	/* Error unwind: release resources in reverse order of acquisition. */
 fail_3:
	bus_dmamap_destroy(dmat, itd->itd_dmamap);
 fail_2:
	bus_dmamem_unmap(dmat, (caddr_t)itd->itd_buf, dmasize);
 fail_1:
	bus_dmamem_free(dmat, itd->itd_seg, itd->itd_nsegs);
 fail_0:
	/* Mark the buffer as holding no DMA resources. */
	itd->itd_nsegs = 0;
	itd->itd_maxpacket = 0;
	return error;
}
6555
6556 /*
6557 * static void fwohci_itd_dmabuf_free(struct fwohci_it_dmabuf *itd)
6558 *
6559 * This function will release memory resource allocated by
6560 * fwohci_itd_dmabuf_alloc().
6561 */
static void
fwohci_itd_dmabuf_free(struct fwohci_it_dmabuf *itd)
{
	bus_dma_tag_t dmat = itd->itd_ctx->itc_sc->sc_dmat;
	/* must match the size computed by fwohci_itd_dmabuf_alloc() */
	int dmasize = itd->itd_maxsize * itd->itd_maxpacket;

	/*
	 * Tear down in reverse order of allocation: map, mapping, memory.
	 * NOTE(review): the map is destroyed without a prior
	 * bus_dmamap_unload() — confirm this is acceptable here.
	 */
	bus_dmamap_destroy(dmat, itd->itd_dmamap);
	bus_dmamem_unmap(dmat, (caddr_t)itd->itd_buf, dmasize);
	bus_dmamem_free(dmat, itd->itd_seg, itd->itd_nsegs);

	/* Mark the buffer as holding no DMA resources. */
	itd->itd_nsegs = 0;
	itd->itd_maxpacket = 0;
}
6575
6576
6577
6578 /*
6579 * int fwohci_itd_link(struct fwohci_it_dmabuf *itd,
6580 * struct fwohci_it_dmabuf *itdc)
6581 *
 * This function will concatenate the two descriptor chains in the
 * dmabufs itd and itdc.  The descriptor chain in itdc follows the
 * one in itd.  This function will move the interrupt packet from
 * the end of itd to the top of itdc.
 *
 * This function will return 0 when it succeeds.  If an
 * error happens, it returns a negative value.
6589 */
int
fwohci_itd_link(struct fwohci_it_dmabuf *itd, struct fwohci_it_dmabuf *itdc)
{
	struct fwohci_desc *fd1, *fdc;

	/* itdc must contain at least one written packet to append. */
	if (itdc->itd_lastdesc == itdc->itd_desc) {
		/* no valid data */
		printf("fwohci_itd_link: no data\n");
		return -1;
	}

	/* Refuse to link a buffer that is already part of a chain. */
	if (itdc->itd_flags & ITD_FLAGS_LOCK) {
		/* used already */
		printf("fwohci_itd_link: link locked\n");
		return -1;
	}
	itdc->itd_flags |= ITD_FLAGS_LOCK;
	/* for the first one */
	itd->itd_flags |= ITD_FLAGS_LOCK;

	DPRINTF(("linking %d after %d: add %d pkts\n",
	    itdc->itd_num, itd->itd_num, itdc->itd_npacket));

	/* XXX: should sync cache */

	/* fd1: tail descriptor of itd; fdc: first OUTPUT_LAST of itdc. */
	fd1 = itd->itd_lastdesc;
	fdc = itdc->itd_desc + 3;	/* OUTPUT_LAST in the first descriptor */

	/* sanity check */
#define OUTPUT_LAST_DESC (OHCI_DESC_OUTPUT | OHCI_DESC_LAST | OHCI_DESC_BRANCH)
	if ((fd1->fd_flags & OUTPUT_LAST_DESC) != OUTPUT_LAST_DESC) {
		printf("funny! not OUTPUT_LAST descriptor %p\n", fd1);
	}
	/* each packet uses 3 descriptors, so lastdesc - desc == 3*npacket */
	if (itd->itd_lastdesc - itd->itd_desc != 3 * itd->itd_npacket) {
		printf("funny! packet number inconsistency %ld <=> %ld\n",
		    (long)(itd->itd_lastdesc - itd->itd_desc),
		    (long)(3*itd->itd_npacket));
	}

	/*
	 * Move the completion interrupt from itd's tail to itdc's first
	 * OUTPUT_LAST, then branch itd's tail into itdc's chain (the low
	 * bits of the branch word are set to 4 — presumably the OHCI Z
	 * descriptor count; confirm against the OHCI 1394 spec).
	 */
	fd1->fd_flags &= ~OHCI_DESC_INTR_ALWAYS;
	fdc->fd_flags |= OHCI_DESC_INTR_ALWAYS;
	fd1->fd_branch = itdc->itd_desc_phys | 4;

	/* Interrupt on the new tail, and terminate the chain there. */
	itdc->itd_lastdesc->fd_flags |= OHCI_DESC_INTR_ALWAYS;
	/* save branch addr of lastdesc and substitute 0 to it */
	itdc->itd_savedbranch = itdc->itd_lastdesc->fd_branch;
	itdc->itd_lastdesc->fd_branch = 0;

	DPRINTF(("%s: link (%d %d), add pkt %d/%d branch 0x%x next saved 0x%x\n",
	    itd->itd_ctx->itc_sc->sc_sc1394.sc1394_dev.dv_xname,
	    itd->itd_num, itdc->itd_num,
	    itdc->itd_npacket, itdc->itd_maxpacket,
	    (int)fd1->fd_branch, (int)itdc->itd_savedbranch));

	/* XXX: should sync cache */

	return 0;
}
6648
6649
6650 /*
6651 * int fwohci_itd_unlink(struct fwohci_it_dmabuf *itd)
6652 *
 * This function will unlink the descriptor chain from the valid
 * chain of descriptors.  The target descriptor chain is specified
 * by the argument.
6656 */
6657 int
6658 fwohci_itd_unlink(struct fwohci_it_dmabuf *itd)
6659 {
6660 struct fwohci_desc *fd;
6661
6662 /* XXX: should sync cache */
6663
6664 fd = itd->itd_lastdesc;
6665
6666 fd->fd_branch = itd->itd_savedbranch;
6667 DPRINTF(("%s: unlink buf %d branch restored 0x%x\n",
6668 itd->itd_ctx->itc_sc->sc_sc1394.sc1394_dev.dv_xname,
6669 itd->itd_num, (int)fd->fd_branch));
6670
6671 fd->fd_flags &= ~OHCI_DESC_INTR_ALWAYS;
6672 itd->itd_lastdesc = itd->itd_desc;
6673
6674 fd = itd->itd_desc + 3; /* 1st OUTPUT_LAST */
6675 fd->fd_flags &= ~OHCI_DESC_INTR_ALWAYS;
6676
6677 /* XXX: should sync cache */
6678
6679 itd->itd_npacket = 0;
6680 itd->itd_lastdesc = itd->itd_desc;
6681 itd->itd_flags &= ~ITD_FLAGS_LOCK;
6682
6683 return 0;
6684 }
6685
6686
6687 /*
6688 * static int fwohci_itd_writedata(struct fwohci_it_dmabuf *, int ndata,
6689 * struct ieee1394_it_datalist *);
6690 *
6691 * This function will return the number of written data, or
6692 * negative value if an error happens
6693 */
6694 int
6695 fwohci_itd_writedata(struct fwohci_it_dmabuf *itd, int ndata,
6696 struct ieee1394_it_datalist *itdata)
6697 {
6698 int writepkt;
6699 int i;
6700 u_int8_t *p;
6701 struct fwohci_desc *fd;
6702 u_int32_t *fd_idata;
6703 const int dspace =
6704 itd->itd_maxpacket - itd->itd_npacket < ndata ?
6705 itd->itd_maxpacket - itd->itd_npacket : ndata;
6706
6707 if (itd->itd_flags & ITD_FLAGS_LOCK || dspace == 0) {
6708 /* it is locked: cannot write anything */
6709 if (itd->itd_flags & ITD_FLAGS_LOCK) {
6710 DPRINTF(("fwohci_itd_writedata: buf %d lock flag %s,"
6711 " dspace %d\n",
6712 itd->itd_num,
6713 itd->itd_flags & ITD_FLAGS_LOCK ? "ON" : "OFF",
6714 dspace));
6715 return 0; /* not an error */
6716 }
6717 }
6718
6719 /* sanity check */
6720 if (itd->itd_maxpacket < itd->itd_npacket) {
6721 printf("fwohci_itd_writedata: funny! # pkt > maxpkt"
6722 "%d %d\n", itd->itd_npacket, itd->itd_maxpacket);
6723 }
6724
6725 p = itd->itd_buf + itd->itd_maxsize * itd->itd_npacket;
6726 fd = itd->itd_lastdesc;
6727
6728 DPRINTF(("fwohci_itd_writedata(%d[%p], %d, 0x%p) invoked:\n",
6729 itd->itd_num, itd, ndata, itdata));
6730
6731 for (writepkt = 0; writepkt < dspace; ++writepkt) {
6732 u_int8_t *p1 = p;
6733 int cpysize;
6734 int totalsize = 0;
6735
6736 DPRINTF(("writing %d ", writepkt));
6737
6738 for (i = 0; i < 4; ++i) {
6739 switch (itdata->it_cmd[i]&IEEE1394_IT_CMD_MASK) {
6740 case IEEE1394_IT_CMD_IMMED:
6741 memcpy(p1, &itdata->it_u[i].id_data, 8);
6742 p1 += 8;
6743 totalsize += 8;
6744 break;
6745 case IEEE1394_IT_CMD_PTR:
6746 cpysize = itdata->it_cmd[i]&IEEE1394_IT_CMD_SIZE;
6747 DPRINTF(("fwohci_itd_writedata: cpy %d %p\n",
6748 cpysize, itdata->it_u[i].id_addr));
6749 if (totalsize + cpysize > itd->itd_maxsize) {
6750 /* error: too big size */
6751 break;
6752 }
6753 memcpy(p1, itdata->it_u[i].id_addr, cpysize);
6754 totalsize += cpysize;
6755 break;
6756 case IEEE1394_IT_CMD_NOP:
6757 break;
6758 default:
6759 /* unknown command */
6760 break;
6761 }
6762 }
6763
6764 /* only for DV test */
6765 if (totalsize != 488) {
6766 printf("error: totalsize %d at %d\n",
6767 totalsize, writepkt);
6768 }
6769
6770 DPRINTF(("totalsize %d ", totalsize));
6771
6772 /* fill iso command in OUTPUT_IMMED descriptor */
6773
6774 /* XXX: sync cache */
6775 fd += 2; /* next to first descriptor */
6776 fd_idata = (u_int32_t *)fd;
6777
6778 /*
6779 * Umm, should tag, channel and tcode be written
6780 * previously in itd_construct?
6781 */
6782 #if 0
6783 *fd_idata = OHCI_ITHEADER_VAL(TAG, tag) |
6784 OHCI_ITHEADER_VAL(CHAN, ch) |
6785 OHCI_ITHEADER_VAL(TCODE, IEEE1394_TCODE_STREAM_DATA);
6786 #endif
6787 *++fd_idata = totalsize << 16;
6788
6789 /* fill data in OUTPUT_LAST descriptor */
6790 ++fd;
6791 /* intr check... */
6792 if (fd->fd_flags & OHCI_DESC_INTR_ALWAYS) {
6793 printf("uncleared INTR flag in desc %ld\n",
6794 (long)(fd - itd->itd_desc - 1)/3);
6795 }
6796 fd->fd_flags &= ~OHCI_DESC_INTR_ALWAYS;
6797
6798 if ((fd - itd->itd_desc - 1)/3 != itd->itd_maxpacket - 1) {
6799 u_int32_t bcal;
6800
6801 bcal = (fd - itd->itd_desc + 1)*sizeof(struct fwohci_desc) + (u_int32_t)itd->itd_desc_phys;
6802 if (bcal != (fd->fd_branch & 0xfffffff0)) {
6803
6804 printf("uum, branch differ at %d, %x %x %ld/%d\n",
6805 itd->itd_num,
6806 bcal,
6807 fd->fd_branch,
6808 (long)((fd - itd->itd_desc - 1)/3),
6809 itd->itd_maxpacket);
6810 }
6811 } else {
6812 /* the last pcaket */
6813 if (fd->fd_branch != 0) {
6814 printf("uum, branch differ at %d, %x %x %ld/%d\n",
6815 itd->itd_num,
6816 0,
6817 fd->fd_branch,
6818 (long)((fd - itd->itd_desc - 1)/3),
6819 itd->itd_maxpacket);
6820 }
6821 }
6822
6823 /* sanity check */
6824 if (fd->fd_flags != OUTPUT_LAST_DESC) {
6825 printf("fwohci_itd_writedata: dmabuf %d desc inconsistent %d\n",
6826 itd->itd_num, writepkt + itd->itd_npacket);
6827 break;
6828 }
6829 fd->fd_reqcount = totalsize;
6830 /* XXX: sync cache */
6831
6832 ++itdata;
6833 p += itd->itd_maxsize;
6834 }
6835
6836 DPRINTF(("loop start %d, %d times %d\n",
6837 itd->itd_npacket, dspace, writepkt));
6838
6839 itd->itd_npacket += writepkt;
6840 itd->itd_lastdesc = fd;
6841
6842 return writepkt;
6843 }
6844
6845
6846
6847
6848
6849 int
6850 fwohci_itd_isfilled(struct fwohci_it_dmabuf *itd)
6851 {
6852
6853 return itd->itd_npacket*2 > itd->itd_maxpacket ? 1 : 0;
6854 }
Cache object: 664153ac79642137d7eed6d987ed7690
|