1 /* $NetBSD: fwdev.c,v 1.33 2021/11/10 16:08:17 msaitoh Exp $ */
2 /*-
3 * Copyright (c) 2003 Hidetoshi Shimokawa
4 * Copyright (c) 1998-2002 Katsushi Kobayashi and Hidetoshi Shimokawa
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * 3. All advertising materials mentioning features or use of this software
16 * must display the acknowledgement as bellow:
17 *
18 * This product includes software developed by K. Kobayashi and H. Shimokawa
19 *
20 * 4. The name of the author may not be used to endorse or promote products
21 * derived from this software without specific prior written permission.
22 *
23 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
24 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
25 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
26 * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
27 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
28 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
29 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
30 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
31 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
32 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
33 * POSSIBILITY OF SUCH DAMAGE.
34 *
35 * $FreeBSD: src/sys/dev/firewire/fwdev.c,v 1.52 2007/06/06 14:31:36 simokawa Exp $
36 *
37 */
38
39 #include <sys/cdefs.h>
40 __KERNEL_RCSID(0, "$NetBSD: fwdev.c,v 1.33 2021/11/10 16:08:17 msaitoh Exp $");
41
42 #include <sys/param.h>
43 #include <sys/device.h>
44 #include <sys/errno.h>
45 #include <sys/buf.h>
46 #include <sys/bus.h>
47 #include <sys/conf.h>
48 #include <sys/kernel.h>
49 #include <sys/malloc.h>
50 #include <sys/mbuf.h>
51 #include <sys/poll.h>
52 #include <sys/proc.h>
53 #include <sys/select.h>
54
55 #include <dev/ieee1394/firewire.h>
56 #include <dev/ieee1394/firewirereg.h>
57 #include <dev/ieee1394/fwdma.h>
58 #include <dev/ieee1394/fwmem.h>
59 #include <dev/ieee1394/iec68113.h>
60
61 #include "ioconf.h"
62
63 #define FWNODE_INVAL 0xffff
64
65 dev_type_open(fw_open);
66 dev_type_close(fw_close);
67 dev_type_read(fw_read);
68 dev_type_write(fw_write);
69 dev_type_ioctl(fw_ioctl);
70 dev_type_poll(fw_poll);
71 dev_type_mmap(fw_mmap);
72 dev_type_strategy(fw_strategy);
73
/*
 * Block device switch: only open/close/strategy/ioctl are provided;
 * fw_strategy() rejects transfers on non-fwmem nodes.
 */
const struct bdevsw fw_bdevsw = {
	.d_open = fw_open,
	.d_close = fw_close,
	.d_strategy = fw_strategy,
	.d_ioctl = fw_ioctl,
	.d_dump = nodump,
	.d_psize = nosize,
	.d_discard = nodiscard,
	.d_flag = D_OTHER
};
84
/*
 * Character device switch: full read/write/ioctl/poll/mmap interface
 * for the /dev/fw* nodes.
 */
const struct cdevsw fw_cdevsw = {
	.d_open = fw_open,
	.d_close = fw_close,
	.d_read = fw_read,
	.d_write = fw_write,
	.d_ioctl = fw_ioctl,
	.d_stop = nostop,
	.d_tty = notty,
	.d_poll = fw_poll,
	.d_mmap = fw_mmap,
	.d_kqfilter = nokqfilter,
	.d_discard = nodiscard,
	.d_flag = D_OTHER
};
99
/*
 * Per-open-instance state, hung off firewire_softc::si_drv1 between
 * fw_open() and fw_close().
 */
struct fw_drv1 {
	struct firewire_comm *fc;	/* back pointer to the bus */
	struct fw_xferq *ir;		/* iso receive queue (FW_SRSTREAM) */
	struct fw_xferq *it;		/* iso transmit queue (FW_STSTREAM) */
	struct fw_isobufreq bufreq;	/* buffer geometry set via FW_SSTBUF */
	STAILQ_HEAD(, fw_bind) binds;	/* address ranges from FW_SBINDADDR */
	STAILQ_HEAD(, fw_xfer) rq;	/* received async packets for read */
	kcondvar_t cv;			/* readers sleep here (fw_read_async) */
};
109
110 static int fwdev_allocbuf(struct firewire_comm *, struct fw_xferq *,
111 struct fw_bufspec *);
112 static int fwdev_freebuf(struct fw_xferq *);
113 static int fw_read_async(struct fw_drv1 *, struct uio *, int);
114 static int fw_write_async(struct fw_drv1 *, struct uio *, int);
115 static void fw_hand(struct fw_xfer *);
116
117
/*
 * Open a /dev/fw* node.  Opens are exclusive per unit: si_drv1 doubles
 * as the "in use" marker.  fwmem minor devices are handed off to
 * fwmem_open().
 */
int
fw_open(dev_t dev, int flags, int fmt, struct lwp *td)
{
	struct firewire_softc *sc;
	struct fw_drv1 *d;
	int err = 0;

	sc = device_lookup_private(&ieee1394if_cd, DEV2UNIT(dev));
	if (sc == NULL)
		return ENXIO;

	/* fwmem device nodes have their own open routine */
	if (DEV_FWMEM(dev))
		return fwmem_open(dev, flags, fmt, td);

	mutex_enter(&sc->fc->fc_mtx);
	if (sc->si_drv1 != NULL) {
		/* already open: enforce exclusive access */
		mutex_exit(&sc->fc->fc_mtx);
		return EBUSY;
	}
	/* set dummy value for allocation */
	/*
	 * The placeholder reserves the unit while we sleep in malloc()
	 * below without holding fc_mtx.
	 */
	sc->si_drv1 = (void *)-1;
	mutex_exit(&sc->fc->fc_mtx);

	sc->si_drv1 = malloc(sizeof(struct fw_drv1), M_FW, M_WAITOK | M_ZERO);
	if (sc->si_drv1 == NULL)
		return ENOMEM;

	d = (struct fw_drv1 *)sc->si_drv1;
	d->fc = sc->fc;
	STAILQ_INIT(&d->binds);
	STAILQ_INIT(&d->rq);
	cv_init(&d->cv, "fwra");

	return err;
}
153
/*
 * Close a /dev/fw* node: drop all user binds, tear down any iso RX/TX
 * DMA state, and release the per-open fw_drv1.
 */
int
fw_close(dev_t dev, int flags, int fmt, struct lwp *td)
{
	struct firewire_softc *sc;
	struct firewire_comm *fc;
	struct fw_drv1 *d;
	struct fw_xfer *xfer;
	struct fw_bind *fwb;
	int err = 0;

	sc = device_lookup_private(&ieee1394if_cd, DEV2UNIT(dev));
	if (sc == NULL)
		return ENXIO;

	if (DEV_FWMEM(dev))
		return fwmem_close(dev, flags, fmt, td);

	d = (struct fw_drv1 *)sc->si_drv1;
	fc = d->fc;

	/* remove binding */
	for (fwb = STAILQ_FIRST(&d->binds); fwb != NULL;
	    fwb = STAILQ_FIRST(&d->binds)) {
		fw_bindremove(fc, fwb);
		STAILQ_REMOVE_HEAD(&d->binds, chlist);
		fw_xferlist_remove(&fwb->xferlist);
		free(fwb, M_FW);
	}
	if (d->ir != NULL) {
		struct fw_xferq *ir = d->ir;

		/*
		 * NOTE(review): this early return leaves d and the it
		 * queue unfreed — looks like a leak path; verify it is
		 * unreachable in practice.
		 */
		if ((ir->flag & FWXFERQ_OPEN) == 0)
			return EINVAL;
		if (ir->flag & FWXFERQ_RUNNING) {
			/* stop DMA before freeing its buffers */
			ir->flag &= ~FWXFERQ_RUNNING;
			fc->irx_disable(fc, ir->dmach);
		}
		/* free extbuf */
		fwdev_freebuf(ir);
		/* drain receiving buffer */
		for (xfer = STAILQ_FIRST(&ir->q); xfer != NULL;
		    xfer = STAILQ_FIRST(&ir->q)) {
			ir->queued--;
			STAILQ_REMOVE_HEAD(&ir->q, link);

			xfer->resp = 0;
			fw_xfer_done(xfer);
		}
		/* hand the queue back in a clean state */
		ir->flag &=
		    ~(FWXFERQ_OPEN | FWXFERQ_MODEMASK | FWXFERQ_CHTAGMASK);
		d->ir = NULL;

	}
	if (d->it != NULL) {
		struct fw_xferq *it = d->it;

		/* NOTE(review): same possible leak path as above */
		if ((it->flag & FWXFERQ_OPEN) == 0)
			return EINVAL;
		if (it->flag & FWXFERQ_RUNNING) {
			it->flag &= ~FWXFERQ_RUNNING;
			fc->itx_disable(fc, it->dmach);
		}
		/* free extbuf */
		fwdev_freebuf(it);
		it->flag &=
		    ~(FWXFERQ_OPEN | FWXFERQ_MODEMASK | FWXFERQ_CHTAGMASK);
		d->it = NULL;
	}
	cv_destroy(&d->cv);
	free(sc->si_drv1, M_FW);
	sc->si_drv1 = NULL;		/* unit is free for re-open again */

	return err;
}
228
/*
 * Read from a /dev/fw* node.  With an iso RX queue configured
 * (FW_SRSTREAM) this copies out received iso packets chunk by chunk;
 * otherwise it falls through to fw_read_async() for async packets.
 * fwmem nodes go through physio().
 */
int
fw_read(dev_t dev, struct uio *uio, int ioflag)
{
	struct firewire_softc *sc;
	struct firewire_comm *fc;
	struct fw_drv1 *d;
	struct fw_xferq *ir;
	struct fw_pkt *fp;
	int err = 0, slept = 0;

	sc = device_lookup_private(&ieee1394if_cd, DEV2UNIT(dev));
	if (sc == NULL)
		return ENXIO;

	if (DEV_FWMEM(dev))
		return physio(fw_strategy, NULL, dev, ioflag, minphys, uio);

	d = (struct fw_drv1 *)sc->si_drv1;
	fc = d->fc;
	ir = d->ir;

	/* no iso RX configured: serve bound async packets instead */
	if (ir == NULL)
		return fw_read_async(d, uio, ioflag);

	if (ir->buf == NULL)
		return EIO;

	mutex_enter(&fc->fc_mtx);
readloop:
	if (ir->stproc == NULL) {
		/* iso bulkxfer */
		/* pick up the next filled chunk, if any */
		ir->stproc = STAILQ_FIRST(&ir->stvalid);
		if (ir->stproc != NULL) {
			STAILQ_REMOVE_HEAD(&ir->stvalid, link);
			ir->queued = 0;
		}
	}
	if (ir->stproc == NULL) {
		/* no data available */
		if (slept == 0) {
			/* wait up to 1s for the ISR to post a chunk */
			slept = 1;
			ir->flag |= FWXFERQ_WAKEUP;
			err = cv_timedwait_sig(&ir->cv, &fc->fc_mtx, hz);
			ir->flag &= ~FWXFERQ_WAKEUP;
			if (err == 0)
				goto readloop;
		} else if (slept == 1)
			err = EIO;
		mutex_exit(&fc->fc_mtx);
		return err;
	} else if (ir->stproc != NULL) {
		/* iso bulkxfer */
		mutex_exit(&fc->fc_mtx);
		/* ir->queued indexes packets within the current chunk */
		fp = (struct fw_pkt *)fwdma_v_addr(ir->buf,
		    ir->stproc->poffset + ir->queued);
		if (fc->irx_post != NULL)
			fc->irx_post(fc, fp->mode.ld);
		if (fp->mode.stream.len == 0)
			return EIO;
		/* copy packet including its one-quadlet iso header */
		err = uiomove((void *)fp,
		    fp->mode.stream.len + sizeof(uint32_t), uio);
		ir->queued++;
		if (ir->queued >= ir->bnpacket) {
			/* chunk consumed: recycle it and kick DMA */
			STAILQ_INSERT_TAIL(&ir->stfree, ir->stproc, link);
			fc->irx_enable(fc, ir->dmach);
			ir->stproc = NULL;
		}
		if (uio->uio_resid >= ir->psize) {
			/* room for more: loop, but do not sleep again */
			slept = -1;
			mutex_enter(&fc->fc_mtx);
			goto readloop;
		}
	} else
		mutex_exit(&fc->fc_mtx);
	return err;
}
305
/*
 * Write to a /dev/fw* node.  With an iso TX queue configured
 * (FW_STSTREAM) this fills transmit chunks packet by packet (header
 * then payload); otherwise it sends a single async packet via
 * fw_write_async().  fwmem nodes go through physio().
 */
int
fw_write(dev_t dev, struct uio *uio, int ioflag)
{
	struct firewire_softc *sc;
	struct firewire_comm *fc;
	struct fw_drv1 *d;
	struct fw_pkt *fp;
	struct fw_xferq *it;
	int slept = 0, err = 0;

	sc = device_lookup_private(&ieee1394if_cd, DEV2UNIT(dev));
	if (sc == NULL)
		return ENXIO;

	if (DEV_FWMEM(dev))
		return physio(fw_strategy, NULL, dev, ioflag, minphys, uio);

	d = (struct fw_drv1 *)sc->si_drv1;
	fc = d->fc;
	it = d->it;

	/* no iso TX configured: treat the data as one async packet */
	if (it == NULL)
		return fw_write_async(d, uio, ioflag);

	if (it->buf == NULL)
		return EIO;

	mutex_enter(&fc->fc_mtx);
isoloop:
	if (it->stproc == NULL) {
		/* grab a free chunk to fill */
		it->stproc = STAILQ_FIRST(&it->stfree);
		if (it->stproc != NULL) {
			STAILQ_REMOVE_HEAD(&it->stfree, link);
			it->queued = 0;
		} else if (slept == 0) {
			/* no free chunk: wait up to 1s for TX to drain */
			slept = 1;
#if 0 /* XXX to avoid lock recursion */
			err = fc->itx_enable(fc, it->dmach);
			if (err)
				goto out;
#endif
			err = cv_timedwait_sig(&it->cv, &fc->fc_mtx, hz);
			if (err)
				goto out;
			goto isoloop;
		} else {
			err = EIO;
			goto out;
		}
	}
	mutex_exit(&fc->fc_mtx);
	fp = (struct fw_pkt *)fwdma_v_addr(it->buf,
	    it->stproc->poffset + it->queued);
	/* copy in the iso header, then the payload it describes */
	err = uiomove((void *)fp, sizeof(struct fw_isohdr), uio);
	if (err != 0)
		return err;
	err =
	    uiomove((void *)fp->mode.stream.payload, fp->mode.stream.len, uio);
	it->queued++;
	if (it->queued >= it->bnpacket) {
		/* chunk full: queue it for transmit and start DMA */
		STAILQ_INSERT_TAIL(&it->stvalid, it->stproc, link);
		it->stproc = NULL;
		err = fc->itx_enable(fc, it->dmach);
	}
	if (uio->uio_resid >= sizeof(struct fw_isohdr)) {
		/* more user data: loop (may sleep again for a chunk) */
		slept = 0;
		mutex_enter(&fc->fc_mtx);
		goto isoloop;
	}
	return err;

out:
	mutex_exit(&fc->fc_mtx);
	return err;
}
381
382 int
383 fw_ioctl(dev_t dev, u_long cmd, void *data, int flag, struct lwp *td)
384 {
385 struct firewire_softc *sc;
386 struct firewire_comm *fc;
387 struct fw_drv1 *d;
388 struct fw_device *fwdev;
389 struct fw_bind *fwb;
390 struct fw_xferq *ir, *it;
391 struct fw_xfer *xfer;
392 struct fw_pkt *fp;
393 struct fw_devinfo *devinfo;
394 struct fw_devlstreq *fwdevlst = (struct fw_devlstreq *)data;
395 struct fw_asyreq *asyreq = (struct fw_asyreq *)data;
396 struct fw_isochreq *ichreq = (struct fw_isochreq *)data;
397 struct fw_isobufreq *ibufreq = (struct fw_isobufreq *)data;
398 struct fw_asybindreq *bindreq = (struct fw_asybindreq *)data;
399 struct fw_crom_buf *crom_buf = (struct fw_crom_buf *)data;
400 int i, len, err = 0;
401 void *ptr;
402
403 sc = device_lookup_private(&ieee1394if_cd, DEV2UNIT(dev));
404 if (sc == NULL)
405 return ENXIO;
406
407 if (DEV_FWMEM(dev))
408 return fwmem_ioctl(dev, cmd, data, flag, td);
409
410 if (!data)
411 return EINVAL;
412
413 d = (struct fw_drv1 *)sc->si_drv1;
414 fc = d->fc;
415 ir = d->ir;
416 it = d->it;
417
418 switch (cmd) {
419 case FW_STSTREAM:
420 if (it == NULL) {
421 i = fw_open_isodma(fc, /* tx */1);
422 if (i < 0) {
423 err = EBUSY;
424 break;
425 }
426 it = fc->it[i];
427 err = fwdev_allocbuf(fc, it, &d->bufreq.tx);
428 if (err) {
429 it->flag &= ~FWXFERQ_OPEN;
430 break;
431 }
432 }
433 it->flag &= ~0xff;
434 it->flag |= (0x3f & ichreq->ch);
435 it->flag |= ((0x3 & ichreq->tag) << 6);
436 d->it = it;
437 break;
438
439 case FW_GTSTREAM:
440 if (it != NULL) {
441 ichreq->ch = it->flag & 0x3f;
442 ichreq->tag = it->flag >> 2 & 0x3;
443 } else
444 err = EINVAL;
445 break;
446
447 case FW_SRSTREAM:
448 if (ir == NULL) {
449 i = fw_open_isodma(fc, /* tx */0);
450 if (i < 0) {
451 err = EBUSY;
452 break;
453 }
454 ir = fc->ir[i];
455 err = fwdev_allocbuf(fc, ir, &d->bufreq.rx);
456 if (err) {
457 ir->flag &= ~FWXFERQ_OPEN;
458 break;
459 }
460 }
461 ir->flag &= ~0xff;
462 ir->flag |= (0x3f & ichreq->ch);
463 ir->flag |= ((0x3 & ichreq->tag) << 6);
464 d->ir = ir;
465 err = fc->irx_enable(fc, ir->dmach);
466 break;
467
468 case FW_GRSTREAM:
469 if (d->ir != NULL) {
470 ichreq->ch = ir->flag & 0x3f;
471 ichreq->tag = ir->flag >> 2 & 0x3;
472 } else
473 err = EINVAL;
474 break;
475
476 case FW_SSTBUF:
477 memcpy(&d->bufreq, ibufreq, sizeof(d->bufreq));
478 break;
479
480 case FW_GSTBUF:
481 memset(&ibufreq->rx, 0, sizeof(ibufreq->rx));
482 if (ir != NULL) {
483 ibufreq->rx.nchunk = ir->bnchunk;
484 ibufreq->rx.npacket = ir->bnpacket;
485 ibufreq->rx.psize = ir->psize;
486 }
487 memset(&ibufreq->tx, 0, sizeof(ibufreq->tx));
488 if (it != NULL) {
489 ibufreq->tx.nchunk = it->bnchunk;
490 ibufreq->tx.npacket = it->bnpacket;
491 ibufreq->tx.psize = it->psize;
492 }
493 break;
494
495 case FW_ASYREQ:
496 {
497 const struct tcode_info *tinfo;
498 int pay_len = 0;
499
500 fp = &asyreq->pkt;
501 tinfo = &fc->tcode[fp->mode.hdr.tcode];
502
503 if ((tinfo->flag & FWTI_BLOCK_ASY) != 0)
504 pay_len = MAX(0, asyreq->req.len - tinfo->hdr_len);
505
506 xfer = fw_xfer_alloc_buf(M_FW, pay_len, PAGE_SIZE/*XXX*/);
507 if (xfer == NULL)
508 return ENOMEM;
509
510 switch (asyreq->req.type) {
511 case FWASREQNODE:
512 break;
513
514 case FWASREQEUI:
515 fwdev = fw_noderesolve_eui64(fc, &asyreq->req.dst.eui);
516 if (fwdev == NULL) {
517 aprint_error_dev(fc->bdev,
518 "cannot find node\n");
519 err = EINVAL;
520 goto out;
521 }
522 fp->mode.hdr.dst = FWLOCALBUS | fwdev->dst;
523 break;
524
525 case FWASRESTL:
526 /* XXX what's this? */
527 break;
528
529 case FWASREQSTREAM:
530 /* nothing to do */
531 break;
532 }
533
534 memcpy(&xfer->send.hdr, fp, tinfo->hdr_len);
535 if (pay_len > 0)
536 memcpy(xfer->send.payload, (char *)fp + tinfo->hdr_len,
537 pay_len);
538 xfer->send.spd = asyreq->req.sped;
539 xfer->hand = fw_xferwake;
540
541 if ((err = fw_asyreq(fc, -1, xfer)) != 0)
542 goto out;
543 if ((err = fw_xferwait(xfer)) != 0)
544 goto out;
545 if (xfer->resp != 0) {
546 err = EIO;
547 goto out;
548 }
549 if ((tinfo->flag & FWTI_TLABEL) == 0)
550 goto out;
551
552 /* copy response */
553 tinfo = &fc->tcode[xfer->recv.hdr.mode.hdr.tcode];
554 if (xfer->recv.hdr.mode.hdr.tcode == FWTCODE_RRESB ||
555 xfer->recv.hdr.mode.hdr.tcode == FWTCODE_LRES) {
556 pay_len = xfer->recv.pay_len;
557 if (asyreq->req.len >=
558 xfer->recv.pay_len + tinfo->hdr_len)
559 asyreq->req.len =
560 xfer->recv.pay_len + tinfo->hdr_len;
561 else {
562 err = EINVAL;
563 pay_len = 0;
564 }
565 } else
566 pay_len = 0;
567 memcpy(fp, &xfer->recv.hdr, tinfo->hdr_len);
568 memcpy((char *)fp + tinfo->hdr_len, xfer->recv.payload,
569 pay_len);
570 out:
571 fw_xfer_free_buf(xfer);
572 break;
573 }
574
575 case FW_IBUSRST:
576 fc->ibr(fc);
577 break;
578
579 case FW_CBINDADDR:
580 fwb = fw_bindlookup(fc, bindreq->start.hi, bindreq->start.lo);
581 if (fwb == NULL) {
582 err = EINVAL;
583 break;
584 }
585 fw_bindremove(fc, fwb);
586 STAILQ_REMOVE(&d->binds, fwb, fw_bind, chlist);
587 fw_xferlist_remove(&fwb->xferlist);
588 free(fwb, M_FW);
589 break;
590
591 case FW_SBINDADDR:
592 if (bindreq->len <= 0 ) {
593 err = EINVAL;
594 break;
595 }
596 if (bindreq->start.hi > 0xffff ) {
597 err = EINVAL;
598 break;
599 }
600 fwb = (struct fw_bind *)malloc(sizeof(struct fw_bind),
601 M_FW, M_WAITOK);
602 if (fwb == NULL) {
603 err = ENOMEM;
604 break;
605 }
606 fwb->start = ((u_int64_t)bindreq->start.hi << 32) |
607 bindreq->start.lo;
608 fwb->end = fwb->start + bindreq->len;
609 fwb->sc = (void *)d;
610 STAILQ_INIT(&fwb->xferlist);
611 err = fw_bindadd(fc, fwb);
612 if (err == 0) {
613 fw_xferlist_add(&fwb->xferlist, M_FW,
614 /* XXX */
615 PAGE_SIZE, PAGE_SIZE, 5, fc, (void *)fwb, fw_hand);
616 STAILQ_INSERT_TAIL(&d->binds, fwb, chlist);
617 } else {
618 free(fwb, M_FW);
619 }
620 break;
621
622 case FW_GDEVLST:
623 i = len = 1;
624 /* myself */
625 devinfo = fwdevlst->dev;
626 devinfo->dst = fc->nodeid;
627 devinfo->status = 0; /* XXX */
628 devinfo->eui.hi = fc->eui.hi;
629 devinfo->eui.lo = fc->eui.lo;
630 STAILQ_FOREACH(fwdev, &fc->devices, link) {
631 if (len < FW_MAX_DEVLST) {
632 devinfo = &fwdevlst->dev[len++];
633 devinfo->dst = fwdev->dst;
634 devinfo->status =
635 (fwdev->status == FWDEVINVAL) ? 0 : 1;
636 devinfo->eui.hi = fwdev->eui.hi;
637 devinfo->eui.lo = fwdev->eui.lo;
638 }
639 i++;
640 }
641 fwdevlst->n = i;
642 fwdevlst->info_len = len;
643 break;
644
645 case FW_GTPMAP:
646 memcpy(data, fc->topology_map,
647 (fc->topology_map->crc_len + 1) * 4);
648 break;
649
650 case FW_GCROM:
651 STAILQ_FOREACH(fwdev, &fc->devices, link)
652 if (FW_EUI64_EQUAL(fwdev->eui, crom_buf->eui))
653 break;
654 if (fwdev == NULL) {
655 if (!FW_EUI64_EQUAL(fc->eui, crom_buf->eui)) {
656 err = FWNODE_INVAL;
657 break;
658 }
659 /* myself */
660 ptr = malloc(CROMSIZE, M_FW, M_WAITOK);
661 len = CROMSIZE;
662 for (i = 0; i < CROMSIZE/4; i++)
663 ((uint32_t *)ptr)[i] = ntohl(fc->config_rom[i]);
664 } else {
665 /* found */
666 ptr = (void *)fwdev->csrrom;
667 if (fwdev->rommax < CSRROMOFF)
668 len = 0;
669 else
670 len = fwdev->rommax - CSRROMOFF + 4;
671 }
672 if (crom_buf->len < len)
673 len = crom_buf->len;
674 else
675 crom_buf->len = len;
676 err = copyout(ptr, crom_buf->ptr, len);
677 if (fwdev == NULL)
678 /* myself */
679 free(ptr, M_FW);
680 break;
681
682 default:
683 fc->ioctl(dev, cmd, data, flag, td);
684 break;
685 }
686 return err;
687 }
688
689 int
690 fw_poll(dev_t dev, int events, struct lwp *td)
691 {
692 struct firewire_softc *sc;
693 struct fw_xferq *ir;
694 int revents, tmp;
695
696 sc = device_lookup_private(&ieee1394if_cd, DEV2UNIT(dev));
697 if (sc == NULL)
698 return ENXIO;
699
700 ir = ((struct fw_drv1 *)sc->si_drv1)->ir;
701 revents = 0;
702 tmp = POLLIN | POLLRDNORM;
703 if (events & tmp) {
704 if (STAILQ_FIRST(&ir->q) != NULL)
705 revents |= tmp;
706 else
707 selrecord(td, &ir->rsel);
708 }
709 tmp = POLLOUT | POLLWRNORM;
710 if (events & tmp)
711 /* XXX should be fixed */
712 revents |= tmp;
713
714 return revents;
715 }
716
717 paddr_t
718 fw_mmap(dev_t dev, off_t offset, int nproto)
719 {
720 struct firewire_softc *sc;
721
722 sc = device_lookup_private(&ieee1394if_cd, DEV2UNIT(dev));
723 if (sc == NULL)
724 return ENXIO;
725
726 return EINVAL;
727 }
728
729 void
730 fw_strategy(struct bio *bp)
731 {
732 struct firewire_softc *sc;
733 dev_t dev = bp->bio_dev;
734
735 sc = device_lookup_private(&ieee1394if_cd, DEV2UNIT(dev));
736 if (sc == NULL)
737 return;
738
739 if (DEV_FWMEM(dev)) {
740 fwmem_strategy(bp);
741 return;
742 }
743
744 bp->bio_error = EOPNOTSUPP;
745 bp->bio_resid = bp->bio_bcount;
746 biodone(bp);
747 }
748
749
/*
 * Allocate the multi-chunk DMA buffer and bulkxfer descriptors for an
 * iso queue, per the geometry in *b (nchunk x npacket packets of psize
 * bytes).  On success the queue is marked FWXFERQ_STREAM|FWXFERQ_EXTBUF
 * with all chunks on the free list.
 */
static int
fwdev_allocbuf(struct firewire_comm *fc, struct fw_xferq *q,
    struct fw_bufspec *b)
{
	int i;

	/* refuse while DMA is running or a buffer already exists */
	if (q->flag & (FWXFERQ_RUNNING | FWXFERQ_EXTBUF))
		return EBUSY;

	q->bulkxfer =
	    (struct fw_bulkxfer *)malloc(sizeof(struct fw_bulkxfer) * b->nchunk,
	    M_FW, M_WAITOK);
	if (q->bulkxfer == NULL)
		return ENOMEM;

	/* packets are quadlet-aligned within the DMA buffer */
	b->psize = roundup2(b->psize, sizeof(uint32_t));
	q->buf = fwdma_malloc_multiseg(fc, sizeof(uint32_t), b->psize,
	    b->nchunk * b->npacket, BUS_DMA_WAITOK);

	if (q->buf == NULL) {
		free(q->bulkxfer, M_FW);
		q->bulkxfer = NULL;
		return ENOMEM;
	}
	q->bnchunk = b->nchunk;
	q->bnpacket = b->npacket;
	/* NOTE: b->psize was already rounded up above; this re-round
	 * is redundant but harmless */
	q->psize = (b->psize + 3) & ~3;
	q->queued = 0;

	STAILQ_INIT(&q->stvalid);
	STAILQ_INIT(&q->stfree);
	STAILQ_INIT(&q->stdma);
	q->stproc = NULL;

	/* every chunk starts out on the free list */
	for (i = 0 ; i < q->bnchunk; i++) {
		q->bulkxfer[i].poffset = i * q->bnpacket;
		q->bulkxfer[i].mbuf = NULL;
		STAILQ_INSERT_TAIL(&q->stfree, &q->bulkxfer[i], link);
	}

	q->flag &= ~FWXFERQ_MODEMASK;
	q->flag |= FWXFERQ_STREAM;
	q->flag |= FWXFERQ_EXTBUF;

	return 0;
}
796
797 static int
798 fwdev_freebuf(struct fw_xferq *q)
799 {
800
801 if (q->flag & FWXFERQ_EXTBUF) {
802 if (q->buf != NULL)
803 fwdma_free_multiseg(q->buf);
804 q->buf = NULL;
805 free(q->bulkxfer, M_FW);
806 q->bulkxfer = NULL;
807 q->flag &= ~FWXFERQ_EXTBUF;
808 q->psize = 0;
809 q->maxq = FWMAXQUEUE;
810 }
811 return 0;
812 }
813
/*
 * Read one received async packet: sleep on d->cv until fw_hand() has
 * queued an xfer on d->rq, copy header + payload to the user, then
 * recycle the xfer back onto its bind's xferlist.
 */
static int
fw_read_async(struct fw_drv1 *d, struct uio *uio, int ioflag)
{
	struct fw_xfer *xfer;
	struct fw_bind *fwb;
	struct fw_pkt *fp;
	const struct tcode_info *tinfo;
	int err = 0;

	mutex_enter(&d->fc->fc_mtx);

	/* wait (interruptibly) for a packet to arrive */
	for (;;) {
		xfer = STAILQ_FIRST(&d->rq);
		if (xfer == NULL && err == 0) {
			err = cv_wait_sig(&d->cv, &d->fc->fc_mtx);
			if (err) {
				mutex_exit(&d->fc->fc_mtx);
				return err;
			}
			continue;
		}
		break;
	}

	STAILQ_REMOVE_HEAD(&d->rq, link);
	mutex_exit(&d->fc->fc_mtx);
	fp = &xfer->recv.hdr;
#if 0 /* for GASP ?? */
	if (fc->irx_post != NULL)
		fc->irx_post(fc, fp->mode.ld);
#endif
	/* copy out header (length per tcode), then payload */
	tinfo = &xfer->fc->tcode[fp->mode.hdr.tcode];
	err = uiomove((void *)fp, tinfo->hdr_len, uio);
	if (err)
		goto out;
	err = uiomove((void *)xfer->recv.payload, xfer->recv.pay_len, uio);

out:
	/* recycle this xfer */
	fwb = (struct fw_bind *)xfer->sc;
	fw_xfer_unload(xfer);
	xfer->recv.pay_len = PAGE_SIZE;
	mutex_enter(&d->fc->fc_mtx);
	STAILQ_INSERT_TAIL(&fwb->xferlist, xfer, link);
	mutex_exit(&d->fc->fc_mtx);
	return err;
}
861
/*
 * Send one async packet built from user data: the first quadlet gives
 * the tcode (hence the header length), the rest of the header follows,
 * and any remaining bytes become the payload.  Waits for completion;
 * if a response packet was received it is queued on d->rq for a later
 * read instead of being freed.
 */
static int
fw_write_async(struct fw_drv1 *d, struct uio *uio, int ioflag)
{
	struct fw_xfer *xfer;
	struct fw_pkt pkt;
	const struct tcode_info *tinfo;
	int err;

	memset(&pkt, 0, sizeof(struct fw_pkt));
	/* first quadlet carries the tcode */
	if ((err = uiomove((void *)&pkt, sizeof(uint32_t), uio)))
		return err;
	tinfo = &d->fc->tcode[pkt.mode.hdr.tcode];
	/* pull in the rest of the header for this tcode */
	if ((err = uiomove((char *)&pkt + sizeof(uint32_t),
	    tinfo->hdr_len - sizeof(uint32_t), uio)))
		return err;

	if ((xfer = fw_xfer_alloc_buf(M_FW, uio->uio_resid,
	    PAGE_SIZE/*XXX*/)) == NULL)
		return ENOMEM;

	memcpy(&xfer->send.hdr, &pkt, sizeof(struct fw_pkt));
	xfer->send.pay_len = uio->uio_resid;
	if (uio->uio_resid > 0) {
		if ((err =
		    uiomove((void *)xfer->send.payload, uio->uio_resid, uio)))
			goto out;
	}

	xfer->fc = d->fc;
	xfer->sc = NULL;
	xfer->hand = fw_xferwake;
	xfer->send.spd = 2 /* XXX */;

	if ((err = fw_asyreq(xfer->fc, -1, xfer)))
		goto out;

	/* block until the transaction completes */
	if ((err = fw_xferwait(xfer)))
		goto out;

	if (xfer->resp != 0) {
		err = xfer->resp;
		goto out;
	}

	if (xfer->flag == FWXF_RCVD) {
		/* keep the response around for fw_read_async() */
		mutex_enter(&xfer->fc->fc_mtx);
		STAILQ_INSERT_TAIL(&d->rq, xfer, link);
		mutex_exit(&xfer->fc->fc_mtx);
		return 0;
	}

out:
	fw_xfer_free(xfer);
	return err;
}
917
918 static void
919 fw_hand(struct fw_xfer *xfer)
920 {
921 struct fw_bind *fwb;
922 struct fw_drv1 *d;
923
924 fwb = (struct fw_bind *)xfer->sc;
925 d = (struct fw_drv1 *)fwb->sc;
926 mutex_enter(&xfer->fc->fc_mtx);
927 STAILQ_INSERT_TAIL(&d->rq, xfer, link);
928 cv_broadcast(&d->cv);
929 mutex_exit(&xfer->fc->fc_mtx);
930 }
Cache object: 999d3eaa56374cfb0dfa75f54910f8f5
|