1 /* $NetBSD: pdq_ifsubr.c,v 1.38 2001/12/21 23:21:47 matt Exp $ */
2
3 /*-
4 * Copyright (c) 1995, 1996 Matt Thomas <matt@3am-software.com>
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. The name of the author may not be used to endorse or promote products
13 * derived from this software without specific prior written permission
14 *
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
16 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
17 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
18 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
19 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
20 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
21 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
22 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
24 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25 *
26 * $NetBSD: pdq_ifsubr.c,v 1.12 1997/06/05 01:56:35 thomas Exp$
27 */
28
29 #include <sys/cdefs.h>
30 __FBSDID("$FreeBSD: src/sys/dev/pdq/pdq_ifsubr.c,v 1.23.4.1 2006/01/29 15:39:06 emaste Exp $");
31
/*
 * DEC PDQ FDDI Controller; code for BSD derived operating systems
 *
 * This module provides bus-independent, BSD-specific O/S functions
 * (i.e. it provides an ifnet interface to the rest of the system).
 */
38
39 #ifdef __NetBSD__
40 #include <sys/cdefs.h>
41 __KERNEL_RCSID(0, "$NetBSD: pdq_ifsubr.c,v 1.38 2001/12/21 23:21:47 matt Exp $");
42 #endif
43
44 #define PDQ_OSSUPPORT
45
46 #include <sys/param.h>
47 #include <sys/systm.h>
48 #include <sys/kernel.h>
49 #include <sys/lock.h>
50 #include <sys/mutex.h>
51 #include <sys/malloc.h>
52 #include <sys/socket.h>
53 #include <sys/sockio.h>
54
55 #include <sys/module.h>
56 #include <sys/bus.h>
57
58 #include <machine/bus_memio.h>
59 #include <machine/bus_pio.h>
60 #include <machine/bus.h>
61 #include <machine/resource.h>
62 #include <sys/rman.h>
63
64 #include <net/if.h>
65 #include <net/if_arp.h>
66 #include <net/if_dl.h>
67 #include <net/if_media.h>
68 #include <net/fddi.h>
69
70 #include <net/bpf.h>
71
72 #include <dev/pdq/pdq_freebsd.h>
73 #include <dev/pdq/pdqreg.h>
74
75 devclass_t pdq_devclass;
76
77 static void
78 pdq_ifinit(
79 pdq_softc_t *sc)
80 {
81 if (sc->sc_if.if_flags & IFF_UP) {
82 sc->sc_if.if_flags |= IFF_RUNNING;
83 if (sc->sc_if.if_flags & IFF_PROMISC) {
84 sc->sc_pdq->pdq_flags |= PDQ_PROMISC;
85 } else {
86 sc->sc_pdq->pdq_flags &= ~PDQ_PROMISC;
87 }
88 if (sc->sc_if.if_flags & IFF_LINK1) {
89 sc->sc_pdq->pdq_flags |= PDQ_PASS_SMT;
90 } else {
91 sc->sc_pdq->pdq_flags &= ~PDQ_PASS_SMT;
92 }
93 sc->sc_pdq->pdq_flags |= PDQ_RUNNING;
94 pdq_run(sc->sc_pdq);
95 } else {
96 sc->sc_if.if_flags &= ~IFF_RUNNING;
97 sc->sc_pdq->pdq_flags &= ~PDQ_RUNNING;
98 pdq_stop(sc->sc_pdq);
99 }
100 }
101
102 static void
103 pdq_ifwatchdog(
104 struct ifnet *ifp)
105 {
106 /*
107 * No progress was made on the transmit queue for PDQ_OS_TX_TRANSMIT
108 * seconds. Remove all queued packets.
109 */
110
111 ifp->if_flags &= ~IFF_OACTIVE;
112 ifp->if_timer = 0;
113 for (;;) {
114 struct mbuf *m;
115 IFQ_DEQUEUE(&ifp->if_snd, m);
116 if (m == NULL)
117 return;
118 PDQ_OS_DATABUF_FREE(PDQ_OS_IFP_TO_SOFTC(ifp)->sc_pdq, m);
119 }
120 }
121
/*
 * if_start handler: drain the interface send queue into the PDQ
 * transmit ring.  Runs with PDQIF_DOWNCALL set so that
 * pdq_os_restart_transmitter() does not re-enter us.
 */
static void
pdq_ifstart(
    struct ifnet *ifp)
{
    pdq_softc_t * const sc = PDQ_OS_IFP_TO_SOFTC(ifp);
    struct mbuf *m;
    int tx = 0;

    if ((ifp->if_flags & IFF_RUNNING) == 0)
	return;

    /* Arm the transmit watchdog if it is not already ticking. */
    if (sc->sc_if.if_timer == 0)
	sc->sc_if.if_timer = PDQ_OS_TX_TIMEOUT;

    /* Controller cannot accept frames right now: stall the queue. */
    if ((sc->sc_pdq->pdq_flags & PDQ_TXOK) == 0) {
	sc->sc_if.if_flags |= IFF_OACTIVE;
	return;
    }
    sc->sc_flags |= PDQIF_DOWNCALL;
    /* tx becomes 1 after the first successful iteration. */
    for (;; tx = 1) {
	IF_DEQUEUE(&ifp->if_snd, m);
	if (m == NULL)
	    break;
#if defined(PDQ_BUS_DMA) && !defined(PDQ_BUS_DMA_NOTX)
	if ((m->m_flags & M_HASTXDMAMAP) == 0) {
	    bus_dmamap_t map;
	    if (PDQ_OS_HDR_OFFSET != PDQ_RX_FC_OFFSET) {
		/* Overwrite the leading bytes with the FDDI packet header. */
		m->m_data[0] = PDQ_FDDI_PH0;
		m->m_data[1] = PDQ_FDDI_PH1;
		m->m_data[2] = PDQ_FDDI_PH2;
	    }
	    if (!bus_dmamap_create(sc->sc_dmatag, m->m_pkthdr.len, 255,
				   m->m_pkthdr.len, 0, BUS_DMA_NOWAIT, &map)) {
		if (!bus_dmamap_load_mbuf(sc->sc_dmatag, map, m,
					  BUS_DMA_WRITE|BUS_DMA_NOWAIT)) {
		    bus_dmamap_sync(sc->sc_dmatag, map, 0, m->m_pkthdr.len,
				    BUS_DMASYNC_PREWRITE);
		    /* Stash the map in the mbuf so completion can unload it. */
		    M_SETCTX(m, map);
		    m->m_flags |= M_HASTXDMAMAP;
		}
	    }
	    /* Could not create/load a DMA map: requeue below and stop. */
	    if ((m->m_flags & M_HASTXDMAMAP) == 0)
		break;
	}
#else
	if (PDQ_OS_HDR_OFFSET != PDQ_RX_FC_OFFSET) {
	    m->m_data[0] = PDQ_FDDI_PH0;
	    m->m_data[1] = PDQ_FDDI_PH1;
	    m->m_data[2] = PDQ_FDDI_PH2;
	}
#endif

	if (pdq_queue_transmit_data(sc->sc_pdq, m) == PDQ_FALSE)
	    break;
    }
    /* A leftover mbuf means the ring filled up: put it back and stall. */
    if (m != NULL) {
	ifp->if_flags |= IFF_OACTIVE;
	IF_PREPEND(&ifp->if_snd, m);
    }
    /* Kick the producer index only if we actually queued something. */
    if (tx)
	PDQ_DO_TYPE2_PRODUCER(sc->sc_pdq);
    sc->sc_flags &= ~PDQIF_DOWNCALL;
}
185
/*
 * Upcall from the PDQ core for a received frame: tear down any receive
 * DMA state, hand the frame to BPF, drop non-LLC or flagged frames,
 * and pass the rest up the stack via if_input.
 */
void
pdq_os_receive_pdu(
    pdq_t *pdq,
    struct mbuf *m,
    size_t pktlen,
    int drop)
{
    pdq_softc_t *sc = pdq->pdq_os_ctx;
    struct ifnet *ifp = &sc->sc_if;
    struct fddi_header *fh;

    ifp->if_ipackets++;
#if defined(PDQ_BUS_DMA)
    {
	/*
	 * Even though the first mbuf start at the first fddi header octet,
	 * the dmamap starts PDQ_OS_HDR_OFFSET octets earlier.  Any additional
	 * mbufs will start normally.
	 */
	int offset = PDQ_OS_HDR_OFFSET;
	struct mbuf *m0;
	/* Sync, unload and destroy the per-mbuf receive DMA maps. */
	for (m0 = m; m0 != NULL; m0 = m0->m_next, offset = 0) {
	    pdq_os_databuf_sync(sc, m0, offset, m0->m_len, BUS_DMASYNC_POSTREAD);
	    bus_dmamap_unload(sc->sc_dmatag, M_GETCTX(m0, bus_dmamap_t));
	    bus_dmamap_destroy(sc->sc_dmatag, M_GETCTX(m0, bus_dmamap_t));
	    m0->m_flags &= ~M_HASRXDMAMAP;
	    M_SETCTX(m0, NULL);
	}
    }
#endif
    m->m_pkthdr.len = pktlen;
#if NBPFILTER > 0 && defined(__NetBSD__)
    if (sc->sc_bpf != NULL)
	PDQ_BPF_MTAP(sc, m);
#endif
    fh = mtod(m, struct fddi_header *);
    /* Discard frames the core flagged, and anything not async LLC. */
    if (drop || (fh->fddi_fc & (FDDIFC_L|FDDIFC_F)) != FDDIFC_LLC_ASYNC) {
	ifp->if_iqdrops++;
	ifp->if_ierrors++;
	PDQ_OS_DATABUF_FREE(pdq, m);
	return;
    }

    m->m_pkthdr.rcvif = ifp;
    (*ifp->if_input)(ifp, m);
}
232
233 void
234 pdq_os_restart_transmitter(
235 pdq_t *pdq)
236 {
237 pdq_softc_t *sc = pdq->pdq_os_ctx;
238 sc->sc_if.if_flags &= ~IFF_OACTIVE;
239 if (IFQ_IS_EMPTY(&sc->sc_if.if_snd) == 0) {
240 sc->sc_if.if_timer = PDQ_OS_TX_TIMEOUT;
241 if ((sc->sc_flags & PDQIF_DOWNCALL) == 0)
242 pdq_ifstart(&sc->sc_if);
243 } else {
244 sc->sc_if.if_timer = 0;
245 }
246 }
247
248 void
249 pdq_os_transmit_done(
250 pdq_t *pdq,
251 struct mbuf *m)
252 {
253 pdq_softc_t *sc = pdq->pdq_os_ctx;
254 #if NBPFILTER > 0
255 if (sc->sc_bpf != NULL)
256 PDQ_BPF_MTAP(sc, m);
257 #endif
258 PDQ_OS_DATABUF_FREE(pdq, m);
259 sc->sc_if.if_opackets++;
260 }
261
/*
 * Upcall from the PDQ core: fill at most num_addrs entries of the
 * controller's CAM (addr[]) from the interface multicast list.  If the
 * list does not fit, enable all-multicast mode instead.
 */
void
pdq_os_addr_fill(
    pdq_t *pdq,
    pdq_lanaddr_t *addr,
    size_t num_addrs)
{
    pdq_softc_t *sc = pdq->pdq_os_ctx;
    struct ifnet *ifp;
    struct ifmultiaddr *ifma;

    ifp = &sc->arpcom.ac_if;

    /*
     * ADDR_FILTER_SET is always issued before FILTER_SET so
     * we can play with PDQ_ALLMULTI and not worry about
     * queueing a FILTER_SET ourselves.
     */

    pdq->pdq_flags &= ~PDQ_ALLMULTI;
#if defined(IFF_ALLMULTI)
    sc->sc_if.if_flags &= ~IFF_ALLMULTI;
#endif

    IF_ADDR_LOCK(PDQ_IFNET(sc));
    for (ifma = TAILQ_FIRST(&sc->sc_if.if_multiaddrs); ifma && num_addrs > 0;
	 ifma = TAILQ_NEXT(ifma, ifma_link)) {
	char *mcaddr;
	/* Only link-level addresses go into the CAM. */
	if (ifma->ifma_addr->sa_family != AF_LINK)
	    continue;
	mcaddr = LLADDR((struct sockaddr_dl *)ifma->ifma_addr);
	/* Copy the 6-byte LAN address as three 16-bit words. */
	((u_short *) addr->lanaddr_bytes)[0] = ((u_short *) mcaddr)[0];
	((u_short *) addr->lanaddr_bytes)[1] = ((u_short *) mcaddr)[1];
	((u_short *) addr->lanaddr_bytes)[2] = ((u_short *) mcaddr)[2];
	addr++;
	num_addrs--;
    }
    IF_ADDR_UNLOCK(PDQ_IFNET(sc));
    /*
     * If not all the address fit into the CAM, turn on all-multicast mode.
     */
    if (ifma != NULL) {
	pdq->pdq_flags |= PDQ_ALLMULTI;
#if defined(IFF_ALLMULTI)
	sc->sc_if.if_flags |= IFF_ALLMULTI;
#endif
    }
}
309
310 #if defined(IFM_FDDI)
311 static int
312 pdq_ifmedia_change(
313 struct ifnet *ifp)
314 {
315 pdq_softc_t * const sc = PDQ_OS_IFP_TO_SOFTC(ifp);
316
317 if (sc->sc_ifmedia.ifm_media & IFM_FDX) {
318 if ((sc->sc_pdq->pdq_flags & PDQ_WANT_FDX) == 0) {
319 sc->sc_pdq->pdq_flags |= PDQ_WANT_FDX;
320 if (sc->sc_pdq->pdq_flags & PDQ_RUNNING)
321 pdq_run(sc->sc_pdq);
322 }
323 } else if (sc->sc_pdq->pdq_flags & PDQ_WANT_FDX) {
324 sc->sc_pdq->pdq_flags &= ~PDQ_WANT_FDX;
325 if (sc->sc_pdq->pdq_flags & PDQ_RUNNING)
326 pdq_run(sc->sc_pdq);
327 }
328
329 return 0;
330 }
331
332 static void
333 pdq_ifmedia_status(
334 struct ifnet *ifp,
335 struct ifmediareq *ifmr)
336 {
337 pdq_softc_t * const sc = PDQ_OS_IFP_TO_SOFTC(ifp);
338
339 ifmr->ifm_status = IFM_AVALID;
340 if (sc->sc_pdq->pdq_flags & PDQ_IS_ONRING)
341 ifmr->ifm_status |= IFM_ACTIVE;
342
343 ifmr->ifm_active = (ifmr->ifm_current & ~IFM_FDX);
344 if (sc->sc_pdq->pdq_flags & PDQ_IS_FDX)
345 ifmr->ifm_active |= IFM_FDX;
346 }
347
348 void
349 pdq_os_update_status(
350 pdq_t *pdq,
351 const void *arg)
352 {
353 pdq_softc_t * const sc = pdq->pdq_os_ctx;
354 const pdq_response_status_chars_get_t *rsp = arg;
355 int media = 0;
356
357 switch (rsp->status_chars_get.pmd_type[0]) {
358 case PDQ_PMD_TYPE_ANSI_MUTLI_MODE: media = IFM_FDDI_MMF; break;
359 case PDQ_PMD_TYPE_ANSI_SINGLE_MODE_TYPE_1: media = IFM_FDDI_SMF; break;
360 case PDQ_PMD_TYPE_ANSI_SIGNLE_MODE_TYPE_2: media = IFM_FDDI_SMF; break;
361 case PDQ_PMD_TYPE_UNSHIELDED_TWISTED_PAIR: media = IFM_FDDI_UTP; break;
362 default: media |= IFM_MANUAL;
363 }
364
365 if (rsp->status_chars_get.station_type == PDQ_STATION_TYPE_DAS)
366 media |= IFM_FDDI_DA;
367
368 sc->sc_ifmedia.ifm_media = media | IFM_FDDI;
369 }
370 #endif /* defined(IFM_FDDI) */
371
/*
 * ifnet ioctl handler, serialized by the softc mutex (PDQ_LOCK).
 * Address/MTU requests are delegated to the generic FDDI layer;
 * flag and multicast changes re-drive the controller.
 */
static int
pdq_ifioctl(
    struct ifnet *ifp,
    u_long cmd,
    caddr_t data)
{
    pdq_softc_t *sc = PDQ_OS_IFP_TO_SOFTC(ifp);
    int error = 0;

    PDQ_LOCK(sc);

    switch (cmd) {
	case SIOCSIFMTU:
	case SIOCGIFADDR:
	case SIOCSIFADDR: {
	    /* Generic FDDI address/MTU handling. */
	    error = fddi_ioctl(ifp, cmd, data);
	    break;
	}

	case SIOCSIFFLAGS: {
	    /* Re-evaluate IFF_UP / IFF_PROMISC / IFF_LINK1. */
	    pdq_ifinit(sc);
	    break;
	}

	case SIOCADDMULTI:
	case SIOCDELMULTI: {
	    /* Re-run the controller so the new multicast filter loads. */
	    if (sc->sc_if.if_flags & IFF_RUNNING) {
		pdq_run(sc->sc_pdq);
		error = 0;
	    }
	    break;
	}

#if defined(IFM_FDDI) && defined(SIOCSIFMEDIA)
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA: {
	    struct ifreq *ifr = (struct ifreq *)data;
	    error = ifmedia_ioctl(ifp, ifr, &sc->sc_ifmedia, cmd);
	    break;
	}
#endif

	default: {
	    error = EINVAL;
	    break;
	}
    }

    PDQ_UNLOCK(sc);
    return error;
}
423
424 #ifndef IFF_NOTRAILERS
425 #define IFF_NOTRAILERS 0
426 #endif
427
/*
 * Attach the driver's ifnet to the system: initialize the softc mutex,
 * install the ifnet callbacks and flags, register the media previously
 * recorded by pdq_os_update_status(), and hand the interface to the
 * FDDI layer.
 */
void
pdq_ifattach(pdq_softc_t *sc)
{
    struct ifnet *ifp = &sc->sc_if;

    /* MTX_RECURSE: the ioctl/init/start paths may re-take the lock. */
    mtx_init(&sc->mtx, device_get_nameunit(sc->dev), MTX_NETWORK_LOCK,
	MTX_DEF | MTX_RECURSE);

    ifp->if_softc = sc;
    ifp->if_init = (if_init_f_t *)pdq_ifinit;
    ifp->if_snd.ifq_maxlen = IFQ_MAXLEN;
    ifp->if_flags = IFF_BROADCAST|IFF_SIMPLEX|IFF_NOTRAILERS|IFF_MULTICAST;

#if (defined(__FreeBSD__) && BSD >= 199506) || defined(__NetBSD__)
    ifp->if_watchdog = pdq_ifwatchdog;
#else
    ifp->if_watchdog = ifwatchdog;
#endif

    ifp->if_ioctl = pdq_ifioctl;
#if !defined(__NetBSD__) && !defined(__FreeBSD__)
    ifp->if_output = fddi_output;
#endif
    ifp->if_start = pdq_ifstart;

#if defined(IFM_FDDI)
    {
	/*
	 * Save the media word set earlier (see pdq_os_update_status)
	 * so it survives ifmedia_init()'s reinitialization.
	 */
	const int media = sc->sc_ifmedia.ifm_media;
	ifmedia_init(&sc->sc_ifmedia, IFM_FDX,
		     pdq_ifmedia_change, pdq_ifmedia_status);
	ifmedia_add(&sc->sc_ifmedia, media, 0, 0);
	ifmedia_set(&sc->sc_ifmedia, media);
    }
#endif

#if defined(__NetBSD__)
    if_attach(ifp);
    fddi_ifattach(ifp, (caddr_t)&sc->sc_pdq->pdq_hwaddr);
#else
    fddi_ifattach(ifp, FDDI_BPF_SUPPORTED);
#endif
}
470
471 void
472 pdq_ifdetach (pdq_softc_t *sc)
473 {
474 struct ifnet *ifp;
475
476 ifp = &sc->arpcom.ac_if;
477
478 fddi_ifdetach(ifp, FDDI_BPF_SUPPORTED);
479 pdq_stop(sc->sc_pdq);
480 pdq_free(sc->dev);
481
482 return;
483 }
484
485 void
486 pdq_free (device_t dev)
487 {
488 pdq_softc_t *sc;
489
490 sc = device_get_softc(dev);
491
492 if (sc->io)
493 bus_release_resource(dev, sc->io_type, sc->io_rid, sc->io);
494 if (sc->mem)
495 bus_release_resource(dev, sc->mem_type, sc->mem_rid, sc->mem);
496 if (sc->irq_ih)
497 bus_teardown_intr(dev, sc->irq, sc->irq_ih);
498 if (sc->irq)
499 bus_release_resource(dev, SYS_RES_IRQ, sc->irq_rid, sc->irq);
500
501 /*
502 * Destroy the mutex.
503 */
504 if (mtx_initialized(&sc->mtx) != 0) {
505 mtx_destroy(&sc->mtx);
506 }
507
508 return;
509 }
510
511 #if defined(PDQ_BUS_DMA)
/*
 * Allocate and DMA-map the controller's shared host memory: the
 * descriptor block, the unsolicited-event buffer, and the consumer
 * block (which lives inside the descriptor block allocation).
 *
 * `steps' records how far setup progressed; on failure the switch at
 * the bottom falls through from the last completed step, unwinding
 * everything in reverse order.  Returns 0 on success, otherwise the
 * error from the failing bus_dma call.
 */
int
pdq_os_memalloc_contig(
    pdq_t *pdq)
{
    pdq_softc_t * const sc = pdq->pdq_os_ctx;
    bus_dma_segment_t db_segs[1], ui_segs[1], cb_segs[1];
    int db_nsegs = 0, ui_nsegs = 0;
    int steps = 0;
    int not_ok;

    /* Step 1: allocate the descriptor block (self-aligned). */
    not_ok = bus_dmamem_alloc(sc->sc_dmatag,
		 sizeof(*pdq->pdq_dbp), sizeof(*pdq->pdq_dbp),
		 sizeof(*pdq->pdq_dbp), db_segs, 1, &db_nsegs,
		 BUS_DMA_NOWAIT);
    if (!not_ok) {
	steps = 1;
	/* Step 2: map the descriptor block into KVA. */
	not_ok = bus_dmamem_map(sc->sc_dmatag, db_segs, db_nsegs,
		     sizeof(*pdq->pdq_dbp), (caddr_t *) &pdq->pdq_dbp,
		     BUS_DMA_NOWAIT);
    }
    if (!not_ok) {
	steps = 2;
	/* Step 3: create the descriptor-block DMA map. */
	not_ok = bus_dmamap_create(sc->sc_dmatag, db_segs[0].ds_len, 1,
		     0x2000, 0, BUS_DMA_NOWAIT, &sc->sc_dbmap);
    }
    if (!not_ok) {
	steps = 3;
	/* Step 4: load the descriptor block into the map. */
	not_ok = bus_dmamap_load(sc->sc_dmatag, sc->sc_dbmap,
		     pdq->pdq_dbp, sizeof(*pdq->pdq_dbp),
		     NULL, BUS_DMA_NOWAIT);
    }
    if (!not_ok) {
	steps = 4;
	pdq->pdq_pa_descriptor_block = sc->sc_dbmap->dm_segs[0].ds_addr;
	/* Step 5: allocate one page for unsolicited events. */
	not_ok = bus_dmamem_alloc(sc->sc_dmatag,
		     PDQ_OS_PAGESIZE, PDQ_OS_PAGESIZE, PDQ_OS_PAGESIZE,
		     ui_segs, 1, &ui_nsegs, BUS_DMA_NOWAIT);
    }
    if (!not_ok) {
	steps = 5;
	/* Step 6: map the unsolicited-event page into KVA. */
	not_ok = bus_dmamem_map(sc->sc_dmatag, ui_segs, ui_nsegs,
		     PDQ_OS_PAGESIZE,
		     (caddr_t *) &pdq->pdq_unsolicited_info.ui_events,
		     BUS_DMA_NOWAIT);
    }
    if (!not_ok) {
	steps = 6;
	/* Step 7: create the unsolicited-event DMA map. */
	not_ok = bus_dmamap_create(sc->sc_dmatag, ui_segs[0].ds_len, 1,
		     PDQ_OS_PAGESIZE, 0, BUS_DMA_NOWAIT,
		     &sc->sc_uimap);
    }
    if (!not_ok) {
	steps = 7;
	/* Step 8: load the unsolicited-event page into the map. */
	not_ok = bus_dmamap_load(sc->sc_dmatag, sc->sc_uimap,
		     pdq->pdq_unsolicited_info.ui_events,
		     PDQ_OS_PAGESIZE, NULL, BUS_DMA_NOWAIT);
    }
    if (!not_ok) {
	steps = 8;
	pdq->pdq_unsolicited_info.ui_pa_bufstart = sc->sc_uimap->dm_segs[0].ds_addr;
	/*
	 * Step 9: carve the consumer block out of the descriptor block
	 * segment and map it coherently.
	 */
	cb_segs[0] = db_segs[0];
	cb_segs[0].ds_addr += offsetof(pdq_descriptor_block_t, pdqdb_consumer);
	cb_segs[0].ds_len = sizeof(pdq_consumer_block_t);
	not_ok = bus_dmamem_map(sc->sc_dmatag, cb_segs, 1,
		     sizeof(*pdq->pdq_cbp), (caddr_t *) &pdq->pdq_cbp,
		     BUS_DMA_NOWAIT|BUS_DMA_COHERENT);
    }
    if (!not_ok) {
	steps = 9;
	/* Step 10: create the consumer-block DMA map. */
	not_ok = bus_dmamap_create(sc->sc_dmatag, cb_segs[0].ds_len, 1,
		     0x2000, 0, BUS_DMA_NOWAIT, &sc->sc_cbmap);
    }
    if (!not_ok) {
	steps = 10;
	/* Step 11: load the consumer block into the map. */
	not_ok = bus_dmamap_load(sc->sc_dmatag, sc->sc_cbmap,
		     (caddr_t) pdq->pdq_cbp, sizeof(*pdq->pdq_cbp),
		     NULL, BUS_DMA_NOWAIT);
    }
    if (!not_ok) {
	/* Everything succeeded. */
	pdq->pdq_pa_consumer_block = sc->sc_cbmap->dm_segs[0].ds_addr;
	return not_ok;
    }

    /* Failure: unwind from the last completed step downward. */
    switch (steps) {
	case 11: {
	    /* NOTE(review): steps never exceeds 10, so this case is
	     * unreachable as written. */
	    bus_dmamap_unload(sc->sc_dmatag, sc->sc_cbmap);
	    /* FALL THROUGH */
	}
	case 10: {
	    bus_dmamap_destroy(sc->sc_dmatag, sc->sc_cbmap);
	    /* FALL THROUGH */
	}
	case 9: {
	    bus_dmamem_unmap(sc->sc_dmatag,
		(caddr_t) pdq->pdq_cbp, sizeof(*pdq->pdq_cbp));
	    /* FALL THROUGH */
	}
	case 8: {
	    bus_dmamap_unload(sc->sc_dmatag, sc->sc_uimap);
	    /* FALL THROUGH */
	}
	case 7: {
	    bus_dmamap_destroy(sc->sc_dmatag, sc->sc_uimap);
	    /* FALL THROUGH */
	}
	case 6: {
	    bus_dmamem_unmap(sc->sc_dmatag,
		(caddr_t) pdq->pdq_unsolicited_info.ui_events,
		PDQ_OS_PAGESIZE);
	    /* FALL THROUGH */
	}
	case 5: {
	    bus_dmamem_free(sc->sc_dmatag, ui_segs, ui_nsegs);
	    /* FALL THROUGH */
	}
	case 4: {
	    bus_dmamap_unload(sc->sc_dmatag, sc->sc_dbmap);
	    /* FALL THROUGH */
	}
	case 3: {
	    bus_dmamap_destroy(sc->sc_dmatag, sc->sc_dbmap);
	    /* FALL THROUGH */
	}
	case 2: {
	    bus_dmamem_unmap(sc->sc_dmatag,
		(caddr_t) pdq->pdq_dbp,
		sizeof(*pdq->pdq_dbp));
	    /* FALL THROUGH */
	}
	case 1: {
	    bus_dmamem_free(sc->sc_dmatag, db_segs, db_nsegs);
	    /* FALL THROUGH */
	}
    }

    return not_ok;
}
649
650 extern void
651 pdq_os_descriptor_block_sync(
652 pdq_os_ctx_t *sc,
653 size_t offset,
654 size_t length,
655 int ops)
656 {
657 bus_dmamap_sync(sc->sc_dmatag, sc->sc_dbmap, offset, length, ops);
658 }
659
660 extern void
661 pdq_os_consumer_block_sync(
662 pdq_os_ctx_t *sc,
663 int ops)
664 {
665 bus_dmamap_sync(sc->sc_dmatag, sc->sc_cbmap, 0, sizeof(pdq_consumer_block_t), ops);
666 }
667
668 extern void
669 pdq_os_unsolicited_event_sync(
670 pdq_os_ctx_t *sc,
671 size_t offset,
672 size_t length,
673 int ops)
674 {
675 bus_dmamap_sync(sc->sc_dmatag, sc->sc_uimap, offset, length, ops);
676 }
677
678 extern void
679 pdq_os_databuf_sync(
680 pdq_os_ctx_t *sc,
681 struct mbuf *m,
682 size_t offset,
683 size_t length,
684 int ops)
685 {
686 bus_dmamap_sync(sc->sc_dmatag, M_GETCTX(m, bus_dmamap_t), offset, length, ops);
687 }
688
689 extern void
690 pdq_os_databuf_free(
691 pdq_os_ctx_t *sc,
692 struct mbuf *m)
693 {
694 if (m->m_flags & (M_HASRXDMAMAP|M_HASTXDMAMAP)) {
695 bus_dmamap_t map = M_GETCTX(m, bus_dmamap_t);
696 bus_dmamap_unload(sc->sc_dmatag, map);
697 bus_dmamap_destroy(sc->sc_dmatag, map);
698 m->m_flags &= ~(M_HASRXDMAMAP|M_HASTXDMAMAP);
699 }
700 m_freem(m);
701 }
702
703 extern struct mbuf *
704 pdq_os_databuf_alloc(
705 pdq_os_ctx_t *sc)
706 {
707 struct mbuf *m;
708 bus_dmamap_t map;
709
710 MGETHDR(m, M_DONTWAIT, MT_DATA);
711 if (m == NULL) {
712 printf("%s: can't alloc small buf\n", sc->sc_dev.dv_xname);
713 return NULL;
714 }
715 MCLGET(m, M_DONTWAIT);
716 if ((m->m_flags & M_EXT) == 0) {
717 printf("%s: can't alloc cluster\n", sc->sc_dev.dv_xname);
718 m_free(m);
719 return NULL;
720 }
721 m->m_pkthdr.len = m->m_len = PDQ_OS_DATABUF_SIZE;
722
723 if (bus_dmamap_create(sc->sc_dmatag, PDQ_OS_DATABUF_SIZE,
724 1, PDQ_OS_DATABUF_SIZE, 0, BUS_DMA_NOWAIT, &map)) {
725 printf("%s: can't create dmamap\n", sc->sc_dev.dv_xname);
726 m_free(m);
727 return NULL;
728 }
729 if (bus_dmamap_load_mbuf(sc->sc_dmatag, map, m,
730 BUS_DMA_READ|BUS_DMA_NOWAIT)) {
731 printf("%s: can't load dmamap\n", sc->sc_dev.dv_xname);
732 bus_dmamap_destroy(sc->sc_dmatag, map);
733 m_free(m);
734 return NULL;
735 }
736 m->m_flags |= M_HASRXDMAMAP;
737 M_SETCTX(m, map);
738 return m;
739 }
740 #endif
Cache object: d6433112ab1e0b0c78000d2975fc1439
|