/*-
 * Copyright (c) 2004 Olivier Houchard
 * Copyright (c) 2002 Peter Grehan
 * Copyright (c) 1997, 1998 Justin T. Gibbs.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * From i386/busdma_machdep.c,v 1.26 2002/04/19 22:58:09 alfred
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/*
 * ARM bus dma support routines
 */

#define _ARM32_BUS_DMA_PRIVATE
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/bus.h>
#include <sys/interrupt.h>
#include <sys/lock.h>
#include <sys/proc.h>
#include <sys/mutex.h>
#include <sys/mbuf.h>
#include <sys/uio.h>
#include <sys/ktr.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>

#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>

#include <machine/atomic.h>
#include <machine/bus.h>
#include <machine/cpufunc.h>
#include <machine/md_var.h>

#define MAX_BPAGES		64
#define BUS_DMA_COULD_BOUNCE	BUS_DMA_BUS3
#define BUS_DMA_MIN_ALLOC_COMP	BUS_DMA_BUS4

struct bounce_zone;

struct bus_dma_tag {
	bus_dma_tag_t		parent;
	bus_size_t		alignment;
	bus_size_t		boundary;
	bus_addr_t		lowaddr;
	bus_addr_t		highaddr;
	bus_dma_filter_t	*filter;
	void			*filterarg;
	bus_size_t		maxsize;
	u_int			nsegments;
	bus_size_t		maxsegsz;
	int			flags;
	int			ref_count;
	int			map_count;
	bus_dma_lock_t		*lockfunc;
	void			*lockfuncarg;
	/*
	 * DMA range for this tag.  If the page doesn't fall within
	 * one of these ranges, an error is returned.  The caller
	 * may then decide what to do with the transfer.  If the
	 * range pointer is NULL, it is ignored.
	 */
	struct arm32_dma_range	*ranges;
	int			_nranges;
	struct bounce_zone	*bounce_zone;
};

struct bounce_page {
	vm_offset_t	vaddr;		/* kva of bounce buffer */
	vm_offset_t	vaddr_nocache;	/* kva of bounce buffer uncached */
	bus_addr_t	busaddr;	/* Physical address */
	vm_offset_t	datavaddr;	/* kva of client data */
	bus_size_t	datacount;	/* client data count */
	STAILQ_ENTRY(bounce_page) links;
};

int busdma_swi_pending;

struct bounce_zone {
	STAILQ_ENTRY(bounce_zone) links;
	STAILQ_HEAD(bp_list, bounce_page) bounce_page_list;
	int		total_bpages;
	int		free_bpages;
	int		reserved_bpages;
	int		active_bpages;
	int		total_bounced;
	int		total_deferred;
	bus_size_t	alignment;
	bus_size_t	boundary;
	bus_addr_t	lowaddr;
	char		zoneid[8];
	char		lowaddrid[20];
	struct sysctl_ctx_list sysctl_tree;
	struct sysctl_oid *sysctl_tree_top;
};

static struct mtx bounce_lock;
static int total_bpages;
static int busdma_zonecount;
static STAILQ_HEAD(, bounce_zone) bounce_zone_list;

SYSCTL_NODE(_hw, OID_AUTO, busdma, CTLFLAG_RD, 0, "Busdma parameters");
SYSCTL_INT(_hw_busdma, OID_AUTO, total_bpages, CTLFLAG_RD, &total_bpages, 0,
    "Total bounce pages");

#define DMAMAP_LINEAR		0x1
#define DMAMAP_MBUF		0x2
#define DMAMAP_UIO		0x4
#define DMAMAP_COHERENT		0x8
#define DMAMAP_ALLOCATED	0x10
#define DMAMAP_TYPE_MASK	(DMAMAP_LINEAR|DMAMAP_MBUF|DMAMAP_UIO)

struct bus_dmamap {
	struct bp_list	bpages;
	int		pagesneeded;
	int		pagesreserved;
	bus_dma_tag_t	dmat;
	int		flags;
	void		*buffer;
	void		*origbuffer;
	void		*allocbuffer;
	TAILQ_ENTRY(bus_dmamap)	freelist;
	int		len;
	STAILQ_ENTRY(bus_dmamap) links;
	bus_dmamap_callback_t *callback;
	void		*callback_arg;
};

static STAILQ_HEAD(, bus_dmamap) bounce_map_waitinglist;
static STAILQ_HEAD(, bus_dmamap) bounce_map_callbacklist;

static TAILQ_HEAD(,bus_dmamap) dmamap_freelist =
	TAILQ_HEAD_INITIALIZER(dmamap_freelist);

#define BUSDMA_STATIC_MAPS	500
static struct bus_dmamap map_pool[BUSDMA_STATIC_MAPS];

static struct mtx busdma_mtx;

MTX_SYSINIT(busdma_mtx, &busdma_mtx, "busdma lock", MTX_DEF);

static void init_bounce_pages(void *dummy);
static int alloc_bounce_zone(bus_dma_tag_t dmat);
static int alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages);
static int reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map,
    int commit);
static bus_addr_t add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map,
    vm_offset_t vaddr, bus_size_t size);
static void free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage);

/* Default tag, as most drivers provide no parent tag. */
bus_dma_tag_t arm_root_dma_tag;

/*
 * Return true if a match is made.
 *
 * To find a match walk the chain of bus_dma_tag_t's looking for 'paddr'.
 *
 * If paddr is within the bounds of the dma tag, call the filter callback
 * to check for a match; if there is no filter callback, assume a match.
 */
static int
run_filter(bus_dma_tag_t dmat, bus_addr_t paddr)
{
	int retval;

	retval = 0;

	do {
		if (((paddr > dmat->lowaddr && paddr <= dmat->highaddr)
		    || ((paddr & (dmat->alignment - 1)) != 0))
		    && (dmat->filter == NULL
		    || (*dmat->filter)(dmat->filterarg, paddr) != 0))
			retval = 1;

		dmat = dmat->parent;
	} while (retval == 0 && dmat != NULL);
	return (retval);
}
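
/*
 * Illustrative example (values are hypothetical, not from this file):
 * for a tag created with lowaddr = 0x00ffffff, highaddr =
 * BUS_SPACE_MAXADDR, alignment = 1 and no filter callback, a page at
 * physical address 0x01000000 lies inside the exclusion window
 * (paddr > lowaddr && paddr <= highaddr), so run_filter() returns 1 and
 * the segment is bounced; a page at 0x00800000 returns 0 and is used
 * directly.
 */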

static void
arm_dmamap_freelist_init(void *dummy)
{
	int i;

	for (i = 0; i < BUSDMA_STATIC_MAPS; i++)
		TAILQ_INSERT_HEAD(&dmamap_freelist, &map_pool[i], freelist);
}

SYSINIT(busdma, SI_SUB_VM, SI_ORDER_ANY, arm_dmamap_freelist_init, NULL);

static __inline int
bus_dmamap_load_buffer(bus_dma_tag_t dmat, bus_dma_segment_t *segs,
    bus_dmamap_t map, void *buf, bus_size_t buflen, struct pmap *pmap,
    int flags, vm_offset_t *lastaddrp, int *segp);

/*
 * Return true if the (lowaddr, highaddr) exclusion window overlaps
 * physical memory, i.e. if bounce buffers could ever be needed to
 * satisfy this tag's address limit.
 */
static __inline int
_bus_dma_can_bounce(vm_offset_t lowaddr, vm_offset_t highaddr)
{
	int i;

	for (i = 0; phys_avail[i] && phys_avail[i + 1]; i += 2) {
		if ((lowaddr >= phys_avail[i] && lowaddr <= phys_avail[i + 1])
		    || (lowaddr < phys_avail[i] &&
		    highaddr > phys_avail[i]))
			return (1);
	}
	return (0);
}

/*
 * Check to see if the specified page is in an allowed DMA range.
 */
static __inline struct arm32_dma_range *
_bus_dma_inrange(struct arm32_dma_range *ranges, int nranges,
    bus_addr_t curaddr)
{
	struct arm32_dma_range *dr;
	int i;

	for (i = 0, dr = ranges; i < nranges; i++, dr++) {
		if (curaddr >= dr->dr_sysbase &&
		    round_page(curaddr) <= (dr->dr_sysbase + dr->dr_len))
			return (dr);
	}

	return (NULL);
}
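
/*
 * Illustrative example (hypothetical range values): with a single range
 * of dr_sysbase = 0x20000000, dr_busbase = 0 and dr_len = 64MB,
 * _bus_dma_inrange() matches curaddr = 0x21000000, and the loader below
 * rewrites it to (0x21000000 - dr_sysbase) + dr_busbase = 0x01000000
 * before handing the segment to the device.
 */
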
/*
 * Convenience function for manipulating driver locks from busdma (during
 * busdma_swi, for example).  Drivers that don't provide their own locks
 * should specify &Giant as dmat->lockfuncarg.  Drivers that use their own
 * non-mutex locking scheme don't have to use this at all.
 */
void
busdma_lock_mutex(void *arg, bus_dma_lock_op_t op)
{
	struct mtx *dmtx;

	dmtx = (struct mtx *)arg;
	switch (op) {
	case BUS_DMA_LOCK:
		mtx_lock(dmtx);
		break;
	case BUS_DMA_UNLOCK:
		mtx_unlock(dmtx);
		break;
	default:
		panic("Unknown operation 0x%x for busdma_lock_mutex!", op);
	}
}
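
/*
 * Sketch of typical driver usage (hypothetical names, not part of this
 * file): a driver whose deferred callbacks must run under its own mutex
 * passes busdma_lock_mutex and that mutex to bus_dma_tag_create():
 *
 *	mtx_init(&sc->sc_mtx, "foo", NULL, MTX_DEF);
 *	error = bus_dma_tag_create(NULL, 1, 0, BUS_SPACE_MAXADDR,
 *	    BUS_SPACE_MAXADDR, NULL, NULL, MAXBSIZE, 1, MAXBSIZE, 0,
 *	    busdma_lock_mutex, &sc->sc_mtx, &sc->sc_dmat);
 */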

/*
 * dflt_lock should never get called.  It gets put into the dma tag when
 * lockfunc == NULL, which is only valid if the maps that are associated
 * with the tag are meant to never be deferred.
 * XXX Should have a way to identify which driver is responsible here.
 */
static void
dflt_lock(void *arg, bus_dma_lock_op_t op)
{
#ifdef INVARIANTS
	panic("driver error: busdma dflt_lock called");
#else
	printf("DRIVER_ERROR: busdma dflt_lock called\n");
#endif
}

static __inline bus_dmamap_t
_busdma_alloc_dmamap(void)
{
	bus_dmamap_t map;

	mtx_lock(&busdma_mtx);
	map = TAILQ_FIRST(&dmamap_freelist);
	if (map)
		TAILQ_REMOVE(&dmamap_freelist, map, freelist);
	mtx_unlock(&busdma_mtx);
	if (!map) {
		map = malloc(sizeof(*map), M_DEVBUF, M_NOWAIT | M_ZERO);
		if (map)
			map->flags = DMAMAP_ALLOCATED;
	} else
		map->flags = 0;
	/* Don't dereference a NULL map if the fallback malloc failed. */
	if (map != NULL)
		STAILQ_INIT(&map->bpages);
	return (map);
}

static __inline void
_busdma_free_dmamap(bus_dmamap_t map)
{
	if (map->flags & DMAMAP_ALLOCATED)
		free(map, M_DEVBUF);
	else {
		mtx_lock(&busdma_mtx);
		TAILQ_INSERT_HEAD(&dmamap_freelist, map, freelist);
		mtx_unlock(&busdma_mtx);
	}
}
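
/*
 * Design note: maps normally come from the static map_pool above, so
 * bus_dmamap_create() works early in boot and under memory pressure.
 * Only when the pool is exhausted does _busdma_alloc_dmamap() fall back
 * to malloc(), tagging such maps DMAMAP_ALLOCATED so that
 * _busdma_free_dmamap() knows whether to free() them or return them to
 * the freelist.
 */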

/*
 * Allocate a device specific dma_tag.
 */
#define SEG_NB 1024

int
bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment,
    bus_size_t boundary, bus_addr_t lowaddr,
    bus_addr_t highaddr, bus_dma_filter_t *filter,
    void *filterarg, bus_size_t maxsize, int nsegments,
    bus_size_t maxsegsz, int flags, bus_dma_lock_t *lockfunc,
    void *lockfuncarg, bus_dma_tag_t *dmat)
{
	bus_dma_tag_t newtag;
	int error = 0;

	/* Return a NULL tag on failure */
	*dmat = NULL;
	if (!parent)
		parent = arm_root_dma_tag;

	newtag = (bus_dma_tag_t)malloc(sizeof(*newtag), M_DEVBUF, M_NOWAIT);
	if (newtag == NULL) {
		CTR4(KTR_BUSDMA, "%s returned tag %p tag flags 0x%x error %d",
		    __func__, newtag, 0, error);
		return (ENOMEM);
	}

	newtag->parent = parent;
	newtag->alignment = alignment;
	newtag->boundary = boundary;
	newtag->lowaddr = trunc_page((vm_offset_t)lowaddr) + (PAGE_SIZE - 1);
	newtag->highaddr = trunc_page((vm_offset_t)highaddr) + (PAGE_SIZE - 1);
	newtag->filter = filter;
	newtag->filterarg = filterarg;
	newtag->maxsize = maxsize;
	newtag->nsegments = nsegments;
	newtag->maxsegsz = maxsegsz;
	newtag->flags = flags;
	newtag->ref_count = 1; /* Count ourself */
	newtag->map_count = 0;
	newtag->ranges = bus_dma_get_range();
	newtag->_nranges = bus_dma_get_range_nb();
	if (lockfunc != NULL) {
		newtag->lockfunc = lockfunc;
		newtag->lockfuncarg = lockfuncarg;
	} else {
		newtag->lockfunc = dflt_lock;
		newtag->lockfuncarg = NULL;
	}
	/*
	 * Take into account any restrictions imposed by our parent tag
	 */
	if (parent != NULL) {
		newtag->lowaddr = min(parent->lowaddr, newtag->lowaddr);
		newtag->highaddr = max(parent->highaddr, newtag->highaddr);
		if (newtag->boundary == 0)
			newtag->boundary = parent->boundary;
		else if (parent->boundary != 0)
			newtag->boundary = min(parent->boundary,
			    newtag->boundary);
		if ((newtag->filter != NULL) ||
		    ((parent->flags & BUS_DMA_COULD_BOUNCE) != 0))
			newtag->flags |= BUS_DMA_COULD_BOUNCE;
		if (newtag->filter == NULL) {
			/*
			 * Short circuit looking at our parent directly
			 * since we have encapsulated all of its information
			 */
			newtag->filter = parent->filter;
			newtag->filterarg = parent->filterarg;
			newtag->parent = parent->parent;
		}
		if (newtag->parent != NULL)
			atomic_add_int(&parent->ref_count, 1);
	}
	if (_bus_dma_can_bounce(newtag->lowaddr, newtag->highaddr)
	    || newtag->alignment > 1)
		newtag->flags |= BUS_DMA_COULD_BOUNCE;

	if (((newtag->flags & BUS_DMA_COULD_BOUNCE) != 0) &&
	    (flags & BUS_DMA_ALLOCNOW) != 0) {
		struct bounce_zone *bz;

		/* Must bounce */

		if ((error = alloc_bounce_zone(newtag)) != 0) {
			free(newtag, M_DEVBUF);
			return (error);
		}
		bz = newtag->bounce_zone;

		if (ptoa(bz->total_bpages) < maxsize) {
			int pages;

			pages = atop(maxsize) - bz->total_bpages;

			/* Add pages to our bounce pool */
			if (alloc_bounce_pages(newtag, pages) < pages)
				error = ENOMEM;
		}
		/* Performed initial allocation */
		newtag->flags |= BUS_DMA_MIN_ALLOC_COMP;
	} else
		newtag->bounce_zone = NULL;
	if (error != 0)
		free(newtag, M_DEVBUF);
	else
		*dmat = newtag;
	CTR4(KTR_BUSDMA, "%s returned tag %p tag flags 0x%x error %d",
	    __func__, newtag, (newtag != NULL ? newtag->flags : 0), error);

	return (error);
}
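
/*
 * Sketch of a typical call (hypothetical driver values): a controller
 * that can address all of memory, needs 4-byte alignment and up to 16
 * scatter/gather segments of at most 64KB each:
 *
 *	error = bus_dma_tag_create(NULL, 4, 0,
 *	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
 *	    65536, 16, 65536, 0, NULL, NULL, &sc->sc_dmat);
 *
 * Passing a NULL lockfunc installs dflt_lock(), which panics under
 * INVARIANTS, so NULL is only appropriate for tags whose loads are
 * never deferred (e.g. BUS_DMA_NOWAIT loads only).
 */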

int
bus_dma_tag_destroy(bus_dma_tag_t dmat)
{
#ifdef KTR
	bus_dma_tag_t dmat_copy = dmat;
#endif

	if (dmat != NULL) {
		if (dmat->map_count != 0)
			return (EBUSY);

		while (dmat != NULL) {
			bus_dma_tag_t parent;

			parent = dmat->parent;
			atomic_subtract_int(&dmat->ref_count, 1);
			if (dmat->ref_count == 0) {
				free(dmat, M_DEVBUF);
				/*
				 * Last reference count, so
				 * release our reference
				 * count on our parent.
				 */
				dmat = parent;
			} else
				dmat = NULL;
		}
	}
	CTR2(KTR_BUSDMA, "%s tag %p", __func__, dmat_copy);

	return (0);
}

/*
 * Allocate a handle for mapping from kva/uva/physical
 * address space into bus device space.
 */
int
bus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp)
{
	bus_dmamap_t newmap;
	int error = 0;

	newmap = _busdma_alloc_dmamap();
	if (newmap == NULL) {
		CTR3(KTR_BUSDMA, "%s: tag %p error %d", __func__, dmat, ENOMEM);
		return (ENOMEM);
	}
	*mapp = newmap;
	newmap->dmat = dmat;
	newmap->allocbuffer = NULL;
	dmat->map_count++;

	/*
	 * Bouncing might be required if the driver asks for an active
	 * exclusion region, a data alignment that is stricter than 1, and/or
	 * an active address boundary.
	 */
	if (dmat->flags & BUS_DMA_COULD_BOUNCE) {
		/* Must bounce */
		struct bounce_zone *bz;
		int maxpages;

		if (dmat->bounce_zone == NULL) {
			if ((error = alloc_bounce_zone(dmat)) != 0) {
				_busdma_free_dmamap(newmap);
				*mapp = NULL;
				return (error);
			}
		}
		bz = dmat->bounce_zone;

		/* Initialize the new map */
		STAILQ_INIT(&((*mapp)->bpages));

		/*
		 * Attempt to add pages to our pool on a per-instance
		 * basis up to a sane limit.
		 */
		maxpages = MAX_BPAGES;
		if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0
		    || (dmat->map_count > 0 && bz->total_bpages < maxpages)) {
			int pages;

			pages = MAX(atop(dmat->maxsize), 1);
			pages = MIN(maxpages - bz->total_bpages, pages);
			pages = MAX(pages, 1);
			if (alloc_bounce_pages(dmat, pages) < pages)
				error = ENOMEM;

			if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0) {
				if (error == 0)
					dmat->flags |= BUS_DMA_MIN_ALLOC_COMP;
			} else {
				error = 0;
			}
		}
	}
	CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
	    __func__, dmat, dmat->flags, error);

	return (0);
}

/*
 * Destroy a handle for mapping from kva/uva/physical
 * address space into bus device space.
 */
int
bus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map)
{

	/* Check for outstanding bounce pages before releasing the map. */
	if (STAILQ_FIRST(&map->bpages) != NULL) {
		CTR3(KTR_BUSDMA, "%s: tag %p error %d",
		    __func__, dmat, EBUSY);
		return (EBUSY);
	}
	_busdma_free_dmamap(map);
	dmat->map_count--;
	CTR2(KTR_BUSDMA, "%s: tag %p error 0", __func__, dmat);
	return (0);
}

/*
 * Allocate a piece of memory that can be efficiently mapped into
 * bus device space based on the constraints listed in the dma tag.
 * A dmamap for use with bus_dmamap_load() is also allocated.
 */
int
bus_dmamem_alloc(bus_dma_tag_t dmat, void **vaddr, int flags,
    bus_dmamap_t *mapp)
{
	bus_dmamap_t newmap = NULL;
	int mflags;

	if (flags & BUS_DMA_NOWAIT)
		mflags = M_NOWAIT;
	else
		mflags = M_WAITOK;
	if (flags & BUS_DMA_ZERO)
		mflags |= M_ZERO;

	newmap = _busdma_alloc_dmamap();
	if (newmap == NULL) {
		CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
		    __func__, dmat, dmat->flags, ENOMEM);
		return (ENOMEM);
	}
	dmat->map_count++;
	*mapp = newmap;
	newmap->dmat = dmat;

	if (dmat->maxsize <= PAGE_SIZE &&
	    (dmat->alignment < dmat->maxsize) &&
	    !_bus_dma_can_bounce(dmat->lowaddr, dmat->highaddr)) {
		*vaddr = malloc(dmat->maxsize, M_DEVBUF, mflags);
	} else {
		/*
		 * XXX Use Contigmalloc until it is merged into this facility
		 *     and handles multi-seg allocations.  Nobody is doing
		 *     multi-seg allocations yet though.
		 */
		*vaddr = contigmalloc(dmat->maxsize, M_DEVBUF, mflags,
		    0ul, dmat->lowaddr, dmat->alignment ? dmat->alignment : 1ul,
		    dmat->boundary);
	}
	if (*vaddr == NULL) {
		if (newmap != NULL) {
			_busdma_free_dmamap(newmap);
			dmat->map_count--;
		}
		*mapp = NULL;
		return (ENOMEM);
	}
	if (flags & BUS_DMA_COHERENT) {
		void *tmpaddr = arm_remap_nocache(
		    (void *)((vm_offset_t)*vaddr &~ PAGE_MASK),
		    dmat->maxsize + ((vm_offset_t)*vaddr & PAGE_MASK));

		if (tmpaddr) {
			tmpaddr = (void *)((vm_offset_t)(tmpaddr) +
			    ((vm_offset_t)*vaddr & PAGE_MASK));
			newmap->origbuffer = *vaddr;
			newmap->allocbuffer = tmpaddr;
			cpu_idcache_wbinv_range((vm_offset_t)*vaddr,
			    dmat->maxsize);
			*vaddr = tmpaddr;
		} else
			newmap->origbuffer = newmap->allocbuffer = NULL;
	} else
		newmap->origbuffer = newmap->allocbuffer = NULL;
	return (0);
}
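
/*
 * Sketch of typical usage (hypothetical names): allocate a zeroed,
 * cache-coherent descriptor ring together with its map:
 *
 *	error = bus_dmamem_alloc(sc->sc_dmat, (void **)&ring,
 *	    BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT, &ring_map);
 *
 * With BUS_DMA_COHERENT the buffer is remapped uncached above, so the
 * CPU and the device see each other's writes without explicit cache
 * maintenance in bus_dmamap_sync().
 */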

/*
 * Free a piece of memory and its allocated dmamap that were allocated
 * via bus_dmamem_alloc.  Make the same choice for free/contigfree.
 */
void
bus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map)
{
	if (map->allocbuffer) {
		KASSERT(map->allocbuffer == vaddr,
		    ("Trying to free the wrong DMA buffer"));
		vaddr = map->origbuffer;
		arm_unmap_nocache(map->allocbuffer, dmat->maxsize);
	}
	if (dmat->maxsize <= PAGE_SIZE &&
	    dmat->alignment < dmat->maxsize &&
	    !_bus_dma_can_bounce(dmat->lowaddr, dmat->highaddr))
		free(vaddr, M_DEVBUF);
	else
		contigfree(vaddr, dmat->maxsize, M_DEVBUF);
	dmat->map_count--;
	_busdma_free_dmamap(map);
	CTR3(KTR_BUSDMA, "%s: tag %p flags 0x%x", __func__, dmat, dmat->flags);
}

static int
_bus_dmamap_count_pages(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
    bus_size_t buflen, int flags)
{
	vm_offset_t vaddr;
	vm_offset_t vendaddr;
	bus_addr_t paddr;

	if (map->pagesneeded == 0) {
		CTR4(KTR_BUSDMA, "lowaddr= %d Maxmem= %d, boundary= %d, "
		    "alignment= %d", dmat->lowaddr, ptoa((vm_paddr_t)Maxmem),
		    dmat->boundary, dmat->alignment);
		CTR2(KTR_BUSDMA, "map= %p, pagesneeded= %d",
		    map, map->pagesneeded);
		/*
		 * Count the number of bounce pages
		 * needed in order to complete this transfer
		 */
		vaddr = trunc_page((vm_offset_t)buf);
		vendaddr = (vm_offset_t)buf + buflen;

		while (vaddr < vendaddr) {
			paddr = pmap_kextract(vaddr);
			if (((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0) &&
			    run_filter(dmat, paddr) != 0)
				map->pagesneeded++;
			vaddr += PAGE_SIZE;
		}
		CTR1(KTR_BUSDMA, "pagesneeded= %d\n", map->pagesneeded);
	}

	/* Reserve Necessary Bounce Pages */
	if (map->pagesneeded != 0) {
		mtx_lock(&bounce_lock);
		if (flags & BUS_DMA_NOWAIT) {
			if (reserve_bounce_pages(dmat, map, 0) != 0) {
				mtx_unlock(&bounce_lock);
				return (ENOMEM);
			}
		} else {
			if (reserve_bounce_pages(dmat, map, 1) != 0) {
				/* Queue us for resources */
				STAILQ_INSERT_TAIL(&bounce_map_waitinglist,
				    map, links);
				mtx_unlock(&bounce_lock);
				return (EINPROGRESS);
			}
		}
		mtx_unlock(&bounce_lock);
	}

	return (0);
}

/*
 * Utility function to load a linear buffer.  lastaddrp holds state
 * between invocations (for multiple-buffer loads).  segp contains
 * the starting segment on entrance, and the ending segment on exit.
 */
static __inline int
bus_dmamap_load_buffer(bus_dma_tag_t dmat, bus_dma_segment_t *segs,
    bus_dmamap_t map, void *buf, bus_size_t buflen, struct pmap *pmap,
    int flags, vm_offset_t *lastaddrp, int *segp)
{
	bus_size_t sgsize;
	bus_addr_t curaddr, lastaddr, baddr, bmask;
	vm_offset_t vaddr = (vm_offset_t)buf;
	int seg;
	int error = 0;
	pd_entry_t *pde;
	pt_entry_t pte;
	pt_entry_t *ptep;

	lastaddr = *lastaddrp;
	bmask = ~(dmat->boundary - 1);

	if ((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0) {
		error = _bus_dmamap_count_pages(dmat, map, buf, buflen, flags);
		if (error)
			return (error);
	}
	CTR3(KTR_BUSDMA, "lowaddr= %d boundary= %d, "
	    "alignment= %d", dmat->lowaddr, dmat->boundary, dmat->alignment);

	for (seg = *segp; buflen > 0; ) {
		/*
		 * Get the physical address for this segment.
		 *
		 * XXX Don't support checking for coherent mappings
		 * XXX in user address space.
		 */
		if (__predict_true(pmap == pmap_kernel())) {
			(void) pmap_get_pde_pte(pmap, vaddr, &pde, &ptep);
			if (__predict_false(pmap_pde_section(pde))) {
				curaddr = (*pde & L1_S_FRAME) |
				    (vaddr & L1_S_OFFSET);
				if (*pde & L1_S_CACHE_MASK)
					map->flags &= ~DMAMAP_COHERENT;
			} else {
				pte = *ptep;
				KASSERT((pte & L2_TYPE_MASK) != L2_TYPE_INV,
				    ("INV type"));
				if (__predict_false((pte & L2_TYPE_MASK)
				    == L2_TYPE_L)) {
					curaddr = (pte & L2_L_FRAME) |
					    (vaddr & L2_L_OFFSET);
					if (pte & L2_L_CACHE_MASK)
						map->flags &= ~DMAMAP_COHERENT;
				} else {
					curaddr = (pte & L2_S_FRAME) |
					    (vaddr & L2_S_OFFSET);
					if (pte & L2_S_CACHE_MASK)
						map->flags &= ~DMAMAP_COHERENT;
				}
			}
		} else {
			curaddr = pmap_extract(pmap, vaddr);
			map->flags &= ~DMAMAP_COHERENT;
		}

		/*
		 * Compute the segment size, and adjust counts.
		 */
		sgsize = PAGE_SIZE - ((u_long)curaddr & PAGE_MASK);
		if (buflen < sgsize)
			sgsize = buflen;

		/*
		 * Make sure we don't cross any boundaries.
		 */
		if (dmat->boundary > 0) {
			baddr = (curaddr + dmat->boundary) & bmask;
			if (sgsize > (baddr - curaddr))
				sgsize = (baddr - curaddr);
		}
		if (((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0) &&
		    map->pagesneeded != 0 && run_filter(dmat, curaddr))
			curaddr = add_bounce_page(dmat, map, vaddr, sgsize);

		if (dmat->ranges) {
			struct arm32_dma_range *dr;

			dr = _bus_dma_inrange(dmat->ranges, dmat->_nranges,
			    curaddr);
			if (dr == NULL)
				return (EINVAL);
			/*
			 * In a valid DMA range.  Translate the physical
			 * memory address to an address in the DMA window.
			 */
			curaddr = (curaddr - dr->dr_sysbase) + dr->dr_busbase;
		}

		/*
		 * Insert chunk into a segment, coalescing with
		 * the previous segment if possible.
		 */
		if (seg >= 0 && curaddr == lastaddr &&
		    (segs[seg].ds_len + sgsize) <= dmat->maxsegsz &&
		    (dmat->boundary == 0 ||
		    (segs[seg].ds_addr & bmask) ==
		    (curaddr & bmask))) {
			segs[seg].ds_len += sgsize;
			goto segdone;
		} else {
			if (++seg >= dmat->nsegments)
				break;
			segs[seg].ds_addr = curaddr;
			segs[seg].ds_len = sgsize;
		}
		if (error)
			break;
segdone:
		lastaddr = curaddr + sgsize;
		vaddr += sgsize;
		buflen -= sgsize;
	}

	*segp = seg;
	*lastaddrp = lastaddr;

	/*
	 * Did we fit?
	 */
	if (buflen != 0)
		error = EFBIG; /* XXX better return value here? */
	return (error);
}

/*
 * Map the buffer buf into bus space using the dmamap map.
 */
int
bus_dmamap_load(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
    bus_size_t buflen, bus_dmamap_callback_t *callback,
    void *callback_arg, int flags)
{
	vm_offset_t lastaddr = 0;
	int error, nsegs = -1;
#ifdef __CC_SUPPORTS_DYNAMIC_ARRAY_INIT
	bus_dma_segment_t dm_segments[dmat->nsegments];
#else
	bus_dma_segment_t dm_segments[BUS_DMAMAP_NSEGS];
#endif

	KASSERT(dmat != NULL, ("dmatag is NULL"));
	KASSERT(map != NULL, ("dmamap is NULL"));
	map->callback = callback;
	map->callback_arg = callback_arg;
	map->flags &= ~DMAMAP_TYPE_MASK;
	map->flags |= DMAMAP_LINEAR|DMAMAP_COHERENT;
	map->buffer = buf;
	map->len = buflen;
	error = bus_dmamap_load_buffer(dmat,
	    dm_segments, map, buf, buflen, kernel_pmap,
	    flags, &lastaddr, &nsegs);
	if (error == EINPROGRESS)
		return (error);
	if (error)
		(*callback)(callback_arg, NULL, 0, error);
	else
		(*callback)(callback_arg, dm_segments, nsegs + 1, error);

	CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d",
	    __func__, dmat, dmat->flags, nsegs + 1, error);

	return (0);
}
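
/*
 * Sketch of the usual load pattern (hypothetical callback, not part of
 * this file); the callback must capture what it needs because the
 * segment array lives on bus_dmamap_load()'s stack:
 *
 *	static void
 *	foo_load_cb(void *arg, bus_dma_segment_t *segs, int nseg, int err)
 *	{
 *		if (err == 0)
 *			*(bus_addr_t *)arg = segs[0].ds_addr;
 *	}
 *
 *	error = bus_dmamap_load(sc->sc_dmat, map, buf, len, foo_load_cb,
 *	    &busaddr, BUS_DMA_NOWAIT);
 *
 * Without BUS_DMA_NOWAIT, a return of EINPROGRESS means the load was
 * deferred for bounce pages and the callback will run later from
 * busdma_swi().
 */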

/*
 * Like bus_dmamap_load(), but for mbufs.
 */
int
bus_dmamap_load_mbuf(bus_dma_tag_t dmat, bus_dmamap_t map, struct mbuf *m0,
    bus_dmamap_callback2_t *callback, void *callback_arg,
    int flags)
{
#ifdef __CC_SUPPORTS_DYNAMIC_ARRAY_INIT
	bus_dma_segment_t dm_segments[dmat->nsegments];
#else
	bus_dma_segment_t dm_segments[BUS_DMAMAP_NSEGS];
#endif
	int nsegs = -1, error = 0;

	M_ASSERTPKTHDR(m0);

	map->flags &= ~DMAMAP_TYPE_MASK;
	map->flags |= DMAMAP_MBUF | DMAMAP_COHERENT;
	map->buffer = m0;
	map->len = 0;
	if (m0->m_pkthdr.len <= dmat->maxsize) {
		vm_offset_t lastaddr = 0;
		struct mbuf *m;

		for (m = m0; m != NULL && error == 0; m = m->m_next) {
			if (m->m_len > 0) {
				error = bus_dmamap_load_buffer(dmat,
				    dm_segments, map, m->m_data, m->m_len,
				    pmap_kernel(), flags, &lastaddr, &nsegs);
				map->len += m->m_len;
			}
		}
	} else {
		error = EINVAL;
	}

	if (error) {
		/*
		 * force "no valid mappings" on error in callback.
		 */
		(*callback)(callback_arg, dm_segments, 0, 0, error);
	} else {
		(*callback)(callback_arg, dm_segments, nsegs + 1,
		    m0->m_pkthdr.len, error);
	}
	CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d",
	    __func__, dmat, dmat->flags, error, nsegs + 1);

	return (error);
}

int
bus_dmamap_load_mbuf_sg(bus_dma_tag_t dmat, bus_dmamap_t map,
    struct mbuf *m0, bus_dma_segment_t *segs, int *nsegs,
    int flags)
{
	int error = 0;

	M_ASSERTPKTHDR(m0);

	flags |= BUS_DMA_NOWAIT;
	*nsegs = -1;
	map->flags &= ~DMAMAP_TYPE_MASK;
	map->flags |= DMAMAP_MBUF | DMAMAP_COHERENT;
	map->buffer = m0;
	map->len = 0;
	if (m0->m_pkthdr.len <= dmat->maxsize) {
		vm_offset_t lastaddr = 0;
		struct mbuf *m;

		for (m = m0; m != NULL && error == 0; m = m->m_next) {
			if (m->m_len > 0) {
				error = bus_dmamap_load_buffer(dmat, segs, map,
				    m->m_data, m->m_len,
				    pmap_kernel(), flags, &lastaddr,
				    nsegs);
				map->len += m->m_len;
			}
		}
	} else {
		error = EINVAL;
	}

	/* XXX FIXME: Having to increment nsegs is really annoying */
	++*nsegs;
	CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d",
	    __func__, dmat, dmat->flags, error, *nsegs);
	return (error);
}

/*
 * Like bus_dmamap_load(), but for uios.
 */
int
bus_dmamap_load_uio(bus_dma_tag_t dmat, bus_dmamap_t map, struct uio *uio,
    bus_dmamap_callback2_t *callback, void *callback_arg,
    int flags)
{
	vm_offset_t lastaddr = 0;
#ifdef __CC_SUPPORTS_DYNAMIC_ARRAY_INIT
	bus_dma_segment_t dm_segments[dmat->nsegments];
#else
	bus_dma_segment_t dm_segments[BUS_DMAMAP_NSEGS];
#endif
	int nsegs, i, error;
	bus_size_t resid;
	struct iovec *iov;
	struct pmap *pmap;

	resid = uio->uio_resid;
	iov = uio->uio_iov;
	map->flags &= ~DMAMAP_TYPE_MASK;
	map->flags |= DMAMAP_UIO|DMAMAP_COHERENT;
	map->buffer = uio;
	map->len = 0;

	if (uio->uio_segflg == UIO_USERSPACE) {
		KASSERT(uio->uio_td != NULL,
		    ("bus_dmamap_load_uio: USERSPACE but no proc"));
		pmap = vmspace_pmap(uio->uio_td->td_proc->p_vmspace);
	} else
		pmap = kernel_pmap;

	error = 0;
	nsegs = -1;
	for (i = 0; i < uio->uio_iovcnt && resid != 0 && !error; i++) {
		/*
		 * Now at the first iovec to load.  Load each iovec
		 * until we have exhausted the residual count.
		 */
		bus_size_t minlen =
		    resid < iov[i].iov_len ? resid : iov[i].iov_len;
		caddr_t addr = (caddr_t) iov[i].iov_base;

		if (minlen > 0) {
			error = bus_dmamap_load_buffer(dmat, dm_segments, map,
			    addr, minlen, pmap, flags, &lastaddr, &nsegs);

			map->len += minlen;
			resid -= minlen;
		}
	}

	if (error) {
		/*
		 * force "no valid mappings" on error in callback.
		 */
		(*callback)(callback_arg, dm_segments, 0, 0, error);
	} else {
		(*callback)(callback_arg, dm_segments, nsegs + 1,
		    uio->uio_resid, error);
	}

	CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d",
	    __func__, dmat, dmat->flags, error, nsegs + 1);
	return (error);
}

/*
 * Release the mapping held by map.
 */
void
_bus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t map)
{
	struct bounce_page *bpage;

	map->flags &= ~DMAMAP_TYPE_MASK;
	while ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) {
		STAILQ_REMOVE_HEAD(&map->bpages, links);
		free_bounce_page(dmat, bpage);
	}
}

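/*
 * Sync a linear buffer by virtual address.  POSTREAD is the delicate
 * case: cpu_dcache_inv_range() operates on whole cache lines, so any
 * bytes that share a partial line at either end of the buffer are
 * first copied aside (_tmp_cl/_tmp_clend), the range is invalidated,
 * and the saved neighbouring bytes are then copied back.
 */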
static __inline void
bus_dmamap_sync_buf(void *buf, int len, bus_dmasync_op_t op)
{
	char _tmp_cl[arm_dcache_align], _tmp_clend[arm_dcache_align];

	if (op & BUS_DMASYNC_PREWRITE)
		cpu_dcache_wb_range((vm_offset_t)buf, len);
	if (op & BUS_DMASYNC_POSTREAD) {
		/* Save the bytes sharing a partial line at each end. */
		if ((vm_offset_t)buf & arm_dcache_align_mask)
			memcpy(_tmp_cl, (void *)((vm_offset_t)buf &
			    ~arm_dcache_align_mask),
			    (vm_offset_t)buf & arm_dcache_align_mask);
		if (((vm_offset_t)buf + len) & arm_dcache_align_mask)
			memcpy(_tmp_clend, (void *)((vm_offset_t)buf + len),
			    arm_dcache_align - (((vm_offset_t)buf + len) &
			    arm_dcache_align_mask));
		cpu_dcache_inv_range((vm_offset_t)buf, len);
		/* Restore the saved neighbouring bytes. */
		if ((vm_offset_t)buf & arm_dcache_align_mask)
			memcpy((void *)((vm_offset_t)buf &
			    ~arm_dcache_align_mask), _tmp_cl,
			    (vm_offset_t)buf & arm_dcache_align_mask);
		if (((vm_offset_t)buf + len) & arm_dcache_align_mask)
			memcpy((void *)((vm_offset_t)buf + len), _tmp_clend,
			    arm_dcache_align - (((vm_offset_t)buf + len) &
			    arm_dcache_align_mask));
	}
}

static void
_bus_dmamap_sync_bp(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op)
{
	struct bounce_page *bpage;

	STAILQ_FOREACH(bpage, &map->bpages, links) {
		if (op & BUS_DMASYNC_PREWRITE) {
			bcopy((void *)bpage->datavaddr,
			    (void *)(bpage->vaddr_nocache != 0 ?
			    bpage->vaddr_nocache : bpage->vaddr),
			    bpage->datacount);
			if (bpage->vaddr_nocache == 0)
				cpu_dcache_wb_range(bpage->vaddr,
				    bpage->datacount);
		}
		if (op & BUS_DMASYNC_POSTREAD) {
			if (bpage->vaddr_nocache == 0)
				cpu_dcache_inv_range(bpage->vaddr,
				    bpage->datacount);
			bcopy((void *)(bpage->vaddr_nocache != 0 ?
			    bpage->vaddr_nocache : bpage->vaddr),
			    (void *)bpage->datavaddr, bpage->datacount);
		}
	}
}

static __inline int
_bus_dma_buf_is_in_bp(bus_dmamap_t map, void *buf, int len)
{
	struct bounce_page *bpage;

	STAILQ_FOREACH(bpage, &map->bpages, links) {
		if ((vm_offset_t)buf >= bpage->datavaddr &&
		    (vm_offset_t)buf + len <= bpage->datavaddr +
		    bpage->datacount)
			return (1);
	}
	return (0);
}

void
_bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op)
{
	struct mbuf *m;
	struct uio *uio;
	int resid;
	struct iovec *iov;

	if (op == BUS_DMASYNC_POSTWRITE)
		return;
	if (STAILQ_FIRST(&map->bpages))
		_bus_dmamap_sync_bp(dmat, map, op);
	if (map->flags & DMAMAP_COHERENT)
		return;
	if ((op & BUS_DMASYNC_POSTREAD) && (map->len > PAGE_SIZE)) {
		cpu_dcache_wbinv_all();
		return;
	}
	CTR3(KTR_BUSDMA, "%s: op %x flags %x", __func__, op, map->flags);
	switch (map->flags & DMAMAP_TYPE_MASK) {
	case DMAMAP_LINEAR:
		if (!(_bus_dma_buf_is_in_bp(map, map->buffer, map->len)))
			bus_dmamap_sync_buf(map->buffer, map->len, op);
		break;
	case DMAMAP_MBUF:
		m = map->buffer;
		while (m) {
			if (m->m_len > 0 &&
			    !(_bus_dma_buf_is_in_bp(map, m->m_data, m->m_len)))
				bus_dmamap_sync_buf(m->m_data, m->m_len, op);
			m = m->m_next;
		}
		break;
	case DMAMAP_UIO:
		uio = map->buffer;
		iov = uio->uio_iov;
		resid = uio->uio_resid;
		for (int i = 0; i < uio->uio_iovcnt && resid != 0; i++) {
			bus_size_t minlen = resid < iov[i].iov_len ? resid :
			    iov[i].iov_len;
			if (minlen > 0) {
				if (!_bus_dma_buf_is_in_bp(map, iov[i].iov_base,
				    minlen))
					bus_dmamap_sync_buf(iov[i].iov_base,
					    minlen, op);
				resid -= minlen;
			}
		}
		break;
	default:
		break;
	}
	cpu_drain_writebuf();
}
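
/*
 * Illustrative driver-side contract for the sync above: sync with
 * PREWRITE before starting a CPU -> device transfer, and with POSTREAD
 * after a device -> CPU transfer completes, before touching the data:
 *
 *	bus_dmamap_sync(sc->sc_dmat, map, BUS_DMASYNC_PREWRITE);
 *	... start DMA to the device ...
 *	bus_dmamap_sync(sc->sc_dmat, map, BUS_DMASYNC_POSTREAD);
 *	... read the data the device wrote ...
 */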

static void
init_bounce_pages(void *dummy __unused)
{

	total_bpages = 0;
	STAILQ_INIT(&bounce_zone_list);
	STAILQ_INIT(&bounce_map_waitinglist);
	STAILQ_INIT(&bounce_map_callbacklist);
	mtx_init(&bounce_lock, "bounce pages lock", NULL, MTX_DEF);
}
SYSINIT(bpages, SI_SUB_LOCK, SI_ORDER_ANY, init_bounce_pages, NULL);

static struct sysctl_ctx_list *
busdma_sysctl_tree(struct bounce_zone *bz)
{
	return (&bz->sysctl_tree);
}

static struct sysctl_oid *
busdma_sysctl_tree_top(struct bounce_zone *bz)
{
	return (bz->sysctl_tree_top);
}

static int
alloc_bounce_zone(bus_dma_tag_t dmat)
{
	struct bounce_zone *bz;

	/* Check to see if we already have a suitable zone */
	STAILQ_FOREACH(bz, &bounce_zone_list, links) {
		if ((dmat->alignment <= bz->alignment)
		    && (dmat->boundary <= bz->boundary)
		    && (dmat->lowaddr >= bz->lowaddr)) {
			dmat->bounce_zone = bz;
			return (0);
		}
	}

	if ((bz = (struct bounce_zone *)malloc(sizeof(*bz), M_DEVBUF,
	    M_NOWAIT | M_ZERO)) == NULL)
		return (ENOMEM);

	STAILQ_INIT(&bz->bounce_page_list);
	bz->free_bpages = 0;
	bz->reserved_bpages = 0;
	bz->active_bpages = 0;
	bz->lowaddr = dmat->lowaddr;
	bz->alignment = dmat->alignment;
	bz->boundary = dmat->boundary;
	snprintf(bz->zoneid, 8, "zone%d", busdma_zonecount);
	busdma_zonecount++;
	snprintf(bz->lowaddrid, 18, "%#jx", (uintmax_t)bz->lowaddr);
	STAILQ_INSERT_TAIL(&bounce_zone_list, bz, links);
	dmat->bounce_zone = bz;

	sysctl_ctx_init(&bz->sysctl_tree);
	bz->sysctl_tree_top = SYSCTL_ADD_NODE(&bz->sysctl_tree,
	    SYSCTL_STATIC_CHILDREN(_hw_busdma), OID_AUTO, bz->zoneid,
	    CTLFLAG_RD, 0, "");
	if (bz->sysctl_tree_top == NULL) {
		sysctl_ctx_free(&bz->sysctl_tree);
		return (0);	/* XXX error code? */
	}

	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "total_bpages", CTLFLAG_RD, &bz->total_bpages, 0,
	    "Total bounce pages");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "free_bpages", CTLFLAG_RD, &bz->free_bpages, 0,
	    "Free bounce pages");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "reserved_bpages", CTLFLAG_RD, &bz->reserved_bpages, 0,
	    "Reserved bounce pages");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "active_bpages", CTLFLAG_RD, &bz->active_bpages, 0,
	    "Active bounce pages");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "total_bounced", CTLFLAG_RD, &bz->total_bounced, 0,
	    "Total bounce requests");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "total_deferred", CTLFLAG_RD, &bz->total_deferred, 0,
	    "Total bounce requests that were deferred");
	SYSCTL_ADD_STRING(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "lowaddr", CTLFLAG_RD, bz->lowaddrid, 0, "");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "alignment", CTLFLAG_RD, &bz->alignment, 0, "");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "boundary", CTLFLAG_RD, &bz->boundary, 0, "");

	return (0);
}

static int
alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages)
{
	struct bounce_zone *bz;
	int count;

	bz = dmat->bounce_zone;
	count = 0;
	while (numpages > 0) {
		struct bounce_page *bpage;

		bpage = (struct bounce_page *)malloc(sizeof(*bpage), M_DEVBUF,
		    M_NOWAIT | M_ZERO);

		if (bpage == NULL)
			break;
		bpage->vaddr = (vm_offset_t)contigmalloc(PAGE_SIZE, M_DEVBUF,
		    M_NOWAIT, 0ul, bz->lowaddr, PAGE_SIZE, bz->boundary);
		if (bpage->vaddr == 0) {
			free(bpage, M_DEVBUF);
			break;
		}
		bpage->busaddr = pmap_kextract(bpage->vaddr);
		bpage->vaddr_nocache = (vm_offset_t)arm_remap_nocache(
		    (void *)bpage->vaddr, PAGE_SIZE);
		mtx_lock(&bounce_lock);
		STAILQ_INSERT_TAIL(&bz->bounce_page_list, bpage, links);
		total_bpages++;
		bz->total_bpages++;
		bz->free_bpages++;
		mtx_unlock(&bounce_lock);
		count++;
		numpages--;
	}
	return (count);
}

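/*
 * Reserve bounce pages for a map from its tag's bounce zone; called
 * with bounce_lock held.  Returns the number of pages still missing
 * (0 when the reservation is complete).  With commit == 0 the request
 * is all-or-nothing: nothing is taken unless every needed page is
 * available.
 */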
static int
reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map, int commit)
{
	struct bounce_zone *bz;
	int pages;

	mtx_assert(&bounce_lock, MA_OWNED);
	bz = dmat->bounce_zone;
	pages = MIN(bz->free_bpages, map->pagesneeded - map->pagesreserved);
	if (commit == 0 && map->pagesneeded > (map->pagesreserved + pages))
		return (map->pagesneeded - (map->pagesreserved + pages));
	bz->free_bpages -= pages;
	bz->reserved_bpages += pages;
	map->pagesreserved += pages;
	pages = map->pagesneeded - map->pagesreserved;

	return (pages);
}

static bus_addr_t
add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map, vm_offset_t vaddr,
    bus_size_t size)
{
	struct bounce_zone *bz;
	struct bounce_page *bpage;

	KASSERT(dmat->bounce_zone != NULL, ("no bounce zone in dma tag"));
	KASSERT(map != NULL, ("add_bounce_page: bad map %p", map));

	bz = dmat->bounce_zone;
	if (map->pagesneeded == 0)
		panic("add_bounce_page: map doesn't need any pages");
	map->pagesneeded--;

	if (map->pagesreserved == 0)
		panic("add_bounce_page: map doesn't have any reserved pages");
	map->pagesreserved--;

	mtx_lock(&bounce_lock);
	bpage = STAILQ_FIRST(&bz->bounce_page_list);
	if (bpage == NULL)
		panic("add_bounce_page: free page list is empty");

	STAILQ_REMOVE_HEAD(&bz->bounce_page_list, links);
	bz->reserved_bpages--;
	bz->active_bpages++;
	mtx_unlock(&bounce_lock);

	bpage->datavaddr = vaddr;
	bpage->datacount = size;
	STAILQ_INSERT_TAIL(&(map->bpages), bpage, links);
	return (bpage->busaddr);
}

static void
free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage)
{
	struct bus_dmamap *map;
	struct bounce_zone *bz;

	bz = dmat->bounce_zone;
	bpage->datavaddr = 0;
	bpage->datacount = 0;

	mtx_lock(&bounce_lock);
	STAILQ_INSERT_HEAD(&bz->bounce_page_list, bpage, links);
	bz->free_bpages++;
	bz->active_bpages--;
	if ((map = STAILQ_FIRST(&bounce_map_waitinglist)) != NULL) {
		if (reserve_bounce_pages(map->dmat, map, 1) == 0) {
			STAILQ_REMOVE_HEAD(&bounce_map_waitinglist, links);
			STAILQ_INSERT_TAIL(&bounce_map_callbacklist,
			    map, links);
			busdma_swi_pending = 1;
			bz->total_deferred++;
			swi_sched(vm_ih, 0);
		}
	}
	mtx_unlock(&bounce_lock);
}

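/*
 * Software interrupt handler for deferred loads.  free_bounce_page()
 * moves a waiting map to bounce_map_callbacklist once its reservation
 * completes and schedules this handler, which replays the
 * bus_dmamap_load() with the driver's lock held via its lockfunc so
 * that the callback finally runs with bounce pages available.
 */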
void
busdma_swi(void)
{
	bus_dma_tag_t dmat;
	struct bus_dmamap *map;

	mtx_lock(&bounce_lock);
	while ((map = STAILQ_FIRST(&bounce_map_callbacklist)) != NULL) {
		STAILQ_REMOVE_HEAD(&bounce_map_callbacklist, links);
		mtx_unlock(&bounce_lock);
		dmat = map->dmat;
		(dmat->lockfunc)(dmat->lockfuncarg, BUS_DMA_LOCK);
		bus_dmamap_load(map->dmat, map, map->buffer, map->len,
		    map->callback, map->callback_arg, /*flags*/0);
		(dmat->lockfunc)(dmat->lockfuncarg, BUS_DMA_UNLOCK);
		mtx_lock(&bounce_lock);
	}
	mtx_unlock(&bounce_lock);
}