/*
 * Copyright (c) 1997, 1998 Justin T. Gibbs.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: releng/5.1/sys/amd64/amd64/busdma_machdep.c 115343 2003-05-27 04:59:59Z scottl $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/bus.h>
#include <sys/interrupt.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/proc.h>
#include <sys/mutex.h>
#include <sys/mbuf.h>
#include <sys/uio.h>

#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>

#include <machine/atomic.h>
#include <machine/bus.h>
#include <machine/md_var.h>

#define MAX_BPAGES 512

struct bus_dma_tag {
        bus_dma_tag_t     parent;
        bus_size_t        alignment;
        bus_size_t        boundary;
        bus_addr_t        lowaddr;
        bus_addr_t        highaddr;
        bus_dma_filter_t *filter;
        void             *filterarg;
        bus_size_t        maxsize;
        u_int             nsegments;
        bus_size_t        maxsegsz;
        int               flags;
        int               ref_count;
        int               map_count;
};

struct bounce_page {
        vm_offset_t vaddr;              /* kva of bounce buffer */
        bus_addr_t  busaddr;            /* Physical address */
        vm_offset_t datavaddr;          /* kva of client data */
        bus_size_t  datacount;          /* client data count */
        STAILQ_ENTRY(bounce_page) links;
};

int busdma_swi_pending;

static STAILQ_HEAD(bp_list, bounce_page) bounce_page_list;
static int free_bpages;
static int reserved_bpages;
static int active_bpages;
static int total_bpages;
static bus_addr_t bounce_lowaddr = BUS_SPACE_MAXADDR;

struct bus_dmamap {
        struct bp_list         bpages;
        int                    pagesneeded;
        int                    pagesreserved;
        bus_dma_tag_t          dmat;
        void                  *buf;     /* unmapped buffer pointer */
        bus_size_t             buflen;  /* unmapped buffer length */
        bus_dmamap_callback_t *callback;
        void                  *callback_arg;
        STAILQ_ENTRY(bus_dmamap) links;
};

static STAILQ_HEAD(, bus_dmamap) bounce_map_waitinglist;
static STAILQ_HEAD(, bus_dmamap) bounce_map_callbacklist;
static struct bus_dmamap nobounce_dmamap;

static void init_bounce_pages(void *dummy);
static int alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages);
static int reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map,
                                int commit);
static bus_addr_t add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map,
                                  vm_offset_t vaddr, bus_size_t size);
static void free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage);
static __inline int run_filter(bus_dma_tag_t dmat, bus_addr_t paddr);
/* To protect all the bounce page related lists and data. */
static struct mtx bounce_lock;

/*
 * Return true if a match is made.
 *
 * To find a match walk the chain of bus_dma_tag_t's looking at 'paddr'.
 *
 * If paddr is within the bounds of the dma tag, then call the filter
 * callback to check for a match; if there is no filter callback, assume
 * a match.
 */
static __inline int
run_filter(bus_dma_tag_t dmat, bus_addr_t paddr)
{
        int retval;

        retval = 0;
        do {
                if (paddr > dmat->lowaddr
                 && paddr <= dmat->highaddr
                 && (dmat->filter == NULL
                  || (*dmat->filter)(dmat->filterarg, paddr) != 0))
                        retval = 1;

                dmat = dmat->parent;
        } while (retval == 0 && dmat != NULL);
        return (retval);
}
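
/*
 * Example (illustrative sketch only, kept out of the build with #if 0):
 * a bus_dma_filter_t callback that a driver could attach to a tag.  The
 * function name and the alignment rule are assumptions for illustration,
 * not part of this file.  Returning non-zero tells run_filter() that
 * 'paddr' is unacceptable and must be bounced.
 */
#if 0
static int
example_dma_filter(void *arg, bus_addr_t paddr)
{

        /* Hypothetical device quirk: bounce anything not 64KB aligned. */
        return ((paddr & 0xffff) != 0);
}
#endif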

#define BUS_DMA_MIN_ALLOC_COMP BUS_DMA_BUS4
/*
 * Allocate a device specific dma_tag.
 */
int
bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment,
                   bus_size_t boundary, bus_addr_t lowaddr,
                   bus_addr_t highaddr, bus_dma_filter_t *filter,
                   void *filterarg, bus_size_t maxsize, int nsegments,
                   bus_size_t maxsegsz, int flags, bus_dma_tag_t *dmat)
{
        bus_dma_tag_t newtag;
        int error = 0;

        /* Return a NULL tag on failure */
        *dmat = NULL;

        newtag = (bus_dma_tag_t)malloc(sizeof(*newtag), M_DEVBUF, M_NOWAIT);
        if (newtag == NULL)
                return (ENOMEM);

        newtag->parent = parent;
        newtag->alignment = alignment;
        newtag->boundary = boundary;
        newtag->lowaddr = trunc_page((vm_paddr_t)lowaddr) + (PAGE_SIZE - 1);
        newtag->highaddr = trunc_page((vm_paddr_t)highaddr) +
            (PAGE_SIZE - 1);
        newtag->filter = filter;
        newtag->filterarg = filterarg;
        newtag->maxsize = maxsize;
        newtag->nsegments = nsegments;
        newtag->maxsegsz = maxsegsz;
        newtag->flags = flags;
        newtag->ref_count = 1; /* Count ourself */
        newtag->map_count = 0;

        /* Take into account any restrictions imposed by our parent tag */
        if (parent != NULL) {
                newtag->lowaddr = MIN(parent->lowaddr, newtag->lowaddr);
                newtag->highaddr = MAX(parent->highaddr, newtag->highaddr);
                /*
                 * XXX Not really correct??? Probably need to honor boundary
                 * all the way up the inheritance chain.
                 */
                newtag->boundary = MAX(parent->boundary, newtag->boundary);
                if (newtag->filter == NULL) {
                        /*
                         * Short circuit looking at our parent directly
                         * since we have encapsulated all of its information
                         */
                        newtag->filter = parent->filter;
                        newtag->filterarg = parent->filterarg;
                        newtag->parent = parent->parent;
                }
                if (newtag->parent != NULL)
                        atomic_add_int(&parent->ref_count, 1);
        }

        if (newtag->lowaddr < ptoa((vm_paddr_t)Maxmem) &&
            (flags & BUS_DMA_ALLOCNOW) != 0) {
                /* Must bounce */

                if (lowaddr > bounce_lowaddr) {
                        /*
                         * Go through the pool and kill any pages
                         * that don't reside below lowaddr.
                         */
                        panic("bus_dma_tag_create: page reallocation "
                              "not implemented");
                }
                if (ptoa(total_bpages) < maxsize) {
                        int pages;

                        pages = atop(maxsize) - total_bpages;

                        /* Add pages to our bounce pool */
                        if (alloc_bounce_pages(newtag, pages) < pages)
                                error = ENOMEM;
                }
                /* Performed initial allocation */
                newtag->flags |= BUS_DMA_MIN_ALLOC_COMP;
        }

        if (error != 0) {
                free(newtag, M_DEVBUF);
        } else {
                *dmat = newtag;
        }
        return (error);
}
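
/*
 * Example usage (illustrative sketch only, kept out of the build with
 * #if 0): how a hypothetical driver might create a tag for a device
 * that can only address the low 4GB.  The softc fields are assumptions,
 * not part of this file; the argument order matches bus_dma_tag_create()
 * above.
 */
#if 0
        error = bus_dma_tag_create(NULL,                /* parent */
                                   1,                   /* alignment */
                                   0,                   /* boundary */
                                   BUS_SPACE_MAXADDR_32BIT, /* lowaddr */
                                   BUS_SPACE_MAXADDR,   /* highaddr */
                                   NULL, NULL,          /* filter, filterarg */
                                   MAXBSIZE,            /* maxsize */
                                   1,                   /* nsegments */
                                   MAXBSIZE,            /* maxsegsz */
                                   0,                   /* flags */
                                   &sc->parent_dmat);
#endif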

int
bus_dma_tag_destroy(bus_dma_tag_t dmat)
{
        if (dmat != NULL) {

                if (dmat->map_count != 0)
                        return (EBUSY);

                while (dmat != NULL) {
                        bus_dma_tag_t parent;

                        parent = dmat->parent;
                        atomic_subtract_int(&dmat->ref_count, 1);
                        if (dmat->ref_count == 0) {
                                free(dmat, M_DEVBUF);
                                /*
                                 * Last reference count, so
                                 * release our reference
                                 * count on our parent.
                                 */
                                dmat = parent;
                        } else
                                dmat = NULL;
                }
        }
        return (0);
}

/*
 * Allocate a handle for mapping from kva/uva/physical
 * address space into bus device space.
 */
int
bus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp)
{
        int error;

        error = 0;

        if (dmat->lowaddr < ptoa((vm_paddr_t)Maxmem)) {
                /* Must bounce */
                int maxpages;

                *mapp = (bus_dmamap_t)malloc(sizeof(**mapp), M_DEVBUF,
                                             M_NOWAIT | M_ZERO);
                if (*mapp == NULL)
                        return (ENOMEM);

                /* Initialize the new map */
                STAILQ_INIT(&((*mapp)->bpages));

                /*
                 * Attempt to add pages to our pool on a per-instance
                 * basis up to a sane limit.
                 */
                maxpages = MIN(MAX_BPAGES, Maxmem - atop(dmat->lowaddr));
                if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0
                 || (dmat->map_count > 0
                  && total_bpages < maxpages)) {
                        int pages;

                        if (dmat->lowaddr > bounce_lowaddr) {
                                /*
                                 * Go through the pool and kill any pages
                                 * that don't reside below lowaddr.
                                 */
                                panic("bus_dmamap_create: page reallocation "
                                      "not implemented");
                        }
                        pages = MAX(atop(dmat->maxsize), 1);
                        pages = MIN(maxpages - total_bpages, pages);
                        if (alloc_bounce_pages(dmat, pages) < pages)
                                error = ENOMEM;

                        if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0) {
                                if (error == 0)
                                        dmat->flags |= BUS_DMA_MIN_ALLOC_COMP;
                        } else {
                                error = 0;
                        }
                }
        } else {
                *mapp = NULL;
        }
        if (error == 0)
                dmat->map_count++;
        return (error);
}

/*
 * Destroy a handle for mapping from kva/uva/physical
 * address space into bus device space.
 */
int
bus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map)
{
        if (map != NULL) {
                if (STAILQ_FIRST(&map->bpages) != NULL)
                        return (EBUSY);
                free(map, M_DEVBUF);
        }
        dmat->map_count--;
        return (0);
}

/*
 * Allocate a piece of memory that can be efficiently mapped into
 * bus device space based on the constraints listed in the dma tag.
 * A dmamap for use with dmamap_load is also allocated.
 */
int
bus_dmamem_alloc(bus_dma_tag_t dmat, void **vaddr, int flags,
                 bus_dmamap_t *mapp)
{
        /* If we succeed, no mapping/bouncing will be required */
        *mapp = NULL;

        if ((dmat->maxsize <= PAGE_SIZE) &&
            dmat->lowaddr >= ptoa((vm_paddr_t)Maxmem)) {
                *vaddr = malloc(dmat->maxsize, M_DEVBUF,
                                (flags & BUS_DMA_NOWAIT) ? M_NOWAIT : M_WAITOK);
        } else {
                /*
                 * XXX Use Contigmalloc until it is merged into this facility
                 *     and handles multi-seg allocations.  Nobody is doing
                 *     multi-seg allocations yet though.
                 */
                mtx_lock(&Giant);
                *vaddr = contigmalloc(dmat->maxsize, M_DEVBUF,
                    (flags & BUS_DMA_NOWAIT) ? M_NOWAIT : M_WAITOK,
                    0ul, dmat->lowaddr,
                    dmat->alignment ? dmat->alignment : 1ul,
                    dmat->boundary);
                mtx_unlock(&Giant);
        }
        if (*vaddr == NULL)
                return (ENOMEM);
        return (0);
}

/*
 * Free a piece of memory and its associated dmamap that were allocated
 * via bus_dmamem_alloc.  Make the same choice for free/contigfree.
 */
void
bus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map)
{
        /*
         * dmamem does not need to be bounced, so the map should be
         * NULL
         */
        if (map != NULL)
                panic("bus_dmamem_free: Invalid map freed\n");
        if ((dmat->maxsize <= PAGE_SIZE)
         && dmat->lowaddr >= ptoa((vm_paddr_t)Maxmem))
                free(vaddr, M_DEVBUF);
        else {
                mtx_lock(&Giant);
                contigfree(vaddr, dmat->maxsize, M_DEVBUF);
                mtx_unlock(&Giant);
        }
}
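
/*
 * Example (illustrative sketch only, kept out of the build with #if 0):
 * pairing bus_dmamem_alloc() with bus_dmamem_free().  The tag and ring
 * names are assumptions.  Because this allocator returns memory that
 * already satisfies the tag's constraints, the returned map is NULL and
 * is handed back unchanged on free.
 */
#if 0
        void *ring;
        bus_dmamap_t ring_map;

        if (bus_dmamem_alloc(sc->ring_dmat, &ring, BUS_DMA_NOWAIT,
                             &ring_map) != 0)
                return (ENOMEM);
        /* ... program the device with the ring, run I/O ... */
        bus_dmamem_free(sc->ring_dmat, ring, ring_map);
#endif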

/*
 * Utility function to load a linear buffer.  lastaddrp holds state
 * between invocations (for multiple-buffer loads).  segp contains
 * the starting segment on entrance, and the ending segment on exit.
 * first indicates if this is the first invocation of this function.
 */
static int
_bus_dmamap_load_buffer(bus_dma_tag_t dmat,
                        bus_dmamap_t map,
                        bus_dma_segment_t segs[],
                        void *buf, bus_size_t buflen,
                        struct thread *td,
                        int flags,
                        bus_addr_t *lastaddrp,
                        int *segp,
                        int first)
{
        bus_size_t sgsize;
        bus_addr_t curaddr, lastaddr, baddr, bmask;
        vm_offset_t vaddr;
        bus_addr_t paddr;
        int needbounce = 0;
        int seg;
        pmap_t pmap;

        if (map == NULL)
                map = &nobounce_dmamap;

        if (td != NULL)
                pmap = vmspace_pmap(td->td_proc->p_vmspace);
        else
                pmap = NULL;

        if (dmat->lowaddr < ptoa((vm_paddr_t)Maxmem)) {
                vm_offset_t vendaddr;

                /*
                 * Count the number of bounce pages
                 * needed in order to complete this transfer
                 */
                vaddr = trunc_page((vm_offset_t)buf);
                vendaddr = (vm_offset_t)buf + buflen;

                while (vaddr < vendaddr) {
                        paddr = pmap_kextract(vaddr);
                        if (run_filter(dmat, paddr) != 0) {
                                needbounce = 1;
                                map->pagesneeded++;
                        }
                        vaddr += PAGE_SIZE;
                }
        }

        vaddr = (vm_offset_t)buf;

        /* Reserve Necessary Bounce Pages */
        if (map->pagesneeded != 0) {
                mtx_lock(&bounce_lock);
                if (flags & BUS_DMA_NOWAIT) {
                        if (reserve_bounce_pages(dmat, map, 0) != 0) {
                                mtx_unlock(&bounce_lock);
                                return (ENOMEM);
                        }
                } else {
                        if (reserve_bounce_pages(dmat, map, 1) != 0) {
                                /* Queue us for resources */
                                map->dmat = dmat;
                                map->buf = buf;
                                map->buflen = buflen;
                                STAILQ_INSERT_TAIL(&bounce_map_waitinglist,
                                                   map, links);
                                mtx_unlock(&bounce_lock);
                                return (EINPROGRESS);
                        }
                }
                mtx_unlock(&bounce_lock);
        }

        lastaddr = *lastaddrp;
        bmask = ~(dmat->boundary - 1);

        for (seg = *segp; buflen > 0 ; ) {
                /*
                 * Get the physical address for this segment.
                 */
                if (pmap)
                        curaddr = pmap_extract(pmap, vaddr);
                else
                        curaddr = pmap_kextract(vaddr);

                /*
                 * Compute the segment size, and adjust counts.
                 */
                sgsize = PAGE_SIZE - ((u_long)curaddr & PAGE_MASK);
                if (buflen < sgsize)
                        sgsize = buflen;

                /*
                 * Make sure we don't cross any boundaries.
                 */
                if (dmat->boundary > 0) {
                        baddr = (curaddr + dmat->boundary) & bmask;
                        if (sgsize > (baddr - curaddr))
                                sgsize = (baddr - curaddr);
                }

                if (map->pagesneeded != 0 && run_filter(dmat, curaddr))
                        curaddr = add_bounce_page(dmat, map, vaddr, sgsize);

                /*
                 * Insert chunk into a segment, coalescing with
                 * previous segment if possible.
                 */
                if (first) {
                        segs[seg].ds_addr = curaddr;
                        segs[seg].ds_len = sgsize;
                        first = 0;
                } else {
                        if (needbounce == 0 && curaddr == lastaddr &&
                            (segs[seg].ds_len + sgsize) <= dmat->maxsegsz &&
                            (dmat->boundary == 0 ||
                             (segs[seg].ds_addr & bmask) == (curaddr & bmask)))
                                segs[seg].ds_len += sgsize;
                        else {
                                if (++seg >= dmat->nsegments)
                                        break;
                                segs[seg].ds_addr = curaddr;
                                segs[seg].ds_len = sgsize;
                        }
                }

                lastaddr = curaddr + sgsize;
                vaddr += sgsize;
                buflen -= sgsize;
        }

        *segp = seg;
        *lastaddrp = lastaddr;

        /*
         * Did we fit?
         */
        return (buflen != 0 ? EFBIG : 0); /* XXX better return value here? */
}

#define BUS_DMAMAP_NSEGS ((64 * 1024) / PAGE_SIZE + 1)

/*
 * Map the buffer buf into bus space using the dmamap map.
 */
int
bus_dmamap_load(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
                bus_size_t buflen, bus_dmamap_callback_t *callback,
                void *callback_arg, int flags)
{
#ifdef __GNUC__
        bus_dma_segment_t dm_segments[dmat->nsegments];
#else
        bus_dma_segment_t dm_segments[BUS_DMAMAP_NSEGS];
#endif
        bus_addr_t lastaddr = 0;
        int error, nsegs = 0;

        if (map != NULL) {
                flags |= BUS_DMA_WAITOK;
                map->callback = callback;
                map->callback_arg = callback_arg;
        }

        error = _bus_dmamap_load_buffer(dmat, map, dm_segments, buf, buflen,
                                        NULL, flags, &lastaddr, &nsegs, 1);

        if (error == EINPROGRESS)
                return (error);

        if (error)
                (*callback)(callback_arg, dm_segments, 0, error);
        else
                (*callback)(callback_arg, dm_segments, nsegs + 1, 0);

        return (0);
}
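
/*
 * Example (illustrative sketch only, kept out of the build with #if 0):
 * the create/load pattern for this interface.  Unless the load is
 * deferred for bounce pages (EINPROGRESS, only possible when the caller
 * can wait), the callback runs synchronously before bus_dmamap_load()
 * returns; deferred callbacks fire later from busdma_swi().  All
 * "example_" and softc names are assumptions.
 */
#if 0
static void
example_load_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
        bus_addr_t *busaddrp = arg;

        if (error != 0)
                return;
        /* Single-segment tag, so the whole buffer is segs[0]. */
        *busaddrp = segs[0].ds_addr;
}

        /* In the driver: */
        error = bus_dmamap_create(sc->buf_dmat, 0, &sc->buf_map);
        if (error == 0)
                error = bus_dmamap_load(sc->buf_dmat, sc->buf_map, buf,
                                        buflen, example_load_cb,
                                        &sc->buf_busaddr, 0);
#endif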

/*
 * Like _bus_dmamap_load(), but for mbufs.
 */
int
bus_dmamap_load_mbuf(bus_dma_tag_t dmat, bus_dmamap_t map,
                     struct mbuf *m0,
                     bus_dmamap_callback2_t *callback, void *callback_arg,
                     int flags)
{
#ifdef __GNUC__
        bus_dma_segment_t dm_segments[dmat->nsegments];
#else
        bus_dma_segment_t dm_segments[BUS_DMAMAP_NSEGS];
#endif
        int nsegs, error;

        KASSERT(m0->m_flags & M_PKTHDR,
                ("bus_dmamap_load_mbuf: no packet header"));

        flags |= BUS_DMA_NOWAIT;
        nsegs = 0;
        error = 0;
        if (m0->m_pkthdr.len <= dmat->maxsize) {
                int first = 1;
                bus_addr_t lastaddr = 0;
                struct mbuf *m;

                for (m = m0; m != NULL && error == 0; m = m->m_next) {
                        if (m->m_len > 0) {
                                error = _bus_dmamap_load_buffer(dmat, map,
                                                dm_segments,
                                                m->m_data, m->m_len,
                                                NULL, flags, &lastaddr,
                                                &nsegs, first);
                                first = 0;
                        }
                }
        } else {
                error = EINVAL;
        }

        if (error) {
                /* force "no valid mappings" in callback */
                (*callback)(callback_arg, dm_segments, 0, 0, error);
        } else {
                (*callback)(callback_arg, dm_segments,
                            nsegs + 1, m0->m_pkthdr.len, error);
        }
        return (error);
}
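
/*
 * Example (illustrative sketch only, kept out of the build with #if 0):
 * loading an mbuf chain for transmit.  This path forces BUS_DMA_NOWAIT,
 * so the callback is always invoked before bus_dmamap_load_mbuf()
 * returns and the error is also returned directly.  The "example_"
 * names are assumptions.
 */
#if 0
static void
example_tx_cb(void *arg, bus_dma_segment_t *segs, int nseg,
              bus_size_t mapsize, int error)
{

        if (error != 0)
                return;
        /* Hand segs[0..nseg-1] to the (hypothetical) hardware here. */
}

        /* In the driver's transmit path: */
        error = bus_dmamap_load_mbuf(sc->tx_dmat, txmap, m_head,
                                     example_tx_cb, sc, 0);
#endif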

/*
 * Like _bus_dmamap_load(), but for uios.
 */
int
bus_dmamap_load_uio(bus_dma_tag_t dmat, bus_dmamap_t map,
                    struct uio *uio,
                    bus_dmamap_callback2_t *callback, void *callback_arg,
                    int flags)
{
        bus_addr_t lastaddr = 0;
#ifdef __GNUC__
        bus_dma_segment_t dm_segments[dmat->nsegments];
#else
        bus_dma_segment_t dm_segments[BUS_DMAMAP_NSEGS];
#endif
        int nsegs, error, first, i;
        bus_size_t resid;
        struct iovec *iov;
        struct thread *td = NULL;

        flags |= BUS_DMA_NOWAIT;
        resid = uio->uio_resid;
        iov = uio->uio_iov;

        if (uio->uio_segflg == UIO_USERSPACE) {
                td = uio->uio_td;
                KASSERT(td != NULL,
                        ("bus_dmamap_load_uio: USERSPACE but no proc"));
        }

        nsegs = 0;
        error = 0;
        first = 1;
        for (i = 0; i < uio->uio_iovcnt && resid != 0 && !error; i++) {
                /*
                 * Now at the first iovec to load.  Load each iovec
                 * until we have exhausted the residual count.
                 */
                bus_size_t minlen =
                    resid < iov[i].iov_len ? resid : iov[i].iov_len;
                caddr_t addr = (caddr_t) iov[i].iov_base;

                if (minlen > 0) {
                        error = _bus_dmamap_load_buffer(dmat, map,
                                        dm_segments,
                                        addr, minlen,
                                        td, flags, &lastaddr, &nsegs, first);
                        first = 0;

                        resid -= minlen;
                }
        }

        if (error) {
                /* force "no valid mappings" in callback */
                (*callback)(callback_arg, dm_segments, 0, 0, error);
        } else {
                (*callback)(callback_arg, dm_segments,
                            nsegs + 1, uio->uio_resid, error);
        }
        return (error);
}

/*
 * Release the mapping held by map.
 */
void
_bus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t map)
{
        struct bounce_page *bpage;

        while ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) {
                STAILQ_REMOVE_HEAD(&map->bpages, links);
                free_bounce_page(dmat, bpage);
        }
}

void
_bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op)
{
        struct bounce_page *bpage;

        if ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) {
                /*
                 * Handle data bouncing.  We might also
                 * want to add support for invalidating
                 * the caches on broken hardware
                 */
                if (op & BUS_DMASYNC_PREWRITE) {
                        while (bpage != NULL) {
                                bcopy((void *)bpage->datavaddr,
                                      (void *)bpage->vaddr,
                                      bpage->datacount);
                                bpage = STAILQ_NEXT(bpage, links);
                        }
                }

                if (op & BUS_DMASYNC_POSTREAD) {
                        while (bpage != NULL) {
                                bcopy((void *)bpage->vaddr,
                                      (void *)bpage->datavaddr,
                                      bpage->datacount);
                                bpage = STAILQ_NEXT(bpage, links);
                        }
                }
        }
}
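
/*
 * Example (illustrative sketch only, kept out of the build with #if 0):
 * the sync discipline that makes the bouncing above work.  PREWRITE
 * copies client data into the bounce pages before the device reads it;
 * POSTREAD copies device data back out after the DMA completes.  Drivers
 * call the bus_dmamap_sync()/bus_dmamap_unload() wrappers provided via
 * <machine/bus.h>.
 */
#if 0
        /* Host -> device transfer about to start: */
        bus_dmamap_sync(dmat, map, BUS_DMASYNC_PREWRITE);
        /* ... start the DMA and wait for it to complete ... */

        /* Device -> host transfer just finished: */
        bus_dmamap_sync(dmat, map, BUS_DMASYNC_POSTREAD);
        bus_dmamap_unload(dmat, map);
#endif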

static void
init_bounce_pages(void *dummy __unused)
{

        free_bpages = 0;
        reserved_bpages = 0;
        active_bpages = 0;
        total_bpages = 0;
        STAILQ_INIT(&bounce_page_list);
        STAILQ_INIT(&bounce_map_waitinglist);
        STAILQ_INIT(&bounce_map_callbacklist);
        mtx_init(&bounce_lock, "bounce pages lock", NULL, MTX_DEF);
}
SYSINIT(bpages, SI_SUB_LOCK, SI_ORDER_ANY, init_bounce_pages, NULL);

static int
alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages)
{
        int count;

        count = 0;
        while (numpages > 0) {
                struct bounce_page *bpage;

                bpage = (struct bounce_page *)malloc(sizeof(*bpage), M_DEVBUF,
                                                     M_NOWAIT | M_ZERO);

                if (bpage == NULL)
                        break;
                mtx_lock(&Giant);
                bpage->vaddr = (vm_offset_t)contigmalloc(PAGE_SIZE, M_DEVBUF,
                                                         M_NOWAIT, 0ul,
                                                         dmat->lowaddr,
                                                         PAGE_SIZE,
                                                         0);
                mtx_unlock(&Giant);
                if (bpage->vaddr == 0) {
                        free(bpage, M_DEVBUF);
                        break;
                }
                bpage->busaddr = pmap_kextract(bpage->vaddr);
                mtx_lock(&bounce_lock);
                STAILQ_INSERT_TAIL(&bounce_page_list, bpage, links);
                total_bpages++;
                free_bpages++;
                mtx_unlock(&bounce_lock);
                count++;
                numpages--;
        }
        return (count);
}

static int
reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map, int commit)
{
        int pages;

        mtx_assert(&bounce_lock, MA_OWNED);
        pages = MIN(free_bpages, map->pagesneeded - map->pagesreserved);
        if (commit == 0 && map->pagesneeded > (map->pagesreserved + pages))
                return (map->pagesneeded - (map->pagesreserved + pages));
        free_bpages -= pages;
        reserved_bpages += pages;
        map->pagesreserved += pages;
        pages = map->pagesneeded - map->pagesreserved;

        return (pages);
}

static bus_addr_t
add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map, vm_offset_t vaddr,
                bus_size_t size)
{
        struct bounce_page *bpage;

        KASSERT(map != NULL && map != &nobounce_dmamap,
                ("add_bounce_page: bad map %p", map));

        if (map->pagesneeded == 0)
                panic("add_bounce_page: map doesn't need any pages");
        map->pagesneeded--;

        if (map->pagesreserved == 0)
                panic("add_bounce_page: map doesn't have any pages reserved");
        map->pagesreserved--;

        mtx_lock(&bounce_lock);
        bpage = STAILQ_FIRST(&bounce_page_list);
        if (bpage == NULL)
                panic("add_bounce_page: free page list is empty");

        STAILQ_REMOVE_HEAD(&bounce_page_list, links);
        reserved_bpages--;
        active_bpages++;
        mtx_unlock(&bounce_lock);

        bpage->datavaddr = vaddr;
        bpage->datacount = size;
        STAILQ_INSERT_TAIL(&(map->bpages), bpage, links);
        return (bpage->busaddr);
}

static void
free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage)
{
        struct bus_dmamap *map;

        bpage->datavaddr = 0;
        bpage->datacount = 0;

        mtx_lock(&bounce_lock);
        STAILQ_INSERT_HEAD(&bounce_page_list, bpage, links);
        free_bpages++;
        active_bpages--;
        if ((map = STAILQ_FIRST(&bounce_map_waitinglist)) != NULL) {
                if (reserve_bounce_pages(map->dmat, map, 1) == 0) {
                        STAILQ_REMOVE_HEAD(&bounce_map_waitinglist, links);
                        STAILQ_INSERT_TAIL(&bounce_map_callbacklist,
                                           map, links);
                        busdma_swi_pending = 1;
                        swi_sched(vm_ih, 0);
                }
        }
        mtx_unlock(&bounce_lock);
}

void
busdma_swi(void)
{
        struct bus_dmamap *map;

        mtx_lock(&bounce_lock);
        while ((map = STAILQ_FIRST(&bounce_map_callbacklist)) != NULL) {
                STAILQ_REMOVE_HEAD(&bounce_map_callbacklist, links);
                mtx_unlock(&bounce_lock);
                bus_dmamap_load(map->dmat, map, map->buf, map->buflen,
                                map->callback, map->callback_arg, /*flags*/0);
                mtx_lock(&bounce_lock);
        }
        mtx_unlock(&bounce_lock);
}