/*
 * Copyright (c) 1997, 1998 Justin T. Gibbs.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: releng/5.2/sys/amd64/amd64/busdma_machdep.c 120357 2003-09-22 23:11:42Z peter $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/bus.h>
#include <sys/interrupt.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/proc.h>
#include <sys/mutex.h>
#include <sys/mbuf.h>
#include <sys/uio.h>

#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>

#include <machine/atomic.h>
#include <machine/bus.h>
#include <machine/md_var.h>

#define MAX_BPAGES 512

struct bus_dma_tag {
	bus_dma_tag_t	parent;
	bus_size_t	alignment;
	bus_size_t	boundary;
	bus_addr_t	lowaddr;
	bus_addr_t	highaddr;
	bus_dma_filter_t *filter;
	void		*filterarg;
	bus_size_t	maxsize;
	u_int		nsegments;
	bus_size_t	maxsegsz;
	int		flags;
	int		ref_count;
	int		map_count;
	bus_dma_lock_t	*lockfunc;
	void		*lockfuncarg;
	bus_dma_segment_t *segments;
};

struct bounce_page {
	vm_offset_t	vaddr;		/* kva of bounce buffer */
	bus_addr_t	busaddr;	/* Physical address */
	vm_offset_t	datavaddr;	/* kva of client data */
	bus_size_t	datacount;	/* client data count */
	STAILQ_ENTRY(bounce_page) links;
};

int busdma_swi_pending;

static struct mtx bounce_lock;
static STAILQ_HEAD(bp_list, bounce_page) bounce_page_list;
static int free_bpages;
static int reserved_bpages;
static int active_bpages;
static int total_bpages;
static bus_addr_t bounce_lowaddr = BUS_SPACE_MAXADDR;

struct bus_dmamap {
	struct bp_list	bpages;
	int		pagesneeded;
	int		pagesreserved;
	bus_dma_tag_t	dmat;
	void		*buf;		/* unmapped buffer pointer */
	bus_size_t	buflen;		/* unmapped buffer length */
	bus_dmamap_callback_t *callback;
	void		*callback_arg;
	STAILQ_ENTRY(bus_dmamap) links;
};

static STAILQ_HEAD(, bus_dmamap) bounce_map_waitinglist;
static STAILQ_HEAD(, bus_dmamap) bounce_map_callbacklist;
static struct bus_dmamap nobounce_dmamap;

static void init_bounce_pages(void *dummy);
static int alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages);
static int reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map,
    int commit);
static bus_addr_t add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map,
    vm_offset_t vaddr, bus_size_t size);
static void free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage);
static __inline int run_filter(bus_dma_tag_t dmat, bus_addr_t paddr);

/*
 * Return true if a match is made.
 *
 * To find a match, walk the chain of bus_dma_tag_t's looking for 'paddr'.
 *
 * If paddr is within the bounds of the dma tag, call the filter callback
 * to check for a match; if there is no filter callback, assume a match.
 */
static __inline int
run_filter(bus_dma_tag_t dmat, bus_addr_t paddr)
{
	int retval;

	retval = 0;
	do {
		if (paddr > dmat->lowaddr
		 && paddr <= dmat->highaddr
		 && (dmat->filter == NULL
		  || (*dmat->filter)(dmat->filterarg, paddr) != 0))
			retval = 1;

		dmat = dmat->parent;
	} while (retval == 0 && dmat != NULL);
	return (retval);
}
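
/*
 * Illustrative sketch (hypothetical tag values, not from this file): a
 * tag created with lowaddr == BUS_SPACE_MAXADDR_24BIT, highaddr ==
 * BUS_SPACE_MAXADDR and no filter function makes run_filter() return 1
 * for any physical address above the 16MB ISA limit, which is exactly
 * the case where a bounce page is required:
 *
 *	bus_dma_tag_create(NULL, 1, 0, BUS_SPACE_MAXADDR_24BIT,
 *	    BUS_SPACE_MAXADDR, NULL, NULL, ...);
 *
 * See the pagesneeded accounting in _bus_dmamap_load_buffer() below.
 */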

/*
 * Convenience function for manipulating driver locks from busdma (during
 * busdma_swi, for example).  Drivers that don't provide their own locks
 * should pass &Giant as dmat->lockfuncarg.  Drivers that use their own
 * non-mutex locking scheme don't have to use this at all.
 */
void
busdma_lock_mutex(void *arg, bus_dma_lock_op_t op)
{
	struct mtx *dmtx;

	dmtx = (struct mtx *)arg;
	switch (op) {
	case BUS_DMA_LOCK:
		mtx_lock(dmtx);
		break;
	case BUS_DMA_UNLOCK:
		mtx_unlock(dmtx);
		break;
	default:
		panic("Unknown operation 0x%x for busdma_lock_mutex!", op);
	}
}
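
/*
 * Usage sketch (hypothetical driver code; the sc_mtx and sc_dmat names
 * are illustrative): a driver that serializes with its own mutex passes
 * busdma_lock_mutex as the lockfunc and the mutex as lockfuncarg, so a
 * deferred callback run from busdma_swi() executes under that lock:
 *
 *	error = bus_dma_tag_create(parent, alignment, boundary,
 *	    lowaddr, highaddr, filter, filterarg, maxsize, nsegments,
 *	    maxsegsz, flags, busdma_lock_mutex, &sc->sc_mtx, &sc->sc_dmat);
 */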

/*
 * dflt_lock should never get called.  It gets put into the dma tag when
 * lockfunc == NULL, which is only valid if the maps that are associated
 * with the tag are meant to never be deferred.
 * XXX Should have a way to identify which driver is responsible here.
 */
static void
dflt_lock(void *arg, bus_dma_lock_op_t op)
{
	panic("driver error: busdma dflt_lock called");
}

#define BUS_DMA_MIN_ALLOC_COMP BUS_DMA_BUS4
/*
 * Allocate a device specific dma_tag.
 */
int
bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment,
    bus_size_t boundary, bus_addr_t lowaddr,
    bus_addr_t highaddr, bus_dma_filter_t *filter,
    void *filterarg, bus_size_t maxsize, int nsegments,
    bus_size_t maxsegsz, int flags, bus_dma_lock_t *lockfunc,
    void *lockfuncarg, bus_dma_tag_t *dmat)
{
	bus_dma_tag_t newtag;
	int error = 0;

	/* Return a NULL tag on failure */
	*dmat = NULL;

	newtag = (bus_dma_tag_t)malloc(sizeof(*newtag), M_DEVBUF, M_NOWAIT);
	if (newtag == NULL)
		return (ENOMEM);

	newtag->parent = parent;
	newtag->alignment = alignment;
	newtag->boundary = boundary;
	newtag->lowaddr = trunc_page((vm_paddr_t)lowaddr) + (PAGE_SIZE - 1);
	newtag->highaddr = trunc_page((vm_paddr_t)highaddr) + (PAGE_SIZE - 1);
	newtag->filter = filter;
	newtag->filterarg = filterarg;
	newtag->maxsize = maxsize;
	newtag->nsegments = nsegments;
	newtag->maxsegsz = maxsegsz;
	newtag->flags = flags;
	newtag->ref_count = 1; /* Count ourself */
	newtag->map_count = 0;
	if (lockfunc != NULL) {
		newtag->lockfunc = lockfunc;
		newtag->lockfuncarg = lockfuncarg;
	} else {
		newtag->lockfunc = dflt_lock;
		newtag->lockfuncarg = NULL;
	}
	newtag->segments = NULL;

	/* Take into account any restrictions imposed by our parent tag */
	if (parent != NULL) {
		newtag->lowaddr = MIN(parent->lowaddr, newtag->lowaddr);
		newtag->highaddr = MAX(parent->highaddr, newtag->highaddr);
		/*
		 * XXX Not really correct??? Probably need to honor boundary
		 * all the way up the inheritance chain.
		 */
		newtag->boundary = MAX(parent->boundary, newtag->boundary);
		if (newtag->filter == NULL) {
			/*
			 * Short circuit looking at our parent directly
			 * since we have encapsulated all of its information
			 */
			newtag->filter = parent->filter;
			newtag->filterarg = parent->filterarg;
			newtag->parent = parent->parent;
		}
		if (newtag->parent != NULL)
			atomic_add_int(&parent->ref_count, 1);
	}

	if (newtag->lowaddr < ptoa((vm_paddr_t)Maxmem) &&
	    (flags & BUS_DMA_ALLOCNOW) != 0) {
		/* Must bounce */

		if (lowaddr > bounce_lowaddr) {
			/*
			 * Go through the pool and kill any pages
			 * that don't reside below lowaddr.
			 */
			panic("bus_dma_tag_create: page reallocation "
			      "not implemented");
		}
		if (ptoa(total_bpages) < maxsize) {
			int pages;

			pages = atop(maxsize) - total_bpages;

			/* Add pages to our bounce pool */
			if (alloc_bounce_pages(newtag, pages) < pages)
				error = ENOMEM;
		}
		/* Performed initial allocation */
		newtag->flags |= BUS_DMA_MIN_ALLOC_COMP;
	}

	if (error != 0) {
		free(newtag, M_DEVBUF);
	} else {
		*dmat = newtag;
	}
	return (error);
}
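
/*
 * Usage sketch (hypothetical driver code, not part of this file): a
 * driver limited to 32-bit addressing that transfers at most 64KB in
 * up to 8 segments might create its tag and a map roughly as follows
 * (parent NULL, alignment 1, boundary 0, no filter, Giant as the
 * callback lock):
 *
 *	error = bus_dma_tag_create(NULL, 1, 0,
 *	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR,
 *	    NULL, NULL, 65536, 8, 65536, 0,
 *	    busdma_lock_mutex, &Giant, &sc->sc_dmat);
 *	if (error == 0)
 *		error = bus_dmamap_create(sc->sc_dmat, 0, &sc->sc_dmamap);
 */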

int
bus_dma_tag_destroy(bus_dma_tag_t dmat)
{
	if (dmat != NULL) {

		if (dmat->map_count != 0)
			return (EBUSY);

		while (dmat != NULL) {
			bus_dma_tag_t parent;

			parent = dmat->parent;
			atomic_subtract_int(&dmat->ref_count, 1);
			if (dmat->ref_count == 0) {
				if (dmat->segments != NULL)
					free(dmat->segments, M_DEVBUF);
				free(dmat, M_DEVBUF);
				/*
				 * Last reference, so release our
				 * reference on our parent.
				 */
				dmat = parent;
			} else
				dmat = NULL;
		}
	}
	return (0);
}

/*
 * Allocate a handle for mapping from kva/uva/physical
 * address space into bus device space.
 */
int
bus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp)
{
	int error;

	error = 0;

	if (dmat->segments == NULL) {
		dmat->segments = (bus_dma_segment_t *)malloc(
		    sizeof(bus_dma_segment_t) * dmat->nsegments, M_DEVBUF,
		    M_NOWAIT);
		if (dmat->segments == NULL)
			return (ENOMEM);
	}

	if (dmat->lowaddr < ptoa((vm_paddr_t)Maxmem)) {
		/* Must bounce */
		int maxpages;

		*mapp = (bus_dmamap_t)malloc(sizeof(**mapp), M_DEVBUF,
		    M_NOWAIT | M_ZERO);
		if (*mapp == NULL)
			return (ENOMEM);

		/* Initialize the new map */
		STAILQ_INIT(&((*mapp)->bpages));

		/*
		 * Attempt to add pages to our pool on a per-instance
		 * basis up to a sane limit.
		 */
		maxpages = MIN(MAX_BPAGES, Maxmem - atop(dmat->lowaddr));
		if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0
		 || (dmat->map_count > 0
		  && total_bpages < maxpages)) {
			int pages;

			if (dmat->lowaddr > bounce_lowaddr) {
				/*
				 * Go through the pool and kill any pages
				 * that don't reside below lowaddr.
				 */
				panic("bus_dmamap_create: page reallocation "
				      "not implemented");
			}
			pages = MAX(atop(dmat->maxsize), 1);
			pages = MIN(maxpages - total_bpages, pages);
			if (alloc_bounce_pages(dmat, pages) < pages)
				error = ENOMEM;

			if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0) {
				if (error == 0)
					dmat->flags |= BUS_DMA_MIN_ALLOC_COMP;
			} else {
				error = 0;
			}
		}
	} else {
		*mapp = NULL;
	}
	if (error == 0)
		dmat->map_count++;
	return (error);
}

/*
 * Destroy a handle for mapping from kva/uva/physical
 * address space into bus device space.
 */
int
bus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map)
{
	if (map != NULL && map != &nobounce_dmamap) {
		if (STAILQ_FIRST(&map->bpages) != NULL)
			return (EBUSY);
		free(map, M_DEVBUF);
	}
	dmat->map_count--;
	return (0);
}

/*
 * Allocate a piece of memory that can be efficiently mapped into
 * bus device space based on the constraints listed in the dma tag.
 * A dmamap for use with dmamap_load is also allocated.
 */
int
bus_dmamem_alloc(bus_dma_tag_t dmat, void **vaddr, int flags,
    bus_dmamap_t *mapp)
{
	int mflags;

	if (flags & BUS_DMA_NOWAIT)
		mflags = M_NOWAIT;
	else
		mflags = M_WAITOK;
	if (flags & BUS_DMA_ZERO)
		mflags |= M_ZERO;

	/* If we succeed, no mapping/bouncing will be required */
	*mapp = NULL;

	if (dmat->segments == NULL) {
		dmat->segments = (bus_dma_segment_t *)malloc(
		    sizeof(bus_dma_segment_t) * dmat->nsegments, M_DEVBUF,
		    M_NOWAIT);
		if (dmat->segments == NULL)
			return (ENOMEM);
	}

	if ((dmat->maxsize <= PAGE_SIZE) &&
	    dmat->lowaddr >= ptoa((vm_paddr_t)Maxmem)) {
		*vaddr = malloc(dmat->maxsize, M_DEVBUF, mflags);
	} else {
		/*
		 * XXX Use Contigmalloc until it is merged into this facility
		 *     and handles multi-seg allocations.  Nobody is doing
		 *     multi-seg allocations yet though.
		 */
		*vaddr = contigmalloc(dmat->maxsize, M_DEVBUF, mflags,
		    0ul, dmat->lowaddr,
		    dmat->alignment ? dmat->alignment : 1ul,
		    dmat->boundary);
	}
	if (*vaddr == NULL)
		return (ENOMEM);
	return (0);
}
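
/*
 * Usage sketch (hypothetical, reusing the sc_dmat tag from the sketch
 * above): allocations are paired with bus_dmamem_free(), and the map
 * returned here (always NULL in this implementation) is passed back
 * on free:
 *
 *	error = bus_dmamem_alloc(sc->sc_dmat, &vaddr,
 *	    BUS_DMA_NOWAIT | BUS_DMA_ZERO, &map);
 *	...
 *	bus_dmamem_free(sc->sc_dmat, vaddr, map);
 */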

/*
 * Free a piece of memory and its associated dmamap that was allocated
 * via bus_dmamem_alloc.  Make the same choice for free/contigfree.
 */
void
bus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map)
{
	/*
	 * dmamem does not need to be bounced, so the map should be
	 * NULL
	 */
	if (map != NULL)
		panic("bus_dmamem_free: Invalid map freed\n");
	if ((dmat->maxsize <= PAGE_SIZE)
	 && dmat->lowaddr >= ptoa((vm_paddr_t)Maxmem))
		free(vaddr, M_DEVBUF);
	else {
		mtx_lock(&Giant);
		contigfree(vaddr, dmat->maxsize, M_DEVBUF);
		mtx_unlock(&Giant);
	}
}

/*
 * Utility function to load a linear buffer.  lastaddrp holds state
 * between invocations (for multiple-buffer loads).  segp contains
 * the starting segment on entrance, and the ending segment on exit.
 * first indicates if this is the first invocation of this function.
 */
static int
_bus_dmamap_load_buffer(bus_dma_tag_t dmat,
			bus_dmamap_t map,
			void *buf, bus_size_t buflen,
			struct thread *td,
			int flags,
			bus_addr_t *lastaddrp,
			int *segp,
			int first)
{
	bus_dma_segment_t *segs;
	bus_size_t sgsize;
	bus_addr_t curaddr, lastaddr, baddr, bmask;
	vm_offset_t vaddr;
	bus_addr_t paddr;
	int needbounce = 0;
	int seg;
	pmap_t pmap;

	segs = dmat->segments;

	if (map == NULL)
		map = &nobounce_dmamap;

	if (td != NULL)
		pmap = vmspace_pmap(td->td_proc->p_vmspace);
	else
		pmap = NULL;

	if (dmat->lowaddr < ptoa((vm_paddr_t)Maxmem) &&
	    map->pagesneeded == 0) {
		vm_offset_t vendaddr;

		/*
		 * Count the number of bounce pages
		 * needed in order to complete this transfer
		 */
		vaddr = trunc_page((vm_offset_t)buf);
		vendaddr = (vm_offset_t)buf + buflen;

		while (vaddr < vendaddr) {
			paddr = pmap_kextract(vaddr);
			if (run_filter(dmat, paddr) != 0) {
				needbounce = 1;
				map->pagesneeded++;
			}
			vaddr += PAGE_SIZE;
		}
	}

	vaddr = (vm_offset_t)buf;

	/* Reserve Necessary Bounce Pages */
	if (map->pagesneeded != 0) {
		mtx_lock(&bounce_lock);
		if (flags & BUS_DMA_NOWAIT) {
			if (reserve_bounce_pages(dmat, map, 0) != 0) {
				mtx_unlock(&bounce_lock);
				return (ENOMEM);
			}
		} else {
			if (reserve_bounce_pages(dmat, map, 1) != 0) {
				/* Queue us for resources */
				map->dmat = dmat;
				map->buf = buf;
				map->buflen = buflen;
				STAILQ_INSERT_TAIL(&bounce_map_waitinglist,
				    map, links);
				mtx_unlock(&bounce_lock);
				return (EINPROGRESS);
			}
		}
		mtx_unlock(&bounce_lock);
	}

	lastaddr = *lastaddrp;
	bmask = ~(dmat->boundary - 1);

	for (seg = *segp; buflen > 0 ; ) {
		/*
		 * Get the physical address for this segment.
		 */
		if (pmap)
			curaddr = pmap_extract(pmap, vaddr);
		else
			curaddr = pmap_kextract(vaddr);

		/*
		 * Compute the segment size, and adjust counts.
		 */
		sgsize = PAGE_SIZE - ((u_long)curaddr & PAGE_MASK);
		if (buflen < sgsize)
			sgsize = buflen;

		/*
		 * Make sure we don't cross any boundaries.
		 */
		if (dmat->boundary > 0) {
			baddr = (curaddr + dmat->boundary) & bmask;
			if (sgsize > (baddr - curaddr))
				sgsize = (baddr - curaddr);
		}

		if (map->pagesneeded != 0 && run_filter(dmat, curaddr))
			curaddr = add_bounce_page(dmat, map, vaddr, sgsize);

		/*
		 * Insert chunk into a segment, coalescing with
		 * previous segment if possible.
		 */
		if (first) {
			segs[seg].ds_addr = curaddr;
			segs[seg].ds_len = sgsize;
			first = 0;
		} else {
			if (needbounce == 0 && curaddr == lastaddr &&
			    (segs[seg].ds_len + sgsize) <= dmat->maxsegsz &&
			    (dmat->boundary == 0 ||
			     (segs[seg].ds_addr & bmask) == (curaddr & bmask)))
				segs[seg].ds_len += sgsize;
			else {
				if (++seg >= dmat->nsegments)
					break;
				segs[seg].ds_addr = curaddr;
				segs[seg].ds_len = sgsize;
			}
		}

		lastaddr = curaddr + sgsize;
		vaddr += sgsize;
		buflen -= sgsize;
	}

	*segp = seg;
	*lastaddrp = lastaddr;

	/*
	 * Did we fit?
	 */
	return (buflen != 0 ? EFBIG : 0); /* XXX better return value here? */
}

/*
 * Map the buffer buf into bus space using the dmamap map.
 */
int
bus_dmamap_load(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
    bus_size_t buflen, bus_dmamap_callback_t *callback,
    void *callback_arg, int flags)
{
	bus_addr_t lastaddr = 0;
	int error, nsegs = 0;

	if (map != NULL) {
		flags |= BUS_DMA_WAITOK;
		map->callback = callback;
		map->callback_arg = callback_arg;
	}

	error = _bus_dmamap_load_buffer(dmat, map, buf, buflen, NULL, flags,
	    &lastaddr, &nsegs, 1);

	if (error == EINPROGRESS)
		return (error);

	if (error)
		(*callback)(callback_arg, dmat->segments, 0, error);
	else
		(*callback)(callback_arg, dmat->segments, nsegs + 1, 0);

	return (0);
}
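
/*
 * Usage sketch (hypothetical driver code; foo_load_cb and busaddr are
 * illustrative names): the caller supplies a bus_dmamap_callback_t that
 * receives the finished segment list; when bounce pages are exhausted
 * the load returns EINPROGRESS and the callback fires later from
 * busdma_swi() under the tag's lockfunc:
 *
 *	static void
 *	foo_load_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
 *	{
 *		if (error == 0)
 *			*(bus_addr_t *)arg = segs[0].ds_addr;
 *	}
 *
 *	error = bus_dmamap_load(sc->sc_dmat, sc->sc_dmamap, buf, buflen,
 *	    foo_load_cb, &busaddr, 0);
 */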
/*
 * Like bus_dmamap_load(), but for mbufs.
 */
int
bus_dmamap_load_mbuf(bus_dma_tag_t dmat, bus_dmamap_t map,
    struct mbuf *m0,
    bus_dmamap_callback2_t *callback, void *callback_arg,
    int flags)
{
	int nsegs, error;

	M_ASSERTPKTHDR(m0);

	flags |= BUS_DMA_NOWAIT;
	nsegs = 0;
	error = 0;
	if (m0->m_pkthdr.len <= dmat->maxsize) {
		int first = 1;
		bus_addr_t lastaddr = 0;
		struct mbuf *m;

		for (m = m0; m != NULL && error == 0; m = m->m_next) {
			if (m->m_len > 0) {
				error = _bus_dmamap_load_buffer(dmat, map,
				    m->m_data, m->m_len,
				    NULL, flags, &lastaddr,
				    &nsegs, first);
				first = 0;
			}
		}
	} else {
		error = EINVAL;
	}

	if (error) {
		/* force "no valid mappings" in callback */
		(*callback)(callback_arg, dmat->segments, 0, 0, error);
	} else {
		(*callback)(callback_arg, dmat->segments,
		    nsegs+1, m0->m_pkthdr.len, error);
	}
	return (error);
}
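
/*
 * Usage sketch (hypothetical; foo_tx_cb and txslot are illustrative
 * names): since mbuf loads force BUS_DMA_NOWAIT, EINPROGRESS is never
 * returned and the bus_dmamap_callback2_t, which also reports the total
 * mapped length, runs before this function returns:
 *
 *	error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m0,
 *	    foo_tx_cb, txslot, 0);
 */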

/*
 * Like bus_dmamap_load(), but for uios.
 */
int
bus_dmamap_load_uio(bus_dma_tag_t dmat, bus_dmamap_t map,
    struct uio *uio,
    bus_dmamap_callback2_t *callback, void *callback_arg,
    int flags)
{
	bus_addr_t lastaddr;
	int nsegs, error, first, i;
	bus_size_t resid;
	struct iovec *iov;
	struct thread *td = NULL;

	flags |= BUS_DMA_NOWAIT;
	resid = uio->uio_resid;
	iov = uio->uio_iov;

	if (uio->uio_segflg == UIO_USERSPACE) {
		td = uio->uio_td;
		KASSERT(td != NULL,
		    ("bus_dmamap_load_uio: USERSPACE but no proc"));
	}

	nsegs = 0;
	error = 0;
	first = 1;
	for (i = 0; i < uio->uio_iovcnt && resid != 0 && !error; i++) {
		/*
		 * Now at the first iovec to load.  Load each iovec
		 * until we have exhausted the residual count.
		 */
		bus_size_t minlen =
		    resid < iov[i].iov_len ? resid : iov[i].iov_len;
		caddr_t addr = (caddr_t) iov[i].iov_base;

		if (minlen > 0) {
			error = _bus_dmamap_load_buffer(dmat, map,
			    addr, minlen,
			    td, flags, &lastaddr, &nsegs, first);
			first = 0;

			resid -= minlen;
		}
	}

	if (error) {
		/* force "no valid mappings" in callback */
		(*callback)(callback_arg, dmat->segments, 0, 0, error);
	} else {
		(*callback)(callback_arg, dmat->segments,
		    nsegs+1, uio->uio_resid, error);
	}
	return (error);
}

/*
 * Release the mapping held by map.
 */
void
_bus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t map)
{
	struct bounce_page *bpage;

	while ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) {
		STAILQ_REMOVE_HEAD(&map->bpages, links);
		free_bounce_page(dmat, bpage);
	}
}

void
_bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op)
{
	struct bounce_page *bpage;

	if ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) {
		/*
		 * Handle data bouncing.  We might also
		 * want to add support for invalidating
		 * the caches on broken hardware
		 */
		if (op & BUS_DMASYNC_PREWRITE) {
			while (bpage != NULL) {
				bcopy((void *)bpage->datavaddr,
				    (void *)bpage->vaddr,
				    bpage->datacount);
				bpage = STAILQ_NEXT(bpage, links);
			}
		}

		if (op & BUS_DMASYNC_POSTREAD) {
			while (bpage != NULL) {
				bcopy((void *)bpage->vaddr,
				    (void *)bpage->datavaddr,
				    bpage->datacount);
				bpage = STAILQ_NEXT(bpage, links);
			}
		}
	}
}
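
/*
 * Usage sketch (hypothetical driver code): each transfer is bracketed
 * with sync calls so bounced data is copied at the right times; PREWRITE
 * copies client data into the bounce pages before the device reads it,
 * and POSTREAD copies device data back out after it arrives:
 *
 *	bus_dmamap_sync(sc->sc_dmat, map, BUS_DMASYNC_PREWRITE);
 *	(start the DMA transfer and wait for completion)
 *	bus_dmamap_sync(sc->sc_dmat, map, BUS_DMASYNC_POSTREAD);
 *	bus_dmamap_unload(sc->sc_dmat, map);
 */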

static void
init_bounce_pages(void *dummy __unused)
{

	free_bpages = 0;
	reserved_bpages = 0;
	active_bpages = 0;
	total_bpages = 0;
	STAILQ_INIT(&bounce_page_list);
	STAILQ_INIT(&bounce_map_waitinglist);
	STAILQ_INIT(&bounce_map_callbacklist);
	mtx_init(&bounce_lock, "bounce pages lock", NULL, MTX_DEF);
}
SYSINIT(bpages, SI_SUB_LOCK, SI_ORDER_ANY, init_bounce_pages, NULL);

static int
alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages)
{
	int count;

	count = 0;
	while (numpages > 0) {
		struct bounce_page *bpage;

		bpage = (struct bounce_page *)malloc(sizeof(*bpage), M_DEVBUF,
		    M_NOWAIT | M_ZERO);

		if (bpage == NULL)
			break;
		bpage->vaddr = (vm_offset_t)contigmalloc(PAGE_SIZE, M_DEVBUF,
		    M_NOWAIT, 0ul,
		    dmat->lowaddr,
		    PAGE_SIZE,
		    dmat->boundary);
		if (bpage->vaddr == 0) {
			free(bpage, M_DEVBUF);
			break;
		}
		bpage->busaddr = pmap_kextract(bpage->vaddr);
		mtx_lock(&bounce_lock);
		STAILQ_INSERT_TAIL(&bounce_page_list, bpage, links);
		total_bpages++;
		free_bpages++;
		mtx_unlock(&bounce_lock);
		count++;
		numpages--;
	}
	return (count);
}

static int
reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map, int commit)
{
	int pages;

	mtx_assert(&bounce_lock, MA_OWNED);
	pages = MIN(free_bpages, map->pagesneeded - map->pagesreserved);
	if (commit == 0 && map->pagesneeded > (map->pagesreserved + pages))
		return (map->pagesneeded - (map->pagesreserved + pages));
	free_bpages -= pages;
	reserved_bpages += pages;
	map->pagesreserved += pages;
	pages = map->pagesneeded - map->pagesreserved;

	return (pages);
}

static bus_addr_t
add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map, vm_offset_t vaddr,
    bus_size_t size)
{
	struct bounce_page *bpage;

	KASSERT(map != NULL && map != &nobounce_dmamap,
	    ("add_bounce_page: bad map %p", map));

	if (map->pagesneeded == 0)
		panic("add_bounce_page: map doesn't need any pages");
	map->pagesneeded--;

	if (map->pagesreserved == 0)
		panic("add_bounce_page: map doesn't have any reserved pages");
	map->pagesreserved--;

	mtx_lock(&bounce_lock);
	bpage = STAILQ_FIRST(&bounce_page_list);
	if (bpage == NULL)
		panic("add_bounce_page: free page list is empty");

	STAILQ_REMOVE_HEAD(&bounce_page_list, links);
	reserved_bpages--;
	active_bpages++;
	mtx_unlock(&bounce_lock);

	bpage->datavaddr = vaddr;
	bpage->datacount = size;
	STAILQ_INSERT_TAIL(&(map->bpages), bpage, links);
	return (bpage->busaddr);
}

static void
free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage)
{
	struct bus_dmamap *map;

	bpage->datavaddr = 0;
	bpage->datacount = 0;

	mtx_lock(&bounce_lock);
	STAILQ_INSERT_HEAD(&bounce_page_list, bpage, links);
	free_bpages++;
	active_bpages--;
	if ((map = STAILQ_FIRST(&bounce_map_waitinglist)) != NULL) {
		if (reserve_bounce_pages(map->dmat, map, 1) == 0) {
			STAILQ_REMOVE_HEAD(&bounce_map_waitinglist, links);
			STAILQ_INSERT_TAIL(&bounce_map_callbacklist,
			    map, links);
			busdma_swi_pending = 1;
			swi_sched(vm_ih, 0);
		}
	}
	mtx_unlock(&bounce_lock);
}

void
busdma_swi(void)
{
	bus_dma_tag_t dmat;
	struct bus_dmamap *map;

	mtx_lock(&bounce_lock);
	while ((map = STAILQ_FIRST(&bounce_map_callbacklist)) != NULL) {
		STAILQ_REMOVE_HEAD(&bounce_map_callbacklist, links);
		mtx_unlock(&bounce_lock);
		dmat = map->dmat;
		(dmat->lockfunc)(dmat->lockfuncarg, BUS_DMA_LOCK);
		bus_dmamap_load(map->dmat, map, map->buf, map->buflen,
		    map->callback, map->callback_arg, /*flags*/0);
		(dmat->lockfunc)(dmat->lockfuncarg, BUS_DMA_UNLOCK);
		mtx_lock(&bounce_lock);
	}
	mtx_unlock(&bounce_lock);
}