/*-
 * Copyright (c) 1997, 1998 Justin T. Gibbs.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: releng/7.3/sys/i386/i386/busdma_machdep.c 200289 2009-12-09 08:16:12Z scottl $");

#include <sys/param.h>
#include <sys/kdb.h>
#include <ddb/ddb.h>
#include <ddb/db_output.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/bus.h>
#include <sys/interrupt.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/proc.h>
#include <sys/mutex.h>
#include <sys/mbuf.h>
#include <sys/uio.h>
#include <sys/sysctl.h>

#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>

#include <machine/atomic.h>
#include <machine/bus.h>
#include <machine/md_var.h>
#include <machine/specialreg.h>

#define MAX_BPAGES		512
#define BUS_DMA_COULD_BOUNCE	BUS_DMA_BUS3
#define BUS_DMA_MIN_ALLOC_COMP	BUS_DMA_BUS4

struct bounce_zone;

struct bus_dma_tag {
	bus_dma_tag_t	parent;
	bus_size_t	alignment;
	bus_size_t	boundary;
	bus_addr_t	lowaddr;
	bus_addr_t	highaddr;
	bus_dma_filter_t *filter;
	void		*filterarg;
	bus_size_t	maxsize;
	u_int		nsegments;
	bus_size_t	maxsegsz;
	int		flags;
	int		ref_count;
	int		map_count;
	bus_dma_lock_t	*lockfunc;
	void		*lockfuncarg;
	bus_dma_segment_t *segments;
	struct bounce_zone *bounce_zone;
};

struct bounce_page {
	vm_offset_t	vaddr;		/* kva of bounce buffer */
	bus_addr_t	busaddr;	/* Physical address */
	vm_offset_t	datavaddr;	/* kva of client data */
	bus_size_t	datacount;	/* client data count */
	STAILQ_ENTRY(bounce_page) links;
};

int busdma_swi_pending;

struct bounce_zone {
	STAILQ_ENTRY(bounce_zone) links;
	STAILQ_HEAD(bp_list, bounce_page) bounce_page_list;
	int		total_bpages;
	int		free_bpages;
	int		reserved_bpages;
	int		active_bpages;
	int		total_bounced;
	int		total_deferred;
	bus_size_t	alignment;
	bus_addr_t	lowaddr;
	char		zoneid[8];
	char		lowaddrid[20];
	struct sysctl_ctx_list sysctl_tree;
	struct sysctl_oid *sysctl_tree_top;
};

static struct mtx bounce_lock;
static int total_bpages;
static int busdma_zonecount;
static STAILQ_HEAD(, bounce_zone) bounce_zone_list;

SYSCTL_NODE(_hw, OID_AUTO, busdma, CTLFLAG_RD, 0, "Busdma parameters");
SYSCTL_INT(_hw_busdma, OID_AUTO, total_bpages, CTLFLAG_RD, &total_bpages, 0,
	   "Total bounce pages");

struct bus_dmamap {
	struct bp_list	bpages;
	int		pagesneeded;
	int		pagesreserved;
	bus_dma_tag_t	dmat;
	void		*buf;		/* unmapped buffer pointer */
	bus_size_t	buflen;		/* unmapped buffer length */
	bus_dmamap_callback_t *callback;
	void		*callback_arg;
	STAILQ_ENTRY(bus_dmamap) links;
};

static STAILQ_HEAD(, bus_dmamap) bounce_map_waitinglist;
static STAILQ_HEAD(, bus_dmamap) bounce_map_callbacklist;
static struct bus_dmamap nobounce_dmamap;

static void init_bounce_pages(void *dummy);
static int alloc_bounce_zone(bus_dma_tag_t dmat);
static int alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages);
static int reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map,
				int commit);
static bus_addr_t add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map,
				  vm_offset_t vaddr, bus_size_t size);
static void free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage);
int run_filter(bus_dma_tag_t dmat, bus_addr_t paddr);
int _bus_dmamap_count_pages(bus_dma_tag_t dmat, bus_dmamap_t map, pmap_t pmap,
    void *buf, bus_size_t buflen, int flags);

/*
 * Return true if a match is made.
 *
 * To find a match, walk the chain of bus_dma_tag_t's checking 'paddr'
 * against each tag's constraints.
 *
 * If paddr falls within a tag's exclusion window (lowaddr, highaddr] or
 * violates its alignment, call the filter callback to check for a match;
 * if there is no filter callback, assume a match (bouncing is required).
 */
int
run_filter(bus_dma_tag_t dmat, bus_addr_t paddr)
{
	int retval;

	retval = 0;

	do {
		if (((paddr > dmat->lowaddr && paddr <= dmat->highaddr)
		 || ((paddr & (dmat->alignment - 1)) != 0))
		 && (dmat->filter == NULL
		  || (*dmat->filter)(dmat->filterarg, paddr) != 0))
			retval = 1;

		dmat = dmat->parent;
	} while (retval == 0 && dmat != NULL);
	return (retval);
}
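
/*
 * Example (an illustrative sketch, not compiled in): a driver-supplied
 * bus_dma_filter_t of the kind run_filter() invokes above.  The 24-bit
 * address limit and the decision to bounce everything above it are
 * hypothetical; a real filter returns non-zero to request bouncing.
 */
#if 0
static int
example_dma_filter(void *arg, bus_addr_t paddr)
{

	/* Bounce anything the hypothetical device cannot address. */
	return (paddr > 0xFFFFFF);
}
#endif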

/*
 * Convenience function for manipulating driver locks from busdma (during
 * busdma_swi, for example).  Drivers that don't provide their own locks
 * should pass &Giant as lockfuncarg.  Drivers that use their own
 * non-mutex locking scheme don't have to use this at all.
 */
void
busdma_lock_mutex(void *arg, bus_dma_lock_op_t op)
{
	struct mtx *dmtx;

	dmtx = (struct mtx *)arg;
	switch (op) {
	case BUS_DMA_LOCK:
		mtx_lock(dmtx);
		break;
	case BUS_DMA_UNLOCK:
		mtx_unlock(dmtx);
		break;
	default:
		panic("Unknown operation 0x%x for busdma_lock_mutex!", op);
	}
}
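
/*
 * Example (an illustrative sketch, not compiled in): a driver whose
 * deferred callbacks must run under its own mutex passes
 * busdma_lock_mutex together with that mutex at tag creation time.
 * "sc", "sc_mtx" and "sc_dmat" are hypothetical driver fields.
 */
#if 0
	error = bus_dma_tag_create(bus_get_dma_tag(sc->dev),
	    1, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    MAXBSIZE, 1, MAXBSIZE,	/* maxsize, nsegments, maxsegsz */
	    0,				/* flags */
	    busdma_lock_mutex, &sc->sc_mtx, /* lockfunc, lockfuncarg */
	    &sc->sc_dmat);
#endif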

/*
 * dflt_lock should never get called.  It gets put into the dma tag when
 * lockfunc == NULL, which is only valid if the maps that are associated
 * with the tag are never meant to be deferred.
 * XXX Should have a way to identify which driver is responsible here.
 */
static void
dflt_lock(void *arg, bus_dma_lock_op_t op)
{
	panic("driver error: busdma dflt_lock called");
}

/*
 * Allocate a device specific dma_tag.
 */
int
bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment,
		   bus_size_t boundary, bus_addr_t lowaddr,
		   bus_addr_t highaddr, bus_dma_filter_t *filter,
		   void *filterarg, bus_size_t maxsize, int nsegments,
		   bus_size_t maxsegsz, int flags, bus_dma_lock_t *lockfunc,
		   void *lockfuncarg, bus_dma_tag_t *dmat)
{
	bus_dma_tag_t newtag;
	int error = 0;

	/* Basic sanity checking */
	if (boundary != 0 && boundary < maxsegsz)
		maxsegsz = boundary;

	if (maxsegsz == 0) {
		return (EINVAL);
	}

	/* Return a NULL tag on failure */
	*dmat = NULL;

	newtag = (bus_dma_tag_t)malloc(sizeof(*newtag), M_DEVBUF,
	    M_ZERO | M_NOWAIT);
	if (newtag == NULL) {
		CTR4(KTR_BUSDMA, "%s returned tag %p tag flags 0x%x error %d",
		    __func__, newtag, 0, error);
		return (ENOMEM);
	}

	newtag->parent = parent;
	newtag->alignment = alignment;
	newtag->boundary = boundary;
	newtag->lowaddr = trunc_page((vm_paddr_t)lowaddr) + (PAGE_SIZE - 1);
	newtag->highaddr = trunc_page((vm_paddr_t)highaddr) +
	    (PAGE_SIZE - 1);
	newtag->filter = filter;
	newtag->filterarg = filterarg;
	newtag->maxsize = maxsize;
	newtag->nsegments = nsegments;
	newtag->maxsegsz = maxsegsz;
	newtag->flags = flags;
	newtag->ref_count = 1; /* Count ourself */
	newtag->map_count = 0;
	if (lockfunc != NULL) {
		newtag->lockfunc = lockfunc;
		newtag->lockfuncarg = lockfuncarg;
	} else {
		newtag->lockfunc = dflt_lock;
		newtag->lockfuncarg = NULL;
	}
	newtag->segments = NULL;

	/* Take into account any restrictions imposed by our parent tag */
	if (parent != NULL) {
		newtag->lowaddr = MIN(parent->lowaddr, newtag->lowaddr);
		newtag->highaddr = MAX(parent->highaddr, newtag->highaddr);
		if (newtag->boundary == 0)
			newtag->boundary = parent->boundary;
		else if (parent->boundary != 0)
			newtag->boundary = MIN(parent->boundary,
					       newtag->boundary);
		if ((newtag->filter != NULL) ||
		    ((parent->flags & BUS_DMA_COULD_BOUNCE) != 0))
			newtag->flags |= BUS_DMA_COULD_BOUNCE;
		if (newtag->filter == NULL) {
			/*
			 * Short circuit looking at our parent directly
			 * since we have encapsulated all of its information
			 */
			newtag->filter = parent->filter;
			newtag->filterarg = parent->filterarg;
			newtag->parent = parent->parent;
		}
		if (newtag->parent != NULL)
			atomic_add_int(&parent->ref_count, 1);
	}

	if (newtag->lowaddr < ptoa((vm_paddr_t)Maxmem)
	 || newtag->alignment > 1)
		newtag->flags |= BUS_DMA_COULD_BOUNCE;

	if (((newtag->flags & BUS_DMA_COULD_BOUNCE) != 0) &&
	    (flags & BUS_DMA_ALLOCNOW) != 0) {
		struct bounce_zone *bz;

		/* Must bounce */

		if ((error = alloc_bounce_zone(newtag)) != 0) {
			free(newtag, M_DEVBUF);
			return (error);
		}
		bz = newtag->bounce_zone;

		if (ptoa(bz->total_bpages) < maxsize) {
			int pages;

			pages = atop(maxsize) - bz->total_bpages;

			/* Add pages to our bounce pool */
			if (alloc_bounce_pages(newtag, pages) < pages)
				error = ENOMEM;
		}
		/* Performed initial allocation */
		newtag->flags |= BUS_DMA_MIN_ALLOC_COMP;
	}

	if (error != 0) {
		free(newtag, M_DEVBUF);
		newtag = NULL;	/* Don't trace the flags of freed memory. */
	} else {
		*dmat = newtag;
	}
	CTR4(KTR_BUSDMA, "%s returned tag %p tag flags 0x%x error %d",
	    __func__, newtag, (newtag != NULL ? newtag->flags : 0), error);
	return (error);
}
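
/*
 * Example (an illustrative sketch, not compiled in): a tag for a device
 * that can only address the low 16MB, the classic case that sets
 * BUS_DMA_COULD_BOUNCE above.  BUS_DMA_ALLOCNOW makes the code above
 * pre-populate the bounce zone at tag creation; "example_dmat" is
 * hypothetical.
 */
#if 0
	error = bus_dma_tag_create(NULL,	/* parent */
	    1, 0,				/* alignment, boundary */
	    BUS_SPACE_MAXADDR_24BIT,		/* lowaddr: 16MB limit */
	    BUS_SPACE_MAXADDR,			/* highaddr */
	    NULL, NULL,				/* filter, filterarg */
	    DFLTPHYS, 1, DFLTPHYS,		/* maxsize, nsegments, maxsegsz */
	    BUS_DMA_ALLOCNOW,			/* flags */
	    busdma_lock_mutex, &Giant,		/* lockfunc, lockfuncarg */
	    &example_dmat);
#endif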

int
bus_dma_tag_destroy(bus_dma_tag_t dmat)
{
	bus_dma_tag_t dmat_copy;
	int error;

	error = 0;
	dmat_copy = dmat;

	if (dmat != NULL) {

		if (dmat->map_count != 0) {
			error = EBUSY;
			goto out;
		}

		while (dmat != NULL) {
			bus_dma_tag_t parent;

			parent = dmat->parent;
			atomic_subtract_int(&dmat->ref_count, 1);
			if (dmat->ref_count == 0) {
				if (dmat->segments != NULL)
					free(dmat->segments, M_DEVBUF);
				free(dmat, M_DEVBUF);
				/*
				 * Last reference count, so
				 * release our reference
				 * count on our parent.
				 */
				dmat = parent;
			} else
				dmat = NULL;
		}
	}
out:
	CTR3(KTR_BUSDMA, "%s tag %p error %d", __func__, dmat_copy, error);
	return (error);
}

/*
 * Allocate a handle for mapping from kva/uva/physical
 * address space into bus device space.
 */
int
bus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp)
{
	int error;

	error = 0;

	if (dmat->segments == NULL) {
		dmat->segments = (bus_dma_segment_t *)malloc(
		    sizeof(bus_dma_segment_t) * dmat->nsegments, M_DEVBUF,
		    M_NOWAIT);
		if (dmat->segments == NULL) {
			CTR3(KTR_BUSDMA, "%s: tag %p error %d",
			    __func__, dmat, ENOMEM);
			return (ENOMEM);
		}
	}

	/*
	 * Bouncing might be required if the driver asks for an active
	 * exclusion region, a data alignment that is stricter than 1, and/or
	 * an active address boundary.
	 */
	if (dmat->flags & BUS_DMA_COULD_BOUNCE) {

		/* Must bounce */
		struct bounce_zone *bz;
		int maxpages;

		if (dmat->bounce_zone == NULL) {
			if ((error = alloc_bounce_zone(dmat)) != 0)
				return (error);
		}
		bz = dmat->bounce_zone;

		*mapp = (bus_dmamap_t)malloc(sizeof(**mapp), M_DEVBUF,
		    M_NOWAIT | M_ZERO);
		if (*mapp == NULL) {
			CTR3(KTR_BUSDMA, "%s: tag %p error %d",
			    __func__, dmat, ENOMEM);
			return (ENOMEM);
		}

		/* Initialize the new map */
		STAILQ_INIT(&((*mapp)->bpages));

		/*
		 * Attempt to add pages to our pool on a per-instance
		 * basis up to a sane limit.
		 */
		if (dmat->alignment > 1)
			maxpages = MAX_BPAGES;
		else
			maxpages = MIN(MAX_BPAGES,
			    Maxmem - atop(dmat->lowaddr));
		if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0
		 || (dmat->map_count > 0 && bz->total_bpages < maxpages)) {
			int pages;

			pages = MAX(atop(dmat->maxsize), 1);
			pages = MIN(maxpages - bz->total_bpages, pages);
			pages = MAX(pages, 1);
			if (alloc_bounce_pages(dmat, pages) < pages)
				error = ENOMEM;

			if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0) {
				if (error == 0)
					dmat->flags |= BUS_DMA_MIN_ALLOC_COMP;
			} else {
				error = 0;
			}
		}
	} else {
		*mapp = NULL;
	}
	if (error == 0)
		dmat->map_count++;
	CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
	    __func__, dmat, dmat->flags, error);
	return (error);
}

/*
 * Destroy a handle for mapping from kva/uva/physical
 * address space into bus device space.
 */
int
bus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map)
{
	if (map != NULL && map != &nobounce_dmamap) {
		if (STAILQ_FIRST(&map->bpages) != NULL) {
			CTR3(KTR_BUSDMA, "%s: tag %p error %d",
			    __func__, dmat, EBUSY);
			return (EBUSY);
		}
		free(map, M_DEVBUF);
	}
	dmat->map_count--;
	CTR2(KTR_BUSDMA, "%s: tag %p error 0", __func__, dmat);
	return (0);
}

/*
 * Allocate a piece of memory that can be efficiently mapped into
 * bus device space based on the constraints listed in the dma tag.
 * A dmamap for use with bus_dmamap_load is also allocated.
 */
int
bus_dmamem_alloc(bus_dma_tag_t dmat, void** vaddr, int flags,
		 bus_dmamap_t *mapp)
{
	int mflags;

	if (flags & BUS_DMA_NOWAIT)
		mflags = M_NOWAIT;
	else
		mflags = M_WAITOK;
	if (flags & BUS_DMA_ZERO)
		mflags |= M_ZERO;

	/* If we succeed, no mapping/bouncing will be required */
	*mapp = NULL;

	if (dmat->segments == NULL) {
		dmat->segments = (bus_dma_segment_t *)malloc(
		    sizeof(bus_dma_segment_t) * dmat->nsegments, M_DEVBUF,
		    M_NOWAIT);
		if (dmat->segments == NULL) {
			CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
			    __func__, dmat, dmat->flags, ENOMEM);
			return (ENOMEM);
		}
	}

	/*
	 * XXX:
	 * (dmat->alignment < dmat->maxsize) is just a quick hack; the exact
	 * alignment guarantees of malloc need to be nailed down, and the
	 * code below should be rewritten to take that into account.
	 *
	 * In the meantime, we'll warn the user if malloc gets it wrong.
	 */
	if ((dmat->maxsize <= PAGE_SIZE) &&
	    (dmat->alignment < dmat->maxsize) &&
	    dmat->lowaddr >= ptoa((vm_paddr_t)Maxmem)) {
		*vaddr = malloc(dmat->maxsize, M_DEVBUF, mflags);
	} else {
		/*
		 * XXX Use Contigmalloc until it is merged into this facility
		 *     and handles multi-seg allocations.  Nobody is doing
		 *     multi-seg allocations yet though.
		 * XXX Certain AGP hardware does.
		 */
		*vaddr = contigmalloc(dmat->maxsize, M_DEVBUF, mflags,
		    0ul, dmat->lowaddr,
		    dmat->alignment ? dmat->alignment : 1ul,
		    dmat->boundary);
	}
	if (*vaddr == NULL) {
		CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
		    __func__, dmat, dmat->flags, ENOMEM);
		return (ENOMEM);
	} else if ((uintptr_t)*vaddr & (dmat->alignment - 1)) {
		printf("bus_dmamem_alloc failed to align memory properly.\n");
	}
	if (flags & BUS_DMA_NOCACHE)
		pmap_change_attr((vm_offset_t)*vaddr, dmat->maxsize,
		    PAT_UNCACHEABLE);
	CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
	    __func__, dmat, dmat->flags, 0);
	return (0);
}
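
/*
 * Example (an illustrative sketch, not compiled in): allocating a small
 * zeroed descriptor ring and loading it.  A request this small, with
 * alignment below maxsize and no addressing limit, is served by
 * malloc() above; anything else falls through to contigmalloc().
 * "ring_dmat", "RING_SIZE", "ring_load_cb" and "sc" are hypothetical.
 */
#if 0
	void *ring;
	bus_dmamap_t ring_map;

	error = bus_dmamem_alloc(ring_dmat, &ring,
	    BUS_DMA_NOWAIT | BUS_DMA_ZERO, &ring_map);
	if (error == 0)
		error = bus_dmamap_load(ring_dmat, ring_map, ring,
		    RING_SIZE, ring_load_cb, sc, BUS_DMA_NOWAIT);
#endif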

/*
 * Free a piece of memory and its associated dmamap, that was allocated
 * via bus_dmamem_alloc.  Make the same choice for free/contigfree.
 */
void
bus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map)
{
	/*
	 * dmamem does not need to be bounced, so the map should be
	 * NULL
	 */
	if (map != NULL)
		panic("bus_dmamem_free: Invalid map freed\n");
	pmap_change_attr((vm_offset_t)vaddr, dmat->maxsize, PAT_WRITE_BACK);
	if ((dmat->maxsize <= PAGE_SIZE) &&
	    (dmat->alignment < dmat->maxsize) &&
	    dmat->lowaddr >= ptoa((vm_paddr_t)Maxmem))
		free(vaddr, M_DEVBUF);
	else {
		contigfree(vaddr, dmat->maxsize, M_DEVBUF);
	}
	CTR3(KTR_BUSDMA, "%s: tag %p flags 0x%x", __func__, dmat, dmat->flags);
}

int
_bus_dmamap_count_pages(bus_dma_tag_t dmat, bus_dmamap_t map, pmap_t pmap,
    void *buf, bus_size_t buflen, int flags)
{
	vm_offset_t vaddr;
	vm_offset_t vendaddr;
	bus_addr_t paddr;

	if (map != &nobounce_dmamap && map->pagesneeded == 0) {
		CTR4(KTR_BUSDMA, "lowaddr= %d Maxmem= %d, boundary= %d, "
		    "alignment= %d", dmat->lowaddr, ptoa((vm_paddr_t)Maxmem),
		    dmat->boundary, dmat->alignment);
		CTR3(KTR_BUSDMA, "map= %p, nobouncemap= %p, pagesneeded= %d",
		    map, &nobounce_dmamap, map->pagesneeded);
		/*
		 * Count the number of bounce pages
		 * needed in order to complete this transfer
		 */
		vaddr = (vm_offset_t)buf;
		vendaddr = (vm_offset_t)buf + buflen;

		while (vaddr < vendaddr) {
			if (pmap)
				paddr = pmap_extract(pmap, vaddr);
			else
				paddr = pmap_kextract(vaddr);
			if (((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0) &&
			    run_filter(dmat, paddr) != 0) {
				map->pagesneeded++;
			}
			vaddr += (PAGE_SIZE -
			    ((vm_offset_t)vaddr & PAGE_MASK));
		}
		CTR1(KTR_BUSDMA, "pagesneeded= %d\n", map->pagesneeded);
	}

	/* Reserve Necessary Bounce Pages */
	if (map->pagesneeded != 0) {
		mtx_lock(&bounce_lock);
		if (flags & BUS_DMA_NOWAIT) {
			if (reserve_bounce_pages(dmat, map, 0) != 0) {
				mtx_unlock(&bounce_lock);
				return (ENOMEM);
			}
		} else {
			if (reserve_bounce_pages(dmat, map, 1) != 0) {
				/* Queue us for resources */
				map->dmat = dmat;
				map->buf = buf;
				map->buflen = buflen;
				STAILQ_INSERT_TAIL(&bounce_map_waitinglist,
				    map, links);
				mtx_unlock(&bounce_lock);
				return (EINPROGRESS);
			}
		}
		mtx_unlock(&bounce_lock);
	}

	return (0);
}

/*
 * Utility function to load a linear buffer.  lastaddrp holds state
 * between invocations (for multiple-buffer loads).  segp contains
 * the starting segment on entry, and the ending segment on exit.
 * first indicates if this is the first invocation of this function.
 */
static __inline int
_bus_dmamap_load_buffer(bus_dma_tag_t dmat,
			bus_dmamap_t map,
			void *buf, bus_size_t buflen,
			pmap_t pmap,
			int flags,
			bus_addr_t *lastaddrp,
			bus_dma_segment_t *segs,
			int *segp,
			int first)
{
	bus_size_t sgsize;
	bus_addr_t curaddr, lastaddr, baddr, bmask;
	vm_offset_t vaddr;
	int seg, error;

	if (map == NULL)
		map = &nobounce_dmamap;

	if ((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0) {
		error = _bus_dmamap_count_pages(dmat, map, pmap, buf, buflen,
		    flags);
		if (error)
			return (error);
	}

	vaddr = (vm_offset_t)buf;
	lastaddr = *lastaddrp;
	bmask = ~(dmat->boundary - 1);

	for (seg = *segp; buflen > 0 ; ) {
		/*
		 * Get the physical address for this segment.
		 */
		if (pmap)
			curaddr = pmap_extract(pmap, vaddr);
		else
			curaddr = pmap_kextract(vaddr);

		/*
		 * Compute the segment size, and adjust counts.
		 */
		sgsize = PAGE_SIZE - ((u_long)curaddr & PAGE_MASK);
		if (sgsize > dmat->maxsegsz)
			sgsize = dmat->maxsegsz;
		if (buflen < sgsize)
			sgsize = buflen;

		/*
		 * Make sure we don't cross any boundaries.
		 */
		if (dmat->boundary > 0) {
			baddr = (curaddr + dmat->boundary) & bmask;
			if (sgsize > (baddr - curaddr))
				sgsize = (baddr - curaddr);
		}

		if (((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0) &&
		    map->pagesneeded != 0 && run_filter(dmat, curaddr))
			curaddr = add_bounce_page(dmat, map, vaddr, sgsize);

		/*
		 * Insert chunk into a segment, coalescing with
		 * previous segment if possible.
		 */
		if (first) {
			segs[seg].ds_addr = curaddr;
			segs[seg].ds_len = sgsize;
			first = 0;
		} else {
			if (curaddr == lastaddr &&
			    (segs[seg].ds_len + sgsize) <= dmat->maxsegsz &&
			    (dmat->boundary == 0 ||
			     (segs[seg].ds_addr & bmask) == (curaddr & bmask)))
				segs[seg].ds_len += sgsize;
			else {
				if (++seg >= dmat->nsegments)
					break;
				segs[seg].ds_addr = curaddr;
				segs[seg].ds_len = sgsize;
			}
		}

		lastaddr = curaddr + sgsize;
		vaddr += sgsize;
		buflen -= sgsize;
	}

	*segp = seg;
	*lastaddrp = lastaddr;

	/*
	 * Did we fit?
	 */
	return (buflen != 0 ? EFBIG : 0); /* XXX better return value here? */
}
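
/*
 * Worked example of the boundary clipping above, with illustrative
 * numbers: for boundary = 0x10000 (64KB) and curaddr = 0xfff0,
 * bmask = ~0xffff, so baddr = (0xfff0 + 0x10000) & bmask = 0x10000 and
 * sgsize is clipped to baddr - curaddr = 0x10 bytes; the next segment
 * then starts exactly on the 64KB line.
 */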

/*
 * Map the buffer buf into bus space using the dmamap map.
 */
int
bus_dmamap_load(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
		bus_size_t buflen, bus_dmamap_callback_t *callback,
		void *callback_arg, int flags)
{
	bus_addr_t lastaddr = 0;
	int error, nsegs = 0;

	if (map != NULL) {
		flags |= BUS_DMA_WAITOK;
		map->callback = callback;
		map->callback_arg = callback_arg;
	}

	error = _bus_dmamap_load_buffer(dmat, map, buf, buflen, NULL, flags,
	    &lastaddr, dmat->segments, &nsegs, 1);

	CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d",
	    __func__, dmat, dmat->flags, error, nsegs + 1);

	if (error == EINPROGRESS) {
		return (error);
	}

	if (error)
		(*callback)(callback_arg, dmat->segments, 0, error);
	else
		(*callback)(callback_arg, dmat->segments, nsegs + 1, 0);

	/*
	 * Return ENOMEM to the caller so that it can pass it up the stack.
	 * This error only happens when NOWAIT is set, so deferral is
	 * disabled.
	 */
	if (error == ENOMEM)
		return (error);

	return (0);
}
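
/*
 * Example (an illustrative sketch, not compiled in): the canonical
 * load/callback pattern.  The callback may run synchronously from
 * bus_dmamap_load() or, if EINPROGRESS is returned, later from
 * busdma_swi() once bounce pages free up, so it must only capture the
 * segment array.  "example_softc", "sc_busaddr", "sc_dmat" and
 * "sc_map" are hypothetical.
 */
#if 0
static void
example_load_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	struct example_softc *sc = arg;

	if (error != 0)
		return;
	/* Hand segs[0..nseg-1] to the hypothetical hardware. */
	sc->sc_busaddr = segs[0].ds_addr;
}

	error = bus_dmamap_load(sc->sc_dmat, sc->sc_map, buf, buflen,
	    example_load_cb, sc, 0);
	if (error == EINPROGRESS)
		; /* Deferred; example_load_cb runs from busdma_swi(). */
#endif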

/*
 * Like _bus_dmamap_load(), but for mbufs.
 */
static __inline int
_bus_dmamap_load_mbuf_sg(bus_dma_tag_t dmat, bus_dmamap_t map,
			 struct mbuf *m0, bus_dma_segment_t *segs, int *nsegs,
			 int flags)
{
	int error;

	M_ASSERTPKTHDR(m0);

	flags |= BUS_DMA_NOWAIT;
	*nsegs = 0;
	error = 0;
	if (m0->m_pkthdr.len <= dmat->maxsize) {
		int first = 1;
		bus_addr_t lastaddr = 0;
		struct mbuf *m;

		for (m = m0; m != NULL && error == 0; m = m->m_next) {
			if (m->m_len > 0) {
				error = _bus_dmamap_load_buffer(dmat, map,
				    m->m_data, m->m_len,
				    NULL, flags, &lastaddr,
				    segs, nsegs, first);
				first = 0;
			}
		}
	} else {
		error = EINVAL;
	}

	/* XXX FIXME: Having to increment nsegs is really annoying */
	++*nsegs;
	CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d",
	    __func__, dmat, dmat->flags, error, *nsegs);
	return (error);
}

int
bus_dmamap_load_mbuf(bus_dma_tag_t dmat, bus_dmamap_t map,
		     struct mbuf *m0,
		     bus_dmamap_callback2_t *callback, void *callback_arg,
		     int flags)
{
	int nsegs, error;

	error = _bus_dmamap_load_mbuf_sg(dmat, map, m0, dmat->segments,
	    &nsegs, flags);

	if (error) {
		/* force "no valid mappings" in callback */
		(*callback)(callback_arg, dmat->segments, 0, 0, error);
	} else {
		(*callback)(callback_arg, dmat->segments,
		    nsegs, m0->m_pkthdr.len, error);
	}
	CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d",
	    __func__, dmat, dmat->flags, error, nsegs);
	return (error);
}

int
bus_dmamap_load_mbuf_sg(bus_dma_tag_t dmat, bus_dmamap_t map,
			struct mbuf *m0, bus_dma_segment_t *segs, int *nsegs,
			int flags)
{
	return (_bus_dmamap_load_mbuf_sg(dmat, map, m0, segs, nsegs, flags));
}

/*
 * Like _bus_dmamap_load(), but for uios.
 */
int
bus_dmamap_load_uio(bus_dma_tag_t dmat, bus_dmamap_t map,
		    struct uio *uio,
		    bus_dmamap_callback2_t *callback, void *callback_arg,
		    int flags)
{
	bus_addr_t lastaddr;
	int nsegs, error, first, i;
	bus_size_t resid;
	struct iovec *iov;
	pmap_t pmap;

	flags |= BUS_DMA_NOWAIT;
	resid = uio->uio_resid;
	iov = uio->uio_iov;

	if (uio->uio_segflg == UIO_USERSPACE) {
		KASSERT(uio->uio_td != NULL,
		    ("bus_dmamap_load_uio: USERSPACE but no proc"));
		pmap = vmspace_pmap(uio->uio_td->td_proc->p_vmspace);
	} else
		pmap = NULL;

	nsegs = 0;
	error = 0;
	first = 1;
	lastaddr = (bus_addr_t) 0;
	for (i = 0; i < uio->uio_iovcnt && resid != 0 && !error; i++) {
		/*
		 * Now at the first iovec to load.  Load each iovec
		 * until we have exhausted the residual count.
		 */
		bus_size_t minlen =
		    resid < iov[i].iov_len ? resid : iov[i].iov_len;
		caddr_t addr = (caddr_t) iov[i].iov_base;

		if (minlen > 0) {
			error = _bus_dmamap_load_buffer(dmat, map,
			    addr, minlen, pmap, flags, &lastaddr,
			    dmat->segments, &nsegs, first);
			first = 0;

			resid -= minlen;
		}
	}

	if (error) {
		/* force "no valid mappings" in callback */
		(*callback)(callback_arg, dmat->segments, 0, 0, error);
	} else {
		(*callback)(callback_arg, dmat->segments,
		    nsegs+1, uio->uio_resid, error);
	}
	CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d",
	    __func__, dmat, dmat->flags, error, nsegs + 1);
	return (error);
}

/*
 * Release the mapping held by map.
 */
void
_bus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t map)
{
	struct bounce_page *bpage;

	while ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) {
		STAILQ_REMOVE_HEAD(&map->bpages, links);
		free_bounce_page(dmat, bpage);
	}
}

void
_bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op)
{
	struct bounce_page *bpage;

	if ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) {
		/*
		 * Handle data bouncing.  We might also
		 * want to add support for invalidating
		 * the caches on broken hardware
		 */
		CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x op 0x%x "
		    "performing bounce", __func__, dmat, dmat->flags, op);

		if (op & BUS_DMASYNC_PREWRITE) {
			while (bpage != NULL) {
				bcopy((void *)bpage->datavaddr,
				    (void *)bpage->vaddr,
				    bpage->datacount);
				bpage = STAILQ_NEXT(bpage, links);
			}
			dmat->bounce_zone->total_bounced++;
		}

		if (op & BUS_DMASYNC_POSTREAD) {
			bpage = STAILQ_FIRST(&map->bpages);
			while (bpage != NULL) {
				bcopy((void *)bpage->vaddr,
				    (void *)bpage->datavaddr,
				    bpage->datacount);
				bpage = STAILQ_NEXT(bpage, links);
			}
			dmat->bounce_zone->total_bounced++;
		}
	}
}
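
/*
 * Example (an illustrative sketch, not compiled in): the bracketing a
 * driver performs so the bcopy()s above run at the right times.
 * PREWRITE copies client data into the bounce pages before the device
 * reads; POSTREAD copies device output back out after it writes.
 * "start_dma", "consume_data" and "sc" are hypothetical.
 */
#if 0
	/* Device is about to read the buffer (host -> device). */
	bus_dmamap_sync(dmat, map, BUS_DMASYNC_PREWRITE);
	start_dma(sc);

	/* ... later, after the device has written the buffer ... */
	bus_dmamap_sync(dmat, map, BUS_DMASYNC_POSTREAD);
	consume_data(sc);
#endif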

static void
init_bounce_pages(void *dummy __unused)
{

	total_bpages = 0;
	STAILQ_INIT(&bounce_zone_list);
	STAILQ_INIT(&bounce_map_waitinglist);
	STAILQ_INIT(&bounce_map_callbacklist);
	mtx_init(&bounce_lock, "bounce pages lock", NULL, MTX_DEF);
}
SYSINIT(bpages, SI_SUB_LOCK, SI_ORDER_ANY, init_bounce_pages, NULL);

static struct sysctl_ctx_list *
busdma_sysctl_tree(struct bounce_zone *bz)
{
	return (&bz->sysctl_tree);
}

static struct sysctl_oid *
busdma_sysctl_tree_top(struct bounce_zone *bz)
{
	return (bz->sysctl_tree_top);
}

static int
alloc_bounce_zone(bus_dma_tag_t dmat)
{
	struct bounce_zone *bz;

	/* Check to see if we already have a suitable zone */
	STAILQ_FOREACH(bz, &bounce_zone_list, links) {
		if ((dmat->alignment <= bz->alignment)
		 && (dmat->lowaddr >= bz->lowaddr)) {
			dmat->bounce_zone = bz;
			return (0);
		}
	}

	if ((bz = (struct bounce_zone *)malloc(sizeof(*bz), M_DEVBUF,
	    M_NOWAIT | M_ZERO)) == NULL)
		return (ENOMEM);

	STAILQ_INIT(&bz->bounce_page_list);
	bz->free_bpages = 0;
	bz->reserved_bpages = 0;
	bz->active_bpages = 0;
	bz->lowaddr = dmat->lowaddr;
	bz->alignment = MAX(dmat->alignment, PAGE_SIZE);
	snprintf(bz->zoneid, 8, "zone%d", busdma_zonecount);
	busdma_zonecount++;
	snprintf(bz->lowaddrid, 18, "%#jx", (uintmax_t)bz->lowaddr);
	STAILQ_INSERT_TAIL(&bounce_zone_list, bz, links);
	dmat->bounce_zone = bz;

	sysctl_ctx_init(&bz->sysctl_tree);
	bz->sysctl_tree_top = SYSCTL_ADD_NODE(&bz->sysctl_tree,
	    SYSCTL_STATIC_CHILDREN(_hw_busdma), OID_AUTO, bz->zoneid,
	    CTLFLAG_RD, 0, "");
	if (bz->sysctl_tree_top == NULL) {
		sysctl_ctx_free(&bz->sysctl_tree);
		return (0);	/* XXX error code? */
	}

	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "total_bpages", CTLFLAG_RD, &bz->total_bpages, 0,
	    "Total bounce pages");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "free_bpages", CTLFLAG_RD, &bz->free_bpages, 0,
	    "Free bounce pages");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "reserved_bpages", CTLFLAG_RD, &bz->reserved_bpages, 0,
	    "Reserved bounce pages");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "active_bpages", CTLFLAG_RD, &bz->active_bpages, 0,
	    "Active bounce pages");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "total_bounced", CTLFLAG_RD, &bz->total_bounced, 0,
	    "Total bounce requests");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "total_deferred", CTLFLAG_RD, &bz->total_deferred, 0,
	    "Total bounce requests that were deferred");
	SYSCTL_ADD_STRING(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "lowaddr", CTLFLAG_RD, bz->lowaddrid, 0, "");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "alignment", CTLFLAG_RD, &bz->alignment, 0, "");

	return (0);
}
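
/*
 * The tree built above is visible from userland; for the first zone the
 * node and variable names used above produce output of this shape (the
 * values are illustrative):
 *
 *	hw.busdma.zone0.total_bpages: 32
 *	hw.busdma.zone0.free_bpages: 32
 *	hw.busdma.zone0.reserved_bpages: 0
 *	hw.busdma.zone0.active_bpages: 0
 *	hw.busdma.zone0.total_bounced: 0
 *	hw.busdma.zone0.total_deferred: 0
 *	hw.busdma.zone0.lowaddr: 0xffffff
 *	hw.busdma.zone0.alignment: 4096
 */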

static int
alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages)
{
	struct bounce_zone *bz;
	int count;

	bz = dmat->bounce_zone;
	count = 0;
	while (numpages > 0) {
		struct bounce_page *bpage;

		bpage = (struct bounce_page *)malloc(sizeof(*bpage), M_DEVBUF,
		    M_NOWAIT | M_ZERO);

		if (bpage == NULL)
			break;
		bpage->vaddr = (vm_offset_t)contigmalloc(PAGE_SIZE, M_DEVBUF,
		    M_NOWAIT, 0ul, bz->lowaddr, PAGE_SIZE, 0);
		if (bpage->vaddr == 0) {
			free(bpage, M_DEVBUF);
			break;
		}
		bpage->busaddr = pmap_kextract(bpage->vaddr);
		mtx_lock(&bounce_lock);
		STAILQ_INSERT_TAIL(&bz->bounce_page_list, bpage, links);
		total_bpages++;
		bz->total_bpages++;
		bz->free_bpages++;
		mtx_unlock(&bounce_lock);
		count++;
		numpages--;
	}
	return (count);
}

static int
reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map, int commit)
{
	struct bounce_zone *bz;
	int pages;

	mtx_assert(&bounce_lock, MA_OWNED);
	bz = dmat->bounce_zone;
	pages = MIN(bz->free_bpages, map->pagesneeded - map->pagesreserved);
	if (commit == 0 && map->pagesneeded > (map->pagesreserved + pages))
		return (map->pagesneeded - (map->pagesreserved + pages));
	bz->free_bpages -= pages;
	bz->reserved_bpages += pages;
	map->pagesreserved += pages;
	pages = map->pagesneeded - map->pagesreserved;

	return (pages);
}
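
/*
 * Worked example of the reservation arithmetic above, with illustrative
 * numbers: with free_bpages = 4, pagesneeded = 10 and pagesreserved = 0,
 * pages = MIN(4, 10) = 4.  A non-committing caller (commit == 0) sees
 * 10 - (0 + 4) = 6 and returns without touching the counters; a
 * committing caller takes the 4 pages and is told 6 are still owed.
 */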

static bus_addr_t
add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map, vm_offset_t vaddr,
		bus_size_t size)
{
	struct bounce_zone *bz;
	struct bounce_page *bpage;

	KASSERT(dmat->bounce_zone != NULL, ("no bounce zone in dma tag"));
	KASSERT(map != NULL && map != &nobounce_dmamap,
	    ("add_bounce_page: bad map %p", map));

	bz = dmat->bounce_zone;
	if (map->pagesneeded == 0)
		panic("add_bounce_page: map doesn't need any pages");
	map->pagesneeded--;

	if (map->pagesreserved == 0)
		panic("add_bounce_page: map doesn't have any reserved pages");
	map->pagesreserved--;

	mtx_lock(&bounce_lock);
	bpage = STAILQ_FIRST(&bz->bounce_page_list);
	if (bpage == NULL)
		panic("add_bounce_page: free page list is empty");

	STAILQ_REMOVE_HEAD(&bz->bounce_page_list, links);
	bz->reserved_bpages--;
	bz->active_bpages++;
	mtx_unlock(&bounce_lock);

	bpage->datavaddr = vaddr;
	bpage->datacount = size;
	STAILQ_INSERT_TAIL(&(map->bpages), bpage, links);
	return (bpage->busaddr);
}

static void
free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage)
{
	struct bus_dmamap *map;
	struct bounce_zone *bz;

	bz = dmat->bounce_zone;
	bpage->datavaddr = 0;
	bpage->datacount = 0;

	mtx_lock(&bounce_lock);
	STAILQ_INSERT_HEAD(&bz->bounce_page_list, bpage, links);
	bz->free_bpages++;
	bz->active_bpages--;
	if ((map = STAILQ_FIRST(&bounce_map_waitinglist)) != NULL) {
		if (reserve_bounce_pages(map->dmat, map, 1) == 0) {
			STAILQ_REMOVE_HEAD(&bounce_map_waitinglist, links);
			STAILQ_INSERT_TAIL(&bounce_map_callbacklist,
			    map, links);
			busdma_swi_pending = 1;
			bz->total_deferred++;
			swi_sched(vm_ih, 0);
		}
	}
	mtx_unlock(&bounce_lock);
}

void
busdma_swi(void)
{
	bus_dma_tag_t dmat;
	struct bus_dmamap *map;

	mtx_lock(&bounce_lock);
	while ((map = STAILQ_FIRST(&bounce_map_callbacklist)) != NULL) {
		STAILQ_REMOVE_HEAD(&bounce_map_callbacklist, links);
		mtx_unlock(&bounce_lock);
		dmat = map->dmat;
		(dmat->lockfunc)(dmat->lockfuncarg, BUS_DMA_LOCK);
		bus_dmamap_load(map->dmat, map, map->buf, map->buflen,
		    map->callback, map->callback_arg, /*flags*/0);
		(dmat->lockfunc)(dmat->lockfuncarg, BUS_DMA_UNLOCK);
		mtx_lock(&bounce_lock);
	}
	mtx_unlock(&bounce_lock);
}