/*
 * Copyright (c) 1997, 1998 Justin T. Gibbs.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>

#include <vm/vm.h>
#include <vm/vm_prot.h>
#include <vm/vm_page.h>

#include <machine/bus.h>
#include <machine/md_var.h>

#define MAX(a,b) (((a) > (b)) ? (a) : (b))
#define MIN(a,b) (((a) < (b)) ? (a) : (b))
#define MAX_BPAGES 128

struct bus_dma_tag {
        bus_dma_tag_t     parent;       /* tag we inherit restrictions from */
        bus_size_t        alignment;    /* alignment for segments */
        bus_size_t        boundary;     /* boundary segments must not cross */
        bus_addr_t        lowaddr;      /* bottom of restricted (bounce) window */
        bus_addr_t        highaddr;     /* top of restricted (bounce) window */
        bus_dma_filter_t *filter;       /* non-zero return forces a bounce */
        void             *filterarg;    /* argument passed to the filter */
        bus_size_t        maxsize;      /* maximum mapping size */
        u_int             nsegments;    /* maximum number of segments */
        bus_size_t        maxsegsz;     /* maximum size of a segment */
        int               flags;        /* BUS_DMA_* flags */
        int               ref_count;    /* references held (self + children) */
        int               map_count;    /* maps created with this tag */
};

struct bounce_page {
        vm_offset_t vaddr;              /* kva of bounce buffer */
        bus_addr_t  busaddr;            /* physical address */
        vm_offset_t datavaddr;          /* kva of client data */
        bus_size_t  datacount;          /* client data count */
        STAILQ_ENTRY(bounce_page) links;
};

int busdma_swi_pending;

static STAILQ_HEAD(bp_list, bounce_page) bounce_page_list;
static int free_bpages;
static int reserved_bpages;
static int active_bpages;
static int total_bpages;
static bus_addr_t bounce_lowaddr = BUS_SPACE_MAXADDR;

struct bus_dmamap {
        struct bp_list bpages;          /* bounce pages owned by this map */
        int            pagesneeded;     /* bounce pages this transfer needs */
        int            pagesreserved;   /* bounce pages reserved so far */
        bus_dma_tag_t  dmat;            /* tag for a deferred load */
        void          *buf;             /* unmapped buffer pointer */
        bus_size_t     buflen;          /* unmapped buffer length */
        bus_dmamap_callback_t *callback;        /* deferred load callback */
        void          *callback_arg;    /* deferred load callback argument */
        STAILQ_ENTRY(bus_dmamap) links;
};

static STAILQ_HEAD(, bus_dmamap) bounce_map_waitinglist;
static STAILQ_HEAD(, bus_dmamap) bounce_map_callbacklist;
static struct bus_dmamap nobounce_dmamap;

static int alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages);
static int reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map);
static vm_offset_t add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map,
                                   vm_offset_t vaddr, bus_size_t size);
static void free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage);
static __inline int run_filter(bus_dma_tag_t dmat, bus_addr_t paddr);

/*
 * Walk the tag hierarchy and return non-zero if the given physical
 * address must be bounced for this tag or any of its ancestors.
 */
static __inline int
run_filter(bus_dma_tag_t dmat, bus_addr_t paddr)
{
        int retval;

        retval = 0;
        do {
                if (paddr > dmat->lowaddr
                 && paddr <= dmat->highaddr
                 && (dmat->filter == NULL
                  || (*dmat->filter)(dmat->filterarg, paddr) != 0))
                        retval = 1;

                dmat = dmat->parent;
        } while (retval == 0 && dmat != NULL);
        return (retval);
}
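
/*
 * A hedged sketch of a client-supplied filter.  For an address that
 * falls inside the tag's restricted window, a non-zero return from the
 * filter confirms the page must be bounced, while a zero return exempts
 * it.  The function name and the 16MB cutoff below are hypothetical,
 * not taken from any real driver.
 *
 *      static int
 *      example_dma_filter(void *arg, bus_addr_t paddr)
 *      {
 *              return (paddr >= 0x1000000);
 *      }
 */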

#define BUS_DMA_MIN_ALLOC_COMP BUS_DMA_BUS4
/*
 * Allocate a device specific dma_tag.
 */
int
bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment,
                   bus_size_t boundary, bus_addr_t lowaddr,
                   bus_addr_t highaddr, bus_dma_filter_t *filter,
                   void *filterarg, bus_size_t maxsize, int nsegments,
                   bus_size_t maxsegsz, int flags, bus_dma_tag_t *dmat)
{
        bus_dma_tag_t newtag;
        int error = 0;

        /* Return a NULL tag on failure */
        *dmat = NULL;

        newtag = (bus_dma_tag_t)malloc(sizeof(*newtag), M_DEVBUF, M_NOWAIT);
        if (newtag == NULL)
                return (ENOMEM);

        newtag->parent = parent;
        newtag->alignment = alignment;
        newtag->boundary = boundary;
        newtag->lowaddr = trunc_page((vm_offset_t)lowaddr) + (PAGE_SIZE - 1);
        newtag->highaddr = trunc_page((vm_offset_t)highaddr) + (PAGE_SIZE - 1);
        newtag->filter = filter;
        newtag->filterarg = filterarg;
        newtag->maxsize = maxsize;
        newtag->nsegments = nsegments;
        newtag->maxsegsz = maxsegsz;
        newtag->flags = flags;
        newtag->ref_count = 1; /* Count ourself */
        newtag->map_count = 0;

        /* Take into account any restrictions imposed by our parent tag */
        if (parent != NULL) {
                newtag->lowaddr = MIN(parent->lowaddr, newtag->lowaddr);
                newtag->highaddr = MAX(parent->highaddr, newtag->highaddr);
                /*
                 * XXX Not really correct?  Probably need to honor the
                 * boundary all the way up the inheritance chain.
                 */
                newtag->boundary = MAX(parent->boundary, newtag->boundary);
                if (newtag->filter == NULL) {
                        /*
                         * Short circuit looking at our parent directly
                         * since we have encapsulated all of its information
                         */
                        newtag->filter = parent->filter;
                        newtag->filterarg = parent->filterarg;
                        newtag->parent = parent->parent;
                }
                if (newtag->parent != NULL) {
                        parent->ref_count++;
                }
        }

        if (newtag->lowaddr < ptoa(Maxmem) && (flags & BUS_DMA_ALLOCNOW) != 0) {
                /* Must bounce */

                if (lowaddr > bounce_lowaddr) {
                        /*
                         * Go through the pool and kill any pages
                         * that don't reside below lowaddr.
                         */
                        panic("bus_dma_tag_create: page reallocation "
                              "not implemented");
                }
                if (ptoa(total_bpages) < maxsize) {
                        int pages;

                        pages = atop(maxsize) - total_bpages;

                        /* Add pages to our bounce pool */
                        if (alloc_bounce_pages(newtag, pages) < pages)
                                error = ENOMEM;
                }
                /* Performed initial allocation */
                newtag->flags |= BUS_DMA_MIN_ALLOC_COMP;
        }

        if (error != 0) {
                free(newtag, M_DEVBUF);
        } else {
                *dmat = newtag;
        }
        return (error);
}
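
/*
 * A usage sketch, not from any real driver: a device that can only
 * address the low 16MB (e.g. an ISA-style board) might build its tag
 * as below.  The maxsize/nsegments/maxsegsz values are illustrative
 * assumptions.
 *
 *      bus_dma_tag_t tag;
 *      int error;
 *
 *      error = bus_dma_tag_create(NULL,        (parent)
 *          1, 0,                               (alignment, boundary)
 *          BUS_SPACE_MAXADDR_24BIT,            (lowaddr)
 *          BUS_SPACE_MAXADDR,                  (highaddr)
 *          NULL, NULL,                         (filter, filterarg)
 *          DFLTPHYS, 1, DFLTPHYS,              (maxsize, nsegments, maxsegsz)
 *          0, &tag);                           (flags, dmat)
 */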

int
bus_dma_tag_destroy(bus_dma_tag_t dmat)
{
        if (dmat != NULL) {
                if (dmat->map_count != 0)
                        return (EBUSY);

                while (dmat != NULL) {
                        bus_dma_tag_t parent;

                        parent = dmat->parent;
                        dmat->ref_count--;
                        if (dmat->ref_count == 0) {
                                free(dmat, M_DEVBUF);
                                /*
                                 * Last reference, so release our
                                 * reference on our parent and walk
                                 * up the chain.
                                 */
                                dmat = parent;
                        } else
                                dmat = NULL;
                }
        }
        return (0);
}

/*
 * Allocate a handle for mapping from kva/uva/physical
 * address space into bus device space.
 */
int
bus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp)
{
        int error;

        error = 0;

        if (dmat->lowaddr < ptoa(Maxmem)) {
                /* Must bounce */
                int maxpages;

                *mapp = (bus_dmamap_t)malloc(sizeof(**mapp), M_DEVBUF,
                                             M_NOWAIT);
                if (*mapp == NULL) {
                        return (ENOMEM);
                } else {
                        /* Initialize the new map */
                        bzero(*mapp, sizeof(**mapp));
                        STAILQ_INIT(&((*mapp)->bpages));
                }
                /*
                 * Attempt to add pages to our pool on a per-instance
                 * basis up to a sane limit.
                 */
                maxpages = MIN(MAX_BPAGES, Maxmem - atop(dmat->lowaddr));
                if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0
                 || (dmat->map_count > 0
                  && total_bpages < maxpages)) {
                        int pages;

                        if (dmat->lowaddr > bounce_lowaddr) {
                                /*
                                 * Go through the pool and kill any pages
                                 * that don't reside below lowaddr.
                                 */
                                panic("bus_dmamap_create: page reallocation "
                                      "not implemented");
                        }
                        pages = atop(dmat->maxsize);
                        pages = MIN(maxpages - total_bpages, pages);
                        error = alloc_bounce_pages(dmat, pages);

                        if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0) {
                                if (error == 0)
                                        dmat->flags |= BUS_DMA_MIN_ALLOC_COMP;
                        } else {
                                error = 0;
                        }
                }
        } else {
                *mapp = NULL;
        }
        if (error == 0)
                dmat->map_count++;
        return (error);
}
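
/*
 * Sketch of a typical map lifetime (names hypothetical).  Note that on
 * this platform *mapp may legitimately come back NULL when the tag
 * imposes no bouncing; a NULL map is handled by bus_dmamap_load() and
 * bus_dmamap_destroy() in this file.
 *
 *      bus_dmamap_t map;
 *
 *      if (bus_dmamap_create(tag, 0, &map) != 0)
 *              return (ENOMEM);
 *      (use the map for transfers)
 *      bus_dmamap_destroy(tag, map);
 */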

/*
 * Destroy a handle for mapping from kva/uva/physical
 * address space into bus device space.
 */
int
bus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map)
{
        if (map != NULL) {
                if (STAILQ_FIRST(&map->bpages) != NULL)
                        return (EBUSY);
                free(map, M_DEVBUF);
        }
        dmat->map_count--;
        return (0);
}

/*
 * Allocate a piece of memory that can be efficiently mapped into
 * bus device space based on the constraints listed in the dma tag.
 * A dmamap for use with bus_dmamap_load is also allocated.
 */
int
bus_dmamem_alloc(bus_dma_tag_t dmat, void **vaddr, int flags,
                 bus_dmamap_t *mapp)
{
        /* If we succeed, no mapping/bouncing will be required */
        *mapp = NULL;

        if ((dmat->maxsize <= PAGE_SIZE) && dmat->lowaddr >= ptoa(Maxmem)) {
                *vaddr = malloc(dmat->maxsize, M_DEVBUF,
                                (flags & BUS_DMA_NOWAIT) ? M_NOWAIT : M_WAITOK);
        } else {
                /*
                 * XXX Use contigmalloc until it is merged into this
                 *     facility and handles multi-seg allocations.
                 *     Nobody is doing multi-seg allocations yet though.
                 */
                *vaddr = contigmalloc(dmat->maxsize, M_DEVBUF,
                                      (flags & BUS_DMA_NOWAIT)
                                      ? M_NOWAIT : M_WAITOK,
                                      0ul, dmat->lowaddr, 1ul, dmat->boundary);
        }
        if (*vaddr == NULL)
                return (ENOMEM);
        return (0);
}
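
/*
 * A hedged sketch of allocating DMA-safe memory for a shared control
 * structure; the softc layout is hypothetical.
 *
 *      struct example_softc *sc;
 *      void *ctrl;
 *      bus_dmamap_t map;
 *
 *      if (bus_dmamem_alloc(sc->sc_dmat, &ctrl, BUS_DMA_NOWAIT,
 *                           &map) != 0)
 *              return (ENOMEM);
 *      (use ctrl; map is expected to be NULL here)
 *      bus_dmamem_free(sc->sc_dmat, ctrl, map);
 */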

/*
 * Free a piece of memory and its associated dmamap that were
 * allocated via bus_dmamem_alloc.
 */
void
bus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map)
{
        /*
         * dmamem does not need to be bounced, so the map should be
         * NULL
         */
        if (map != NULL)
                panic("bus_dmamem_free: Invalid map freed\n");
        /* XXX There is no "contigfree" and "free" doesn't work */
        if ((dmat->maxsize <= PAGE_SIZE) && dmat->lowaddr >= ptoa(Maxmem))
                free(vaddr, M_DEVBUF);
}

#define BUS_DMAMAP_NSEGS ((BUS_SPACE_MAXSIZE / PAGE_SIZE) + 1)

/*
 * Map the buffer buf into bus space using the dmamap map.
 */
int
bus_dmamap_load(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
                bus_size_t buflen, bus_dmamap_callback_t *callback,
                void *callback_arg, int flags)
{
        vm_offset_t vaddr;
        vm_offset_t paddr;
#ifdef __GNUC__
        bus_dma_segment_t dm_segments[dmat->nsegments];
#else
        bus_dma_segment_t dm_segments[BUS_DMAMAP_NSEGS];
#endif
        bus_dma_segment_t *sg;
        int seg;
        int error;

        if (map == NULL)
                map = &nobounce_dmamap;

        error = 0;
        /*
         * If we are being called during a callback, pagesneeded will
         * be non-zero, so we can avoid doing the work twice.
         */
        if (dmat->lowaddr < ptoa(Maxmem) && map->pagesneeded == 0) {
                vm_offset_t vendaddr;

                /*
                 * Count the number of bounce pages
                 * needed in order to complete this transfer
                 */
                vaddr = trunc_page((vm_offset_t)buf);
                vendaddr = (vm_offset_t)buf + buflen;

                while (vaddr < vendaddr) {
                        paddr = pmap_kextract(vaddr);
                        if (run_filter(dmat, paddr) != 0) {
                                map->pagesneeded++;
                        }
                        vaddr += PAGE_SIZE;
                }
        }

        /* Reserve the necessary bounce pages */
        if (map->pagesneeded != 0) {
                int s;

                s = splhigh();
                if (reserve_bounce_pages(dmat, map) != 0) {
                        /* Queue us for resources */
                        map->dmat = dmat;
                        map->buf = buf;
                        map->buflen = buflen;
                        map->callback = callback;
                        map->callback_arg = callback_arg;

                        STAILQ_INSERT_TAIL(&bounce_map_waitinglist, map, links);
                        splx(s);

                        return (EINPROGRESS);
                }
                splx(s);
        }

        vaddr = (vm_offset_t)buf;
        sg = &dm_segments[0];
        seg = 1;
        sg->ds_len = 0;

        {
                /*
                 * Note: nextpaddr is not used on the first loop iteration.
                 */
                vm_offset_t nextpaddr = 0;

                do {
                        bus_size_t size;

                        paddr = pmap_kextract(vaddr);
                        size = PAGE_SIZE - (paddr & PAGE_MASK);
                        if (size > buflen)
                                size = buflen;

                        if (map->pagesneeded != 0
                         && run_filter(dmat, paddr)) {
                                paddr = add_bounce_page(dmat, map,
                                                        vaddr, size);
                        }

                        if (sg->ds_len == 0) {
                                sg->ds_addr = paddr;
                                sg->ds_len = size;
                        } else if (paddr == nextpaddr) {
                                sg->ds_len += size;
                        } else {
                                /* Go to the next segment */
                                sg++;
                                seg++;
                                if (seg > dmat->nsegments)
                                        break;
                                sg->ds_addr = paddr;
                                sg->ds_len = size;
                        }
                        vaddr += size;
                        nextpaddr = paddr + size;
                        buflen -= size;
                } while (buflen > 0);
        }

        if (buflen != 0) {
                printf("bus_dmamap_load: Too many segs! buf_len = 0x%lx\n",
                       (u_long)buflen);
                error = EFBIG;
        }

        (*callback)(callback_arg, dm_segments, seg, error);

        return (0);
}
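
/*
 * A sketch of the load/callback handshake (all names hypothetical).
 * The callback receives the finished segment list; an EINPROGRESS
 * return means bounce pages were exhausted and the callback will be
 * invoked later from busdma_swi() once pages are freed.
 *
 *      static void
 *      example_load_cb(void *arg, bus_dma_segment_t *segs, int nseg,
 *                      int error)
 *      {
 *              struct example_softc *sc = arg;
 *
 *              if (error == 0)
 *                      sc->sc_busaddr = segs[0].ds_addr;
 *      }
 *
 *      error = bus_dmamap_load(sc->sc_dmat, sc->sc_map, sc->sc_buf,
 *                              sc->sc_buflen, example_load_cb, sc, 0);
 *      if (error != 0 && error != EINPROGRESS)
 *              (handle failure)
 */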

/*
 * Release the mapping held by map.
 */
void
_bus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t map)
{
        struct bounce_page *bpage;

        while ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) {
                STAILQ_REMOVE_HEAD(&map->bpages, links);
                free_bounce_page(dmat, bpage);
        }
}

void
_bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op)
{
        struct bounce_page *bpage;

        if ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) {
                /*
                 * Handle data bouncing.  We might also
                 * want to add support for invalidating
                 * the caches on broken hardware.
                 */
                switch (op) {
                case BUS_DMASYNC_PREWRITE:
                        while (bpage != NULL) {
                                bcopy((void *)bpage->datavaddr,
                                      (void *)bpage->vaddr,
                                      bpage->datacount);
                                bpage = STAILQ_NEXT(bpage, links);
                        }
                        break;

                case BUS_DMASYNC_POSTREAD:
                        while (bpage != NULL) {
                                bcopy((void *)bpage->vaddr,
                                      (void *)bpage->datavaddr,
                                      bpage->datacount);
                                bpage = STAILQ_NEXT(bpage, links);
                        }
                        break;
                case BUS_DMASYNC_PREREAD:
                case BUS_DMASYNC_POSTWRITE:
                        /* No-ops */
                        break;
                }
        }
}
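
/*
 * A sketch of the sync discipline implied above: PREWRITE copies
 * client data into the bounce pages before the device reads memory;
 * POSTREAD copies device-written data back afterwards.  The
 * bus_dmamap_sync() and bus_dmamap_unload() wrappers are assumed to
 * dispatch to the underscored routines in this file.
 *
 *      bus_dmamap_sync(tag, map, BUS_DMASYNC_PREWRITE);
 *      (start the transfer in which the device reads memory)
 *
 *      (after a transfer in which the device wrote memory)
 *      bus_dmamap_sync(tag, map, BUS_DMASYNC_POSTREAD);
 *      bus_dmamap_unload(tag, map);
 */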

static int
alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages)
{
        int count;

        count = 0;
        if (total_bpages == 0) {
                STAILQ_INIT(&bounce_page_list);
                STAILQ_INIT(&bounce_map_waitinglist);
                STAILQ_INIT(&bounce_map_callbacklist);
        }

        while (numpages > 0) {
                struct bounce_page *bpage;
                int s;

                bpage = (struct bounce_page *)malloc(sizeof(*bpage), M_DEVBUF,
                                                     M_NOWAIT);

                if (bpage == NULL)
                        break;
                bzero(bpage, sizeof(*bpage));
                bpage->vaddr = (vm_offset_t)contigmalloc(PAGE_SIZE, M_DEVBUF,
                                                         M_NOWAIT, 0ul,
                                                         dmat->lowaddr,
                                                         PAGE_SIZE,
                                                         0);
                if (bpage->vaddr == 0) {
                        free(bpage, M_DEVBUF);
                        break;
                }
                bpage->busaddr = pmap_kextract(bpage->vaddr);
                s = splhigh();
                STAILQ_INSERT_TAIL(&bounce_page_list, bpage, links);
                total_bpages++;
                free_bpages++;
                splx(s);
                count++;
                numpages--;
        }
        return (count);
}

/*
 * Reserve as many free bounce pages as possible for this map.
 * Returns the number of pages still needed; zero means the
 * reservation is complete.
 */
static int
reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map)
{
        int pages;

        pages = MIN(free_bpages, map->pagesneeded - map->pagesreserved);
        free_bpages -= pages;
        reserved_bpages += pages;
        map->pagesreserved += pages;
        pages = map->pagesneeded - map->pagesreserved;

        return (pages);
}

static vm_offset_t
add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map, vm_offset_t vaddr,
                bus_size_t size)
{
        int s;
        struct bounce_page *bpage;

        if (map->pagesneeded == 0)
                panic("add_bounce_page: map doesn't need any pages");
        map->pagesneeded--;

        if (map->pagesreserved == 0)
                panic("add_bounce_page: map doesn't have any pages reserved");
        map->pagesreserved--;

        s = splhigh();
        bpage = STAILQ_FIRST(&bounce_page_list);
        if (bpage == NULL)
                panic("add_bounce_page: free page list is empty");

        STAILQ_REMOVE_HEAD(&bounce_page_list, links);
        reserved_bpages--;
        active_bpages++;
        splx(s);

        bpage->datavaddr = vaddr;
        bpage->datacount = size;
        STAILQ_INSERT_TAIL(&(map->bpages), bpage, links);
        return (bpage->busaddr);
}

static void
free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage)
{
        int s;
        struct bus_dmamap *map;

        bpage->datavaddr = 0;
        bpage->datacount = 0;

        s = splhigh();
        STAILQ_INSERT_HEAD(&bounce_page_list, bpage, links);
        free_bpages++;
        active_bpages--;
        if ((map = STAILQ_FIRST(&bounce_map_waitinglist)) != NULL) {
                if (reserve_bounce_pages(map->dmat, map) == 0) {
                        STAILQ_REMOVE_HEAD(&bounce_map_waitinglist, links);
                        STAILQ_INSERT_TAIL(&bounce_map_callbacklist,
                                           map, links);
                        busdma_swi_pending = 1;
                        setsoftvm();
                }
        }
        splx(s);
}

void
busdma_swi(void)
{
        int s;
        struct bus_dmamap *map;

        s = splhigh();
        while ((map = STAILQ_FIRST(&bounce_map_callbacklist)) != NULL) {
                STAILQ_REMOVE_HEAD(&bounce_map_callbacklist, links);
                splx(s);
                bus_dmamap_load(map->dmat, map, map->buf, map->buflen,
                                map->callback, map->callback_arg, /*flags*/0);
                s = splhigh();
        }
        splx(s);
}