1 /*
2 * Copyright (c) 1997 Justin T. Gibbs.
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions, and the following disclaimer,
10 * without modification, immediately at the beginning of the file.
11 * 2. The name of the author may not be used to endorse or promote products
12 * derived from this software without specific prior written permission.
13 *
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
18 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24 * SUCH DAMAGE.
25 *
26 * $FreeBSD: src/sys/i386/i386/busdma_machdep.c,v 1.6.2.2 1999/09/05 08:11:03 peter Exp $
27 */
28
29 #include <sys/param.h>
30 #include <sys/systm.h>
31 #include <sys/malloc.h>
32
33 #include <vm/vm.h>
34 #include <vm/vm_prot.h>
35 #include <vm/vm_page.h>
36
37 #include <machine/bus.h>
38 #include <machine/md_var.h>
39
/*
 * Local order-comparison helpers and the hard cap on the number of
 * bounce pages any single map may add to the global pool.
 */
#define MAX(a,b) (((a) > (b)) ? (a) : (b))
#define MIN(a,b) (((a) < (b)) ? (a) : (b))
#define MAX_BPAGES 128
43
/*
 * A DMA tag describes the addressing restrictions a device places on
 * DMA transfers.  Tags form a chain via 'parent'; run_filter() walks
 * the chain when deciding whether a physical address must be bounced.
 */
struct bus_dma_tag {
	bus_dma_tag_t parent;		/* tag whose restrictions we inherit */
	bus_size_t alignment;		/* segment alignment; NOTE(review): not
					 * consulted when building segments in
					 * this file */
	bus_size_t boundary;		/* boundary segments must not cross */
	bus_addr_t lowaddr;		/* bounce window: lowaddr < pa <= highaddr
					 * triggers bouncing (see run_filter) */
	bus_addr_t highaddr;		/* upper bound of the exclusion window */
	bus_dma_filter_t *filter;	/* optional per-address accept filter */
	void *filterarg;		/* opaque argument passed to filter */
	bus_size_t maxsize;		/* largest mapping this tag supports */
	u_int nsegments;		/* max scatter/gather segments */
	bus_size_t maxsegsz;		/* largest single segment */
	int flags;			/* BUS_DMA_* flags */
	int ref_count;			/* self ref + refs taken by child tags */
	int map_count;			/* dmamaps created against this tag */
};
59
/*
 * One page of device-addressable memory used to stand in for a client
 * page the device cannot reach.  Sync operations copy data between
 * 'vaddr' (the bounce buffer) and 'datavaddr' (the client's buffer).
 */
struct bounce_page {
	vm_offset_t vaddr;		/* kva of bounce buffer */
	bus_addr_t busaddr;		/* Physical address */
	vm_offset_t datavaddr;		/* kva of client data */
	bus_size_t datacount;		/* client data count */
	STAILQ_ENTRY(bounce_page) links;
};

/*
 * Set in free_bounce_page() when a waiting map becomes runnable;
 * NOTE(review): never cleared in this file — presumably the swi
 * dispatcher clears it before calling busdma_swi(); confirm.
 */
int busdma_swi_pending;

/* Global bounce-page pool and its accounting; mutated under splhigh(). */
static STAILQ_HEAD(bp_list, bounce_page) bounce_page_list;
static int free_bpages;		/* pages currently on bounce_page_list */
static int reserved_bpages;	/* pages promised to maps, not yet in use */
static int active_bpages;	/* pages currently holding client data */
static int total_bpages;	/* all pages ever added to the pool */
/*
 * Address limit the existing pool satisfies.  NOTE(review): never
 * updated when pages are allocated with a stricter lowaddr — the
 * "page reallocation not implemented" panics guard against misuse.
 */
static bus_addr_t bounce_lowaddr = BUS_SPACE_MAXADDR;
76
/*
 * A DMA map instance: the bounce pages backing one mapping plus the
 * state needed to re-drive a load that was deferred while waiting for
 * bounce pages (see bus_dmamap_load() / busdma_swi()).
 */
struct bus_dmamap {
	struct bp_list bpages;		/* bounce pages owned by this map */
	int pagesneeded;		/* pages still required for the load */
	int pagesreserved;		/* pages reserved from the pool */
	bus_dma_tag_t dmat;		/* saved for the deferred load */
	void *buf;			/* unmapped buffer pointer */
	bus_size_t buflen;		/* unmapped buffer length */
	bus_dmamap_callback_t *callback;	/* deferred completion callback */
	void *callback_arg;		/* argument for said callback */
	STAILQ_ENTRY(bus_dmamap) links;
};

/* Maps blocked on bounce pages, and maps ready for their callback. */
static STAILQ_HEAD(, bus_dmamap) bounce_map_waitinglist;
static STAILQ_HEAD(, bus_dmamap) bounce_map_callbacklist;
/* Shared dummy map handed out for tags that never need bouncing. */
static struct bus_dmamap nobounce_dmamap;

static int alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages);
static int reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map);
static vm_offset_t add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map,
				   vm_offset_t vaddr, bus_size_t size);
static void free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage);
static __inline int run_filter(bus_dma_tag_t dmat, bus_addr_t paddr);
99
100 static __inline int
101 run_filter(bus_dma_tag_t dmat, bus_addr_t paddr)
102 {
103 int retval;
104
105 retval = 0;
106 do {
107 if (paddr > dmat->lowaddr
108 && paddr <= dmat->highaddr
109 && (dmat->filter == NULL
110 || (*dmat->filter)(dmat->filterarg, paddr) != 0))
111 retval = 1;
112
113 dmat = dmat->parent;
114 } while (retval == 0 && dmat != NULL);
115 return (retval);
116 }
117
118 #define BUS_DMA_MIN_ALLOC_COMP BUS_DMA_BUS4
119 /*
120 * Allocate a device specific dma_tag.
121 */
122 int
123 bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment,
124 bus_size_t boundary, bus_addr_t lowaddr,
125 bus_addr_t highaddr, bus_dma_filter_t *filter,
126 void *filterarg, bus_size_t maxsize, int nsegments,
127 bus_size_t maxsegsz, int flags, bus_dma_tag_t *dmat)
128 {
129 bus_dma_tag_t newtag;
130 int error = 0;
131
132 /* Return a NULL tag on failure */
133 *dmat = NULL;
134
135 newtag = (bus_dma_tag_t)malloc(sizeof(*newtag), M_DEVBUF, M_NOWAIT);
136 if (newtag == NULL)
137 return (ENOMEM);
138
139 newtag->parent = parent;
140 newtag->boundary = boundary;
141 newtag->lowaddr = trunc_page(lowaddr) + (PAGE_SIZE - 1);
142 newtag->highaddr = trunc_page(highaddr) + (PAGE_SIZE - 1);
143 newtag->filter = filter;
144 newtag->filterarg = filterarg;
145 newtag->maxsize = maxsize;
146 newtag->nsegments = nsegments;
147 newtag->maxsegsz = maxsegsz;
148 newtag->flags = flags;
149 newtag->ref_count = 1; /* Count ourself */
150 newtag->map_count = 0;
151
152 /* Take into account any restrictions imposed by our parent tag */
153 if (parent != NULL) {
154 newtag->lowaddr = MIN(parent->lowaddr, newtag->lowaddr);
155 newtag->highaddr = MAX(parent->highaddr, newtag->highaddr);
156 /*
157 * XXX Not really correct??? Probably need to honor boundary
158 * all the way up the inheritence chain.
159 */
160 newtag->boundary = MAX(parent->boundary, newtag->boundary);
161 if (newtag->filter == NULL) {
162 /*
163 * Short circuit looking at our parent directly
164 * since we have encapsulated all of its information
165 */
166 newtag->filter = parent->filter;
167 newtag->filterarg = parent->filterarg;
168 newtag->parent = parent->parent;
169 }
170 if (newtag->parent != NULL) {
171 parent->ref_count++;
172 }
173 }
174
175 if (newtag->lowaddr < ptoa(Maxmem) && (flags & BUS_DMA_ALLOCNOW) != 0) {
176 /* Must bounce */
177
178 if (lowaddr > bounce_lowaddr) {
179 /*
180 * Go through the pool and kill any pages
181 * that don't reside below lowaddr.
182 */
183 panic("bus_dma_tag_create: page reallocation "
184 "not implemented");
185 }
186 if (ptoa(total_bpages) < maxsize) {
187 int pages;
188
189 pages = atop(maxsize) - total_bpages;
190
191 /* Add pages to our bounce pool */
192 if (alloc_bounce_pages(newtag, pages) < pages)
193 error = ENOMEM;
194 }
195 /* Performed initial allocation */
196 newtag->flags |= BUS_DMA_MIN_ALLOC_COMP;
197 }
198
199 if (error != 0) {
200 free(newtag, M_DEVBUF);
201 } else {
202 *dmat = newtag;
203 }
204 return (error);
205 }
206
207 int
208 bus_dma_tag_destroy(bus_dma_tag_t dmat)
209 {
210 if (dmat != NULL) {
211
212 if (dmat->map_count != 0)
213 return (EBUSY);
214
215 while (dmat != NULL) {
216 bus_dma_tag_t parent;
217
218 parent = dmat->parent;
219 dmat->ref_count--;
220 if (dmat->ref_count == 0) {
221 free(dmat, M_DEVBUF);
222 }
223 dmat = parent;
224 }
225 }
226 return (0);
227 }
228
229 /*
230 * Allocate a handle for mapping from kva/uva/physical
231 * address space into bus device space.
232 */
233 int
234 bus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp)
235 {
236 int error;
237
238 error = 0;
239
240 if (dmat->lowaddr < ptoa(Maxmem)) {
241 /* Must bounce */
242 int maxpages;
243
244 *mapp = (bus_dmamap_t)malloc(sizeof(**mapp), M_DEVBUF,
245 M_NOWAIT);
246 if (*mapp == NULL) {
247 return (ENOMEM);
248 } else {
249 /* Initialize the new map */
250 bzero(*mapp, sizeof(**mapp));
251 STAILQ_INIT(&((*mapp)->bpages));
252 }
253 /*
254 * Attempt to add pages to our pool on a per-instance
255 * basis up to a sane limit.
256 */
257 maxpages = MIN(MAX_BPAGES, Maxmem - atop(dmat->lowaddr));
258 if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0
259 || (dmat->map_count > 0
260 && total_bpages < maxpages)) {
261 int pages;
262
263 if (dmat->lowaddr > bounce_lowaddr) {
264 /*
265 * Go through the pool and kill any pages
266 * that don't reside below lowaddr.
267 */
268 panic("bus_dmamap_create: page reallocation "
269 "not implemented");
270 }
271 pages = atop(dmat->maxsize);
272 pages = MIN(maxpages - total_bpages, pages);
273 error = alloc_bounce_pages(dmat, pages);
274
275 if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0) {
276 if (error == 0)
277 dmat->flags |= BUS_DMA_MIN_ALLOC_COMP;
278 } else {
279 error = 0;
280 }
281 }
282 } else {
283 *mapp = &nobounce_dmamap;
284 }
285 if (error == 0)
286 dmat->map_count++;
287 return (error);
288 }
289
290 /*
291 * Destroy a handle for mapping from kva/uva/physical
292 * address space into bus device space.
293 */
294 int
295 bus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map)
296 {
297 if (map != NULL) {
298 if (STAILQ_FIRST(&map->bpages) != NULL)
299 return (EBUSY);
300 free(map, M_DEVBUF);
301 }
302 dmat->map_count--;
303 return (0);
304 }
305
306
/*
 * Allocate a piece of memory that can be efficiently mapped into
 * bus device space based on the constraints listed in the dma tag.
 * A dmamap for use with dmamap_load is also allocated.
 */
int
bus_dmamem_alloc(bus_dma_tag_t dmat, void** vaddr, int flags,
		 bus_dmamap_t *mapp)
{
	/* If we succeed, no mapping/bouncing will be required */
	*mapp = &nobounce_dmamap;

	/*
	 * A single-page allocation with no address restriction can be
	 * satisfied by plain malloc(); anything else goes through
	 * contigmalloc() to honor lowaddr and the boundary constraint.
	 */
	if ((dmat->maxsize <= PAGE_SIZE) && dmat->lowaddr >= ptoa(Maxmem)) {
		*vaddr = malloc(dmat->maxsize, M_DEVBUF,
				(flags & BUS_DMA_NOWAIT) ? M_NOWAIT : M_WAITOK);
	} else {
		/*
		 * XXX Use Contigmalloc until it is merged into this facility
		 *     and handles multi-seg allocations.  Nobody is doing
		 *     multi-seg allocations yet though.
		 *
		 * NOTE(review): the alignment argument is hardwired to 1ul;
		 * the tag's alignment field is not consulted — confirm no
		 * caller depends on aligned dmamem.
		 */
		*vaddr = contigmalloc(dmat->maxsize, M_DEVBUF,
				      (flags & BUS_DMA_NOWAIT)
				      ? M_NOWAIT : M_WAITOK,
				      0ul, dmat->lowaddr, 1ul, dmat->boundary);
	}
	if (*vaddr == NULL)
		return (ENOMEM);
	return (0);
}
337
/*
 * Free a piece of memory and its associated dmamap, which were
 * allocated via bus_dmamem_alloc.
 */
342 void
343 bus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map)
344 {
345 /*
346 * dmamem does not need to be bounced, so the map should be
347 * NULL
348 */
349 if (map != NULL)
350 panic("bus_dmamem_free: Invalid map freed\n");
351 free(vaddr, M_DEVBUF);
352 }
353
354 #define BUS_DMAMAP_NSEGS ((BUS_SPACE_MAXSIZE / PAGE_SIZE) + 1)
355
356 /*
357 * Map the buffer buf into bus space using the dmamap map.
358 */
359 int
360 bus_dmamap_load(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
361 bus_size_t buflen, bus_dmamap_callback_t *callback,
362 void *callback_arg, int flags)
363 {
364 vm_offset_t vaddr;
365 vm_offset_t paddr;
366 #ifdef __GNUC__
367 bus_dma_segment_t dm_segments[dmat->nsegments];
368 #else
369 bus_dma_segment_t dm_segments[BUS_DMAMAP_NSEGS];
370 #endif
371 bus_dma_segment_t *sg;
372 int seg;
373 int error;
374
375 error = 0;
376 /*
377 * If we are being called during a callback, pagesneeded will
378 * be non-zero, so we can avoid doing the work twice.
379 */
380 if (dmat->lowaddr < ptoa(Maxmem) && map->pagesneeded == 0) {
381 vm_offset_t vendaddr;
382
383 /*
384 * Count the number of bounce pages
385 * needed in order to complete this transfer
386 */
387 vaddr = trunc_page(buf);
388 vendaddr = (vm_offset_t)buf + buflen;
389
390 while (vaddr < vendaddr) {
391 paddr = pmap_kextract(vaddr);
392 if (run_filter(dmat, paddr) != 0) {
393
394 map->pagesneeded++;
395 }
396 vaddr += PAGE_SIZE;
397 }
398 }
399
400 /* Reserve Necessary Bounce Pages */
401 if (map->pagesneeded != 0) {
402 int s;
403
404 s = splhigh();
405 if (reserve_bounce_pages(dmat, map) != 0) {
406
407 /* Queue us for resources */
408 map->dmat = dmat;
409 map->buf = buf;
410 map->buflen = buflen;
411 map->callback = callback;
412 map->callback_arg = callback_arg;
413
414 STAILQ_INSERT_TAIL(&bounce_map_waitinglist, map, links);
415 splx(s);
416
417 return (EINPROGRESS);
418 }
419 splx(s);
420 }
421
422 vaddr = (vm_offset_t)buf;
423 sg = &dm_segments[0];
424 seg = 1;
425 sg->ds_len = 0;
426
427 do {
428 bus_size_t size;
429 vm_offset_t nextpaddr; /* GCC warning expected */
430
431 paddr = pmap_kextract(vaddr);
432 size = PAGE_SIZE - (paddr & PAGE_MASK);
433 if (size > buflen)
434 size = buflen;
435
436 if (map->pagesneeded != 0
437 && run_filter(dmat, paddr)) {
438 paddr = add_bounce_page(dmat, map, vaddr, size);
439 }
440
441 if (sg->ds_len == 0) {
442 sg->ds_addr = paddr;
443 sg->ds_len = size;
444 } else if (paddr == nextpaddr) {
445 sg->ds_len += size;
446 } else {
447 /* Go to the next segment */
448 sg++;
449 seg++;
450 if (seg > dmat->nsegments)
451 break;
452 sg->ds_addr = paddr;
453 sg->ds_len = size;
454 }
455 vaddr += size;
456 nextpaddr = paddr + size;
457 buflen -= size;
458 } while (buflen > 0);
459
460 if (buflen != 0) {
461 printf("bus_dmamap_load: Too many segs! buf_len = 0x%x\n",
462 buflen);
463 error = EFBIG;
464 }
465
466 (*callback)(callback_arg, dm_segments, seg, error);
467
468 return (0);
469 }
470
471 /*
472 * Release the mapping held by map.
473 */
474 void
475 _bus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t map)
476 {
477 struct bounce_page *bpage;
478
479 while ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) {
480 STAILQ_REMOVE_HEAD(&map->bpages, links);
481 free_bounce_page(dmat, bpage);
482 }
483 }
484
485 void
486 _bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op)
487 {
488 struct bounce_page *bpage;
489
490 if ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) {
491
492 /*
493 * Handle data bouncing. We might also
494 * want to add support for invalidating
495 * the caches on broken hardware
496 */
497 switch (op) {
498 case BUS_DMASYNC_PREWRITE:
499 while (bpage != NULL) {
500 bcopy((void *)bpage->datavaddr,
501 (void *)bpage->vaddr,
502 bpage->datacount);
503 bpage = STAILQ_NEXT(bpage, links);
504 }
505 break;
506
507 case BUS_DMASYNC_POSTREAD:
508 while (bpage != NULL) {
509 bcopy((void *)bpage->vaddr,
510 (void *)bpage->datavaddr,
511 bpage->datacount);
512 bpage = STAILQ_NEXT(bpage, links);
513 }
514 break;
515 case BUS_DMASYNC_PREREAD:
516 case BUS_DMASYNC_POSTWRITE:
517 /* No-ops */
518 break;
519 }
520 }
521 }
522
523 static int
524 alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages)
525 {
526 int count;
527
528 count = 0;
529 if (total_bpages == 0) {
530 STAILQ_INIT(&bounce_page_list);
531 STAILQ_INIT(&bounce_map_waitinglist);
532 STAILQ_INIT(&bounce_map_callbacklist);
533 }
534
535 while (numpages > 0) {
536 struct bounce_page *bpage;
537 int s;
538
539 bpage = (struct bounce_page *)malloc(sizeof(*bpage), M_DEVBUF,
540 M_NOWAIT);
541
542 if (bpage == NULL)
543 break;
544 bzero(bpage, sizeof(*bpage));
545 bpage->vaddr = (vm_offset_t)contigmalloc(PAGE_SIZE, M_DEVBUF,
546 M_NOWAIT, 0ul,
547 dmat->lowaddr,
548 PAGE_SIZE,
549 0);
550 if (bpage->vaddr == NULL) {
551 free(bpage, M_DEVBUF);
552 break;
553 }
554 bpage->busaddr = pmap_kextract(bpage->vaddr);
555 s = splhigh();
556 STAILQ_INSERT_TAIL(&bounce_page_list, bpage, links);
557 total_bpages++;
558 free_bpages++;
559 splx(s);
560 count++;
561 numpages--;
562 }
563 return (count);
564 }
565
566 static int
567 reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map)
568 {
569 int pages;
570
571 pages = MIN(free_bpages, map->pagesneeded - map->pagesreserved);
572 free_bpages -= pages;
573 reserved_bpages += pages;
574 map->pagesreserved += pages;
575 pages = map->pagesneeded - map->pagesreserved;
576
577 return (pages);
578 }
579
580 static vm_offset_t
581 add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map, vm_offset_t vaddr,
582 bus_size_t size)
583 {
584 int s;
585 struct bounce_page *bpage;
586
587 if (map->pagesneeded == 0)
588 panic("add_bounce_page: map doesn't need any pages");
589 map->pagesneeded--;
590
591 if (map->pagesreserved == 0)
592 panic("add_bounce_page: map doesn't need any pages");
593 map->pagesreserved--;
594
595 s = splhigh();
596 bpage = STAILQ_FIRST(&bounce_page_list);
597 if (bpage == NULL)
598 panic("add_bounce_page: free page list is empty");
599
600 STAILQ_REMOVE_HEAD(&bounce_page_list, links);
601 reserved_bpages--;
602 active_bpages++;
603 splx(s);
604
605 bpage->datavaddr = vaddr;
606 bpage->datacount = size;
607 STAILQ_INSERT_TAIL(&(map->bpages), bpage, links);
608 return (bpage->busaddr);
609 }
610
/*
 * Return a bounce page to the free pool.  If a map is waiting for
 * pages, attempt to complete its reservation; once fully reserved the
 * map moves to the callback list and busdma_swi() is scheduled to
 * re-drive its deferred bus_dmamap_load().
 */
static void
free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage)
{
	int s;
	struct bus_dmamap *map;

	bpage->datavaddr = 0;
	bpage->datacount = 0;

	s = splhigh();
	STAILQ_INSERT_HEAD(&bounce_page_list, bpage, links);
	free_bpages++;
	active_bpages--;
	/* Only the head waiter is considered; later waiters stay queued. */
	if ((map = STAILQ_FIRST(&bounce_map_waitinglist)) != NULL) {
		/* Zero return: the waiter now has all its pages. */
		if (reserve_bounce_pages(map->dmat, map) == 0) {
			STAILQ_REMOVE_HEAD(&bounce_map_waitinglist, links);
			STAILQ_INSERT_TAIL(&bounce_map_callbacklist,
					   map, links);
			busdma_swi_pending = 1;
			/* Schedule the VM software interrupt. */
			setsoftvm();
		}
	}
	splx(s);
}
635
636 void
637 busdma_swi()
638 {
639 int s;
640 struct bus_dmamap *map;
641
642 s = splhigh();
643 while ((map = STAILQ_FIRST(&bounce_map_callbacklist)) != NULL) {
644 STAILQ_REMOVE_HEAD(&bounce_map_callbacklist, links);
645 splx(s);
646 bus_dmamap_load(map->dmat, map, map->buf, map->buflen,
647 map->callback, map->callback_arg, /*flags*/0);
648 s = splhigh();
649 }
650 splx(s);
651 }
Cache object: 349a71babd1281565540d80958826cdc
|