/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 1997, 1998 Justin T. Gibbs.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/domainset.h>
#include <sys/malloc.h>
#include <sys/bus.h>
#include <sys/interrupt.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/proc.h>
#include <sys/memdesc.h>
#include <sys/msan.h>
#include <sys/mutex.h>
#include <sys/sysctl.h>
#include <sys/uio.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>

#include <machine/atomic.h>
#include <machine/bus.h>
#include <machine/md_var.h>
#include <machine/specialreg.h>
#include <x86/include/busdma_impl.h>

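/*
 * Upper bound on the number of bounce pages a single bounce zone may
 * hold.  On i386 the larger limit applies only when more than 4GB of
 * physical memory is present, i.e. when bouncing is actually likely.
 */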
#ifdef __i386__
#define MAX_BPAGES (Maxmem > atop(0x100000000ULL) ? 8192 : 512)
#else
#define MAX_BPAGES 8192
#endif

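/*
 * Tag and map state kept in bounce_flags: BUS_DMA_COULD_BOUNCE marks tags
 * whose constraints may force bouncing, BUS_DMA_MIN_ALLOC_COMP records that
 * the initial bounce-page allocation for the tag has been done,
 * BUS_DMA_KMEM_ALLOC notes that dmamem came from kmem_alloc_*() rather than
 * malloc(9), and BUS_DMA_FORCE_MAP forces a real map to be allocated even
 * when no bouncing is needed (used by KMSAN).
 */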
enum {
	BUS_DMA_COULD_BOUNCE	= 0x01,
	BUS_DMA_MIN_ALLOC_COMP	= 0x02,
	BUS_DMA_KMEM_ALLOC	= 0x04,
	BUS_DMA_FORCE_MAP	= 0x08,
};

struct bounce_page;
struct bounce_zone;

struct bus_dma_tag {
	struct bus_dma_tag_common common;
	int			map_count;
	int			bounce_flags;
	bus_dma_segment_t	*segments;
	struct bounce_zone	*bounce_zone;
};

static SYSCTL_NODE(_hw, OID_AUTO, busdma, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
    "Busdma parameters");

struct bus_dmamap {
	STAILQ_HEAD(, bounce_page) bpages;
	int		       pagesneeded;
	int		       pagesreserved;
	bus_dma_tag_t	       dmat;
	struct memdesc	       mem;
	bus_dmamap_callback_t *callback;
	void		      *callback_arg;
	STAILQ_ENTRY(bus_dmamap) links;
#ifdef KMSAN
	struct memdesc	       kmsan_mem;
#endif
};

static struct bus_dmamap nobounce_dmamap;

static bool _bus_dmamap_pagesneeded(bus_dma_tag_t dmat, vm_paddr_t buf,
    bus_size_t buflen, int *pagesneeded);
static void _bus_dmamap_count_pages(bus_dma_tag_t dmat, bus_dmamap_t map,
    pmap_t pmap, void *buf, bus_size_t buflen, int flags);
static void _bus_dmamap_count_phys(bus_dma_tag_t dmat, bus_dmamap_t map,
    vm_paddr_t buf, bus_size_t buflen, int flags);

static MALLOC_DEFINE(M_BUSDMA, "busdma", "busdma metadata");

#define	dmat_alignment(dmat)	((dmat)->common.alignment)
#define	dmat_domain(dmat)	((dmat)->common.domain)
#define	dmat_flags(dmat)	((dmat)->common.flags)
#define	dmat_lowaddr(dmat)	((dmat)->common.lowaddr)
#define	dmat_lockfunc(dmat)	((dmat)->common.lockfunc)
#define	dmat_lockfuncarg(dmat)	((dmat)->common.lockfuncarg)

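/*
 * Pull in the MI bounce-page machinery.  It provides the bounce_page and
 * bounce_zone structures declared above along with the alloc_bounce_zone(),
 * alloc_bounce_pages(), _bus_dmamap_reserve_pages(), add_bounce_page() and
 * free_bounce_pages() helpers used throughout this file.
 */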
#include "../../kern/subr_busdma_bounce.c"

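/*
 * Grow the tag's bounce zone until it can cover a transfer of maxsize
 * bytes, creating the zone first if it does not exist yet.
 */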
static int
bounce_bus_dma_zone_setup(bus_dma_tag_t dmat)
{
	struct bounce_zone *bz;
	int error;

	/* Must bounce */
	if ((error = alloc_bounce_zone(dmat)) != 0)
		return (error);
	bz = dmat->bounce_zone;

	if (ptoa(bz->total_bpages) < dmat->common.maxsize) {
		int pages;

		pages = atop(dmat->common.maxsize) - bz->total_bpages;

		/* Add pages to our bounce pool */
		if (alloc_bounce_pages(dmat, pages) < pages)
			return (ENOMEM);
	}
	/* Performed initial allocation */
	dmat->bounce_flags |= BUS_DMA_MIN_ALLOC_COMP;

	return (0);
}

/*
 * Allocate a device specific dma_tag.
 */
static int
bounce_bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment,
    bus_addr_t boundary, bus_addr_t lowaddr, bus_addr_t highaddr,
    bus_dma_filter_t *filter, void *filterarg, bus_size_t maxsize,
    int nsegments, bus_size_t maxsegsz, int flags, bus_dma_lock_t *lockfunc,
    void *lockfuncarg, bus_dma_tag_t *dmat)
{
	bus_dma_tag_t newtag;
	int error;

	*dmat = NULL;
	error = common_bus_dma_tag_create(parent != NULL ? &parent->common :
	    NULL, alignment, boundary, lowaddr, highaddr, filter, filterarg,
	    maxsize, nsegments, maxsegsz, flags, lockfunc, lockfuncarg,
	    sizeof (struct bus_dma_tag), (void **)&newtag);
	if (error != 0)
		return (error);

	newtag->common.impl = &bus_dma_bounce_impl;
	newtag->map_count = 0;
	newtag->segments = NULL;

#ifdef KMSAN
	/*
	 * When KMSAN is configured, we need a map to store a memory descriptor
	 * which can be used for validation.
	 */
	newtag->bounce_flags |= BUS_DMA_FORCE_MAP;
#endif

	if (parent != NULL && (newtag->common.filter != NULL ||
	    (parent->bounce_flags & BUS_DMA_COULD_BOUNCE) != 0))
		newtag->bounce_flags |= BUS_DMA_COULD_BOUNCE;

	if (newtag->common.lowaddr < ptoa((vm_paddr_t)Maxmem) ||
	    newtag->common.alignment > 1)
		newtag->bounce_flags |= BUS_DMA_COULD_BOUNCE;

	if ((newtag->bounce_flags & BUS_DMA_COULD_BOUNCE) != 0 &&
	    (flags & BUS_DMA_ALLOCNOW) != 0)
		error = bounce_bus_dma_zone_setup(newtag);
	else
		error = 0;

	if (error != 0)
		free(newtag, M_DEVBUF);
	else
		*dmat = newtag;
	CTR4(KTR_BUSDMA, "%s returned tag %p tag flags 0x%x error %d",
	    __func__, newtag, (newtag != NULL ? newtag->common.flags : 0),
	    error);
	return (error);
}

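/*
 * Illustrative sketch only: drivers do not call the method above directly
 * but go through the MI bus_dma_tag_create(9) wrapper.  The softc field
 * and sizes below are hypothetical; DFLTPHYS and the BUS_SPACE_* limits
 * are standard kernel constants.
 *
 *	error = bus_dma_tag_create(
 *	    bus_get_dma_tag(dev),	parent
 *	    1, 0,			alignment, boundary
 *	    BUS_SPACE_MAXADDR_32BIT,	lowaddr: bounce anything above 4GB
 *	    BUS_SPACE_MAXADDR,		highaddr
 *	    NULL, NULL,			filter, filterarg
 *	    DFLTPHYS,			maxsize
 *	    1,				nsegments
 *	    DFLTPHYS,			maxsegsz
 *	    0,				flags
 *	    NULL, NULL,			lockfunc, lockfuncarg
 *	    &sc->dma_tag);		dmat
 */

/*
 * Report whether a physical buffer can be used for DMA as-is; true when
 * the tag cannot bounce at all or when no page of the buffer would need
 * to be bounced.
 */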
static bool
bounce_bus_dma_id_mapped(bus_dma_tag_t dmat, vm_paddr_t buf, bus_size_t buflen)
{

	if ((dmat->bounce_flags & BUS_DMA_COULD_BOUNCE) == 0)
		return (true);
	return (!_bus_dmamap_pagesneeded(dmat, buf, buflen, NULL));
}

/*
 * Update the domain for the tag.  We may need to reallocate the zone and
 * bounce pages.
 */
static int
bounce_bus_dma_tag_set_domain(bus_dma_tag_t dmat)
{

	KASSERT(dmat->map_count == 0,
	    ("bounce_bus_dma_tag_set_domain: Domain set after use.\n"));
	if ((dmat->bounce_flags & BUS_DMA_COULD_BOUNCE) == 0 ||
	    dmat->bounce_zone == NULL)
		return (0);
	dmat->bounce_flags &= ~BUS_DMA_MIN_ALLOC_COMP;
	return (bounce_bus_dma_zone_setup(dmat));
}

static int
bounce_bus_dma_tag_destroy(bus_dma_tag_t dmat)
{
#ifdef KTR
	bus_dma_tag_t dmat_copy = dmat;
#endif
	bus_dma_tag_t parent;
	int error;

	error = 0;

	if (dmat != NULL) {
		if (dmat->map_count != 0) {
			error = EBUSY;
			goto out;
		}
		while (dmat != NULL) {
			parent = (bus_dma_tag_t)dmat->common.parent;
			atomic_subtract_int(&dmat->common.ref_count, 1);
			if (dmat->common.ref_count == 0) {
				if (dmat->segments != NULL)
					free(dmat->segments, M_DEVBUF);
				free(dmat, M_DEVBUF);
				/*
				 * Last reference count, so
				 * release our reference
				 * count on our parent.
				 */
				dmat = parent;
			} else
				dmat = NULL;
		}
	}
out:
	CTR3(KTR_BUSDMA, "%s tag %p error %d", __func__, dmat_copy, error);
	return (error);
}

/*
 * Allocate a handle for mapping from kva/uva/physical
 * address space into bus device space.
 */
static int
bounce_bus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp)
{
	struct bounce_zone *bz;
	int error, maxpages, pages;

	error = 0;

	if (dmat->segments == NULL) {
		dmat->segments = malloc_domainset(
		    sizeof(bus_dma_segment_t) * dmat->common.nsegments,
		    M_DEVBUF, DOMAINSET_PREF(dmat->common.domain), M_NOWAIT);
		if (dmat->segments == NULL) {
			CTR3(KTR_BUSDMA, "%s: tag %p error %d",
			    __func__, dmat, ENOMEM);
			return (ENOMEM);
		}
	}

	if (dmat->bounce_flags & (BUS_DMA_COULD_BOUNCE | BUS_DMA_FORCE_MAP)) {
		*mapp = malloc_domainset(sizeof(**mapp), M_DEVBUF,
		    DOMAINSET_PREF(dmat->common.domain), M_NOWAIT | M_ZERO);
		if (*mapp == NULL) {
			CTR3(KTR_BUSDMA, "%s: tag %p error %d",
			    __func__, dmat, ENOMEM);
			return (ENOMEM);
		}
		STAILQ_INIT(&(*mapp)->bpages);
	} else {
		*mapp = NULL;
	}

	/*
	 * Bouncing might be required if the driver asks for an active
	 * exclusion region, a data alignment that is stricter than 1, and/or
	 * an active address boundary.
	 */
	if ((dmat->bounce_flags & BUS_DMA_COULD_BOUNCE) != 0) {
		/* Must bounce */
		if (dmat->bounce_zone == NULL &&
		    (error = alloc_bounce_zone(dmat)) != 0)
			goto out;
		bz = dmat->bounce_zone;

		/*
		 * Attempt to add pages to our pool on a per-instance
		 * basis up to a sane limit.
		 */
		if (dmat->common.alignment > 1)
			maxpages = MAX_BPAGES;
		else
			maxpages = MIN(MAX_BPAGES, Maxmem -
			    atop(dmat->common.lowaddr));
		if ((dmat->bounce_flags & BUS_DMA_MIN_ALLOC_COMP) == 0 ||
		    (bz->map_count > 0 && bz->total_bpages < maxpages)) {
			pages = MAX(atop(dmat->common.maxsize), 1);
			pages = MIN(dmat->common.nsegments, pages);
			pages = MIN(maxpages - bz->total_bpages, pages);
			pages = MAX(pages, 1);
			if (alloc_bounce_pages(dmat, pages) < pages)
				error = ENOMEM;
			if ((dmat->bounce_flags & BUS_DMA_MIN_ALLOC_COMP)
			    == 0) {
				if (error == 0) {
					dmat->bounce_flags |=
					    BUS_DMA_MIN_ALLOC_COMP;
				}
			} else
				error = 0;
		}
		bz->map_count++;
	}

out:
	if (error == 0) {
		dmat->map_count++;
	} else {
		free(*mapp, M_DEVBUF);
		*mapp = NULL;
	}

	CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
	    __func__, dmat, dmat->common.flags, error);
	return (error);
}

/*
 * Destroy a handle for mapping from kva/uva/physical
 * address space into bus device space.
 */
static int
bounce_bus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map)
{

	if (map != NULL && map != &nobounce_dmamap) {
		if (STAILQ_FIRST(&map->bpages) != NULL) {
			CTR3(KTR_BUSDMA, "%s: tag %p error %d",
			    __func__, dmat, EBUSY);
			return (EBUSY);
		}
		if (dmat->bounce_zone)
			dmat->bounce_zone->map_count--;
		free(map, M_DEVBUF);
	}
	dmat->map_count--;
	CTR2(KTR_BUSDMA, "%s: tag %p error 0", __func__, dmat);
	return (0);
}

/*
 * Allocate a piece of memory that can be efficiently mapped into
 * bus device space based on the constraints listed in the dma tag.
 * A dmamap for use with dmamap_load is also allocated.
 */
static int
bounce_bus_dmamem_alloc(bus_dma_tag_t dmat, void **vaddr, int flags,
    bus_dmamap_t *mapp)
{
	vm_memattr_t attr;
	int mflags;

	if (flags & BUS_DMA_NOWAIT)
		mflags = M_NOWAIT;
	else
		mflags = M_WAITOK;

	/* If we succeed, no mapping/bouncing will be required */
	*mapp = NULL;

	if (dmat->segments == NULL) {
		dmat->segments = (bus_dma_segment_t *)malloc_domainset(
		    sizeof(bus_dma_segment_t) * dmat->common.nsegments,
		    M_DEVBUF, DOMAINSET_PREF(dmat->common.domain), mflags);
		if (dmat->segments == NULL) {
			CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
			    __func__, dmat, dmat->common.flags, ENOMEM);
			return (ENOMEM);
		}
	}
	if (flags & BUS_DMA_ZERO)
		mflags |= M_ZERO;
	if (flags & BUS_DMA_NOCACHE)
		attr = VM_MEMATTR_UNCACHEABLE;
	else
		attr = VM_MEMATTR_DEFAULT;

	/*
	 * Allocate the buffer from the malloc(9) allocator if...
	 *  - It's small enough to fit into a single page.
	 *  - Its alignment requirement is also smaller than the page size.
	 *  - The low address requirement is fulfilled.
	 *  - Default cache attributes are requested (WB).
	 * else allocate non-contiguous pages if...
	 *  - The page count that could get allocated doesn't exceed
	 *    nsegments, even when the maximum segment size is less
	 *    than PAGE_SIZE.
	 *  - The alignment constraint isn't larger than a page boundary.
	 *  - There are no boundary-crossing constraints.
	 * else allocate a block of contiguous pages because one or more of the
	 * constraints is something that only the contig allocator can fulfill.
	 *
	 * Warn the user if malloc gets it wrong.
	 */
	if (dmat->common.maxsize <= PAGE_SIZE &&
	    dmat->common.alignment <= PAGE_SIZE &&
	    dmat->common.lowaddr >= ptoa((vm_paddr_t)Maxmem) &&
	    attr == VM_MEMATTR_DEFAULT) {
		*vaddr = malloc_domainset_aligned(dmat->common.maxsize,
		    dmat->common.alignment, M_DEVBUF,
		    DOMAINSET_PREF(dmat->common.domain), mflags);
		KASSERT(*vaddr == NULL || ((uintptr_t)*vaddr & PAGE_MASK) +
		    dmat->common.maxsize <= PAGE_SIZE,
		    ("bounce_bus_dmamem_alloc: multi-page alloc %p maxsize "
		    "%#jx align %#jx", *vaddr, (uintmax_t)dmat->common.maxsize,
		    (uintmax_t)dmat->common.alignment));
	} else if (dmat->common.nsegments >=
	    howmany(dmat->common.maxsize, MIN(dmat->common.maxsegsz,
	    PAGE_SIZE)) &&
	    dmat->common.alignment <= PAGE_SIZE &&
	    (dmat->common.boundary % PAGE_SIZE) == 0) {
		/* Page-based multi-segment allocations allowed */
		*vaddr = kmem_alloc_attr_domainset(
		    DOMAINSET_PREF(dmat->common.domain), dmat->common.maxsize,
		    mflags, 0ul, dmat->common.lowaddr, attr);
		dmat->bounce_flags |= BUS_DMA_KMEM_ALLOC;
	} else {
		*vaddr = kmem_alloc_contig_domainset(
		    DOMAINSET_PREF(dmat->common.domain), dmat->common.maxsize,
		    mflags, 0ul, dmat->common.lowaddr,
		    dmat->common.alignment != 0 ? dmat->common.alignment : 1ul,
		    dmat->common.boundary, attr);
		dmat->bounce_flags |= BUS_DMA_KMEM_ALLOC;
	}
	if (*vaddr == NULL) {
		CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
		    __func__, dmat, dmat->common.flags, ENOMEM);
		return (ENOMEM);
	} else if (!vm_addr_align_ok(vtophys(*vaddr), dmat->common.alignment)) {
		printf("bus_dmamem_alloc failed to align memory properly.\n");
	}
	CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
	    __func__, dmat, dmat->common.flags, 0);
	return (0);
}

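/*
 * Illustrative sketch only: a typical consumer pairs the alloc and load
 * methods above through the MI wrappers.  The softc fields, RING_SIZE and
 * ring_dma_cb() are hypothetical.
 *
 *	error = bus_dmamem_alloc(sc->dma_tag, &sc->ring,
 *	    BUS_DMA_NOWAIT | BUS_DMA_ZERO, &sc->dma_map);
 *	if (error == 0)
 *		error = bus_dmamap_load(sc->dma_tag, sc->dma_map, sc->ring,
 *		    RING_SIZE, ring_dma_cb, sc, BUS_DMA_NOWAIT);
 *
 * where ring_dma_cb() records segs[0].ds_addr as the device-visible
 * address of the ring.
 */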
/*
 * Free a piece of memory and its associated dmamap that were allocated
 * via bus_dmamem_alloc.  Make the same choice for free/kmem_free.
 */
static void
bounce_bus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map)
{
	/*
	 * dmamem does not need to be bounced, so the map should be
	 * NULL and the BUS_DMA_KMEM_ALLOC flag cleared if malloc()
	 * was used and set if kmem_alloc_contig() was used.
	 */
	if (map != NULL)
		panic("bus_dmamem_free: Invalid map freed\n");
	if ((dmat->bounce_flags & BUS_DMA_KMEM_ALLOC) == 0)
		free(vaddr, M_DEVBUF);
	else
		kmem_free(vaddr, dmat->common.maxsize);
	CTR3(KTR_BUSDMA, "%s: tag %p flags 0x%x", __func__, dmat,
	    dmat->bounce_flags);
}

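/*
 * Scan a physical range and decide whether any of it has to bounce for
 * this tag.  With a NULL pagesneeded pointer this acts as a pure
 * predicate and returns at the first bounceable page; otherwise the
 * number of bounce pages required is stored through the pointer.
 */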
static bool
_bus_dmamap_pagesneeded(bus_dma_tag_t dmat, vm_paddr_t buf, bus_size_t buflen,
    int *pagesneeded)
{
	vm_paddr_t curaddr;
	bus_size_t sgsize;
	int count;

	/*
	 * Count the number of bounce pages needed in order to
	 * complete this transfer
	 */
	count = 0;
	curaddr = buf;
	while (buflen != 0) {
		sgsize = MIN(buflen, dmat->common.maxsegsz);
		if (bus_dma_run_filter(&dmat->common, curaddr)) {
			sgsize = MIN(sgsize,
			    PAGE_SIZE - (curaddr & PAGE_MASK));
			if (pagesneeded == NULL)
				return (true);
			count++;
		}
		curaddr += sgsize;
		buflen -= sgsize;
	}

	if (pagesneeded != NULL)
		*pagesneeded = count;
	return (count != 0);
}

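/*
 * Count, for a physical buffer, the bounce pages a load will need,
 * skipping the static no-bounce map and maps that were already counted.
 */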
static void
_bus_dmamap_count_phys(bus_dma_tag_t dmat, bus_dmamap_t map, vm_paddr_t buf,
    bus_size_t buflen, int flags)
{

	if (map != &nobounce_dmamap && map->pagesneeded == 0) {
		_bus_dmamap_pagesneeded(dmat, buf, buflen, &map->pagesneeded);
		CTR1(KTR_BUSDMA, "pagesneeded= %d\n", map->pagesneeded);
	}
}

static void
_bus_dmamap_count_pages(bus_dma_tag_t dmat, bus_dmamap_t map, pmap_t pmap,
    void *buf, bus_size_t buflen, int flags)
{
	vm_offset_t vaddr;
	vm_offset_t vendaddr;
	vm_paddr_t paddr;
	bus_size_t sg_len;

	if (map != &nobounce_dmamap && map->pagesneeded == 0) {
		CTR4(KTR_BUSDMA, "lowaddr= %d Maxmem= %d, boundary= %d, "
		    "alignment= %d", dmat->common.lowaddr,
		    ptoa((vm_paddr_t)Maxmem),
		    dmat->common.boundary, dmat->common.alignment);
		CTR3(KTR_BUSDMA, "map= %p, nobouncemap= %p, pagesneeded= %d",
		    map, &nobounce_dmamap, map->pagesneeded);
		/*
		 * Count the number of bounce pages
		 * needed in order to complete this transfer
		 */
		vaddr = (vm_offset_t)buf;
		vendaddr = (vm_offset_t)buf + buflen;

		while (vaddr < vendaddr) {
			sg_len = PAGE_SIZE - ((vm_offset_t)vaddr & PAGE_MASK);
			if (pmap == kernel_pmap)
				paddr = pmap_kextract(vaddr);
			else
				paddr = pmap_extract(pmap, vaddr);
			if (bus_dma_run_filter(&dmat->common, paddr) != 0) {
				sg_len = roundup2(sg_len,
				    dmat->common.alignment);
				map->pagesneeded++;
			}
			vaddr += sg_len;
		}
		CTR1(KTR_BUSDMA, "pagesneeded= %d\n", map->pagesneeded);
	}
}

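/*
 * As above, but for a buffer described by an array of vm_page pointers
 * starting at offset ma_offs, as used for unmapped I/O.
 */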
static void
_bus_dmamap_count_ma(bus_dma_tag_t dmat, bus_dmamap_t map, struct vm_page **ma,
    int ma_offs, bus_size_t buflen, int flags)
{
	bus_size_t sg_len, max_sgsize;
	int page_index;
	vm_paddr_t paddr;

	if (map != &nobounce_dmamap && map->pagesneeded == 0) {
		CTR4(KTR_BUSDMA, "lowaddr= %d Maxmem= %d, boundary= %d, "
		    "alignment= %d", dmat->common.lowaddr,
		    ptoa((vm_paddr_t)Maxmem),
		    dmat->common.boundary, dmat->common.alignment);
		CTR3(KTR_BUSDMA, "map= %p, nobouncemap= %p, pagesneeded= %d",
		    map, &nobounce_dmamap, map->pagesneeded);

		/*
		 * Count the number of bounce pages
		 * needed in order to complete this transfer
		 */
		page_index = 0;
		while (buflen > 0) {
			paddr = VM_PAGE_TO_PHYS(ma[page_index]) + ma_offs;
			sg_len = PAGE_SIZE - ma_offs;
			max_sgsize = MIN(buflen, dmat->common.maxsegsz);
			sg_len = MIN(sg_len, max_sgsize);
			if (bus_dma_run_filter(&dmat->common, paddr) != 0) {
				sg_len = roundup2(sg_len,
				    dmat->common.alignment);
				sg_len = MIN(sg_len, max_sgsize);
				KASSERT(vm_addr_align_ok(sg_len,
				    dmat->common.alignment),
				    ("Segment size is not aligned"));
				map->pagesneeded++;
			}
			if (((ma_offs + sg_len) & ~PAGE_MASK) != 0)
				page_index++;
			ma_offs = (ma_offs + sg_len) & PAGE_MASK;
			KASSERT(buflen >= sg_len,
			    ("Segment length overruns original buffer"));
			buflen -= sg_len;
		}
		CTR1(KTR_BUSDMA, "pagesneeded= %d\n", map->pagesneeded);
	}
}

627
628 /*
629 * Add a single contiguous physical range to the segment list.
630 */
631 static bus_size_t
632 _bus_dmamap_addseg(bus_dma_tag_t dmat, bus_dmamap_t map, vm_paddr_t curaddr,
633 bus_size_t sgsize, bus_dma_segment_t *segs, int *segp)
634 {
635 int seg;
636
637 KASSERT(curaddr <= BUS_SPACE_MAXADDR,
638 ("ds_addr %#jx > BUS_SPACE_MAXADDR %#jx; dmat %p fl %#x low %#jx "
639 "hi %#jx",
640 (uintmax_t)curaddr, (uintmax_t)BUS_SPACE_MAXADDR,
641 dmat, dmat->bounce_flags, (uintmax_t)dmat->common.lowaddr,
642 (uintmax_t)dmat->common.highaddr));
643
644 /*
645 * Make sure we don't cross any boundaries.
646 */
647 if (!vm_addr_bound_ok(curaddr, sgsize, dmat->common.boundary))
648 sgsize = roundup2(curaddr, dmat->common.boundary) - curaddr;
649
650 /*
651 * Insert chunk into a segment, coalescing with
652 * previous segment if possible.
653 */
654 seg = *segp;
655 if (seg == -1) {
656 seg = 0;
657 segs[seg].ds_addr = curaddr;
658 segs[seg].ds_len = sgsize;
659 } else {
660 if (curaddr == segs[seg].ds_addr + segs[seg].ds_len &&
661 (segs[seg].ds_len + sgsize) <= dmat->common.maxsegsz &&
662 vm_addr_bound_ok(segs[seg].ds_addr,
663 segs[seg].ds_len + sgsize, dmat->common.boundary))
664 segs[seg].ds_len += sgsize;
665 else {
666 if (++seg >= dmat->common.nsegments)
667 return (0);
668 segs[seg].ds_addr = curaddr;
669 segs[seg].ds_len = sgsize;
670 }
671 }
672 *segp = seg;
673 return (sgsize);
674 }
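
/*
 * Worked example of the boundary clipping above: with a 64KB boundary
 * (0x10000), curaddr 0x1fc00 and sgsize 0x1000, the range would cross
 * 0x20000, so vm_addr_bound_ok() fails and sgsize is clipped to
 * roundup2(0x1fc00, 0x10000) - 0x1fc00 = 0x400.  The remaining bytes are
 * emitted as a new segment on a later iteration of the caller's loop.
 */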

/*
 * Utility function to load a physical buffer.  segp contains
 * the starting segment on entrance, and the ending segment on exit.
 */
static int
bounce_bus_dmamap_load_phys(bus_dma_tag_t dmat, bus_dmamap_t map,
    vm_paddr_t buf, bus_size_t buflen, int flags, bus_dma_segment_t *segs,
    int *segp)
{
	bus_size_t sgsize;
	vm_paddr_t curaddr;
	int error;

	if (map == NULL)
		map = &nobounce_dmamap;

	if (segs == NULL)
		segs = dmat->segments;

	if ((dmat->bounce_flags & BUS_DMA_COULD_BOUNCE) != 0) {
		_bus_dmamap_count_phys(dmat, map, buf, buflen, flags);
		if (map->pagesneeded != 0) {
			error = _bus_dmamap_reserve_pages(dmat, map, flags);
			if (error)
				return (error);
		}
	}

	while (buflen > 0) {
		curaddr = buf;
		sgsize = MIN(buflen, dmat->common.maxsegsz);
		if ((dmat->bounce_flags & BUS_DMA_COULD_BOUNCE) != 0 &&
		    map->pagesneeded != 0 &&
		    bus_dma_run_filter(&dmat->common, curaddr)) {
			sgsize = MIN(sgsize, PAGE_SIZE - (curaddr & PAGE_MASK));
			curaddr = add_bounce_page(dmat, map, 0, curaddr, 0,
			    sgsize);
		}
		sgsize = _bus_dmamap_addseg(dmat, map, curaddr, sgsize, segs,
		    segp);
		if (sgsize == 0)
			break;
		buf += sgsize;
		buflen -= sgsize;
	}

	/*
	 * Did we fit?
	 */
	return (buflen != 0 ? EFBIG : 0); /* XXX better return value here? */
}

/*
 * Utility function to load a linear buffer.  segp contains
 * the starting segment on entrance, and the ending segment on exit.
 */
static int
bounce_bus_dmamap_load_buffer(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
    bus_size_t buflen, pmap_t pmap, int flags, bus_dma_segment_t *segs,
    int *segp)
{
	bus_size_t sgsize, max_sgsize;
	vm_paddr_t curaddr;
	vm_offset_t kvaddr, vaddr;
	int error;

	if (map == NULL)
		map = &nobounce_dmamap;

	if (segs == NULL)
		segs = dmat->segments;

	if ((dmat->bounce_flags & BUS_DMA_COULD_BOUNCE) != 0) {
		_bus_dmamap_count_pages(dmat, map, pmap, buf, buflen, flags);
		if (map->pagesneeded != 0) {
			error = _bus_dmamap_reserve_pages(dmat, map, flags);
			if (error)
				return (error);
		}
	}

	vaddr = (vm_offset_t)buf;
	while (buflen > 0) {
		/*
		 * Get the physical address for this segment.
		 */
		if (pmap == kernel_pmap) {
			curaddr = pmap_kextract(vaddr);
			kvaddr = vaddr;
		} else {
			curaddr = pmap_extract(pmap, vaddr);
			kvaddr = 0;
		}

		/*
		 * Compute the segment size, and adjust counts.
		 */
		max_sgsize = MIN(buflen, dmat->common.maxsegsz);
		sgsize = PAGE_SIZE - (curaddr & PAGE_MASK);
		if ((dmat->bounce_flags & BUS_DMA_COULD_BOUNCE) != 0 &&
		    map->pagesneeded != 0 &&
		    bus_dma_run_filter(&dmat->common, curaddr)) {
			sgsize = roundup2(sgsize, dmat->common.alignment);
			sgsize = MIN(sgsize, max_sgsize);
			curaddr = add_bounce_page(dmat, map, kvaddr, curaddr, 0,
			    sgsize);
		} else {
			sgsize = MIN(sgsize, max_sgsize);
		}
		sgsize = _bus_dmamap_addseg(dmat, map, curaddr, sgsize, segs,
		    segp);
		if (sgsize == 0)
			break;
		vaddr += sgsize;
		buflen -= sgsize;
	}

	/*
	 * Did we fit?
	 */
	return (buflen != 0 ? EFBIG : 0); /* XXX better return value here? */
}

static int
bounce_bus_dmamap_load_ma(bus_dma_tag_t dmat, bus_dmamap_t map,
    struct vm_page **ma, bus_size_t buflen, int ma_offs, int flags,
    bus_dma_segment_t *segs, int *segp)
{
	vm_paddr_t paddr, next_paddr;
	int error, page_index;
	bus_size_t sgsize, max_sgsize;

	if (dmat->common.flags & BUS_DMA_KEEP_PG_OFFSET) {
		/*
		 * If we have to keep the offset of each page this function
		 * is not suitable, switch back to bus_dmamap_load_ma_triv
		 * which is going to do the right thing in this case.
		 */
		error = bus_dmamap_load_ma_triv(dmat, map, ma, buflen, ma_offs,
		    flags, segs, segp);
		return (error);
	}

	if (map == NULL)
		map = &nobounce_dmamap;

	if (segs == NULL)
		segs = dmat->segments;

	if ((dmat->bounce_flags & BUS_DMA_COULD_BOUNCE) != 0) {
		_bus_dmamap_count_ma(dmat, map, ma, ma_offs, buflen, flags);
		if (map->pagesneeded != 0) {
			error = _bus_dmamap_reserve_pages(dmat, map, flags);
			if (error)
				return (error);
		}
	}

	page_index = 0;
	while (buflen > 0) {
		/*
		 * Compute the segment size, and adjust counts.
		 */
		paddr = VM_PAGE_TO_PHYS(ma[page_index]) + ma_offs;
		max_sgsize = MIN(buflen, dmat->common.maxsegsz);
		sgsize = PAGE_SIZE - ma_offs;
		if ((dmat->bounce_flags & BUS_DMA_COULD_BOUNCE) != 0 &&
		    map->pagesneeded != 0 &&
		    bus_dma_run_filter(&dmat->common, paddr)) {
			sgsize = roundup2(sgsize, dmat->common.alignment);
			sgsize = MIN(sgsize, max_sgsize);
			KASSERT(vm_addr_align_ok(sgsize,
			    dmat->common.alignment),
			    ("Segment size is not aligned"));
			/*
			 * Check if two pages of the user provided buffer
			 * are used.
			 */
			if ((ma_offs + sgsize) > PAGE_SIZE)
				next_paddr =
				    VM_PAGE_TO_PHYS(ma[page_index + 1]);
			else
				next_paddr = 0;
			paddr = add_bounce_page(dmat, map, 0, paddr,
			    next_paddr, sgsize);
		} else {
			sgsize = MIN(sgsize, max_sgsize);
		}
		sgsize = _bus_dmamap_addseg(dmat, map, paddr, sgsize, segs,
		    segp);
		if (sgsize == 0)
			break;
		KASSERT(buflen >= sgsize,
		    ("Segment length overruns original buffer"));
		buflen -= sgsize;
		if (((ma_offs + sgsize) & ~PAGE_MASK) != 0)
			page_index++;
		ma_offs = (ma_offs + sgsize) & PAGE_MASK;
	}

	/*
	 * Did we fit?
	 */
	return (buflen != 0 ? EFBIG : 0); /* XXX better return value here? */
}

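/*
 * Record the client's memory descriptor and callback so that a load that
 * ran out of bounce pages can be retried; busdma_swi() re-issues the load
 * and invokes the callback once enough pages have been freed.
 */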
static void
bounce_bus_dmamap_waitok(bus_dma_tag_t dmat, bus_dmamap_t map,
    struct memdesc *mem, bus_dmamap_callback_t *callback, void *callback_arg)
{

	if (map == NULL)
		return;
	map->mem = *mem;
	map->dmat = dmat;
	map->callback = callback;
	map->callback_arg = callback_arg;
}

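/*
 * Nothing needs to be fixed up after a successful load; just hand back
 * the segment array that was filled in.
 */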
static bus_dma_segment_t *
bounce_bus_dmamap_complete(bus_dma_tag_t dmat, bus_dmamap_t map,
    bus_dma_segment_t *segs, int nsegs, int error)
{

	if (segs == NULL)
		segs = dmat->segments;
	return (segs);
}

/*
 * Release the mapping held by map.
 */
static void
bounce_bus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t map)
{
	if (map == NULL)
		return;

	free_bounce_pages(dmat, map);
}

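/*
 * Copy bounced data to or from the client's buffer: BUS_DMASYNC_PREWRITE
 * copies client data into the bounce pages before the device reads them,
 * and BUS_DMASYNC_POSTREAD copies device-written data back out.  Unmapped
 * buffers are reached through temporary pmap_quick_enter_page() mappings.
 */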
static void
bounce_bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map,
    bus_dmasync_op_t op)
{
	struct bounce_page *bpage;
	vm_offset_t datavaddr, tempvaddr;
	bus_size_t datacount1, datacount2;

	if (map == NULL)
		goto out;
	kmsan_bus_dmamap_sync(&map->kmsan_mem, op);
	if ((bpage = STAILQ_FIRST(&map->bpages)) == NULL)
		goto out;

	/*
	 * Handle data bouncing.  We might also want to add support for
	 * invalidating the caches on broken hardware.
	 */
	CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x op 0x%x "
	    "performing bounce", __func__, dmat, dmat->common.flags, op);

	if ((op & BUS_DMASYNC_PREWRITE) != 0) {
		while (bpage != NULL) {
			tempvaddr = 0;
			datavaddr = bpage->datavaddr;
			datacount1 = bpage->datacount;
			if (datavaddr == 0) {
				tempvaddr =
				    pmap_quick_enter_page(bpage->datapage[0]);
				datavaddr = tempvaddr | bpage->dataoffs;
				datacount1 = min(PAGE_SIZE - bpage->dataoffs,
				    datacount1);
			}

			bcopy((void *)datavaddr,
			    (void *)bpage->vaddr, datacount1);

			if (tempvaddr != 0)
				pmap_quick_remove_page(tempvaddr);

			if (bpage->datapage[1] == 0) {
				KASSERT(datacount1 == bpage->datacount,
		("Mismatch between data size and provided memory space"));
				goto next_w;
			}

			/*
			 * We are dealing with an unmapped buffer that spans
			 * two pages.
			 */
			datavaddr = pmap_quick_enter_page(bpage->datapage[1]);
			datacount2 = bpage->datacount - datacount1;
			bcopy((void *)datavaddr,
			    (void *)(bpage->vaddr + datacount1), datacount2);
			pmap_quick_remove_page(datavaddr);

next_w:
			bpage = STAILQ_NEXT(bpage, links);
		}
		dmat->bounce_zone->total_bounced++;
	}

	if ((op & BUS_DMASYNC_POSTREAD) != 0) {
		while (bpage != NULL) {
			tempvaddr = 0;
			datavaddr = bpage->datavaddr;
			datacount1 = bpage->datacount;
			if (datavaddr == 0) {
				tempvaddr =
				    pmap_quick_enter_page(bpage->datapage[0]);
				datavaddr = tempvaddr | bpage->dataoffs;
				datacount1 = min(PAGE_SIZE - bpage->dataoffs,
				    datacount1);
			}

			bcopy((void *)bpage->vaddr, (void *)datavaddr,
			    datacount1);

			if (tempvaddr != 0)
				pmap_quick_remove_page(tempvaddr);

			if (bpage->datapage[1] == 0) {
				KASSERT(datacount1 == bpage->datacount,
		("Mismatch between data size and provided memory space"));
				goto next_r;
			}

			/*
			 * We are dealing with an unmapped buffer that spans
			 * two pages.
			 */
			datavaddr = pmap_quick_enter_page(bpage->datapage[1]);
			datacount2 = bpage->datacount - datacount1;
			bcopy((void *)(bpage->vaddr + datacount1),
			    (void *)datavaddr, datacount2);
			pmap_quick_remove_page(datavaddr);

next_r:
			bpage = STAILQ_NEXT(bpage, links);
		}
		dmat->bounce_zone->total_bounced++;
	}
out:
	atomic_thread_fence_rel();
}

#ifdef KMSAN
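/*
 * Stash the memory descriptor for the loaded buffer so that
 * bounce_bus_dmamap_sync() can tell KMSAN which client memory a sync
 * operation touches.
 */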
static void
bounce_bus_dmamap_load_kmsan(bus_dmamap_t map, struct memdesc *mem)
{
	if (map == NULL)
		return;
	memcpy(&map->kmsan_mem, mem, sizeof(map->kmsan_mem));
}
#endif

struct bus_dma_impl bus_dma_bounce_impl = {
	.tag_create = bounce_bus_dma_tag_create,
	.tag_destroy = bounce_bus_dma_tag_destroy,
	.tag_set_domain = bounce_bus_dma_tag_set_domain,
	.id_mapped = bounce_bus_dma_id_mapped,
	.map_create = bounce_bus_dmamap_create,
	.map_destroy = bounce_bus_dmamap_destroy,
	.mem_alloc = bounce_bus_dmamem_alloc,
	.mem_free = bounce_bus_dmamem_free,
	.load_phys = bounce_bus_dmamap_load_phys,
	.load_buffer = bounce_bus_dmamap_load_buffer,
	.load_ma = bounce_bus_dmamap_load_ma,
	.map_waitok = bounce_bus_dmamap_waitok,
	.map_complete = bounce_bus_dmamap_complete,
	.map_unload = bounce_bus_dmamap_unload,
	.map_sync = bounce_bus_dmamap_sync,
#ifdef KMSAN
	.load_kmsan = bounce_bus_dmamap_load_kmsan,
#endif
};