1 /*
2 * Copyright (c) 2002 Peter Grehan
3 * Copyright (c) 1997, 1998 Justin T. Gibbs.
4 * All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions, and the following disclaimer,
11 * without modification, immediately at the beginning of the file.
12 * 2. The name of the author may not be used to endorse or promote products
13 * derived from this software without specific prior written permission.
14 *
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
19 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25 * SUCH DAMAGE.
26 *
27 * From i386/busdma_machdep.c,v 1.26 2002/04/19 22:58:09 alfred
28 */
29
30 #include <sys/cdefs.h>
31 __FBSDID("$FreeBSD: releng/5.1/sys/powerpc/powerpc/busdma_machdep.c 115343 2003-05-27 04:59:59Z scottl $");
32
33 /*
34 * MacPPC bus dma support routines
35 */
36
37 #include <sys/param.h>
38 #include <sys/systm.h>
39 #include <sys/malloc.h>
40 #include <sys/bus.h>
41 #include <sys/interrupt.h>
42 #include <sys/lock.h>
43 #include <sys/proc.h>
44 #include <sys/mutex.h>
45 #include <sys/mbuf.h>
46 #include <sys/uio.h>
47
48 #include <vm/vm.h>
49 #include <vm/vm_page.h>
50 #include <vm/vm_map.h>
51
52 #include <machine/atomic.h>
53 #include <machine/bus.h>
54 #include <machine/cpufunc.h>
55
/*
 * DMA constraint/capability descriptor.  Tags inherit restrictions from
 * their parent at creation time (see bus_dma_tag_create()).
 */
struct bus_dma_tag {
	bus_dma_tag_t parent;		/* chain of inherited restrictions */
	bus_size_t alignment;		/* alignment for segments */
	bus_size_t boundary;		/* boundary segments must not cross */
	bus_addr_t lowaddr;		/* exclusion window low address */
	bus_addr_t highaddr;		/* exclusion window high address */
	bus_dma_filter_t *filter;	/* optional address-filter callback */
	void *filterarg;		/* argument passed to filter */
	bus_size_t maxsize;		/* max size of a single mapping */
	u_int nsegments;		/* max number of S/G segments */
	bus_size_t maxsegsz;		/* max size of a single segment */
	int flags;			/* BUS_DMA_* creation flags */
	int ref_count;			/* self + child-tag references */
	int map_count;			/* outstanding maps; blocks destroy */
};
71
/*
 * Per-mapping state.  NOTE(review): in this file bus_dmamap_create()
 * always hands back a NULL map, so this structure is declared but never
 * actually allocated here — presumably kept for API symmetry with other
 * platforms; verify before relying on these fields.
 */
struct bus_dmamap {
	bus_dma_tag_t dmat;		/* tag the map was created from */
	void *buf;			/* unmapped buffer pointer */
	bus_size_t buflen;		/* unmapped buffer length */
	bus_dmamap_callback_t *callback;	/* deferred-load callback */
	void *callback_arg;		/* argument for callback */
};
79
/*
 * Allocate a device specific dma_tag.
 *
 * All constraint arguments are recorded in the new tag, then tightened
 * against the parent tag (if any) so that the child never permits an
 * address the parent would reject.  On success *dmat holds the new tag
 * and 0 is returned; on allocation failure *dmat is NULL and ENOMEM is
 * returned.
 */
int
bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment,
		   bus_size_t boundary, bus_addr_t lowaddr,
		   bus_addr_t highaddr, bus_dma_filter_t *filter,
		   void *filterarg, bus_size_t maxsize, int nsegments,
		   bus_size_t maxsegsz, int flags, bus_dma_tag_t *dmat)
{
	bus_dma_tag_t newtag;
	int error = 0;

	/* Return a NULL tag on failure */
	*dmat = NULL;

	newtag = (bus_dma_tag_t)malloc(sizeof(*newtag), M_DEVBUF, M_NOWAIT);
	if (newtag == NULL)
		return (ENOMEM);

	newtag->parent = parent;
	newtag->alignment = alignment;
	newtag->boundary = boundary;
	/* Round each limit up to the last byte of its page. */
	newtag->lowaddr = trunc_page((vm_offset_t)lowaddr) + (PAGE_SIZE - 1);
	newtag->highaddr = trunc_page((vm_offset_t)highaddr) + (PAGE_SIZE - 1);
	newtag->filter = filter;
	newtag->filterarg = filterarg;
	newtag->maxsize = maxsize;
	newtag->nsegments = nsegments;
	newtag->maxsegsz = maxsegsz;
	newtag->flags = flags;
	newtag->ref_count = 1; /* Count ourself */
	newtag->map_count = 0;

	/*
	 * Take into account any restrictions imposed by our parent tag
	 */
	if (parent != NULL) {
		newtag->lowaddr = min(parent->lowaddr, newtag->lowaddr);
		newtag->highaddr = max(parent->highaddr, newtag->highaddr);

		/*
		 * XXX Not really correct??? Probably need to honor boundary
		 * all the way up the inheritence chain.
		 */
		newtag->boundary = max(parent->boundary, newtag->boundary);
		if (newtag->filter == NULL) {
			/*
			 * Short circuit looking at our parent directly
			 * since we have encapsulated all of its information
			 */
			newtag->filter = parent->filter;
			newtag->filterarg = parent->filterarg;
			newtag->parent = parent->parent;
		}
		/*
		 * NOTE(review): after the short circuit above,
		 * newtag->parent may be the grandparent, yet the
		 * reference is taken on the immediate parent here while
		 * bus_dma_tag_destroy() walks and drops references along
		 * newtag->parent.  Verify this pairing is intended.
		 */
		if (newtag->parent != NULL)
			atomic_add_int(&parent->ref_count, 1);
	}

	*dmat = newtag;
	return (error);
}
142
143 int
144 bus_dma_tag_destroy(bus_dma_tag_t dmat)
145 {
146 if (dmat != NULL) {
147
148 if (dmat->map_count != 0)
149 return (EBUSY);
150
151 while (dmat != NULL) {
152 bus_dma_tag_t parent;
153
154 parent = dmat->parent;
155 atomic_subtract_int(&dmat->ref_count, 1);
156 if (dmat->ref_count == 0) {
157 free(dmat, M_DEVBUF);
158 /*
159 * Last reference count, so
160 * release our reference
161 * count on our parent.
162 */
163 dmat = parent;
164 } else
165 dmat = NULL;
166 }
167 }
168 return (0);
169 }
170
171 /*
172 * Allocate a handle for mapping from kva/uva/physical
173 * address space into bus device space.
174 */
175 int
176 bus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp)
177 {
178 *mapp = NULL;
179 dmat->map_count++;
180
181 return (0);
182 }
183
/*
 * Destroy a handle for mapping from kva/uva/physical
 * address space into bus device space.
 *
 * Maps created by this implementation are always NULL (see
 * bus_dmamap_create()), so a non-NULL map here indicates a foreign or
 * corrupted handle and is treated as fatal.
 */
int
bus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map)
{
	/* Panics when map is NOT NULL: only NULL maps are ever issued. */
	if (map != NULL) {
		panic("dmamap_destroy: NULL?\n");
	}
	dmat->map_count--;
	return (0);
}
197
/*
 * Allocate a piece of memory that can be efficiently mapped into
 * bus device space based on the constraints listed in the dma tag.
 * A dmamap for use with dmamap_load is also allocated.
 */
int
bus_dmamem_alloc(bus_dma_tag_t dmat, void** vaddr, int flags,
		 bus_dmamap_t *mapp)
{
	/* No map structure is needed on this platform. */
	*mapp = NULL;

	if (dmat->maxsize <= PAGE_SIZE) {
		/*
		 * NOTE(review): this path honors neither alignment nor
		 * lowaddr/boundary — it presumably relies on malloc's
		 * natural alignment being sufficient for small
		 * allocations; verify for tags with strict constraints.
		 */
		*vaddr = malloc(dmat->maxsize, M_DEVBUF,
				(flags & BUS_DMA_NOWAIT) ? M_NOWAIT : M_WAITOK);
	} else {
		/*
		 * XXX Use Contigmalloc until it is merged into this facility
		 *     and handles multi-seg allocations.  Nobody is doing
		 *     multi-seg allocations yet though.
		 */
		/* contigmalloc() requires Giant at this vintage. */
		mtx_lock(&Giant);
		*vaddr = contigmalloc(dmat->maxsize, M_DEVBUF,
				      (flags & BUS_DMA_NOWAIT) ? M_NOWAIT : M_WAITOK,
				      0ul, dmat->lowaddr, dmat->alignment? dmat->alignment : 1ul,
				      dmat->boundary);
		mtx_unlock(&Giant);
	}

	if (*vaddr == NULL)
		return (ENOMEM);

	return (0);
}
231
/*
 * Free a piece of memory and its allocated dmamap, that was allocated
 * via bus_dmamem_alloc.  Make the same choice for free/contigfree as
 * bus_dmamem_alloc() made for malloc/contigmalloc, keyed off maxsize.
 */
void
bus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map)
{
	/* Only NULL maps are ever handed out; anything else is fatal. */
	if (map != NULL)
		panic("bus_dmamem_free: Invalid map freed\n");
	if (dmat->maxsize <= PAGE_SIZE)
		free(vaddr, M_DEVBUF);
	else {
		/* contigfree() requires Giant at this vintage. */
		mtx_lock(&Giant);
		contigfree(vaddr, dmat->maxsize, M_DEVBUF);
		mtx_unlock(&Giant);
	}
}
249
/*
 * Map the buffer buf into bus space using the dmamap map.
 *
 * Walks the kernel-virtual buffer a page at a time, translating each
 * page with pmap_kextract() and coalescing physically contiguous pages
 * into a single segment.  The resulting segment list is delivered to
 * the caller synchronously via *callback; errors (too many segments)
 * are reported through the callback's error argument, and the function
 * itself always returns 0.
 */
int
bus_dmamap_load(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
		bus_size_t buflen, bus_dmamap_callback_t *callback,
		void *callback_arg, int flags)
{
	vm_offset_t vaddr;
	vm_offset_t paddr;
#ifdef __GNUC__
	bus_dma_segment_t dm_segments[dmat->nsegments];
#else
	bus_dma_segment_t dm_segments[BUS_DMAMAP_NSEGS];
#endif
	bus_dma_segment_t *sg;
	int seg;
	int error = 0;
	vm_offset_t nextpaddr;		/* phys addr expected if contiguous */

	/* Only NULL maps are ever issued on this platform. */
	if (map != NULL)
		panic("bus_dmamap_load: Invalid map\n");

	vaddr = (vm_offset_t)buf;
	sg = &dm_segments[0];
	seg = 1;			/* seg is a 1-based segment count */
	sg->ds_len = 0;			/* marks the first segment as empty */
	nextpaddr = 0;

	/*
	 * NOTE(review): the do-while runs once even for buflen == 0,
	 * producing one zero-length segment — presumably callers never
	 * pass an empty buffer; verify.
	 */
	do {
		bus_size_t size;

		paddr = pmap_kextract(vaddr);
		/* Chunk runs at most to the end of the current page. */
		size = PAGE_SIZE - (paddr & PAGE_MASK);
		if (size > buflen)
			size = buflen;

		if (sg->ds_len == 0) {
			/* First chunk of the first segment. */
			sg->ds_addr = paddr;
			sg->ds_len = size;
		} else if (paddr == nextpaddr) {
			/* Physically contiguous: extend current segment. */
			sg->ds_len += size;
		} else {
			/* Go to the next segment */
			sg++;
			seg++;
			if (seg > dmat->nsegments)
				break;
			sg->ds_addr = paddr;
			sg->ds_len = size;
		}
		vaddr += size;
		nextpaddr = paddr + size;
		buflen -= size;

	} while (buflen > 0);

	/* Non-zero residual means we ran out of segments above. */
	if (buflen != 0) {
		printf("bus_dmamap_load: Too many segs! buf_len = 0x%lx\n",
		       (u_long)buflen);
		error = EFBIG;
	}

	(*callback)(callback_arg, dm_segments, seg, error);

	/* Errors are reported via the callback, not the return value. */
	return (0);
}
317
/*
 * Utility function to load a linear buffer.  lastaddrp holds state
 * between invocations (for multiple-buffer loads).  segp contains
 * the starting segment on entrance, and the ending segment on exit.
 * first indicates if this is the first invocation of this function.
 *
 * Translates via the thread's pmap when td is non-NULL (user
 * addresses), otherwise via pmap_kextract() (kernel addresses).
 * Chunks are clipped to page and boundary limits and coalesced into
 * the previous segment when physically contiguous, within maxsegsz,
 * and on the same boundary window.  Returns EFBIG if the buffer did
 * not fit in dmat->nsegments segments, else 0.
 */
static int
bus_dmamap_load_buffer(bus_dma_tag_t dmat, bus_dma_segment_t segs[],
		       void *buf, bus_size_t buflen, struct thread *td,
		       int flags, vm_offset_t *lastaddrp, int *segp,
		       int first)
{
	bus_size_t sgsize;
	bus_addr_t curaddr, lastaddr, baddr, bmask;
	vm_offset_t vaddr = (vm_offset_t)buf;
	int seg;
	pmap_t pmap;

	if (td != NULL)
		pmap = vmspace_pmap(td->td_proc->p_vmspace);
	else
		pmap = NULL;

	/* Resume from the caller-carried end address of the last chunk. */
	lastaddr = *lastaddrp;
	/* bmask is only meaningful when dmat->boundary != 0 (power of 2). */
	bmask = ~(dmat->boundary - 1);

	for (seg = *segp; buflen > 0 ; ) {
		/*
		 * Get the physical address for this segment.
		 */
		if (pmap)
			curaddr = pmap_extract(pmap, vaddr);
		else
			curaddr = pmap_kextract(vaddr);

		/*
		 * Compute the segment size, and adjust counts.
		 */
		sgsize = PAGE_SIZE - ((u_long)curaddr & PAGE_MASK);
		if (buflen < sgsize)
			sgsize = buflen;

		/*
		 * Make sure we don't cross any boundaries.
		 */
		if (dmat->boundary > 0) {
			baddr = (curaddr + dmat->boundary) & bmask;
			if (sgsize > (baddr - curaddr))
				sgsize = (baddr - curaddr);
		}

		/*
		 * Insert chunk into a segment, coalescing with
		 * the previous segment if possible.
		 */
		if (first) {
			segs[seg].ds_addr = curaddr;
			segs[seg].ds_len = sgsize;
			first = 0;
		} else {
			/*
			 * Coalesce only if physically contiguous, within
			 * the max segment size, and not spanning a
			 * boundary window.
			 */
			if (curaddr == lastaddr &&
			    (segs[seg].ds_len + sgsize) <= dmat->maxsegsz &&
			    (dmat->boundary == 0 ||
			     (segs[seg].ds_addr & bmask) == (curaddr & bmask)))
				segs[seg].ds_len += sgsize;
			else {
				/* segs[] is 0-indexed here; overflow check. */
				if (++seg >= dmat->nsegments)
					break;
				segs[seg].ds_addr = curaddr;
				segs[seg].ds_len = sgsize;
			}
		}

		lastaddr = curaddr + sgsize;
		vaddr += sgsize;
		buflen -= sgsize;
	}

	*segp = seg;
	*lastaddrp = lastaddr;

	/*
	 * Did we fit?
	 */
	return (buflen != 0 ? EFBIG : 0); /* XXX better return value here? */
}
404
/*
 * Like bus_dmamap_load(), but for mbufs.
 *
 * Iterates the mbuf chain, loading each non-empty mbuf's data through
 * bus_dmamap_load_buffer() so segments can coalesce across mbufs.
 * The result (or a zero-segment list on error) is delivered to the
 * caller synchronously via *callback, and the error is also returned.
 */
int
bus_dmamap_load_mbuf(bus_dma_tag_t dmat, bus_dmamap_t map, struct mbuf *m0,
		     bus_dmamap_callback2_t *callback, void *callback_arg,
		     int flags)
{
#ifdef __GNUC__
	bus_dma_segment_t dm_segments[dmat->nsegments];
#else
	bus_dma_segment_t dm_segments[BUS_DMAMAP_NSEGS];
#endif
	int nsegs = 0, error = 0;

	M_ASSERTPKTHDR(m0);

	if (m0->m_pkthdr.len <= dmat->maxsize) {
		int first = 1;
		vm_offset_t lastaddr = 0;	/* chunk-contiguity state */
		struct mbuf *m;

		for (m = m0; m != NULL && error == 0; m = m->m_next) {
			if (m->m_len > 0) {
				/* td == NULL: mbuf data is kernel virtual. */
				error = bus_dmamap_load_buffer(dmat,
				    dm_segments, m->m_data, m->m_len, NULL,
				    flags, &lastaddr, &nsegs, first);
				first = 0;
			}
		}
	} else {
		error = EINVAL;
	}

	if (error) {
		/*
		 * force "no valid mappings" on error in callback.
		 */
		(*callback)(callback_arg, dm_segments, 0, 0, error);
	} else {
		/* nsegs is the last 0-based index, hence nsegs+1 segments. */
		(*callback)(callback_arg, dm_segments, nsegs+1,
		    m0->m_pkthdr.len, error);
	}
	return (error);
}
450
451 /*
452 * Like bus_dmamap_load(), but for uios.
453 */
454 int
455 bus_dmamap_load_uio(bus_dma_tag_t dmat, bus_dmamap_t map, struct uio *uio,
456 bus_dmamap_callback2_t *callback, void *callback_arg,
457 int flags)
458 {
459 vm_offset_t lastaddr;
460 #ifdef __GNUC__
461 bus_dma_segment_t dm_segments[dmat->nsegments];
462 #else
463 bus_dma_segment_t dm_segments[BUS_DMAMAP_NSEGS];
464 #endif
465 int nsegs, i, error, first;
466 bus_size_t resid;
467 struct iovec *iov;
468 struct thread *td = NULL;
469
470 resid = uio->uio_resid;
471 iov = uio->uio_iov;
472
473 if (uio->uio_segflg == UIO_USERSPACE) {
474 td = uio->uio_td;
475 KASSERT(td != NULL,
476 ("bus_dmamap_load_uio: USERSPACE but no proc"));
477 }
478
479 first = 1;
480 nsegs = error = 0;
481 for (i = 0; i < uio->uio_iovcnt && resid != 0 && !error; i++) {
482 /*
483 * Now at the first iovec to load. Load each iovec
484 * until we have exhausted the residual count.
485 */
486 bus_size_t minlen =
487 resid < iov[i].iov_len ? resid : iov[i].iov_len;
488 caddr_t addr = (caddr_t) iov[i].iov_base;
489
490 if (minlen > 0) {
491 error = bus_dmamap_load_buffer(dmat, dm_segments, addr,
492 minlen, td, flags, &lastaddr, &nsegs, first);
493
494 first = 0;
495
496 resid -= minlen;
497 }
498 }
499
500 if (error) {
501 /*
502 * force "no valid mappings" on error in callback.
503 */
504 (*callback)(callback_arg, dm_segments, 0, 0, error);
505 } else {
506 (*callback)(callback_arg, dm_segments, nsegs+1,
507 uio->uio_resid, error);
508 }
509
510 return (error);
511 }
512
513 /*
514 * Release the mapping held by map. A no-op on PowerPC.
515 */
516 void
517 bus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t map)
518 {
519
520 return;
521 }
522
523 void
524 bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op)
525 {
526
527 return;
528 }
/* Cache object: c479af31a55f6631f999d3a75ba43f61 */