/*-
 * Copyright (c) 1997, 1998 Justin T. Gibbs.
 * Copyright (c) 2013 The FreeBSD Foundation
 * All rights reserved.
 *
 * This software was developed by Konstantin Belousov <kib@FreeBSD.org>
 * under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/memdesc.h>
#include <sys/mutex.h>
#include <sys/uio.h>
#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/pmap.h>
#include <machine/bus.h>
#include <x86/include/busdma_impl.h>

/*
 * Convenience function for manipulating driver locks from busdma (during
 * busdma_swi, for example). Drivers that don't provide their own locks
 * should specify &Giant to dmat->lockfuncarg. Drivers that use their own
 * non-mutex locking scheme don't have to use this at all.
 */
void
busdma_lock_mutex(void *arg, bus_dma_lock_op_t op)
{
	struct mtx *dmtx;

	dmtx = (struct mtx *)arg;
	switch (op) {
	case BUS_DMA_LOCK:
		mtx_lock(dmtx);
		break;
	case BUS_DMA_UNLOCK:
		mtx_unlock(dmtx);
		break;
	default:
		panic("Unknown operation 0x%x for busdma_lock_mutex!", op);
	}
}
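
/*
 * Usage sketch: a driver that serializes its busdma callbacks with its
 * own mutex passes busdma_lock_mutex and that mutex as the lockfunc and
 * lockfuncarg arguments when creating its tag; busdma then takes the
 * mutex around any callback deferred to busdma_swi.  The softc fields
 * sc_mtx and sc_dmat below are hypothetical.
 *
 *	error = bus_dma_tag_create(bus_get_dma_tag(dev), 1, 0,
 *	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
 *	    MAXBSIZE, 1, MAXBSIZE, 0, busdma_lock_mutex, &sc->sc_mtx,
 *	    &sc->sc_dmat);
 */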

/*
 * dflt_lock should never get called. It gets put into the dma tag when
 * lockfunc == NULL, which is only valid if the maps that are associated
 * with the tag are meant to never be deferred.
 * XXX Should have a way to identify which driver is responsible here.
 */
void
bus_dma_dflt_lock(void *arg, bus_dma_lock_op_t op)
{

	panic("driver error: busdma dflt_lock called");
}
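
/*
 * Usage note: passing lockfunc == NULL is typically only safe when no
 * load on the tag can be deferred, e.g. when every bus_dmamap_load()
 * is issued with BUS_DMA_NOWAIT or the tag is only used with memory
 * from bus_dmamem_alloc().  A hypothetical non-deferring load:
 *
 *	error = bus_dmamap_load(sc->sc_dmat, sc->sc_map, buf, buflen,
 *	    mydriver_cb, sc, BUS_DMA_NOWAIT);
 *
 * where sc, sc_map, buf, and mydriver_cb are hypothetical driver names.
 */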

/*
 * Return true if a match is made.
 *
 * To find a match, walk the chain of bus_dma_tag_t's looking for 'paddr'.
 *
 * If paddr is within the bounds of the dma tag, call the filter callback
 * to check for a match; if there is no filter callback, assume a match.
 */
int
bus_dma_run_filter(struct bus_dma_tag_common *tc, bus_addr_t paddr)
{
	int retval;

	retval = 0;
	do {
		if (((paddr > tc->lowaddr && paddr <= tc->highaddr) ||
		    ((paddr & (tc->alignment - 1)) != 0)) &&
		    (tc->filter == NULL ||
		    (*tc->filter)(tc->filterarg, paddr) != 0))
			retval = 1;

		tc = tc->parent;
	} while (retval == 0 && tc != NULL);
	return (retval);
}
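
/*
 * Sketch of a driver-supplied filter (the driver name and address limit
 * are hypothetical): the filter is consulted only for addresses that
 * already fall inside the tag's exclusion window or violate its
 * alignment, and returns non-zero if the address is unusable by the
 * device (so it must be bounced) or zero to accept it anyway.
 *
 *	static int
 *	mydriver_dma_filter(void *arg, bus_addr_t paddr)
 *	{
 *
 *		return (paddr >= 0x1000000);
 *	}
 */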

int
common_bus_dma_tag_create(struct bus_dma_tag_common *parent,
    bus_size_t alignment, bus_addr_t boundary, bus_addr_t lowaddr,
    bus_addr_t highaddr, bus_dma_filter_t *filter, void *filterarg,
    bus_size_t maxsize, int nsegments, bus_size_t maxsegsz, int flags,
    bus_dma_lock_t *lockfunc, void *lockfuncarg, size_t sz, void **dmat)
{
	void *newtag;
	struct bus_dma_tag_common *common;

	KASSERT(sz >= sizeof(struct bus_dma_tag_common), ("sz"));
	/* Basic sanity checking */
	if (boundary != 0 && boundary < maxsegsz)
		maxsegsz = boundary;
	if (maxsegsz == 0)
		return (EINVAL);
	/* Return a NULL tag on failure */
	*dmat = NULL;

	newtag = malloc(sz, M_DEVBUF, M_ZERO | M_NOWAIT);
	if (newtag == NULL) {
		CTR4(KTR_BUSDMA, "%s returned tag %p tag flags 0x%x error %d",
		    __func__, newtag, 0, ENOMEM);
		return (ENOMEM);
	}

	common = newtag;
	common->impl = &bus_dma_bounce_impl;
	common->parent = parent;
	common->alignment = alignment;
	common->boundary = boundary;
	common->lowaddr = trunc_page((vm_paddr_t)lowaddr) + (PAGE_SIZE - 1);
	common->highaddr = trunc_page((vm_paddr_t)highaddr) + (PAGE_SIZE - 1);
	common->filter = filter;
	common->filterarg = filterarg;
	common->maxsize = maxsize;
	common->nsegments = nsegments;
	common->maxsegsz = maxsegsz;
	common->flags = flags;
	common->ref_count = 1; /* Count ourself */
	if (lockfunc != NULL) {
		common->lockfunc = lockfunc;
		common->lockfuncarg = lockfuncarg;
	} else {
		common->lockfunc = bus_dma_dflt_lock;
		common->lockfuncarg = NULL;
	}

	/* Take into account any restrictions imposed by our parent tag */
	if (parent != NULL) {
		common->impl = parent->impl;
		common->lowaddr = MIN(parent->lowaddr, common->lowaddr);
		common->highaddr = MAX(parent->highaddr, common->highaddr);
		if (common->boundary == 0)
			common->boundary = parent->boundary;
		else if (parent->boundary != 0) {
			common->boundary = MIN(parent->boundary,
			    common->boundary);
		}
		if (common->filter == NULL) {
			/*
			 * Short circuit looking at our parent directly
			 * since we have encapsulated all of its information
			 */
			common->filter = parent->filter;
			common->filterarg = parent->filterarg;
			common->parent = parent->parent;
		}
		atomic_add_int(&parent->ref_count, 1);
	}
	*dmat = common;
	return (0);
}
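
/*
 * Sketch of how an implementation is expected to use this helper (the
 * structure and variable names are hypothetical): it embeds struct
 * bus_dma_tag_common as the first member of its own tag structure, so
 * the casts in the dispatch wrappers below stay valid, and passes the
 * full structure size as 'sz'; the common fields are allocated and
 * initialized here while the caller fills in the rest.
 *
 *	struct my_dma_tag {
 *		struct bus_dma_tag_common common;
 *		int	my_private_state;
 *	};
 *	struct my_dma_tag *newtag;
 *
 *	error = common_bus_dma_tag_create(parent, alignment, boundary,
 *	    lowaddr, highaddr, filter, filterarg, maxsize, nsegments,
 *	    maxsegsz, flags, lockfunc, lockfuncarg,
 *	    sizeof(struct my_dma_tag), (void **)&newtag);
 */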

/*
 * Allocate a device specific dma_tag.
 */
int
bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment,
    bus_addr_t boundary, bus_addr_t lowaddr, bus_addr_t highaddr,
    bus_dma_filter_t *filter, void *filterarg, bus_size_t maxsize,
    int nsegments, bus_size_t maxsegsz, int flags, bus_dma_lock_t *lockfunc,
    void *lockfuncarg, bus_dma_tag_t *dmat)
{
	struct bus_dma_tag_common *tc;
	int error;

	if (parent == NULL) {
		error = bus_dma_bounce_impl.tag_create(parent, alignment,
		    boundary, lowaddr, highaddr, filter, filterarg, maxsize,
		    nsegments, maxsegsz, flags, lockfunc, lockfuncarg, dmat);
	} else {
		tc = (struct bus_dma_tag_common *)parent;
		error = tc->impl->tag_create(parent, alignment,
		    boundary, lowaddr, highaddr, filter, filterarg, maxsize,
		    nsegments, maxsegsz, flags, lockfunc, lockfuncarg, dmat);
	}
	return (error);
}

int
bus_dma_tag_destroy(bus_dma_tag_t dmat)
{
	struct bus_dma_tag_common *tc;

	tc = (struct bus_dma_tag_common *)dmat;
	return (tc->impl->tag_destroy(dmat));
}

/*
 * Allocate a handle for mapping from kva/uva/physical
 * address space into bus device space.
 */
int
bus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp)
{
	struct bus_dma_tag_common *tc;

	tc = (struct bus_dma_tag_common *)dmat;
	return (tc->impl->map_create(dmat, flags, mapp));
}
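
/*
 * Sketch of the usual map life cycle in a driver (all names below are
 * hypothetical): the map is created once, loaded per transfer, synced
 * around the DMA, then unloaded and eventually destroyed.
 *
 *	error = bus_dmamap_create(sc->sc_dmat, 0, &sc->sc_map);
 *	...
 *	error = bus_dmamap_load(sc->sc_dmat, sc->sc_map, buf, buflen,
 *	    mydriver_cb, sc, BUS_DMA_NOWAIT);
 *	bus_dmamap_sync(sc->sc_dmat, sc->sc_map, BUS_DMASYNC_PREWRITE);
 *	... start the transfer and wait for completion ...
 *	bus_dmamap_sync(sc->sc_dmat, sc->sc_map, BUS_DMASYNC_POSTWRITE);
 *	bus_dmamap_unload(sc->sc_dmat, sc->sc_map);
 *	...
 *	bus_dmamap_destroy(sc->sc_dmat, sc->sc_map);
 *
 * where mydriver_cb has the bus_dmamap_callback_t signature:
 *
 *	static void
 *	mydriver_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error);
 */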

/*
 * Destroy a handle for mapping from kva/uva/physical
 * address space into bus device space.
 */
int
bus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map)
{
	struct bus_dma_tag_common *tc;

	tc = (struct bus_dma_tag_common *)dmat;
	return (tc->impl->map_destroy(dmat, map));
}

/*
 * Allocate a piece of memory that can be efficiently mapped into
 * bus device space based on the constraints listed in the dma tag.
 * A dmamap for use with dmamap_load is also allocated.
 */
int
bus_dmamem_alloc(bus_dma_tag_t dmat, void** vaddr, int flags,
    bus_dmamap_t *mapp)
{
	struct bus_dma_tag_common *tc;

	tc = (struct bus_dma_tag_common *)dmat;
	return (tc->impl->mem_alloc(dmat, vaddr, flags, mapp));
}

/*
 * Free a piece of memory and its associated dmamap that were allocated
 * via bus_dmamem_alloc. Make the same choice for free/contigfree.
 */
void
bus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map)
{
	struct bus_dma_tag_common *tc;

	tc = (struct bus_dma_tag_common *)dmat;
	tc->impl->mem_free(dmat, vaddr, map);
}
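
/*
 * Sketch of a matched allocation and release (hypothetical names):
 * memory such as a descriptor ring is obtained with bus_dmamem_alloc()
 * and released with bus_dmamem_free(), using the map returned by the
 * allocation rather than one from bus_dmamap_create().
 *
 *	error = bus_dmamem_alloc(sc->sc_dmat, (void **)&sc->sc_ring,
 *	    BUS_DMA_NOWAIT | BUS_DMA_ZERO, &sc->sc_ring_map);
 *	...
 *	bus_dmamem_free(sc->sc_dmat, sc->sc_ring, sc->sc_ring_map);
 */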

/*
 * Utility function to load a physical buffer. segp contains
 * the starting segment on entrance, and the ending segment on exit.
 */
int
_bus_dmamap_load_phys(bus_dma_tag_t dmat, bus_dmamap_t map, vm_paddr_t buf,
    bus_size_t buflen, int flags, bus_dma_segment_t *segs, int *segp)
{
	struct bus_dma_tag_common *tc;

	tc = (struct bus_dma_tag_common *)dmat;
	return (tc->impl->load_phys(dmat, map, buf, buflen, flags, segs,
	    segp));
}

int
_bus_dmamap_load_ma(bus_dma_tag_t dmat, bus_dmamap_t map, struct vm_page **ma,
    bus_size_t tlen, int ma_offs, int flags, bus_dma_segment_t *segs,
    int *segp)
{
	struct bus_dma_tag_common *tc;

	tc = (struct bus_dma_tag_common *)dmat;
	return (tc->impl->load_ma(dmat, map, ma, tlen, ma_offs, flags,
	    segs, segp));
}

/*
 * Utility function to load a linear buffer. segp contains
 * the starting segment on entrance, and the ending segment on exit.
 */
int
_bus_dmamap_load_buffer(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
    bus_size_t buflen, pmap_t pmap, int flags, bus_dma_segment_t *segs,
    int *segp)
{
	struct bus_dma_tag_common *tc;

	tc = (struct bus_dma_tag_common *)dmat;
	return (tc->impl->load_buffer(dmat, map, buf, buflen, pmap, flags, segs,
	    segp));
}

void
__bus_dmamap_waitok(bus_dma_tag_t dmat, bus_dmamap_t map,
    struct memdesc *mem, bus_dmamap_callback_t *callback, void *callback_arg)
{
	struct bus_dma_tag_common *tc;

	tc = (struct bus_dma_tag_common *)dmat;
	tc->impl->map_waitok(dmat, map, mem, callback, callback_arg);
}

bus_dma_segment_t *
_bus_dmamap_complete(bus_dma_tag_t dmat, bus_dmamap_t map,
    bus_dma_segment_t *segs, int nsegs, int error)
{
	struct bus_dma_tag_common *tc;

	tc = (struct bus_dma_tag_common *)dmat;
	return (tc->impl->map_complete(dmat, map, segs, nsegs, error));
}

/*
 * Release the mapping held by map.
 */
void
_bus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t map)
{
	struct bus_dma_tag_common *tc;

	tc = (struct bus_dma_tag_common *)dmat;
	tc->impl->map_unload(dmat, map);
}

void
_bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op)
{
	struct bus_dma_tag_common *tc;

	tc = (struct bus_dma_tag_common *)dmat;
	tc->impl->map_sync(dmat, map, op);
}