/*-
 * Copyright (c) 2015, 2019 Marcel Moolenaar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <machine/bus.h>
#include <machine/bus_dma.h>
#include <machine/resource.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/proc.h>
#include <sys/queue.h>
#include <sys/rman.h>
#include <sys/sbuf.h>
#include <sys/sx.h>
#include <sys/uio.h>
#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>

#include <dev/proto/proto.h>
#include <dev/proto/proto_dev.h>
#include <dev/proto/proto_busdma.h>

MALLOC_DEFINE(M_PROTO_BUSDMA, "proto_busdma", "DMA management data");

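/*
 * Combine two boundary constraints: a value of 0 means "no boundary",
 * otherwise the smaller (more restrictive) boundary wins.
 */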
#define BNDRY_MIN(a, b)         \
        (((a) == 0) ? (b) : (((b) == 0) ? (a) : MIN((a), (b))))

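/*
 * Context handed to the bus_dma load callbacks so that they can report
 * the bus segment information back through the ioctl argument.
 */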
struct proto_callback_bundle {
        struct proto_busdma *busdma;
        struct proto_md *md;
        struct proto_ioc_busdma *ioc;
};

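/*
 * Create a DMA tag, either as a root tag or derived from a parent tag.
 * Derived tags are constrained by their parent and the effective
 * constraints are written back to the ioctl argument.  The kernel address
 * of the tag serves as the handle returned to user space.
 */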
static int
proto_busdma_tag_create(struct proto_busdma *busdma, struct proto_tag *parent,
    struct proto_ioc_busdma *ioc)
{
        struct proto_tag *tag;

        /* Make sure that when a boundary is specified, it's a power of 2 */
        if (ioc->u.tag.bndry != 0 &&
            (ioc->u.tag.bndry & (ioc->u.tag.bndry - 1)) != 0)
                return (EINVAL);

        /*
         * If nsegs is 1, ignore maxsegsz: with a single segment, maxsegsz
         * is effectively the same as maxsz.  To keep things simple, clamp
         * maxsegsz to maxsz in all cases.
         */
        if (ioc->u.tag.maxsegsz > ioc->u.tag.maxsz || ioc->u.tag.nsegs == 1)
                ioc->u.tag.maxsegsz = ioc->u.tag.maxsz;

        tag = malloc(sizeof(*tag), M_PROTO_BUSDMA, M_WAITOK | M_ZERO);
        if (parent != NULL) {
                tag->parent = parent;
                LIST_INSERT_HEAD(&parent->children, tag, peers);
                tag->align = MAX(ioc->u.tag.align, parent->align);
                tag->bndry = BNDRY_MIN(ioc->u.tag.bndry, parent->bndry);
                tag->maxaddr = MIN(ioc->u.tag.maxaddr, parent->maxaddr);
                tag->maxsz = MIN(ioc->u.tag.maxsz, parent->maxsz);
                tag->maxsegsz = MIN(ioc->u.tag.maxsegsz, parent->maxsegsz);
                tag->nsegs = MIN(ioc->u.tag.nsegs, parent->nsegs);
                tag->datarate = MIN(ioc->u.tag.datarate, parent->datarate);
                /* Write the effective constraints back */
                ioc->u.tag.align = tag->align;
                ioc->u.tag.bndry = tag->bndry;
                ioc->u.tag.maxaddr = tag->maxaddr;
                ioc->u.tag.maxsz = tag->maxsz;
                ioc->u.tag.maxsegsz = tag->maxsegsz;
                ioc->u.tag.nsegs = tag->nsegs;
                ioc->u.tag.datarate = tag->datarate;
        } else {
                tag->align = ioc->u.tag.align;
                tag->bndry = ioc->u.tag.bndry;
                tag->maxaddr = ioc->u.tag.maxaddr;
                tag->maxsz = ioc->u.tag.maxsz;
                tag->maxsegsz = ioc->u.tag.maxsegsz;
                tag->nsegs = ioc->u.tag.nsegs;
                tag->datarate = ioc->u.tag.datarate;
        }
        LIST_INSERT_HEAD(&busdma->tags, tag, tags);
        ioc->result = (uintptr_t)(void *)tag;
        return (0);
}

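/*
 * Destroy a tag.  Fails with EBUSY while memory descriptors or derived
 * tags still reference it.
 */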
static int
proto_busdma_tag_destroy(struct proto_busdma *busdma, struct proto_tag *tag)
{

        if (!LIST_EMPTY(&tag->mds))
                return (EBUSY);
        if (!LIST_EMPTY(&tag->children))
                return (EBUSY);

        if (tag->parent != NULL) {
                LIST_REMOVE(tag, peers);
                tag->parent = NULL;
        }
        LIST_REMOVE(tag, tags);
        free(tag, M_PROTO_BUSDMA);
        return (0);
}

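/*
 * Translate a user-supplied tag handle back into a tag pointer.  Only
 * handles that are actually on the tag list are accepted.
 */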
static struct proto_tag *
proto_busdma_tag_lookup(struct proto_busdma *busdma, u_long key)
{
        struct proto_tag *tag;

        LIST_FOREACH(tag, &busdma->tags, tags) {
                if ((void *)tag == (void *)key)
                        return (tag);
        }
        return (NULL);
}

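/*
 * Tear down a memory descriptor: unload the map if it is loaded, free the
 * memory or destroy the map depending on whether the memory was allocated
 * in the kernel, and destroy the descriptor's DMA tag.
 */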
static int
proto_busdma_md_destroy_internal(struct proto_busdma *busdma,
    struct proto_md *md)
{

        LIST_REMOVE(md, mds);
        LIST_REMOVE(md, peers);
        if (md->physaddr)
                bus_dmamap_unload(md->bd_tag, md->bd_map);
        if (md->virtaddr != NULL)
                bus_dmamem_free(md->bd_tag, md->virtaddr, md->bd_map);
        else
                bus_dmamap_destroy(md->bd_tag, md->bd_map);
        bus_dma_tag_destroy(md->bd_tag);
        free(md, M_PROTO_BUSDMA);
        return (0);
}

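/*
 * Load callback for kernel-allocated memory: report the number of bus
 * segments and the address of the first segment through the ioctl
 * argument.
 */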
static void
proto_busdma_mem_alloc_callback(void *arg, bus_dma_segment_t *segs, int nseg,
    int error)
{
        struct proto_callback_bundle *pcb = arg;

        pcb->ioc->u.md.bus_nsegs = nseg;
        pcb->ioc->u.md.bus_addr = segs[0].ds_addr;
}

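/*
 * Allocate DMA-able kernel memory that satisfies the given tag, load it,
 * and report the virtual, physical and bus addresses to user space.
 */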
static int
proto_busdma_mem_alloc(struct proto_busdma *busdma, struct proto_tag *tag,
    struct proto_ioc_busdma *ioc)
{
        struct proto_callback_bundle pcb;
        struct proto_md *md;
        int error;

        md = malloc(sizeof(*md), M_PROTO_BUSDMA, M_WAITOK | M_ZERO);
        md->tag = tag;

        error = bus_dma_tag_create(busdma->bd_roottag, tag->align, tag->bndry,
            tag->maxaddr, BUS_SPACE_MAXADDR, NULL, NULL, tag->maxsz,
            tag->nsegs, tag->maxsegsz, 0, NULL, NULL, &md->bd_tag);
        if (error) {
                free(md, M_PROTO_BUSDMA);
                return (error);
        }
        error = bus_dmamem_alloc(md->bd_tag, &md->virtaddr, 0, &md->bd_map);
        if (error) {
                bus_dma_tag_destroy(md->bd_tag);
                free(md, M_PROTO_BUSDMA);
                return (error);
        }
        md->physaddr = pmap_kextract((uintptr_t)(md->virtaddr));
        pcb.busdma = busdma;
        pcb.md = md;
        pcb.ioc = ioc;
        error = bus_dmamap_load(md->bd_tag, md->bd_map, md->virtaddr,
            tag->maxsz, proto_busdma_mem_alloc_callback, &pcb, BUS_DMA_NOWAIT);
        if (error) {
                bus_dmamem_free(md->bd_tag, md->virtaddr, md->bd_map);
                bus_dma_tag_destroy(md->bd_tag);
                free(md, M_PROTO_BUSDMA);
                return (error);
        }
        LIST_INSERT_HEAD(&tag->mds, md, peers);
        LIST_INSERT_HEAD(&busdma->mds, md, mds);
        ioc->u.md.virt_addr = (uintptr_t)md->virtaddr;
        ioc->u.md.virt_size = tag->maxsz;
        ioc->u.md.phys_nsegs = 1;
        ioc->u.md.phys_addr = md->physaddr;
        ioc->result = (uintptr_t)(void *)md;
        return (0);
}

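/*
 * Free kernel-allocated DMA memory.  Descriptors without backing kernel
 * memory (i.e. created with MD_CREATE) are rejected with ENXIO.
 */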
static int
proto_busdma_mem_free(struct proto_busdma *busdma, struct proto_md *md)
{

        if (md->virtaddr == NULL)
                return (ENXIO);
        return (proto_busdma_md_destroy_internal(busdma, md));
}

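/*
 * Create an empty memory descriptor (DMA tag plus map) that user memory
 * can be loaded into later with MD_LOAD.
 */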
static int
proto_busdma_md_create(struct proto_busdma *busdma, struct proto_tag *tag,
    struct proto_ioc_busdma *ioc)
{
        struct proto_md *md;
        int error;

        md = malloc(sizeof(*md), M_PROTO_BUSDMA, M_WAITOK | M_ZERO);
        md->tag = tag;

        error = bus_dma_tag_create(busdma->bd_roottag, tag->align, tag->bndry,
            tag->maxaddr, BUS_SPACE_MAXADDR, NULL, NULL, tag->maxsz,
            tag->nsegs, tag->maxsegsz, 0, NULL, NULL, &md->bd_tag);
        if (error) {
                free(md, M_PROTO_BUSDMA);
                return (error);
        }
        error = bus_dmamap_create(md->bd_tag, 0, &md->bd_map);
        if (error) {
                bus_dma_tag_destroy(md->bd_tag);
                free(md, M_PROTO_BUSDMA);
                return (error);
        }

        LIST_INSERT_HEAD(&tag->mds, md, peers);
        LIST_INSERT_HEAD(&busdma->mds, md, mds);
        ioc->result = (uintptr_t)(void *)md;
        return (0);
}

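/*
 * Destroy a memory descriptor created with MD_CREATE.  Descriptors that
 * own kernel-allocated memory must be released with MEM_FREE instead.
 */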
static int
proto_busdma_md_destroy(struct proto_busdma *busdma, struct proto_md *md)
{

        if (md->virtaddr != NULL)
                return (ENXIO);
        return (proto_busdma_md_destroy_internal(busdma, md));
}

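/*
 * Load callback for user memory: report the bus segment information
 * through the ioctl argument.
 */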
static void
proto_busdma_md_load_callback(void *arg, bus_dma_segment_t *segs, int nseg,
    bus_size_t sz, int error)
{
        struct proto_callback_bundle *pcb = arg;

        pcb->ioc->u.md.bus_nsegs = nseg;
        pcb->ioc->u.md.bus_addr = segs[0].ds_addr;
}

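/*
 * Load the region of user memory described by the ioctl argument into a
 * previously created memory descriptor and report the resulting bus and
 * physical addresses.
 */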
static int
proto_busdma_md_load(struct proto_busdma *busdma, struct proto_md *md,
    struct proto_ioc_busdma *ioc, struct thread *td)
{
        struct proto_callback_bundle pcb;
        struct iovec iov;
        struct uio uio;
        pmap_t pmap;
        int error;

        iov.iov_base = (void *)(uintptr_t)ioc->u.md.virt_addr;
        iov.iov_len = ioc->u.md.virt_size;
        uio.uio_iov = &iov;
        uio.uio_iovcnt = 1;
        uio.uio_offset = 0;
        uio.uio_resid = iov.iov_len;
        uio.uio_segflg = UIO_USERSPACE;
        uio.uio_rw = UIO_READ;
        uio.uio_td = td;

        pcb.busdma = busdma;
        pcb.md = md;
        pcb.ioc = ioc;
        error = bus_dmamap_load_uio(md->bd_tag, md->bd_map, &uio,
            proto_busdma_md_load_callback, &pcb, BUS_DMA_NOWAIT);
        if (error)
                return (error);

        /* XXX determine *all* physical memory segments */
        pmap = vmspace_pmap(td->td_proc->p_vmspace);
        md->physaddr = pmap_extract(pmap, ioc->u.md.virt_addr);
        ioc->u.md.phys_nsegs = 1;       /* XXX */
        ioc->u.md.phys_addr = md->physaddr;
        return (0);
}

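/*
 * Unload a memory descriptor.  ENXIO is returned if nothing is loaded.
 */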
static int
proto_busdma_md_unload(struct proto_busdma *busdma, struct proto_md *md)
{

        if (!md->physaddr)
                return (ENXIO);
        bus_dmamap_unload(md->bd_tag, md->bd_map);
        md->physaddr = 0;
        return (0);
}

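/*
 * Perform a bus_dmamap_sync() on a loaded memory descriptor with the
 * operations requested by user space.
 */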
static int
proto_busdma_sync(struct proto_busdma *busdma, struct proto_md *md,
    struct proto_ioc_busdma *ioc)
{
        u_int ops;

        ops = BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE |
            BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE;
        if (ioc->u.sync.op & ~ops)
                return (EINVAL);
        if (!md->physaddr)
                return (ENXIO);
        bus_dmamap_sync(md->bd_tag, md->bd_map, ioc->u.sync.op);
        return (0);
}

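/*
 * Translate a user-supplied memory descriptor handle back into a pointer,
 * accepting only handles that are on the descriptor list.
 */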
static struct proto_md *
proto_busdma_md_lookup(struct proto_busdma *busdma, u_long key)
{
        struct proto_md *md;

        LIST_FOREACH(md, &busdma->mds, mds) {
                if ((void *)md == (void *)key)
                        return (md);
        }
        return (NULL);
}

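/*
 * Allocate and initialize the busdma state for a proto(4) instance.
 */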
struct proto_busdma *
proto_busdma_attach(struct proto_softc *sc)
{
        struct proto_busdma *busdma;

        busdma = malloc(sizeof(*busdma), M_PROTO_BUSDMA, M_WAITOK | M_ZERO);
        sx_init(&busdma->sxlck, "proto-busdma");
        return (busdma);
}

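/*
 * Release the busdma state, cleaning up any tags and memory descriptors
 * that are still outstanding.
 */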
int
proto_busdma_detach(struct proto_softc *sc, struct proto_busdma *busdma)
{

        proto_busdma_cleanup(sc, busdma);
        sx_destroy(&busdma->sxlck);
        free(busdma, M_PROTO_BUSDMA);
        return (0);
}

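/*
 * Destroy all memory descriptors and tags that user space left behind.
 */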
int
proto_busdma_cleanup(struct proto_softc *sc, struct proto_busdma *busdma)
{
        struct proto_md *md, *md1;
        struct proto_tag *tag, *tag1;

        sx_xlock(&busdma->sxlck);
        LIST_FOREACH_SAFE(md, &busdma->mds, mds, md1)
                proto_busdma_md_destroy_internal(busdma, md);
        LIST_FOREACH_SAFE(tag, &busdma->tags, tags, tag1)
                proto_busdma_tag_destroy(busdma, tag);
        sx_xunlock(&busdma->sxlck);
        return (0);
}

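/*
 * Handle the busdma ioctl requests from user space.  Handles (keys) are
 * validated before use and all operations are serialized by the sx lock.
 */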
int
proto_busdma_ioctl(struct proto_softc *sc, struct proto_busdma *busdma,
    struct proto_ioc_busdma *ioc, struct thread *td)
{
        struct proto_tag *tag;
        struct proto_md *md;
        int error;

        sx_xlock(&busdma->sxlck);

        error = 0;
        switch (ioc->request) {
        case PROTO_IOC_BUSDMA_TAG_CREATE:
                busdma->bd_roottag = bus_get_dma_tag(sc->sc_dev);
                error = proto_busdma_tag_create(busdma, NULL, ioc);
                break;
        case PROTO_IOC_BUSDMA_TAG_DERIVE:
                tag = proto_busdma_tag_lookup(busdma, ioc->key);
                if (tag == NULL) {
                        error = EINVAL;
                        break;
                }
                error = proto_busdma_tag_create(busdma, tag, ioc);
                break;
        case PROTO_IOC_BUSDMA_TAG_DESTROY:
                tag = proto_busdma_tag_lookup(busdma, ioc->key);
                if (tag == NULL) {
                        error = EINVAL;
                        break;
                }
                error = proto_busdma_tag_destroy(busdma, tag);
                break;
        case PROTO_IOC_BUSDMA_MEM_ALLOC:
                tag = proto_busdma_tag_lookup(busdma, ioc->u.md.tag);
                if (tag == NULL) {
                        error = EINVAL;
                        break;
                }
                error = proto_busdma_mem_alloc(busdma, tag, ioc);
                break;
        case PROTO_IOC_BUSDMA_MEM_FREE:
                md = proto_busdma_md_lookup(busdma, ioc->key);
                if (md == NULL) {
                        error = EINVAL;
                        break;
                }
                error = proto_busdma_mem_free(busdma, md);
                break;
        case PROTO_IOC_BUSDMA_MD_CREATE:
                tag = proto_busdma_tag_lookup(busdma, ioc->u.md.tag);
                if (tag == NULL) {
                        error = EINVAL;
                        break;
                }
                error = proto_busdma_md_create(busdma, tag, ioc);
                break;
        case PROTO_IOC_BUSDMA_MD_DESTROY:
                md = proto_busdma_md_lookup(busdma, ioc->key);
                if (md == NULL) {
                        error = EINVAL;
                        break;
                }
                error = proto_busdma_md_destroy(busdma, md);
                break;
        case PROTO_IOC_BUSDMA_MD_LOAD:
                md = proto_busdma_md_lookup(busdma, ioc->key);
                if (md == NULL) {
                        error = EINVAL;
                        break;
                }
                error = proto_busdma_md_load(busdma, md, ioc, td);
                break;
        case PROTO_IOC_BUSDMA_MD_UNLOAD:
                md = proto_busdma_md_lookup(busdma, ioc->key);
                if (md == NULL) {
                        error = EINVAL;
                        break;
                }
                error = proto_busdma_md_unload(busdma, md);
                break;
        case PROTO_IOC_BUSDMA_SYNC:
                md = proto_busdma_md_lookup(busdma, ioc->key);
                if (md == NULL) {
                        error = EINVAL;
                        break;
                }
                error = proto_busdma_sync(busdma, md, ioc);
                break;
        default:
                error = EINVAL;
                break;
        }

        sx_xunlock(&busdma->sxlck);

        return (error);
}

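/*
 * Check whether a physical address may be mapped by user space: it must
 * fall within one of the currently known memory descriptors.
 */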
int
proto_busdma_mmap_allowed(struct proto_busdma *busdma, vm_paddr_t physaddr)
{
        struct proto_md *md;
        int result;

        sx_xlock(&busdma->sxlck);

        result = 0;
        LIST_FOREACH(md, &busdma->mds, mds) {
                if (physaddr >= trunc_page(md->physaddr) &&
                    physaddr <= trunc_page(md->physaddr + md->tag->maxsz)) {
                        result = 1;
                        break;
                }
        }

        sx_xunlock(&busdma->sxlck);

        return (result);
}