FreeBSD/Linux Kernel Cross Reference
sys/dev/mvme/mvmebus.c
1 /* $NetBSD: mvmebus.c,v 1.14 2008/04/28 20:23:54 martin Exp $ */
2
3 /*-
4 * Copyright (c) 2000, 2002 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Steve C. Woodford.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 * POSSIBILITY OF SUCH DAMAGE.
30 */
31
32 #include <sys/cdefs.h>
33 __KERNEL_RCSID(0, "$NetBSD: mvmebus.c,v 1.14 2008/04/28 20:23:54 martin Exp $");
34
35 #include <sys/param.h>
36 #include <sys/kernel.h>
37 #include <sys/systm.h>
38 #include <sys/device.h>
39 #include <sys/malloc.h>
40 #include <sys/kcore.h>
41
42 #include <sys/cpu.h>
43 #include <sys/bus.h>
44
45 #include <dev/vme/vmereg.h>
46 #include <dev/vme/vmevar.h>
47
48 #include <dev/mvme/mvmebus.h>
49
50 #ifdef DIAGNOSTIC
51 int mvmebus_dummy_dmamap_create(bus_dma_tag_t, bus_size_t, int, bus_size_t,
52 bus_size_t, int, bus_dmamap_t *);
53 void mvmebus_dummy_dmamap_destroy(bus_dma_tag_t, bus_dmamap_t);
54 int mvmebus_dummy_dmamem_alloc(bus_dma_tag_t, bus_size_t, bus_size_t,
55 bus_size_t, bus_dma_segment_t *, int, int *, int);
56 void mvmebus_dummy_dmamem_free(bus_dma_tag_t, bus_dma_segment_t *, int);
57 #endif
58
59 #ifdef DEBUG
60 static const char *mvmebus_mod_string(vme_addr_t, vme_size_t,
61 vme_am_t, vme_datasize_t);
62 #endif
63
64 static void mvmebus_offboard_ram(struct mvmebus_softc *);
65 static int mvmebus_dmamap_load_common(struct mvmebus_softc *, bus_dmamap_t);
66
67 vme_am_t _mvmebus_am_cap[] = {
68 MVMEBUS_AM_CAP_BLKD64 | MVMEBUS_AM_CAP_USER,
69 MVMEBUS_AM_CAP_DATA | MVMEBUS_AM_CAP_USER,
70 MVMEBUS_AM_CAP_PROG | MVMEBUS_AM_CAP_USER,
71 MVMEBUS_AM_CAP_BLK | MVMEBUS_AM_CAP_USER,
72 MVMEBUS_AM_CAP_BLKD64 | MVMEBUS_AM_CAP_SUPER,
73 MVMEBUS_AM_CAP_DATA | MVMEBUS_AM_CAP_SUPER,
74 MVMEBUS_AM_CAP_PROG | MVMEBUS_AM_CAP_SUPER,
75 MVMEBUS_AM_CAP_BLK | MVMEBUS_AM_CAP_SUPER
76 };
77
78 const char *mvmebus_irq_name[] = {
79 "vmeirq0", "vmeirq1", "vmeirq2", "vmeirq3",
80 "vmeirq4", "vmeirq5", "vmeirq6", "vmeirq7"
81 };
82
83 extern phys_ram_seg_t mem_clusters[0];
84 extern int mem_cluster_cnt;
85
86
/*
 * Probe for offboard RAM (a VMEbus memory board) and, when found,
 * describe it as an extra slave image in sc->sc_slaves[0].
 * Called once from mvmebus_attach() before the slave table is printed.
 */
static void
mvmebus_offboard_ram(sc)
	struct mvmebus_softc *sc;
{
	struct mvmebus_range *svr, *mvr;
	vme_addr_t start, end, size;
	int i;

	/*
	 * If we have any offboard RAM (i.e. a VMEbus RAM board) then
	 * we need to record its details since it's effectively another
	 * VMEbus slave image as far as we're concerned.
	 * The chip-specific backend will have reserved sc->sc_slaves[0]
	 * for exactly this purpose.
	 */
	svr = sc->sc_slaves;
	if (mem_cluster_cnt < 2) {
		/* Only onboard memory; mark the reserved slot unused. */
		svr->vr_am = MVMEBUS_AM_DISABLED;
		return;
	}

	/* Cluster 1 is the offboard RAM (cluster 0 is onboard memory). */
	start = mem_clusters[1].start;
	size = mem_clusters[1].size - 1;	/* NB: 'size' is really size-1 */
	end = start + size;

	/*
	 * Figure out which VMEbus master image the RAM is
	 * visible through. This will tell us the address
	 * modifier and datasizes it uses, as well as allowing
	 * us to calculate its `real' VMEbus address.
	 *
	 * XXX FIXME: This is broken if the RAM is mapped through
	 * a translated address space. For example, on mvme167 it's
	 * perfectly legal to set up the following A32 mapping:
	 *
	 * vr_locaddr == 0x80000000
	 * vr_vmestart == 0x10000000
	 * vr_vmeend == 0x10ffffff
	 *
	 * In this case, RAM at VMEbus address 0x10800000 will appear at local
	 * address 0x80800000, but we need to set the slave vr_vmestart to
	 * 0x10800000.
	 */
	for (i = 0, mvr = sc->sc_masters; i < sc->sc_nmasters; i++, mvr++) {
		/* Local CPU address at which this master window begins */
		vme_addr_t vstart = mvr->vr_locstart + mvr->vr_vmestart;

		if (start >= vstart &&
		    end <= vstart + (mvr->vr_vmeend - mvr->vr_vmestart))
			break;
	}
	if (i == sc->sc_nmasters) {
		/* RAM is not reachable through any master image; disable. */
		svr->vr_am = MVMEBUS_AM_DISABLED;
#ifdef DEBUG
		printf("%s: No VMEbus master mapping for offboard RAM!\n",
		    device_xname(&sc->sc_dev));
#endif
		return;
	}

	/* Describe the RAM using the matching master image's attributes. */
	svr->vr_locstart = start;
	svr->vr_vmestart = start & mvr->vr_mask;
	svr->vr_vmeend = svr->vr_vmestart + size;
	svr->vr_datasize = mvr->vr_datasize;
	svr->vr_mask = mvr->vr_mask;
	svr->vr_am = mvr->vr_am & VME_AM_ADRSIZEMASK;
	/* Advertise the RAM as accessible in all data/program, user/super */
	svr->vr_am |= MVMEBUS_AM_CAP_DATA | MVMEBUS_AM_CAP_PROG |
	    MVMEBUS_AM_CAP_SUPER | MVMEBUS_AM_CAP_USER;
}
155
/*
 * Common attach routine for MVME VMEbus bridges.  The chip-specific
 * front-end fills in sc_masters/sc_slaves/sc_intr_* before calling
 * this; we record the offboard-RAM slave image, build the VME chipset
 * tag and the VMEbus DMA tag, and attach the generic `vme' bus glue
 * via config_found().
 */
void
mvmebus_attach(sc)
	struct mvmebus_softc *sc;
{
	struct vmebus_attach_args vaa;
	int i;

	/* Zap the IRQ reference counts */
	for (i = 0; i < 8; i++)
		sc->sc_irqref[i] = 0;

	/* If there's offboard RAM, get its VMEbus slave attributes */
	mvmebus_offboard_ram(sc);

#ifdef DEBUG
	/* Dump the master and slave address windows at attach time. */
	for (i = 0; i < sc->sc_nmasters; i++) {
		struct mvmebus_range *vr = &sc->sc_masters[i];
		if (vr->vr_am == MVMEBUS_AM_DISABLED) {
			printf("%s: Master#%d: disabled\n",
			    device_xname(&sc->sc_dev), i);
			continue;
		}
		printf("%s: Master#%d: 0x%08lx -> %s\n",
		    device_xname(&sc->sc_dev), i,
		    vr->vr_locstart + (vr->vr_vmestart & vr->vr_mask),
		    mvmebus_mod_string(vr->vr_vmestart,
			(vr->vr_vmeend - vr->vr_vmestart) + 1,
			vr->vr_am, vr->vr_datasize));
	}

	for (i = 0; i < sc->sc_nslaves; i++) {
		struct mvmebus_range *vr = &sc->sc_slaves[i];
		if (vr->vr_am == MVMEBUS_AM_DISABLED) {
			printf("%s: Slave#%d: disabled\n",
			    device_xname(&sc->sc_dev), i);
			continue;
		}
		printf("%s: Slave#%d: 0x%08lx -> %s\n",
		    device_xname(&sc->sc_dev), i, vr->vr_locstart,
		    mvmebus_mod_string(vr->vr_vmestart,
			(vr->vr_vmeend - vr->vr_vmestart) + 1,
			vr->vr_am, vr->vr_datasize));
	}
#endif

	/* VME chipset tag: entry points used by vme(4) children. */
	sc->sc_vct.cookie = sc;
	sc->sc_vct.vct_probe = mvmebus_probe;
	sc->sc_vct.vct_map = mvmebus_map;
	sc->sc_vct.vct_unmap = mvmebus_unmap;
	sc->sc_vct.vct_int_map = mvmebus_intmap;
	sc->sc_vct.vct_int_evcnt = mvmebus_intr_evcnt;
	sc->sc_vct.vct_int_establish = mvmebus_intr_establish;
	sc->sc_vct.vct_int_disestablish = mvmebus_intr_disestablish;
	sc->sc_vct.vct_dmamap_create = mvmebus_dmamap_create;
	sc->sc_vct.vct_dmamap_destroy = mvmebus_dmamap_destroy;
	sc->sc_vct.vct_dmamem_alloc = mvmebus_dmamem_alloc;
	sc->sc_vct.vct_dmamem_free = mvmebus_dmamem_free;

	/* bus_dma tag whose loads rewrite segments to VMEbus addresses. */
	sc->sc_mvmedmat._cookie = sc;
	sc->sc_mvmedmat._dmamap_load = mvmebus_dmamap_load;
	sc->sc_mvmedmat._dmamap_load_mbuf = mvmebus_dmamap_load_mbuf;
	sc->sc_mvmedmat._dmamap_load_uio = mvmebus_dmamap_load_uio;
	sc->sc_mvmedmat._dmamap_load_raw = mvmebus_dmamap_load_raw;
	sc->sc_mvmedmat._dmamap_unload = mvmebus_dmamap_unload;
	sc->sc_mvmedmat._dmamap_sync = mvmebus_dmamap_sync;
	sc->sc_mvmedmat._dmamem_map = mvmebus_dmamem_map;
	sc->sc_mvmedmat._dmamem_unmap = mvmebus_dmamem_unmap;
	sc->sc_mvmedmat._dmamem_mmap = mvmebus_dmamem_mmap;

	/*
	 * create/destroy/alloc/free must go through the vme_* wrappers;
	 * with DIAGNOSTIC the dummies panic to catch direct callers.
	 */
#ifdef DIAGNOSTIC
	sc->sc_mvmedmat._dmamap_create = mvmebus_dummy_dmamap_create;
	sc->sc_mvmedmat._dmamap_destroy = mvmebus_dummy_dmamap_destroy;
	sc->sc_mvmedmat._dmamem_alloc = mvmebus_dummy_dmamem_alloc;
	sc->sc_mvmedmat._dmamem_free = mvmebus_dummy_dmamem_free;
#else
	sc->sc_mvmedmat._dmamap_create = NULL;
	sc->sc_mvmedmat._dmamap_destroy = NULL;
	sc->sc_mvmedmat._dmamem_alloc = NULL;
	sc->sc_mvmedmat._dmamem_free = NULL;
#endif

	vaa.va_vct = &sc->sc_vct;
	vaa.va_bdt = &sc->sc_mvmedmat;
	vaa.va_slaveconfig = NULL;

	config_found(&sc->sc_dev, &vaa, 0);
}
243
/*
 * Map a VMEbus address range into kernel bus space.
 * Searches the master images for one whose address space, capabilities
 * and datasize cover the request, maps the corresponding local address
 * range, and returns a resource record for mvmebus_unmap().
 * Returns 0 on success, ENOMEM if no image matches or the resource
 * record can't be allocated, or the bus_space_map() error.
 */
int
mvmebus_map(vsc, vmeaddr, len, am, datasize, swap, tag, handle, resc)
	void *vsc;
	vme_addr_t vmeaddr;
	vme_size_t len;
	vme_am_t am;
	vme_datasize_t datasize;
	vme_swap_t swap;	/* unused here */
	bus_space_tag_t *tag;
	bus_space_handle_t *handle;
	vme_mapresc_t *resc;
{
	struct mvmebus_softc *sc;
	struct mvmebus_mapresc *mr;
	struct mvmebus_range *vr;
	vme_addr_t end;
	vme_am_t cap, as;
	paddr_t paddr;
	int rv, i;

	sc = vsc;
	end = (vmeaddr + len) - 1;	/* inclusive last VMEbus address */
	paddr = 0;			/* 0 == "no match found" sentinel */
	vr = sc->sc_masters;
	cap = MVMEBUS_AM2CAP(am);
	as = am & VME_AM_ADRSIZEMASK;

	/*
	 * Find a master image which covers [vmeaddr, end] with a
	 * compatible address space, capabilities and datasize.
	 *
	 * NOTE(review): `end < vr->vr_vmeend' is a strict compare while
	 * vr_vmeend appears to be inclusive elsewhere in this file
	 * (size == vmeend - vmestart + 1), so a request ending exactly at
	 * the last byte of an image is rejected -- confirm intended.
	 */
	for (i = 0; i < sc->sc_nmasters && paddr == 0; i++, vr++) {
		if (vr->vr_am == MVMEBUS_AM_DISABLED)
			continue;

		if (cap == (vr->vr_am & cap) &&
		    as == (vr->vr_am & VME_AM_ADRSIZEMASK) &&
		    datasize <= vr->vr_datasize &&
		    vmeaddr >= vr->vr_vmestart && end < vr->vr_vmeend)
			paddr = vr->vr_locstart + (vmeaddr & vr->vr_mask);
	}
	if (paddr == 0)
		return (ENOMEM);

	rv = bus_space_map(sc->sc_bust, paddr, len, 0, handle);
	if (rv != 0)
		return (rv);

	/* Allocate space for the resource tag */
	if ((mr = malloc(sizeof(*mr), M_DEVBUF, M_NOWAIT)) == NULL) {
		bus_space_unmap(sc->sc_bust, *handle, len);
		return (ENOMEM);
	}

	/* Record the range's details */
	mr->mr_am = am;
	mr->mr_datasize = datasize;
	mr->mr_addr = vmeaddr;
	mr->mr_size = len;
	mr->mr_handle = *handle;
	/*
	 * NB: the loop above increments `i' even on the matching
	 * iteration, so this records one past the matching master's
	 * index -- not the index itself.
	 */
	mr->mr_range = i;

	*tag = sc->sc_bust;
	*resc = (vme_mapresc_t *) mr;

	return (0);
}
307
308 /* ARGSUSED */
309 void
310 mvmebus_unmap(vsc, resc)
311 void *vsc;
312 vme_mapresc_t resc;
313 {
314 struct mvmebus_softc *sc = vsc;
315 struct mvmebus_mapresc *mr = (struct mvmebus_mapresc *) resc;
316
317 bus_space_unmap(sc->sc_bust, mr->mr_handle, mr->mr_size);
318
319 free(mr, M_DEVBUF);
320 }
321
322 int
323 mvmebus_probe(vsc, vmeaddr, len, am, datasize, callback, arg)
324 void *vsc;
325 vme_addr_t vmeaddr;
326 vme_size_t len;
327 vme_am_t am;
328 vme_datasize_t datasize;
329 int (*callback)(void *, bus_space_tag_t, bus_space_handle_t);
330 void *arg;
331 {
332 bus_space_tag_t tag;
333 bus_space_handle_t handle;
334 vme_mapresc_t resc;
335 vme_size_t offs;
336 int rv;
337
338 /* Get a temporary mapping to the VMEbus range */
339 rv = mvmebus_map(vsc, vmeaddr, len, am, datasize, 0,
340 &tag, &handle, &resc);
341 if (rv)
342 return (rv);
343
344 if (callback)
345 rv = (*callback) (arg, tag, handle);
346 else
347 for (offs = 0; offs < len && rv == 0;) {
348 switch (datasize) {
349 case VME_D8:
350 rv = bus_space_peek_1(tag, handle, offs, NULL);
351 offs += 1;
352 break;
353
354 case VME_D16:
355 rv = bus_space_peek_2(tag, handle, offs, NULL);
356 offs += 2;
357 break;
358
359 case VME_D32:
360 rv = bus_space_peek_4(tag, handle, offs, NULL);
361 offs += 4;
362 break;
363 }
364 }
365
366 mvmebus_unmap(vsc, resc);
367
368 return (rv);
369 }
370
371 /* ARGSUSED */
372 int
373 mvmebus_intmap(vsc, level, vector, handlep)
374 void *vsc;
375 int level, vector;
376 vme_intr_handle_t *handlep;
377 {
378
379 if (level < 1 || level > 7 || vector < 0x80 || vector > 0xff)
380 return (EINVAL);
381
382 /* This is rather gross */
383 *handlep = (void *) (int) ((level << 8) | vector);
384 return (0);
385 }
386
387 /* ARGSUSED */
388 const struct evcnt *
389 mvmebus_intr_evcnt(vsc, handle)
390 void *vsc;
391 vme_intr_handle_t handle;
392 {
393 struct mvmebus_softc *sc = vsc;
394
395 return (&sc->sc_evcnt[(((int) handle) >> 8) - 1]);
396 }
397
398 void *
399 mvmebus_intr_establish(vsc, handle, prior, func, arg)
400 void *vsc;
401 vme_intr_handle_t handle;
402 int prior;
403 int (*func)(void *);
404 void *arg;
405 {
406 struct mvmebus_softc *sc;
407 int level, vector, first;
408
409 sc = vsc;
410
411 /* Extract the interrupt's level and vector */
412 level = ((int) handle) >> 8;
413 vector = ((int) handle) & 0xff;
414
415 #ifdef DIAGNOSTIC
416 if (vector < 0 || vector > 0xff) {
417 printf("%s: Illegal vector offset: 0x%x\n",
418 device_xname(&sc->sc_dev), vector);
419 panic("mvmebus_intr_establish");
420 }
421 if (level < 1 || level > 7) {
422 printf("%s: Illegal interrupt level: %d\n",
423 device_xname(&sc->sc_dev), level);
424 panic("mvmebus_intr_establish");
425 }
426 #endif
427
428 first = (sc->sc_irqref[level]++ == 0);
429
430 (*sc->sc_intr_establish)(sc->sc_chip, prior, level, vector, first,
431 func, arg, &sc->sc_evcnt[level - 1]);
432
433 return ((void *) handle);
434 }
435
436 void
437 mvmebus_intr_disestablish(vsc, handle)
438 void *vsc;
439 vme_intr_handle_t handle;
440 {
441 struct mvmebus_softc *sc;
442 int level, vector, last;
443
444 sc = vsc;
445
446 /* Extract the interrupt's level and vector */
447 level = ((int) handle) >> 8;
448 vector = ((int) handle) & 0xff;
449
450 #ifdef DIAGNOSTIC
451 if (vector < 0 || vector > 0xff) {
452 printf("%s: Illegal vector offset: 0x%x\n",
453 device_xname(&sc->sc_dev), vector);
454 panic("mvmebus_intr_disestablish");
455 }
456 if (level < 1 || level > 7) {
457 printf("%s: Illegal interrupt level: %d\n",
458 device_xname(&sc->sc_dev), level);
459 panic("mvmebus_intr_disestablish");
460 }
461 if (sc->sc_irqref[level] == 0) {
462 printf("%s: VMEirq#%d: Reference count already zero!\n",
463 device_xname(&sc->sc_dev), level);
464 panic("mvmebus_intr_disestablish");
465 }
466 #endif
467
468 last = (--(sc->sc_irqref[level]) == 0);
469
470 (*sc->sc_intr_disestablish)(sc->sc_chip, level, vector, last,
471 &sc->sc_evcnt[level - 1]);
472 }
473
474 #ifdef DIAGNOSTIC
475 /* ARGSUSED */
476 int
477 mvmebus_dummy_dmamap_create(t, size, nsegs, maxsegsz, boundary, flags, dmamp)
478 bus_dma_tag_t t;
479 bus_size_t size;
480 int nsegs;
481 bus_size_t maxsegsz;
482 bus_size_t boundary;
483 int flags;
484 bus_dmamap_t *dmamp;
485 {
486
487 panic("Must use vme_dmamap_create() in place of bus_dmamap_create()");
488 return (0); /* Shutup the compiler */
489 }
490
491 /* ARGSUSED */
492 void
493 mvmebus_dummy_dmamap_destroy(t, map)
494 bus_dma_tag_t t;
495 bus_dmamap_t map;
496 {
497
498 panic("Must use vme_dmamap_destroy() in place of bus_dmamap_destroy()");
499 }
500 #endif
501
502 /* ARGSUSED */
503 int
504 mvmebus_dmamap_create(vsc, len, am, datasize, swap, nsegs,
505 segsz, bound, flags, mapp)
506 void *vsc;
507 vme_size_t len;
508 vme_am_t am;
509 vme_datasize_t datasize;
510 vme_swap_t swap;
511 int nsegs;
512 vme_size_t segsz;
513 vme_addr_t bound;
514 int flags;
515 bus_dmamap_t *mapp;
516 {
517 struct mvmebus_softc *sc = vsc;
518 struct mvmebus_dmamap *vmap;
519 struct mvmebus_range *vr;
520 vme_am_t cap, as;
521 int i, rv;
522
523 cap = MVMEBUS_AM2CAP(am);
524 as = am & VME_AM_ADRSIZEMASK;
525
526 /*
527 * Verify that we even stand a chance of satisfying
528 * the VMEbus address space and datasize requested.
529 */
530 for (i = 0, vr = sc->sc_slaves; i < sc->sc_nslaves; i++, vr++) {
531 if (vr->vr_am == MVMEBUS_AM_DISABLED)
532 continue;
533
534 if (as == (vr->vr_am & VME_AM_ADRSIZEMASK) &&
535 cap == (vr->vr_am & cap) && datasize <= vr->vr_datasize &&
536 len <= (vr->vr_vmeend - vr->vr_vmestart))
537 break;
538 }
539
540 if (i == sc->sc_nslaves)
541 return (EINVAL);
542
543 if ((vmap = malloc(sizeof(*vmap), M_DMAMAP,
544 (flags & BUS_DMA_NOWAIT) ? M_NOWAIT : M_WAITOK)) == NULL)
545 return (ENOMEM);
546
547
548 rv = bus_dmamap_create(sc->sc_dmat, len, nsegs, segsz,
549 bound, flags, mapp);
550 if (rv != 0) {
551 free(vmap, M_DMAMAP);
552 return (rv);
553 }
554
555 vmap->vm_am = am;
556 vmap->vm_datasize = datasize;
557 vmap->vm_swap = swap;
558 vmap->vm_slave = vr;
559
560 (*mapp)->_dm_cookie = vmap;
561
562 return (0);
563 }
564
565 void
566 mvmebus_dmamap_destroy(vsc, map)
567 void *vsc;
568 bus_dmamap_t map;
569 {
570 struct mvmebus_softc *sc = vsc;
571
572 free(map->_dm_cookie, M_DMAMAP);
573 bus_dmamap_destroy(sc->sc_dmat, map);
574 }
575
/*
 * Post-process a freshly loaded DMA map: rewrite each segment's
 * CPU-relative physical address into the VMEbus address at which a
 * slave image exposes it.  Returns 0 on success, EINVAL if some
 * segment is not covered by any slave image (bounce buffers are not
 * implemented).
 */
static int
mvmebus_dmamap_load_common(sc, map)
	struct mvmebus_softc *sc;
	bus_dmamap_t map;
{
	struct mvmebus_dmamap *vmap = map->_dm_cookie;
	struct mvmebus_range *vr = vmap->vm_slave;
	bus_dma_segment_t *ds;
	vme_am_t cap, am;
	int i;

	cap = MVMEBUS_AM2CAP(vmap->vm_am);
	am = vmap->vm_am & VME_AM_ADRSIZEMASK;

	/*
	 * Traverse the list of segments which make up this map, and
	 * convert the CPU-relative addresses therein to VMEbus addresses.
	 */
	for (ds = &map->dm_segs[0]; ds < &map->dm_segs[map->dm_nsegs]; ds++) {
		/*
		 * First, see if this map's slave image can access the
		 * segment, otherwise we have to waste time scanning all
		 * the slave images.
		 *
		 * NOTE(review): this (and the loop below) only checks
		 * ds->ds_len against the image's size, not that the
		 * segment's *end* (_ds_cpuaddr + ds_len) lies within the
		 * image -- confirm the backend guarantees this.
		 */
		vr = vmap->vm_slave;
		if (am == (vr->vr_am & VME_AM_ADRSIZEMASK) &&
		    cap == (vr->vr_am & cap) &&
		    vmap->vm_datasize <= vr->vr_datasize &&
		    ds->_ds_cpuaddr >= vr->vr_locstart &&
		    ds->ds_len <= (vr->vr_vmeend - vr->vr_vmestart))
			goto found;

		/* Slow path: scan every enabled slave image. */
		for (i = 0, vr = sc->sc_slaves; i < sc->sc_nslaves; i++, vr++) {
			if (vr->vr_am == MVMEBUS_AM_DISABLED)
				continue;

			/*
			 * Filter out any slave images which don't have the
			 * same VMEbus address modifier and datasize as
			 * this DMA map, and those which don't cover the
			 * physical address region containing the segment.
			 */
			if (vr != vmap->vm_slave &&
			    am == (vr->vr_am & VME_AM_ADRSIZEMASK) &&
			    cap == (vr->vr_am & cap) &&
			    vmap->vm_datasize <= vr->vr_datasize &&
			    ds->_ds_cpuaddr >= vr->vr_locstart &&
			    ds->ds_len <= (vr->vr_vmeend - vr->vr_vmestart))
				break;
		}

		/*
		 * Did we find an applicable slave image which covers this
		 * segment?
		 */
		if (i == sc->sc_nslaves) {
			/*
			 * XXX TODO:
			 *
			 * Bounce this segment via a bounce buffer allocated
			 * from this DMA map.
			 */
			printf("mvmebus_dmamap_load_common: bounce needed!\n");
			return (EINVAL);
		}

found:
		/*
		 * Generate the VMEbus address of this segment
		 */
		ds->ds_addr = (ds->_ds_cpuaddr - vr->vr_locstart) +
		    vr->vr_vmestart;
	}

	return (0);
}
652
653 int
654 mvmebus_dmamap_load(t, map, buf, buflen, p, flags)
655 bus_dma_tag_t t;
656 bus_dmamap_t map;
657 void *buf;
658 bus_size_t buflen;
659 struct proc *p;
660 int flags;
661 {
662 struct mvmebus_softc *sc = t->_cookie;
663 int rv;
664
665 rv = bus_dmamap_load(sc->sc_dmat, map, buf, buflen, p, flags);
666 if (rv != 0)
667 return rv;
668
669 return mvmebus_dmamap_load_common(sc, map);
670 }
671
672 int
673 mvmebus_dmamap_load_mbuf(t, map, chain, flags)
674 bus_dma_tag_t t;
675 bus_dmamap_t map;
676 struct mbuf *chain;
677 int flags;
678 {
679 struct mvmebus_softc *sc = t->_cookie;
680 int rv;
681
682 rv = bus_dmamap_load_mbuf(sc->sc_dmat, map, chain, flags);
683 if (rv != 0)
684 return rv;
685
686 return mvmebus_dmamap_load_common(sc, map);
687 }
688
689 int
690 mvmebus_dmamap_load_uio(t, map, uio, flags)
691 bus_dma_tag_t t;
692 bus_dmamap_t map;
693 struct uio *uio;
694 int flags;
695 {
696 struct mvmebus_softc *sc = t->_cookie;
697 int rv;
698
699 rv = bus_dmamap_load_uio(sc->sc_dmat, map, uio, flags);
700 if (rv != 0)
701 return rv;
702
703 return mvmebus_dmamap_load_common(sc, map);
704 }
705
706 int
707 mvmebus_dmamap_load_raw(t, map, segs, nsegs, size, flags)
708 bus_dma_tag_t t;
709 bus_dmamap_t map;
710 bus_dma_segment_t *segs;
711 int nsegs;
712 bus_size_t size;
713 int flags;
714 {
715 struct mvmebus_softc *sc = t->_cookie;
716 int rv;
717
718 /*
719 * mvmebus_dmamem_alloc() will ensure that the physical memory
720 * backing these segments is 100% accessible in at least one
721 * of the board's VMEbus slave images.
722 */
723 rv = bus_dmamap_load_raw(sc->sc_dmat, map, segs, nsegs, size, flags);
724 if (rv != 0)
725 return rv;
726
727 return mvmebus_dmamap_load_common(sc, map);
728 }
729
730 void
731 mvmebus_dmamap_unload(t, map)
732 bus_dma_tag_t t;
733 bus_dmamap_t map;
734 {
735 struct mvmebus_softc *sc = t->_cookie;
736
737 /* XXX Deal with bounce buffers */
738
739 bus_dmamap_unload(sc->sc_dmat, map);
740 }
741
742 void
743 mvmebus_dmamap_sync(t, map, offset, len, ops)
744 bus_dma_tag_t t;
745 bus_dmamap_t map;
746 bus_addr_t offset;
747 bus_size_t len;
748 int ops;
749 {
750 struct mvmebus_softc *sc = t->_cookie;
751
752 /* XXX Bounce buffers */
753
754 bus_dmamap_sync(sc->sc_dmat, map, offset, len, ops);
755 }
756
757 #ifdef DIAGNOSTIC
758 /* ARGSUSED */
759 int
760 mvmebus_dummy_dmamem_alloc(t, size, align, boundary, segs, nsegs, rsegs, flags)
761 bus_dma_tag_t t;
762 bus_size_t size;
763 bus_size_t align;
764 bus_size_t boundary;
765 bus_dma_segment_t *segs;
766 int nsegs;
767 int *rsegs;
768 int flags;
769 {
770
771 panic("Must use vme_dmamem_alloc() in place of bus_dmamem_alloc()");
772 }
773
774 /* ARGSUSED */
775 void
776 mvmebus_dummy_dmamem_free(t, segs, nsegs)
777 bus_dma_tag_t t;
778 bus_dma_segment_t *segs;
779 int nsegs;
780 {
781
782 panic("Must use vme_dmamem_free() in place of bus_dmamem_free()");
783 }
784 #endif
785
786 /* ARGSUSED */
/*
 * Allocate physical memory which is visible through one of the board's
 * VMEbus slave images, so a VMEbus master can DMA to/from it.
 * BUS_DMA_ONBOARD_RAM in `flags' skips slave image #0 (the offboard
 * RAM image set up by mvmebus_offboard_ram()).
 * Returns 0 on success, EINVAL if no slave image matches, or the
 * underlying allocator's error.
 */
/* ARGSUSED */
int
mvmebus_dmamem_alloc(vsc, len, am, datasize, swap, segs, nsegs, rsegs, flags)
	void *vsc;
	vme_size_t len;
	vme_am_t am;
	vme_datasize_t datasize;
	vme_swap_t swap;	/* unused here */
	bus_dma_segment_t *segs;
	int nsegs;
	int *rsegs;
	int flags;
{
	extern paddr_t avail_start;
	struct mvmebus_softc *sc = vsc;
	struct mvmebus_range *vr;
	bus_addr_t low, high;
	bus_size_t bound;
	vme_am_t cap;
	int i;

	cap = MVMEBUS_AM2CAP(am);
	am &= VME_AM_ADRSIZEMASK;

	/*
	 * Find a slave mapping in the requested VMEbus address space.
	 */
	for (i = 0, vr = sc->sc_slaves; i < sc->sc_nslaves; i++, vr++) {
		if (vr->vr_am == MVMEBUS_AM_DISABLED)
			continue;

		/* Slot 0 is offboard RAM; skip it if onboard was requested */
		if (i == 0 && (flags & BUS_DMA_ONBOARD_RAM) != 0)
			continue;

		if (am == (vr->vr_am & VME_AM_ADRSIZEMASK) &&
		    cap == (vr->vr_am & cap) && datasize <= vr->vr_datasize &&
		    len <= (vr->vr_vmeend - vr->vr_vmestart))
			break;
	}
	if (i == sc->sc_nslaves)
		return (EINVAL);

	/*
	 * Set up the constraints so we can allocate physical memory which
	 * is visible in the requested address space
	 */
	low = max(vr->vr_locstart, avail_start);
	/* NOTE(review): `high' is one past the image's last local address */
	high = vr->vr_locstart + (vr->vr_vmeend - vr->vr_vmestart) + 1;
	bound = (bus_size_t) vr->vr_mask + 1;

	/*
	 * Allocate physical memory.
	 *
	 * Note: This fills in the segments with CPU-relative physical
	 * addresses. A further call to bus_dmamap_load_raw() (with a
	 * DMA map which specifies the same VMEbus address space and
	 * constraints as the call to here) must be made. The segments
	 * of the DMA map will then contain VMEbus-relative physical
	 * addresses of the memory allocated here.
	 */
	return _bus_dmamem_alloc_common(sc->sc_dmat, low, high,
	    len, 0, bound, segs, nsegs, rsegs, flags);
}
849
850 void
851 mvmebus_dmamem_free(vsc, segs, nsegs)
852 void *vsc;
853 bus_dma_segment_t *segs;
854 int nsegs;
855 {
856 struct mvmebus_softc *sc = vsc;
857
858 bus_dmamem_free(sc->sc_dmat, segs, nsegs);
859 }
860
861 int
862 mvmebus_dmamem_map(t, segs, nsegs, size, kvap, flags)
863 bus_dma_tag_t t;
864 bus_dma_segment_t *segs;
865 int nsegs;
866 size_t size;
867 void **kvap;
868 int flags;
869 {
870 struct mvmebus_softc *sc = t->_cookie;
871
872 return bus_dmamem_map(sc->sc_dmat, segs, nsegs, size, kvap, flags);
873 }
874
875 void
876 mvmebus_dmamem_unmap(t, kva, size)
877 bus_dma_tag_t t;
878 void *kva;
879 size_t size;
880 {
881 struct mvmebus_softc *sc = t->_cookie;
882
883 bus_dmamem_unmap(sc->sc_dmat, kva, size);
884 }
885
886 paddr_t
887 mvmebus_dmamem_mmap(t, segs, nsegs, offset, prot, flags)
888 bus_dma_tag_t t;
889 bus_dma_segment_t *segs;
890 int nsegs;
891 off_t offset;
892 int prot;
893 int flags;
894 {
895 struct mvmebus_softc *sc = t->_cookie;
896
897 return bus_dmamem_mmap(sc->sc_dmat, segs, nsegs, offset, prot, flags);
898 }
899
900 #ifdef DEBUG
901 static const char *
902 mvmebus_mod_string(addr, len, am, ds)
903 vme_addr_t addr;
904 vme_size_t len;
905 vme_am_t am;
906 vme_datasize_t ds;
907 {
908 static const char *mode[] = {"BLT64)", "DATA)", "PROG)", "BLT32)"};
909 static const char *dsiz[] = {"(", "(D8,", "(D16,", "(D16-D8,",
910 "(D32,", "(D32,D8,", "(D32-D16,", "(D32-D8,"};
911 static const char *adrfmt[] = { "A32:%08x-%08x ", "USR:%08x-%08x ",
912 "A16:%04x-%04x ", "A24:%06x-%06x " };
913 static char mstring[40];
914
915 snprintf(mstring, sizeof(mstring),
916 adrfmt[(am & VME_AM_ADRSIZEMASK) >> VME_AM_ADRSIZESHIFT],
917 addr, addr + len - 1);
918 strlcat(mstring, dsiz[ds & 0x7], sizeof(mstring));
919
920 if (MVMEBUS_AM_HAS_CAP(am)) {
921 if (am & MVMEBUS_AM_CAP_DATA)
922 strlcat(mstring, "D", sizeof(mstring));
923 if (am & MVMEBUS_AM_CAP_PROG)
924 strlcat(mstring, "P", sizeof(mstring));
925 if (am & MVMEBUS_AM_CAP_USER)
926 strlcat(mstring, "U", sizeof(mstring));
927 if (am & MVMEBUS_AM_CAP_SUPER)
928 strlcat(mstring, "S", sizeof(mstring));
929 if (am & MVMEBUS_AM_CAP_BLK)
930 strlcat(mstring, "B", sizeof(mstring));
931 if (am & MVMEBUS_AM_CAP_BLKD64)
932 strlcat(mstring, "6", sizeof(mstring));
933 strlcat(mstring, ")", sizeof(mstring));
934 } else {
935 strlcat(mstring, ((am & VME_AM_PRIVMASK) == VME_AM_USER) ?
936 "USER," : "SUPER,", sizeof(mstring));
937 strlcat(mstring, mode[am & VME_AM_MODEMASK], sizeof(mstring));
938 }
939
940 return (mstring);
941 }
942 #endif
Cache object: 292fe023293bd669f4ae454fa8f2cf94
|