/*
 * Copyright (c) 1999, 2000 Matthew R. Green
 * Copyright (c) 2001-2003 Thomas Moestl
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*-
 * Copyright (c) 1998 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Paul Kranenburg.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
/*
 * Copyright (c) 1992, 1993
 *	The Regents of the University of California. All rights reserved.
 *
 * This software was developed by the Computer Systems Engineering group
 * at Lawrence Berkeley Laboratory under DARPA contract BG 91-66 and
 * contributed to Berkeley.
 *
 * All advertising materials mentioning features or use of this software
 * must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Lawrence Berkeley Laboratory.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: NetBSD: sbus.c,v 1.13 1999/05/23 07:24:02 mrg Exp
 *	from: @(#)sbus.c	8.1 (Berkeley) 6/11/93
 *	from: NetBSD: iommu.c,v 1.42 2001/08/06 22:02:58 eeh Exp
 *
 * $FreeBSD: releng/5.2/sys/sparc64/sparc64/iommu.c 118090 2003-07-27 15:19:45Z tmm $
 */

/*
 * UltraSPARC IOMMU support; used by both the sbus and pci code.
 * Currently, the IOTSBs are synchronized, because determining the bus the map
 * is to be loaded for is not possible with the current busdma code.
 * The code is structured so that the IOMMUs can be easily divorced when that
 * is fixed.
 *
 * TODO:
 * - As soon as there is a newbus way to get a parent dma tag, divorce the
 *   IOTSBs.
 * - Support sub-page boundaries.
 * - Fix alignment handling for small allocations (the possible page offset
 *   of malloc()ed memory is not handled at all). Revise interaction of
 *   alignment with the load_mbuf and load_uio functions.
 * - Handle lowaddr and highaddr in some way, and try to work out a way
 *   for filter callbacks to work. Currently, only lowaddr is honored
 *   in that no addresses above it are considered at all.
 * - Implement BUS_DMA_ALLOCNOW in bus_dma_tag_create as far as possible.
 * - Check the possible return values and callback error arguments;
 *   the callback currently gets called in error conditions where it should
 *   not be.
 * - When running out of DVMA space, return EINPROGRESS in the non-
 *   BUS_DMA_NOWAIT case and delay the callback until sufficient space
 *   becomes available.
 * - Use the streaming cache unless BUS_DMA_COHERENT is specified; do not
 *   flush the streaming cache when coherent mappings are synced.
 * - Add bounce buffers to support machines with more than 16GB of RAM.
 */
#include "opt_iommu.h"

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/systm.h>
#include <sys/uio.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>

#include <machine/bus.h>
#include <machine/bus_private.h>
#include <machine/iommureg.h>
#include <machine/pmap.h>
#include <machine/resource.h>

#include <sys/rman.h>

#include <machine/iommuvar.h>

/*
 * Tuning constants.
 */
#define	IOMMU_MAX_PRE		(32 * 1024)
#define	IOMMU_MAX_PRE_SEG	3

/* Threshold for using the streaming buffer. */
#define	IOMMU_STREAM_THRESH	128

MALLOC_DEFINE(M_IOMMU, "dvmamem", "IOMMU DVMA Buffers");

static int iommu_strbuf_flush_sync(struct iommu_state *);
#ifdef IOMMU_DIAG
static void iommu_diag(struct iommu_state *, vm_offset_t va);
#endif

/*
 * Protects iommu_maplruq, dm_reslist of all maps on the queue and all
 * iommu states as long as the TSBs are synchronized.
 */
struct mtx iommu_mtx;

/*
 * The following 4 variables need to be moved to the per-IOMMU state once
 * the IOTSBs are divorced.
 * LRU queue handling for lazy resource allocation.
 */
static TAILQ_HEAD(, bus_dmamap) iommu_maplruq =
    TAILQ_HEAD_INITIALIZER(iommu_maplruq);

/* DVMA space rman. */
static struct rman iommu_dvma_rman;

/* Virtual and physical address of the TSB. */
static u_int64_t *iommu_tsb;
static vm_offset_t iommu_ptsb;

/* List of all IOMMUs. */
static STAILQ_HEAD(, iommu_state) iommu_insts =
    STAILQ_HEAD_INITIALIZER(iommu_insts);

/*
 * Helpers. Some of these take unused iommu states as parameters, to ease the
 * transition to divorced TSBs.
 */
#define	IOMMU_READ8(is, reg, off)					\
	bus_space_read_8((is)->is_bustag, (is)->is_bushandle,		\
	    (is)->reg + (off))
#define	IOMMU_WRITE8(is, reg, off, v)					\
	bus_space_write_8((is)->is_bustag, (is)->is_bushandle,		\
	    (is)->reg + (off), (v))

#define	IOMMU_HAS_SB(is)						\
	((is)->is_sb[0] != 0 || (is)->is_sb[1] != 0)

/*
 * Always overallocate one page; this is needed to handle alignment of the
 * buffer, so it makes sense to use a lazy allocation scheme.
 */
#define	IOMMU_SIZE_ROUNDUP(sz)						\
	(round_io_page(sz) + IO_PAGE_SIZE)
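
/*
 * For example, with the 8KB I/O page size, IOMMU_SIZE_ROUNDUP(0x2100)
 * rounds 0x2100 bytes up to two I/O pages (16KB) and adds the extra
 * alignment page, yielding a 24KB reservation.
 */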
227
228 #define IOMMU_SET_TTE(is, va, tte) \
229 (iommu_tsb[IOTSBSLOT(va)] = (tte))
230 #define IOMMU_GET_TTE(is, va) \
231 iommu_tsb[IOTSBSLOT(va)]
232
233 /* Resource helpers */
234 #define IOMMU_RES_START(res) \
235 ((bus_addr_t)rman_get_start(res) << IO_PAGE_SHIFT)
236 #define IOMMU_RES_END(res) \
237 ((bus_addr_t)(rman_get_end(res) + 1) << IO_PAGE_SHIFT)
238 #define IOMMU_RES_SIZE(res) \
239 ((bus_size_t)rman_get_size(res) << IO_PAGE_SHIFT)
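
/*
 * The DVMA rman accounts in units of whole I/O pages, so resource bounds
 * are page numbers, not byte addresses. For example, a reservation of
 * pages 0x100 through 0x103 translates to the byte range [0x200000,
 * 0x208000) and a size of 0x8000.
 */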
240
241 /* Helpers for struct bus_dmamap_res */
242 #define BDR_START(r) IOMMU_RES_START((r)->dr_res)
243 #define BDR_END(r) IOMMU_RES_END((r)->dr_res)
244 #define BDR_SIZE(r) IOMMU_RES_SIZE((r)->dr_res)
245
246 /* Locking macros. */
247 #define IS_LOCK(is) mtx_lock(&iommu_mtx)
248 #define IS_LOCK_ASSERT(is) mtx_assert(&iommu_mtx, MA_OWNED)
249 #define IS_UNLOCK(is) mtx_unlock(&iommu_mtx)
250
251
/* Flush a page from the TLB. No locking required, since this is atomic. */
static __inline void
iommu_tlb_flush(struct iommu_state *is, bus_addr_t va)
{
	struct iommu_state *it;

	/*
	 * Since the TSB is shared for now, the TLBs of all IOMMUs
	 * need to be flushed.
	 */
	STAILQ_FOREACH(it, &iommu_insts, is_link)
		IOMMU_WRITE8(it, is_iommu, IMR_FLUSH, va);
}

/*
 * Flush a page from the streaming buffer. No locking required, since this is
 * atomic.
 */
static __inline void
iommu_strbuf_flushpg(struct iommu_state *is, bus_addr_t va)
{
	int i;

	for (i = 0; i < 2; i++) {
		if (is->is_sb[i] != 0)
			IOMMU_WRITE8(is, is_sb[i], ISR_PGFLUSH, va);
	}
}

/*
 * Flush an address from the streaming buffer(s); this is an asynchronous
 * operation. To make sure that it has completed, iommu_strbuf_sync() needs
 * to be called. No locking required.
 */
static __inline void
iommu_strbuf_flush(struct iommu_state *is, bus_addr_t va)
{
	struct iommu_state *it;

	/*
	 * Need to flush the streaming buffers of all IOMMUs, we cannot
	 * determine which one was used for the transaction.
	 */
	STAILQ_FOREACH(it, &iommu_insts, is_link)
		iommu_strbuf_flushpg(it, va);
}

/* Synchronize all outstanding flush operations. */
static __inline void
iommu_strbuf_sync(struct iommu_state *is)
{
	struct iommu_state *it;

	IS_LOCK_ASSERT(is);
	/*
	 * Need to sync the streaming buffers of all IOMMUs, we cannot
	 * determine which one was used for the transaction.
	 */
	STAILQ_FOREACH(it, &iommu_insts, is_link)
		iommu_strbuf_flush_sync(it);
}

/* LRU queue handling for lazy resource allocation. */
static __inline void
iommu_map_insq(struct iommu_state *is, bus_dmamap_t map)
{

	IS_LOCK_ASSERT(is);
	if (!SLIST_EMPTY(&map->dm_reslist)) {
		if (map->dm_onq)
			TAILQ_REMOVE(&iommu_maplruq, map, dm_maplruq);
		TAILQ_INSERT_TAIL(&iommu_maplruq, map, dm_maplruq);
		map->dm_onq = 1;
	}
}

static __inline void
iommu_map_remq(struct iommu_state *is, bus_dmamap_t map)
{

	IS_LOCK_ASSERT(is);
	if (map->dm_onq)
		TAILQ_REMOVE(&iommu_maplruq, map, dm_maplruq);
	map->dm_onq = 0;
}

/*
 * initialise the UltraSPARC IOMMU (SBus or PCI):
 *	- allocate and setup the iotsb.
 *	- enable the IOMMU
 *	- initialise the streaming buffers (if they exist)
 *	- create a private DVMA map.
 */
void
iommu_init(char *name, struct iommu_state *is, int tsbsize, u_int32_t iovabase,
    int resvpg)
{
	struct iommu_state *first;
	vm_size_t size;
	vm_offset_t offs;
	u_int64_t end;
	int i;

	/*
	 * Setup the iommu.
	 *
	 * The sun4u iommu is part of the SBUS or PCI controller so we
	 * will deal with it here.
	 *
	 * The IOMMU address space always ends at 0xffffe000, but the starting
	 * address depends on the size of the map. The map size is 1024 * 2 ^
	 * is->is_tsbsize entries, where each entry is 8 bytes. The start of
	 * the map is the 32-bit truncation of
	 * (0xffffe000 << (10 + is->is_tsbsize)).
	 */
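	/*
	 * For example, the smallest map (is->is_tsbsize == 0) has 1024
	 * TTEs covering 1024 * 8KB = 8MB of DVMA space and thus spans
	 * 0xff800000 - 0xffffffff; each tsbsize increment doubles the map
	 * and moves the start down accordingly.
	 */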
	is->is_cr = (tsbsize << IOMMUCR_TSBSZ_SHIFT) | IOMMUCR_EN;
	is->is_tsbsize = tsbsize;
	is->is_dvmabase = iovabase;
	if (iovabase == -1)
		is->is_dvmabase = IOTSB_VSTART(is->is_tsbsize);

	size = IOTSB_BASESZ << is->is_tsbsize;
	printf("DVMA map: %#lx to %#lx\n",
	    is->is_dvmabase, is->is_dvmabase +
	    (size << (IO_PAGE_SHIFT - IOTTE_SHIFT)) - 1);

	if (STAILQ_EMPTY(&iommu_insts)) {
		/*
		 * First IOMMU to be registered; set up resource management
		 * and allocate TSB memory.
		 */
		mtx_init(&iommu_mtx, "iommu", NULL, MTX_DEF);
		end = is->is_dvmabase + (size << (IO_PAGE_SHIFT - IOTTE_SHIFT));
		iommu_dvma_rman.rm_type = RMAN_ARRAY;
		iommu_dvma_rman.rm_descr = "DVMA Memory";
		if (rman_init(&iommu_dvma_rman) != 0 ||
		    rman_manage_region(&iommu_dvma_rman,
		    (is->is_dvmabase >> IO_PAGE_SHIFT) + resvpg,
		    (end >> IO_PAGE_SHIFT) - 1) != 0)
			panic("iommu_init: can't initialize dvma rman");
		/*
		 * Allocate memory for I/O page tables. They need to be
		 * physically contiguous.
		 */
		iommu_tsb = contigmalloc(size, M_DEVBUF, M_NOWAIT, 0, ~0UL,
		    PAGE_SIZE, 0);
		if (iommu_tsb == NULL)
			panic("iommu_init: contigmalloc failed");
		iommu_ptsb = pmap_kextract((vm_offset_t)iommu_tsb);
		bzero(iommu_tsb, size);
	} else {
		/*
		 * Not the first IOMMU; just check that the parameters match
		 * those of the first one.
		 */
		first = STAILQ_FIRST(&iommu_insts);
		if (is->is_tsbsize != first->is_tsbsize ||
		    is->is_dvmabase != first->is_dvmabase) {
			panic("iommu_init: secondary IOMMU state does not "
			    "match primary");
		}
	}
	STAILQ_INSERT_TAIL(&iommu_insts, is, is_link);

	/*
	 * Initialize streaming buffer, if it is there.
	 */
	if (IOMMU_HAS_SB(is)) {
		/*
		 * Find two 64-byte blocks in is_flush that are aligned on
		 * a 64-byte boundary for flushing.
		 */
		offs = roundup2((vm_offset_t)is->is_flush,
		    STRBUF_FLUSHSYNC_NBYTES);
		for (i = 0; i < 2; i++, offs += STRBUF_FLUSHSYNC_NBYTES) {
			is->is_flushva[i] = (int64_t *)offs;
			is->is_flushpa[i] = pmap_kextract(offs);
		}
	}

	/*
	 * Now actually start up the IOMMU.
	 */
	iommu_reset(is);
}

/*
 * Streaming buffers don't exist on the UltraSPARC IIi; we should have
 * detected that already and disabled them. If not, we will notice that
 * they aren't there when the STRBUF_EN bit does not remain set.
 */
void
iommu_reset(struct iommu_state *is)
{
	int i;

	IOMMU_WRITE8(is, is_iommu, IMR_TSB, iommu_ptsb);
	/* Enable IOMMU in diagnostic mode */
	IOMMU_WRITE8(is, is_iommu, IMR_CTL, is->is_cr | IOMMUCR_DE);

	for (i = 0; i < 2; i++) {
		if (is->is_sb[i] != 0) {
			/* Enable diagnostics mode? */
			IOMMU_WRITE8(is, is_sb[i], ISR_CTL, STRBUF_EN);

			/* No streaming buffers? Disable them */
			if (IOMMU_READ8(is, is_sb[i], ISR_CTL) == 0)
				is->is_sb[i] = 0;
		}
	}
}

/*
 * Enter a mapping into the TSB. No locking required, since each TSB slot is
 * uniquely assigned to a single map.
 */
static void
iommu_enter(struct iommu_state *is, vm_offset_t va, vm_paddr_t pa,
    int stream, int flags)
{
	int64_t tte;

	KASSERT(va >= is->is_dvmabase,
	    ("iommu_enter: va %#lx not in DVMA space", va));
	KASSERT(pa < IOMMU_MAXADDR,
	    ("iommu_enter: XXX: physical address too large (%#lx)", pa));

	tte = MAKEIOTTE(pa, !(flags & BUS_DMA_NOWRITE),
	    !(flags & BUS_DMA_NOCACHE), stream);

	IOMMU_SET_TTE(is, va, tte);
	iommu_tlb_flush(is, va);
#ifdef IOMMU_DIAG
	IS_LOCK(is);
	iommu_diag(is, va);
	IS_UNLOCK(is);
#endif
}

/*
 * Remove mappings created by iommu_enter. Flush the streaming buffer, but do
 * not synchronize it. Returns whether a streaming buffer flush was performed.
 */
static int
iommu_remove(struct iommu_state *is, vm_offset_t va, vm_size_t len)
{
	int streamed = 0;

#ifdef IOMMU_DIAG
	iommu_diag(is, va);
#endif

	KASSERT(va >= is->is_dvmabase,
	    ("iommu_remove: va 0x%lx not in DVMA space", (u_long)va));
	KASSERT(va + len >= va,
	    ("iommu_remove: va 0x%lx + len 0x%lx wraps", (long)va, (long)len));

	va = trunc_io_page(va);
	while (len > 0) {
		if ((IOMMU_GET_TTE(is, va) & IOTTE_STREAM) != 0) {
			streamed = 1;
			iommu_strbuf_flush(is, va);
		}
		len -= ulmin(len, IO_PAGE_SIZE);
		IOMMU_SET_TTE(is, va, 0);
		iommu_tlb_flush(is, va);
		va += IO_PAGE_SIZE;
	}
	return (streamed);
}

/* Decode an IOMMU fault for host bridge error handlers. */
void
iommu_decode_fault(struct iommu_state *is, vm_offset_t phys)
{
	bus_addr_t va;
	long idx;

	idx = phys - iommu_ptsb;
	if (phys < iommu_ptsb ||
	    idx > (PAGE_SIZE << is->is_tsbsize))
		return;
	va = is->is_dvmabase +
	    (((bus_addr_t)idx >> IOTTE_SHIFT) << IO_PAGE_SHIFT);
	printf("IOMMU fault virtual address %#lx\n", (u_long)va);
}
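
/*
 * In iommu_decode_fault() above, each 8-byte TTE maps one 8KB I/O page, so
 * an offset of idx bytes into the TSB corresponds to DVMA page number
 * idx >> IOTTE_SHIFT; e.g., a fault at iommu_ptsb + 0x40 is the eighth TTE
 * and decodes to is->is_dvmabase + 0x10000.
 */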

/*
 * A barrier operation which makes sure that all previous streaming buffer
 * flushes complete before it returns.
 */
static int
iommu_strbuf_flush_sync(struct iommu_state *is)
{
	struct timeval cur, end;
	int i;

	IS_LOCK_ASSERT(is);
	if (!IOMMU_HAS_SB(is))
		return (0);

	/*
	 * Streaming buffer flushes:
	 *
	 *   1 Tell strbuf to flush by storing va to strbuf_pgflush. If
	 *     we're not on a cache line boundary (64-bits):
	 *   2 Store 0 in flag
	 *   3 Store pointer to flag in flushsync
	 *   4 wait till flushsync becomes 0x1
	 *
	 * If it takes more than .5 sec, something
	 * went wrong.
	 */
	*is->is_flushva[0] = 1;
	*is->is_flushva[1] = 1;
	membar(StoreStore);
	for (i = 0; i < 2; i++) {
		if (is->is_sb[i] != 0) {
			*is->is_flushva[i] = 0;
			IOMMU_WRITE8(is, is_sb[i], ISR_FLUSHSYNC,
			    is->is_flushpa[i]);
		}
	}

	microuptime(&cur);
	end.tv_sec = 0;
	/*
	 * 0.5s is the recommended timeout from the U2S manual. The actual
	 * time required should be smaller by at least a factor of 1000.
	 * We have no choice but to busy-wait.
	 */
	end.tv_usec = 500000;
	timevaladd(&end, &cur);

	while ((!*is->is_flushva[0] || !*is->is_flushva[1]) &&
	    timevalcmp(&cur, &end, <=))
		microuptime(&cur);

	if (!*is->is_flushva[0] || !*is->is_flushva[1]) {
		panic("iommu_strbuf_flush_sync: flush timeout %ld, %ld at %#lx",
		    *is->is_flushva[0], *is->is_flushva[1], is->is_flushpa[0]);
	}

	return (1);
}

/* Determine whether we may enable streaming on a mapping. */
static __inline int
iommu_use_streaming(struct iommu_state *is, bus_dmamap_t map, bus_size_t size)
{

	/*
	 * This cannot be enabled yet, as many drivers are still missing
	 * bus_dmamap_sync() calls. As soon as there is a BUS_DMA_STREAMING
	 * flag, this should be reenabled conditionally on it.
	 */
#ifdef notyet
	return (size >= IOMMU_STREAM_THRESH && IOMMU_HAS_SB(is) &&
	    (map->dm_flags & DMF_COHERENT) == 0);
#else
	return (0);
#endif
}

/*
 * Allocate DVMA virtual memory for a map. The map may not be on a queue, so
 * that it can be freely modified.
 */
static int
iommu_dvma_valloc(bus_dma_tag_t t, struct iommu_state *is, bus_dmamap_t map,
    bus_size_t size)
{
	struct resource *res;
	struct bus_dmamap_res *bdr;
	bus_size_t align, sgsize;

	KASSERT(!map->dm_onq, ("iommu_dvma_valloc: map on queue!"));
	if ((bdr = malloc(sizeof(*bdr), M_IOMMU, M_NOWAIT)) == NULL)
		return (EAGAIN);
	/*
	 * If a boundary is specified, a map cannot be larger than it; however
	 * we do not clip currently, as that does not play well with the lazy
	 * allocation code.
	 * Alignment to a page boundary is always enforced.
	 */
	align = (t->dt_alignment + IO_PAGE_MASK) >> IO_PAGE_SHIFT;
	sgsize = round_io_page(size) >> IO_PAGE_SHIFT;
	if (t->dt_boundary > 0 && t->dt_boundary < IO_PAGE_SIZE)
		panic("iommu_dvma_valloc: illegal boundary specified");
	res = rman_reserve_resource_bound(&iommu_dvma_rman, 0L,
	    t->dt_lowaddr >> IO_PAGE_SHIFT, sgsize,
	    t->dt_boundary >> IO_PAGE_SHIFT,
	    RF_ACTIVE | rman_make_alignment_flags(align), NULL);
	if (res == NULL) {
		free(bdr, M_IOMMU);
		return (ENOMEM);
	}

	bdr->dr_res = res;
	bdr->dr_used = 0;
	SLIST_INSERT_HEAD(&map->dm_reslist, bdr, dr_link);
	return (0);
}

/* Unload the map and mark all resources as unused, but do not free them. */
static void
iommu_dvmamap_vunload(struct iommu_state *is, bus_dmamap_t map)
{
	struct bus_dmamap_res *r;
	int streamed = 0;

	IS_LOCK_ASSERT(is);	/* for iommu_strbuf_sync() below. */
	SLIST_FOREACH(r, &map->dm_reslist, dr_link) {
		streamed |= iommu_remove(is, BDR_START(r), r->dr_used);
		r->dr_used = 0;
	}
	if (streamed)
		iommu_strbuf_sync(is);
}

/* Free a DVMA virtual memory resource. */
static __inline void
iommu_dvma_vfree_res(bus_dmamap_t map, struct bus_dmamap_res *r)
{

	KASSERT(r->dr_used == 0, ("iommu_dvma_vfree_res: resource busy!"));
	if (r->dr_res != NULL && rman_release_resource(r->dr_res) != 0)
		printf("warning: DVMA space lost\n");
	SLIST_REMOVE(&map->dm_reslist, r, bus_dmamap_res, dr_link);
	free(r, M_IOMMU);
}

/* Free all DVMA virtual memory for a map. */
static void
iommu_dvma_vfree(struct iommu_state *is, bus_dmamap_t map)
{

	IS_LOCK(is);
	iommu_map_remq(is, map);
	iommu_dvmamap_vunload(is, map);
	IS_UNLOCK(is);
	while (!SLIST_EMPTY(&map->dm_reslist))
		iommu_dvma_vfree_res(map, SLIST_FIRST(&map->dm_reslist));
}

/* Prune a map, freeing all unused DVMA resources. */
static bus_size_t
iommu_dvma_vprune(struct iommu_state *is, bus_dmamap_t map)
{
	struct bus_dmamap_res *r, *n;
	bus_size_t freed = 0;

	IS_LOCK_ASSERT(is);
	for (r = SLIST_FIRST(&map->dm_reslist); r != NULL; r = n) {
		n = SLIST_NEXT(r, dr_link);
		if (r->dr_used == 0) {
			freed += BDR_SIZE(r);
			iommu_dvma_vfree_res(map, r);
		}
	}
	if (SLIST_EMPTY(&map->dm_reslist))
		iommu_map_remq(is, map);
	return (freed);
}

/*
 * Try to find a suitably-sized (and if requested, -aligned) slab of DVMA
 * memory with IO page offset voffs.
 */
static bus_addr_t
iommu_dvma_vfindseg(bus_dmamap_t map, vm_offset_t voffs, bus_size_t size,
    bus_addr_t amask)
{
	struct bus_dmamap_res *r;
	bus_addr_t dvmaddr, dvmend;

	KASSERT(!map->dm_onq, ("iommu_dvma_vfindseg: map on queue!"));
	SLIST_FOREACH(r, &map->dm_reslist, dr_link) {
		dvmaddr = round_io_page(BDR_START(r) + r->dr_used);
		/* Alignment can only work with voffs == 0. */
		dvmaddr = (dvmaddr + amask) & ~amask;
		dvmaddr += voffs;
		dvmend = dvmaddr + size;
		if (dvmend <= BDR_END(r)) {
			r->dr_used = dvmend - BDR_START(r);
			return (dvmaddr);
		}
	}
	return (0);
}
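
/*
 * The voffs parameter keeps a loaded buffer at its original offset within
 * an I/O page: e.g., a buffer starting at kernel VA 0x12345 is mapped with
 * voffs = 0x12345 & IO_PAGE_MASK = 0x345, so the DVMA address returned
 * above also ends in 0x345 and the page-sized TTEs line up with the
 * underlying physical pages.
 */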

/*
 * Try to find or allocate a slab of DVMA space; see above.
 */
static int
iommu_dvma_vallocseg(bus_dma_tag_t dt, struct iommu_state *is, bus_dmamap_t map,
    vm_offset_t voffs, bus_size_t size, bus_addr_t amask, bus_addr_t *addr)
{
	bus_dmamap_t tm;
	bus_addr_t dvmaddr, freed;
	int error, complete = 0;

	dvmaddr = iommu_dvma_vfindseg(map, voffs, size, amask);

	/* Need to allocate. */
	if (dvmaddr == 0) {
		while ((error = iommu_dvma_valloc(dt, is, map,
		    voffs + size)) == ENOMEM && !complete) {
			/*
			 * Free the allocated DVMA of a few maps until
			 * the required size is reached. This is an
			 * approximation to not have to call the allocation
			 * function too often; a single free run will usually
			 * not suffice unless one of the freed maps was large
			 * enough by itself, which fragmentation makes
			 * unlikely.
			 */
			IS_LOCK(is);
			freed = 0;
			do {
				tm = TAILQ_FIRST(&iommu_maplruq);
				if (tm == NULL) {
					complete = 1;
					break;
				}
				freed += iommu_dvma_vprune(is, tm);
				/* Move to the end. */
				iommu_map_insq(is, tm);
			} while (freed < size);
			IS_UNLOCK(is);
		}
		if (error != 0)
			return (error);
		dvmaddr = iommu_dvma_vfindseg(map, voffs, size, amask);
		KASSERT(dvmaddr != 0,
		    ("iommu_dvma_vallocseg: allocation failed unexpectedly!"));
	}
	*addr = dvmaddr;
	return (0);
}

static int
iommu_dvmamem_alloc(bus_dma_tag_t dt, void **vaddr, int flags,
    bus_dmamap_t *mapp)
{
	struct iommu_state *is = dt->dt_cookie;
	int error, mflags;

	/*
	 * XXX: This will break for 32 bit transfers on machines with more than
	 * 16G (1 << 34 bytes) of memory.
	 */
	if ((error = sparc64_dma_alloc_map(dt, mapp)) != 0)
		return (error);

	if ((flags & BUS_DMA_NOWAIT) != 0)
		mflags = M_NOWAIT;
	else
		mflags = M_WAITOK;
	if ((flags & BUS_DMA_ZERO) != 0)
		mflags |= M_ZERO;

	if ((*vaddr = malloc(dt->dt_maxsize, M_IOMMU, mflags)) == NULL) {
		error = ENOMEM;
		sparc64_dma_free_map(dt, *mapp);
		return (error);
	}
	if ((flags & BUS_DMA_COHERENT) != 0)
		(*mapp)->dm_flags |= DMF_COHERENT;
	/*
	 * Try to preallocate DVMA space. If this fails, it is retried at load
	 * time.
	 */
	iommu_dvma_valloc(dt, is, *mapp, IOMMU_SIZE_ROUNDUP(dt->dt_maxsize));
	IS_LOCK(is);
	iommu_map_insq(is, *mapp);
	IS_UNLOCK(is);
	return (0);
}

static void
iommu_dvmamem_free(bus_dma_tag_t dt, void *vaddr, bus_dmamap_t map)
{
	struct iommu_state *is = dt->dt_cookie;

	iommu_dvma_vfree(is, map);
	sparc64_dma_free_map(dt, map);
	free(vaddr, M_IOMMU);
}

static int
iommu_dvmamap_create(bus_dma_tag_t dt, int flags, bus_dmamap_t *mapp)
{
	struct iommu_state *is = dt->dt_cookie;
	bus_size_t totsz, presz, currsz;
	int error, i, maxpre;

	if ((error = sparc64_dma_alloc_map(dt, mapp)) != 0)
		return (error);
	if ((flags & BUS_DMA_COHERENT) != 0)
		(*mapp)->dm_flags |= DMF_COHERENT;
	/*
	 * Preallocate DVMA space; if this fails now, it is retried at load
	 * time. Through bus_dmamap_load_mbuf() and bus_dmamap_load_uio(), it
	 * is possible to have multiple discontiguous segments in a single map,
	 * which is handled by allocating additional resources, instead of
	 * increasing the size, to avoid fragmentation.
	 * Clamp preallocation to IOMMU_MAX_PRE. In some situations we can
	 * handle more; that case is handled by reallocating at map load time.
	 */
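	/*
	 * Worked example: for a tag with dt_maxsize = 16KB and
	 * dt_nsegments >= 3, the initial reservation is
	 * IOMMU_SIZE_ROUNDUP(16KB) = 24KB, below the 32KB IOMMU_MAX_PRE
	 * clamp; maxpre is then 3 and presz about 5.3KB, so the loop below
	 * adds one extra round_io_page()d segment of 8KB before totsz
	 * reaches the clamp.
	 */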
	totsz = ulmin(IOMMU_SIZE_ROUNDUP(dt->dt_maxsize), IOMMU_MAX_PRE);
	error = iommu_dvma_valloc(dt, is, *mapp, totsz);
	if (error != 0)
		return (0);
	/*
	 * Try to be smart about preallocating some additional segments if
	 * needed.
	 */
	maxpre = imin(dt->dt_nsegments, IOMMU_MAX_PRE_SEG);
	presz = dt->dt_maxsize / maxpre;
	KASSERT(presz != 0, ("iommu_dvmamap_create: bogus preallocation size, "
	    "nsegments = %d, maxpre = %d, maxsize = %lu", dt->dt_nsegments,
	    maxpre, dt->dt_maxsize));
	for (i = 1; i < maxpre && totsz < IOMMU_MAX_PRE; i++) {
		currsz = round_io_page(ulmin(presz, IOMMU_MAX_PRE - totsz));
		error = iommu_dvma_valloc(dt, is, *mapp, currsz);
		if (error != 0)
			break;
		totsz += currsz;
	}
	IS_LOCK(is);
	iommu_map_insq(is, *mapp);
	IS_UNLOCK(is);
	return (0);
}

static int
iommu_dvmamap_destroy(bus_dma_tag_t dt, bus_dmamap_t map)
{
	struct iommu_state *is = dt->dt_cookie;

	iommu_dvma_vfree(is, map);
	sparc64_dma_free_map(dt, map);
	return (0);
}

/*
 * IOMMU DVMA operations, common to SBUS and PCI.
 */
static int
iommu_dvmamap_load_buffer(bus_dma_tag_t dt, struct iommu_state *is,
    bus_dmamap_t map, bus_dma_segment_t sgs[], void *buf,
    bus_size_t buflen, struct thread *td, int flags, int *segp, int align)
{
	bus_addr_t amask, dvmaddr;
	bus_size_t sgsize, esize;
	vm_offset_t vaddr, voffs;
	vm_paddr_t curaddr;
	int error, sgcnt, firstpg, stream;
	pmap_t pmap = NULL;

	KASSERT(buflen != 0, ("iommu_dvmamap_load_buffer: buflen == 0!"));
	if (buflen > dt->dt_maxsize)
		return (EINVAL);

	if (td != NULL)
		pmap = vmspace_pmap(td->td_proc->p_vmspace);

	vaddr = (vm_offset_t)buf;
	voffs = vaddr & IO_PAGE_MASK;
	amask = align ? dt->dt_alignment - 1 : 0;

	/* Try to find a slab that is large enough. */
	error = iommu_dvma_vallocseg(dt, is, map, voffs, buflen, amask,
	    &dvmaddr);
	if (error != 0)
		return (error);

	sgcnt = *segp;
	firstpg = 1;
	stream = iommu_use_streaming(is, map, buflen);
	for (; buflen > 0; ) {
		/*
		 * Get the physical address for this page.
		 */
		if (pmap != NULL)
			curaddr = pmap_extract(pmap, vaddr);
		else
			curaddr = pmap_kextract(vaddr);

		/*
		 * Compute the segment size, and adjust counts.
		 */
		sgsize = IO_PAGE_SIZE - ((u_long)vaddr & IO_PAGE_MASK);
		if (buflen < sgsize)
			sgsize = buflen;

		buflen -= sgsize;
		vaddr += sgsize;

		iommu_enter(is, trunc_io_page(dvmaddr), trunc_io_page(curaddr),
		    stream, flags);

		/*
		 * Chop the chunk up into segments of at most maxsegsz, but try
		 * to fill each segment as well as possible.
		 */
		if (!firstpg) {
			esize = ulmin(sgsize,
			    dt->dt_maxsegsz - sgs[sgcnt].ds_len);
			sgs[sgcnt].ds_len += esize;
			sgsize -= esize;
			dvmaddr += esize;
		}
		while (sgsize > 0) {
			sgcnt++;
			if (sgcnt >= dt->dt_nsegments ||
			    sgcnt >= BUS_DMAMAP_NSEGS)
				return (EFBIG);
			/*
			 * No extra alignment here - the common practice in the
			 * busdma code seems to be that only the first segment
			 * needs to satisfy the alignment constraints (and that
			 * only for bus_dmamem_alloc()ed maps). It is assumed
			 * that such tags have maxsegsize >= maxsize.
			 */
			esize = ulmin(sgsize, dt->dt_maxsegsz);
			sgs[sgcnt].ds_addr = dvmaddr;
			sgs[sgcnt].ds_len = esize;
			sgsize -= esize;
			dvmaddr += esize;
		}

		firstpg = 0;
	}
	*segp = sgcnt;
	return (0);
}

static int
iommu_dvmamap_load(bus_dma_tag_t dt, bus_dmamap_t map, void *buf,
    bus_size_t buflen, bus_dmamap_callback_t *cb, void *cba,
    int flags)
{
	struct iommu_state *is = dt->dt_cookie;
#ifdef __GNUC__
	bus_dma_segment_t sgs[dt->dt_nsegments];
#else
	bus_dma_segment_t sgs[BUS_DMAMAP_NSEGS];
#endif
	int error, seg = -1;

	if ((map->dm_flags & DMF_LOADED) != 0) {
#ifdef DIAGNOSTIC
		printf("iommu_dvmamap_load: map still in use\n");
#endif
		bus_dmamap_unload(dt, map);
	}

	/*
	 * Make sure that the map is not on a queue so that the resource list
	 * may be safely accessed and modified without needing the lock to
	 * cover the whole operation.
	 */
	IS_LOCK(is);
	iommu_map_remq(is, map);
	IS_UNLOCK(is);

	error = iommu_dvmamap_load_buffer(dt, is, map, sgs, buf, buflen, NULL,
	    flags, &seg, 1);

	IS_LOCK(is);
	iommu_map_insq(is, map);
	if (error != 0) {
		iommu_dvmamap_vunload(is, map);
		IS_UNLOCK(is);
		(*cb)(cba, sgs, 0, error);
	} else {
		IS_UNLOCK(is);
		map->dm_flags |= DMF_LOADED;
		(*cb)(cba, sgs, seg + 1, 0);
	}

	return (error);
}

static int
iommu_dvmamap_load_mbuf(bus_dma_tag_t dt, bus_dmamap_t map, struct mbuf *m0,
    bus_dmamap_callback2_t *cb, void *cba, int flags)
{
	struct iommu_state *is = dt->dt_cookie;
#ifdef __GNUC__
	bus_dma_segment_t sgs[dt->dt_nsegments];
#else
	bus_dma_segment_t sgs[BUS_DMAMAP_NSEGS];
#endif
	struct mbuf *m;
	int error = 0, first = 1, nsegs = -1;

	M_ASSERTPKTHDR(m0);

	if ((map->dm_flags & DMF_LOADED) != 0) {
#ifdef DIAGNOSTIC
		printf("iommu_dvmamap_load_mbuf: map still in use\n");
#endif
		bus_dmamap_unload(dt, map);
	}

	IS_LOCK(is);
	iommu_map_remq(is, map);
	IS_UNLOCK(is);

	if (m0->m_pkthdr.len <= dt->dt_maxsize) {
		for (m = m0; m != NULL && error == 0; m = m->m_next) {
			if (m->m_len == 0)
				continue;
			error = iommu_dvmamap_load_buffer(dt, is, map, sgs,
			    m->m_data, m->m_len, NULL, flags, &nsegs, first);
			first = 0;
		}
	} else
		error = EINVAL;

	IS_LOCK(is);
	iommu_map_insq(is, map);
	if (error != 0) {
		iommu_dvmamap_vunload(is, map);
		IS_UNLOCK(is);
		/* force "no valid mappings" in callback */
		(*cb)(cba, sgs, 0, 0, error);
	} else {
		IS_UNLOCK(is);
		map->dm_flags |= DMF_LOADED;
		(*cb)(cba, sgs, nsegs + 1, m0->m_pkthdr.len, 0);
	}
	return (error);
}

static int
iommu_dvmamap_load_uio(bus_dma_tag_t dt, bus_dmamap_t map, struct uio *uio,
    bus_dmamap_callback2_t *cb, void *cba, int flags)
{
	struct iommu_state *is = dt->dt_cookie;
#ifdef __GNUC__
	bus_dma_segment_t sgs[dt->dt_nsegments];
#else
	bus_dma_segment_t sgs[BUS_DMAMAP_NSEGS];
#endif
	struct iovec *iov;
	struct thread *td = NULL;
	bus_size_t minlen, resid;
	int nsegs = -1, error = 0, first = 1, i;

	if ((map->dm_flags & DMF_LOADED) != 0) {
#ifdef DIAGNOSTIC
		printf("iommu_dvmamap_load_uio: map still in use\n");
#endif
		bus_dmamap_unload(dt, map);
	}

	IS_LOCK(is);
	iommu_map_remq(is, map);
	IS_UNLOCK(is);

	resid = uio->uio_resid;
	iov = uio->uio_iov;

	if (uio->uio_segflg == UIO_USERSPACE) {
		td = uio->uio_td;
		KASSERT(td != NULL,
		    ("%s: USERSPACE but no proc", __func__));
	}

	for (i = 0; i < uio->uio_iovcnt && resid != 0 && error == 0; i++) {
		/*
		 * Now at the first iovec to load. Load each iovec
		 * until we have exhausted the residual count.
		 */
		minlen = resid < iov[i].iov_len ? resid : iov[i].iov_len;
		if (minlen == 0)
			continue;

		error = iommu_dvmamap_load_buffer(dt, is, map, sgs,
		    iov[i].iov_base, minlen, td, flags, &nsegs, first);
		first = 0;

		resid -= minlen;
	}

	IS_LOCK(is);
	iommu_map_insq(is, map);
	if (error) {
		iommu_dvmamap_vunload(is, map);
		IS_UNLOCK(is);
		/* force "no valid mappings" in callback */
		(*cb)(cba, sgs, 0, 0, error);
	} else {
		IS_UNLOCK(is);
		map->dm_flags |= DMF_LOADED;
		(*cb)(cba, sgs, nsegs + 1, uio->uio_resid, 0);
	}
	return (error);
}

static void
iommu_dvmamap_unload(bus_dma_tag_t dt, bus_dmamap_t map)
{
	struct iommu_state *is = dt->dt_cookie;

	if ((map->dm_flags & DMF_LOADED) == 0)
		return;
	IS_LOCK(is);
	iommu_dvmamap_vunload(is, map);
	iommu_map_insq(is, map);
	IS_UNLOCK(is);
	map->dm_flags &= ~DMF_LOADED;
}

static void
iommu_dvmamap_sync(bus_dma_tag_t dt, bus_dmamap_t map, bus_dmasync_op_t op)
{
	struct iommu_state *is = dt->dt_cookie;
	struct bus_dmamap_res *r;
	vm_offset_t va;
	vm_size_t len;
	int streamed = 0;

	if ((map->dm_flags & DMF_LOADED) == 0)
		return;
	/* XXX This is probably bogus. */
	if ((op & BUS_DMASYNC_PREREAD) != 0)
		membar(Sync);
	if (IOMMU_HAS_SB(is) &&
	    ((op & BUS_DMASYNC_POSTREAD) != 0 ||
	    (op & BUS_DMASYNC_PREWRITE) != 0)) {
		IS_LOCK(is);
		SLIST_FOREACH(r, &map->dm_reslist, dr_link) {
			va = (vm_offset_t)BDR_START(r);
			len = r->dr_used;
			/* if we have a streaming buffer, flush it here first */
			while (len > 0) {
				if ((IOMMU_GET_TTE(is, va) & IOTTE_STREAM) != 0) {
					streamed = 1;
					iommu_strbuf_flush(is, va);
				}
				len -= ulmin(len, IO_PAGE_SIZE);
				va += IO_PAGE_SIZE;
			}
		}
		if (streamed)
			iommu_strbuf_sync(is);
		IS_UNLOCK(is);
	}
	if ((op & BUS_DMASYNC_PREWRITE) != 0)
		membar(Sync);
}

#ifdef IOMMU_DIAG

/*
 * Perform an IOMMU diagnostic access and print the tag belonging to va.
 */
static void
iommu_diag(struct iommu_state *is, vm_offset_t va)
{
	int i;
	u_int64_t tag, data;

	IS_LOCK_ASSERT(is);
	IOMMU_WRITE8(is, is_dva, 0, trunc_io_page(va));
	membar(StoreStore | StoreLoad);
	printf("iommu_diag: tte entry %#lx", IOMMU_GET_TTE(is, va));
	if (is->is_dtcmp != 0) {
		printf(", tag compare register is %#lx\n",
		    IOMMU_READ8(is, is_dtcmp, 0));
	} else
		printf("\n");
	for (i = 0; i < 16; i++) {
		tag = IOMMU_READ8(is, is_dtag, i * 8);
		data = IOMMU_READ8(is, is_ddram, i * 8);
		printf("iommu_diag: tag %d: %#lx, vpn %#lx, err %lx; "
		    "data %#lx, pa %#lx, v %d, c %d\n", i,
		    tag, (tag & IOMMU_DTAG_VPNMASK) << IOMMU_DTAG_VPNSHIFT,
		    (tag & IOMMU_DTAG_ERRMASK) >> IOMMU_DTAG_ERRSHIFT, data,
		    (data & IOMMU_DDATA_PGMASK) << IOMMU_DDATA_PGSHIFT,
		    (data & IOMMU_DDATA_V) != 0, (data & IOMMU_DDATA_C) != 0);
	}
}

#endif /* IOMMU_DIAG */

struct bus_dma_methods iommu_dma_methods = {
	iommu_dvmamap_create,
	iommu_dvmamap_destroy,
	iommu_dvmamap_load,
	iommu_dvmamap_load_mbuf,
	iommu_dvmamap_load_uio,
	iommu_dvmamap_unload,
	iommu_dvmamap_sync,
	iommu_dvmamem_alloc,
	iommu_dvmamem_free,
};
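
/*
 * A bus driver attaches these methods by pointing its DMA tag's method
 * table at iommu_dma_methods after iommu_init(). A minimal sketch (the
 * dt_mt member name follows the sparc64 bus_dma_tag layout and may differ):
 *
 *	iommu_init("sbus", &sc->sc_is, tsbsize, -1, 0);
 *	sc->sc_dmatag->dt_cookie = &sc->sc_is;
 *	sc->sc_dmatag->dt_mt = &iommu_dma_methods;
 */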