/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2020-2021 Ruslan Bukin <br@bsdpad.com>
 * Copyright (c) 2014-2021 Andrew Turner
 * Copyright (c) 2014-2016 The FreeBSD Foundation
 * All rights reserved.
 *
 * This work was supported by Innovate UK project 105694, "Digital Security
 * by Design (DSbD) Technology Platform Prototype".
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/*
 * Manages physical address maps for ARM SMMUv3 and ARM Mali GPU.
 */

#include "opt_vm.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/rwlock.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_pageout.h>
#include <vm/vm_radix.h>

#include <machine/machdep.h>

#include <arm64/iommu/iommu_pmap.h>
#include <arm64/iommu/iommu_pte.h>

#define	IOMMU_PAGE_SIZE		4096

#define	NL0PG		(IOMMU_PAGE_SIZE/(sizeof (pd_entry_t)))
#define	NL1PG		(IOMMU_PAGE_SIZE/(sizeof (pd_entry_t)))
#define	NL2PG		(IOMMU_PAGE_SIZE/(sizeof (pd_entry_t)))
#define	NL3PG		(IOMMU_PAGE_SIZE/(sizeof (pt_entry_t)))

#define	NUL0E		IOMMU_L0_ENTRIES
#define	NUL1E		(NUL0E * NL1PG)
#define	NUL2E		(NUL1E * NL2PG)

#define	iommu_l0_pindex(v)	(NUL2E + NUL1E + ((v) >> IOMMU_L0_SHIFT))
#define	iommu_l1_pindex(v)	(NUL2E + ((v) >> IOMMU_L1_SHIFT))
#define	iommu_l2_pindex(v)	((v) >> IOMMU_L2_SHIFT)
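
/*
 * The pindex macros above partition the page table page index space: a
 * pindex below NUL2E names a page of L3 entries, a pindex in
 * [NUL2E, NUL2E + NUL1E) names a page of L2 entries, and anything above
 * that names a page of L1 entries.  _pmap_alloc_l3() and _pmap_unwire_l3()
 * below rely on this layout.
 */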

/* This code assumes all L1 DMAP entries will be used */
CTASSERT((DMAP_MIN_ADDRESS & ~IOMMU_L0_OFFSET) == DMAP_MIN_ADDRESS);
CTASSERT((DMAP_MAX_ADDRESS & ~IOMMU_L0_OFFSET) == DMAP_MAX_ADDRESS);

static vm_page_t _pmap_alloc_l3(pmap_t pmap, vm_pindex_t ptepindex);
static void _pmap_unwire_l3(pmap_t pmap, vm_offset_t va, vm_page_t m,
    struct spglist *free);

/*
 * These load the old table data and store the new value.
 * They need to be atomic as the System MMU may write to the table at
 * the same time as the CPU.
 */
#define	pmap_load(table)		(*table)
#define	pmap_clear(table)		atomic_store_64(table, 0)
#define	pmap_store(table, entry)	atomic_store_64(table, entry)
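
/*
 * Typical update pattern (an illustrative sketch only, not a required
 * sequence): read the current entry with pmap_load(), test its descriptor
 * bits, and publish a replacement with pmap_store(), e.g.:
 *
 *	if ((pmap_load(l3) & ATTR_DESCR_MASK) != IOMMU_L3_PAGE)
 *		pmap_store(l3, new_l3);
 */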

/********************/
/* Inline functions */
/********************/

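/*
 * Return a pointer to the L0 (top-level) table entry covering "va".
 */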
static __inline pd_entry_t *
pmap_l0(pmap_t pmap, vm_offset_t va)
{

	return (&pmap->pm_l0[iommu_l0_index(va)]);
}

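/*
 * Given a pointer to the L0 entry for "va", return a pointer to the
 * corresponding L1 entry.
 */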
static __inline pd_entry_t *
pmap_l0_to_l1(pd_entry_t *l0, vm_offset_t va)
{
	pd_entry_t *l1;

	l1 = (pd_entry_t *)PHYS_TO_DMAP(pmap_load(l0) & ~ATTR_MASK);
	return (&l1[iommu_l1_index(va)]);
}

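/*
 * Return a pointer to the L1 entry for "va", or NULL if the L0 entry is not
 * a table.
 */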
static __inline pd_entry_t *
pmap_l1(pmap_t pmap, vm_offset_t va)
{
	pd_entry_t *l0;

	l0 = pmap_l0(pmap, va);
	if ((pmap_load(l0) & ATTR_DESCR_MASK) != IOMMU_L0_TABLE)
		return (NULL);

	return (pmap_l0_to_l1(l0, va));
}

static __inline pd_entry_t *
pmap_l1_to_l2(pd_entry_t *l1p, vm_offset_t va)
{
	pd_entry_t l1, *l2p;

	l1 = pmap_load(l1p);

	/*
	 * The valid bit may be clear if pmap_update_entry() is concurrently
	 * modifying the entry, so for KVA only the entry type may be checked.
	 */
	KASSERT(va >= VM_MAX_USER_ADDRESS || (l1 & ATTR_DESCR_VALID) != 0,
	    ("%s: L1 entry %#lx for %#lx is invalid", __func__, l1, va));
	KASSERT((l1 & ATTR_DESCR_TYPE_MASK) == ATTR_DESCR_TYPE_TABLE,
	    ("%s: L1 entry %#lx for %#lx is a leaf", __func__, l1, va));
	l2p = (pd_entry_t *)PHYS_TO_DMAP(l1 & ~ATTR_MASK);
	return (&l2p[iommu_l2_index(va)]);
}

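/*
 * Return a pointer to the L2 entry for "va", or NULL if there is no L1
 * table entry for it.
 */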
static __inline pd_entry_t *
pmap_l2(pmap_t pmap, vm_offset_t va)
{
	pd_entry_t *l1;

	l1 = pmap_l1(pmap, va);
	if (l1 == NULL || (pmap_load(l1) & ATTR_DESCR_MASK) != IOMMU_L1_TABLE)
		return (NULL);

	return (pmap_l1_to_l2(l1, va));
}

static __inline pt_entry_t *
pmap_l2_to_l3(pd_entry_t *l2p, vm_offset_t va)
{
	pd_entry_t l2;
	pt_entry_t *l3p;

	l2 = pmap_load(l2p);

	/*
	 * The valid bit may be clear if pmap_update_entry() is concurrently
	 * modifying the entry, so for KVA only the entry type may be checked.
	 */
	KASSERT(va >= VM_MAX_USER_ADDRESS || (l2 & ATTR_DESCR_VALID) != 0,
	    ("%s: L2 entry %#lx for %#lx is invalid", __func__, l2, va));
	KASSERT((l2 & ATTR_DESCR_TYPE_MASK) == ATTR_DESCR_TYPE_TABLE,
	    ("%s: L2 entry %#lx for %#lx is a leaf", __func__, l2, va));
	l3p = (pt_entry_t *)PHYS_TO_DMAP(l2 & ~ATTR_MASK);
	return (&l3p[iommu_l3_index(va)]);
}

/*
 * Returns the lowest valid pde for a given virtual address.
 * The next level may or may not point to a valid page or block.
 */
static __inline pd_entry_t *
pmap_pde(pmap_t pmap, vm_offset_t va, int *level)
{
	pd_entry_t *l0, *l1, *l2, desc;

	l0 = pmap_l0(pmap, va);
	desc = pmap_load(l0) & ATTR_DESCR_MASK;
	if (desc != IOMMU_L0_TABLE) {
		*level = -1;
		return (NULL);
	}

	l1 = pmap_l0_to_l1(l0, va);
	desc = pmap_load(l1) & ATTR_DESCR_MASK;
	if (desc != IOMMU_L1_TABLE) {
		*level = 0;
		return (l0);
	}

	l2 = pmap_l1_to_l2(l1, va);
	desc = pmap_load(l2) & ATTR_DESCR_MASK;
	if (desc != IOMMU_L2_TABLE) {
		*level = 1;
		return (l1);
	}

	*level = 2;
	return (l2);
}

/*
 * Returns the lowest valid pte block or table entry for a given virtual
 * address. If there are no valid entries return NULL and set the level to
 * the first invalid level.
 */
static __inline pt_entry_t *
pmap_pte(pmap_t pmap, vm_offset_t va, int *level)
{
	pd_entry_t *l1, *l2, desc;
	pt_entry_t *l3;

	l1 = pmap_l1(pmap, va);
	if (l1 == NULL) {
		*level = 0;
		return (NULL);
	}
	desc = pmap_load(l1) & ATTR_DESCR_MASK;
	if (desc == IOMMU_L1_BLOCK) {
		*level = 1;
		return (l1);
	}

	if (desc != IOMMU_L1_TABLE) {
		*level = 1;
		return (NULL);
	}

	l2 = pmap_l1_to_l2(l1, va);
	desc = pmap_load(l2) & ATTR_DESCR_MASK;
	if (desc == IOMMU_L2_BLOCK) {
		*level = 2;
		return (l2);
	}

	if (desc != IOMMU_L2_TABLE) {
		*level = 2;
		return (NULL);
	}

	*level = 3;
	l3 = pmap_l2_to_l3(l2, va);
	if ((pmap_load(l3) & ATTR_DESCR_MASK) != IOMMU_L3_PAGE)
		return (NULL);

	return (l3);
}
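
/*
 * Example lookup (an illustrative sketch only): find the leaf entry for
 * "va" and, if it is a 4K page mapping, extract the physical address:
 *
 *	pte = pmap_pte(pmap, va, &lvl);
 *	if (pte != NULL && lvl == 3)
 *		pa = pmap_load(pte) & ~ATTR_MASK;
 */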
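/*
 * Return non-zero if the L3 entry maps a 4K page.
 */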
static __inline int
pmap_l3_valid(pt_entry_t l3)
{

	return ((l3 & ATTR_DESCR_MASK) == IOMMU_L3_PAGE);
}

CTASSERT(IOMMU_L1_BLOCK == IOMMU_L2_BLOCK);

static __inline void
pmap_resident_count_inc(pmap_t pmap, int count)
{

	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
	pmap->pm_stats.resident_count += count;
}

static __inline void
pmap_resident_count_dec(pmap_t pmap, int count)
{

	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
	KASSERT(pmap->pm_stats.resident_count >= count,
	    ("pmap %p resident count underflow %ld %d", pmap,
	    pmap->pm_stats.resident_count, count));
	pmap->pm_stats.resident_count -= count;
}

/***************************************************
 * Page table page management routines.....
 ***************************************************/
/*
 * Schedule the specified unused page table page to be freed. Specifically,
 * add the page to the specified list of pages that will be released to the
 * physical memory manager after the TLB has been updated.
 */
static __inline void
pmap_add_delayed_free_list(vm_page_t m, struct spglist *free,
    boolean_t set_PG_ZERO)
{

	if (set_PG_ZERO)
		m->flags |= PG_ZERO;
	else
		m->flags &= ~PG_ZERO;
	SLIST_INSERT_HEAD(free, m, plinks.s.ss);
}

/***************************************************
 * Low level mapping routines.....
 ***************************************************/

/*
 * Decrements a page table page's reference count, which is used to record the
 * number of valid page table entries within the page. If the reference count
 * drops to zero, then the page table page is unmapped. Returns TRUE if the
 * page table page was unmapped and FALSE otherwise.
 */
static inline boolean_t
pmap_unwire_l3(pmap_t pmap, vm_offset_t va, vm_page_t m, struct spglist *free)
{

	--m->ref_count;
	if (m->ref_count == 0) {
		_pmap_unwire_l3(pmap, va, m, free);
		return (TRUE);
	} else
		return (FALSE);
}

static void
_pmap_unwire_l3(pmap_t pmap, vm_offset_t va, vm_page_t m, struct spglist *free)
{

	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
	/*
	 * unmap the page table page
	 */
	if (m->pindex >= (NUL2E + NUL1E)) {
		/* l1 page */
		pd_entry_t *l0;

		l0 = pmap_l0(pmap, va);
		pmap_clear(l0);
	} else if (m->pindex >= NUL2E) {
		/* l2 page */
		pd_entry_t *l1;

		l1 = pmap_l1(pmap, va);
		pmap_clear(l1);
	} else {
		/* l3 page */
		pd_entry_t *l2;

		l2 = pmap_l2(pmap, va);
		pmap_clear(l2);
	}
	pmap_resident_count_dec(pmap, 1);
	if (m->pindex < NUL2E) {
		/* We just released an l3, unhold the matching l2 */
		pd_entry_t *l1, tl1;
		vm_page_t l2pg;

		l1 = pmap_l1(pmap, va);
		tl1 = pmap_load(l1);
		l2pg = PHYS_TO_VM_PAGE(tl1 & ~ATTR_MASK);
		pmap_unwire_l3(pmap, va, l2pg, free);
	} else if (m->pindex < (NUL2E + NUL1E)) {
		/* We just released an l2, unhold the matching l1 */
		pd_entry_t *l0, tl0;
		vm_page_t l1pg;

		l0 = pmap_l0(pmap, va);
		tl0 = pmap_load(l0);
		l1pg = PHYS_TO_VM_PAGE(tl0 & ~ATTR_MASK);
		pmap_unwire_l3(pmap, va, l1pg, free);
	}

	/*
	 * Put page on a list so that it is released after
	 * *ALL* TLB shootdown is done
	 */
	pmap_add_delayed_free_list(m, free, TRUE);
}

static int
iommu_pmap_pinit_levels(pmap_t pmap, int levels)
{
	vm_page_t m;

	/*
	 * allocate the l0 page
	 */
	m = vm_page_alloc_noobj(VM_ALLOC_WAITOK | VM_ALLOC_WIRED |
	    VM_ALLOC_ZERO);
	pmap->pm_l0_paddr = VM_PAGE_TO_PHYS(m);
	pmap->pm_l0 = (pd_entry_t *)PHYS_TO_DMAP(pmap->pm_l0_paddr);

	vm_radix_init(&pmap->pm_root);
	bzero(&pmap->pm_stats, sizeof(pmap->pm_stats));

	MPASS(levels == 3 || levels == 4);
	pmap->pm_levels = levels;

	/*
	 * Allocate the level 1 entry to use as the root. This will increase
	 * the refcount on the level 1 page so it won't be removed until
	 * pmap_release() is called.
	 */
	if (pmap->pm_levels == 3) {
		PMAP_LOCK(pmap);
		m = _pmap_alloc_l3(pmap, NUL2E + NUL1E);
		PMAP_UNLOCK(pmap);
	}
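
	/*
	 * pm_ttbr refers to the L1 table allocated above when three levels
	 * are in use, and to the L0 table otherwise.
	 */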
	pmap->pm_ttbr = VM_PAGE_TO_PHYS(m);

	return (1);
}

int
iommu_pmap_pinit(pmap_t pmap)
{

	return (iommu_pmap_pinit_levels(pmap, 4));
}

/*
 * This routine is called if the desired page table page does not exist.
 *
 * If page table page allocation fails, this routine may sleep before
 * returning NULL.  It sleeps only if a lock pointer was given.
 *
 * Note: If a page allocation fails at page table level two or three,
 * one or two pages may be held during the wait, only to be released
 * afterwards.  This conservative approach is easily argued to avoid
 * race conditions.
 */
static vm_page_t
_pmap_alloc_l3(pmap_t pmap, vm_pindex_t ptepindex)
{
	vm_page_t m, l1pg, l2pg;

	PMAP_LOCK_ASSERT(pmap, MA_OWNED);

	/*
	 * Allocate a page table page.
	 */
	if ((m = vm_page_alloc_noobj(VM_ALLOC_WIRED | VM_ALLOC_ZERO)) == NULL) {
		/*
		 * Indicate the need to retry.  While waiting, the page table
		 * page may have been allocated.
		 */
		return (NULL);
	}
	m->pindex = ptepindex;

	/*
	 * Because of AArch64's weak memory consistency model, we must have a
	 * barrier here to ensure that the stores for zeroing "m", whether by
	 * pmap_zero_page() or an earlier function, are visible before adding
	 * "m" to the page table.  Otherwise, a page table walk by another
	 * processor's MMU could see the mapping to "m" and a stale, non-zero
	 * PTE within "m".
	 */
	dmb(ishst);

	/*
	 * Map the pagetable page into the process address space, if
	 * it isn't already there.
	 */

	if (ptepindex >= (NUL2E + NUL1E)) {
		pd_entry_t *l0;
		vm_pindex_t l0index;

		l0index = ptepindex - (NUL2E + NUL1E);
		l0 = &pmap->pm_l0[l0index];
		pmap_store(l0, VM_PAGE_TO_PHYS(m) | IOMMU_L0_TABLE);
	} else if (ptepindex >= NUL2E) {
		vm_pindex_t l0index, l1index;
		pd_entry_t *l0, *l1;
		pd_entry_t tl0;

		l1index = ptepindex - NUL2E;
		l0index = l1index >> IOMMU_L0_ENTRIES_SHIFT;

		l0 = &pmap->pm_l0[l0index];
		tl0 = pmap_load(l0);
		if (tl0 == 0) {
			/* recurse for allocating page dir */
			if (_pmap_alloc_l3(pmap, NUL2E + NUL1E + l0index)
			    == NULL) {
				vm_page_unwire_noq(m);
				vm_page_free_zero(m);
				return (NULL);
			}
		} else {
			l1pg = PHYS_TO_VM_PAGE(tl0 & ~ATTR_MASK);
			l1pg->ref_count++;
		}

		l1 = (pd_entry_t *)PHYS_TO_DMAP(pmap_load(l0) & ~ATTR_MASK);
		l1 = &l1[ptepindex & Ln_ADDR_MASK];
		pmap_store(l1, VM_PAGE_TO_PHYS(m) | IOMMU_L1_TABLE);
	} else {
		vm_pindex_t l0index, l1index;
		pd_entry_t *l0, *l1, *l2;
		pd_entry_t tl0, tl1;

		l1index = ptepindex >> Ln_ENTRIES_SHIFT;
		l0index = l1index >> IOMMU_L0_ENTRIES_SHIFT;

		l0 = &pmap->pm_l0[l0index];
		tl0 = pmap_load(l0);
		if (tl0 == 0) {
			/* recurse for allocating page dir */
			if (_pmap_alloc_l3(pmap, NUL2E + l1index) == NULL) {
				vm_page_unwire_noq(m);
				vm_page_free_zero(m);
				return (NULL);
			}
			tl0 = pmap_load(l0);
			l1 = (pd_entry_t *)PHYS_TO_DMAP(tl0 & ~ATTR_MASK);
			l1 = &l1[l1index & Ln_ADDR_MASK];
		} else {
			l1 = (pd_entry_t *)PHYS_TO_DMAP(tl0 & ~ATTR_MASK);
			l1 = &l1[l1index & Ln_ADDR_MASK];
			tl1 = pmap_load(l1);
			if (tl1 == 0) {
				/* recurse for allocating page dir */
				if (_pmap_alloc_l3(pmap, NUL2E + l1index)
				    == NULL) {
					vm_page_unwire_noq(m);
					vm_page_free_zero(m);
					return (NULL);
				}
			} else {
				l2pg = PHYS_TO_VM_PAGE(tl1 & ~ATTR_MASK);
				l2pg->ref_count++;
			}
		}

		l2 = (pd_entry_t *)PHYS_TO_DMAP(pmap_load(l1) & ~ATTR_MASK);
		l2 = &l2[ptepindex & Ln_ADDR_MASK];
		pmap_store(l2, VM_PAGE_TO_PHYS(m) | IOMMU_L2_TABLE);
	}

	pmap_resident_count_inc(pmap, 1);

	return (m);
}

/***************************************************
 * Pmap allocation/deallocation routines.
 ***************************************************/

/*
 * Release any resources held by the given physical map.
 * Called when a pmap initialized by pmap_pinit is being released.
 * Should only be called if the map contains no valid mappings.
 */
void
iommu_pmap_release(pmap_t pmap)
{
	boolean_t rv __diagused;
	struct spglist free;
	vm_page_t m;

	if (pmap->pm_levels != 4) {
		KASSERT(pmap->pm_stats.resident_count == 1,
		    ("pmap_release: pmap resident count %ld != 1",
		    pmap->pm_stats.resident_count));
		KASSERT((pmap->pm_l0[0] & ATTR_DESCR_VALID) == ATTR_DESCR_VALID,
		    ("pmap_release: Invalid l0 entry: %lx", pmap->pm_l0[0]));

		SLIST_INIT(&free);
		m = PHYS_TO_VM_PAGE(pmap->pm_ttbr);
		PMAP_LOCK(pmap);
		rv = pmap_unwire_l3(pmap, 0, m, &free);
		PMAP_UNLOCK(pmap);
		MPASS(rv == TRUE);
		vm_page_free_pages_toq(&free, true);
	}

	KASSERT(pmap->pm_stats.resident_count == 0,
	    ("pmap_release: pmap resident count %ld != 0",
	    pmap->pm_stats.resident_count));
	KASSERT(vm_radix_is_empty(&pmap->pm_root),
	    ("pmap_release: pmap has reserved page table page(s)"));

	m = PHYS_TO_VM_PAGE(pmap->pm_l0_paddr);
	vm_page_unwire_noq(m);
	vm_page_free_zero(m);
}

/***************************************************
 * page management routines.
 ***************************************************/

/*
 * Add a single Mali GPU entry. This function does not sleep.
 */
int
pmap_gpu_enter(pmap_t pmap, vm_offset_t va, vm_paddr_t pa,
    vm_prot_t prot, u_int flags)
{
	pd_entry_t *pde;
	pt_entry_t new_l3;
	pt_entry_t orig_l3 __diagused;
	pt_entry_t *l3;
	vm_page_t mpte;
	pd_entry_t *l1p;
	pd_entry_t *l2p;
	int lvl;
	int rv;

	KASSERT(pmap != kernel_pmap, ("kernel pmap used for GPU"));
	KASSERT(va < VM_MAXUSER_ADDRESS, ("wrong address space"));
	KASSERT((va & PAGE_MASK) == 0, ("va is misaligned"));
	KASSERT((pa & PAGE_MASK) == 0, ("pa is misaligned"));

	new_l3 = (pt_entry_t)(pa | ATTR_SH(ATTR_SH_IS) | IOMMU_L3_BLOCK);

	if ((prot & VM_PROT_WRITE) != 0)
		new_l3 |= ATTR_S2_S2AP(ATTR_S2_S2AP_WRITE);
	if ((prot & VM_PROT_READ) != 0)
		new_l3 |= ATTR_S2_S2AP(ATTR_S2_S2AP_READ);
	if ((prot & VM_PROT_EXECUTE) == 0)
		new_l3 |= ATTR_S2_XN(ATTR_S2_XN_ALL);

	CTR2(KTR_PMAP, "pmap_gpu_enter: %.16lx -> %.16lx", va, pa);

	PMAP_LOCK(pmap);

	/*
	 * In the case that a page table page is not
	 * resident, we are creating it here.
	 */
retry:
	pde = pmap_pde(pmap, va, &lvl);
	if (pde != NULL && lvl == 2) {
		l3 = pmap_l2_to_l3(pde, va);
	} else {
		mpte = _pmap_alloc_l3(pmap, iommu_l2_pindex(va));
		if (mpte == NULL) {
			CTR0(KTR_PMAP, "pmap_gpu_enter: mpte == NULL");
			rv = KERN_RESOURCE_SHORTAGE;
			goto out;
		}

		/*
		 * Ensure the newly created l1 and l2 entries are visible to
		 * the GPU.  The l0 table was already made visible by a
		 * similar call in the panfrost driver; the cache maintenance
		 * for the l3 entry is handled below.
		 */

		l1p = pmap_l1(pmap, va);
		l2p = pmap_l2(pmap, va);
		cpu_dcache_wb_range((vm_offset_t)l1p, sizeof(pd_entry_t));
		cpu_dcache_wb_range((vm_offset_t)l2p, sizeof(pd_entry_t));

		goto retry;
	}

	orig_l3 = pmap_load(l3);
	KASSERT(!pmap_l3_valid(orig_l3), ("l3 is valid"));

	/* New mapping */
	pmap_store(l3, new_l3);

	cpu_dcache_wb_range((vm_offset_t)l3, sizeof(pt_entry_t));

	pmap_resident_count_inc(pmap, 1);
	dsb(ishst);

	rv = KERN_SUCCESS;
out:
	PMAP_UNLOCK(pmap);

	return (rv);
}
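
/*
 * Example usage (an illustrative sketch; "gpu_pmap", "va" and "m" are
 * hypothetical driver state, not names defined in this file):
 *
 *	error = pmap_gpu_enter(gpu_pmap, va, VM_PAGE_TO_PHYS(m),
 *	    VM_PROT_READ | VM_PROT_WRITE, 0);
 *	if (error != KERN_SUCCESS)
 *		return (ENOMEM);
 */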

/*
 * Remove a single Mali GPU entry.
 */
int
pmap_gpu_remove(pmap_t pmap, vm_offset_t va)
{
	pd_entry_t *pde;
	pt_entry_t *pte;
	int lvl;
	int rc;

	KASSERT((va & PAGE_MASK) == 0, ("va is misaligned"));
	KASSERT(pmap != kernel_pmap, ("kernel pmap used for GPU"));

	PMAP_LOCK(pmap);

	pde = pmap_pde(pmap, va, &lvl);
	if (pde == NULL || lvl != 2) {
		rc = KERN_FAILURE;
		goto out;
	}

	pte = pmap_l2_to_l3(pde, va);

	pmap_resident_count_dec(pmap, 1);
	pmap_clear(pte);
	cpu_dcache_wb_range((vm_offset_t)pte, sizeof(pt_entry_t));
	rc = KERN_SUCCESS;

out:
	PMAP_UNLOCK(pmap);

	return (rc);
}

/*
 * Add a single SMMU entry. This function does not sleep.
 */
int
pmap_smmu_enter(pmap_t pmap, vm_offset_t va, vm_paddr_t pa,
    vm_prot_t prot, u_int flags)
{
	pd_entry_t *pde;
	pt_entry_t new_l3;
	pt_entry_t orig_l3 __diagused;
	pt_entry_t *l3;
	vm_page_t mpte;
	int lvl;
	int rv;

	KASSERT(va < VM_MAXUSER_ADDRESS, ("wrong address space"));

	va = trunc_page(va);
	new_l3 = (pt_entry_t)(pa | ATTR_DEFAULT |
	    ATTR_S1_IDX(VM_MEMATTR_DEVICE) | IOMMU_L3_PAGE);
	if ((prot & VM_PROT_WRITE) == 0)
		new_l3 |= ATTR_S1_AP(ATTR_S1_AP_RO);
	new_l3 |= ATTR_S1_XN;		/* Execute never. */
	new_l3 |= ATTR_S1_AP(ATTR_S1_AP_USER);
	new_l3 |= ATTR_S1_nG;		/* Non global. */

	CTR2(KTR_PMAP, "pmap_smmu_enter: %.16lx -> %.16lx", va, pa);

	PMAP_LOCK(pmap);

	/*
	 * In the case that a page table page is not
	 * resident, we are creating it here.
	 */
retry:
	pde = pmap_pde(pmap, va, &lvl);
	if (pde != NULL && lvl == 2) {
		l3 = pmap_l2_to_l3(pde, va);
	} else {
		mpte = _pmap_alloc_l3(pmap, iommu_l2_pindex(va));
		if (mpte == NULL) {
			CTR0(KTR_PMAP, "pmap_smmu_enter: mpte == NULL");
			rv = KERN_RESOURCE_SHORTAGE;
			goto out;
		}
		goto retry;
	}

	orig_l3 = pmap_load(l3);
	KASSERT(!pmap_l3_valid(orig_l3), ("l3 is valid"));

	/* New mapping */
	pmap_store(l3, new_l3);
	pmap_resident_count_inc(pmap, 1);
	dsb(ishst);

	rv = KERN_SUCCESS;
out:
	PMAP_UNLOCK(pmap);

	return (rv);
}
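
/*
 * Example usage (an illustrative sketch; "domain", "va" and "m" are
 * hypothetical SMMU driver state, not names defined in this file): map one
 * page at "va" to the physical page "m" with read/write permission:
 *
 *	error = pmap_smmu_enter(&domain->p, va, VM_PAGE_TO_PHYS(m),
 *	    VM_PROT_READ | VM_PROT_WRITE, 0);
 */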

/*
 * Remove a single SMMU entry.
 */
int
pmap_smmu_remove(pmap_t pmap, vm_offset_t va)
{
	pt_entry_t *pte;
	int lvl;
	int rc;

	PMAP_LOCK(pmap);

	pte = pmap_pte(pmap, va, &lvl);
	KASSERT(lvl == 3,
	    ("Invalid SMMU pagetable level: %d != 3", lvl));

	if (pte != NULL) {
		pmap_resident_count_dec(pmap, 1);
		pmap_clear(pte);
		rc = KERN_SUCCESS;
	} else
		rc = KERN_FAILURE;

	PMAP_UNLOCK(pmap);

	return (rc);
}

/*
 * Remove all the allocated L1 and L2 pages from the SMMU pmap.
 * All the L3 entries must have been cleared in advance, otherwise
 * this function panics.
 */
void
iommu_pmap_remove_pages(pmap_t pmap)
{
	pd_entry_t l0e, *l1, l1e, *l2, l2e;
	pt_entry_t *l3, l3e;
	vm_page_t m, m0, m1;
	vm_offset_t sva;
	vm_paddr_t pa;
	vm_paddr_t pa0;
	vm_paddr_t pa1;
	int i, j, k, l;

	PMAP_LOCK(pmap);

	for (sva = VM_MINUSER_ADDRESS, i = iommu_l0_index(sva);
	    (i < Ln_ENTRIES && sva < VM_MAXUSER_ADDRESS); i++) {
		l0e = pmap->pm_l0[i];
		if ((l0e & ATTR_DESCR_VALID) == 0) {
			sva += IOMMU_L0_SIZE;
			continue;
		}
		pa0 = l0e & ~ATTR_MASK;
		m0 = PHYS_TO_VM_PAGE(pa0);
		l1 = (pd_entry_t *)PHYS_TO_DMAP(pa0);

		for (j = iommu_l1_index(sva); j < Ln_ENTRIES; j++) {
			l1e = l1[j];
			if ((l1e & ATTR_DESCR_VALID) == 0) {
				sva += IOMMU_L1_SIZE;
				continue;
			}
			if ((l1e & ATTR_DESCR_MASK) == IOMMU_L1_BLOCK) {
				sva += IOMMU_L1_SIZE;
				continue;
			}
			pa1 = l1e & ~ATTR_MASK;
			m1 = PHYS_TO_VM_PAGE(pa1);
			l2 = (pd_entry_t *)PHYS_TO_DMAP(pa1);

			for (k = iommu_l2_index(sva); k < Ln_ENTRIES; k++) {
				l2e = l2[k];
				if ((l2e & ATTR_DESCR_VALID) == 0) {
					sva += IOMMU_L2_SIZE;
					continue;
				}
				pa = l2e & ~ATTR_MASK;
				m = PHYS_TO_VM_PAGE(pa);
				l3 = (pt_entry_t *)PHYS_TO_DMAP(pa);

				for (l = iommu_l3_index(sva); l < Ln_ENTRIES;
				    l++, sva += IOMMU_L3_SIZE) {
					l3e = l3[l];
					if ((l3e & ATTR_DESCR_VALID) == 0)
						continue;
					panic("%s: l3e found for va %jx\n",
					    __func__, sva);
				}

				vm_page_unwire_noq(m1);
				vm_page_unwire_noq(m);
				pmap_resident_count_dec(pmap, 1);
				vm_page_free(m);
				pmap_clear(&l2[k]);
			}

			vm_page_unwire_noq(m0);
			pmap_resident_count_dec(pmap, 1);
			vm_page_free(m1);
			pmap_clear(&l1[j]);
		}

		pmap_resident_count_dec(pmap, 1);
		vm_page_free(m0);
		pmap_clear(&pmap->pm_l0[i]);
	}

	KASSERT(pmap->pm_stats.resident_count == 0,
	    ("Invalid resident count %jd", pmap->pm_stats.resident_count));

	PMAP_UNLOCK(pmap);
}