/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (C) 2010 Andreas Tobler
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/rmlock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <sys/vmmeter.h>

#include <dev/ofw/openfirm.h>
#include <machine/ofw_machdep.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_extern.h>
#include <vm/vm_pageout.h>
#include <vm/uma.h>

#include <powerpc/aim/mmu_oea64.h>

#include "phyp-hvcall.h"

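/*
 * Hypervisor-mediated MMU for PAPR (pseries) guests: the hashed page table
 * is owned by the hypervisor, so all PTE manipulation goes through the
 * H_ENTER/H_READ/H_REMOVE/H_CLEAR_* hypercalls rather than direct stores.
 */
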
#define MMU_PHYP_DEBUG 0
#define MMU_PHYP_ID "mmu_phyp: "
#if MMU_PHYP_DEBUG
#define dprintf(fmt, ...) printf(fmt, ## __VA_ARGS__)
#define dprintf0(fmt, ...) dprintf(MMU_PHYP_ID fmt, ## __VA_ARGS__)
#else
#define dprintf(fmt, args...) do { ; } while(0)
#define dprintf0(fmt, args...) do { ; } while(0)
#endif

static struct rmlock mphyp_eviction_lock;

/*
 * Kernel MMU interface
 */

static void	mphyp_install(void);
static void	mphyp_bootstrap(vm_offset_t kernelstart,
		    vm_offset_t kernelend);
static void	mphyp_cpu_bootstrap(int ap);
static void	*mphyp_dump_pmap(void *ctx, void *buf,
		    u_long *nbytes);
static int64_t	mphyp_pte_synch(struct pvo_entry *pvo);
static int64_t	mphyp_pte_clear(struct pvo_entry *pvo, uint64_t ptebit);
static int64_t	mphyp_pte_unset(struct pvo_entry *pvo);
static int64_t	mphyp_pte_insert(struct pvo_entry *pvo);
static int64_t	mphyp_pte_unset_sp(struct pvo_entry *pvo);
static int64_t	mphyp_pte_insert_sp(struct pvo_entry *pvo);
static int64_t	mphyp_pte_replace_sp(struct pvo_entry *pvo);

static struct pmap_funcs mphyp_methods = {
	.install = mphyp_install,
	.bootstrap = mphyp_bootstrap,
	.cpu_bootstrap = mphyp_cpu_bootstrap,
	.dumpsys_dump_pmap = mphyp_dump_pmap,
};

static struct moea64_funcs mmu_phyp_funcs = {
	.pte_synch = mphyp_pte_synch,
	.pte_clear = mphyp_pte_clear,
	.pte_unset = mphyp_pte_unset,
	.pte_insert = mphyp_pte_insert,
	.pte_unset_sp = mphyp_pte_unset_sp,
	.pte_insert_sp = mphyp_pte_insert_sp,
	.pte_replace_sp = mphyp_pte_replace_sp,
};

MMU_DEF_INHERIT(pseries_mmu, "mmu_phyp", mphyp_methods, oea64_mmu);

static int brokenkvm = 0;
static uint64_t final_pteg_count = 0;

static void
print_kvm_bug_warning(void *data)
{

	if (brokenkvm)
		printf("WARNING: Running on a broken hypervisor that does "
		    "not support mandatory H_CLEAR_MOD and H_CLEAR_REF "
		    "hypercalls. Performance will be suboptimal.\n");
}

SYSINIT(kvmbugwarn1, SI_SUB_COPYRIGHT, SI_ORDER_THIRD + 1,
    print_kvm_bug_warning, NULL);
SYSINIT(kvmbugwarn2, SI_SUB_LAST, SI_ORDER_THIRD + 1, print_kvm_bug_warning,
    NULL);

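/*
 * Probe the Open Firmware device tree for the MMU parameters provided by
 * the hypervisor (page table size, number of SLB entries, supported segment
 * and large page sizes), then hook the moea64 pmap up to the hypercall-based
 * PTE operations below.
 */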
static void
mphyp_install(void)
{
	char buf[8];
	uint32_t prop[2];
	uint32_t nptlp, shift = 0, slb_encoding = 0;
	uint32_t lp_size, lp_encoding;
	phandle_t dev, node, root;
	int idx, len, res;
	bool has_lp;

	root = OF_peer(0);

	dev = OF_child(root);
	while (dev != 0) {
		res = OF_getprop(dev, "name", buf, sizeof(buf));
		if (res > 0 && strcmp(buf, "cpus") == 0)
			break;
		dev = OF_peer(dev);
	}

	node = OF_child(dev);

	while (node != 0) {
		res = OF_getprop(node, "device_type", buf, sizeof(buf));
		if (res > 0 && strcmp(buf, "cpu") == 0)
			break;
		node = OF_peer(node);
	}

	res = OF_getencprop(node, "ibm,pft-size", prop, sizeof(prop));
	if (res <= 0)
		panic("mmu_phyp: unknown PFT size");
	final_pteg_count = 1ULL << prop[1];
	res = OF_getencprop(node, "ibm,slb-size", prop, sizeof(prop[0]));
	if (res > 0)
		n_slbs = prop[0];
	dprintf0("slb-size=%i\n", n_slbs);

	/*
	 * Scan the large page size property for PAPR compatible machines.
	 * See PAPR D.5 Changes to Section 5.1.4, 'CPU Node Properties'
	 * for the encoding of the property.
	 */

	len = OF_getproplen(node, "ibm,segment-page-sizes");
	if (len > 0) {
		/*
		 * We have to use a variable length array on the stack
		 * since we have very limited stack space.
		 */
		pcell_t arr[len/sizeof(cell_t)];
		res = OF_getencprop(node, "ibm,segment-page-sizes", arr,
		    sizeof(arr));
		len /= 4;
		idx = 0;
		has_lp = false;
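		/*
		 * The property is a sequence of records, one per base page
		 * size: {shift, slb_encoding, nptlp} followed by nptlp pairs
		 * of {page-size shift, PTE encoding} describing the large
		 * page sizes usable with that segment encoding.
		 */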
		while (len > 0) {
			shift = arr[idx];
			slb_encoding = arr[idx + 1];
			nptlp = arr[idx + 2];

			dprintf0("Segment Page Size: "
			    "%uKB, slb_enc=0x%X: {size, encoding}[%u] =",
			    shift > 10? 1 << (shift-10) : 0,
			    slb_encoding, nptlp);

			idx += 3;
			len -= 3;
			while (len > 0 && nptlp) {
				lp_size = arr[idx];
				lp_encoding = arr[idx+1];

				dprintf(" {%uKB, 0x%X}",
				    lp_size > 10? 1 << (lp_size-10) : 0,
				    lp_encoding);

				if (slb_encoding == SLBV_L && lp_encoding == 0)
					has_lp = true;

				if (slb_encoding == SLB_PGSZ_4K_4K &&
				    lp_encoding == LP_4K_16M)
					moea64_has_lp_4k_16m = true;

				idx += 2;
				len -= 2;
				nptlp--;
			}
			dprintf("\n");
			if (has_lp && moea64_has_lp_4k_16m)
				break;
		}

		if (has_lp) {
			moea64_large_page_shift = shift;
			moea64_large_page_size = 1ULL << lp_size;
			moea64_large_page_mask = moea64_large_page_size - 1;
			hw_direct_map = 1;
			printf(MMU_PHYP_ID
			    "Support for hugepages of %uKB detected\n",
			    moea64_large_page_shift > 10?
			    1 << (moea64_large_page_shift-10) : 0);
		} else {
			moea64_large_page_size = 0;
			moea64_large_page_shift = 0;
			moea64_large_page_mask = 0;
			hw_direct_map = 0;
			printf(MMU_PHYP_ID
			    "Support for hugepages not found\n");
		}
	}

	moea64_ops = &mmu_phyp_funcs;

	moea64_install();
}

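/*
 * Size the page table, flush any stale entries left behind by the firmware
 * (the VRMA mappings set up by the hypervisor must be preserved), and run
 * the generic moea64 bootstrap.  Finally, detect KVM hosts that lack the
 * mandatory H_CLEAR_MOD/H_CLEAR_REF hypercalls.
 */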
static void
mphyp_bootstrap(vm_offset_t kernelstart, vm_offset_t kernelend)
{
	struct lpte old;
	uint64_t junk, vsid;
	int idx;

	rm_init(&mphyp_eviction_lock, "pte eviction");

	moea64_early_bootstrap(kernelstart, kernelend);

	moea64_pteg_count = final_pteg_count / sizeof(struct lpteg);

	/* Clear any old page table entries */
	for (idx = 0; idx < moea64_pteg_count*8; idx++) {
		phyp_pft_hcall(H_READ, 0, idx, 0, 0, &old.pte_hi,
		    &old.pte_lo, &junk);
		vsid = (old.pte_hi << (ADDR_API_SHFT64 - ADDR_PIDX_SHFT)) >> 28;
		if (vsid == VSID_VRMA || vsid == 0 /* Older VRMA */)
			continue;

		if (old.pte_hi & LPTE_VALID)
			phyp_hcall(H_REMOVE, 0, idx, 0);
	}

	moea64_mid_bootstrap(kernelstart, kernelend);
	moea64_late_bootstrap(kernelstart, kernelend);

	/* Test for broken versions of KVM that don't conform to the spec */
	if (phyp_hcall(H_CLEAR_MOD, 0, 0) == H_FUNCTION)
		brokenkvm = 1;
}

static void
mphyp_cpu_bootstrap(int ap)
{
	struct slb *slb = PCPU_GET(aim.slb);
	register_t seg0;
	int i;

	/*
	 * Install kernel SLB entries
	 */

	__asm __volatile ("slbia");
	__asm __volatile ("slbmfee %0,%1; slbie %0;" : "=r"(seg0) : "r"(0));
	for (i = 0; i < 64; i++) {
		if (!(slb[i].slbe & SLBE_VALID))
			continue;

		__asm __volatile ("slbmte %0, %1" ::
		    "r"(slb[i].slbv), "r"(slb[i].slbe));
	}
}

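/*
 * Read the PTE back from the hypervisor and return its REF/CHG bits, or -1
 * if the slot no longer holds a valid entry for this mapping (it may have
 * been evicted).
 */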
static int64_t
mphyp_pte_synch(struct pvo_entry *pvo)
{
	struct lpte pte;
	uint64_t junk;

	__asm __volatile("ptesync");
	phyp_pft_hcall(H_READ, 0, pvo->pvo_pte.slot, 0, 0, &pte.pte_hi,
	    &pte.pte_lo, &junk);
	if ((pte.pte_hi & LPTE_AVPN_MASK) !=
	    ((pvo->pvo_vpn >> (ADDR_API_SHFT64 - ADDR_PIDX_SHFT)) &
	    LPTE_AVPN_MASK))
		return (-1);
	if (!(pte.pte_hi & LPTE_VALID))
		return (-1);

	return (pte.pte_lo & (LPTE_CHG | LPTE_REF));
}

static int64_t
mphyp_pte_clear(struct pvo_entry *pvo, uint64_t ptebit)
{
	struct rm_priotracker track;
	int64_t refchg;
	uint64_t ptelo, junk;
	int err __diagused;

	/*
	 * This involves two steps (synch and clear) so we need the entry
	 * not to change in the middle. We are protected against deliberate
	 * unset by virtue of holding the pmap lock. Protection against
	 * incidental unset (page table eviction) comes from holding the
	 * shared eviction lock.
	 */
	PMAP_LOCK_ASSERT(pvo->pvo_pmap, MA_OWNED);
	rm_rlock(&mphyp_eviction_lock, &track);

	refchg = mphyp_pte_synch(pvo);
	if (refchg < 0) {
		rm_runlock(&mphyp_eviction_lock, &track);
		return (refchg);
	}

	if (brokenkvm) {
		/*
		 * No way to clear either bit, which is total madness.
		 * Pessimistically claim that, once modified, it stays so
		 * forever and that it is never referenced.
		 */
		rm_runlock(&mphyp_eviction_lock, &track);
		return (refchg & ~LPTE_REF);
	}

	if (ptebit & LPTE_CHG) {
		err = phyp_pft_hcall(H_CLEAR_MOD, 0, pvo->pvo_pte.slot, 0, 0,
		    &ptelo, &junk, &junk);
		KASSERT(err == H_SUCCESS,
		    ("Error clearing page change bit: %d", err));
		refchg |= (ptelo & LPTE_CHG);
	}
	if (ptebit & LPTE_REF) {
		err = phyp_pft_hcall(H_CLEAR_REF, 0, pvo->pvo_pte.slot, 0, 0,
		    &ptelo, &junk, &junk);
		KASSERT(err == H_SUCCESS,
		    ("Error clearing page reference bit: %d", err));
		refchg |= (ptelo & LPTE_REF);
	}

	rm_runlock(&mphyp_eviction_lock, &track);

	return (refchg);
}

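/*
 * Invalidate the PTE for this mapping with H_REMOVE, matching on the
 * abbreviated VPN so we never tear down somebody else's entry, and return
 * the REF/CHG bits of the removed translation (or -1 if it was already
 * gone).
 */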
static int64_t
mphyp_pte_unset(struct pvo_entry *pvo)
{
	struct lpte pte;
	uint64_t junk;
	int err;

	PMAP_LOCK_ASSERT(pvo->pvo_pmap, MA_OWNED);

	moea64_pte_from_pvo(pvo, &pte);

	err = phyp_pft_hcall(H_REMOVE, H_AVPN, pvo->pvo_pte.slot,
	    pte.pte_hi & LPTE_AVPN_MASK, 0, &pte.pte_hi, &pte.pte_lo,
	    &junk);
	KASSERT(err == H_SUCCESS || err == H_NOT_FOUND,
	    ("Error removing page: %d", err));

	if (err == H_NOT_FOUND) {
		STAT_MOEA64(moea64_pte_overflow--);
		return (-1);
	}

	return (pte.pte_lo & (LPTE_REF | LPTE_CHG));
}

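/*
 * Pick an eviction victim in the PTEG starting at ptegbase.  Scan the eight
 * slots from a pseudo-random starting point, skip wired and large-page
 * entries, and prefer a slot that is invalid or not recently referenced.
 * Returns the chosen slot (with its contents in *to_evict) or -1 if every
 * slot is pinned.
 */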
static uintptr_t
mphyp_pte_spillable_ident(uintptr_t ptegbase, struct lpte *to_evict)
{
	uint64_t slot, junk, k;
	struct lpte pt;
	int i, j;

	/* Start at a random slot */
	i = mftb() % 8;
	k = -1;
	for (j = 0; j < 8; j++) {
		slot = ptegbase + (i + j) % 8;
		phyp_pft_hcall(H_READ, 0, slot, 0, 0, &pt.pte_hi,
		    &pt.pte_lo, &junk);

		if ((pt.pte_hi & (LPTE_WIRED | LPTE_BIG)) != 0)
			continue;

		/* This is a candidate, so remember it */
		k = slot;

		/* Try to get a page that has not been used lately */
		if (!(pt.pte_hi & LPTE_VALID) || !(pt.pte_lo & LPTE_REF)) {
			memcpy(to_evict, &pt, sizeof(struct lpte));
			return (k);
		}
	}

	if (k == -1)
		return (k);

	phyp_pft_hcall(H_READ, 0, k, 0, 0, &to_evict->pte_hi,
	    &to_evict->pte_lo, &junk);
	return (k);
}

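/*
 * Try to enter the PTE in its primary PTEG via H_ENTER; if that group is
 * full, flip to the secondary hash and try again.  Returns 0 on success or
 * -1 if both PTEGs are full, leaving eviction to the caller.
 */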
static __inline int64_t
mphyp_pte_insert_locked(struct pvo_entry *pvo, struct lpte *pte)
{
	struct lpte evicted;
	uint64_t index, junk;
	int64_t result;

	/*
	 * First try primary hash.
	 */
	pvo->pvo_pte.slot &= ~7UL; /* Base slot address */
	result = phyp_pft_hcall(H_ENTER, 0, pvo->pvo_pte.slot, pte->pte_hi,
	    pte->pte_lo, &index, &evicted.pte_lo, &junk);
	if (result == H_SUCCESS) {
		pvo->pvo_pte.slot = index;
		return (0);
	}
	KASSERT(result == H_PTEG_FULL, ("Page insertion error: %ld "
	    "(ptegidx: %#zx/%#lx, PTE %#lx/%#lx", result, pvo->pvo_pte.slot,
	    moea64_pteg_count, pte->pte_hi, pte->pte_lo));

	/*
	 * Next try secondary hash.
	 */
	pvo->pvo_vaddr ^= PVO_HID;
	pte->pte_hi ^= LPTE_HID;
	pvo->pvo_pte.slot ^= (moea64_pteg_mask << 3);

	result = phyp_pft_hcall(H_ENTER, 0, pvo->pvo_pte.slot,
	    pte->pte_hi, pte->pte_lo, &index, &evicted.pte_lo, &junk);
	if (result == H_SUCCESS) {
		pvo->pvo_pte.slot = index;
		return (0);
	}
	KASSERT(result == H_PTEG_FULL, ("Secondary page insertion error: %ld",
	    result));

	return (-1);
}

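/*
 * Both PTEGs were full: pick a spillable victim in either group, H_REMOVE
 * it, and install the new PTE in the freed slot with H_EXACT.  Called with
 * the eviction lock held exclusively; panics if no slot can be freed.
 */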
static __inline int64_t
mphyp_pte_evict_and_insert_locked(struct pvo_entry *pvo, struct lpte *pte)
{
	struct lpte evicted;
	uint64_t index, junk, lastptelo;
	int64_t result;

	evicted.pte_hi = 0;

	index = mphyp_pte_spillable_ident(pvo->pvo_pte.slot, &evicted);
	if (index == -1L) {
		/* Try other hash table? */
		pvo->pvo_vaddr ^= PVO_HID;
		pte->pte_hi ^= LPTE_HID;
		pvo->pvo_pte.slot ^= (moea64_pteg_mask << 3);
		index = mphyp_pte_spillable_ident(pvo->pvo_pte.slot, &evicted);
	}

	if (index == -1L) {
		/* No freeable slots in either PTEG? We're hosed. */
		rm_wunlock(&mphyp_eviction_lock);
		panic("mphyp_pte_insert: overflow");
		return (-1);
	}

	/* Victim acquired: update page before waving goodbye */
	if (evicted.pte_hi & LPTE_VALID) {
		result = phyp_pft_hcall(H_REMOVE, H_AVPN, index,
		    evicted.pte_hi & LPTE_AVPN_MASK, 0, &junk, &lastptelo,
		    &junk);
		STAT_MOEA64(moea64_pte_overflow++);
		KASSERT(result == H_SUCCESS || result == H_NOT_FOUND,
		    ("Error evicting page: %d", (int)result));
	}

	/*
	 * Set the new PTE.
	 */
	result = phyp_pft_hcall(H_ENTER, H_EXACT, index, pte->pte_hi,
	    pte->pte_lo, &index, &evicted.pte_lo, &junk);

	pvo->pvo_pte.slot = index;
	if (result == H_SUCCESS)
		return (0);

	rm_wunlock(&mphyp_eviction_lock);
	panic("Page replacement error: %ld", result);
	return (result);
}

static int64_t
mphyp_pte_insert(struct pvo_entry *pvo)
{
	struct rm_priotracker track;
	int64_t ret;
	struct lpte pte;

	PMAP_LOCK_ASSERT(pvo->pvo_pmap, MA_OWNED);

	/* Initialize PTE */
	moea64_pte_from_pvo(pvo, &pte);

	/* Make sure further insertion is locked out during evictions */
	rm_rlock(&mphyp_eviction_lock, &track);

	ret = mphyp_pte_insert_locked(pvo, &pte);
	rm_runlock(&mphyp_eviction_lock, &track);

	if (ret == -1) {
		/*
		 * Out of luck. Find a PTE to sacrifice.
		 */

		/* Lock out all insertions for a bit */
		rm_wlock(&mphyp_eviction_lock);
		ret = mphyp_pte_evict_and_insert_locked(pvo, &pte);
		rm_wunlock(&mphyp_eviction_lock); /* All clear */
	}

	return (ret);
}

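/*
 * Kernel dump support: copy a block of hashed page table entries into buf
 * by reading each PTE back from the hypervisor.  Returns NULL once the
 * range described by the dump context has been exhausted.
 */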
static void *
mphyp_dump_pmap(void *ctx, void *buf, u_long *nbytes)
{
	struct dump_context *dctx;
	struct lpte p, *pbuf;
	int bufidx;
	uint64_t junk;
	u_long ptex, ptex_end;

	dctx = (struct dump_context *)ctx;
	pbuf = (struct lpte *)buf;
	bufidx = 0;
	ptex = dctx->ptex;
	ptex_end = ptex + dctx->blksz / sizeof(struct lpte);
	ptex_end = MIN(ptex_end, dctx->ptex_end);
	*nbytes = (ptex_end - ptex) * sizeof(struct lpte);

	if (*nbytes == 0)
		return (NULL);

	for (; ptex < ptex_end; ptex++) {
		phyp_pft_hcall(H_READ, 0, ptex, 0, 0,
		    &p.pte_hi, &p.pte_lo, &junk);
		pbuf[bufidx++] = p;
	}

	dctx->ptex = ptex;
	return (buf);
}

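/*
 * Superpage (HPT_SP_SIZE) variant of mphyp_pte_unset: walk every PVO that
 * backs the superpage and remove its PTE, OR-ing together the REF/CHG bits
 * of the constituent mappings.
 */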
static int64_t
mphyp_pte_unset_sp(struct pvo_entry *pvo)
{
	struct lpte pte;
	uint64_t junk, refchg;
	int err;
	vm_offset_t eva;
	pmap_t pm __diagused;

	pm = pvo->pvo_pmap;
	PMAP_LOCK_ASSERT(pm, MA_OWNED);
	KASSERT((PVO_VADDR(pvo) & HPT_SP_MASK) == 0,
	    ("%s: va %#jx unaligned", __func__, (uintmax_t)PVO_VADDR(pvo)));

	refchg = 0;
	eva = PVO_VADDR(pvo) + HPT_SP_SIZE;

	for (; pvo != NULL && PVO_VADDR(pvo) < eva;
	    pvo = RB_NEXT(pvo_tree, &pm->pmap_pvo, pvo)) {
		moea64_pte_from_pvo(pvo, &pte);

		err = phyp_pft_hcall(H_REMOVE, H_AVPN, pvo->pvo_pte.slot,
		    pte.pte_hi & LPTE_AVPN_MASK, 0, &pte.pte_hi, &pte.pte_lo,
		    &junk);
		KASSERT(err == H_SUCCESS || err == H_NOT_FOUND,
		    ("Error removing page: %d", err));

		if (err == H_NOT_FOUND)
			STAT_MOEA64(moea64_pte_overflow--);
		refchg |= pte.pte_lo & (LPTE_REF | LPTE_CHG);
	}

	return (refchg);
}

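/*
 * Superpage variant of mphyp_pte_insert: insert a PTE for every PVO backing
 * the superpage, holding the shared eviction lock and upgrading to the
 * exclusive lock only when a PTEG overflows and a victim must be evicted.
 */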
static int64_t
mphyp_pte_insert_sp(struct pvo_entry *pvo)
{
	struct rm_priotracker track;
	int64_t ret;
	struct lpte pte;
	vm_offset_t eva;
	pmap_t pm __diagused;

	pm = pvo->pvo_pmap;
	PMAP_LOCK_ASSERT(pm, MA_OWNED);
	KASSERT((PVO_VADDR(pvo) & HPT_SP_MASK) == 0,
	    ("%s: va %#jx unaligned", __func__, (uintmax_t)PVO_VADDR(pvo)));

	eva = PVO_VADDR(pvo) + HPT_SP_SIZE;

	/* Make sure further insertion is locked out during evictions */
	rm_rlock(&mphyp_eviction_lock, &track);

	for (; pvo != NULL && PVO_VADDR(pvo) < eva;
	    pvo = RB_NEXT(pvo_tree, &pm->pmap_pvo, pvo)) {
		/* Initialize PTE */
		moea64_pte_from_pvo(pvo, &pte);

		ret = mphyp_pte_insert_locked(pvo, &pte);
		if (ret == -1) {
			/*
			 * Out of luck. Find a PTE to sacrifice.
			 */

			/* Lock out all insertions for a bit */
			rm_runlock(&mphyp_eviction_lock, &track);
			rm_wlock(&mphyp_eviction_lock);
			mphyp_pte_evict_and_insert_locked(pvo, &pte);
			rm_wunlock(&mphyp_eviction_lock); /* All clear */
			rm_rlock(&mphyp_eviction_lock, &track);
		}
	}

	rm_runlock(&mphyp_eviction_lock, &track);
	return (0);
}

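/*
 * Replace the PTEs of a superpage mapping: tear down the old entries and
 * reinsert them from the current PVOs, returning the accumulated REF/CHG
 * state of the old mapping.
 */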
static int64_t
mphyp_pte_replace_sp(struct pvo_entry *pvo)
{
	int64_t refchg;

	refchg = mphyp_pte_unset_sp(pvo);
	mphyp_pte_insert_sp(pvo);
	return (refchg);
}