/*-
 * Copyright (c) 2001 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Matt Thomas <matt@3am-software.com> of Allegro Networks, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
/*-
 * Copyright (C) 1995, 1996 Wolfgang Solfrank.
 * Copyright (C) 1995, 1996 TooLs GmbH.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by TooLs GmbH.
 * 4. The name of TooLs GmbH may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $NetBSD: pmap.c,v 1.28 2000/03/26 20:42:36 kleink Exp $
 */
/*-
 * Copyright (C) 2001 Benno Rice.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY Benno Rice ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/*
 * Native 64-bit page table operations for running without a hypervisor.
 */

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sched.h>
#include <sys/sysctl.h>
#include <sys/systm.h>

#include <sys/kdb.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_extern.h>
#include <vm/vm_pageout.h>
#include <vm/vm_pager.h>

#include <machine/md_var.h>
#include <machine/mmuvar.h>

#include "mmu_oea64.h"
#include "mmu_if.h"
#include "moea64_if.h"

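/*
 * Wrappers for the PowerPC storage-ordering instructions used below:
 * eieio orders the stores to the two doublewords of a PTE, ptesync
 * guarantees that page table updates have been performed, and
 * tlbsync/ptesync complete a broadcast tlbie sequence.
 */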
#define	PTESYNC()	__asm __volatile("ptesync");
#define	TLBSYNC()	__asm __volatile("tlbsync; ptesync");
#define	SYNC()		__asm __volatile("sync");
#define	EIEIO()		__asm __volatile("eieio");

#define	VSID_HASH_MASK	0x0000007fffffffffULL

/*
 * The tlbie instruction must be executed in 64-bit mode
 * so we have to twiddle MSR[SF] around every invocation.
 * Just to add to the fun, exceptions must be off as well
 * so that we can't trap in 64-bit mode. What a pain.
 */
static struct mtx	tlbie_mutex;

static __inline void
TLBIE(uint64_t vpn) {
#ifndef __powerpc64__
	register_t vpn_hi, vpn_lo;
	register_t msr;
	register_t scratch;
#endif

	vpn <<= ADDR_PIDX_SHFT;
	vpn &= ~(0xffffULL << 48);

#ifdef __powerpc64__
	mtx_lock(&tlbie_mutex);
	__asm __volatile("tlbie %0" :: "r"(vpn) : "memory");
	mtx_unlock(&tlbie_mutex);
	__asm __volatile("eieio; tlbsync; ptesync");
#else
	vpn_hi = (uint32_t)(vpn >> 32);
	vpn_lo = (uint32_t)vpn;

	/* Note: spin mutex is to disable exceptions while fiddling MSR */
	mtx_lock_spin(&tlbie_mutex);
	__asm __volatile("\
	    mfmsr %0; \
	    mr %1, %0; \
	    insrdi %1,%5,1,0; \
	    mtmsrd %1; isync; \
	    \
	    sld %1,%2,%4; \
	    or %1,%1,%3; \
	    tlbie %1; \
	    \
	    mtmsrd %0; isync; \
	    eieio; \
	    tlbsync; \
	    ptesync;"
	: "=r"(msr), "=r"(scratch) : "r"(vpn_hi), "r"(vpn_lo), "r"(32), "r"(1)
	    : "memory");
	mtx_unlock_spin(&tlbie_mutex);
#endif
}

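/*
 * Disable and re-enable data address translation (MSR[DR]) so that
 * physical addresses can be touched directly during bootstrap.
 */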
#define DISABLE_TRANS(msr)	msr = mfmsr(); mtmsr(msr & ~PSL_DR)
#define ENABLE_TRANS(msr)	mtmsr(msr)

/*
 * PTEG data.
 */
static struct	lpteg *moea64_pteg_table;

/*
 * PTE calls.
 */
static int	moea64_pte_insert_native(mmu_t, u_int, struct lpte *);
static uintptr_t moea64_pvo_to_pte_native(mmu_t, const struct pvo_entry *);
static void	moea64_pte_synch_native(mmu_t, uintptr_t pt,
		    struct lpte *pvo_pt);
static void	moea64_pte_clear_native(mmu_t, uintptr_t pt,
		    struct lpte *pvo_pt, uint64_t vpn, uint64_t ptebit);
static void	moea64_pte_change_native(mmu_t, uintptr_t pt,
		    struct lpte *pvo_pt, uint64_t vpn);
static void	moea64_pte_unset_native(mmu_t mmu, uintptr_t pt,
		    struct lpte *pvo_pt, uint64_t vpn);

/*
 * Utility routines.
 */
static void	moea64_bootstrap_native(mmu_t mmup,
		    vm_offset_t kernelstart, vm_offset_t kernelend);
static void	moea64_cpu_bootstrap_native(mmu_t, int ap);
static void	tlbia(void);

static mmu_method_t moea64_native_methods[] = {
	/* Internal interfaces */
	MMUMETHOD(mmu_bootstrap,	moea64_bootstrap_native),
	MMUMETHOD(mmu_cpu_bootstrap,	moea64_cpu_bootstrap_native),

	MMUMETHOD(moea64_pte_synch,	moea64_pte_synch_native),
	MMUMETHOD(moea64_pte_clear,	moea64_pte_clear_native),
	MMUMETHOD(moea64_pte_unset,	moea64_pte_unset_native),
	MMUMETHOD(moea64_pte_change,	moea64_pte_change_native),
	MMUMETHOD(moea64_pte_insert,	moea64_pte_insert_native),
	MMUMETHOD(moea64_pvo_to_pte,	moea64_pvo_to_pte_native),

	{ 0, 0 }
};

MMU_DEF_INHERIT(oea64_mmu_native, MMU_TYPE_G5, moea64_native_methods,
    0, oea64_mmu);

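/*
 * Compute the index of the primary PTEG for a VSID/address pair. In the
 * 64-bit PowerPC hashed page table scheme this is the XOR of the low-order
 * VSID bits with the virtual page index; the secondary PTEG is its
 * complement (ptegidx ^ moea64_pteg_mask).
 */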
static __inline u_int
va_to_pteg(uint64_t vsid, vm_offset_t addr, int large)
{
	uint64_t hash;
	int shift;

	shift = large ? moea64_large_page_shift : ADDR_PIDX_SHFT;
	hash = (vsid & VSID_HASH_MASK) ^ (((uint64_t)addr & ADDR_PIDX) >>
	    shift);
	return (hash & moea64_pteg_mask);
}

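/*
 * Harvest the hardware-maintained reference (R) and change (C) bits from
 * the PTE in the page table into the cached copy kept in the PVO.
 */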
static void
moea64_pte_synch_native(mmu_t mmu, uintptr_t pt_cookie, struct lpte *pvo_pt)
{
	struct lpte *pt = (struct lpte *)pt_cookie;

	pvo_pt->pte_lo |= pt->pte_lo & (LPTE_REF | LPTE_CHG);
}

static void
moea64_pte_clear_native(mmu_t mmu, uintptr_t pt_cookie, struct lpte *pvo_pt,
    uint64_t vpn, uint64_t ptebit)
{
	struct lpte *pt = (struct lpte *)pt_cookie;

	/*
	 * Clear the bit and invalidate the TLB entry, as shown in
	 * Section 7.6.3.2.3 of the architecture manual.
	 */
	pt->pte_lo &= ~ptebit;
	sched_pin();
	TLBIE(vpn);
	sched_unpin();
}

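/*
 * Install a PTE into the page table. The low doubleword is written first
 * and eieio is issued before the valid bit is set in the high doubleword,
 * so the hardware can never observe a valid but half-written entry; the
 * final ptesync guarantees the update has been performed.
 */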
static void
moea64_pte_set_native(struct lpte *pt, struct lpte *pvo_pt)
{

	pvo_pt->pte_hi |= LPTE_VALID;

	/*
	 * Update the PTE as defined in section 7.6.3.1.
	 * Note that the REF/CHG bits are from pvo_pt and thus should have
	 * been saved so this routine can restore them (if desired).
	 */
	pt->pte_lo = pvo_pt->pte_lo;
	EIEIO();
	pt->pte_hi = pvo_pt->pte_hi;
	PTESYNC();

	/* Keep statistics for unlocked pages */
	if (!(pvo_pt->pte_hi & LPTE_LOCKED))
		moea64_pte_valid++;
}

static void
moea64_pte_unset_native(mmu_t mmu, uintptr_t pt_cookie, struct lpte *pvo_pt,
    uint64_t vpn)
{
	struct lpte *pt = (struct lpte *)pt_cookie;

	/*
	 * Invalidate the pte.
	 */
	isync();
	sched_pin();
	pvo_pt->pte_hi &= ~LPTE_VALID;
	pt->pte_hi &= ~LPTE_VALID;
	PTESYNC();
	TLBIE(vpn);
	sched_unpin();

	/*
	 * Save the ref & chg bits.
	 */
	moea64_pte_synch_native(mmu, pt_cookie, pvo_pt);

	/* Keep statistics for unlocked pages */
	if (!(pvo_pt->pte_hi & LPTE_LOCKED))
		moea64_pte_valid--;
}

static void
moea64_pte_change_native(mmu_t mmu, uintptr_t pt, struct lpte *pvo_pt,
    uint64_t vpn)
{

	/*
	 * Invalidate the PTE
	 */
	moea64_pte_unset_native(mmu, pt, pvo_pt, vpn);
	moea64_pte_set_native((struct lpte *)pt, pvo_pt);
}

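/*
 * Per-CPU MMU setup: install the kernel SLB entries (segment registers
 * on 32-bit CPUs), point SDR1 at the page table, and flush any stale
 * TLB contents. Called on the boot CPU and again on each AP.
 */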
static void
moea64_cpu_bootstrap_native(mmu_t mmup, int ap)
{
	int i = 0;
#ifdef __powerpc64__
	struct slb *slb = PCPU_GET(slb);
	register_t seg0;
#endif

	/*
	 * Initialize segment registers and MMU
	 */

	mtmsr(mfmsr() & ~PSL_DR & ~PSL_IR);

	/*
	 * Install kernel SLB entries
	 */

#ifdef __powerpc64__
	__asm __volatile ("slbia");
	__asm __volatile ("slbmfee %0,%1; slbie %0;" : "=r"(seg0) :
	    "r"(0));

	for (i = 0; i < 64; i++) {
		if (!(slb[i].slbe & SLBE_VALID))
			continue;

		__asm __volatile ("slbmte %0, %1" ::
		    "r"(slb[i].slbv), "r"(slb[i].slbe));
	}
#else
	for (i = 0; i < 16; i++)
		mtsrin(i << ADDR_SR_SHFT, kernel_pmap->pm_sr[i]);
#endif

	/*
	 * Install page table
	 */

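	/*
	 * SDR1 holds the physical base of the page table in its upper bits
	 * and an encoded table size (HTABSIZE) in its low bits; the flsl()
	 * expression derives HTABSIZE from the PTEG mask.
	 */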
	__asm __volatile ("ptesync; mtsdr1 %0; isync"
	    :: "r"((uintptr_t)moea64_pteg_table
		     | (uintptr_t)(flsl(moea64_pteg_mask >> 11))));
	tlbia();
}

static void
moea64_bootstrap_native(mmu_t mmup, vm_offset_t kernelstart,
    vm_offset_t kernelend)
{
	vm_size_t	size;
	vm_offset_t	off;
	vm_paddr_t	pa;
	register_t	msr;

	moea64_early_bootstrap(mmup, kernelstart, kernelend);

	/*
	 * Allocate PTEG table.
	 */

	size = moea64_pteg_count * sizeof(struct lpteg);
	CTR2(KTR_PMAP, "moea64_bootstrap: %d PTEGs, %d bytes",
	    moea64_pteg_count, size);

	/*
	 * We need to allocate memory here, but the memory we allocate must
	 * itself be mapped by the page table we are about to create, and
	 * there are no BAT registers to fall back on. So drop to data real
	 * mode for a moment as a measure of last resort; we do this a
	 * couple of times during bootstrap.
	 */

	moea64_pteg_table = (struct lpteg *)moea64_bootstrap_alloc(size, size);
	DISABLE_TRANS(msr);
	bzero((void *)moea64_pteg_table, moea64_pteg_count * sizeof(struct lpteg));
	ENABLE_TRANS(msr);

	CTR1(KTR_PMAP, "moea64_bootstrap: PTEG table at %p", moea64_pteg_table);

	/*
	 * Initialize the TLBIE lock. Only one CPU may execute tlbie at
	 * a time.
	 */
#ifdef __powerpc64__
	mtx_init(&tlbie_mutex, "tlbie", NULL, MTX_DEF);
#else
	mtx_init(&tlbie_mutex, "tlbie", NULL, MTX_SPIN);
#endif

	moea64_mid_bootstrap(mmup, kernelstart, kernelend);

	/*
	 * Add a mapping for the page table itself if there is no direct map.
	 */
	if (!hw_direct_map) {
		size = moea64_pteg_count * sizeof(struct lpteg);
		off = (vm_offset_t)(moea64_pteg_table);
		DISABLE_TRANS(msr);
		for (pa = off; pa < off + size; pa += PAGE_SIZE)
			pmap_kenter(pa, pa);
		ENABLE_TRANS(msr);
	}

	/* Bring up virtual memory */
	moea64_late_bootstrap(mmup, kernelstart, kernelend);
}

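/*
 * Invalidate the entire TLB by issuing a local tlbiel for each TLB
 * congruence class. On 32-bit kernels running on 64-bit hardware the MSR
 * must be flipped into 64-bit mode around the instruction, as in TLBIE()
 * above.
 */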
static void
tlbia(void)
{
	vm_offset_t i;
#ifndef __powerpc64__
	register_t msr, scratch;
#endif

	TLBSYNC();

	for (i = 0; i < 0xFF000; i += 0x00001000) {
#ifdef __powerpc64__
		__asm __volatile("tlbiel %0" :: "r"(i));
#else
		__asm __volatile("\
		    mfmsr %0; \
		    mr %1, %0; \
		    insrdi %1,%3,1,0; \
		    mtmsrd %1; \
		    isync; \
		    \
		    tlbiel %2; \
		    \
		    mtmsrd %0; \
		    isync;"
		: "=r"(msr), "=r"(scratch) : "r"(i), "r"(1));
#endif
	}

	EIEIO();
	TLBSYNC();
}

static uintptr_t
moea64_pvo_to_pte_native(mmu_t mmu, const struct pvo_entry *pvo)
{
	struct lpte *pt;
	int pteidx, ptegidx;
	uint64_t vsid;

	/* If the PTEG index is not set, then there is no page table entry */
	if (!PVO_PTEGIDX_ISSET(pvo))
		return (-1);

	/*
	 * Calculate the ptegidx
	 */
	vsid = PVO_VSID(pvo);
	ptegidx = va_to_pteg(vsid, PVO_VADDR(pvo),
	    pvo->pvo_vaddr & PVO_LARGE);

	/*
	 * We can find the actual pte entry without searching by grabbing
	 * the PTEG index from 3 unused bits in pvo_vaddr and by
	 * noticing the HID bit.
	 */
	if (pvo->pvo_pte.lpte.pte_hi & LPTE_HID)
		ptegidx ^= moea64_pteg_mask;

	pteidx = (ptegidx << 3) | PVO_PTEGIDX_GET(pvo);

	if ((pvo->pvo_pte.lpte.pte_hi & LPTE_VALID) &&
	    !PVO_PTEGIDX_ISSET(pvo)) {
		panic("moea64_pvo_to_pte: pvo %p has valid pte in pvo but no "
		    "valid pte index", pvo);
	}

	if ((pvo->pvo_pte.lpte.pte_hi & LPTE_VALID) == 0 &&
	    PVO_PTEGIDX_ISSET(pvo)) {
		panic("moea64_pvo_to_pte: pvo %p has valid pte index in pvo "
		    "but no valid pte", pvo);
	}

	pt = &moea64_pteg_table[pteidx >> 3].pt[pteidx & 7];
	if ((pt->pte_hi ^ (pvo->pvo_pte.lpte.pte_hi & ~LPTE_VALID)) ==
	    LPTE_VALID) {
		if ((pvo->pvo_pte.lpte.pte_hi & LPTE_VALID) == 0) {
			panic("moea64_pvo_to_pte: pvo %p has valid pte in "
			    "moea64_pteg_table %p but invalid in pvo", pvo, pt);
		}

		if (((pt->pte_lo ^ pvo->pvo_pte.lpte.pte_lo) &
		    ~(LPTE_M|LPTE_CHG|LPTE_REF)) != 0) {
			panic("moea64_pvo_to_pte: pvo %p pte does not match "
			    "pte %p in moea64_pteg_table difference is %#x",
			    pvo, pt,
			    (uint32_t)(pt->pte_lo ^ pvo->pvo_pte.lpte.pte_lo));
		}

		return ((uintptr_t)pt);
	}

	if (pvo->pvo_pte.lpte.pte_hi & LPTE_VALID) {
		panic("moea64_pvo_to_pte: pvo %p has invalid pte %p in "
		    "moea64_pteg_table but valid in pvo", pvo, pt);
	}

	return (-1);
}

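/*
 * Pick a PTE within a PTEG that can be evicted: start at a pseudo-random
 * slot (seeded from the timebase), skip locked and wired entries, and
 * prefer one whose reference bit is clear. Returns a slot index, or -1
 * if every entry in the PTEG is pinned.
 */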
static __inline int
moea64_pte_spillable_ident(u_int ptegidx)
{
	struct	lpte *pt;
	int	i, j, k;

	/* Start at a random slot */
	i = mftb() % 8;
	k = -1;
	for (j = 0; j < 8; j++) {
		pt = &moea64_pteg_table[ptegidx].pt[(i + j) % 8];
		if (pt->pte_hi & (LPTE_LOCKED | LPTE_WIRED))
			continue;

		/* This is a candidate, so remember it */
		k = (i + j) % 8;

		/* Try to get a page that has not been used lately */
		if (!(pt->pte_lo & LPTE_REF))
			return (k);
	}

	return (k);
}

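/*
 * Insert a PTE into the hash table: try for a free slot in the primary
 * PTEG, then in the secondary PTEG (setting LPTE_HID accordingly), and if
 * both are full evict a spillable entry, sync its REF/CHG bits back to its
 * PVO, and take its slot.
 */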
static int
moea64_pte_insert_native(mmu_t mmu, u_int ptegidx, struct lpte *pvo_pt)
{
	struct	lpte *pt;
	struct	pvo_entry *pvo;
	u_int	pteg_bktidx;
	int	i;

	/*
	 * First try primary hash.
	 */
	pteg_bktidx = ptegidx;
	for (pt = moea64_pteg_table[pteg_bktidx].pt, i = 0; i < 8; i++, pt++) {
		if ((pt->pte_hi & (LPTE_VALID | LPTE_LOCKED)) == 0) {
			pvo_pt->pte_hi &= ~LPTE_HID;
			moea64_pte_set_native(pt, pvo_pt);
			return (i);
		}
	}

	/*
	 * Now try secondary hash.
	 */
	pteg_bktidx ^= moea64_pteg_mask;
	for (pt = moea64_pteg_table[pteg_bktidx].pt, i = 0; i < 8; i++, pt++) {
		if ((pt->pte_hi & (LPTE_VALID | LPTE_LOCKED)) == 0) {
			pvo_pt->pte_hi |= LPTE_HID;
			moea64_pte_set_native(pt, pvo_pt);
			return (i);
		}
	}

	/*
	 * Out of luck. Find a PTE to sacrifice.
	 */
	pteg_bktidx = ptegidx;
	i = moea64_pte_spillable_ident(pteg_bktidx);
	if (i < 0) {
		pteg_bktidx ^= moea64_pteg_mask;
		i = moea64_pte_spillable_ident(pteg_bktidx);
	}

	if (i < 0) {
		/* No freeable slots in either PTEG? We're hosed. */
		panic("moea64_pte_insert: overflow");
		return (-1);
	}

	if (pteg_bktidx == ptegidx)
		pvo_pt->pte_hi &= ~LPTE_HID;
	else
		pvo_pt->pte_hi |= LPTE_HID;

	/*
	 * Synchronize the sacrifice PTE with its PVO, then mark both
	 * invalid. The PVO will be reused when/if the VM system comes
	 * here after a fault.
	 */
	pt = &moea64_pteg_table[pteg_bktidx].pt[i];

	if (pt->pte_hi & LPTE_HID)
		pteg_bktidx ^= moea64_pteg_mask; /* PTEs indexed by primary */

	LIST_FOREACH(pvo, &moea64_pvo_table[pteg_bktidx], pvo_olink) {
		if (pvo->pvo_pte.lpte.pte_hi == pt->pte_hi) {
			KASSERT(pvo->pvo_pte.lpte.pte_hi & LPTE_VALID,
			    ("Invalid PVO for valid PTE!"));
			moea64_pte_unset_native(mmu, (uintptr_t)pt,
			    &pvo->pvo_pte.lpte, pvo->pvo_vpn);
			PVO_PTEGIDX_CLR(pvo);
			moea64_pte_overflow++;
			break;
		}
	}

	KASSERT(pvo->pvo_pte.lpte.pte_hi == pt->pte_hi,
	    ("Unable to find PVO for spilled PTE"));

	/*
	 * Set the new PTE.
	 */
	moea64_pte_set_native(pt, pvo_pt);

	return (i);
}