sys/mips/mips/pmap.c
1 /*
2 * Copyright (c) 1991 Regents of the University of California.
3 * All rights reserved.
4 * Copyright (c) 1994 John S. Dyson
5 * All rights reserved.
6 * Copyright (c) 1994 David Greenman
7 * All rights reserved.
8 *
9 * This code is derived from software contributed to Berkeley by
10 * the Systems Programming Group of the University of Utah Computer
11 * Science Department and William Jolitz of UUNET Technologies Inc.
12 *
13 * Redistribution and use in source and binary forms, with or without
14 * modification, are permitted provided that the following conditions
15 * are met:
16 * 1. Redistributions of source code must retain the above copyright
17 * notice, this list of conditions and the following disclaimer.
18 * 2. Redistributions in binary form must reproduce the above copyright
19 * notice, this list of conditions and the following disclaimer in the
20 * documentation and/or other materials provided with the distribution.
21 * 4. Neither the name of the University nor the names of its contributors
22 * may be used to endorse or promote products derived from this software
23 * without specific prior written permission.
24 *
25 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
26 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
27 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
28 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
29 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
30 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
31 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
32 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
33 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
34 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
35 * SUCH DAMAGE.
36 *
37 * from: @(#)pmap.c 7.7 (Berkeley) 5/12/91
38 * from: src/sys/i386/i386/pmap.c,v 1.250.2.8 2000/11/21 00:09:14 ps
39 * JNPR: pmap.c,v 1.11.2.1 2007/08/16 11:51:06 girish
40 */
41
42 /*
43 * Manages physical address maps.
44 *
45 * In addition to hardware address maps, this
46 * module is called upon to provide software-use-only
47 * maps which may or may not be stored in the same
48 * form as hardware maps. These pseudo-maps are
49 * used to store intermediate results from copy
50 * operations to and from address spaces.
51 *
52 * Since the information managed by this module is
53 * also stored by the logical address mapping module,
54 * this module may throw away valid virtual-to-physical
55 * mappings at almost any time. However, invalidations
56 * of virtual-to-physical mappings must be done as
57 * requested.
58 *
59 * In order to cope with hardware architectures which
60 * make virtual-to-physical map invalidates expensive,
61 * this module may delay invalidation or protection-reduction
62 * operations until such time as they are actually
63 * necessary. This module is given full information as
64 * to which processors are currently using which maps,
65 * and as to when physical maps must be made correct.
66 */
67
68 #include <sys/cdefs.h>
69 __FBSDID("$FreeBSD$");
70
71 #include "opt_ddb.h"
72
73 #include <sys/param.h>
74 #include <sys/systm.h>
75 #include <sys/proc.h>
76 #include <sys/msgbuf.h>
77 #include <sys/vmmeter.h>
78 #include <sys/mman.h>
79 #include <sys/smp.h>
80 #ifdef DDB
81 #include <ddb/ddb.h>
82 #endif
83
84 #include <vm/vm.h>
85 #include <vm/vm_param.h>
86 #include <vm/vm_phys.h>
87 #include <sys/lock.h>
88 #include <sys/mutex.h>
89 #include <vm/vm_kern.h>
90 #include <vm/vm_page.h>
91 #include <vm/vm_map.h>
92 #include <vm/vm_object.h>
93 #include <vm/vm_extern.h>
94 #include <vm/vm_pageout.h>
95 #include <vm/vm_pager.h>
96 #include <vm/uma.h>
97 #include <sys/pcpu.h>
98 #include <sys/sched.h>
99 #ifdef SMP
100 #include <sys/smp.h>
101 #endif
102
103 #include <machine/cache.h>
104 #include <machine/md_var.h>
105 #include <machine/tlb.h>
106
107 #undef PMAP_DEBUG
108
109 #ifndef PMAP_SHPGPERPROC
110 #define PMAP_SHPGPERPROC 200
111 #endif
112
113 #if !defined(DIAGNOSTIC)
114 #define PMAP_INLINE __inline
115 #else
116 #define PMAP_INLINE
117 #endif
118
119 /*
120 * Get PDEs and PTEs for user/kernel address space
121 */
122 #define pmap_seg_index(v) (((v) >> SEGSHIFT) & (NPDEPG - 1))
123 #define pmap_pde_index(v) (((v) >> PDRSHIFT) & (NPDEPG - 1))
124 #define pmap_pte_index(v) (((v) >> PAGE_SHIFT) & (NPTEPG - 1))
125 #define pmap_pde_pindex(v) ((v) >> PDRSHIFT)
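/*
 * A sketch of how a virtual address decomposes under these macros,
 * assuming 4K pages and a 32-bit configuration (PAGE_SHIFT == 12,
 * SEGSHIFT == PDRSHIFT == 22, NPDEPG == NPTEPG == 1024):
 *
 *	va = 0x00c03000
 *	pmap_seg_index(va) == (va >> 22) & 1023 == 3
 *	pmap_pte_index(va) == (va >> 12) & 1023 == 3
 *
 * i.e. the PTE for 'va' is entry 3 of the page table page named by
 * segment table entry 3.
 */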
126
127 #ifdef __mips_n64
128 #define NUPDE (NPDEPG * NPDEPG)
129 #define NUSERPGTBLS (NUPDE + NPDEPG)
130 #else
131 #define NUPDE (NPDEPG)
132 #define NUSERPGTBLS (NUPDE)
133 #endif
134
135 #define is_kernel_pmap(x) ((x) == kernel_pmap)
136
137 struct pmap kernel_pmap_store;
138 pd_entry_t *kernel_segmap;
139
140 vm_offset_t virtual_avail; /* VA of first avail page (after kernel bss) */
141 vm_offset_t virtual_end; /* VA of last avail page (end of kernel AS) */
142
143 static int nkpt;
144 unsigned pmap_max_asid; /* max ASID supported by the system */
145
146 #define PMAP_ASID_RESERVED 0
147
148 vm_offset_t kernel_vm_end = VM_MIN_KERNEL_ADDRESS;
149
150 static void pmap_asid_alloc(pmap_t pmap);
151
152 /*
153 * Data for the pv entry allocation mechanism
154 */
155 static uma_zone_t pvzone;
156 static struct vm_object pvzone_obj;
157 static int pv_entry_count = 0, pv_entry_max = 0, pv_entry_high_water = 0;
158
159 static PMAP_INLINE void free_pv_entry(pv_entry_t pv);
160 static pv_entry_t get_pv_entry(pmap_t locked_pmap);
161 static void pmap_pvh_free(struct md_page *pvh, pmap_t pmap, vm_offset_t va);
162 static pv_entry_t pmap_pvh_remove(struct md_page *pvh, pmap_t pmap,
163 vm_offset_t va);
164 static __inline void pmap_changebit(vm_page_t m, int bit, boolean_t setem);
165 static vm_page_t pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va,
166 vm_page_t m, vm_prot_t prot, vm_page_t mpte);
167 static int pmap_remove_pte(struct pmap *pmap, pt_entry_t *ptq, vm_offset_t va);
168 static void pmap_remove_page(struct pmap *pmap, vm_offset_t va);
169 static void pmap_remove_entry(struct pmap *pmap, vm_page_t m, vm_offset_t va);
170 static boolean_t pmap_try_insert_pv_entry(pmap_t pmap, vm_page_t mpte,
171 vm_offset_t va, vm_page_t m);
172 static void pmap_update_page(pmap_t pmap, vm_offset_t va, pt_entry_t pte);
173 static void pmap_invalidate_all(pmap_t pmap);
174 static void pmap_invalidate_page(pmap_t pmap, vm_offset_t va);
175 static void _pmap_unwire_ptp(pmap_t pmap, vm_offset_t va, vm_page_t m);
176
177 static vm_page_t pmap_allocpte(pmap_t pmap, vm_offset_t va, int flags);
178 static vm_page_t _pmap_allocpte(pmap_t pmap, unsigned ptepindex, int flags);
179 static int pmap_unuse_pt(pmap_t, vm_offset_t, vm_page_t);
180 static pt_entry_t init_pte_prot(vm_offset_t va, vm_page_t m, vm_prot_t prot);
181
182 #ifdef SMP
183 static void pmap_invalidate_page_action(void *arg);
184 static void pmap_invalidate_all_action(void *arg);
185 static void pmap_update_page_action(void *arg);
186 #endif
187
188 #ifndef __mips_n64
189 /*
190 * This structure is for high memory (memory above 512MB in 32-bit mode) support.
191 * The highmem area does not have a KSEG0 mapping, and we need a mechanism to
192 * do temporary per-CPU mappings for pmap_zero_page, pmap_copy_page etc.
193 *
194 * At bootup, we reserve 2 virtual pages per CPU for mapping highmem pages. To
195 * access a highmem physical address on a CPU, we map the physical address to
196 * the reserved virtual address for the CPU in the kernel pagetable. This is
197 * done with interrupts disabled (although a spinlock and sched_pin would be
198 * sufficient).
199 */
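/*
 * Sketch of the intended usage pattern (this is essentially what the
 * page zero/copy routines do for pages that are not direct-mappable):
 *
 *	va = pmap_lmem_map1(pa);	-- interrupts now disabled
 *	bzero((caddr_t)va, PAGE_SIZE);	-- access the high page
 *	pmap_lmem_unmap();		-- unmap and restore interrupts
 */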
200 struct local_sysmaps {
201 vm_offset_t base;
202 uint32_t saved_intr;
203 uint16_t valid1, valid2;
204 };
205 static struct local_sysmaps sysmap_lmem[MAXCPU];
206
207 static __inline void
208 pmap_alloc_lmem_map(void)
209 {
210 int i;
211
212 for (i = 0; i < MAXCPU; i++) {
213 sysmap_lmem[i].base = virtual_avail;
214 virtual_avail += PAGE_SIZE * 2;
215 sysmap_lmem[i].valid1 = sysmap_lmem[i].valid2 = 0;
216 }
217 }
218
219 static __inline vm_offset_t
220 pmap_lmem_map1(vm_paddr_t phys)
221 {
222 struct local_sysmaps *sysm;
223 pt_entry_t *pte, npte;
224 vm_offset_t va;
225 uint32_t intr;
226 int cpu;
227
228 intr = intr_disable();
229 cpu = PCPU_GET(cpuid);
230 sysm = &sysmap_lmem[cpu];
231 sysm->saved_intr = intr;
232 va = sysm->base;
233 npte = TLBLO_PA_TO_PFN(phys) |
234 PTE_D | PTE_V | PTE_G | PTE_W | PTE_C_CACHE;
235 pte = pmap_pte(kernel_pmap, va);
236 *pte = npte;
237 sysm->valid1 = 1;
238 return (va);
239 }
240
241 static __inline vm_offset_t
242 pmap_lmem_map2(vm_paddr_t phys1, vm_paddr_t phys2)
243 {
244 struct local_sysmaps *sysm;
245 pt_entry_t *pte, npte;
246 vm_offset_t va1, va2;
247 uint32_t intr;
248 int cpu;
249
250 intr = intr_disable();
251 cpu = PCPU_GET(cpuid);
252 sysm = &sysmap_lmem[cpu];
253 sysm->saved_intr = intr;
254 va1 = sysm->base;
255 va2 = sysm->base + PAGE_SIZE;
256 npte = TLBLO_PA_TO_PFN(phys1) |
257 PTE_D | PTE_V | PTE_G | PTE_W | PTE_C_CACHE;
258 pte = pmap_pte(kernel_pmap, va1);
259 *pte = npte;
260 npte = TLBLO_PA_TO_PFN(phys2) |
261 PTE_D | PTE_V | PTE_G | PTE_W | PTE_C_CACHE;
262 pte = pmap_pte(kernel_pmap, va2);
263 *pte = npte;
264 sysm->valid1 = 1;
265 sysm->valid2 = 1;
266 return (va1);
267 }
268
269 static __inline void
270 pmap_lmem_unmap(void)
271 {
272 struct local_sysmaps *sysm;
273 pt_entry_t *pte;
274 int cpu;
275
276 cpu = PCPU_GET(cpuid);
277 sysm = &sysmap_lmem[cpu];
278 pte = pmap_pte(kernel_pmap, sysm->base);
279 *pte = PTE_G;
280 tlb_invalidate_address(kernel_pmap, sysm->base);
281 sysm->valid1 = 0;
282 if (sysm->valid2) {
283 pte = pmap_pte(kernel_pmap, sysm->base + PAGE_SIZE);
284 *pte = PTE_G;
285 tlb_invalidate_address(kernel_pmap, sysm->base + PAGE_SIZE);
286 sysm->valid2 = 0;
287 }
288 intr_restore(sysm->saved_intr);
289 }
290 #else /* __mips_n64 */
291
292 static __inline void
293 pmap_alloc_lmem_map(void)
294 {
295 }
296
297 static __inline vm_offset_t
298 pmap_lmem_map1(vm_paddr_t phys)
299 {
300
301 return (0);
302 }
303
304 static __inline vm_offset_t
305 pmap_lmem_map2(vm_paddr_t phys1, vm_paddr_t phys2)
306 {
307
308 return (0);
309 }
310
311 static __inline void
312 pmap_lmem_unmap(void)
313 {
314
315 	return;
316 }
317 #endif /* !__mips_n64 */
318
319 /*
320 * Page table entry lookup routines.
321 */
322 static __inline pd_entry_t *
323 pmap_segmap(pmap_t pmap, vm_offset_t va)
324 {
325
326 return (&pmap->pm_segtab[pmap_seg_index(va)]);
327 }
328
329 #ifdef __mips_n64
330 static __inline pd_entry_t *
331 pmap_pdpe_to_pde(pd_entry_t *pdpe, vm_offset_t va)
332 {
333 pd_entry_t *pde;
334
335 pde = (pd_entry_t *)*pdpe;
336 return (&pde[pmap_pde_index(va)]);
337 }
338
339 static __inline pd_entry_t *
340 pmap_pde(pmap_t pmap, vm_offset_t va)
341 {
342 pd_entry_t *pdpe;
343
344 pdpe = pmap_segmap(pmap, va);
345 if (pdpe == NULL || *pdpe == NULL)
346 return (NULL);
347
348 return (pmap_pdpe_to_pde(pdpe, va));
349 }
350 #else
351 static __inline pd_entry_t *
352 pmap_pdpe_to_pde(pd_entry_t *pdpe, vm_offset_t va)
353 {
354
355 return (pdpe);
356 }
357
358 static __inline
359 pd_entry_t *pmap_pde(pmap_t pmap, vm_offset_t va)
360 {
361
362 return (pmap_segmap(pmap, va));
363 }
364 #endif
365
366 static __inline pt_entry_t *
367 pmap_pde_to_pte(pd_entry_t *pde, vm_offset_t va)
368 {
369 pt_entry_t *pte;
370
371 pte = (pt_entry_t *)*pde;
372 return (&pte[pmap_pte_index(va)]);
373 }
374
375 pt_entry_t *
376 pmap_pte(pmap_t pmap, vm_offset_t va)
377 {
378 pd_entry_t *pde;
379
380 pde = pmap_pde(pmap, va);
381 if (pde == NULL || *pde == NULL)
382 return (NULL);
383
384 return (pmap_pde_to_pte(pde, va));
385 }
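/*
 * Note that the intermediate directory entries above do not hold
 * hardware PTEs; they hold the direct-map kernel virtual address of
 * the next level's page, which is why *pdpe and *pde can simply be
 * cast to a pointer and indexed.
 */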
386
387 vm_offset_t
388 pmap_steal_memory(vm_size_t size)
389 {
390 vm_paddr_t bank_size, pa;
391 vm_offset_t va;
392
393 size = round_page(size);
394 bank_size = phys_avail[1] - phys_avail[0];
395 while (size > bank_size) {
396 int i;
397
398 for (i = 0; phys_avail[i + 2]; i += 2) {
399 phys_avail[i] = phys_avail[i + 2];
400 phys_avail[i + 1] = phys_avail[i + 3];
401 }
402 phys_avail[i] = 0;
403 phys_avail[i + 1] = 0;
404 if (!phys_avail[0])
405 panic("pmap_steal_memory: out of memory");
406 bank_size = phys_avail[1] - phys_avail[0];
407 }
408
409 pa = phys_avail[0];
410 phys_avail[0] += size;
411 if (MIPS_DIRECT_MAPPABLE(pa) == 0)
412 panic("Out of memory below 512Meg?");
413 va = MIPS_PHYS_TO_DIRECT(pa);
414 bzero((caddr_t)va, size);
415 return (va);
416 }
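/*
 * pmap_steal_memory() is the pre-VM bootstrap allocator: it carves
 * 'size' bytes off the front of the first phys_avail[] bank and
 * returns the direct-mapped KVA of the stolen region.  For example,
 * the kernel segment table below is obtained with:
 *
 *	kernel_segmap = (pd_entry_t *)pmap_steal_memory(PAGE_SIZE);
 */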
417
418 /*
419 * Bootstrap the system enough to run with virtual memory. This
420 * assumes that the phys_avail array has been initialized.
421 */
422 static void
423 pmap_create_kernel_pagetable(void)
424 {
425 int i, j;
426 vm_offset_t ptaddr;
427 pt_entry_t *pte;
428 #ifdef __mips_n64
429 pd_entry_t *pde;
430 vm_offset_t pdaddr;
431 int npt, npde;
432 #endif
433
434 /*
435 * Allocate segment table for the kernel
436 */
437 kernel_segmap = (pd_entry_t *)pmap_steal_memory(PAGE_SIZE);
438
439 /*
440 * Allocate second level page tables for the kernel
441 */
442 #ifdef __mips_n64
443 npde = howmany(NKPT, NPDEPG);
444 pdaddr = pmap_steal_memory(PAGE_SIZE * npde);
445 #endif
446 nkpt = NKPT;
447 ptaddr = pmap_steal_memory(PAGE_SIZE * nkpt);
448
449 /*
450 * The R[4-7]?00 stores only one copy of the Global bit in the
451 * translation lookaside buffer for each pair of page entries. Thus
452 * invalid entries must have the Global bit set so that when the
453 * EntryLo0 and EntryLo1 G bits are ANDed together they produce a
454 * global bit to store in the TLB.
455 */
456 for (i = 0, pte = (pt_entry_t *)ptaddr; i < (nkpt * NPTEPG); i++, pte++)
457 *pte = PTE_G;
458
459 #ifdef __mips_n64
460 for (i = 0, npt = nkpt; npt > 0; i++) {
461 kernel_segmap[i] = (pd_entry_t)(pdaddr + i * PAGE_SIZE);
462 pde = (pd_entry_t *)kernel_segmap[i];
463
464 for (j = 0; j < NPDEPG && npt > 0; j++, npt--)
465 pde[j] = (pd_entry_t)(ptaddr + (i * NPDEPG + j) * PAGE_SIZE);
466 }
467 #else
468 for (i = 0, j = pmap_seg_index(VM_MIN_KERNEL_ADDRESS); i < nkpt; i++, j++)
469 kernel_segmap[j] = (pd_entry_t)(ptaddr + (i * PAGE_SIZE));
470 #endif
471
472 PMAP_LOCK_INIT(kernel_pmap);
473 kernel_pmap->pm_segtab = kernel_segmap;
474 CPU_FILL(&kernel_pmap->pm_active);
475 TAILQ_INIT(&kernel_pmap->pm_pvlist);
476 kernel_pmap->pm_asid[0].asid = PMAP_ASID_RESERVED;
477 kernel_pmap->pm_asid[0].gen = 0;
478 kernel_vm_end += nkpt * NPTEPG * PAGE_SIZE;
479 }
480
481 void
482 pmap_bootstrap(void)
483 {
484 int i;
485 int need_local_mappings = 0;
486
487 /* Sort. */
488 again:
489 for (i = 0; phys_avail[i + 1] != 0; i += 2) {
490 /*
491 * Keep the memory aligned on page boundary.
492 */
493 phys_avail[i] = round_page(phys_avail[i]);
494 phys_avail[i + 1] = trunc_page(phys_avail[i + 1]);
495
496 if (i < 2)
497 continue;
498 if (phys_avail[i - 2] > phys_avail[i]) {
499 vm_paddr_t ptemp[2];
500
501 ptemp[0] = phys_avail[i + 0];
502 ptemp[1] = phys_avail[i + 1];
503
504 phys_avail[i + 0] = phys_avail[i - 2];
505 phys_avail[i + 1] = phys_avail[i - 1];
506
507 phys_avail[i - 2] = ptemp[0];
508 phys_avail[i - 1] = ptemp[1];
509 goto again;
510 }
511 }
512
513 /*
514 * On 32-bit systems, we may have memory which cannot be mapped directly.
515 * This memory will need temporary mapping before it can be
516 * accessed.
517 */
518 if (!MIPS_DIRECT_MAPPABLE(phys_avail[i - 1] - 1))
519 need_local_mappings = 1;
520
521 /*
522 * Copy the phys_avail[] array before we start stealing memory from it.
523 */
524 for (i = 0; phys_avail[i + 1] != 0; i += 2) {
525 physmem_desc[i] = phys_avail[i];
526 physmem_desc[i + 1] = phys_avail[i + 1];
527 }
528
529 Maxmem = atop(phys_avail[i - 1]);
530
531 if (bootverbose) {
532 printf("Physical memory chunk(s):\n");
533 for (i = 0; phys_avail[i + 1] != 0; i += 2) {
534 vm_paddr_t size;
535
536 size = phys_avail[i + 1] - phys_avail[i];
537 printf("%#08jx - %#08jx, %ju bytes (%ju pages)\n",
538 (uintmax_t) phys_avail[i],
539 (uintmax_t) phys_avail[i + 1] - 1,
540 (uintmax_t) size, (uintmax_t) size / PAGE_SIZE);
541 }
542 printf("Maxmem is 0x%0jx\n", ptoa((uintmax_t)Maxmem));
543 }
544 /*
545 * Steal the message buffer from the beginning of memory.
546 */
547 msgbufp = (struct msgbuf *)pmap_steal_memory(msgbufsize);
548 msgbufinit(msgbufp, msgbufsize);
549
550 /*
551 * Steal thread0 kstack.
552 */
553 kstack0 = pmap_steal_memory(KSTACK_PAGES << PAGE_SHIFT);
554
555 virtual_avail = VM_MIN_KERNEL_ADDRESS;
556 virtual_end = VM_MAX_KERNEL_ADDRESS;
557
558 #ifdef SMP
559 /*
560 * Steal some virtual address space to map the pcpu area.
561 */
562 virtual_avail = roundup2(virtual_avail, PAGE_SIZE * 2);
563 pcpup = (struct pcpu *)virtual_avail;
564 virtual_avail += PAGE_SIZE * 2;
565
566 /*
567 * Initialize the wired TLB entry mapping the pcpu region for
568 * the BSP at 'pcpup'. Up until this point we were operating
569 * with the 'pcpup' for the BSP pointing to a virtual address
570 * in KSEG0 so there was no need for a TLB mapping.
571 */
572 mips_pcpu_tlb_init(PCPU_ADDR(0));
573
574 if (bootverbose)
575 printf("pcpu is available at virtual address %p.\n", pcpup);
576 #endif
577
578 if (need_local_mappings)
579 pmap_alloc_lmem_map();
580 pmap_create_kernel_pagetable();
581 pmap_max_asid = VMNUM_PIDS;
582 mips_wr_entryhi(0);
583 mips_wr_pagemask(0);
584 }
585
586 /*
587 * Initialize a vm_page's machine-dependent fields.
588 */
589 void
590 pmap_page_init(vm_page_t m)
591 {
592
593 TAILQ_INIT(&m->md.pv_list);
594 m->md.pv_list_count = 0;
595 m->md.pv_flags = 0;
596 }
597
598 /*
599 * Initialize the pmap module.
600 * Called by vm_init, to initialize any structures that the pmap
601 * system needs to map virtual memory.
602 * pmap_init has been enhanced to support discontiguous physical
603 * memory in a fairly consistent way.
604 */
605 void
606 pmap_init(void)
607 {
608
609 /*
610 * Initialize the address space (zone) for the pv entries. Set a
611 * high water mark so that the system can recover from excessive
612 * numbers of pv entries.
613 */
614 pvzone = uma_zcreate("PV ENTRY", sizeof(struct pv_entry), NULL, NULL,
615 NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_VM | UMA_ZONE_NOFREE);
616 pv_entry_max = PMAP_SHPGPERPROC * maxproc + cnt.v_page_count;
617 pv_entry_high_water = 9 * (pv_entry_max / 10);
618 uma_zone_set_obj(pvzone, &pvzone_obj, pv_entry_max);
619 }
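/*
 * A rough worked example of the sizing above, with the default
 * PMAP_SHPGPERPROC of 200 and, say, maxproc = 1000 and 65536
 * physical pages:
 *
 *	pv_entry_max	    = 200 * 1000 + 65536 = 265536
 *	pv_entry_high_water = 9 * (265536 / 10)  = 238977
 *
 * so the pagedaemon is woken once allocations pass roughly 90% of
 * the limit.
 */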
620
621 /***************************************************
622 * Low level helper routines.....
623 ***************************************************/
624
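/*
 * The TLB shootdown helpers below share a pattern: a *_local routine
 * does the work on the current CPU, and the public wrapper either
 * calls it directly (UP) or runs it on every CPU via smp_rendezvous()
 * (SMP).  When a user pmap is not active on a CPU, nothing is flushed
 * there; instead pm_asid[cpuid].gen is zeroed, forcing a fresh ASID
 * to be allocated the next time the pmap runs on that CPU, which
 * implicitly discards any stale TLB entries tagged with the old ASID.
 */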
625 static __inline void
626 pmap_invalidate_all_local(pmap_t pmap)
627 {
628 u_int cpuid;
629
630 cpuid = PCPU_GET(cpuid);
631
632 if (pmap == kernel_pmap) {
633 tlb_invalidate_all();
634 return;
635 }
636 if (CPU_ISSET(cpuid, &pmap->pm_active))
637 tlb_invalidate_all_user(pmap);
638 else
639 pmap->pm_asid[cpuid].gen = 0;
640 }
641
642 #ifdef SMP
643 static void
644 pmap_invalidate_all(pmap_t pmap)
645 {
646
647 smp_rendezvous(0, pmap_invalidate_all_action, 0, pmap);
648 }
649
650 static void
651 pmap_invalidate_all_action(void *arg)
652 {
653
654 pmap_invalidate_all_local((pmap_t)arg);
655 }
656 #else
657 static void
658 pmap_invalidate_all(pmap_t pmap)
659 {
660
661 pmap_invalidate_all_local(pmap);
662 }
663 #endif
664
665 static __inline void
666 pmap_invalidate_page_local(pmap_t pmap, vm_offset_t va)
667 {
668 u_int cpuid;
669
670 cpuid = PCPU_GET(cpuid);
671
672 if (is_kernel_pmap(pmap)) {
673 tlb_invalidate_address(pmap, va);
674 return;
675 }
676 if (pmap->pm_asid[cpuid].gen != PCPU_GET(asid_generation))
677 return;
678 else if (!CPU_ISSET(cpuid, &pmap->pm_active)) {
679 pmap->pm_asid[cpuid].gen = 0;
680 return;
681 }
682 tlb_invalidate_address(pmap, va);
683 }
684
685 #ifdef SMP
686 struct pmap_invalidate_page_arg {
687 pmap_t pmap;
688 vm_offset_t va;
689 };
690
691 static void
692 pmap_invalidate_page(pmap_t pmap, vm_offset_t va)
693 {
694 struct pmap_invalidate_page_arg arg;
695
696 arg.pmap = pmap;
697 arg.va = va;
698 smp_rendezvous(0, pmap_invalidate_page_action, 0, &arg);
699 }
700
701 static void
702 pmap_invalidate_page_action(void *arg)
703 {
704 struct pmap_invalidate_page_arg *p = arg;
705
706 pmap_invalidate_page_local(p->pmap, p->va);
707 }
708 #else
709 static void
710 pmap_invalidate_page(pmap_t pmap, vm_offset_t va)
711 {
712
713 pmap_invalidate_page_local(pmap, va);
714 }
715 #endif
716
717 static __inline void
718 pmap_update_page_local(pmap_t pmap, vm_offset_t va, pt_entry_t pte)
719 {
720 u_int cpuid;
721
722 cpuid = PCPU_GET(cpuid);
723
724 if (is_kernel_pmap(pmap)) {
725 tlb_update(pmap, va, pte);
726 return;
727 }
728 if (pmap->pm_asid[cpuid].gen != PCPU_GET(asid_generation))
729 return;
730 else if (!CPU_ISSET(cpuid, &pmap->pm_active)) {
731 pmap->pm_asid[cpuid].gen = 0;
732 return;
733 }
734 tlb_update(pmap, va, pte);
735 }
736
737 #ifdef SMP
738 struct pmap_update_page_arg {
739 pmap_t pmap;
740 vm_offset_t va;
741 pt_entry_t pte;
742 };
743
744 static void
745 pmap_update_page(pmap_t pmap, vm_offset_t va, pt_entry_t pte)
746 {
747 struct pmap_update_page_arg arg;
748
749 arg.pmap = pmap;
750 arg.va = va;
751 arg.pte = pte;
752 smp_rendezvous(0, pmap_update_page_action, 0, &arg);
753 }
754
755 static void
756 pmap_update_page_action(void *arg)
757 {
758 struct pmap_update_page_arg *p = arg;
759
760 pmap_update_page_local(p->pmap, p->va, p->pte);
761 }
762 #else
763 static void
764 pmap_update_page(pmap_t pmap, vm_offset_t va, pt_entry_t pte)
765 {
766
767 pmap_update_page_local(pmap, va, pte);
768 }
769 #endif
770
771 /*
772 * Routine: pmap_extract
773 * Function:
774 * Extract the physical page address associated
775 * with the given map/virtual_address pair.
776 */
777 vm_paddr_t
778 pmap_extract(pmap_t pmap, vm_offset_t va)
779 {
780 pt_entry_t *pte;
781 vm_offset_t retval = 0;
782
783 PMAP_LOCK(pmap);
784 pte = pmap_pte(pmap, va);
785 if (pte) {
786 retval = TLBLO_PTE_TO_PA(*pte) | (va & PAGE_MASK);
787 }
788 PMAP_UNLOCK(pmap);
789 return (retval);
790 }
791
792 /*
793 * Routine: pmap_extract_and_hold
794 * Function:
795 * Atomically extract and hold the physical page
796 * with the given pmap and virtual address pair
797 * if that mapping permits the given protection.
798 */
799 vm_page_t
800 pmap_extract_and_hold(pmap_t pmap, vm_offset_t va, vm_prot_t prot)
801 {
802 pt_entry_t pte;
803 vm_page_t m;
804 vm_paddr_t pa;
805
806 m = NULL;
807 pa = 0;
808 PMAP_LOCK(pmap);
809 retry:
810 pte = *pmap_pte(pmap, va);
811 if (pte != 0 && pte_test(&pte, PTE_V) &&
812 (pte_test(&pte, PTE_D) || (prot & VM_PROT_WRITE) == 0)) {
813 if (vm_page_pa_tryrelock(pmap, TLBLO_PTE_TO_PA(pte), &pa))
814 goto retry;
815
816 m = PHYS_TO_VM_PAGE(TLBLO_PTE_TO_PA(pte));
817 vm_page_hold(m);
818 }
819 PA_UNLOCK_COND(pa);
820 PMAP_UNLOCK(pmap);
821 return (m);
822 }
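/*
 * The retry loop above is the usual vm_page_pa_tryrelock() dance:
 * the page lock for 'pa' cannot be taken unconditionally while the
 * pmap lock is held, so tryrelock may have to drop and reacquire
 * locks; when it reports that this happened, the PTE may have
 * changed underneath us and must be re-read.
 */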
823
824 /***************************************************
825 * Low level mapping routines.....
826 ***************************************************/
827
828 /*
829 * add a wired page to the kva
830 */
831 void
832 pmap_kenter_attr(vm_offset_t va, vm_paddr_t pa, int attr)
833 {
834 pt_entry_t *pte;
835 pt_entry_t opte, npte;
836
837 #ifdef PMAP_DEBUG
838 printf("pmap_kenter: va: %p -> pa: %p\n", (void *)va, (void *)pa);
839 #endif
840
841 pte = pmap_pte(kernel_pmap, va);
842 opte = *pte;
843 npte = TLBLO_PA_TO_PFN(pa) | attr | PTE_D | PTE_V | PTE_G;
844 *pte = npte;
845 if (pte_test(&opte, PTE_V) && opte != npte)
846 pmap_update_page(kernel_pmap, va, npte);
847 }
848
849 void
850 pmap_kenter(vm_offset_t va, vm_paddr_t pa)
851 {
852
853 KASSERT(is_cacheable_mem(pa),
854 ("pmap_kenter: memory at 0x%lx is not cacheable", (u_long)pa));
855
856 pmap_kenter_attr(va, pa, PTE_C_CACHE);
857 }
858
859 /*
860 * remove a page from the kernel pagetables
861 */
862 /* PMAP_INLINE */ void
863 pmap_kremove(vm_offset_t va)
864 {
865 pt_entry_t *pte;
866
867 /*
868 * Write back all caches from the page being destroyed
869 */
870 mips_dcache_wbinv_range_index(va, PAGE_SIZE);
871
872 pte = pmap_pte(kernel_pmap, va);
873 *pte = PTE_G;
874 pmap_invalidate_page(kernel_pmap, va);
875 }
876
877 /*
878 * Used to map a range of physical addresses into kernel
879 * virtual address space.
880 *
881 * The value passed in '*virt' is a suggested virtual address for
882 * the mapping. Architectures which can support a direct-mapped
883 * physical to virtual region can return the appropriate address
884 * within that region, leaving '*virt' unchanged. Other
885 * architectures should map the pages starting at '*virt' and
886 * update '*virt' with the first usable address after the mapped
887 * region.
888 *
889 * Use XKPHYS for 64 bit, and KSEG0 where possible for 32 bit.
890 */
891 vm_offset_t
892 pmap_map(vm_offset_t *virt, vm_paddr_t start, vm_paddr_t end, int prot)
893 {
894 vm_offset_t va, sva;
895
896 if (MIPS_DIRECT_MAPPABLE(end - 1))
897 return (MIPS_PHYS_TO_DIRECT(start));
898
899 va = sva = *virt;
900 while (start < end) {
901 pmap_kenter(va, start);
902 va += PAGE_SIZE;
903 start += PAGE_SIZE;
904 }
905 *virt = va;
906 return (sva);
907 }
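/*
 * For example (a sketch on 32-bit, with a KSEG0-mappable range):
 *
 *	sva = pmap_map(&virt, 0x01000000, 0x01004000, VM_PROT_ALL);
 *
 * returns MIPS_PHYS_TO_DIRECT(0x01000000) and leaves '*virt'
 * untouched, since the whole range is directly mappable and no
 * PTEs need to be created.
 */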
908
909 /*
910 * Add a list of wired pages to the kva
911 * this routine is only used for temporary
912 * kernel mappings that do not need to have
913 * page modification or references recorded.
914 * Note that old mappings are simply written
915 * over. The page *must* be wired.
916 */
917 void
918 pmap_qenter(vm_offset_t va, vm_page_t *m, int count)
919 {
920 int i;
921 vm_offset_t origva = va;
922
923 for (i = 0; i < count; i++) {
924 pmap_flush_pvcache(m[i]);
925 pmap_kenter(va, VM_PAGE_TO_PHYS(m[i]));
926 va += PAGE_SIZE;
927 }
928
929 mips_dcache_wbinv_range_index(origva, PAGE_SIZE*count);
930 }
931
932 /*
933 * this routine jerks page mappings from the
934 * kernel -- it is meant only for temporary mappings.
935 */
936 void
937 pmap_qremove(vm_offset_t va, int count)
938 {
939 /*
940 * No need to wb/inv caches here,
941 * pmap_kremove will do it for us
942 */
943
944 while (count-- > 0) {
945 pmap_kremove(va);
946 va += PAGE_SIZE;
947 }
948 }
949
950 /***************************************************
951 * Page table page management routines.....
952 ***************************************************/
953
954 /*
955 * Decrements a page table page's wire count, which is used to record the
956 * number of valid page table entries within the page. If the wire count
957 * drops to zero, then the page table page is unmapped. Returns TRUE if the
958 * page table page was unmapped and FALSE otherwise.
959 */
960 static PMAP_INLINE boolean_t
961 pmap_unwire_ptp(pmap_t pmap, vm_offset_t va, vm_page_t m)
962 {
963
964 --m->wire_count;
965 if (m->wire_count == 0) {
966 _pmap_unwire_ptp(pmap, va, m);
967 return (TRUE);
968 } else
969 return (FALSE);
970 }
971
972 static void
973 _pmap_unwire_ptp(pmap_t pmap, vm_offset_t va, vm_page_t m)
974 {
975 pd_entry_t *pde;
976
977 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
978 /*
979 * unmap the page table page
980 */
981 #ifdef __mips_n64
982 if (m->pindex < NUPDE)
983 pde = pmap_pde(pmap, va);
984 else
985 pde = pmap_segmap(pmap, va);
986 #else
987 pde = pmap_pde(pmap, va);
988 #endif
989 *pde = 0;
990 pmap->pm_stats.resident_count--;
991
992 #ifdef __mips_n64
993 if (m->pindex < NUPDE) {
994 pd_entry_t *pdp;
995 vm_page_t pdpg;
996
997 /*
998 * Recursively decrement next level pagetable refcount
999 */
1000 pdp = (pd_entry_t *)*pmap_segmap(pmap, va);
1001 pdpg = PHYS_TO_VM_PAGE(MIPS_DIRECT_TO_PHYS(pdp));
1002 pmap_unwire_ptp(pmap, va, pdpg);
1003 }
1004 #endif
1005 if (pmap->pm_ptphint == m)
1006 pmap->pm_ptphint = NULL;
1007
1008 /*
1009 * If the page is finally unwired, simply free it.
1010 */
1011 vm_page_free_zero(m);
1012 atomic_subtract_int(&cnt.v_wire_count, 1);
1013 }
1014
1015 /*
1016 * After removing a page table entry, this routine is used to
1017 * conditionally free the page, and manage the hold/wire counts.
1018 */
1019 static int
1020 pmap_unuse_pt(pmap_t pmap, vm_offset_t va, vm_page_t mpte)
1021 {
1022 unsigned ptepindex;
1023 pd_entry_t pteva;
1024
1025 if (va >= VM_MAXUSER_ADDRESS)
1026 return (0);
1027
1028 if (mpte == NULL) {
1029 ptepindex = pmap_pde_pindex(va);
1030 if (pmap->pm_ptphint &&
1031 (pmap->pm_ptphint->pindex == ptepindex)) {
1032 mpte = pmap->pm_ptphint;
1033 } else {
1034 pteva = *pmap_pde(pmap, va);
1035 mpte = PHYS_TO_VM_PAGE(MIPS_DIRECT_TO_PHYS(pteva));
1036 pmap->pm_ptphint = mpte;
1037 }
1038 }
1039 return (pmap_unwire_ptp(pmap, va, mpte));
1040 }
1041
1042 void
1043 pmap_pinit0(pmap_t pmap)
1044 {
1045 int i;
1046
1047 PMAP_LOCK_INIT(pmap);
1048 pmap->pm_segtab = kernel_segmap;
1049 CPU_ZERO(&pmap->pm_active);
1050 pmap->pm_ptphint = NULL;
1051 for (i = 0; i < MAXCPU; i++) {
1052 pmap->pm_asid[i].asid = PMAP_ASID_RESERVED;
1053 pmap->pm_asid[i].gen = 0;
1054 }
1055 PCPU_SET(curpmap, pmap);
1056 TAILQ_INIT(&pmap->pm_pvlist);
1057 bzero(&pmap->pm_stats, sizeof pmap->pm_stats);
1058 }
1059
1060 void
1061 pmap_grow_direct_page_cache(void)
1062 {
1063
1064 #ifdef __mips_n64
1065 vm_contig_grow_cache(3, 0, MIPS_XKPHYS_LARGEST_PHYS);
1066 #else
1067 vm_contig_grow_cache(3, 0, MIPS_KSEG0_LARGEST_PHYS);
1068 #endif
1069 }
1070
1071 vm_page_t
1072 pmap_alloc_direct_page(unsigned int index, int req)
1073 {
1074 vm_page_t m;
1075
1076 m = vm_page_alloc_freelist(VM_FREELIST_DIRECT, req | VM_ALLOC_WIRED |
1077 VM_ALLOC_ZERO);
1078 if (m == NULL)
1079 return (NULL);
1080
1081 if ((m->flags & PG_ZERO) == 0)
1082 pmap_zero_page(m);
1083
1084 m->pindex = index;
1085 return (m);
1086 }
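/*
 * Pages taken from VM_FREELIST_DIRECT are always direct-mappable
 * (KSEG0 on 32-bit, XKPHYS on n64), so callers can address them
 * without creating a mapping:
 *
 *	va = MIPS_PHYS_TO_DIRECT(VM_PAGE_TO_PHYS(m));
 */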
1087
1088 /*
1089 * Initialize a preallocated and zeroed pmap structure,
1090 * such as one in a vmspace structure.
1091 */
1092 int
1093 pmap_pinit(pmap_t pmap)
1094 {
1095 vm_offset_t ptdva;
1096 vm_page_t ptdpg;
1097 int i;
1098
1099 PMAP_LOCK_INIT(pmap);
1100
1101 /*
1102 * allocate the page directory page
1103 */
1104 while ((ptdpg = pmap_alloc_direct_page(NUSERPGTBLS, VM_ALLOC_NORMAL)) == NULL)
1105 pmap_grow_direct_page_cache();
1106
1107 ptdva = MIPS_PHYS_TO_DIRECT(VM_PAGE_TO_PHYS(ptdpg));
1108 pmap->pm_segtab = (pd_entry_t *)ptdva;
1109 CPU_ZERO(&pmap->pm_active);
1110 pmap->pm_ptphint = NULL;
1111 for (i = 0; i < MAXCPU; i++) {
1112 pmap->pm_asid[i].asid = PMAP_ASID_RESERVED;
1113 pmap->pm_asid[i].gen = 0;
1114 }
1115 TAILQ_INIT(&pmap->pm_pvlist);
1116 bzero(&pmap->pm_stats, sizeof pmap->pm_stats);
1117
1118 return (1);
1119 }
1120
1121 /*
1122 * this routine is called if the page table page is not
1123 * mapped correctly.
1124 */
1125 static vm_page_t
1126 _pmap_allocpte(pmap_t pmap, unsigned ptepindex, int flags)
1127 {
1128 vm_offset_t pageva;
1129 vm_page_t m;
1130
1131 KASSERT((flags & (M_NOWAIT | M_WAITOK)) == M_NOWAIT ||
1132 (flags & (M_NOWAIT | M_WAITOK)) == M_WAITOK,
1133 ("_pmap_allocpte: flags is neither M_NOWAIT nor M_WAITOK"));
1134
1135 /*
1136 * Find or fabricate a new pagetable page
1137 */
1138 if ((m = pmap_alloc_direct_page(ptepindex, VM_ALLOC_NORMAL)) == NULL) {
1139 if (flags & M_WAITOK) {
1140 PMAP_UNLOCK(pmap);
1141 vm_page_unlock_queues();
1142 pmap_grow_direct_page_cache();
1143 vm_page_lock_queues();
1144 PMAP_LOCK(pmap);
1145 }
1146
1147 /*
1148 * Indicate the need to retry. While waiting, the page
1149 * table page may have been allocated.
1150 */
1151 return (NULL);
1152 }
1153
1154 /*
1155 * Map the pagetable page into the process address space, if it
1156 * isn't already there.
1157 */
1158 pageva = MIPS_PHYS_TO_DIRECT(VM_PAGE_TO_PHYS(m));
1159
1160 #ifdef __mips_n64
1161 if (ptepindex >= NUPDE) {
1162 pmap->pm_segtab[ptepindex - NUPDE] = (pd_entry_t)pageva;
1163 } else {
1164 pd_entry_t *pdep, *pde;
1165 int segindex = ptepindex >> (SEGSHIFT - PDRSHIFT);
1166 int pdeindex = ptepindex & (NPDEPG - 1);
1167 vm_page_t pg;
1168
1169 pdep = &pmap->pm_segtab[segindex];
1170 if (*pdep == NULL) {
1171 /* recurse for allocating page dir */
1172 if (_pmap_allocpte(pmap, NUPDE + segindex,
1173 flags) == NULL) {
1174 /* alloc failed, release current */
1175 --m->wire_count;
1176 atomic_subtract_int(&cnt.v_wire_count, 1);
1177 vm_page_free_zero(m);
1178 return (NULL);
1179 }
1180 } else {
1181 pg = PHYS_TO_VM_PAGE(MIPS_DIRECT_TO_PHYS(*pdep));
1182 pg->wire_count++;
1183 }
1184 /* Next level entry */
1185 pde = (pd_entry_t *)*pdep;
1186 pde[pdeindex] = (pd_entry_t)pageva;
1187 pmap->pm_ptphint = m;
1188 }
1189 #else
1190 pmap->pm_segtab[ptepindex] = (pd_entry_t)pageva;
1191 #endif
1192 pmap->pm_stats.resident_count++;
1193
1194 /*
1195 * Set the page table hint
1196 */
1197 pmap->pm_ptphint = m;
1198 return (m);
1199 }
1200
1201 static vm_page_t
1202 pmap_allocpte(pmap_t pmap, vm_offset_t va, int flags)
1203 {
1204 unsigned ptepindex;
1205 pd_entry_t *pde;
1206 vm_page_t m;
1207
1208 KASSERT((flags & (M_NOWAIT | M_WAITOK)) == M_NOWAIT ||
1209 (flags & (M_NOWAIT | M_WAITOK)) == M_WAITOK,
1210 ("pmap_allocpte: flags is neither M_NOWAIT nor M_WAITOK"));
1211
1212 /*
1213 * Calculate pagetable page index
1214 */
1215 ptepindex = pmap_pde_pindex(va);
1216 retry:
1217 /*
1218 * Get the page directory entry
1219 */
1220 pde = pmap_pde(pmap, va);
1221
1222 /*
1223 * If the page table page is mapped, we just increment the hold
1224 * count, and activate it.
1225 */
1226 if (pde != NULL && *pde != NULL) {
1227 /*
1228 * In order to get the page table page, try the hint first.
1229 */
1230 if (pmap->pm_ptphint &&
1231 (pmap->pm_ptphint->pindex == ptepindex)) {
1232 m = pmap->pm_ptphint;
1233 } else {
1234 m = PHYS_TO_VM_PAGE(MIPS_DIRECT_TO_PHYS(*pde));
1235 pmap->pm_ptphint = m;
1236 }
1237 m->wire_count++;
1238 } else {
1239 /*
1240 * Here if the pte page isn't mapped, or if it has been
1241 * deallocated.
1242 */
1243 m = _pmap_allocpte(pmap, ptepindex, flags);
1244 if (m == NULL && (flags & M_WAITOK))
1245 goto retry;
1246 }
1247 return (m);
1248 }
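/*
 * Note the M_WAITOK contract: _pmap_allocpte() may drop the pmap and
 * page queue locks to wait for memory and then returns NULL, so the
 * caller must re-walk the (possibly changed) page directory -- hence
 * the retry loop above rather than trusting stale state.
 */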
1249
1250
1251 /***************************************************
1252 * Pmap allocation/deallocation routines.
1253 ***************************************************/
1254 /*
1255 * Revision 1.397
1256 * - Merged pmap_release and pmap_release_free_page. When pmap_release is
1257 * called only the page directory page(s) can be left in the pmap pte
1258 * object, since all page table pages will have been freed by
1259 * pmap_remove_pages and pmap_remove. In addition, there can only be one
1260 * reference to the pmap and the page directory is wired, so the page(s)
1261 * can never be busy. So all there is to do is clear the magic mappings
1262 * from the page directory and free the page(s).
1263 */
1264
1265
1266 /*
1267 * Release any resources held by the given physical map.
1268 * Called when a pmap initialized by pmap_pinit is being released.
1269 * Should only be called if the map contains no valid mappings.
1270 */
1271 void
1272 pmap_release(pmap_t pmap)
1273 {
1274 vm_offset_t ptdva;
1275 vm_page_t ptdpg;
1276
1277 KASSERT(pmap->pm_stats.resident_count == 0,
1278 ("pmap_release: pmap resident count %ld != 0",
1279 pmap->pm_stats.resident_count));
1280
1281 ptdva = (vm_offset_t)pmap->pm_segtab;
1282 ptdpg = PHYS_TO_VM_PAGE(MIPS_DIRECT_TO_PHYS(ptdva));
1283
1284 ptdpg->wire_count--;
1285 atomic_subtract_int(&cnt.v_wire_count, 1);
1286 vm_page_free_zero(ptdpg);
1287 PMAP_LOCK_DESTROY(pmap);
1288 }
1289
1290 /*
1291 * grow the number of kernel page table entries, if needed
1292 */
1293 void
1294 pmap_growkernel(vm_offset_t addr)
1295 {
1296 vm_page_t nkpg;
1297 pd_entry_t *pde, *pdpe;
1298 pt_entry_t *pte;
1299 int i;
1300
1301 mtx_assert(&kernel_map->system_mtx, MA_OWNED);
1302 addr = roundup2(addr, NBSEG);
1303 if (addr - 1 >= kernel_map->max_offset)
1304 addr = kernel_map->max_offset;
1305 while (kernel_vm_end < addr) {
1306 pdpe = pmap_segmap(kernel_pmap, kernel_vm_end);
1307 #ifdef __mips_n64
1308 if (*pdpe == 0) {
1309 /* new intermediate page table entry */
1310 nkpg = pmap_alloc_direct_page(nkpt, VM_ALLOC_INTERRUPT);
1311 if (nkpg == NULL)
1312 panic("pmap_growkernel: no memory to grow kernel");
1313 *pdpe = (pd_entry_t)MIPS_PHYS_TO_DIRECT(VM_PAGE_TO_PHYS(nkpg));
1314 continue; /* try again */
1315 }
1316 #endif
1317 pde = pmap_pdpe_to_pde(pdpe, kernel_vm_end);
1318 if (*pde != 0) {
1319 kernel_vm_end = (kernel_vm_end + NBPDR) & ~PDRMASK;
1320 if (kernel_vm_end - 1 >= kernel_map->max_offset) {
1321 kernel_vm_end = kernel_map->max_offset;
1322 break;
1323 }
1324 continue;
1325 }
1326
1327 /*
1328 * This index is bogus, but out of the way
1329 */
1330 nkpg = pmap_alloc_direct_page(nkpt, VM_ALLOC_INTERRUPT);
1331 if (!nkpg)
1332 panic("pmap_growkernel: no memory to grow kernel");
1333 nkpt++;
1334 *pde = (pd_entry_t)MIPS_PHYS_TO_DIRECT(VM_PAGE_TO_PHYS(nkpg));
1335
1336 /*
1337 * The R[4-7]?00 stores only one copy of the Global bit in
1338 * the translation lookaside buffer for each 2 page entry.
1339 * Thus invalid entrys must have the Global bit set so when
1340 * Entry LO and Entry HI G bits are anded together they will
1341 * produce a global bit to store in the tlb.
1342 */
1343 pte = (pt_entry_t *)*pde;
1344 for (i = 0; i < NPTEPG; i++)
1345 pte[i] = PTE_G;
1346
1347 kernel_vm_end = (kernel_vm_end + NBPDR) & ~PDRMASK;
1348 if (kernel_vm_end - 1 >= kernel_map->max_offset) {
1349 kernel_vm_end = kernel_map->max_offset;
1350 break;
1351 }
1352 }
1353 }
1354
1355 /***************************************************
1356 * page management routines.
1357 ***************************************************/
1358
1359 /*
1360 * free the pv_entry back to the free list
1361 */
1362 static PMAP_INLINE void
1363 free_pv_entry(pv_entry_t pv)
1364 {
1365
1366 pv_entry_count--;
1367 uma_zfree(pvzone, pv);
1368 }
1369
1370 /*
1371 * get a new pv_entry, allocating a block from the system
1372 * when needed.
1373 * the memory allocation is performed bypassing the malloc code
1374 * because of the possibility of allocations at interrupt time.
1375 */
1376 static pv_entry_t
1377 get_pv_entry(pmap_t locked_pmap)
1378 {
1379 static const struct timeval printinterval = { 60, 0 };
1380 static struct timeval lastprint;
1381 struct vpgqueues *vpq;
1382 pt_entry_t *pte, oldpte;
1383 pmap_t pmap;
1384 pv_entry_t allocated_pv, next_pv, pv;
1385 vm_offset_t va;
1386 vm_page_t m;
1387
1388 PMAP_LOCK_ASSERT(locked_pmap, MA_OWNED);
1389 mtx_assert(&vm_page_queue_mtx, MA_OWNED);
1390 allocated_pv = uma_zalloc(pvzone, M_NOWAIT);
1391 if (allocated_pv != NULL) {
1392 pv_entry_count++;
1393 if (pv_entry_count > pv_entry_high_water)
1394 pagedaemon_wakeup();
1395 else
1396 return (allocated_pv);
1397 }
1398 /*
1399 * Reclaim pv entries: At first, destroy mappings to inactive
1400 * pages. After that, if a pv entry is still needed, destroy
1401 * mappings to active pages.
1402 */
1403 if (ratecheck(&lastprint, &printinterval))
1404 printf("Approaching the limit on PV entries, "
1405 "increase the vm.pmap.shpgperproc tunable.\n");
1406 vpq = &vm_page_queues[PQ_INACTIVE];
1407 retry:
1408 TAILQ_FOREACH(m, &vpq->pl, pageq) {
1409 if ((m->flags & PG_MARKER) != 0 || m->hold_count || m->busy)
1410 continue;
1411 TAILQ_FOREACH_SAFE(pv, &m->md.pv_list, pv_list, next_pv) {
1412 va = pv->pv_va;
1413 pmap = pv->pv_pmap;
1414 /* Avoid deadlock and lock recursion. */
1415 if (pmap > locked_pmap)
1416 PMAP_LOCK(pmap);
1417 else if (pmap != locked_pmap && !PMAP_TRYLOCK(pmap))
1418 continue;
1419 pmap->pm_stats.resident_count--;
1420 pte = pmap_pte(pmap, va);
1421 KASSERT(pte != NULL, ("pte"));
1422 oldpte = *pte;
1423 if (is_kernel_pmap(pmap))
1424 *pte = PTE_G;
1425 else
1426 *pte = 0;
1427 KASSERT(!pte_test(&oldpte, PTE_W),
1428 ("wired pte for unwired page"));
1429 if (m->md.pv_flags & PV_TABLE_REF)
1430 vm_page_aflag_set(m, PGA_REFERENCED);
1431 if (pte_test(&oldpte, PTE_D))
1432 vm_page_dirty(m);
1433 pmap_invalidate_page(pmap, va);
1434 TAILQ_REMOVE(&pmap->pm_pvlist, pv, pv_plist);
1435 m->md.pv_list_count--;
1436 TAILQ_REMOVE(&m->md.pv_list, pv, pv_list);
1437 pmap_unuse_pt(pmap, va, pv->pv_ptem);
1438 if (pmap != locked_pmap)
1439 PMAP_UNLOCK(pmap);
1440 if (allocated_pv == NULL)
1441 allocated_pv = pv;
1442 else
1443 free_pv_entry(pv);
1444 }
1445 if (TAILQ_EMPTY(&m->md.pv_list)) {
1446 vm_page_aflag_clear(m, PGA_WRITEABLE);
1447 m->md.pv_flags &= ~(PV_TABLE_REF | PV_TABLE_MOD);
1448 }
1449 }
1450 if (allocated_pv == NULL) {
1451 if (vpq == &vm_page_queues[PQ_INACTIVE]) {
1452 vpq = &vm_page_queues[PQ_ACTIVE];
1453 goto retry;
1454 }
1455 panic("get_pv_entry: increase the vm.pmap.shpgperproc tunable");
1456 }
1457 return (allocated_pv);
1458 }
1459
1460 /*
1461 * Revision 1.370
1462 *
1463 * Move pmap_collect() out of the machine-dependent code, rename it
1464 * to reflect its new location, and add page queue and flag locking.
1465 *
1466 * Notes: (1) alpha, i386, and ia64 had identical implementations
1467 * of pmap_collect() in terms of machine-independent interfaces;
1468 * (2) sparc64 doesn't require it; (3) powerpc had it as a TODO.
1469 *
1470 * MIPS implementation was identical to alpha [Junos 8.2]
1471 */
1472
1473 /*
1474 * If it is the first entry on the list, it is actually
1475 * in the header and we must copy the following entry up
1476 * to the header. Otherwise we must search the list for
1477 * the entry. In either case we free the now unused entry.
1478 */
1479
1480 static pv_entry_t
1481 pmap_pvh_remove(struct md_page *pvh, pmap_t pmap, vm_offset_t va)
1482 {
1483 pv_entry_t pv;
1484
1485 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
1486 mtx_assert(&vm_page_queue_mtx, MA_OWNED);
1487 if (pvh->pv_list_count < pmap->pm_stats.resident_count) {
1488 TAILQ_FOREACH(pv, &pvh->pv_list, pv_list) {
1489 if (pmap == pv->pv_pmap && va == pv->pv_va)
1490 break;
1491 }
1492 } else {
1493 TAILQ_FOREACH(pv, &pmap->pm_pvlist, pv_plist) {
1494 if (va == pv->pv_va)
1495 break;
1496 }
1497 }
1498 if (pv != NULL) {
1499 TAILQ_REMOVE(&pvh->pv_list, pv, pv_list);
1500 pvh->pv_list_count--;
1501 TAILQ_REMOVE(&pmap->pm_pvlist, pv, pv_plist);
1502 }
1503 return (pv);
1504 }
1505
1506 static void
1507 pmap_pvh_free(struct md_page *pvh, pmap_t pmap, vm_offset_t va)
1508 {
1509 pv_entry_t pv;
1510
1511 pv = pmap_pvh_remove(pvh, pmap, va);
1512 KASSERT(pv != NULL, ("pmap_pvh_free: pv not found, pa %lx va %lx",
1513 (u_long)VM_PAGE_TO_PHYS(member2struct(vm_page, md, pvh)),
1514 (u_long)va));
1515 free_pv_entry(pv);
1516 }
1517
1518 static void
1519 pmap_remove_entry(pmap_t pmap, vm_page_t m, vm_offset_t va)
1520 {
1521
1522 mtx_assert(&vm_page_queue_mtx, MA_OWNED);
1523 pmap_pvh_free(&m->md, pmap, va);
1524 if (TAILQ_EMPTY(&m->md.pv_list))
1525 vm_page_aflag_clear(m, PGA_WRITEABLE);
1526 }
1527
1528 /*
1529 * Conditionally create a pv entry.
1530 */
1531 static boolean_t
1532 pmap_try_insert_pv_entry(pmap_t pmap, vm_page_t mpte, vm_offset_t va,
1533 vm_page_t m)
1534 {
1535 pv_entry_t pv;
1536
1537 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
1538 mtx_assert(&vm_page_queue_mtx, MA_OWNED);
1539 if (pv_entry_count < pv_entry_high_water &&
1540 (pv = uma_zalloc(pvzone, M_NOWAIT)) != NULL) {
1541 pv_entry_count++;
1542 pv->pv_va = va;
1543 pv->pv_pmap = pmap;
1544 pv->pv_ptem = mpte;
1545 TAILQ_INSERT_TAIL(&pmap->pm_pvlist, pv, pv_plist);
1546 TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_list);
1547 m->md.pv_list_count++;
1548 return (TRUE);
1549 } else
1550 return (FALSE);
1551 }
1552
1553 /*
1554 * pmap_remove_pte: do the things to unmap a page in a process
1555 */
1556 static int
1557 pmap_remove_pte(struct pmap *pmap, pt_entry_t *ptq, vm_offset_t va)
1558 {
1559 pt_entry_t oldpte;
1560 vm_page_t m;
1561 vm_paddr_t pa;
1562
1563 mtx_assert(&vm_page_queue_mtx, MA_OWNED);
1564 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
1565
1566 oldpte = *ptq;
1567 if (is_kernel_pmap(pmap))
1568 *ptq = PTE_G;
1569 else
1570 *ptq = 0;
1571
1572 if (pte_test(&oldpte, PTE_W))
1573 pmap->pm_stats.wired_count -= 1;
1574
1575 pmap->pm_stats.resident_count -= 1;
1576 pa = TLBLO_PTE_TO_PA(oldpte);
1577
1578 if (page_is_managed(pa)) {
1579 m = PHYS_TO_VM_PAGE(pa);
1580 if (pte_test(&oldpte, PTE_D)) {
1581 KASSERT(!pte_test(&oldpte, PTE_RO),
1582 ("%s: modified page not writable: va: %p, pte: %#jx",
1583 __func__, (void *)va, (uintmax_t)oldpte));
1584 vm_page_dirty(m);
1585 }
1586 if (m->md.pv_flags & PV_TABLE_REF)
1587 vm_page_aflag_set(m, PGA_REFERENCED);
1588 m->md.pv_flags &= ~(PV_TABLE_REF | PV_TABLE_MOD);
1589
1590 pmap_remove_entry(pmap, m, va);
1591 }
1592 return (pmap_unuse_pt(pmap, va, NULL));
1593 }
1594
1595 /*
1596 * Remove a single page from a process address space
1597 */
1598 static void
1599 pmap_remove_page(struct pmap *pmap, vm_offset_t va)
1600 {
1601 pt_entry_t *ptq;
1602
1603 mtx_assert(&vm_page_queue_mtx, MA_OWNED);
1604 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
1605 ptq = pmap_pte(pmap, va);
1606
1607 /*
1608 * if there is no pte for this address, just skip it!!!
1609 */
1610 if (!ptq || !pte_test(ptq, PTE_V)) {
1611 return;
1612 }
1613
1614 /*
1615 * Write back all caches from the page being destroyed
1616 */
1617 mips_dcache_wbinv_range_index(va, PAGE_SIZE);
1618
1619 /*
1620 * get a local va for mappings for this pmap.
1621 */
1622 (void)pmap_remove_pte(pmap, ptq, va);
1623 pmap_invalidate_page(pmap, va);
1624
1625 return;
1626 }
1627
1628 /*
1629 * Remove the given range of addresses from the specified map.
1630 *
1631 * It is assumed that the start and end are properly
1632 * rounded to the page size.
1633 */
1634 void
1635 pmap_remove(struct pmap *pmap, vm_offset_t sva, vm_offset_t eva)
1636 {
1637 vm_offset_t va_next;
1638 pd_entry_t *pde, *pdpe;
1639 pt_entry_t *pte;
1640
1641 if (pmap == NULL)
1642 return;
1643
1644 if (pmap->pm_stats.resident_count == 0)
1645 return;
1646
1647 vm_page_lock_queues();
1648 PMAP_LOCK(pmap);
1649
1650 /*
1651 * special handling of removing one page. a very common operation
1652 * and easy to short circuit some code.
1653 */
1654 if ((sva + PAGE_SIZE) == eva) {
1655 pmap_remove_page(pmap, sva);
1656 goto out;
1657 }
1658 for (; sva < eva; sva = va_next) {
1659 pdpe = pmap_segmap(pmap, sva);
1660 #ifdef __mips_n64
1661 if (*pdpe == 0) {
1662 va_next = (sva + NBSEG) & ~SEGMASK;
1663 if (va_next < sva)
1664 va_next = eva;
1665 continue;
1666 }
1667 #endif
1668 va_next = (sva + NBPDR) & ~PDRMASK;
1669 if (va_next < sva)
1670 va_next = eva;
1671
1672 pde = pmap_pdpe_to_pde(pdpe, sva);
1673 if (*pde == 0)
1674 continue;
1675 if (va_next > eva)
1676 va_next = eva;
1677 for (pte = pmap_pde_to_pte(pde, sva); sva != va_next;
1678 pte++, sva += PAGE_SIZE) {
1679 pmap_remove_page(pmap, sva);
1680 }
1681 }
1682 out:
1683 vm_page_unlock_queues();
1684 PMAP_UNLOCK(pmap);
1685 }
1686
1687 /*
1688 * Routine: pmap_remove_all
1689 * Function:
1690 * Removes this physical page from
1691 * all physical maps in which it resides.
1692 * Reflects back modify bits to the pager.
1693 *
1694 * Notes:
1695 * Original versions of this routine were very
1696 * inefficient because they iteratively called
1697 * pmap_remove (slow...)
1698 */
1699
1700 void
1701 pmap_remove_all(vm_page_t m)
1702 {
1703 pv_entry_t pv;
1704 pt_entry_t *pte, tpte;
1705
1706 KASSERT((m->oflags & VPO_UNMANAGED) == 0,
1707 ("pmap_remove_all: page %p is not managed", m));
1708 vm_page_lock_queues();
1709
1710 if (m->md.pv_flags & PV_TABLE_REF)
1711 vm_page_aflag_set(m, PGA_REFERENCED);
1712
1713 while ((pv = TAILQ_FIRST(&m->md.pv_list)) != NULL) {
1714 PMAP_LOCK(pv->pv_pmap);
1715
1716 /*
1717 * If it's last mapping writeback all caches from
1718 * the page being destroyed
1719 */
1720 if (m->md.pv_list_count == 1)
1721 mips_dcache_wbinv_range_index(pv->pv_va, PAGE_SIZE);
1722
1723 pv->pv_pmap->pm_stats.resident_count--;
1724
1725 pte = pmap_pte(pv->pv_pmap, pv->pv_va);
1726
1727 tpte = *pte;
1728 if (is_kernel_pmap(pv->pv_pmap))
1729 *pte = PTE_G;
1730 else
1731 *pte = 0;
1732
1733 if (pte_test(&tpte, PTE_W))
1734 pv->pv_pmap->pm_stats.wired_count--;
1735
1736 /*
1737 * Update the vm_page_t clean and reference bits.
1738 */
1739 if (pte_test(&tpte, PTE_D)) {
1740 KASSERT(!pte_test(&tpte, PTE_RO),
1741 ("%s: modified page not writable: va: %p, pte: %#jx",
1742 __func__, (void *)pv->pv_va, (uintmax_t)tpte));
1743 vm_page_dirty(m);
1744 }
1745 pmap_invalidate_page(pv->pv_pmap, pv->pv_va);
1746
1747 TAILQ_REMOVE(&pv->pv_pmap->pm_pvlist, pv, pv_plist);
1748 TAILQ_REMOVE(&m->md.pv_list, pv, pv_list);
1749 m->md.pv_list_count--;
1750 pmap_unuse_pt(pv->pv_pmap, pv->pv_va, pv->pv_ptem);
1751 PMAP_UNLOCK(pv->pv_pmap);
1752 free_pv_entry(pv);
1753 }
1754
1755 vm_page_aflag_clear(m, PGA_WRITEABLE);
1756 m->md.pv_flags &= ~(PV_TABLE_REF | PV_TABLE_MOD);
1757 vm_page_unlock_queues();
1758 }
1759
1760 /*
1761 * Set the physical protection on the
1762 * specified range of this map as requested.
1763 */
1764 void
1765 pmap_protect(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot)
1766 {
1767 pt_entry_t *pte;
1768 pd_entry_t *pde, *pdpe;
1769 vm_offset_t va_next;
1770
1771 if (pmap == NULL)
1772 return;
1773
1774 if ((prot & VM_PROT_READ) == VM_PROT_NONE) {
1775 pmap_remove(pmap, sva, eva);
1776 return;
1777 }
1778 if (prot & VM_PROT_WRITE)
1779 return;
1780
1781 vm_page_lock_queues();
1782 PMAP_LOCK(pmap);
1783 for (; sva < eva; sva = va_next) {
1784 pt_entry_t pbits;
1785 vm_page_t m;
1786 vm_paddr_t pa;
1787
1788 pdpe = pmap_segmap(pmap, sva);
1789 #ifdef __mips_n64
1790 if (*pdpe == 0) {
1791 va_next = (sva + NBSEG) & ~SEGMASK;
1792 if (va_next < sva)
1793 va_next = eva;
1794 continue;
1795 }
1796 #endif
1797 va_next = (sva + NBPDR) & ~PDRMASK;
1798 if (va_next < sva)
1799 va_next = eva;
1800
1801 pde = pmap_pdpe_to_pde(pdpe, sva);
1802 if (pde == NULL || *pde == NULL)
1803 continue;
1804 if (va_next > eva)
1805 va_next = eva;
1806
1807 for (pte = pmap_pde_to_pte(pde, sva); sva != va_next; pte++,
1808 sva += PAGE_SIZE) {
1809
1810 /* Skip invalid PTEs */
1811 if (!pte_test(pte, PTE_V))
1812 continue;
1813 pbits = *pte;
1814 pa = TLBLO_PTE_TO_PA(pbits);
1815 if (page_is_managed(pa) && pte_test(&pbits, PTE_D)) {
1816 m = PHYS_TO_VM_PAGE(pa);
1817 vm_page_dirty(m);
1818 m->md.pv_flags &= ~PV_TABLE_MOD;
1819 }
1820 pte_clear(&pbits, PTE_D);
1821 pte_set(&pbits, PTE_RO);
1822
1823 if (pbits != *pte) {
1824 *pte = pbits;
1825 pmap_update_page(pmap, sva, pbits);
1826 }
1827 }
1828 }
1829 vm_page_unlock_queues();
1830 PMAP_UNLOCK(pmap);
1831 }
1832
1833 /*
1834 * Insert the given physical page (p) at
1835 * the specified virtual address (v) in the
1836 * target physical map with the protection requested.
1837 *
1838 * If specified, the page will be wired down, meaning
1839 * that the related pte can not be reclaimed.
1840 *
1841 * NB: This is the only routine which MAY NOT lazy-evaluate
1842 * or lose information. That is, this routine must actually
1843 * insert this page into the given map NOW.
1844 */
1845 void
1846 pmap_enter(pmap_t pmap, vm_offset_t va, vm_prot_t access, vm_page_t m,
1847 vm_prot_t prot, boolean_t wired)
1848 {
1849 vm_paddr_t pa, opa;
1850 pt_entry_t *pte;
1851 pt_entry_t origpte, newpte;
1852 pv_entry_t pv;
1853 vm_page_t mpte, om;
1854 pt_entry_t rw = 0;
1855
1856 if (pmap == NULL)
1857 return;
1858
1859 va &= ~PAGE_MASK;
1860 KASSERT(va <= VM_MAX_KERNEL_ADDRESS, ("pmap_enter: toobig"));
1861 KASSERT((m->oflags & (VPO_UNMANAGED | VPO_BUSY)) != 0,
1862 ("pmap_enter: page %p is not busy", m));
1863
1864 mpte = NULL;
1865
1866 vm_page_lock_queues();
1867 PMAP_LOCK(pmap);
1868
1869 /*
1870 * In the case that a page table page is not resident, we are
1871 * creating it here.
1872 */
1873 if (va < VM_MAXUSER_ADDRESS) {
1874 mpte = pmap_allocpte(pmap, va, M_WAITOK);
1875 }
1876 pte = pmap_pte(pmap, va);
1877
1878 /*
1879 * Page Directory table entry not valid, we need a new PT page
1880 */
1881 if (pte == NULL) {
1882 panic("pmap_enter: invalid page directory, pdir=%p, va=%p",
1883 (void *)pmap->pm_segtab, (void *)va);
1884 }
1885 pa = VM_PAGE_TO_PHYS(m);
1886 om = NULL;
1887 origpte = *pte;
1888 opa = TLBLO_PTE_TO_PA(origpte);
1889
1890 /*
1891 * Mapping has not changed, must be protection or wiring change.
1892 */
1893 if (pte_test(&origpte, PTE_V) && opa == pa) {
1894 /*
1895 * Wiring change, just update stats. We don't worry about
1896 * wiring PT pages as they remain resident as long as there
1897 * are valid mappings in them. Hence, if a user page is
1898 * wired, the PT page will be also.
1899 */
1900 if (wired && !pte_test(&origpte, PTE_W))
1901 pmap->pm_stats.wired_count++;
1902 else if (!wired && pte_test(&origpte, PTE_W))
1903 pmap->pm_stats.wired_count--;
1904
1905 KASSERT(!pte_test(&origpte, PTE_D | PTE_RO),
1906 ("%s: modified page not writable: va: %p, pte: %#jx",
1907 __func__, (void *)va, (uintmax_t)origpte));
1908
1909 /*
1910 * Remove extra pte reference
1911 */
1912 if (mpte)
1913 mpte->wire_count--;
1914
1915 if (page_is_managed(opa)) {
1916 om = m;
1917 }
1918 goto validate;
1919 }
1920
1921 pv = NULL;
1922
1923 /*
1924 * Mapping has changed, invalidate old range and fall through to
1925 * handle validating new mapping.
1926 */
1927 if (opa) {
1928 if (pte_test(&origpte, PTE_W))
1929 pmap->pm_stats.wired_count--;
1930
1931 if (page_is_managed(opa)) {
1932 om = PHYS_TO_VM_PAGE(opa);
1933 pv = pmap_pvh_remove(&om->md, pmap, va);
1934 }
1935 if (mpte != NULL) {
1936 mpte->wire_count--;
1937 KASSERT(mpte->wire_count > 0,
1938 ("pmap_enter: missing reference to page table page,"
1939 " va: %p", (void *)va));
1940 }
1941 } else
1942 pmap->pm_stats.resident_count++;
1943
1944 /*
1945 * Enter on the PV list if part of our managed memory. Note that we
1946 * raise IPL while manipulating pv_table since pmap_enter can be
1947 * called at interrupt time.
1948 */
1949 if ((m->oflags & VPO_UNMANAGED) == 0) {
1950 KASSERT(va < kmi.clean_sva || va >= kmi.clean_eva,
1951 ("pmap_enter: managed mapping within the clean submap"));
1952 if (pv == NULL)
1953 pv = get_pv_entry(pmap);
1954 pv->pv_va = va;
1955 pv->pv_pmap = pmap;
1956 pv->pv_ptem = mpte;
1957 TAILQ_INSERT_TAIL(&pmap->pm_pvlist, pv, pv_plist);
1958 TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_list);
1959 m->md.pv_list_count++;
1960 } else if (pv != NULL)
1961 free_pv_entry(pv);
1962
1963 /*
1964 * Increment counters
1965 */
1966 if (wired)
1967 pmap->pm_stats.wired_count++;
1968
1969 validate:
1970 if ((access & VM_PROT_WRITE) != 0)
1971 m->md.pv_flags |= PV_TABLE_MOD | PV_TABLE_REF;
1972 rw = init_pte_prot(va, m, prot);
1973
1974 #ifdef PMAP_DEBUG
1975 printf("pmap_enter: va: %p -> pa: %p\n", (void *)va, (void *)pa);
1976 #endif
1977 /*
1978 * Now validate mapping with desired protection/wiring.
1979 */
1980 newpte = TLBLO_PA_TO_PFN(pa) | rw | PTE_V;
1981
1982 if (is_cacheable_mem(pa))
1983 newpte |= PTE_C_CACHE;
1984 else
1985 newpte |= PTE_C_UNCACHED;
1986
1987 if (wired)
1988 newpte |= PTE_W;
1989
1990 if (is_kernel_pmap(pmap))
1991 newpte |= PTE_G;
1992
1993 /*
1994 * if the mapping or permission bits are different, we need to
1995 * update the pte.
1996 */
1997 if (origpte != newpte) {
1998 if (pte_test(&origpte, PTE_V)) {
1999 *pte = newpte;
2000 if (page_is_managed(opa) && (opa != pa)) {
2001 if (om->md.pv_flags & PV_TABLE_REF)
2002 vm_page_aflag_set(om, PGA_REFERENCED);
2003 om->md.pv_flags &=
2004 ~(PV_TABLE_REF | PV_TABLE_MOD);
2005 }
2006 if (pte_test(&origpte, PTE_D)) {
2007 KASSERT(!pte_test(&origpte, PTE_RO),
2008 ("pmap_enter: modified page not writable:"
2009 " va: %p, pte: %#jx", (void *)va, (uintmax_t)origpte));
2010 if (page_is_managed(opa))
2011 vm_page_dirty(om);
2012 }
2013 if (page_is_managed(opa) &&
2014 TAILQ_EMPTY(&om->md.pv_list))
2015 vm_page_aflag_clear(om, PGA_WRITEABLE);
2016 } else {
2017 *pte = newpte;
2018 }
2019 }
2020 pmap_update_page(pmap, va, newpte);
2021
2022 /*
2023 * Sync I & D caches for executable pages. Do this only if the
2024 * target pmap belongs to the current process. Otherwise, an
2025 * unresolvable TLB miss may occur.
2026 */
2027 if (!is_kernel_pmap(pmap) && (pmap == &curproc->p_vmspace->vm_pmap) &&
2028 (prot & VM_PROT_EXECUTE)) {
2029 mips_icache_sync_range(va, PAGE_SIZE);
2030 mips_dcache_wbinv_range(va, PAGE_SIZE);
2031 }
2032 vm_page_unlock_queues();
2033 PMAP_UNLOCK(pmap);
2034 }
2035
2036 /*
2037 * this code makes some *MAJOR* assumptions:
2038 * 1. Current pmap & pmap exists.
2039 * 2. Not wired.
2040 * 3. Read access.
2041 * 4. No page table pages.
2042 * but is *MUCH* faster than pmap_enter...
2043 */
2044
2045 void
2046 pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot)
2047 {
2048
2049 vm_page_lock_queues();
2050 PMAP_LOCK(pmap);
2051 (void)pmap_enter_quick_locked(pmap, va, m, prot, NULL);
2052 vm_page_unlock_queues();
2053 PMAP_UNLOCK(pmap);
2054 }
2055
2056 static vm_page_t
2057 pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va, vm_page_t m,
2058 vm_prot_t prot, vm_page_t mpte)
2059 {
2060 pt_entry_t *pte;
2061 vm_paddr_t pa;
2062
2063 KASSERT(va < kmi.clean_sva || va >= kmi.clean_eva ||
2064 (m->oflags & VPO_UNMANAGED) != 0,
2065 ("pmap_enter_quick_locked: managed mapping within the clean submap"));
2066 mtx_assert(&vm_page_queue_mtx, MA_OWNED);
2067 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
2068
2069 /*
2070 * In the case that a page table page is not resident, we are
2071 * creating it here.
2072 */
2073 if (va < VM_MAXUSER_ADDRESS) {
2074 pd_entry_t *pde;
2075 unsigned ptepindex;
2076
2077 /*
2078 * Calculate pagetable page index
2079 */
2080 ptepindex = pmap_pde_pindex(va);
2081 if (mpte && (mpte->pindex == ptepindex)) {
2082 mpte->wire_count++;
2083 } else {
2084 /*
2085 * Get the page directory entry
2086 */
2087 pde = pmap_pde(pmap, va);
2088
2089 /*
2090 * If the page table page is mapped, we just
2091 * increment the hold count, and activate it.
2092 */
2093 if (pde && *pde != 0) {
2094 if (pmap->pm_ptphint &&
2095 (pmap->pm_ptphint->pindex == ptepindex)) {
2096 mpte = pmap->pm_ptphint;
2097 } else {
2098 mpte = PHYS_TO_VM_PAGE(
2099 MIPS_DIRECT_TO_PHYS(*pde));
2100 pmap->pm_ptphint = mpte;
2101 }
2102 mpte->wire_count++;
2103 } else {
2104 mpte = _pmap_allocpte(pmap, ptepindex,
2105 M_NOWAIT);
2106 if (mpte == NULL)
2107 return (mpte);
2108 }
2109 }
2110 } else {
2111 mpte = NULL;
2112 }
2113
2114 pte = pmap_pte(pmap, va);
2115 if (pte_test(pte, PTE_V)) {
2116 if (mpte != NULL) {
2117 mpte->wire_count--;
2118 mpte = NULL;
2119 }
2120 return (mpte);
2121 }
2122
2123 /*
2124 * Enter on the PV list if part of our managed memory.
2125 */
2126 if ((m->oflags & VPO_UNMANAGED) == 0 &&
2127 !pmap_try_insert_pv_entry(pmap, mpte, va, m)) {
2128 if (mpte != NULL) {
2129 pmap_unwire_ptp(pmap, va, mpte);
2130 mpte = NULL;
2131 }
2132 return (mpte);
2133 }
2134
2135 /*
2136 * Increment counters
2137 */
2138 pmap->pm_stats.resident_count++;
2139
2140 pa = VM_PAGE_TO_PHYS(m);
2141
2142 /*
2143 * Now validate mapping with RO protection
2144 */
2145 *pte = TLBLO_PA_TO_PFN(pa) | PTE_V;
2146
2147 if (is_cacheable_mem(pa))
2148 *pte |= PTE_C_CACHE;
2149 else
2150 *pte |= PTE_C_UNCACHED;
2151
2152 if (is_kernel_pmap(pmap))
2153 *pte |= PTE_G;
2154 else {
2155 *pte |= PTE_RO;
2156 /*
2157 * Sync I & D caches. Do this only if the target pmap
2158 * belongs to the current process. Otherwise, an
2159 * unresolvable TLB miss may occur. */
2160 if (pmap == &curproc->p_vmspace->vm_pmap) {
2161 va &= ~PAGE_MASK;
2162 mips_icache_sync_range(va, PAGE_SIZE);
2163 mips_dcache_wbinv_range(va, PAGE_SIZE);
2164 }
2165 }
2166 return (mpte);
2167 }
2168
2169 /*
2170 * Make a temporary mapping for a physical address. This is only intended
2171 * to be used for panic dumps.
2172 *
2173 * Use XKPHYS for 64 bit, and KSEG0 where possible for 32 bit.
2174 */
2175 void *
2176 pmap_kenter_temporary(vm_paddr_t pa, int i)
2177 {
2178 vm_offset_t va;
2179
2180 if (i != 0)
2181 		printf("%s: ERROR: more than one page of virtual address mapping is not supported\n",
2182 		    __func__);
2183
2184 if (MIPS_DIRECT_MAPPABLE(pa)) {
2185 va = MIPS_PHYS_TO_DIRECT(pa);
2186 } else {
2187 #ifndef __mips_n64 /* XXX : to be converted to new style */
2188 int cpu;
2189 register_t intr;
2190 struct local_sysmaps *sysm;
2191 pt_entry_t *pte, npte;
2192
2193 	/* If this is ever used for anything other than dumps, we may need
2194 	 * to leave interrupts disabled on return. If crash dumps don't work
2195 	 * when we get to this point, we might want to consider leaving
2196 	 * things disabled as a starting point ;-)
2197 	 */
2198 intr = intr_disable();
2199 cpu = PCPU_GET(cpuid);
2200 sysm = &sysmap_lmem[cpu];
2201 /* Since this is for the debugger, no locks or any other fun */
2202 npte = TLBLO_PA_TO_PFN(pa) | PTE_D | PTE_V | PTE_G | PTE_W | PTE_C_CACHE;
2203 pte = pmap_pte(kernel_pmap, sysm->base);
2204 *pte = npte;
2205 sysm->valid1 = 1;
2206 pmap_update_page(kernel_pmap, sysm->base, npte);
2207 va = sysm->base;
2208 intr_restore(intr);
2209 #endif
2210 }
2211 return ((void *)va);
2212 }
2213
2214 void
2215 pmap_kenter_temporary_free(vm_paddr_t pa)
2216 {
2217 #ifndef __mips_n64 /* XXX : to be converted to new style */
2218 int cpu;
2219 register_t intr;
2220 struct local_sysmaps *sysm;
2221 #endif
2222
2223 if (MIPS_DIRECT_MAPPABLE(pa)) {
2224 /* nothing to do for this case */
2225 return;
2226 }
2227 #ifndef __mips_n64 /* XXX : to be converted to new style */
2228 cpu = PCPU_GET(cpuid);
2229 sysm = &sysmap_lmem[cpu];
2230 if (sysm->valid1) {
2231 pt_entry_t *pte;
2232
2233 intr = intr_disable();
2234 pte = pmap_pte(kernel_pmap, sysm->base);
2235 *pte = PTE_G;
2236 pmap_invalidate_page(kernel_pmap, sysm->base);
2237 intr_restore(intr);
2238 sysm->valid1 = 0;
2239 }
2240 #endif
2241 }
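
/*
 * Illustrative sketch: these two routines are intended to be used in
 * pairs by the crash dump code, one page at a time.  The dump_write()
 * call below is a sketch of such a consumer; its arguments (di and
 * dumplo come from the dumper context) are assumptions, not
 * requirements of this interface.
 *
 *	void *va;
 *	int error;
 *
 *	va = pmap_kenter_temporary(pa, 0);
 *	error = dump_write(di, va, 0, dumplo, PAGE_SIZE);
 *	pmap_kenter_temporary_free(pa);
 */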
2242
2243 /*
2244  * This code was moved to the machine-independent
2245  * vm_map_pmap_enter().
2246 */
2247
2248 /*
2249 * Maps a sequence of resident pages belonging to the same object.
2250 * The sequence begins with the given page m_start. This page is
2251 * mapped at the given virtual address start. Each subsequent page is
2252 * mapped at a virtual address that is offset from start by the same
2253 * amount as the page is offset from m_start within the object. The
2254 * last page in the sequence is the page with the largest offset from
2255 * m_start that can be mapped at a virtual address less than the given
2256 * virtual address end. Not every virtual page between start and end
2257 * is mapped; only those for which a resident page exists with the
2258 * corresponding offset from m_start are mapped.
2259 */
2260 void
2261 pmap_enter_object(pmap_t pmap, vm_offset_t start, vm_offset_t end,
2262 vm_page_t m_start, vm_prot_t prot)
2263 {
2264 vm_page_t m, mpte;
2265 vm_pindex_t diff, psize;
2266
2267 VM_OBJECT_LOCK_ASSERT(m_start->object, MA_OWNED);
2268 psize = atop(end - start);
2269 mpte = NULL;
2270 m = m_start;
2271 vm_page_lock_queues();
2272 PMAP_LOCK(pmap);
2273 while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) {
2274 mpte = pmap_enter_quick_locked(pmap, start + ptoa(diff), m,
2275 prot, mpte);
2276 m = TAILQ_NEXT(m, listq);
2277 }
2278 vm_page_unlock_queues();
2279 PMAP_UNLOCK(pmap);
2280 }
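
/*
 * Illustrative sketch of a caller, loosely modeled on the MI
 * vm_map_pmap_enter(); the lookup and locking here are assumptions
 * about the caller:
 *
 *	VM_OBJECT_LOCK(object);
 *	m_start = vm_page_find_least(object, pindex);
 *	pmap_enter_object(vm_map_pmap(map), start, end, m_start, prot);
 *	VM_OBJECT_UNLOCK(object);
 */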
2281
2282 /*
2283 * pmap_object_init_pt preloads the ptes for a given object
2284 * into the specified pmap. This eliminates the blast of soft
2285 * faults on process startup and immediately after an mmap.
2286 */
2287 void
2288 pmap_object_init_pt(pmap_t pmap, vm_offset_t addr,
2289 vm_object_t object, vm_pindex_t pindex, vm_size_t size)
2290 {
2291 VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
2292 KASSERT(object->type == OBJT_DEVICE || object->type == OBJT_SG,
2293 ("pmap_object_init_pt: non-device object"));
2294 }
2295
2296 /*
2297 * Routine: pmap_change_wiring
2298 * Function: Change the wiring attribute for a map/virtual-address
2299 * pair.
2300 * In/out conditions:
2301 * The mapping must already exist in the pmap.
2302 */
2303 void
2304 pmap_change_wiring(pmap_t pmap, vm_offset_t va, boolean_t wired)
2305 {
2306 pt_entry_t *pte;
2307
2308 if (pmap == NULL)
2309 return;
2310
2311 PMAP_LOCK(pmap);
2312 pte = pmap_pte(pmap, va);
2313
2314 if (wired && !pte_test(pte, PTE_W))
2315 pmap->pm_stats.wired_count++;
2316 else if (!wired && pte_test(pte, PTE_W))
2317 pmap->pm_stats.wired_count--;
2318
2319 /*
2320 * Wiring is not a hardware characteristic so there is no need to
2321 * invalidate TLB.
2322 */
2323 if (wired)
2324 pte_set(pte, PTE_W);
2325 else
2326 pte_clear(pte, PTE_W);
2327 PMAP_UNLOCK(pmap);
2328 }
2329
2330 /*
2331 * Copy the range specified by src_addr/len
2332 * from the source map to the range dst_addr/len
2333 * in the destination map.
2334 *
2335 * This routine is only advisory and need not do anything.
2336 */
2337
2338 void
2339 pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr,
2340 vm_size_t len, vm_offset_t src_addr)
2341 {
2342 }
2343
2344 /*
2345 * pmap_zero_page zeros the specified hardware page by mapping
2346 * the page into KVM and using bzero to clear its contents.
2347 *
2348 * Use XKPHYS for 64 bit, and KSEG0 where possible for 32 bit.
2349 */
2350 void
2351 pmap_zero_page(vm_page_t m)
2352 {
2353 vm_offset_t va;
2354 vm_paddr_t phys = VM_PAGE_TO_PHYS(m);
2355
2356 if (MIPS_DIRECT_MAPPABLE(phys)) {
2357 va = MIPS_PHYS_TO_DIRECT(phys);
2358 bzero((caddr_t)va, PAGE_SIZE);
2359 mips_dcache_wbinv_range(va, PAGE_SIZE);
2360 } else {
2361 va = pmap_lmem_map1(phys);
2362 bzero((caddr_t)va, PAGE_SIZE);
2363 mips_dcache_wbinv_range(va, PAGE_SIZE);
2364 pmap_lmem_unmap();
2365 }
2366 }
2367
2368 /*
2369 * pmap_zero_page_area zeros the specified hardware page by mapping
2370 * the page into KVM and using bzero to clear its contents.
2371 *
2372 * off and size may not cover an area beyond a single hardware page.
2373 */
2374 void
2375 pmap_zero_page_area(vm_page_t m, int off, int size)
2376 {
2377 vm_offset_t va;
2378 vm_paddr_t phys = VM_PAGE_TO_PHYS(m);
2379
2380 if (MIPS_DIRECT_MAPPABLE(phys)) {
2381 va = MIPS_PHYS_TO_DIRECT(phys);
2382 		bzero((char *)va + off, size);
2383 mips_dcache_wbinv_range(va + off, size);
2384 } else {
2385 va = pmap_lmem_map1(phys);
2386 bzero((char *)va + off, size);
2387 mips_dcache_wbinv_range(va + off, size);
2388 pmap_lmem_unmap();
2389 }
2390 }
2391
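/*
 * pmap_zero_page_idle zeros the specified hardware page from the
 * page-zeroing idle loop; on MIPS it is identical to pmap_zero_page.
 */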
2392 void
2393 pmap_zero_page_idle(vm_page_t m)
2394 {
2395 vm_offset_t va;
2396 vm_paddr_t phys = VM_PAGE_TO_PHYS(m);
2397
2398 if (MIPS_DIRECT_MAPPABLE(phys)) {
2399 va = MIPS_PHYS_TO_DIRECT(phys);
2400 bzero((caddr_t)va, PAGE_SIZE);
2401 mips_dcache_wbinv_range(va, PAGE_SIZE);
2402 } else {
2403 va = pmap_lmem_map1(phys);
2404 bzero((caddr_t)va, PAGE_SIZE);
2405 mips_dcache_wbinv_range(va, PAGE_SIZE);
2406 pmap_lmem_unmap();
2407 }
2408 }
2409
2410 /*
2411 * pmap_copy_page copies the specified (machine independent)
2412 * page by mapping the page into virtual memory and using
2413 * bcopy to copy the page, one machine dependent page at a
2414 * time.
2415 *
2416 * Use XKPHYS for 64 bit, and KSEG0 where possible for 32 bit.
2417 */
2418 void
2419 pmap_copy_page(vm_page_t src, vm_page_t dst)
2420 {
2421 vm_offset_t va_src, va_dst;
2422 vm_paddr_t phys_src = VM_PAGE_TO_PHYS(src);
2423 vm_paddr_t phys_dst = VM_PAGE_TO_PHYS(dst);
2424
2425 if (MIPS_DIRECT_MAPPABLE(phys_src) && MIPS_DIRECT_MAPPABLE(phys_dst)) {
2426 /* easy case, all can be accessed via KSEG0 */
2427 /*
2428 * Flush all caches for VA that are mapped to this page
2429 * to make sure that data in SDRAM is up to date
2430 */
2431 pmap_flush_pvcache(src);
2432 mips_dcache_wbinv_range_index(
2433 MIPS_PHYS_TO_DIRECT(phys_dst), PAGE_SIZE);
2434 va_src = MIPS_PHYS_TO_DIRECT(phys_src);
2435 va_dst = MIPS_PHYS_TO_DIRECT(phys_dst);
2436 bcopy((caddr_t)va_src, (caddr_t)va_dst, PAGE_SIZE);
2437 mips_dcache_wbinv_range(va_dst, PAGE_SIZE);
2438 } else {
2439 va_src = pmap_lmem_map2(phys_src, phys_dst);
2440 va_dst = va_src + PAGE_SIZE;
2441 bcopy((void *)va_src, (void *)va_dst, PAGE_SIZE);
2442 mips_dcache_wbinv_range(va_dst, PAGE_SIZE);
2443 pmap_lmem_unmap();
2444 }
2445 }
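
/*
 * The write-back/invalidate pairs above guard against virtually-indexed
 * cache aliases: dirty lines for any user mapping of the source page
 * are pushed to RAM first, stale lines indexed at the destination's
 * direct-map address are discarded before the copy, and the freshly
 * written destination lines are written back afterwards.
 */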
2446
2447 int unmapped_buf_allowed;
2448
2449 void
2450 pmap_copy_pages(vm_page_t ma[], vm_offset_t a_offset, vm_page_t mb[],
2451 vm_offset_t b_offset, int xfersize)
2452 {
2453 char *a_cp, *b_cp;
2454 vm_page_t a_m, b_m;
2455 vm_offset_t a_pg_offset, b_pg_offset;
2456 vm_paddr_t a_phys, b_phys;
2457 int cnt;
2458
2459 while (xfersize > 0) {
2460 a_pg_offset = a_offset & PAGE_MASK;
2461 cnt = min(xfersize, PAGE_SIZE - a_pg_offset);
2462 a_m = ma[a_offset >> PAGE_SHIFT];
2463 a_phys = VM_PAGE_TO_PHYS(a_m);
2464 b_pg_offset = b_offset & PAGE_MASK;
2465 cnt = min(cnt, PAGE_SIZE - b_pg_offset);
2466 b_m = mb[b_offset >> PAGE_SHIFT];
2467 b_phys = VM_PAGE_TO_PHYS(b_m);
2468 if (MIPS_DIRECT_MAPPABLE(a_phys) &&
2469 MIPS_DIRECT_MAPPABLE(b_phys)) {
2470 pmap_flush_pvcache(a_m);
2471 mips_dcache_wbinv_range_index(
2472 MIPS_PHYS_TO_DIRECT(b_phys), PAGE_SIZE);
2473 a_cp = (char *)MIPS_PHYS_TO_DIRECT(a_phys) +
2474 a_pg_offset;
2475 b_cp = (char *)MIPS_PHYS_TO_DIRECT(b_phys) +
2476 b_pg_offset;
2477 bcopy(a_cp, b_cp, cnt);
2478 mips_dcache_wbinv_range((vm_offset_t)b_cp, cnt);
2479 } else {
2480 a_cp = (char *)pmap_lmem_map2(a_phys, b_phys);
2481 b_cp = (char *)a_cp + PAGE_SIZE;
2482 a_cp += a_pg_offset;
2483 b_cp += b_pg_offset;
2484 bcopy(a_cp, b_cp, cnt);
2485 mips_dcache_wbinv_range((vm_offset_t)b_cp, cnt);
2486 pmap_lmem_unmap();
2487 }
2488 a_offset += cnt;
2489 b_offset += cnt;
2490 xfersize -= cnt;
2491 }
2492 }
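
/*
 * Worked example of the chunking above: with a_offset = 0xf80,
 * b_offset = 0x40 and xfersize = 0x100 on 4K pages, the first pass
 * computes cnt = min(0x100, 0x80, 0xfc0) = 0x80 and copies to the end
 * of page ma[0]; the second pass copies the remaining 0x80 bytes from
 * the start of ma[1] into mb[0] at offset 0xc0.
 */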
2493
2494 /*
2495 * Returns true if the pmap's pv is one of the first
2496 * 16 pvs linked to from this page. This count may
2497 * be changed upwards or downwards in the future; it
2498 * is only necessary that true be returned for a small
2499 * subset of pmaps for proper page aging.
2500 */
2501 boolean_t
2502 pmap_page_exists_quick(pmap_t pmap, vm_page_t m)
2503 {
2504 pv_entry_t pv;
2505 int loops = 0;
2506 boolean_t rv;
2507
2508 KASSERT((m->oflags & VPO_UNMANAGED) == 0,
2509 ("pmap_page_exists_quick: page %p is not managed", m));
2510 rv = FALSE;
2511 vm_page_lock_queues();
2512 TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
2513 if (pv->pv_pmap == pmap) {
2514 rv = TRUE;
2515 break;
2516 }
2517 loops++;
2518 if (loops >= 16)
2519 break;
2520 }
2521 vm_page_unlock_queues();
2522 return (rv);
2523 }
2524
2525 /*
2526  * Remove all pages from the specified address space;
2527  * this aids process exit speed. Also, this code is
2528  * special-cased for the current process only, but
2529  * can have the more generic (and slightly slower)
2530  * mode enabled. This is much faster than pmap_remove
2531  * in the case of running down an entire address space.
2532 */
2533 void
2534 pmap_remove_pages(pmap_t pmap)
2535 {
2536 pt_entry_t *pte, tpte;
2537 pv_entry_t pv, npv;
2538 vm_page_t m;
2539
2540 if (pmap != vmspace_pmap(curthread->td_proc->p_vmspace)) {
2541 printf("warning: pmap_remove_pages called with non-current pmap\n");
2542 return;
2543 }
2544 vm_page_lock_queues();
2545 PMAP_LOCK(pmap);
2546 for (pv = TAILQ_FIRST(&pmap->pm_pvlist); pv != NULL; pv = npv) {
2547
2548 pte = pmap_pte(pv->pv_pmap, pv->pv_va);
2549 if (!pte_test(pte, PTE_V))
2550 panic("pmap_remove_pages: page on pm_pvlist has no pte");
2551 tpte = *pte;
2552
2553 /*
2554 * We cannot remove wired pages from a process' mapping at this time
2555 */
2556 if (pte_test(&tpte, PTE_W)) {
2557 npv = TAILQ_NEXT(pv, pv_plist);
2558 continue;
2559 }
2560 *pte = is_kernel_pmap(pmap) ? PTE_G : 0;
2561
2562 m = PHYS_TO_VM_PAGE(TLBLO_PTE_TO_PA(tpte));
2563 KASSERT(m != NULL,
2564 ("pmap_remove_pages: bad tpte %#jx", (uintmax_t)tpte));
2565
2566 pv->pv_pmap->pm_stats.resident_count--;
2567
2568 /*
2569 * Update the vm_page_t clean and reference bits.
2570 */
2571 if (pte_test(&tpte, PTE_D)) {
2572 vm_page_dirty(m);
2573 }
2574 npv = TAILQ_NEXT(pv, pv_plist);
2575 TAILQ_REMOVE(&pv->pv_pmap->pm_pvlist, pv, pv_plist);
2576
2577 m->md.pv_list_count--;
2578 TAILQ_REMOVE(&m->md.pv_list, pv, pv_list);
2579 if (TAILQ_FIRST(&m->md.pv_list) == NULL) {
2580 vm_page_aflag_clear(m, PGA_WRITEABLE);
2581 }
2582 pmap_unuse_pt(pv->pv_pmap, pv->pv_va, pv->pv_ptem);
2583 free_pv_entry(pv);
2584 }
2585 pmap_invalidate_all(pmap);
2586 PMAP_UNLOCK(pmap);
2587 vm_page_unlock_queues();
2588 }
2589
2590 /*
2591  * pmap_testbit tests bits in PTEs. Note that the
2592  * testbit/changebit routines are inlined, so a lot of
2593  * things evaluate at compile time.
2594 */
2595 static boolean_t
2596 pmap_testbit(vm_page_t m, int bit)
2597 {
2598 pv_entry_t pv;
2599 pt_entry_t *pte;
2600 boolean_t rv = FALSE;
2601
2602 if (m->oflags & VPO_UNMANAGED)
2603 return (rv);
2604
2605 if (TAILQ_FIRST(&m->md.pv_list) == NULL)
2606 return (rv);
2607
2608 mtx_assert(&vm_page_queue_mtx, MA_OWNED);
2609 TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
2610 PMAP_LOCK(pv->pv_pmap);
2611 pte = pmap_pte(pv->pv_pmap, pv->pv_va);
2612 rv = pte_test(pte, bit);
2613 PMAP_UNLOCK(pv->pv_pmap);
2614 if (rv)
2615 break;
2616 }
2617 return (rv);
2618 }
2619
2620 /*
2621  * This routine is used to set or clear bits, e.g., the dirty bit, in PTEs.
2622 */
2623 static __inline void
2624 pmap_changebit(vm_page_t m, int bit, boolean_t setem)
2625 {
2626 pv_entry_t pv;
2627 pt_entry_t *pte;
2628
2629 if (m->oflags & VPO_UNMANAGED)
2630 return;
2631
2632 mtx_assert(&vm_page_queue_mtx, MA_OWNED);
2633 /*
2634 	 * Loop over all current mappings, setting or clearing as appropriate.
2635 	 * If setting RO, do we need to clear the VAC?
2636 */
2637 TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
2638 PMAP_LOCK(pv->pv_pmap);
2639 pte = pmap_pte(pv->pv_pmap, pv->pv_va);
2640 if (setem) {
2641 *pte |= bit;
2642 pmap_update_page(pv->pv_pmap, pv->pv_va, *pte);
2643 } else {
2644 pt_entry_t pbits = *pte;
2645
2646 if (pbits & bit) {
2647 if (bit == PTE_D) {
2648 if (pbits & PTE_D)
2649 vm_page_dirty(m);
2650 *pte = (pbits & ~PTE_D) | PTE_RO;
2651 } else {
2652 *pte = pbits & ~bit;
2653 }
2654 pmap_update_page(pv->pv_pmap, pv->pv_va, *pte);
2655 }
2656 }
2657 PMAP_UNLOCK(pv->pv_pmap);
2658 }
2659 if (!setem && bit == PTE_D)
2660 vm_page_aflag_clear(m, PGA_WRITEABLE);
2661 }
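
/*
 * Note that clearing PTE_D above also sets PTE_RO, so the next store
 * to the page raises a TLB-modified exception; pmap_emulate_modified()
 * then sees PTE_RO, fails the fast path, and the write is resolved
 * through a full fault, which re-enters pmap_enter() with freshly
 * computed protection.
 */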
2662
2663 /*
2664 * pmap_page_wired_mappings:
2665 *
2666 * Return the number of managed mappings to the given physical page
2667 * that are wired.
2668 */
2669 int
2670 pmap_page_wired_mappings(vm_page_t m)
2671 {
2672 pv_entry_t pv;
2673 pmap_t pmap;
2674 pt_entry_t *pte;
2675 int count;
2676
2677 count = 0;
2678 if ((m->oflags & VPO_UNMANAGED) != 0)
2679 return (count);
2680 vm_page_lock_queues();
2681 TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
2682 pmap = pv->pv_pmap;
2683 PMAP_LOCK(pmap);
2684 pte = pmap_pte(pmap, pv->pv_va);
2685 if (pte_test(pte, PTE_W))
2686 count++;
2687 PMAP_UNLOCK(pmap);
2688 }
2689 vm_page_unlock_queues();
2690 return (count);
2691 }
2692
2693 /*
2694 * Clear the write and modified bits in each of the given page's mappings.
2695 */
2696 void
2697 pmap_remove_write(vm_page_t m)
2698 {
2699 pv_entry_t pv, npv;
2700 vm_offset_t va;
2701 pt_entry_t *pte;
2702
2703 KASSERT((m->oflags & VPO_UNMANAGED) == 0,
2704 ("pmap_remove_write: page %p is not managed", m));
2705
2706 /*
2707 * If the page is not VPO_BUSY, then PGA_WRITEABLE cannot be set by
2708 * another thread while the object is locked. Thus, if PGA_WRITEABLE
2709 * is clear, no page table entries need updating.
2710 */
2711 VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
2712 if ((m->oflags & VPO_BUSY) == 0 &&
2713 (m->aflags & PGA_WRITEABLE) == 0)
2714 return;
2715
2716 /*
2717 * Loop over all current mappings setting/clearing as appropos.
2718 */
2719 vm_page_lock_queues();
2720 	for (pv = TAILQ_FIRST(&m->md.pv_list); pv != NULL; pv = npv) {
2721 		npv = TAILQ_NEXT(pv, pv_list);
2722 pte = pmap_pte(pv->pv_pmap, pv->pv_va);
2723 if (pte == NULL || !pte_test(pte, PTE_V))
2724 panic("page on pm_pvlist has no pte");
2725
2726 va = pv->pv_va;
2727 pmap_protect(pv->pv_pmap, va, va + PAGE_SIZE,
2728 VM_PROT_READ | VM_PROT_EXECUTE);
2729 }
2730 vm_page_aflag_clear(m, PGA_WRITEABLE);
2731 vm_page_unlock_queues();
2732 }
2733
2734 /*
2735 * pmap_ts_referenced:
2736 *
2737 * Return the count of reference bits for a page, clearing all of them.
2738 */
2739 int
2740 pmap_ts_referenced(vm_page_t m)
2741 {
2742
2743 KASSERT((m->oflags & VPO_UNMANAGED) == 0,
2744 ("pmap_ts_referenced: page %p is not managed", m));
2745 if (m->md.pv_flags & PV_TABLE_REF) {
2746 vm_page_lock_queues();
2747 m->md.pv_flags &= ~PV_TABLE_REF;
2748 vm_page_unlock_queues();
2749 return (1);
2750 }
2751 return (0);
2752 }
2753
2754 /*
2755 * pmap_is_modified:
2756 *
2757 * Return whether or not the specified physical page was modified
2758 * in any physical maps.
2759 */
2760 boolean_t
2761 pmap_is_modified(vm_page_t m)
2762 {
2763 boolean_t rv;
2764
2765 KASSERT((m->oflags & VPO_UNMANAGED) == 0,
2766 ("pmap_is_modified: page %p is not managed", m));
2767
2768 /*
2769 * If the page is not VPO_BUSY, then PGA_WRITEABLE cannot be
2770 * concurrently set while the object is locked. Thus, if PGA_WRITEABLE
2771 * is clear, no PTEs can have PTE_D set.
2772 */
2773 VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
2774 if ((m->oflags & VPO_BUSY) == 0 &&
2775 (m->aflags & PGA_WRITEABLE) == 0)
2776 return (FALSE);
2777 vm_page_lock_queues();
2778 if (m->md.pv_flags & PV_TABLE_MOD)
2779 rv = TRUE;
2780 else
2781 rv = pmap_testbit(m, PTE_D);
2782 vm_page_unlock_queues();
2783 return (rv);
2784 }
2785
2788 /*
2789 * pmap_is_prefaultable:
2790 *
2791  *	Return whether or not the specified virtual address is eligible
2792 * for prefault.
2793 */
2794 boolean_t
2795 pmap_is_prefaultable(pmap_t pmap, vm_offset_t addr)
2796 {
2797 pd_entry_t *pde;
2798 pt_entry_t *pte;
2799 boolean_t rv;
2800
2801 rv = FALSE;
2802 PMAP_LOCK(pmap);
2803 pde = pmap_pde(pmap, addr);
2804 if (pde != NULL && *pde != 0) {
2805 pte = pmap_pde_to_pte(pde, addr);
2806 rv = (*pte == 0);
2807 }
2808 PMAP_UNLOCK(pmap);
2809 return (rv);
2810 }
2811
2812 /*
2813 * Clear the modify bits on the specified physical page.
2814 */
2815 void
2816 pmap_clear_modify(vm_page_t m)
2817 {
2818
2819 KASSERT((m->oflags & VPO_UNMANAGED) == 0,
2820 ("pmap_clear_modify: page %p is not managed", m));
2821 VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
2822 KASSERT((m->oflags & VPO_BUSY) == 0,
2823 ("pmap_clear_modify: page %p is busy", m));
2824
2825 /*
2826 * If the page is not PGA_WRITEABLE, then no PTEs can have PTE_D set.
2827 * If the object containing the page is locked and the page is not
2828 * VPO_BUSY, then PGA_WRITEABLE cannot be concurrently set.
2829 */
2830 if ((m->aflags & PGA_WRITEABLE) == 0)
2831 return;
2832 vm_page_lock_queues();
2833 if (m->md.pv_flags & PV_TABLE_MOD) {
2834 pmap_changebit(m, PTE_D, FALSE);
2835 m->md.pv_flags &= ~PV_TABLE_MOD;
2836 }
2837 vm_page_unlock_queues();
2838 }
2839
2840 /*
2841 * pmap_is_referenced:
2842 *
2843 * Return whether or not the specified physical page was referenced
2844 * in any physical maps.
2845 */
2846 boolean_t
2847 pmap_is_referenced(vm_page_t m)
2848 {
2849
2850 KASSERT((m->oflags & VPO_UNMANAGED) == 0,
2851 ("pmap_is_referenced: page %p is not managed", m));
2852 return ((m->md.pv_flags & PV_TABLE_REF) != 0);
2853 }
2854
2855 /*
2856 * pmap_clear_reference:
2857 *
2858 * Clear the reference bit on the specified physical page.
2859 */
2860 void
2861 pmap_clear_reference(vm_page_t m)
2862 {
2863
2864 KASSERT((m->oflags & VPO_UNMANAGED) == 0,
2865 ("pmap_clear_reference: page %p is not managed", m));
2866 vm_page_lock_queues();
2867 if (m->md.pv_flags & PV_TABLE_REF) {
2868 m->md.pv_flags &= ~PV_TABLE_REF;
2869 }
2870 vm_page_unlock_queues();
2871 }
2872
2873 /*
2874 * Miscellaneous support routines follow
2875 */
2876
2884 /*
2885 * Map a set of physical memory pages into the kernel virtual
2886 * address space. Return a pointer to where it is mapped. This
2887 * routine is intended to be used for mapping device memory,
2888 * NOT real memory.
2889 *
2890 * Use XKPHYS uncached for 64 bit, and KSEG1 where possible for 32 bit.
2891 */
2892 void *
2893 pmap_mapdev(vm_paddr_t pa, vm_size_t size)
2894 {
2895 vm_offset_t va, tmpva, offset;
2896
2897 /*
2898 	 * KSEG1 maps only the first 512MB of the physical address space.
2899 	 * For pa > 0x20000000 we must create a proper mapping using pmap_kenter.
2900 */
2901 if (MIPS_DIRECT_MAPPABLE(pa + size - 1))
2902 return ((void *)MIPS_PHYS_TO_DIRECT_UNCACHED(pa));
2903 else {
2904 offset = pa & PAGE_MASK;
2905 size = roundup(size + offset, PAGE_SIZE);
2906
2907 va = kmem_alloc_nofault(kernel_map, size);
2908 if (!va)
2909 panic("pmap_mapdev: Couldn't alloc kernel virtual memory");
2910 pa = trunc_page(pa);
2911 for (tmpva = va; size > 0;) {
2912 pmap_kenter_attr(tmpva, pa, PTE_C_UNCACHED);
2913 size -= PAGE_SIZE;
2914 tmpva += PAGE_SIZE;
2915 pa += PAGE_SIZE;
2916 }
2917 }
2918
2919 return ((void *)(va + offset));
2920 }
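
/*
 * Illustrative sketch: a hypothetical driver mapping a 4K device
 * register window and tearing it down again.  The physical address is
 * made up for the example.
 *
 *	uint32_t *regs, val;
 *
 *	regs = pmap_mapdev(0x1f000000, PAGE_SIZE);
 *	val = regs[0];
 *	pmap_unmapdev((vm_offset_t)regs, PAGE_SIZE);
 */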
2921
2922 void
2923 pmap_unmapdev(vm_offset_t va, vm_size_t size)
2924 {
2925 #ifndef __mips_n64
2926 vm_offset_t base, offset;
2927
2928 /* If the address is within KSEG1 then there is nothing to do */
2929 if (va >= MIPS_KSEG1_START && va <= MIPS_KSEG1_END)
2930 return;
2931
2932 base = trunc_page(va);
2933 offset = va & PAGE_MASK;
2934 size = roundup(size + offset, PAGE_SIZE);
2935 kmem_free(kernel_map, base, size);
2936 #endif
2937 }
2938
2939 /*
2940 * perform the pmap work for mincore
2941 */
2942 int
2943 pmap_mincore(pmap_t pmap, vm_offset_t addr, vm_paddr_t *locked_pa)
2944 {
2945 pt_entry_t *ptep, pte;
2946 vm_paddr_t pa;
2947 vm_page_t m;
2948 int val;
2949 boolean_t managed;
2950
2951 PMAP_LOCK(pmap);
2952 retry:
2953 ptep = pmap_pte(pmap, addr);
2954 pte = (ptep != NULL) ? *ptep : 0;
2955 if (!pte_test(&pte, PTE_V)) {
2956 val = 0;
2957 goto out;
2958 }
2959 val = MINCORE_INCORE;
2960 if (pte_test(&pte, PTE_D))
2961 val |= MINCORE_MODIFIED | MINCORE_MODIFIED_OTHER;
2962 pa = TLBLO_PTE_TO_PA(pte);
2963 managed = page_is_managed(pa);
2964 if (managed) {
2965 /*
2966 * This may falsely report the given address as
2967 * MINCORE_REFERENCED. Unfortunately, due to the lack of
2968 * per-PTE reference information, it is impossible to
2969 * determine if the address is MINCORE_REFERENCED.
2970 */
2971 m = PHYS_TO_VM_PAGE(pa);
2972 if ((m->aflags & PGA_REFERENCED) != 0)
2973 val |= MINCORE_REFERENCED | MINCORE_REFERENCED_OTHER;
2974 }
2975 if ((val & (MINCORE_MODIFIED_OTHER | MINCORE_REFERENCED_OTHER)) !=
2976 (MINCORE_MODIFIED_OTHER | MINCORE_REFERENCED_OTHER) && managed) {
2977 /* Ensure that "PHYS_TO_VM_PAGE(pa)->object" doesn't change. */
2978 if (vm_page_pa_tryrelock(pmap, pa, locked_pa))
2979 goto retry;
2980 } else
2981 out:
2982 PA_UNLOCK_COND(*locked_pa);
2983 PMAP_UNLOCK(pmap);
2984 return (val);
2985 }
2986
2987 void
2988 pmap_activate(struct thread *td)
2989 {
2990 pmap_t pmap, oldpmap;
2991 struct proc *p = td->td_proc;
2992 u_int cpuid;
2993
2994 critical_enter();
2995
2996 pmap = vmspace_pmap(p->p_vmspace);
2997 oldpmap = PCPU_GET(curpmap);
2998 cpuid = PCPU_GET(cpuid);
2999
3000 if (oldpmap)
3001 CPU_CLR_ATOMIC(cpuid, &oldpmap->pm_active);
3002 CPU_SET_ATOMIC(cpuid, &pmap->pm_active);
3003 pmap_asid_alloc(pmap);
3004 if (td == curthread) {
3005 PCPU_SET(segbase, pmap->pm_segtab);
3006 mips_wr_entryhi(pmap->pm_asid[cpuid].asid);
3007 }
3008
3009 PCPU_SET(curpmap, pmap);
3010 critical_exit();
3011 }
3012
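/*
 * pmap_sync_icache is a no-op here: the I-cache is synchronized for
 * executable mappings at pmap_enter() time instead (see above).
 */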
3013 void
3014 pmap_sync_icache(pmap_t pm, vm_offset_t va, vm_size_t sz)
3015 {
3016 }
3017
3018 /*
3019 * Increase the starting virtual address of the given mapping if a
3020 * different alignment might result in more superpage mappings.
3021 */
3022 void
3023 pmap_align_superpage(vm_object_t object, vm_ooffset_t offset,
3024 vm_offset_t *addr, vm_size_t size)
3025 {
3026 vm_offset_t superpage_offset;
3027
3028 if (size < NBSEG)
3029 return;
3030 if (object != NULL && (object->flags & OBJ_COLORED) != 0)
3031 offset += ptoa(object->pg_color);
3032 superpage_offset = offset & SEGMASK;
3033 if (size - ((NBSEG - superpage_offset) & SEGMASK) < NBSEG ||
3034 (*addr & SEGMASK) == superpage_offset)
3035 return;
3036 if ((*addr & SEGMASK) < superpage_offset)
3037 *addr = (*addr & ~SEGMASK) + superpage_offset;
3038 else
3039 *addr = ((*addr + SEGMASK) & ~SEGMASK) + superpage_offset;
3040 }
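
/*
 * Worked example, assuming 4K pages and NBSEG = 0x400000: for
 * offset = 0x12345000 the desired color is superpage_offset =
 * 0x345000, so a request with *addr = 0x00600000 and size = 0x800000
 * is moved to 0x00745000; superpage-aligned offsets in the object can
 * then map to superpage-aligned virtual addresses.
 */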
3041
3042 /*
3043 * Increase the starting virtual address of the given mapping so
3044 * that it is aligned to not be the second page in a TLB entry.
3045 * This routine assumes that the length is appropriately-sized so
3046 * that the allocation does not share a TLB entry at all if required.
3047 */
3048 void
3049 pmap_align_tlb(vm_offset_t *addr)
3050 {
3051 if ((*addr & PAGE_SIZE) == 0)
3052 return;
3053 *addr += PAGE_SIZE;
3054 return;
3055 }
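
/*
 * Worked example: MIPS TLB entries map even/odd page pairs, so with
 * 4K pages an allocation starting at 0x00401000 has the PAGE_SIZE bit
 * set and would occupy the odd half of a pair; it is bumped to
 * 0x00402000.  An allocation starting at 0x00402000 is left alone.
 */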
3056
3057 #ifdef DDB
3058 DB_SHOW_COMMAND(ptable, ddb_pid_dump)
3059 {
3060 pmap_t pmap;
3061 struct thread *td = NULL;
3062 struct proc *p;
3063 int i, j, k;
3064 vm_paddr_t pa;
3065 vm_offset_t va;
3066
3067 if (have_addr) {
3068 td = db_lookup_thread(addr, TRUE);
3069 if (td == NULL) {
3070 db_printf("Invalid pid or tid");
3071 return;
3072 }
3073 p = td->td_proc;
3074 if (p->p_vmspace == NULL) {
3075 db_printf("No vmspace for process");
3076 return;
3077 }
3078 pmap = vmspace_pmap(p->p_vmspace);
3079 } else
3080 pmap = kernel_pmap;
3081
3082 db_printf("pmap:%p segtab:%p asid:%x generation:%x\n",
3083 pmap, pmap->pm_segtab, pmap->pm_asid[0].asid,
3084 pmap->pm_asid[0].gen);
3085 for (i = 0; i < NPDEPG; i++) {
3086 pd_entry_t *pdpe;
3087 pt_entry_t *pde;
3088 pt_entry_t pte;
3089
3090 pdpe = (pd_entry_t *)pmap->pm_segtab[i];
3091 if (pdpe == NULL)
3092 continue;
3093 db_printf("[%4d] %p\n", i, pdpe);
3094 #ifdef __mips_n64
3095 for (j = 0; j < NPDEPG; j++) {
3096 pde = (pt_entry_t *)pdpe[j];
3097 if (pde == NULL)
3098 continue;
3099 db_printf("\t[%4d] %p\n", j, pde);
3100 #else
3101 {
3102 j = 0;
3103 pde = (pt_entry_t *)pdpe;
3104 #endif
3105 for (k = 0; k < NPTEPG; k++) {
3106 pte = pde[k];
3107 if (pte == 0 || !pte_test(&pte, PTE_V))
3108 continue;
3109 pa = TLBLO_PTE_TO_PA(pte);
3110 va = ((u_long)i << SEGSHIFT) | (j << PDRSHIFT) | (k << PAGE_SHIFT);
3111 db_printf("\t\t[%04d] va: %p pte: %8jx pa:%jx\n",
3112 k, (void *)va, (uintmax_t)pte, (uintmax_t)pa);
3113 }
3114 }
3115 }
3116 }
3117 #endif
3118
3119 #if defined(DEBUG)
3120
3121 static void pads(pmap_t pm);
3122 void pmap_pvdump(vm_offset_t pa);
3123
3124 /* Print the address space of a pmap. */
3125 static void
3126 pads(pmap_t pm)
3127 {
3128 unsigned va, i, j;
3129 pt_entry_t *ptep;
3130
3131 if (pm == kernel_pmap)
3132 return;
3133 for (i = 0; i < NPTEPG; i++)
3134 if (pm->pm_segtab[i])
3135 for (j = 0; j < NPTEPG; j++) {
3136 va = (i << SEGSHIFT) + (j << PAGE_SHIFT);
3137 if (pm == kernel_pmap && va < KERNBASE)
3138 continue;
3139 if (pm != kernel_pmap &&
3140 va >= VM_MAXUSER_ADDRESS)
3141 continue;
3142 ptep = pmap_pte(pm, va);
3143 if (pte_test(ptep, PTE_V))
3144 printf("%x:%x ", va, *(int *)ptep);
3145 }
3146
3147 }
3148
3149 void
3150 pmap_pvdump(vm_offset_t pa)
3151 {
3152 	pv_entry_t pv;
3153 vm_page_t m;
3154
3155 	printf("pa %jx", (uintmax_t)pa);
3156 m = PHYS_TO_VM_PAGE(pa);
3157 for (pv = TAILQ_FIRST(&m->md.pv_list); pv;
3158 pv = TAILQ_NEXT(pv, pv_list)) {
3159 		printf(" -> pmap %p, va %jx", (void *)pv->pv_pmap, (uintmax_t)pv->pv_va);
3160 pads(pv->pv_pmap);
3161 }
3162 printf(" ");
3163 }
3164
3166 #endif
3167
3168
3169 /*
3170 * Allocate TLB address space tag (called ASID or TLBPID) and return it.
3171 * It takes almost as much or more time to search the TLB for a
3172 * specific ASID and flush those entries as it does to flush the entire TLB.
3173 * Therefore, when we allocate a new ASID, we just take the next number. When
3174 * we run out of numbers, we flush the TLB, increment the generation count
3175 * and start over. ASID zero is reserved for kernel use.
3176 */
3177 static void
3178 pmap_asid_alloc(pmap_t pmap)
3179 {
3180 
3181 	if (pmap->pm_asid[PCPU_GET(cpuid)].asid == PMAP_ASID_RESERVED ||
3182 	    pmap->pm_asid[PCPU_GET(cpuid)].gen !=
3183 	    PCPU_GET(asid_generation)) {
3184 if (PCPU_GET(next_asid) == pmap_max_asid) {
3185 tlb_invalidate_all_user(NULL);
3186 PCPU_SET(asid_generation,
3187 (PCPU_GET(asid_generation) + 1) & ASIDGEN_MASK);
3188 if (PCPU_GET(asid_generation) == 0) {
3189 PCPU_SET(asid_generation, 1);
3190 }
3191 PCPU_SET(next_asid, 1); /* 0 means invalid */
3192 }
3193 pmap->pm_asid[PCPU_GET(cpuid)].asid = PCPU_GET(next_asid);
3194 pmap->pm_asid[PCPU_GET(cpuid)].gen = PCPU_GET(asid_generation);
3195 PCPU_SET(next_asid, PCPU_GET(next_asid) + 1);
3196 }
3197 }
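
/*
 * Worked example, assuming pmap_max_asid = 256: ASIDs 1..255 are
 * handed out in order; when next_asid reaches 256 the user TLB is
 * flushed, asid_generation is bumped, and numbering restarts at 1.
 * Any pmap still carrying the old generation simply re-allocates the
 * next time it is activated.
 */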
3198
3199 int
3200 page_is_managed(vm_paddr_t pa)
3201 {
3202 vm_offset_t pgnum = atop(pa);
3203
3204 if (pgnum >= first_page) {
3205 vm_page_t m;
3206
3207 m = PHYS_TO_VM_PAGE(pa);
3208 if (m == NULL)
3209 return (0);
3210 if ((m->oflags & VPO_UNMANAGED) == 0)
3211 return (1);
3212 }
3213 return (0);
3214 }
3215
3216 static pt_entry_t
3217 init_pte_prot(vm_offset_t va, vm_page_t m, vm_prot_t prot)
3218 {
3219 pt_entry_t rw;
3220
3221 if (!(prot & VM_PROT_WRITE))
3222 rw = PTE_V | PTE_RO | PTE_C_CACHE;
3223 else if ((m->oflags & VPO_UNMANAGED) == 0) {
3224 if ((m->md.pv_flags & PV_TABLE_MOD) != 0)
3225 rw = PTE_V | PTE_D | PTE_C_CACHE;
3226 else
3227 rw = PTE_V | PTE_C_CACHE;
3228 vm_page_aflag_set(m, PGA_WRITEABLE);
3229 } else
3230 /* Needn't emulate a modified bit for unmanaged pages. */
3231 rw = PTE_V | PTE_D | PTE_C_CACHE;
3232 return (rw);
3233 }
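
/*
 * Summary of the cases above (PTE_C_CACHE is set in every case):
 *
 *	write prot?	managed?	PV_TABLE_MOD?	result
 *	no		-		-		PTE_V | PTE_RO
 *	yes		yes		yes		PTE_V | PTE_D
 *	yes		yes		no		PTE_V (dirty bit
 *							emulated on 1st write)
 *	yes		no		-		PTE_V | PTE_D
 *
 * Writable managed pages are also marked PGA_WRITEABLE.
 */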
3234
3235 /*
3236 * pmap_emulate_modified : do dirty bit emulation
3237 *
3238 * On SMP, update just the local TLB, other CPUs will update their
3239 * TLBs from PTE lazily, if they get the exception.
3240  * Returns 0 in case of success, 1 if the page is read-only and we
3241 * need to fault.
3242 */
3243 int
3244 pmap_emulate_modified(pmap_t pmap, vm_offset_t va)
3245 {
3246 vm_page_t m;
3247 pt_entry_t *pte;
3248 vm_paddr_t pa;
3249
3250 PMAP_LOCK(pmap);
3251 pte = pmap_pte(pmap, va);
3252 if (pte == NULL)
3253 panic("pmap_emulate_modified: can't find PTE");
3254 #ifdef SMP
3255 /* It is possible that some other CPU changed m-bit */
3256 if (!pte_test(pte, PTE_V) || pte_test(pte, PTE_D)) {
3257 pmap_update_page_local(pmap, va, *pte);
3258 PMAP_UNLOCK(pmap);
3259 return (0);
3260 }
3261 #else
3262 if (!pte_test(pte, PTE_V) || pte_test(pte, PTE_D))
3263 panic("pmap_emulate_modified: invalid pte");
3264 #endif
3265 if (pte_test(pte, PTE_RO)) {
3266 /* write to read only page in the kernel */
3267 PMAP_UNLOCK(pmap);
3268 return (1);
3269 }
3270 pte_set(pte, PTE_D);
3271 pmap_update_page_local(pmap, va, *pte);
3272 pa = TLBLO_PTE_TO_PA(*pte);
3273 if (!page_is_managed(pa))
3274 panic("pmap_emulate_modified: unmanaged page");
3275 m = PHYS_TO_VM_PAGE(pa);
3276 m->md.pv_flags |= (PV_TABLE_REF | PV_TABLE_MOD);
3277 PMAP_UNLOCK(pmap);
3278 return (0);
3279 }
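
/*
 * This is reached from the TLB-modified exception path in the trap
 * handler; a nonzero return tells the caller that the access is a
 * genuine protection violation rather than mere dirty-bit
 * bookkeeping, and it must be handled as a fault.
 */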
3280
3281 /*
3282 * Routine: pmap_kextract
3283 * Function:
3284 * Extract the physical page address associated
3285 * virtual address.
3286 */
3287 /* PMAP_INLINE */ vm_offset_t
3288 pmap_kextract(vm_offset_t va)
3289 {
3290 int mapped;
3291
3292 /*
3293 * First, the direct-mapped regions.
3294 */
3295 #if defined(__mips_n64)
3296 if (va >= MIPS_XKPHYS_START && va < MIPS_XKPHYS_END)
3297 return (MIPS_XKPHYS_TO_PHYS(va));
3298 #endif
3299 if (va >= MIPS_KSEG0_START && va < MIPS_KSEG0_END)
3300 return (MIPS_KSEG0_TO_PHYS(va));
3301
3302 if (va >= MIPS_KSEG1_START && va < MIPS_KSEG1_END)
3303 return (MIPS_KSEG1_TO_PHYS(va));
3304
3305 /*
3306 * User virtual addresses.
3307 */
3308 if (va < VM_MAXUSER_ADDRESS) {
3309 pt_entry_t *ptep;
3310
3311 if (curproc && curproc->p_vmspace) {
3312 ptep = pmap_pte(&curproc->p_vmspace->vm_pmap, va);
3313 if (ptep) {
3314 return (TLBLO_PTE_TO_PA(*ptep) |
3315 (va & PAGE_MASK));
3316 }
3317 return (0);
3318 }
3319 }
3320
3321 /*
3322 * Should be kernel virtual here, otherwise fail
3323 */
3324 	mapped = (va >= MIPS_KSEG2_START && va < MIPS_KSEG2_END);
3325 #if defined(__mips_n64)
3326 	mapped = mapped || (va >= MIPS_XKSEG_START && va < MIPS_XKSEG_END);
3327 #endif
3328 /*
3329 * Kernel virtual.
3330 */
3331
3332 if (mapped) {
3333 pt_entry_t *ptep;
3334
3335 /* Is the kernel pmap initialized? */
3336 if (!CPU_EMPTY(&kernel_pmap->pm_active)) {
3337 /* It's inside the virtual address range */
3338 ptep = pmap_pte(kernel_pmap, va);
3339 if (ptep) {
3340 return (TLBLO_PTE_TO_PA(*ptep) |
3341 (va & PAGE_MASK));
3342 }
3343 }
3344 return (0);
3345 }
3346
3347 panic("%s for unknown address space %p.", __func__, (void *)va);
3348 }
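
/*
 * Worked example: a KSEG0 address such as 0x80001000 resolves through
 * the direct-map case to physical 0x1000; a KSEG2 address is resolved
 * by walking the kernel page table, yielding TLBLO_PTE_TO_PA(pte)
 * plus the page offset, or 0 when no valid mapping exists.
 */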
3349
3350
3351 void
3352 pmap_flush_pvcache(vm_page_t m)
3353 {
3354 pv_entry_t pv;
3355
3356 if (m != NULL) {
3357 for (pv = TAILQ_FIRST(&m->md.pv_list); pv;
3358 pv = TAILQ_NEXT(pv, pv_list)) {
3359 mips_dcache_wbinv_range_index(pv->pv_va, PAGE_SIZE);
3360 }
3361 }
3362 }