FreeBSD/Linux Kernel Cross Reference
sys/amd64/amd64/pmap.c
1 /*-
2 * Copyright (c) 1991 Regents of the University of California.
3 * All rights reserved.
4 * Copyright (c) 1994 John S. Dyson
5 * All rights reserved.
6 * Copyright (c) 1994 David Greenman
7 * All rights reserved.
8 * Copyright (c) 2003 Peter Wemm
9 * All rights reserved.
10 *
11 * This code is derived from software contributed to Berkeley by
12 * the Systems Programming Group of the University of Utah Computer
13 * Science Department and William Jolitz of UUNET Technologies Inc.
14 *
15 * Redistribution and use in source and binary forms, with or without
16 * modification, are permitted provided that the following conditions
17 * are met:
18 * 1. Redistributions of source code must retain the above copyright
19 * notice, this list of conditions and the following disclaimer.
20 * 2. Redistributions in binary form must reproduce the above copyright
21 * notice, this list of conditions and the following disclaimer in the
22 * documentation and/or other materials provided with the distribution.
23 * 3. All advertising materials mentioning features or use of this software
24 * must display the following acknowledgement:
25 * This product includes software developed by the University of
26 * California, Berkeley and its contributors.
27 * 4. Neither the name of the University nor the names of its contributors
28 * may be used to endorse or promote products derived from this software
29 * without specific prior written permission.
30 *
31 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
32 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
33 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
34 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
35 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
36 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
37 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
38 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
39 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
40 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
41 * SUCH DAMAGE.
42 *
43 * from: @(#)pmap.c 7.7 (Berkeley) 5/12/91
44 */
45 /*-
46 * Copyright (c) 2003 Networks Associates Technology, Inc.
47 * All rights reserved.
48 *
49 * This software was developed for the FreeBSD Project by Jake Burkholder,
50 * Safeport Network Services, and Network Associates Laboratories, the
51 * Security Research Division of Network Associates, Inc. under
52 * DARPA/SPAWAR contract N66001-01-C-8035 ("CBOSS"), as part of the DARPA
53 * CHATS research program.
54 *
55 * Redistribution and use in source and binary forms, with or without
56 * modification, are permitted provided that the following conditions
57 * are met:
58 * 1. Redistributions of source code must retain the above copyright
59 * notice, this list of conditions and the following disclaimer.
60 * 2. Redistributions in binary form must reproduce the above copyright
61 * notice, this list of conditions and the following disclaimer in the
62 * documentation and/or other materials provided with the distribution.
63 *
64 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
65 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
66 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
67 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
68 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
69 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
70 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
71 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
72 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
73 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
74 * SUCH DAMAGE.
75 */
76
77 #include <sys/cdefs.h>
78 __FBSDID("$FreeBSD: src/sys/amd64/amd64/pmap.c,v 1.491.2.9 2006/04/22 20:51:04 alc Exp $");
79
80 /*
81 * Manages physical address maps.
82 *
83 * In addition to hardware address maps, this
84 * module is called upon to provide software-use-only
85 * maps which may or may not be stored in the same
86 * form as hardware maps. These pseudo-maps are
87 * used to store intermediate results from copy
88 * operations to and from address spaces.
89 *
90 * Since the information managed by this module is
91 * also stored by the logical address mapping module,
92 * this module may throw away valid virtual-to-physical
93 * mappings at almost any time. However, invalidations
94 * of virtual-to-physical mappings must be done as
95 * requested.
96 *
97 * In order to cope with hardware architectures which
98 * make virtual-to-physical map invalidates expensive,
99 * this module may delay invalidation or reduced-protection
100 * operations until such time as they are actually
101 * necessary. This module is given full information as
102 * to which processors are currently using which maps,
103 * and to when physical maps must be made correct.
104 */
105
106 #include "opt_msgbuf.h"
107 #include "opt_pmap.h"
108 #include "opt_kstack_pages.h"
109
110 #include <sys/param.h>
111 #include <sys/systm.h>
112 #include <sys/kernel.h>
113 #include <sys/lock.h>
114 #include <sys/malloc.h>
115 #include <sys/mman.h>
116 #include <sys/msgbuf.h>
117 #include <sys/mutex.h>
118 #include <sys/proc.h>
119 #include <sys/sx.h>
120 #include <sys/vmmeter.h>
121 #include <sys/sched.h>
122 #include <sys/sysctl.h>
123 #ifdef SMP
124 #include <sys/smp.h>
125 #endif
126
127 #include <vm/vm.h>
128 #include <vm/vm_param.h>
129 #include <vm/vm_kern.h>
130 #include <vm/vm_page.h>
131 #include <vm/vm_map.h>
132 #include <vm/vm_object.h>
133 #include <vm/vm_extern.h>
134 #include <vm/vm_pageout.h>
135 #include <vm/vm_pager.h>
136 #include <vm/uma.h>
137
138 #include <machine/cpu.h>
139 #include <machine/cputypes.h>
140 #include <machine/md_var.h>
141 #include <machine/pcb.h>
142 #include <machine/specialreg.h>
143 #ifdef SMP
144 #include <machine/smp.h>
145 #endif
146
147 #ifndef PMAP_SHPGPERPROC
148 #define PMAP_SHPGPERPROC 200
149 #endif
150
151 #if defined(DIAGNOSTIC)
152 #define PMAP_DIAGNOSTIC
153 #endif
154
155 #define MINPV 2048
156
157 #if !defined(PMAP_DIAGNOSTIC)
158 #define PMAP_INLINE __inline
159 #else
160 #define PMAP_INLINE
161 #endif
162
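/*
 * The statically allocated kernel pmap; the kernel_pmap macro in
 * <machine/pmap.h> is expected to point at this structure.
 */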
163 struct pmap kernel_pmap_store;
164
165 vm_paddr_t avail_start; /* PA of first available physical page */
166 vm_paddr_t avail_end; /* PA of last available physical page */
167 vm_offset_t virtual_avail; /* VA of first avail page (after kernel bss) */
168 vm_offset_t virtual_end; /* VA of last avail page (end of kernel AS) */
169 static boolean_t pmap_initialized = FALSE; /* Has pmap_init completed? */
170
171 static int nkpt;
172 static int ndmpdp;
173 static vm_paddr_t dmaplimit;
174 vm_offset_t kernel_vm_end;
175 pt_entry_t pg_nx;
176
177 static u_int64_t KPTphys; /* phys addr of kernel level 1 */
178 static u_int64_t KPDphys; /* phys addr of kernel level 2 */
179 static u_int64_t KPDPphys; /* phys addr of kernel level 3 */
180 u_int64_t KPML4phys; /* phys addr of kernel level 4 */
181
182 static u_int64_t DMPDphys; /* phys addr of direct mapped level 2 */
183 static u_int64_t DMPDPphys; /* phys addr of direct mapped level 3 */
184
185 /*
186 * Data for the pv entry allocation mechanism
187 */
188 static uma_zone_t pvzone;
189 static struct vm_object pvzone_obj;
190 static int pv_entry_count = 0, pv_entry_max = 0, pv_entry_high_water = 0;
191 int pmap_pagedaemon_waken;
192
193 /*
194 * All those kernel PT submaps that BSD is so fond of
195 */
196 pt_entry_t *CMAP1 = 0;
197 caddr_t CADDR1 = 0;
198 struct msgbuf *msgbufp = 0;
199
200 /*
201 * Crashdump maps.
202 */
203 static caddr_t crashdumpmap;
204
205 static PMAP_INLINE void free_pv_entry(pv_entry_t pv);
206 static pv_entry_t get_pv_entry(void);
207 static void pmap_clear_ptes(vm_page_t m, long bit);
208
209 static int pmap_remove_pte(pmap_t pmap, pt_entry_t *ptq,
210 vm_offset_t sva, pd_entry_t ptepde);
211 static void pmap_remove_page(struct pmap *pmap, vm_offset_t va);
212 static int pmap_remove_entry(struct pmap *pmap, vm_page_t m,
213 vm_offset_t va, pd_entry_t ptepde);
214 static void pmap_insert_entry(pmap_t pmap, vm_offset_t va, vm_page_t m);
215
216 static vm_page_t pmap_allocpte(pmap_t pmap, vm_offset_t va, int flags);
217
218 static vm_page_t _pmap_allocpte(pmap_t pmap, vm_pindex_t ptepindex, int flags);
219 static int _pmap_unwire_pte_hold(pmap_t pmap, vm_offset_t va, vm_page_t m);
220 static int pmap_unuse_pt(pmap_t, vm_offset_t, pd_entry_t);
221 static vm_offset_t pmap_kmem_choose(vm_offset_t addr);
222
223 CTASSERT(1 << PDESHIFT == sizeof(pd_entry_t));
224 CTASSERT(1 << PTESHIFT == sizeof(pt_entry_t));
225
226 /*
227 * Move the kernel virtual free pointer to the next
228 * 2MB. This is used to help improve performance
229 * by using a large (2MB) page for much of the kernel
230 * (.text, .data, .bss)
231 */
232 static vm_offset_t
233 pmap_kmem_choose(vm_offset_t addr)
234 {
235 vm_offset_t newaddr = addr;
236
237 newaddr = (addr + (NBPDR - 1)) & ~(NBPDR - 1);
238 return newaddr;
239 }
240
241 /********************/
242 /* Inline functions */
243 /********************/
244
245 /* Return a non-clipped PD index for a given VA */
246 static __inline vm_pindex_t
247 pmap_pde_pindex(vm_offset_t va)
248 {
249 return va >> PDRSHIFT;
250 }
251
252
253 /* Return various clipped indexes for a given VA */
254 static __inline vm_pindex_t
255 pmap_pte_index(vm_offset_t va)
256 {
257
258 return ((va >> PAGE_SHIFT) & ((1ul << NPTEPGSHIFT) - 1));
259 }
260
261 static __inline vm_pindex_t
262 pmap_pde_index(vm_offset_t va)
263 {
264
265 return ((va >> PDRSHIFT) & ((1ul << NPDEPGSHIFT) - 1));
266 }
267
268 static __inline vm_pindex_t
269 pmap_pdpe_index(vm_offset_t va)
270 {
271
272 return ((va >> PDPSHIFT) & ((1ul << NPDPEPGSHIFT) - 1));
273 }
274
275 static __inline vm_pindex_t
276 pmap_pml4e_index(vm_offset_t va)
277 {
278
279 return ((va >> PML4SHIFT) & ((1ul << NPML4EPGSHIFT) - 1));
280 }
281
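/*
 * The helpers above extract the four 9-bit page-table indexes from a
 * virtual address: with 4KB pages, the PT index is bits 12-20, the PD
 * index bits 21-29, the PDP index bits 30-38, and the PML4 index bits
 * 39-47, leaving bits 0-11 as the offset within the page.
 */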
282 /* Return a pointer to the PML4 slot that corresponds to a VA */
283 static __inline pml4_entry_t *
284 pmap_pml4e(pmap_t pmap, vm_offset_t va)
285 {
286
287 if (!pmap)
288 return NULL;
289 return (&pmap->pm_pml4[pmap_pml4e_index(va)]);
290 }
291
292 /* Return a pointer to the PDP slot that corresponds to a VA */
293 static __inline pdp_entry_t *
294 pmap_pdpe(pmap_t pmap, vm_offset_t va)
295 {
296 pml4_entry_t *pml4e;
297 pdp_entry_t *pdpe;
298
299 pml4e = pmap_pml4e(pmap, va);
300 if (pml4e == NULL || (*pml4e & PG_V) == 0)
301 return NULL;
302 pdpe = (pdp_entry_t *)PHYS_TO_DMAP(*pml4e & PG_FRAME);
303 return (&pdpe[pmap_pdpe_index(va)]);
304 }
305
306 /* Return a pointer to the PD slot that corresponds to a VA */
307 static __inline pd_entry_t *
308 pmap_pde(pmap_t pmap, vm_offset_t va)
309 {
310 pdp_entry_t *pdpe;
311 pd_entry_t *pde;
312
313 pdpe = pmap_pdpe(pmap, va);
314 if (pdpe == NULL || (*pdpe & PG_V) == 0)
315 return NULL;
316 pde = (pd_entry_t *)PHYS_TO_DMAP(*pdpe & PG_FRAME);
317 return (&pde[pmap_pde_index(va)]);
318 }
319
320 /* Return a pointer to the PT slot that corresponds to a VA */
321 static __inline pt_entry_t *
322 pmap_pte(pmap_t pmap, vm_offset_t va)
323 {
324 pd_entry_t *pde;
325 pt_entry_t *pte;
326
327 pde = pmap_pde(pmap, va);
328 if (pde == NULL || (*pde & PG_V) == 0)
329 return NULL;
330 if ((*pde & PG_PS) != 0) /* compat with i386 pmap_pte() */
331 return ((pt_entry_t *)pde);
332 pte = (pt_entry_t *)PHYS_TO_DMAP(*pde & PG_FRAME);
333 return (&pte[pmap_pte_index(va)]);
334 }
335
336
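/*
 * As pmap_pte(), but also return the page directory entry through
 * *ptepde so that the remove paths can later locate and unhold the
 * page table page backing the mapping.
 */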
337 static __inline pt_entry_t *
338 pmap_pte_pde(pmap_t pmap, vm_offset_t va, pd_entry_t *ptepde)
339 {
340 pd_entry_t *pde;
341 pt_entry_t *pte;
342
343 pde = pmap_pde(pmap, va);
344 if (pde == NULL || (*pde & PG_V) == 0)
345 return NULL;
346 *ptepde = *pde;
347 if ((*pde & PG_PS) != 0) /* compat with i386 pmap_pte() */
348 return ((pt_entry_t *)pde);
349 pte = (pt_entry_t *)PHYS_TO_DMAP(*pde & PG_FRAME);
350 return (&pte[pmap_pte_index(va)]);
351 }
352
353
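/*
 * vtopte() and vtopde() below index the recursive mapping windows
 * (PTmap/PDmap) created by the self-referential PML4 slot, so they
 * are only valid for the currently loaded address space.
 */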
354 PMAP_INLINE pt_entry_t *
355 vtopte(vm_offset_t va)
356 {
357 u_int64_t mask = ((1ul << (NPTEPGSHIFT + NPDEPGSHIFT + NPDPEPGSHIFT + NPML4EPGSHIFT)) - 1);
358
359 return (PTmap + ((va >> PAGE_SHIFT) & mask));
360 }
361
362 static __inline pd_entry_t *
363 vtopde(vm_offset_t va)
364 {
365 u_int64_t mask = ((1ul << (NPDEPGSHIFT + NPDPEPGSHIFT + NPML4EPGSHIFT)) - 1);
366
367 return (PDmap + ((va >> PDRSHIFT) & mask));
368 }
369
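/*
 * Bootstrap allocator: carve n zeroed, physically contiguous pages
 * off the front of available physical memory.  The physical address
 * is used directly as a pointer here, which relies on the boot-time
 * page tables still identity-mapping low physical memory.
 */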
370 static u_int64_t
371 allocpages(int n)
372 {
373 u_int64_t ret;
374
375 ret = avail_start;
376 bzero((void *)ret, n * PAGE_SIZE);
377 avail_start += n * PAGE_SIZE;
378 return (ret);
379 }
380
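/*
 * Build the initial kernel page tables: NKPT page table pages covering
 * the kernel image and early allocations, a 2MB-page direct map of
 * physical memory (at least 4GB worth of PDP entries), and a PML4
 * whose slots connect the kernel VA range, the direct map, and the
 * recursive self-mapping.
 */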
381 static void
382 create_pagetables(void)
383 {
384 int i;
385
386 /* Allocate pages */
387 KPTphys = allocpages(NKPT);
388 KPML4phys = allocpages(1);
389 KPDPphys = allocpages(NKPML4E);
390 KPDphys = allocpages(NKPDPE);
391
392 ndmpdp = (ptoa(Maxmem) + NBPDP - 1) >> PDPSHIFT;
393 if (ndmpdp < 4) /* Minimum 4GB of dirmap */
394 ndmpdp = 4;
395 DMPDPphys = allocpages(NDMPML4E);
396 DMPDphys = allocpages(ndmpdp);
397 dmaplimit = (vm_paddr_t)ndmpdp << PDPSHIFT;
398
399 /* Fill in the underlying page table pages */
400 /* Read-only from zero to physfree */
401 /* XXX not fully used, underneath 2M pages */
402 for (i = 0; (i << PAGE_SHIFT) < avail_start; i++) {
403 ((pt_entry_t *)KPTphys)[i] = i << PAGE_SHIFT;
404 ((pt_entry_t *)KPTphys)[i] |= PG_RW | PG_V | PG_G;
405 }
406
407 /* Now map the page tables at their location within PTmap */
408 for (i = 0; i < NKPT; i++) {
409 ((pd_entry_t *)KPDphys)[i] = KPTphys + (i << PAGE_SHIFT);
410 ((pd_entry_t *)KPDphys)[i] |= PG_RW | PG_V;
411 }
412
413 /* Map from zero to end of allocations under 2M pages */
414 /* This replaces some of the KPTphys entries above */
415 for (i = 0; (i << PDRSHIFT) < avail_start; i++) {
416 ((pd_entry_t *)KPDphys)[i] = i << PDRSHIFT;
417 ((pd_entry_t *)KPDphys)[i] |= PG_RW | PG_V | PG_PS | PG_G;
418 }
419
420 /* And connect up the PD to the PDP */
421 for (i = 0; i < NKPDPE; i++) {
422 ((pdp_entry_t *)KPDPphys)[i + KPDPI] = KPDphys + (i << PAGE_SHIFT);
423 ((pdp_entry_t *)KPDPphys)[i + KPDPI] |= PG_RW | PG_V | PG_U;
424 }
425
426
427 /* Now set up the direct map space using 2MB pages */
428 for (i = 0; i < NPDEPG * ndmpdp; i++) {
429 ((pd_entry_t *)DMPDphys)[i] = (vm_paddr_t)i << PDRSHIFT;
430 ((pd_entry_t *)DMPDphys)[i] |= PG_RW | PG_V | PG_PS | PG_G;
431 }
432
433 /* And the direct map space's PDP */
434 for (i = 0; i < ndmpdp; i++) {
435 ((pdp_entry_t *)DMPDPphys)[i] = DMPDphys + (i << PAGE_SHIFT);
436 ((pdp_entry_t *)DMPDPphys)[i] |= PG_RW | PG_V | PG_U;
437 }
438
439 /* And recursively map PML4 to itself in order to get PTmap */
440 ((pdp_entry_t *)KPML4phys)[PML4PML4I] = KPML4phys;
441 ((pdp_entry_t *)KPML4phys)[PML4PML4I] |= PG_RW | PG_V | PG_U;
442
443 /* Connect the Direct Map slot up to the PML4 */
444 ((pdp_entry_t *)KPML4phys)[DMPML4I] = DMPDPphys;
445 ((pdp_entry_t *)KPML4phys)[DMPML4I] |= PG_RW | PG_V | PG_U;
446
447 /* Connect the KVA slot up to the PML4 */
448 ((pdp_entry_t *)KPML4phys)[KPML4I] = KPDPphys;
449 ((pdp_entry_t *)KPML4phys)[KPML4I] |= PG_RW | PG_V | PG_U;
450 }
451
452 /*
453 * Bootstrap the system enough to run with virtual memory.
454 *
455 * On amd64 this is called after mapping has already been enabled
456 * and just syncs the pmap module with what has already been done.
457 * [We can't call it easily with mapping off since the kernel is not
458 * mapped with PA == VA, hence we would have to relocate every address
459 * from the linked base (virtual) address "KERNBASE" to the actual
460 * (physical) address starting relative to 0]
461 */
462 void
463 pmap_bootstrap(firstaddr)
464 vm_paddr_t *firstaddr;
465 {
466 vm_offset_t va;
467 pt_entry_t *pte, *unused;
468
469 avail_start = *firstaddr;
470
471 /*
472 * Create an initial set of page tables to run the kernel in.
473 */
474 create_pagetables();
475 *firstaddr = avail_start;
476
477 virtual_avail = (vm_offset_t) KERNBASE + avail_start;
478 virtual_avail = pmap_kmem_choose(virtual_avail);
479
480 virtual_end = VM_MAX_KERNEL_ADDRESS;
481
482
483 /* XXX do %cr0 as well */
484 load_cr4(rcr4() | CR4_PGE | CR4_PSE);
485 load_cr3(KPML4phys);
486
487 /*
488 * Initialize the kernel pmap (which is statically allocated).
489 */
490 PMAP_LOCK_INIT(kernel_pmap);
491 kernel_pmap->pm_pml4 = (pdp_entry_t *) (KERNBASE + KPML4phys);
492 kernel_pmap->pm_active = -1; /* don't allow deactivation */
493 TAILQ_INIT(&kernel_pmap->pm_pvlist);
494 nkpt = NKPT;
495
496 /*
497 * Reserve some special page table entries/VA space for temporary
498 * mapping of pages.
499 */
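/*
 * SYSMAP(type, ptevar, vavar, npages): hand out npages pages of KVA
 * starting at the current va, recording the starting address in vavar
 * and the first PTE pointer in ptevar, then advance both cursors.
 */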
500 #define SYSMAP(c, p, v, n) \
501 v = (c)va; va += ((n)*PAGE_SIZE); p = pte; pte += (n);
502
503 va = virtual_avail;
504 pte = vtopte(va);
505
506 /*
507 * CMAP1 is only used for the memory test.
508 */
509 SYSMAP(caddr_t, CMAP1, CADDR1, 1)
510
511 /*
512 * Crashdump maps.
513 */
514 SYSMAP(caddr_t, unused, crashdumpmap, MAXDUMPPGS)
515
516 /*
517 * msgbufp is used to map the system message buffer.
518 */
519 SYSMAP(struct msgbuf *, unused, msgbufp, atop(round_page(MSGBUF_SIZE)))
520
521 virtual_avail = va;
522
523 *CMAP1 = 0;
524
525 invltlb();
526 }
527
528 /*
529 * Initialize the pmap module.
530 * Called by vm_init, to initialize any structures that the pmap
531 * system needs to map virtual memory.
532 * pmap_init has been enhanced to support, in a fairly consistent
533 * way, discontiguous physical memory.
534 */
535 void
536 pmap_init(void)
537 {
538 int i;
539
540 /*
541 * Allocate memory for random pmap data structures. Includes the
542 * pv_head_table.
543 */
544
545 for(i = 0; i < vm_page_array_size; i++) {
546 vm_page_t m;
547
548 m = &vm_page_array[i];
549 TAILQ_INIT(&m->md.pv_list);
550 m->md.pv_list_count = 0;
551 }
552
553 /*
554 * init the pv free list
555 */
556 pvzone = uma_zcreate("PV ENTRY", sizeof (struct pv_entry), NULL, NULL,
557 NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_VM | UMA_ZONE_NOFREE);
558 uma_prealloc(pvzone, MINPV);
559
560 /*
561 * Now it is safe to enable pv_table recording.
562 */
563 pmap_initialized = TRUE;
564 }
565
566 /*
567 * Initialize the address space (zone) for the pv_entries. Set a
568 * high water mark so that the system can recover from excessive
569 * numbers of pv entries.
570 */
571 void
572 pmap_init2()
573 {
574 int shpgperproc = PMAP_SHPGPERPROC;
575
576 TUNABLE_INT_FETCH("vm.pmap.shpgperproc", &shpgperproc);
577 pv_entry_max = shpgperproc * maxproc + vm_page_array_size;
578 TUNABLE_INT_FETCH("vm.pmap.pv_entries", &pv_entry_max);
579 pv_entry_high_water = 9 * (pv_entry_max / 10);
580 uma_zone_set_obj(pvzone, &pvzone_obj, pv_entry_max);
581 }
582
583
584 /***************************************************
585 * Low level helper routines.....
586 ***************************************************/
587
588 #if defined(PMAP_DIAGNOSTIC)
589
590 /*
591 * This code checks for non-writeable/modified pages.
592 * This should be an invalid condition.
593 */
594 static int
595 pmap_nw_modified(pt_entry_t ptea)
596 {
597 int pte;
598
599 pte = (int) ptea;
600
601 if ((pte & (PG_M|PG_RW)) == PG_M)
602 return 1;
603 else
604 return 0;
605 }
606 #endif
607
608
609 /*
610 * this routine defines the region(s) of memory that should
611 * not be tested for the modified bit.
612 */
613 static PMAP_INLINE int
614 pmap_track_modified(vm_offset_t va)
615 {
616 if ((va < kmi.clean_sva) || (va >= kmi.clean_eva))
617 return 1;
618 else
619 return 0;
620 }
621
622 #ifdef SMP
623 /*
624 * For SMP, these functions have to use the IPI mechanism for coherence.
625 */
626 void
627 pmap_invalidate_page(pmap_t pmap, vm_offset_t va)
628 {
629 u_int cpumask;
630 u_int other_cpus;
631
632 if (smp_started) {
633 if (!(read_rflags() & PSL_I))
634 panic("%s: interrupts disabled", __func__);
635 mtx_lock_spin(&smp_ipi_mtx);
636 } else
637 critical_enter();
638 /*
639 * We need to disable interrupt preemption but MUST NOT have
640 * interrupts disabled here.
641 * XXX we may need to hold schedlock to get a coherent pm_active
642 * XXX critical sections disable interrupts again
643 */
644 if (pmap == kernel_pmap || pmap->pm_active == all_cpus) {
645 invlpg(va);
646 smp_invlpg(va);
647 } else {
648 cpumask = PCPU_GET(cpumask);
649 other_cpus = PCPU_GET(other_cpus);
650 if (pmap->pm_active & cpumask)
651 invlpg(va);
652 if (pmap->pm_active & other_cpus)
653 smp_masked_invlpg(pmap->pm_active & other_cpus, va);
654 }
655 if (smp_started)
656 mtx_unlock_spin(&smp_ipi_mtx);
657 else
658 critical_exit();
659 }
660
661 void
662 pmap_invalidate_range(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
663 {
664 u_int cpumask;
665 u_int other_cpus;
666 vm_offset_t addr;
667
668 if (smp_started) {
669 if (!(read_rflags() & PSL_I))
670 panic("%s: interrupts disabled", __func__);
671 mtx_lock_spin(&smp_ipi_mtx);
672 } else
673 critical_enter();
674 /*
675 * We need to disable interrupt preemption but MUST NOT have
676 * interrupts disabled here.
677 * XXX we may need to hold schedlock to get a coherent pm_active
678 * XXX critical sections disable interrupts again
679 */
680 if (pmap == kernel_pmap || pmap->pm_active == all_cpus) {
681 for (addr = sva; addr < eva; addr += PAGE_SIZE)
682 invlpg(addr);
683 smp_invlpg_range(sva, eva);
684 } else {
685 cpumask = PCPU_GET(cpumask);
686 other_cpus = PCPU_GET(other_cpus);
687 if (pmap->pm_active & cpumask)
688 for (addr = sva; addr < eva; addr += PAGE_SIZE)
689 invlpg(addr);
690 if (pmap->pm_active & other_cpus)
691 smp_masked_invlpg_range(pmap->pm_active & other_cpus,
692 sva, eva);
693 }
694 if (smp_started)
695 mtx_unlock_spin(&smp_ipi_mtx);
696 else
697 critical_exit();
698 }
699
700 void
701 pmap_invalidate_all(pmap_t pmap)
702 {
703 u_int cpumask;
704 u_int other_cpus;
705
706 if (smp_started) {
707 if (!(read_rflags() & PSL_I))
708 panic("%s: interrupts disabled", __func__);
709 mtx_lock_spin(&smp_ipi_mtx);
710 } else
711 critical_enter();
712 /*
713 * We need to disable interrupt preemption but MUST NOT have
714 * interrupts disabled here.
715 * XXX we may need to hold schedlock to get a coherent pm_active
716 * XXX critical sections disable interrupts again
717 */
718 if (pmap == kernel_pmap || pmap->pm_active == all_cpus) {
719 invltlb();
720 smp_invltlb();
721 } else {
722 cpumask = PCPU_GET(cpumask);
723 other_cpus = PCPU_GET(other_cpus);
724 if (pmap->pm_active & cpumask)
725 invltlb();
726 if (pmap->pm_active & other_cpus)
727 smp_masked_invltlb(pmap->pm_active & other_cpus);
728 }
729 if (smp_started)
730 mtx_unlock_spin(&smp_ipi_mtx);
731 else
732 critical_exit();
733 }
734 #else /* !SMP */
735 /*
736 * Normal, non-SMP, invalidation functions.
737 * We inline these within pmap.c for speed.
738 */
739 PMAP_INLINE void
740 pmap_invalidate_page(pmap_t pmap, vm_offset_t va)
741 {
742
743 if (pmap == kernel_pmap || pmap->pm_active)
744 invlpg(va);
745 }
746
747 PMAP_INLINE void
748 pmap_invalidate_range(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
749 {
750 vm_offset_t addr;
751
752 if (pmap == kernel_pmap || pmap->pm_active)
753 for (addr = sva; addr < eva; addr += PAGE_SIZE)
754 invlpg(addr);
755 }
756
757 PMAP_INLINE void
758 pmap_invalidate_all(pmap_t pmap)
759 {
760
761 if (pmap == kernel_pmap || pmap->pm_active)
762 invltlb();
763 }
764 #endif /* !SMP */
765
766 /*
767 * Are we current address space or kernel?
768 */
769 static __inline int
770 pmap_is_current(pmap_t pmap)
771 {
772 return (pmap == kernel_pmap ||
773 (pmap->pm_pml4[PML4PML4I] & PG_FRAME) == (PML4pml4e[0] & PG_FRAME));
774 }
775
776 /*
777 * Routine: pmap_extract
778 * Function:
779 * Extract the physical page address associated
780 * with the given map/virtual_address pair.
781 */
782 vm_paddr_t
783 pmap_extract(pmap_t pmap, vm_offset_t va)
784 {
785 vm_paddr_t rtval;
786 pt_entry_t *pte;
787 pd_entry_t pde, *pdep;
788
789 rtval = 0;
790 PMAP_LOCK(pmap);
791 pdep = pmap_pde(pmap, va);
792 if (pdep != NULL) {
793 pde = *pdep;
794 if (pde) {
795 if ((pde & PG_PS) != 0) {
796 rtval = (pde & ~PDRMASK) | (va & PDRMASK);
797 PMAP_UNLOCK(pmap);
798 return rtval;
799 }
800 pte = pmap_pte(pmap, va);
801 rtval = (*pte & PG_FRAME) | (va & PAGE_MASK);
802 }
803 }
804 PMAP_UNLOCK(pmap);
805 return (rtval);
806 }
807
808 /*
809 * Routine: pmap_extract_and_hold
810 * Function:
811 * Atomically extract and hold the physical page
812 * with the given pmap and virtual address pair
813 * if that mapping permits the given protection.
814 */
815 vm_page_t
816 pmap_extract_and_hold(pmap_t pmap, vm_offset_t va, vm_prot_t prot)
817 {
818 pd_entry_t pde, *pdep;
819 pt_entry_t pte;
820 vm_page_t m;
821
822 m = NULL;
823 vm_page_lock_queues();
824 PMAP_LOCK(pmap);
825 pdep = pmap_pde(pmap, va);
826 if (pdep != NULL && (pde = *pdep)) {
827 if (pde & PG_PS) {
828 if ((pde & PG_RW) || (prot & VM_PROT_WRITE) == 0) {
829 m = PHYS_TO_VM_PAGE((pde & ~PDRMASK) |
830 (va & PDRMASK));
831 vm_page_hold(m);
832 }
833 } else {
834 pte = *pmap_pte(pmap, va);
835 if ((pte & PG_V) &&
836 ((pte & PG_RW) || (prot & VM_PROT_WRITE) == 0)) {
837 m = PHYS_TO_VM_PAGE(pte & PG_FRAME);
838 vm_page_hold(m);
839 }
840 }
841 }
842 vm_page_unlock_queues();
843 PMAP_UNLOCK(pmap);
844 return (m);
845 }
846
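/*
 * pmap_kextract: extract the physical address for a kernel virtual
 * address without acquiring any locks.  Direct map addresses are
 * translated arithmetically; other addresses are looked up in the
 * kernel page tables, honoring 2MB mappings.
 */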
847 vm_paddr_t
848 pmap_kextract(vm_offset_t va)
849 {
850 pd_entry_t *pde;
851 vm_paddr_t pa;
852
853 if (va >= DMAP_MIN_ADDRESS && va < DMAP_MAX_ADDRESS) {
854 pa = DMAP_TO_PHYS(va);
855 } else {
856 pde = pmap_pde(kernel_pmap, va);
857 if (*pde & PG_PS) {
858 pa = (*pde & ~(NBPDR - 1)) | (va & (NBPDR - 1));
859 } else {
860 pa = *vtopte(va);
861 pa = (pa & PG_FRAME) | (va & PAGE_MASK);
862 }
863 }
864 return pa;
865 }
866
867 /***************************************************
868 * Low level mapping routines.....
869 ***************************************************/
870
871 /*
872 * Add a wired page to the kva.
873 * Note: not SMP coherent.
874 */
875 PMAP_INLINE void
876 pmap_kenter(vm_offset_t va, vm_paddr_t pa)
877 {
878 pt_entry_t *pte;
879
880 pte = vtopte(va);
881 pte_store(pte, pa | PG_RW | PG_V | PG_G);
882 }
883
884 /*
885 * Remove a page from the kernel pagetables.
886 * Note: not SMP coherent.
887 */
888 PMAP_INLINE void
889 pmap_kremove(vm_offset_t va)
890 {
891 pt_entry_t *pte;
892
893 pte = vtopte(va);
894 pte_clear(pte);
895 }
896
897 /*
898 * Used to map a range of physical addresses into kernel
899 * virtual address space.
900 *
901 * The value passed in '*virt' is a suggested virtual address for
902 * the mapping. Architectures which can support a direct-mapped
903 * physical to virtual region can return the appropriate address
904 * within that region, leaving '*virt' unchanged. Other
905 * architectures should map the pages starting at '*virt' and
906 * update '*virt' with the first usable address after the mapped
907 * region.
908 */
909 vm_offset_t
910 pmap_map(vm_offset_t *virt, vm_paddr_t start, vm_paddr_t end, int prot)
911 {
912 return PHYS_TO_DMAP(start);
913 }
914
915
916 /*
917 * Add a list of wired pages to the kva.
918 * This routine is only used for temporary
919 * kernel mappings that do not need to have
920 * page modification or references recorded.
921 * Note that old mappings are simply written
922 * over. The page *must* be wired.
923 * Note: SMP coherent. Uses a ranged shootdown IPI.
924 */
925 void
926 pmap_qenter(vm_offset_t sva, vm_page_t *m, int count)
927 {
928 vm_offset_t va;
929
930 va = sva;
931 while (count-- > 0) {
932 pmap_kenter(va, VM_PAGE_TO_PHYS(*m));
933 va += PAGE_SIZE;
934 m++;
935 }
936 pmap_invalidate_range(kernel_pmap, sva, va);
937 }
938
939 /*
940 * This routine tears out page mappings from the
941 * kernel -- it is meant only for temporary mappings.
942 * Note: SMP coherent. Uses a ranged shootdown IPI.
943 */
944 void
945 pmap_qremove(vm_offset_t sva, int count)
946 {
947 vm_offset_t va;
948
949 va = sva;
950 while (count-- > 0) {
951 pmap_kremove(va);
952 va += PAGE_SIZE;
953 }
954 pmap_invalidate_range(kernel_pmap, sva, va);
955 }
956
957 /***************************************************
958 * Page table page management routines.....
959 ***************************************************/
960
961 /*
962 * This routine unholds page table pages, and if the hold count
963 * drops to zero, then it decrements the wire count.
964 */
965 static PMAP_INLINE int
966 pmap_unwire_pte_hold(pmap_t pmap, vm_offset_t va, vm_page_t m)
967 {
968
969 --m->wire_count;
970 if (m->wire_count == 0)
971 return _pmap_unwire_pte_hold(pmap, va, m);
972 else
973 return 0;
974 }
975
976 static int
977 _pmap_unwire_pte_hold(pmap_t pmap, vm_offset_t va, vm_page_t m)
978 {
979 vm_offset_t pteva;
980
981 /*
982 * unmap the page table page
983 */
984 if (m->pindex >= (NUPDE + NUPDPE)) {
985 /* PDP page */
986 pml4_entry_t *pml4;
987 pml4 = pmap_pml4e(pmap, va);
988 pteva = (vm_offset_t) PDPmap + amd64_ptob(m->pindex - (NUPDE + NUPDPE));
989 *pml4 = 0;
990 } else if (m->pindex >= NUPDE) {
991 /* PD page */
992 pdp_entry_t *pdp;
993 pdp = pmap_pdpe(pmap, va);
994 pteva = (vm_offset_t) PDmap + amd64_ptob(m->pindex - NUPDE);
995 *pdp = 0;
996 } else {
997 /* PTE page */
998 pd_entry_t *pd;
999 pd = pmap_pde(pmap, va);
1000 pteva = (vm_offset_t) PTmap + amd64_ptob(m->pindex);
1001 *pd = 0;
1002 }
1003 --pmap->pm_stats.resident_count;
1004 if (m->pindex < NUPDE) {
1005 /* We just released a PT, unhold the matching PD */
1006 vm_page_t pdpg;
1007
1008 pdpg = PHYS_TO_VM_PAGE(*pmap_pdpe(pmap, va) & PG_FRAME);
1009 pmap_unwire_pte_hold(pmap, va, pdpg);
1010 }
1011 if (m->pindex >= NUPDE && m->pindex < (NUPDE + NUPDPE)) {
1012 /* We just released a PD, unhold the matching PDP */
1013 vm_page_t pdppg;
1014
1015 pdppg = PHYS_TO_VM_PAGE(*pmap_pml4e(pmap, va) & PG_FRAME);
1016 pmap_unwire_pte_hold(pmap, va, pdppg);
1017 }
1018
1019 /*
1020 * Do an invltlb to make the invalidated mapping
1021 * take effect immediately.
1022 */
1023 pmap_invalidate_page(pmap, pteva);
1024
1025 vm_page_free_zero(m);
1026 atomic_subtract_int(&cnt.v_wire_count, 1);
1027 return 1;
1028 }
1029
1030 /*
1031 * After removing a page table entry, this routine is used to
1032 * conditionally free the page, and manage the hold/wire counts.
1033 */
1034 static int
1035 pmap_unuse_pt(pmap_t pmap, vm_offset_t va, pd_entry_t ptepde)
1036 {
1037 vm_page_t mpte;
1038
1039 if (va >= VM_MAXUSER_ADDRESS)
1040 return 0;
1041 KASSERT(ptepde != 0, ("pmap_unuse_pt: ptepde != 0"));
1042 mpte = PHYS_TO_VM_PAGE(ptepde & PG_FRAME);
1043 return pmap_unwire_pte_hold(pmap, va, mpte);
1044 }
1045
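/*
 * Initialize the pmap for process 0.  It shares the bootstrap kernel
 * PML4 page rather than allocating one of its own.
 */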
1046 void
1047 pmap_pinit0(pmap)
1048 struct pmap *pmap;
1049 {
1050
1051 PMAP_LOCK_INIT(pmap);
1052 pmap->pm_pml4 = (pml4_entry_t *)(KERNBASE + KPML4phys);
1053 pmap->pm_active = 0;
1054 TAILQ_INIT(&pmap->pm_pvlist);
1055 bzero(&pmap->pm_stats, sizeof pmap->pm_stats);
1056 }
1057
1058 /*
1059 * Initialize a preallocated and zeroed pmap structure,
1060 * such as one in a vmspace structure.
1061 */
1062 void
1063 pmap_pinit(pmap)
1064 register struct pmap *pmap;
1065 {
1066 vm_page_t pml4pg;
1067 static vm_pindex_t color;
1068
1069 PMAP_LOCK_INIT(pmap);
1070
1071 /*
1072 * allocate the page directory page
1073 */
1074 while ((pml4pg = vm_page_alloc(NULL, color++, VM_ALLOC_NOOBJ |
1075 VM_ALLOC_NORMAL | VM_ALLOC_WIRED | VM_ALLOC_ZERO)) == NULL)
1076 VM_WAIT;
1077
1078 pmap->pm_pml4 = (pml4_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(pml4pg));
1079
1080 if ((pml4pg->flags & PG_ZERO) == 0)
1081 pagezero(pmap->pm_pml4);
1082
1083 /* Wire in kernel global address entries. */
1084 pmap->pm_pml4[KPML4I] = KPDPphys | PG_RW | PG_V | PG_U;
1085 pmap->pm_pml4[DMPML4I] = DMPDPphys | PG_RW | PG_V | PG_U;
1086
1087 /* install self-referential address mapping entry(s) */
1088 pmap->pm_pml4[PML4PML4I] = VM_PAGE_TO_PHYS(pml4pg) | PG_V | PG_RW | PG_A | PG_M;
1089
1090 pmap->pm_active = 0;
1091 TAILQ_INIT(&pmap->pm_pvlist);
1092 bzero(&pmap->pm_stats, sizeof pmap->pm_stats);
1093 }
1094
1095 /*
1096 * this routine is called if the page table page is not
1097 * mapped correctly.
1098 *
1099 * Note: If a page allocation fails at page table level two or three,
1100 * one or two pages may be held during the wait, only to be released
1101 * afterwards. This conservative approach is easily argued to avoid
1102 * race conditions.
1103 */
1104 static vm_page_t
1105 _pmap_allocpte(pmap_t pmap, vm_pindex_t ptepindex, int flags)
1106 {
1107 vm_page_t m, pdppg, pdpg;
1108
1109 KASSERT((flags & (M_NOWAIT | M_WAITOK)) == M_NOWAIT ||
1110 (flags & (M_NOWAIT | M_WAITOK)) == M_WAITOK,
1111 ("_pmap_allocpte: flags is neither M_NOWAIT nor M_WAITOK"));
1112
1113 /*
1114 * Allocate a page table page.
1115 */
1116 if ((m = vm_page_alloc(NULL, ptepindex, VM_ALLOC_NOOBJ |
1117 VM_ALLOC_WIRED | VM_ALLOC_ZERO)) == NULL) {
1118 if (flags & M_WAITOK) {
1119 PMAP_UNLOCK(pmap);
1120 vm_page_unlock_queues();
1121 VM_WAIT;
1122 vm_page_lock_queues();
1123 PMAP_LOCK(pmap);
1124 }
1125
1126 /*
1127 * Indicate the need to retry. While waiting, the page table
1128 * page may have been allocated.
1129 */
1130 return (NULL);
1131 }
1132 if ((m->flags & PG_ZERO) == 0)
1133 pmap_zero_page(m);
1134
1135 /*
1136 * Map the pagetable page into the process address space, if
1137 * it isn't already there.
1138 */
1139
1140 pmap->pm_stats.resident_count++;
1141
1142 if (ptepindex >= (NUPDE + NUPDPE)) {
1143 pml4_entry_t *pml4;
1144 vm_pindex_t pml4index;
1145
1146 /* Wire up a new PDPE page */
1147 pml4index = ptepindex - (NUPDE + NUPDPE);
1148 pml4 = &pmap->pm_pml4[pml4index];
1149 *pml4 = VM_PAGE_TO_PHYS(m) | PG_U | PG_RW | PG_V | PG_A | PG_M;
1150
1151 } else if (ptepindex >= NUPDE) {
1152 vm_pindex_t pml4index;
1153 vm_pindex_t pdpindex;
1154 pml4_entry_t *pml4;
1155 pdp_entry_t *pdp;
1156
1157 /* Wire up a new PDE page */
1158 pdpindex = ptepindex - NUPDE;
1159 pml4index = pdpindex >> NPML4EPGSHIFT;
1160
1161 pml4 = &pmap->pm_pml4[pml4index];
1162 if ((*pml4 & PG_V) == 0) {
1163 /* Have to allocate a new pdp, recurse */
1164 if (_pmap_allocpte(pmap, NUPDE + NUPDPE + pml4index,
1165 flags) == NULL) {
1166 --m->wire_count;
1167 vm_page_free(m);
1168 return (NULL);
1169 }
1170 } else {
1171 /* Add reference to pdp page */
1172 pdppg = PHYS_TO_VM_PAGE(*pml4 & PG_FRAME);
1173 pdppg->wire_count++;
1174 }
1175 pdp = (pdp_entry_t *)PHYS_TO_DMAP(*pml4 & PG_FRAME);
1176
1177 /* Now find the pdp page */
1178 pdp = &pdp[pdpindex & ((1ul << NPDPEPGSHIFT) - 1)];
1179 *pdp = VM_PAGE_TO_PHYS(m) | PG_U | PG_RW | PG_V | PG_A | PG_M;
1180
1181 } else {
1182 vm_pindex_t pml4index;
1183 vm_pindex_t pdpindex;
1184 pml4_entry_t *pml4;
1185 pdp_entry_t *pdp;
1186 pd_entry_t *pd;
1187
1188 /* Wire up a new PTE page */
1189 pdpindex = ptepindex >> NPDPEPGSHIFT;
1190 pml4index = pdpindex >> NPML4EPGSHIFT;
1191
1192 /* First, find the pdp and check that it's valid. */
1193 pml4 = &pmap->pm_pml4[pml4index];
1194 if ((*pml4 & PG_V) == 0) {
1195 /* Have to allocate a new pd, recurse */
1196 if (_pmap_allocpte(pmap, NUPDE + pdpindex,
1197 flags) == NULL) {
1198 --m->wire_count;
1199 vm_page_free(m);
1200 return (NULL);
1201 }
1202 pdp = (pdp_entry_t *)PHYS_TO_DMAP(*pml4 & PG_FRAME);
1203 pdp = &pdp[pdpindex & ((1ul << NPDPEPGSHIFT) - 1)];
1204 } else {
1205 pdp = (pdp_entry_t *)PHYS_TO_DMAP(*pml4 & PG_FRAME);
1206 pdp = &pdp[pdpindex & ((1ul << NPDPEPGSHIFT) - 1)];
1207 if ((*pdp & PG_V) == 0) {
1208 /* Have to allocate a new pd, recurse */
1209 if (_pmap_allocpte(pmap, NUPDE + pdpindex,
1210 flags) == NULL) {
1211 --m->wire_count;
1212 vm_page_free(m);
1213 return (NULL);
1214 }
1215 } else {
1216 /* Add reference to the pd page */
1217 pdpg = PHYS_TO_VM_PAGE(*pdp & PG_FRAME);
1218 pdpg->wire_count++;
1219 }
1220 }
1221 pd = (pd_entry_t *)PHYS_TO_DMAP(*pdp & PG_FRAME);
1222
1223 /* Now we know where the page directory page is */
1224 pd = &pd[ptepindex & ((1ul << NPDEPGSHIFT) - 1)];
1225 *pd = VM_PAGE_TO_PHYS(m) | PG_U | PG_RW | PG_V | PG_A | PG_M;
1226 }
1227
1228 return m;
1229 }
1230
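/*
 * Return, wired, the page table page that maps va, calling
 * _pmap_allocpte() to create it if it is not already present.  An
 * existing 2MB mapping at the PDE is simply destroyed first.
 */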
1231 static vm_page_t
1232 pmap_allocpte(pmap_t pmap, vm_offset_t va, int flags)
1233 {
1234 vm_pindex_t ptepindex;
1235 pd_entry_t *pd;
1236 vm_page_t m;
1237
1238 KASSERT((flags & (M_NOWAIT | M_WAITOK)) == M_NOWAIT ||
1239 (flags & (M_NOWAIT | M_WAITOK)) == M_WAITOK,
1240 ("pmap_allocpte: flags is neither M_NOWAIT nor M_WAITOK"));
1241
1242 /*
1243 * Calculate pagetable page index
1244 */
1245 ptepindex = pmap_pde_pindex(va);
1246 retry:
1247 /*
1248 * Get the page directory entry
1249 */
1250 pd = pmap_pde(pmap, va);
1251
1252 /*
1253 * This supports switching from a 2MB page to a
1254 * normal 4K page.
1255 */
1256 if (pd != 0 && (*pd & (PG_PS | PG_V)) == (PG_PS | PG_V)) {
1257 *pd = 0;
1258 pd = 0;
1259 pmap_invalidate_all(kernel_pmap);
1260 }
1261
1262 /*
1263 * If the page table page is mapped, we just increment the
1264 * hold count, and activate it.
1265 */
1266 if (pd != 0 && (*pd & PG_V) != 0) {
1267 m = PHYS_TO_VM_PAGE(*pd & PG_FRAME);
1268 m->wire_count++;
1269 } else {
1270 /*
1271 * Here if the pte page isn't mapped, or if it has been
1272 * deallocated.
1273 */
1274 m = _pmap_allocpte(pmap, ptepindex, flags);
1275 if (m == NULL && (flags & M_WAITOK))
1276 goto retry;
1277 }
1278 return (m);
1279 }
1280
1281
1282 /***************************************************
1283 * Pmap allocation/deallocation routines.
1284 ***************************************************/
1285
1286 /*
1287 * Release any resources held by the given physical map.
1288 * Called when a pmap initialized by pmap_pinit is being released.
1289 * Should only be called if the map contains no valid mappings.
1290 */
1291 void
1292 pmap_release(pmap_t pmap)
1293 {
1294 vm_page_t m;
1295
1296 KASSERT(pmap->pm_stats.resident_count == 0,
1297 ("pmap_release: pmap resident count %ld != 0",
1298 pmap->pm_stats.resident_count));
1299
1300 m = PHYS_TO_VM_PAGE(pmap->pm_pml4[PML4PML4I] & PG_FRAME);
1301
1302 pmap->pm_pml4[KPML4I] = 0; /* KVA */
1303 pmap->pm_pml4[DMPML4I] = 0; /* Direct Map */
1304 pmap->pm_pml4[PML4PML4I] = 0; /* Recursive Mapping */
1305
1306 vm_page_lock_queues();
1307 m->wire_count--;
1308 atomic_subtract_int(&cnt.v_wire_count, 1);
1309 vm_page_free_zero(m);
1310 vm_page_unlock_queues();
1311 PMAP_LOCK_DESTROY(pmap);
1312 }
1313
1314 static int
1315 kvm_size(SYSCTL_HANDLER_ARGS)
1316 {
1317 unsigned long ksize = VM_MAX_KERNEL_ADDRESS - KERNBASE;
1318
1319 return sysctl_handle_long(oidp, &ksize, 0, req);
1320 }
1321 SYSCTL_PROC(_vm, OID_AUTO, kvm_size, CTLTYPE_LONG|CTLFLAG_RD,
1322 0, 0, kvm_size, "IU", "Size of KVM");
1323
1324 static int
1325 kvm_free(SYSCTL_HANDLER_ARGS)
1326 {
1327 unsigned long kfree = VM_MAX_KERNEL_ADDRESS - kernel_vm_end;
1328
1329 return sysctl_handle_long(oidp, &kfree, 0, req);
1330 }
1331 SYSCTL_PROC(_vm, OID_AUTO, kvm_free, CTLTYPE_LONG|CTLFLAG_RD,
1332 0, 0, kvm_free, "IU", "Amount of KVM free");
1333
1334 /*
1335 * grow the number of kernel page table entries, if needed
1336 */
1337 void
1338 pmap_growkernel(vm_offset_t addr)
1339 {
1340 vm_paddr_t paddr;
1341 vm_page_t nkpg;
1342 pd_entry_t *pde, newpdir;
1343 pdp_entry_t newpdp;
1344
1345 mtx_assert(&kernel_map->system_mtx, MA_OWNED);
1346 if (kernel_vm_end == 0) {
1347 kernel_vm_end = KERNBASE;
1348 nkpt = 0;
1349 while ((*pmap_pde(kernel_pmap, kernel_vm_end) & PG_V) != 0) {
1350 kernel_vm_end = (kernel_vm_end + PAGE_SIZE * NPTEPG) & ~(PAGE_SIZE * NPTEPG - 1);
1351 nkpt++;
1352 }
1353 }
1354 addr = roundup2(addr, PAGE_SIZE * NPTEPG);
1355 while (kernel_vm_end < addr) {
1356 pde = pmap_pde(kernel_pmap, kernel_vm_end);
1357 if (pde == NULL) {
1358 /* We need a new PDP entry */
1359 nkpg = vm_page_alloc(NULL, nkpt,
1360 VM_ALLOC_NOOBJ | VM_ALLOC_SYSTEM | VM_ALLOC_WIRED);
1361 if (!nkpg)
1362 panic("pmap_growkernel: no memory to grow kernel");
1363 pmap_zero_page(nkpg);
1364 paddr = VM_PAGE_TO_PHYS(nkpg);
1365 newpdp = (pdp_entry_t)
1366 (paddr | PG_V | PG_RW | PG_A | PG_M);
1367 *pmap_pdpe(kernel_pmap, kernel_vm_end) = newpdp;
1368 continue; /* try again */
1369 }
1370 if ((*pde & PG_V) != 0) {
1371 kernel_vm_end = (kernel_vm_end + PAGE_SIZE * NPTEPG) & ~(PAGE_SIZE * NPTEPG - 1);
1372 continue;
1373 }
1374
1375 /*
1376 * This index is bogus, but out of the way
1377 */
1378 nkpg = vm_page_alloc(NULL, nkpt,
1379 VM_ALLOC_NOOBJ | VM_ALLOC_SYSTEM | VM_ALLOC_WIRED);
1380 if (!nkpg)
1381 panic("pmap_growkernel: no memory to grow kernel");
1382
1383 nkpt++;
1384
1385 pmap_zero_page(nkpg);
1386 paddr = VM_PAGE_TO_PHYS(nkpg);
1387 newpdir = (pd_entry_t) (paddr | PG_V | PG_RW | PG_A | PG_M);
1388 *pmap_pde(kernel_pmap, kernel_vm_end) = newpdir;
1389
1390 kernel_vm_end = (kernel_vm_end + PAGE_SIZE * NPTEPG) & ~(PAGE_SIZE * NPTEPG - 1);
1391 }
1392 }
1393
1394
1395 /***************************************************
1396 * page management routines.
1397 ***************************************************/
1398
1399 /*
1400 * free the pv_entry back to the free list
1401 */
1402 static PMAP_INLINE void
1403 free_pv_entry(pv_entry_t pv)
1404 {
1405 pv_entry_count--;
1406 uma_zfree(pvzone, pv);
1407 }
1408
1409 /*
1410 * get a new pv_entry, allocating a block from the system
1411 * when needed.
1412 * the memory allocation is performed bypassing the malloc code
1413 * because of the possibility of allocations at interrupt time.
1414 */
1415 static pv_entry_t
1416 get_pv_entry(void)
1417 {
1418 pv_entry_count++;
1419 if (pv_entry_high_water &&
1420 (pv_entry_count > pv_entry_high_water) &&
1421 (pmap_pagedaemon_waken == 0)) {
1422 pmap_pagedaemon_waken = 1;
1423 wakeup (&vm_pages_needed);
1424 }
1425 return uma_zalloc(pvzone, M_NOWAIT);
1426 }
1427
1428
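/*
 * Remove the pv entry for (pmap, va) from page m, scanning whichever
 * of the page's or the pmap's pv list is likely shorter, and drop the
 * reference on the backing page table page.
 */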
1429 static int
1430 pmap_remove_entry(pmap_t pmap, vm_page_t m, vm_offset_t va, pd_entry_t ptepde)
1431 {
1432 pv_entry_t pv;
1433 int rtval;
1434
1435 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
1436 mtx_assert(&vm_page_queue_mtx, MA_OWNED);
1437 if (m->md.pv_list_count < pmap->pm_stats.resident_count) {
1438 TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
1439 if (pmap == pv->pv_pmap && va == pv->pv_va)
1440 break;
1441 }
1442 } else {
1443 TAILQ_FOREACH(pv, &pmap->pm_pvlist, pv_plist) {
1444 if (va == pv->pv_va)
1445 break;
1446 }
1447 }
1448
1449 rtval = 0;
1450 if (pv) {
1451 rtval = pmap_unuse_pt(pmap, va, ptepde);
1452 TAILQ_REMOVE(&m->md.pv_list, pv, pv_list);
1453 m->md.pv_list_count--;
1454 if (TAILQ_FIRST(&m->md.pv_list) == NULL)
1455 vm_page_flag_clear(m, PG_WRITEABLE);
1456
1457 TAILQ_REMOVE(&pmap->pm_pvlist, pv, pv_plist);
1458 free_pv_entry(pv);
1459 }
1460
1461 return rtval;
1462 }
1463
1464 /*
1465 * Create a pv entry for page at pa for
1466 * (pmap, va).
1467 */
1468 static void
1469 pmap_insert_entry(pmap_t pmap, vm_offset_t va, vm_page_t m)
1470 {
1471 pv_entry_t pv;
1472
1473 pv = get_pv_entry();
1474 if (pv == NULL)
1475 panic("no pv entries: increase vm.pmap.shpgperproc");
1476 pv->pv_va = va;
1477 pv->pv_pmap = pmap;
1478
1479 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
1480 mtx_assert(&vm_page_queue_mtx, MA_OWNED);
1481 TAILQ_INSERT_TAIL(&pmap->pm_pvlist, pv, pv_plist);
1482 TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_list);
1483 m->md.pv_list_count++;
1484 }
1485
1486 /*
1487 * pmap_remove_pte: do the things to unmap a page in a process
1488 */
1489 static int
1490 pmap_remove_pte(pmap_t pmap, pt_entry_t *ptq, vm_offset_t va, pd_entry_t ptepde)
1491 {
1492 pt_entry_t oldpte;
1493 vm_page_t m;
1494
1495 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
1496 oldpte = pte_load_clear(ptq);
1497 if (oldpte & PG_W)
1498 pmap->pm_stats.wired_count -= 1;
1499 /*
1500 * Machines that don't support invlpg, also don't support
1501 * PG_G.
1502 */
1503 if (oldpte & PG_G)
1504 pmap_invalidate_page(kernel_pmap, va);
1505 pmap->pm_stats.resident_count -= 1;
1506 if (oldpte & PG_MANAGED) {
1507 m = PHYS_TO_VM_PAGE(oldpte & PG_FRAME);
1508 if (oldpte & PG_M) {
1509 #if defined(PMAP_DIAGNOSTIC)
1510 if (pmap_nw_modified((pt_entry_t) oldpte)) {
1511 printf(
1512 "pmap_remove: modified page not writable: va: 0x%lx, pte: 0x%lx\n",
1513 va, oldpte);
1514 }
1515 #endif
1516 if (pmap_track_modified(va))
1517 vm_page_dirty(m);
1518 }
1519 if (oldpte & PG_A)
1520 vm_page_flag_set(m, PG_REFERENCED);
1521 return pmap_remove_entry(pmap, m, va, ptepde);
1522 } else {
1523 return pmap_unuse_pt(pmap, va, ptepde);
1524 }
1525 }
1526
1527 /*
1528 * Remove a single page from a process address space
1529 */
1530 static void
1531 pmap_remove_page(pmap_t pmap, vm_offset_t va)
1532 {
1533 pd_entry_t ptepde;
1534 pt_entry_t *pte;
1535
1536 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
1537 pte = pmap_pte_pde(pmap, va, &ptepde);
1538 if (pte == NULL || (*pte & PG_V) == 0)
1539 return;
1540 pmap_remove_pte(pmap, pte, va, ptepde);
1541 pmap_invalidate_page(pmap, va);
1542 }
1543
1544 /*
1545 * Remove the given range of addresses from the specified map.
1546 *
1547 * It is assumed that the start and end are properly
1548 * rounded to the page size.
1549 */
1550 void
1551 pmap_remove(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
1552 {
1553 vm_offset_t va_next;
1554 pml4_entry_t *pml4e;
1555 pdp_entry_t *pdpe;
1556 pd_entry_t ptpaddr, *pde;
1557 pt_entry_t *pte;
1558 int anyvalid;
1559
1560 /*
1561 * Perform an unsynchronized read. This is, however, safe.
1562 */
1563 if (pmap->pm_stats.resident_count == 0)
1564 return;
1565
1566 anyvalid = 0;
1567
1568 vm_page_lock_queues();
1569 PMAP_LOCK(pmap);
1570
1571 /*
1572 * Special handling for removing a single page: a very
1573 * common operation, and one where it is easy to short-circuit
1574 * some code.
1575 */
1576 if (sva + PAGE_SIZE == eva) {
1577 pde = pmap_pde(pmap, sva);
1578 if (pde && (*pde & PG_PS) == 0) {
1579 pmap_remove_page(pmap, sva);
1580 goto out;
1581 }
1582 }
1583
1584 for (; sva < eva; sva = va_next) {
1585
1586 if (pmap->pm_stats.resident_count == 0)
1587 break;
1588
1589 pml4e = pmap_pml4e(pmap, sva);
1590 if (pml4e == 0) {
1591 va_next = (sva + NBPML4) & ~PML4MASK;
1592 continue;
1593 }
1594
1595 pdpe = pmap_pdpe(pmap, sva);
1596 if (pdpe == 0) {
1597 va_next = (sva + NBPDP) & ~PDPMASK;
1598 continue;
1599 }
1600
1601 /*
1602 * Calculate index for next page table.
1603 */
1604 va_next = (sva + NBPDR) & ~PDRMASK;
1605
1606 pde = pmap_pde(pmap, sva);
1607 if (pde == 0)
1608 continue;
1609 ptpaddr = *pde;
1610
1611 /*
1612 * Weed out invalid mappings. Note: we assume that the page
1613 * directory table is always allocated, and in kernel virtual.
1614 */
1615 if (ptpaddr == 0)
1616 continue;
1617
1618 /*
1619 * Check for large page.
1620 */
1621 if ((ptpaddr & PG_PS) != 0) {
1622 *pde = 0;
1623 pmap->pm_stats.resident_count -= NBPDR / PAGE_SIZE;
1624 anyvalid = 1;
1625 continue;
1626 }
1627
1628 /*
1629 * Limit our scan to either the end of the va represented
1630 * by the current page table page, or to the end of the
1631 * range being removed.
1632 */
1633 if (va_next > eva)
1634 va_next = eva;
1635
1636 for (; sva != va_next; sva += PAGE_SIZE) {
1637 pte = pmap_pte(pmap, sva);
1638 if (pte == NULL || *pte == 0)
1639 continue;
1640 anyvalid = 1;
1641 if (pmap_remove_pte(pmap, pte, sva, ptpaddr))
1642 break;
1643 }
1644 }
1645 out:
1646 vm_page_unlock_queues();
1647 if (anyvalid)
1648 pmap_invalidate_all(pmap);
1649 PMAP_UNLOCK(pmap);
1650 }
1651
1652 /*
1653 * Routine: pmap_remove_all
1654 * Function:
1655 * Removes this physical page from
1656 * all physical maps in which it resides.
1657 * Reflects back modify bits to the pager.
1658 *
1659 * Notes:
1660 * Original versions of this routine were very
1661 * inefficient because they iteratively called
1662 * pmap_remove (slow...)
1663 */
1664
1665 void
1666 pmap_remove_all(vm_page_t m)
1667 {
1668 register pv_entry_t pv;
1669 pt_entry_t *pte, tpte;
1670 pd_entry_t ptepde;
1671
1672 #if defined(PMAP_DIAGNOSTIC)
1673 /*
1674 * XXX This makes pmap_remove_all() illegal for non-managed pages!
1675 */
1676 if (!pmap_initialized || (m->flags & PG_FICTITIOUS)) {
1677 panic("pmap_remove_all: illegal for unmanaged page, va: 0x%lx",
1678 VM_PAGE_TO_PHYS(m));
1679 }
1680 #endif
1681 mtx_assert(&vm_page_queue_mtx, MA_OWNED);
1682 while ((pv = TAILQ_FIRST(&m->md.pv_list)) != NULL) {
1683 PMAP_LOCK(pv->pv_pmap);
1684 pv->pv_pmap->pm_stats.resident_count--;
1685 pte = pmap_pte_pde(pv->pv_pmap, pv->pv_va, &ptepde);
1686 tpte = pte_load_clear(pte);
1687 if (tpte & PG_W)
1688 pv->pv_pmap->pm_stats.wired_count--;
1689 if (tpte & PG_A)
1690 vm_page_flag_set(m, PG_REFERENCED);
1691
1692 /*
1693 * Update the vm_page_t clean and reference bits.
1694 */
1695 if (tpte & PG_M) {
1696 #if defined(PMAP_DIAGNOSTIC)
1697 if (pmap_nw_modified((pt_entry_t) tpte)) {
1698 printf(
1699 "pmap_remove_all: modified page not writable: va: 0x%lx, pte: 0x%lx\n",
1700 pv->pv_va, tpte);
1701 }
1702 #endif
1703 if (pmap_track_modified(pv->pv_va))
1704 vm_page_dirty(m);
1705 }
1706 pmap_invalidate_page(pv->pv_pmap, pv->pv_va);
1707 TAILQ_REMOVE(&pv->pv_pmap->pm_pvlist, pv, pv_plist);
1708 TAILQ_REMOVE(&m->md.pv_list, pv, pv_list);
1709 m->md.pv_list_count--;
1710 pmap_unuse_pt(pv->pv_pmap, pv->pv_va, ptepde);
1711 PMAP_UNLOCK(pv->pv_pmap);
1712 free_pv_entry(pv);
1713 }
1714 vm_page_flag_clear(m, PG_WRITEABLE);
1715 }
1716
1717 /*
1718 * Set the physical protection on the
1719 * specified range of this map as requested.
1720 */
1721 void
1722 pmap_protect(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot)
1723 {
1724 vm_offset_t va_next;
1725 pml4_entry_t *pml4e;
1726 pdp_entry_t *pdpe;
1727 pd_entry_t ptpaddr, *pde;
1728 int anychanged;
1729
1730 if ((prot & VM_PROT_READ) == VM_PROT_NONE) {
1731 pmap_remove(pmap, sva, eva);
1732 return;
1733 }
1734
1735 if (prot & VM_PROT_WRITE)
1736 return;
1737
1738 anychanged = 0;
1739
1740 vm_page_lock_queues();
1741 PMAP_LOCK(pmap);
1742 for (; sva < eva; sva = va_next) {
1743
1744 pml4e = pmap_pml4e(pmap, sva);
1745 if (pml4e == 0) {
1746 va_next = (sva + NBPML4) & ~PML4MASK;
1747 continue;
1748 }
1749
1750 pdpe = pmap_pdpe(pmap, sva);
1751 if (pdpe == 0) {
1752 va_next = (sva + NBPDP) & ~PDPMASK;
1753 continue;
1754 }
1755
1756 va_next = (sva + NBPDR) & ~PDRMASK;
1757
1758 pde = pmap_pde(pmap, sva);
1759 if (pde == NULL)
1760 continue;
1761 ptpaddr = *pde;
1762
1763 /*
1764 * Weed out invalid mappings. Note: we assume that the page
1765 * directory table is always allocated, and in kernel virtual.
1766 */
1767 if (ptpaddr == 0)
1768 continue;
1769
1770 /*
1771 * Check for large page.
1772 */
1773 if ((ptpaddr & PG_PS) != 0) {
1774 *pde &= ~(PG_M|PG_RW);
1775 pmap->pm_stats.resident_count -= NBPDR / PAGE_SIZE;
1776 anychanged = 1;
1777 continue;
1778 }
1779
1780 if (va_next > eva)
1781 va_next = eva;
1782
1783 for (; sva != va_next; sva += PAGE_SIZE) {
1784 pt_entry_t obits, pbits;
1785 pt_entry_t *pte;
1786 vm_page_t m;
1787
1788 pte = pmap_pte(pmap, sva);
1789 if (pte == NULL)
1790 continue;
1791 retry:
1792 obits = pbits = *pte;
1793 if (pbits & PG_MANAGED) {
1794 m = NULL;
1795 if (pbits & PG_A) {
1796 m = PHYS_TO_VM_PAGE(pbits & PG_FRAME);
1797 vm_page_flag_set(m, PG_REFERENCED);
1798 pbits &= ~PG_A;
1799 }
1800 if ((pbits & PG_M) != 0 &&
1801 pmap_track_modified(sva)) {
1802 if (m == NULL)
1803 m = PHYS_TO_VM_PAGE(pbits &
1804 PG_FRAME);
1805 vm_page_dirty(m);
1806 }
1807 }
1808
1809 pbits &= ~(PG_RW | PG_M);
1810
1811 if (pbits != obits) {
1812 if (!atomic_cmpset_long(pte, obits, pbits))
1813 goto retry;
1814 if (obits & PG_G)
1815 pmap_invalidate_page(pmap, sva);
1816 else
1817 anychanged = 1;
1818 }
1819 }
1820 }
1821 vm_page_unlock_queues();
1822 if (anychanged)
1823 pmap_invalidate_all(pmap);
1824 PMAP_UNLOCK(pmap);
1825 }
1826
1827 /*
1828 * Insert the given physical page (p) at
1829 * the specified virtual address (v) in the
1830 * target physical map with the protection requested.
1831 *
1832 * If specified, the page will be wired down, meaning
1833 * that the related pte can not be reclaimed.
1834 *
1835 * NB: This is the only routine which MAY NOT lazy-evaluate
1836 * or lose information. That is, this routine must actually
1837 * insert this page into the given map NOW.
1838 */
1839 void
1840 pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
1841 boolean_t wired)
1842 {
1843 vm_paddr_t pa;
1844 register pt_entry_t *pte;
1845 vm_paddr_t opa;
1846 pd_entry_t ptepde;
1847 pt_entry_t origpte, newpte;
1848 vm_page_t mpte, om;
1849
1850 va = trunc_page(va);
1851 #ifdef PMAP_DIAGNOSTIC
1852 if (va > VM_MAX_KERNEL_ADDRESS)
1853 panic("pmap_enter: toobig");
1854 if ((va >= UPT_MIN_ADDRESS) && (va < UPT_MAX_ADDRESS))
1855 panic("pmap_enter: invalid to pmap_enter page table pages (va: 0x%lx)", va);
1856 #endif
1857
1858 mpte = NULL;
1859
1860 vm_page_lock_queues();
1861 PMAP_LOCK(pmap);
1862
1863 /*
1864 * In the case that a page table page is not
1865 * resident, we are creating it here.
1866 */
1867 if (va < VM_MAXUSER_ADDRESS) {
1868 mpte = pmap_allocpte(pmap, va, M_WAITOK);
1869 }
1870 #if 0 && defined(PMAP_DIAGNOSTIC)
1871 else {
1872 pd_entry_t *pdeaddr = pmap_pde(pmap, va);
1873 origpte = *pdeaddr;
1874 if ((origpte & PG_V) == 0) {
1875 panic("pmap_enter: invalid kernel page table page, pde=%p, va=%p\n",
1876 origpte, va);
1877 }
1878 }
1879 #endif
1880
1881 pte = pmap_pte_pde(pmap, va, &ptepde);
1882
1883 /*
1884 * Page Directory table entry not valid, we need a new PT page
1885 */
1886 if (pte == NULL)
1887 panic("pmap_enter: invalid page directory va=%#lx\n", va);
1888
1889 pa = VM_PAGE_TO_PHYS(m);
1890 om = NULL;
1891 origpte = *pte;
1892 opa = origpte & PG_FRAME;
1893
1894 if (origpte & PG_PS)
1895 panic("pmap_enter: attempted pmap_enter on 2MB page");
1896
1897 /*
1898 * Mapping has not changed, must be protection or wiring change.
1899 */
1900 if (origpte && (opa == pa)) {
1901 /*
1902 * Wiring change, just update stats. We don't worry about
1903 * wiring PT pages as they remain resident as long as there
1904 * are valid mappings in them. Hence, if a user page is wired,
1905 * the PT page will be also.
1906 */
1907 if (wired && ((origpte & PG_W) == 0))
1908 pmap->pm_stats.wired_count++;
1909 else if (!wired && (origpte & PG_W))
1910 pmap->pm_stats.wired_count--;
1911
1912 #if defined(PMAP_DIAGNOSTIC)
1913 if (pmap_nw_modified((pt_entry_t) origpte)) {
1914 printf(
1915 "pmap_enter: modified page not writable: va: 0x%lx, pte: 0x%lx\n",
1916 va, origpte);
1917 }
1918 #endif
1919
1920 /*
1921 * Remove extra pte reference
1922 */
1923 if (mpte)
1924 mpte->wire_count--;
1925
1926 /*
1927 * We might be turning off write access to the page,
1928 * so we go ahead and sense modify status.
1929 */
1930 if (origpte & PG_MANAGED) {
1931 om = m;
1932 pa |= PG_MANAGED;
1933 }
1934 goto validate;
1935 }
1936 /*
1937 * Mapping has changed, invalidate old range and fall through to
1938 * handle validating new mapping.
1939 */
1940 if (opa) {
1941 int err;
1942 if (origpte & PG_W)
1943 pmap->pm_stats.wired_count--;
1944 if (origpte & PG_MANAGED) {
1945 om = PHYS_TO_VM_PAGE(opa);
1946 err = pmap_remove_entry(pmap, om, va, ptepde);
1947 } else
1948 err = pmap_unuse_pt(pmap, va, ptepde);
1949 if (err)
1950 panic("pmap_enter: pte vanished, va: 0x%lx", va);
1951 } else
1952 pmap->pm_stats.resident_count++;
1953
1954 /*
1955 * Enter on the PV list if part of our managed memory. Note that we
1956 * raise IPL while manipulating pv_table since pmap_enter can be
1957 * called at interrupt time.
1958 */
1959 if (pmap_initialized &&
1960 (m->flags & (PG_FICTITIOUS|PG_UNMANAGED)) == 0) {
1961 pmap_insert_entry(pmap, va, m);
1962 pa |= PG_MANAGED;
1963 }
1964
1965 /*
1966 * Increment counters
1967 */
1968 if (wired)
1969 pmap->pm_stats.wired_count++;
1970
1971 validate:
1972 /*
1973 * Now validate mapping with desired protection/wiring.
1974 */
1975 newpte = (pt_entry_t)(pa | PG_V);
1976 if ((prot & VM_PROT_WRITE) != 0)
1977 newpte |= PG_RW;
1978 if ((prot & VM_PROT_EXECUTE) == 0)
1979 newpte |= pg_nx;
1980 if (wired)
1981 newpte |= PG_W;
1982 if (va < VM_MAXUSER_ADDRESS)
1983 newpte |= PG_U;
1984 if (pmap == kernel_pmap)
1985 newpte |= PG_G;
1986
1987 /*
1988 * if the mapping or permission bits are different, we need
1989 * to update the pte.
1990 */
1991 if ((origpte & ~(PG_M|PG_A)) != newpte) {
1992 if (origpte & PG_MANAGED) {
1993 origpte = pte_load_store(pte, newpte | PG_A);
1994 if ((origpte & PG_M) && pmap_track_modified(va))
1995 vm_page_dirty(om);
1996 if (origpte & PG_A)
1997 vm_page_flag_set(om, PG_REFERENCED);
1998 } else
1999 pte_store(pte, newpte | PG_A);
2000 if (origpte) {
2001 pmap_invalidate_page(pmap, va);
2002 }
2003 }
2004 vm_page_unlock_queues();
2005 PMAP_UNLOCK(pmap);
2006 }
2007
2008 /*
2009 * this code makes some *MAJOR* assumptions:
2010 * 1. Current pmap & pmap exists.
2011 * 2. Not wired.
2012 * 3. Read access.
2013 * 4. No page table pages.
2014 * 5. Tlbflush is deferred to calling procedure.
2015 * 6. Page IS managed.
2016 * but is *MUCH* faster than pmap_enter...
2017 */
2018
2019 vm_page_t
2020 pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_page_t mpte)
2021 {
2022 pt_entry_t *pte;
2023 vm_paddr_t pa;
2024
2025 vm_page_lock_queues();
2026 PMAP_LOCK(pmap);
2027
2028 /*
2029 * In the case that a page table page is not
2030 * resident, we are creating it here.
2031 */
2032 if (va < VM_MAXUSER_ADDRESS) {
2033 vm_pindex_t ptepindex;
2034 pd_entry_t *ptepa;
2035
2036 /*
2037 * Calculate pagetable page index
2038 */
2039 ptepindex = pmap_pde_pindex(va);
2040 if (mpte && (mpte->pindex == ptepindex)) {
2041 mpte->wire_count++;
2042 } else {
2043 retry:
2044 /*
2045 * Get the page directory entry
2046 */
2047 ptepa = pmap_pde(pmap, va);
2048
2049 /*
2050 * If the page table page is mapped, we just increment
2051 * the hold count, and activate it.
2052 */
2053 if (ptepa && (*ptepa & PG_V) != 0) {
2054 if (*ptepa & PG_PS)
2055 panic("pmap_enter_quick: unexpected mapping into 2MB page");
2056 mpte = PHYS_TO_VM_PAGE(*ptepa & PG_FRAME);
2057 mpte->wire_count++;
2058 } else {
2059 mpte = _pmap_allocpte(pmap, ptepindex,
2060 M_WAITOK);
2061 if (mpte == NULL)
2062 goto retry;
2063 }
2064 }
2065 } else {
2066 mpte = NULL;
2067 }
2068
2069 /*
2070 * This call to vtopte makes the assumption that we are
2071 * entering the page into the current pmap. In order to support
2072 * quick entry into any pmap, one would likely use pmap_pte.
2073 * But that isn't as quick as vtopte.
2074 */
2075 pte = vtopte(va);
2076 if (*pte) {
2077 if (mpte != NULL) {
2078 pmap_unwire_pte_hold(pmap, va, mpte);
2079 mpte = NULL;
2080 }
2081 goto out;
2082 }
2083
2084 /*
2085 * Enter on the PV list if part of our managed memory. Note that we
2086 * raise IPL while manipulating pv_table since pmap_enter can be
2087 * called at interrupt time.
2088 */
2089 if ((m->flags & (PG_FICTITIOUS|PG_UNMANAGED)) == 0)
2090 pmap_insert_entry(pmap, va, m);
2091
2092 /*
2093 * Increment counters
2094 */
2095 pmap->pm_stats.resident_count++;
2096
2097 pa = VM_PAGE_TO_PHYS(m);
2098
2099 /*
2100 * Now validate mapping with RO protection
2101 */
2102 if (m->flags & (PG_FICTITIOUS|PG_UNMANAGED))
2103 pte_store(pte, pa | PG_V | PG_U);
2104 else
2105 pte_store(pte, pa | PG_V | PG_U | PG_MANAGED);
2106 out:
2107 vm_page_unlock_queues();
2108 PMAP_UNLOCK(pmap);
2109 return mpte;
2110 }
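
/*
 * Editorial sketch (not part of pmap.c): one way a caller might honor the
 * assumptions listed above pmap_enter_quick() -- current pmap, unwired,
 * read-only, managed pages, with the TLB flush left to the caller.  The
 * function and parameter names below are hypothetical; the real callers
 * are the VM fault and prefault paths.
 */
static void
example_prefault_range(pmap_t pmap, vm_offset_t sva, vm_page_t *ma, int count)
{
        vm_page_t mpte;
        int i;

        mpte = NULL;
        for (i = 0; i < count; i++)
                /* Reuse the returned page table page across iterations. */
                mpte = pmap_enter_quick(pmap, sva + i * PAGE_SIZE, ma[i], mpte);
        /* Assumption 5: any required TLB invalidation happens in the caller. */
}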
2111
2112 /*
2113 * Make a temporary mapping for a physical address. This is only intended
2114 * to be used for panic dumps.
2115 */
2116 void *
2117 pmap_kenter_temporary(vm_paddr_t pa, int i)
2118 {
2119 vm_offset_t va;
2120
2121 va = (vm_offset_t)crashdumpmap + (i * PAGE_SIZE);
2122 pmap_kenter(va, pa);
2123 invlpg(va);
2124 return ((void *)crashdumpmap);
2125 }
2126
2127 /*
2128 * This code maps large physical mmap regions into the
2129 * processor address space. Note that some shortcuts
2130 * are taken, but the code works.
2131 */
2132 void
2133 pmap_object_init_pt(pmap_t pmap, vm_offset_t addr,
2134 vm_object_t object, vm_pindex_t pindex,
2135 vm_size_t size)
2136 {
2137 vm_page_t p;
2138
2139 VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
2140 KASSERT(object->type == OBJT_DEVICE,
2141 ("pmap_object_init_pt: non-device object"));
2142 if (((addr & (NBPDR - 1)) == 0) && ((size & (NBPDR - 1)) == 0)) {
2143 int i;
2144 vm_page_t m[1];
2145 int npdes;
2146 pd_entry_t ptepa, *pde;
2147
2148 PMAP_LOCK(pmap);
2149 pde = pmap_pde(pmap, addr);
2150 if (pde != 0 && (*pde & PG_V) != 0)
2151 goto out;
2152 PMAP_UNLOCK(pmap);
2153 retry:
2154 p = vm_page_lookup(object, pindex);
2155 if (p != NULL) {
2156 vm_page_lock_queues();
2157 if (vm_page_sleep_if_busy(p, FALSE, "init4p"))
2158 goto retry;
2159 } else {
2160 p = vm_page_alloc(object, pindex, VM_ALLOC_NORMAL);
2161 if (p == NULL)
2162 return;
2163 m[0] = p;
2164
2165 if (vm_pager_get_pages(object, m, 1, 0) != VM_PAGER_OK) {
2166 vm_page_lock_queues();
2167 vm_page_free(p);
2168 vm_page_unlock_queues();
2169 return;
2170 }
2171
2172 p = vm_page_lookup(object, pindex);
2173 vm_page_lock_queues();
2174 vm_page_wakeup(p);
2175 }
2176 vm_page_unlock_queues();
2177
2178 ptepa = VM_PAGE_TO_PHYS(p);
2179 if (ptepa & (NBPDR - 1))
2180 return;
2181
2182 p->valid = VM_PAGE_BITS_ALL;
2183
2184 PMAP_LOCK(pmap);
2185 pmap->pm_stats.resident_count += size >> PAGE_SHIFT;
2186 npdes = size >> PDRSHIFT;
2187 for(i = 0; i < npdes; i++) {
2188 pde_store(pde, ptepa | PG_U | PG_RW | PG_V | PG_PS);
2189 ptepa += NBPDR;
2190 pde++;
2191 }
2192 pmap_invalidate_all(pmap);
2193 out:
2194 PMAP_UNLOCK(pmap);
2195 }
2196 }
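
/*
 * Editorial sketch (not part of pmap.c): pmap_object_init_pt() above maps a
 * 2MB-aligned device region with PG_PS ("page size") entries placed directly
 * in the page directory, so no page table page is needed per 2MB chunk.  The
 * fragment below isolates the bit layout of one such PDE; the function name
 * is hypothetical.
 */
static __inline pd_entry_t
example_compose_2mb_pde(vm_paddr_t pa)
{
        /* pa must be 2MB (NBPDR) aligned for a PG_PS mapping. */
        return ((pd_entry_t)(pa | PG_U | PG_RW | PG_V | PG_PS));
}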
2197
2198 /*
2199 * Routine: pmap_change_wiring
2200 * Function: Change the wiring attribute for a map/virtual-address
2201 * pair.
2202 * In/out conditions:
2203 * The mapping must already exist in the pmap.
2204 */
2205 void
2206 pmap_change_wiring(pmap, va, wired)
2207 register pmap_t pmap;
2208 vm_offset_t va;
2209 boolean_t wired;
2210 {
2211 register pt_entry_t *pte;
2212
2213 /*
2214 * Wiring is not a hardware characteristic so there is no need to
2215 * invalidate TLB.
2216 */
2217 PMAP_LOCK(pmap);
2218 pte = pmap_pte(pmap, va);
2219 if (wired && (*pte & PG_W) == 0) {
2220 pmap->pm_stats.wired_count++;
2221 atomic_set_long(pte, PG_W);
2222 } else if (!wired && (*pte & PG_W) != 0) {
2223 pmap->pm_stats.wired_count--;
2224 atomic_clear_long(pte, PG_W);
2225 }
2226 PMAP_UNLOCK(pmap);
2227 }
2228
2229
2230
2231 /*
2232 * Copy the range specified by src_addr/len
2233 * from the source map to the range dst_addr/len
2234 * in the destination map.
2235 *
2236 * This routine is only advisory and need not do anything.
2237 */
2238
2239 void
2240 pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr, vm_size_t len,
2241 vm_offset_t src_addr)
2242 {
2243 vm_offset_t addr;
2244 vm_offset_t end_addr = src_addr + len;
2245 vm_offset_t va_next;
2246 vm_page_t m;
2247
2248 if (dst_addr != src_addr)
2249 return;
2250
2251 if (!pmap_is_current(src_pmap))
2252 return;
2253
2254 vm_page_lock_queues();
2255 if (dst_pmap < src_pmap) {
2256 PMAP_LOCK(dst_pmap);
2257 PMAP_LOCK(src_pmap);
2258 } else {
2259 PMAP_LOCK(src_pmap);
2260 PMAP_LOCK(dst_pmap);
2261 }
2262 for (addr = src_addr; addr < end_addr; addr = va_next) {
2263 pt_entry_t *src_pte, *dst_pte;
2264 vm_page_t dstmpte, srcmpte;
2265 pml4_entry_t *pml4e;
2266 pdp_entry_t *pdpe;
2267 pd_entry_t srcptepaddr, *pde;
2268
2269 if (addr >= UPT_MIN_ADDRESS)
2270 panic("pmap_copy: invalid to pmap_copy page tables");
2271
2272 /*
2273 * Don't let optional prefaulting of pages make us go
2274 * way below the low water mark of free pages or way
2275 * above high water mark of used pv entries.
2276 */
2277 if (cnt.v_free_count < cnt.v_free_reserved ||
2278 pv_entry_count > pv_entry_high_water)
2279 break;
2280
2281 pml4e = pmap_pml4e(src_pmap, addr);
2282 if (pml4e == 0) {
2283 va_next = (addr + NBPML4) & ~PML4MASK;
2284 continue;
2285 }
2286
2287 pdpe = pmap_pdpe(src_pmap, addr);
2288 if (pdpe == 0) {
2289 va_next = (addr + NBPDP) & ~PDPMASK;
2290 continue;
2291 }
2292
2293 va_next = (addr + NBPDR) & ~PDRMASK;
2294
2295 pde = pmap_pde(src_pmap, addr);
2296 if (pde)
2297 srcptepaddr = *pde;
2298 else
2299 continue;
2300 if (srcptepaddr == 0)
2301 continue;
2302
2303 if (srcptepaddr & PG_PS) {
2304 pde = pmap_pde(dst_pmap, addr);
2305 if (pde == 0) {
2306 /*
2307 * XXX should do an allocpte here to
2308 * instantiate the pde
2309 */
2310 continue;
2311 }
2312 if (*pde == 0) {
2313 *pde = srcptepaddr;
2314 dst_pmap->pm_stats.resident_count +=
2315 NBPDR / PAGE_SIZE;
2316 }
2317 continue;
2318 }
2319
2320 srcmpte = PHYS_TO_VM_PAGE(srcptepaddr & PG_FRAME);
2321 if (srcmpte->wire_count == 0)
2322 panic("pmap_copy: source page table page is unused");
2323
2324 if (va_next > end_addr)
2325 va_next = end_addr;
2326
2327 src_pte = vtopte(addr);
2328 while (addr < va_next) {
2329 pt_entry_t ptetemp;
2330 ptetemp = *src_pte;
2331 /*
2332 * we only virtual copy managed pages
2333 */
2334 if ((ptetemp & PG_MANAGED) != 0) {
2335 /*
2336 * We have to check after allocpte for the
2337 * pte still being around... allocpte can
2338 * block.
2339 */
2340 dstmpte = pmap_allocpte(dst_pmap, addr,
2341 M_NOWAIT);
2342 if (dstmpte == NULL)
2343 break;
2344 dst_pte = pmap_pte(dst_pmap, addr);
2345 if (*dst_pte == 0) {
2346 /*
2347 * Clear the modified and
2348 * accessed (referenced) bits
2349 * during the copy.
2350 */
2351 m = PHYS_TO_VM_PAGE(ptetemp & PG_FRAME);
2352 *dst_pte = ptetemp & ~(PG_M | PG_A);
2353 dst_pmap->pm_stats.resident_count++;
2354 pmap_insert_entry(dst_pmap, addr, m);
2355 } else
2356 pmap_unwire_pte_hold(dst_pmap, addr, dstmpte);
2357 if (dstmpte->wire_count >= srcmpte->wire_count)
2358 break;
2359 }
2360 addr += PAGE_SIZE;
2361 src_pte++;
2362 }
2363 }
2364 vm_page_unlock_queues();
2365 PMAP_UNLOCK(src_pmap);
2366 PMAP_UNLOCK(dst_pmap);
2367 }
2368
2369 /*
2370 * pmap_zero_page zeros the specified hardware page through the
2371 * direct map, using pagezero to clear its contents.
2372 */
2373 void
2374 pmap_zero_page(vm_page_t m)
2375 {
2376 vm_offset_t va = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m));
2377
2378 pagezero((void *)va);
2379 }
2380
2381 /*
2382 * pmap_zero_page_area zeros part of the specified hardware page
2383 * through the direct map, using pagezero or bzero to clear its contents.
2384 *
2385 * off and size may not cover an area beyond a single hardware page.
2386 */
2387 void
2388 pmap_zero_page_area(vm_page_t m, int off, int size)
2389 {
2390 vm_offset_t va = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m));
2391
2392 if (off == 0 && size == PAGE_SIZE)
2393 pagezero((void *)va);
2394 else
2395 bzero((char *)va + off, size);
2396 }
2397
2398 /*
2399 * pmap_zero_page_idle zeros the specified hardware page through the
2400 * direct map, using pagezero to clear its contents. This
2401 * is intended to be called from the vm_pagezero process only and
2402 * outside of Giant.
2403 */
2404 void
2405 pmap_zero_page_idle(vm_page_t m)
2406 {
2407 vm_offset_t va = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m));
2408
2409 pagezero((void *)va);
2410 }
2411
2412 /*
2413 * pmap_copy_page copies the specified (machine independent)
2414 * page through the direct map, using pagecopy to copy
2415 * its contents, one machine dependent page at a
2416 * time.
2417 */
2418 void
2419 pmap_copy_page(vm_page_t msrc, vm_page_t mdst)
2420 {
2421 vm_offset_t src = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(msrc));
2422 vm_offset_t dst = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(mdst));
2423
2424 pagecopy((void *)src, (void *)dst);
2425 }
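
/*
 * Editorial sketch (not part of pmap.c): the zero/copy routines above avoid
 * temporary kernel mappings entirely by going through amd64's direct map,
 * which exposes physical memory at PHYS_TO_DMAP().  A hypothetical helper
 * touching one physical page the same way might look like this.
 */
static void
example_fill_page(vm_page_t m, char pattern)
{
        char *va;
        int i;

        /* Translate the physical address straight through the direct map. */
        va = (char *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m));
        for (i = 0; i < PAGE_SIZE; i++)
                va[i] = pattern;
}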
2426
2427 /*
2428 * Returns true if the pmap's pv is one of the first
2429 * 16 pvs linked to from this page. This count may
2430 * be changed upwards or downwards in the future; it
2431 * is only necessary that true be returned for a small
2432 * subset of pmaps for proper page aging.
2433 */
2434 boolean_t
2435 pmap_page_exists_quick(pmap, m)
2436 pmap_t pmap;
2437 vm_page_t m;
2438 {
2439 pv_entry_t pv;
2440 int loops = 0;
2441
2442 if (!pmap_initialized || (m->flags & PG_FICTITIOUS))
2443 return FALSE;
2444
2445 mtx_assert(&vm_page_queue_mtx, MA_OWNED);
2446 TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
2447 if (pv->pv_pmap == pmap) {
2448 return TRUE;
2449 }
2450 loops++;
2451 if (loops >= 16)
2452 break;
2453 }
2454 return (FALSE);
2455 }
2456
2457 #define PMAP_REMOVE_PAGES_CURPROC_ONLY
2458 /*
2459 * Remove all pages from the specified address space;
2460 * this aids process exit speeds. Also, this code
2461 * is special-cased for the current process only, but
2462 * can have the more generic (and slightly slower)
2463 * mode enabled. This is much faster than pmap_remove
2464 * in the case of running down an entire address space.
2465 */
2466 void
2467 pmap_remove_pages(pmap, sva, eva)
2468 pmap_t pmap;
2469 vm_offset_t sva, eva;
2470 {
2471 pt_entry_t *pte, tpte;
2472 vm_page_t m;
2473 pv_entry_t pv, npv;
2474
2475 #ifdef PMAP_REMOVE_PAGES_CURPROC_ONLY
2476 if (pmap != vmspace_pmap(curthread->td_proc->p_vmspace)) {
2477 printf("warning: pmap_remove_pages called with non-current pmap\n");
2478 return;
2479 }
2480 #endif
2481 vm_page_lock_queues();
2482 PMAP_LOCK(pmap);
2483 for (pv = TAILQ_FIRST(&pmap->pm_pvlist); pv; pv = npv) {
2484
2485 if (pv->pv_va >= eva || pv->pv_va < sva) {
2486 npv = TAILQ_NEXT(pv, pv_plist);
2487 continue;
2488 }
2489
2490 #ifdef PMAP_REMOVE_PAGES_CURPROC_ONLY
2491 pte = vtopte(pv->pv_va);
2492 #else
2493 pte = pmap_pte(pmap, pv->pv_va);
2494 #endif
2495 tpte = *pte;
2496
2497 if (tpte == 0) {
2498 printf("TPTE at %p IS ZERO @ VA %08lx\n",
2499 pte, pv->pv_va);
2500 panic("bad pte");
2501 }
2502
2503 /*
2504 * We cannot remove wired pages from a process' mapping at this time
2505 */
2506 if (tpte & PG_W) {
2507 npv = TAILQ_NEXT(pv, pv_plist);
2508 continue;
2509 }
2510
2511 m = PHYS_TO_VM_PAGE(tpte & PG_FRAME);
2512 KASSERT(m->phys_addr == (tpte & PG_FRAME),
2513 ("vm_page_t %p phys_addr mismatch %016jx %016jx",
2514 m, (uintmax_t)m->phys_addr, (uintmax_t)tpte));
2515
2516 KASSERT(m < &vm_page_array[vm_page_array_size],
2517 ("pmap_remove_pages: bad tpte %#jx", (uintmax_t)tpte));
2518
2519 pmap->pm_stats.resident_count--;
2520
2521 pte_clear(pte);
2522
2523 /*
2524 * Update the vm_page_t clean and reference bits.
2525 */
2526 if (tpte & PG_M) {
2527 vm_page_dirty(m);
2528 }
2529
2530 npv = TAILQ_NEXT(pv, pv_plist);
2531 TAILQ_REMOVE(&pmap->pm_pvlist, pv, pv_plist);
2532
2533 m->md.pv_list_count--;
2534 TAILQ_REMOVE(&m->md.pv_list, pv, pv_list);
2535 if (TAILQ_EMPTY(&m->md.pv_list))
2536 vm_page_flag_clear(m, PG_WRITEABLE);
2537
2538 pmap_unuse_pt(pmap, pv->pv_va, *vtopde(pv->pv_va));
2539 free_pv_entry(pv);
2540 }
2541 pmap_invalidate_all(pmap);
2542 PMAP_UNLOCK(pmap);
2543 vm_page_unlock_queues();
2544 }
2545
2546 /*
2547 * pmap_is_modified:
2548 *
2549 * Return whether or not the specified physical page was modified
2550 * in any physical maps.
2551 */
2552 boolean_t
2553 pmap_is_modified(vm_page_t m)
2554 {
2555 pv_entry_t pv;
2556 pt_entry_t *pte;
2557 boolean_t rv;
2558
2559 rv = FALSE;
2560 if (!pmap_initialized || (m->flags & PG_FICTITIOUS))
2561 return (rv);
2562
2563 mtx_assert(&vm_page_queue_mtx, MA_OWNED);
2564 TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
2565 /*
2566 * Skip addresses whose modified bit is not tracked,
2567 * e.g. mappings that lie within the kernel's clean
2568 * submap.
2569 */
2570 if (!pmap_track_modified(pv->pv_va))
2571 continue;
2572 #if defined(PMAP_DIAGNOSTIC)
2573 if (!pv->pv_pmap) {
2574 printf("Null pmap (tb) at va: 0x%lx\n", pv->pv_va);
2575 continue;
2576 }
2577 #endif
2578 PMAP_LOCK(pv->pv_pmap);
2579 pte = pmap_pte(pv->pv_pmap, pv->pv_va);
2580 rv = (*pte & PG_M) != 0;
2581 PMAP_UNLOCK(pv->pv_pmap);
2582 if (rv)
2583 break;
2584 }
2585 return (rv);
2586 }
2587
2588 /*
2589 * pmap_is_prefaultable:
2590 *
2591 * Return whether or not the specified virtual address is eligible
2592 * for prefault.
2593 */
2594 boolean_t
2595 pmap_is_prefaultable(pmap_t pmap, vm_offset_t addr)
2596 {
2597 pd_entry_t *pde;
2598 pt_entry_t *pte;
2599 boolean_t rv;
2600
2601 rv = FALSE;
2602 PMAP_LOCK(pmap);
2603 pde = pmap_pde(pmap, addr);
2604 if (pde != NULL && (*pde & PG_V)) {
2605 pte = vtopte(addr);
2606 rv = (*pte & PG_V) == 0;
2607 }
2608 PMAP_UNLOCK(pmap);
2609 return (rv);
2610 }
2611
2612 /*
2613 * Clear the given bit in each of the given page's ptes.
2614 */
2615 static __inline void
2616 pmap_clear_ptes(vm_page_t m, long bit)
2617 {
2618 register pv_entry_t pv;
2619 pt_entry_t pbits, *pte;
2620
2621 if (!pmap_initialized || (m->flags & PG_FICTITIOUS) ||
2622 (bit == PG_RW && (m->flags & PG_WRITEABLE) == 0))
2623 return;
2624
2625 mtx_assert(&vm_page_queue_mtx, MA_OWNED);
2626 /*
2627 * Loop over all current mappings, setting/clearing as appropriate.
2628 * If setting RO, do we need to clear the VAC?
2629 */
2630 TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
2631 /*
2632 * don't write protect pager mappings
2633 */
2634 if (bit == PG_RW) {
2635 if (!pmap_track_modified(pv->pv_va))
2636 continue;
2637 }
2638
2639 #if defined(PMAP_DIAGNOSTIC)
2640 if (!pv->pv_pmap) {
2641 printf("Null pmap (cb) at va: 0x%lx\n", pv->pv_va);
2642 continue;
2643 }
2644 #endif
2645
2646 PMAP_LOCK(pv->pv_pmap);
2647 pte = pmap_pte(pv->pv_pmap, pv->pv_va);
2648 retry:
2649 pbits = *pte;
2650 if (pbits & bit) {
2651 if (bit == PG_RW) {
2652 if (!atomic_cmpset_long(pte, pbits,
2653 pbits & ~(PG_RW | PG_M)))
2654 goto retry;
2655 if (pbits & PG_M) {
2656 vm_page_dirty(m);
2657 }
2658 } else {
2659 atomic_clear_long(pte, bit);
2660 }
2661 pmap_invalidate_page(pv->pv_pmap, pv->pv_va);
2662 }
2663 PMAP_UNLOCK(pv->pv_pmap);
2664 }
2665 if (bit == PG_RW)
2666 vm_page_flag_clear(m, PG_WRITEABLE);
2667 }
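
/*
 * Editorial sketch (not part of pmap.c): pmap_clear_ptes() above removes
 * write permission with a compare-and-set retry loop, so a concurrent
 * hardware update of PG_M cannot be lost between the load and the store.
 * The same pattern is restated below in isolation; the helper name is
 * hypothetical.
 */
static void
example_clear_writable(pt_entry_t *pte, vm_page_t m)
{
        pt_entry_t pbits;

        do {
                pbits = *pte;           /* snapshot the current PTE */
                if ((pbits & PG_RW) == 0)
                        return;         /* already read-only */
                /* Retry if the PTE changed underneath us. */
        } while (!atomic_cmpset_long(pte, pbits, pbits & ~(PG_RW | PG_M)));
        if (pbits & PG_M)
                vm_page_dirty(m);       /* preserve the modified state */
}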
2668
2669 /*
2670 * pmap_page_protect:
2671 *
2672 * Lower the permission for all mappings to a given page.
2673 */
2674 void
2675 pmap_page_protect(vm_page_t m, vm_prot_t prot)
2676 {
2677 if ((prot & VM_PROT_WRITE) == 0) {
2678 if (prot & (VM_PROT_READ | VM_PROT_EXECUTE)) {
2679 pmap_clear_ptes(m, PG_RW);
2680 } else {
2681 pmap_remove_all(m);
2682 }
2683 }
2684 }
2685
2686 /*
2687 * pmap_ts_referenced:
2688 *
2689 * Return a count of reference bits for a page, clearing those bits.
2690 * It is not necessary for every reference bit to be cleared, but it
2691 * is necessary that 0 only be returned when there are truly no
2692 * reference bits set.
2693 *
2694 * XXX: The exact number of bits to check and clear is a matter that
2695 * should be tested and standardized at some point in the future for
2696 * optimal aging of shared pages.
2697 */
2698 int
2699 pmap_ts_referenced(vm_page_t m)
2700 {
2701 register pv_entry_t pv, pvf, pvn;
2702 pt_entry_t *pte;
2703 pt_entry_t v;
2704 int rtval = 0;
2705
2706 if (!pmap_initialized || (m->flags & PG_FICTITIOUS))
2707 return (rtval);
2708
2709 mtx_assert(&vm_page_queue_mtx, MA_OWNED);
2710 if ((pv = TAILQ_FIRST(&m->md.pv_list)) != NULL) {
2711
2712 pvf = pv;
2713
2714 do {
2715 pvn = TAILQ_NEXT(pv, pv_list);
2716
2717 TAILQ_REMOVE(&m->md.pv_list, pv, pv_list);
2718
2719 TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_list);
2720
2721 if (!pmap_track_modified(pv->pv_va))
2722 continue;
2723
2724 PMAP_LOCK(pv->pv_pmap);
2725 pte = pmap_pte(pv->pv_pmap, pv->pv_va);
2726
2727 if (pte && ((v = pte_load(pte)) & PG_A) != 0) {
2728 atomic_clear_long(pte, PG_A);
2729 pmap_invalidate_page(pv->pv_pmap, pv->pv_va);
2730
2731 rtval++;
2732 if (rtval > 4) {
2733 PMAP_UNLOCK(pv->pv_pmap);
2734 break;
2735 }
2736 }
2737 PMAP_UNLOCK(pv->pv_pmap);
2738 } while ((pv = pvn) != NULL && pv != pvf);
2739 }
2740
2741 return (rtval);
2742 }
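
/*
 * Editorial sketch (not part of pmap.c): pmap_ts_referenced() above rotates
 * each examined pv entry to the tail of the page's pv list, so successive
 * calls age different mappings first, and it stops after a handful of
 * referenced bits have been found.  The rotation step by itself, with a
 * hypothetical helper name:
 */
static void
example_rotate_pv(vm_page_t m, pv_entry_t pv)
{
        /* Move this mapping to the end of the page's pv list. */
        TAILQ_REMOVE(&m->md.pv_list, pv, pv_list);
        TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_list);
}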
2743
2744 /*
2745 * Clear the modify bits on the specified physical page.
2746 */
2747 void
2748 pmap_clear_modify(vm_page_t m)
2749 {
2750 pmap_clear_ptes(m, PG_M);
2751 }
2752
2753 /*
2754 * pmap_clear_reference:
2755 *
2756 * Clear the reference bit on the specified physical page.
2757 */
2758 void
2759 pmap_clear_reference(vm_page_t m)
2760 {
2761 pmap_clear_ptes(m, PG_A);
2762 }
2763
2764 /*
2765 * Miscellaneous support routines follow
2766 */
2767
2768 /*
2769 * Map a set of physical memory pages into the kernel virtual
2770 * address space. Return a pointer to where it is mapped. This
2771 * routine is intended to be used for mapping device memory,
2772 * NOT real memory.
2773 */
2774 void *
2775 pmap_mapdev(pa, size)
2776 vm_paddr_t pa;
2777 vm_size_t size;
2778 {
2779 vm_offset_t va, tmpva, offset;
2780
2781 /* If this fits within the direct map window, use it */
2782 if (pa < dmaplimit && (pa + size) < dmaplimit)
2783 return ((void *)PHYS_TO_DMAP(pa));
2784 offset = pa & PAGE_MASK;
2785 size = roundup(offset + size, PAGE_SIZE);
2786 va = kmem_alloc_nofault(kernel_map, size);
2787 if (!va)
2788 panic("pmap_mapdev: Couldn't alloc kernel virtual memory");
2789 pa = trunc_page(pa);
2790 for (tmpva = va; size > 0; ) {
2791 pmap_kenter(tmpva, pa);
2792 size -= PAGE_SIZE;
2793 tmpva += PAGE_SIZE;
2794 pa += PAGE_SIZE;
2795 }
2796 pmap_invalidate_range(kernel_pmap, va, tmpva);
2797 return ((void *)(va + offset));
2798 }
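
/*
 * Editorial sketch (not part of pmap.c): a typical device-memory life cycle
 * using pmap_mapdev()/pmap_unmapdev().  If the physical range falls below
 * dmaplimit, pmap_mapdev() simply returns a direct map address and the
 * unmap becomes a no-op; otherwise kernel virtual address space is
 * allocated and later freed.  The device, register index, and names below
 * are hypothetical.
 */
static void
example_poke_device(vm_paddr_t bar_pa, vm_size_t bar_len)
{
        volatile uint32_t *regs;

        regs = pmap_mapdev(bar_pa, bar_len);    /* map the device BAR */
        regs[0] = 1;                            /* hypothetical register write */
        pmap_unmapdev((vm_offset_t)regs, bar_len);
}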
2799
2800 void
2801 pmap_unmapdev(va, size)
2802 vm_offset_t va;
2803 vm_size_t size;
2804 {
2805 vm_offset_t base, offset, tmpva;
2806
2807 /* If we handed out a direct map address in pmap_mapdev, do nothing */
2808 if (va >= DMAP_MIN_ADDRESS && va < DMAP_MAX_ADDRESS)
2809 return;
2810 base = trunc_page(va);
2811 offset = va & PAGE_MASK;
2812 size = roundup(offset + size, PAGE_SIZE);
2813 for (tmpva = base; tmpva < (base + size); tmpva += PAGE_SIZE)
2814 pmap_kremove(tmpva);
2815 pmap_invalidate_range(kernel_pmap, va, tmpva);
2816 kmem_free(kernel_map, base, size);
2817 }
2818
2819 /*
2820 * perform the pmap work for mincore
2821 */
2822 int
2823 pmap_mincore(pmap, addr)
2824 pmap_t pmap;
2825 vm_offset_t addr;
2826 {
2827 pt_entry_t *ptep, pte;
2828 vm_page_t m;
2829 int val = 0;
2830
2831 PMAP_LOCK(pmap);
2832 ptep = pmap_pte(pmap, addr);
2833 pte = (ptep != NULL) ? *ptep : 0;
2834 PMAP_UNLOCK(pmap);
2835
2836 if (pte != 0) {
2837 vm_paddr_t pa;
2838
2839 val = MINCORE_INCORE;
2840 if ((pte & PG_MANAGED) == 0)
2841 return val;
2842
2843 pa = pte & PG_FRAME;
2844
2845 m = PHYS_TO_VM_PAGE(pa);
2846
2847 /*
2848 * Modified by us
2849 */
2850 if (pte & PG_M)
2851 val |= MINCORE_MODIFIED|MINCORE_MODIFIED_OTHER;
2852 else {
2853 /*
2854 * Modified by someone else
2855 */
2856 vm_page_lock_queues();
2857 if (m->dirty || pmap_is_modified(m))
2858 val |= MINCORE_MODIFIED_OTHER;
2859 vm_page_unlock_queues();
2860 }
2861 /*
2862 * Referenced by us
2863 */
2864 if (pte & PG_A)
2865 val |= MINCORE_REFERENCED|MINCORE_REFERENCED_OTHER;
2866 else {
2867 /*
2868 * Referenced by someone else
2869 */
2870 vm_page_lock_queues();
2871 if ((m->flags & PG_REFERENCED) ||
2872 pmap_ts_referenced(m)) {
2873 val |= MINCORE_REFERENCED_OTHER;
2874 vm_page_flag_set(m, PG_REFERENCED);
2875 }
2876 vm_page_unlock_queues();
2877 }
2878 }
2879 return val;
2880 }
2881
2882 void
2883 pmap_activate(struct thread *td)
2884 {
2885 struct proc *p = td->td_proc;
2886 pmap_t pmap, oldpmap;
2887 u_int64_t cr3;
2888
2889 critical_enter();
2890 pmap = vmspace_pmap(td->td_proc->p_vmspace);
2891 oldpmap = PCPU_GET(curpmap);
2892 #ifdef SMP
2893 if (oldpmap) /* XXX FIXME */
2894 atomic_clear_int(&oldpmap->pm_active, PCPU_GET(cpumask));
2895 atomic_set_int(&pmap->pm_active, PCPU_GET(cpumask));
2896 #else
2897 if (oldpmap) /* XXX FIXME */
2898 oldpmap->pm_active &= ~PCPU_GET(cpumask);
2899 pmap->pm_active |= PCPU_GET(cpumask);
2900 #endif
2901 cr3 = vtophys(pmap->pm_pml4);
2902 /* XXXKSE this is wrong.
2903 * pmap_activate is for the current thread on the current cpu
2904 */
2905 if (p->p_flag & P_SA) {
2906 /* Make sure all other cr3 entries are updated. */
2907 /* what if they are running? XXXKSE (maybe abort them) */
2908 FOREACH_THREAD_IN_PROC(p, td) {
2909 td->td_pcb->pcb_cr3 = cr3;
2910 }
2911 } else {
2912 td->td_pcb->pcb_cr3 = cr3;
2913 }
2914 load_cr3(cr3);
2915 critical_exit();
2916 }
2917
2918 vm_offset_t
2919 pmap_addr_hint(vm_object_t obj, vm_offset_t addr, vm_size_t size)
2920 {
2921
2922 if ((obj == NULL) || (size < NBPDR) || (obj->type != OBJT_DEVICE)) {
2923 return addr;
2924 }
2925
2926 addr = (addr + (NBPDR - 1)) & ~(NBPDR - 1);
2927 return addr;
2928 }