FreeBSD/Linux Kernel Cross Reference
sys/amd64/amd64/pmap.c
1 /*-
2 * Copyright (c) 1991 Regents of the University of California.
3 * All rights reserved.
4 * Copyright (c) 1994 John S. Dyson
5 * All rights reserved.
6 * Copyright (c) 1994 David Greenman
7 * All rights reserved.
8 * Copyright (c) 2003 Peter Wemm
9 * All rights reserved.
10 *
11 * This code is derived from software contributed to Berkeley by
12 * the Systems Programming Group of the University of Utah Computer
13 * Science Department and William Jolitz of UUNET Technologies Inc.
14 *
15 * Redistribution and use in source and binary forms, with or without
16 * modification, are permitted provided that the following conditions
17 * are met:
18 * 1. Redistributions of source code must retain the above copyright
19 * notice, this list of conditions and the following disclaimer.
20 * 2. Redistributions in binary form must reproduce the above copyright
21 * notice, this list of conditions and the following disclaimer in the
22 * documentation and/or other materials provided with the distribution.
23 * 3. All advertising materials mentioning features or use of this software
24 * must display the following acknowledgement:
25 * This product includes software developed by the University of
26 * California, Berkeley and its contributors.
27 * 4. Neither the name of the University nor the names of its contributors
28 * may be used to endorse or promote products derived from this software
29 * without specific prior written permission.
30 *
31 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
32 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
33 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
34 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
35 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
36 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
37 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
38 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
39 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
40 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
41 * SUCH DAMAGE.
42 *
43 * from: @(#)pmap.c 7.7 (Berkeley) 5/12/91
44 */
45 /*-
46 * Copyright (c) 2003 Networks Associates Technology, Inc.
47 * All rights reserved.
48 *
49 * This software was developed for the FreeBSD Project by Jake Burkholder,
50 * Safeport Network Services, and Network Associates Laboratories, the
51 * Security Research Division of Network Associates, Inc. under
52 * DARPA/SPAWAR contract N66001-01-C-8035 ("CBOSS"), as part of the DARPA
53 * CHATS research program.
54 *
55 * Redistribution and use in source and binary forms, with or without
56 * modification, are permitted provided that the following conditions
57 * are met:
58 * 1. Redistributions of source code must retain the above copyright
59 * notice, this list of conditions and the following disclaimer.
60 * 2. Redistributions in binary form must reproduce the above copyright
61 * notice, this list of conditions and the following disclaimer in the
62 * documentation and/or other materials provided with the distribution.
63 *
64 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
65 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
66 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
67 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
68 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
69 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
70 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
71 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
72 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
73 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
74 * SUCH DAMAGE.
75 */
76
77 #include <sys/cdefs.h>
78 __FBSDID("$FreeBSD: releng/6.0/sys/amd64/amd64/pmap.c 151793 2005-10-28 06:49:49Z ade $");
79
80 /*
81 * Manages physical address maps.
82 *
83 * In addition to hardware address maps, this
84 * module is called upon to provide software-use-only
85 * maps which may or may not be stored in the same
86 * form as hardware maps. These pseudo-maps are
87 * used to store intermediate results from copy
88 * operations to and from address spaces.
89 *
90 * Since the information managed by this module is
91 * also stored by the logical address mapping module,
92 * this module may throw away valid virtual-to-physical
93 * mappings at almost any time. However, invalidations
94 * of virtual-to-physical mappings must be done as
95 * requested.
96 *
97 * In order to cope with hardware architectures which
98 * make virtual-to-physical map invalidates expensive,
99 * this module may delay invalidation or reduced-protection
100 * operations until such time as they are actually
101 * necessary. This module is given full information as
102 * to which processors are currently using which maps,
103 * and to when physical maps must be made correct.
104 */
105
106 #include "opt_msgbuf.h"
107 #include "opt_kstack_pages.h"
108
109 #include <sys/param.h>
110 #include <sys/systm.h>
111 #include <sys/kernel.h>
112 #include <sys/lock.h>
113 #include <sys/malloc.h>
114 #include <sys/mman.h>
115 #include <sys/msgbuf.h>
116 #include <sys/mutex.h>
117 #include <sys/proc.h>
118 #include <sys/sx.h>
119 #include <sys/vmmeter.h>
120 #include <sys/sched.h>
121 #include <sys/sysctl.h>
122 #ifdef SMP
123 #include <sys/smp.h>
124 #endif
125
126 #include <vm/vm.h>
127 #include <vm/vm_param.h>
128 #include <vm/vm_kern.h>
129 #include <vm/vm_page.h>
130 #include <vm/vm_map.h>
131 #include <vm/vm_object.h>
132 #include <vm/vm_extern.h>
133 #include <vm/vm_pageout.h>
134 #include <vm/vm_pager.h>
135 #include <vm/uma.h>
136
137 #include <machine/cpu.h>
138 #include <machine/cputypes.h>
139 #include <machine/md_var.h>
140 #include <machine/pcb.h>
141 #include <machine/specialreg.h>
142 #ifdef SMP
143 #include <machine/smp.h>
144 #endif
145
146 #ifndef PMAP_SHPGPERPROC
147 #define PMAP_SHPGPERPROC 200
148 #endif
149
150 #if defined(DIAGNOSTIC)
151 #define PMAP_DIAGNOSTIC
152 #endif
153
154 #define MINPV 2048
155
156 #if !defined(PMAP_DIAGNOSTIC)
157 #define PMAP_INLINE __inline
158 #else
159 #define PMAP_INLINE
160 #endif
161
162 struct pmap kernel_pmap_store;
163
164 vm_paddr_t avail_start; /* PA of first available physical page */
165 vm_paddr_t avail_end; /* PA of last available physical page */
166 vm_offset_t virtual_avail; /* VA of first avail page (after kernel bss) */
167 vm_offset_t virtual_end; /* VA of last avail page (end of kernel AS) */
168
169 static int nkpt;
170 static int ndmpdp;
171 static vm_paddr_t dmaplimit;
172 vm_offset_t kernel_vm_end;
173 pt_entry_t pg_nx;
174
175 static u_int64_t KPTphys; /* phys addr of kernel level 1 */
176 static u_int64_t KPDphys; /* phys addr of kernel level 2 */
177 static u_int64_t KPDPphys; /* phys addr of kernel level 3 */
178 u_int64_t KPML4phys; /* phys addr of kernel level 4 */
179
180 static u_int64_t DMPDphys; /* phys addr of direct mapped level 2 */
181 static u_int64_t DMPDPphys; /* phys addr of direct mapped level 3 */
182
183 /*
184 * Data for the pv entry allocation mechanism
185 */
186 static uma_zone_t pvzone;
187 static struct vm_object pvzone_obj;
188 static int pv_entry_count = 0, pv_entry_max = 0, pv_entry_high_water = 0;
189 int pmap_pagedaemon_waken;
190
191 /*
192 * All those kernel PT submaps that BSD is so fond of
193 */
194 pt_entry_t *CMAP1 = 0;
195 caddr_t CADDR1 = 0;
196 struct msgbuf *msgbufp = 0;
197
198 /*
199 * Crashdump maps.
200 */
201 static caddr_t crashdumpmap;
202
203 static PMAP_INLINE void free_pv_entry(pv_entry_t pv);
204 static pv_entry_t get_pv_entry(void);
205 static void pmap_clear_ptes(vm_page_t m, long bit);
206
207 static int pmap_remove_pte(pmap_t pmap, pt_entry_t *ptq,
208 vm_offset_t sva, pd_entry_t ptepde);
209 static void pmap_remove_page(struct pmap *pmap, vm_offset_t va);
210 static int pmap_remove_entry(struct pmap *pmap, vm_page_t m,
211 vm_offset_t va, pd_entry_t ptepde);
212 static void pmap_insert_entry(pmap_t pmap, vm_offset_t va, vm_page_t m);
213
214 static vm_page_t pmap_allocpte(pmap_t pmap, vm_offset_t va, int flags);
215
216 static vm_page_t _pmap_allocpte(pmap_t pmap, vm_pindex_t ptepindex, int flags);
217 static int _pmap_unwire_pte_hold(pmap_t pmap, vm_offset_t va, vm_page_t m);
218 static int pmap_unuse_pt(pmap_t, vm_offset_t, pd_entry_t);
219 static vm_offset_t pmap_kmem_choose(vm_offset_t addr);
220
221 CTASSERT(1 << PDESHIFT == sizeof(pd_entry_t));
222 CTASSERT(1 << PTESHIFT == sizeof(pt_entry_t));
223
224 /*
225 * Move the kernel virtual free pointer to the next
226 * 2MB. This is used to help improve performance
227 * by using a large (2MB) page for much of the kernel
228 * (.text, .data, .bss)
229 */
230 static vm_offset_t
231 pmap_kmem_choose(vm_offset_t addr)
232 {
233 vm_offset_t newaddr = addr;
234
235 newaddr = (addr + (NBPDR - 1)) & ~(NBPDR - 1);
236 return newaddr;
237 }
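/*
 * NBPDR is the size of a 2MB superpage (1 << PDRSHIFT), so the
 * expression above simply rounds "addr" up to the next 2MB boundary;
 * for example, KERNBASE + 0x1234 becomes KERNBASE + 0x200000.
 */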
238
239 /********************/
240 /* Inline functions */
241 /********************/
242
243 /* Return a non-clipped PD index for a given VA */
244 static __inline vm_pindex_t
245 pmap_pde_pindex(vm_offset_t va)
246 {
247 return va >> PDRSHIFT;
248 }
249
250
251 /* Return various clipped indexes for a given VA */
252 static __inline vm_pindex_t
253 pmap_pte_index(vm_offset_t va)
254 {
255
256 return ((va >> PAGE_SHIFT) & ((1ul << NPTEPGSHIFT) - 1));
257 }
258
259 static __inline vm_pindex_t
260 pmap_pde_index(vm_offset_t va)
261 {
262
263 return ((va >> PDRSHIFT) & ((1ul << NPDEPGSHIFT) - 1));
264 }
265
266 static __inline vm_pindex_t
267 pmap_pdpe_index(vm_offset_t va)
268 {
269
270 return ((va >> PDPSHIFT) & ((1ul << NPDPEPGSHIFT) - 1));
271 }
272
273 static __inline vm_pindex_t
274 pmap_pml4e_index(vm_offset_t va)
275 {
276
277 return ((va >> PML4SHIFT) & ((1ul << NPML4EPGSHIFT) - 1));
278 }
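/*
 * For reference, the four index helpers above decode a canonical
 * amd64 virtual address the same way the hardware does:
 *
 *	bits 47-39	PML4 index	(pmap_pml4e_index)
 *	bits 38-30	PDP index	(pmap_pdpe_index)
 *	bits 29-21	PD index	(pmap_pde_index)
 *	bits 20-12	PT index	(pmap_pte_index)
 *	bits 11-0	byte offset within the 4K page
 */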
279
280 /* Return a pointer to the PML4 slot that corresponds to a VA */
281 static __inline pml4_entry_t *
282 pmap_pml4e(pmap_t pmap, vm_offset_t va)
283 {
284
285 if (!pmap)
286 return NULL;
287 return (&pmap->pm_pml4[pmap_pml4e_index(va)]);
288 }
289
290 /* Return a pointer to the PDP slot that corresponds to a VA */
291 static __inline pdp_entry_t *
292 pmap_pdpe(pmap_t pmap, vm_offset_t va)
293 {
294 pml4_entry_t *pml4e;
295 pdp_entry_t *pdpe;
296
297 pml4e = pmap_pml4e(pmap, va);
298 if (pml4e == NULL || (*pml4e & PG_V) == 0)
299 return NULL;
300 pdpe = (pdp_entry_t *)PHYS_TO_DMAP(*pml4e & PG_FRAME);
301 return (&pdpe[pmap_pdpe_index(va)]);
302 }
303
304 /* Return a pointer to the PD slot that corresponds to a VA */
305 static __inline pd_entry_t *
306 pmap_pde(pmap_t pmap, vm_offset_t va)
307 {
308 pdp_entry_t *pdpe;
309 pd_entry_t *pde;
310
311 pdpe = pmap_pdpe(pmap, va);
312 if (pdpe == NULL || (*pdpe & PG_V) == 0)
313 return NULL;
314 pde = (pd_entry_t *)PHYS_TO_DMAP(*pdpe & PG_FRAME);
315 return (&pde[pmap_pde_index(va)]);
316 }
317
318 /* Return a pointer to the PT slot that corresponds to a VA */
319 static __inline pt_entry_t *
320 pmap_pde_to_pte(pd_entry_t *pde, vm_offset_t va)
321 {
322 pt_entry_t *pte;
323
324 pte = (pt_entry_t *)PHYS_TO_DMAP(*pde & PG_FRAME);
325 return (&pte[pmap_pte_index(va)]);
326 }
327
328 /* Return a pointer to the PT slot that corresponds to a VA */
329 static __inline pt_entry_t *
330 pmap_pte(pmap_t pmap, vm_offset_t va)
331 {
332 pd_entry_t *pde;
333
334 pde = pmap_pde(pmap, va);
335 if (pde == NULL || (*pde & PG_V) == 0)
336 return NULL;
337 if ((*pde & PG_PS) != 0) /* compat with i386 pmap_pte() */
338 return ((pt_entry_t *)pde);
339 return (pmap_pde_to_pte(pde, va));
340 }
341
342
343 static __inline pt_entry_t *
344 pmap_pte_pde(pmap_t pmap, vm_offset_t va, pd_entry_t *ptepde)
345 {
346 pd_entry_t *pde;
347
348 pde = pmap_pde(pmap, va);
349 if (pde == NULL || (*pde & PG_V) == 0)
350 return NULL;
351 *ptepde = *pde;
352 if ((*pde & PG_PS) != 0) /* compat with i386 pmap_pte() */
353 return ((pt_entry_t *)pde);
354 return (pmap_pde_to_pte(pde, va));
355 }
356
357
358 PMAP_INLINE pt_entry_t *
359 vtopte(vm_offset_t va)
360 {
361 u_int64_t mask = ((1ul << (NPTEPGSHIFT + NPDEPGSHIFT + NPDPEPGSHIFT + NPML4EPGSHIFT)) - 1);
362
363 return (PTmap + ((va >> PAGE_SHIFT) & mask));
364 }
365
366 static __inline pd_entry_t *
367 vtopde(vm_offset_t va)
368 {
369 u_int64_t mask = ((1ul << (NPDEPGSHIFT + NPDPEPGSHIFT + NPML4EPGSHIFT)) - 1);
370
371 return (PDmap + ((va >> PDRSHIFT) & mask));
372 }
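/*
 * vtopte() and vtopde() depend on the recursive mapping established in
 * create_pagetables(): PML4 slot PML4PML4I points back at the PML4
 * page itself, so the page tables of the currently active address
 * space appear as a linear array of PTEs at PTmap (and of PDEs at
 * PDmap).  Shifting the VA right by PAGE_SHIFT (or PDRSHIFT) and
 * masking therefore indexes straight to the entry that maps that VA,
 * but only for the pmap that is currently loaded in %cr3.
 */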
373
374 static u_int64_t
375 allocpages(int n)
376 {
377 u_int64_t ret;
378
379 ret = avail_start;
380 bzero((void *)ret, n * PAGE_SIZE);
381 avail_start += n * PAGE_SIZE;
382 return (ret);
383 }
384
385 static void
386 create_pagetables(void)
387 {
388 int i;
389
390 /* Allocate pages */
391 KPTphys = allocpages(NKPT);
392 KPML4phys = allocpages(1);
393 KPDPphys = allocpages(NKPML4E);
394 KPDphys = allocpages(NKPDPE);
395
396 ndmpdp = (ptoa(Maxmem) + NBPDP - 1) >> PDPSHIFT;
397 if (ndmpdp < 4) /* Minimum 4GB of dirmap */
398 ndmpdp = 4;
399 DMPDPphys = allocpages(NDMPML4E);
400 DMPDphys = allocpages(ndmpdp);
401 dmaplimit = (vm_paddr_t)ndmpdp << PDPSHIFT;
402
403 /* Fill in the underlying page table pages */
404 /* Read-only from zero to physfree */
405 /* XXX not fully used, underneath 2M pages */
406 for (i = 0; (i << PAGE_SHIFT) < avail_start; i++) {
407 ((pt_entry_t *)KPTphys)[i] = i << PAGE_SHIFT;
408 ((pt_entry_t *)KPTphys)[i] |= PG_RW | PG_V | PG_G;
409 }
410
411 /* Now map the page tables at their location within PTmap */
412 for (i = 0; i < NKPT; i++) {
413 ((pd_entry_t *)KPDphys)[i] = KPTphys + (i << PAGE_SHIFT);
414 ((pd_entry_t *)KPDphys)[i] |= PG_RW | PG_V;
415 }
416
417 /* Map from zero to end of allocations under 2M pages */
418 /* This replaces some of the KPTphys entries above */
419 for (i = 0; (i << PDRSHIFT) < avail_start; i++) {
420 ((pd_entry_t *)KPDphys)[i] = i << PDRSHIFT;
421 ((pd_entry_t *)KPDphys)[i] |= PG_RW | PG_V | PG_PS | PG_G;
422 }
423
424 /* And connect up the PD to the PDP */
425 for (i = 0; i < NKPDPE; i++) {
426 ((pdp_entry_t *)KPDPphys)[i + KPDPI] = KPDphys + (i << PAGE_SHIFT);
427 ((pdp_entry_t *)KPDPphys)[i + KPDPI] |= PG_RW | PG_V | PG_U;
428 }
429
430
431 /* Now set up the direct map space using 2MB pages */
432 for (i = 0; i < NPDEPG * ndmpdp; i++) {
433 ((pd_entry_t *)DMPDphys)[i] = (vm_paddr_t)i << PDRSHIFT;
434 ((pd_entry_t *)DMPDphys)[i] |= PG_RW | PG_V | PG_PS | PG_G;
435 }
436
437 /* And the direct map space's PDP */
438 for (i = 0; i < ndmpdp; i++) {
439 ((pdp_entry_t *)DMPDPphys)[i] = DMPDphys + (i << PAGE_SHIFT);
440 ((pdp_entry_t *)DMPDPphys)[i] |= PG_RW | PG_V | PG_U;
441 }
442
443 /* And recursively map PML4 to itself in order to get PTmap */
444 ((pdp_entry_t *)KPML4phys)[PML4PML4I] = KPML4phys;
445 ((pdp_entry_t *)KPML4phys)[PML4PML4I] |= PG_RW | PG_V | PG_U;
446
447 /* Connect the Direct Map slot up to the PML4 */
448 ((pdp_entry_t *)KPML4phys)[DMPML4I] = DMPDPphys;
449 ((pdp_entry_t *)KPML4phys)[DMPML4I] |= PG_RW | PG_V | PG_U;
450
451 /* Connect the KVA slot up to the PML4 */
452 ((pdp_entry_t *)KPML4phys)[KPML4I] = KPDPphys;
453 ((pdp_entry_t *)KPML4phys)[KPML4I] |= PG_RW | PG_V | PG_U;
454 }
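/*
 * In summary, the bootstrap page tables built above provide:
 *	KPML4phys[PML4PML4I] -> KPML4phys itself (recursive slot, PTmap)
 *	KPML4phys[DMPML4I]   -> DMPDPphys -> 2MB mappings of physical
 *			        memory (the direct map, at least 4GB)
 *	KPML4phys[KPML4I]    -> KPDPphys -> KPDphys -> KPTphys (the
 *			        kernel's KVA region at KERNBASE)
 */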
455
456 /*
457 * Bootstrap the system enough to run with virtual memory.
458 *
459 * On amd64 this is called after mapping has already been enabled
460 * and just syncs the pmap module with what has already been done.
461 * [We can't call it easily with mapping off since the kernel is not
462 * mapped with PA == VA, hence we would have to relocate every address
463 * from the linked base (virtual) address "KERNBASE" to the actual
464 * (physical) address starting relative to 0]
465 */
466 void
467 pmap_bootstrap(firstaddr)
468 vm_paddr_t *firstaddr;
469 {
470 vm_offset_t va;
471 pt_entry_t *pte, *unused;
472
473 avail_start = *firstaddr;
474
475 /*
476 * Create an initial set of page tables to run the kernel in.
477 */
478 create_pagetables();
479 *firstaddr = avail_start;
480
481 virtual_avail = (vm_offset_t) KERNBASE + avail_start;
482 virtual_avail = pmap_kmem_choose(virtual_avail);
483
484 virtual_end = VM_MAX_KERNEL_ADDRESS;
485
486
487 /* XXX do %cr0 as well */
488 load_cr4(rcr4() | CR4_PGE | CR4_PSE);
489 load_cr3(KPML4phys);
490
491 /*
492 * Initialize the kernel pmap (which is statically allocated).
493 */
494 PMAP_LOCK_INIT(kernel_pmap);
495 kernel_pmap->pm_pml4 = (pdp_entry_t *) (KERNBASE + KPML4phys);
496 kernel_pmap->pm_active = -1; /* don't allow deactivation */
497 TAILQ_INIT(&kernel_pmap->pm_pvlist);
498 nkpt = NKPT;
499
500 /*
501 * Reserve some special page table entries/VA space for temporary
502 * mapping of pages.
503 */
504 #define SYSMAP(c, p, v, n) \
505 v = (c)va; va += ((n)*PAGE_SIZE); p = pte; pte += (n);
506
507 va = virtual_avail;
508 pte = vtopte(va);
509
510 /*
511 * CMAP1 is only used for the memory test.
512 */
513 SYSMAP(caddr_t, CMAP1, CADDR1, 1)
514
515 /*
516 * Crashdump maps.
517 */
518 SYSMAP(caddr_t, unused, crashdumpmap, MAXDUMPPGS)
519
520 /*
521 * msgbufp is used to map the system message buffer.
522 */
523 SYSMAP(struct msgbuf *, unused, msgbufp, atop(round_page(MSGBUF_SIZE)))
524
525 virtual_avail = va;
526
527 *CMAP1 = 0;
528
529 invltlb();
530 }
531
532 /*
533 * Initialize a vm_page's machine-dependent fields.
534 */
535 void
536 pmap_page_init(vm_page_t m)
537 {
538
539 TAILQ_INIT(&m->md.pv_list);
540 m->md.pv_list_count = 0;
541 }
542
543 /*
544 * Initialize the pmap module.
545 * Called by vm_init, to initialize any structures that the pmap
546 * system needs to map virtual memory.
547 */
548 void
549 pmap_init(void)
550 {
551
552 /*
553 * init the pv free list
554 */
555 pvzone = uma_zcreate("PV ENTRY", sizeof (struct pv_entry), NULL, NULL,
556 NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_VM | UMA_ZONE_NOFREE);
557 uma_prealloc(pvzone, MINPV);
558 }
559
560 /*
561 * Initialize the address space (zone) for the pv_entries. Set a
562 * high water mark so that the system can recover from excessive
563 * numbers of pv entries.
564 */
565 void
566 pmap_init2()
567 {
568 int shpgperproc = PMAP_SHPGPERPROC;
569
570 TUNABLE_INT_FETCH("vm.pmap.shpgperproc", &shpgperproc);
571 pv_entry_max = shpgperproc * maxproc + vm_page_array_size;
572 TUNABLE_INT_FETCH("vm.pmap.pv_entries", &pv_entry_max);
573 pv_entry_high_water = 9 * (pv_entry_max / 10);
574 uma_zone_set_obj(pvzone, &pvzone_obj, pv_entry_max);
575 }
576
577
578 /***************************************************
579 * Low level helper routines.....
580 ***************************************************/
581
582 #if defined(PMAP_DIAGNOSTIC)
583
584 /*
585 * This code checks for non-writeable/modified pages.
586 * This should be an invalid condition.
587 */
588 static int
589 pmap_nw_modified(pt_entry_t ptea)
590 {
591 int pte;
592
593 pte = (int) ptea;
594
595 if ((pte & (PG_M|PG_RW)) == PG_M)
596 return 1;
597 else
598 return 0;
599 }
600 #endif
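/*
 * A PTE with PG_M set but PG_RW clear should never occur, since the
 * processor only sets PG_M when a write succeeds through a writable
 * mapping; pmap_nw_modified() flags exactly that combination.
 */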
601
602
603 /*
604 * this routine defines the region(s) of memory that should
605 * not be tested for the modified bit.
606 */
607 static PMAP_INLINE int
608 pmap_track_modified(vm_offset_t va)
609 {
610 if ((va < kmi.clean_sva) || (va >= kmi.clean_eva))
611 return 1;
612 else
613 return 0;
614 }
615
616 #ifdef SMP
617 /*
618 * For SMP, these functions have to use the IPI mechanism for coherence.
619 */
620 void
621 pmap_invalidate_page(pmap_t pmap, vm_offset_t va)
622 {
623 u_int cpumask;
624 u_int other_cpus;
625
626 if (smp_started) {
627 if (!(read_rflags() & PSL_I))
628 panic("%s: interrupts disabled", __func__);
629 mtx_lock_spin(&smp_ipi_mtx);
630 } else
631 critical_enter();
632 /*
633 * We need to disable interrupt preemption but MUST NOT have
634 * interrupts disabled here.
635 * XXX we may need to hold schedlock to get a coherent pm_active
636 * XXX critical sections disable interrupts again
637 */
638 if (pmap == kernel_pmap || pmap->pm_active == all_cpus) {
639 invlpg(va);
640 smp_invlpg(va);
641 } else {
642 cpumask = PCPU_GET(cpumask);
643 other_cpus = PCPU_GET(other_cpus);
644 if (pmap->pm_active & cpumask)
645 invlpg(va);
646 if (pmap->pm_active & other_cpus)
647 smp_masked_invlpg(pmap->pm_active & other_cpus, va);
648 }
649 if (smp_started)
650 mtx_unlock_spin(&smp_ipi_mtx);
651 else
652 critical_exit();
653 }
654
655 void
656 pmap_invalidate_range(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
657 {
658 u_int cpumask;
659 u_int other_cpus;
660 vm_offset_t addr;
661
662 if (smp_started) {
663 if (!(read_rflags() & PSL_I))
664 panic("%s: interrupts disabled", __func__);
665 mtx_lock_spin(&smp_ipi_mtx);
666 } else
667 critical_enter();
668 /*
669 * We need to disable interrupt preemption but MUST NOT have
670 * interrupts disabled here.
671 * XXX we may need to hold schedlock to get a coherent pm_active
672 * XXX critical sections disable interrupts again
673 */
674 if (pmap == kernel_pmap || pmap->pm_active == all_cpus) {
675 for (addr = sva; addr < eva; addr += PAGE_SIZE)
676 invlpg(addr);
677 smp_invlpg_range(sva, eva);
678 } else {
679 cpumask = PCPU_GET(cpumask);
680 other_cpus = PCPU_GET(other_cpus);
681 if (pmap->pm_active & cpumask)
682 for (addr = sva; addr < eva; addr += PAGE_SIZE)
683 invlpg(addr);
684 if (pmap->pm_active & other_cpus)
685 smp_masked_invlpg_range(pmap->pm_active & other_cpus,
686 sva, eva);
687 }
688 if (smp_started)
689 mtx_unlock_spin(&smp_ipi_mtx);
690 else
691 critical_exit();
692 }
693
694 void
695 pmap_invalidate_all(pmap_t pmap)
696 {
697 u_int cpumask;
698 u_int other_cpus;
699
700 if (smp_started) {
701 if (!(read_rflags() & PSL_I))
702 panic("%s: interrupts disabled", __func__);
703 mtx_lock_spin(&smp_ipi_mtx);
704 } else
705 critical_enter();
706 /*
707 * We need to disable interrupt preemption but MUST NOT have
708 * interrupts disabled here.
709 * XXX we may need to hold schedlock to get a coherent pm_active
710 * XXX critical sections disable interrupts again
711 */
712 if (pmap == kernel_pmap || pmap->pm_active == all_cpus) {
713 invltlb();
714 smp_invltlb();
715 } else {
716 cpumask = PCPU_GET(cpumask);
717 other_cpus = PCPU_GET(other_cpus);
718 if (pmap->pm_active & cpumask)
719 invltlb();
720 if (pmap->pm_active & other_cpus)
721 smp_masked_invltlb(pmap->pm_active & other_cpus);
722 }
723 if (smp_started)
724 mtx_unlock_spin(&smp_ipi_mtx);
725 else
726 critical_exit();
727 }
728 #else /* !SMP */
729 /*
730 * Normal, non-SMP, invalidation functions.
731 * We inline these within pmap.c for speed.
732 */
733 PMAP_INLINE void
734 pmap_invalidate_page(pmap_t pmap, vm_offset_t va)
735 {
736
737 if (pmap == kernel_pmap || pmap->pm_active)
738 invlpg(va);
739 }
740
741 PMAP_INLINE void
742 pmap_invalidate_range(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
743 {
744 vm_offset_t addr;
745
746 if (pmap == kernel_pmap || pmap->pm_active)
747 for (addr = sva; addr < eva; addr += PAGE_SIZE)
748 invlpg(addr);
749 }
750
751 PMAP_INLINE void
752 pmap_invalidate_all(pmap_t pmap)
753 {
754
755 if (pmap == kernel_pmap || pmap->pm_active)
756 invltlb();
757 }
758 #endif /* !SMP */
759
760 /*
761 * Are we current address space or kernel?
762 */
763 static __inline int
764 pmap_is_current(pmap_t pmap)
765 {
766 return (pmap == kernel_pmap ||
767 (pmap->pm_pml4[PML4PML4I] & PG_FRAME) == (PML4pml4e[0] & PG_FRAME));
768 }
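/*
 * In the amd64 pmap headers, PML4pml4e points at the recursive slot of
 * the page tables currently loaded in %cr3, so PML4pml4e[0] & PG_FRAME
 * is the physical address of the active PML4 page.  Comparing it with
 * this pmap's own recursive slot tells us whether the pmap is the one
 * the CPU is using right now.
 */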
769
770 /*
771 * Routine: pmap_extract
772 * Function:
773 * Extract the physical page address associated
774 * with the given map/virtual_address pair.
775 */
776 vm_paddr_t
777 pmap_extract(pmap_t pmap, vm_offset_t va)
778 {
779 vm_paddr_t rtval;
780 pt_entry_t *pte;
781 pd_entry_t pde, *pdep;
782
783 rtval = 0;
784 PMAP_LOCK(pmap);
785 pdep = pmap_pde(pmap, va);
786 if (pdep != NULL) {
787 pde = *pdep;
788 if (pde) {
789 if ((pde & PG_PS) != 0) {
790 rtval = (pde & ~PDRMASK) | (va & PDRMASK);
791 PMAP_UNLOCK(pmap);
792 return rtval;
793 }
794 pte = pmap_pde_to_pte(pdep, va);
795 rtval = (*pte & PG_FRAME) | (va & PAGE_MASK);
796 }
797 }
798 PMAP_UNLOCK(pmap);
799 return (rtval);
800 }
801
802 /*
803 * Routine: pmap_extract_and_hold
804 * Function:
805 * Atomically extract and hold the physical page
806 * with the given pmap and virtual address pair
807 * if that mapping permits the given protection.
808 */
809 vm_page_t
810 pmap_extract_and_hold(pmap_t pmap, vm_offset_t va, vm_prot_t prot)
811 {
812 pd_entry_t pde, *pdep;
813 pt_entry_t pte;
814 vm_page_t m;
815
816 m = NULL;
817 vm_page_lock_queues();
818 PMAP_LOCK(pmap);
819 pdep = pmap_pde(pmap, va);
820 if (pdep != NULL && (pde = *pdep)) {
821 if (pde & PG_PS) {
822 if ((pde & PG_RW) || (prot & VM_PROT_WRITE) == 0) {
823 m = PHYS_TO_VM_PAGE((pde & ~PDRMASK) |
824 (va & PDRMASK));
825 vm_page_hold(m);
826 }
827 } else {
828 pte = *pmap_pde_to_pte(pdep, va);
829 if ((pte & PG_V) &&
830 ((pte & PG_RW) || (prot & VM_PROT_WRITE) == 0)) {
831 m = PHYS_TO_VM_PAGE(pte & PG_FRAME);
832 vm_page_hold(m);
833 }
834 }
835 }
836 vm_page_unlock_queues();
837 PMAP_UNLOCK(pmap);
838 return (m);
839 }
840
841 vm_paddr_t
842 pmap_kextract(vm_offset_t va)
843 {
844 pd_entry_t *pde;
845 vm_paddr_t pa;
846
847 if (va >= DMAP_MIN_ADDRESS && va < DMAP_MAX_ADDRESS) {
848 pa = DMAP_TO_PHYS(va);
849 } else {
850 pde = vtopde(va);
851 if (*pde & PG_PS) {
852 pa = (*pde & ~(NBPDR - 1)) | (va & (NBPDR - 1));
853 } else {
854 pa = *vtopte(va);
855 pa = (pa & PG_FRAME) | (va & PAGE_MASK);
856 }
857 }
858 return pa;
859 }
860
861 /***************************************************
862 * Low level mapping routines.....
863 ***************************************************/
864
865 /*
866 * Add a wired page to the kva.
867 * Note: not SMP coherent.
868 */
869 PMAP_INLINE void
870 pmap_kenter(vm_offset_t va, vm_paddr_t pa)
871 {
872 pt_entry_t *pte;
873
874 pte = vtopte(va);
875 pte_store(pte, pa | PG_RW | PG_V | PG_G);
876 }
877
878 /*
879 * Remove a page from the kernel pagetables.
880 * Note: not SMP coherent.
881 */
882 PMAP_INLINE void
883 pmap_kremove(vm_offset_t va)
884 {
885 pt_entry_t *pte;
886
887 pte = vtopte(va);
888 pte_clear(pte);
889 }
890
891 /*
892 * Used to map a range of physical addresses into kernel
893 * virtual address space.
894 *
895 * The value passed in '*virt' is a suggested virtual address for
896 * the mapping. Architectures which can support a direct-mapped
897 * physical to virtual region can return the appropriate address
898 * within that region, leaving '*virt' unchanged. Other
899 * architectures should map the pages starting at '*virt' and
900 * update '*virt' with the first usable address after the mapped
901 * region.
902 */
903 vm_offset_t
904 pmap_map(vm_offset_t *virt, vm_paddr_t start, vm_paddr_t end, int prot)
905 {
906 return PHYS_TO_DMAP(start);
907 }
908
909
910 /*
911 * Add a list of wired pages to the kva.
912 * This routine is only used for temporary
913 * kernel mappings that do not need to have
914 * page modification or references recorded.
915 * Note that old mappings are simply written
916 * over. The page *must* be wired.
917 * Note: SMP coherent. Uses a ranged shootdown IPI.
918 */
919 void
920 pmap_qenter(vm_offset_t sva, vm_page_t *m, int count)
921 {
922 vm_offset_t va;
923
924 va = sva;
925 while (count-- > 0) {
926 pmap_kenter(va, VM_PAGE_TO_PHYS(*m));
927 va += PAGE_SIZE;
928 m++;
929 }
930 pmap_invalidate_range(kernel_pmap, sva, va);
931 }
932
933 /*
934 * This routine tears out page mappings from the
935 * kernel -- it is meant only for temporary mappings.
936 * Note: SMP coherent. Uses a ranged shootdown IPI.
937 */
938 void
939 pmap_qremove(vm_offset_t sva, int count)
940 {
941 vm_offset_t va;
942
943 va = sva;
944 while (count-- > 0) {
945 pmap_kremove(va);
946 va += PAGE_SIZE;
947 }
948 pmap_invalidate_range(kernel_pmap, sva, va);
949 }
950
951 /***************************************************
952 * Page table page management routines.....
953 ***************************************************/
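/*
 * Page table pages are identified by a pindex that encodes both their
 * level and the slot they back:
 *	pindex < NUPDE			a PT page (backs one PD entry)
 *	NUPDE <= pindex < NUPDE+NUPDPE	a PD page (backs one PDP entry)
 *	pindex >= NUPDE+NUPDPE		a PDP page (backs one PML4 entry)
 * The allocation and unwire routines below rely on this encoding to
 * walk up and down the hierarchy.
 */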
954
955 /*
956 * This routine drops the wire count on a page table page; when
957 * the count reaches zero, the page is unmapped and freed.
958 */
959 static PMAP_INLINE int
960 pmap_unwire_pte_hold(pmap_t pmap, vm_offset_t va, vm_page_t m)
961 {
962
963 --m->wire_count;
964 if (m->wire_count == 0)
965 return _pmap_unwire_pte_hold(pmap, va, m);
966 else
967 return 0;
968 }
969
970 static int
971 _pmap_unwire_pte_hold(pmap_t pmap, vm_offset_t va, vm_page_t m)
972 {
973 vm_offset_t pteva;
974
975 /*
976 * unmap the page table page
977 */
978 if (m->pindex >= (NUPDE + NUPDPE)) {
979 /* PDP page */
980 pml4_entry_t *pml4;
981 pml4 = pmap_pml4e(pmap, va);
982 pteva = (vm_offset_t) PDPmap + amd64_ptob(m->pindex - (NUPDE + NUPDPE));
983 *pml4 = 0;
984 } else if (m->pindex >= NUPDE) {
985 /* PD page */
986 pdp_entry_t *pdp;
987 pdp = pmap_pdpe(pmap, va);
988 pteva = (vm_offset_t) PDmap + amd64_ptob(m->pindex - NUPDE);
989 *pdp = 0;
990 } else {
991 /* PTE page */
992 pd_entry_t *pd;
993 pd = pmap_pde(pmap, va);
994 pteva = (vm_offset_t) PTmap + amd64_ptob(m->pindex);
995 *pd = 0;
996 }
997 --pmap->pm_stats.resident_count;
998 if (m->pindex < NUPDE) {
999 /* We just released a PT, unhold the matching PD */
1000 vm_page_t pdpg;
1001
1002 pdpg = PHYS_TO_VM_PAGE(*pmap_pdpe(pmap, va) & PG_FRAME);
1003 pmap_unwire_pte_hold(pmap, va, pdpg);
1004 }
1005 if (m->pindex >= NUPDE && m->pindex < (NUPDE + NUPDPE)) {
1006 /* We just released a PD, unhold the matching PDP */
1007 vm_page_t pdppg;
1008
1009 pdppg = PHYS_TO_VM_PAGE(*pmap_pml4e(pmap, va) & PG_FRAME);
1010 pmap_unwire_pte_hold(pmap, va, pdppg);
1011 }
1012
1013 /*
1014 * Do an invltlb to make the invalidated mapping
1015 * take effect immediately.
1016 */
1017 pmap_invalidate_page(pmap, pteva);
1018
1019 vm_page_free_zero(m);
1020 atomic_subtract_int(&cnt.v_wire_count, 1);
1021 return 1;
1022 }
1023
1024 /*
1025 * After removing a page table entry, this routine is used to
1026 * conditionally free the page, and manage the hold/wire counts.
1027 */
1028 static int
1029 pmap_unuse_pt(pmap_t pmap, vm_offset_t va, pd_entry_t ptepde)
1030 {
1031 vm_page_t mpte;
1032
1033 if (va >= VM_MAXUSER_ADDRESS)
1034 return 0;
1035 KASSERT(ptepde != 0, ("pmap_unuse_pt: ptepde != 0"));
1036 mpte = PHYS_TO_VM_PAGE(ptepde & PG_FRAME);
1037 return pmap_unwire_pte_hold(pmap, va, mpte);
1038 }
1039
1040 void
1041 pmap_pinit0(pmap)
1042 struct pmap *pmap;
1043 {
1044
1045 PMAP_LOCK_INIT(pmap);
1046 pmap->pm_pml4 = (pml4_entry_t *)(KERNBASE + KPML4phys);
1047 pmap->pm_active = 0;
1048 TAILQ_INIT(&pmap->pm_pvlist);
1049 bzero(&pmap->pm_stats, sizeof pmap->pm_stats);
1050 }
1051
1052 /*
1053 * Initialize a preallocated and zeroed pmap structure,
1054 * such as one in a vmspace structure.
1055 */
1056 void
1057 pmap_pinit(pmap)
1058 register struct pmap *pmap;
1059 {
1060 vm_page_t pml4pg;
1061 static vm_pindex_t color;
1062
1063 PMAP_LOCK_INIT(pmap);
1064
1065 /*
1066 * allocate the top-level (PML4) page
1067 */
1068 while ((pml4pg = vm_page_alloc(NULL, color++, VM_ALLOC_NOOBJ |
1069 VM_ALLOC_NORMAL | VM_ALLOC_WIRED | VM_ALLOC_ZERO)) == NULL)
1070 VM_WAIT;
1071
1072 pmap->pm_pml4 = (pml4_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(pml4pg));
1073
1074 if ((pml4pg->flags & PG_ZERO) == 0)
1075 pagezero(pmap->pm_pml4);
1076
1077 /* Wire in kernel global address entries. */
1078 pmap->pm_pml4[KPML4I] = KPDPphys | PG_RW | PG_V | PG_U;
1079 pmap->pm_pml4[DMPML4I] = DMPDPphys | PG_RW | PG_V | PG_U;
1080
1081 /* install self-referential address mapping entry(s) */
1082 pmap->pm_pml4[PML4PML4I] = VM_PAGE_TO_PHYS(pml4pg) | PG_V | PG_RW | PG_A | PG_M;
1083
1084 pmap->pm_active = 0;
1085 TAILQ_INIT(&pmap->pm_pvlist);
1086 bzero(&pmap->pm_stats, sizeof pmap->pm_stats);
1087 }
1088
1089 /*
1090 * this routine is called if the page table page is not
1091 * mapped correctly.
1092 *
1093 * Note: If a page allocation fails at page table level two or three,
1094 * one or two pages may be held during the wait, only to be released
1095 * afterwards. This conservative approach is easily argued to avoid
1096 * race conditions.
1097 */
1098 static vm_page_t
1099 _pmap_allocpte(pmap_t pmap, vm_pindex_t ptepindex, int flags)
1100 {
1101 vm_page_t m, pdppg, pdpg;
1102
1103 KASSERT((flags & (M_NOWAIT | M_WAITOK)) == M_NOWAIT ||
1104 (flags & (M_NOWAIT | M_WAITOK)) == M_WAITOK,
1105 ("_pmap_allocpte: flags is neither M_NOWAIT nor M_WAITOK"));
1106
1107 /*
1108 * Allocate a page table page.
1109 */
1110 if ((m = vm_page_alloc(NULL, ptepindex, VM_ALLOC_NOOBJ |
1111 VM_ALLOC_WIRED | VM_ALLOC_ZERO)) == NULL) {
1112 if (flags & M_WAITOK) {
1113 PMAP_UNLOCK(pmap);
1114 vm_page_unlock_queues();
1115 VM_WAIT;
1116 vm_page_lock_queues();
1117 PMAP_LOCK(pmap);
1118 }
1119
1120 /*
1121 * Indicate the need to retry. While waiting, the page table
1122 * page may have been allocated.
1123 */
1124 return (NULL);
1125 }
1126 if ((m->flags & PG_ZERO) == 0)
1127 pmap_zero_page(m);
1128
1129 /*
1130 * Map the pagetable page into the process address space, if
1131 * it isn't already there.
1132 */
1133
1134 pmap->pm_stats.resident_count++;
1135
1136 if (ptepindex >= (NUPDE + NUPDPE)) {
1137 pml4_entry_t *pml4;
1138 vm_pindex_t pml4index;
1139
1140 /* Wire up a new PDPE page */
1141 pml4index = ptepindex - (NUPDE + NUPDPE);
1142 pml4 = &pmap->pm_pml4[pml4index];
1143 *pml4 = VM_PAGE_TO_PHYS(m) | PG_U | PG_RW | PG_V | PG_A | PG_M;
1144
1145 } else if (ptepindex >= NUPDE) {
1146 vm_pindex_t pml4index;
1147 vm_pindex_t pdpindex;
1148 pml4_entry_t *pml4;
1149 pdp_entry_t *pdp;
1150
1151 /* Wire up a new PDE page */
1152 pdpindex = ptepindex - NUPDE;
1153 pml4index = pdpindex >> NPML4EPGSHIFT;
1154
1155 pml4 = &pmap->pm_pml4[pml4index];
1156 if ((*pml4 & PG_V) == 0) {
1157 /* Have to allocate a new pdp, recurse */
1158 if (_pmap_allocpte(pmap, NUPDE + NUPDPE + pml4index,
1159 flags) == NULL) {
1160 --m->wire_count;
1161 vm_page_free(m);
1162 return (NULL);
1163 }
1164 } else {
1165 /* Add reference to pdp page */
1166 pdppg = PHYS_TO_VM_PAGE(*pml4 & PG_FRAME);
1167 pdppg->wire_count++;
1168 }
1169 pdp = (pdp_entry_t *)PHYS_TO_DMAP(*pml4 & PG_FRAME);
1170
1171 /* Now find the pdp page */
1172 pdp = &pdp[pdpindex & ((1ul << NPDPEPGSHIFT) - 1)];
1173 *pdp = VM_PAGE_TO_PHYS(m) | PG_U | PG_RW | PG_V | PG_A | PG_M;
1174
1175 } else {
1176 vm_pindex_t pml4index;
1177 vm_pindex_t pdpindex;
1178 pml4_entry_t *pml4;
1179 pdp_entry_t *pdp;
1180 pd_entry_t *pd;
1181
1182 /* Wire up a new PTE page */
1183 pdpindex = ptepindex >> NPDPEPGSHIFT;
1184 pml4index = pdpindex >> NPML4EPGSHIFT;
1185
1186 /* First, find the pdp and check that it's valid. */
1187 pml4 = &pmap->pm_pml4[pml4index];
1188 if ((*pml4 & PG_V) == 0) {
1189 /* Have to allocate a new pd, recurse */
1190 if (_pmap_allocpte(pmap, NUPDE + pdpindex,
1191 flags) == NULL) {
1192 --m->wire_count;
1193 vm_page_free(m);
1194 return (NULL);
1195 }
1196 pdp = (pdp_entry_t *)PHYS_TO_DMAP(*pml4 & PG_FRAME);
1197 pdp = &pdp[pdpindex & ((1ul << NPDPEPGSHIFT) - 1)];
1198 } else {
1199 pdp = (pdp_entry_t *)PHYS_TO_DMAP(*pml4 & PG_FRAME);
1200 pdp = &pdp[pdpindex & ((1ul << NPDPEPGSHIFT) - 1)];
1201 if ((*pdp & PG_V) == 0) {
1202 /* Have to allocate a new pd, recurse */
1203 if (_pmap_allocpte(pmap, NUPDE + pdpindex,
1204 flags) == NULL) {
1205 --m->wire_count;
1206 vm_page_free(m);
1207 return (NULL);
1208 }
1209 } else {
1210 /* Add reference to the pd page */
1211 pdpg = PHYS_TO_VM_PAGE(*pdp & PG_FRAME);
1212 pdpg->wire_count++;
1213 }
1214 }
1215 pd = (pd_entry_t *)PHYS_TO_DMAP(*pdp & PG_FRAME);
1216
1217 /* Now we know where the page directory page is */
1218 pd = &pd[ptepindex & ((1ul << NPDEPGSHIFT) - 1)];
1219 *pd = VM_PAGE_TO_PHYS(m) | PG_U | PG_RW | PG_V | PG_A | PG_M;
1220 }
1221
1222 return m;
1223 }
1224
1225 static vm_page_t
1226 pmap_allocpte(pmap_t pmap, vm_offset_t va, int flags)
1227 {
1228 vm_pindex_t ptepindex;
1229 pd_entry_t *pd;
1230 vm_page_t m;
1231
1232 KASSERT((flags & (M_NOWAIT | M_WAITOK)) == M_NOWAIT ||
1233 (flags & (M_NOWAIT | M_WAITOK)) == M_WAITOK,
1234 ("pmap_allocpte: flags is neither M_NOWAIT nor M_WAITOK"));
1235
1236 /*
1237 * Calculate pagetable page index
1238 */
1239 ptepindex = pmap_pde_pindex(va);
1240 retry:
1241 /*
1242 * Get the page directory entry
1243 */
1244 pd = pmap_pde(pmap, va);
1245
1246 /*
1247 * This supports switching from a 2MB page to a
1248 * normal 4K page.
1249 */
1250 if (pd != 0 && (*pd & (PG_PS | PG_V)) == (PG_PS | PG_V)) {
1251 *pd = 0;
1252 pd = 0;
1253 pmap_invalidate_all(kernel_pmap);
1254 }
1255
1256 /*
1257 * If the page table page is mapped, we just increment the
1258 * hold count, and activate it.
1259 */
1260 if (pd != 0 && (*pd & PG_V) != 0) {
1261 m = PHYS_TO_VM_PAGE(*pd & PG_FRAME);
1262 m->wire_count++;
1263 } else {
1264 /*
1265 * Here if the pte page isn't mapped, or if it has been
1266 * deallocated.
1267 */
1268 m = _pmap_allocpte(pmap, ptepindex, flags);
1269 if (m == NULL && (flags & M_WAITOK))
1270 goto retry;
1271 }
1272 return (m);
1273 }
1274
1275
1276 /***************************************************
1277 * Pmap allocation/deallocation routines.
1278 ***************************************************/
1279
1280 /*
1281 * Release any resources held by the given physical map.
1282 * Called when a pmap initialized by pmap_pinit is being released.
1283 * Should only be called if the map contains no valid mappings.
1284 */
1285 void
1286 pmap_release(pmap_t pmap)
1287 {
1288 vm_page_t m;
1289
1290 KASSERT(pmap->pm_stats.resident_count == 0,
1291 ("pmap_release: pmap resident count %ld != 0",
1292 pmap->pm_stats.resident_count));
1293
1294 m = PHYS_TO_VM_PAGE(pmap->pm_pml4[PML4PML4I] & PG_FRAME);
1295
1296 pmap->pm_pml4[KPML4I] = 0; /* KVA */
1297 pmap->pm_pml4[DMPML4I] = 0; /* Direct Map */
1298 pmap->pm_pml4[PML4PML4I] = 0; /* Recursive Mapping */
1299
1300 vm_page_lock_queues();
1301 m->wire_count--;
1302 atomic_subtract_int(&cnt.v_wire_count, 1);
1303 vm_page_free_zero(m);
1304 vm_page_unlock_queues();
1305 PMAP_LOCK_DESTROY(pmap);
1306 }
1307
1308 static int
1309 kvm_size(SYSCTL_HANDLER_ARGS)
1310 {
1311 unsigned long ksize = VM_MAX_KERNEL_ADDRESS - KERNBASE;
1312
1313 return sysctl_handle_long(oidp, &ksize, 0, req);
1314 }
1315 SYSCTL_PROC(_vm, OID_AUTO, kvm_size, CTLTYPE_LONG|CTLFLAG_RD,
1316 0, 0, kvm_size, "IU", "Size of KVM");
1317
1318 static int
1319 kvm_free(SYSCTL_HANDLER_ARGS)
1320 {
1321 unsigned long kfree = VM_MAX_KERNEL_ADDRESS - kernel_vm_end;
1322
1323 return sysctl_handle_long(oidp, &kfree, 0, req);
1324 }
1325 SYSCTL_PROC(_vm, OID_AUTO, kvm_free, CTLTYPE_LONG|CTLFLAG_RD,
1326 0, 0, kvm_free, "IU", "Amount of KVM free");
1327
1328 /*
1329 * grow the number of kernel page table entries, if needed
1330 */
1331 void
1332 pmap_growkernel(vm_offset_t addr)
1333 {
1334 vm_paddr_t paddr;
1335 vm_page_t nkpg;
1336 pd_entry_t *pde, newpdir;
1337 pdp_entry_t newpdp;
1338
1339 mtx_assert(&kernel_map->system_mtx, MA_OWNED);
1340 if (kernel_vm_end == 0) {
1341 kernel_vm_end = KERNBASE;
1342 nkpt = 0;
1343 while ((*pmap_pde(kernel_pmap, kernel_vm_end) & PG_V) != 0) {
1344 kernel_vm_end = (kernel_vm_end + PAGE_SIZE * NPTEPG) & ~(PAGE_SIZE * NPTEPG - 1);
1345 nkpt++;
1346 }
1347 }
1348 addr = roundup2(addr, PAGE_SIZE * NPTEPG);
1349 while (kernel_vm_end < addr) {
1350 pde = pmap_pde(kernel_pmap, kernel_vm_end);
1351 if (pde == NULL) {
1352 /* We need a new PDP entry */
1353 nkpg = vm_page_alloc(NULL, nkpt,
1354 VM_ALLOC_NOOBJ | VM_ALLOC_SYSTEM | VM_ALLOC_WIRED);
1355 if (!nkpg)
1356 panic("pmap_growkernel: no memory to grow kernel");
1357 pmap_zero_page(nkpg);
1358 paddr = VM_PAGE_TO_PHYS(nkpg);
1359 newpdp = (pdp_entry_t)
1360 (paddr | PG_V | PG_RW | PG_A | PG_M);
1361 *pmap_pdpe(kernel_pmap, kernel_vm_end) = newpdp;
1362 continue; /* try again */
1363 }
1364 if ((*pde & PG_V) != 0) {
1365 kernel_vm_end = (kernel_vm_end + PAGE_SIZE * NPTEPG) & ~(PAGE_SIZE * NPTEPG - 1);
1366 continue;
1367 }
1368
1369 /*
1370 * This index is bogus, but out of the way
1371 */
1372 nkpg = vm_page_alloc(NULL, nkpt,
1373 VM_ALLOC_NOOBJ | VM_ALLOC_SYSTEM | VM_ALLOC_WIRED);
1374 if (!nkpg)
1375 panic("pmap_growkernel: no memory to grow kernel");
1376
1377 nkpt++;
1378
1379 pmap_zero_page(nkpg);
1380 paddr = VM_PAGE_TO_PHYS(nkpg);
1381 newpdir = (pd_entry_t) (paddr | PG_V | PG_RW | PG_A | PG_M);
1382 *pmap_pde(kernel_pmap, kernel_vm_end) = newpdir;
1383
1384 kernel_vm_end = (kernel_vm_end + PAGE_SIZE * NPTEPG) & ~(PAGE_SIZE * NPTEPG - 1);
1385 }
1386 }
1387
1388
1389 /***************************************************
1390 * page management routines.
1391 ***************************************************/
1392
1393 /*
1394 * free the pv_entry back to the free list
1395 */
1396 static PMAP_INLINE void
1397 free_pv_entry(pv_entry_t pv)
1398 {
1399 pv_entry_count--;
1400 uma_zfree(pvzone, pv);
1401 }
1402
1403 /*
1404 * get a new pv_entry, allocating a block from the system
1405 * when needed.
1406 * the memory allocation is performed bypassing the malloc code
1407 * because of the possibility of allocations at interrupt time.
1408 */
1409 static pv_entry_t
1410 get_pv_entry(void)
1411 {
1412 pv_entry_count++;
1413 if (pv_entry_high_water &&
1414 (pv_entry_count > pv_entry_high_water) &&
1415 (pmap_pagedaemon_waken == 0)) {
1416 pmap_pagedaemon_waken = 1;
1417 wakeup (&vm_pages_needed);
1418 }
1419 return uma_zalloc(pvzone, M_NOWAIT);
1420 }
1421
1422
1423 static int
1424 pmap_remove_entry(pmap_t pmap, vm_page_t m, vm_offset_t va, pd_entry_t ptepde)
1425 {
1426 pv_entry_t pv;
1427 int rtval;
1428
1429 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
1430 mtx_assert(&vm_page_queue_mtx, MA_OWNED);
1431 if (m->md.pv_list_count < pmap->pm_stats.resident_count) {
1432 TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
1433 if (pmap == pv->pv_pmap && va == pv->pv_va)
1434 break;
1435 }
1436 } else {
1437 TAILQ_FOREACH(pv, &pmap->pm_pvlist, pv_plist) {
1438 if (va == pv->pv_va)
1439 break;
1440 }
1441 }
1442
1443 rtval = 0;
1444 if (pv) {
1445 rtval = pmap_unuse_pt(pmap, va, ptepde);
1446 TAILQ_REMOVE(&m->md.pv_list, pv, pv_list);
1447 m->md.pv_list_count--;
1448 if (TAILQ_FIRST(&m->md.pv_list) == NULL)
1449 vm_page_flag_clear(m, PG_WRITEABLE);
1450
1451 TAILQ_REMOVE(&pmap->pm_pvlist, pv, pv_plist);
1452 free_pv_entry(pv);
1453 }
1454
1455 return rtval;
1456 }
1457
1458 /*
1459 * Create a pv entry for page at pa for
1460 * (pmap, va).
1461 */
1462 static void
1463 pmap_insert_entry(pmap_t pmap, vm_offset_t va, vm_page_t m)
1464 {
1465 pv_entry_t pv;
1466
1467 pv = get_pv_entry();
1468 if (pv == NULL)
1469 panic("no pv entries: increase vm.pmap.shpgperproc");
1470 pv->pv_va = va;
1471 pv->pv_pmap = pmap;
1472
1473 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
1474 mtx_assert(&vm_page_queue_mtx, MA_OWNED);
1475 TAILQ_INSERT_TAIL(&pmap->pm_pvlist, pv, pv_plist);
1476 TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_list);
1477 m->md.pv_list_count++;
1478 }
1479
1480 /*
1481 * pmap_remove_pte: do the things to unmap a page in a process
1482 */
1483 static int
1484 pmap_remove_pte(pmap_t pmap, pt_entry_t *ptq, vm_offset_t va, pd_entry_t ptepde)
1485 {
1486 pt_entry_t oldpte;
1487 vm_page_t m;
1488
1489 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
1490 oldpte = pte_load_clear(ptq);
1491 if (oldpte & PG_W)
1492 pmap->pm_stats.wired_count -= 1;
1493 /*
1494 * Machines that don't support invlpg also don't support
1495 * PG_G.
1496 */
1497 if (oldpte & PG_G)
1498 pmap_invalidate_page(kernel_pmap, va);
1499 pmap->pm_stats.resident_count -= 1;
1500 if (oldpte & PG_MANAGED) {
1501 m = PHYS_TO_VM_PAGE(oldpte & PG_FRAME);
1502 if (oldpte & PG_M) {
1503 #if defined(PMAP_DIAGNOSTIC)
1504 if (pmap_nw_modified((pt_entry_t) oldpte)) {
1505 printf(
1506 "pmap_remove: modified page not writable: va: 0x%lx, pte: 0x%lx\n",
1507 va, oldpte);
1508 }
1509 #endif
1510 if (pmap_track_modified(va))
1511 vm_page_dirty(m);
1512 }
1513 if (oldpte & PG_A)
1514 vm_page_flag_set(m, PG_REFERENCED);
1515 return pmap_remove_entry(pmap, m, va, ptepde);
1516 } else {
1517 return pmap_unuse_pt(pmap, va, ptepde);
1518 }
1519 }
1520
1521 /*
1522 * Remove a single page from a process address space
1523 */
1524 static void
1525 pmap_remove_page(pmap_t pmap, vm_offset_t va)
1526 {
1527 pd_entry_t ptepde;
1528 pt_entry_t *pte;
1529
1530 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
1531 pte = pmap_pte_pde(pmap, va, &ptepde);
1532 if (pte == NULL || (*pte & PG_V) == 0)
1533 return;
1534 pmap_remove_pte(pmap, pte, va, ptepde);
1535 pmap_invalidate_page(pmap, va);
1536 }
1537
1538 /*
1539 * Remove the given range of addresses from the specified map.
1540 *
1541 * It is assumed that the start and end are properly
1542 * rounded to the page size.
1543 */
1544 void
1545 pmap_remove(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
1546 {
1547 vm_offset_t va_next;
1548 pml4_entry_t *pml4e;
1549 pdp_entry_t *pdpe;
1550 pd_entry_t ptpaddr, *pde;
1551 pt_entry_t *pte;
1552 int anyvalid;
1553
1554 /*
1555 * Perform an unsynchronized read. This is, however, safe.
1556 */
1557 if (pmap->pm_stats.resident_count == 0)
1558 return;
1559
1560 anyvalid = 0;
1561
1562 vm_page_lock_queues();
1563 PMAP_LOCK(pmap);
1564
1565 /*
1566 * Special handling for removing a single page.  This is a very
1567 * common operation, and handling it here short circuits a fair
1568 * amount of code.
1569 */
1570 if (sva + PAGE_SIZE == eva) {
1571 pde = pmap_pde(pmap, sva);
1572 if (pde && (*pde & PG_PS) == 0) {
1573 pmap_remove_page(pmap, sva);
1574 goto out;
1575 }
1576 }
1577
1578 for (; sva < eva; sva = va_next) {
1579
1580 if (pmap->pm_stats.resident_count == 0)
1581 break;
1582
1583 pml4e = pmap_pml4e(pmap, sva);
1584 if (pml4e == 0) {
1585 va_next = (sva + NBPML4) & ~PML4MASK;
1586 continue;
1587 }
1588
1589 pdpe = pmap_pdpe(pmap, sva);
1590 if (pdpe == 0) {
1591 va_next = (sva + NBPDP) & ~PDPMASK;
1592 continue;
1593 }
1594
1595 /*
1596 * Calculate index for next page table.
1597 */
1598 va_next = (sva + NBPDR) & ~PDRMASK;
1599
1600 pde = pmap_pde(pmap, sva);
1601 if (pde == 0)
1602 continue;
1603 ptpaddr = *pde;
1604
1605 /*
1606 * Weed out invalid mappings.
1607 */
1608 if (ptpaddr == 0)
1609 continue;
1610
1611 /*
1612 * Check for large page.
1613 */
1614 if ((ptpaddr & PG_PS) != 0) {
1615 *pde = 0;
1616 pmap->pm_stats.resident_count -= NBPDR / PAGE_SIZE;
1617 anyvalid = 1;
1618 continue;
1619 }
1620
1621 /*
1622 * Limit our scan to either the end of the va represented
1623 * by the current page table page, or to the end of the
1624 * range being removed.
1625 */
1626 if (va_next > eva)
1627 va_next = eva;
1628
1629 for (pte = pmap_pde_to_pte(pde, sva); sva != va_next; pte++,
1630 sva += PAGE_SIZE) {
1631 if (*pte == 0)
1632 continue;
1633 anyvalid = 1;
1634 if (pmap_remove_pte(pmap, pte, sva, ptpaddr))
1635 break;
1636 }
1637 }
1638 out:
1639 vm_page_unlock_queues();
1640 if (anyvalid)
1641 pmap_invalidate_all(pmap);
1642 PMAP_UNLOCK(pmap);
1643 }
1644
1645 /*
1646 * Routine: pmap_remove_all
1647 * Function:
1648 * Removes this physical page from
1649 * all physical maps in which it resides.
1650 * Reflects back modify bits to the pager.
1651 *
1652 * Notes:
1653 * Original versions of this routine were very
1654 * inefficient because they iteratively called
1655 * pmap_remove (slow...)
1656 */
1657
1658 void
1659 pmap_remove_all(vm_page_t m)
1660 {
1661 register pv_entry_t pv;
1662 pt_entry_t *pte, tpte;
1663 pd_entry_t ptepde;
1664
1665 #if defined(PMAP_DIAGNOSTIC)
1666 /*
1667 * XXX This makes pmap_remove_all() illegal for non-managed pages!
1668 */
1669 if (m->flags & PG_FICTITIOUS) {
1670 panic("pmap_remove_all: illegal for unmanaged page, va: 0x%lx",
1671 VM_PAGE_TO_PHYS(m));
1672 }
1673 #endif
1674 mtx_assert(&vm_page_queue_mtx, MA_OWNED);
1675 while ((pv = TAILQ_FIRST(&m->md.pv_list)) != NULL) {
1676 PMAP_LOCK(pv->pv_pmap);
1677 pv->pv_pmap->pm_stats.resident_count--;
1678 pte = pmap_pte_pde(pv->pv_pmap, pv->pv_va, &ptepde);
1679 tpte = pte_load_clear(pte);
1680 if (tpte & PG_W)
1681 pv->pv_pmap->pm_stats.wired_count--;
1682 if (tpte & PG_A)
1683 vm_page_flag_set(m, PG_REFERENCED);
1684
1685 /*
1686 * Update the vm_page_t clean and reference bits.
1687 */
1688 if (tpte & PG_M) {
1689 #if defined(PMAP_DIAGNOSTIC)
1690 if (pmap_nw_modified((pt_entry_t) tpte)) {
1691 printf(
1692 "pmap_remove_all: modified page not writable: va: 0x%lx, pte: 0x%lx\n",
1693 pv->pv_va, tpte);
1694 }
1695 #endif
1696 if (pmap_track_modified(pv->pv_va))
1697 vm_page_dirty(m);
1698 }
1699 pmap_invalidate_page(pv->pv_pmap, pv->pv_va);
1700 TAILQ_REMOVE(&pv->pv_pmap->pm_pvlist, pv, pv_plist);
1701 TAILQ_REMOVE(&m->md.pv_list, pv, pv_list);
1702 m->md.pv_list_count--;
1703 pmap_unuse_pt(pv->pv_pmap, pv->pv_va, ptepde);
1704 PMAP_UNLOCK(pv->pv_pmap);
1705 free_pv_entry(pv);
1706 }
1707 vm_page_flag_clear(m, PG_WRITEABLE);
1708 }
1709
1710 /*
1711 * Set the physical protection on the
1712 * specified range of this map as requested.
1713 */
1714 void
1715 pmap_protect(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot)
1716 {
1717 vm_offset_t va_next;
1718 pml4_entry_t *pml4e;
1719 pdp_entry_t *pdpe;
1720 pd_entry_t ptpaddr, *pde;
1721 pt_entry_t *pte;
1722 int anychanged;
1723
1724 if ((prot & VM_PROT_READ) == VM_PROT_NONE) {
1725 pmap_remove(pmap, sva, eva);
1726 return;
1727 }
1728
1729 if (prot & VM_PROT_WRITE)
1730 return;
1731
1732 anychanged = 0;
1733
1734 vm_page_lock_queues();
1735 PMAP_LOCK(pmap);
1736 for (; sva < eva; sva = va_next) {
1737
1738 pml4e = pmap_pml4e(pmap, sva);
1739 if (pml4e == 0) {
1740 va_next = (sva + NBPML4) & ~PML4MASK;
1741 continue;
1742 }
1743
1744 pdpe = pmap_pdpe(pmap, sva);
1745 if (pdpe == 0) {
1746 va_next = (sva + NBPDP) & ~PDPMASK;
1747 continue;
1748 }
1749
1750 va_next = (sva + NBPDR) & ~PDRMASK;
1751
1752 pde = pmap_pde(pmap, sva);
1753 if (pde == NULL)
1754 continue;
1755 ptpaddr = *pde;
1756
1757 /*
1758 * Weed out invalid mappings.
1759 */
1760 if (ptpaddr == 0)
1761 continue;
1762
1763 /*
1764 * Check for large page.
1765 */
1766 if ((ptpaddr & PG_PS) != 0) {
1767 *pde &= ~(PG_M|PG_RW);
1768 pmap->pm_stats.resident_count -= NBPDR / PAGE_SIZE;
1769 anychanged = 1;
1770 continue;
1771 }
1772
1773 if (va_next > eva)
1774 va_next = eva;
1775
1776 for (pte = pmap_pde_to_pte(pde, sva); sva != va_next; pte++,
1777 sva += PAGE_SIZE) {
1778 pt_entry_t obits, pbits;
1779 vm_page_t m;
1780
1781 retry:
1782 obits = pbits = *pte;
1783 if (pbits & PG_MANAGED) {
1784 m = NULL;
1785 if (pbits & PG_A) {
1786 m = PHYS_TO_VM_PAGE(pbits & PG_FRAME);
1787 vm_page_flag_set(m, PG_REFERENCED);
1788 pbits &= ~PG_A;
1789 }
1790 if ((pbits & PG_M) != 0 &&
1791 pmap_track_modified(sva)) {
1792 if (m == NULL)
1793 m = PHYS_TO_VM_PAGE(pbits &
1794 PG_FRAME);
1795 vm_page_dirty(m);
1796 }
1797 }
1798
1799 pbits &= ~(PG_RW | PG_M);
1800
1801 if (pbits != obits) {
1802 if (!atomic_cmpset_long(pte, obits, pbits))
1803 goto retry;
1804 if (obits & PG_G)
1805 pmap_invalidate_page(pmap, sva);
1806 else
1807 anychanged = 1;
1808 }
1809 }
1810 }
1811 vm_page_unlock_queues();
1812 if (anychanged)
1813 pmap_invalidate_all(pmap);
1814 PMAP_UNLOCK(pmap);
1815 }
1816
1817 /*
1818 * Insert the given physical page (p) at
1819 * the specified virtual address (v) in the
1820 * target physical map with the protection requested.
1821 *
1822 * If specified, the page will be wired down, meaning
1823 * that the related pte can not be reclaimed.
1824 *
1825 * NB: This is the only routine which MAY NOT lazy-evaluate
1826 * or lose information. That is, this routine must actually
1827 * insert this page into the given map NOW.
1828 */
1829 void
1830 pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
1831 boolean_t wired)
1832 {
1833 vm_paddr_t pa;
1834 register pt_entry_t *pte;
1835 vm_paddr_t opa;
1836 pd_entry_t ptepde;
1837 pt_entry_t origpte, newpte;
1838 vm_page_t mpte, om;
1839
1840 va = trunc_page(va);
1841 #ifdef PMAP_DIAGNOSTIC
1842 if (va > VM_MAX_KERNEL_ADDRESS)
1843 panic("pmap_enter: toobig");
1844 if ((va >= UPT_MIN_ADDRESS) && (va < UPT_MAX_ADDRESS))
1845 panic("pmap_enter: invalid to pmap_enter page table pages (va: 0x%lx)", va);
1846 #endif
1847
1848 mpte = NULL;
1849
1850 vm_page_lock_queues();
1851 PMAP_LOCK(pmap);
1852
1853 /*
1854 * In the case that a page table page is not
1855 * resident, we are creating it here.
1856 */
1857 if (va < VM_MAXUSER_ADDRESS) {
1858 mpte = pmap_allocpte(pmap, va, M_WAITOK);
1859 }
1860 #if 0 && defined(PMAP_DIAGNOSTIC)
1861 else {
1862 pd_entry_t *pdeaddr = pmap_pde(pmap, va);
1863 origpte = *pdeaddr;
1864 if ((origpte & PG_V) == 0) {
1865 panic("pmap_enter: invalid kernel page table page, pde=%p, va=%p\n",
1866 origpte, va);
1867 }
1868 }
1869 #endif
1870
1871 pte = pmap_pte_pde(pmap, va, &ptepde);
1872
1873 /*
1874 * Page Directory table entry not valid, we need a new PT page
1875 */
1876 if (pte == NULL)
1877 panic("pmap_enter: invalid page directory va=%#lx\n", va);
1878
1879 pa = VM_PAGE_TO_PHYS(m);
1880 om = NULL;
1881 origpte = *pte;
1882 opa = origpte & PG_FRAME;
1883
1884 if (origpte & PG_PS)
1885 panic("pmap_enter: attempted pmap_enter on 2MB page");
1886
1887 /*
1888 * Mapping has not changed, must be protection or wiring change.
1889 */
1890 if (origpte && (opa == pa)) {
1891 /*
1892 * Wiring change, just update stats. We don't worry about
1893 * wiring PT pages as they remain resident as long as there
1894 * are valid mappings in them. Hence, if a user page is wired,
1895 * the PT page will be also.
1896 */
1897 if (wired && ((origpte & PG_W) == 0))
1898 pmap->pm_stats.wired_count++;
1899 else if (!wired && (origpte & PG_W))
1900 pmap->pm_stats.wired_count--;
1901
1902 #if defined(PMAP_DIAGNOSTIC)
1903 if (pmap_nw_modified((pt_entry_t) origpte)) {
1904 printf(
1905 "pmap_enter: modified page not writable: va: 0x%lx, pte: 0x%lx\n",
1906 va, origpte);
1907 }
1908 #endif
1909
1910 /*
1911 * Remove extra pte reference
1912 */
1913 if (mpte)
1914 mpte->wire_count--;
1915
1916 /*
1917 * We might be turning off write access to the page,
1918 * so we go ahead and sense modify status.
1919 */
1920 if (origpte & PG_MANAGED) {
1921 om = m;
1922 pa |= PG_MANAGED;
1923 }
1924 goto validate;
1925 }
1926 /*
1927 * Mapping has changed, invalidate old range and fall through to
1928 * handle validating new mapping.
1929 */
1930 if (opa) {
1931 int err;
1932 if (origpte & PG_W)
1933 pmap->pm_stats.wired_count--;
1934 if (origpte & PG_MANAGED) {
1935 om = PHYS_TO_VM_PAGE(opa);
1936 err = pmap_remove_entry(pmap, om, va, ptepde);
1937 } else
1938 err = pmap_unuse_pt(pmap, va, ptepde);
1939 if (err)
1940 panic("pmap_enter: pte vanished, va: 0x%lx", va);
1941 } else
1942 pmap->pm_stats.resident_count++;
1943
1944 /*
1945 * Enter on the PV list if part of our managed memory. Note that we
1946 * raise IPL while manipulating pv_table since pmap_enter can be
1947 * called at interrupt time.
1948 */
1949 if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0) {
1950 pmap_insert_entry(pmap, va, m);
1951 pa |= PG_MANAGED;
1952 }
1953
1954 /*
1955 * Increment counters
1956 */
1957 if (wired)
1958 pmap->pm_stats.wired_count++;
1959
1960 validate:
1961 /*
1962 * Now validate mapping with desired protection/wiring.
1963 */
1964 newpte = (pt_entry_t)(pa | PG_V);
1965 if ((prot & VM_PROT_WRITE) != 0)
1966 newpte |= PG_RW;
1967 if ((prot & VM_PROT_EXECUTE) == 0)
1968 newpte |= pg_nx;
1969 if (wired)
1970 newpte |= PG_W;
1971 if (va < VM_MAXUSER_ADDRESS)
1972 newpte |= PG_U;
1973 if (pmap == kernel_pmap)
1974 newpte |= PG_G;
1975
1976 /*
1977 * if the mapping or permission bits are different, we need
1978 * to update the pte.
1979 */
1980 if ((origpte & ~(PG_M|PG_A)) != newpte) {
1981 if (origpte & PG_MANAGED) {
1982 origpte = pte_load_store(pte, newpte | PG_A);
1983 if ((origpte & PG_M) && pmap_track_modified(va))
1984 vm_page_dirty(om);
1985 if (origpte & PG_A)
1986 vm_page_flag_set(om, PG_REFERENCED);
1987 } else
1988 pte_store(pte, newpte | PG_A);
1989 if (origpte) {
1990 pmap_invalidate_page(pmap, va);
1991 }
1992 }
1993 vm_page_unlock_queues();
1994 PMAP_UNLOCK(pmap);
1995 }
1996
1997 /*
1998 * this code makes some *MAJOR* assumptions:
1999 * 1. Current pmap & pmap exists.
2000 * 2. Not wired.
2001 * 3. Read access.
2002 * 4. No page table pages.
2003 * 5. Page IS managed.
2004 * but is *MUCH* faster than pmap_enter...
2005 */
2006
2007 vm_page_t
2008 pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_page_t mpte)
2009 {
2010 pt_entry_t *pte;
2011 vm_paddr_t pa;
2012
2013 mtx_assert(&vm_page_queue_mtx, MA_OWNED);
2014 VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
2015 PMAP_LOCK(pmap);
2016
2017 /*
2018 * In the case that a page table page is not
2019 * resident, we are creating it here.
2020 */
2021 if (va < VM_MAXUSER_ADDRESS) {
2022 vm_pindex_t ptepindex;
2023 pd_entry_t *ptepa;
2024
2025 /*
2026 * Calculate pagetable page index
2027 */
2028 ptepindex = pmap_pde_pindex(va);
2029 if (mpte && (mpte->pindex == ptepindex)) {
2030 mpte->wire_count++;
2031 } else {
2032 retry:
2033 /*
2034 * Get the page directory entry
2035 */
2036 ptepa = pmap_pde(pmap, va);
2037
2038 /*
2039 * If the page table page is mapped, we just increment
2040 * the hold count, and activate it.
2041 */
2042 if (ptepa && (*ptepa & PG_V) != 0) {
2043 if (*ptepa & PG_PS)
2044 panic("pmap_enter_quick: unexpected mapping into 2MB page");
2045 mpte = PHYS_TO_VM_PAGE(*ptepa & PG_FRAME);
2046 mpte->wire_count++;
2047 } else {
2048 mpte = _pmap_allocpte(pmap, ptepindex,
2049 M_NOWAIT);
2050 if (mpte == NULL) {
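/*
 * The page table page could not be allocated without
 * sleeping: drop the locks, wait for free pages, and
 * retry from the top.
 */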
2051 PMAP_UNLOCK(pmap);
2052 vm_page_busy(m);
2053 vm_page_unlock_queues();
2054 VM_OBJECT_UNLOCK(m->object);
2055 VM_WAIT;
2056 VM_OBJECT_LOCK(m->object);
2057 vm_page_lock_queues();
2058 vm_page_wakeup(m);
2059 PMAP_LOCK(pmap);
2060 goto retry;
2061 }
2062 }
2063 }
2064 } else {
2065 mpte = NULL;
2066 }
2067
2068 /*
2069 * This call to vtopte makes the assumption that we are
2070 * entering the page into the current pmap. In order to support
2071 * quick entry into any pmap, one would likely use pmap_pte.
2072 * But that isn't as quick as vtopte.
2073 */
2074 pte = vtopte(va);
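/*
 * If a mapping already exists at this address, release the page
 * table page reference taken above and bail out.
 */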
2075 if (*pte) {
2076 if (mpte != NULL) {
2077 pmap_unwire_pte_hold(pmap, va, mpte);
2078 mpte = NULL;
2079 }
2080 goto out;
2081 }
2082
2083 /*
2084 * Enter on the PV list if part of our managed memory. Note that we
2085 * raise IPL while manipulating pv_table since pmap_enter can be
2086 * called at interrupt time.
2087 */
2088 if ((m->flags & (PG_FICTITIOUS|PG_UNMANAGED)) == 0)
2089 pmap_insert_entry(pmap, va, m);
2090
2091 /*
2092 * Increment counters
2093 */
2094 pmap->pm_stats.resident_count++;
2095
2096 pa = VM_PAGE_TO_PHYS(m);
2097
2098 /*
2099 * Now validate mapping with RO protection
2100 */
2101 if (m->flags & (PG_FICTITIOUS|PG_UNMANAGED))
2102 pte_store(pte, pa | PG_V | PG_U);
2103 else
2104 pte_store(pte, pa | PG_V | PG_U | PG_MANAGED);
2105 out:
2106 PMAP_UNLOCK(pmap);
2107 return mpte;
2108 }
2109
2110 /*
2111 * Make a temporary mapping for a physical address. This is only intended
2112 * to be used for panic dumps.
2113 */
2114 void *
2115 pmap_kenter_temporary(vm_paddr_t pa, int i)
2116 {
2117 vm_offset_t va;
2118
2119 va = (vm_offset_t)crashdumpmap + (i * PAGE_SIZE);
2120 pmap_kenter(va, pa);
2121 invlpg(va);
2122 return ((void *)crashdumpmap);
2123 }
2124
2125 /*
2126 * This code maps large physical mmap regions into the
2127 * processor address space. Note that some shortcuts
2128 * are taken, but the code works.
2129 */
2130 void
2131 pmap_object_init_pt(pmap_t pmap, vm_offset_t addr,
2132 vm_object_t object, vm_pindex_t pindex,
2133 vm_size_t size)
2134 {
2135 vm_page_t p;
2136
2137 VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
2138 KASSERT(object->type == OBJT_DEVICE,
2139 ("pmap_object_init_pt: non-device object"));
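/*
 * Only regions that are 2MB-aligned and a multiple of 2MB in size
 * can be mapped here, using 2MB page mappings.
 */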
2140 if (((addr & (NBPDR - 1)) == 0) && ((size & (NBPDR - 1)) == 0)) {
2141 int i;
2142 vm_page_t m[1];
2143 int npdes;
2144 pd_entry_t ptepa, *pde;
2145
2146 PMAP_LOCK(pmap);
2147 pde = pmap_pde(pmap, addr);
2148 if (pde != 0 && (*pde & PG_V) != 0)
2149 goto out;
2150 PMAP_UNLOCK(pmap);
2151 retry:
2152 p = vm_page_lookup(object, pindex);
2153 if (p != NULL) {
2154 vm_page_lock_queues();
2155 if (vm_page_sleep_if_busy(p, FALSE, "init4p"))
2156 goto retry;
2157 } else {
2158 p = vm_page_alloc(object, pindex, VM_ALLOC_NORMAL);
2159 if (p == NULL)
2160 return;
2161 m[0] = p;
2162
2163 if (vm_pager_get_pages(object, m, 1, 0) != VM_PAGER_OK) {
2164 vm_page_lock_queues();
2165 vm_page_free(p);
2166 vm_page_unlock_queues();
2167 return;
2168 }
2169
2170 p = vm_page_lookup(object, pindex);
2171 vm_page_lock_queues();
2172 vm_page_wakeup(p);
2173 }
2174 vm_page_unlock_queues();
2175
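/*
 * The backing physical memory must itself be 2MB-aligned for the
 * 2MB page mappings created below.
 */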
2176 ptepa = VM_PAGE_TO_PHYS(p);
2177 if (ptepa & (NBPDR - 1))
2178 return;
2179
2180 p->valid = VM_PAGE_BITS_ALL;
2181
2182 PMAP_LOCK(pmap);
2183 pmap->pm_stats.resident_count += size >> PAGE_SHIFT;
2184 npdes = size >> PDRSHIFT;
2185 for(i = 0; i < npdes; i++) {
2186 pde_store(pde, ptepa | PG_U | PG_RW | PG_V | PG_PS);
2187 ptepa += NBPDR;
2188 pde++;
2189 }
2190 pmap_invalidate_all(pmap);
2191 out:
2192 PMAP_UNLOCK(pmap);
2193 }
2194 }
2195
2196 /*
2197 * Routine: pmap_change_wiring
2198 * Function: Change the wiring attribute for a map/virtual-address
2199 * pair.
2200 * In/out conditions:
2201 * The mapping must already exist in the pmap.
2202 */
2203 void
2204 pmap_change_wiring(pmap, va, wired)
2205 register pmap_t pmap;
2206 vm_offset_t va;
2207 boolean_t wired;
2208 {
2209 register pt_entry_t *pte;
2210
2211 /*
2212 * Wiring is not a hardware characteristic so there is no need to
2213 * invalidate the TLB.
2214 */
2215 PMAP_LOCK(pmap);
2216 pte = pmap_pte(pmap, va);
2217 if (wired && (*pte & PG_W) == 0) {
2218 pmap->pm_stats.wired_count++;
2219 atomic_set_long(pte, PG_W);
2220 } else if (!wired && (*pte & PG_W) != 0) {
2221 pmap->pm_stats.wired_count--;
2222 atomic_clear_long(pte, PG_W);
2223 }
2224 PMAP_UNLOCK(pmap);
2225 }
2226
2227
2228
2229 /*
2230 * Copy the range specified by src_addr/len
2231 * from the source map to the range dst_addr/len
2232 * in the destination map.
2233 *
2234 * This routine is only advisory and need not do anything.
2235 */
2236
2237 void
2238 pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr, vm_size_t len,
2239 vm_offset_t src_addr)
2240 {
2241 vm_offset_t addr;
2242 vm_offset_t end_addr = src_addr + len;
2243 vm_offset_t va_next;
2244 vm_page_t m;
2245
2246 if (dst_addr != src_addr)
2247 return;
2248
2249 if (!pmap_is_current(src_pmap))
2250 return;
2251
2252 vm_page_lock_queues();
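/*
 * Lock the two pmaps in a consistent order (by address) to avoid
 * deadlocking against a concurrent pmap_copy in the opposite
 * direction.
 */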
2253 if (dst_pmap < src_pmap) {
2254 PMAP_LOCK(dst_pmap);
2255 PMAP_LOCK(src_pmap);
2256 } else {
2257 PMAP_LOCK(src_pmap);
2258 PMAP_LOCK(dst_pmap);
2259 }
2260 for (addr = src_addr; addr < end_addr; addr = va_next) {
2261 pt_entry_t *src_pte, *dst_pte;
2262 vm_page_t dstmpte, srcmpte;
2263 pml4_entry_t *pml4e;
2264 pdp_entry_t *pdpe;
2265 pd_entry_t srcptepaddr, *pde;
2266
2267 if (addr >= UPT_MIN_ADDRESS)
2268 panic("pmap_copy: invalid to pmap_copy page tables");
2269
2270 /*
2271 * Don't let optional prefaulting of pages make us go
2272 * way below the low water mark of free pages or way
2273 * above high water mark of used pv entries.
2274 */
2275 if (cnt.v_free_count < cnt.v_free_reserved ||
2276 pv_entry_count > pv_entry_high_water)
2277 break;
2278
2279 pml4e = pmap_pml4e(src_pmap, addr);
2280 if (pml4e == 0) {
2281 va_next = (addr + NBPML4) & ~PML4MASK;
2282 continue;
2283 }
2284
2285 pdpe = pmap_pdpe(src_pmap, addr);
2286 if (pdpe == 0) {
2287 va_next = (addr + NBPDP) & ~PDPMASK;
2288 continue;
2289 }
2290
2291 va_next = (addr + NBPDR) & ~PDRMASK;
2292
2293 pde = pmap_pde(src_pmap, addr);
2294 if (pde)
2295 srcptepaddr = *pde;
2296 else
2297 continue;
2298 if (srcptepaddr == 0)
2299 continue;
2300
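/*
 * A 2MB page mapping is copied by duplicating the source pde in
 * the destination pmap, provided the slot there is empty.
 */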
2301 if (srcptepaddr & PG_PS) {
2302 pde = pmap_pde(dst_pmap, addr);
2303 if (pde == 0) {
2304 /*
2305 * XXX should do an allocpte here to
2306 * instantiate the pde
2307 */
2308 continue;
2309 }
2310 if (*pde == 0) {
2311 *pde = srcptepaddr;
2312 dst_pmap->pm_stats.resident_count +=
2313 NBPDR / PAGE_SIZE;
2314 }
2315 continue;
2316 }
2317
2318 srcmpte = PHYS_TO_VM_PAGE(srcptepaddr & PG_FRAME);
2319 if (srcmpte->wire_count == 0)
2320 panic("pmap_copy: source page table page is unused");
2321
2322 if (va_next > end_addr)
2323 va_next = end_addr;
2324
2325 src_pte = vtopte(addr);
2326 while (addr < va_next) {
2327 pt_entry_t ptetemp;
2328 ptetemp = *src_pte;
2329 /*
2330 * we only make virtual copies of managed pages
2331 */
2332 if ((ptetemp & PG_MANAGED) != 0) {
2333 /*
2334 * We have to check after allocpte for the
2335 * pte still being around... allocpte can
2336 * block.
2337 */
2338 dstmpte = pmap_allocpte(dst_pmap, addr,
2339 M_NOWAIT);
2340 if (dstmpte == NULL)
2341 break;
2342 dst_pte = (pt_entry_t *)
2343 PHYS_TO_DMAP(VM_PAGE_TO_PHYS(dstmpte));
2344 dst_pte = &dst_pte[pmap_pte_index(addr)];
2345 if (*dst_pte == 0) {
2346 /*
2347 * Clear the modified and
2348 * accessed (referenced) bits
2349 * during the copy.
2350 */
2351 m = PHYS_TO_VM_PAGE(ptetemp & PG_FRAME);
2352 *dst_pte = ptetemp & ~(PG_M | PG_A);
2353 dst_pmap->pm_stats.resident_count++;
2354 pmap_insert_entry(dst_pmap, addr, m);
2355 } else
2356 pmap_unwire_pte_hold(dst_pmap, addr, dstmpte);
2357 if (dstmpte->wire_count >= srcmpte->wire_count)
2358 break;
2359 }
2360 addr += PAGE_SIZE;
2361 src_pte++;
2362 }
2363 }
2364 vm_page_unlock_queues();
2365 PMAP_UNLOCK(src_pmap);
2366 PMAP_UNLOCK(dst_pmap);
2367 }
2368
2369 /*
2370 * pmap_zero_page zeros the specified hardware page through the
2371 * direct map, using pagezero to clear its contents.
2372 */
2373 void
2374 pmap_zero_page(vm_page_t m)
2375 {
2376 vm_offset_t va = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m));
2377
2378 pagezero((void *)va);
2379 }
2380
2381 /*
2382 * pmap_zero_page_area zeros the specified area of the given hardware
2383 * page through the direct map, using pagezero or bzero to clear its contents.
2384 *
2385 * off and size may not cover an area beyond a single hardware page.
2386 */
2387 void
2388 pmap_zero_page_area(vm_page_t m, int off, int size)
2389 {
2390 vm_offset_t va = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m));
2391
2392 if (off == 0 && size == PAGE_SIZE)
2393 pagezero((void *)va);
2394 else
2395 bzero((char *)va + off, size);
2396 }
2397
2398 /*
2399 * pmap_zero_page_idle zeros the specified hardware page through the
2400 * direct map, using pagezero to clear its contents. This
2401 * is intended to be called from the vm_pagezero process only and
2402 * outside of Giant.
2403 */
2404 void
2405 pmap_zero_page_idle(vm_page_t m)
2406 {
2407 vm_offset_t va = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m));
2408
2409 pagezero((void *)va);
2410 }
2411
2412 /*
2413 * pmap_copy_page copies the specified (machine independent)
2414 * page by mapping both pages through the direct map and using
2415 * pagecopy to copy the page, one machine dependent page at a
2416 * time.
2417 */
2418 void
2419 pmap_copy_page(vm_page_t msrc, vm_page_t mdst)
2420 {
2421 vm_offset_t src = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(msrc));
2422 vm_offset_t dst = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(mdst));
2423
2424 pagecopy((void *)src, (void *)dst);
2425 }
2426
2427 /*
2428 * Returns true if the pmap's pv is one of the first
2429 * 16 pvs linked to from this page. This count may
2430 * be changed upwards or downwards in the future; it
2431 * is only necessary that true be returned for a small
2432 * subset of pmaps for proper page aging.
2433 */
2434 boolean_t
2435 pmap_page_exists_quick(pmap, m)
2436 pmap_t pmap;
2437 vm_page_t m;
2438 {
2439 pv_entry_t pv;
2440 int loops = 0;
2441
2442 if (m->flags & PG_FICTITIOUS)
2443 return FALSE;
2444
2445 mtx_assert(&vm_page_queue_mtx, MA_OWNED);
2446 TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
2447 if (pv->pv_pmap == pmap) {
2448 return TRUE;
2449 }
2450 loops++;
2451 if (loops >= 16)
2452 break;
2453 }
2454 return (FALSE);
2455 }
2456
2457 #define PMAP_REMOVE_PAGES_CURPROC_ONLY
2458 /*
2459 * Remove all pages from the specified address space;
2460 * this aids process exit speeds. Also, this code
2461 * is special cased for current process only, but
2462 * can have the more generic (and slightly slower)
2463 * mode enabled. This is much faster than pmap_remove
2464 * in the case of running down an entire address space.
2465 */
2466 void
2467 pmap_remove_pages(pmap, sva, eva)
2468 pmap_t pmap;
2469 vm_offset_t sva, eva;
2470 {
2471 pt_entry_t *pte, tpte;
2472 vm_page_t m;
2473 pv_entry_t pv, npv;
2474
2475 #ifdef PMAP_REMOVE_PAGES_CURPROC_ONLY
2476 if (pmap != vmspace_pmap(curthread->td_proc->p_vmspace)) {
2477 printf("warning: pmap_remove_pages called with non-current pmap\n");
2478 return;
2479 }
2480 #endif
2481 vm_page_lock_queues();
2482 PMAP_LOCK(pmap);
2483 for (pv = TAILQ_FIRST(&pmap->pm_pvlist); pv; pv = npv) {
2484
2485 if (pv->pv_va >= eva || pv->pv_va < sva) {
2486 npv = TAILQ_NEXT(pv, pv_plist);
2487 continue;
2488 }
2489
2490 #ifdef PMAP_REMOVE_PAGES_CURPROC_ONLY
2491 pte = vtopte(pv->pv_va);
2492 #else
2493 pte = pmap_pte(pmap, pv->pv_va);
2494 #endif
2495 tpte = *pte;
2496
2497 if (tpte == 0) {
2498 printf("TPTE at %p IS ZERO @ VA %08lx\n",
2499 pte, pv->pv_va);
2500 panic("bad pte");
2501 }
2502
2503 /*
2504 * We cannot remove wired pages from a process' mapping at this time
2505 */
2506 if (tpte & PG_W) {
2507 npv = TAILQ_NEXT(pv, pv_plist);
2508 continue;
2509 }
2510
2511 m = PHYS_TO_VM_PAGE(tpte & PG_FRAME);
2512 KASSERT(m->phys_addr == (tpte & PG_FRAME),
2513 ("vm_page_t %p phys_addr mismatch %016jx %016jx",
2514 m, (uintmax_t)m->phys_addr, (uintmax_t)tpte));
2515
2516 KASSERT(m < &vm_page_array[vm_page_array_size],
2517 ("pmap_remove_pages: bad tpte %#jx", (uintmax_t)tpte));
2518
2519 pmap->pm_stats.resident_count--;
2520
2521 pte_clear(pte);
2522
2523 /*
2524 * Update the vm_page_t clean and reference bits.
2525 */
2526 if (tpte & PG_M) {
2527 vm_page_dirty(m);
2528 }
2529
2530 npv = TAILQ_NEXT(pv, pv_plist);
2531 TAILQ_REMOVE(&pmap->pm_pvlist, pv, pv_plist);
2532
2533 m->md.pv_list_count--;
2534 TAILQ_REMOVE(&m->md.pv_list, pv, pv_list);
2535 if (TAILQ_EMPTY(&m->md.pv_list))
2536 vm_page_flag_clear(m, PG_WRITEABLE);
2537
2538 pmap_unuse_pt(pmap, pv->pv_va, *vtopde(pv->pv_va));
2539 free_pv_entry(pv);
2540 }
2541 pmap_invalidate_all(pmap);
2542 PMAP_UNLOCK(pmap);
2543 vm_page_unlock_queues();
2544 }
2545
2546 /*
2547 * pmap_is_modified:
2548 *
2549 * Return whether or not the specified physical page was modified
2550 * in any physical maps.
2551 */
2552 boolean_t
2553 pmap_is_modified(vm_page_t m)
2554 {
2555 pv_entry_t pv;
2556 pt_entry_t *pte;
2557 boolean_t rv;
2558
2559 rv = FALSE;
2560 if (m->flags & PG_FICTITIOUS)
2561 return (rv);
2562
2563 mtx_assert(&vm_page_queue_mtx, MA_OWNED);
2564 TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
2565 /*
2566 * Modify bits are not tracked for every mapping (for
2567 * example, mappings within the kernel's clean submap), so
2568 * skip addresses that are not tracked.
2569 */
2570 if (!pmap_track_modified(pv->pv_va))
2571 continue;
2572 #if defined(PMAP_DIAGNOSTIC)
2573 if (!pv->pv_pmap) {
2574 printf("Null pmap (tb) at va: 0x%lx\n", pv->pv_va);
2575 continue;
2576 }
2577 #endif
2578 PMAP_LOCK(pv->pv_pmap);
2579 pte = pmap_pte(pv->pv_pmap, pv->pv_va);
2580 rv = (*pte & PG_M) != 0;
2581 PMAP_UNLOCK(pv->pv_pmap);
2582 if (rv)
2583 break;
2584 }
2585 return (rv);
2586 }
2587
2588 /*
2589 * pmap_is_prefaultable:
2590 *
2591 * Return whether or not the specified virtual address is eligible
2592 * for prefault.
2593 */
2594 boolean_t
2595 pmap_is_prefaultable(pmap_t pmap, vm_offset_t addr)
2596 {
2597 pd_entry_t *pde;
2598 pt_entry_t *pte;
2599 boolean_t rv;
2600
2601 rv = FALSE;
2602 PMAP_LOCK(pmap);
2603 pde = pmap_pde(pmap, addr);
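/*
 * The address is prefaultable only if its page table page is
 * present but the pte itself is not yet valid.
 */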
2604 if (pde != NULL && (*pde & PG_V)) {
2605 pte = vtopte(addr);
2606 rv = (*pte & PG_V) == 0;
2607 }
2608 PMAP_UNLOCK(pmap);
2609 return (rv);
2610 }
2611
2612 /*
2613 * Clear the given bit in each of the given page's ptes.
2614 */
2615 static __inline void
2616 pmap_clear_ptes(vm_page_t m, long bit)
2617 {
2618 register pv_entry_t pv;
2619 pt_entry_t pbits, *pte;
2620
2621 if ((m->flags & PG_FICTITIOUS) ||
2622 (bit == PG_RW && (m->flags & PG_WRITEABLE) == 0))
2623 return;
2624
2625 mtx_assert(&vm_page_queue_mtx, MA_OWNED);
2626 /*
2627 * Loop over all current mappings, setting/clearing as appropriate.
2628 * If setting RO, do we need to clear the VAC?
2629 */
2630 TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
2631 /*
2632 * don't write-protect pager mappings
2633 */
2634 if (bit == PG_RW) {
2635 if (!pmap_track_modified(pv->pv_va))
2636 continue;
2637 }
2638
2639 #if defined(PMAP_DIAGNOSTIC)
2640 if (!pv->pv_pmap) {
2641 printf("Null pmap (cb) at va: 0x%lx\n", pv->pv_va);
2642 continue;
2643 }
2644 #endif
2645
2646 PMAP_LOCK(pv->pv_pmap);
2647 pte = pmap_pte(pv->pv_pmap, pv->pv_va);
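/*
 * When clearing PG_RW, PG_M is cleared in the same atomic
 * compare-and-set; retry if the pte changed under us, e.g.
 * because the hardware set PG_M concurrently.
 */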
2648 retry:
2649 pbits = *pte;
2650 if (pbits & bit) {
2651 if (bit == PG_RW) {
2652 if (!atomic_cmpset_long(pte, pbits,
2653 pbits & ~(PG_RW | PG_M)))
2654 goto retry;
2655 if (pbits & PG_M) {
2656 vm_page_dirty(m);
2657 }
2658 } else {
2659 atomic_clear_long(pte, bit);
2660 }
2661 pmap_invalidate_page(pv->pv_pmap, pv->pv_va);
2662 }
2663 PMAP_UNLOCK(pv->pv_pmap);
2664 }
2665 if (bit == PG_RW)
2666 vm_page_flag_clear(m, PG_WRITEABLE);
2667 }
2668
2669 /*
2670 * pmap_page_protect:
2671 *
2672 * Lower the permission for all mappings to a given page.
2673 */
2674 void
2675 pmap_page_protect(vm_page_t m, vm_prot_t prot)
2676 {
2677 if ((prot & VM_PROT_WRITE) == 0) {
2678 if (prot & (VM_PROT_READ | VM_PROT_EXECUTE)) {
2679 pmap_clear_ptes(m, PG_RW);
2680 } else {
2681 pmap_remove_all(m);
2682 }
2683 }
2684 }
2685
2686 /*
2687 * pmap_ts_referenced:
2688 *
2689 * Return a count of reference bits for a page, clearing those bits.
2690 * It is not necessary for every reference bit to be cleared, but it
2691 * is necessary that 0 only be returned when there are truly no
2692 * reference bits set.
2693 *
2694 * XXX: The exact number of bits to check and clear is a matter that
2695 * should be tested and standardized at some point in the future for
2696 * optimal aging of shared pages.
2697 */
2698 int
2699 pmap_ts_referenced(vm_page_t m)
2700 {
2701 register pv_entry_t pv, pvf, pvn;
2702 pt_entry_t *pte;
2703 pt_entry_t v;
2704 int rtval = 0;
2705
2706 if (m->flags & PG_FICTITIOUS)
2707 return (rtval);
2708
2709 mtx_assert(&vm_page_queue_mtx, MA_OWNED);
2710 if ((pv = TAILQ_FIRST(&m->md.pv_list)) != NULL) {
2711
2712 pvf = pv;
2713
2714 do {
2715 pvn = TAILQ_NEXT(pv, pv_list);
2716
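/*
 * Rotate this pv to the tail of the list so that successive
 * calls examine different mappings first.
 */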
2717 TAILQ_REMOVE(&m->md.pv_list, pv, pv_list);
2718
2719 TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_list);
2720
2721 if (!pmap_track_modified(pv->pv_va))
2722 continue;
2723
2724 PMAP_LOCK(pv->pv_pmap);
2725 pte = pmap_pte(pv->pv_pmap, pv->pv_va);
2726
2727 if (pte && ((v = pte_load(pte)) & PG_A) != 0) {
2728 atomic_clear_long(pte, PG_A);
2729 pmap_invalidate_page(pv->pv_pmap, pv->pv_va);
2730
2731 rtval++;
2732 if (rtval > 4) {
2733 PMAP_UNLOCK(pv->pv_pmap);
2734 break;
2735 }
2736 }
2737 PMAP_UNLOCK(pv->pv_pmap);
2738 } while ((pv = pvn) != NULL && pv != pvf);
2739 }
2740
2741 return (rtval);
2742 }
2743
2744 /*
2745 * Clear the modify bits on the specified physical page.
2746 */
2747 void
2748 pmap_clear_modify(vm_page_t m)
2749 {
2750 pmap_clear_ptes(m, PG_M);
2751 }
2752
2753 /*
2754 * pmap_clear_reference:
2755 *
2756 * Clear the reference bit on the specified physical page.
2757 */
2758 void
2759 pmap_clear_reference(vm_page_t m)
2760 {
2761 pmap_clear_ptes(m, PG_A);
2762 }
2763
2764 /*
2765 * Miscellaneous support routines follow
2766 */
2767
2768 /*
2769 * Map a set of physical memory pages into the kernel virtual
2770 * address space. Return a pointer to where it is mapped. This
2771 * routine is intended to be used for mapping device memory,
2772 * NOT real memory.
2773 */
2774 void *
2775 pmap_mapdev(pa, size)
2776 vm_paddr_t pa;
2777 vm_size_t size;
2778 {
2779 vm_offset_t va, tmpva, offset;
2780
2781 /* If this fits within the direct map window, use it */
2782 if (pa < dmaplimit && (pa + size) < dmaplimit)
2783 return ((void *)PHYS_TO_DMAP(pa));
2784 offset = pa & PAGE_MASK;
2785 size = roundup(offset + size, PAGE_SIZE);
2786 va = kmem_alloc_nofault(kernel_map, size);
2787 if (!va)
2788 panic("pmap_mapdev: Couldn't alloc kernel virtual memory");
2789 pa = trunc_page(pa);
2790 for (tmpva = va; size > 0; ) {
2791 pmap_kenter(tmpva, pa);
2792 size -= PAGE_SIZE;
2793 tmpva += PAGE_SIZE;
2794 pa += PAGE_SIZE;
2795 }
2796 pmap_invalidate_range(kernel_pmap, va, tmpva);
2797 return ((void *)(va + offset));
2798 }
2799
2800 void
2801 pmap_unmapdev(va, size)
2802 vm_offset_t va;
2803 vm_size_t size;
2804 {
2805 vm_offset_t base, offset, tmpva;
2806
2807 /* If pmap_mapdev gave out a direct map region, do nothing */
2808 if (va >= DMAP_MIN_ADDRESS && va < DMAP_MAX_ADDRESS)
2809 return;
2810 base = trunc_page(va);
2811 offset = va & PAGE_MASK;
2812 size = roundup(offset + size, PAGE_SIZE);
2813 for (tmpva = base; tmpva < (base + size); tmpva += PAGE_SIZE)
2814 pmap_kremove(tmpva);
2815 pmap_invalidate_range(kernel_pmap, va, tmpva);
2816 kmem_free(kernel_map, base, size);
2817 }
2818
2819 /*
2820 * perform the pmap work for mincore
2821 */
2822 int
2823 pmap_mincore(pmap, addr)
2824 pmap_t pmap;
2825 vm_offset_t addr;
2826 {
2827 pt_entry_t *ptep, pte;
2828 vm_page_t m;
2829 int val = 0;
2830
2831 PMAP_LOCK(pmap);
2832 ptep = pmap_pte(pmap, addr);
2833 pte = (ptep != NULL) ? *ptep : 0;
2834 PMAP_UNLOCK(pmap);
2835
2836 if (pte != 0) {
2837 vm_paddr_t pa;
2838
2839 val = MINCORE_INCORE;
2840 if ((pte & PG_MANAGED) == 0)
2841 return val;
2842
2843 pa = pte & PG_FRAME;
2844
2845 m = PHYS_TO_VM_PAGE(pa);
2846
2847 /*
2848 * Modified by us
2849 */
2850 if (pte & PG_M)
2851 val |= MINCORE_MODIFIED|MINCORE_MODIFIED_OTHER;
2852 else {
2853 /*
2854 * Modified by someone else
2855 */
2856 vm_page_lock_queues();
2857 if (m->dirty || pmap_is_modified(m))
2858 val |= MINCORE_MODIFIED_OTHER;
2859 vm_page_unlock_queues();
2860 }
2861 /*
2862 * Referenced by us
2863 */
2864 if (pte & PG_A)
2865 val |= MINCORE_REFERENCED|MINCORE_REFERENCED_OTHER;
2866 else {
2867 /*
2868 * Referenced by someone else
2869 */
2870 vm_page_lock_queues();
2871 if ((m->flags & PG_REFERENCED) ||
2872 pmap_ts_referenced(m)) {
2873 val |= MINCORE_REFERENCED_OTHER;
2874 vm_page_flag_set(m, PG_REFERENCED);
2875 }
2876 vm_page_unlock_queues();
2877 }
2878 }
2879 return val;
2880 }
2881
2882 void
2883 pmap_activate(struct thread *td)
2884 {
2885 struct proc *p = td->td_proc;
2886 pmap_t pmap, oldpmap;
2887 u_int64_t cr3;
2888
2889 critical_enter();
2890 pmap = vmspace_pmap(td->td_proc->p_vmspace);
2891 oldpmap = PCPU_GET(curpmap);
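/*
 * Mark this CPU active in the new pmap and inactive in the old
 * one, then switch to the new page tables.
 */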
2892 #ifdef SMP
2893 if (oldpmap) /* XXX FIXME */
2894 atomic_clear_int(&oldpmap->pm_active, PCPU_GET(cpumask));
2895 atomic_set_int(&pmap->pm_active, PCPU_GET(cpumask));
2896 #else
2897 if (oldpmap) /* XXX FIXME */
2898 oldpmap->pm_active &= ~PCPU_GET(cpumask);
2899 pmap->pm_active |= PCPU_GET(cpumask);
2900 #endif
2901 cr3 = vtophys(pmap->pm_pml4);
2902 /* XXXKSE this is wrong.
2903 * pmap_activate is for the current thread on the current cpu
2904 */
2905 if (p->p_flag & P_SA) {
2906 /* Make sure all other cr3 entries are updated. */
2907 /* what if they are running? XXXKSE (maybe abort them) */
2908 FOREACH_THREAD_IN_PROC(p, td) {
2909 td->td_pcb->pcb_cr3 = cr3;
2910 }
2911 } else {
2912 td->td_pcb->pcb_cr3 = cr3;
2913 }
2914 load_cr3(cr3);
2915 critical_exit();
2916 }
2917
2918 vm_offset_t
2919 pmap_addr_hint(vm_object_t obj, vm_offset_t addr, vm_size_t size)
2920 {
2921
2922 if ((obj == NULL) || (size < NBPDR) || (obj->type != OBJT_DEVICE)) {
2923 return addr;
2924 }
2925
2926 addr = (addr + (NBPDR - 1)) & ~(NBPDR - 1);
2927 return addr;
2928 }