FreeBSD/Linux Kernel Cross Reference
sys/i386/i386/pmap.c
1 /*-
2 * Copyright (c) 1991 Regents of the University of California.
3 * All rights reserved.
4 * Copyright (c) 1994 John S. Dyson
5 * All rights reserved.
6 * Copyright (c) 1994 David Greenman
7 * All rights reserved.
8 *
9 * This code is derived from software contributed to Berkeley by
10 * the Systems Programming Group of the University of Utah Computer
11 * Science Department and William Jolitz of UUNET Technologies Inc.
12 *
13 * Redistribution and use in source and binary forms, with or without
14 * modification, are permitted provided that the following conditions
15 * are met:
16 * 1. Redistributions of source code must retain the above copyright
17 * notice, this list of conditions and the following disclaimer.
18 * 2. Redistributions in binary form must reproduce the above copyright
19 * notice, this list of conditions and the following disclaimer in the
20 * documentation and/or other materials provided with the distribution.
21 * 3. All advertising materials mentioning features or use of this software
22 * must display the following acknowledgement:
23 * This product includes software developed by the University of
24 * California, Berkeley and its contributors.
25 * 4. Neither the name of the University nor the names of its contributors
26 * may be used to endorse or promote products derived from this software
27 * without specific prior written permission.
28 *
29 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
30 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
31 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
32 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
33 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
34 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
35 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
36 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
37 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
38 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
39 * SUCH DAMAGE.
40 *
41 * from: @(#)pmap.c 7.7 (Berkeley) 5/12/91
42 */
43 /*-
44 * Copyright (c) 2003 Networks Associates Technology, Inc.
45 * All rights reserved.
46 *
47 * This software was developed for the FreeBSD Project by Jake Burkholder,
48 * Safeport Network Services, and Network Associates Laboratories, the
49 * Security Research Division of Network Associates, Inc. under
50 * DARPA/SPAWAR contract N66001-01-C-8035 ("CBOSS"), as part of the DARPA
51 * CHATS research program.
52 *
53 * Redistribution and use in source and binary forms, with or without
54 * modification, are permitted provided that the following conditions
55 * are met:
56 * 1. Redistributions of source code must retain the above copyright
57 * notice, this list of conditions and the following disclaimer.
58 * 2. Redistributions in binary form must reproduce the above copyright
59 * notice, this list of conditions and the following disclaimer in the
60 * documentation and/or other materials provided with the distribution.
61 *
62 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
63 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
64 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
65 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
66 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
67 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
68 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
69 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
70 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
71 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
72 * SUCH DAMAGE.
73 */
74
75 #include <sys/cdefs.h>
76 __FBSDID("$FreeBSD: src/sys/i386/i386/pmap.c,v 1.494.2.11 2005/10/28 06:50:36 ade Exp $");
77
78 /*
79 * Manages physical address maps.
80 *
81 * In addition to hardware address maps, this
82 * module is called upon to provide software-use-only
83 * maps which may or may not be stored in the same
84 * form as hardware maps. These pseudo-maps are
85 * used to store intermediate results from copy
86 * operations to and from address spaces.
87 *
88 * Since the information managed by this module is
89 * also stored by the logical address mapping module,
90 * this module may throw away valid virtual-to-physical
91 * mappings at almost any time. However, invalidations
92 * of virtual-to-physical mappings must be done as
93 * requested.
94 *
95 * In order to cope with hardware architectures which
96 * make virtual-to-physical map invalidates expensive,
97 * this module may delay invalidate or reduced protection
98 * operations until such time as they are actually
99 * necessary. This module is given full information as
100 * to which processors are currently using which maps,
101 * and to when physical maps must be made correct.
102 */
103
104 #include "opt_cpu.h"
105 #include "opt_pmap.h"
106 #include "opt_msgbuf.h"
107 #include "opt_kstack_pages.h"
108
109 #include <sys/param.h>
110 #include <sys/systm.h>
111 #include <sys/kernel.h>
112 #include <sys/lock.h>
113 #include <sys/malloc.h>
114 #include <sys/mman.h>
115 #include <sys/msgbuf.h>
116 #include <sys/mutex.h>
117 #include <sys/proc.h>
118 #include <sys/sx.h>
119 #include <sys/vmmeter.h>
120 #include <sys/sched.h>
121 #include <sys/sysctl.h>
122 #ifdef SMP
123 #include <sys/smp.h>
124 #endif
125
126 #include <vm/vm.h>
127 #include <vm/vm_param.h>
128 #include <vm/vm_kern.h>
129 #include <vm/vm_page.h>
130 #include <vm/vm_map.h>
131 #include <vm/vm_object.h>
132 #include <vm/vm_extern.h>
133 #include <vm/vm_pageout.h>
134 #include <vm/vm_pager.h>
135 #include <vm/uma.h>
136
137 #include <machine/cpu.h>
138 #include <machine/cputypes.h>
139 #include <machine/md_var.h>
140 #include <machine/pcb.h>
141 #include <machine/specialreg.h>
142 #ifdef SMP
143 #include <machine/smp.h>
144 #endif
145
146 #if !defined(CPU_ENABLE_SSE) && defined(I686_CPU)
147 #define CPU_ENABLE_SSE
148 #endif
149 #if defined(CPU_DISABLE_SSE)
150 #undef CPU_ENABLE_SSE
151 #endif
152
153 #ifndef PMAP_SHPGPERPROC
154 #define PMAP_SHPGPERPROC 200
155 #endif
156
157 #if defined(DIAGNOSTIC)
158 #define PMAP_DIAGNOSTIC
159 #endif
160
161 #define MINPV 2048
162
163 #if !defined(PMAP_DIAGNOSTIC)
164 #define PMAP_INLINE __inline
165 #else
166 #define PMAP_INLINE
167 #endif
168
169 /*
170 * Get PDEs and PTEs for user/kernel address space
171 */
172 #define pmap_pde(m, v) (&((m)->pm_pdir[(vm_offset_t)(v) >> PDRSHIFT]))
173 #define pdir_pde(m, v) (m[(vm_offset_t)(v) >> PDRSHIFT])
174
175 #define pmap_pde_v(pte) ((*(int *)pte & PG_V) != 0)
176 #define pmap_pte_w(pte) ((*(int *)pte & PG_W) != 0)
177 #define pmap_pte_m(pte) ((*(int *)pte & PG_M) != 0)
178 #define pmap_pte_u(pte) ((*(int *)pte & PG_A) != 0)
179 #define pmap_pte_v(pte) ((*(int *)pte & PG_V) != 0)
180
181 #define pmap_pte_set_w(pte, v) ((v) ? atomic_set_int((u_int *)(pte), PG_W) : \
182 atomic_clear_int((u_int *)(pte), PG_W))
183 #define pmap_pte_set_prot(pte, v) ((*(int *)pte &= ~PG_PROT), (*(int *)pte |= (v)))
184
185 struct pmap kernel_pmap_store;
186 LIST_HEAD(pmaplist, pmap);
187 static struct pmaplist allpmaps;
188 static struct mtx allpmaps_lock;
189
190 vm_paddr_t avail_end; /* PA of last available physical page */
191 vm_offset_t virtual_avail; /* VA of first avail page (after kernel bss) */
192 vm_offset_t virtual_end; /* VA of last avail page (end of kernel AS) */
193 static boolean_t pmap_initialized = FALSE; /* Has pmap_init completed? */
194 int pgeflag = 0; /* PG_G or-in */
195 int pseflag = 0; /* PG_PS or-in */
196
197 static int nkpt;
198 vm_offset_t kernel_vm_end;
199 extern u_int32_t KERNend;
200
201 #ifdef PAE
202 static uma_zone_t pdptzone;
203 #endif
204
205 /*
206 * Data for the pv entry allocation mechanism
207 */
208 static uma_zone_t pvzone;
209 static struct vm_object pvzone_obj;
210 static int pv_entry_count = 0, pv_entry_max = 0, pv_entry_high_water = 0;
211 int pmap_pagedaemon_waken;
212
213 /*
214 * All those kernel PT submaps that BSD is so fond of
215 */
216 struct sysmaps {
217 struct mtx lock;
218 pt_entry_t *CMAP1;
219 pt_entry_t *CMAP2;
220 caddr_t CADDR1;
221 caddr_t CADDR2;
222 };
223 static struct sysmaps sysmaps_pcpu[MAXCPU];
224 pt_entry_t *CMAP1 = 0;
225 static pt_entry_t *CMAP3;
226 caddr_t CADDR1 = 0, ptvmmap = 0;
227 static caddr_t CADDR3;
228 struct msgbuf *msgbufp = 0;
229
230 /*
231 * Crashdump maps.
232 */
233 static caddr_t crashdumpmap;
234
235 #ifdef SMP
236 extern pt_entry_t *SMPpt;
237 #endif
238 static pt_entry_t *PMAP1 = 0, *PMAP2;
239 static pt_entry_t *PADDR1 = 0, *PADDR2;
240 #ifdef SMP
241 static int PMAP1cpu;
242 static int PMAP1changedcpu;
243 SYSCTL_INT(_debug, OID_AUTO, PMAP1changedcpu, CTLFLAG_RD,
244 &PMAP1changedcpu, 0,
245 "Number of times pmap_pte_quick changed CPU with same PMAP1");
246 #endif
247 static int PMAP1changed;
248 SYSCTL_INT(_debug, OID_AUTO, PMAP1changed, CTLFLAG_RD,
249 &PMAP1changed, 0,
250 "Number of times pmap_pte_quick changed PMAP1");
251 static int PMAP1unchanged;
252 SYSCTL_INT(_debug, OID_AUTO, PMAP1unchanged, CTLFLAG_RD,
253 &PMAP1unchanged, 0,
254 "Number of times pmap_pte_quick didn't change PMAP1");
255 static struct mtx PMAP2mutex;
256
257 static PMAP_INLINE void free_pv_entry(pv_entry_t pv);
258 static pv_entry_t get_pv_entry(void);
259 static void pmap_clear_ptes(vm_page_t m, int bit);
260
261 static int pmap_remove_pte(pmap_t pmap, pt_entry_t *ptq, vm_offset_t sva);
262 static void pmap_remove_page(struct pmap *pmap, vm_offset_t va);
263 static int pmap_remove_entry(struct pmap *pmap, vm_page_t m,
264 vm_offset_t va);
265 static void pmap_insert_entry(pmap_t pmap, vm_offset_t va, vm_page_t m);
266
267 static vm_page_t pmap_allocpte(pmap_t pmap, vm_offset_t va, int flags);
268
269 static vm_page_t _pmap_allocpte(pmap_t pmap, unsigned ptepindex, int flags);
270 static int _pmap_unwire_pte_hold(pmap_t pmap, vm_page_t m);
271 static pt_entry_t *pmap_pte_quick(pmap_t pmap, vm_offset_t va);
272 static void pmap_pte_release(pt_entry_t *pte);
273 static int pmap_unuse_pt(pmap_t, vm_offset_t);
274 static vm_offset_t pmap_kmem_choose(vm_offset_t addr);
275 #ifdef PAE
276 static void *pmap_pdpt_allocf(uma_zone_t zone, int bytes, u_int8_t *flags, int wait);
277 #endif
278
279 CTASSERT(1 << PDESHIFT == sizeof(pd_entry_t));
280 CTASSERT(1 << PTESHIFT == sizeof(pt_entry_t));
281
282 /*
283 * Move the kernel virtual free pointer to the next
284 * 4MB. This is used to help improve performance
285 * by using a large (4MB) page for much of the kernel
286 * (.text, .data, .bss)
287 */
288 static vm_offset_t
289 pmap_kmem_choose(vm_offset_t addr)
290 {
291 vm_offset_t newaddr = addr;
292
293 #ifndef DISABLE_PSE
294 if (cpu_feature & CPUID_PSE)
295 newaddr = (addr + PDRMASK) & ~PDRMASK;
296 #endif
297 return newaddr;
298 }
299
300 /*
301 * Bootstrap the system enough to run with virtual memory.
302 *
303 * On the i386 this is called after mapping has already been enabled
304 * and just syncs the pmap module with what has already been done.
305 * [We can't call it easily with mapping off since the kernel is not
306 * mapped with PA == VA, hence we would have to relocate every address
307 * from the linked base (virtual) address "KERNBASE" to the actual
308 * (physical) address starting relative to 0]
309 */
310 void
311 pmap_bootstrap(firstaddr, loadaddr)
312 vm_paddr_t firstaddr;
313 vm_paddr_t loadaddr;
314 {
315 vm_offset_t va;
316 pt_entry_t *pte, *unused;
317 struct sysmaps *sysmaps;
318 int i;
319
320 /*
321 * XXX The calculation of virtual_avail is wrong. It's NKPT*PAGE_SIZE too
322 * large. It should instead be correctly calculated in locore.s and
323 * not based on 'first' (which is a physical address, not a virtual
324 * address, for the start of unused physical memory). The kernel
325 * page tables are NOT double mapped and thus should not be included
326 * in this calculation.
327 */
328 virtual_avail = (vm_offset_t) KERNBASE + firstaddr;
329 virtual_avail = pmap_kmem_choose(virtual_avail);
330
331 virtual_end = VM_MAX_KERNEL_ADDRESS;
332
333 /*
334 * Initialize the kernel pmap (which is statically allocated).
335 */
336 PMAP_LOCK_INIT(kernel_pmap);
337 kernel_pmap->pm_pdir = (pd_entry_t *) (KERNBASE + (u_int)IdlePTD);
338 #ifdef PAE
339 kernel_pmap->pm_pdpt = (pdpt_entry_t *) (KERNBASE + (u_int)IdlePDPT);
340 #endif
341 kernel_pmap->pm_active = -1; /* don't allow deactivation */
342 TAILQ_INIT(&kernel_pmap->pm_pvlist);
343 LIST_INIT(&allpmaps);
344 mtx_init(&allpmaps_lock, "allpmaps", NULL, MTX_SPIN);
345 mtx_lock_spin(&allpmaps_lock);
346 LIST_INSERT_HEAD(&allpmaps, kernel_pmap, pm_list);
347 mtx_unlock_spin(&allpmaps_lock);
348 nkpt = NKPT;
349
350 /*
351 * Reserve some special page table entries/VA space for temporary
352 * mapping of pages.
353 */
354 #define SYSMAP(c, p, v, n) \
355 v = (c)va; va += ((n)*PAGE_SIZE); p = pte; pte += (n);
356
357 va = virtual_avail;
358 pte = vtopte(va);
359
360 /*
361 * CMAP1/CMAP2 are used for zeroing and copying pages.
362 * CMAP3 is used for the idle process page zeroing.
363 */
364 for (i = 0; i < MAXCPU; i++) {
365 sysmaps = &sysmaps_pcpu[i];
366 mtx_init(&sysmaps->lock, "SYSMAPS", NULL, MTX_DEF);
367 SYSMAP(caddr_t, sysmaps->CMAP1, sysmaps->CADDR1, 1)
368 SYSMAP(caddr_t, sysmaps->CMAP2, sysmaps->CADDR2, 1)
369 }
370 SYSMAP(caddr_t, CMAP1, CADDR1, 1)
371 SYSMAP(caddr_t, CMAP3, CADDR3, 1)
372 *CMAP3 = 0;
373
374 /*
375 * Crashdump maps.
376 */
377 SYSMAP(caddr_t, unused, crashdumpmap, MAXDUMPPGS)
378
379 /*
380 * ptvmmap is used for reading arbitrary physical pages via /dev/mem.
381 */
382 SYSMAP(caddr_t, unused, ptvmmap, 1)
383
384 /*
385 * msgbufp is used to map the system message buffer.
386 */
387 SYSMAP(struct msgbuf *, unused, msgbufp, atop(round_page(MSGBUF_SIZE)))
388
389 /*
390 * ptemap is used for pmap_pte_quick
391 */
392 SYSMAP(pt_entry_t *, PMAP1, PADDR1, 1);
393 SYSMAP(pt_entry_t *, PMAP2, PADDR2, 1);
394
395 mtx_init(&PMAP2mutex, "PMAP2", NULL, MTX_DEF);
396
397 virtual_avail = va;
398
399 *CMAP1 = 0;
400 for (i = 0; i < NKPT; i++)
401 PTD[i] = 0;
402
403 /* Turn on PG_G on kernel page(s) */
404 pmap_set_pg();
405 }
406
407 /*
408 * Set PG_G on kernel pages. Only the BSP calls this when SMP is turned on.
409 */
410 void
411 pmap_set_pg(void)
412 {
413 pd_entry_t pdir;
414 pt_entry_t *pte;
415 vm_offset_t va, endva;
416 int i;
417
418 if (pgeflag == 0)
419 return;
420
421 i = KERNLOAD/NBPDR;
422 endva = KERNBASE + KERNend;
423
424 if (pseflag) {
425 va = KERNBASE + KERNLOAD;
426 while (va < endva) {
427 pdir = kernel_pmap->pm_pdir[KPTDI+i];
428 pdir |= pgeflag;
429 kernel_pmap->pm_pdir[KPTDI+i] = PTD[KPTDI+i] = pdir;
430 invltlb(); /* Play it safe, invltlb() every time */
431 i++;
432 va += NBPDR;
433 }
434 } else {
435 va = (vm_offset_t)btext;
436 while (va < endva) {
437 pte = vtopte(va);
438 if (*pte)
439 *pte |= pgeflag;
440 invltlb(); /* Play it safe, invltlb() every time */
441 va += PAGE_SIZE;
442 }
443 }
444 }
445
446 #ifdef PAE
447
448 static MALLOC_DEFINE(M_PMAPPDPT, "pmap", "pmap pdpt");
449
450 static void *
451 pmap_pdpt_allocf(uma_zone_t zone, int bytes, u_int8_t *flags, int wait)
452 {
453 *flags = UMA_SLAB_PRIV;
454 return (contigmalloc(PAGE_SIZE, M_PMAPPDPT, 0, 0x0ULL, 0xffffffffULL,
455 1, 0));
456 }
457 #endif
458
459 /*
460 * Initialize the pmap module.
461 * Called by vm_init, to initialize any structures that the pmap
462 * system needs to map virtual memory.
463 * pmap_init has been enhanced to support in a fairly consistant
464 * way, discontiguous physical memory.
465 */
466 void
467 pmap_init(void)
468 {
469 int i;
470
471 /*
472 * Allocate memory for random pmap data structures. Includes the
473 * pv_head_table.
474 */
475
476 for(i = 0; i < vm_page_array_size; i++) {
477 vm_page_t m;
478
479 m = &vm_page_array[i];
480 TAILQ_INIT(&m->md.pv_list);
481 m->md.pv_list_count = 0;
482 }
483
484 /*
485 * init the pv free list
486 */
487 pvzone = uma_zcreate("PV ENTRY", sizeof (struct pv_entry), NULL, NULL,
488 NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_VM | UMA_ZONE_NOFREE);
489 uma_prealloc(pvzone, MINPV);
490
491 #ifdef PAE
492 pdptzone = uma_zcreate("PDPT", NPGPTD * sizeof(pdpt_entry_t), NULL,
493 NULL, NULL, NULL, (NPGPTD * sizeof(pdpt_entry_t)) - 1,
494 UMA_ZONE_VM | UMA_ZONE_NOFREE);
495 uma_zone_set_allocf(pdptzone, pmap_pdpt_allocf);
496 #endif
497
498 /*
499 * Now it is safe to enable pv_table recording.
500 */
501 pmap_initialized = TRUE;
502 }
503
504 /*
505 * Initialize the address space (zone) for the pv_entries. Set a
506 * high water mark so that the system can recover from excessive
507 * numbers of pv entries.
508 */
509 void
510 pmap_init2()
511 {
512 int shpgperproc = PMAP_SHPGPERPROC;
513
514 TUNABLE_INT_FETCH("vm.pmap.shpgperproc", &shpgperproc);
515 pv_entry_max = shpgperproc * maxproc + vm_page_array_size;
516 TUNABLE_INT_FETCH("vm.pmap.pv_entries", &pv_entry_max);
517 pv_entry_high_water = 9 * (pv_entry_max / 10);
518 uma_zone_set_obj(pvzone, &pvzone_obj, pv_entry_max);
519 }
520
521
522 /***************************************************
523 * Low level helper routines.....
524 ***************************************************/
525
526 #if defined(PMAP_DIAGNOSTIC)
527
528 /*
529 * This code checks for non-writeable/modified pages.
530 * This should be an invalid condition.
531 */
532 static int
533 pmap_nw_modified(pt_entry_t ptea)
534 {
535 int pte;
536
537 pte = (int) ptea;
538
539 if ((pte & (PG_M|PG_RW)) == PG_M)
540 return 1;
541 else
542 return 0;
543 }
544 #endif
545
546
547 /*
548 * this routine defines the region(s) of memory that should
549 * not be tested for the modified bit.
550 */
551 static PMAP_INLINE int
552 pmap_track_modified(vm_offset_t va)
553 {
554 if ((va < kmi.clean_sva) || (va >= kmi.clean_eva))
555 return 1;
556 else
557 return 0;
558 }
559
560 #ifdef I386_CPU
561 /*
562 * i386 only has "invalidate everything" and no SMP to worry about.
563 */
564 PMAP_INLINE void
565 pmap_invalidate_page(pmap_t pmap, vm_offset_t va)
566 {
567
568 if (pmap == kernel_pmap || pmap->pm_active)
569 invltlb();
570 }
571
572 PMAP_INLINE void
573 pmap_invalidate_range(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
574 {
575
576 if (pmap == kernel_pmap || pmap->pm_active)
577 invltlb();
578 }
579
580 PMAP_INLINE void
581 pmap_invalidate_all(pmap_t pmap)
582 {
583
584 if (pmap == kernel_pmap || pmap->pm_active)
585 invltlb();
586 }
587 #else /* !I386_CPU */
588 #ifdef SMP
589 /*
590 * For SMP, these functions have to use the IPI mechanism for coherence.
591 */
592 void
593 pmap_invalidate_page(pmap_t pmap, vm_offset_t va)
594 {
595 u_int cpumask;
596 u_int other_cpus;
597
598 if (smp_started) {
599 if (!(read_eflags() & PSL_I))
600 panic("%s: interrupts disabled", __func__);
601 mtx_lock_spin(&smp_ipi_mtx);
602 } else
603 critical_enter();
604 /*
605 * We need to disable interrupt preemption but MUST NOT have
606 * interrupts disabled here.
607 * XXX we may need to hold schedlock to get a coherent pm_active
608 * XXX critical sections disable interrupts again
609 */
610 if (pmap == kernel_pmap || pmap->pm_active == all_cpus) {
611 invlpg(va);
612 smp_invlpg(va);
613 } else {
614 cpumask = PCPU_GET(cpumask);
615 other_cpus = PCPU_GET(other_cpus);
616 if (pmap->pm_active & cpumask)
617 invlpg(va);
618 if (pmap->pm_active & other_cpus)
619 smp_masked_invlpg(pmap->pm_active & other_cpus, va);
620 }
621 if (smp_started)
622 mtx_unlock_spin(&smp_ipi_mtx);
623 else
624 critical_exit();
625 }
626
627 void
628 pmap_invalidate_range(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
629 {
630 u_int cpumask;
631 u_int other_cpus;
632 vm_offset_t addr;
633
634 if (smp_started) {
635 if (!(read_eflags() & PSL_I))
636 panic("%s: interrupts disabled", __func__);
637 mtx_lock_spin(&smp_ipi_mtx);
638 } else
639 critical_enter();
640 /*
641 * We need to disable interrupt preemption but MUST NOT have
642 * interrupts disabled here.
643 * XXX we may need to hold schedlock to get a coherent pm_active
644 * XXX critical sections disable interrupts again
645 */
646 if (pmap == kernel_pmap || pmap->pm_active == all_cpus) {
647 for (addr = sva; addr < eva; addr += PAGE_SIZE)
648 invlpg(addr);
649 smp_invlpg_range(sva, eva);
650 } else {
651 cpumask = PCPU_GET(cpumask);
652 other_cpus = PCPU_GET(other_cpus);
653 if (pmap->pm_active & cpumask)
654 for (addr = sva; addr < eva; addr += PAGE_SIZE)
655 invlpg(addr);
656 if (pmap->pm_active & other_cpus)
657 smp_masked_invlpg_range(pmap->pm_active & other_cpus,
658 sva, eva);
659 }
660 if (smp_started)
661 mtx_unlock_spin(&smp_ipi_mtx);
662 else
663 critical_exit();
664 }
665
666 void
667 pmap_invalidate_all(pmap_t pmap)
668 {
669 u_int cpumask;
670 u_int other_cpus;
671
672 if (smp_started) {
673 if (!(read_eflags() & PSL_I))
674 panic("%s: interrupts disabled", __func__);
675 mtx_lock_spin(&smp_ipi_mtx);
676 } else
677 critical_enter();
678 /*
679 * We need to disable interrupt preemption but MUST NOT have
680 * interrupts disabled here.
681 * XXX we may need to hold schedlock to get a coherent pm_active
682 * XXX critical sections disable interrupts again
683 */
684 if (pmap == kernel_pmap || pmap->pm_active == all_cpus) {
685 invltlb();
686 smp_invltlb();
687 } else {
688 cpumask = PCPU_GET(cpumask);
689 other_cpus = PCPU_GET(other_cpus);
690 if (pmap->pm_active & cpumask)
691 invltlb();
692 if (pmap->pm_active & other_cpus)
693 smp_masked_invltlb(pmap->pm_active & other_cpus);
694 }
695 if (smp_started)
696 mtx_unlock_spin(&smp_ipi_mtx);
697 else
698 critical_exit();
699 }
700 #else /* !SMP */
701 /*
702 * Normal, non-SMP, 486+ invalidation functions.
703 * We inline these within pmap.c for speed.
704 */
705 PMAP_INLINE void
706 pmap_invalidate_page(pmap_t pmap, vm_offset_t va)
707 {
708
709 if (pmap == kernel_pmap || pmap->pm_active)
710 invlpg(va);
711 }
712
713 PMAP_INLINE void
714 pmap_invalidate_range(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
715 {
716 vm_offset_t addr;
717
718 if (pmap == kernel_pmap || pmap->pm_active)
719 for (addr = sva; addr < eva; addr += PAGE_SIZE)
720 invlpg(addr);
721 }
722
723 PMAP_INLINE void
724 pmap_invalidate_all(pmap_t pmap)
725 {
726
727 if (pmap == kernel_pmap || pmap->pm_active)
728 invltlb();
729 }
730 #endif /* !SMP */
731 #endif /* !I386_CPU */
732
733 /*
734 * Are we current address space or kernel? N.B. We return FALSE when
735 * a pmap's page table is in use because a kernel thread is borrowing
736 * it. The borrowed page table can change spontaneously, making any
737 * dependence on its continued use subject to a race condition.
738 */
739 static __inline int
740 pmap_is_current(pmap_t pmap)
741 {
742
743 return (pmap == kernel_pmap ||
744 (pmap == vmspace_pmap(curthread->td_proc->p_vmspace) &&
745 (pmap->pm_pdir[PTDPTDI] & PG_FRAME) == (PTDpde[0] & PG_FRAME)));
746 }
747
748 /*
749 * If the given pmap is not the current or kernel pmap, the returned pte must
750 * be released by passing it to pmap_pte_release().
751 */
752 pt_entry_t *
753 pmap_pte(pmap_t pmap, vm_offset_t va)
754 {
755 pd_entry_t newpf;
756 pd_entry_t *pde;
757
758 pde = pmap_pde(pmap, va);
759 if (*pde & PG_PS)
760 return (pde);
761 if (*pde != 0) {
762 /* are we current address space or kernel? */
763 if (pmap_is_current(pmap))
764 return (vtopte(va));
765 mtx_lock(&PMAP2mutex);
766 newpf = *pde & PG_FRAME;
767 if ((*PMAP2 & PG_FRAME) != newpf) {
768 *PMAP2 = newpf | PG_RW | PG_V | PG_A | PG_M;
769 pmap_invalidate_page(kernel_pmap, (vm_offset_t)PADDR2);
770 }
771 return (PADDR2 + (i386_btop(va) & (NPTEPG - 1)));
772 }
773 return (0);
774 }
775
776 /*
777 * Releases a pte that was obtained from pmap_pte(). Be prepared for the pte
778 * being NULL.
779 */
780 static __inline void
781 pmap_pte_release(pt_entry_t *pte)
782 {
783
784 if ((pt_entry_t *)((vm_offset_t)pte & ~PAGE_MASK) == PADDR2)
785 mtx_unlock(&PMAP2mutex);
786 }
787
788 static __inline void
789 invlcaddr(void *caddr)
790 {
791 #ifdef I386_CPU
792 invltlb();
793 #else
794 invlpg((u_int)caddr);
795 #endif
796 }
797
798 /*
799 * Super fast pmap_pte routine best used when scanning
800 * the pv lists. This eliminates many coarse-grained
801 * invltlb calls. Note that many of the pv list
802 * scans are across different pmaps. It is very wasteful
803 * to do an entire invltlb for checking a single mapping.
804 *
805 * If the given pmap is not the current pmap, vm_page_queue_mtx
806 * must be held and curthread pinned to a CPU.
807 */
808 static pt_entry_t *
809 pmap_pte_quick(pmap_t pmap, vm_offset_t va)
810 {
811 pd_entry_t newpf;
812 pd_entry_t *pde;
813
814 pde = pmap_pde(pmap, va);
815 if (*pde & PG_PS)
816 return (pde);
817 if (*pde != 0) {
818 /* are we current address space or kernel? */
819 if (pmap_is_current(pmap))
820 return (vtopte(va));
821 mtx_assert(&vm_page_queue_mtx, MA_OWNED);
822 KASSERT(curthread->td_pinned > 0, ("curthread not pinned"));
823 newpf = *pde & PG_FRAME;
824 if ((*PMAP1 & PG_FRAME) != newpf) {
825 *PMAP1 = newpf | PG_RW | PG_V | PG_A | PG_M;
826 #ifdef SMP
827 PMAP1cpu = PCPU_GET(cpuid);
828 #endif
829 invlcaddr(PADDR1);
830 PMAP1changed++;
831 } else
832 #ifdef SMP
833 if (PMAP1cpu != PCPU_GET(cpuid)) {
834 PMAP1cpu = PCPU_GET(cpuid);
835 invlcaddr(PADDR1);
836 PMAP1changedcpu++;
837 } else
838 #endif
839 PMAP1unchanged++;
840 return (PADDR1 + (i386_btop(va) & (NPTEPG - 1)));
841 }
842 return (0);
843 }
844
845 /*
846 * Routine: pmap_extract
847 * Function:
848 * Extract the physical page address associated
849 * with the given map/virtual_address pair.
850 */
851 vm_paddr_t
852 pmap_extract(pmap_t pmap, vm_offset_t va)
853 {
854 vm_paddr_t rtval;
855 pt_entry_t *pte;
856 pd_entry_t pde;
857
858 rtval = 0;
859 PMAP_LOCK(pmap);
860 pde = pmap->pm_pdir[va >> PDRSHIFT];
861 if (pde != 0) {
862 if ((pde & PG_PS) != 0) {
863 rtval = (pde & ~PDRMASK) | (va & PDRMASK);
864 PMAP_UNLOCK(pmap);
865 return rtval;
866 }
867 pte = pmap_pte(pmap, va);
868 rtval = (*pte & PG_FRAME) | (va & PAGE_MASK);
869 pmap_pte_release(pte);
870 }
871 PMAP_UNLOCK(pmap);
872 return (rtval);
873 }
874
875 /*
876 * Routine: pmap_extract_and_hold
877 * Function:
878 * Atomically extract and hold the physical page
879 * with the given pmap and virtual address pair
880 * if that mapping permits the given protection.
881 */
882 vm_page_t
883 pmap_extract_and_hold(pmap_t pmap, vm_offset_t va, vm_prot_t prot)
884 {
885 pd_entry_t pde;
886 pt_entry_t pte;
887 vm_page_t m;
888
889 m = NULL;
890 vm_page_lock_queues();
891 PMAP_LOCK(pmap);
892 pde = *pmap_pde(pmap, va);
893 if (pde != 0) {
894 if (pde & PG_PS) {
895 if ((pde & PG_RW) || (prot & VM_PROT_WRITE) == 0) {
896 m = PHYS_TO_VM_PAGE((pde & ~PDRMASK) |
897 (va & PDRMASK));
898 vm_page_hold(m);
899 }
900 } else {
901 sched_pin();
902 pte = *pmap_pte_quick(pmap, va);
903 if (pte != 0 &&
904 ((pte & PG_RW) || (prot & VM_PROT_WRITE) == 0)) {
905 m = PHYS_TO_VM_PAGE(pte & PG_FRAME);
906 vm_page_hold(m);
907 }
908 sched_unpin();
909 }
910 }
911 vm_page_unlock_queues();
912 PMAP_UNLOCK(pmap);
913 return (m);
914 }
915
916 /***************************************************
917 * Low level mapping routines.....
918 ***************************************************/
919
920 /*
921 * Add a wired page to the kva.
922 * Note: not SMP coherent.
923 */
924 PMAP_INLINE void
925 pmap_kenter(vm_offset_t va, vm_paddr_t pa)
926 {
927 pt_entry_t *pte;
928
929 pte = vtopte(va);
930 pte_store(pte, pa | PG_RW | PG_V | pgeflag);
931 }
932
933 /*
934 * Remove a page from the kernel pagetables.
935 * Note: not SMP coherent.
936 */
937 PMAP_INLINE void
938 pmap_kremove(vm_offset_t va)
939 {
940 pt_entry_t *pte;
941
942 pte = vtopte(va);
943 pte_clear(pte);
944 }
945
946 /*
947 * Used to map a range of physical addresses into kernel
948 * virtual address space.
949 *
950 * The value passed in '*virt' is a suggested virtual address for
951 * the mapping. Architectures which can support a direct-mapped
952 * physical to virtual region can return the appropriate address
953 * within that region, leaving '*virt' unchanged. Other
954 * architectures should map the pages starting at '*virt' and
955 * update '*virt' with the first usable address after the mapped
956 * region.
957 */
958 vm_offset_t
959 pmap_map(vm_offset_t *virt, vm_paddr_t start, vm_paddr_t end, int prot)
960 {
961 vm_offset_t va, sva;
962
963 va = sva = *virt;
964 while (start < end) {
965 pmap_kenter(va, start);
966 va += PAGE_SIZE;
967 start += PAGE_SIZE;
968 }
969 pmap_invalidate_range(kernel_pmap, sva, va);
970 *virt = va;
971 return (sva);
972 }
973
974
975 /*
976 * Add a list of wired pages to the kva
977 * this routine is only used for temporary
978 * kernel mappings that do not need to have
979 * page modification or references recorded.
980 * Note that old mappings are simply written
981 * over. The page *must* be wired.
982 * Note: SMP coherent. Uses a ranged shootdown IPI.
983 */
984 void
985 pmap_qenter(vm_offset_t sva, vm_page_t *m, int count)
986 {
987 vm_offset_t va;
988
989 va = sva;
990 while (count-- > 0) {
991 pmap_kenter(va, VM_PAGE_TO_PHYS(*m));
992 va += PAGE_SIZE;
993 m++;
994 }
995 pmap_invalidate_range(kernel_pmap, sva, va);
996 }
997
998 /*
999 * This routine tears out page mappings from the
1000 * kernel -- it is meant only for temporary mappings.
1001 * Note: SMP coherent. Uses a ranged shootdown IPI.
1002 */
1003 void
1004 pmap_qremove(vm_offset_t sva, int count)
1005 {
1006 vm_offset_t va;
1007
1008 va = sva;
1009 while (count-- > 0) {
1010 pmap_kremove(va);
1011 va += PAGE_SIZE;
1012 }
1013 pmap_invalidate_range(kernel_pmap, sva, va);
1014 }
1015
1016 /***************************************************
1017 * Page table page management routines.....
1018 ***************************************************/
1019
1020 /*
1021 * This routine unholds page table pages, and if the hold count
1022 * drops to zero, then it decrements the wire count.
1023 */
1024 static PMAP_INLINE int
1025 pmap_unwire_pte_hold(pmap_t pmap, vm_page_t m)
1026 {
1027
1028 --m->wire_count;
1029 if (m->wire_count == 0)
1030 return _pmap_unwire_pte_hold(pmap, m);
1031 else
1032 return 0;
1033 }
1034
1035 static int
1036 _pmap_unwire_pte_hold(pmap_t pmap, vm_page_t m)
1037 {
1038 vm_offset_t pteva;
1039
1040 /*
1041 * unmap the page table page
1042 */
1043 pmap->pm_pdir[m->pindex] = 0;
1044 --pmap->pm_stats.resident_count;
1045
1046 /*
1047 * Do an invltlb to make the invalidated mapping
1048 * take effect immediately.
1049 */
1050 pteva = VM_MAXUSER_ADDRESS + i386_ptob(m->pindex);
1051 pmap_invalidate_page(pmap, pteva);
1052
1053 vm_page_free_zero(m);
1054 atomic_subtract_int(&cnt.v_wire_count, 1);
1055 return 1;
1056 }
1057
1058 /*
1059 * After removing a page table entry, this routine is used to
1060 * conditionally free the page, and manage the hold/wire counts.
1061 */
1062 static int
1063 pmap_unuse_pt(pmap_t pmap, vm_offset_t va)
1064 {
1065 pd_entry_t ptepde;
1066 vm_page_t mpte;
1067
1068 if (va >= VM_MAXUSER_ADDRESS)
1069 return 0;
1070 ptepde = *pmap_pde(pmap, va);
1071 mpte = PHYS_TO_VM_PAGE(ptepde & PG_FRAME);
1072 return pmap_unwire_pte_hold(pmap, mpte);
1073 }
1074
1075 void
1076 pmap_pinit0(pmap)
1077 struct pmap *pmap;
1078 {
1079
1080 PMAP_LOCK_INIT(pmap);
1081 pmap->pm_pdir = (pd_entry_t *)(KERNBASE + (vm_offset_t)IdlePTD);
1082 #ifdef PAE
1083 pmap->pm_pdpt = (pdpt_entry_t *)(KERNBASE + (vm_offset_t)IdlePDPT);
1084 #endif
1085 pmap->pm_active = 0;
1086 PCPU_SET(curpmap, pmap);
1087 TAILQ_INIT(&pmap->pm_pvlist);
1088 bzero(&pmap->pm_stats, sizeof pmap->pm_stats);
1089 mtx_lock_spin(&allpmaps_lock);
1090 LIST_INSERT_HEAD(&allpmaps, pmap, pm_list);
1091 mtx_unlock_spin(&allpmaps_lock);
1092 }
1093
1094 /*
1095 * Initialize a preallocated and zeroed pmap structure,
1096 * such as one in a vmspace structure.
1097 */
1098 void
1099 pmap_pinit(pmap)
1100 register struct pmap *pmap;
1101 {
1102 vm_page_t m, ptdpg[NPGPTD];
1103 vm_paddr_t pa;
1104 static int color;
1105 int i;
1106
1107 PMAP_LOCK_INIT(pmap);
1108
1109 /*
1110 * No need to allocate page table space yet but we do need a valid
1111 * page directory table.
1112 */
1113 if (pmap->pm_pdir == NULL) {
1114 pmap->pm_pdir = (pd_entry_t *)kmem_alloc_nofault(kernel_map,
1115 NBPTD);
1116 #ifdef PAE
1117 pmap->pm_pdpt = uma_zalloc(pdptzone, M_WAITOK | M_ZERO);
1118 KASSERT(((vm_offset_t)pmap->pm_pdpt &
1119 ((NPGPTD * sizeof(pdpt_entry_t)) - 1)) == 0,
1120 ("pmap_pinit: pdpt misaligned"));
1121 KASSERT(pmap_kextract((vm_offset_t)pmap->pm_pdpt) < (4ULL<<30),
1122 ("pmap_pinit: pdpt above 4g"));
1123 #endif
1124 }
1125
1126 /*
1127 * allocate the page directory page(s)
1128 */
1129 for (i = 0; i < NPGPTD;) {
1130 m = vm_page_alloc(NULL, color++,
1131 VM_ALLOC_NORMAL | VM_ALLOC_NOOBJ | VM_ALLOC_WIRED |
1132 VM_ALLOC_ZERO);
1133 if (m == NULL)
1134 VM_WAIT;
1135 else {
1136 ptdpg[i++] = m;
1137 }
1138 }
1139
1140 pmap_qenter((vm_offset_t)pmap->pm_pdir, ptdpg, NPGPTD);
1141
1142 for (i = 0; i < NPGPTD; i++) {
1143 if ((ptdpg[i]->flags & PG_ZERO) == 0)
1144 bzero(pmap->pm_pdir + (i * NPDEPG), PAGE_SIZE);
1145 }
1146
1147 mtx_lock_spin(&allpmaps_lock);
1148 LIST_INSERT_HEAD(&allpmaps, pmap, pm_list);
1149 mtx_unlock_spin(&allpmaps_lock);
1150 /* Wire in kernel global address entries. */
1151 /* XXX copies current process, does not fill in MPPTDI */
1152 bcopy(PTD + KPTDI, pmap->pm_pdir + KPTDI, nkpt * sizeof(pd_entry_t));
1153 #ifdef SMP
1154 pmap->pm_pdir[MPPTDI] = PTD[MPPTDI];
1155 #endif
1156
1157 /* install self-referential address mapping entry(s) */
1158 for (i = 0; i < NPGPTD; i++) {
1159 pa = VM_PAGE_TO_PHYS(ptdpg[i]);
1160 pmap->pm_pdir[PTDPTDI + i] = pa | PG_V | PG_RW | PG_A | PG_M;
1161 #ifdef PAE
1162 pmap->pm_pdpt[i] = pa | PG_V;
1163 #endif
1164 }
1165
1166 pmap->pm_active = 0;
1167 TAILQ_INIT(&pmap->pm_pvlist);
1168 bzero(&pmap->pm_stats, sizeof pmap->pm_stats);
1169 }
1170
1171 /*
1172 * this routine is called if the page table page is not
1173 * mapped correctly.
1174 */
1175 static vm_page_t
1176 _pmap_allocpte(pmap_t pmap, unsigned ptepindex, int flags)
1177 {
1178 vm_paddr_t ptepa;
1179 vm_page_t m;
1180
1181 KASSERT((flags & (M_NOWAIT | M_WAITOK)) == M_NOWAIT ||
1182 (flags & (M_NOWAIT | M_WAITOK)) == M_WAITOK,
1183 ("_pmap_allocpte: flags is neither M_NOWAIT nor M_WAITOK"));
1184
1185 /*
1186 * Allocate a page table page.
1187 */
1188 if ((m = vm_page_alloc(NULL, ptepindex, VM_ALLOC_NOOBJ |
1189 VM_ALLOC_WIRED | VM_ALLOC_ZERO)) == NULL) {
1190 if (flags & M_WAITOK) {
1191 PMAP_UNLOCK(pmap);
1192 vm_page_unlock_queues();
1193 VM_WAIT;
1194 vm_page_lock_queues();
1195 PMAP_LOCK(pmap);
1196 }
1197
1198 /*
1199 * Indicate the need to retry. While waiting, the page table
1200 * page may have been allocated.
1201 */
1202 return (NULL);
1203 }
1204 if ((m->flags & PG_ZERO) == 0)
1205 pmap_zero_page(m);
1206
1207 /*
1208 * Map the pagetable page into the process address space, if
1209 * it isn't already there.
1210 */
1211
1212 pmap->pm_stats.resident_count++;
1213
1214 ptepa = VM_PAGE_TO_PHYS(m);
1215 pmap->pm_pdir[ptepindex] =
1216 (pd_entry_t) (ptepa | PG_U | PG_RW | PG_V | PG_A | PG_M);
1217
1218 return m;
1219 }
1220
1221 static vm_page_t
1222 pmap_allocpte(pmap_t pmap, vm_offset_t va, int flags)
1223 {
1224 unsigned ptepindex;
1225 pd_entry_t ptepa;
1226 vm_page_t m;
1227
1228 KASSERT((flags & (M_NOWAIT | M_WAITOK)) == M_NOWAIT ||
1229 (flags & (M_NOWAIT | M_WAITOK)) == M_WAITOK,
1230 ("pmap_allocpte: flags is neither M_NOWAIT nor M_WAITOK"));
1231
1232 /*
1233 * Calculate pagetable page index
1234 */
1235 ptepindex = va >> PDRSHIFT;
1236 retry:
1237 /*
1238 * Get the page directory entry
1239 */
1240 ptepa = pmap->pm_pdir[ptepindex];
1241
1242 /*
1243 * This supports switching from a 4MB page to a
1244 * normal 4K page.
1245 */
1246 if (ptepa & PG_PS) {
1247 pmap->pm_pdir[ptepindex] = 0;
1248 ptepa = 0;
1249 pmap_invalidate_all(kernel_pmap);
1250 }
1251
1252 /*
1253 * If the page table page is mapped, we just increment the
1254 * hold count, and activate it.
1255 */
1256 if (ptepa) {
1257 m = PHYS_TO_VM_PAGE(ptepa);
1258 m->wire_count++;
1259 } else {
1260 /*
1261 * Here if the pte page isn't mapped, or if it has
1262 * been deallocated.
1263 */
1264 m = _pmap_allocpte(pmap, ptepindex, flags);
1265 if (m == NULL && (flags & M_WAITOK))
1266 goto retry;
1267 }
1268 return (m);
1269 }
1270
1271
1272 /***************************************************
1273 * Pmap allocation/deallocation routines.
1274 ***************************************************/
1275
1276 #ifdef SMP
1277 /*
1278 * Deal with a SMP shootdown of other users of the pmap that we are
1279 * trying to dispose of. This can be a bit hairy.
1280 */
1281 static u_int *lazymask;
1282 static u_int lazyptd;
1283 static volatile u_int lazywait;
1284
1285 void pmap_lazyfix_action(void);
1286
1287 void
1288 pmap_lazyfix_action(void)
1289 {
1290 u_int mymask = PCPU_GET(cpumask);
1291
1292 if (rcr3() == lazyptd)
1293 load_cr3(PCPU_GET(curpcb)->pcb_cr3);
1294 atomic_clear_int(lazymask, mymask);
1295 atomic_store_rel_int(&lazywait, 1);
1296 }
1297
1298 static void
1299 pmap_lazyfix_self(u_int mymask)
1300 {
1301
1302 if (rcr3() == lazyptd)
1303 load_cr3(PCPU_GET(curpcb)->pcb_cr3);
1304 atomic_clear_int(lazymask, mymask);
1305 }
1306
1307
1308 static void
1309 pmap_lazyfix(pmap_t pmap)
1310 {
1311 u_int mymask = PCPU_GET(cpumask);
1312 u_int mask;
1313 register u_int spins;
1314
1315 while ((mask = pmap->pm_active) != 0) {
1316 spins = 50000000;
1317 mask = mask & -mask; /* Find least significant set bit */
1318 mtx_lock_spin(&smp_ipi_mtx);
1319 #ifdef PAE
1320 lazyptd = vtophys(pmap->pm_pdpt);
1321 #else
1322 lazyptd = vtophys(pmap->pm_pdir);
1323 #endif
1324 if (mask == mymask) {
1325 lazymask = &pmap->pm_active;
1326 pmap_lazyfix_self(mymask);
1327 } else {
1328 atomic_store_rel_int((u_int *)&lazymask,
1329 (u_int)&pmap->pm_active);
1330 atomic_store_rel_int(&lazywait, 0);
1331 ipi_selected(mask, IPI_LAZYPMAP);
1332 while (lazywait == 0) {
1333 ia32_pause();
1334 if (--spins == 0)
1335 break;
1336 }
1337 }
1338 mtx_unlock_spin(&smp_ipi_mtx);
1339 if (spins == 0)
1340 printf("pmap_lazyfix: spun for 50000000\n");
1341 }
1342 }
1343
1344 #else /* SMP */
1345
1346 /*
1347 * Cleaning up on uniprocessor is easy. For various reasons, we're
1348 * unlikely to have to even execute this code, including the fact
1349 * that the cleanup is deferred until the parent does a wait(2), which
1350 * means that another userland process has run.
1351 */
1352 static void
1353 pmap_lazyfix(pmap_t pmap)
1354 {
1355 u_int cr3;
1356
1357 cr3 = vtophys(pmap->pm_pdir);
1358 if (cr3 == rcr3()) {
1359 load_cr3(PCPU_GET(curpcb)->pcb_cr3);
1360 pmap->pm_active &= ~(PCPU_GET(cpumask));
1361 }
1362 }
1363 #endif /* SMP */
1364
1365 /*
1366 * Release any resources held by the given physical map.
1367 * Called when a pmap initialized by pmap_pinit is being released.
1368 * Should only be called if the map contains no valid mappings.
1369 */
1370 void
1371 pmap_release(pmap_t pmap)
1372 {
1373 vm_page_t m, ptdpg[NPGPTD];
1374 int i;
1375
1376 KASSERT(pmap->pm_stats.resident_count == 0,
1377 ("pmap_release: pmap resident count %ld != 0",
1378 pmap->pm_stats.resident_count));
1379
1380 pmap_lazyfix(pmap);
1381 mtx_lock_spin(&allpmaps_lock);
1382 LIST_REMOVE(pmap, pm_list);
1383 mtx_unlock_spin(&allpmaps_lock);
1384
1385 for (i = 0; i < NPGPTD; i++)
1386 ptdpg[i] = PHYS_TO_VM_PAGE(pmap->pm_pdir[PTDPTDI + i]);
1387
1388 bzero(pmap->pm_pdir + PTDPTDI, (nkpt + NPGPTD) *
1389 sizeof(*pmap->pm_pdir));
1390 #ifdef SMP
1391 pmap->pm_pdir[MPPTDI] = 0;
1392 #endif
1393
1394 pmap_qremove((vm_offset_t)pmap->pm_pdir, NPGPTD);
1395
1396 vm_page_lock_queues();
1397 for (i = 0; i < NPGPTD; i++) {
1398 m = ptdpg[i];
1399 #ifdef PAE
1400 KASSERT(VM_PAGE_TO_PHYS(m) == (pmap->pm_pdpt[i] & PG_FRAME),
1401 ("pmap_release: got wrong ptd page"));
1402 #endif
1403 m->wire_count--;
1404 atomic_subtract_int(&cnt.v_wire_count, 1);
1405 vm_page_free_zero(m);
1406 }
1407 vm_page_unlock_queues();
1408 PMAP_LOCK_DESTROY(pmap);
1409 }
1410
1411 static int
1412 kvm_size(SYSCTL_HANDLER_ARGS)
1413 {
1414 unsigned long ksize = VM_MAX_KERNEL_ADDRESS - KERNBASE;
1415
1416 return sysctl_handle_long(oidp, &ksize, 0, req);
1417 }
1418 SYSCTL_PROC(_vm, OID_AUTO, kvm_size, CTLTYPE_LONG|CTLFLAG_RD,
1419 0, 0, kvm_size, "IU", "Size of KVM");
1420
1421 static int
1422 kvm_free(SYSCTL_HANDLER_ARGS)
1423 {
1424 unsigned long kfree = VM_MAX_KERNEL_ADDRESS - kernel_vm_end;
1425
1426 return sysctl_handle_long(oidp, &kfree, 0, req);
1427 }
1428 SYSCTL_PROC(_vm, OID_AUTO, kvm_free, CTLTYPE_LONG|CTLFLAG_RD,
1429 0, 0, kvm_free, "IU", "Amount of KVM free");
1430
1431 /*
1432 * grow the number of kernel page table entries, if needed
1433 */
1434 void
1435 pmap_growkernel(vm_offset_t addr)
1436 {
1437 struct pmap *pmap;
1438 vm_paddr_t ptppaddr;
1439 vm_page_t nkpg;
1440 pd_entry_t newpdir;
1441 pt_entry_t *pde;
1442
1443 mtx_assert(&kernel_map->system_mtx, MA_OWNED);
1444 if (kernel_vm_end == 0) {
1445 kernel_vm_end = KERNBASE;
1446 nkpt = 0;
1447 while (pdir_pde(PTD, kernel_vm_end)) {
1448 kernel_vm_end = (kernel_vm_end + PAGE_SIZE * NPTEPG) & ~(PAGE_SIZE * NPTEPG - 1);
1449 nkpt++;
1450 }
1451 }
1452 addr = roundup2(addr, PAGE_SIZE * NPTEPG);
1453 while (kernel_vm_end < addr) {
1454 if (pdir_pde(PTD, kernel_vm_end)) {
1455 kernel_vm_end = (kernel_vm_end + PAGE_SIZE * NPTEPG) & ~(PAGE_SIZE * NPTEPG - 1);
1456 continue;
1457 }
1458
1459 /*
1460 * This index is bogus, but out of the way
1461 */
1462 nkpg = vm_page_alloc(NULL, nkpt,
1463 VM_ALLOC_NOOBJ | VM_ALLOC_SYSTEM | VM_ALLOC_WIRED);
1464 if (!nkpg)
1465 panic("pmap_growkernel: no memory to grow kernel");
1466
1467 nkpt++;
1468
1469 pmap_zero_page(nkpg);
1470 ptppaddr = VM_PAGE_TO_PHYS(nkpg);
1471 newpdir = (pd_entry_t) (ptppaddr | PG_V | PG_RW | PG_A | PG_M);
1472 pdir_pde(PTD, kernel_vm_end) = newpdir;
1473
1474 mtx_lock_spin(&allpmaps_lock);
1475 LIST_FOREACH(pmap, &allpmaps, pm_list) {
1476 pde = pmap_pde(pmap, kernel_vm_end);
1477 pde_store(pde, newpdir);
1478 }
1479 mtx_unlock_spin(&allpmaps_lock);
1480 kernel_vm_end = (kernel_vm_end + PAGE_SIZE * NPTEPG) & ~(PAGE_SIZE * NPTEPG - 1);
1481 }
1482 }
1483
1484
1485 /***************************************************
1486 * page management routines.
1487 ***************************************************/
1488
1489 /*
1490 * free the pv_entry back to the free list
1491 */
1492 static PMAP_INLINE void
1493 free_pv_entry(pv_entry_t pv)
1494 {
1495 pv_entry_count--;
1496 uma_zfree(pvzone, pv);
1497 }
1498
1499 /*
1500 * get a new pv_entry, allocating a block from the system
1501 * when needed.
1502 * the memory allocation is performed bypassing the malloc code
1503 * because of the possibility of allocations at interrupt time.
1504 */
1505 static pv_entry_t
1506 get_pv_entry(void)
1507 {
1508 pv_entry_count++;
1509 if (pv_entry_high_water &&
1510 (pv_entry_count > pv_entry_high_water) &&
1511 (pmap_pagedaemon_waken == 0)) {
1512 pmap_pagedaemon_waken = 1;
1513 wakeup (&vm_pages_needed);
1514 }
1515 return uma_zalloc(pvzone, M_NOWAIT);
1516 }
1517
1518
1519 static int
1520 pmap_remove_entry(pmap_t pmap, vm_page_t m, vm_offset_t va)
1521 {
1522 pv_entry_t pv;
1523 int rtval;
1524
1525 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
1526 mtx_assert(&vm_page_queue_mtx, MA_OWNED);
1527 if (m->md.pv_list_count < pmap->pm_stats.resident_count) {
1528 TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
1529 if (pmap == pv->pv_pmap && va == pv->pv_va)
1530 break;
1531 }
1532 } else {
1533 TAILQ_FOREACH(pv, &pmap->pm_pvlist, pv_plist) {
1534 if (va == pv->pv_va)
1535 break;
1536 }
1537 }
1538
1539 rtval = 0;
1540 if (pv) {
1541 rtval = pmap_unuse_pt(pmap, va);
1542 TAILQ_REMOVE(&m->md.pv_list, pv, pv_list);
1543 m->md.pv_list_count--;
1544 if (TAILQ_FIRST(&m->md.pv_list) == NULL)
1545 vm_page_flag_clear(m, PG_WRITEABLE);
1546
1547 TAILQ_REMOVE(&pmap->pm_pvlist, pv, pv_plist);
1548 free_pv_entry(pv);
1549 }
1550
1551 return rtval;
1552 }
1553
1554 /*
1555 * Create a pv entry for page at pa for
1556 * (pmap, va).
1557 */
1558 static void
1559 pmap_insert_entry(pmap_t pmap, vm_offset_t va, vm_page_t m)
1560 {
1561 pv_entry_t pv;
1562
1563 pv = get_pv_entry();
1564 if (pv == NULL)
1565 panic("no pv entries: increase vm.pmap.shpgperproc");
1566 pv->pv_va = va;
1567 pv->pv_pmap = pmap;
1568
1569 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
1570 mtx_assert(&vm_page_queue_mtx, MA_OWNED);
1571 TAILQ_INSERT_TAIL(&pmap->pm_pvlist, pv, pv_plist);
1572 TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_list);
1573 m->md.pv_list_count++;
1574 }
1575
1576 /*
1577 * pmap_remove_pte: do the things to unmap a page in a process
1578 */
1579 static int
1580 pmap_remove_pte(pmap_t pmap, pt_entry_t *ptq, vm_offset_t va)
1581 {
1582 pt_entry_t oldpte;
1583 vm_page_t m;
1584
1585 mtx_assert(&vm_page_queue_mtx, MA_OWNED);
1586 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
1587 oldpte = pte_load_clear(ptq);
1588 if (oldpte & PG_W)
1589 pmap->pm_stats.wired_count -= 1;
1590 /*
1591 * Machines that don't support invlpg, also don't support
1592 * PG_G.
1593 */
1594 if (oldpte & PG_G)
1595 pmap_invalidate_page(kernel_pmap, va);
1596 pmap->pm_stats.resident_count -= 1;
1597 if (oldpte & PG_MANAGED) {
1598 m = PHYS_TO_VM_PAGE(oldpte);
1599 if (oldpte & PG_M) {
1600 #if defined(PMAP_DIAGNOSTIC)
1601 if (pmap_nw_modified((pt_entry_t) oldpte)) {
1602 printf(
1603 "pmap_remove: modified page not writable: va: 0x%x, pte: 0x%x\n",
1604 va, oldpte);
1605 }
1606 #endif
1607 if (pmap_track_modified(va))
1608 vm_page_dirty(m);
1609 }
1610 if (oldpte & PG_A)
1611 vm_page_flag_set(m, PG_REFERENCED);
1612 return pmap_remove_entry(pmap, m, va);
1613 } else {
1614 return pmap_unuse_pt(pmap, va);
1615 }
1616 }
1617
1618 /*
1619 * Remove a single page from a process address space
1620 */
1621 static void
1622 pmap_remove_page(pmap_t pmap, vm_offset_t va)
1623 {
1624 pt_entry_t *pte;
1625
1626 mtx_assert(&vm_page_queue_mtx, MA_OWNED);
1627 KASSERT(curthread->td_pinned > 0, ("curthread not pinned"));
1628 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
1629 if ((pte = pmap_pte_quick(pmap, va)) == NULL || *pte == 0)
1630 return;
1631 pmap_remove_pte(pmap, pte, va);
1632 pmap_invalidate_page(pmap, va);
1633 }
1634
1635 /*
1636 * Remove the given range of addresses from the specified map.
1637 *
1638 * It is assumed that the start and end are properly
1639 * rounded to the page size.
1640 */
1641 void
1642 pmap_remove(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
1643 {
1644 vm_offset_t pdnxt;
1645 pd_entry_t ptpaddr;
1646 pt_entry_t *pte;
1647 int anyvalid;
1648
1649 /*
1650 * Perform an unsynchronized read. This is, however, safe.
1651 */
1652 if (pmap->pm_stats.resident_count == 0)
1653 return;
1654
1655 anyvalid = 0;
1656
1657 vm_page_lock_queues();
1658 sched_pin();
1659 PMAP_LOCK(pmap);
1660
1661 /*
1662 * special handling of removing one page. a very
1663 * common operation and easy to short circuit some
1664 * code.
1665 */
1666 if ((sva + PAGE_SIZE == eva) &&
1667 ((pmap->pm_pdir[(sva >> PDRSHIFT)] & PG_PS) == 0)) {
1668 pmap_remove_page(pmap, sva);
1669 goto out;
1670 }
1671
1672 for (; sva < eva; sva = pdnxt) {
1673 unsigned pdirindex;
1674
1675 /*
1676 * Calculate index for next page table.
1677 */
1678 pdnxt = (sva + NBPDR) & ~PDRMASK;
1679 if (pmap->pm_stats.resident_count == 0)
1680 break;
1681
1682 pdirindex = sva >> PDRSHIFT;
1683 ptpaddr = pmap->pm_pdir[pdirindex];
1684
1685 /*
1686 * Weed out invalid mappings. Note: we assume that the page
1687 * directory table is always allocated, and in kernel virtual.
1688 */
1689 if (ptpaddr == 0)
1690 continue;
1691
1692 /*
1693 * Check for large page.
1694 */
1695 if ((ptpaddr & PG_PS) != 0) {
1696 pmap->pm_pdir[pdirindex] = 0;
1697 pmap->pm_stats.resident_count -= NBPDR / PAGE_SIZE;
1698 anyvalid = 1;
1699 continue;
1700 }
1701
1702 /*
1703 * Limit our scan to either the end of the va represented
1704 * by the current page table page, or to the end of the
1705 * range being removed.
1706 */
1707 if (pdnxt > eva)
1708 pdnxt = eva;
1709
1710 for (; sva != pdnxt; sva += PAGE_SIZE) {
1711 if ((pte = pmap_pte_quick(pmap, sva)) == NULL ||
1712 *pte == 0)
1713 continue;
1714 anyvalid = 1;
1715 if (pmap_remove_pte(pmap, pte, sva))
1716 break;
1717 }
1718 }
1719 out:
1720 sched_unpin();
1721 vm_page_unlock_queues();
1722 if (anyvalid)
1723 pmap_invalidate_all(pmap);
1724 PMAP_UNLOCK(pmap);
1725 }
1726
1727 /*
1728 * Routine: pmap_remove_all
1729 * Function:
1730 * Removes this physical page from
1731 * all physical maps in which it resides.
1732 * Reflects back modify bits to the pager.
1733 *
1734 * Notes:
1735 * Original versions of this routine were very
1736 * inefficient because they iteratively called
1737 * pmap_remove (slow...)
1738 */
1739
1740 void
1741 pmap_remove_all(vm_page_t m)
1742 {
1743 register pv_entry_t pv;
1744 pt_entry_t *pte, tpte;
1745
1746 #if defined(PMAP_DIAGNOSTIC)
1747 /*
1748 * XXX This makes pmap_remove_all() illegal for non-managed pages!
1749 */
1750 if (!pmap_initialized || (m->flags & PG_FICTITIOUS)) {
1751 panic("pmap_remove_all: illegal for unmanaged page, va: 0x%x",
1752 VM_PAGE_TO_PHYS(m));
1753 }
1754 #endif
1755 mtx_assert(&vm_page_queue_mtx, MA_OWNED);
1756 sched_pin();
1757 while ((pv = TAILQ_FIRST(&m->md.pv_list)) != NULL) {
1758 PMAP_LOCK(pv->pv_pmap);
1759 pv->pv_pmap->pm_stats.resident_count--;
1760 pte = pmap_pte_quick(pv->pv_pmap, pv->pv_va);
1761 tpte = pte_load_clear(pte);
1762 if (tpte & PG_W)
1763 pv->pv_pmap->pm_stats.wired_count--;
1764 if (tpte & PG_A)
1765 vm_page_flag_set(m, PG_REFERENCED);
1766
1767 /*
1768 * Update the vm_page_t clean and reference bits.
1769 */
1770 if (tpte & PG_M) {
1771 #if defined(PMAP_DIAGNOSTIC)
1772 if (pmap_nw_modified((pt_entry_t) tpte)) {
1773 printf(
1774 "pmap_remove_all: modified page not writable: va: 0x%x, pte: 0x%x\n",
1775 pv->pv_va, tpte);
1776 }
1777 #endif
1778 if (pmap_track_modified(pv->pv_va))
1779 vm_page_dirty(m);
1780 }
1781 pmap_invalidate_page(pv->pv_pmap, pv->pv_va);
1782 TAILQ_REMOVE(&pv->pv_pmap->pm_pvlist, pv, pv_plist);
1783 TAILQ_REMOVE(&m->md.pv_list, pv, pv_list);
1784 m->md.pv_list_count--;
1785 pmap_unuse_pt(pv->pv_pmap, pv->pv_va);
1786 PMAP_UNLOCK(pv->pv_pmap);
1787 free_pv_entry(pv);
1788 }
1789 vm_page_flag_clear(m, PG_WRITEABLE);
1790 sched_unpin();
1791 }
1792
1793 /*
1794 * Set the physical protection on the
1795 * specified range of this map as requested.
1796 */
1797 void
1798 pmap_protect(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot)
1799 {
1800 vm_offset_t pdnxt;
1801 pd_entry_t ptpaddr;
1802 int anychanged;
1803
1804 if ((prot & VM_PROT_READ) == VM_PROT_NONE) {
1805 pmap_remove(pmap, sva, eva);
1806 return;
1807 }
1808
1809 if (prot & VM_PROT_WRITE)
1810 return;
1811
1812 anychanged = 0;
1813
1814 vm_page_lock_queues();
1815 sched_pin();
1816 PMAP_LOCK(pmap);
1817 for (; sva < eva; sva = pdnxt) {
1818 unsigned obits, pbits, pdirindex;
1819
1820 pdnxt = (sva + NBPDR) & ~PDRMASK;
1821
1822 pdirindex = sva >> PDRSHIFT;
1823 ptpaddr = pmap->pm_pdir[pdirindex];
1824
1825 /*
1826 * Weed out invalid mappings. Note: we assume that the page
1827 * directory table is always allocated, and in kernel virtual.
1828 */
1829 if (ptpaddr == 0)
1830 continue;
1831
1832 /*
1833 * Check for large page.
1834 */
1835 if ((ptpaddr & PG_PS) != 0) {
1836 pmap->pm_pdir[pdirindex] &= ~(PG_M|PG_RW);
1837 pmap->pm_stats.resident_count -= NBPDR / PAGE_SIZE;
1838 anychanged = 1;
1839 continue;
1840 }
1841
1842 if (pdnxt > eva)
1843 pdnxt = eva;
1844
1845 for (; sva != pdnxt; sva += PAGE_SIZE) {
1846 pt_entry_t *pte;
1847 vm_page_t m;
1848
1849 if ((pte = pmap_pte_quick(pmap, sva)) == NULL)
1850 continue;
1851 retry:
1852 /*
1853 * Regardless of whether a pte is 32 or 64 bits in
1854 * size, PG_RW, PG_A, and PG_M are among the least
1855 * significant 32 bits.
1856 */
1857 obits = pbits = *(u_int *)pte;
1858 if (pbits & PG_MANAGED) {
1859 m = NULL;
1860 if (pbits & PG_A) {
1861 m = PHYS_TO_VM_PAGE(*pte);
1862 vm_page_flag_set(m, PG_REFERENCED);
1863 pbits &= ~PG_A;
1864 }
1865 if ((pbits & PG_M) != 0 &&
1866 pmap_track_modified(sva)) {
1867 if (m == NULL)
1868 m = PHYS_TO_VM_PAGE(*pte);
1869 vm_page_dirty(m);
1870 }
1871 }
1872
1873 pbits &= ~(PG_RW | PG_M);
1874
1875 if (pbits != obits) {
1876 if (!atomic_cmpset_int((u_int *)pte, obits,
1877 pbits))
1878 goto retry;
1879 anychanged = 1;
1880 }
1881 }
1882 }
1883 sched_unpin();
1884 vm_page_unlock_queues();
1885 if (anychanged)
1886 pmap_invalidate_all(pmap);
1887 PMAP_UNLOCK(pmap);
1888 }
1889
1890 /*
1891 * Insert the given physical page (p) at
1892 * the specified virtual address (v) in the
1893 * target physical map with the protection requested.
1894 *
1895 * If specified, the page will be wired down, meaning
1896 * that the related pte can not be reclaimed.
1897 *
1898 * NB: This is the only routine which MAY NOT lazy-evaluate
1899 * or lose information. That is, this routine must actually
1900 * insert this page into the given map NOW.
1901 */
1902 void
1903 pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
1904 boolean_t wired)
1905 {
1906 vm_paddr_t pa;
1907 register pt_entry_t *pte;
1908 vm_paddr_t opa;
1909 pt_entry_t origpte, newpte;
1910 vm_page_t mpte, om;
1911
1912 va &= PG_FRAME;
1913 #ifdef PMAP_DIAGNOSTIC
1914 if (va > VM_MAX_KERNEL_ADDRESS)
1915 panic("pmap_enter: toobig");
1916 if ((va >= UPT_MIN_ADDRESS) && (va < UPT_MAX_ADDRESS))
1917 panic("pmap_enter: invalid to pmap_enter page table pages (va: 0x%x)", va);
1918 #endif
1919
1920 mpte = NULL;
1921
1922 vm_page_lock_queues();
1923 PMAP_LOCK(pmap);
1924 sched_pin();
1925
1926 /*
1927 * In the case that a page table page is not
1928 * resident, we are creating it here.
1929 */
1930 if (va < VM_MAXUSER_ADDRESS) {
1931 mpte = pmap_allocpte(pmap, va, M_WAITOK);
1932 }
1933 #if 0 && defined(PMAP_DIAGNOSTIC)
1934 else {
1935 pd_entry_t *pdeaddr = pmap_pde(pmap, va);
1936 origpte = *pdeaddr;
1937 if ((origpte & PG_V) == 0) {
1938 panic("pmap_enter: invalid kernel page table page, pdir=%p, pde=%p, va=%p\n",
1939 pmap->pm_pdir[PTDPTDI], origpte, va);
1940 }
1941 }
1942 #endif
1943
1944 pte = pmap_pte_quick(pmap, va);
1945
1946 /*
1947 * Page Directory table entry not valid, we need a new PT page
1948 */
1949 if (pte == NULL) {
1950 panic("pmap_enter: invalid page directory pdir=%#jx, va=%#x\n",
1951 (uintmax_t)pmap->pm_pdir[PTDPTDI], va);
1952 }
1953
1954 pa = VM_PAGE_TO_PHYS(m);
1955 om = NULL;
1956 origpte = *pte;
1957 opa = origpte & PG_FRAME;
1958
1959 if (origpte & PG_PS) {
1960 /*
1961 * Yes, I know this will truncate upper address bits for PAE,
1962 * but I'm actually more interested in the lower bits
1963 */
1964 printf("pmap_enter: va %p, pte %p, origpte %p\n",
1965 (void *)va, (void *)pte, (void *)(uintptr_t)origpte);
1966 panic("pmap_enter: attempted pmap_enter on 4MB page");
1967 }
1968
1969 /*
1970 * Mapping has not changed, must be protection or wiring change.
1971 */
1972 if (origpte && (opa == pa)) {
1973 /*
1974 * Wiring change, just update stats. We don't worry about
1975 * wiring PT pages as they remain resident as long as there
1976 * are valid mappings in them. Hence, if a user page is wired,
1977 * the PT page will be also.
1978 */
1979 if (wired && ((origpte & PG_W) == 0))
1980 pmap->pm_stats.wired_count++;
1981 else if (!wired && (origpte & PG_W))
1982 pmap->pm_stats.wired_count--;
1983
1984 #if defined(PMAP_DIAGNOSTIC)
1985 if (pmap_nw_modified((pt_entry_t) origpte)) {
1986 printf(
1987 "pmap_enter: modified page not writable: va: 0x%x, pte: 0x%x\n",
1988 va, origpte);
1989 }
1990 #endif
1991
1992 /*
1993 * Remove extra pte reference
1994 */
1995 if (mpte)
1996 mpte->wire_count--;
1997
1998 /*
1999 * We might be turning off write access to the page,
2000 * so we go ahead and sense modify status.
2001 */
2002 if (origpte & PG_MANAGED) {
2003 om = m;
2004 pa |= PG_MANAGED;
2005 }
2006 goto validate;
2007 }
2008 /*
2009 * Mapping has changed, invalidate old range and fall through to
2010 * handle validating new mapping.
2011 */
2012 if (opa) {
2013 int err;
2014 if (origpte & PG_W)
2015 pmap->pm_stats.wired_count--;
2016 if (origpte & PG_MANAGED) {
2017 om = PHYS_TO_VM_PAGE(opa);
2018 err = pmap_remove_entry(pmap, om, va);
2019 } else
2020 err = pmap_unuse_pt(pmap, va);
2021 if (err)
2022 panic("pmap_enter: pte vanished, va: 0x%x", va);
2023 } else
2024 pmap->pm_stats.resident_count++;
2025
2026 /*
2027 * Enter on the PV list if part of our managed memory. Note that we
2028 * raise IPL while manipulating pv_table since pmap_enter can be
2029 * called at interrupt time.
2030 */
2031 if (pmap_initialized &&
2032 (m->flags & (PG_FICTITIOUS|PG_UNMANAGED)) == 0) {
2033 pmap_insert_entry(pmap, va, m);
2034 pa |= PG_MANAGED;
2035 }
2036
2037 /*
2038 * Increment counters
2039 */
2040 if (wired)
2041 pmap->pm_stats.wired_count++;
2042
2043 validate:
2044 /*
2045 * Now validate mapping with desired protection/wiring.
2046 */
2047 newpte = (pt_entry_t)(pa | PG_V);
2048 if ((prot & VM_PROT_WRITE) != 0)
2049 newpte |= PG_RW;
2050 if (wired)
2051 newpte |= PG_W;
2052 if (va < VM_MAXUSER_ADDRESS)
2053 newpte |= PG_U;
2054 if (pmap == kernel_pmap)
2055 newpte |= pgeflag;
2056
2057 /*
2058 * if the mapping or permission bits are different, we need
2059 * to update the pte.
2060 */
2061 if ((origpte & ~(PG_M|PG_A)) != newpte) {
2062 if (origpte & PG_MANAGED) {
2063 origpte = pte_load_store(pte, newpte | PG_A);
2064 if ((origpte & PG_M) && pmap_track_modified(va))
2065 vm_page_dirty(om);
2066 if (origpte & PG_A)
2067 vm_page_flag_set(om, PG_REFERENCED);
2068 } else
2069 pte_store(pte, newpte | PG_A);
2070 if (origpte) {
2071 pmap_invalidate_page(pmap, va);
2072 }
2073 }
2074 sched_unpin();
2075 vm_page_unlock_queues();
2076 PMAP_UNLOCK(pmap);
2077 }
2078
2079 /*
2080 * this code makes some *MAJOR* assumptions:
2081 * 1. Current pmap & pmap exists.
2082 * 2. Not wired.
2083 * 3. Read access.
2084 * 4. No page table pages.
2085 * 5. Tlbflush is deferred to calling procedure.
2086 * 6. Page IS managed.
2087 * but is *MUCH* faster than pmap_enter...
2088 */
2089
2090 vm_page_t
2091 pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_page_t mpte)
2092 {
2093 pt_entry_t *pte;
2094 vm_paddr_t pa;
2095
2096 vm_page_lock_queues();
2097 PMAP_LOCK(pmap);
2098
2099 /*
2100 * In the case that a page table page is not
2101 * resident, we are creating it here.
2102 */
2103 if (va < VM_MAXUSER_ADDRESS) {
2104 unsigned ptepindex;
2105 pd_entry_t ptepa;
2106
2107 /*
2108 * Calculate pagetable page index
2109 */
2110 ptepindex = va >> PDRSHIFT;
2111 if (mpte && (mpte->pindex == ptepindex)) {
2112 mpte->wire_count++;
2113 } else {
2114 retry:
2115 /*
2116 * Get the page directory entry
2117 */
2118 ptepa = pmap->pm_pdir[ptepindex];
2119
2120 /*
2121 * If the page table page is mapped, we just increment
2122 * the hold count, and activate it.
2123 */
2124 if (ptepa) {
2125 if (ptepa & PG_PS)
2126 panic("pmap_enter_quick: unexpected mapping into 4MB page");
2127 mpte = PHYS_TO_VM_PAGE(ptepa);
2128 mpte->wire_count++;
2129 } else {
2130 mpte = _pmap_allocpte(pmap, ptepindex,
2131 M_WAITOK);
2132 if (mpte == NULL)
2133 goto retry;
2134 }
2135 }
2136 } else {
2137 mpte = NULL;
2138 }
2139
2140 /*
2141 * This call to vtopte makes the assumption that we are
2142 * entering the page into the current pmap. In order to support
2143 * quick entry into any pmap, one would likely use pmap_pte_quick.
2144 * But that isn't as quick as vtopte.
2145 */
2146 pte = vtopte(va);
2147 if (*pte) {
2148 if (mpte != NULL) {
2149 pmap_unwire_pte_hold(pmap, mpte);
2150 mpte = NULL;
2151 }
2152 goto out;
2153 }
2154
2155 /*
2156 * Enter on the PV list if part of our managed memory. Note that we
2157 * raise IPL while manipulating pv_table since pmap_enter can be
2158 * called at interrupt time.
2159 */
2160 if ((m->flags & (PG_FICTITIOUS|PG_UNMANAGED)) == 0)
2161 pmap_insert_entry(pmap, va, m);
2162
2163 /*
2164 * Increment counters
2165 */
2166 pmap->pm_stats.resident_count++;
2167
2168 pa = VM_PAGE_TO_PHYS(m);
2169
2170 /*
2171 * Now validate mapping with RO protection
2172 */
2173 if (m->flags & (PG_FICTITIOUS|PG_UNMANAGED))
2174 pte_store(pte, pa | PG_V | PG_U);
2175 else
2176 pte_store(pte, pa | PG_V | PG_U | PG_MANAGED);
2177 out:
2178 vm_page_unlock_queues();
2179 PMAP_UNLOCK(pmap);
2180 return mpte;
2181 }
2182
2183 /*
2184 * Make a temporary mapping for a physical address. This is only intended
2185 * to be used for panic dumps.
2186 */
2187 void *
2188 pmap_kenter_temporary(vm_paddr_t pa, int i)
2189 {
2190 vm_offset_t va;
2191
2192 va = (vm_offset_t)crashdumpmap + (i * PAGE_SIZE);
2193 pmap_kenter(va, pa);
2194 #ifndef I386_CPU
2195 invlpg(va);
2196 #else
2197 invltlb();
2198 #endif
2199 return ((void *)crashdumpmap);
2200 }
2201
2202 /*
2203 * This code maps large physical mmap regions into the
2204 * processor address space. Note that some shortcuts
2205 * are taken, but the code works.
2206 */
2207 void
2208 pmap_object_init_pt(pmap_t pmap, vm_offset_t addr,
2209 vm_object_t object, vm_pindex_t pindex,
2210 vm_size_t size)
2211 {
2212 vm_page_t p;
2213
2214 VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
2215 KASSERT(object->type == OBJT_DEVICE,
2216 ("pmap_object_init_pt: non-device object"));
2217 if (pseflag &&
2218 ((addr & (NBPDR - 1)) == 0) && ((size & (NBPDR - 1)) == 0)) {
2219 int i;
2220 vm_page_t m[1];
2221 unsigned int ptepindex;
2222 int npdes;
2223 pd_entry_t ptepa;
2224
2225 PMAP_LOCK(pmap);
2226 if (pmap->pm_pdir[ptepindex = (addr >> PDRSHIFT)])
2227 goto out;
2228 PMAP_UNLOCK(pmap);
2229 retry:
2230 p = vm_page_lookup(object, pindex);
2231 if (p != NULL) {
2232 vm_page_lock_queues();
2233 if (vm_page_sleep_if_busy(p, FALSE, "init4p"))
2234 goto retry;
2235 } else {
2236 p = vm_page_alloc(object, pindex, VM_ALLOC_NORMAL);
2237 if (p == NULL)
2238 return;
2239 m[0] = p;
2240
2241 if (vm_pager_get_pages(object, m, 1, 0) != VM_PAGER_OK) {
2242 vm_page_lock_queues();
2243 vm_page_free(p);
2244 vm_page_unlock_queues();
2245 return;
2246 }
2247
2248 p = vm_page_lookup(object, pindex);
2249 vm_page_lock_queues();
2250 vm_page_wakeup(p);
2251 }
2252 vm_page_unlock_queues();
2253
2254 ptepa = VM_PAGE_TO_PHYS(p);
2255 if (ptepa & (NBPDR - 1))
2256 return;
2257
2258 p->valid = VM_PAGE_BITS_ALL;
2259
2260 PMAP_LOCK(pmap);
2261 pmap->pm_stats.resident_count += size >> PAGE_SHIFT;
2262 npdes = size >> PDRSHIFT;
2263 for(i = 0; i < npdes; i++) {
2264 pde_store(&pmap->pm_pdir[ptepindex],
2265 ptepa | PG_U | PG_RW | PG_V | PG_PS);
2266 ptepa += NBPDR;
2267 ptepindex += 1;
2268 }
2269 pmap_invalidate_all(pmap);
2270 out:
2271 PMAP_UNLOCK(pmap);
2272 }
2273 }
2274
2275 /*
2276 * Routine: pmap_change_wiring
2277 * Function: Change the wiring attribute for a map/virtual-address
2278 * pair.
2279 * In/out conditions:
2280 * The mapping must already exist in the pmap.
2281 */
2282 void
2283 pmap_change_wiring(pmap, va, wired)
2284 register pmap_t pmap;
2285 vm_offset_t va;
2286 boolean_t wired;
2287 {
2288 register pt_entry_t *pte;
2289
2290 PMAP_LOCK(pmap);
2291 pte = pmap_pte(pmap, va);
2292
2293 if (wired && !pmap_pte_w(pte))
2294 pmap->pm_stats.wired_count++;
2295 else if (!wired && pmap_pte_w(pte))
2296 pmap->pm_stats.wired_count--;
2297
2298 /*
2299 * Wiring is not a hardware characteristic so there is no need to
2300 * invalidate TLB.
2301 */
2302 pmap_pte_set_w(pte, wired);
2303 pmap_pte_release(pte);
2304 PMAP_UNLOCK(pmap);
2305 }
2306
2307
2308
2309 /*
2310 * Copy the range specified by src_addr/len
2311 * from the source map to the range dst_addr/len
2312 * in the destination map.
2313 *
2314 * This routine is only advisory and need not do anything.
2315 */
2316
2317 void
2318 pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr, vm_size_t len,
2319 vm_offset_t src_addr)
2320 {
2321 vm_offset_t addr;
2322 vm_offset_t end_addr = src_addr + len;
2323 vm_offset_t pdnxt;
2324 vm_page_t m;
2325
2326 if (dst_addr != src_addr)
2327 return;
2328
2329 if (!pmap_is_current(src_pmap))
2330 return;
2331
2332 vm_page_lock_queues();
2333 if (dst_pmap < src_pmap) {
2334 PMAP_LOCK(dst_pmap);
2335 PMAP_LOCK(src_pmap);
2336 } else {
2337 PMAP_LOCK(src_pmap);
2338 PMAP_LOCK(dst_pmap);
2339 }
2340 sched_pin();
2341 for (addr = src_addr; addr < end_addr; addr = pdnxt) {
2342 pt_entry_t *src_pte, *dst_pte;
2343 vm_page_t dstmpte, srcmpte;
2344 pd_entry_t srcptepaddr;
2345 unsigned ptepindex;
2346
2347 if (addr >= UPT_MIN_ADDRESS)
2348 panic("pmap_copy: invalid to pmap_copy page tables");
2349
2350 /*
2351 * Don't let optional prefaulting of pages make us go
2352 * way below the low water mark of free pages or way
2353 * above high water mark of used pv entries.
2354 */
2355 if (cnt.v_free_count < cnt.v_free_reserved ||
2356 pv_entry_count > pv_entry_high_water)
2357 break;
2358
2359 pdnxt = (addr + NBPDR) & ~PDRMASK;
2360 ptepindex = addr >> PDRSHIFT;
2361
2362 srcptepaddr = src_pmap->pm_pdir[ptepindex];
2363 if (srcptepaddr == 0)
2364 continue;
2365
2366 if (srcptepaddr & PG_PS) {
2367 if (dst_pmap->pm_pdir[ptepindex] == 0) {
2368 dst_pmap->pm_pdir[ptepindex] = srcptepaddr;
2369 dst_pmap->pm_stats.resident_count +=
2370 NBPDR / PAGE_SIZE;
2371 }
2372 continue;
2373 }
2374
2375 srcmpte = PHYS_TO_VM_PAGE(srcptepaddr);
2376 if (srcmpte->wire_count == 0)
2377 panic("pmap_copy: source page table page is unused");
2378
2379 if (pdnxt > end_addr)
2380 pdnxt = end_addr;
2381
2382 src_pte = vtopte(addr);
2383 while (addr < pdnxt) {
2384 pt_entry_t ptetemp;
2385 ptetemp = *src_pte;
2386 /*
2387 * we only virtual copy managed pages
2388 */
2389 if ((ptetemp & PG_MANAGED) != 0) {
2390 /*
2391 * We have to check after allocpte for the
2392 * pte still being around... allocpte can
2393 * block.
2394 */
2395 dstmpte = pmap_allocpte(dst_pmap, addr,
2396 M_NOWAIT);
2397 if (dstmpte == NULL)
2398 break;
2399 dst_pte = pmap_pte_quick(dst_pmap, addr);
2400 if (*dst_pte == 0) {
2401 /*
2402 * Clear the modified and
2403 * accessed (referenced) bits
2404 * during the copy.
2405 */
2406 m = PHYS_TO_VM_PAGE(ptetemp);
2407 *dst_pte = ptetemp & ~(PG_M | PG_A);
2408 dst_pmap->pm_stats.resident_count++;
2409 pmap_insert_entry(dst_pmap, addr, m);
2410 } else
2411 pmap_unwire_pte_hold(dst_pmap, dstmpte);
2412 if (dstmpte->wire_count >= srcmpte->wire_count)
2413 break;
2414 }
2415 addr += PAGE_SIZE;
2416 src_pte++;
2417 }
2418 }
2419 sched_unpin();
2420 vm_page_unlock_queues();
2421 PMAP_UNLOCK(src_pmap);
2422 PMAP_UNLOCK(dst_pmap);
2423 }
2424
2425 static __inline void
2426 pagezero(void *page)
2427 {
2428 #if defined(I686_CPU)
2429 if (cpu_class == CPUCLASS_686) {
2430 #if defined(CPU_ENABLE_SSE)
2431 if (cpu_feature & CPUID_SSE2)
2432 sse2_pagezero(page);
2433 else
2434 #endif
2435 i686_pagezero(page);
2436 } else
2437 #endif
2438 bzero(page, PAGE_SIZE);
2439 }
2440
2441 /*
2442 * pmap_zero_page zeros the specified hardware page by mapping
2443 * the page into KVM and using bzero to clear its contents.
2444 */
2445 void
2446 pmap_zero_page(vm_page_t m)
2447 {
2448 struct sysmaps *sysmaps;
2449
2450 sysmaps = &sysmaps_pcpu[PCPU_GET(cpuid)];
2451 mtx_lock(&sysmaps->lock);
2452 if (*sysmaps->CMAP2)
2453 panic("pmap_zero_page: CMAP2 busy");
2454 sched_pin();
2455 *sysmaps->CMAP2 = PG_V | PG_RW | VM_PAGE_TO_PHYS(m) | PG_A | PG_M;
2456 invlcaddr(sysmaps->CADDR2);
2457 pagezero(sysmaps->CADDR2);
2458 *sysmaps->CMAP2 = 0;
2459 sched_unpin();
2460 mtx_unlock(&sysmaps->lock);
2461 }
2462
2463 /*
2464 * pmap_zero_page_area zeros the specified hardware page by mapping
2465 * the page into KVM and using bzero to clear its contents.
2466 *
2467 * off and size may not cover an area beyond a single hardware page.
2468 */
2469 void
2470 pmap_zero_page_area(vm_page_t m, int off, int size)
2471 {
2472 struct sysmaps *sysmaps;
2473
2474 sysmaps = &sysmaps_pcpu[PCPU_GET(cpuid)];
2475 mtx_lock(&sysmaps->lock);
2476 if (*sysmaps->CMAP2)
2477 panic("pmap_zero_page: CMAP2 busy");
2478 sched_pin();
2479 *sysmaps->CMAP2 = PG_V | PG_RW | VM_PAGE_TO_PHYS(m) | PG_A | PG_M;
2480 invlcaddr(sysmaps->CADDR2);
2481 if (off == 0 && size == PAGE_SIZE)
2482 pagezero(sysmaps->CADDR2);
2483 else
2484 bzero((char *)sysmaps->CADDR2 + off, size);
2485 *sysmaps->CMAP2 = 0;
2486 sched_unpin();
2487 mtx_unlock(&sysmaps->lock);
2488 }
2489
2490 /*
2491 * pmap_zero_page_idle zeros the specified hardware page by mapping
2492 * the page into KVM and using bzero to clear its contents. This
2493 * is intended to be called from the vm_pagezero process only and
2494 * outside of Giant.
2495 */
2496 void
2497 pmap_zero_page_idle(vm_page_t m)
2498 {
2499
2500 if (*CMAP3)
2501 panic("pmap_zero_page: CMAP3 busy");
2502 sched_pin();
2503 *CMAP3 = PG_V | PG_RW | VM_PAGE_TO_PHYS(m) | PG_A | PG_M;
2504 invlcaddr(CADDR3);
2505 pagezero(CADDR3);
2506 *CMAP3 = 0;
2507 sched_unpin();
2508 }
2509
2510 /*
2511 * pmap_copy_page copies the specified (machine independent)
2512 * page by mapping the page into virtual memory and using
2513 * bcopy to copy the page, one machine dependent page at a
2514 * time.
2515 */
2516 void
2517 pmap_copy_page(vm_page_t src, vm_page_t dst)
2518 {
2519 struct sysmaps *sysmaps;
2520
2521 sysmaps = &sysmaps_pcpu[PCPU_GET(cpuid)];
2522 mtx_lock(&sysmaps->lock);
2523 if (*sysmaps->CMAP1)
2524 panic("pmap_copy_page: CMAP1 busy");
2525 if (*sysmaps->CMAP2)
2526 panic("pmap_copy_page: CMAP2 busy");
2527 sched_pin();
2528 #ifdef I386_CPU
2529 invltlb();
2530 #else
2531 invlpg((u_int)sysmaps->CADDR1);
2532 invlpg((u_int)sysmaps->CADDR2);
2533 #endif
2534 *sysmaps->CMAP1 = PG_V | VM_PAGE_TO_PHYS(src) | PG_A;
2535 *sysmaps->CMAP2 = PG_V | PG_RW | VM_PAGE_TO_PHYS(dst) | PG_A | PG_M;
2536 bcopy(sysmaps->CADDR1, sysmaps->CADDR2, PAGE_SIZE);
2537 *sysmaps->CMAP1 = 0;
2538 *sysmaps->CMAP2 = 0;
2539 sched_unpin();
2540 mtx_unlock(&sysmaps->lock);
2541 }
2542
2543 /*
2544 * Returns true if the pmap's pv is one of the first
2545 * 16 pvs linked to from this page. This count may
2546 * be changed upwards or downwards in the future; it
2547 * is only necessary that true be returned for a small
2548 * subset of pmaps for proper page aging.
2549 */
2550 boolean_t
2551 pmap_page_exists_quick(pmap, m)
2552 pmap_t pmap;
2553 vm_page_t m;
2554 {
2555 pv_entry_t pv;
2556 int loops = 0;
2557
2558 if (!pmap_initialized || (m->flags & PG_FICTITIOUS))
2559 return FALSE;
2560
2561 mtx_assert(&vm_page_queue_mtx, MA_OWNED);
2562 TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
2563 if (pv->pv_pmap == pmap) {
2564 return TRUE;
2565 }
2566 loops++;
2567 if (loops >= 16)
2568 break;
2569 }
2570 return (FALSE);
2571 }
2572
2573 #define PMAP_REMOVE_PAGES_CURPROC_ONLY
2574 /*
2575 * Remove all pages from specified address space
2576 * this aids process exit speeds. Also, this code
2577 * is special cased for current process only, but
2578 * can have the more generic (and slightly slower)
2579 * mode enabled. This is much faster than pmap_remove
2580 * in the case of running down an entire address space.
2581 */
2582 void
2583 pmap_remove_pages(pmap, sva, eva)
2584 pmap_t pmap;
2585 vm_offset_t sva, eva;
2586 {
2587 pt_entry_t *pte, tpte;
2588 vm_page_t m;
2589 pv_entry_t pv, npv;
2590
2591 #ifdef PMAP_REMOVE_PAGES_CURPROC_ONLY
2592 if (pmap != vmspace_pmap(curthread->td_proc->p_vmspace)) {
2593 printf("warning: pmap_remove_pages called with non-current pmap\n");
2594 return;
2595 }
2596 #endif
2597 vm_page_lock_queues();
2598 PMAP_LOCK(pmap);
2599 sched_pin();
2600 for (pv = TAILQ_FIRST(&pmap->pm_pvlist); pv; pv = npv) {
2601
2602 if (pv->pv_va >= eva || pv->pv_va < sva) {
2603 npv = TAILQ_NEXT(pv, pv_plist);
2604 continue;
2605 }
2606
2607 #ifdef PMAP_REMOVE_PAGES_CURPROC_ONLY
2608 pte = vtopte(pv->pv_va);
2609 #else
2610 pte = pmap_pte_quick(pmap, pv->pv_va);
2611 #endif
2612 tpte = *pte;
2613
2614 if (tpte == 0) {
2615 printf("TPTE at %p IS ZERO @ VA %08x\n",
2616 pte, pv->pv_va);
2617 panic("bad pte");
2618 }
2619
2620 /*
2621 * We cannot remove wired pages from a process' mapping at this time
2622 */
2623 if (tpte & PG_W) {
2624 npv = TAILQ_NEXT(pv, pv_plist);
2625 continue;
2626 }
2627
2628 m = PHYS_TO_VM_PAGE(tpte);
2629 KASSERT(m->phys_addr == (tpte & PG_FRAME),
2630 ("vm_page_t %p phys_addr mismatch %016jx %016jx",
2631 m, (uintmax_t)m->phys_addr, (uintmax_t)tpte));
2632
2633 KASSERT(m < &vm_page_array[vm_page_array_size],
2634 ("pmap_remove_pages: bad tpte %#jx", (uintmax_t)tpte));
2635
2636 pmap->pm_stats.resident_count--;
2637
2638 pte_clear(pte);
2639
2640 /*
2641 * Update the vm_page_t clean and reference bits.
2642 */
2643 if (tpte & PG_M) {
2644 vm_page_dirty(m);
2645 }
2646
2647 npv = TAILQ_NEXT(pv, pv_plist);
2648 TAILQ_REMOVE(&pmap->pm_pvlist, pv, pv_plist);
2649
2650 m->md.pv_list_count--;
2651 TAILQ_REMOVE(&m->md.pv_list, pv, pv_list);
2652 if (TAILQ_EMPTY(&m->md.pv_list))
2653 vm_page_flag_clear(m, PG_WRITEABLE);
2654
2655 pmap_unuse_pt(pmap, pv->pv_va);
2656 free_pv_entry(pv);
2657 }
2658 sched_unpin();
2659 pmap_invalidate_all(pmap);
2660 PMAP_UNLOCK(pmap);
2661 vm_page_unlock_queues();
2662 }
2663
2664 /*
2665 * pmap_is_modified:
2666 *
2667 * Return whether or not the specified physical page was modified
2668 * in any physical maps.
2669 */
2670 boolean_t
2671 pmap_is_modified(vm_page_t m)
2672 {
2673 pv_entry_t pv;
2674 pt_entry_t *pte;
2675 boolean_t rv;
2676
2677 rv = FALSE;
2678 if (!pmap_initialized || (m->flags & PG_FICTITIOUS))
2679 return (rv);
2680
2681 sched_pin();
2682 mtx_assert(&vm_page_queue_mtx, MA_OWNED);
2683 TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
2684 /*
2685 * if the bit being tested is the modified bit, then
2686 * mark clean_map and ptes as never
2687 * modified.
2688 */
2689 if (!pmap_track_modified(pv->pv_va))
2690 continue;
2691 #if defined(PMAP_DIAGNOSTIC)
2692 if (!pv->pv_pmap) {
2693 printf("Null pmap (tb) at va: 0x%x\n", pv->pv_va);
2694 continue;
2695 }
2696 #endif
2697 PMAP_LOCK(pv->pv_pmap);
2698 pte = pmap_pte_quick(pv->pv_pmap, pv->pv_va);
2699 rv = (*pte & PG_M) != 0;
2700 PMAP_UNLOCK(pv->pv_pmap);
2701 if (rv)
2702 break;
2703 }
2704 sched_unpin();
2705 return (rv);
2706 }
2707
2708 /*
2709 * pmap_is_prefaultable:
2710 *
2711 * Return whether or not the specified virtual address is elgible
2712 * for prefault.
2713 */
2714 boolean_t
2715 pmap_is_prefaultable(pmap_t pmap, vm_offset_t addr)
2716 {
2717 pt_entry_t *pte;
2718 boolean_t rv;
2719
2720 rv = FALSE;
2721 PMAP_LOCK(pmap);
2722 if (*pmap_pde(pmap, addr)) {
2723 pte = vtopte(addr);
2724 rv = *pte == 0;
2725 }
2726 PMAP_UNLOCK(pmap);
2727 return (rv);
2728 }
2729
2730 /*
2731 * Clear the given bit in each of the given page's ptes. The bit is
2732 * expressed as a 32-bit mask. Consequently, if the pte is 64 bits in
2733 * size, only a bit within the least significant 32 can be cleared.
2734 */
2735 static __inline void
2736 pmap_clear_ptes(vm_page_t m, int bit)
2737 {
2738 register pv_entry_t pv;
2739 pt_entry_t pbits, *pte;
2740
2741 if (!pmap_initialized || (m->flags & PG_FICTITIOUS) ||
2742 (bit == PG_RW && (m->flags & PG_WRITEABLE) == 0))
2743 return;
2744
2745 sched_pin();
2746 mtx_assert(&vm_page_queue_mtx, MA_OWNED);
2747 /*
2748 * Loop over all current mappings setting/clearing as appropos If
2749 * setting RO do we need to clear the VAC?
2750 */
2751 TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
2752 /*
2753 * don't write protect pager mappings
2754 */
2755 if (bit == PG_RW) {
2756 if (!pmap_track_modified(pv->pv_va))
2757 continue;
2758 }
2759
2760 #if defined(PMAP_DIAGNOSTIC)
2761 if (!pv->pv_pmap) {
2762 printf("Null pmap (cb) at va: 0x%x\n", pv->pv_va);
2763 continue;
2764 }
2765 #endif
2766
2767 PMAP_LOCK(pv->pv_pmap);
2768 pte = pmap_pte_quick(pv->pv_pmap, pv->pv_va);
2769 retry:
2770 pbits = *pte;
2771 if (pbits & bit) {
2772 if (bit == PG_RW) {
2773 /*
2774 * Regardless of whether a pte is 32 or 64 bits
2775 * in size, PG_RW and PG_M are among the least
2776 * significant 32 bits.
2777 */
2778 if (!atomic_cmpset_int((u_int *)pte, pbits,
2779 pbits & ~(PG_RW | PG_M)))
2780 goto retry;
2781 if (pbits & PG_M) {
2782 vm_page_dirty(m);
2783 }
2784 } else {
2785 atomic_clear_int((u_int *)pte, bit);
2786 }
2787 pmap_invalidate_page(pv->pv_pmap, pv->pv_va);
2788 }
2789 PMAP_UNLOCK(pv->pv_pmap);
2790 }
2791 if (bit == PG_RW)
2792 vm_page_flag_clear(m, PG_WRITEABLE);
2793 sched_unpin();
2794 }
2795
2796 /*
2797 * pmap_page_protect:
2798 *
2799 * Lower the permission for all mappings to a given page.
2800 */
2801 void
2802 pmap_page_protect(vm_page_t m, vm_prot_t prot)
2803 {
2804 if ((prot & VM_PROT_WRITE) == 0) {
2805 if (prot & (VM_PROT_READ | VM_PROT_EXECUTE)) {
2806 pmap_clear_ptes(m, PG_RW);
2807 } else {
2808 pmap_remove_all(m);
2809 }
2810 }
2811 }
2812
2813 /*
2814 * pmap_ts_referenced:
2815 *
2816 * Return a count of reference bits for a page, clearing those bits.
2817 * It is not necessary for every reference bit to be cleared, but it
2818 * is necessary that 0 only be returned when there are truly no
2819 * reference bits set.
2820 *
2821 * XXX: The exact number of bits to check and clear is a matter that
2822 * should be tested and standardized at some point in the future for
2823 * optimal aging of shared pages.
2824 */
2825 int
2826 pmap_ts_referenced(vm_page_t m)
2827 {
2828 register pv_entry_t pv, pvf, pvn;
2829 pt_entry_t *pte;
2830 pt_entry_t v;
2831 int rtval = 0;
2832
2833 if (!pmap_initialized || (m->flags & PG_FICTITIOUS))
2834 return (rtval);
2835
2836 sched_pin();
2837 mtx_assert(&vm_page_queue_mtx, MA_OWNED);
2838 if ((pv = TAILQ_FIRST(&m->md.pv_list)) != NULL) {
2839
2840 pvf = pv;
2841
2842 do {
2843 pvn = TAILQ_NEXT(pv, pv_list);
2844
2845 TAILQ_REMOVE(&m->md.pv_list, pv, pv_list);
2846
2847 TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_list);
2848
2849 if (!pmap_track_modified(pv->pv_va))
2850 continue;
2851
2852 PMAP_LOCK(pv->pv_pmap);
2853 pte = pmap_pte_quick(pv->pv_pmap, pv->pv_va);
2854
2855 if (pte && ((v = pte_load(pte)) & PG_A) != 0) {
2856 atomic_clear_int((u_int *)pte, PG_A);
2857 pmap_invalidate_page(pv->pv_pmap, pv->pv_va);
2858
2859 rtval++;
2860 if (rtval > 4) {
2861 PMAP_UNLOCK(pv->pv_pmap);
2862 break;
2863 }
2864 }
2865 PMAP_UNLOCK(pv->pv_pmap);
2866 } while ((pv = pvn) != NULL && pv != pvf);
2867 }
2868 sched_unpin();
2869
2870 return (rtval);
2871 }
2872
2873 /*
2874 * Clear the modify bits on the specified physical page.
2875 */
2876 void
2877 pmap_clear_modify(vm_page_t m)
2878 {
2879 pmap_clear_ptes(m, PG_M);
2880 }
2881
2882 /*
2883 * pmap_clear_reference:
2884 *
2885 * Clear the reference bit on the specified physical page.
2886 */
2887 void
2888 pmap_clear_reference(vm_page_t m)
2889 {
2890 pmap_clear_ptes(m, PG_A);
2891 }
2892
2893 /*
2894 * Miscellaneous support routines follow
2895 */
2896
2897 /*
2898 * Map a set of physical memory pages into the kernel virtual
2899 * address space. Return a pointer to where it is mapped. This
2900 * routine is intended to be used for mapping device memory,
2901 * NOT real memory.
2902 */
2903 void *
2904 pmap_mapdev(pa, size)
2905 vm_paddr_t pa;
2906 vm_size_t size;
2907 {
2908 vm_offset_t va, tmpva, offset;
2909
2910 offset = pa & PAGE_MASK;
2911 size = roundup(offset + size, PAGE_SIZE);
2912 pa = pa & PG_FRAME;
2913
2914 if (pa < KERNLOAD && pa + size <= KERNLOAD)
2915 va = KERNBASE + pa;
2916 else
2917 va = kmem_alloc_nofault(kernel_map, size);
2918 if (!va)
2919 panic("pmap_mapdev: Couldn't alloc kernel virtual memory");
2920
2921 for (tmpva = va; size > 0; ) {
2922 pmap_kenter(tmpva, pa);
2923 size -= PAGE_SIZE;
2924 tmpva += PAGE_SIZE;
2925 pa += PAGE_SIZE;
2926 }
2927 pmap_invalidate_range(kernel_pmap, va, tmpva);
2928 return ((void *)(va + offset));
2929 }
2930
2931 void
2932 pmap_unmapdev(va, size)
2933 vm_offset_t va;
2934 vm_size_t size;
2935 {
2936 vm_offset_t base, offset, tmpva;
2937
2938 if (va >= KERNBASE && va + size <= KERNBASE + KERNLOAD)
2939 return;
2940 base = va & PG_FRAME;
2941 offset = va & PAGE_MASK;
2942 size = roundup(offset + size, PAGE_SIZE);
2943 for (tmpva = base; tmpva < (base + size); tmpva += PAGE_SIZE)
2944 pmap_kremove(tmpva);
2945 pmap_invalidate_range(kernel_pmap, va, tmpva);
2946 kmem_free(kernel_map, base, size);
2947 }
2948
2949 /*
2950 * perform the pmap work for mincore
2951 */
2952 int
2953 pmap_mincore(pmap, addr)
2954 pmap_t pmap;
2955 vm_offset_t addr;
2956 {
2957 pt_entry_t *ptep, pte;
2958 vm_page_t m;
2959 int val = 0;
2960
2961 PMAP_LOCK(pmap);
2962 ptep = pmap_pte(pmap, addr);
2963 pte = (ptep != NULL) ? *ptep : 0;
2964 pmap_pte_release(ptep);
2965 PMAP_UNLOCK(pmap);
2966
2967 if (pte != 0) {
2968 vm_paddr_t pa;
2969
2970 val = MINCORE_INCORE;
2971 if ((pte & PG_MANAGED) == 0)
2972 return val;
2973
2974 pa = pte & PG_FRAME;
2975
2976 m = PHYS_TO_VM_PAGE(pa);
2977
2978 /*
2979 * Modified by us
2980 */
2981 if (pte & PG_M)
2982 val |= MINCORE_MODIFIED|MINCORE_MODIFIED_OTHER;
2983 else {
2984 /*
2985 * Modified by someone else
2986 */
2987 vm_page_lock_queues();
2988 if (m->dirty || pmap_is_modified(m))
2989 val |= MINCORE_MODIFIED_OTHER;
2990 vm_page_unlock_queues();
2991 }
2992 /*
2993 * Referenced by us
2994 */
2995 if (pte & PG_A)
2996 val |= MINCORE_REFERENCED|MINCORE_REFERENCED_OTHER;
2997 else {
2998 /*
2999 * Referenced by someone else
3000 */
3001 vm_page_lock_queues();
3002 if ((m->flags & PG_REFERENCED) ||
3003 pmap_ts_referenced(m)) {
3004 val |= MINCORE_REFERENCED_OTHER;
3005 vm_page_flag_set(m, PG_REFERENCED);
3006 }
3007 vm_page_unlock_queues();
3008 }
3009 }
3010 return val;
3011 }
3012
3013 void
3014 pmap_activate(struct thread *td)
3015 {
3016 struct proc *p = td->td_proc;
3017 pmap_t pmap, oldpmap;
3018 u_int32_t cr3;
3019
3020 critical_enter();
3021 pmap = vmspace_pmap(td->td_proc->p_vmspace);
3022 oldpmap = PCPU_GET(curpmap);
3023 #if defined(SMP)
3024 atomic_clear_int(&oldpmap->pm_active, PCPU_GET(cpumask));
3025 atomic_set_int(&pmap->pm_active, PCPU_GET(cpumask));
3026 #else
3027 oldpmap->pm_active &= ~1;
3028 pmap->pm_active |= 1;
3029 #endif
3030 #ifdef PAE
3031 cr3 = vtophys(pmap->pm_pdpt);
3032 #else
3033 cr3 = vtophys(pmap->pm_pdir);
3034 #endif
3035 /* XXXKSE this is wrong.
3036 * pmap_activate is for the current thread on the current cpu
3037 */
3038 if (p->p_flag & P_SA) {
3039 /* Make sure all other cr3 entries are updated. */
3040 /* what if they are running? XXXKSE (maybe abort them) */
3041 FOREACH_THREAD_IN_PROC(p, td) {
3042 td->td_pcb->pcb_cr3 = cr3;
3043 }
3044 } else {
3045 td->td_pcb->pcb_cr3 = cr3;
3046 }
3047 load_cr3(cr3);
3048 PCPU_SET(curpmap, pmap);
3049 critical_exit();
3050 }
3051
3052 vm_offset_t
3053 pmap_addr_hint(vm_object_t obj, vm_offset_t addr, vm_size_t size)
3054 {
3055
3056 if ((obj == NULL) || (size < NBPDR) || (obj->type != OBJT_DEVICE)) {
3057 return addr;
3058 }
3059
3060 addr = (addr + PDRMASK) & ~PDRMASK;
3061 return addr;
3062 }
3063
3064
3065 #if defined(PMAP_DEBUG)
3066 pmap_pid_dump(int pid)
3067 {
3068 pmap_t pmap;
3069 struct proc *p;
3070 int npte = 0;
3071 int index;
3072
3073 sx_slock(&allproc_lock);
3074 LIST_FOREACH(p, &allproc, p_list) {
3075 if (p->p_pid != pid)
3076 continue;
3077
3078 if (p->p_vmspace) {
3079 int i,j;
3080 index = 0;
3081 pmap = vmspace_pmap(p->p_vmspace);
3082 for (i = 0; i < NPDEPTD; i++) {
3083 pd_entry_t *pde;
3084 pt_entry_t *pte;
3085 vm_offset_t base = i << PDRSHIFT;
3086
3087 pde = &pmap->pm_pdir[i];
3088 if (pde && pmap_pde_v(pde)) {
3089 for (j = 0; j < NPTEPG; j++) {
3090 vm_offset_t va = base + (j << PAGE_SHIFT);
3091 if (va >= (vm_offset_t) VM_MIN_KERNEL_ADDRESS) {
3092 if (index) {
3093 index = 0;
3094 printf("\n");
3095 }
3096 sx_sunlock(&allproc_lock);
3097 return npte;
3098 }
3099 pte = pmap_pte(pmap, va);
3100 if (pte && pmap_pte_v(pte)) {
3101 pt_entry_t pa;
3102 vm_page_t m;
3103 pa = *pte;
3104 m = PHYS_TO_VM_PAGE(pa);
3105 printf("va: 0x%x, pt: 0x%x, h: %d, w: %d, f: 0x%x",
3106 va, pa, m->hold_count, m->wire_count, m->flags);
3107 npte++;
3108 index++;
3109 if (index >= 2) {
3110 index = 0;
3111 printf("\n");
3112 } else {
3113 printf(" ");
3114 }
3115 }
3116 }
3117 }
3118 }
3119 }
3120 }
3121 sx_sunlock(&allproc_lock);
3122 return npte;
3123 }
3124 #endif
3125
3126 #if defined(DEBUG)
3127
3128 static void pads(pmap_t pm);
3129 void pmap_pvdump(vm_offset_t pa);
3130
3131 /* print address space of pmap*/
3132 static void
3133 pads(pm)
3134 pmap_t pm;
3135 {
3136 int i, j;
3137 vm_paddr_t va;
3138 pt_entry_t *ptep;
3139
3140 if (pm == kernel_pmap)
3141 return;
3142 for (i = 0; i < NPDEPTD; i++)
3143 if (pm->pm_pdir[i])
3144 for (j = 0; j < NPTEPG; j++) {
3145 va = (i << PDRSHIFT) + (j << PAGE_SHIFT);
3146 if (pm == kernel_pmap && va < KERNBASE)
3147 continue;
3148 if (pm != kernel_pmap && va > UPT_MAX_ADDRESS)
3149 continue;
3150 ptep = pmap_pte(pm, va);
3151 if (pmap_pte_v(ptep))
3152 printf("%x:%x ", va, *ptep);
3153 };
3154
3155 }
3156
3157 void
3158 pmap_pvdump(pa)
3159 vm_paddr_t pa;
3160 {
3161 pv_entry_t pv;
3162 vm_page_t m;
3163
3164 printf("pa %x", pa);
3165 m = PHYS_TO_VM_PAGE(pa);
3166 TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
3167 printf(" -> pmap %p, va %x", (void *)pv->pv_pmap, pv->pv_va);
3168 pads(pv->pv_pmap);
3169 }
3170 printf(" ");
3171 }
3172 #endif
Cache object: eb0d7205ee2135d7a1d8b848dd847adb
|