FreeBSD/Linux Kernel Cross Reference
sys/i386/i386/pmap.c


    1 /*-
    2  * Copyright (c) 1991 Regents of the University of California.
    3  * All rights reserved.
    4  * Copyright (c) 1994 John S. Dyson
    5  * All rights reserved.
    6  * Copyright (c) 1994 David Greenman
    7  * All rights reserved.
    8  * Copyright (c) 2005-2008 Alan L. Cox <alc@cs.rice.edu>
    9  * All rights reserved.
   10  *
   11  * This code is derived from software contributed to Berkeley by
   12  * the Systems Programming Group of the University of Utah Computer
   13  * Science Department and William Jolitz of UUNET Technologies Inc.
   14  *
   15  * Redistribution and use in source and binary forms, with or without
   16  * modification, are permitted provided that the following conditions
   17  * are met:
   18  * 1. Redistributions of source code must retain the above copyright
   19  *    notice, this list of conditions and the following disclaimer.
   20  * 2. Redistributions in binary form must reproduce the above copyright
   21  *    notice, this list of conditions and the following disclaimer in the
   22  *    documentation and/or other materials provided with the distribution.
   23  * 3. All advertising materials mentioning features or use of this software
   24  *    must display the following acknowledgement:
   25  *      This product includes software developed by the University of
   26  *      California, Berkeley and its contributors.
   27  * 4. Neither the name of the University nor the names of its contributors
   28  *    may be used to endorse or promote products derived from this software
   29  *    without specific prior written permission.
   30  *
   31  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
   32  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   33  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   34  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
   35  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   36  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
   37  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   38  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   39  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   40  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   41  * SUCH DAMAGE.
   42  *
   43  *      from:   @(#)pmap.c      7.7 (Berkeley)  5/12/91
   44  */
   45 /*-
   46  * Copyright (c) 2003 Networks Associates Technology, Inc.
   47  * All rights reserved.
   48  *
   49  * This software was developed for the FreeBSD Project by Jake Burkholder,
   50  * Safeport Network Services, and Network Associates Laboratories, the
   51  * Security Research Division of Network Associates, Inc. under
   52  * DARPA/SPAWAR contract N66001-01-C-8035 ("CBOSS"), as part of the DARPA
   53  * CHATS research program.
   54  *
   55  * Redistribution and use in source and binary forms, with or without
   56  * modification, are permitted provided that the following conditions
   57  * are met:
   58  * 1. Redistributions of source code must retain the above copyright
   59  *    notice, this list of conditions and the following disclaimer.
   60  * 2. Redistributions in binary form must reproduce the above copyright
   61  *    notice, this list of conditions and the following disclaimer in the
   62  *    documentation and/or other materials provided with the distribution.
   63  *
   64  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
   65  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   66  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   67  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
   68  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   69  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
   70  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   71  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   72  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   73  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   74  * SUCH DAMAGE.
   75  */
   76 
   77 #include <sys/cdefs.h>
   78 __FBSDID("$FreeBSD: releng/8.0/sys/i386/i386/pmap.c 198991 2009-11-06 17:09:04Z attilio $");
   79 
   80 /*
   81  *      Manages physical address maps.
   82  *
   83  *      In addition to hardware address maps, this
   84  *      module is called upon to provide software-use-only
   85  *      maps which may or may not be stored in the same
   86  *      form as hardware maps.  These pseudo-maps are
   87  *      used to store intermediate results from copy
   88  *      operations to and from address spaces.
   89  *
   90  *      Since the information managed by this module is
   91  *      also stored by the logical address mapping module,
   92  *      this module may throw away valid virtual-to-physical
   93  *      mappings at almost any time.  However, invalidations
   94  *      of virtual-to-physical mappings must be done as
   95  *      requested.
   96  *
   97  *      In order to cope with hardware architectures which
   98  *      make virtual-to-physical map invalidates expensive,
   99  *      this module may delay invalidation or protection-reduction
  100  *      operations until such time as they are actually
  101  *      necessary.  This module is given full information as
  102  *      to which processors are currently using which maps,
  103  *      and to when physical maps must be made correct.
  104  */
  105 
  106 #include "opt_cpu.h"
  107 #include "opt_pmap.h"
  108 #include "opt_msgbuf.h"
  109 #include "opt_smp.h"
  110 #include "opt_xbox.h"
  111 
  112 #include <sys/param.h>
  113 #include <sys/systm.h>
  114 #include <sys/kernel.h>
  115 #include <sys/ktr.h>
  116 #include <sys/lock.h>
  117 #include <sys/malloc.h>
  118 #include <sys/mman.h>
  119 #include <sys/msgbuf.h>
  120 #include <sys/mutex.h>
  121 #include <sys/proc.h>
  122 #include <sys/sf_buf.h>
  123 #include <sys/sx.h>
  124 #include <sys/vmmeter.h>
  125 #include <sys/sched.h>
  126 #include <sys/sysctl.h>
  127 #ifdef SMP
  128 #include <sys/smp.h>
  129 #endif
  130 
  131 #include <vm/vm.h>
  132 #include <vm/vm_param.h>
  133 #include <vm/vm_kern.h>
  134 #include <vm/vm_page.h>
  135 #include <vm/vm_map.h>
  136 #include <vm/vm_object.h>
  137 #include <vm/vm_extern.h>
  138 #include <vm/vm_pageout.h>
  139 #include <vm/vm_pager.h>
  140 #include <vm/vm_reserv.h>
  141 #include <vm/uma.h>
  142 
  143 #include <machine/cpu.h>
  144 #include <machine/cputypes.h>
  145 #include <machine/md_var.h>
  146 #include <machine/pcb.h>
  147 #include <machine/specialreg.h>
  148 #ifdef SMP
  149 #include <machine/smp.h>
  150 #endif
  151 
  152 #ifdef XBOX
  153 #include <machine/xbox.h>
  154 #endif
  155 
  156 #if !defined(CPU_DISABLE_SSE) && defined(I686_CPU)
  157 #define CPU_ENABLE_SSE
  158 #endif
  159 
  160 #ifndef PMAP_SHPGPERPROC
  161 #define PMAP_SHPGPERPROC 200
  162 #endif
  163 
  164 #if !defined(DIAGNOSTIC)
  165 #define PMAP_INLINE     __gnu89_inline
  166 #else
  167 #define PMAP_INLINE
  168 #endif
  169 
  170 #define PV_STATS
  171 #ifdef PV_STATS
  172 #define PV_STAT(x)      do { x ; } while (0)
  173 #else
  174 #define PV_STAT(x)      do { } while (0)
  175 #endif
  176 
  177 #define pa_index(pa)    ((pa) >> PDRSHIFT)
  178 #define pa_to_pvh(pa)   (&pv_table[pa_index(pa)])
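      /*
       * Illustrative note (assuming the default non-PAE configuration,
       * PDRSHIFT == 22): pa_index() groups physical memory into 4MB
       * superpage regions, so every physical address inside the same
       * NBPDR-sized region shares one pv_table entry.  For example,
       * pa_index(0x00800000) == 2, i.e. the 8MB-12MB region is tracked
       * by pv_table[2].
       */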
  179 
  180 /*
  181  * Get PDEs and PTEs for user/kernel address space
  182  */
  183 #define pmap_pde(m, v)  (&((m)->pm_pdir[(vm_offset_t)(v) >> PDRSHIFT]))
  184 #define pdir_pde(m, v) (m[(vm_offset_t)(v) >> PDRSHIFT])
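      /*
       * Illustrative note (assuming the default non-PAE configuration,
       * PDRSHIFT == 22, KERNBASE == 0xc0000000): pmap_pde(pmap, 0xc0123456)
       * expands to &pmap->pm_pdir[0xc0123456 >> 22], i.e.
       * &pmap->pm_pdir[0x300], the page directory entry that covers the
       * 4MB of KVA starting at KERNBASE.
       */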
  185 
  186 #define pmap_pde_v(pte)         ((*(int *)pte & PG_V) != 0)
  187 #define pmap_pte_w(pte)         ((*(int *)pte & PG_W) != 0)
  188 #define pmap_pte_m(pte)         ((*(int *)pte & PG_M) != 0)
  189 #define pmap_pte_u(pte)         ((*(int *)pte & PG_A) != 0)
  190 #define pmap_pte_v(pte)         ((*(int *)pte & PG_V) != 0)
  191 
  192 #define pmap_pte_set_w(pte, v)  ((v) ? atomic_set_int((u_int *)(pte), PG_W) : \
  193     atomic_clear_int((u_int *)(pte), PG_W))
  194 #define pmap_pte_set_prot(pte, v) ((*(int *)pte &= ~PG_PROT), (*(int *)pte |= (v)))
  195 
  196 struct pmap kernel_pmap_store;
  197 LIST_HEAD(pmaplist, pmap);
  198 static struct pmaplist allpmaps;
  199 static struct mtx allpmaps_lock;
  200 
  201 vm_offset_t virtual_avail;      /* VA of first avail page (after kernel bss) */
  202 vm_offset_t virtual_end;        /* VA of last avail page (end of kernel AS) */
  203 int pgeflag = 0;                /* PG_G or-in */
  204 int pseflag = 0;                /* PG_PS or-in */
  205 
  206 static int nkpt;
  207 vm_offset_t kernel_vm_end;
  208 extern u_int32_t KERNend;
  209 
  210 #ifdef PAE
  211 pt_entry_t pg_nx;
  212 static uma_zone_t pdptzone;
  213 #endif
  214 
  215 static int pat_works;                   /* Is page attribute table sane? */
  216 
  217 SYSCTL_NODE(_vm, OID_AUTO, pmap, CTLFLAG_RD, 0, "VM/pmap parameters");
  218 
  219 static int pg_ps_enabled;
  220 SYSCTL_INT(_vm_pmap, OID_AUTO, pg_ps_enabled, CTLFLAG_RD, &pg_ps_enabled, 0,
  221     "Are large page mappings enabled?");
  222 
  223 /*
  224  * Data for the pv entry allocation mechanism
  225  */
  226 static int pv_entry_count = 0, pv_entry_max = 0, pv_entry_high_water = 0;
  227 static struct md_page *pv_table;
  228 static int shpgperproc = PMAP_SHPGPERPROC;
  229 
  230 struct pv_chunk *pv_chunkbase;          /* KVA block for pv_chunks */
  231 int pv_maxchunks;                       /* How many chunks we have KVA for */
  232 vm_offset_t pv_vafree;                  /* freelist stored in the PTE */
  233 
  234 /*
  235  * All those kernel PT submaps that BSD is so fond of
  236  */
  237 struct sysmaps {
  238         struct  mtx lock;
  239         pt_entry_t *CMAP1;
  240         pt_entry_t *CMAP2;
  241         caddr_t CADDR1;
  242         caddr_t CADDR2;
  243 };
  244 static struct sysmaps sysmaps_pcpu[MAXCPU];
  245 pt_entry_t *CMAP1 = 0;
  246 static pt_entry_t *CMAP3;
  247 caddr_t CADDR1 = 0, ptvmmap = 0;
  248 static caddr_t CADDR3;
  249 struct msgbuf *msgbufp = 0;
  250 
  251 /*
  252  * Crashdump maps.
  253  */
  254 static caddr_t crashdumpmap;
  255 
  256 static pt_entry_t *PMAP1 = 0, *PMAP2;
  257 static pt_entry_t *PADDR1 = 0, *PADDR2;
  258 #ifdef SMP
  259 static int PMAP1cpu;
  260 static int PMAP1changedcpu;
  261 SYSCTL_INT(_debug, OID_AUTO, PMAP1changedcpu, CTLFLAG_RD, 
  262            &PMAP1changedcpu, 0,
  263            "Number of times pmap_pte_quick changed CPU with same PMAP1");
  264 #endif
  265 static int PMAP1changed;
  266 SYSCTL_INT(_debug, OID_AUTO, PMAP1changed, CTLFLAG_RD, 
  267            &PMAP1changed, 0,
  268            "Number of times pmap_pte_quick changed PMAP1");
  269 static int PMAP1unchanged;
  270 SYSCTL_INT(_debug, OID_AUTO, PMAP1unchanged, CTLFLAG_RD, 
  271            &PMAP1unchanged, 0,
  272            "Number of times pmap_pte_quick didn't change PMAP1");
  273 static struct mtx PMAP2mutex;
  274 
  275 static void     free_pv_entry(pmap_t pmap, pv_entry_t pv);
  276 static pv_entry_t get_pv_entry(pmap_t locked_pmap, int try);
  277 static void     pmap_pv_demote_pde(pmap_t pmap, vm_offset_t va, vm_paddr_t pa);
  278 static boolean_t pmap_pv_insert_pde(pmap_t pmap, vm_offset_t va, vm_paddr_t pa);
  279 static void     pmap_pv_promote_pde(pmap_t pmap, vm_offset_t va, vm_paddr_t pa);
  280 static void     pmap_pvh_free(struct md_page *pvh, pmap_t pmap, vm_offset_t va);
  281 static pv_entry_t pmap_pvh_remove(struct md_page *pvh, pmap_t pmap,
  282                     vm_offset_t va);
  283 static int      pmap_pvh_wired_mappings(struct md_page *pvh, int count);
  284 
  285 static boolean_t pmap_demote_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t va);
  286 static boolean_t pmap_enter_pde(pmap_t pmap, vm_offset_t va, vm_page_t m,
  287     vm_prot_t prot);
  288 static vm_page_t pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va,
  289     vm_page_t m, vm_prot_t prot, vm_page_t mpte);
  290 static void pmap_insert_pt_page(pmap_t pmap, vm_page_t mpte);
  291 static void pmap_fill_ptp(pt_entry_t *firstpte, pt_entry_t newpte);
  292 static boolean_t pmap_is_modified_pvh(struct md_page *pvh);
  293 static void pmap_kenter_attr(vm_offset_t va, vm_paddr_t pa, int mode);
  294 static vm_page_t pmap_lookup_pt_page(pmap_t pmap, vm_offset_t va);
  295 static void pmap_pde_attr(pd_entry_t *pde, int cache_bits);
  296 static void pmap_promote_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t va);
  297 static boolean_t pmap_protect_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t sva,
  298     vm_prot_t prot);
  299 static void pmap_pte_attr(pt_entry_t *pte, int cache_bits);
  300 static void pmap_remove_pde(pmap_t pmap, pd_entry_t *pdq, vm_offset_t sva,
  301     vm_page_t *free);
  302 static int pmap_remove_pte(pmap_t pmap, pt_entry_t *ptq, vm_offset_t sva,
  303     vm_page_t *free);
  304 static void pmap_remove_pt_page(pmap_t pmap, vm_page_t mpte);
  305 static void pmap_remove_page(struct pmap *pmap, vm_offset_t va,
  306     vm_page_t *free);
  307 static void pmap_remove_entry(struct pmap *pmap, vm_page_t m,
  308                                         vm_offset_t va);
  309 static void pmap_insert_entry(pmap_t pmap, vm_offset_t va, vm_page_t m);
  310 static boolean_t pmap_try_insert_pv_entry(pmap_t pmap, vm_offset_t va,
  311     vm_page_t m);
  312 
  313 static vm_page_t pmap_allocpte(pmap_t pmap, vm_offset_t va, int flags);
  314 
  315 static vm_page_t _pmap_allocpte(pmap_t pmap, unsigned ptepindex, int flags);
  316 static int _pmap_unwire_pte_hold(pmap_t pmap, vm_page_t m, vm_page_t *free);
  317 static pt_entry_t *pmap_pte_quick(pmap_t pmap, vm_offset_t va);
  318 static void pmap_pte_release(pt_entry_t *pte);
  319 static int pmap_unuse_pt(pmap_t, vm_offset_t, vm_page_t *);
  320 static vm_offset_t pmap_kmem_choose(vm_offset_t addr);
  321 #ifdef PAE
  322 static void *pmap_pdpt_allocf(uma_zone_t zone, int bytes, u_int8_t *flags, int wait);
  323 #endif
  324 
  325 CTASSERT(1 << PDESHIFT == sizeof(pd_entry_t));
  326 CTASSERT(1 << PTESHIFT == sizeof(pt_entry_t));
  327 
  328 /*
  329  * If you get an error here, then you set KVA_PAGES wrong! See the
  330  * description of KVA_PAGES in sys/i386/include/pmap.h. It must be a
  331  * multiple of 4 for a normal kernel, or a multiple of 8 for a PAE kernel.
  332  */
  333 CTASSERT(KERNBASE % (1 << 24) == 0);
  334 
  335 /*
  336  * Move the kernel virtual free pointer to the next
  337  * 4MB.  This is used to help improve performance
  338  * by using a large (4MB) page for much of the kernel
  339  * (.text, .data, .bss)
  340  */
  341 static vm_offset_t
  342 pmap_kmem_choose(vm_offset_t addr)
  343 {
  344         vm_offset_t newaddr = addr;
  345 
  346 #ifndef DISABLE_PSE
  347         if (cpu_feature & CPUID_PSE)
  348                 newaddr = (addr + PDRMASK) & ~PDRMASK;
  349 #endif
  350         return newaddr;
  351 }
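      /*
       * Worked example (assuming CPUID_PSE is present and NBPDR == 4MB,
       * i.e. a non-PAE kernel): pmap_kmem_choose(0xc0a12345) returns
       * 0xc0c00000, the next 4MB boundary, so that the kernel image below
       * it can later be mapped with large (PG_PS) pages.
       */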
  352 
  353 /*
  354  *      Bootstrap the system enough to run with virtual memory.
  355  *
  356  *      On the i386 this is called after mapping has already been enabled
  357  *      and just syncs the pmap module with what has already been done.
  358  *      [We can't call it easily with mapping off since the kernel is not
  359  *      mapped with PA == VA, hence we would have to relocate every address
  360  *      from the linked base (virtual) address "KERNBASE" to the actual
  361  *      (physical) address starting relative to 0]
  362  */
  363 void
  364 pmap_bootstrap(vm_paddr_t firstaddr)
  365 {
  366         vm_offset_t va;
  367         pt_entry_t *pte, *unused;
  368         struct sysmaps *sysmaps;
  369         int i;
  370 
  371         /*
  372          * XXX The calculation of virtual_avail is wrong. It's NKPT*PAGE_SIZE too
  373          * large. It should instead be correctly calculated in locore.s and
  374          * not based on 'first' (which is a physical address, not a virtual
  375          * address, for the start of unused physical memory). The kernel
  376          * page tables are NOT double mapped and thus should not be included
  377          * in this calculation.
  378          */
  379         virtual_avail = (vm_offset_t) KERNBASE + firstaddr;
  380         virtual_avail = pmap_kmem_choose(virtual_avail);
  381 
  382         virtual_end = VM_MAX_KERNEL_ADDRESS;
  383 
  384         /*
  385          * Initialize the kernel pmap (which is statically allocated).
  386          */
  387         PMAP_LOCK_INIT(kernel_pmap);
  388         kernel_pmap->pm_pdir = (pd_entry_t *) (KERNBASE + (u_int)IdlePTD);
  389 #ifdef PAE
  390         kernel_pmap->pm_pdpt = (pdpt_entry_t *) (KERNBASE + (u_int)IdlePDPT);
  391 #endif
  392         kernel_pmap->pm_root = NULL;
  393         kernel_pmap->pm_active = -1;    /* don't allow deactivation */
  394         TAILQ_INIT(&kernel_pmap->pm_pvchunk);
  395         LIST_INIT(&allpmaps);
  396         mtx_init(&allpmaps_lock, "allpmaps", NULL, MTX_SPIN);
  397         mtx_lock_spin(&allpmaps_lock);
  398         LIST_INSERT_HEAD(&allpmaps, kernel_pmap, pm_list);
  399         mtx_unlock_spin(&allpmaps_lock);
  400         nkpt = NKPT;
  401 
  402         /*
  403          * Reserve some special page table entries/VA space for temporary
  404          * mapping of pages.
  405          */
  406 #define SYSMAP(c, p, v, n)      \
  407         v = (c)va; va += ((n)*PAGE_SIZE); p = pte; pte += (n);
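      /*
       * For reference, SYSMAP(caddr_t, CMAP1, CADDR1, 1) expands to:
       *
       *        CADDR1 = (caddr_t)va; va += ((1)*PAGE_SIZE); CMAP1 = pte; pte += (1);
       *
       * that is, it hands out one page of KVA at 'va' and records the kernel
       * PTE that maps it; no mapping is installed until *CMAP1 is written.
       */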
  408 
  409         va = virtual_avail;
  410         pte = vtopte(va);
  411 
  412         /*
  413          * CMAP1/CMAP2 are used for zeroing and copying pages.
  414          * CMAP3 is used for the idle process page zeroing.
  415          */
  416         for (i = 0; i < MAXCPU; i++) {
  417                 sysmaps = &sysmaps_pcpu[i];
  418                 mtx_init(&sysmaps->lock, "SYSMAPS", NULL, MTX_DEF);
  419                 SYSMAP(caddr_t, sysmaps->CMAP1, sysmaps->CADDR1, 1)
  420                 SYSMAP(caddr_t, sysmaps->CMAP2, sysmaps->CADDR2, 1)
  421         }
  422         SYSMAP(caddr_t, CMAP1, CADDR1, 1)
  423         SYSMAP(caddr_t, CMAP3, CADDR3, 1)
  424         *CMAP3 = 0;
  425 
  426         /*
  427          * Crashdump maps.
  428          */
  429         SYSMAP(caddr_t, unused, crashdumpmap, MAXDUMPPGS)
  430 
  431         /*
  432          * ptvmmap is used for reading arbitrary physical pages via /dev/mem.
  433          */
  434         SYSMAP(caddr_t, unused, ptvmmap, 1)
  435 
  436         /*
  437          * msgbufp is used to map the system message buffer.
  438          */
  439         SYSMAP(struct msgbuf *, unused, msgbufp, atop(round_page(MSGBUF_SIZE)))
  440 
  441         /*
  442          * ptemap is used for pmap_pte_quick
  443          */
  444         SYSMAP(pt_entry_t *, PMAP1, PADDR1, 1);
  445         SYSMAP(pt_entry_t *, PMAP2, PADDR2, 1);
  446 
  447         mtx_init(&PMAP2mutex, "PMAP2", NULL, MTX_DEF);
  448 
  449         virtual_avail = va;
  450 
  451         *CMAP1 = 0;
  452 
  453         /*
  454          * Leave in place an identity mapping (virt == phys) for the low 1 MB
  455          * physical memory region that is used by the ACPI wakeup code.  This
  456          * mapping must not have PG_G set. 
  457          */
  458 #ifdef XBOX
  459         /* FIXME: This is gross, but needed for the XBOX. Since we are at such
  460          * an early stage, we cannot yet neatly map video memory ... :-(
  461          * Better fixes are very welcome! */
  462         if (!arch_i386_is_xbox)
  463 #endif
  464         for (i = 1; i < NKPT; i++)
  465                 PTD[i] = 0;
  466 
  467         /* Initialize the PAT MSR if present. */
  468         pmap_init_pat();
  469 
  470         /* Turn on PG_G on kernel page(s) */
  471         pmap_set_pg();
  472 }
  473 
  474 /*
  475  * Setup the PAT MSR.
  476  */
  477 void
  478 pmap_init_pat(void)
  479 {
  480         uint64_t pat_msr;
  481 
  482         /* Bail if this CPU doesn't implement PAT. */
  483         if (!(cpu_feature & CPUID_PAT))
  484                 return;
  485 
  486         if (cpu_vendor_id != CPU_VENDOR_INTEL ||
  487             (CPUID_TO_FAMILY(cpu_id) == 6 && CPUID_TO_MODEL(cpu_id) >= 0xe)) {
  488                 /*
  489                  * Leave the indices 0-3 at the default of WB, WT, UC, and UC-.
  490                  * Program 4 and 5 as WP and WC.
  491                  * Leave 6 and 7 as UC and UC-.
  492                  */
  493                 pat_msr = rdmsr(MSR_PAT);
  494                 pat_msr &= ~(PAT_MASK(4) | PAT_MASK(5));
  495                 pat_msr |= PAT_VALUE(4, PAT_WRITE_PROTECTED) |
  496                     PAT_VALUE(5, PAT_WRITE_COMBINING);
  497                 pat_works = 1;
  498         } else {
  499                 /*
  500                  * Due to some Intel errata, we can only safely use the lower 4
  501                  * PAT entries.  Thus, just replace PAT Index 2 with WC instead
  502                  * of UC-.
  503                  *
  504                  *   Intel Pentium III Processor Specification Update
  505                  * Errata E.27 (Upper Four PAT Entries Not Usable With Mode B
  506                  * or Mode C Paging)
  507                  *
  508                  *   Intel Pentium IV  Processor Specification Update
  509                  * Errata N46 (PAT Index MSB May Be Calculated Incorrectly)
  510                  */
  511                 pat_msr = rdmsr(MSR_PAT);
  512                 pat_msr &= ~PAT_MASK(2);
  513                 pat_msr |= PAT_VALUE(2, PAT_WRITE_COMBINING);
  514                 pat_works = 0;
  515         }
  516         wrmsr(MSR_PAT, pat_msr);
  517 }
  518 
  519 /*
  520  * Set PG_G on kernel pages.  Only the BSP calls this when SMP is turned on.
  521  */
  522 void
  523 pmap_set_pg(void)
  524 {
  525         pd_entry_t pdir;
  526         pt_entry_t *pte;
  527         vm_offset_t va, endva;
  528         int i; 
  529 
  530         if (pgeflag == 0)
  531                 return;
  532 
  533         i = KERNLOAD/NBPDR;
  534         endva = KERNBASE + KERNend;
  535 
  536         if (pseflag) {
  537                 va = KERNBASE + KERNLOAD;
  538                 while (va  < endva) {
  539                         pdir = kernel_pmap->pm_pdir[KPTDI+i];
  540                         pdir |= pgeflag;
  541                         kernel_pmap->pm_pdir[KPTDI+i] = PTD[KPTDI+i] = pdir;
  542                         invltlb();      /* Play it safe, invltlb() every time */
  543                         i++;
  544                         va += NBPDR;
  545                 }
  546         } else {
  547                 va = (vm_offset_t)btext;
  548                 while (va < endva) {
  549                         pte = vtopte(va);
  550                         if (*pte)
  551                                 *pte |= pgeflag;
  552                         invltlb();      /* Play it safe, invltlb() every time */
  553                         va += PAGE_SIZE;
  554                 }
  555         }
  556 }
  557 
  558 /*
  559  * Initialize a vm_page's machine-dependent fields.
  560  */
  561 void
  562 pmap_page_init(vm_page_t m)
  563 {
  564 
  565         TAILQ_INIT(&m->md.pv_list);
  566         m->md.pat_mode = PAT_WRITE_BACK;
  567 }
  568 
  569 #ifdef PAE
  570 static void *
  571 pmap_pdpt_allocf(uma_zone_t zone, int bytes, u_int8_t *flags, int wait)
  572 {
  573 
  574         /* Inform UMA that this allocator uses kernel_map/object. */
  575         *flags = UMA_SLAB_KERNEL;
  576         return ((void *)kmem_alloc_contig(kernel_map, bytes, wait, 0x0ULL,
  577             0xffffffffULL, 1, 0, VM_MEMATTR_DEFAULT));
  578 }
  579 #endif
  580 
  581 /*
  582  * Abuse the pte nodes for unmapped kva to thread a kva freelist through.
  583  * Requirements:
  584  *  - Must deal with pages in order to ensure that none of the PG_* bits
  585  *    are ever set, PG_V in particular.
  586  *  - Assumes we can write to ptes without pte_store() atomic ops, even
  587  *    on PAE systems.  This should be ok.
  588  *  - Assumes nothing will ever test these addresses for 0 to indicate
  589  *    no mapping instead of correctly checking PG_V.
  590  *  - Assumes a vm_offset_t will fit in a pte (true for i386).
  591  * Because PG_V is never set, there can be no mappings to invalidate.
  592  */
  593 static vm_offset_t
  594 pmap_ptelist_alloc(vm_offset_t *head)
  595 {
  596         pt_entry_t *pte;
  597         vm_offset_t va;
  598 
  599         va = *head;
  600         if (va == 0)
  601                 return (va);    /* Out of memory */
  602         pte = vtopte(va);
  603         *head = *pte;
  604         if (*head & PG_V)
  605                 panic("pmap_ptelist_alloc: va with PG_V set!");
  606         *pte = 0;
  607         return (va);
  608 }
  609 
  610 static void
  611 pmap_ptelist_free(vm_offset_t *head, vm_offset_t va)
  612 {
  613         pt_entry_t *pte;
  614 
  615         if (va & PG_V)
  616                 panic("pmap_ptelist_free: freeing va with PG_V set!");
  617         pte = vtopte(va);
  618         *pte = *head;           /* virtual! PG_V is 0 though */
  619         *head = va;
  620 }
  621 
  622 static void
  623 pmap_ptelist_init(vm_offset_t *head, void *base, int npages)
  624 {
  625         int i;
  626         vm_offset_t va;
  627 
  628         *head = 0;
  629         for (i = npages - 1; i >= 0; i--) {
  630                 va = (vm_offset_t)base + i * PAGE_SIZE;
  631                 pmap_ptelist_free(head, va);
  632         }
  633 }
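      /*
       * Freelist layout sketch (for npages == 3 and base == B): after
       * pmap_ptelist_init(), *head == B, vtopte(B) holds B + PAGE_SIZE,
       * vtopte(B + PAGE_SIZE) holds B + 2 * PAGE_SIZE, and the PTE for the
       * last page holds 0, terminating the list.  pmap_ptelist_alloc() pops
       * from the front and pmap_ptelist_free() pushes back on; since PG_V is
       * never set in these "pointers", none of them is ever a live mapping.
       */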
  634 
  635 
  636 /*
  637  *      Initialize the pmap module.
  638  *      Called by vm_init, to initialize any structures that the pmap
  639  *      system needs to map virtual memory.
  640  */
  641 void
  642 pmap_init(void)
  643 {
  644         vm_page_t mpte;
  645         vm_size_t s;
  646         int i, pv_npg;
  647 
  648         /*
  649          * Initialize the vm page array entries for the kernel pmap's
  650          * page table pages.
  651          */ 
  652         for (i = 0; i < nkpt; i++) {
  653                 mpte = PHYS_TO_VM_PAGE(PTD[i + KPTDI] & PG_FRAME);
  654                 KASSERT(mpte >= vm_page_array &&
  655                     mpte < &vm_page_array[vm_page_array_size],
  656                     ("pmap_init: page table page is out of range"));
  657                 mpte->pindex = i + KPTDI;
  658                 mpte->phys_addr = PTD[i + KPTDI] & PG_FRAME;
  659         }
  660 
  661         /*
  662          * Initialize the address space (zone) for the pv entries.  Set a
  663          * high water mark so that the system can recover from excessive
  664          * numbers of pv entries.
  665          */
  666         TUNABLE_INT_FETCH("vm.pmap.shpgperproc", &shpgperproc);
  667         pv_entry_max = shpgperproc * maxproc + cnt.v_page_count;
  668         TUNABLE_INT_FETCH("vm.pmap.pv_entries", &pv_entry_max);
  669         pv_entry_max = roundup(pv_entry_max, _NPCPV);
  670         pv_entry_high_water = 9 * (pv_entry_max / 10);
  671 
  672         /*
  673          * Are large page mappings enabled?
  674          */
  675         TUNABLE_INT_FETCH("vm.pmap.pg_ps_enabled", &pg_ps_enabled);
  676 
  677         /*
  678          * Calculate the size of the pv head table for superpages.
  679          */
  680         for (i = 0; phys_avail[i + 1]; i += 2);
  681         pv_npg = round_4mpage(phys_avail[(i - 2) + 1]) / NBPDR;
  682 
  683         /*
  684          * Allocate memory for the pv head table for superpages.
  685          */
  686         s = (vm_size_t)(pv_npg * sizeof(struct md_page));
  687         s = round_page(s);
  688         pv_table = (struct md_page *)kmem_alloc(kernel_map, s);
  689         for (i = 0; i < pv_npg; i++)
  690                 TAILQ_INIT(&pv_table[i].pv_list);
  691 
  692         pv_maxchunks = MAX(pv_entry_max / _NPCPV, maxproc);
  693         pv_chunkbase = (struct pv_chunk *)kmem_alloc_nofault(kernel_map,
  694             PAGE_SIZE * pv_maxchunks);
  695         if (pv_chunkbase == NULL)
  696                 panic("pmap_init: not enough kvm for pv chunks");
  697         pmap_ptelist_init(&pv_vafree, pv_chunkbase, pv_maxchunks);
  698 #ifdef PAE
  699         pdptzone = uma_zcreate("PDPT", NPGPTD * sizeof(pdpt_entry_t), NULL,
  700             NULL, NULL, NULL, (NPGPTD * sizeof(pdpt_entry_t)) - 1,
  701             UMA_ZONE_VM | UMA_ZONE_NOFREE);
  702         uma_zone_set_allocf(pdptzone, pmap_pdpt_allocf);
  703 #endif
  704 }
  705 
  706 
  707 SYSCTL_INT(_vm_pmap, OID_AUTO, pv_entry_max, CTLFLAG_RD, &pv_entry_max, 0,
  708         "Max number of PV entries");
  709 SYSCTL_INT(_vm_pmap, OID_AUTO, shpgperproc, CTLFLAG_RD, &shpgperproc, 0,
  710         "Page share factor per proc");
  711 
  712 SYSCTL_NODE(_vm_pmap, OID_AUTO, pde, CTLFLAG_RD, 0,
  713     "2/4MB page mapping counters");
  714 
  715 static u_long pmap_pde_demotions;
  716 SYSCTL_ULONG(_vm_pmap_pde, OID_AUTO, demotions, CTLFLAG_RD,
  717     &pmap_pde_demotions, 0, "2/4MB page demotions");
  718 
  719 static u_long pmap_pde_mappings;
  720 SYSCTL_ULONG(_vm_pmap_pde, OID_AUTO, mappings, CTLFLAG_RD,
  721     &pmap_pde_mappings, 0, "2/4MB page mappings");
  722 
  723 static u_long pmap_pde_p_failures;
  724 SYSCTL_ULONG(_vm_pmap_pde, OID_AUTO, p_failures, CTLFLAG_RD,
  725     &pmap_pde_p_failures, 0, "2/4MB page promotion failures");
  726 
  727 static u_long pmap_pde_promotions;
  728 SYSCTL_ULONG(_vm_pmap_pde, OID_AUTO, promotions, CTLFLAG_RD,
  729     &pmap_pde_promotions, 0, "2/4MB page promotions");
  730 
  731 /***************************************************
  732  * Low level helper routines.....
  733  ***************************************************/
  734 
  735 /*
  736  * Determine the appropriate bits to set in a PTE or PDE for a specified
  737  * caching mode.
  738  */
  739 int
  740 pmap_cache_bits(int mode, boolean_t is_pde)
  741 {
  742         int pat_flag, pat_index, cache_bits;
  743 
  744         /* The PAT bit is different for PTE's and PDE's. */
  745         pat_flag = is_pde ? PG_PDE_PAT : PG_PTE_PAT;
  746 
  747         /* If we don't support PAT, map extended modes to older ones. */
  748         if (!(cpu_feature & CPUID_PAT)) {
  749                 switch (mode) {
  750                 case PAT_UNCACHEABLE:
  751                 case PAT_WRITE_THROUGH:
  752                 case PAT_WRITE_BACK:
  753                         break;
  754                 case PAT_UNCACHED:
  755                 case PAT_WRITE_COMBINING:
  756                 case PAT_WRITE_PROTECTED:
  757                         mode = PAT_UNCACHEABLE;
  758                         break;
  759                 }
  760         }
  761         
  762         /* Map the caching mode to a PAT index. */
  763         if (pat_works) {
  764                 switch (mode) {
  765                 case PAT_UNCACHEABLE:
  766                         pat_index = 3;
  767                         break;
  768                 case PAT_WRITE_THROUGH:
  769                         pat_index = 1;
  770                         break;
  771                 case PAT_WRITE_BACK:
  772                         pat_index = 0;
  773                         break;
  774                 case PAT_UNCACHED:
  775                         pat_index = 2;
  776                         break;
  777                 case PAT_WRITE_COMBINING:
  778                         pat_index = 5;
  779                         break;
  780                 case PAT_WRITE_PROTECTED:
  781                         pat_index = 4;
  782                         break;
  783                 default:
  784                         panic("Unknown caching mode %d\n", mode);
  785                 }
  786         } else {
  787                 switch (mode) {
  788                 case PAT_UNCACHED:
  789                 case PAT_UNCACHEABLE:
  790                 case PAT_WRITE_PROTECTED:
  791                         pat_index = 3;
  792                         break;
  793                 case PAT_WRITE_THROUGH:
  794                         pat_index = 1;
  795                         break;
  796                 case PAT_WRITE_BACK:
  797                         pat_index = 0;
  798                         break;
  799                 case PAT_WRITE_COMBINING:
  800                         pat_index = 2;
  801                         break;
  802                 default:
  803                         panic("Unknown caching mode %d\n", mode);
  804                 }
  805         }
  806 
  807         /* Map the 3-bit index value into the PAT, PCD, and PWT bits. */
  808         cache_bits = 0;
  809         if (pat_index & 0x4)
  810                 cache_bits |= pat_flag;
  811         if (pat_index & 0x2)
  812                 cache_bits |= PG_NC_PCD;
  813         if (pat_index & 0x1)
  814                 cache_bits |= PG_NC_PWT;
  815         return (cache_bits);
  816 }
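      /*
       * Example of the encoding above: the 3-bit PAT index is returned as
       * pat_flag (bit 2), PG_NC_PCD (bit 1) and PG_NC_PWT (bit 0).  With
       * pat_works != 0, pmap_cache_bits(PAT_WRITE_COMBINING, 0) selects
       * index 5 and returns PG_PTE_PAT | PG_NC_PWT; with pat_works == 0
       * (but CPUID PAT present) the same request selects index 2 and
       * returns just PG_NC_PCD.
       */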
  817 #ifdef SMP
  818 /*
  819  * For SMP, these functions have to use the IPI mechanism for coherence.
  820  *
  821  * N.B.: Before calling any of the following TLB invalidation functions,
  822  * the calling processor must ensure that all stores updating a non-
  823  * kernel page table are globally performed.  Otherwise, another
  824  * processor could cache an old, pre-update entry without being
  825  * invalidated.  This can happen one of two ways: (1) The pmap becomes
  826  * active on another processor after its pm_active field is checked by
  827  * one of the following functions but before a store updating the page
  828  * table is globally performed. (2) The pmap becomes active on another
  829  * processor before its pm_active field is checked but due to
  830  * speculative loads one of the following functions stills reads the
  831  * pmap as inactive on the other processor.
  832  * 
  833  * The kernel page table is exempt because its pm_active field is
  834  * immutable.  The kernel page table is always active on every
  835  * processor.
  836  */
  837 void
  838 pmap_invalidate_page(pmap_t pmap, vm_offset_t va)
  839 {
  840         u_int cpumask;
  841         u_int other_cpus;
  842 
  843         sched_pin();
  844         if (pmap == kernel_pmap || pmap->pm_active == all_cpus) {
  845                 invlpg(va);
  846                 smp_invlpg(va);
  847         } else {
  848                 cpumask = PCPU_GET(cpumask);
  849                 other_cpus = PCPU_GET(other_cpus);
  850                 if (pmap->pm_active & cpumask)
  851                         invlpg(va);
  852                 if (pmap->pm_active & other_cpus)
  853                         smp_masked_invlpg(pmap->pm_active & other_cpus, va);
  854         }
  855         sched_unpin();
  856 }
  857 
  858 void
  859 pmap_invalidate_range(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
  860 {
  861         u_int cpumask;
  862         u_int other_cpus;
  863         vm_offset_t addr;
  864 
  865         sched_pin();
  866         if (pmap == kernel_pmap || pmap->pm_active == all_cpus) {
  867                 for (addr = sva; addr < eva; addr += PAGE_SIZE)
  868                         invlpg(addr);
  869                 smp_invlpg_range(sva, eva);
  870         } else {
  871                 cpumask = PCPU_GET(cpumask);
  872                 other_cpus = PCPU_GET(other_cpus);
  873                 if (pmap->pm_active & cpumask)
  874                         for (addr = sva; addr < eva; addr += PAGE_SIZE)
  875                                 invlpg(addr);
  876                 if (pmap->pm_active & other_cpus)
  877                         smp_masked_invlpg_range(pmap->pm_active & other_cpus,
  878                             sva, eva);
  879         }
  880         sched_unpin();
  881 }
  882 
  883 void
  884 pmap_invalidate_all(pmap_t pmap)
  885 {
  886         u_int cpumask;
  887         u_int other_cpus;
  888 
  889         sched_pin();
  890         if (pmap == kernel_pmap || pmap->pm_active == all_cpus) {
  891                 invltlb();
  892                 smp_invltlb();
  893         } else {
  894                 cpumask = PCPU_GET(cpumask);
  895                 other_cpus = PCPU_GET(other_cpus);
  896                 if (pmap->pm_active & cpumask)
  897                         invltlb();
  898                 if (pmap->pm_active & other_cpus)
  899                         smp_masked_invltlb(pmap->pm_active & other_cpus);
  900         }
  901         sched_unpin();
  902 }
  903 
  904 void
  905 pmap_invalidate_cache(void)
  906 {
  907 
  908         sched_pin();
  909         wbinvd();
  910         smp_cache_flush();
  911         sched_unpin();
  912 }
  913 #else /* !SMP */
  914 /*
  915  * Normal, non-SMP, 486+ invalidation functions.
  916  * We inline these within pmap.c for speed.
  917  */
  918 PMAP_INLINE void
  919 pmap_invalidate_page(pmap_t pmap, vm_offset_t va)
  920 {
  921 
  922         if (pmap == kernel_pmap || pmap->pm_active)
  923                 invlpg(va);
  924 }
  925 
  926 PMAP_INLINE void
  927 pmap_invalidate_range(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
  928 {
  929         vm_offset_t addr;
  930 
  931         if (pmap == kernel_pmap || pmap->pm_active)
  932                 for (addr = sva; addr < eva; addr += PAGE_SIZE)
  933                         invlpg(addr);
  934 }
  935 
  936 PMAP_INLINE void
  937 pmap_invalidate_all(pmap_t pmap)
  938 {
  939 
  940         if (pmap == kernel_pmap || pmap->pm_active)
  941                 invltlb();
  942 }
  943 
  944 PMAP_INLINE void
  945 pmap_invalidate_cache(void)
  946 {
  947 
  948         wbinvd();
  949 }
  950 #endif /* !SMP */
  951 
  952 void
  953 pmap_invalidate_cache_range(vm_offset_t sva, vm_offset_t eva)
  954 {
  955 
  956         KASSERT((sva & PAGE_MASK) == 0,
  957             ("pmap_invalidate_cache_range: sva not page-aligned"));
  958         KASSERT((eva & PAGE_MASK) == 0,
  959             ("pmap_invalidate_cache_range: eva not page-aligned"));
  960 
  961         if (cpu_feature & CPUID_SS)
  962                 ; /* If "Self Snoop" is supported, do nothing. */
  963         else if (cpu_feature & CPUID_CLFSH) {
  964 
  965                 /*
  966                  * Otherwise, do per-cache line flush.  Use the mfence
  967          * instruction to ensure that previous stores are
  968                  * included in the write-back.  The processor
  969                  * propagates flush to other processors in the cache
  970                  * coherence domain.
  971                  */
  972                 mfence();
  973                 for (; sva < eva; sva += cpu_clflush_line_size)
  974                         clflush(sva);
  975                 mfence();
  976         } else {
  977 
  978                 /*
  979          * No targeted cache flush methods are supported by the CPU,
  980                  * globally invalidate cache as a last resort.
  981                  */
  982                 pmap_invalidate_cache();
  983         }
  984 }
  985 
  986 /*
  987  * Are we current address space or kernel?  N.B. We return FALSE when
  988  * a pmap's page table is in use because a kernel thread is borrowing
  989  * it.  The borrowed page table can change spontaneously, making any
  990  * dependence on its continued use subject to a race condition.
  991  */
  992 static __inline int
  993 pmap_is_current(pmap_t pmap)
  994 {
  995 
  996         return (pmap == kernel_pmap ||
  997                 (pmap == vmspace_pmap(curthread->td_proc->p_vmspace) &&
  998             (pmap->pm_pdir[PTDPTDI] & PG_FRAME) == (PTDpde[0] & PG_FRAME)));
  999 }
 1000 
 1001 /*
 1002  * If the given pmap is not the current or kernel pmap, the returned pte must
 1003  * be released by passing it to pmap_pte_release().
 1004  */
 1005 pt_entry_t *
 1006 pmap_pte(pmap_t pmap, vm_offset_t va)
 1007 {
 1008         pd_entry_t newpf;
 1009         pd_entry_t *pde;
 1010 
 1011         pde = pmap_pde(pmap, va);
 1012         if (*pde & PG_PS)
 1013                 return (pde);
 1014         if (*pde != 0) {
 1015                 /* are we current address space or kernel? */
 1016                 if (pmap_is_current(pmap))
 1017                         return (vtopte(va));
 1018                 mtx_lock(&PMAP2mutex);
 1019                 newpf = *pde & PG_FRAME;
 1020                 if ((*PMAP2 & PG_FRAME) != newpf) {
 1021                         *PMAP2 = newpf | PG_RW | PG_V | PG_A | PG_M;
 1022                         pmap_invalidate_page(kernel_pmap, (vm_offset_t)PADDR2);
 1023                 }
 1024                 return (PADDR2 + (i386_btop(va) & (NPTEPG - 1)));
 1025         }
 1026         return (0);
 1027 }
 1028 
 1029 /*
 1030  * Releases a pte that was obtained from pmap_pte().  Be prepared for the pte
 1031  * being NULL.
 1032  */
 1033 static __inline void
 1034 pmap_pte_release(pt_entry_t *pte)
 1035 {
 1036 
 1037         if ((pt_entry_t *)((vm_offset_t)pte & ~PAGE_MASK) == PADDR2)
 1038                 mtx_unlock(&PMAP2mutex);
 1039 }
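      /*
       * Usage sketch for a pmap that may be neither current nor the kernel
       * pmap:
       *
       *        pte = pmap_pte(pmap, va);
       *        if (pte != NULL) {
       *                ... examine or update *pte ...
       *                pmap_pte_release(pte);
       *        }
       *
       * pmap_pte_release() drops PMAP2mutex only when the pte was returned
       * through the PADDR2 window; otherwise it is a no-op (see
       * pmap_extract() below for a real caller).
       */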
 1040 
 1041 static __inline void
 1042 invlcaddr(void *caddr)
 1043 {
 1044 
 1045         invlpg((u_int)caddr);
 1046 }
 1047 
 1048 /*
 1049  * Super fast pmap_pte routine best used when scanning
 1050  * the pv lists.  This eliminates many coarse-grained
 1051  * invltlb calls.  Note that many of the pv list
 1052  * scans are across different pmaps.  It is very wasteful
 1053  * to do an entire invltlb for checking a single mapping.
 1054  *
 1055  * If the given pmap is not the current pmap, vm_page_queue_mtx
 1056  * must be held and curthread pinned to a CPU.
 1057  */
 1058 static pt_entry_t *
 1059 pmap_pte_quick(pmap_t pmap, vm_offset_t va)
 1060 {
 1061         pd_entry_t newpf;
 1062         pd_entry_t *pde;
 1063 
 1064         pde = pmap_pde(pmap, va);
 1065         if (*pde & PG_PS)
 1066                 return (pde);
 1067         if (*pde != 0) {
 1068                 /* are we current address space or kernel? */
 1069                 if (pmap_is_current(pmap))
 1070                         return (vtopte(va));
 1071                 mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 1072                 KASSERT(curthread->td_pinned > 0, ("curthread not pinned"));
 1073                 newpf = *pde & PG_FRAME;
 1074                 if ((*PMAP1 & PG_FRAME) != newpf) {
 1075                         *PMAP1 = newpf | PG_RW | PG_V | PG_A | PG_M;
 1076 #ifdef SMP
 1077                         PMAP1cpu = PCPU_GET(cpuid);
 1078 #endif
 1079                         invlcaddr(PADDR1);
 1080                         PMAP1changed++;
 1081                 } else
 1082 #ifdef SMP
 1083                 if (PMAP1cpu != PCPU_GET(cpuid)) {
 1084                         PMAP1cpu = PCPU_GET(cpuid);
 1085                         invlcaddr(PADDR1);
 1086                         PMAP1changedcpu++;
 1087                 } else
 1088 #endif
 1089                         PMAP1unchanged++;
 1090                 return (PADDR1 + (i386_btop(va) & (NPTEPG - 1)));
 1091         }
 1092         return (0);
 1093 }
 1094 
 1095 /*
 1096  *      Routine:        pmap_extract
 1097  *      Function:
 1098  *              Extract the physical page address associated
 1099  *              with the given map/virtual_address pair.
 1100  */
 1101 vm_paddr_t 
 1102 pmap_extract(pmap_t pmap, vm_offset_t va)
 1103 {
 1104         vm_paddr_t rtval;
 1105         pt_entry_t *pte;
 1106         pd_entry_t pde;
 1107 
 1108         rtval = 0;
 1109         PMAP_LOCK(pmap);
 1110         pde = pmap->pm_pdir[va >> PDRSHIFT];
 1111         if (pde != 0) {
 1112                 if ((pde & PG_PS) != 0)
 1113                         rtval = (pde & PG_PS_FRAME) | (va & PDRMASK);
 1114                 else {
 1115                         pte = pmap_pte(pmap, va);
 1116                         rtval = (*pte & PG_FRAME) | (va & PAGE_MASK);
 1117                         pmap_pte_release(pte);
 1118                 }
 1119         }
 1120         PMAP_UNLOCK(pmap);
 1121         return (rtval);
 1122 }
 1123 
 1124 /*
 1125  *      Routine:        pmap_extract_and_hold
 1126  *      Function:
 1127  *              Atomically extract and hold the physical page
 1128  *              with the given pmap and virtual address pair
 1129  *              if that mapping permits the given protection.
 1130  */
 1131 vm_page_t
 1132 pmap_extract_and_hold(pmap_t pmap, vm_offset_t va, vm_prot_t prot)
 1133 {
 1134         pd_entry_t pde;
 1135         pt_entry_t pte;
 1136         vm_page_t m;
 1137 
 1138         m = NULL;
 1139         vm_page_lock_queues();
 1140         PMAP_LOCK(pmap);
 1141         pde = *pmap_pde(pmap, va);
 1142         if (pde != 0) {
 1143                 if (pde & PG_PS) {
 1144                         if ((pde & PG_RW) || (prot & VM_PROT_WRITE) == 0) {
 1145                                 m = PHYS_TO_VM_PAGE((pde & PG_PS_FRAME) |
 1146                                     (va & PDRMASK));
 1147                                 vm_page_hold(m);
 1148                         }
 1149                 } else {
 1150                         sched_pin();
 1151                         pte = *pmap_pte_quick(pmap, va);
 1152                         if (pte != 0 &&
 1153                             ((pte & PG_RW) || (prot & VM_PROT_WRITE) == 0)) {
 1154                                 m = PHYS_TO_VM_PAGE(pte & PG_FRAME);
 1155                                 vm_page_hold(m);
 1156                         }
 1157                         sched_unpin();
 1158                 }
 1159         }
 1160         vm_page_unlock_queues();
 1161         PMAP_UNLOCK(pmap);
 1162         return (m);
 1163 }
 1164 
 1165 /***************************************************
 1166  * Low level mapping routines.....
 1167  ***************************************************/
 1168 
 1169 /*
 1170  * Add a wired page to the kva.
 1171  * Note: not SMP coherent.
 1172  */
 1173 PMAP_INLINE void 
 1174 pmap_kenter(vm_offset_t va, vm_paddr_t pa)
 1175 {
 1176         pt_entry_t *pte;
 1177 
 1178         pte = vtopte(va);
 1179         pte_store(pte, pa | PG_RW | PG_V | pgeflag);
 1180 }
 1181 
 1182 static __inline void
 1183 pmap_kenter_attr(vm_offset_t va, vm_paddr_t pa, int mode)
 1184 {
 1185         pt_entry_t *pte;
 1186 
 1187         pte = vtopte(va);
 1188         pte_store(pte, pa | PG_RW | PG_V | pgeflag | pmap_cache_bits(mode, 0));
 1189 }
 1190 
 1191 /*
 1192  * Remove a page from the kernel pagetables.
 1193  * Note: not SMP coherent.
 1194  */
 1195 PMAP_INLINE void
 1196 pmap_kremove(vm_offset_t va)
 1197 {
 1198         pt_entry_t *pte;
 1199 
 1200         pte = vtopte(va);
 1201         pte_clear(pte);
 1202 }
 1203 
 1204 /*
 1205  *      Used to map a range of physical addresses into kernel
 1206  *      virtual address space.
 1207  *
 1208  *      The value passed in '*virt' is a suggested virtual address for
 1209  *      the mapping. Architectures which can support a direct-mapped
 1210  *      physical to virtual region can return the appropriate address
 1211  *      within that region, leaving '*virt' unchanged. Other
 1212  *      architectures should map the pages starting at '*virt' and
 1213  *      update '*virt' with the first usable address after the mapped
 1214  *      region.
 1215  */
 1216 vm_offset_t
 1217 pmap_map(vm_offset_t *virt, vm_paddr_t start, vm_paddr_t end, int prot)
 1218 {
 1219         vm_offset_t va, sva;
 1220 
 1221         va = sva = *virt;
 1222         while (start < end) {
 1223                 pmap_kenter(va, start);
 1224                 va += PAGE_SIZE;
 1225                 start += PAGE_SIZE;
 1226         }
 1227         pmap_invalidate_range(kernel_pmap, sva, va);
 1228         *virt = va;
 1229         return (sva);
 1230 }
 1231 
 1232 
 1233 /*
 1234  * Add a list of wired pages to the kva.
 1235  * This routine is only used for temporary
 1236  * kernel mappings that do not need to have
 1237  * page modification or references recorded.
 1238  * Note that old mappings are simply written
 1239  * over.  The page *must* be wired.
 1240  * Note: SMP coherent.  Uses a ranged shootdown IPI.
 1241  */
 1242 void
 1243 pmap_qenter(vm_offset_t sva, vm_page_t *ma, int count)
 1244 {
 1245         pt_entry_t *endpte, oldpte, *pte;
 1246 
 1247         oldpte = 0;
 1248         pte = vtopte(sva);
 1249         endpte = pte + count;
 1250         while (pte < endpte) {
 1251                 oldpte |= *pte;
 1252                 pte_store(pte, VM_PAGE_TO_PHYS(*ma) | pgeflag |
 1253                     pmap_cache_bits((*ma)->md.pat_mode, 0) | PG_RW | PG_V);
 1254                 pte++;
 1255                 ma++;
 1256         }
 1257         if ((oldpte & PG_V) != 0)
 1258                 pmap_invalidate_range(kernel_pmap, sva, sva + count *
 1259                     PAGE_SIZE);
 1260 }
 1261 
 1262 /*
 1263  * This routine tears out page mappings from the
 1264  * kernel -- it is meant only for temporary mappings.
 1265  * Note: SMP coherent.  Uses a ranged shootdown IPI.
 1266  */
 1267 void
 1268 pmap_qremove(vm_offset_t sva, int count)
 1269 {
 1270         vm_offset_t va;
 1271 
 1272         va = sva;
 1273         while (count-- > 0) {
 1274                 pmap_kremove(va);
 1275                 va += PAGE_SIZE;
 1276         }
 1277         pmap_invalidate_range(kernel_pmap, sva, va);
 1278 }
 1279 
 1280 /***************************************************
 1281  * Page table page management routines.....
 1282  ***************************************************/
 1283 static __inline void
 1284 pmap_free_zero_pages(vm_page_t free)
 1285 {
 1286         vm_page_t m;
 1287 
 1288         while (free != NULL) {
 1289                 m = free;
 1290                 free = m->right;
 1291                 /* Preserve the page's PG_ZERO setting. */
 1292                 vm_page_free_toq(m);
 1293         }
 1294 }
 1295 
 1296 /*
 1297  * Schedule the specified unused page table page to be freed.  Specifically,
 1298  * add the page to the specified list of pages that will be released to the
 1299  * physical memory manager after the TLB has been updated.
 1300  */
 1301 static __inline void
 1302 pmap_add_delayed_free_list(vm_page_t m, vm_page_t *free, boolean_t set_PG_ZERO)
 1303 {
 1304 
 1305         if (set_PG_ZERO)
 1306                 m->flags |= PG_ZERO;
 1307         else
 1308                 m->flags &= ~PG_ZERO;
 1309         m->right = *free;
 1310         *free = m;
 1311 }
 1312 
 1313 /*
 1314  * Inserts the specified page table page into the specified pmap's collection
 1315  * of idle page table pages.  Each of a pmap's page table pages is responsible
 1316  * for mapping a distinct range of virtual addresses.  The pmap's collection is
 1317  * ordered by this virtual address range.
 1318  */
 1319 static void
 1320 pmap_insert_pt_page(pmap_t pmap, vm_page_t mpte)
 1321 {
 1322         vm_page_t root;
 1323 
 1324         PMAP_LOCK_ASSERT(pmap, MA_OWNED);
 1325         root = pmap->pm_root;
 1326         if (root == NULL) {
 1327                 mpte->left = NULL;
 1328                 mpte->right = NULL;
 1329         } else {
 1330                 root = vm_page_splay(mpte->pindex, root);
 1331                 if (mpte->pindex < root->pindex) {
 1332                         mpte->left = root->left;
 1333                         mpte->right = root;
 1334                         root->left = NULL;
 1335                 } else if (mpte->pindex == root->pindex)
 1336                         panic("pmap_insert_pt_page: pindex already inserted");
 1337                 else {
 1338                         mpte->right = root->right;
 1339                         mpte->left = root;
 1340                         root->right = NULL;
 1341                 }
 1342         }
 1343         pmap->pm_root = mpte;
 1344 }
 1345 
 1346 /*
 1347  * Looks for a page table page mapping the specified virtual address in the
 1348  * specified pmap's collection of idle page table pages.  Returns NULL if there
 1349  * is no page table page corresponding to the specified virtual address.
 1350  */
 1351 static vm_page_t
 1352 pmap_lookup_pt_page(pmap_t pmap, vm_offset_t va)
 1353 {
 1354         vm_page_t mpte;
 1355         vm_pindex_t pindex = va >> PDRSHIFT;
 1356 
 1357         PMAP_LOCK_ASSERT(pmap, MA_OWNED);
 1358         if ((mpte = pmap->pm_root) != NULL && mpte->pindex != pindex) {
 1359                 mpte = vm_page_splay(pindex, mpte);
 1360                 if ((pmap->pm_root = mpte)->pindex != pindex)
 1361                         mpte = NULL;
 1362         }
 1363         return (mpte);
 1364 }
 1365 
 1366 /*
 1367  * Removes the specified page table page from the specified pmap's collection
 1368  * of idle page table pages.  The specified page table page must be a member of
 1369  * the pmap's collection.
 1370  */
 1371 static void
 1372 pmap_remove_pt_page(pmap_t pmap, vm_page_t mpte)
 1373 {
 1374         vm_page_t root;
 1375 
 1376         PMAP_LOCK_ASSERT(pmap, MA_OWNED);
 1377         if (mpte != pmap->pm_root)
 1378                 vm_page_splay(mpte->pindex, pmap->pm_root);
 1379         if (mpte->left == NULL)
 1380                 root = mpte->right;
 1381         else {
 1382                 root = vm_page_splay(mpte->pindex, mpte->left);
 1383                 root->right = mpte->right;
 1384         }
 1385         pmap->pm_root = root;
 1386 }
 1387 
 1388 /*
 1389  * This routine unholds page table pages, and if the hold count
 1390  * drops to zero, then it decrements the wire count.
 1391  */
 1392 static __inline int
 1393 pmap_unwire_pte_hold(pmap_t pmap, vm_page_t m, vm_page_t *free)
 1394 {
 1395 
 1396         --m->wire_count;
 1397         if (m->wire_count == 0)
 1398                 return _pmap_unwire_pte_hold(pmap, m, free);
 1399         else
 1400                 return 0;
 1401 }
 1402 
 1403 static int 
 1404 _pmap_unwire_pte_hold(pmap_t pmap, vm_page_t m, vm_page_t *free)
 1405 {
 1406         vm_offset_t pteva;
 1407 
 1408         /*
 1409          * unmap the page table page
 1410          */
 1411         pmap->pm_pdir[m->pindex] = 0;
 1412         --pmap->pm_stats.resident_count;
 1413 
 1414         /*
 1415          * This is a release store so that the ordinary store unmapping
 1416          * the page table page is globally performed before TLB shoot-
 1417          * down is begun.
 1418          */
 1419         atomic_subtract_rel_int(&cnt.v_wire_count, 1);
 1420 
 1421         /*
 1422          * Do an invltlb to make the invalidated mapping
 1423          * take effect immediately.
 1424          */
 1425         pteva = VM_MAXUSER_ADDRESS + i386_ptob(m->pindex);
 1426         pmap_invalidate_page(pmap, pteva);
 1427 
 1428         /* 
 1429          * Put page on a list so that it is released after
 1430          * *ALL* TLB shootdown is done
 1431          */
 1432         pmap_add_delayed_free_list(m, free, TRUE);
 1433 
 1434         return 1;
 1435 }
 1436 
 1437 /*
 1438  * After removing a page table entry, this routine is used to
 1439  * conditionally free the page, and manage the hold/wire counts.
 1440  */
 1441 static int
 1442 pmap_unuse_pt(pmap_t pmap, vm_offset_t va, vm_page_t *free)
 1443 {
 1444         pd_entry_t ptepde;
 1445         vm_page_t mpte;
 1446 
 1447         if (va >= VM_MAXUSER_ADDRESS)
 1448                 return 0;
 1449         ptepde = *pmap_pde(pmap, va);
 1450         mpte = PHYS_TO_VM_PAGE(ptepde & PG_FRAME);
 1451         return pmap_unwire_pte_hold(pmap, mpte, free);
 1452 }
 1453 
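      /*
       * Initialize the pmap belonging to process 0.  Unlike pmap_pinit(),
       * no page directory is allocated here; the boot-time page directory
       * (IdlePTD) is reused.
       */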
 1454 void
 1455 pmap_pinit0(pmap_t pmap)
 1456 {
 1457 
 1458         PMAP_LOCK_INIT(pmap);
 1459         pmap->pm_pdir = (pd_entry_t *)(KERNBASE + (vm_offset_t)IdlePTD);
 1460 #ifdef PAE
 1461         pmap->pm_pdpt = (pdpt_entry_t *)(KERNBASE + (vm_offset_t)IdlePDPT);
 1462 #endif
 1463         pmap->pm_root = NULL;
 1464         pmap->pm_active = 0;
 1465         PCPU_SET(curpmap, pmap);
 1466         TAILQ_INIT(&pmap->pm_pvchunk);
 1467         bzero(&pmap->pm_stats, sizeof pmap->pm_stats);
 1468         mtx_lock_spin(&allpmaps_lock);
 1469         LIST_INSERT_HEAD(&allpmaps, pmap, pm_list);
 1470         mtx_unlock_spin(&allpmaps_lock);
 1471 }
 1472 
 1473 /*
 1474  * Initialize a preallocated and zeroed pmap structure,
 1475  * such as one in a vmspace structure.
 1476  */
 1477 int
 1478 pmap_pinit(pmap_t pmap)
 1479 {
 1480         vm_page_t m, ptdpg[NPGPTD];
 1481         vm_paddr_t pa;
 1482         static int color;
 1483         int i;
 1484 
 1485         PMAP_LOCK_INIT(pmap);
 1486 
 1487         /*
 1488          * No need to allocate page table space yet but we do need a valid
 1489          * page directory table.
 1490          */
 1491         if (pmap->pm_pdir == NULL) {
 1492                 pmap->pm_pdir = (pd_entry_t *)kmem_alloc_nofault(kernel_map,
 1493                     NBPTD);
 1494 
 1495                 if (pmap->pm_pdir == NULL) {
 1496                         PMAP_LOCK_DESTROY(pmap);
 1497                         return (0);
 1498                 }
 1499 #ifdef PAE
 1500                 pmap->pm_pdpt = uma_zalloc(pdptzone, M_WAITOK | M_ZERO);
 1501                 KASSERT(((vm_offset_t)pmap->pm_pdpt &
 1502                     ((NPGPTD * sizeof(pdpt_entry_t)) - 1)) == 0,
 1503                     ("pmap_pinit: pdpt misaligned"));
 1504                 KASSERT(pmap_kextract((vm_offset_t)pmap->pm_pdpt) < (4ULL<<30),
 1505                     ("pmap_pinit: pdpt above 4g"));
 1506 #endif
 1507                 pmap->pm_root = NULL;
 1508         }
 1509         KASSERT(pmap->pm_root == NULL,
 1510             ("pmap_pinit: pmap has reserved page table page(s)"));
 1511 
 1512         /*
 1513          * allocate the page directory page(s)
 1514          */
 1515         for (i = 0; i < NPGPTD;) {
 1516                 m = vm_page_alloc(NULL, color++,
 1517                     VM_ALLOC_NORMAL | VM_ALLOC_NOOBJ | VM_ALLOC_WIRED |
 1518                     VM_ALLOC_ZERO);
 1519                 if (m == NULL)
 1520                         VM_WAIT;
 1521                 else {
 1522                         ptdpg[i++] = m;
 1523                 }
 1524         }
 1525 
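              /*
               * Map the page directory page(s) into the contiguous KVA range
               * allocated above so that pm_pdir can be indexed as one array.
               */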
 1526         pmap_qenter((vm_offset_t)pmap->pm_pdir, ptdpg, NPGPTD);
 1527 
 1528         for (i = 0; i < NPGPTD; i++) {
 1529                 if ((ptdpg[i]->flags & PG_ZERO) == 0)
 1530                         bzero(pmap->pm_pdir + (i * NPDEPG), PAGE_SIZE);
 1531         }
 1532 
 1533         mtx_lock_spin(&allpmaps_lock);
 1534         LIST_INSERT_HEAD(&allpmaps, pmap, pm_list);
 1535         mtx_unlock_spin(&allpmaps_lock);
 1536         /* Wire in kernel global address entries. */
 1537         bcopy(PTD + KPTDI, pmap->pm_pdir + KPTDI, nkpt * sizeof(pd_entry_t));
 1538 
 1539         /* install the self-referential (recursive) mapping entries */
 1540         for (i = 0; i < NPGPTD; i++) {
 1541                 pa = VM_PAGE_TO_PHYS(ptdpg[i]);
 1542                 pmap->pm_pdir[PTDPTDI + i] = pa | PG_V | PG_RW | PG_A | PG_M;
 1543 #ifdef PAE
 1544                 pmap->pm_pdpt[i] = pa | PG_V;
 1545 #endif
 1546         }
 1547 
 1548         pmap->pm_active = 0;
 1549         TAILQ_INIT(&pmap->pm_pvchunk);
 1550         bzero(&pmap->pm_stats, sizeof pmap->pm_stats);
 1551 
 1552         return (1);
 1553 }
 1554 
 1555 /*
 1556  * This routine is called when no page table page is mapped for the
 1557  * given page table page index; it allocates and installs one.
 1558  */
 1559 static vm_page_t
 1560 _pmap_allocpte(pmap_t pmap, unsigned ptepindex, int flags)
 1561 {
 1562         vm_paddr_t ptepa;
 1563         vm_page_t m;
 1564 
 1565         KASSERT((flags & (M_NOWAIT | M_WAITOK)) == M_NOWAIT ||
 1566             (flags & (M_NOWAIT | M_WAITOK)) == M_WAITOK,
 1567             ("_pmap_allocpte: flags is neither M_NOWAIT nor M_WAITOK"));
 1568 
 1569         /*
 1570          * Allocate a page table page.
 1571          */
 1572         if ((m = vm_page_alloc(NULL, ptepindex, VM_ALLOC_NOOBJ |
 1573             VM_ALLOC_WIRED | VM_ALLOC_ZERO)) == NULL) {
 1574                 if (flags & M_WAITOK) {
 1575                         PMAP_UNLOCK(pmap);
 1576                         vm_page_unlock_queues();
 1577                         VM_WAIT;
 1578                         vm_page_lock_queues();
 1579                         PMAP_LOCK(pmap);
 1580                 }
 1581 
 1582                 /*
 1583                  * Indicate the need to retry.  While waiting, the page table
 1584                  * page may have been allocated.
 1585                  */
 1586                 return (NULL);
 1587         }
 1588         if ((m->flags & PG_ZERO) == 0)
 1589                 pmap_zero_page(m);
 1590 
 1591         /*
 1592          * Map the pagetable page into the process address space, if
 1593          * it isn't already there.
 1594          */
 1595 
 1596         pmap->pm_stats.resident_count++;
 1597 
 1598         ptepa = VM_PAGE_TO_PHYS(m);
 1599         pmap->pm_pdir[ptepindex] =
 1600                 (pd_entry_t) (ptepa | PG_U | PG_RW | PG_V | PG_A | PG_M);
 1601 
 1602         return m;
 1603 }
 1604 
 1605 static vm_page_t
 1606 pmap_allocpte(pmap_t pmap, vm_offset_t va, int flags)
 1607 {
 1608         unsigned ptepindex;
 1609         pd_entry_t ptepa;
 1610         vm_page_t m;
 1611 
 1612         KASSERT((flags & (M_NOWAIT | M_WAITOK)) == M_NOWAIT ||
 1613             (flags & (M_NOWAIT | M_WAITOK)) == M_WAITOK,
 1614             ("pmap_allocpte: flags is neither M_NOWAIT nor M_WAITOK"));
 1615 
 1616         /*
 1617          * Calculate pagetable page index
 1618          */
 1619         ptepindex = va >> PDRSHIFT;
 1620 retry:
 1621         /*
 1622          * Get the page directory entry
 1623          */
 1624         ptepa = pmap->pm_pdir[ptepindex];
 1625 
 1626         /*
 1627          * This supports switching from a 4MB page to a
 1628          * normal 4K page.
 1629          */
 1630         if (ptepa & PG_PS) {
 1631                 (void)pmap_demote_pde(pmap, &pmap->pm_pdir[ptepindex], va);
 1632                 ptepa = pmap->pm_pdir[ptepindex];
 1633         }
 1634 
 1635         /*
 1636          * If the page table page is already mapped, just increment its
 1637          * wire count to record the additional reference.
 1638          */
 1639         if (ptepa) {
 1640                 m = PHYS_TO_VM_PAGE(ptepa & PG_FRAME);
 1641                 m->wire_count++;
 1642         } else {
 1643                 /*
 1644                  * We get here if the page table page isn't mapped or
 1645                  * has been deallocated; allocate a new one.
 1646                  */
 1647                 m = _pmap_allocpte(pmap, ptepindex, flags);
 1648                 if (m == NULL && (flags & M_WAITOK))
 1649                         goto retry;
 1650         }
 1651         return (m);
 1652 }
 1653 
 1654 
 1655 /***************************************************
 1656  * Pmap allocation/deallocation routines.
 1657  ***************************************************/
 1658 
 1659 #ifdef SMP
 1660 /*
 1661  * Deal with an SMP shootdown of other users of the pmap that we are
 1662  * trying to dispose of.  This can be a bit hairy.
 1663  */
 1664 static cpumask_t *lazymask;
 1665 static u_int lazyptd;
 1666 static volatile u_int lazywait;
 1667 
 1668 void pmap_lazyfix_action(void);
 1669 
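      /*
       * IPI handler for the lazy pmap shootdown: if this CPU is still running
       * on the page tables of the pmap being destroyed, switch to the current
       * thread's page tables, then clear this CPU's bit in the pmap's active
       * mask and signal the initiating CPU.
       */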
 1670 void
 1671 pmap_lazyfix_action(void)
 1672 {
 1673         cpumask_t mymask = PCPU_GET(cpumask);
 1674 
 1675 #ifdef COUNT_IPIS
 1676         (*ipi_lazypmap_counts[PCPU_GET(cpuid)])++;
 1677 #endif
 1678         if (rcr3() == lazyptd)
 1679                 load_cr3(PCPU_GET(curpcb)->pcb_cr3);
 1680         atomic_clear_int(lazymask, mymask);
 1681         atomic_store_rel_int(&lazywait, 1);
 1682 }
 1683 
 1684 static void
 1685 pmap_lazyfix_self(cpumask_t mymask)
 1686 {
 1687 
 1688         if (rcr3() == lazyptd)
 1689                 load_cr3(PCPU_GET(curpcb)->pcb_cr3);
 1690         atomic_clear_int(lazymask, mymask);
 1691 }
 1692 
 1693 
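      /*
       * Strip the dying pmap from every CPU that still has it marked active,
       * one CPU at a time, so that no CPU's CR3 references its page directory
       * by the time the pmap is released.
       */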
 1694 static void
 1695 pmap_lazyfix(pmap_t pmap)
 1696 {
 1697         cpumask_t mymask, mask;
 1698         u_int spins;
 1699 
 1700         while ((mask = pmap->pm_active) != 0) {
 1701                 spins = 50000000;
 1702                 mask = mask & -mask;    /* Find least significant set bit */
 1703                 mtx_lock_spin(&smp_ipi_mtx);
 1704 #ifdef PAE
 1705                 lazyptd = vtophys(pmap->pm_pdpt);
 1706 #else
 1707                 lazyptd = vtophys(pmap->pm_pdir);
 1708 #endif
 1709                 mymask = PCPU_GET(cpumask);
 1710                 if (mask == mymask) {
 1711                         lazymask = &pmap->pm_active;
 1712                         pmap_lazyfix_self(mymask);
 1713                 } else {
 1714                         atomic_store_rel_int((u_int *)&lazymask,
 1715                             (u_int)&pmap->pm_active);
 1716                         atomic_store_rel_int(&lazywait, 0);
 1717                         ipi_selected(mask, IPI_LAZYPMAP);
 1718                         while (lazywait == 0) {
 1719                                 ia32_pause();
 1720                                 if (--spins == 0)
 1721                                         break;
 1722                         }
 1723                 }
 1724                 mtx_unlock_spin(&smp_ipi_mtx);
 1725                 if (spins == 0)
 1726                         printf("pmap_lazyfix: spun for 50000000\n");
 1727         }
 1728 }
 1729 
 1730 #else   /* SMP */
 1731 
 1732 /*
 1733  * Cleaning up on a uniprocessor is easy.  For various reasons we are
 1734  * unlikely ever to execute this code; one is that the cleanup is
 1735  * deferred until the parent does a wait(2), by which point another
 1736  * userland process has run.
 1737  */
 1738 static void
 1739 pmap_lazyfix(pmap_t pmap)
 1740 {
 1741         u_int cr3;
 1742 
 1743         cr3 = vtophys(pmap->pm_pdir);
 1744         if (cr3 == rcr3()) {
 1745                 load_cr3(PCPU_GET(curpcb)->pcb_cr3);
 1746                 pmap->pm_active &= ~(PCPU_GET(cpumask));
 1747         }
 1748 }
 1749 #endif  /* SMP */
 1750 
 1751 /*
 1752  * Release any resources held by the given physical map.
 1753  * Called when a pmap initialized by pmap_pinit is being released.
 1754  * Should only be called if the map contains no valid mappings.
 1755  */
 1756 void
 1757 pmap_release(pmap_t pmap)
 1758 {
 1759         vm_page_t m, ptdpg[NPGPTD];
 1760         int i;
 1761 
 1762         KASSERT(pmap->pm_stats.resident_count == 0,
 1763             ("pmap_release: pmap resident count %ld != 0",
 1764             pmap->pm_stats.resident_count));
 1765         KASSERT(pmap->pm_root == NULL,
 1766             ("pmap_release: pmap has reserved page table page(s)"));
 1767 
 1768         pmap_lazyfix(pmap);
 1769         mtx_lock_spin(&allpmaps_lock);
 1770         LIST_REMOVE(pmap, pm_list);
 1771         mtx_unlock_spin(&allpmaps_lock);
 1772 
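              /*
               * Recover the vm_page structures for the page directory page(s)
               * from the physical addresses stored in the recursive mapping
               * slots, before those slots are zeroed below.
               */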
 1773         for (i = 0; i < NPGPTD; i++)
 1774                 ptdpg[i] = PHYS_TO_VM_PAGE(pmap->pm_pdir[PTDPTDI + i] &
 1775                     PG_FRAME);
 1776 
 1777         bzero(pmap->pm_pdir + PTDPTDI, (nkpt + NPGPTD) *
 1778             sizeof(*pmap->pm_pdir));
 1779 
 1780         pmap_qremove((vm_offset_t)pmap->pm_pdir, NPGPTD);
 1781 
 1782         for (i = 0; i < NPGPTD; i++) {
 1783                 m = ptdpg[i];
 1784 #ifdef PAE
 1785                 KASSERT(VM_PAGE_TO_PHYS(m) == (pmap->pm_pdpt[i] & PG_FRAME),
 1786                     ("pmap_release: got wrong ptd page"));
 1787 #endif
 1788                 m->wire_count--;
 1789                 atomic_subtract_int(&cnt.v_wire_count, 1);
 1790                 vm_page_free_zero(m);
 1791         }
 1792         PMAP_LOCK_DESTROY(pmap);
 1793 }
 1794 
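      /*
       * Sysctl handlers reporting the total size of the kernel virtual
       * address space and the portion beyond the current end of the kernel
       * page tables.
       */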
 1795 static int
 1796 kvm_size(SYSCTL_HANDLER_ARGS)
 1797 {
 1798         unsigned long ksize = VM_MAX_KERNEL_ADDRESS - KERNBASE;
 1799 
 1800         return sysctl_handle_long(oidp, &ksize, 0, req);
 1801 }
 1802 SYSCTL_PROC(_vm, OID_AUTO, kvm_size, CTLTYPE_LONG|CTLFLAG_RD, 
 1803     0, 0, kvm_size, "IU", "Size of KVM");
 1804 
 1805 static int
 1806 kvm_free(SYSCTL_HANDLER_ARGS)
 1807 {
 1808         unsigned long kfree = VM_MAX_KERNEL_ADDRESS - kernel_vm_end;
 1809 
 1810         return sysctl_handle_long(oidp, &kfree, 0, req);
 1811 }
 1812 SYSCTL_PROC(_vm, OID_AUTO, kvm_free, CTLTYPE_LONG|CTLFLAG_RD, 
 1813     0, 0, kvm_free, "IU", "Amount of KVM free");
 1814 
 1815 /*
 1816  * grow the number of kernel page table entries, if needed
 1817  */
 1818 void
 1819 pmap_growkernel(vm_offset_t addr)
 1820 {
 1821         struct pmap *pmap;
 1822         vm_paddr_t ptppaddr;
 1823         vm_page_t nkpg;
 1824         pd_entry_t newpdir;
 1825         pt_entry_t *pde;
 1826 
 1827         mtx_assert(&kernel_map->system_mtx, MA_OWNED);
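              /*
               * On the first call, determine how much kernel address space
               * the bootstrap code has already mapped and initialize
               * kernel_vm_end and nkpt to match.
               */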
 1828         if (kernel_vm_end == 0) {
 1829                 kernel_vm_end = KERNBASE;
 1830                 nkpt = 0;
 1831                 while (pdir_pde(PTD, kernel_vm_end)) {
 1832                         kernel_vm_end = (kernel_vm_end + PAGE_SIZE * NPTEPG) & ~(PAGE_SIZE * NPTEPG - 1);
 1833                         nkpt++;
 1834                         if (kernel_vm_end - 1 >= kernel_map->max_offset) {
 1835                                 kernel_vm_end = kernel_map->max_offset;
 1836                                 break;
 1837                         }
 1838                 }
 1839         }
 1840         addr = roundup2(addr, PAGE_SIZE * NPTEPG);
 1841         if (addr - 1 >= kernel_map->max_offset)
 1842                 addr = kernel_map->max_offset;
 1843         while (kernel_vm_end < addr) {
 1844                 if (pdir_pde(PTD, kernel_vm_end)) {
 1845                         kernel_vm_end = (kernel_vm_end + PAGE_SIZE * NPTEPG) & ~(PAGE_SIZE * NPTEPG - 1);
 1846                         if (kernel_vm_end - 1 >= kernel_map->max_offset) {
 1847                                 kernel_vm_end = kernel_map->max_offset;
 1848                                 break;
 1849                         }
 1850                         continue;
 1851                 }
 1852 
 1853                 nkpg = vm_page_alloc(NULL, kernel_vm_end >> PDRSHIFT,
 1854                     VM_ALLOC_INTERRUPT | VM_ALLOC_NOOBJ | VM_ALLOC_WIRED |
 1855                     VM_ALLOC_ZERO);
 1856                 if (nkpg == NULL)
 1857                         panic("pmap_growkernel: no memory to grow kernel");
 1858 
 1859                 nkpt++;
 1860 
 1861                 if ((nkpg->flags & PG_ZERO) == 0)
 1862                         pmap_zero_page(nkpg);
 1863                 ptppaddr = VM_PAGE_TO_PHYS(nkpg);
 1864                 newpdir = (pd_entry_t) (ptppaddr | PG_V | PG_RW | PG_A | PG_M);
 1865                 pdir_pde(PTD, kernel_vm_end) = newpdir;
 1866 
 1867                 mtx_lock_spin(&allpmaps_lock);
 1868                 LIST_FOREACH(pmap, &allpmaps, pm_list) {
 1869                         pde = pmap_pde(pmap, kernel_vm_end);
 1870                         pde_store(pde, newpdir);
 1871                 }
 1872                 mtx_unlock_spin(&allpmaps_lock);
 1873                 kernel_vm_end = (kernel_vm_end + PAGE_SIZE * NPTEPG) & ~(PAGE_SIZE * NPTEPG - 1);
 1874                 if (kernel_vm_end - 1 >= kernel_map->max_offset) {
 1875                         kernel_vm_end = kernel_map->max_offset;
 1876                         break;
 1877                 }
 1878         }
 1879 }
 1880 
 1881 
 1882 /***************************************************
 1883  * page management routines.
 1884  ***************************************************/
 1885 
 1886 CTASSERT(sizeof(struct pv_chunk) == PAGE_SIZE);
 1887 CTASSERT(_NPCM == 11);
 1888 
 1889 static __inline struct pv_chunk *
 1890 pv_to_chunk(pv_entry_t pv)
 1891 {
 1892 
 1893         return (struct pv_chunk *)((uintptr_t)pv & ~(uintptr_t)PAGE_MASK);
 1894 }
 1895 
 1896 #define PV_PMAP(pv) (pv_to_chunk(pv)->pc_pmap)
 1897 
 1898 #define PC_FREE0_9      0xfffffffful    /* Free values for index 0 through 9 */
 1899 #define PC_FREE10       0x0000fffful    /* Free values for index 10 */
 1900 
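      /*
       * pc_freemask[] is the bitmap of a pv chunk in which every entry is
       * free.  A chunk occupies one page and holds _NPCPV pv entries, so
       * only the low 16 bits of the last word are used (10 * 32 + 16 ==
       * _NPCPV).
       */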
 1901 static uint32_t pc_freemask[11] = {
 1902         PC_FREE0_9, PC_FREE0_9, PC_FREE0_9,
 1903         PC_FREE0_9, PC_FREE0_9, PC_FREE0_9,
 1904         PC_FREE0_9, PC_FREE0_9, PC_FREE0_9,
 1905         PC_FREE0_9, PC_FREE10
 1906 };
 1907 
 1908 SYSCTL_INT(_vm_pmap, OID_AUTO, pv_entry_count, CTLFLAG_RD, &pv_entry_count, 0,
 1909         "Current number of pv entries");
 1910 
 1911 #ifdef PV_STATS
 1912 static int pc_chunk_count, pc_chunk_allocs, pc_chunk_frees, pc_chunk_tryfail;
 1913 
 1914 SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_count, CTLFLAG_RD, &pc_chunk_count, 0,
 1915         "Current number of pv entry chunks");
 1916 SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_allocs, CTLFLAG_RD, &pc_chunk_allocs, 0,
 1917         "Current number of pv entry chunks allocated");
 1918 SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_frees, CTLFLAG_RD, &pc_chunk_frees, 0,
 1919         "Current number of pv entry chunks frees");
 1920 SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_tryfail, CTLFLAG_RD, &pc_chunk_tryfail, 0,
 1921         "Number of times tried to get a chunk page but failed.");
 1922 
 1923 static long pv_entry_frees, pv_entry_allocs;
 1924 static int pv_entry_spare;
 1925 
 1926 SYSCTL_LONG(_vm_pmap, OID_AUTO, pv_entry_frees, CTLFLAG_RD, &pv_entry_frees, 0,
 1927         "Current number of pv entry frees");
 1928 SYSCTL_LONG(_vm_pmap, OID_AUTO, pv_entry_allocs, CTLFLAG_RD, &pv_entry_allocs, 0,
 1929         "Current number of pv entry allocs");
 1930 SYSCTL_INT(_vm_pmap, OID_AUTO, pv_entry_spare, CTLFLAG_RD, &pv_entry_spare, 0,
 1931         "Current number of spare pv entries");
 1932 
 1933 static int pmap_collect_inactive, pmap_collect_active;
 1934 
 1935 SYSCTL_INT(_vm_pmap, OID_AUTO, pmap_collect_inactive, CTLFLAG_RD, &pmap_collect_inactive, 0,
 1936         "Current number times pmap_collect called on inactive queue");
 1937 SYSCTL_INT(_vm_pmap, OID_AUTO, pmap_collect_active, CTLFLAG_RD, &pmap_collect_active, 0,
 1938         "Current number times pmap_collect called on active queue");
 1939 #endif
 1940 
 1941 /*
 1942  * We are in a serious low memory condition.  Resort to
 1943  * drastic measures to free some pages so we can allocate
 1944  * another pv entry chunk.  This is normally called to
 1945  * unmap inactive pages, and if necessary, active pages.
 1946  */
 1947 static void
 1948 pmap_collect(pmap_t locked_pmap, struct vpgqueues *vpq)
 1949 {
 1950         struct md_page *pvh;
 1951         pd_entry_t *pde;
 1952         pmap_t pmap;
 1953         pt_entry_t *pte, tpte;
 1954         pv_entry_t next_pv, pv;
 1955         vm_offset_t va;
 1956         vm_page_t m, free;
 1957 
 1958         sched_pin();
 1959         TAILQ_FOREACH(m, &vpq->pl, pageq) {
 1960                 if (m->hold_count || m->busy)
 1961                         continue;
 1962                 TAILQ_FOREACH_SAFE(pv, &m->md.pv_list, pv_list, next_pv) {
 1963                         va = pv->pv_va;
 1964                         pmap = PV_PMAP(pv);
 1965                         /* Avoid deadlock and lock recursion. */
 1966                         if (pmap > locked_pmap)
 1967                                 PMAP_LOCK(pmap);
 1968                         else if (pmap != locked_pmap && !PMAP_TRYLOCK(pmap))
 1969                                 continue;
 1970                         pmap->pm_stats.resident_count--;
 1971                         pde = pmap_pde(pmap, va);
 1972                         KASSERT((*pde & PG_PS) == 0, ("pmap_collect: found"
 1973                             " a 4mpage in page %p's pv list", m));
 1974                         pte = pmap_pte_quick(pmap, va);
 1975                         tpte = pte_load_clear(pte);
 1976                         KASSERT((tpte & PG_W) == 0,
 1977                             ("pmap_collect: wired pte %#jx", (uintmax_t)tpte));
 1978                         if (tpte & PG_A)
 1979                                 vm_page_flag_set(m, PG_REFERENCED);
 1980                         if ((tpte & (PG_M | PG_RW)) == (PG_M | PG_RW))
 1981                                 vm_page_dirty(m);
 1982                         free = NULL;
 1983                         pmap_unuse_pt(pmap, va, &free);
 1984                         pmap_invalidate_page(pmap, va);
 1985                         pmap_free_zero_pages(free);
 1986                         TAILQ_REMOVE(&m->md.pv_list, pv, pv_list);
 1987                         if (TAILQ_EMPTY(&m->md.pv_list)) {
 1988                                 pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
 1989                                 if (TAILQ_EMPTY(&pvh->pv_list))
 1990                                         vm_page_flag_clear(m, PG_WRITEABLE);
 1991                         }
 1992                         free_pv_entry(pmap, pv);
 1993                         if (pmap != locked_pmap)
 1994                                 PMAP_UNLOCK(pmap);
 1995                 }
 1996         }
 1997         sched_unpin();
 1998 }
 1999 
 2000 
 2001 /*
 2002  * free the pv_entry back to its pv chunk; free the chunk itself once all of its entries are free
 2003  */
 2004 static void
 2005 free_pv_entry(pmap_t pmap, pv_entry_t pv)
 2006 {
 2007         vm_page_t m;
 2008         struct pv_chunk *pc;
 2009         int idx, field, bit;
 2010 
 2011         mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 2012         PMAP_LOCK_ASSERT(pmap, MA_OWNED);
 2013         PV_STAT(pv_entry_frees++);
 2014         PV_STAT(pv_entry_spare++);
 2015         pv_entry_count--;
 2016         pc = pv_to_chunk(pv);
 2017         idx = pv - &pc->pc_pventry[0];
 2018         field = idx / 32;
 2019         bit = idx % 32;
 2020         pc->pc_map[field] |= 1ul << bit;
 2021         /* move to head of list */
 2022         TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
 2023         for (idx = 0; idx < _NPCM; idx++)
 2024                 if (pc->pc_map[idx] != pc_freemask[idx]) {
 2025                         TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc, pc_list);
 2026                         return;
 2027                 }
 2028         PV_STAT(pv_entry_spare -= _NPCPV);
 2029         PV_STAT(pc_chunk_count--);
 2030         PV_STAT(pc_chunk_frees++);
 2031         /* entire chunk is free, return it */
 2032         m = PHYS_TO_VM_PAGE(pmap_kextract((vm_offset_t)pc));
 2033         pmap_qremove((vm_offset_t)pc, 1);
 2034         vm_page_unwire(m, 0);
 2035         vm_page_free(m);
 2036         pmap_ptelist_free(&pv_vafree, (vm_offset_t)pc);
 2037 }
 2038 
 2039 /*
 2040  * get a new pv_entry, allocating a block from the system
 2041  * when needed.
 2042  */
 2043 static pv_entry_t
 2044 get_pv_entry(pmap_t pmap, int try)
 2045 {
 2046         static const struct timeval printinterval = { 60, 0 };
 2047         static struct timeval lastprint;
 2048         static vm_pindex_t colour;
 2049         struct vpgqueues *pq;
 2050         int bit, field;
 2051         pv_entry_t pv;
 2052         struct pv_chunk *pc;
 2053         vm_page_t m;
 2054 
 2055         PMAP_LOCK_ASSERT(pmap, MA_OWNED);
 2056         mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 2057         PV_STAT(pv_entry_allocs++);
 2058         pv_entry_count++;
 2059         if (pv_entry_count > pv_entry_high_water)
 2060                 if (ratecheck(&lastprint, &printinterval))
 2061                         printf("Approaching the limit on PV entries, consider "
 2062                             "increasing either the vm.pmap.shpgperproc or the "
 2063                             "vm.pmap.pv_entry_max tunable.\n");
 2064         pq = NULL;
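              /*
               * pq records which page queue pmap_collect() has been tried on
               * so far: NULL (none yet), then the inactive queue, then the
               * active queue.
               */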
 2065 retry:
 2066         pc = TAILQ_FIRST(&pmap->pm_pvchunk);
 2067         if (pc != NULL) {
 2068                 for (field = 0; field < _NPCM; field++) {
 2069                         if (pc->pc_map[field]) {
 2070                                 bit = bsfl(pc->pc_map[field]);
 2071                                 break;
 2072                         }
 2073                 }
 2074                 if (field < _NPCM) {
 2075                         pv = &pc->pc_pventry[field * 32 + bit];
 2076                         pc->pc_map[field] &= ~(1ul << bit);
 2077                         /* If this was the last item, move it to tail */
 2078                         for (field = 0; field < _NPCM; field++)
 2079                                 if (pc->pc_map[field] != 0) {
 2080                                         PV_STAT(pv_entry_spare--);
 2081                                         return (pv);    /* not full, return */
 2082                                 }
 2083                         TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
 2084                         TAILQ_INSERT_TAIL(&pmap->pm_pvchunk, pc, pc_list);
 2085                         PV_STAT(pv_entry_spare--);
 2086                         return (pv);
 2087                 }
 2088         }
 2089         /*
 2090          * Access to the ptelist "pv_vafree" is synchronized by the page
 2091          * queues lock.  If "pv_vafree" is currently non-empty, it will
 2092          * remain non-empty until pmap_ptelist_alloc() completes.
 2093          */
 2094         if (pv_vafree == 0 || (m = vm_page_alloc(NULL, colour, (pq ==
 2095             &vm_page_queues[PQ_ACTIVE] ? VM_ALLOC_SYSTEM : VM_ALLOC_NORMAL) |
 2096             VM_ALLOC_NOOBJ | VM_ALLOC_WIRED)) == NULL) {
 2097                 if (try) {
 2098                         pv_entry_count--;
 2099                         PV_STAT(pc_chunk_tryfail++);
 2100                         return (NULL);
 2101                 }
 2102                 /*
 2103                  * Reclaim pv entries: At first, destroy mappings to
 2104                  * inactive pages.  After that, if a pv chunk entry
 2105                  * is still needed, destroy mappings to active pages.
 2106                  */
 2107                 if (pq == NULL) {
 2108                         PV_STAT(pmap_collect_inactive++);
 2109                         pq = &vm_page_queues[PQ_INACTIVE];
 2110                 } else if (pq == &vm_page_queues[PQ_INACTIVE]) {
 2111                         PV_STAT(pmap_collect_active++);
 2112                         pq = &vm_page_queues[PQ_ACTIVE];
 2113                 } else
 2114                         panic("get_pv_entry: increase vm.pmap.shpgperproc");
 2115                 pmap_collect(pmap, pq);
 2116                 goto retry;
 2117         }
 2118         PV_STAT(pc_chunk_count++);
 2119         PV_STAT(pc_chunk_allocs++);
 2120         colour++;
 2121         pc = (struct pv_chunk *)pmap_ptelist_alloc(&pv_vafree);
 2122         pmap_qenter((vm_offset_t)pc, &m, 1);
 2123         pc->pc_pmap = pmap;
 2124         pc->pc_map[0] = pc_freemask[0] & ~1ul;  /* preallocated bit 0 */
 2125         for (field = 1; field < _NPCM; field++)
 2126                 pc->pc_map[field] = pc_freemask[field];
 2127         pv = &pc->pc_pventry[0];
 2128         TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc, pc_list);
 2129         PV_STAT(pv_entry_spare += _NPCPV - 1);
 2130         return (pv);
 2131 }
 2132 
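      /*
       * Removes and returns the pv entry for the specified (pmap, va) from
       * the given pv list, or returns NULL if no such entry exists.
       */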
 2133 static __inline pv_entry_t
 2134 pmap_pvh_remove(struct md_page *pvh, pmap_t pmap, vm_offset_t va)
 2135 {
 2136         pv_entry_t pv;
 2137 
 2138         mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 2139         TAILQ_FOREACH(pv, &pvh->pv_list, pv_list) {
 2140                 if (pmap == PV_PMAP(pv) && va == pv->pv_va) {
 2141                         TAILQ_REMOVE(&pvh->pv_list, pv, pv_list);
 2142                         break;
 2143                 }
 2144         }
 2145         return (pv);
 2146 }
 2147 
 2148 static void
 2149 pmap_pv_demote_pde(pmap_t pmap, vm_offset_t va, vm_paddr_t pa)
 2150 {
 2151         struct md_page *pvh;
 2152         pv_entry_t pv;
 2153         vm_offset_t va_last;
 2154         vm_page_t m;
 2155 
 2156         mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 2157         KASSERT((pa & PDRMASK) == 0,
 2158             ("pmap_pv_demote_pde: pa is not 4mpage aligned"));
 2159 
 2160         /*
 2161          * Transfer the 4mpage's pv entry for this mapping to the first
 2162          * page's pv list.
 2163          */
 2164         pvh = pa_to_pvh(pa);
 2165         va = trunc_4mpage(va);
 2166         pv = pmap_pvh_remove(pvh, pmap, va);
 2167         KASSERT(pv != NULL, ("pmap_pv_demote_pde: pv not found"));
 2168         m = PHYS_TO_VM_PAGE(pa);
 2169         TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_list);
 2170         /* Instantiate the remaining NPTEPG - 1 pv entries. */
 2171         va_last = va + NBPDR - PAGE_SIZE;
 2172         do {
 2173                 m++;
 2174                 KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
 2175                     ("pmap_pv_demote_pde: page %p is not managed", m));
 2176                 va += PAGE_SIZE;
 2177                 pmap_insert_entry(pmap, va, m);
 2178         } while (va < va_last);
 2179 }
 2180 
 2181 static void
 2182 pmap_pv_promote_pde(pmap_t pmap, vm_offset_t va, vm_paddr_t pa)
 2183 {
 2184         struct md_page *pvh;
 2185         pv_entry_t pv;
 2186         vm_offset_t va_last;
 2187         vm_page_t m;
 2188 
 2189         mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 2190         KASSERT((pa & PDRMASK) == 0,
 2191             ("pmap_pv_promote_pde: pa is not 4mpage aligned"));
 2192 
 2193         /*
 2194          * Transfer the first page's pv entry for this mapping to the
 2195          * 4mpage's pv list.  Aside from avoiding the cost of a call
 2196          * to get_pv_entry(), a transfer avoids the possibility that
 2197          * get_pv_entry() calls pmap_collect() and that pmap_collect()
 2198          * removes one of the mappings that is being promoted.
 2199          */
 2200         m = PHYS_TO_VM_PAGE(pa);
 2201         va = trunc_4mpage(va);
 2202         pv = pmap_pvh_remove(&m->md, pmap, va);
 2203         KASSERT(pv != NULL, ("pmap_pv_promote_pde: pv not found"));
 2204         pvh = pa_to_pvh(pa);
 2205         TAILQ_INSERT_TAIL(&pvh->pv_list, pv, pv_list);
 2206         /* Free the remaining NPTEPG - 1 pv entries. */
 2207         va_last = va + NBPDR - PAGE_SIZE;
 2208         do {
 2209                 m++;
 2210                 va += PAGE_SIZE;
 2211                 pmap_pvh_free(&m->md, pmap, va);
 2212         } while (va < va_last);
 2213 }
 2214 
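      /*
       * Removes the pv entry for the specified (pmap, va) from the given pv
       * list and frees it.  The entry must exist.
       */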
 2215 static void
 2216 pmap_pvh_free(struct md_page *pvh, pmap_t pmap, vm_offset_t va)
 2217 {
 2218         pv_entry_t pv;
 2219 
 2220         pv = pmap_pvh_remove(pvh, pmap, va);
 2221         KASSERT(pv != NULL, ("pmap_pvh_free: pv not found"));
 2222         free_pv_entry(pmap, pv);
 2223 }
 2224 
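      /*
       * Destroys the pv entry for a 4KB page mapping and clears PG_WRITEABLE
       * on the page once neither the page itself nor the superpage containing
       * it has any managed mappings left.
       */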
 2225 static void
 2226 pmap_remove_entry(pmap_t pmap, vm_page_t m, vm_offset_t va)
 2227 {
 2228         struct md_page *pvh;
 2229 
 2230         mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 2231         pmap_pvh_free(&m->md, pmap, va);
 2232         if (TAILQ_EMPTY(&m->md.pv_list)) {
 2233                 pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
 2234                 if (TAILQ_EMPTY(&pvh->pv_list))
 2235                         vm_page_flag_clear(m, PG_WRITEABLE);
 2236         }
 2237 }
 2238 
 2239 /*
 2240  * Create a pv entry recording that page m is mapped at
 2241  * (pmap, va).
 2242  */
 2243 static void
 2244 pmap_insert_entry(pmap_t pmap, vm_offset_t va, vm_page_t m)
 2245 {
 2246         pv_entry_t pv;
 2247 
 2248         PMAP_LOCK_ASSERT(pmap, MA_OWNED);
 2249         mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 2250         pv = get_pv_entry(pmap, FALSE);
 2251         pv->pv_va = va;
 2252         TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_list);
 2253 }
 2254 
 2255 /*
 2256  * Conditionally create a pv entry.
 2257  */
 2258 static boolean_t
 2259 pmap_try_insert_pv_entry(pmap_t pmap, vm_offset_t va, vm_page_t m)
 2260 {
 2261         pv_entry_t pv;
 2262 
 2263         PMAP_LOCK_ASSERT(pmap, MA_OWNED);
 2264         mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 2265         if (pv_entry_count < pv_entry_high_water && 
 2266             (pv = get_pv_entry(pmap, TRUE)) != NULL) {
 2267                 pv->pv_va = va;
 2268                 TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_list);
 2269                 return (TRUE);
 2270         } else
 2271                 return (FALSE);
 2272 }
 2273 
 2274 /*
 2275  * Conditionally create a pv entry for a 2- or 4MB page mapping.
 2276  */
 2277 static boolean_t
 2278 pmap_pv_insert_pde(pmap_t pmap, vm_offset_t va, vm_paddr_t pa)
 2279 {
 2280         struct md_page *pvh;
 2281         pv_entry_t pv;
 2282 
 2283         mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 2284         if (pv_entry_count < pv_entry_high_water && 
 2285             (pv = get_pv_entry(pmap, TRUE)) != NULL) {
 2286                 pv->pv_va = va;
 2287                 pvh = pa_to_pvh(pa);
 2288                 TAILQ_INSERT_TAIL(&pvh->pv_list, pv, pv_list);
 2289                 return (TRUE);
 2290         } else
 2291                 return (FALSE);
 2292 }
 2293 
 2294 /*
 2295  * Fills a page table page with mappings to consecutive physical pages.
 2296  */
 2297 static void
 2298 pmap_fill_ptp(pt_entry_t *firstpte, pt_entry_t newpte)
 2299 {
 2300         pt_entry_t *pte;
 2301 
 2302         for (pte = firstpte; pte < firstpte + NPTEPG; pte++) {
 2303                 *pte = newpte;  
 2304                 newpte += PAGE_SIZE;
 2305         }
 2306 }
 2307 
 2308 /*
 2309  * Tries to demote a 2- or 4MB page mapping.  If demotion fails, the
 2310  * 2- or 4MB page mapping is invalidated.
 2311  */
 2312 static boolean_t
 2313 pmap_demote_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t va)
 2314 {
 2315         pd_entry_t newpde, oldpde;
 2316         pmap_t allpmaps_entry;
 2317         pt_entry_t *firstpte, newpte;
 2318         vm_paddr_t mptepa;
 2319         vm_page_t free, mpte;
 2320 
 2321         PMAP_LOCK_ASSERT(pmap, MA_OWNED);
 2322         oldpde = *pde;
 2323         KASSERT((oldpde & (PG_PS | PG_V)) == (PG_PS | PG_V),
 2324             ("pmap_demote_pde: oldpde is missing PG_PS and/or PG_V"));
 2325         mpte = pmap_lookup_pt_page(pmap, va);
 2326         if (mpte != NULL)
 2327                 pmap_remove_pt_page(pmap, mpte);
 2328         else {
 2329                 KASSERT((oldpde & PG_W) == 0,
 2330                     ("pmap_demote_pde: page table page for a wired mapping"
 2331                     " is missing"));
 2332 
 2333                 /*
 2334                  * Invalidate the 2- or 4MB page mapping and return
 2335                  * "failure" if the mapping was never accessed or the
 2336                  * allocation of the new page table page fails.
 2337                  */
 2338                 if ((oldpde & PG_A) == 0 || (mpte = vm_page_alloc(NULL,
 2339                     va >> PDRSHIFT, VM_ALLOC_NOOBJ | VM_ALLOC_NORMAL |
 2340                     VM_ALLOC_WIRED)) == NULL) {
 2341                         free = NULL;
 2342                         pmap_remove_pde(pmap, pde, trunc_4mpage(va), &free);
 2343                         pmap_invalidate_page(pmap, trunc_4mpage(va));
 2344                         pmap_free_zero_pages(free);
 2345                         CTR2(KTR_PMAP, "pmap_demote_pde: failure for va %#x"
 2346                             " in pmap %p", va, pmap);
 2347                         return (FALSE);
 2348                 }
 2349                 if (va < VM_MAXUSER_ADDRESS)
 2350                         pmap->pm_stats.resident_count++;
 2351         }
 2352         mptepa = VM_PAGE_TO_PHYS(mpte);
 2353 
 2354         /*
 2355          * Temporarily map the page table page (mpte) into the kernel's
 2356          * address space at either PADDR1 or PADDR2.
 2357          */
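              /*
               * PADDR1/PMAP1 may be used only while the thread is pinned and
               * the page queues lock is held; otherwise fall back to PADDR2,
               * which is serialized by PMAP2mutex.
               */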
 2358         if (curthread->td_pinned > 0 && mtx_owned(&vm_page_queue_mtx)) {
 2359                 if ((*PMAP1 & PG_FRAME) != mptepa) {
 2360                         *PMAP1 = mptepa | PG_RW | PG_V | PG_A | PG_M;
 2361 #ifdef SMP
 2362                         PMAP1cpu = PCPU_GET(cpuid);
 2363 #endif
 2364                         invlcaddr(PADDR1);
 2365                         PMAP1changed++;
 2366                 } else
 2367 #ifdef SMP
 2368                 if (PMAP1cpu != PCPU_GET(cpuid)) {
 2369                         PMAP1cpu = PCPU_GET(cpuid);
 2370                         invlcaddr(PADDR1);
 2371                         PMAP1changedcpu++;
 2372                 } else
 2373 #endif
 2374                         PMAP1unchanged++;
 2375                 firstpte = PADDR1;
 2376         } else {
 2377                 mtx_lock(&PMAP2mutex);
 2378                 if ((*PMAP2 & PG_FRAME) != mptepa) {
 2379                         *PMAP2 = mptepa | PG_RW | PG_V | PG_A | PG_M;
 2380                         pmap_invalidate_page(kernel_pmap, (vm_offset_t)PADDR2);
 2381                 }
 2382                 firstpte = PADDR2;
 2383         }
 2384         newpde = mptepa | PG_M | PG_A | (oldpde & PG_U) | PG_RW | PG_V;
 2385         KASSERT((oldpde & PG_A) != 0,
 2386             ("pmap_demote_pde: oldpde is missing PG_A"));
 2387         KASSERT((oldpde & (PG_M | PG_RW)) != PG_RW,
 2388             ("pmap_demote_pde: oldpde is missing PG_M"));
 2389         newpte = oldpde & ~PG_PS;
 2390         if ((newpte & PG_PDE_PAT) != 0)
 2391                 newpte ^= PG_PDE_PAT | PG_PTE_PAT;
 2392 
 2393         /*
 2394          * If the page table page is new, initialize it.
 2395          */
 2396         if (mpte->wire_count == 1) {
 2397                 mpte->wire_count = NPTEPG;
 2398                 pmap_fill_ptp(firstpte, newpte);
 2399         }
 2400         KASSERT((*firstpte & PG_FRAME) == (newpte & PG_FRAME),
 2401             ("pmap_demote_pde: firstpte and newpte map different physical"
 2402             " addresses"));
 2403 
 2404         /*
 2405          * If the mapping has changed attributes, update the page table
 2406          * entries.
 2407          */ 
 2408         if ((*firstpte & PG_PTE_PROMOTE) != (newpte & PG_PTE_PROMOTE))
 2409                 pmap_fill_ptp(firstpte, newpte);
 2410         
 2411         /*
 2412          * Demote the mapping.  This pmap is locked.  The old PDE has
 2413          * PG_A set.  If the old PDE has PG_RW set, it also has PG_M
 2414          * set.  Thus, there is no danger of a race with another
 2415          * processor changing the setting of PG_A and/or PG_M between
 2416          * the read above and the store below. 
 2417          */
 2418         if (pmap == kernel_pmap) {
 2419                 /*
 2420                  * A harmless race exists between this loop and the bcopy()
 2421                  * in pmap_pinit() that initializes the kernel segment of
 2422                  * the new page table.  Specifically, that bcopy() may copy
 2423                  * the new PDE from the PTD, which is first in allpmaps, to
 2424                  * the new page table before this loop updates that new
 2425                  * page table.
 2426                  */
 2427                 mtx_lock_spin(&allpmaps_lock);
 2428                 LIST_FOREACH(allpmaps_entry, &allpmaps, pm_list) {
 2429                         pde = pmap_pde(allpmaps_entry, va);
 2430                         KASSERT(*pde == newpde || (*pde & PG_PTE_PROMOTE) ==
 2431                             (oldpde & PG_PTE_PROMOTE),
 2432                             ("pmap_demote_pde: pde was %#jx, expected %#jx",
 2433                             (uintmax_t)*pde, (uintmax_t)oldpde));
 2434                         pde_store(pde, newpde);
 2435                 }
 2436                 mtx_unlock_spin(&allpmaps_lock);
 2437         } else
 2438                 pde_store(pde, newpde); 
 2439         if (firstpte == PADDR2)
 2440                 mtx_unlock(&PMAP2mutex);
 2441 
 2442         /*
 2443          * Invalidate the recursive mapping of the page table page.
 2444          */
 2445         pmap_invalidate_page(pmap, (vm_offset_t)vtopte(va));
 2446 
 2447         /*
 2448          * Demote the pv entry.  This depends on the earlier demotion
 2449          * of the mapping.  Specifically, the (re)creation of a per-
 2450          * page pv entry might trigger the execution of pmap_collect(),
 2451          * which might reclaim a newly (re)created per-page pv entry
 2452          * and destroy the associated mapping.  In order to destroy
 2453          * the mapping, the PDE must have already changed from mapping
 2454          * the 2mpage to referencing the page table page.
 2455          */
 2456         if ((oldpde & PG_MANAGED) != 0)
 2457                 pmap_pv_demote_pde(pmap, va, oldpde & PG_PS_FRAME);
 2458 
 2459         pmap_pde_demotions++;
 2460         CTR2(KTR_PMAP, "pmap_demote_pde: success for va %#x"
 2461             " in pmap %p", va, pmap);
 2462         return (TRUE);
 2463 }
 2464 
 2465 /*
 2466  * pmap_remove_pde: unmap a 2- or 4MB superpage mapping from a pmap
 2467  */
 2468 static void
 2469 pmap_remove_pde(pmap_t pmap, pd_entry_t *pdq, vm_offset_t sva,
 2470     vm_page_t *free)
 2471 {
 2472         struct md_page *pvh;
 2473         pd_entry_t oldpde;
 2474         vm_offset_t eva, va;
 2475         vm_page_t m, mpte;
 2476 
 2477         PMAP_LOCK_ASSERT(pmap, MA_OWNED);
 2478         KASSERT((sva & PDRMASK) == 0,
 2479             ("pmap_remove_pde: sva is not 4mpage aligned"));
 2480         oldpde = pte_load_clear(pdq);
 2481         if (oldpde & PG_W)
 2482                 pmap->pm_stats.wired_count -= NBPDR / PAGE_SIZE;
 2483 
 2484         /*
 2485          * Machines that don't support invlpg also don't support
 2486          * PG_G.
 2487          */
 2488         if (oldpde & PG_G)
 2489                 pmap_invalidate_page(kernel_pmap, sva);
 2490         pmap->pm_stats.resident_count -= NBPDR / PAGE_SIZE;
 2491         if (oldpde & PG_MANAGED) {
 2492                 pvh = pa_to_pvh(oldpde & PG_PS_FRAME);
 2493                 pmap_pvh_free(pvh, pmap, sva);
 2494                 eva = sva + NBPDR;
 2495                 for (va = sva, m = PHYS_TO_VM_PAGE(oldpde & PG_PS_FRAME);
 2496                     va < eva; va += PAGE_SIZE, m++) {
 2497                         if ((oldpde & (PG_M | PG_RW)) == (PG_M | PG_RW))
 2498                                 vm_page_dirty(m);
 2499                         if (oldpde & PG_A)
 2500                                 vm_page_flag_set(m, PG_REFERENCED);
 2501                         if (TAILQ_EMPTY(&m->md.pv_list) &&
 2502                             TAILQ_EMPTY(&pvh->pv_list))
 2503                                 vm_page_flag_clear(m, PG_WRITEABLE);
 2504                 }
 2505         }
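              /*
               * Kernel page table pages are never freed, so a superpage
               * mapping in the kernel pmap is demoted rather than having its
               * page table page reclaimed.
               */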
 2506         if (pmap == kernel_pmap) {
 2507                 if (!pmap_demote_pde(pmap, pdq, sva))
 2508                         panic("pmap_remove_pde: failed demotion");
 2509         } else {
 2510                 mpte = pmap_lookup_pt_page(pmap, sva);
 2511                 if (mpte != NULL) {
 2512                         pmap_remove_pt_page(pmap, mpte);
 2513                         pmap->pm_stats.resident_count--;
 2514                         KASSERT(mpte->wire_count == NPTEPG,
 2515                             ("pmap_remove_pde: pte page wire count error"));
 2516                         mpte->wire_count = 0;
 2517                         pmap_add_delayed_free_list(mpte, free, FALSE);
 2518                         atomic_subtract_int(&cnt.v_wire_count, 1);
 2519                 }
 2520         }
 2521 }
 2522 
 2523 /*
 2524  * pmap_remove_pte: unmap a single 4KB page from a pmap
 2525  */
 2526 static int
 2527 pmap_remove_pte(pmap_t pmap, pt_entry_t *ptq, vm_offset_t va, vm_page_t *free)
 2528 {
 2529         pt_entry_t oldpte;
 2530         vm_page_t m;
 2531 
 2532         mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 2533         PMAP_LOCK_ASSERT(pmap, MA_OWNED);
 2534         oldpte = pte_load_clear(ptq);
 2535         if (oldpte & PG_W)
 2536                 pmap->pm_stats.wired_count -= 1;
 2537         /*
 2538          * Machines that don't support invlpg also don't support
 2539          * PG_G.
 2540          */
 2541         if (oldpte & PG_G)
 2542                 pmap_invalidate_page(kernel_pmap, va);
 2543         pmap->pm_stats.resident_count -= 1;
 2544         if (oldpte & PG_MANAGED) {
 2545                 m = PHYS_TO_VM_PAGE(oldpte & PG_FRAME);
 2546                 if ((oldpte & (PG_M | PG_RW)) == (PG_M | PG_RW))
 2547                         vm_page_dirty(m);
 2548                 if (oldpte & PG_A)
 2549                         vm_page_flag_set(m, PG_REFERENCED);
 2550                 pmap_remove_entry(pmap, m, va);
 2551         }
 2552         return (pmap_unuse_pt(pmap, va, free));
 2553 }
 2554 
 2555 /*
 2556  * Remove a single page from a process address space
 2557  */
 2558 static void
 2559 pmap_remove_page(pmap_t pmap, vm_offset_t va, vm_page_t *free)
 2560 {
 2561         pt_entry_t *pte;
 2562 
 2563         mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 2564         KASSERT(curthread->td_pinned > 0, ("curthread not pinned"));
 2565         PMAP_LOCK_ASSERT(pmap, MA_OWNED);
 2566         if ((pte = pmap_pte_quick(pmap, va)) == NULL || *pte == 0)
 2567                 return;
 2568         pmap_remove_pte(pmap, pte, va, free);
 2569         pmap_invalidate_page(pmap, va);
 2570 }
 2571 
 2572 /*
 2573  *      Remove the given range of addresses from the specified map.
 2574  *
 2575  *      It is assumed that the start and end are properly
 2576  *      rounded to the page size.
 2577  */
 2578 void
 2579 pmap_remove(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
 2580 {
 2581         vm_offset_t pdnxt;
 2582         pd_entry_t ptpaddr;
 2583         pt_entry_t *pte;
 2584         vm_page_t free = NULL;
 2585         int anyvalid;
 2586 
 2587         /*
 2588          * Perform an unsynchronized read.  This is, however, safe.
 2589          */
 2590         if (pmap->pm_stats.resident_count == 0)
 2591                 return;
 2592 
 2593         anyvalid = 0;
 2594 
 2595         vm_page_lock_queues();
 2596         sched_pin();
 2597         PMAP_LOCK(pmap);
 2598 
 2599         /*
 2600          * Special-case the removal of a single page: it is a very
 2601          * common operation and lets us short-circuit much of the
 2602          * code below.
 2603          */
 2604         if ((sva + PAGE_SIZE == eva) && 
 2605             ((pmap->pm_pdir[(sva >> PDRSHIFT)] & PG_PS) == 0)) {
 2606                 pmap_remove_page(pmap, sva, &free);
 2607                 goto out;
 2608         }
 2609 
 2610         for (; sva < eva; sva = pdnxt) {
 2611                 unsigned pdirindex;
 2612 
 2613                 /*
 2614                  * Calculate index for next page table.
 2615                  */
 2616                 pdnxt = (sva + NBPDR) & ~PDRMASK;
 2617                 if (pdnxt < sva)
 2618                         pdnxt = eva;
 2619                 if (pmap->pm_stats.resident_count == 0)
 2620                         break;
 2621 
 2622                 pdirindex = sva >> PDRSHIFT;
 2623                 ptpaddr = pmap->pm_pdir[pdirindex];
 2624 
 2625                 /*
 2626                  * Weed out invalid mappings. Note: we assume that the page
 2627                  * directory table is always allocated, and in kernel virtual.
 2628                  */
 2629                 if (ptpaddr == 0)
 2630                         continue;
 2631 
 2632                 /*
 2633                  * Check for large page.
 2634                  */
 2635                 if ((ptpaddr & PG_PS) != 0) {
 2636                         /*
 2637                          * Are we removing the entire large page?  If not,
 2638                          * demote the mapping and fall through.
 2639                          */
 2640                         if (sva + NBPDR == pdnxt && eva >= pdnxt) {
 2641                                 /*
 2642                                  * The TLB entry for a PG_G mapping is
 2643                                  * invalidated by pmap_remove_pde().
 2644                                  */
 2645                                 if ((ptpaddr & PG_G) == 0)
 2646                                         anyvalid = 1;
 2647                                 pmap_remove_pde(pmap,
 2648                                     &pmap->pm_pdir[pdirindex], sva, &free);
 2649                                 continue;
 2650                         } else if (!pmap_demote_pde(pmap,
 2651                             &pmap->pm_pdir[pdirindex], sva)) {
 2652                                 /* The large page mapping was destroyed. */
 2653                                 continue;
 2654                         }
 2655                 }
 2656 
 2657                 /*
 2658                  * Limit our scan to either the end of the va represented
 2659                  * by the current page table page, or to the end of the
 2660                  * range being removed.
 2661                  */
 2662                 if (pdnxt > eva)
 2663                         pdnxt = eva;
 2664 
 2665                 for (pte = pmap_pte_quick(pmap, sva); sva != pdnxt; pte++,
 2666                     sva += PAGE_SIZE) {
 2667                         if (*pte == 0)
 2668                                 continue;
 2669 
 2670                         /*
 2671                          * The TLB entry for a PG_G mapping is invalidated
 2672                          * by pmap_remove_pte().
 2673                          */
 2674                         if ((*pte & PG_G) == 0)
 2675                                 anyvalid = 1;
 2676                         if (pmap_remove_pte(pmap, pte, sva, &free))
 2677                                 break;
 2678                 }
 2679         }
 2680 out:
 2681         sched_unpin();
 2682         if (anyvalid)
 2683                 pmap_invalidate_all(pmap);
 2684         vm_page_unlock_queues();
 2685         PMAP_UNLOCK(pmap);
 2686         pmap_free_zero_pages(free);
 2687 }
 2688 
 2689 /*
 2690  *      Routine:        pmap_remove_all
 2691  *      Function:
 2692  *              Removes this physical page from
 2693  *              all physical maps in which it resides.
 2694  *              Reflects back modify bits to the pager.
 2695  *
 2696  *      Notes:
 2697  *              Original versions of this routine were very
 2698  *              inefficient because they iteratively called
 2699  *              pmap_remove (slow...)
 2700  */
 2701 
 2702 void
 2703 pmap_remove_all(vm_page_t m)
 2704 {
 2705         struct md_page *pvh;
 2706         pv_entry_t pv;
 2707         pmap_t pmap;
 2708         pt_entry_t *pte, tpte;
 2709         pd_entry_t *pde;
 2710         vm_offset_t va;
 2711         vm_page_t free;
 2712 
 2713         KASSERT((m->flags & PG_FICTITIOUS) == 0,
 2714             ("pmap_remove_all: page %p is fictitious", m));
 2715         mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 2716         sched_pin();
 2717         pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
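              /*
               * First demote every 2/4MB mapping that covers this page, so
               * that all of the page's remaining mappings are 4KB mappings
               * on its own pv list.
               */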
 2718         while ((pv = TAILQ_FIRST(&pvh->pv_list)) != NULL) {
 2719                 va = pv->pv_va;
 2720                 pmap = PV_PMAP(pv);
 2721                 PMAP_LOCK(pmap);
 2722                 pde = pmap_pde(pmap, va);
 2723                 (void)pmap_demote_pde(pmap, pde, va);
 2724                 PMAP_UNLOCK(pmap);
 2725         }
 2726         while ((pv = TAILQ_FIRST(&m->md.pv_list)) != NULL) {
 2727                 pmap = PV_PMAP(pv);
 2728                 PMAP_LOCK(pmap);
 2729                 pmap->pm_stats.resident_count--;
 2730                 pde = pmap_pde(pmap, pv->pv_va);
 2731                 KASSERT((*pde & PG_PS) == 0, ("pmap_remove_all: found"
 2732                     " a 4mpage in page %p's pv list", m));
 2733                 pte = pmap_pte_quick(pmap, pv->pv_va);
 2734                 tpte = pte_load_clear(pte);
 2735                 if (tpte & PG_W)
 2736                         pmap->pm_stats.wired_count--;
 2737                 if (tpte & PG_A)
 2738                         vm_page_flag_set(m, PG_REFERENCED);
 2739 
 2740                 /*
 2741                  * Update the vm_page_t clean and reference bits.
 2742                  */
 2743                 if ((tpte & (PG_M | PG_RW)) == (PG_M | PG_RW))
 2744                         vm_page_dirty(m);
 2745                 free = NULL;
 2746                 pmap_unuse_pt(pmap, pv->pv_va, &free);
 2747                 pmap_invalidate_page(pmap, pv->pv_va);
 2748                 pmap_free_zero_pages(free);
 2749                 TAILQ_REMOVE(&m->md.pv_list, pv, pv_list);
 2750                 free_pv_entry(pmap, pv);
 2751                 PMAP_UNLOCK(pmap);
 2752         }
 2753         vm_page_flag_clear(m, PG_WRITEABLE);
 2754         sched_unpin();
 2755 }
 2756 
 2757 /*
 2758  * pmap_protect_pde: apply the requested protection to a 4mpage mapping
 2759  */
 2760 static boolean_t
 2761 pmap_protect_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t sva, vm_prot_t prot)
 2762 {
 2763         pd_entry_t newpde, oldpde;
 2764         vm_offset_t eva, va;
 2765         vm_page_t m;
 2766         boolean_t anychanged;
 2767 
 2768         PMAP_LOCK_ASSERT(pmap, MA_OWNED);
 2769         KASSERT((sva & PDRMASK) == 0,
 2770             ("pmap_protect_pde: sva is not 4mpage aligned"));
 2771         anychanged = FALSE;
 2772 retry:
 2773         oldpde = newpde = *pde;
 2774         if (oldpde & PG_MANAGED) {
 2775                 eva = sva + NBPDR;
 2776                 for (va = sva, m = PHYS_TO_VM_PAGE(oldpde & PG_PS_FRAME);
 2777                     va < eva; va += PAGE_SIZE, m++) {
 2778                         /*
 2779                          * In contrast to the analogous operation on a 4KB page
 2780                          * mapping, the mapping's PG_A flag is not cleared and
 2781                          * the page's PG_REFERENCED flag is not set.  The
 2782                          * reason is that pmap_demote_pde() expects that a 2/4MB
 2783                          * page mapping with a stored page table page has PG_A
 2784                          * set.
 2785                          */
 2786                         if ((oldpde & (PG_M | PG_RW)) == (PG_M | PG_RW))
 2787                                 vm_page_dirty(m);
 2788                 }
 2789         }
 2790         if ((prot & VM_PROT_WRITE) == 0)
 2791                 newpde &= ~(PG_RW | PG_M);
 2792 #ifdef PAE
 2793         if ((prot & VM_PROT_EXECUTE) == 0)
 2794                 newpde |= pg_nx;
 2795 #endif
 2796         if (newpde != oldpde) {
 2797                 if (!pde_cmpset(pde, oldpde, newpde))
 2798                         goto retry;
 2799                 if (oldpde & PG_G)
 2800                         pmap_invalidate_page(pmap, sva);
 2801                 else
 2802                         anychanged = TRUE;
 2803         }
 2804         return (anychanged);
 2805 }
 2806 
 2807 /*
 2808  *      Set the physical protection on the
 2809  *      specified range of this map as requested.
 2810  */
 2811 void
 2812 pmap_protect(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot)
 2813 {
 2814         vm_offset_t pdnxt;
 2815         pd_entry_t ptpaddr;
 2816         pt_entry_t *pte;
 2817         int anychanged;
 2818 
 2819         if ((prot & VM_PROT_READ) == VM_PROT_NONE) {
 2820                 pmap_remove(pmap, sva, eva);
 2821                 return;
 2822         }
 2823 
 2824 #ifdef PAE
 2825         if ((prot & (VM_PROT_WRITE|VM_PROT_EXECUTE)) ==
 2826             (VM_PROT_WRITE|VM_PROT_EXECUTE))
 2827                 return;
 2828 #else
 2829         if (prot & VM_PROT_WRITE)
 2830                 return;
 2831 #endif
 2832 
 2833         anychanged = 0;
 2834 
 2835         vm_page_lock_queues();
 2836         sched_pin();
 2837         PMAP_LOCK(pmap);
 2838         for (; sva < eva; sva = pdnxt) {
 2839                 pt_entry_t obits, pbits;
 2840                 unsigned pdirindex;
 2841 
 2842                 pdnxt = (sva + NBPDR) & ~PDRMASK;
 2843                 if (pdnxt < sva)
 2844                         pdnxt = eva;
 2845 
 2846                 pdirindex = sva >> PDRSHIFT;
 2847                 ptpaddr = pmap->pm_pdir[pdirindex];
 2848 
 2849                 /*
 2850                  * Weed out invalid mappings. Note: we assume that the page
 2851                  * directory table is always allocated, and in kernel virtual.
 2852                  */
 2853                 if (ptpaddr == 0)
 2854                         continue;
 2855 
 2856                 /*
 2857                  * Check for large page.
 2858                  */
 2859                 if ((ptpaddr & PG_PS) != 0) {
 2860                         /*
 2861                          * Are we protecting the entire large page?  If not,
 2862                          * demote the mapping and fall through.
 2863                          */
 2864                         if (sva + NBPDR == pdnxt && eva >= pdnxt) {
 2865                                 /*
 2866                                  * The TLB entry for a PG_G mapping is
 2867                                  * invalidated by pmap_protect_pde().
 2868                                  */
 2869                                 if (pmap_protect_pde(pmap,
 2870                                     &pmap->pm_pdir[pdirindex], sva, prot))
 2871                                         anychanged = 1;
 2872                                 continue;
 2873                         } else if (!pmap_demote_pde(pmap,
 2874                             &pmap->pm_pdir[pdirindex], sva)) {
 2875                                 /* The large page mapping was destroyed. */
 2876                                 continue;
 2877                         }
 2878                 }
 2879 
 2880                 if (pdnxt > eva)
 2881                         pdnxt = eva;
 2882 
 2883                 for (pte = pmap_pte_quick(pmap, sva); sva != pdnxt; pte++,
 2884                     sva += PAGE_SIZE) {
 2885                         vm_page_t m;
 2886 
 2887 retry:
 2888                         /*
 2889                          * Regardless of whether a pte is 32 or 64 bits in
 2890                          * size, PG_RW, PG_A, and PG_M are among the least
 2891                          * significant 32 bits.
 2892                          */
 2893                         obits = pbits = *pte;
 2894                         if ((pbits & PG_V) == 0)
 2895                                 continue;
 2896                         if (pbits & PG_MANAGED) {
 2897                                 m = NULL;
 2898                                 if (pbits & PG_A) {
 2899                                         m = PHYS_TO_VM_PAGE(pbits & PG_FRAME);
 2900                                         vm_page_flag_set(m, PG_REFERENCED);
 2901                                         pbits &= ~PG_A;
 2902                                 }
 2903                                 if ((pbits & (PG_M | PG_RW)) == (PG_M | PG_RW)) {
 2904                                         if (m == NULL)
 2905                                                 m = PHYS_TO_VM_PAGE(pbits & PG_FRAME);
 2906                                         vm_page_dirty(m);
 2907                                 }
 2908                         }
 2909 
 2910                         if ((prot & VM_PROT_WRITE) == 0)
 2911                                 pbits &= ~(PG_RW | PG_M);
 2912 #ifdef PAE
 2913                         if ((prot & VM_PROT_EXECUTE) == 0)
 2914                                 pbits |= pg_nx;
 2915 #endif
 2916 
 2917                         if (pbits != obits) {
 2918 #ifdef PAE
 2919                                 if (!atomic_cmpset_64(pte, obits, pbits))
 2920                                         goto retry;
 2921 #else
 2922                                 if (!atomic_cmpset_int((u_int *)pte, obits,
 2923                                     pbits))
 2924                                         goto retry;
 2925 #endif
 2926                                 if (obits & PG_G)
 2927                                         pmap_invalidate_page(pmap, sva);
 2928                                 else
 2929                                         anychanged = 1;
 2930                         }
 2931                 }
 2932         }
 2933         sched_unpin();
 2934         if (anychanged)
 2935                 pmap_invalidate_all(pmap);
 2936         vm_page_unlock_queues();
 2937         PMAP_UNLOCK(pmap);
 2938 }
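/*
 * Illustrative sketch (editor's example, not from pmap.c itself): how the
 * pmap_protect() loop above steps "sva" to the next 2/4MB page directory
 * boundary, including the wrap-around check.  NBPDR_DEMO and PDRMASK_DEMO
 * are assumed to match the non-PAE i386 values (4MB superpages); the
 * suffix marks them as stand-ins, not the kernel macros.
 */
#include <stdio.h>
#include <stdint.h>

#define NBPDR_DEMO   0x00400000u                /* bytes mapped by one PDE */
#define PDRMASK_DEMO (NBPDR_DEMO - 1)           /* offset within a superpage */

static uint32_t
next_pde_boundary(uint32_t sva, uint32_t eva)
{
        uint32_t pdnxt;

        pdnxt = (sva + NBPDR_DEMO) & ~PDRMASK_DEMO;
        if (pdnxt < sva)        /* wrapped past the top of the address space */
                pdnxt = eva;
        return (pdnxt);
}

int
main(void)
{
        /* 0x00401000 rounds up to 0x00800000. */
        printf("%#x\n", next_pde_boundary(0x00401000u, 0xffffffffu));
        /* Near the top of the 32-bit space the sum wraps, so clamp to eva. */
        printf("%#x\n", next_pde_boundary(0xffc01000u, 0xffffffffu));
        return (0);
}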
 2939 
 2940 /*
 2941  * Tries to promote the 512 or 1024 contiguous 4KB page mappings that are
 2942  * within a single page table page (PTP) to a single 2- or 4MB page mapping.
 2943  * For promotion to occur, two conditions must be met: (1) the 4KB page
 2944  * mappings must map aligned, contiguous physical memory and (2) the 4KB page
 2945  * mappings must have identical characteristics.
 2946  *
 2947  * Managed (PG_MANAGED) mappings within the kernel address space are not
 2948  * promoted.  The reason is that kernel PDEs are replicated in each pmap but
 2949  * pmap_clear_ptes() and pmap_ts_referenced() only read the PDE from the kernel
 2950  * pmap.
 2951  */
 2952 static void
 2953 pmap_promote_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t va)
 2954 {
 2955         pd_entry_t newpde;
 2956         pmap_t allpmaps_entry;
 2957         pt_entry_t *firstpte, oldpte, pa, *pte;
 2958         vm_offset_t oldpteva;
 2959         vm_page_t mpte;
 2960 
 2961         PMAP_LOCK_ASSERT(pmap, MA_OWNED);
 2962 
 2963         /*
 2964          * Examine the first PTE in the specified PTP.  Abort if this PTE is
 2965          * either invalid, unused, or does not map the first 4KB physical page
 2966          * within a 2- or 4MB page.
 2967          */
 2968         firstpte = vtopte(trunc_4mpage(va));
 2969 setpde:
 2970         newpde = *firstpte;
 2971         if ((newpde & ((PG_FRAME & PDRMASK) | PG_A | PG_V)) != (PG_A | PG_V)) {
 2972                 pmap_pde_p_failures++;
 2973                 CTR2(KTR_PMAP, "pmap_promote_pde: failure for va %#x"
 2974                     " in pmap %p", va, pmap);
 2975                 return;
 2976         }
 2977         if ((*firstpte & PG_MANAGED) != 0 && pmap == kernel_pmap) {
 2978                 pmap_pde_p_failures++;
 2979                 CTR2(KTR_PMAP, "pmap_promote_pde: failure for va %#x"
 2980                     " in pmap %p", va, pmap);
 2981                 return;
 2982         }
 2983         if ((newpde & (PG_M | PG_RW)) == PG_RW) {
 2984                 /*
 2985                  * When PG_M is already clear, PG_RW can be cleared without
 2986                  * a TLB invalidation.
 2987                  */
 2988                 if (!atomic_cmpset_int((u_int *)firstpte, newpde, newpde &
 2989                     ~PG_RW))  
 2990                         goto setpde;
 2991                 newpde &= ~PG_RW;
 2992         }
 2993 
 2994         /* 
 2995          * Examine each of the other PTEs in the specified PTP.  Abort if this
 2996          * PTE maps an unexpected 4KB physical page or does not have identical
 2997          * characteristics to the first PTE.
 2998          */
 2999         pa = (newpde & (PG_PS_FRAME | PG_A | PG_V)) + NBPDR - PAGE_SIZE;
 3000         for (pte = firstpte + NPTEPG - 1; pte > firstpte; pte--) {
 3001 setpte:
 3002                 oldpte = *pte;
 3003                 if ((oldpte & (PG_FRAME | PG_A | PG_V)) != pa) {
 3004                         pmap_pde_p_failures++;
 3005                         CTR2(KTR_PMAP, "pmap_promote_pde: failure for va %#x"
 3006                             " in pmap %p", va, pmap);
 3007                         return;
 3008                 }
 3009                 if ((oldpte & (PG_M | PG_RW)) == PG_RW) {
 3010                         /*
 3011                          * When PG_M is already clear, PG_RW can be cleared
 3012                          * without a TLB invalidation.
 3013                          */
 3014                         if (!atomic_cmpset_int((u_int *)pte, oldpte,
 3015                             oldpte & ~PG_RW))
 3016                                 goto setpte;
 3017                         oldpte &= ~PG_RW;
 3018                         oldpteva = (oldpte & PG_FRAME & PDRMASK) |
 3019                             (va & ~PDRMASK);
 3020                         CTR2(KTR_PMAP, "pmap_promote_pde: protect for va %#x"
 3021                             " in pmap %p", oldpteva, pmap);
 3022                 }
 3023                 if ((oldpte & PG_PTE_PROMOTE) != (newpde & PG_PTE_PROMOTE)) {
 3024                         pmap_pde_p_failures++;
 3025                         CTR2(KTR_PMAP, "pmap_promote_pde: failure for va %#x"
 3026                             " in pmap %p", va, pmap);
 3027                         return;
 3028                 }
 3029                 pa -= PAGE_SIZE;
 3030         }
 3031 
 3032         /*
 3033          * Save the page table page in its current state until the PDE
 3034          * mapping the superpage is demoted by pmap_demote_pde() or
 3035          * destroyed by pmap_remove_pde(). 
 3036          */
 3037         mpte = PHYS_TO_VM_PAGE(*pde & PG_FRAME);
 3038         KASSERT(mpte >= vm_page_array &&
 3039             mpte < &vm_page_array[vm_page_array_size],
 3040             ("pmap_promote_pde: page table page is out of range"));
 3041         KASSERT(mpte->pindex == va >> PDRSHIFT,
 3042             ("pmap_promote_pde: page table page's pindex is wrong"));
 3043         pmap_insert_pt_page(pmap, mpte);
 3044 
 3045         /*
 3046          * Promote the pv entries.
 3047          */
 3048         if ((newpde & PG_MANAGED) != 0)
 3049                 pmap_pv_promote_pde(pmap, va, newpde & PG_PS_FRAME);
 3050 
 3051         /*
 3052          * Propagate the PAT index to its proper position.
 3053          */
 3054         if ((newpde & PG_PTE_PAT) != 0)
 3055                 newpde ^= PG_PDE_PAT | PG_PTE_PAT;
 3056 
 3057         /*
 3058          * Map the superpage.
 3059          */
 3060         if (pmap == kernel_pmap) {
 3061                 mtx_lock_spin(&allpmaps_lock);
 3062                 LIST_FOREACH(allpmaps_entry, &allpmaps, pm_list) {
 3063                         pde = pmap_pde(allpmaps_entry, va);
 3064                         pde_store(pde, PG_PS | newpde);
 3065                 }
 3066                 mtx_unlock_spin(&allpmaps_lock);
 3067         } else
 3068                 pde_store(pde, PG_PS | newpde);
 3069 
 3070         pmap_pde_promotions++;
 3071         CTR2(KTR_PMAP, "pmap_promote_pde: success for va %#x"
 3072             " in pmap %p", va, pmap);
 3073 }
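/*
 * Illustrative sketch (editor's example, not from pmap.c itself): why the
 * promotion code above XORs with (PG_PDE_PAT | PG_PTE_PAT).  In a 4KB PTE
 * the PAT selector is bit 7, but in a 2/4MB PDE bit 7 means PG_PS, so the
 * selector moves to bit 12.  When the bit-7 copy is set, one XOR clears it
 * and sets the bit-12 copy.  The *_DEMO constants are assumed i386 values,
 * used here only so the sketch compiles on its own.
 */
#include <assert.h>
#include <stdint.h>

#define PG_PTE_PAT_DEMO 0x00000080u     /* PAT selector in a 4KB PTE */
#define PG_PDE_PAT_DEMO 0x00001000u     /* PAT selector in a 2/4MB PDE */

static uint32_t
relocate_pat_bit(uint32_t newpde)
{
        if ((newpde & PG_PTE_PAT_DEMO) != 0)
                newpde ^= PG_PDE_PAT_DEMO | PG_PTE_PAT_DEMO;
        return (newpde);
}

int
main(void)
{
        /* A PTE-style entry with PAT set has the bit moved to position 12. */
        assert(relocate_pat_bit(0x00000080u) == 0x00001000u);
        /* An entry without the PAT bit passes through unchanged. */
        assert(relocate_pat_bit(0x00000063u) == 0x00000063u);
        return (0);
}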
 3074 
 3075 /*
 3076  *      Insert the given physical page (p) at
 3077  *      the specified virtual address (v) in the
 3078  *      target physical map with the protection requested.
 3079  *
 3080  *      If specified, the page will be wired down, meaning
 3081  *      that the related pte can not be reclaimed.
 3082  *
 3083  *      NB:  This is the only routine which MAY NOT lazy-evaluate
 3084  *      or lose information.  That is, this routine must actually
 3085  *      insert this page into the given map NOW.
 3086  */
 3087 void
 3088 pmap_enter(pmap_t pmap, vm_offset_t va, vm_prot_t access, vm_page_t m,
 3089     vm_prot_t prot, boolean_t wired)
 3090 {
 3091         vm_paddr_t pa;
 3092         pd_entry_t *pde;
 3093         pt_entry_t *pte;
 3094         vm_paddr_t opa;
 3095         pt_entry_t origpte, newpte;
 3096         vm_page_t mpte, om;
 3097         boolean_t invlva;
 3098 
 3099         va = trunc_page(va);
 3100         KASSERT(va <= VM_MAX_KERNEL_ADDRESS, ("pmap_enter: toobig"));
 3101         KASSERT(va < UPT_MIN_ADDRESS || va >= UPT_MAX_ADDRESS,
 3102             ("pmap_enter: invalid to pmap_enter page table pages (va: 0x%x)", va));
 3103 
 3104         mpte = NULL;
 3105 
 3106         vm_page_lock_queues();
 3107         PMAP_LOCK(pmap);
 3108         sched_pin();
 3109 
 3110         /*
 3111          * In the case that a page table page is not
 3112          * resident, we are creating it here.
 3113          */
 3114         if (va < VM_MAXUSER_ADDRESS) {
 3115                 mpte = pmap_allocpte(pmap, va, M_WAITOK);
 3116         }
 3117 
 3118         pde = pmap_pde(pmap, va);
 3119         if ((*pde & PG_PS) != 0)
 3120                 panic("pmap_enter: attempted pmap_enter on 4MB page");
 3121         pte = pmap_pte_quick(pmap, va);
 3122 
 3123         /*
 3124          * The page directory table entry is not valid; we need a new PT page.
 3125          */
 3126         if (pte == NULL) {
 3127                 panic("pmap_enter: invalid page directory pdir=%#jx, va=%#x",
 3128                         (uintmax_t)pmap->pm_pdir[PTDPTDI], va);
 3129         }
 3130 
 3131         pa = VM_PAGE_TO_PHYS(m);
 3132         om = NULL;
 3133         origpte = *pte;
 3134         opa = origpte & PG_FRAME;
 3135 
 3136         /*
 3137          * Mapping has not changed, must be protection or wiring change.
 3138          */
 3139         if (origpte && (opa == pa)) {
 3140                 /*
 3141                  * Wiring change, just update stats. We don't worry about
 3142                  * wiring PT pages as they remain resident as long as there
 3143                  * are valid mappings in them. Hence, if a user page is wired,
 3144                  * the PT page will be also.
 3145                  */
 3146                 if (wired && ((origpte & PG_W) == 0))
 3147                         pmap->pm_stats.wired_count++;
 3148                 else if (!wired && (origpte & PG_W))
 3149                         pmap->pm_stats.wired_count--;
 3150 
 3151                 /*
 3152                  * Remove extra pte reference
 3153                  */
 3154                 if (mpte)
 3155                         mpte->wire_count--;
 3156 
 3157                 /*
 3158                  * We might be turning off write access to the page,
 3159                  * so we go ahead and sense modify status.
 3160                  */
 3161                 if (origpte & PG_MANAGED) {
 3162                         om = m;
 3163                         pa |= PG_MANAGED;
 3164                 }
 3165                 goto validate;
 3166         } 
 3167         /*
 3168          * Mapping has changed, invalidate old range and fall through to
 3169          * handle validating new mapping.
 3170          */
 3171         if (opa) {
 3172                 if (origpte & PG_W)
 3173                         pmap->pm_stats.wired_count--;
 3174                 if (origpte & PG_MANAGED) {
 3175                         om = PHYS_TO_VM_PAGE(opa);
 3176                         pmap_remove_entry(pmap, om, va);
 3177                 }
 3178                 if (mpte != NULL) {
 3179                         mpte->wire_count--;
 3180                         KASSERT(mpte->wire_count > 0,
 3181                             ("pmap_enter: missing reference to page table page,"
 3182                              " va: 0x%x", va));
 3183                 }
 3184         } else
 3185                 pmap->pm_stats.resident_count++;
 3186 
 3187         /*
 3188          * Enter on the PV list if part of our managed memory.
 3189          */
 3190         if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0) {
 3191                 KASSERT(va < kmi.clean_sva || va >= kmi.clean_eva,
 3192                     ("pmap_enter: managed mapping within the clean submap"));
 3193                 pmap_insert_entry(pmap, va, m);
 3194                 pa |= PG_MANAGED;
 3195         }
 3196 
 3197         /*
 3198          * Increment counters
 3199          */
 3200         if (wired)
 3201                 pmap->pm_stats.wired_count++;
 3202 
 3203 validate:
 3204         /*
 3205          * Now validate mapping with desired protection/wiring.
 3206          */
 3207         newpte = (pt_entry_t)(pa | pmap_cache_bits(m->md.pat_mode, 0) | PG_V);
 3208         if ((prot & VM_PROT_WRITE) != 0) {
 3209                 newpte |= PG_RW;
 3210                 vm_page_flag_set(m, PG_WRITEABLE);
 3211         }
 3212 #ifdef PAE
 3213         if ((prot & VM_PROT_EXECUTE) == 0)
 3214                 newpte |= pg_nx;
 3215 #endif
 3216         if (wired)
 3217                 newpte |= PG_W;
 3218         if (va < VM_MAXUSER_ADDRESS)
 3219                 newpte |= PG_U;
 3220         if (pmap == kernel_pmap)
 3221                 newpte |= pgeflag;
 3222 
 3223         /*
 3224          * if the mapping or permission bits are different, we need
 3225          * to update the pte.
 3226          */
 3227         if ((origpte & ~(PG_M|PG_A)) != newpte) {
 3228                 newpte |= PG_A;
 3229                 if ((access & VM_PROT_WRITE) != 0)
 3230                         newpte |= PG_M;
 3231                 if (origpte & PG_V) {
 3232                         invlva = FALSE;
 3233                         origpte = pte_load_store(pte, newpte);
 3234                         if (origpte & PG_A) {
 3235                                 if (origpte & PG_MANAGED)
 3236                                         vm_page_flag_set(om, PG_REFERENCED);
 3237                                 if (opa != VM_PAGE_TO_PHYS(m))
 3238                                         invlva = TRUE;
 3239 #ifdef PAE
 3240                                 if ((origpte & PG_NX) == 0 &&
 3241                                     (newpte & PG_NX) != 0)
 3242                                         invlva = TRUE;
 3243 #endif
 3244                         }
 3245                         if ((origpte & (PG_M | PG_RW)) == (PG_M | PG_RW)) {
 3246                                 if ((origpte & PG_MANAGED) != 0)
 3247                                         vm_page_dirty(om);
 3248                                 if ((prot & VM_PROT_WRITE) == 0)
 3249                                         invlva = TRUE;
 3250                         }
 3251                         if (invlva)
 3252                                 pmap_invalidate_page(pmap, va);
 3253                 } else
 3254                         pte_store(pte, newpte);
 3255         }
 3256 
 3257         /*
 3258          * If both the page table page and the reservation are fully
 3259          * populated, then attempt promotion.
 3260          */
 3261         if ((mpte == NULL || mpte->wire_count == NPTEPG) &&
 3262             pg_ps_enabled && vm_reserv_level_iffullpop(m) == 0)
 3263                 pmap_promote_pde(pmap, pde, va);
 3264 
 3265         sched_unpin();
 3266         vm_page_unlock_queues();
 3267         PMAP_UNLOCK(pmap);
 3268 }
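/*
 * Illustrative sketch (editor's example, not from pmap.c itself): the bit
 * assembly performed after the "validate:" label in pmap_enter() above,
 * shown in isolation.  PG_V, PG_RW, and PG_U are the architectural values;
 * PG_W_DEMO is an assumed stand-in for the software "wired" bit, and the
 * PAE pg_nx handling is omitted.  This is a sketch, not the kernel path.
 */
#include <assert.h>
#include <stdint.h>

#define PG_V_DEMO  0x001u       /* valid */
#define PG_RW_DEMO 0x002u       /* writable */
#define PG_U_DEMO  0x004u       /* user accessible */
#define PG_W_DEMO  0x200u       /* assumed software "wired" bit */

static uint32_t
assemble_pte(uint32_t pa, int writable, int wired, int user)
{
        uint32_t pte;

        pte = pa | PG_V_DEMO;
        if (writable)
                pte |= PG_RW_DEMO;
        if (wired)
                pte |= PG_W_DEMO;
        if (user)
                pte |= PG_U_DEMO;
        return (pte);
}

int
main(void)
{
        /* A wired, writable user mapping of the frame at 0x00400000. */
        assert(assemble_pte(0x00400000u, 1, 1, 1) ==
            (0x00400000u | PG_V_DEMO | PG_RW_DEMO | PG_W_DEMO | PG_U_DEMO));
        return (0);
}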
 3269 
 3270 /*
 3271  * Tries to create a 2- or 4MB page mapping.  Returns TRUE if successful and
 3272  * FALSE otherwise.  Fails if (1) a page table page cannot be allocated without
 3273  * blocking, (2) a mapping already exists at the specified virtual address, or
 3274  * (3) a pv entry cannot be allocated without reclaiming another pv entry. 
 3275  */
 3276 static boolean_t
 3277 pmap_enter_pde(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot)
 3278 {
 3279         pd_entry_t *pde, newpde;
 3280 
 3281         mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 3282         PMAP_LOCK_ASSERT(pmap, MA_OWNED);
 3283         pde = pmap_pde(pmap, va);
 3284         if (*pde != 0) {
 3285                 CTR2(KTR_PMAP, "pmap_enter_pde: failure for va %#lx"
 3286                     " in pmap %p", va, pmap);
 3287                 return (FALSE);
 3288         }
 3289         newpde = VM_PAGE_TO_PHYS(m) | pmap_cache_bits(m->md.pat_mode, 1) |
 3290             PG_PS | PG_V;
 3291         if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0) {
 3292                 newpde |= PG_MANAGED;
 3293 
 3294                 /*
 3295                  * Abort this mapping if its PV entry could not be created.
 3296                  */
 3297                 if (!pmap_pv_insert_pde(pmap, va, VM_PAGE_TO_PHYS(m))) {
 3298                         CTR2(KTR_PMAP, "pmap_enter_pde: failure for va %#lx"
 3299                             " in pmap %p", va, pmap);
 3300                         return (FALSE);
 3301                 }
 3302         }
 3303 #ifdef PAE
 3304         if ((prot & VM_PROT_EXECUTE) == 0)
 3305                 newpde |= pg_nx;
 3306 #endif
 3307         if (va < VM_MAXUSER_ADDRESS)
 3308                 newpde |= PG_U;
 3309 
 3310         /*
 3311          * Increment counters.
 3312          */
 3313         pmap->pm_stats.resident_count += NBPDR / PAGE_SIZE;
 3314 
 3315         /*
 3316          * Map the superpage.
 3317          */
 3318         pde_store(pde, newpde);
 3319 
 3320         pmap_pde_mappings++;
 3321         CTR2(KTR_PMAP, "pmap_enter_pde: success for va %#lx"
 3322             " in pmap %p", va, pmap);
 3323         return (TRUE);
 3324 }
 3325 
 3326 /*
 3327  * Maps a sequence of resident pages belonging to the same object.
 3328  * The sequence begins with the given page m_start.  This page is
 3329  * mapped at the given virtual address start.  Each subsequent page is
 3330  * mapped at a virtual address that is offset from start by the same
 3331  * amount as the page is offset from m_start within the object.  The
 3332  * last page in the sequence is the page with the largest offset from
 3333  * m_start that can be mapped at a virtual address less than the given
 3334  * virtual address end.  Not every virtual page between start and end
 3335  * is mapped; only those for which a resident page exists with the
 3336  * corresponding offset from m_start are mapped.
 3337  */
 3338 void
 3339 pmap_enter_object(pmap_t pmap, vm_offset_t start, vm_offset_t end,
 3340     vm_page_t m_start, vm_prot_t prot)
 3341 {
 3342         vm_offset_t va;
 3343         vm_page_t m, mpte;
 3344         vm_pindex_t diff, psize;
 3345 
 3346         VM_OBJECT_LOCK_ASSERT(m_start->object, MA_OWNED);
 3347         psize = atop(end - start);
 3348         mpte = NULL;
 3349         m = m_start;
 3350         PMAP_LOCK(pmap);
 3351         while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) {
 3352                 va = start + ptoa(diff);
 3353                 if ((va & PDRMASK) == 0 && va + NBPDR <= end &&
 3354                     (VM_PAGE_TO_PHYS(m) & PDRMASK) == 0 &&
 3355                     pg_ps_enabled && vm_reserv_level_iffullpop(m) == 0 &&
 3356                     pmap_enter_pde(pmap, va, m, prot))
 3357                         m = &m[NBPDR / PAGE_SIZE - 1];
 3358                 else
 3359                         mpte = pmap_enter_quick_locked(pmap, va, m, prot,
 3360                             mpte);
 3361                 m = TAILQ_NEXT(m, listq);
 3362         }
 3363         PMAP_UNLOCK(pmap);
 3364 }
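/*
 * Illustrative sketch (editor's example, not from pmap.c itself): the
 * address-alignment part of the test that pmap_enter_object() applies
 * above before attempting a 2/4MB mapping (the pg_ps_enabled and
 * reservation checks are omitted).  The virtual address must sit on a
 * superpage boundary, the whole superpage must end at or before "end",
 * and the physical address must be equally aligned.  NBPDR_DEMO assumes
 * the non-PAE 4MB superpage size.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define NBPDR_DEMO   0x00400000u
#define PDRMASK_DEMO (NBPDR_DEMO - 1)

static bool
superpage_candidate(uint32_t va, uint32_t end, uint32_t pa)
{
        return ((va & PDRMASK_DEMO) == 0 && va + NBPDR_DEMO <= end &&
            (pa & PDRMASK_DEMO) == 0);
}

int
main(void)
{
        /* Aligned VA and PA with room before "end": a candidate. */
        printf("%d\n", superpage_candidate(0x00800000u, 0x01000000u,
            0x04000000u));
        /* Misaligned physical address: falls back to 4KB mappings. */
        printf("%d\n", superpage_candidate(0x00800000u, 0x01000000u,
            0x04001000u));
        return (0);
}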
 3365 
 3366 /*
 3367  * This code makes some *MAJOR* assumptions:
 3368  * 1. The current pmap and the target pmap exist.
 3369  * 2. Not wired.
 3370  * 3. Read access.
 3371  * 4. No page table pages.
 3372  * but is *MUCH* faster than pmap_enter...
 3373  */
 3374 
 3375 void
 3376 pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot)
 3377 {
 3378 
 3379         PMAP_LOCK(pmap);
 3380         (void) pmap_enter_quick_locked(pmap, va, m, prot, NULL);
 3381         PMAP_UNLOCK(pmap);
 3382 }
 3383 
 3384 static vm_page_t
 3385 pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va, vm_page_t m,
 3386     vm_prot_t prot, vm_page_t mpte)
 3387 {
 3388         pt_entry_t *pte;
 3389         vm_paddr_t pa;
 3390         vm_page_t free;
 3391 
 3392         KASSERT(va < kmi.clean_sva || va >= kmi.clean_eva ||
 3393             (m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0,
 3394             ("pmap_enter_quick_locked: managed mapping within the clean submap"));
 3395         mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 3396         PMAP_LOCK_ASSERT(pmap, MA_OWNED);
 3397 
 3398         /*
 3399          * In the case that a page table page is not
 3400          * resident, we are creating it here.
 3401          */
 3402         if (va < VM_MAXUSER_ADDRESS) {
 3403                 unsigned ptepindex;
 3404                 pd_entry_t ptepa;
 3405 
 3406                 /*
 3407                  * Calculate pagetable page index
 3408                  */
 3409                 ptepindex = va >> PDRSHIFT;
 3410                 if (mpte && (mpte->pindex == ptepindex)) {
 3411                         mpte->wire_count++;
 3412                 } else {
 3413                         /*
 3414                          * Get the page directory entry
 3415                          */
 3416                         ptepa = pmap->pm_pdir[ptepindex];
 3417 
 3418                         /*
 3419                          * If the page table page is mapped, we just increment
 3420                          * the hold count, and activate it.
 3421                          */
 3422                         if (ptepa) {
 3423                                 if (ptepa & PG_PS)
 3424                                         return (NULL);
 3425                                 mpte = PHYS_TO_VM_PAGE(ptepa & PG_FRAME);
 3426                                 mpte->wire_count++;
 3427                         } else {
 3428                                 mpte = _pmap_allocpte(pmap, ptepindex,
 3429                                     M_NOWAIT);
 3430                                 if (mpte == NULL)
 3431                                         return (mpte);
 3432                         }
 3433                 }
 3434         } else {
 3435                 mpte = NULL;
 3436         }
 3437 
 3438         /*
 3439          * This call to vtopte makes the assumption that we are
 3440          * entering the page into the current pmap.  In order to support
 3441          * quick entry into any pmap, one would likely use pmap_pte_quick.
 3442          * But that isn't as quick as vtopte.
 3443          */
 3444         pte = vtopte(va);
 3445         if (*pte) {
 3446                 if (mpte != NULL) {
 3447                         mpte->wire_count--;
 3448                         mpte = NULL;
 3449                 }
 3450                 return (mpte);
 3451         }
 3452 
 3453         /*
 3454          * Enter on the PV list if part of our managed memory.
 3455          */
 3456         if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0 &&
 3457             !pmap_try_insert_pv_entry(pmap, va, m)) {
 3458                 if (mpte != NULL) {
 3459                         free = NULL;
 3460                         if (pmap_unwire_pte_hold(pmap, mpte, &free)) {
 3461                                 pmap_invalidate_page(pmap, va);
 3462                                 pmap_free_zero_pages(free);
 3463                         }
 3464                         
 3465                         mpte = NULL;
 3466                 }
 3467                 return (mpte);
 3468         }
 3469 
 3470         /*
 3471          * Increment counters
 3472          */
 3473         pmap->pm_stats.resident_count++;
 3474 
 3475         pa = VM_PAGE_TO_PHYS(m) | pmap_cache_bits(m->md.pat_mode, 0);
 3476 #ifdef PAE
 3477         if ((prot & VM_PROT_EXECUTE) == 0)
 3478                 pa |= pg_nx;
 3479 #endif
 3480 
 3481         /*
 3482          * Now validate mapping with RO protection
 3483          */
 3484         if (m->flags & (PG_FICTITIOUS|PG_UNMANAGED))
 3485                 pte_store(pte, pa | PG_V | PG_U);
 3486         else
 3487                 pte_store(pte, pa | PG_V | PG_U | PG_MANAGED);
 3488         return (mpte);
 3489 }
 3490 
 3491 /*
 3492  * Make a temporary mapping for a physical address.  This is only intended
 3493  * to be used for panic dumps.
 3494  */
 3495 void *
 3496 pmap_kenter_temporary(vm_paddr_t pa, int i)
 3497 {
 3498         vm_offset_t va;
 3499 
 3500         va = (vm_offset_t)crashdumpmap + (i * PAGE_SIZE);
 3501         pmap_kenter(va, pa);
 3502         invlpg(va);
 3503         return ((void *)crashdumpmap);
 3504 }
 3505 
 3506 /*
 3507  * This code maps large physical mmap regions into the
 3508  * processor address space.  Note that some shortcuts
 3509  * are taken, but the code works.
 3510  */
 3511 void
 3512 pmap_object_init_pt(pmap_t pmap, vm_offset_t addr, vm_object_t object,
 3513     vm_pindex_t pindex, vm_size_t size)
 3514 {
 3515         pd_entry_t *pde;
 3516         vm_paddr_t pa, ptepa;
 3517         vm_page_t p;
 3518         int pat_mode;
 3519 
 3520         VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
 3521         KASSERT(object->type == OBJT_DEVICE || object->type == OBJT_SG,
 3522             ("pmap_object_init_pt: non-device object"));
 3523         if (pseflag && 
 3524             (addr & (NBPDR - 1)) == 0 && (size & (NBPDR - 1)) == 0) {
 3525                 if (!vm_object_populate(object, pindex, pindex + atop(size)))
 3526                         return;
 3527                 p = vm_page_lookup(object, pindex);
 3528                 KASSERT(p->valid == VM_PAGE_BITS_ALL,
 3529                     ("pmap_object_init_pt: invalid page %p", p));
 3530                 pat_mode = p->md.pat_mode;
 3531 
 3532                 /*
 3533                  * Abort the mapping if the first page is not physically
 3534                  * aligned to a 2/4MB page boundary.
 3535                  */
 3536                 ptepa = VM_PAGE_TO_PHYS(p);
 3537                 if (ptepa & (NBPDR - 1))
 3538                         return;
 3539 
 3540                 /*
 3541                  * Skip the first page.  Abort the mapping if the rest of
 3542                  * the pages are not physically contiguous or have differing
 3543                  * memory attributes.
 3544                  */
 3545                 p = TAILQ_NEXT(p, listq);
 3546                 for (pa = ptepa + PAGE_SIZE; pa < ptepa + size;
 3547                     pa += PAGE_SIZE) {
 3548                         KASSERT(p->valid == VM_PAGE_BITS_ALL,
 3549                             ("pmap_object_init_pt: invalid page %p", p));
 3550                         if (pa != VM_PAGE_TO_PHYS(p) ||
 3551                             pat_mode != p->md.pat_mode)
 3552                                 return;
 3553                         p = TAILQ_NEXT(p, listq);
 3554                 }
 3555 
 3556                 /*
 3557                  * Map using 2/4MB pages.  Since "ptepa" is 2/4M aligned and
 3558                  * "size" is a multiple of 2/4M, adding the PAT setting to
 3559                  * "pa" will not affect the termination of this loop.
 3560                  */
 3561                 PMAP_LOCK(pmap);
 3562                 for (pa = ptepa | pmap_cache_bits(pat_mode, 1); pa < ptepa +
 3563                     size; pa += NBPDR) {
 3564                         pde = pmap_pde(pmap, addr);
 3565                         if (*pde == 0) {
 3566                                 pde_store(pde, pa | PG_PS | PG_M | PG_A |
 3567                                     PG_U | PG_RW | PG_V);
 3568                                 pmap->pm_stats.resident_count += NBPDR /
 3569                                     PAGE_SIZE;
 3570                                 pmap_pde_mappings++;
 3571                         }
 3572                         /* Else continue on if the PDE is already valid. */
 3573                         addr += NBPDR;
 3574                 }
 3575                 PMAP_UNLOCK(pmap);
 3576         }
 3577 }
 3578 
 3579 /*
 3580  *      Routine:        pmap_change_wiring
 3581  *      Function:       Change the wiring attribute for a map/virtual-address
 3582  *                      pair.
 3583  *      In/out conditions:
 3584  *                      The mapping must already exist in the pmap.
 3585  */
 3586 void
 3587 pmap_change_wiring(pmap_t pmap, vm_offset_t va, boolean_t wired)
 3588 {
 3589         pd_entry_t *pde;
 3590         pt_entry_t *pte;
 3591         boolean_t are_queues_locked;
 3592 
 3593         are_queues_locked = FALSE;
 3594 retry:
 3595         PMAP_LOCK(pmap);
 3596         pde = pmap_pde(pmap, va);
 3597         if ((*pde & PG_PS) != 0) {
 3598                 if (!wired != ((*pde & PG_W) == 0)) {
 3599                         if (!are_queues_locked) {
 3600                                 are_queues_locked = TRUE;
 3601                                 if (!mtx_trylock(&vm_page_queue_mtx)) {
 3602                                         PMAP_UNLOCK(pmap);
 3603                                         vm_page_lock_queues();
 3604                                         goto retry;
 3605                                 }
 3606                         }
 3607                         if (!pmap_demote_pde(pmap, pde, va))
 3608                                 panic("pmap_change_wiring: demotion failed");
 3609                 } else
 3610                         goto out;
 3611         }
 3612         pte = pmap_pte(pmap, va);
 3613 
 3614         if (wired && !pmap_pte_w(pte))
 3615                 pmap->pm_stats.wired_count++;
 3616         else if (!wired && pmap_pte_w(pte))
 3617                 pmap->pm_stats.wired_count--;
 3618 
 3619         /*
 3620          * Wiring is not a hardware characteristic so there is no need to
 3621          * invalidate TLB.
 3622          */
 3623         pmap_pte_set_w(pte, wired);
 3624         pmap_pte_release(pte);
 3625 out:
 3626         if (are_queues_locked)
 3627                 vm_page_unlock_queues();
 3628         PMAP_UNLOCK(pmap);
 3629 }
 3630 
 3631 
 3632 
 3633 /*
 3634  *      Copy the range specified by src_addr/len
 3635  *      from the source map to the range dst_addr/len
 3636  *      in the destination map.
 3637  *
 3638  *      This routine is only advisory and need not do anything.
 3639  */
 3640 
 3641 void
 3642 pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr, vm_size_t len,
 3643     vm_offset_t src_addr)
 3644 {
 3645         vm_page_t   free;
 3646         vm_offset_t addr;
 3647         vm_offset_t end_addr = src_addr + len;
 3648         vm_offset_t pdnxt;
 3649 
 3650         if (dst_addr != src_addr)
 3651                 return;
 3652 
 3653         if (!pmap_is_current(src_pmap))
 3654                 return;
 3655 
 3656         vm_page_lock_queues();
 3657         if (dst_pmap < src_pmap) {
 3658                 PMAP_LOCK(dst_pmap);
 3659                 PMAP_LOCK(src_pmap);
 3660         } else {
 3661                 PMAP_LOCK(src_pmap);
 3662                 PMAP_LOCK(dst_pmap);
 3663         }
 3664         sched_pin();
 3665         for (addr = src_addr; addr < end_addr; addr = pdnxt) {
 3666                 pt_entry_t *src_pte, *dst_pte;
 3667                 vm_page_t dstmpte, srcmpte;
 3668                 pd_entry_t srcptepaddr;
 3669                 unsigned ptepindex;
 3670 
 3671                 KASSERT(addr < UPT_MIN_ADDRESS,
 3672                     ("pmap_copy: invalid to pmap_copy page tables"));
 3673 
 3674                 pdnxt = (addr + NBPDR) & ~PDRMASK;
 3675                 if (pdnxt < addr)
 3676                         pdnxt = end_addr;
 3677                 ptepindex = addr >> PDRSHIFT;
 3678 
 3679                 srcptepaddr = src_pmap->pm_pdir[ptepindex];
 3680                 if (srcptepaddr == 0)
 3681                         continue;
 3682                         
 3683                 if (srcptepaddr & PG_PS) {
 3684                         if (dst_pmap->pm_pdir[ptepindex] == 0 &&
 3685                             ((srcptepaddr & PG_MANAGED) == 0 ||
 3686                             pmap_pv_insert_pde(dst_pmap, addr, srcptepaddr &
 3687                             PG_PS_FRAME))) {
 3688                                 dst_pmap->pm_pdir[ptepindex] = srcptepaddr &
 3689                                     ~PG_W;
 3690                                 dst_pmap->pm_stats.resident_count +=
 3691                                     NBPDR / PAGE_SIZE;
 3692                         }
 3693                         continue;
 3694                 }
 3695 
 3696                 srcmpte = PHYS_TO_VM_PAGE(srcptepaddr & PG_FRAME);
 3697                 KASSERT(srcmpte->wire_count > 0,
 3698                     ("pmap_copy: source page table page is unused"));
 3699 
 3700                 if (pdnxt > end_addr)
 3701                         pdnxt = end_addr;
 3702 
 3703                 src_pte = vtopte(addr);
 3704                 while (addr < pdnxt) {
 3705                         pt_entry_t ptetemp;
 3706                         ptetemp = *src_pte;
 3707                         /*
 3708                          * we only virtual copy managed pages
 3709                          */
 3710                         if ((ptetemp & PG_MANAGED) != 0) {
 3711                                 dstmpte = pmap_allocpte(dst_pmap, addr,
 3712                                     M_NOWAIT);
 3713                                 if (dstmpte == NULL)
 3714                                         goto out;
 3715                                 dst_pte = pmap_pte_quick(dst_pmap, addr);
 3716                                 if (*dst_pte == 0 &&
 3717                                     pmap_try_insert_pv_entry(dst_pmap, addr,
 3718                                     PHYS_TO_VM_PAGE(ptetemp & PG_FRAME))) {
 3719                                         /*
 3720                                          * Clear the wired, modified, and
 3721                                          * accessed (referenced) bits
 3722                                          * during the copy.
 3723                                          */
 3724                                         *dst_pte = ptetemp & ~(PG_W | PG_M |
 3725                                             PG_A);
 3726                                         dst_pmap->pm_stats.resident_count++;
 3727                                 } else {
 3728                                         free = NULL;
 3729                                         if (pmap_unwire_pte_hold(dst_pmap,
 3730                                             dstmpte, &free)) {
 3731                                                 pmap_invalidate_page(dst_pmap,
 3732                                                     addr);
 3733                                                 pmap_free_zero_pages(free);
 3734                                         }
 3735                                         goto out;
 3736                                 }
 3737                                 if (dstmpte->wire_count >= srcmpte->wire_count)
 3738                                         break;
 3739                         }
 3740                         addr += PAGE_SIZE;
 3741                         src_pte++;
 3742                 }
 3743         }
 3744 out:
 3745         sched_unpin();
 3746         vm_page_unlock_queues();
 3747         PMAP_UNLOCK(src_pmap);
 3748         PMAP_UNLOCK(dst_pmap);
 3749 }       
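/*
 * Illustrative sketch (editor's example, not from pmap.c itself): the
 * lock-ordering idiom used at the top of pmap_copy() above.  Acquiring the
 * two pmap locks in a fixed (address) order prevents an A/B-B/A deadlock
 * when two threads copy between the same pair of pmaps in opposite
 * directions.  POSIX mutexes stand in for the kernel's PMAP_LOCK().
 */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock_a = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t lock_b = PTHREAD_MUTEX_INITIALIZER;

static void
lock_pair_in_order(pthread_mutex_t *x, pthread_mutex_t *y)
{
        /* Always take the lower-addressed lock first. */
        if (x < y) {
                pthread_mutex_lock(x);
                pthread_mutex_lock(y);
        } else {
                pthread_mutex_lock(y);
                pthread_mutex_lock(x);
        }
}

int
main(void)
{
        /* Both call orders acquire the locks in the same global order. */
        lock_pair_in_order(&lock_a, &lock_b);
        pthread_mutex_unlock(&lock_a);
        pthread_mutex_unlock(&lock_b);
        lock_pair_in_order(&lock_b, &lock_a);
        pthread_mutex_unlock(&lock_a);
        pthread_mutex_unlock(&lock_b);
        printf("both orders completed without deadlock\n");
        return (0);
}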
 3750 
 3751 static __inline void
 3752 pagezero(void *page)
 3753 {
 3754 #if defined(I686_CPU)
 3755         if (cpu_class == CPUCLASS_686) {
 3756 #if defined(CPU_ENABLE_SSE)
 3757                 if (cpu_feature & CPUID_SSE2)
 3758                         sse2_pagezero(page);
 3759                 else
 3760 #endif
 3761                         i686_pagezero(page);
 3762         } else
 3763 #endif
 3764                 bzero(page, PAGE_SIZE);
 3765 }
 3766 
 3767 /*
 3768  *      pmap_zero_page zeros the specified hardware page by mapping 
 3769  *      the page into KVM and using bzero to clear its contents.
 3770  */
 3771 void
 3772 pmap_zero_page(vm_page_t m)
 3773 {
 3774         struct sysmaps *sysmaps;
 3775 
 3776         sysmaps = &sysmaps_pcpu[PCPU_GET(cpuid)];
 3777         mtx_lock(&sysmaps->lock);
 3778         if (*sysmaps->CMAP2)
 3779                 panic("pmap_zero_page: CMAP2 busy");
 3780         sched_pin();
 3781         *sysmaps->CMAP2 = PG_V | PG_RW | VM_PAGE_TO_PHYS(m) | PG_A | PG_M |
 3782             pmap_cache_bits(m->md.pat_mode, 0);
 3783         invlcaddr(sysmaps->CADDR2);
 3784         pagezero(sysmaps->CADDR2);
 3785         *sysmaps->CMAP2 = 0;
 3786         sched_unpin();
 3787         mtx_unlock(&sysmaps->lock);
 3788 }
 3789 
 3790 /*
 3791  *      pmap_zero_page_area zeros the specified hardware page by mapping 
 3792  *      the page into KVM and using bzero to clear its contents.
 3793  *
 3794  *      off and size may not cover an area beyond a single hardware page.
 3795  */
 3796 void
 3797 pmap_zero_page_area(vm_page_t m, int off, int size)
 3798 {
 3799         struct sysmaps *sysmaps;
 3800 
 3801         sysmaps = &sysmaps_pcpu[PCPU_GET(cpuid)];
 3802         mtx_lock(&sysmaps->lock);
 3803         if (*sysmaps->CMAP2)
 3804                 panic("pmap_zero_page_area: CMAP2 busy");
 3805         sched_pin();
 3806         *sysmaps->CMAP2 = PG_V | PG_RW | VM_PAGE_TO_PHYS(m) | PG_A | PG_M |
 3807             pmap_cache_bits(m->md.pat_mode, 0);
 3808         invlcaddr(sysmaps->CADDR2);
 3809         if (off == 0 && size == PAGE_SIZE) 
 3810                 pagezero(sysmaps->CADDR2);
 3811         else
 3812                 bzero((char *)sysmaps->CADDR2 + off, size);
 3813         *sysmaps->CMAP2 = 0;
 3814         sched_unpin();
 3815         mtx_unlock(&sysmaps->lock);
 3816 }
 3817 
 3818 /*
 3819  *      pmap_zero_page_idle zeros the specified hardware page by mapping 
 3820  *      the page into KVM and using bzero to clear its contents.  This
 3821  *      is intended to be called from the vm_pagezero process only and
 3822  *      outside of Giant.
 3823  */
 3824 void
 3825 pmap_zero_page_idle(vm_page_t m)
 3826 {
 3827 
 3828         if (*CMAP3)
 3829                 panic("pmap_zero_page_idle: CMAP3 busy");
 3830         sched_pin();
 3831         *CMAP3 = PG_V | PG_RW | VM_PAGE_TO_PHYS(m) | PG_A | PG_M |
 3832             pmap_cache_bits(m->md.pat_mode, 0);
 3833         invlcaddr(CADDR3);
 3834         pagezero(CADDR3);
 3835         *CMAP3 = 0;
 3836         sched_unpin();
 3837 }
 3838 
 3839 /*
 3840  *      pmap_copy_page copies the specified (machine independent)
 3841  *      page by mapping the page into virtual memory and using
 3842  *      bcopy to copy the page, one machine dependent page at a
 3843  *      time.
 3844  */
 3845 void
 3846 pmap_copy_page(vm_page_t src, vm_page_t dst)
 3847 {
 3848         struct sysmaps *sysmaps;
 3849 
 3850         sysmaps = &sysmaps_pcpu[PCPU_GET(cpuid)];
 3851         mtx_lock(&sysmaps->lock);
 3852         if (*sysmaps->CMAP1)
 3853                 panic("pmap_copy_page: CMAP1 busy");
 3854         if (*sysmaps->CMAP2)
 3855                 panic("pmap_copy_page: CMAP2 busy");
 3856         sched_pin();
 3857         invlpg((u_int)sysmaps->CADDR1);
 3858         invlpg((u_int)sysmaps->CADDR2);
 3859         *sysmaps->CMAP1 = PG_V | VM_PAGE_TO_PHYS(src) | PG_A |
 3860             pmap_cache_bits(src->md.pat_mode, 0);
 3861         *sysmaps->CMAP2 = PG_V | PG_RW | VM_PAGE_TO_PHYS(dst) | PG_A | PG_M |
 3862             pmap_cache_bits(dst->md.pat_mode, 0);
 3863         bcopy(sysmaps->CADDR1, sysmaps->CADDR2, PAGE_SIZE);
 3864         *sysmaps->CMAP1 = 0;
 3865         *sysmaps->CMAP2 = 0;
 3866         sched_unpin();
 3867         mtx_unlock(&sysmaps->lock);
 3868 }
 3869 
 3870 /*
 3871  * Returns true if the pmap's pv is one of the first
 3872  * 16 pvs linked to from this page.  This count may
 3873  * be changed upwards or downwards in the future; it
 3874  * is only necessary that true be returned for a small
 3875  * subset of pmaps for proper page aging.
 3876  */
 3877 boolean_t
 3878 pmap_page_exists_quick(pmap_t pmap, vm_page_t m)
 3879 {
 3880         struct md_page *pvh;
 3881         pv_entry_t pv;
 3882         int loops = 0;
 3883 
 3884         if (m->flags & PG_FICTITIOUS)
 3885                 return (FALSE);
 3886 
 3887         mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 3888         TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
 3889                 if (PV_PMAP(pv) == pmap) {
 3890                         return (TRUE);
 3891                 }
 3892                 loops++;
 3893                 if (loops >= 16)
 3894                         break;
 3895         }
 3896         if (loops < 16) {
 3897                 pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
 3898                 TAILQ_FOREACH(pv, &pvh->pv_list, pv_list) {
 3899                         if (PV_PMAP(pv) == pmap)
 3900                                 return (TRUE);
 3901                         loops++;
 3902                         if (loops >= 16)
 3903                                 break;
 3904                 }
 3905         }
 3906         return (FALSE);
 3907 }
 3908 
 3909 /*
 3910  *      pmap_page_wired_mappings:
 3911  *
 3912  *      Return the number of managed mappings to the given physical page
 3913  *      that are wired.
 3914  */
 3915 int
 3916 pmap_page_wired_mappings(vm_page_t m)
 3917 {
 3918         int count;
 3919 
 3920         count = 0;
 3921         if ((m->flags & PG_FICTITIOUS) != 0)
 3922                 return (count);
 3923         count = pmap_pvh_wired_mappings(&m->md, count);
 3924         return (pmap_pvh_wired_mappings(pa_to_pvh(VM_PAGE_TO_PHYS(m)), count));
 3925 }
 3926 
 3927 /*
 3928  *      pmap_pvh_wired_mappings:
 3929  *
 3930  *      Return the updated number "count" of managed mappings that are wired.
 3931  */
 3932 static int
 3933 pmap_pvh_wired_mappings(struct md_page *pvh, int count)
 3934 {
 3935         pmap_t pmap;
 3936         pt_entry_t *pte;
 3937         pv_entry_t pv;
 3938 
 3939         mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 3940         sched_pin();
 3941         TAILQ_FOREACH(pv, &pvh->pv_list, pv_list) {
 3942                 pmap = PV_PMAP(pv);
 3943                 PMAP_LOCK(pmap);
 3944                 pte = pmap_pte_quick(pmap, pv->pv_va);
 3945                 if ((*pte & PG_W) != 0)
 3946                         count++;
 3947                 PMAP_UNLOCK(pmap);
 3948         }
 3949         sched_unpin();
 3950         return (count);
 3951 }
 3952 
 3953 /*
 3954  * Returns TRUE if the given page is mapped individually or as part of
 3955  * a 4mpage.  Otherwise, returns FALSE.
 3956  */
 3957 boolean_t
 3958 pmap_page_is_mapped(vm_page_t m)
 3959 {
 3960         struct md_page *pvh;
 3961 
 3962         if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0)
 3963                 return (FALSE);
 3964         mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 3965         if (TAILQ_EMPTY(&m->md.pv_list)) {
 3966                 pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
 3967                 return (!TAILQ_EMPTY(&pvh->pv_list));
 3968         } else
 3969                 return (TRUE);
 3970 }
 3971 
 3972 /*
 3973  * Remove all pages from the specified address space;
 3974  * this aids process exit speeds.  Also, this code
 3975  * is special-cased for the current process only, but
 3976  * can have the more generic (and slightly slower)
 3977  * mode enabled.  This is much faster than pmap_remove
 3978  * in the case of running down an entire address space.
 3979  */
 3980 void
 3981 pmap_remove_pages(pmap_t pmap)
 3982 {
 3983         pt_entry_t *pte, tpte;
 3984         vm_page_t free = NULL;
 3985         vm_page_t m, mpte, mt;
 3986         pv_entry_t pv;
 3987         struct md_page *pvh;
 3988         struct pv_chunk *pc, *npc;
 3989         int field, idx;
 3990         int32_t bit;
 3991         uint32_t inuse, bitmask;
 3992         int allfree;
 3993 
 3994         if (pmap != vmspace_pmap(curthread->td_proc->p_vmspace)) {
 3995                 printf("warning: pmap_remove_pages called with non-current pmap\n");
 3996                 return;
 3997         }
 3998         vm_page_lock_queues();
 3999         PMAP_LOCK(pmap);
 4000         sched_pin();
 4001         TAILQ_FOREACH_SAFE(pc, &pmap->pm_pvchunk, pc_list, npc) {
 4002                 allfree = 1;
 4003                 for (field = 0; field < _NPCM; field++) {
 4004                         inuse = (~(pc->pc_map[field])) & pc_freemask[field];
 4005                         while (inuse != 0) {
 4006                                 bit = bsfl(inuse);
 4007                                 bitmask = 1UL << bit;
 4008                                 idx = field * 32 + bit;
 4009                                 pv = &pc->pc_pventry[idx];
 4010                                 inuse &= ~bitmask;
 4011 
 4012                                 pte = pmap_pde(pmap, pv->pv_va);
 4013                                 tpte = *pte;
 4014                                 if ((tpte & PG_PS) == 0) {
 4015                                         pte = vtopte(pv->pv_va);
 4016                                         tpte = *pte & ~PG_PTE_PAT;
 4017                                 }
 4018 
 4019                                 if (tpte == 0) {
 4020                                         printf(
 4021                                             "TPTE at %p  IS ZERO @ VA %08x\n",
 4022                                             pte, pv->pv_va);
 4023                                         panic("bad pte");
 4024                                 }
 4025 
 4026 /*
 4027  * We cannot remove wired pages from a process' mapping at this time
 4028  */
 4029                                 if (tpte & PG_W) {
 4030                                         allfree = 0;
 4031                                         continue;
 4032                                 }
 4033 
 4034                                 m = PHYS_TO_VM_PAGE(tpte & PG_FRAME);
 4035                                 KASSERT(m->phys_addr == (tpte & PG_FRAME),
 4036                                     ("vm_page_t %p phys_addr mismatch %016jx %016jx",
 4037                                     m, (uintmax_t)m->phys_addr,
 4038                                     (uintmax_t)tpte));
 4039 
 4040                                 KASSERT(m < &vm_page_array[vm_page_array_size],
 4041                                         ("pmap_remove_pages: bad tpte %#jx",
 4042                                         (uintmax_t)tpte));
 4043 
 4044                                 pte_clear(pte);
 4045 
 4046                                 /*
 4047                                  * Update the vm_page_t clean/reference bits.
 4048                                  */
 4049                                 if ((tpte & (PG_M | PG_RW)) == (PG_M | PG_RW)) {
 4050                                         if ((tpte & PG_PS) != 0) {
 4051                                                 for (mt = m; mt < &m[NBPDR / PAGE_SIZE]; mt++)
 4052                                                         vm_page_dirty(mt);
 4053                                         } else
 4054                                                 vm_page_dirty(m);
 4055                                 }
 4056 
 4057                                 /* Mark free */
 4058                                 PV_STAT(pv_entry_frees++);
 4059                                 PV_STAT(pv_entry_spare++);
 4060                                 pv_entry_count--;
 4061                                 pc->pc_map[field] |= bitmask;
 4062                                 if ((tpte & PG_PS) != 0) {
 4063                                         pmap->pm_stats.resident_count -= NBPDR / PAGE_SIZE;
 4064                                         pvh = pa_to_pvh(tpte & PG_PS_FRAME);
 4065                                         TAILQ_REMOVE(&pvh->pv_list, pv, pv_list);
 4066                                         if (TAILQ_EMPTY(&pvh->pv_list)) {
 4067                                                 for (mt = m; mt < &m[NBPDR / PAGE_SIZE]; mt++)
 4068                                                         if (TAILQ_EMPTY(&mt->md.pv_list))
 4069                                                                 vm_page_flag_clear(mt, PG_WRITEABLE);
 4070                                         }
 4071                                         mpte = pmap_lookup_pt_page(pmap, pv->pv_va);
 4072                                         if (mpte != NULL) {
 4073                                                 pmap_remove_pt_page(pmap, mpte);
 4074                                                 pmap->pm_stats.resident_count--;
 4075                                                 KASSERT(mpte->wire_count == NPTEPG,
 4076                                                     ("pmap_remove_pages: pte page wire count error"));
 4077                                                 mpte->wire_count = 0;
 4078                                                 pmap_add_delayed_free_list(mpte, &free, FALSE);
 4079                                                 atomic_subtract_int(&cnt.v_wire_count, 1);
 4080                                         }
 4081                                 } else {
 4082                                         pmap->pm_stats.resident_count--;
 4083                                         TAILQ_REMOVE(&m->md.pv_list, pv, pv_list);
 4084                                         if (TAILQ_EMPTY(&m->md.pv_list)) {
 4085                                                 pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
 4086                                                 if (TAILQ_EMPTY(&pvh->pv_list))
 4087                                                         vm_page_flag_clear(m, PG_WRITEABLE);
 4088                                         }
 4089                                         pmap_unuse_pt(pmap, pv->pv_va, &free);
 4090                                 }
 4091                         }
 4092                 }
 4093                 if (allfree) {
 4094                         PV_STAT(pv_entry_spare -= _NPCPV);
 4095                         PV_STAT(pc_chunk_count--);
 4096                         PV_STAT(pc_chunk_frees++);
 4097                         TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
 4098                         m = PHYS_TO_VM_PAGE(pmap_kextract((vm_offset_t)pc));
 4099                         pmap_qremove((vm_offset_t)pc, 1);
 4100                         vm_page_unwire(m, 0);
 4101                         vm_page_free(m);
 4102                         pmap_ptelist_free(&pv_vafree, (vm_offset_t)pc);
 4103                 }
 4104         }
 4105         sched_unpin();
 4106         pmap_invalidate_all(pmap);
 4107         vm_page_unlock_queues();
 4108         PMAP_UNLOCK(pmap);
 4109         pmap_free_zero_pages(free);
 4110 }
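/*
 * Illustrative sketch (editor's example, not from pmap.c itself): the
 * bitmap walk used by pmap_remove_pages() above.  Each pv chunk keeps a
 * 32-bit map per field; repeatedly isolating and clearing the lowest set
 * bit of "inuse" visits every allocated pv entry exactly once.  ffs()
 * (one-based, from <strings.h>) stands in for the kernel's zero-based
 * bsfl(), hence the "- 1".
 */
#include <stdio.h>
#include <stdint.h>
#include <strings.h>

static void
walk_inuse(uint32_t inuse)
{
        uint32_t bitmask;
        int bit;

        while (inuse != 0) {
                bit = ffs((int)inuse) - 1;      /* index of lowest set bit */
                bitmask = 1u << bit;
                inuse &= ~bitmask;
                printf("pv entry %d is in use\n", bit);
        }
}

int
main(void)
{
        walk_inuse(0x00000095u);        /* visits entries 0, 2, 4, and 7 */
        return (0);
}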
 4111 
 4112 /*
 4113  *      pmap_is_modified:
 4114  *
 4115  *      Return whether or not the specified physical page was modified
 4116  *      in any physical maps.
 4117  */
 4118 boolean_t
 4119 pmap_is_modified(vm_page_t m)
 4120 {
 4121 
 4122         if (m->flags & PG_FICTITIOUS)
 4123                 return (FALSE);
 4124         if (pmap_is_modified_pvh(&m->md))
 4125                 return (TRUE);
 4126         return (pmap_is_modified_pvh(pa_to_pvh(VM_PAGE_TO_PHYS(m))));
 4127 }
 4128 
 4129 /*
 4130  * Returns TRUE if any of the given mappings were used to modify
 4131  * physical memory.  Otherwise, returns FALSE.  Both 4KB and 2/4MB page
 4132  * mappings are supported.
 4133  */
 4134 static boolean_t
 4135 pmap_is_modified_pvh(struct md_page *pvh)
 4136 {
 4137         pv_entry_t pv;
 4138         pt_entry_t *pte;
 4139         pmap_t pmap;
 4140         boolean_t rv;
 4141 
 4142         mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 4143         rv = FALSE;
 4144         sched_pin();
 4145         TAILQ_FOREACH(pv, &pvh->pv_list, pv_list) {
 4146                 pmap = PV_PMAP(pv);
 4147                 PMAP_LOCK(pmap);
 4148                 pte = pmap_pte_quick(pmap, pv->pv_va);
 4149                 rv = (*pte & (PG_M | PG_RW)) == (PG_M | PG_RW);
 4150                 PMAP_UNLOCK(pmap);
 4151                 if (rv)
 4152                         break;
 4153         }
 4154         sched_unpin();
 4155         return (rv);
 4156 }
 4157 
 4158 /*
 4159  *      pmap_is_prefaultable:
 4160  *
 4161  *      Return whether or not the specified virtual address is eligible
 4162  *      for prefault.
 4163  */
 4164 boolean_t
 4165 pmap_is_prefaultable(pmap_t pmap, vm_offset_t addr)
 4166 {
 4167         pd_entry_t *pde;
 4168         pt_entry_t *pte;
 4169         boolean_t rv;
 4170 
 4171         rv = FALSE;
 4172         PMAP_LOCK(pmap);
 4173         pde = pmap_pde(pmap, addr);
 4174         if (*pde != 0 && (*pde & PG_PS) == 0) {
 4175                 pte = vtopte(addr);
 4176                 rv = *pte == 0;
 4177         }
 4178         PMAP_UNLOCK(pmap);
 4179         return (rv);
 4180 }
 4181 
 4182 /*
 4183  * Clear the write and modified bits in each of the given page's mappings.
 4184  */
 4185 void
 4186 pmap_remove_write(vm_page_t m)
 4187 {
 4188         struct md_page *pvh;
 4189         pv_entry_t next_pv, pv;
 4190         pmap_t pmap;
 4191         pd_entry_t *pde;
 4192         pt_entry_t oldpte, *pte;
 4193         vm_offset_t va;
 4194 
 4195         mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 4196         if ((m->flags & PG_FICTITIOUS) != 0 ||
 4197             (m->flags & PG_WRITEABLE) == 0)
 4198                 return;
 4199         sched_pin();
 4200         pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
 4201         TAILQ_FOREACH_SAFE(pv, &pvh->pv_list, pv_list, next_pv) {
 4202                 va = pv->pv_va;
 4203                 pmap = PV_PMAP(pv);
 4204                 PMAP_LOCK(pmap);
 4205                 pde = pmap_pde(pmap, va);
 4206                 if ((*pde & PG_RW) != 0)
 4207                         (void)pmap_demote_pde(pmap, pde, va);
 4208                 PMAP_UNLOCK(pmap);
 4209         }
 4210         TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
 4211                 pmap = PV_PMAP(pv);
 4212                 PMAP_LOCK(pmap);
 4213                 pde = pmap_pde(pmap, pv->pv_va);
 4214                 KASSERT((*pde & PG_PS) == 0, ("pmap_remove_write: found"
 4215                     " a 4mpage in page %p's pv list", m));
 4216                 pte = pmap_pte_quick(pmap, pv->pv_va);
 4217 retry:
 4218                 oldpte = *pte;
 4219                 if ((oldpte & PG_RW) != 0) {
 4220                         /*
 4221                          * Regardless of whether a pte is 32 or 64 bits
 4222                          * in size, PG_RW and PG_M are among the least
 4223                          * significant 32 bits.
 4224                          */
 4225                         if (!atomic_cmpset_int((u_int *)pte, oldpte,
 4226                             oldpte & ~(PG_RW | PG_M)))
 4227                                 goto retry;
 4228                         if ((oldpte & PG_M) != 0)
 4229                                 vm_page_dirty(m);
 4230                         pmap_invalidate_page(pmap, pv->pv_va);
 4231                 }
 4232                 PMAP_UNLOCK(pmap);
 4233         }
 4234         vm_page_flag_clear(m, PG_WRITEABLE);
 4235         sched_unpin();
 4236 }
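
      /*
       * Note on structure: the first loop above demotes any writable 2/4MB
       * mappings on the page's 4MB pv list; the second loop then atomically
       * clears PG_RW and PG_M from each remaining 4KB mapping, recording a
       * set PG_M bit in the machine-independent dirty state via
       * vm_page_dirty() before invalidating the TLB entry.
       */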
 4237 
 4238 /*
 4239  *      pmap_ts_referenced:
 4240  *
 4241  *      Return a count of reference bits for a page, clearing those bits.
 4242  *      It is not necessary for every reference bit to be cleared, but it
 4243  *      is necessary that 0 only be returned when there are truly no
 4244  *      reference bits set.
 4245  *
 4246  *      XXX: The exact number of bits to check and clear is a matter that
 4247  *      should be tested and standardized at some point in the future for
 4248  *      optimal aging of shared pages.
 4249  */
 4250 int
 4251 pmap_ts_referenced(vm_page_t m)
 4252 {
 4253         struct md_page *pvh;
 4254         pv_entry_t pv, pvf, pvn;
 4255         pmap_t pmap;
 4256         pd_entry_t oldpde, *pde;
 4257         pt_entry_t *pte;
 4258         vm_offset_t va;
 4259         int rtval = 0;
 4260 
 4261         if (m->flags & PG_FICTITIOUS)
 4262                 return (rtval);
 4263         sched_pin();
 4264         mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 4265         pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
 4266         TAILQ_FOREACH_SAFE(pv, &pvh->pv_list, pv_list, pvn) {
 4267                 va = pv->pv_va;
 4268                 pmap = PV_PMAP(pv);
 4269                 PMAP_LOCK(pmap);
 4270                 pde = pmap_pde(pmap, va);
 4271                 oldpde = *pde;
 4272                 if ((oldpde & PG_A) != 0) {
 4273                         if (pmap_demote_pde(pmap, pde, va)) {
 4274                                 if ((oldpde & PG_W) == 0) {
 4275                                         /*
 4276                                          * Remove the mapping to a single page
 4277                                          * so that a subsequent access may
 4278                                          * repromote.  Since the underlying
 4279                                          * page table page is fully populated,
 4280                                          * this removal never frees a page
 4281                                          * table page.
 4282                                          */
 4283                                         va += VM_PAGE_TO_PHYS(m) - (oldpde &
 4284                                             PG_PS_FRAME);
 4285                                         pmap_remove_page(pmap, va, NULL);
 4286                                         rtval++;
 4287                                         if (rtval > 4) {
 4288                                                 PMAP_UNLOCK(pmap);
 4289                                                 return (rtval);
 4290                                         }
 4291                                 }
 4292                         }
 4293                 }
 4294                 PMAP_UNLOCK(pmap);
 4295         }
 4296         if ((pv = TAILQ_FIRST(&m->md.pv_list)) != NULL) {
 4297                 pvf = pv;
 4298                 do {
 4299                         pvn = TAILQ_NEXT(pv, pv_list);
 4300                         TAILQ_REMOVE(&m->md.pv_list, pv, pv_list);
 4301                         TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_list);
 4302                         pmap = PV_PMAP(pv);
 4303                         PMAP_LOCK(pmap);
 4304                         pde = pmap_pde(pmap, pv->pv_va);
 4305                         KASSERT((*pde & PG_PS) == 0, ("pmap_ts_referenced:"
 4306                             " found a 4mpage in page %p's pv list", m));
 4307                         pte = pmap_pte_quick(pmap, pv->pv_va);
 4308                         if ((*pte & PG_A) != 0) {
 4309                                 atomic_clear_int((u_int *)pte, PG_A);
 4310                                 pmap_invalidate_page(pmap, pv->pv_va);
 4311                                 rtval++;
 4312                                 if (rtval > 4)
 4313                                         pvn = NULL;
 4314                         }
 4315                         PMAP_UNLOCK(pmap);
 4316                 } while ((pv = pvn) != NULL && pv != pvf);
 4317         }
 4318         sched_unpin();
 4319         return (rtval);
 4320 }
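
      /*
       * Note: both loops above stop counting once more than four references
       * have been found, so a single call reports at most five; as the XXX
       * above says, the exact cutoff is a tuning choice, not part of the
       * interface.
       */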
 4321 
 4322 /*
 4323  *      Clear the modify bits on the specified physical page.
 4324  */
 4325 void
 4326 pmap_clear_modify(vm_page_t m)
 4327 {
 4328         struct md_page *pvh;
 4329         pv_entry_t next_pv, pv;
 4330         pmap_t pmap;
 4331         pd_entry_t oldpde, *pde;
 4332         pt_entry_t oldpte, *pte;
 4333         vm_offset_t va;
 4334 
 4335         mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 4336         if ((m->flags & PG_FICTITIOUS) != 0)
 4337                 return;
 4338         sched_pin();
 4339         pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
 4340         TAILQ_FOREACH_SAFE(pv, &pvh->pv_list, pv_list, next_pv) {
 4341                 va = pv->pv_va;
 4342                 pmap = PV_PMAP(pv);
 4343                 PMAP_LOCK(pmap);
 4344                 pde = pmap_pde(pmap, va);
 4345                 oldpde = *pde;
 4346                 if ((oldpde & PG_RW) != 0) {
 4347                         if (pmap_demote_pde(pmap, pde, va)) {
 4348                                 if ((oldpde & PG_W) == 0) {
 4349                                         /*
 4350                                          * Write protect the mapping to a
 4351                                          * single page so that a subsequent
 4352                                          * write access may repromote.
 4353                                          */
 4354                                         va += VM_PAGE_TO_PHYS(m) - (oldpde &
 4355                                             PG_PS_FRAME);
 4356                                         pte = pmap_pte_quick(pmap, va);
 4357                                         oldpte = *pte;
 4358                                         if ((oldpte & PG_V) != 0) {
 4359                                                 /*
 4360                                                  * Regardless of whether a pte is 32 or 64 bits
 4361                                                  * in size, PG_RW and PG_M are among the least
 4362                                                  * significant 32 bits.
 4363                                                  */
 4364                                                 while (!atomic_cmpset_int((u_int *)pte,
 4365                                                     oldpte,
 4366                                                     oldpte & ~(PG_M | PG_RW)))
 4367                                                         oldpte = *pte;
 4368                                                 vm_page_dirty(m);
 4369                                                 pmap_invalidate_page(pmap, va);
 4370                                         }
 4371                                 }
 4372                         }
 4373                 }
 4374                 PMAP_UNLOCK(pmap);
 4375         }
 4376         TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
 4377                 pmap = PV_PMAP(pv);
 4378                 PMAP_LOCK(pmap);
 4379                 pde = pmap_pde(pmap, pv->pv_va);
 4380                 KASSERT((*pde & PG_PS) == 0, ("pmap_clear_modify: found"
 4381                     " a 4mpage in page %p's pv list", m));
 4382                 pte = pmap_pte_quick(pmap, pv->pv_va);
 4383                 if ((*pte & (PG_M | PG_RW)) == (PG_M | PG_RW)) {
 4384                         /*
 4385                          * Regardless of whether a pte is 32 or 64 bits
 4386                          * in size, PG_M is among the least significant
 4387                          * 32 bits. 
 4388                          */
 4389                         atomic_clear_int((u_int *)pte, PG_M);
 4390                         pmap_invalidate_page(pmap, pv->pv_va);
 4391                 }
 4392                 PMAP_UNLOCK(pmap);
 4393         }
 4394         sched_unpin();
 4395 }
 4396 
 4397 /*
 4398  *      pmap_clear_reference:
 4399  *
 4400  *      Clear the reference bit on the specified physical page.
 4401  */
 4402 void
 4403 pmap_clear_reference(vm_page_t m)
 4404 {
 4405         struct md_page *pvh;
 4406         pv_entry_t next_pv, pv;
 4407         pmap_t pmap;
 4408         pd_entry_t oldpde, *pde;
 4409         pt_entry_t *pte;
 4410         vm_offset_t va;
 4411 
 4412         mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 4413         if ((m->flags & PG_FICTITIOUS) != 0)
 4414                 return;
 4415         sched_pin();
 4416         pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
 4417         TAILQ_FOREACH_SAFE(pv, &pvh->pv_list, pv_list, next_pv) {
 4418                 va = pv->pv_va;
 4419                 pmap = PV_PMAP(pv);
 4420                 PMAP_LOCK(pmap);
 4421                 pde = pmap_pde(pmap, va);
 4422                 oldpde = *pde;
 4423                 if ((oldpde & PG_A) != 0) {
 4424                         if (pmap_demote_pde(pmap, pde, va)) {
 4425                                 /*
 4426                                  * Remove the mapping to a single page so
 4427                                  * that a subsequent access may repromote.
 4428                                  * Since the underlying page table page is
 4429                                  * fully populated, this removal never frees
 4430                                  * a page table page.
 4431                                  */
 4432                                 va += VM_PAGE_TO_PHYS(m) - (oldpde &
 4433                                     PG_PS_FRAME);
 4434                                 pmap_remove_page(pmap, va, NULL);
 4435                         }
 4436                 }
 4437                 PMAP_UNLOCK(pmap);
 4438         }
 4439         TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
 4440                 pmap = PV_PMAP(pv);
 4441                 PMAP_LOCK(pmap);
 4442                 pde = pmap_pde(pmap, pv->pv_va);
 4443                 KASSERT((*pde & PG_PS) == 0, ("pmap_clear_reference: found"
 4444                     " a 4mpage in page %p's pv list", m));
 4445                 pte = pmap_pte_quick(pmap, pv->pv_va);
 4446                 if ((*pte & PG_A) != 0) {
 4447                         /*
 4448                          * Regardless of whether a pte is 32 or 64 bits
 4449                          * in size, PG_A is among the least significant
 4450                          * 32 bits. 
 4451                          */
 4452                         atomic_clear_int((u_int *)pte, PG_A);
 4453                         pmap_invalidate_page(pmap, pv->pv_va);
 4454                 }
 4455                 PMAP_UNLOCK(pmap);
 4456         }
 4457         sched_unpin();
 4458 }
 4459 
 4460 /*
 4461  * Miscellaneous support routines follow
 4462  */
 4463 
 4464 /* Adjust the cache mode for a 4KB page mapped via a PTE. */
 4465 static __inline void
 4466 pmap_pte_attr(pt_entry_t *pte, int cache_bits)
 4467 {
 4468         u_int opte, npte;
 4469 
 4470         /*
 4471          * The cache mode bits are all in the low 32-bits of the
 4472          * PTE, so we can just spin on updating the low 32-bits.
 4473          */
 4474         do {
 4475                 opte = *(u_int *)pte;
 4476                 npte = opte & ~PG_PTE_CACHE;
 4477                 npte |= cache_bits;
 4478         } while (npte != opte && !atomic_cmpset_int((u_int *)pte, opte, npte));
 4479 }
 4480 
 4481 /* Adjust the cache mode for a 2/4MB page mapped via a PDE. */
 4482 static __inline void
 4483 pmap_pde_attr(pd_entry_t *pde, int cache_bits)
 4484 {
 4485         u_int opde, npde;
 4486 
 4487         /*
 4488          * The cache mode bits are all in the low 32-bits of the
 4489          * PDE, so we can just spin on updating the low 32-bits.
 4490          */
 4491         do {
 4492                 opde = *(u_int *)pde;
 4493                 npde = opde & ~PG_PDE_CACHE;
 4494                 npde |= cache_bits;
 4495         } while (npde != opde && !atomic_cmpset_int((u_int *)pde, opde, npde));
 4496 }
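
      /*
       * Note: even with PAE, where PTEs and PDEs are 64 bits wide, the
       * PG_PTE_CACHE and PG_PDE_CACHE bits all lie in the low 32 bits, so a
       * 32-bit atomic_cmpset_int() on the low word suffices in both helpers
       * above.  Each loop also terminates without a store when the entry
       * already carries the requested cache mode (npte == opte or
       * npde == opde).
       */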
 4497 
 4498 /*
 4499  * Map a set of physical memory pages into the kernel virtual
 4500  * address space. Return a pointer to where it is mapped. This
 4501  * routine is intended to be used for mapping device memory,
 4502  * NOT real memory.
 4503  */
 4504 void *
 4505 pmap_mapdev_attr(vm_paddr_t pa, vm_size_t size, int mode)
 4506 {
 4507         vm_offset_t va, offset;
 4508         vm_size_t tmpsize;
 4509 
 4510         offset = pa & PAGE_MASK;
 4511         size = roundup(offset + size, PAGE_SIZE);
 4512         pa = pa & PG_FRAME;
 4513 
 4514         if (pa < KERNLOAD && pa + size <= KERNLOAD)
 4515                 va = KERNBASE + pa;
 4516         else
 4517                 va = kmem_alloc_nofault(kernel_map, size);
 4518         if (!va)
 4519                 panic("pmap_mapdev: Couldn't alloc kernel virtual memory");
 4520 
 4521         for (tmpsize = 0; tmpsize < size; tmpsize += PAGE_SIZE)
 4522                 pmap_kenter_attr(va + tmpsize, pa + tmpsize, mode);
 4523         pmap_invalidate_range(kernel_pmap, va, va + tmpsize);
 4524         pmap_invalidate_cache_range(va, va + size);
 4525         return ((void *)(va + offset));
 4526 }
 4527 
 4528 void *
 4529 pmap_mapdev(vm_paddr_t pa, vm_size_t size)
 4530 {
 4531 
 4532         return (pmap_mapdev_attr(pa, size, PAT_UNCACHEABLE));
 4533 }
 4534 
 4535 void *
 4536 pmap_mapbios(vm_paddr_t pa, vm_size_t size)
 4537 {
 4538 
 4539         return (pmap_mapdev_attr(pa, size, PAT_WRITE_BACK));
 4540 }
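
      /*
       * Illustrative sketch (not part of the original file): a hypothetical
       * driver mapping a small register window uncacheable via pmap_mapdev()
       * and releasing it with pmap_unmapdev().  The helper name and register
       * layout are made up; only the two pmap routines come from this file.
       */
      #if 0
      static u_int32_t
      example_read_device_status(vm_paddr_t regbase)
      {
              volatile u_int32_t *regs;
              u_int32_t status;

              regs = pmap_mapdev(regbase, PAGE_SIZE);
              status = regs[0];       /* hypothetical status register */
              pmap_unmapdev((vm_offset_t)regs, PAGE_SIZE);
              return (status);
      }
      #endif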
 4541 
 4542 void
 4543 pmap_unmapdev(vm_offset_t va, vm_size_t size)
 4544 {
 4545         vm_offset_t base, offset, tmpva;
 4546 
 4547         if (va >= KERNBASE && va + size <= KERNBASE + KERNLOAD)
 4548                 return;
 4549         base = trunc_page(va);
 4550         offset = va & PAGE_MASK;
 4551         size = roundup(offset + size, PAGE_SIZE);
 4552         for (tmpva = base; tmpva < (base + size); tmpva += PAGE_SIZE)
 4553                 pmap_kremove(tmpva);
 4554         pmap_invalidate_range(kernel_pmap, va, tmpva);
 4555         kmem_free(kernel_map, base, size);
 4556 }
 4557 
 4558 /*
 4559  * Sets the memory attribute for the specified page.
 4560  */
 4561 void
 4562 pmap_page_set_memattr(vm_page_t m, vm_memattr_t ma)
 4563 {
 4564         struct sysmaps *sysmaps;
 4565         vm_offset_t sva, eva;
 4566 
 4567         m->md.pat_mode = ma;
 4568         if ((m->flags & PG_FICTITIOUS) != 0)
 4569                 return;
 4570 
 4571         /*
 4572          * If "m" is a normal page, flush it from the cache.
 4573          * See pmap_invalidate_cache_range().
 4574          *
 4575          * First, try to find an existing mapping of the page by sf
 4576          * buffer. sf_buf_invalidate_cache() modifies mapping and
 4577          * flushes the cache.
 4578          */    
 4579         if (sf_buf_invalidate_cache(m))
 4580                 return;
 4581 
 4582         /*
 4583          * If page is not mapped by sf buffer, but CPU does not
 4584          * support self snoop, map the page transient and do
 4585          * invalidation. In the worst case, whole cache is flushed by
 4586          * pmap_invalidate_cache_range().
 4587          */
 4588         if ((cpu_feature & (CPUID_SS|CPUID_CLFSH)) == CPUID_CLFSH) {
 4589                 sysmaps = &sysmaps_pcpu[PCPU_GET(cpuid)];
 4590                 mtx_lock(&sysmaps->lock);
 4591                 if (*sysmaps->CMAP2)
 4592                         panic("pmap_page_set_memattr: CMAP2 busy");
 4593                 sched_pin();
 4594                 *sysmaps->CMAP2 = PG_V | PG_RW | VM_PAGE_TO_PHYS(m) |
 4595                     PG_A | PG_M | pmap_cache_bits(m->md.pat_mode, 0);
 4596                 invlcaddr(sysmaps->CADDR2);
 4597                 sva = (vm_offset_t)sysmaps->CADDR2;
 4598                 eva = sva + PAGE_SIZE;
 4599         } else
 4600                 sva = eva = 0; /* gcc */
 4601         pmap_invalidate_cache_range(sva, eva);
 4602         if (sva != 0) {
 4603                 *sysmaps->CMAP2 = 0;
 4604                 sched_unpin();
 4605                 mtx_unlock(&sysmaps->lock);
 4606         }
 4607 }
 4608 
 4609 /*
 4610  * Changes the specified virtual address range's memory type to that given by
 4611  * the parameter "mode".  The specified virtual address range must be
 4612  * completely contained within the kernel map.
 4613  *
 4614  * Returns zero if the change completed successfully, and either EINVAL or
 4615  * ENOMEM if the change failed.  Specifically, EINVAL is returned if some part
 4616  * of the virtual address range was not mapped, and ENOMEM is returned if
 4617  * there was insufficient memory available to complete the change.
 4618  */
 4619 int
 4620 pmap_change_attr(vm_offset_t va, vm_size_t size, int mode)
 4621 {
 4622         vm_offset_t base, offset, tmpva;
 4623         pd_entry_t *pde;
 4624         pt_entry_t *pte;
 4625         int cache_bits_pte, cache_bits_pde;
 4626         boolean_t changed;
 4627 
 4628         base = trunc_page(va);
 4629         offset = va & PAGE_MASK;
 4630         size = roundup(offset + size, PAGE_SIZE);
 4631 
 4632         /*
 4633          * Only supported on kernel virtual addresses above the recursive map.
 4634          */
 4635         if (base < VM_MIN_KERNEL_ADDRESS)
 4636                 return (EINVAL);
 4637 
 4638         cache_bits_pde = pmap_cache_bits(mode, 1);
 4639         cache_bits_pte = pmap_cache_bits(mode, 0);
 4640         changed = FALSE;
 4641 
 4642         /*
 4643          * Pages that aren't mapped aren't supported.  Also break down
 4644          * 2/4MB pages into 4KB pages if required.
 4645          */
 4646         PMAP_LOCK(kernel_pmap);
 4647         for (tmpva = base; tmpva < base + size; ) {
 4648                 pde = pmap_pde(kernel_pmap, tmpva);
 4649                 if (*pde == 0) {
 4650                         PMAP_UNLOCK(kernel_pmap);
 4651                         return (EINVAL);
 4652                 }
 4653                 if (*pde & PG_PS) {
 4654                         /*
 4655                          * If the current 2/4MB page already has
 4656                          * the required memory type, then we need not
 4657                          * demote this page.  Just increment tmpva to
 4658                          * the next 2/4MB page frame.
 4659                          */
 4660                         if ((*pde & PG_PDE_CACHE) == cache_bits_pde) {
 4661                                 tmpva = trunc_4mpage(tmpva) + NBPDR;
 4662                                 continue;
 4663                         }
 4664 
 4665                         /*
 4666                          * If the current offset aligns with a 2/4MB
 4667                          * page frame and there is at least 2/4MB left
 4668                          * within the range, then we need not break
 4669                          * down this page into 4KB pages.
 4670                          */
 4671                         if ((tmpva & PDRMASK) == 0 &&
 4672                             tmpva + PDRMASK < base + size) {
 4673                                 tmpva += NBPDR;
 4674                                 continue;
 4675                         }
 4676                         if (!pmap_demote_pde(kernel_pmap, pde, tmpva)) {
 4677                                 PMAP_UNLOCK(kernel_pmap);
 4678                                 return (ENOMEM);
 4679                         }
 4680                 }
 4681                 pte = vtopte(tmpva);
 4682                 if (*pte == 0) {
 4683                         PMAP_UNLOCK(kernel_pmap);
 4684                         return (EINVAL);
 4685                 }
 4686                 tmpva += PAGE_SIZE;
 4687         }
 4688         PMAP_UNLOCK(kernel_pmap);
 4689 
 4690         /*
 4691          * Ok, all the pages exist, so run through them updating their
 4692          * cache mode if required.
 4693          */
 4694         for (tmpva = base; tmpva < base + size; ) {
 4695                 pde = pmap_pde(kernel_pmap, tmpva);
 4696                 if (*pde & PG_PS) {
 4697                         if ((*pde & PG_PDE_CACHE) != cache_bits_pde) {
 4698                                 pmap_pde_attr(pde, cache_bits_pde);
 4699                                 changed = TRUE;
 4700                         }
 4701                         tmpva = trunc_4mpage(tmpva) + NBPDR;
 4702                 } else {
 4703                         pte = vtopte(tmpva);
 4704                         if ((*pte & PG_PTE_CACHE) != cache_bits_pte) {
 4705                                 pmap_pte_attr(pte, cache_bits_pte);
 4706                                 changed = TRUE;
 4707                         }
 4708                         tmpva += PAGE_SIZE;
 4709                 }
 4710         }
 4711 
 4712         /*
 4713          * Flush CPU caches to make sure any data isn't cached that
 4714          * shouldn't be, etc.
 4715          */
 4716         if (changed) {
 4717                 pmap_invalidate_range(kernel_pmap, base, tmpva);
 4718                 pmap_invalidate_cache_range(base, tmpva);
 4719         }
 4720         return (0);
 4721 }
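
      /*
       * Illustrative sketch (not part of the original file): a hypothetical
       * caller switching an existing kernel mapping (e.g. a frame buffer) to
       * write-combining.  The helper name and PAT_WRITE_COMBINING are
       * assumptions; only pmap_change_attr() and its EINVAL/ENOMEM contract
       * come from this file.
       */
      #if 0
      static int
      example_set_write_combining(vm_offset_t va, vm_size_t size)
      {
              int error;

              error = pmap_change_attr(va, size, PAT_WRITE_COMBINING);
              if (error != 0)
                      printf("pmap_change_attr failed: %d\n", error);
              return (error);
      }
      #endif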
 4722 
 4723 /*
 4724  * perform the pmap work for mincore
 4725  */
 4726 int
 4727 pmap_mincore(pmap_t pmap, vm_offset_t addr)
 4728 {
 4729         pd_entry_t *pdep;
 4730         pt_entry_t *ptep, pte;
 4731         vm_paddr_t pa;
 4732         vm_page_t m;
 4733         int val = 0;
 4734         
 4735         PMAP_LOCK(pmap);
 4736         pdep = pmap_pde(pmap, addr);
 4737         if (*pdep != 0) {
 4738                 if (*pdep & PG_PS) {
 4739                         pte = *pdep;
 4740                         val = MINCORE_SUPER;
 4741                         /* Compute the physical address of the 4KB page. */
 4742                         pa = ((*pdep & PG_PS_FRAME) | (addr & PDRMASK)) &
 4743                             PG_FRAME;
 4744                 } else {
 4745                         ptep = pmap_pte(pmap, addr);
 4746                         pte = *ptep;
 4747                         pmap_pte_release(ptep);
 4748                         pa = pte & PG_FRAME;
 4749                 }
 4750         } else {
 4751                 pte = 0;
 4752                 pa = 0;
 4753         }
 4754         PMAP_UNLOCK(pmap);
 4755 
 4756         if (pte != 0) {
 4757                 val |= MINCORE_INCORE;
 4758                 if ((pte & PG_MANAGED) == 0)
 4759                         return (val);
 4760 
 4761                 m = PHYS_TO_VM_PAGE(pa);
 4762 
 4763                 /*
 4764                  * Modified by us
 4765                  */
 4766                 if ((pte & (PG_M | PG_RW)) == (PG_M | PG_RW))
 4767                         val |= MINCORE_MODIFIED|MINCORE_MODIFIED_OTHER;
 4768                 else {
 4769                         /*
 4770                          * Modified by someone else
 4771                          */
 4772                         vm_page_lock_queues();
 4773                         if (m->dirty || pmap_is_modified(m))
 4774                                 val |= MINCORE_MODIFIED_OTHER;
 4775                         vm_page_unlock_queues();
 4776                 }
 4777                 /*
 4778                  * Referenced by us
 4779                  */
 4780                 if (pte & PG_A)
 4781                         val |= MINCORE_REFERENCED|MINCORE_REFERENCED_OTHER;
 4782                 else {
 4783                         /*
 4784                          * Referenced by someone else
 4785                          */
 4786                         vm_page_lock_queues();
 4787                         if ((m->flags & PG_REFERENCED) ||
 4788                             pmap_ts_referenced(m)) {
 4789                                 val |= MINCORE_REFERENCED_OTHER;
 4790                                 vm_page_flag_set(m, PG_REFERENCED);
 4791                         }
 4792                         vm_page_unlock_queues();
 4793                 }
 4794         } 
 4795         return (val);
 4796 }
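
      /*
       * Note: the MINCORE_* bits assembled above are returned to user space
       * by the mincore(2) system call; MINCORE_SUPER additionally reports
       * that the address is backed by a 2/4MB mapping.
       */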
 4797 
 4798 void
 4799 pmap_activate(struct thread *td)
 4800 {
 4801         pmap_t  pmap, oldpmap;
 4802         u_int32_t  cr3;
 4803 
 4804         critical_enter();
 4805         pmap = vmspace_pmap(td->td_proc->p_vmspace);
 4806         oldpmap = PCPU_GET(curpmap);
 4807 #if defined(SMP)
 4808         atomic_clear_int(&oldpmap->pm_active, PCPU_GET(cpumask));
 4809         atomic_set_int(&pmap->pm_active, PCPU_GET(cpumask));
 4810 #else
 4811         oldpmap->pm_active &= ~1;
 4812         pmap->pm_active |= 1;
 4813 #endif
 4814 #ifdef PAE
 4815         cr3 = vtophys(pmap->pm_pdpt);
 4816 #else
 4817         cr3 = vtophys(pmap->pm_pdir);
 4818 #endif
 4819         /*
 4820          * pmap_activate is for the current thread on the current cpu
 4821          */
 4822         td->td_pcb->pcb_cr3 = cr3;
 4823         load_cr3(cr3);
 4824         PCPU_SET(curpmap, pmap);
 4825         critical_exit();
 4826 }
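
      /*
       * Note: the new %cr3 value is also stored in the PCB so that the
       * context switch code can restore it when this thread next runs, and
       * critical_enter() keeps the pm_active, curpmap, and %cr3 updates from
       * being interleaved with a context switch on this CPU.
       */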
 4827 
 4828 /*
 4829  *      Increase the starting virtual address of the given mapping if a
 4830  *      different alignment might result in more superpage mappings.
 4831  */
 4832 void
 4833 pmap_align_superpage(vm_object_t object, vm_ooffset_t offset,
 4834     vm_offset_t *addr, vm_size_t size)
 4835 {
 4836         vm_offset_t superpage_offset;
 4837 
 4838         if (size < NBPDR)
 4839                 return;
 4840         if (object != NULL && (object->flags & OBJ_COLORED) != 0)
 4841                 offset += ptoa(object->pg_color);
 4842         superpage_offset = offset & PDRMASK;
 4843         if (size - ((NBPDR - superpage_offset) & PDRMASK) < NBPDR ||
 4844             (*addr & PDRMASK) == superpage_offset)
 4845                 return;
 4846         if ((*addr & PDRMASK) < superpage_offset)
 4847                 *addr = (*addr & ~PDRMASK) + superpage_offset;
 4848         else
 4849                 *addr = ((*addr + PDRMASK) & ~PDRMASK) + superpage_offset;
 4850 }
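
      /*
       * Worked example (illustrative numbers): without PAE, NBPDR is 4MB and
       * PDRMASK is 0x3fffff.  If the object offset yields superpage_offset =
       * 0x123000, the caller proposed *addr = 0x30000000, and size is 8MB,
       * then (*addr & PDRMASK) == 0 is below superpage_offset, so *addr is
       * advanced to 0x30123000.  The virtual address and the object offset
       * are then congruent modulo 4MB, allowing superpage mappings to be
       * created for the aligned part of the range.
       */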
 4851 
 4852 
 4853 #if defined(PMAP_DEBUG)
 4854 int pmap_pid_dump(int pid)
 4855 {
 4856         pmap_t pmap;
 4857         struct proc *p;
 4858         int npte = 0;
 4859         int index;
 4860 
 4861         sx_slock(&allproc_lock);
 4862         FOREACH_PROC_IN_SYSTEM(p) {
 4863                 if (p->p_pid != pid)
 4864                         continue;
 4865 
 4866                 if (p->p_vmspace) {
 4867                         int i,j;
 4868                         index = 0;
 4869                         pmap = vmspace_pmap(p->p_vmspace);
 4870                         for (i = 0; i < NPDEPTD; i++) {
 4871                                 pd_entry_t *pde;
 4872                                 pt_entry_t *pte;
 4873                                 vm_offset_t base = i << PDRSHIFT;
 4874                                 
 4875                                 pde = &pmap->pm_pdir[i];
 4876                                 if (pde && pmap_pde_v(pde)) {
 4877                                         for (j = 0; j < NPTEPG; j++) {
 4878                                                 vm_offset_t va = base + (j << PAGE_SHIFT);
 4879                                                 if (va >= (vm_offset_t) VM_MIN_KERNEL_ADDRESS) {
 4880                                                         if (index) {
 4881                                                                 index = 0;
 4882                                                                 printf("\n");
 4883                                                         }
 4884                                                         sx_sunlock(&allproc_lock);
 4885                                                         return npte;
 4886                                                 }
 4887                                                 pte = pmap_pte(pmap, va);
 4888                                                 if (pte && pmap_pte_v(pte)) {
 4889                                                         pt_entry_t pa;
 4890                                                         vm_page_t m;
 4891                                                         pa = *pte;
 4892                                                         m = PHYS_TO_VM_PAGE(pa & PG_FRAME);
 4893                                                         printf("va: 0x%x, pt: 0x%x, h: %d, w: %d, f: 0x%x",
 4894                                                                 va, pa, m->hold_count, m->wire_count, m->flags);
 4895                                                         npte++;
 4896                                                         index++;
 4897                                                         if (index >= 2) {
 4898                                                                 index = 0;
 4899                                                                 printf("\n");
 4900                                                         } else {
 4901                                                                 printf(" ");
 4902                                                         }
 4903                                                 }
 4904                                         }
 4905                                 }
 4906                         }
 4907                 }
 4908         }
 4909         sx_sunlock(&allproc_lock);
 4910         return npte;
 4911 }
 4912 #endif
 4913 
 4914 #if defined(DEBUG)
 4915 
 4916 static void     pads(pmap_t pm);
 4917 void            pmap_pvdump(vm_offset_t pa);
 4918 
 4919 /* print address space of pmap*/
 4920 static void
 4921 pads(pmap_t pm)
 4922 {
 4923         int i, j;
 4924         vm_offset_t va;
 4925         pt_entry_t *ptep;
 4926 
 4927         if (pm == kernel_pmap)
 4928                 return;
 4929         for (i = 0; i < NPDEPTD; i++)
 4930                 if (pm->pm_pdir[i])
 4931                         for (j = 0; j < NPTEPG; j++) {
 4932                                 va = (i << PDRSHIFT) + (j << PAGE_SHIFT);
 4933                                 if (pm == kernel_pmap && va < KERNBASE)
 4934                                         continue;
 4935                                 if (pm != kernel_pmap && va > UPT_MAX_ADDRESS)
 4936                                         continue;
 4937                                 ptep = pmap_pte(pm, va);
 4938                                 if (pmap_pte_v(ptep))
 4939                                         printf("%x:%x ", va, *ptep);
 4940                         }
 4941 
 4942 }
 4943 
 4944 void
 4945 pmap_pvdump(vm_paddr_t pa)
 4946 {
 4947         pv_entry_t pv;
 4948         pmap_t pmap;
 4949         vm_page_t m;
 4950 
 4951         printf("pa %x", pa);
 4952         m = PHYS_TO_VM_PAGE(pa);
 4953         TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
 4954                 pmap = PV_PMAP(pv);
 4955                 printf(" -> pmap %p, va %x", (void *)pmap, pv->pv_va);
 4956                 pads(pmap);
 4957         }
 4958         printf(" ");
 4959 }
 4960 #endif
