FreeBSD/Linux Kernel Cross Reference
sys/amd64/amd64/pmap.c


    1 /*-
    2  * Copyright (c) 1991 Regents of the University of California.
    3  * All rights reserved.
    4  * Copyright (c) 1994 John S. Dyson
    5  * All rights reserved.
    6  * Copyright (c) 1994 David Greenman
    7  * All rights reserved.
    8  * Copyright (c) 2003 Peter Wemm
    9  * All rights reserved.
   10  * Copyright (c) 2005-2008 Alan L. Cox <alc@cs.rice.edu>
   11  * All rights reserved.
   12  *
   13  * This code is derived from software contributed to Berkeley by
   14  * the Systems Programming Group of the University of Utah Computer
   15  * Science Department and William Jolitz of UUNET Technologies Inc.
   16  *
   17  * Redistribution and use in source and binary forms, with or without
   18  * modification, are permitted provided that the following conditions
   19  * are met:
   20  * 1. Redistributions of source code must retain the above copyright
   21  *    notice, this list of conditions and the following disclaimer.
   22  * 2. Redistributions in binary form must reproduce the above copyright
   23  *    notice, this list of conditions and the following disclaimer in the
   24  *    documentation and/or other materials provided with the distribution.
   25  * 3. All advertising materials mentioning features or use of this software
   26  *    must display the following acknowledgement:
   27  *      This product includes software developed by the University of
   28  *      California, Berkeley and its contributors.
   29  * 4. Neither the name of the University nor the names of its contributors
   30  *    may be used to endorse or promote products derived from this software
   31  *    without specific prior written permission.
   32  *
   33  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
   34  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   35  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   36  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
   37  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   38  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
   39  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   40  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   41  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   42  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   43  * SUCH DAMAGE.
   44  *
   45  *      from:   @(#)pmap.c      7.7 (Berkeley)  5/12/91
   46  */
   47 /*-
   48  * Copyright (c) 2003 Networks Associates Technology, Inc.
   49  * All rights reserved.
   50  *
   51  * This software was developed for the FreeBSD Project by Jake Burkholder,
   52  * Safeport Network Services, and Network Associates Laboratories, the
   53  * Security Research Division of Network Associates, Inc. under
   54  * DARPA/SPAWAR contract N66001-01-C-8035 ("CBOSS"), as part of the DARPA
   55  * CHATS research program.
   56  *
   57  * Redistribution and use in source and binary forms, with or without
   58  * modification, are permitted provided that the following conditions
   59  * are met:
   60  * 1. Redistributions of source code must retain the above copyright
   61  *    notice, this list of conditions and the following disclaimer.
   62  * 2. Redistributions in binary form must reproduce the above copyright
   63  *    notice, this list of conditions and the following disclaimer in the
   64  *    documentation and/or other materials provided with the distribution.
   65  *
   66  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
   67  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   68  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   69  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
   70  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   71  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
   72  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   73  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   74  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   75  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   76  * SUCH DAMAGE.
   77  */
   78 
   79 #include <sys/cdefs.h>
   80 __FBSDID("$FreeBSD: releng/7.3/sys/amd64/amd64/pmap.c 204973 2010-03-10 19:55:27Z kensmith $");
   81 
   82 /*
   83  *      Manages physical address maps.
   84  *
   85  *      In addition to hardware address maps, this
   86  *      module is called upon to provide software-use-only
   87  *      maps which may or may not be stored in the same
   88  *      form as hardware maps.  These pseudo-maps are
   89  *      used to store intermediate results from copy
   90  *      operations to and from address spaces.
   91  *
   92  *      Since the information managed by this module is
   93  *      also stored by the logical address mapping module,
   94  *      this module may throw away valid virtual-to-physical
   95  *      mappings at almost any time.  However, invalidations
   96  *      of virtual-to-physical mappings must be done as
   97  *      requested.
   98  *
   99  *      In order to cope with hardware architectures which
  100  *      make virtual-to-physical map invalidates expensive,
   101  *      this module may delay invalidate or reduced-protection
  102  *      operations until such time as they are actually
  103  *      necessary.  This module is given full information as
  104  *      to which processors are currently using which maps,
  105  *      and to when physical maps must be made correct.
  106  */
  107 
  108 #include "opt_msgbuf.h"
  109 #include "opt_pmap.h"
  110 #include "opt_vm.h"
  111 
  112 #include <sys/param.h>
  113 #include <sys/systm.h>
  114 #include <sys/kernel.h>
  115 #include <sys/ktr.h>
  116 #include <sys/lock.h>
  117 #include <sys/malloc.h>
  118 #include <sys/mman.h>
  119 #include <sys/msgbuf.h>
  120 #include <sys/mutex.h>
  121 #include <sys/proc.h>
  122 #include <sys/sx.h>
  123 #include <sys/vmmeter.h>
  124 #include <sys/sched.h>
  125 #include <sys/sysctl.h>
  126 #ifdef SMP
  127 #include <sys/smp.h>
  128 #endif
  129 
  130 #include <vm/vm.h>
  131 #include <vm/vm_param.h>
  132 #include <vm/vm_kern.h>
  133 #include <vm/vm_page.h>
  134 #include <vm/vm_map.h>
  135 #include <vm/vm_object.h>
  136 #include <vm/vm_extern.h>
  137 #include <vm/vm_pageout.h>
  138 #include <vm/vm_pager.h>
  139 #include <vm/vm_reserv.h>
  140 #include <vm/uma.h>
  141 
  142 #include <machine/cpu.h>
  143 #include <machine/cputypes.h>
  144 #include <machine/md_var.h>
  145 #include <machine/pcb.h>
  146 #include <machine/specialreg.h>
  147 #ifdef SMP
  148 #include <machine/smp.h>
  149 #endif
  150 
  151 #ifndef PMAP_SHPGPERPROC
  152 #define PMAP_SHPGPERPROC 200
  153 #endif
  154 
  155 #if !defined(DIAGNOSTIC)
  156 #define PMAP_INLINE     __gnu89_inline
  157 #else
  158 #define PMAP_INLINE
  159 #endif
  160 
  161 #define PV_STATS
  162 #ifdef PV_STATS
  163 #define PV_STAT(x)      do { x ; } while (0)
  164 #else
  165 #define PV_STAT(x)      do { } while (0)
  166 #endif
  167 
  168 #define pa_index(pa)    ((pa) >> PDRSHIFT)
  169 #define pa_to_pvh(pa)   (&pv_table[pa_index(pa)])
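
For orientation (editorial note, not part of the source): PDRSHIFT is 21 on amd64, so pa_index() is simply the 2MB-superpage number of a physical address, and pa_to_pvh() picks out that superpage's entry in pv_table. For example:

           pa_index(0x40200000)  == 0x40200000 >> 21 == 0x201
           pa_to_pvh(0x40200000) == &pv_table[0x201]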
  170 
  171 struct pmap kernel_pmap_store;
  172 
  173 vm_offset_t virtual_avail;      /* VA of first avail page (after kernel bss) */
  174 vm_offset_t virtual_end;        /* VA of last avail page (end of kernel AS) */
  175 
  176 static int ndmpdp;
  177 static vm_paddr_t dmaplimit;
  178 vm_offset_t kernel_vm_end = VM_MIN_KERNEL_ADDRESS;
  179 pt_entry_t pg_nx;
  180 
  181 SYSCTL_NODE(_vm, OID_AUTO, pmap, CTLFLAG_RD, 0, "VM/pmap parameters");
  182 
  183 static int pg_ps_enabled;
  184 SYSCTL_INT(_vm_pmap, OID_AUTO, pg_ps_enabled, CTLFLAG_RDTUN, &pg_ps_enabled, 0,
  185     "Are large page mappings enabled?");
  186 
  187 static u_int64_t        KPTphys;        /* phys addr of kernel level 1 */
  188 static u_int64_t        KPDphys;        /* phys addr of kernel level 2 */
  189 u_int64_t               KPDPphys;       /* phys addr of kernel level 3 */
  190 u_int64_t               KPML4phys;      /* phys addr of kernel level 4 */
  191 
  192 static u_int64_t        DMPDphys;       /* phys addr of direct mapped level 2 */
  193 static u_int64_t        DMPDPphys;      /* phys addr of direct mapped level 3 */
  194 
  195 /*
  196  * Data for the pv entry allocation mechanism
  197  */
  198 static int pv_entry_count = 0, pv_entry_max = 0, pv_entry_high_water = 0;
  199 static struct md_page *pv_table;
  200 static int shpgperproc = PMAP_SHPGPERPROC;
  201 
  202 /*
  203  * All those kernel PT submaps that BSD is so fond of
  204  */
  205 pt_entry_t *CMAP1 = 0;
  206 caddr_t CADDR1 = 0;
  207 struct msgbuf *msgbufp = 0;
  208 
  209 /*
  210  * Crashdump maps.
  211  */
  212 static caddr_t crashdumpmap;
  213 
  214 static void     free_pv_entry(pmap_t pmap, pv_entry_t pv);
  215 static pv_entry_t get_pv_entry(pmap_t locked_pmap, int try);
  216 static void     pmap_pv_demote_pde(pmap_t pmap, vm_offset_t va, vm_paddr_t pa);
  217 static boolean_t pmap_pv_insert_pde(pmap_t pmap, vm_offset_t va, vm_paddr_t pa);
  218 static void     pmap_pv_promote_pde(pmap_t pmap, vm_offset_t va, vm_paddr_t pa);
  219 static void     pmap_pvh_free(struct md_page *pvh, pmap_t pmap, vm_offset_t va);
  220 static pv_entry_t pmap_pvh_remove(struct md_page *pvh, pmap_t pmap,
  221                     vm_offset_t va);
  222 
  223 static int pmap_change_attr_locked(vm_offset_t va, vm_size_t size, int mode);
  224 static boolean_t pmap_demote_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t va);
  225 static boolean_t pmap_enter_pde(pmap_t pmap, vm_offset_t va, vm_page_t m,
  226     vm_prot_t prot);
  227 static vm_page_t pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va,
  228     vm_page_t m, vm_prot_t prot, vm_page_t mpte);
  229 static void pmap_fill_ptp(pt_entry_t *firstpte, pt_entry_t newpte);
  230 static void pmap_insert_pt_page(pmap_t pmap, vm_page_t mpte);
  231 static void pmap_invalidate_cache_range(vm_offset_t sva, vm_offset_t eva);
  232 static boolean_t pmap_is_modified_pvh(struct md_page *pvh);
  233 static void pmap_kenter_attr(vm_offset_t va, vm_paddr_t pa, int mode);
  234 static vm_page_t pmap_lookup_pt_page(pmap_t pmap, vm_offset_t va);
  235 static void pmap_pde_attr(pd_entry_t *pde, int cache_bits);
  236 static void pmap_promote_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t va);
  237 static boolean_t pmap_protect_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t sva,
  238     vm_prot_t prot);
  239 static void pmap_pte_attr(pt_entry_t *pte, int cache_bits);
  240 static int pmap_remove_pde(pmap_t pmap, pd_entry_t *pdq, vm_offset_t sva,
  241                 vm_page_t *free);
  242 static int pmap_remove_pte(pmap_t pmap, pt_entry_t *ptq,
  243                 vm_offset_t sva, pd_entry_t ptepde, vm_page_t *free);
  244 static void pmap_remove_pt_page(pmap_t pmap, vm_page_t mpte);
  245 static void pmap_remove_page(pmap_t pmap, vm_offset_t va, pd_entry_t *pde,
  246     vm_page_t *free);
  247 static void pmap_remove_entry(struct pmap *pmap, vm_page_t m,
  248                 vm_offset_t va);
  249 static void pmap_insert_entry(pmap_t pmap, vm_offset_t va, vm_page_t m);
  250 static boolean_t pmap_try_insert_pv_entry(pmap_t pmap, vm_offset_t va,
  251     vm_page_t m);
  252 
  253 static vm_page_t pmap_allocpde(pmap_t pmap, vm_offset_t va, int flags);
  254 static vm_page_t pmap_allocpte(pmap_t pmap, vm_offset_t va, int flags);
  255 
  256 static vm_page_t _pmap_allocpte(pmap_t pmap, vm_pindex_t ptepindex, int flags);
  257 static int _pmap_unwire_pte_hold(pmap_t pmap, vm_offset_t va, vm_page_t m,
  258                 vm_page_t* free);
  259 static int pmap_unuse_pt(pmap_t, vm_offset_t, pd_entry_t, vm_page_t *);
  260 static vm_offset_t pmap_kmem_choose(vm_offset_t addr);
  261 
  262 CTASSERT(1 << PDESHIFT == sizeof(pd_entry_t));
  263 CTASSERT(1 << PTESHIFT == sizeof(pt_entry_t));
  264 
  265 /*
  266  * Move the kernel virtual free pointer to the next
  267  * 2MB.  This is used to help improve performance
  268  * by using a large (2MB) page for much of the kernel
  269  * (.text, .data, .bss)
  270  */
  271 static vm_offset_t
  272 pmap_kmem_choose(vm_offset_t addr)
  273 {
  274         vm_offset_t newaddr = addr;
  275 
  276         newaddr = (addr + (NBPDR - 1)) & ~(NBPDR - 1);
  277         return newaddr;
  278 }
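
A quick worked example of the round-up above (editorial addition; NBPDR, the 2MB superpage size, is 0x200000 on amd64):

           addr                 = 0xffffffff8052e000
           addr + (NBPDR - 1)   = 0xffffffff8072dfff
           ... & ~(NBPDR - 1)   = 0xffffffff80600000   (next 2MB boundary)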
  279 
  280 /********************/
  281 /* Inline functions */
  282 /********************/
  283 
  284 /* Return a non-clipped PD index for a given VA */
  285 static __inline vm_pindex_t
  286 pmap_pde_pindex(vm_offset_t va)
  287 {
  288         return va >> PDRSHIFT;
  289 }
  290 
  291 
  292 /* Return various clipped indexes for a given VA */
  293 static __inline vm_pindex_t
  294 pmap_pte_index(vm_offset_t va)
  295 {
  296 
  297         return ((va >> PAGE_SHIFT) & ((1ul << NPTEPGSHIFT) - 1));
  298 }
  299 
  300 static __inline vm_pindex_t
  301 pmap_pde_index(vm_offset_t va)
  302 {
  303 
  304         return ((va >> PDRSHIFT) & ((1ul << NPDEPGSHIFT) - 1));
  305 }
  306 
  307 static __inline vm_pindex_t
  308 pmap_pdpe_index(vm_offset_t va)
  309 {
  310 
  311         return ((va >> PDPSHIFT) & ((1ul << NPDPEPGSHIFT) - 1));
  312 }
  313 
  314 static __inline vm_pindex_t
  315 pmap_pml4e_index(vm_offset_t va)
  316 {
  317 
  318         return ((va >> PML4SHIFT) & ((1ul << NPML4EPGSHIFT) - 1));
  319 }
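
Taken together, the helpers above slice a virtual address into its four 9-bit table indexes plus the 12-bit page offset. A worked example (editorial; using the standard amd64 shifts PAGE_SHIFT=12, PDRSHIFT=21, PDPSHIFT=30, PML4SHIFT=39):

           va = 0x00007f8012345678
           pmap_pml4e_index(va) = (va >> 39) & 0x1ff = 0x0ff
           pmap_pdpe_index(va)  = (va >> 30) & 0x1ff = 0x000
           pmap_pde_index(va)   = (va >> 21) & 0x1ff = 0x091
           pmap_pte_index(va)   = (va >> 12) & 0x1ff = 0x145
           page offset          =  va        & 0xfff = 0x678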
  320 
  321 /* Return a pointer to the PML4 slot that corresponds to a VA */
  322 static __inline pml4_entry_t *
  323 pmap_pml4e(pmap_t pmap, vm_offset_t va)
  324 {
  325 
  326         return (&pmap->pm_pml4[pmap_pml4e_index(va)]);
  327 }
  328 
  329 /* Return a pointer to the PDP slot that corresponds to a VA */
  330 static __inline pdp_entry_t *
  331 pmap_pml4e_to_pdpe(pml4_entry_t *pml4e, vm_offset_t va)
  332 {
  333         pdp_entry_t *pdpe;
  334 
  335         pdpe = (pdp_entry_t *)PHYS_TO_DMAP(*pml4e & PG_FRAME);
  336         return (&pdpe[pmap_pdpe_index(va)]);
  337 }
  338 
  339 /* Return a pointer to the PDP slot that corresponds to a VA */
  340 static __inline pdp_entry_t *
  341 pmap_pdpe(pmap_t pmap, vm_offset_t va)
  342 {
  343         pml4_entry_t *pml4e;
  344 
  345         pml4e = pmap_pml4e(pmap, va);
  346         if ((*pml4e & PG_V) == 0)
  347                 return NULL;
  348         return (pmap_pml4e_to_pdpe(pml4e, va));
  349 }
  350 
  351 /* Return a pointer to the PD slot that corresponds to a VA */
  352 static __inline pd_entry_t *
  353 pmap_pdpe_to_pde(pdp_entry_t *pdpe, vm_offset_t va)
  354 {
  355         pd_entry_t *pde;
  356 
  357         pde = (pd_entry_t *)PHYS_TO_DMAP(*pdpe & PG_FRAME);
  358         return (&pde[pmap_pde_index(va)]);
  359 }
  360 
  361 /* Return a pointer to the PD slot that corresponds to a VA */
  362 static __inline pd_entry_t *
  363 pmap_pde(pmap_t pmap, vm_offset_t va)
  364 {
  365         pdp_entry_t *pdpe;
  366 
  367         pdpe = pmap_pdpe(pmap, va);
  368         if (pdpe == NULL || (*pdpe & PG_V) == 0)
  369                  return NULL;
  370         return (pmap_pdpe_to_pde(pdpe, va));
  371 }
  372 
  373 /* Return a pointer to the PT slot that corresponds to a VA */
  374 static __inline pt_entry_t *
  375 pmap_pde_to_pte(pd_entry_t *pde, vm_offset_t va)
  376 {
  377         pt_entry_t *pte;
  378 
  379         pte = (pt_entry_t *)PHYS_TO_DMAP(*pde & PG_FRAME);
  380         return (&pte[pmap_pte_index(va)]);
  381 }
  382 
  383 /* Return a pointer to the PT slot that corresponds to a VA */
  384 static __inline pt_entry_t *
  385 pmap_pte(pmap_t pmap, vm_offset_t va)
  386 {
  387         pd_entry_t *pde;
  388 
  389         pde = pmap_pde(pmap, va);
  390         if (pde == NULL || (*pde & PG_V) == 0)
  391                 return NULL;
  392         if ((*pde & PG_PS) != 0)        /* compat with i386 pmap_pte() */
  393                 return ((pt_entry_t *)pde);
  394         return (pmap_pde_to_pte(pde, va));
  395 }
  396 
  397 
  398 PMAP_INLINE pt_entry_t *
  399 vtopte(vm_offset_t va)
  400 {
  401         u_int64_t mask = ((1ul << (NPTEPGSHIFT + NPDEPGSHIFT + NPDPEPGSHIFT + NPML4EPGSHIFT)) - 1);
  402 
  403         return (PTmap + ((va >> PAGE_SHIFT) & mask));
  404 }
  405 
  406 static __inline pd_entry_t *
  407 vtopde(vm_offset_t va)
  408 {
  409         u_int64_t mask = ((1ul << (NPDEPGSHIFT + NPDPEPGSHIFT + NPML4EPGSHIFT)) - 1);
  410 
  411         return (PDmap + ((va >> PDRSHIFT) & mask));
  412 }
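
Editorial note: vtopte() and vtopde() rely on the recursive mapping that create_pagetables() installs in PML4 slot PML4PML4I (see below).  Because the PML4 points at itself there, PTmap and PDmap behave as linear arrays of every PTE and PDE of the current address space, indexed by page and superpage number respectively.  Continuing the example above:

           vtopte(0x00007f8012345678) == PTmap + 0x7f8012345   (36-bit page number)
           vtopde(0x00007f8012345678) == PDmap + 0x3fc0091     (27-bit superpage number)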
  413 
  414 static u_int64_t
  415 allocpages(vm_paddr_t *firstaddr, int n)
  416 {
  417         u_int64_t ret;
  418 
  419         ret = *firstaddr;
  420         bzero((void *)ret, n * PAGE_SIZE);
  421         *firstaddr += n * PAGE_SIZE;
  422         return (ret);
  423 }
  424 
  425 static void
  426 create_pagetables(vm_paddr_t *firstaddr)
  427 {
  428         int i;
  429 
  430         /* Allocate pages */
  431         KPTphys = allocpages(firstaddr, NKPT);
  432         KPML4phys = allocpages(firstaddr, 1);
  433         KPDPphys = allocpages(firstaddr, NKPML4E);
  434         KPDphys = allocpages(firstaddr, NKPDPE);
  435 
  436         ndmpdp = (ptoa(Maxmem) + NBPDP - 1) >> PDPSHIFT;
  437         if (ndmpdp < 4)         /* Minimum 4GB of dirmap */
  438                 ndmpdp = 4;
  439         DMPDPphys = allocpages(firstaddr, NDMPML4E);
  440         DMPDphys = allocpages(firstaddr, ndmpdp);
  441         dmaplimit = (vm_paddr_t)ndmpdp << PDPSHIFT;
  442 
  443         /* Fill in the underlying page table pages */
  444         /* Read-only from zero to physfree */
  445         /* XXX not fully used, underneath 2M pages */
  446         for (i = 0; (i << PAGE_SHIFT) < *firstaddr; i++) {
  447                 ((pt_entry_t *)KPTphys)[i] = i << PAGE_SHIFT;
  448                 ((pt_entry_t *)KPTphys)[i] |= PG_RW | PG_V | PG_G;
  449         }
  450 
  451         /* Now map the page tables at their location within PTmap */
  452         for (i = 0; i < NKPT; i++) {
  453                 ((pd_entry_t *)KPDphys)[i] = KPTphys + (i << PAGE_SHIFT);
  454                 ((pd_entry_t *)KPDphys)[i] |= PG_RW | PG_V;
  455         }
  456 
  457         /* Map from zero to end of allocations under 2M pages */
  458         /* This replaces some of the KPTphys entries above */
  459         for (i = 0; (i << PDRSHIFT) < *firstaddr; i++) {
  460                 ((pd_entry_t *)KPDphys)[i] = i << PDRSHIFT;
  461                 ((pd_entry_t *)KPDphys)[i] |= PG_RW | PG_V | PG_PS | PG_G;
  462         }
  463 
  464         /* And connect up the PD to the PDP */
  465         for (i = 0; i < NKPDPE; i++) {
  466                 ((pdp_entry_t *)KPDPphys)[i + KPDPI] = KPDphys + (i << PAGE_SHIFT);
  467                 ((pdp_entry_t *)KPDPphys)[i + KPDPI] |= PG_RW | PG_V | PG_U;
  468         }
  469 
  470         /* Now set up the direct map space using 2MB pages */
  471         /* Preset PG_M and PG_A because demotion expects it */
  472         for (i = 0; i < NPDEPG * ndmpdp; i++) {
  473                 ((pd_entry_t *)DMPDphys)[i] = (vm_paddr_t)i << PDRSHIFT;
  474                 ((pd_entry_t *)DMPDphys)[i] |= PG_RW | PG_V | PG_PS | PG_G |
  475                     PG_M | PG_A;
  476         }
  477 
  478         /* And the direct map space's PDP */
  479         for (i = 0; i < ndmpdp; i++) {
  480                 ((pdp_entry_t *)DMPDPphys)[i] = DMPDphys + (i << PAGE_SHIFT);
  481                 ((pdp_entry_t *)DMPDPphys)[i] |= PG_RW | PG_V | PG_U;
  482         }
  483 
  484         /* And recursively map PML4 to itself in order to get PTmap */
  485         ((pdp_entry_t *)KPML4phys)[PML4PML4I] = KPML4phys;
  486         ((pdp_entry_t *)KPML4phys)[PML4PML4I] |= PG_RW | PG_V | PG_U;
  487 
  488         /* Connect the Direct Map slot up to the PML4 */
  489         ((pdp_entry_t *)KPML4phys)[DMPML4I] = DMPDPphys;
  490         ((pdp_entry_t *)KPML4phys)[DMPML4I] |= PG_RW | PG_V | PG_U;
  491 
  492         /* Connect the KVA slot up to the PML4 */
  493         ((pdp_entry_t *)KPML4phys)[KPML4I] = KPDPphys;
  494         ((pdp_entry_t *)KPML4phys)[KPML4I] |= PG_RW | PG_V | PG_U;
  495 }
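
To make the direct-map sizing above concrete (editorial; NBPDP, the memory covered by one PDP entry, is 1GB on amd64):

           6 GB of RAM:    ndmpdp = (6GB + 1GB - 1) >> 30 = 6      -> dmaplimit = 6 GB
           512 MB of RAM:  computed value 1, raised to the minimum of 4 -> dmaplimit = 4 GB

Each of those ndmpdp direct-map PDs is then filled with 512 two-megabyte PG_PS entries, which is why the DMPDphys loop runs to NPDEPG * ndmpdp.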
  496 
  497 /*
  498  *      Bootstrap the system enough to run with virtual memory.
  499  *
  500  *      On amd64 this is called after mapping has already been enabled
  501  *      and just syncs the pmap module with what has already been done.
  502  *      [We can't call it easily with mapping off since the kernel is not
  503  *      mapped with PA == VA, hence we would have to relocate every address
  504  *      from the linked base (virtual) address "KERNBASE" to the actual
  505  *      (physical) address starting relative to 0]
  506  */
  507 void
  508 pmap_bootstrap(vm_paddr_t *firstaddr)
  509 {
  510         vm_offset_t va;
  511         pt_entry_t *pte, *unused;
  512 
  513         /*
  514          * Create an initial set of page tables to run the kernel in.
  515          */
  516         create_pagetables(firstaddr);
  517 
  518         virtual_avail = (vm_offset_t) KERNBASE + *firstaddr;
  519         virtual_avail = pmap_kmem_choose(virtual_avail);
  520 
  521         virtual_end = VM_MAX_KERNEL_ADDRESS;
  522 
  523 
  524         /* XXX do %cr0 as well */
  525         load_cr4(rcr4() | CR4_PGE | CR4_PSE);
  526         load_cr3(KPML4phys);
  527 
  528         /*
  529          * Initialize the kernel pmap (which is statically allocated).
  530          */
  531         PMAP_LOCK_INIT(kernel_pmap);
  532         kernel_pmap->pm_pml4 = (pdp_entry_t *)PHYS_TO_DMAP(KPML4phys);
  533         kernel_pmap->pm_root = NULL;
  534         kernel_pmap->pm_active = -1;    /* don't allow deactivation */
  535         TAILQ_INIT(&kernel_pmap->pm_pvchunk);
  536 
  537         /*
  538          * Reserve some special page table entries/VA space for temporary
  539          * mapping of pages.
  540          */
  541 #define SYSMAP(c, p, v, n)      \
  542         v = (c)va; va += ((n)*PAGE_SIZE); p = pte; pte += (n);
  543 
  544         va = virtual_avail;
  545         pte = vtopte(va);
  546 
  547         /*
  548          * CMAP1 is only used for the memory test.
  549          */
  550         SYSMAP(caddr_t, CMAP1, CADDR1, 1)
  551 
  552         /*
  553          * Crashdump maps.
  554          */
  555         SYSMAP(caddr_t, unused, crashdumpmap, MAXDUMPPGS)
  556 
  557         /*
  558          * msgbufp is used to map the system message buffer.
  559          */
  560         SYSMAP(struct msgbuf *, unused, msgbufp, atop(round_page(MSGBUF_SIZE)))
  561 
  562         virtual_avail = va;
  563 
  564         *CMAP1 = 0;
  565 
  566         invltlb();
  567 
  568         /* Initialize the PAT MSR. */
  569         pmap_init_pat();
  570 }
  571 
  572 /*
  573  * Setup the PAT MSR.
  574  */
  575 void
  576 pmap_init_pat(void)
  577 {
  578         uint64_t pat_msr;
  579 
  580         /* Bail if this CPU doesn't implement PAT. */
  581         if (!(cpu_feature & CPUID_PAT))
  582                 panic("no PAT??");
  583 
  584 #ifdef PAT_WORKS
  585         /*
  586          * Leave the indices 0-3 at the default of WB, WT, UC, and UC-.
  587          * Program 4 and 5 as WP and WC.
  588          * Leave 6 and 7 as UC and UC-.
  589          */
  590         pat_msr = rdmsr(MSR_PAT);
  591         pat_msr &= ~(PAT_MASK(4) | PAT_MASK(5));
  592         pat_msr |= PAT_VALUE(4, PAT_WRITE_PROTECTED) |
  593             PAT_VALUE(5, PAT_WRITE_COMBINING);
  594 #else
  595         /*
  596          * Due to some Intel errata, we can only safely use the lower 4
  597          * PAT entries.  Thus, just replace PAT Index 2 with WC instead
  598          * of UC-.
  599          *
  600          *   Intel Pentium III Processor Specification Update
  601          * Errata E.27 (Upper Four PAT Entries Not Usable With Mode B
  602          * or Mode C Paging)
  603          *
  604          *   Intel Pentium IV  Processor Specification Update
  605          * Errata N46 (PAT Index MSB May Be Calculated Incorrectly)
  606          */
  607         pat_msr = rdmsr(MSR_PAT);
  608         pat_msr &= ~PAT_MASK(2);
  609         pat_msr |= PAT_VALUE(2, PAT_WRITE_COMBINING);
  610 #endif
  611         wrmsr(MSR_PAT, pat_msr);
  612 }
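
The net effect of the default (!PAT_WORKS) path, combining the comment above with the architectural power-on PAT defaults (editorial summary):

           PAT index:    0     1     2              3
           memory type:  WB    WT    WC (was UC-)   UC

Entries 4-7 keep their reset values and are not used by pmap_cache_bits() in this configuration.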
  613 
  614 /*
  615  *      Initialize a vm_page's machine-dependent fields.
  616  */
  617 void
  618 pmap_page_init(vm_page_t m)
  619 {
  620 
  621         TAILQ_INIT(&m->md.pv_list);
  622         m->md.pat_mode = PAT_WRITE_BACK;
  623 }
  624 
  625 /*
  626  *      Initialize the pmap module.
  627  *      Called by vm_init, to initialize any structures that the pmap
  628  *      system needs to map virtual memory.
  629  */
  630 void
  631 pmap_init(void)
  632 {
  633         vm_page_t mpte;
  634         vm_size_t s;
  635         int i, pv_npg;
  636 
  637         /*
  638          * Initialize the vm page array entries for the kernel pmap's
  639          * page table pages.
  640          */ 
  641         for (i = 0; i < NKPT; i++) {
  642                 mpte = PHYS_TO_VM_PAGE(KPTphys + (i << PAGE_SHIFT));
  643                 KASSERT(mpte >= vm_page_array &&
  644                     mpte < &vm_page_array[vm_page_array_size],
  645                     ("pmap_init: page table page is out of range"));
  646                 mpte->pindex = pmap_pde_pindex(KERNBASE) + i;
  647                 mpte->phys_addr = KPTphys + (i << PAGE_SHIFT);
  648         }
  649 
  650         /*
  651          * Initialize the address space (zone) for the pv entries.  Set a
  652          * high water mark so that the system can recover from excessive
  653          * numbers of pv entries.
  654          */
  655         TUNABLE_INT_FETCH("vm.pmap.shpgperproc", &shpgperproc);
  656         pv_entry_max = shpgperproc * maxproc + cnt.v_page_count;
  657         TUNABLE_INT_FETCH("vm.pmap.pv_entries", &pv_entry_max);
  658         pv_entry_high_water = 9 * (pv_entry_max / 10);
  659 
  660         /*
  661          * Are large page mappings enabled?
  662          */
  663         TUNABLE_INT_FETCH("vm.pmap.pg_ps_enabled", &pg_ps_enabled);
  664         if (pg_ps_enabled) {
  665                 KASSERT(MAXPAGESIZES > 1 && pagesizes[1] == 0,
  666                     ("pmap_init: can't assign to pagesizes[1]"));
  667                 pagesizes[1] = NBPDR;
  668         }
  669 
  670         /*
  671          * Calculate the size of the pv head table for superpages.
  672          */
  673         for (i = 0; phys_avail[i + 1]; i += 2);
  674         pv_npg = round_2mpage(phys_avail[(i - 2) + 1]) / NBPDR;
  675 
  676         /*
  677          * Allocate memory for the pv head table for superpages.
  678          */
  679         s = (vm_size_t)(pv_npg * sizeof(struct md_page));
  680         s = round_page(s);
  681         pv_table = (struct md_page *)kmem_alloc(kernel_map, s);
  682         for (i = 0; i < pv_npg; i++)
  683                 TAILQ_INIT(&pv_table[i].pv_list);
  684 }
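
Editorial note on the sizing above: pv_npg works out to one struct md_page per 2MB superpage of managed physical memory, up to the end of the last phys_avail[] segment. For example:

           top of RAM = 8 GB:  pv_npg = round_2mpage(8GB) / NBPDR = 4096 entries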
  685 
  686 static int
  687 pmap_pventry_proc(SYSCTL_HANDLER_ARGS)
  688 {
  689         int error;
  690 
  691         error = sysctl_handle_int(oidp, oidp->oid_arg1, oidp->oid_arg2, req);
  692         if (error == 0 && req->newptr) {
  693                 shpgperproc = (pv_entry_max - cnt.v_page_count) / maxproc;
  694                 pv_entry_high_water = 9 * (pv_entry_max / 10);
  695         }
  696         return (error);
  697 }
  698 SYSCTL_PROC(_vm_pmap, OID_AUTO, pv_entry_max, CTLTYPE_INT|CTLFLAG_RW, 
  699     &pv_entry_max, 0, pmap_pventry_proc, "IU", "Max number of PV entries");
  700 
  701 static int
  702 pmap_shpgperproc_proc(SYSCTL_HANDLER_ARGS)
  703 {
  704         int error;
  705 
  706         error = sysctl_handle_int(oidp, oidp->oid_arg1, oidp->oid_arg2, req);
  707         if (error == 0 && req->newptr) {
  708                 pv_entry_max = shpgperproc * maxproc + cnt.v_page_count;
  709                 pv_entry_high_water = 9 * (pv_entry_max / 10);
  710         }
  711         return (error);
  712 }
  713 SYSCTL_PROC(_vm_pmap, OID_AUTO, shpgperproc, CTLTYPE_INT|CTLFLAG_RW, 
  714     &shpgperproc, 0, pmap_shpgperproc_proc, "IU", "Page share factor per proc");
  715 
  716 SYSCTL_NODE(_vm_pmap, OID_AUTO, pde, CTLFLAG_RD, 0,
  717     "2MB page mapping counters");
  718 
  719 static u_long pmap_pde_demotions;
  720 SYSCTL_ULONG(_vm_pmap_pde, OID_AUTO, demotions, CTLFLAG_RD,
  721     &pmap_pde_demotions, 0, "2MB page demotions");
  722 
  723 static u_long pmap_pde_mappings;
  724 SYSCTL_ULONG(_vm_pmap_pde, OID_AUTO, mappings, CTLFLAG_RD,
  725     &pmap_pde_mappings, 0, "2MB page mappings");
  726 
  727 static u_long pmap_pde_p_failures;
  728 SYSCTL_ULONG(_vm_pmap_pde, OID_AUTO, p_failures, CTLFLAG_RD,
  729     &pmap_pde_p_failures, 0, "2MB page promotion failures");
  730 
  731 static u_long pmap_pde_promotions;
  732 SYSCTL_ULONG(_vm_pmap_pde, OID_AUTO, promotions, CTLFLAG_RD,
  733     &pmap_pde_promotions, 0, "2MB page promotions");
  734 
  735 
  736 /***************************************************
  737  * Low level helper routines.....
  738  ***************************************************/
  739 
  740 /*
  741  * Determine the appropriate bits to set in a PTE or PDE for a specified
  742  * caching mode.
  743  */
  744 static int
  745 pmap_cache_bits(int mode, boolean_t is_pde)
  746 {
  747         int pat_flag, pat_index, cache_bits;
  748 
   749         /* The PAT bit is different for PTEs and PDEs. */
  750         pat_flag = is_pde ? PG_PDE_PAT : PG_PTE_PAT;
  751 
  752         /* Map the caching mode to a PAT index. */
  753         switch (mode) {
  754 #ifdef PAT_WORKS
  755         case PAT_UNCACHEABLE:
  756                 pat_index = 3;
  757                 break;
  758         case PAT_WRITE_THROUGH:
  759                 pat_index = 1;
  760                 break;
  761         case PAT_WRITE_BACK:
  762                 pat_index = 0;
  763                 break;
  764         case PAT_UNCACHED:
  765                 pat_index = 2;
  766                 break;
  767         case PAT_WRITE_COMBINING:
  768                 pat_index = 5;
  769                 break;
  770         case PAT_WRITE_PROTECTED:
  771                 pat_index = 4;
  772                 break;
  773 #else
  774         case PAT_UNCACHED:
  775         case PAT_UNCACHEABLE:
  776         case PAT_WRITE_PROTECTED:
  777                 pat_index = 3;
  778                 break;
  779         case PAT_WRITE_THROUGH:
  780                 pat_index = 1;
  781                 break;
  782         case PAT_WRITE_BACK:
  783                 pat_index = 0;
  784                 break;
  785         case PAT_WRITE_COMBINING:
  786                 pat_index = 2;
  787                 break;
  788 #endif
  789         default:
  790                 panic("Unknown caching mode %d\n", mode);
  791         }       
  792 
  793         /* Map the 3-bit index value into the PAT, PCD, and PWT bits. */
  794         cache_bits = 0;
  795         if (pat_index & 0x4)
  796                 cache_bits |= pat_flag;
  797         if (pat_index & 0x2)
  798                 cache_bits |= PG_NC_PCD;
  799         if (pat_index & 0x1)
  800                 cache_bits |= PG_NC_PWT;
  801         return (cache_bits);
  802 }
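
A worked example of the translation above in the default (!PAT_WORKS) case (editorial):

           pmap_cache_bits(PAT_WRITE_COMBINING, 0)
               -> pat_index = 2 (binary 010)
               -> cache_bits = PG_NC_PCD      (PAT=0, PCD=1, PWT=0 selects PAT entry 2,
                                               which pmap_init_pat() reprogrammed to WC)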
  803 #ifdef SMP
  804 /*
  805  * For SMP, these functions have to use the IPI mechanism for coherence.
  806  *
  807  * N.B.: Before calling any of the following TLB invalidation functions,
  808  * the calling processor must ensure that all stores updating a non-
  809  * kernel page table are globally performed.  Otherwise, another
  810  * processor could cache an old, pre-update entry without being
  811  * invalidated.  This can happen one of two ways: (1) The pmap becomes
  812  * active on another processor after its pm_active field is checked by
  813  * one of the following functions but before a store updating the page
  814  * table is globally performed. (2) The pmap becomes active on another
  815  * processor before its pm_active field is checked but due to
   816  * speculative loads one of the following functions still reads the
  817  * pmap as inactive on the other processor.
  818  * 
  819  * The kernel page table is exempt because its pm_active field is
  820  * immutable.  The kernel page table is always active on every
  821  * processor.
  822  */
  823 void
  824 pmap_invalidate_page(pmap_t pmap, vm_offset_t va)
  825 {
  826         u_int cpumask;
  827         u_int other_cpus;
  828 
  829         sched_pin();
  830         if (pmap == kernel_pmap || pmap->pm_active == all_cpus) {
  831                 invlpg(va);
  832                 smp_invlpg(va);
  833         } else {
  834                 cpumask = PCPU_GET(cpumask);
  835                 other_cpus = PCPU_GET(other_cpus);
  836                 if (pmap->pm_active & cpumask)
  837                         invlpg(va);
  838                 if (pmap->pm_active & other_cpus)
  839                         smp_masked_invlpg(pmap->pm_active & other_cpus, va);
  840         }
  841         sched_unpin();
  842 }
  843 
  844 void
  845 pmap_invalidate_range(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
  846 {
  847         u_int cpumask;
  848         u_int other_cpus;
  849         vm_offset_t addr;
  850 
  851         sched_pin();
  852         if (pmap == kernel_pmap || pmap->pm_active == all_cpus) {
  853                 for (addr = sva; addr < eva; addr += PAGE_SIZE)
  854                         invlpg(addr);
  855                 smp_invlpg_range(sva, eva);
  856         } else {
  857                 cpumask = PCPU_GET(cpumask);
  858                 other_cpus = PCPU_GET(other_cpus);
  859                 if (pmap->pm_active & cpumask)
  860                         for (addr = sva; addr < eva; addr += PAGE_SIZE)
  861                                 invlpg(addr);
  862                 if (pmap->pm_active & other_cpus)
  863                         smp_masked_invlpg_range(pmap->pm_active & other_cpus,
  864                             sva, eva);
  865         }
  866         sched_unpin();
  867 }
  868 
  869 void
  870 pmap_invalidate_all(pmap_t pmap)
  871 {
  872         u_int cpumask;
  873         u_int other_cpus;
  874 
  875         sched_pin();
  876         if (pmap == kernel_pmap || pmap->pm_active == all_cpus) {
  877                 invltlb();
  878                 smp_invltlb();
  879         } else {
  880                 cpumask = PCPU_GET(cpumask);
  881                 other_cpus = PCPU_GET(other_cpus);
  882                 if (pmap->pm_active & cpumask)
  883                         invltlb();
  884                 if (pmap->pm_active & other_cpus)
  885                         smp_masked_invltlb(pmap->pm_active & other_cpus);
  886         }
  887         sched_unpin();
  888 }
  889 
  890 void
  891 pmap_invalidate_cache(void)
  892 {
  893 
  894         sched_pin();
  895         wbinvd();
  896         smp_cache_flush();
  897         sched_unpin();
  898 }
  899 #else /* !SMP */
  900 /*
  901  * Normal, non-SMP, invalidation functions.
  902  * We inline these within pmap.c for speed.
  903  */
  904 PMAP_INLINE void
  905 pmap_invalidate_page(pmap_t pmap, vm_offset_t va)
  906 {
  907 
  908         if (pmap == kernel_pmap || pmap->pm_active)
  909                 invlpg(va);
  910 }
  911 
  912 PMAP_INLINE void
  913 pmap_invalidate_range(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
  914 {
  915         vm_offset_t addr;
  916 
  917         if (pmap == kernel_pmap || pmap->pm_active)
  918                 for (addr = sva; addr < eva; addr += PAGE_SIZE)
  919                         invlpg(addr);
  920 }
  921 
  922 PMAP_INLINE void
  923 pmap_invalidate_all(pmap_t pmap)
  924 {
  925 
  926         if (pmap == kernel_pmap || pmap->pm_active)
  927                 invltlb();
  928 }
  929 
  930 PMAP_INLINE void
  931 pmap_invalidate_cache(void)
  932 {
  933 
  934         wbinvd();
  935 }
  936 #endif /* !SMP */
  937 
  938 static void
  939 pmap_invalidate_cache_range(vm_offset_t sva, vm_offset_t eva)
  940 {
  941 
  942         KASSERT((sva & PAGE_MASK) == 0,
  943             ("pmap_invalidate_cache_range: sva not page-aligned"));
  944         KASSERT((eva & PAGE_MASK) == 0,
  945             ("pmap_invalidate_cache_range: eva not page-aligned"));
  946 
  947         if (cpu_feature & CPUID_SS)
  948                 ; /* If "Self Snoop" is supported, do nothing. */
  949         else if (cpu_feature & CPUID_CLFSH) {
  950 
  951                 /*
  952                  * Otherwise, do per-cache line flush.  Use the mfence
   953                  * instruction to ensure that previous stores are
  954                  * included in the write-back.  The processor
  955                  * propagates flush to other processors in the cache
  956                  * coherence domain.
  957                  */
  958                 mfence();
  959                 for (; sva < eva; sva += cpu_clflush_line_size)
  960                         clflush(sva);
  961                 mfence();
  962         } else {
  963 
  964                 /*
   965                  * No targeted cache flush methods are supported by the CPU,
  966                  * globally invalidate cache as a last resort.
  967                  */
  968                 pmap_invalidate_cache();
  969         }
  970 }
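
Editorial note: on the CLFLUSH path the loop issues one flush per cache line, so for a single 4KB page and a typical 64-byte cpu_clflush_line_size that is 4096 / 64 = 64 clflush instructions, bracketed by the two mfence barriers.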
  971 
  972 /*
  973  * Are we current address space or kernel?
  974  */
  975 static __inline int
  976 pmap_is_current(pmap_t pmap)
  977 {
  978         return (pmap == kernel_pmap ||
  979             (pmap->pm_pml4[PML4PML4I] & PG_FRAME) == (PML4pml4e[0] & PG_FRAME));
  980 }
  981 
  982 /*
  983  *      Routine:        pmap_extract
  984  *      Function:
  985  *              Extract the physical page address associated
  986  *              with the given map/virtual_address pair.
  987  */
  988 vm_paddr_t 
  989 pmap_extract(pmap_t pmap, vm_offset_t va)
  990 {
  991         vm_paddr_t rtval;
  992         pt_entry_t *pte;
  993         pd_entry_t pde, *pdep;
  994 
  995         rtval = 0;
  996         PMAP_LOCK(pmap);
  997         pdep = pmap_pde(pmap, va);
  998         if (pdep != NULL) {
  999                 pde = *pdep;
 1000                 if (pde) {
 1001                         if ((pde & PG_PS) != 0)
 1002                                 rtval = (pde & PG_PS_FRAME) | (va & PDRMASK);
 1003                         else {
 1004                                 pte = pmap_pde_to_pte(pdep, va);
 1005                                 rtval = (*pte & PG_FRAME) | (va & PAGE_MASK);
 1006                         }
 1007                 }
 1008         }
 1009         PMAP_UNLOCK(pmap);
 1010         return (rtval);
 1011 }
 1012 
 1013 /*
 1014  *      Routine:        pmap_extract_and_hold
 1015  *      Function:
 1016  *              Atomically extract and hold the physical page
 1017  *              with the given pmap and virtual address pair
 1018  *              if that mapping permits the given protection.
 1019  */
 1020 vm_page_t
 1021 pmap_extract_and_hold(pmap_t pmap, vm_offset_t va, vm_prot_t prot)
 1022 {
 1023         pd_entry_t pde, *pdep;
 1024         pt_entry_t pte;
 1025         vm_page_t m;
 1026 
 1027         m = NULL;
 1028         vm_page_lock_queues();
 1029         PMAP_LOCK(pmap);
 1030         pdep = pmap_pde(pmap, va);
 1031         if (pdep != NULL && (pde = *pdep)) {
 1032                 if (pde & PG_PS) {
 1033                         if ((pde & PG_RW) || (prot & VM_PROT_WRITE) == 0) {
 1034                                 m = PHYS_TO_VM_PAGE((pde & PG_PS_FRAME) |
 1035                                     (va & PDRMASK));
 1036                                 vm_page_hold(m);
 1037                         }
 1038                 } else {
 1039                         pte = *pmap_pde_to_pte(pdep, va);
 1040                         if ((pte & PG_V) &&
 1041                             ((pte & PG_RW) || (prot & VM_PROT_WRITE) == 0)) {
 1042                                 m = PHYS_TO_VM_PAGE(pte & PG_FRAME);
 1043                                 vm_page_hold(m);
 1044                         }
 1045                 }
 1046         }
 1047         vm_page_unlock_queues();
 1048         PMAP_UNLOCK(pmap);
 1049         return (m);
 1050 }
 1051 
 1052 vm_paddr_t
 1053 pmap_kextract(vm_offset_t va)
 1054 {
 1055         pd_entry_t pde;
 1056         vm_paddr_t pa;
 1057 
 1058         if (va >= DMAP_MIN_ADDRESS && va < DMAP_MAX_ADDRESS) {
 1059                 pa = DMAP_TO_PHYS(va);
 1060         } else {
 1061                 pde = *vtopde(va);
 1062                 if (pde & PG_PS) {
 1063                         pa = (pde & PG_PS_FRAME) | (va & PDRMASK);
 1064                 } else {
 1065                         /*
 1066                          * Beware of a concurrent promotion that changes the
 1067                          * PDE at this point!  For example, vtopte() must not
 1068                          * be used to access the PTE because it would use the
 1069                          * new PDE.  It is, however, safe to use the old PDE
 1070                          * because the page table page is preserved by the
 1071                          * promotion.
 1072                          */
 1073                         pa = *pmap_pde_to_pte(&pde, va);
 1074                         pa = (pa & PG_FRAME) | (va & PAGE_MASK);
 1075                 }
 1076         }
 1077         return pa;
 1078 }
 1079 
 1080 /***************************************************
 1081  * Low level mapping routines.....
 1082  ***************************************************/
 1083 
 1084 /*
 1085  * Add a wired page to the kva.
 1086  * Note: not SMP coherent.
 1087  */
 1088 PMAP_INLINE void 
 1089 pmap_kenter(vm_offset_t va, vm_paddr_t pa)
 1090 {
 1091         pt_entry_t *pte;
 1092 
 1093         pte = vtopte(va);
 1094         pte_store(pte, pa | PG_RW | PG_V | PG_G);
 1095 }
 1096 
 1097 static __inline void
 1098 pmap_kenter_attr(vm_offset_t va, vm_paddr_t pa, int mode)
 1099 {
 1100         pt_entry_t *pte;
 1101 
 1102         pte = vtopte(va);
 1103         pte_store(pte, pa | PG_RW | PG_V | PG_G | pmap_cache_bits(mode, 0));
 1104 }
 1105 
 1106 /*
 1107  * Remove a page from the kernel pagetables.
 1108  * Note: not SMP coherent.
 1109  */
 1110 PMAP_INLINE void
 1111 pmap_kremove(vm_offset_t va)
 1112 {
 1113         pt_entry_t *pte;
 1114 
 1115         pte = vtopte(va);
 1116         pte_clear(pte);
 1117 }
 1118 
 1119 /*
 1120  *      Used to map a range of physical addresses into kernel
 1121  *      virtual address space.
 1122  *
 1123  *      The value passed in '*virt' is a suggested virtual address for
 1124  *      the mapping. Architectures which can support a direct-mapped
 1125  *      physical to virtual region can return the appropriate address
 1126  *      within that region, leaving '*virt' unchanged. Other
 1127  *      architectures should map the pages starting at '*virt' and
 1128  *      update '*virt' with the first usable address after the mapped
 1129  *      region.
 1130  */
 1131 vm_offset_t
 1132 pmap_map(vm_offset_t *virt, vm_paddr_t start, vm_paddr_t end, int prot)
 1133 {
 1134         return PHYS_TO_DMAP(start);
 1135 }
 1136 
 1137 
 1138 /*
  1139  * Add a list of wired pages to the kva.
  1140  * This routine is only used for temporary
 1141  * kernel mappings that do not need to have
 1142  * page modification or references recorded.
 1143  * Note that old mappings are simply written
 1144  * over.  The page *must* be wired.
 1145  * Note: SMP coherent.  Uses a ranged shootdown IPI.
 1146  */
 1147 void
 1148 pmap_qenter(vm_offset_t sva, vm_page_t *ma, int count)
 1149 {
 1150         pt_entry_t *endpte, oldpte, *pte;
 1151 
 1152         oldpte = 0;
 1153         pte = vtopte(sva);
 1154         endpte = pte + count;
 1155         while (pte < endpte) {
 1156                 oldpte |= *pte;
 1157                 pte_store(pte, VM_PAGE_TO_PHYS(*ma) | PG_G |
 1158                     pmap_cache_bits((*ma)->md.pat_mode, 0) | PG_RW | PG_V);
 1159                 pte++;
 1160                 ma++;
 1161         }
 1162         if ((oldpte & PG_V) != 0)
 1163                 pmap_invalidate_range(kernel_pmap, sva, sva + count *
 1164                     PAGE_SIZE);
 1165 }
 1166 
 1167 /*
 1168  * This routine tears out page mappings from the
 1169  * kernel -- it is meant only for temporary mappings.
 1170  * Note: SMP coherent.  Uses a ranged shootdown IPI.
 1171  */
 1172 void
 1173 pmap_qremove(vm_offset_t sva, int count)
 1174 {
 1175         vm_offset_t va;
 1176 
 1177         va = sva;
 1178         while (count-- > 0) {
 1179                 pmap_kremove(va);
 1180                 va += PAGE_SIZE;
 1181         }
 1182         pmap_invalidate_range(kernel_pmap, sva, va);
 1183 }
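
The intended usage pattern of the two routines above is a strictly temporary mapping. A minimal sketch (editorial, not part of this file; it assumes the caller already owns a free KVA range of npages pages and an array of wired vm_page_t pointers):

    static void
    example_temporary_mapping(vm_offset_t kva, vm_page_t *pages, int npages)
    {

            /* Install the wired pages at kva; the ranged shootdown IPI keeps SMP coherent. */
            pmap_qenter(kva, pages, npages);

            /* ... access the pages' contents through kva ... */

            /* Tear the temporary mappings back out and invalidate the range. */
            pmap_qremove(kva, npages);
    }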
 1184 
 1185 /***************************************************
 1186  * Page table page management routines.....
 1187  ***************************************************/
 1188 static __inline void
 1189 pmap_free_zero_pages(vm_page_t free)
 1190 {
 1191         vm_page_t m;
 1192 
 1193         while (free != NULL) {
 1194                 m = free;
 1195                 free = m->right;
 1196                 /* Preserve the page's PG_ZERO setting. */
 1197                 vm_page_free_toq(m);
 1198         }
 1199 }
 1200 
 1201 /*
 1202  * Schedule the specified unused page table page to be freed.  Specifically,
 1203  * add the page to the specified list of pages that will be released to the
 1204  * physical memory manager after the TLB has been updated.
 1205  */
 1206 static __inline void
 1207 pmap_add_delayed_free_list(vm_page_t m, vm_page_t *free, boolean_t set_PG_ZERO)
 1208 {
 1209 
 1210         if (set_PG_ZERO)
 1211                 m->flags |= PG_ZERO;
 1212         else
 1213                 m->flags &= ~PG_ZERO;
 1214         m->right = *free;
 1215         *free = m;
 1216 }
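
The two helpers above implement the delayed-free protocol used throughout this file: page table pages are queued on a caller-provided list while mappings are torn down, and only handed back to the VM system after the TLB shootdown completes. A minimal sketch (editorial, not part of this file; it assumes the pmap lock is held and that ptepde is the non-zero PDE currently mapping va):

    static void
    example_delayed_free(pmap_t pmap, vm_offset_t va, pd_entry_t ptepde)
    {
            vm_page_t free = NULL;

            /* Drop the page table page reference; a fully unused page table
             * page is unmapped and queued on `free', not released yet. */
            pmap_unuse_pt(pmap, va, ptepde, &free);

            /* Make sure no processor still caches a stale translation. */
            pmap_invalidate_page(pmap, va);

            /* Only now is it safe to give the pages back to the VM system. */
            pmap_free_zero_pages(free);
    }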
 1217         
 1218 /*
 1219  * Inserts the specified page table page into the specified pmap's collection
 1220  * of idle page table pages.  Each of a pmap's page table pages is responsible
 1221  * for mapping a distinct range of virtual addresses.  The pmap's collection is
 1222  * ordered by this virtual address range.
 1223  */
 1224 static void
 1225 pmap_insert_pt_page(pmap_t pmap, vm_page_t mpte)
 1226 {
 1227         vm_page_t root;
 1228 
 1229         PMAP_LOCK_ASSERT(pmap, MA_OWNED);
 1230         root = pmap->pm_root;
 1231         if (root == NULL) {
 1232                 mpte->left = NULL;
 1233                 mpte->right = NULL;
 1234         } else {
 1235                 root = vm_page_splay(mpte->pindex, root);
 1236                 if (mpte->pindex < root->pindex) {
 1237                         mpte->left = root->left;
 1238                         mpte->right = root;
 1239                         root->left = NULL;
 1240                 } else if (mpte->pindex == root->pindex)
 1241                         panic("pmap_insert_pt_page: pindex already inserted");
 1242                 else {
 1243                         mpte->right = root->right;
 1244                         mpte->left = root;
 1245                         root->right = NULL;
 1246                 }
 1247         }
 1248         pmap->pm_root = mpte;
 1249 }
 1250 
 1251 /*
 1252  * Looks for a page table page mapping the specified virtual address in the
 1253  * specified pmap's collection of idle page table pages.  Returns NULL if there
 1254  * is no page table page corresponding to the specified virtual address.
 1255  */
 1256 static vm_page_t
 1257 pmap_lookup_pt_page(pmap_t pmap, vm_offset_t va)
 1258 {
 1259         vm_page_t mpte;
 1260         vm_pindex_t pindex = pmap_pde_pindex(va);
 1261 
 1262         PMAP_LOCK_ASSERT(pmap, MA_OWNED);
 1263         if ((mpte = pmap->pm_root) != NULL && mpte->pindex != pindex) {
 1264                 mpte = vm_page_splay(pindex, mpte);
 1265                 if ((pmap->pm_root = mpte)->pindex != pindex)
 1266                         mpte = NULL;
 1267         }
 1268         return (mpte);
 1269 }
 1270 
 1271 /*
 1272  * Removes the specified page table page from the specified pmap's collection
 1273  * of idle page table pages.  The specified page table page must be a member of
 1274  * the pmap's collection.
 1275  */
 1276 static void
 1277 pmap_remove_pt_page(pmap_t pmap, vm_page_t mpte)
 1278 {
 1279         vm_page_t root;
 1280 
 1281         PMAP_LOCK_ASSERT(pmap, MA_OWNED);
 1282         if (mpte != pmap->pm_root) {
 1283                 root = vm_page_splay(mpte->pindex, pmap->pm_root);
 1284                 KASSERT(mpte == root,
 1285                     ("pmap_remove_pt_page: mpte %p is missing from pmap %p",
 1286                     mpte, pmap));
 1287         }
 1288         if (mpte->left == NULL)
 1289                 root = mpte->right;
 1290         else {
 1291                 root = vm_page_splay(mpte->pindex, mpte->left);
 1292                 root->right = mpte->right;
 1293         }
 1294         pmap->pm_root = root;
 1295 }
 1296 
 1297 /*
 1298  * This routine unholds page table pages, and if the hold count
 1299  * drops to zero, then it decrements the wire count.
 1300  */
 1301 static __inline int
 1302 pmap_unwire_pte_hold(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_page_t *free)
 1303 {
 1304 
 1305         --m->wire_count;
 1306         if (m->wire_count == 0)
 1307                 return _pmap_unwire_pte_hold(pmap, va, m, free);
 1308         else
 1309                 return 0;
 1310 }
 1311 
 1312 static int 
 1313 _pmap_unwire_pte_hold(pmap_t pmap, vm_offset_t va, vm_page_t m, 
 1314     vm_page_t *free)
 1315 {
 1316 
 1317         /*
 1318          * unmap the page table page
 1319          */
 1320         if (m->pindex >= (NUPDE + NUPDPE)) {
 1321                 /* PDP page */
 1322                 pml4_entry_t *pml4;
 1323                 pml4 = pmap_pml4e(pmap, va);
 1324                 *pml4 = 0;
 1325         } else if (m->pindex >= NUPDE) {
 1326                 /* PD page */
 1327                 pdp_entry_t *pdp;
 1328                 pdp = pmap_pdpe(pmap, va);
 1329                 *pdp = 0;
 1330         } else {
 1331                 /* PTE page */
 1332                 pd_entry_t *pd;
 1333                 pd = pmap_pde(pmap, va);
 1334                 *pd = 0;
 1335         }
 1336         --pmap->pm_stats.resident_count;
 1337         if (m->pindex < NUPDE) {
 1338                 /* We just released a PT, unhold the matching PD */
 1339                 vm_page_t pdpg;
 1340 
 1341                 pdpg = PHYS_TO_VM_PAGE(*pmap_pdpe(pmap, va) & PG_FRAME);
 1342                 pmap_unwire_pte_hold(pmap, va, pdpg, free);
 1343         }
 1344         if (m->pindex >= NUPDE && m->pindex < (NUPDE + NUPDPE)) {
 1345                 /* We just released a PD, unhold the matching PDP */
 1346                 vm_page_t pdppg;
 1347 
 1348                 pdppg = PHYS_TO_VM_PAGE(*pmap_pml4e(pmap, va) & PG_FRAME);
 1349                 pmap_unwire_pte_hold(pmap, va, pdppg, free);
 1350         }
 1351 
 1352         /*
 1353          * This is a release store so that the ordinary store unmapping
 1354          * the page table page is globally performed before TLB shoot-
 1355          * down is begun.
 1356          */
 1357         atomic_subtract_rel_int(&cnt.v_wire_count, 1);
 1358 
 1359         /* 
 1360          * Put page on a list so that it is released after
 1361          * *ALL* TLB shootdown is done
 1362          */
 1363         pmap_add_delayed_free_list(m, free, TRUE);
 1364         
 1365         return 1;
 1366 }
 1367 
 1368 /*
 1369  * After removing a page table entry, this routine is used to
 1370  * conditionally free the page, and manage the hold/wire counts.
 1371  */
 1372 static int
 1373 pmap_unuse_pt(pmap_t pmap, vm_offset_t va, pd_entry_t ptepde, vm_page_t *free)
 1374 {
 1375         vm_page_t mpte;
 1376 
 1377         if (va >= VM_MAXUSER_ADDRESS)
 1378                 return 0;
 1379         KASSERT(ptepde != 0, ("pmap_unuse_pt: ptepde != 0"));
 1380         mpte = PHYS_TO_VM_PAGE(ptepde & PG_FRAME);
 1381         return pmap_unwire_pte_hold(pmap, va, mpte, free);
 1382 }
 1383 
 1384 void
 1385 pmap_pinit0(pmap_t pmap)
 1386 {
 1387 
 1388         PMAP_LOCK_INIT(pmap);
 1389         pmap->pm_pml4 = (pml4_entry_t *)PHYS_TO_DMAP(KPML4phys);
 1390         pmap->pm_root = NULL;
 1391         pmap->pm_active = 0;
 1392         TAILQ_INIT(&pmap->pm_pvchunk);
 1393         bzero(&pmap->pm_stats, sizeof pmap->pm_stats);
 1394 }
 1395 
 1396 /*
 1397  * Initialize a preallocated and zeroed pmap structure,
 1398  * such as one in a vmspace structure.
 1399  */
 1400 int
 1401 pmap_pinit(pmap_t pmap)
 1402 {
 1403         vm_page_t pml4pg;
 1404         static vm_pindex_t color;
 1405 
 1406         PMAP_LOCK_INIT(pmap);
 1407 
 1408         /*
 1409          * allocate the page directory page
 1410          */
 1411         while ((pml4pg = vm_page_alloc(NULL, color++, VM_ALLOC_NOOBJ |
 1412             VM_ALLOC_NORMAL | VM_ALLOC_WIRED | VM_ALLOC_ZERO)) == NULL)
 1413                 VM_WAIT;
 1414 
 1415         pmap->pm_pml4 = (pml4_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(pml4pg));
 1416 
 1417         if ((pml4pg->flags & PG_ZERO) == 0)
 1418                 pagezero(pmap->pm_pml4);
 1419 
 1420         /* Wire in kernel global address entries. */
 1421         pmap->pm_pml4[KPML4I] = KPDPphys | PG_RW | PG_V | PG_U;
 1422         pmap->pm_pml4[DMPML4I] = DMPDPphys | PG_RW | PG_V | PG_U;
 1423 
 1424         /* install self-referential address mapping entry(s) */
 1425         pmap->pm_pml4[PML4PML4I] = VM_PAGE_TO_PHYS(pml4pg) | PG_V | PG_RW | PG_A | PG_M;
 1426 
 1427         pmap->pm_root = NULL;
 1428         pmap->pm_active = 0;
 1429         TAILQ_INIT(&pmap->pm_pvchunk);
 1430         bzero(&pmap->pm_stats, sizeof pmap->pm_stats);
 1431 
 1432         return (1);
 1433 }
 1434 
 1435 /*
 1436  * this routine is called if the page table page is not
 1437  * mapped correctly.
 1438  *
 1439  * Note: If a page allocation fails at page table level two or three,
 1440  * one or two pages may be held during the wait, only to be released
 1441  * afterwards.  This conservative approach is easily argued to avoid
 1442  * race conditions.
 1443  */
 1444 static vm_page_t
 1445 _pmap_allocpte(pmap_t pmap, vm_pindex_t ptepindex, int flags)
 1446 {
 1447         vm_page_t m, pdppg, pdpg;
 1448 
 1449         KASSERT((flags & (M_NOWAIT | M_WAITOK)) == M_NOWAIT ||
 1450             (flags & (M_NOWAIT | M_WAITOK)) == M_WAITOK,
 1451             ("_pmap_allocpte: flags is neither M_NOWAIT nor M_WAITOK"));
 1452 
 1453         /*
 1454          * Allocate a page table page.
 1455          */
 1456         if ((m = vm_page_alloc(NULL, ptepindex, VM_ALLOC_NOOBJ |
 1457             VM_ALLOC_WIRED | VM_ALLOC_ZERO)) == NULL) {
 1458                 if (flags & M_WAITOK) {
 1459                         PMAP_UNLOCK(pmap);
 1460                         vm_page_unlock_queues();
 1461                         VM_WAIT;
 1462                         vm_page_lock_queues();
 1463                         PMAP_LOCK(pmap);
 1464                 }
 1465 
 1466                 /*
 1467                  * Indicate the need to retry.  While waiting, the page table
 1468                  * page may have been allocated.
 1469                  */
 1470                 return (NULL);
 1471         }
 1472         if ((m->flags & PG_ZERO) == 0)
 1473                 pmap_zero_page(m);
 1474 
 1475         /*
 1476          * Map the pagetable page into the process address space, if
 1477          * it isn't already there.
 1478          */
 1479 
 1480         if (ptepindex >= (NUPDE + NUPDPE)) {
 1481                 pml4_entry_t *pml4;
 1482                 vm_pindex_t pml4index;
 1483 
 1484                 /* Wire up a new PDPE page */
 1485                 pml4index = ptepindex - (NUPDE + NUPDPE);
 1486                 pml4 = &pmap->pm_pml4[pml4index];
 1487                 *pml4 = VM_PAGE_TO_PHYS(m) | PG_U | PG_RW | PG_V | PG_A | PG_M;
 1488 
 1489         } else if (ptepindex >= NUPDE) {
 1490                 vm_pindex_t pml4index;
 1491                 vm_pindex_t pdpindex;
 1492                 pml4_entry_t *pml4;
 1493                 pdp_entry_t *pdp;
 1494 
 1495                 /* Wire up a new PDE page */
 1496                 pdpindex = ptepindex - NUPDE;
 1497                 pml4index = pdpindex >> NPML4EPGSHIFT;
 1498 
 1499                 pml4 = &pmap->pm_pml4[pml4index];
 1500                 if ((*pml4 & PG_V) == 0) {
 1501                         /* Have to allocate a new pdp, recurse */
 1502                         if (_pmap_allocpte(pmap, NUPDE + NUPDPE + pml4index,
 1503                             flags) == NULL) {
 1504                                 --m->wire_count;
 1505                                 atomic_subtract_int(&cnt.v_wire_count, 1);
 1506                                 vm_page_free_zero(m);
 1507                                 return (NULL);
 1508                         }
 1509                 } else {
 1510                         /* Add reference to pdp page */
 1511                         pdppg = PHYS_TO_VM_PAGE(*pml4 & PG_FRAME);
 1512                         pdppg->wire_count++;
 1513                 }
 1514                 pdp = (pdp_entry_t *)PHYS_TO_DMAP(*pml4 & PG_FRAME);
 1515 
 1516                 /* Now find the pdp entry within that page */
 1517                 pdp = &pdp[pdpindex & ((1ul << NPDPEPGSHIFT) - 1)];
 1518                 *pdp = VM_PAGE_TO_PHYS(m) | PG_U | PG_RW | PG_V | PG_A | PG_M;
 1519 
 1520         } else {
 1521                 vm_pindex_t pml4index;
 1522                 vm_pindex_t pdpindex;
 1523                 pml4_entry_t *pml4;
 1524                 pdp_entry_t *pdp;
 1525                 pd_entry_t *pd;
 1526 
 1527                 /* Wire up a new PTE page */
 1528                 pdpindex = ptepindex >> NPDPEPGSHIFT;
 1529                 pml4index = pdpindex >> NPML4EPGSHIFT;
 1530 
 1531                 /* First, find the pdp and check that it's valid. */
 1532                 pml4 = &pmap->pm_pml4[pml4index];
 1533                 if ((*pml4 & PG_V) == 0) {
 1534                         /* Have to allocate a new pd, recurse */
 1535                         if (_pmap_allocpte(pmap, NUPDE + pdpindex,
 1536                             flags) == NULL) {
 1537                                 --m->wire_count;
 1538                                 atomic_subtract_int(&cnt.v_wire_count, 1);
 1539                                 vm_page_free_zero(m);
 1540                                 return (NULL);
 1541                         }
 1542                         pdp = (pdp_entry_t *)PHYS_TO_DMAP(*pml4 & PG_FRAME);
 1543                         pdp = &pdp[pdpindex & ((1ul << NPDPEPGSHIFT) - 1)];
 1544                 } else {
 1545                         pdp = (pdp_entry_t *)PHYS_TO_DMAP(*pml4 & PG_FRAME);
 1546                         pdp = &pdp[pdpindex & ((1ul << NPDPEPGSHIFT) - 1)];
 1547                         if ((*pdp & PG_V) == 0) {
 1548                                 /* Have to allocate a new pd, recurse */
 1549                                 if (_pmap_allocpte(pmap, NUPDE + pdpindex,
 1550                                     flags) == NULL) {
 1551                                         --m->wire_count;
 1552                                         atomic_subtract_int(&cnt.v_wire_count,
 1553                                             1);
 1554                                         vm_page_free_zero(m);
 1555                                         return (NULL);
 1556                                 }
 1557                         } else {
 1558                                 /* Add reference to the pd page */
 1559                                 pdpg = PHYS_TO_VM_PAGE(*pdp & PG_FRAME);
 1560                                 pdpg->wire_count++;
 1561                         }
 1562                 }
 1563                 pd = (pd_entry_t *)PHYS_TO_DMAP(*pdp & PG_FRAME);
 1564 
 1565                 /* Now we know where the page directory page is */
 1566                 pd = &pd[ptepindex & ((1ul << NPDEPGSHIFT) - 1)];
 1567                 *pd = VM_PAGE_TO_PHYS(m) | PG_U | PG_RW | PG_V | PG_A | PG_M;
 1568         }
 1569 
 1570         pmap->pm_stats.resident_count++;
 1571 
 1572         return m;
 1573 }
 1574 
 1575 static vm_page_t
 1576 pmap_allocpde(pmap_t pmap, vm_offset_t va, int flags)
 1577 {
 1578         vm_pindex_t pdpindex, ptepindex;
 1579         pdp_entry_t *pdpe;
 1580         vm_page_t pdpg;
 1581 
 1582         KASSERT((flags & (M_NOWAIT | M_WAITOK)) == M_NOWAIT ||
 1583             (flags & (M_NOWAIT | M_WAITOK)) == M_WAITOK,
 1584             ("pmap_allocpde: flags is neither M_NOWAIT nor M_WAITOK"));
 1585 retry:
 1586         pdpe = pmap_pdpe(pmap, va);
 1587         if (pdpe != NULL && (*pdpe & PG_V) != 0) {
 1588                 /* Add a reference to the pd page. */
 1589                 pdpg = PHYS_TO_VM_PAGE(*pdpe & PG_FRAME);
 1590                 pdpg->wire_count++;
 1591         } else {
 1592                 /* Allocate a pd page. */
 1593                 ptepindex = pmap_pde_pindex(va);
 1594                 pdpindex = ptepindex >> NPDPEPGSHIFT;
 1595                 pdpg = _pmap_allocpte(pmap, NUPDE + pdpindex, flags);
 1596                 if (pdpg == NULL && (flags & M_WAITOK))
 1597                         goto retry;
 1598         }
 1599         return (pdpg);
 1600 }
 1601 
 1602 static vm_page_t
 1603 pmap_allocpte(pmap_t pmap, vm_offset_t va, int flags)
 1604 {
 1605         vm_pindex_t ptepindex;
 1606         pd_entry_t *pd;
 1607         vm_page_t m;
 1608 
 1609         KASSERT((flags & (M_NOWAIT | M_WAITOK)) == M_NOWAIT ||
 1610             (flags & (M_NOWAIT | M_WAITOK)) == M_WAITOK,
 1611             ("pmap_allocpte: flags is neither M_NOWAIT nor M_WAITOK"));
 1612 
 1613         /*
 1614          * Calculate pagetable page index
 1615          */
 1616         ptepindex = pmap_pde_pindex(va);
 1617 retry:
 1618         /*
 1619          * Get the page directory entry
 1620          */
 1621         pd = pmap_pde(pmap, va);
 1622 
 1623         /*
 1624          * This supports switching from a 2MB page to a
 1625          * normal 4K page.
 1626          */
 1627         if (pd != NULL && (*pd & (PG_PS | PG_V)) == (PG_PS | PG_V)) {
 1628                 if (!pmap_demote_pde(pmap, pd, va)) {
 1629                         /*
 1630                          * Invalidation of the 2MB page mapping may have caused
 1631                          * the deallocation of the underlying PD page.
 1632                          */
 1633                         pd = NULL;
 1634                 }
 1635         }
 1636 
 1637         /*
 1638          * If the page table page is already mapped, just increment
 1639          * its wire count and return it.
 1640          */
 1641         if (pd != NULL && (*pd & PG_V) != 0) {
 1642                 m = PHYS_TO_VM_PAGE(*pd & PG_FRAME);
 1643                 m->wire_count++;
 1644         } else {
 1645                 /*
 1646                  * We get here if the page table page isn't mapped or has
 1647                  * been deallocated.
 1648                  */
 1649                 m = _pmap_allocpte(pmap, ptepindex, flags);
 1650                 if (m == NULL && (flags & M_WAITOK))
 1651                         goto retry;
 1652         }
 1653         return (m);
 1654 }
 1655 
 1656 
 1657 /***************************************************
 1658  * Pmap allocation/deallocation routines.
 1659  ***************************************************/
 1660 
 1661 /*
 1662  * Release any resources held by the given physical map.
 1663  * Called when a pmap initialized by pmap_pinit is being released.
 1664  * Should only be called if the map contains no valid mappings.
 1665  */
 1666 void
 1667 pmap_release(pmap_t pmap)
 1668 {
 1669         vm_page_t m;
 1670 
 1671         KASSERT(pmap->pm_stats.resident_count == 0,
 1672             ("pmap_release: pmap resident count %ld != 0",
 1673             pmap->pm_stats.resident_count));
 1674         KASSERT(pmap->pm_root == NULL,
 1675             ("pmap_release: pmap has reserved page table page(s)"));
 1676 
 1677         m = PHYS_TO_VM_PAGE(pmap->pm_pml4[PML4PML4I] & PG_FRAME);
 1678 
 1679         pmap->pm_pml4[KPML4I] = 0;      /* KVA */
 1680         pmap->pm_pml4[DMPML4I] = 0;     /* Direct Map */
 1681         pmap->pm_pml4[PML4PML4I] = 0;   /* Recursive Mapping */
 1682 
 1683         m->wire_count--;
 1684         atomic_subtract_int(&cnt.v_wire_count, 1);
 1685         vm_page_free_zero(m);
 1686         PMAP_LOCK_DESTROY(pmap);
 1687 }
 1688 
 1689 static int
 1690 kvm_size(SYSCTL_HANDLER_ARGS)
 1691 {
 1692         unsigned long ksize = VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS;
 1693 
 1694         return sysctl_handle_long(oidp, &ksize, 0, req);
 1695 }
 1696 SYSCTL_PROC(_vm, OID_AUTO, kvm_size, CTLTYPE_LONG|CTLFLAG_RD, 
 1697     0, 0, kvm_size, "LU", "Size of KVM");
 1698 
 1699 static int
 1700 kvm_free(SYSCTL_HANDLER_ARGS)
 1701 {
 1702         unsigned long kfree = VM_MAX_KERNEL_ADDRESS - kernel_vm_end;
 1703 
 1704         return sysctl_handle_long(oidp, &kfree, 0, req);
 1705 }
 1706 SYSCTL_PROC(_vm, OID_AUTO, kvm_free, CTLTYPE_LONG|CTLFLAG_RD, 
 1707     0, 0, kvm_free, "LU", "Amount of KVM free");
 1708 
 1709 /*
 1710  * Grow the kernel page table, if needed, to cover addresses up to "addr".
 1711  */
 1712 void
 1713 pmap_growkernel(vm_offset_t addr)
 1714 {
 1715         vm_paddr_t paddr;
 1716         vm_page_t nkpg;
 1717         pd_entry_t *pde, newpdir;
 1718         pdp_entry_t *pdpe;
 1719 
 1720         mtx_assert(&kernel_map->system_mtx, MA_OWNED);
 1721 
 1722         /*
 1723          * Return if "addr" is within the range of kernel page table pages
 1724          * that were preallocated during pmap bootstrap.  Moreover, leave
 1725          * "kernel_vm_end" and the kernel page table as they were.
 1726          *
 1727          * The correctness of this action is based on the following
 1728          * argument: vm_map_findspace() allocates contiguous ranges of the
 1729          * kernel virtual address space.  It calls this function if a range
 1730          * ends after "kernel_vm_end".  If the kernel is mapped between
 1731          * "kernel_vm_end" and "addr", then the range cannot begin at
 1732          * "kernel_vm_end".  In fact, its beginning address cannot be less
 1733          * than "KERNBASE".  Thus, there is no immediate need to allocate
 1734          * any new kernel page table pages between "kernel_vm_end" and
 1735          * "KERNBASE".
 1736          */
 1737         if (KERNBASE < addr && addr <= KERNBASE + NKPT * NBPDR)
 1738                 return;
 1739 
 1740         addr = roundup2(addr, NBPDR);
 1741         if (addr - 1 >= kernel_map->max_offset)
 1742                 addr = kernel_map->max_offset;
 1743         while (kernel_vm_end < addr) {
 1744                 pdpe = pmap_pdpe(kernel_pmap, kernel_vm_end);
 1745                 if ((*pdpe & PG_V) == 0) {
 1746                         /* We need a new PDP entry */
 1747                         nkpg = vm_page_alloc(NULL, kernel_vm_end >> PDPSHIFT,
 1748                             VM_ALLOC_INTERRUPT | VM_ALLOC_NOOBJ |
 1749                             VM_ALLOC_WIRED | VM_ALLOC_ZERO);
 1750                         if (nkpg == NULL)
 1751                                 panic("pmap_growkernel: no memory to grow kernel");
 1752                         if ((nkpg->flags & PG_ZERO) == 0)
 1753                                 pmap_zero_page(nkpg);
 1754                         paddr = VM_PAGE_TO_PHYS(nkpg);
 1755                         *pdpe = (pdp_entry_t)
 1756                                 (paddr | PG_V | PG_RW | PG_A | PG_M);
 1757                         continue; /* try again */
 1758                 }
 1759                 pde = pmap_pdpe_to_pde(pdpe, kernel_vm_end);
 1760                 if ((*pde & PG_V) != 0) {
 1761                         kernel_vm_end = (kernel_vm_end + NBPDR) & ~PDRMASK;
 1762                         if (kernel_vm_end - 1 >= kernel_map->max_offset) {
 1763                                 kernel_vm_end = kernel_map->max_offset;
 1764                                 break;                       
 1765                         }
 1766                         continue;
 1767                 }
 1768 
 1769                 nkpg = vm_page_alloc(NULL, pmap_pde_pindex(kernel_vm_end),
 1770                     VM_ALLOC_INTERRUPT | VM_ALLOC_NOOBJ | VM_ALLOC_WIRED |
 1771                     VM_ALLOC_ZERO);
 1772                 if (nkpg == NULL)
 1773                         panic("pmap_growkernel: no memory to grow kernel");
 1774                 if ((nkpg->flags & PG_ZERO) == 0)
 1775                         pmap_zero_page(nkpg);
 1776                 paddr = VM_PAGE_TO_PHYS(nkpg);
 1777                 newpdir = (pd_entry_t) (paddr | PG_V | PG_RW | PG_A | PG_M);
 1778                 pde_store(pde, newpdir);
 1779 
 1780                 kernel_vm_end = (kernel_vm_end + NBPDR) & ~PDRMASK;
 1781                 if (kernel_vm_end - 1 >= kernel_map->max_offset) {
 1782                         kernel_vm_end = kernel_map->max_offset;
 1783                         break;                       
 1784                 }
 1785         }
 1786 }
 1787 
 1788 
 1789 /***************************************************
 1790  * page management routines.
 1791  ***************************************************/
 1792 
 1793 CTASSERT(sizeof(struct pv_chunk) == PAGE_SIZE);
 1794 CTASSERT(_NPCM == 3);
 1795 CTASSERT(_NPCPV == 168);
 1796 
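      /*
       * Because a pv chunk occupies exactly one page (see the CTASSERTs
       * above), the chunk containing a given pv entry can be recovered by
       * simply masking off the entry's page-offset bits.
       */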
 1797 static __inline struct pv_chunk *
 1798 pv_to_chunk(pv_entry_t pv)
 1799 {
 1800 
 1801         return (struct pv_chunk *)((uintptr_t)pv & ~(uintptr_t)PAGE_MASK);
 1802 }
 1803 
 1804 #define PV_PMAP(pv) (pv_to_chunk(pv)->pc_pmap)
 1805 
 1806 #define PC_FREE0        0xfffffffffffffffful
 1807 #define PC_FREE1        0xfffffffffffffffful
 1808 #define PC_FREE2        0x000000fffffffffful
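      /*
       * Each chunk holds _NPCPV (168) pv entries tracked by _NPCM (3) 64-bit
       * free bitmaps: two full 64-bit masks plus a final mask covering the
       * remaining 168 - 128 = 40 entries, which is why PC_FREE2 has only its
       * low 40 bits set.
       */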
 1809 
 1810 static uint64_t pc_freemask[_NPCM] = { PC_FREE0, PC_FREE1, PC_FREE2 };
 1811 
 1812 SYSCTL_INT(_vm_pmap, OID_AUTO, pv_entry_count, CTLFLAG_RD, &pv_entry_count, 0,
 1813         "Current number of pv entries");
 1814 
 1815 #ifdef PV_STATS
 1816 static int pc_chunk_count, pc_chunk_allocs, pc_chunk_frees, pc_chunk_tryfail;
 1817 
 1818 SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_count, CTLFLAG_RD, &pc_chunk_count, 0,
 1819         "Current number of pv entry chunks");
 1820 SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_allocs, CTLFLAG_RD, &pc_chunk_allocs, 0,
 1821         "Total number of pv entry chunks allocated");
 1822 SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_frees, CTLFLAG_RD, &pc_chunk_frees, 0,
 1823         "Total number of pv entry chunk frees");
 1824 SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_tryfail, CTLFLAG_RD, &pc_chunk_tryfail, 0,
 1825         "Number of times tried to get a chunk page but failed.");
 1826 
 1827 static long pv_entry_frees, pv_entry_allocs;
 1828 static int pv_entry_spare;
 1829 
 1830 SYSCTL_LONG(_vm_pmap, OID_AUTO, pv_entry_frees, CTLFLAG_RD, &pv_entry_frees, 0,
 1831         "Total number of pv entry frees");
 1832 SYSCTL_LONG(_vm_pmap, OID_AUTO, pv_entry_allocs, CTLFLAG_RD, &pv_entry_allocs, 0,
 1833         "Total number of pv entry allocations");
 1834 SYSCTL_INT(_vm_pmap, OID_AUTO, pv_entry_spare, CTLFLAG_RD, &pv_entry_spare, 0,
 1835         "Current number of spare pv entries");
 1836 
 1837 static int pmap_collect_inactive, pmap_collect_active;
 1838 
 1839 SYSCTL_INT(_vm_pmap, OID_AUTO, pmap_collect_inactive, CTLFLAG_RD, &pmap_collect_inactive, 0,
 1840         "Number of times pmap_collect has been called on the inactive queue");
 1841 SYSCTL_INT(_vm_pmap, OID_AUTO, pmap_collect_active, CTLFLAG_RD, &pmap_collect_active, 0,
 1842         "Number of times pmap_collect has been called on the active queue");
 1843 #endif
 1844 
 1845 /*
 1846  * We are in a serious low memory condition.  Resort to
 1847  * drastic measures to free some pages so we can allocate
 1848  * another pv entry chunk.  This is normally called to
 1849  * unmap inactive pages, and if necessary, active pages.
 1850  *
 1851  * We do not, however, unmap 2mpages because subsequent accesses will
 1852  * allocate per-page pv entries until repromotion occurs, thereby
 1853  * exacerbating the shortage of free pv entries.
 1854  */
 1855 static void
 1856 pmap_collect(pmap_t locked_pmap, struct vpgqueues *vpq)
 1857 {
 1858         struct md_page *pvh;
 1859         pd_entry_t *pde;
 1860         pmap_t pmap;
 1861         pt_entry_t *pte, tpte;
 1862         pv_entry_t next_pv, pv;
 1863         vm_offset_t va;
 1864         vm_page_t m, free;
 1865 
 1866         TAILQ_FOREACH(m, &vpq->pl, pageq) {
 1867                 if (m->hold_count || m->busy)
 1868                         continue;
 1869                 TAILQ_FOREACH_SAFE(pv, &m->md.pv_list, pv_list, next_pv) {
 1870                         va = pv->pv_va;
 1871                         pmap = PV_PMAP(pv);
 1872                         /* Avoid deadlock and lock recursion. */
 1873                         if (pmap > locked_pmap)
 1874                                 PMAP_LOCK(pmap);
 1875                         else if (pmap != locked_pmap && !PMAP_TRYLOCK(pmap))
 1876                                 continue;
 1877                         pmap->pm_stats.resident_count--;
 1878                         pde = pmap_pde(pmap, va);
 1879                         KASSERT((*pde & PG_PS) == 0, ("pmap_collect: found"
 1880                             " a 2mpage in page %p's pv list", m));
 1881                         pte = pmap_pde_to_pte(pde, va);
 1882                         tpte = pte_load_clear(pte);
 1883                         KASSERT((tpte & PG_W) == 0,
 1884                             ("pmap_collect: wired pte %#lx", tpte));
 1885                         if (tpte & PG_A)
 1886                                 vm_page_flag_set(m, PG_REFERENCED);
 1887                         if ((tpte & (PG_M | PG_RW)) == (PG_M | PG_RW))
 1888                                 vm_page_dirty(m);
 1889                         free = NULL;
 1890                         pmap_unuse_pt(pmap, va, *pde, &free);
 1891                         pmap_invalidate_page(pmap, va);
 1892                         pmap_free_zero_pages(free);
 1893                         TAILQ_REMOVE(&m->md.pv_list, pv, pv_list);
 1894                         if (TAILQ_EMPTY(&m->md.pv_list)) {
 1895                                 pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
 1896                                 if (TAILQ_EMPTY(&pvh->pv_list))
 1897                                         vm_page_flag_clear(m, PG_WRITEABLE);
 1898                         }
 1899                         free_pv_entry(pmap, pv);
 1900                         if (pmap != locked_pmap)
 1901                                 PMAP_UNLOCK(pmap);
 1902                 }
 1903         }
 1904 }
 1905 
 1906 
 1907 /*
 1908  * Return a pv entry to its chunk; free the chunk if no entries remain in use.
 1909  */
 1910 static void
 1911 free_pv_entry(pmap_t pmap, pv_entry_t pv)
 1912 {
 1913         vm_page_t m;
 1914         struct pv_chunk *pc;
 1915         int idx, field, bit;
 1916 
 1917         mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 1918         PMAP_LOCK_ASSERT(pmap, MA_OWNED);
 1919         PV_STAT(pv_entry_frees++);
 1920         PV_STAT(pv_entry_spare++);
 1921         pv_entry_count--;
 1922         pc = pv_to_chunk(pv);
 1923         idx = pv - &pc->pc_pventry[0];
 1924         field = idx / 64;
 1925         bit = idx % 64;
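              /*
               * Example: the pv entry at index 69 within its chunk maps to
               * field 1, bit 5, so the statement below sets bit 5 of
               * pc_map[1], marking that slot free again.
               */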
 1926         pc->pc_map[field] |= 1ul << bit;
 1927         /* move to head of list */
 1928         TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
 1929         if (pc->pc_map[0] != PC_FREE0 || pc->pc_map[1] != PC_FREE1 ||
 1930             pc->pc_map[2] != PC_FREE2) {
 1931                 TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc, pc_list);
 1932                 return;
 1933         }
 1934         PV_STAT(pv_entry_spare -= _NPCPV);
 1935         PV_STAT(pc_chunk_count--);
 1936         PV_STAT(pc_chunk_frees++);
 1937         /* entire chunk is free, return it */
 1938         m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS((vm_offset_t)pc));
 1939         dump_drop_page(m->phys_addr);
 1940         vm_page_unwire(m, 0);
 1941         vm_page_free(m);
 1942 }
 1943 
 1944 /*
 1945  * Get a new pv entry, allocating a new pv chunk from the system
 1946  * when needed.
 1947  */
 1948 static pv_entry_t
 1949 get_pv_entry(pmap_t pmap, int try)
 1950 {
 1951         static const struct timeval printinterval = { 60, 0 };
 1952         static struct timeval lastprint;
 1953         static vm_pindex_t colour;
 1954         struct vpgqueues *pq;
 1955         int bit, field;
 1956         pv_entry_t pv;
 1957         struct pv_chunk *pc;
 1958         vm_page_t m;
 1959 
 1960         PMAP_LOCK_ASSERT(pmap, MA_OWNED);
 1961         mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 1962         PV_STAT(pv_entry_allocs++);
 1963         pv_entry_count++;
 1964         if (pv_entry_count > pv_entry_high_water)
 1965                 if (ratecheck(&lastprint, &printinterval))
 1966                         printf("Approaching the limit on PV entries, consider "
 1967                             "increasing either the vm.pmap.shpgperproc or the "
 1968                             "vm.pmap.pv_entry_max sysctl.\n");
 1969         pq = NULL;
 1970 retry:
 1971         pc = TAILQ_FIRST(&pmap->pm_pvchunk);
 1972         if (pc != NULL) {
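                      /*
                       * A set bit in pc_map[] marks a free slot; bsfq() (find
                       * first set bit) thus yields the lowest numbered free
                       * pv entry within that 64-entry group.
                       */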
 1973                 for (field = 0; field < _NPCM; field++) {
 1974                         if (pc->pc_map[field]) {
 1975                                 bit = bsfq(pc->pc_map[field]);
 1976                                 break;
 1977                         }
 1978                 }
 1979                 if (field < _NPCM) {
 1980                         pv = &pc->pc_pventry[field * 64 + bit];
 1981                         pc->pc_map[field] &= ~(1ul << bit);
 1982                         /* If this was the last item, move it to tail */
 1983                         if (pc->pc_map[0] == 0 && pc->pc_map[1] == 0 &&
 1984                             pc->pc_map[2] == 0) {
 1985                                 TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
 1986                                 TAILQ_INSERT_TAIL(&pmap->pm_pvchunk, pc, pc_list);
 1987                         }
 1988                         PV_STAT(pv_entry_spare--);
 1989                         return (pv);
 1990                 }
 1991         }
 1992         /* No free items, allocate another chunk */
 1993         m = vm_page_alloc(NULL, colour, (pq == &vm_page_queues[PQ_ACTIVE] ?
 1994             VM_ALLOC_SYSTEM : VM_ALLOC_NORMAL) | VM_ALLOC_NOOBJ |
 1995             VM_ALLOC_WIRED);
 1996         if (m == NULL) {
 1997                 if (try) {
 1998                         pv_entry_count--;
 1999                         PV_STAT(pc_chunk_tryfail++);
 2000                         return (NULL);
 2001                 }
 2002                 /*
 2003                  * Reclaim pv entries: At first, destroy mappings to inactive
 2004                  * pages.  After that, if a pv chunk entry is still needed,
 2005                  * destroy mappings to active pages.
 2006                  */
 2007                 if (pq == NULL) {
 2008                         PV_STAT(pmap_collect_inactive++);
 2009                         pq = &vm_page_queues[PQ_INACTIVE];
 2010                 } else if (pq == &vm_page_queues[PQ_INACTIVE]) {
 2011                         PV_STAT(pmap_collect_active++);
 2012                         pq = &vm_page_queues[PQ_ACTIVE];
 2013                 } else
 2014                         panic("get_pv_entry: increase vm.pmap.shpgperproc");
 2015                 pmap_collect(pmap, pq);
 2016                 goto retry;
 2017         }
 2018         PV_STAT(pc_chunk_count++);
 2019         PV_STAT(pc_chunk_allocs++);
 2020         colour++;
 2021         dump_add_page(m->phys_addr);
 2022         pc = (void *)PHYS_TO_DMAP(m->phys_addr);
 2023         pc->pc_pmap = pmap;
 2024         pc->pc_map[0] = PC_FREE0 & ~1ul;        /* preallocated bit 0 */
 2025         pc->pc_map[1] = PC_FREE1;
 2026         pc->pc_map[2] = PC_FREE2;
 2027         pv = &pc->pc_pventry[0];
 2028         TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc, pc_list);
 2029         PV_STAT(pv_entry_spare += _NPCPV - 1);
 2030         return (pv);
 2031 }
 2032 
 2033 /*
 2034  * First find and then remove the pv entry for the specified pmap and virtual
 2035  * address from the specified pv list.  Returns the pv entry if found and NULL
 2036  * otherwise.  This operation can be performed on pv lists for either 4KB or
 2037  * 2MB page mappings.
 2038  */
 2039 static __inline pv_entry_t
 2040 pmap_pvh_remove(struct md_page *pvh, pmap_t pmap, vm_offset_t va)
 2041 {
 2042         pv_entry_t pv;
 2043 
 2044         mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 2045         TAILQ_FOREACH(pv, &pvh->pv_list, pv_list) {
 2046                 if (pmap == PV_PMAP(pv) && va == pv->pv_va) {
 2047                         TAILQ_REMOVE(&pvh->pv_list, pv, pv_list);
 2048                         break;
 2049                 }
 2050         }
 2051         return (pv);
 2052 }
 2053 
 2054 /*
 2055  * After demotion from a 2MB page mapping to 512 4KB page mappings,
 2056  * destroy the pv entry for the 2MB page mapping and reinstantiate the pv
 2057  * entries for each of the 4KB page mappings.
 2058  */
 2059 static void
 2060 pmap_pv_demote_pde(pmap_t pmap, vm_offset_t va, vm_paddr_t pa)
 2061 {
 2062         struct md_page *pvh;
 2063         pv_entry_t pv;
 2064         vm_offset_t va_last;
 2065         vm_page_t m;
 2066 
 2067         mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 2068         KASSERT((pa & PDRMASK) == 0,
 2069             ("pmap_pv_demote_pde: pa is not 2mpage aligned"));
 2070 
 2071         /*
 2072          * Transfer the 2mpage's pv entry for this mapping to the first
 2073          * page's pv list.
 2074          */
 2075         pvh = pa_to_pvh(pa);
 2076         va = trunc_2mpage(va);
 2077         pv = pmap_pvh_remove(pvh, pmap, va);
 2078         KASSERT(pv != NULL, ("pmap_pv_demote_pde: pv not found"));
 2079         m = PHYS_TO_VM_PAGE(pa);
 2080         TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_list);
 2081         /* Instantiate the remaining NPTEPG - 1 pv entries. */
 2082         va_last = va + NBPDR - PAGE_SIZE;
 2083         do {
 2084                 m++;
 2085                 KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
 2086                     ("pmap_pv_demote_pde: page %p is not managed", m));
 2087                 va += PAGE_SIZE;
 2088                 pmap_insert_entry(pmap, va, m);
 2089         } while (va < va_last);
 2090 }
 2091 
 2092 /*
 2093  * After promotion from 512 4KB page mappings to a single 2MB page mapping,
 2094  * replace the many pv entries for the 4KB page mappings by a single pv entry
 2095  * for the 2MB page mapping.
 2096  */
 2097 static void
 2098 pmap_pv_promote_pde(pmap_t pmap, vm_offset_t va, vm_paddr_t pa)
 2099 {
 2100         struct md_page *pvh;
 2101         pv_entry_t pv;
 2102         vm_offset_t va_last;
 2103         vm_page_t m;
 2104 
 2105         mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 2106         KASSERT((pa & PDRMASK) == 0,
 2107             ("pmap_pv_promote_pde: pa is not 2mpage aligned"));
 2108 
 2109         /*
 2110          * Transfer the first page's pv entry for this mapping to the
 2111          * 2mpage's pv list.  Aside from avoiding the cost of a call
 2112          * to get_pv_entry(), a transfer avoids the possibility that
 2113          * get_pv_entry() calls pmap_collect() and that pmap_collect()
 2114          * removes one of the mappings that is being promoted.
 2115          */
 2116         m = PHYS_TO_VM_PAGE(pa);
 2117         va = trunc_2mpage(va);
 2118         pv = pmap_pvh_remove(&m->md, pmap, va);
 2119         KASSERT(pv != NULL, ("pmap_pv_promote_pde: pv not found"));
 2120         pvh = pa_to_pvh(pa);
 2121         TAILQ_INSERT_TAIL(&pvh->pv_list, pv, pv_list);
 2122         /* Free the remaining NPTEPG - 1 pv entries. */
 2123         va_last = va + NBPDR - PAGE_SIZE;
 2124         do {
 2125                 m++;
 2126                 va += PAGE_SIZE;
 2127                 pmap_pvh_free(&m->md, pmap, va);
 2128         } while (va < va_last);
 2129 }
 2130 
 2131 /*
 2132  * First find and then destroy the pv entry for the specified pmap and virtual
 2133  * address.  This operation can be performed on pv lists for either 4KB or 2MB
 2134  * page mappings.
 2135  */
 2136 static void
 2137 pmap_pvh_free(struct md_page *pvh, pmap_t pmap, vm_offset_t va)
 2138 {
 2139         pv_entry_t pv;
 2140 
 2141         pv = pmap_pvh_remove(pvh, pmap, va);
 2142         KASSERT(pv != NULL, ("pmap_pvh_free: pv not found"));
 2143         free_pv_entry(pmap, pv);
 2144 }
 2145 
 2146 static void
 2147 pmap_remove_entry(pmap_t pmap, vm_page_t m, vm_offset_t va)
 2148 {
 2149         struct md_page *pvh;
 2150 
 2151         mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 2152         pmap_pvh_free(&m->md, pmap, va);
 2153         if (TAILQ_EMPTY(&m->md.pv_list)) {
 2154                 pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
 2155                 if (TAILQ_EMPTY(&pvh->pv_list))
 2156                         vm_page_flag_clear(m, PG_WRITEABLE);
 2157         }
 2158 }
 2159 
 2160 /*
 2161  * Create a pv entry for page "m" mapped at virtual address "va"
 2162  * in "pmap".
 2163  */
 2164 static void
 2165 pmap_insert_entry(pmap_t pmap, vm_offset_t va, vm_page_t m)
 2166 {
 2167         pv_entry_t pv;
 2168 
 2169         PMAP_LOCK_ASSERT(pmap, MA_OWNED);
 2170         mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 2171         pv = get_pv_entry(pmap, FALSE);
 2172         pv->pv_va = va;
 2173         TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_list);
 2174 }
 2175 
 2176 /*
 2177  * Conditionally create a pv entry.
 2178  */
 2179 static boolean_t
 2180 pmap_try_insert_pv_entry(pmap_t pmap, vm_offset_t va, vm_page_t m)
 2181 {
 2182         pv_entry_t pv;
 2183 
 2184         PMAP_LOCK_ASSERT(pmap, MA_OWNED);
 2185         mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 2186         if (pv_entry_count < pv_entry_high_water && 
 2187             (pv = get_pv_entry(pmap, TRUE)) != NULL) {
 2188                 pv->pv_va = va;
 2189                 TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_list);
 2190                 return (TRUE);
 2191         } else
 2192                 return (FALSE);
 2193 }
 2194 
 2195 /*
 2196  * Create the pv entry for a 2MB page mapping.
 2197  */
 2198 static boolean_t
 2199 pmap_pv_insert_pde(pmap_t pmap, vm_offset_t va, vm_paddr_t pa)
 2200 {
 2201         struct md_page *pvh;
 2202         pv_entry_t pv;
 2203 
 2204         mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 2205         if (pv_entry_count < pv_entry_high_water && 
 2206             (pv = get_pv_entry(pmap, TRUE)) != NULL) {
 2207                 pv->pv_va = va;
 2208                 pvh = pa_to_pvh(pa);
 2209                 TAILQ_INSERT_TAIL(&pvh->pv_list, pv, pv_list);
 2210                 return (TRUE);
 2211         } else
 2212                 return (FALSE);
 2213 }
 2214 
 2215 /*
 2216  * Fills a page table page with mappings to consecutive physical pages.
 2217  */
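      /*
       * For example, given a template PTE mapping physical address P, the
       * filled page table page ends up mapping P, P + 4KB, ..., P + 2MB - 4KB
       * in its NPTEPG (512) consecutive entries.
       */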
 2218 static void
 2219 pmap_fill_ptp(pt_entry_t *firstpte, pt_entry_t newpte)
 2220 {
 2221         pt_entry_t *pte;
 2222 
 2223         for (pte = firstpte; pte < firstpte + NPTEPG; pte++) {
 2224                 *pte = newpte;
 2225                 newpte += PAGE_SIZE;
 2226         }
 2227 }
 2228 
 2229 /*
 2230  * Tries to demote a 2MB page mapping.  If demotion fails, the 2MB page
 2231  * mapping is invalidated.
 2232  */
 2233 static boolean_t
 2234 pmap_demote_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t va)
 2235 {
 2236         pd_entry_t newpde, oldpde;
 2237         pt_entry_t *firstpte, newpte;
 2238         vm_paddr_t mptepa;
 2239         vm_page_t free, mpte;
 2240 
 2241         PMAP_LOCK_ASSERT(pmap, MA_OWNED);
 2242         oldpde = *pde;
 2243         KASSERT((oldpde & (PG_PS | PG_V)) == (PG_PS | PG_V),
 2244             ("pmap_demote_pde: oldpde is missing PG_PS and/or PG_V"));
 2245         mpte = pmap_lookup_pt_page(pmap, va);
 2246         if (mpte != NULL)
 2247                 pmap_remove_pt_page(pmap, mpte);
 2248         else {
 2249                 KASSERT((oldpde & PG_W) == 0,
 2250                     ("pmap_demote_pde: page table page for a wired mapping"
 2251                     " is missing"));
 2252 
 2253                 /*
 2254                  * Invalidate the 2MB page mapping and return "failure" if the
 2255                  * mapping was never accessed or the allocation of the new
 2256                  * page table page fails.  If the 2MB page mapping belongs to
 2257                  * the direct map region of the kernel's address space, then
 2258                  * the page allocation request specifies the highest possible
 2259                  * priority (VM_ALLOC_INTERRUPT).  Otherwise, the priority is
 2260                  * normal.  Page table pages are preallocated for every other
 2261                  * part of the kernel address space, so the direct map region
 2262                  * is the only part of the kernel address space that must be
 2263                  * handled here.
 2264                  */
 2265                 if ((oldpde & PG_A) == 0 || (mpte = vm_page_alloc(NULL,
 2266                     pmap_pde_pindex(va), (va >= DMAP_MIN_ADDRESS && va <
 2267                     DMAP_MAX_ADDRESS ? VM_ALLOC_INTERRUPT : VM_ALLOC_NORMAL) |
 2268                     VM_ALLOC_NOOBJ | VM_ALLOC_WIRED)) == NULL) {
 2269                         free = NULL;
 2270                         pmap_remove_pde(pmap, pde, trunc_2mpage(va), &free);
 2271                         pmap_invalidate_page(pmap, trunc_2mpage(va));
 2272                         pmap_free_zero_pages(free);
 2273                         CTR2(KTR_PMAP, "pmap_demote_pde: failure for va %#lx"
 2274                             " in pmap %p", va, pmap);
 2275                         return (FALSE);
 2276                 }
 2277                 if (va < VM_MAXUSER_ADDRESS)
 2278                         pmap->pm_stats.resident_count++;
 2279         }
 2280         mptepa = VM_PAGE_TO_PHYS(mpte);
 2281         firstpte = (pt_entry_t *)PHYS_TO_DMAP(mptepa);
 2282         newpde = mptepa | PG_M | PG_A | (oldpde & PG_U) | PG_RW | PG_V;
 2283         KASSERT((oldpde & PG_A) != 0,
 2284             ("pmap_demote_pde: oldpde is missing PG_A"));
 2285         KASSERT((oldpde & (PG_M | PG_RW)) != PG_RW,
 2286             ("pmap_demote_pde: oldpde is missing PG_M"));
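              /*
               * In a 2MB PDE the PAT attribute is bit 12 (PG_PDE_PAT), while
               * in a 4KB PTE it is bit 7 (PG_PTE_PAT).  The XOR below clears
               * the PDE-position bit and sets the PTE-position bit when
               * building the PTE template from the old PDE.
               */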
 2287         newpte = oldpde & ~PG_PS;
 2288         if ((newpte & PG_PDE_PAT) != 0)
 2289                 newpte ^= PG_PDE_PAT | PG_PTE_PAT;
 2290 
 2291         /*
 2292          * If the page table page is new, initialize it.
 2293          */
 2294         if (mpte->wire_count == 1) {
 2295                 mpte->wire_count = NPTEPG;
 2296                 pmap_fill_ptp(firstpte, newpte);
 2297         }
 2298         KASSERT((*firstpte & PG_FRAME) == (newpte & PG_FRAME),
 2299             ("pmap_demote_pde: firstpte and newpte map different physical"
 2300             " addresses"));
 2301 
 2302         /*
 2303          * If the mapping has changed attributes, update the page table
 2304          * entries.
 2305          */
 2306         if ((*firstpte & PG_PTE_PROMOTE) != (newpte & PG_PTE_PROMOTE))
 2307                 pmap_fill_ptp(firstpte, newpte);
 2308 
 2309         /*
 2310          * Demote the mapping.  This pmap is locked.  The old PDE has
 2311          * PG_A set.  If the old PDE has PG_RW set, it also has PG_M
 2312          * set.  Thus, there is no danger of a race with another
 2313          * processor changing the setting of PG_A and/or PG_M between
 2314          * the read above and the store below. 
 2315          */
 2316         pde_store(pde, newpde); 
 2317 
 2318         /*
 2319          * Invalidate a stale recursive mapping of the page table page.
 2320          */
 2321         if (va >= VM_MAXUSER_ADDRESS)
 2322                 pmap_invalidate_page(pmap, (vm_offset_t)vtopte(va));
 2323 
 2324         /*
 2325          * Demote the pv entry.  This depends on the earlier demotion
 2326          * of the mapping.  Specifically, the (re)creation of a per-
 2327          * page pv entry might trigger the execution of pmap_collect(),
 2328          * which might reclaim a newly (re)created per-page pv entry
 2329          * and destroy the associated mapping.  In order to destroy
 2330          * the mapping, the PDE must have already changed from mapping
 2331          * the 2mpage to referencing the page table page.
 2332          */
 2333         if ((oldpde & PG_MANAGED) != 0)
 2334                 pmap_pv_demote_pde(pmap, va, oldpde & PG_PS_FRAME);
 2335 
 2336         pmap_pde_demotions++;
 2337         CTR2(KTR_PMAP, "pmap_demote_pde: success for va %#lx"
 2338             " in pmap %p", va, pmap);
 2339         return (TRUE);
 2340 }
 2341 
 2342 /*
 2343  * pmap_remove_pde: unmap a 2MB superpage from a process's address space
 2344  */
 2345 static int
 2346 pmap_remove_pde(pmap_t pmap, pd_entry_t *pdq, vm_offset_t sva,
 2347     vm_page_t *free)
 2348 {
 2349         struct md_page *pvh;
 2350         pd_entry_t oldpde;
 2351         vm_offset_t eva, va;
 2352         vm_page_t m, mpte;
 2353 
 2354         PMAP_LOCK_ASSERT(pmap, MA_OWNED);
 2355         KASSERT((sva & PDRMASK) == 0,
 2356             ("pmap_remove_pde: sva is not 2mpage aligned"));
 2357         oldpde = pte_load_clear(pdq);
 2358         if (oldpde & PG_W)
 2359                 pmap->pm_stats.wired_count -= NBPDR / PAGE_SIZE;
 2360 
 2361         /*
 2362          * Machines that don't support invlpg also don't support
 2363          * PG_G.
 2364          */
 2365         if (oldpde & PG_G)
 2366                 pmap_invalidate_page(kernel_pmap, sva);
 2367         pmap->pm_stats.resident_count -= NBPDR / PAGE_SIZE;
 2368         if (oldpde & PG_MANAGED) {
 2369                 pvh = pa_to_pvh(oldpde & PG_PS_FRAME);
 2370                 pmap_pvh_free(pvh, pmap, sva);
 2371                 eva = sva + NBPDR;
 2372                 for (va = sva, m = PHYS_TO_VM_PAGE(oldpde & PG_PS_FRAME);
 2373                     va < eva; va += PAGE_SIZE, m++) {
 2374                         if ((oldpde & (PG_M | PG_RW)) == (PG_M | PG_RW))
 2375                                 vm_page_dirty(m);
 2376                         if (oldpde & PG_A)
 2377                                 vm_page_flag_set(m, PG_REFERENCED);
 2378                         if (TAILQ_EMPTY(&m->md.pv_list) &&
 2379                             TAILQ_EMPTY(&pvh->pv_list))
 2380                                 vm_page_flag_clear(m, PG_WRITEABLE);
 2381                 }
 2382         }
 2383         if (pmap == kernel_pmap) {
 2384                 if (!pmap_demote_pde(pmap, pdq, sva))
 2385                         panic("pmap_remove_pde: failed demotion");
 2386         } else {
 2387                 mpte = pmap_lookup_pt_page(pmap, sva);
 2388                 if (mpte != NULL) {
 2389                         pmap_remove_pt_page(pmap, mpte);
 2390                         pmap->pm_stats.resident_count--;
 2391                         KASSERT(mpte->wire_count == NPTEPG,
 2392                             ("pmap_remove_pde: pte page wire count error"));
 2393                         mpte->wire_count = 0;
 2394                         pmap_add_delayed_free_list(mpte, free, FALSE);
 2395                         atomic_subtract_int(&cnt.v_wire_count, 1);
 2396                 }
 2397         }
 2398         return (pmap_unuse_pt(pmap, sva, *pmap_pdpe(pmap, sva), free));
 2399 }
 2400 
 2401 /*
 2402  * pmap_remove_pte: unmap a single 4KB page from a process's address space
 2403  */
 2404 static int
 2405 pmap_remove_pte(pmap_t pmap, pt_entry_t *ptq, vm_offset_t va, 
 2406     pd_entry_t ptepde, vm_page_t *free)
 2407 {
 2408         pt_entry_t oldpte;
 2409         vm_page_t m;
 2410 
 2411         PMAP_LOCK_ASSERT(pmap, MA_OWNED);
 2412         oldpte = pte_load_clear(ptq);
 2413         if (oldpte & PG_W)
 2414                 pmap->pm_stats.wired_count -= 1;
 2415         /*
 2416          * Machines that don't support invlpg also don't support
 2417          * PG_G.
 2418          */
 2419         if (oldpte & PG_G)
 2420                 pmap_invalidate_page(kernel_pmap, va);
 2421         pmap->pm_stats.resident_count -= 1;
 2422         if (oldpte & PG_MANAGED) {
 2423                 m = PHYS_TO_VM_PAGE(oldpte & PG_FRAME);
 2424                 if ((oldpte & (PG_M | PG_RW)) == (PG_M | PG_RW))
 2425                         vm_page_dirty(m);
 2426                 if (oldpte & PG_A)
 2427                         vm_page_flag_set(m, PG_REFERENCED);
 2428                 pmap_remove_entry(pmap, m, va);
 2429         }
 2430         return (pmap_unuse_pt(pmap, va, ptepde, free));
 2431 }
 2432 
 2433 /*
 2434  * Remove a single page from a process address space
 2435  */
 2436 static void
 2437 pmap_remove_page(pmap_t pmap, vm_offset_t va, pd_entry_t *pde, vm_page_t *free)
 2438 {
 2439         pt_entry_t *pte;
 2440 
 2441         PMAP_LOCK_ASSERT(pmap, MA_OWNED);
 2442         if ((*pde & PG_V) == 0)
 2443                 return;
 2444         pte = pmap_pde_to_pte(pde, va);
 2445         if ((*pte & PG_V) == 0)
 2446                 return;
 2447         pmap_remove_pte(pmap, pte, va, *pde, free);
 2448         pmap_invalidate_page(pmap, va);
 2449 }
 2450 
 2451 /*
 2452  *      Remove the given range of addresses from the specified map.
 2453  *
 2454  *      It is assumed that the start and end are properly
 2455  *      rounded to the page size.
 2456  */
 2457 void
 2458 pmap_remove(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
 2459 {
 2460         vm_offset_t va_next;
 2461         pml4_entry_t *pml4e;
 2462         pdp_entry_t *pdpe;
 2463         pd_entry_t ptpaddr, *pde;
 2464         pt_entry_t *pte;
 2465         vm_page_t free = NULL;
 2466         int anyvalid;
 2467 
 2468         /*
 2469          * Perform an unsynchronized read.  This is, however, safe.
 2470          */
 2471         if (pmap->pm_stats.resident_count == 0)
 2472                 return;
 2473 
 2474         anyvalid = 0;
 2475 
 2476         vm_page_lock_queues();
 2477         PMAP_LOCK(pmap);
 2478 
 2479         /*
 2480          * Special handling for removing a single page: this is a
 2481          * very common operation, so it is worth short-circuiting
 2482          * the general loop below.
 2483          */
 2484         if (sva + PAGE_SIZE == eva) {
 2485                 pde = pmap_pde(pmap, sva);
 2486                 if (pde && (*pde & PG_PS) == 0) {
 2487                         pmap_remove_page(pmap, sva, pde, &free);
 2488                         goto out;
 2489                 }
 2490         }
 2491 
 2492         for (; sva < eva; sva = va_next) {
 2493 
 2494                 if (pmap->pm_stats.resident_count == 0)
 2495                         break;
 2496 
 2497                 pml4e = pmap_pml4e(pmap, sva);
 2498                 if ((*pml4e & PG_V) == 0) {
 2499                         va_next = (sva + NBPML4) & ~PML4MASK;
 2500                         if (va_next < sva)
 2501                                 va_next = eva;
 2502                         continue;
 2503                 }
 2504 
 2505                 pdpe = pmap_pml4e_to_pdpe(pml4e, sva);
 2506                 if ((*pdpe & PG_V) == 0) {
 2507                         va_next = (sva + NBPDP) & ~PDPMASK;
 2508                         if (va_next < sva)
 2509                                 va_next = eva;
 2510                         continue;
 2511                 }
 2512 
 2513                 /*
 2514                  * Calculate index for next page table.
 2515                  */
 2516                 va_next = (sva + NBPDR) & ~PDRMASK;
 2517                 if (va_next < sva)
 2518                         va_next = eva;
 2519 
 2520                 pde = pmap_pdpe_to_pde(pdpe, sva);
 2521                 ptpaddr = *pde;
 2522 
 2523                 /*
 2524                  * Weed out invalid mappings.
 2525                  */
 2526                 if (ptpaddr == 0)
 2527                         continue;
 2528 
 2529                 /*
 2530                  * Check for large page.
 2531                  */
 2532                 if ((ptpaddr & PG_PS) != 0) {
 2533                         /*
 2534                          * Are we removing the entire large page?  If not,
 2535                          * demote the mapping and fall through.
 2536                          */
 2537                         if (sva + NBPDR == va_next && eva >= va_next) {
 2538                                 /*
 2539                                  * The TLB entry for a PG_G mapping is
 2540                                  * invalidated by pmap_remove_pde().
 2541                                  */
 2542                                 if ((ptpaddr & PG_G) == 0)
 2543                                         anyvalid = 1;
 2544                                 pmap_remove_pde(pmap, pde, sva, &free);
 2545                                 continue;
 2546                         } else if (!pmap_demote_pde(pmap, pde, sva)) {
 2547                                 /* The large page mapping was destroyed. */
 2548                                 continue;
 2549                         } else
 2550                                 ptpaddr = *pde;
 2551                 }
 2552 
 2553                 /*
 2554                  * Limit our scan to either the end of the va represented
 2555                  * by the current page table page, or to the end of the
 2556                  * range being removed.
 2557                  */
 2558                 if (va_next > eva)
 2559                         va_next = eva;
 2560 
 2561                 for (pte = pmap_pde_to_pte(pde, sva); sva != va_next; pte++,
 2562                     sva += PAGE_SIZE) {
 2563                         if (*pte == 0)
 2564                                 continue;
 2565 
 2566                         /*
 2567                          * The TLB entry for a PG_G mapping is invalidated
 2568                          * by pmap_remove_pte().
 2569                          */
 2570                         if ((*pte & PG_G) == 0)
 2571                                 anyvalid = 1;
 2572                         if (pmap_remove_pte(pmap, pte, sva, ptpaddr, &free))
 2573                                 break;
 2574                 }
 2575         }
 2576 out:
 2577         if (anyvalid)
 2578                 pmap_invalidate_all(pmap);
 2579         vm_page_unlock_queues();        
 2580         PMAP_UNLOCK(pmap);
 2581         pmap_free_zero_pages(free);
 2582 }
 2583 
 2584 /*
 2585  *      Routine:        pmap_remove_all
 2586  *      Function:
 2587  *              Removes this physical page from
 2588  *              all physical maps in which it resides.
 2589  *              Reflects back modify bits to the pager.
 2590  *
 2591  *      Notes:
 2592  *              Original versions of this routine were very
 2593  *              inefficient because they iteratively called
 2594  *              pmap_remove (slow...)
 2595  */
 2596 
 2597 void
 2598 pmap_remove_all(vm_page_t m)
 2599 {
 2600         struct md_page *pvh;
 2601         pv_entry_t pv;
 2602         pmap_t pmap;
 2603         pt_entry_t *pte, tpte;
 2604         pd_entry_t *pde;
 2605         vm_offset_t va;
 2606         vm_page_t free;
 2607 
 2608         KASSERT((m->flags & PG_FICTITIOUS) == 0,
 2609             ("pmap_remove_all: page %p is fictitious", m));
 2610         mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 2611         pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
 2612         while ((pv = TAILQ_FIRST(&pvh->pv_list)) != NULL) {
 2613                 va = pv->pv_va;
 2614                 pmap = PV_PMAP(pv);
 2615                 PMAP_LOCK(pmap);
 2616                 pde = pmap_pde(pmap, va);
 2617                 (void)pmap_demote_pde(pmap, pde, va);
 2618                 PMAP_UNLOCK(pmap);
 2619         }
 2620         while ((pv = TAILQ_FIRST(&m->md.pv_list)) != NULL) {
 2621                 pmap = PV_PMAP(pv);
 2622                 PMAP_LOCK(pmap);
 2623                 pmap->pm_stats.resident_count--;
 2624                 pde = pmap_pde(pmap, pv->pv_va);
 2625                 KASSERT((*pde & PG_PS) == 0, ("pmap_remove_all: found"
 2626                     " a 2mpage in page %p's pv list", m));
 2627                 pte = pmap_pde_to_pte(pde, pv->pv_va);
 2628                 tpte = pte_load_clear(pte);
 2629                 if (tpte & PG_W)
 2630                         pmap->pm_stats.wired_count--;
 2631                 if (tpte & PG_A)
 2632                         vm_page_flag_set(m, PG_REFERENCED);
 2633 
 2634                 /*
 2635                  * Update the vm_page_t clean and reference bits.
 2636                  */
 2637                 if ((tpte & (PG_M | PG_RW)) == (PG_M | PG_RW))
 2638                         vm_page_dirty(m);
 2639                 free = NULL;
 2640                 pmap_unuse_pt(pmap, pv->pv_va, *pde, &free);
 2641                 pmap_invalidate_page(pmap, pv->pv_va);
 2642                 pmap_free_zero_pages(free);
 2643                 TAILQ_REMOVE(&m->md.pv_list, pv, pv_list);
 2644                 free_pv_entry(pmap, pv);
 2645                 PMAP_UNLOCK(pmap);
 2646         }
 2647         vm_page_flag_clear(m, PG_WRITEABLE);
 2648 }
 2649 
 2650 /*
 2651  * pmap_protect_pde: apply the requested protection to a 2MB page mapping
 2652  */
 2653 static boolean_t
 2654 pmap_protect_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t sva, vm_prot_t prot)
 2655 {
 2656         pd_entry_t newpde, oldpde;
 2657         vm_offset_t eva, va;
 2658         vm_page_t m;
 2659         boolean_t anychanged;
 2660 
 2661         PMAP_LOCK_ASSERT(pmap, MA_OWNED);
 2662         KASSERT((sva & PDRMASK) == 0,
 2663             ("pmap_protect_pde: sva is not 2mpage aligned"));
 2664         anychanged = FALSE;
 2665 retry:
 2666         oldpde = newpde = *pde;
 2667         if (oldpde & PG_MANAGED) {
 2668                 eva = sva + NBPDR;
 2669                 for (va = sva, m = PHYS_TO_VM_PAGE(oldpde & PG_PS_FRAME);
 2670                     va < eva; va += PAGE_SIZE, m++) {
 2671                         /*
 2672                          * In contrast to the analogous operation on a 4KB page
 2673                          * mapping, the mapping's PG_A flag is not cleared and
 2674                          * the page's PG_REFERENCED flag is not set.  The
 2675                          * reason is that pmap_demote_pde() expects that a 2MB
 2676                          * page mapping with a stored page table page has PG_A
 2677                          * set.
 2678                          */
 2679                         if ((oldpde & (PG_M | PG_RW)) == (PG_M | PG_RW))
 2680                                 vm_page_dirty(m);
 2681                 }
 2682         }
 2683         if ((prot & VM_PROT_WRITE) == 0)
 2684                 newpde &= ~(PG_RW | PG_M);
 2685         if ((prot & VM_PROT_EXECUTE) == 0)
 2686                 newpde |= pg_nx;
 2687         if (newpde != oldpde) {
 2688                 if (!atomic_cmpset_long(pde, oldpde, newpde))
 2689                         goto retry;
 2690                 if (oldpde & PG_G)
 2691                         pmap_invalidate_page(pmap, sva);
 2692                 else
 2693                         anychanged = TRUE;
 2694         }
 2695         return (anychanged);
 2696 }
 2697 
 2698 /*
 2699  *      Set the physical protection on the
 2700  *      specified range of this map as requested.
 2701  */
 2702 void
 2703 pmap_protect(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot)
 2704 {
 2705         vm_offset_t va_next;
 2706         pml4_entry_t *pml4e;
 2707         pdp_entry_t *pdpe;
 2708         pd_entry_t ptpaddr, *pde;
 2709         pt_entry_t *pte;
 2710         int anychanged;
 2711 
 2712         if ((prot & VM_PROT_READ) == VM_PROT_NONE) {
 2713                 pmap_remove(pmap, sva, eva);
 2714                 return;
 2715         }
 2716 
 2717         if ((prot & (VM_PROT_WRITE|VM_PROT_EXECUTE)) ==
 2718             (VM_PROT_WRITE|VM_PROT_EXECUTE))
 2719                 return;
 2720 
 2721         anychanged = 0;
 2722 
 2723         vm_page_lock_queues();
 2724         PMAP_LOCK(pmap);
 2725         for (; sva < eva; sva = va_next) {
 2726 
 2727                 pml4e = pmap_pml4e(pmap, sva);
 2728                 if ((*pml4e & PG_V) == 0) {
 2729                         va_next = (sva + NBPML4) & ~PML4MASK;
 2730                         if (va_next < sva)
 2731                                 va_next = eva;
 2732                         continue;
 2733                 }
 2734 
 2735                 pdpe = pmap_pml4e_to_pdpe(pml4e, sva);
 2736                 if ((*pdpe & PG_V) == 0) {
 2737                         va_next = (sva + NBPDP) & ~PDPMASK;
 2738                         if (va_next < sva)
 2739                                 va_next = eva;
 2740                         continue;
 2741                 }
 2742 
 2743                 va_next = (sva + NBPDR) & ~PDRMASK;
 2744                 if (va_next < sva)
 2745                         va_next = eva;
 2746 
 2747                 pde = pmap_pdpe_to_pde(pdpe, sva);
 2748                 ptpaddr = *pde;
 2749 
 2750                 /*
 2751                  * Weed out invalid mappings.
 2752                  */
 2753                 if (ptpaddr == 0)
 2754                         continue;
 2755 
 2756                 /*
 2757                  * Check for large page.
 2758                  */
 2759                 if ((ptpaddr & PG_PS) != 0) {
 2760                         /*
 2761                          * Are we protecting the entire large page?  If not,
 2762                          * demote the mapping and fall through.
 2763                          */
 2764                         if (sva + NBPDR == va_next && eva >= va_next) {
 2765                                 /*
 2766                                  * The TLB entry for a PG_G mapping is
 2767                                  * invalidated by pmap_protect_pde().
 2768                                  */
 2769                                 if (pmap_protect_pde(pmap, pde, sva, prot))
 2770                                         anychanged = 1;
 2771                                 continue;
 2772                         } else if (!pmap_demote_pde(pmap, pde, sva)) {
 2773                                 /* The large page mapping was destroyed. */
 2774                                 continue;
 2775                         }
 2776                 }
 2777 
 2778                 if (va_next > eva)
 2779                         va_next = eva;
 2780 
 2781                 for (pte = pmap_pde_to_pte(pde, sva); sva != va_next; pte++,
 2782                     sva += PAGE_SIZE) {
 2783                         pt_entry_t obits, pbits;
 2784                         vm_page_t m;
 2785 
 2786 retry:
 2787                         obits = pbits = *pte;
 2788                         if ((pbits & PG_V) == 0)
 2789                                 continue;
 2790                         if (pbits & PG_MANAGED) {
 2791                                 m = NULL;
 2792                                 if (pbits & PG_A) {
 2793                                         m = PHYS_TO_VM_PAGE(pbits & PG_FRAME);
 2794                                         vm_page_flag_set(m, PG_REFERENCED);
 2795                                         pbits &= ~PG_A;
 2796                                 }
 2797                                 if ((pbits & (PG_M | PG_RW)) == (PG_M | PG_RW)) {
 2798                                         if (m == NULL)
 2799                                                 m = PHYS_TO_VM_PAGE(pbits &
 2800                                                     PG_FRAME);
 2801                                         vm_page_dirty(m);
 2802                                 }
 2803                         }
 2804 
 2805                         if ((prot & VM_PROT_WRITE) == 0)
 2806                                 pbits &= ~(PG_RW | PG_M);
 2807                         if ((prot & VM_PROT_EXECUTE) == 0)
 2808                                 pbits |= pg_nx;
 2809 
 2810                         if (pbits != obits) {
 2811                                 if (!atomic_cmpset_long(pte, obits, pbits))
 2812                                         goto retry;
 2813                                 if (obits & PG_G)
 2814                                         pmap_invalidate_page(pmap, sva);
 2815                                 else
 2816                                         anychanged = 1;
 2817                         }
 2818                 }
 2819         }
 2820         if (anychanged)
 2821                 pmap_invalidate_all(pmap);
 2822         vm_page_unlock_queues();
 2823         PMAP_UNLOCK(pmap);
 2824 }
 2825 
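/*
 * Illustrative, userspace-only sketch of the boundary-advance idiom used by
 * the page-table walk above: compute the start of the next 2MB (PDE) region
 * after "sva", and treat wrap-around past the top of the address space as
 * "stop at eva".  The same pattern appears at the PML4 and PDP levels with
 * their region sizes.  The X_-prefixed constants and x_next_pde_boundary()
 * are simplified stand-ins, not the kernel's definitions.
 */
#include <stdint.h>

#define X_NBPDR    (2UL * 1024 * 1024)          /* bytes mapped by one PDE */
#define X_PDRMASK  (X_NBPDR - 1)

static uint64_t
x_next_pde_boundary(uint64_t sva, uint64_t eva)
{
        uint64_t va_next;

        va_next = (sva + X_NBPDR) & ~X_PDRMASK; /* start of the next 2MB region */
        if (va_next < sva)                      /* wrapped past the top of VA space */
                va_next = eva;
        return (va_next);
}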
 2826 /*
 2827  * Tries to promote the 512 contiguous 4KB page mappings that are within a
 2828  * single page table page (PTP) to a single 2MB page mapping.  For promotion
 2829  * to occur, two conditions must be met: (1) the 4KB page mappings must map
 2830  * aligned, contiguous physical memory and (2) the 4KB page mappings must have
 2831  * identical characteristics. 
 2832  */
 2833 static void
 2834 pmap_promote_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t va)
 2835 {
 2836         pd_entry_t newpde;
 2837         pt_entry_t *firstpte, oldpte, pa, *pte;
 2838         vm_offset_t oldpteva;
 2839         vm_page_t mpte;
 2840 
 2841         PMAP_LOCK_ASSERT(pmap, MA_OWNED);
 2842 
 2843         /*
 2844          * Examine the first PTE in the specified PTP.  Abort if this PTE is
 2845          * either invalid, unused, or does not map the first 4KB physical page
 2846          * within a 2MB page. 
 2847          */
 2848         firstpte = (pt_entry_t *)PHYS_TO_DMAP(*pde & PG_FRAME);
 2849 setpde:
 2850         newpde = *firstpte;
 2851         if ((newpde & ((PG_FRAME & PDRMASK) | PG_A | PG_V)) != (PG_A | PG_V)) {
 2852                 pmap_pde_p_failures++;
 2853                 CTR2(KTR_PMAP, "pmap_promote_pde: failure for va %#lx"
 2854                     " in pmap %p", va, pmap);
 2855                 return;
 2856         }
 2857         if ((newpde & (PG_M | PG_RW)) == PG_RW) {
 2858                 /*
 2859                  * When PG_M is already clear, PG_RW can be cleared without
 2860                  * a TLB invalidation.
 2861                  */
 2862                 if (!atomic_cmpset_long(firstpte, newpde, newpde & ~PG_RW))
 2863                         goto setpde;
 2864                 newpde &= ~PG_RW;
 2865         }
 2866 
 2867         /*
 2868          * Examine each of the other PTEs in the specified PTP.  Abort if this
 2869          * PTE maps an unexpected 4KB physical page or does not have identical
 2870          * characteristics to the first PTE.
 2871          */
 2872         pa = (newpde & (PG_PS_FRAME | PG_A | PG_V)) + NBPDR - PAGE_SIZE;
 2873         for (pte = firstpte + NPTEPG - 1; pte > firstpte; pte--) {
 2874 setpte:
 2875                 oldpte = *pte;
 2876                 if ((oldpte & (PG_FRAME | PG_A | PG_V)) != pa) {
 2877                         pmap_pde_p_failures++;
 2878                         CTR2(KTR_PMAP, "pmap_promote_pde: failure for va %#lx"
 2879                             " in pmap %p", va, pmap);
 2880                         return;
 2881                 }
 2882                 if ((oldpte & (PG_M | PG_RW)) == PG_RW) {
 2883                         /*
 2884                          * When PG_M is already clear, PG_RW can be cleared
 2885                          * without a TLB invalidation.
 2886                          */
 2887                         if (!atomic_cmpset_long(pte, oldpte, oldpte & ~PG_RW))
 2888                                 goto setpte;
 2889                         oldpte &= ~PG_RW;
 2890                         oldpteva = (oldpte & PG_FRAME & PDRMASK) |
 2891                             (va & ~PDRMASK);
 2892                         CTR2(KTR_PMAP, "pmap_promote_pde: protect for va %#lx"
 2893                             " in pmap %p", oldpteva, pmap);
 2894                 }
 2895                 if ((oldpte & PG_PTE_PROMOTE) != (newpde & PG_PTE_PROMOTE)) {
 2896                         pmap_pde_p_failures++;
 2897                         CTR2(KTR_PMAP, "pmap_promote_pde: failure for va %#lx"
 2898                             " in pmap %p", va, pmap);
 2899                         return;
 2900                 }
 2901                 pa -= PAGE_SIZE;
 2902         }
 2903 
 2904         /*
 2905          * Save the page table page in its current state until the PDE
 2906          * mapping the superpage is demoted by pmap_demote_pde() or
 2907          * destroyed by pmap_remove_pde(). 
 2908          */
 2909         mpte = PHYS_TO_VM_PAGE(*pde & PG_FRAME);
 2910         KASSERT(mpte >= vm_page_array &&
 2911             mpte < &vm_page_array[vm_page_array_size],
 2912             ("pmap_promote_pde: page table page is out of range"));
 2913         KASSERT(mpte->pindex == pmap_pde_pindex(va),
 2914             ("pmap_promote_pde: page table page's pindex is wrong"));
 2915         pmap_insert_pt_page(pmap, mpte);
 2916 
 2917         /*
 2918          * Promote the pv entries.
 2919          */
 2920         if ((newpde & PG_MANAGED) != 0)
 2921                 pmap_pv_promote_pde(pmap, va, newpde & PG_PS_FRAME);
 2922 
 2923         /*
 2924          * Propagate the PAT index to its proper position.
 2925          */
 2926         if ((newpde & PG_PTE_PAT) != 0)
 2927                 newpde ^= PG_PDE_PAT | PG_PTE_PAT;
 2928 
 2929         /*
 2930          * Map the superpage.
 2931          */
 2932         pde_store(pde, PG_PS | newpde);
 2933 
 2934         pmap_pde_promotions++;
 2935         CTR2(KTR_PMAP, "pmap_promote_pde: success for va %#lx"
 2936             " in pmap %p", va, pmap);
 2937 }
 2938 
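/*
 * Illustrative, userspace-only sketch of the test performed by
 * pmap_promote_pde() above: the 512 PTEs in a page table page are promotable
 * only when they map 2MB-aligned, physically contiguous frames and agree on
 * every attribute bit.  The X_-prefixed constants and the x_promotable()
 * helper are simplified stand-ins, not the kernel's definitions.
 */
#include <stddef.h>
#include <stdint.h>

#define X_PAGE_SIZE   4096UL
#define X_NPTEPG      512UL                     /* PTEs per page table page */
#define X_NBPDR       (X_NPTEPG * X_PAGE_SIZE)  /* 2MB */
#define X_PG_V        0x001UL
#define X_PG_A        0x020UL
#define X_PG_FRAME    0x000ffffffffff000UL
#define X_ATTR_MASK   0x8000000000000fffUL      /* low attribute bits plus NX */

static int
x_promotable(const uint64_t pte[X_NPTEPG])
{
        uint64_t pa, attrs;
        size_t i;

        /* The first PTE must be valid, accessed, and map a 2MB-aligned frame. */
        if ((pte[0] & (X_PG_V | X_PG_A)) != (X_PG_V | X_PG_A) ||
            ((pte[0] & X_PG_FRAME) % X_NBPDR) != 0)
                return (0);
        pa = pte[0] & X_PG_FRAME;
        attrs = pte[0] & X_ATTR_MASK;

        /* Every other PTE must map the next 4KB frame with identical bits. */
        for (i = 1; i < X_NPTEPG; i++) {
                if ((pte[i] & X_PG_FRAME) != pa + i * X_PAGE_SIZE ||
                    (pte[i] & X_ATTR_MASK) != attrs)
                        return (0);
        }
        return (1);
}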
 2939 /*
 2940  *      Insert the given physical page (m) at
 2941  *      the specified virtual address (va) in the
 2942  *      target physical map with the protection requested.
 2943  *
 2944  *      If specified, the page will be wired down, meaning
 2945  *      that the related pte cannot be reclaimed.
 2946  *
 2947  *      NB:  This is the only routine which MAY NOT lazy-evaluate
 2948  *      or lose information.  That is, this routine must actually
 2949  *      insert this page into the given map NOW.
 2950  */
 2951 void
 2952 pmap_enter(pmap_t pmap, vm_offset_t va, vm_prot_t access, vm_page_t m,
 2953     vm_prot_t prot, boolean_t wired)
 2954 {
 2955         vm_paddr_t pa;
 2956         pd_entry_t *pde;
 2957         pt_entry_t *pte;
 2958         vm_paddr_t opa;
 2959         pt_entry_t origpte, newpte;
 2960         vm_page_t mpte, om;
 2961         boolean_t invlva;
 2962 
 2963         va = trunc_page(va);
 2964         KASSERT(va <= VM_MAX_KERNEL_ADDRESS, ("pmap_enter: toobig"));
 2965         KASSERT(va < UPT_MIN_ADDRESS || va >= UPT_MAX_ADDRESS,
 2966             ("pmap_enter: invalid to pmap_enter page table pages (va: 0x%lx)", va));
 2967 
 2968         mpte = NULL;
 2969 
 2970         vm_page_lock_queues();
 2971         PMAP_LOCK(pmap);
 2972 
 2973         /*
 2974          * In the case that a page table page is not
 2975          * resident, we are creating it here.
 2976          */
 2977         if (va < VM_MAXUSER_ADDRESS) {
 2978                 mpte = pmap_allocpte(pmap, va, M_WAITOK);
 2979         }
 2980 
 2981         pde = pmap_pde(pmap, va);
 2982         if (pde != NULL && (*pde & PG_V) != 0) {
 2983                 if ((*pde & PG_PS) != 0)
 2984                         panic("pmap_enter: attempted pmap_enter on 2MB page");
 2985                 pte = pmap_pde_to_pte(pde, va);
 2986         } else
 2987                 panic("pmap_enter: invalid page directory va=%#lx", va);
 2988 
 2989         pa = VM_PAGE_TO_PHYS(m);
 2990         om = NULL;
 2991         origpte = *pte;
 2992         opa = origpte & PG_FRAME;
 2993 
 2994         /*
 2995          * Mapping has not changed, must be protection or wiring change.
 2996          */
 2997         if (origpte && (opa == pa)) {
 2998                 /*
 2999                  * Wiring change, just update stats. We don't worry about
 3000                  * wiring PT pages as they remain resident as long as there
 3001                  * are valid mappings in them. Hence, if a user page is wired,
 3002                  * the PT page will be also.
 3003                  */
 3004                 if (wired && ((origpte & PG_W) == 0))
 3005                         pmap->pm_stats.wired_count++;
 3006                 else if (!wired && (origpte & PG_W))
 3007                         pmap->pm_stats.wired_count--;
 3008 
 3009                 /*
 3010                  * Remove extra pte reference
 3011                  */
 3012                 if (mpte)
 3013                         mpte->wire_count--;
 3014 
 3015                 /*
 3016                  * We might be turning off write access to the page,
 3017                  * so we go ahead and sense modify status.
 3018                  */
 3019                 if (origpte & PG_MANAGED) {
 3020                         om = m;
 3021                         pa |= PG_MANAGED;
 3022                 }
 3023                 goto validate;
 3024         } 
 3025         /*
 3026          * Mapping has changed, invalidate old range and fall through to
 3027          * handle validating new mapping.
 3028          */
 3029         if (opa) {
 3030                 if (origpte & PG_W)
 3031                         pmap->pm_stats.wired_count--;
 3032                 if (origpte & PG_MANAGED) {
 3033                         om = PHYS_TO_VM_PAGE(opa);
 3034                         pmap_remove_entry(pmap, om, va);
 3035                 }
 3036                 if (mpte != NULL) {
 3037                         mpte->wire_count--;
 3038                         KASSERT(mpte->wire_count > 0,
 3039                             ("pmap_enter: missing reference to page table page,"
 3040                              " va: 0x%lx", va));
 3041                 }
 3042         } else
 3043                 pmap->pm_stats.resident_count++;
 3044 
 3045         /*
 3046          * Enter on the PV list if part of our managed memory.
 3047          */
 3048         if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0) {
 3049                 KASSERT(va < kmi.clean_sva || va >= kmi.clean_eva,
 3050                     ("pmap_enter: managed mapping within the clean submap"));
 3051                 pmap_insert_entry(pmap, va, m);
 3052                 pa |= PG_MANAGED;
 3053         }
 3054 
 3055         /*
 3056          * Increment counters
 3057          */
 3058         if (wired)
 3059                 pmap->pm_stats.wired_count++;
 3060 
 3061 validate:
 3062         /*
 3063          * Now validate mapping with desired protection/wiring.
 3064          */
 3065         newpte = (pt_entry_t)(pa | pmap_cache_bits(m->md.pat_mode, 0) | PG_V);
 3066         if ((prot & VM_PROT_WRITE) != 0) {
 3067                 newpte |= PG_RW;
 3068                 vm_page_flag_set(m, PG_WRITEABLE);
 3069         }
 3070         if ((prot & VM_PROT_EXECUTE) == 0)
 3071                 newpte |= pg_nx;
 3072         if (wired)
 3073                 newpte |= PG_W;
 3074         if (va < VM_MAXUSER_ADDRESS)
 3075                 newpte |= PG_U;
 3076         if (pmap == kernel_pmap)
 3077                 newpte |= PG_G;
 3078 
 3079         /*
 3080          * if the mapping or permission bits are different, we need
 3081          * to update the pte.
 3082          */
 3083         if ((origpte & ~(PG_M|PG_A)) != newpte) {
 3084                 newpte |= PG_A;
 3085                 if ((access & VM_PROT_WRITE) != 0)
 3086                         newpte |= PG_M;
 3087                 if (origpte & PG_V) {
 3088                         invlva = FALSE;
 3089                         origpte = pte_load_store(pte, newpte);
 3090                         if (origpte & PG_A) {
 3091                                 if (origpte & PG_MANAGED)
 3092                                         vm_page_flag_set(om, PG_REFERENCED);
 3093                                 if (opa != VM_PAGE_TO_PHYS(m) || ((origpte &
 3094                                     PG_NX) == 0 && (newpte & PG_NX)))
 3095                                         invlva = TRUE;
 3096                         }
 3097                         if ((origpte & (PG_M | PG_RW)) == (PG_M | PG_RW)) {
 3098                                 if ((origpte & PG_MANAGED) != 0)
 3099                                         vm_page_dirty(om);
 3100                                 if ((newpte & PG_RW) == 0)
 3101                                         invlva = TRUE;
 3102                         }
 3103                         if (invlva)
 3104                                 pmap_invalidate_page(pmap, va);
 3105                 } else
 3106                         pte_store(pte, newpte);
 3107         }
 3108 
 3109         /*
 3110          * If both the page table page and the reservation are fully
 3111          * populated, then attempt promotion.
 3112          */
 3113         if ((mpte == NULL || mpte->wire_count == NPTEPG) &&
 3114             pg_ps_enabled && vm_reserv_level_iffullpop(m) == 0)
 3115                 pmap_promote_pde(pmap, pde, va);
 3116 
 3117         vm_page_unlock_queues();
 3118         PMAP_UNLOCK(pmap);
 3119 }
 3120 
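/*
 * Illustrative sketch of how pmap_enter() above composes a new PTE from the
 * physical address and the requested protection: writable mappings get PG_RW,
 * non-executable mappings get the NX bit, wired mappings get the software
 * wired bit, user addresses get PG_U, and kernel-pmap mappings get PG_G.
 * The X_-prefixed values and x_make_pte() are illustrative stand-ins; PAT
 * cache bits, PG_MANAGED, and the PG_A/PG_M handling are omitted.
 */
#include <stdint.h>

#define X_PG_V   0x0001UL
#define X_PG_RW  0x0002UL
#define X_PG_U   0x0004UL
#define X_PG_G   0x0100UL
#define X_PG_W   0x0200UL               /* software "wired" bit */
#define X_PG_NX  (1UL << 63)

#define X_PROT_WRITE 0x2
#define X_PROT_EXEC  0x4

static uint64_t
x_make_pte(uint64_t pa, int prot, int wired, int user, int global)
{
        uint64_t pte = pa | X_PG_V;

        if (prot & X_PROT_WRITE)
                pte |= X_PG_RW;
        if ((prot & X_PROT_EXEC) == 0)
                pte |= X_PG_NX;         /* no-execute unless execute requested */
        if (wired)
                pte |= X_PG_W;
        if (user)
                pte |= X_PG_U;
        if (global)
                pte |= X_PG_G;          /* global mappings survive CR3 reloads */
        return (pte);
}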
 3121 /*
 3122  * Tries to create a 2MB page mapping.  Returns TRUE if successful and FALSE
 3123  * otherwise.  Fails if (1) a page table page cannot be allocated without
 3124  * blocking, (2) a mapping already exists at the specified virtual address, or
 3125  * (3) a pv entry cannot be allocated without reclaiming another pv entry. 
 3126  */
 3127 static boolean_t
 3128 pmap_enter_pde(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot)
 3129 {
 3130         pd_entry_t *pde, newpde;
 3131         vm_page_t free, mpde;
 3132 
 3133         mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 3134         PMAP_LOCK_ASSERT(pmap, MA_OWNED);
 3135         if ((mpde = pmap_allocpde(pmap, va, M_NOWAIT)) == NULL) {
 3136                 CTR2(KTR_PMAP, "pmap_enter_pde: failure for va %#lx"
 3137                     " in pmap %p", va, pmap);
 3138                 return (FALSE);
 3139         }
 3140         pde = (pd_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(mpde));
 3141         pde = &pde[pmap_pde_index(va)];
 3142         if ((*pde & PG_V) != 0) {
 3143                 KASSERT(mpde->wire_count > 1,
 3144                     ("pmap_enter_pde: mpde's wire count is too low"));
 3145                 mpde->wire_count--;
 3146                 CTR2(KTR_PMAP, "pmap_enter_pde: failure for va %#lx"
 3147                     " in pmap %p", va, pmap);
 3148                 return (FALSE);
 3149         }
 3150         newpde = VM_PAGE_TO_PHYS(m) | pmap_cache_bits(m->md.pat_mode, 1) |
 3151             PG_PS | PG_V;
 3152         if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0) {
 3153                 newpde |= PG_MANAGED;
 3154 
 3155                 /*
 3156                  * Abort this mapping if its PV entry could not be created.
 3157                  */
 3158                 if (!pmap_pv_insert_pde(pmap, va, VM_PAGE_TO_PHYS(m))) {
 3159                         free = NULL;
 3160                         if (pmap_unwire_pte_hold(pmap, va, mpde, &free)) {
 3161                                 pmap_invalidate_page(pmap, va);
 3162                                 pmap_free_zero_pages(free);
 3163                         }
 3164                         CTR2(KTR_PMAP, "pmap_enter_pde: failure for va %#lx"
 3165                             " in pmap %p", va, pmap);
 3166                         return (FALSE);
 3167                 }
 3168         }
 3169         if ((prot & VM_PROT_EXECUTE) == 0)
 3170                 newpde |= pg_nx;
 3171         if (va < VM_MAXUSER_ADDRESS)
 3172                 newpde |= PG_U;
 3173 
 3174         /*
 3175          * Increment counters.
 3176          */
 3177         pmap->pm_stats.resident_count += NBPDR / PAGE_SIZE;
 3178 
 3179         /*
 3180          * Map the superpage.
 3181          */
 3182         pde_store(pde, newpde);
 3183 
 3184         pmap_pde_mappings++;
 3185         CTR2(KTR_PMAP, "pmap_enter_pde: success for va %#lx"
 3186             " in pmap %p", va, pmap);
 3187         return (TRUE);
 3188 }
 3189 
 3190 /*
 3191  * Maps a sequence of resident pages belonging to the same object.
 3192  * The sequence begins with the given page m_start.  This page is
 3193  * mapped at the given virtual address start.  Each subsequent page is
 3194  * mapped at a virtual address that is offset from start by the same
 3195  * amount as the page is offset from m_start within the object.  The
 3196  * last page in the sequence is the page with the largest offset from
 3197  * m_start that can be mapped at a virtual address less than the given
 3198  * virtual address end.  Not every virtual page between start and end
 3199  * is mapped; only those for which a resident page exists with the
 3200  * corresponding offset from m_start are mapped.
 3201  */
 3202 void
 3203 pmap_enter_object(pmap_t pmap, vm_offset_t start, vm_offset_t end,
 3204     vm_page_t m_start, vm_prot_t prot)
 3205 {
 3206         vm_offset_t va;
 3207         vm_page_t m, mpte;
 3208         vm_pindex_t diff, psize;
 3209 
 3210         VM_OBJECT_LOCK_ASSERT(m_start->object, MA_OWNED);
 3211         psize = atop(end - start);
 3212         mpte = NULL;
 3213         m = m_start;
 3214         PMAP_LOCK(pmap);
 3215         while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) {
 3216                 va = start + ptoa(diff);
 3217                 if ((va & PDRMASK) == 0 && va + NBPDR <= end &&
 3218                     (VM_PAGE_TO_PHYS(m) & PDRMASK) == 0 &&
 3219                     pg_ps_enabled && vm_reserv_level_iffullpop(m) == 0 &&
 3220                     pmap_enter_pde(pmap, va, m, prot))
 3221                         m = &m[NBPDR / PAGE_SIZE - 1];
 3222                 else
 3223                         mpte = pmap_enter_quick_locked(pmap, va, m, prot,
 3224                             mpte);
 3225                 m = TAILQ_NEXT(m, listq);
 3226         }
 3227         PMAP_UNLOCK(pmap);
 3228 }
 3229 
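/*
 * Small sketch of the superpage-eligibility test used by pmap_enter_object()
 * above: a run of pages may be entered as a single 2MB mapping only when the
 * virtual address and the physical address are both 2MB-aligned and the whole
 * 2MB run ends at or before "end".  The constants and x_fits_superpage() are
 * illustrative stand-ins.
 */
#include <stdint.h>

#define X_NBPDR    (2UL * 1024 * 1024)
#define X_PDRMASK  (X_NBPDR - 1)

static int
x_fits_superpage(uint64_t va, uint64_t pa, uint64_t end)
{
        return ((va & X_PDRMASK) == 0 &&        /* VA starts a 2MB region */
            (pa & X_PDRMASK) == 0 &&            /* PA is 2MB-aligned */
            va + X_NBPDR <= end);               /* run fits below the limit */
}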
 3230 /*
 3231  * This code makes some *MAJOR* assumptions:
 3232  * 1. The current pmap and the given pmap exist.
 3233  * 2. The mapping is not wired.
 3234  * 3. Only read access is required.
 3235  * 4. No page table pages are needed.
 3236  * It is, however, *MUCH* faster than pmap_enter...
 3237  */
 3238 
 3239 void
 3240 pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot)
 3241 {
 3242 
 3243         PMAP_LOCK(pmap);
 3244         (void) pmap_enter_quick_locked(pmap, va, m, prot, NULL);
 3245         PMAP_UNLOCK(pmap);
 3246 }
 3247 
 3248 static vm_page_t
 3249 pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va, vm_page_t m,
 3250     vm_prot_t prot, vm_page_t mpte)
 3251 {
 3252         vm_page_t free;
 3253         pt_entry_t *pte;
 3254         vm_paddr_t pa;
 3255 
 3256         KASSERT(va < kmi.clean_sva || va >= kmi.clean_eva ||
 3257             (m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0,
 3258             ("pmap_enter_quick_locked: managed mapping within the clean submap"));
 3259         mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 3260         PMAP_LOCK_ASSERT(pmap, MA_OWNED);
 3261 
 3262         /*
 3263          * In the case that a page table page is not
 3264          * resident, we are creating it here.
 3265          */
 3266         if (va < VM_MAXUSER_ADDRESS) {
 3267                 vm_pindex_t ptepindex;
 3268                 pd_entry_t *ptepa;
 3269 
 3270                 /*
 3271                  * Calculate pagetable page index
 3272                  */
 3273                 ptepindex = pmap_pde_pindex(va);
 3274                 if (mpte && (mpte->pindex == ptepindex)) {
 3275                         mpte->wire_count++;
 3276                 } else {
 3277                         /*
 3278                          * Get the page directory entry
 3279                          */
 3280                         ptepa = pmap_pde(pmap, va);
 3281 
 3282                         /*
 3283                          * If the page table page is mapped, we just increment
 3284                          * the hold count, and activate it.
 3285                          */
 3286                         if (ptepa && (*ptepa & PG_V) != 0) {
 3287                                 if (*ptepa & PG_PS)
 3288                                         return (NULL);
 3289                                 mpte = PHYS_TO_VM_PAGE(*ptepa & PG_FRAME);
 3290                                 mpte->wire_count++;
 3291                         } else {
 3292                                 mpte = _pmap_allocpte(pmap, ptepindex,
 3293                                     M_NOWAIT);
 3294                                 if (mpte == NULL)
 3295                                         return (mpte);
 3296                         }
 3297                 }
 3298                 pte = (pt_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(mpte));
 3299                 pte = &pte[pmap_pte_index(va)];
 3300         } else {
 3301                 mpte = NULL;
 3302                 pte = vtopte(va);
 3303         }
 3304         if (*pte) {
 3305                 if (mpte != NULL) {
 3306                         mpte->wire_count--;
 3307                         mpte = NULL;
 3308                 }
 3309                 return (mpte);
 3310         }
 3311 
 3312         /*
 3313          * Enter on the PV list if part of our managed memory.
 3314          */
 3315         if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0 &&
 3316             !pmap_try_insert_pv_entry(pmap, va, m)) {
 3317                 if (mpte != NULL) {
 3318                         free = NULL;
 3319                         if (pmap_unwire_pte_hold(pmap, va, mpte, &free)) {
 3320                                 pmap_invalidate_page(pmap, va);
 3321                                 pmap_free_zero_pages(free);
 3322                         }
 3323                         mpte = NULL;
 3324                 }
 3325                 return (mpte);
 3326         }
 3327 
 3328         /*
 3329          * Increment counters
 3330          */
 3331         pmap->pm_stats.resident_count++;
 3332 
 3333         pa = VM_PAGE_TO_PHYS(m) | pmap_cache_bits(m->md.pat_mode, 0);
 3334         if ((prot & VM_PROT_EXECUTE) == 0)
 3335                 pa |= pg_nx;
 3336 
 3337         /*
 3338          * Now validate mapping with RO protection
 3339          */
 3340         if (m->flags & (PG_FICTITIOUS|PG_UNMANAGED))
 3341                 pte_store(pte, pa | PG_V | PG_U);
 3342         else
 3343                 pte_store(pte, pa | PG_V | PG_U | PG_MANAGED);
 3344         return (mpte);
 3345 }
 3346 
 3347 /*
 3348  * Make a temporary mapping for a physical address.  This is only intended
 3349  * to be used for panic dumps.
 3350  */
 3351 void *
 3352 pmap_kenter_temporary(vm_paddr_t pa, int i)
 3353 {
 3354         vm_offset_t va;
 3355 
 3356         va = (vm_offset_t)crashdumpmap + (i * PAGE_SIZE);
 3357         pmap_kenter(va, pa);
 3358         invlpg(va);
 3359         return ((void *)crashdumpmap);
 3360 }
 3361 
 3362 /*
 3363  * This code maps large physical mmap regions into the
 3364  * processor address space.  Note that some shortcuts
 3365  * are taken, but the code works.
 3366  */
 3367 void
 3368 pmap_object_init_pt(pmap_t pmap, vm_offset_t addr, vm_object_t object,
 3369     vm_pindex_t pindex, vm_size_t size)
 3370 {
 3371         pd_entry_t *pde;
 3372         vm_paddr_t pa, ptepa;
 3373         vm_page_t p, pdpg;
 3374         int pat_mode;
 3375 
 3376         VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
 3377         KASSERT(object->type == OBJT_DEVICE || object->type == OBJT_SG,
 3378             ("pmap_object_init_pt: non-device object"));
 3379         if ((addr & (NBPDR - 1)) == 0 && (size & (NBPDR - 1)) == 0) {
 3380                 if (!vm_object_populate(object, pindex, pindex + atop(size)))
 3381                         return;
 3382                 p = vm_page_lookup(object, pindex);
 3383                 KASSERT(p->valid == VM_PAGE_BITS_ALL,
 3384                     ("pmap_object_init_pt: invalid page %p", p));
 3385                 pat_mode = p->md.pat_mode;
 3386 
 3387                 /*
 3388                  * Abort the mapping if the first page is not physically
 3389                  * aligned to a 2MB page boundary.
 3390                  */
 3391                 ptepa = VM_PAGE_TO_PHYS(p);
 3392                 if (ptepa & (NBPDR - 1))
 3393                         return;
 3394 
 3395                 /*
 3396                  * Skip the first page.  Abort the mapping if the rest of
 3397                  * the pages are not physically contiguous or have differing
 3398                  * memory attributes.
 3399                  */
 3400                 p = TAILQ_NEXT(p, listq);
 3401                 for (pa = ptepa + PAGE_SIZE; pa < ptepa + size;
 3402                     pa += PAGE_SIZE) {
 3403                         KASSERT(p->valid == VM_PAGE_BITS_ALL,
 3404                             ("pmap_object_init_pt: invalid page %p", p));
 3405                         if (pa != VM_PAGE_TO_PHYS(p) ||
 3406                             pat_mode != p->md.pat_mode)
 3407                                 return;
 3408                         p = TAILQ_NEXT(p, listq);
 3409                 }
 3410 
 3411                 /*
 3412                  * Map using 2MB pages.  Since "ptepa" is 2M aligned and
 3413                  * "size" is a multiple of 2M, adding the PAT setting to "pa"
 3414                  * will not affect the termination of this loop.
 3415                  */ 
 3416                 PMAP_LOCK(pmap);
 3417                 for (pa = ptepa | pmap_cache_bits(pat_mode, 1); pa < ptepa +
 3418                     size; pa += NBPDR) {
 3419                         pdpg = pmap_allocpde(pmap, addr, M_NOWAIT);
 3420                         if (pdpg == NULL) {
 3421                                 /*
 3422                                  * The creation of mappings below is only an
 3423                                  * optimization.  If a page directory page
 3424                                  * cannot be allocated without blocking,
 3425                                  * continue on to the next mapping rather than
 3426                                  * blocking.
 3427                                  */
 3428                                 addr += NBPDR;
 3429                                 continue;
 3430                         }
 3431                         pde = (pd_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(pdpg));
 3432                         pde = &pde[pmap_pde_index(addr)];
 3433                         if ((*pde & PG_V) == 0) {
 3434                                 pde_store(pde, pa | PG_PS | PG_M | PG_A |
 3435                                     PG_U | PG_RW | PG_V);
 3436                                 pmap->pm_stats.resident_count += NBPDR /
 3437                                     PAGE_SIZE;
 3438                                 pmap_pde_mappings++;
 3439                         } else {
 3440                                 /* Continue on if the PDE is already valid. */
 3441                                 pdpg->wire_count--;
 3442                                 KASSERT(pdpg->wire_count > 0,
 3443                                     ("pmap_object_init_pt: missing reference "
 3444                                     "to page directory page, va: 0x%lx", addr));
 3445                         }
 3446                         addr += NBPDR;
 3447                 }
 3448                 PMAP_UNLOCK(pmap);
 3449         }
 3450 }
 3451 
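/*
 * Sketch of the preconditions checked by pmap_object_init_pt() above before
 * it maps a device region with 2MB pages: the region must start and end on
 * 2MB boundaries, the first frame must be 2MB-aligned, and the frames must be
 * physically contiguous (identical memory attributes are also required but
 * omitted here).  The "frames" array and constants are illustrative stand-ins
 * for the object's resident pages.
 */
#include <stddef.h>
#include <stdint.h>

#define X_PAGE_SIZE  4096UL
#define X_NBPDR      (2UL * 1024 * 1024)

static int
x_can_map_2mb(uint64_t addr, uint64_t size, const uint64_t *frames,
    size_t nframes)
{
        size_t i;

        if ((addr & (X_NBPDR - 1)) != 0 || (size & (X_NBPDR - 1)) != 0)
                return (0);                     /* region not 2MB-aligned */
        if ((frames[0] & (X_NBPDR - 1)) != 0)
                return (0);                     /* first frame not 2MB-aligned */
        for (i = 1; i < nframes; i++)
                if (frames[i] != frames[0] + i * X_PAGE_SIZE)
                        return (0);             /* frames not contiguous */
        return (1);
}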
 3452 /*
 3453  *      Routine:        pmap_change_wiring
 3454  *      Function:       Change the wiring attribute for a map/virtual-address
 3455  *                      pair.
 3456  *      In/out conditions:
 3457  *                      The mapping must already exist in the pmap.
 3458  */
 3459 void
 3460 pmap_change_wiring(pmap_t pmap, vm_offset_t va, boolean_t wired)
 3461 {
 3462         pd_entry_t *pde;
 3463         pt_entry_t *pte;
 3464         boolean_t are_queues_locked;
 3465 
 3466         are_queues_locked = FALSE;
 3467 
 3468         /*
 3469          * Wiring is not a hardware characteristic so there is no need to
 3470          * invalidate TLB.
 3471          */
 3472 retry:
 3473         PMAP_LOCK(pmap);
 3474         pde = pmap_pde(pmap, va);
 3475         if ((*pde & PG_PS) != 0) {
 3476                 if (!wired != ((*pde & PG_W) == 0)) {
 3477                         if (!are_queues_locked) {
 3478                                 are_queues_locked = TRUE;
 3479                                 if (!mtx_trylock(&vm_page_queue_mtx)) {
 3480                                         PMAP_UNLOCK(pmap);
 3481                                         vm_page_lock_queues();
 3482                                         goto retry;
 3483                                 }
 3484                         }
 3485                         if (!pmap_demote_pde(pmap, pde, va))
 3486                                 panic("pmap_change_wiring: demotion failed");
 3487                 } else
 3488                         goto out;
 3489         }
 3490         pte = pmap_pde_to_pte(pde, va);
 3491         if (wired && (*pte & PG_W) == 0) {
 3492                 pmap->pm_stats.wired_count++;
 3493                 atomic_set_long(pte, PG_W);
 3494         } else if (!wired && (*pte & PG_W) != 0) {
 3495                 pmap->pm_stats.wired_count--;
 3496                 atomic_clear_long(pte, PG_W);
 3497         }
 3498 out:
 3499         if (are_queues_locked)
 3500                 vm_page_unlock_queues();
 3501         PMAP_UNLOCK(pmap);
 3502 }
 3503 
 3504 
 3505 
 3506 /*
 3507  *      Copy the range specified by src_addr/len
 3508  *      from the source map to the range dst_addr/len
 3509  *      in the destination map.
 3510  *
 3511  *      This routine is only advisory and need not do anything.
 3512  */
 3513 
 3514 void
 3515 pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr, vm_size_t len,
 3516     vm_offset_t src_addr)
 3517 {
 3518         vm_page_t   free;
 3519         vm_offset_t addr;
 3520         vm_offset_t end_addr = src_addr + len;
 3521         vm_offset_t va_next;
 3522 
 3523         if (dst_addr != src_addr)
 3524                 return;
 3525 
 3526         vm_page_lock_queues();
 3527         if (dst_pmap < src_pmap) {
 3528                 PMAP_LOCK(dst_pmap);
 3529                 PMAP_LOCK(src_pmap);
 3530         } else {
 3531                 PMAP_LOCK(src_pmap);
 3532                 PMAP_LOCK(dst_pmap);
 3533         }
 3534         for (addr = src_addr; addr < end_addr; addr = va_next) {
 3535                 pt_entry_t *src_pte, *dst_pte;
 3536                 vm_page_t dstmpde, dstmpte, srcmpte;
 3537                 pml4_entry_t *pml4e;
 3538                 pdp_entry_t *pdpe;
 3539                 pd_entry_t srcptepaddr, *pde;
 3540 
 3541                 KASSERT(addr < UPT_MIN_ADDRESS,
 3542                     ("pmap_copy: invalid to pmap_copy page tables"));
 3543 
 3544                 pml4e = pmap_pml4e(src_pmap, addr);
 3545                 if ((*pml4e & PG_V) == 0) {
 3546                         va_next = (addr + NBPML4) & ~PML4MASK;
 3547                         if (va_next < addr)
 3548                                 va_next = end_addr;
 3549                         continue;
 3550                 }
 3551 
 3552                 pdpe = pmap_pml4e_to_pdpe(pml4e, addr);
 3553                 if ((*pdpe & PG_V) == 0) {
 3554                         va_next = (addr + NBPDP) & ~PDPMASK;
 3555                         if (va_next < addr)
 3556                                 va_next = end_addr;
 3557                         continue;
 3558                 }
 3559 
 3560                 va_next = (addr + NBPDR) & ~PDRMASK;
 3561                 if (va_next < addr)
 3562                         va_next = end_addr;
 3563 
 3564                 pde = pmap_pdpe_to_pde(pdpe, addr);
 3565                 srcptepaddr = *pde;
 3566                 if (srcptepaddr == 0)
 3567                         continue;
 3568                         
 3569                 if (srcptepaddr & PG_PS) {
 3570                         dstmpde = pmap_allocpde(dst_pmap, addr, M_NOWAIT);
 3571                         if (dstmpde == NULL)
 3572                                 break;
 3573                         pde = (pd_entry_t *)
 3574                             PHYS_TO_DMAP(VM_PAGE_TO_PHYS(dstmpde));
 3575                         pde = &pde[pmap_pde_index(addr)];
 3576                         if (*pde == 0 && ((srcptepaddr & PG_MANAGED) == 0 ||
 3577                             pmap_pv_insert_pde(dst_pmap, addr, srcptepaddr &
 3578                             PG_PS_FRAME))) {
 3579                                 *pde = srcptepaddr & ~PG_W;
 3580                                 dst_pmap->pm_stats.resident_count +=
 3581                                     NBPDR / PAGE_SIZE;
 3582                         } else
 3583                                 dstmpde->wire_count--;
 3584                         continue;
 3585                 }
 3586 
 3587                 srcptepaddr &= PG_FRAME;
 3588                 srcmpte = PHYS_TO_VM_PAGE(srcptepaddr);
 3589                 KASSERT(srcmpte->wire_count > 0,
 3590                     ("pmap_copy: source page table page is unused"));
 3591 
 3592                 if (va_next > end_addr)
 3593                         va_next = end_addr;
 3594 
 3595                 src_pte = (pt_entry_t *)PHYS_TO_DMAP(srcptepaddr);
 3596                 src_pte = &src_pte[pmap_pte_index(addr)];
 3597                 dstmpte = NULL;
 3598                 while (addr < va_next) {
 3599                         pt_entry_t ptetemp;
 3600                         ptetemp = *src_pte;
 3601                         /*
 3602                          * we only virtual copy managed pages
 3603                          */
 3604                         if ((ptetemp & PG_MANAGED) != 0) {
 3605                                 if (dstmpte != NULL &&
 3606                                     dstmpte->pindex == pmap_pde_pindex(addr))
 3607                                         dstmpte->wire_count++;
 3608                                 else if ((dstmpte = pmap_allocpte(dst_pmap,
 3609                                     addr, M_NOWAIT)) == NULL)
 3610                                         goto out;
 3611                                 dst_pte = (pt_entry_t *)
 3612                                     PHYS_TO_DMAP(VM_PAGE_TO_PHYS(dstmpte));
 3613                                 dst_pte = &dst_pte[pmap_pte_index(addr)];
 3614                                 if (*dst_pte == 0 &&
 3615                                     pmap_try_insert_pv_entry(dst_pmap, addr,
 3616                                     PHYS_TO_VM_PAGE(ptetemp & PG_FRAME))) {
 3617                                         /*
 3618                                          * Clear the wired, modified, and
 3619                                          * accessed (referenced) bits
 3620                                          * during the copy.
 3621                                          */
 3622                                         *dst_pte = ptetemp & ~(PG_W | PG_M |
 3623                                             PG_A);
 3624                                         dst_pmap->pm_stats.resident_count++;
 3625                                 } else {
 3626                                         free = NULL;
 3627                                         if (pmap_unwire_pte_hold(dst_pmap,
 3628                                             addr, dstmpte, &free)) {
 3629                                                 pmap_invalidate_page(dst_pmap,
 3630                                                     addr);
 3631                                                 pmap_free_zero_pages(free);
 3632                                         }
 3633                                         goto out;
 3634                                 }
 3635                                 if (dstmpte->wire_count >= srcmpte->wire_count)
 3636                                         break;
 3637                         }
 3638                         addr += PAGE_SIZE;
 3639                         src_pte++;
 3640                 }
 3641         }
 3642 out:
 3643         vm_page_unlock_queues();
 3644         PMAP_UNLOCK(src_pmap);
 3645         PMAP_UNLOCK(dst_pmap);
 3646 }       
 3647 
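/*
 * Sketch of the bit scrubbing pmap_copy() above applies when duplicating a
 * PTE into the destination pmap: the wired, modified, and accessed bits
 * describe the state of the source mapping and are not inherited.  Bit
 * values are illustrative stand-ins.
 */
#include <stdint.h>

#define X_PG_A  0x0020UL
#define X_PG_M  0x0040UL
#define X_PG_W  0x0200UL

static uint64_t
x_copy_pte(uint64_t src_pte)
{
        /* Keep the frame and protection bits; drop W, M, and A. */
        return (src_pte & ~(X_PG_W | X_PG_M | X_PG_A));
}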
 3648 /*
 3649  *      pmap_zero_page zeros the specified hardware page through its
 3650  *      direct map address using pagezero().
 3651  */
 3652 void
 3653 pmap_zero_page(vm_page_t m)
 3654 {
 3655         vm_offset_t va = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m));
 3656 
 3657         pagezero((void *)va);
 3658 }
 3659 
 3660 /*
 3661  *      pmap_zero_page_area zeros the specified portion of a hardware
 3662  *      page through its direct map address.
 3663  *
 3664  *      off and size may not cover an area beyond a single hardware page.
 3665  */
 3666 void
 3667 pmap_zero_page_area(vm_page_t m, int off, int size)
 3668 {
 3669         vm_offset_t va = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m));
 3670 
 3671         if (off == 0 && size == PAGE_SIZE)
 3672                 pagezero((void *)va);
 3673         else
 3674                 bzero((char *)va + off, size);
 3675 }
 3676 
 3677 /*
 3678  *      pmap_zero_page_idle zeros the specified hardware page through
 3679  *      its direct map address using pagezero().  This
 3680  *      is intended to be called from the vm_pagezero process only and
 3681  *      outside of Giant.
 3682  */
 3683 void
 3684 pmap_zero_page_idle(vm_page_t m)
 3685 {
 3686         vm_offset_t va = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m));
 3687 
 3688         pagezero((void *)va);
 3689 }
 3690 
 3691 /*
 3692  *      pmap_copy_page copies the specified (machine independent)
 3693  *      page by copying between the direct map addresses of the
 3694  *      source and destination pages with pagecopy(), one machine
 3695  *      dependent page at a time.
 3696  */
 3697 void
 3698 pmap_copy_page(vm_page_t msrc, vm_page_t mdst)
 3699 {
 3700         vm_offset_t src = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(msrc));
 3701         vm_offset_t dst = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(mdst));
 3702 
 3703         pagecopy((void *)src, (void *)dst);
 3704 }
 3705 
 3706 /*
 3707  * Returns true if the pmap's pv is one of the first
 3708  * 16 pvs linked to from this page.  This count may
 3709  * be changed upwards or downwards in the future; it
 3710  * is only necessary that true be returned for a small
 3711  * subset of pmaps for proper page aging.
 3712  */
 3713 boolean_t
 3714 pmap_page_exists_quick(pmap_t pmap, vm_page_t m)
 3715 {
 3716         struct md_page *pvh;
 3717         pv_entry_t pv;
 3718         int loops = 0;
 3719 
 3720         if (m->flags & PG_FICTITIOUS)
 3721                 return FALSE;
 3722 
 3723         mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 3724         TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
 3725                 if (PV_PMAP(pv) == pmap) {
 3726                         return TRUE;
 3727                 }
 3728                 loops++;
 3729                 if (loops >= 16)
 3730                         break;
 3731         }
 3732         if (loops < 16) {
 3733                 pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
 3734                 TAILQ_FOREACH(pv, &pvh->pv_list, pv_list) {
 3735                         if (PV_PMAP(pv) == pmap)
 3736                                 return (TRUE);
 3737                         loops++;
 3738                         if (loops >= 16)
 3739                                 break;
 3740                 }
 3741         }
 3742         return (FALSE);
 3743 }
 3744 
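/*
 * Userspace sketch of the bounded scan in pmap_page_exists_quick() above:
 * walk at most 16 entries of a pv-style list looking for a matching owner.
 * The x_pv structure is an illustrative stand-in for the kernel's
 * pv_entry/TAILQ machinery.
 */
#include <stddef.h>

struct x_pv {
        const void      *owner;         /* stands in for PV_PMAP(pv) */
        struct x_pv     *next;
};

static int
x_exists_quick(const struct x_pv *pv, const void *pmap)
{
        int loops;

        for (loops = 0; pv != NULL && loops < 16; pv = pv->next, loops++)
                if (pv->owner == pmap)
                        return (1);
        return (0);                     /* not among the first 16 entries */
}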
 3745 /*
 3746  * Returns TRUE if the given page is mapped individually or as part of
 3747  * a 2mpage.  Otherwise, returns FALSE.
 3748  */
 3749 boolean_t
 3750 pmap_page_is_mapped(vm_page_t m)
 3751 {
 3752         struct md_page *pvh;
 3753 
 3754         if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0)
 3755                 return (FALSE);
 3756         mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 3757         if (TAILQ_EMPTY(&m->md.pv_list)) {
 3758                 pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
 3759                 return (!TAILQ_EMPTY(&pvh->pv_list));
 3760         } else
 3761                 return (TRUE);
 3762 }
 3763 
 3764 /*
 3765  * Remove all pages from the specified address space; this aids
 3766  * process exit speeds.  Also, this code is special-cased for the
 3767  * current process only, but it can have the more generic (and
 3768  * slightly slower) mode enabled.  This is much faster than
 3769  * pmap_remove in the case of running down an entire address
 3770  * space.
 3771  */
 3772 void
 3773 pmap_remove_pages(pmap_t pmap)
 3774 {
 3775         pd_entry_t ptepde;
 3776         pt_entry_t *pte, tpte;
 3777         vm_page_t free = NULL;
 3778         vm_page_t m, mpte, mt;
 3779         pv_entry_t pv;
 3780         struct md_page *pvh;
 3781         struct pv_chunk *pc, *npc;
 3782         int field, idx;
 3783         int64_t bit;
 3784         uint64_t inuse, bitmask;
 3785         int allfree;
 3786 
 3787         if (pmap != vmspace_pmap(curthread->td_proc->p_vmspace)) {
 3788                 printf("warning: pmap_remove_pages called with non-current pmap\n");
 3789                 return;
 3790         }
 3791         vm_page_lock_queues();
 3792         PMAP_LOCK(pmap);
 3793         TAILQ_FOREACH_SAFE(pc, &pmap->pm_pvchunk, pc_list, npc) {
 3794                 allfree = 1;
 3795                 for (field = 0; field < _NPCM; field++) {
 3796                         inuse = (~(pc->pc_map[field])) & pc_freemask[field];
 3797                         while (inuse != 0) {
 3798                                 bit = bsfq(inuse);
 3799                                 bitmask = 1UL << bit;
 3800                                 idx = field * 64 + bit;
 3801                                 pv = &pc->pc_pventry[idx];
 3802                                 inuse &= ~bitmask;
 3803 
 3804                                 pte = pmap_pdpe(pmap, pv->pv_va);
 3805                                 ptepde = *pte;
 3806                                 pte = pmap_pdpe_to_pde(pte, pv->pv_va);
 3807                                 tpte = *pte;
 3808                                 if ((tpte & (PG_PS | PG_V)) == PG_V) {
 3809                                         ptepde = tpte;
 3810                                         pte = (pt_entry_t *)PHYS_TO_DMAP(tpte &
 3811                                             PG_FRAME);
 3812                                         pte = &pte[pmap_pte_index(pv->pv_va)];
 3813                                         tpte = *pte & ~PG_PTE_PAT;
 3814                                 }
 3815                                 if ((tpte & PG_V) == 0)
 3816                                         panic("bad pte");
 3817 
 3818 /*
 3819  * We cannot remove wired pages from a process' mapping at this time
 3820  */
 3821                                 if (tpte & PG_W) {
 3822                                         allfree = 0;
 3823                                         continue;
 3824                                 }
 3825 
 3826                                 m = PHYS_TO_VM_PAGE(tpte & PG_FRAME);
 3827                                 KASSERT(m->phys_addr == (tpte & PG_FRAME),
 3828                                     ("vm_page_t %p phys_addr mismatch %016jx %016jx",
 3829                                     m, (uintmax_t)m->phys_addr,
 3830                                     (uintmax_t)tpte));
 3831 
 3832                                 KASSERT(m < &vm_page_array[vm_page_array_size],
 3833                                         ("pmap_remove_pages: bad tpte %#jx",
 3834                                         (uintmax_t)tpte));
 3835 
 3836                                 pte_clear(pte);
 3837 
 3838                                 /*
 3839                                  * Update the vm_page_t clean/reference bits.
 3840                                  */
 3841                                 if ((tpte & (PG_M | PG_RW)) == (PG_M | PG_RW)) {
 3842                                         if ((tpte & PG_PS) != 0) {
 3843                                                 for (mt = m; mt < &m[NBPDR / PAGE_SIZE]; mt++)
 3844                                                         vm_page_dirty(mt);
 3845                                         } else
 3846                                                 vm_page_dirty(m);
 3847                                 }
 3848 
 3849                                 /* Mark free */
 3850                                 PV_STAT(pv_entry_frees++);
 3851                                 PV_STAT(pv_entry_spare++);
 3852                                 pv_entry_count--;
 3853                                 pc->pc_map[field] |= bitmask;
 3854                                 if ((tpte & PG_PS) != 0) {
 3855                                         pmap->pm_stats.resident_count -= NBPDR / PAGE_SIZE;
 3856                                         pvh = pa_to_pvh(tpte & PG_PS_FRAME);
 3857                                         TAILQ_REMOVE(&pvh->pv_list, pv, pv_list);
 3858                                         if (TAILQ_EMPTY(&pvh->pv_list)) {
 3859                                                 for (mt = m; mt < &m[NBPDR / PAGE_SIZE]; mt++)
 3860                                                         if (TAILQ_EMPTY(&mt->md.pv_list))
 3861                                                                 vm_page_flag_clear(mt, PG_WRITEABLE);
 3862                                         }
 3863                                         mpte = pmap_lookup_pt_page(pmap, pv->pv_va);
 3864                                         if (mpte != NULL) {
 3865                                                 pmap_remove_pt_page(pmap, mpte);
 3866                                                 pmap->pm_stats.resident_count--;
 3867                                                 KASSERT(mpte->wire_count == NPTEPG,
 3868                                                     ("pmap_remove_pages: pte page wire count error"));
 3869                                                 mpte->wire_count = 0;
 3870                                                 pmap_add_delayed_free_list(mpte, &free, FALSE);
 3871                                                 atomic_subtract_int(&cnt.v_wire_count, 1);
 3872                                         }
 3873                                 } else {
 3874                                         pmap->pm_stats.resident_count--;
 3875                                         TAILQ_REMOVE(&m->md.pv_list, pv, pv_list);
 3876                                         if (TAILQ_EMPTY(&m->md.pv_list)) {
 3877                                                 pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
 3878                                                 if (TAILQ_EMPTY(&pvh->pv_list))
 3879                                                         vm_page_flag_clear(m, PG_WRITEABLE);
 3880                                         }
 3881                                 }
 3882                                 pmap_unuse_pt(pmap, pv->pv_va, ptepde, &free);
 3883                         }
 3884                 }
 3885                 if (allfree) {
 3886                         PV_STAT(pv_entry_spare -= _NPCPV);
 3887                         PV_STAT(pc_chunk_count--);
 3888                         PV_STAT(pc_chunk_frees++);
 3889                         TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
 3890                         m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS((vm_offset_t)pc));
 3891                         dump_drop_page(m->phys_addr);
 3892                         vm_page_unwire(m, 0);
 3893                         vm_page_free(m);
 3894                 }
 3895         }
 3896         pmap_invalidate_all(pmap);
 3897         vm_page_unlock_queues();
 3898         PMAP_UNLOCK(pmap);
 3899         pmap_free_zero_pages(free);
 3900 }
 3901 
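/*
 * Sketch of the bitmap walk used by pmap_remove_pages() above: the in-use pv
 * entries of a chunk are visited by repeatedly taking the index of the lowest
 * set bit of a 64-bit mask and then clearing it.  __builtin_ctzll() is an
 * illustrative stand-in for the kernel's bsfq(); the kernel combines the bit
 * index with the field number to locate the pv entry.
 */
#include <stdint.h>
#include <stdio.h>

static void
x_walk_inuse(uint64_t inuse)
{
        int bit;

        while (inuse != 0) {
                bit = __builtin_ctzll(inuse);   /* index of the lowest set bit */
                printf("pv entry at bit %d is in use\n", bit);
                inuse &= inuse - 1;             /* clear that bit */
        }
}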
 3902 /*
 3903  *      pmap_is_modified:
 3904  *
 3905  *      Return whether or not the specified physical page was modified
 3906  *      in any physical maps.
 3907  */
 3908 boolean_t
 3909 pmap_is_modified(vm_page_t m)
 3910 {
 3911 
 3912         if (m->flags & PG_FICTITIOUS)
 3913                 return (FALSE);
 3914         if (pmap_is_modified_pvh(&m->md))
 3915                 return (TRUE);
 3916         return (pmap_is_modified_pvh(pa_to_pvh(VM_PAGE_TO_PHYS(m))));
 3917 }
 3918 
 3919 /*
 3920  * Returns TRUE if any of the given mappings were used to modify
 3921  * physical memory.  Otherwise, returns FALSE.  Both page and 2mpage
 3922  * mappings are supported.
 3923  */
 3924 static boolean_t
 3925 pmap_is_modified_pvh(struct md_page *pvh)
 3926 {
 3927         pv_entry_t pv;
 3928         pt_entry_t *pte;
 3929         pmap_t pmap;
 3930         boolean_t rv;
 3931 
 3932         mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 3933         rv = FALSE;
 3934         TAILQ_FOREACH(pv, &pvh->pv_list, pv_list) {
 3935                 pmap = PV_PMAP(pv);
 3936                 PMAP_LOCK(pmap);
 3937                 pte = pmap_pte(pmap, pv->pv_va);
 3938                 rv = (*pte & (PG_M | PG_RW)) == (PG_M | PG_RW);
 3939                 PMAP_UNLOCK(pmap);
 3940                 if (rv)
 3941                         break;
 3942         }
 3943         return (rv);
 3944 }
 3945 
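/*
 * Sketch of the per-mapping dirty test used by pmap_is_modified_pvh() above:
 * a mapping counts as having modified the page only when both the hardware
 * modified bit and the write-permission bit are set in its PTE.  Bit values
 * are illustrative stand-ins.
 */
#include <stdint.h>

#define X_PG_RW  0x0002UL
#define X_PG_M   0x0040UL

static int
x_pte_dirty(uint64_t pte)
{
        return ((pte & (X_PG_M | X_PG_RW)) == (X_PG_M | X_PG_RW));
}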
 3946 /*
 3947  *      pmap_is_prefaultable:
 3948  *
 3949  *      Return whether or not the specified virtual address is eligible
 3950  *      for prefault.
 3951  */
 3952 boolean_t
 3953 pmap_is_prefaultable(pmap_t pmap, vm_offset_t addr)
 3954 {
 3955         pd_entry_t *pde;
 3956         pt_entry_t *pte;
 3957         boolean_t rv;
 3958 
 3959         rv = FALSE;
 3960         PMAP_LOCK(pmap);
 3961         pde = pmap_pde(pmap, addr);
 3962         if (pde != NULL && (*pde & (PG_PS | PG_V)) == PG_V) {
 3963                 pte = pmap_pde_to_pte(pde, addr);
 3964                 rv = (*pte & PG_V) == 0;
 3965         }
 3966         PMAP_UNLOCK(pmap);
 3967         return (rv);
 3968 }
 3969 
 3970 /*
 3971  * Clear the write and modified bits in each of the given page's mappings.
 3972  */
 3973 void
 3974 pmap_remove_write(vm_page_t m)
 3975 {
 3976         struct md_page *pvh;
 3977         pmap_t pmap;
 3978         pv_entry_t next_pv, pv;
 3979         pd_entry_t *pde;
 3980         pt_entry_t oldpte, *pte;
 3981         vm_offset_t va;
 3982 
 3983         if ((m->flags & PG_FICTITIOUS) != 0 ||
 3984             (m->flags & PG_WRITEABLE) == 0)
 3985                 return;
 3986         mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 3987         pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
 3988         TAILQ_FOREACH_SAFE(pv, &pvh->pv_list, pv_list, next_pv) {
 3989                 va = pv->pv_va;
 3990                 pmap = PV_PMAP(pv);
 3991                 PMAP_LOCK(pmap);
 3992                 pde = pmap_pde(pmap, va);
 3993                 if ((*pde & PG_RW) != 0)
 3994                         (void)pmap_demote_pde(pmap, pde, va);
 3995                 PMAP_UNLOCK(pmap);
 3996         }
 3997         TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
 3998                 pmap = PV_PMAP(pv);
 3999                 PMAP_LOCK(pmap);
 4000                 pde = pmap_pde(pmap, pv->pv_va);
 4001                 KASSERT((*pde & PG_PS) == 0, ("pmap_remove_write: found"
 4002                     " a 2mpage in page %p's pv list", m));
 4003                 pte = pmap_pde_to_pte(pde, pv->pv_va);
 4004 retry:
 4005                 oldpte = *pte;
 4006                 if (oldpte & PG_RW) {
 4007                         if (!atomic_cmpset_long(pte, oldpte, oldpte &
 4008                             ~(PG_RW | PG_M)))
 4009                                 goto retry;
 4010                         if ((oldpte & PG_M) != 0)
 4011                                 vm_page_dirty(m);
 4012                         pmap_invalidate_page(pmap, pv->pv_va);
 4013                 }
 4014                 PMAP_UNLOCK(pmap);
 4015         }
 4016         vm_page_flag_clear(m, PG_WRITEABLE);
 4017 }
 4018 
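/*
 * Sketch of the compare-and-swap retry loop used by pmap_remove_write()
 * above to clear the write and modified bits: keep retrying with the freshly
 * observed PTE so that concurrent hardware updates of the accessed/modified
 * bits are not lost.  C11 atomics stand in for the kernel's
 * atomic_cmpset_long(); bit values are illustrative.
 */
#include <stdatomic.h>
#include <stdint.h>

#define X_PG_RW  0x0002UL
#define X_PG_M   0x0040UL

static void
x_clear_write(_Atomic uint64_t *pte)
{
        uint64_t oldpte;

        oldpte = atomic_load(pte);
        while ((oldpte & X_PG_RW) != 0 &&
            !atomic_compare_exchange_weak(pte, &oldpte,
            oldpte & ~(X_PG_RW | X_PG_M)))
                ;       /* oldpte has been reloaded; retry */
}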
 4019 /*
 4020  *      pmap_ts_referenced:
 4021  *
 4022  *      Return a count of reference bits for a page, clearing those bits.
 4023  *      It is not necessary for every reference bit to be cleared, but it
 4024  *      is necessary that 0 only be returned when there are truly no
 4025  *      reference bits set.
 4026  *
 4027  *      XXX: The exact number of bits to check and clear is a matter that
 4028  *      should be tested and standardized at some point in the future for
 4029  *      optimal aging of shared pages.
 4030  */
 4031 int
 4032 pmap_ts_referenced(vm_page_t m)
 4033 {
 4034         struct md_page *pvh;
 4035         pv_entry_t pv, pvf, pvn;
 4036         pmap_t pmap;
 4037         pd_entry_t oldpde, *pde;
 4038         pt_entry_t *pte;
 4039         vm_offset_t va;
 4040         int rtval = 0;
 4041 
 4042         if (m->flags & PG_FICTITIOUS)
 4043                 return (rtval);
 4044         mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 4045         pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
 4046         TAILQ_FOREACH_SAFE(pv, &pvh->pv_list, pv_list, pvn) {
 4047                 va = pv->pv_va;
 4048                 pmap = PV_PMAP(pv);
 4049                 PMAP_LOCK(pmap);
 4050                 pde = pmap_pde(pmap, va);
 4051                 oldpde = *pde;
 4052                 if ((oldpde & PG_A) != 0) {
 4053                         if (pmap_demote_pde(pmap, pde, va)) {
 4054                                 if ((oldpde & PG_W) == 0) {
 4055                                         /*
 4056                                          * Remove the mapping to a single page
 4057                                          * so that a subsequent access may
 4058                                          * repromote.  Since the underlying
 4059                                          * page table page is fully populated,
 4060                                          * this removal never frees a page
 4061                                          * table page.
 4062                                          */
 4063                                         va += VM_PAGE_TO_PHYS(m) - (oldpde &
 4064                                             PG_PS_FRAME);
 4065                                         pmap_remove_page(pmap, va, pde, NULL);
 4066                                         rtval++;
 4067                                         if (rtval > 4) {
 4068                                                 PMAP_UNLOCK(pmap);
 4069                                                 return (rtval);
 4070                                         }
 4071                                 }
 4072                         }
 4073                 }
 4074                 PMAP_UNLOCK(pmap);
 4075         }
 4076         if ((pv = TAILQ_FIRST(&m->md.pv_list)) != NULL) {
 4077                 pvf = pv;
 4078                 do {
 4079                         pvn = TAILQ_NEXT(pv, pv_list);
 4080                         TAILQ_REMOVE(&m->md.pv_list, pv, pv_list);
 4081                         TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_list);
 4082                         pmap = PV_PMAP(pv);
 4083                         PMAP_LOCK(pmap);
 4084                         pde = pmap_pde(pmap, pv->pv_va);
 4085                         KASSERT((*pde & PG_PS) == 0, ("pmap_ts_referenced:"
 4086                             " found a 2mpage in page %p's pv list", m));
 4087                         pte = pmap_pde_to_pte(pde, pv->pv_va);
 4088                         if ((*pte & PG_A) != 0) {
 4089                                 atomic_clear_long(pte, PG_A);
 4090                                 pmap_invalidate_page(pmap, pv->pv_va);
 4091                                 rtval++;
 4092                                 if (rtval > 4)
 4093                                         pvn = NULL;
 4094                         }
 4095                         PMAP_UNLOCK(pmap);
 4096                 } while ((pv = pvn) != NULL && pv != pvf);
 4097         }
 4098         return (rtval);
 4099 }
 4100 
 4101 /*
 4102  *      Clear the modify bits on the specified physical page.
 4103  */
 4104 void
 4105 pmap_clear_modify(vm_page_t m)
 4106 {
 4107         struct md_page *pvh;
 4108         pmap_t pmap;
 4109         pv_entry_t next_pv, pv;
 4110         pd_entry_t oldpde, *pde;
 4111         pt_entry_t oldpte, *pte;
 4112         vm_offset_t va;
 4113 
 4114         if ((m->flags & PG_FICTITIOUS) != 0)
 4115                 return;
 4116         mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 4117         pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
 4118         TAILQ_FOREACH_SAFE(pv, &pvh->pv_list, pv_list, next_pv) {
 4119                 va = pv->pv_va;
 4120                 pmap = PV_PMAP(pv);
 4121                 PMAP_LOCK(pmap);
 4122                 pde = pmap_pde(pmap, va);
 4123                 oldpde = *pde;
 4124                 if ((oldpde & PG_RW) != 0) {
 4125                         if (pmap_demote_pde(pmap, pde, va)) {
 4126                                 if ((oldpde & PG_W) == 0) {
 4127                                         /*
 4128                                          * Write protect the mapping to a
 4129                                          * single page so that a subsequent
 4130                                          * write access may repromote.
 4131                                          */
 4132                                         va += VM_PAGE_TO_PHYS(m) - (oldpde &
 4133                                             PG_PS_FRAME);
 4134                                         pte = pmap_pde_to_pte(pde, va);
 4135                                         oldpte = *pte;
 4136                                         if ((oldpte & PG_V) != 0) {
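                                                      /*
                                                       * Clear PG_M and PG_RW
                                                       * together with a CAS loop,
                                                       * because the MMU may set
                                                       * PG_M concurrently.
                                                       */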
 4137                                                 while (!atomic_cmpset_long(pte,
 4138                                                     oldpte,
 4139                                                     oldpte & ~(PG_M | PG_RW)))
 4140                                                         oldpte = *pte;
 4141                                                 vm_page_dirty(m);
 4142                                                 pmap_invalidate_page(pmap, va);
 4143                                         }
 4144                                 }
 4145                         }
 4146                 }
 4147                 PMAP_UNLOCK(pmap);
 4148         }
 4149         TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
 4150                 pmap = PV_PMAP(pv);
 4151                 PMAP_LOCK(pmap);
 4152                 pde = pmap_pde(pmap, pv->pv_va);
 4153                 KASSERT((*pde & PG_PS) == 0, ("pmap_clear_modify: found"
 4154                     " a 2mpage in page %p's pv list", m));
 4155                 pte = pmap_pde_to_pte(pde, pv->pv_va);
 4156                 if ((*pte & (PG_M | PG_RW)) == (PG_M | PG_RW)) {
 4157                         atomic_clear_long(pte, PG_M);
 4158                         pmap_invalidate_page(pmap, pv->pv_va);
 4159                 }
 4160                 PMAP_UNLOCK(pmap);
 4161         }
 4162 }
 4163 
 4164 /*
 4165  *      pmap_clear_reference:
 4166  *
 4167  *      Clear the reference bit on the specified physical page.
 4168  */
 4169 void
 4170 pmap_clear_reference(vm_page_t m)
 4171 {
 4172         struct md_page *pvh;
 4173         pmap_t pmap;
 4174         pv_entry_t next_pv, pv;
 4175         pd_entry_t oldpde, *pde;
 4176         pt_entry_t *pte;
 4177         vm_offset_t va;
 4178 
 4179         if ((m->flags & PG_FICTITIOUS) != 0)
 4180                 return;
 4181         mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 4182         pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
 4183         TAILQ_FOREACH_SAFE(pv, &pvh->pv_list, pv_list, next_pv) {
 4184                 va = pv->pv_va;
 4185                 pmap = PV_PMAP(pv);
 4186                 PMAP_LOCK(pmap);
 4187                 pde = pmap_pde(pmap, va);
 4188                 oldpde = *pde;
 4189                 if ((oldpde & PG_A) != 0) {
 4190                         if (pmap_demote_pde(pmap, pde, va)) {
 4191                                 /*
 4192                                  * Remove the mapping to a single page so
 4193                                  * that a subsequent access may repromote.
 4194                                  * Since the underlying page table page is
 4195                                  * fully populated, this removal never frees
 4196                                  * a page table page.
 4197                                  */
 4198                                 va += VM_PAGE_TO_PHYS(m) - (oldpde &
 4199                                     PG_PS_FRAME);
 4200                                 pmap_remove_page(pmap, va, pde, NULL);
 4201                         }
 4202                 }
 4203                 PMAP_UNLOCK(pmap);
 4204         }
 4205         TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
 4206                 pmap = PV_PMAP(pv);
 4207                 PMAP_LOCK(pmap);
 4208                 pde = pmap_pde(pmap, pv->pv_va);
 4209                 KASSERT((*pde & PG_PS) == 0, ("pmap_clear_reference: found"
 4210                     " a 2mpage in page %p's pv list", m));
 4211                 pte = pmap_pde_to_pte(pde, pv->pv_va);
 4212                 if (*pte & PG_A) {
 4213                         atomic_clear_long(pte, PG_A);
 4214                         pmap_invalidate_page(pmap, pv->pv_va);
 4215                 }
 4216                 PMAP_UNLOCK(pmap);
 4217         }
 4218 }
 4219 
 4220 /*
 4221  * Miscellaneous support routines follow
 4222  */
 4223 
 4224 /* Adjust the cache mode for a 4KB page mapped via a PTE. */
 4225 static __inline void
 4226 pmap_pte_attr(pt_entry_t *pte, int cache_bits)
 4227 {
 4228         u_int opte, npte;
 4229 
 4230         /*
 4231          * The cache mode bits are all in the low 32-bits of the
 4232          * PTE, so we can just spin on updating the low 32-bits.
 4233          */
 4234         do {
 4235                 opte = *(u_int *)pte;
 4236                 npte = opte & ~PG_PTE_CACHE;
 4237                 npte |= cache_bits;
 4238         } while (npte != opte && !atomic_cmpset_int((u_int *)pte, opte, npte));
 4239 }
 4240 
 4241 /* Adjust the cache mode for a 2MB page mapped via a PDE. */
 4242 static __inline void
 4243 pmap_pde_attr(pd_entry_t *pde, int cache_bits)
 4244 {
 4245         u_int opde, npde;
 4246 
 4247         /*
 4248          * The cache mode bits are all in the low 32-bits of the
 4249          * PDE, so we can just spin on updating the low 32-bits.
 4250          */
 4251         do {
 4252                 opde = *(u_int *)pde;
 4253                 npde = opde & ~PG_PDE_CACHE;
 4254                 npde |= cache_bits;
 4255         } while (npde != opde && !atomic_cmpset_int((u_int *)pde, opde, npde));
 4256 }
 4257 
 4258 /*
 4259  * Map a set of physical memory pages into the kernel virtual
 4260  * address space. Return a pointer to where it is mapped. This
 4261  * routine is intended to be used for mapping device memory,
 4262  * NOT real memory.
 4263  */
 4264 void *
 4265 pmap_mapdev_attr(vm_paddr_t pa, vm_size_t size, int mode)
 4266 {
 4267         vm_offset_t va, offset;
 4268         vm_size_t tmpsize;
 4269 
 4270         /*
 4271          * If the specified range of physical addresses fits within the direct
 4272          * map window, use the direct map. 
 4273          */
 4274         if (pa < dmaplimit && pa + size < dmaplimit) {
 4275                 va = PHYS_TO_DMAP(pa);
 4276                 if (!pmap_change_attr(va, size, mode))
 4277                         return ((void *)va);
 4278         }
 4279         offset = pa & PAGE_MASK;
 4280         size = roundup(offset + size, PAGE_SIZE);
 4281         va = kmem_alloc_nofault(kernel_map, size);
 4282         if (!va)
 4283                 panic("pmap_mapdev: Couldn't alloc kernel virtual memory");
 4284         pa = trunc_page(pa);
 4285         for (tmpsize = 0; tmpsize < size; tmpsize += PAGE_SIZE)
 4286                 pmap_kenter_attr(va + tmpsize, pa + tmpsize, mode);
 4287         pmap_invalidate_range(kernel_pmap, va, va + tmpsize);
 4288         pmap_invalidate_cache_range(va, va + tmpsize);
 4289         return ((void *)(va + offset));
 4290 }
 4291 
 4292 void *
 4293 pmap_mapdev(vm_paddr_t pa, vm_size_t size)
 4294 {
 4295 
 4296         return (pmap_mapdev_attr(pa, size, PAT_UNCACHEABLE));
 4297 }
 4298 
 4299 void *
 4300 pmap_mapbios(vm_paddr_t pa, vm_size_t size)
 4301 {
 4302 
 4303         return (pmap_mapdev_attr(pa, size, PAT_WRITE_BACK));
 4304 }
 4305 
 4306 void
 4307 pmap_unmapdev(vm_offset_t va, vm_size_t size)
 4308 {
 4309         vm_offset_t base, offset, tmpva;
 4310 
 4311         /* If pmap_mapdev_attr() handed out a direct map address, do nothing. */
 4312         if (va >= DMAP_MIN_ADDRESS && va < DMAP_MAX_ADDRESS)
 4313                 return;
 4314         base = trunc_page(va);
 4315         offset = va & PAGE_MASK;
 4316         size = roundup(offset + size, PAGE_SIZE);
 4317         for (tmpva = base; tmpva < (base + size); tmpva += PAGE_SIZE)
 4318                 pmap_kremove(tmpva);
 4319         pmap_invalidate_range(kernel_pmap, va, tmpva);
 4320         kmem_free(kernel_map, base, size);
 4321 }
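
      /*
       * Illustrative (hypothetical) usage from a device driver, mapping a
       * register window at physical address "dev_pa" uncacheable and later
       * releasing it ("dev_pa" and "dev_size" are placeholders):
       *
       *      void *regs;
       *
       *      regs = pmap_mapdev(dev_pa, dev_size);
       *      ...
       *      pmap_unmapdev((vm_offset_t)regs, dev_size);
       *
       * In practice, drivers typically reach these routines indirectly through
       * the newbus resource-activation code rather than calling them directly.
       */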
 4322 
 4323 /*
 4324  * Sets the memory attribute for the specified page.
 4325  */
 4326 void
 4327 pmap_page_set_memattr(vm_page_t m, vm_memattr_t ma)
 4328 {
 4329 
 4330         m->md.pat_mode = ma;
 4331 
 4332         /*
 4333          * If "m" is a normal page, update its direct mapping.  This update
 4334          * can be relied upon to perform any cache operations that are
 4335          * required for data coherence.
 4336          */
 4337         if ((m->flags & PG_FICTITIOUS) == 0 &&
 4338             pmap_change_attr(PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m)), PAGE_SIZE,
 4339             m->md.pat_mode))
 4340                 panic("memory attribute change on the direct map failed");
 4341 }
 4342 
 4343 /*
 4344  * Changes the specified virtual address range's memory type to that given by
 4345  * the parameter "mode".  The specified virtual address range must be
 4346  * completely contained within either the direct map or the kernel map.  If
 4347  * the virtual address range is contained within the kernel map, then the
 4348  * memory type for each of the corresponding ranges of the direct map is also
 4349  * changed.  (The corresponding ranges of the direct map are those ranges that
 4350  * map the same physical pages as the specified virtual address range.)  These
 4351  * changes to the direct map are necessary because Intel describes the
 4352  * behavior of their processors as "undefined" if two or more mappings to the
 4353  * same physical page have different memory types.
 4354  *
 4355  * Returns zero if the change completed successfully, and either EINVAL or
 4356  * ENOMEM if the change failed.  Specifically, EINVAL is returned if some part
 4357  * of the virtual address range was not mapped, and ENOMEM is returned if
 4358  * there was insufficient memory available to complete the change.  In the
 4359  * latter case, the memory type may have been changed on some part of the
 4360  * virtual address range or the direct map.
 4361  */
 4362 int
 4363 pmap_change_attr(vm_offset_t va, vm_size_t size, int mode)
 4364 {
 4365         int error;
 4366 
 4367         PMAP_LOCK(kernel_pmap);
 4368         error = pmap_change_attr_locked(va, size, mode);
 4369         PMAP_UNLOCK(kernel_pmap);
 4370         return (error);
 4371 }
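
      /*
       * Illustrative (hypothetical) example: a driver that has mapped a frame
       * buffer of "fb_size" bytes at kernel virtual address "fb_va" could make
       * it write-combining with:
       *
       *      error = pmap_change_attr(fb_va, fb_size, PAT_WRITE_COMBINING);
       *
       * where "fb_va" and "fb_size" are placeholders.  A zero return means that
       * the kernel map range and the corresponding direct map range now use the
       * new memory type; EINVAL or ENOMEM is returned as described above.
       */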
 4372 
 4373 static int
 4374 pmap_change_attr_locked(vm_offset_t va, vm_size_t size, int mode)
 4375 {
 4376         vm_offset_t base, offset, tmpva;
 4377         vm_paddr_t pa_start, pa_end;
 4378         pd_entry_t *pde;
 4379         pt_entry_t *pte;
 4380         int cache_bits_pte, cache_bits_pde, error;
 4381         boolean_t changed;
 4382 
 4383         PMAP_LOCK_ASSERT(kernel_pmap, MA_OWNED);
 4384         base = trunc_page(va);
 4385         offset = va & PAGE_MASK;
 4386         size = roundup(offset + size, PAGE_SIZE);
 4387 
 4388         /*
 4389          * Only supported on kernel virtual addresses, including the direct
 4390          * map but excluding the recursive map.
 4391          */
 4392         if (base < DMAP_MIN_ADDRESS)
 4393                 return (EINVAL);
 4394 
 4395         cache_bits_pde = pmap_cache_bits(mode, 1);
 4396         cache_bits_pte = pmap_cache_bits(mode, 0);
 4397         changed = FALSE;
 4398 
 4399         /*
 4400          * Pages that aren't mapped aren't supported.  Also break down 2MB pages
 4401          * into 4KB pages if required.
 4402          */
 4403         for (tmpva = base; tmpva < base + size; ) {
 4404                 pde = pmap_pde(kernel_pmap, tmpva);
 4405                 if (*pde == 0)
 4406                         return (EINVAL);
 4407                 if (*pde & PG_PS) {
 4408                         /*
 4409                          * If the current 2MB page already has the required
 4410                          * memory type, then we need not demote this page. Just
 4411                          * increment tmpva to the next 2MB page frame.
 4412                          */
 4413                         if ((*pde & PG_PDE_CACHE) == cache_bits_pde) {
 4414                                 tmpva = trunc_2mpage(tmpva) + NBPDR;
 4415                                 continue;
 4416                         }
 4417 
 4418                         /*
 4419                          * If the current offset aligns with a 2MB page frame
 4420                          * and there is at least 2MB left within the range, then
 4421                          * we need not break down this page into 4KB pages.
 4422                          */
 4423                         if ((tmpva & PDRMASK) == 0 &&
 4424                             tmpva + PDRMASK < base + size) {
 4425                                 tmpva += NBPDR;
 4426                                 continue;
 4427                         }
 4428                         if (!pmap_demote_pde(kernel_pmap, pde, tmpva))
 4429                                 return (ENOMEM);
 4430                 }
 4431                 pte = pmap_pde_to_pte(pde, tmpva);
 4432                 if (*pte == 0)
 4433                         return (EINVAL);
 4434                 tmpva += PAGE_SIZE;
 4435         }
 4436         error = 0;
 4437 
 4438         /*
 4439          * Ok, all the pages exist, so run through them updating their
 4440          * cache mode if required.
 4441          */
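              /*
               * "pa_start" and "pa_end" accumulate a run of physically contiguous
               * pages whose kernel map mappings were just updated, so that the
               * matching direct map range can be changed with one recursive call
               * instead of one call per page.
               */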
 4442         pa_start = pa_end = 0;
 4443         for (tmpva = base; tmpva < base + size; ) {
 4444                 pde = pmap_pde(kernel_pmap, tmpva);
 4445                 if (*pde & PG_PS) {
 4446                         if ((*pde & PG_PDE_CACHE) != cache_bits_pde) {
 4447                                 pmap_pde_attr(pde, cache_bits_pde);
 4448                                 changed = TRUE;
 4449                         }
 4450                         if (tmpva >= VM_MIN_KERNEL_ADDRESS) {
 4451                                 if (pa_start == pa_end) {
 4452                                         /* Start physical address run. */
 4453                                         pa_start = *pde & PG_PS_FRAME;
 4454                                         pa_end = pa_start + NBPDR;
 4455                                 } else if (pa_end == (*pde & PG_PS_FRAME))
 4456                                         pa_end += NBPDR;
 4457                                 else {
 4458                                         /* Run ended, update direct map. */
 4459                                         error = pmap_change_attr_locked(
 4460                                             PHYS_TO_DMAP(pa_start),
 4461                                             pa_end - pa_start, mode);
 4462                                         if (error != 0)
 4463                                                 break;
 4464                                         /* Start physical address run. */
 4465                                         pa_start = *pde & PG_PS_FRAME;
 4466                                         pa_end = pa_start + NBPDR;
 4467                                 }
 4468                         }
 4469                         tmpva = trunc_2mpage(tmpva) + NBPDR;
 4470                 } else {
 4471                         pte = pmap_pde_to_pte(pde, tmpva);
 4472                         if ((*pte & PG_PTE_CACHE) != cache_bits_pte) {
 4473                                 pmap_pte_attr(pte, cache_bits_pte);
 4474                                 changed = TRUE;
 4475                         }
 4476                         if (tmpva >= VM_MIN_KERNEL_ADDRESS) {
 4477                                 if (pa_start == pa_end) {
 4478                                         /* Start physical address run. */
 4479                                         pa_start = *pte & PG_FRAME;
 4480                                         pa_end = pa_start + PAGE_SIZE;
 4481                                 } else if (pa_end == (*pte & PG_FRAME))
 4482                                         pa_end += PAGE_SIZE;
 4483                                 else {
 4484                                         /* Run ended, update direct map. */
 4485                                         error = pmap_change_attr_locked(
 4486                                             PHYS_TO_DMAP(pa_start),
 4487                                             pa_end - pa_start, mode);
 4488                                         if (error != 0)
 4489                                                 break;
 4490                                         /* Start physical address run. */
 4491                                         pa_start = *pte & PG_FRAME;
 4492                                         pa_end = pa_start + PAGE_SIZE;
 4493                                 }
 4494                         }
 4495                         tmpva += PAGE_SIZE;
 4496                 }
 4497         }
 4498         if (error == 0 && pa_start != pa_end)
 4499                 error = pmap_change_attr_locked(PHYS_TO_DMAP(pa_start),
 4500                     pa_end - pa_start, mode);
 4501 
 4502         /*
 4503          * If any mapping was changed, invalidate the affected TLB entries and
 4504          * flush the CPU caches of any data cached under the old memory type.
 4505          */
 4506         if (changed) {
 4507                 pmap_invalidate_range(kernel_pmap, base, tmpva);
 4508                 pmap_invalidate_cache_range(base, tmpva);
 4509         }
 4510         return (error);
 4511 }
 4512 
 4513 /*
 4514  * Perform the pmap work for mincore(2).
 4515  */
 4516 int
 4517 pmap_mincore(pmap_t pmap, vm_offset_t addr)
 4518 {
 4519         pd_entry_t *pdep;
 4520         pt_entry_t pte;
 4521         vm_paddr_t pa;
 4522         vm_page_t m;
 4523         int val = 0;
 4524         
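              /*
               * Build up the MINCORE_* status bits for the page that "pmap" maps
               * at "addr".  A valid 2MB mapping reports MINCORE_SUPER, and the
               * 4KB page within it that backs "addr" is used for the modified
               * and referenced checks below.
               */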
 4525         PMAP_LOCK(pmap);
 4526         pdep = pmap_pde(pmap, addr);
 4527         if (pdep != NULL && (*pdep & PG_V)) {
 4528                 if (*pdep & PG_PS) {
 4529                         pte = *pdep;
 4530                         val = MINCORE_SUPER;
 4531                         /* Compute the physical address of the 4KB page. */
 4532                         pa = ((*pdep & PG_PS_FRAME) | (addr & PDRMASK)) &
 4533                             PG_FRAME;
 4534                 } else {
 4535                         pte = *pmap_pde_to_pte(pdep, addr);
 4536                         pa = pte & PG_FRAME;
 4537                 }
 4538         } else {
 4539                 pte = 0;
 4540                 pa = 0;
 4541         }
 4542         PMAP_UNLOCK(pmap);
 4543 
 4544         if (pte != 0) {
 4545                 val |= MINCORE_INCORE;
 4546                 if ((pte & PG_MANAGED) == 0)
 4547                         return (val);
 4548 
 4549                 m = PHYS_TO_VM_PAGE(pa);
 4550 
 4551                 /*
 4552                  * Modified by us
 4553                  */
 4554                 if ((pte & (PG_M | PG_RW)) == (PG_M | PG_RW))
 4555                         val |= MINCORE_MODIFIED|MINCORE_MODIFIED_OTHER;
 4556                 else {
 4557                         /*
 4558                          * Modified by someone else
 4559                          */
 4560                         vm_page_lock_queues();
 4561                         if (m->dirty || pmap_is_modified(m))
 4562                                 val |= MINCORE_MODIFIED_OTHER;
 4563                         vm_page_unlock_queues();
 4564                 }
 4565                 /*
 4566                  * Referenced by us
 4567                  */
 4568                 if (pte & PG_A)
 4569                         val |= MINCORE_REFERENCED|MINCORE_REFERENCED_OTHER;
 4570                 else {
 4571                         /*
 4572                          * Referenced by someone else
 4573                          */
 4574                         vm_page_lock_queues();
 4575                         if ((m->flags & PG_REFERENCED) ||
 4576                             pmap_ts_referenced(m)) {
 4577                                 val |= MINCORE_REFERENCED_OTHER;
 4578                                 vm_page_flag_set(m, PG_REFERENCED);
 4579                         }
 4580                         vm_page_unlock_queues();
 4581                 }
 4582         } 
 4583         return (val);
 4584 }
 4585 
 4586 void
 4587 pmap_activate(struct thread *td)
 4588 {
 4589         pmap_t  pmap, oldpmap;
 4590         u_int64_t  cr3;
 4591 
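              /*
               * Mark the new pmap active on this CPU, so that TLB shootdowns are
               * delivered to it, and switch to its page tables by loading the
               * physical address of its top-level (PML4) page into %cr3.
               */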
 4592         critical_enter();
 4593         pmap = vmspace_pmap(td->td_proc->p_vmspace);
 4594         oldpmap = PCPU_GET(curpmap);
 4595 #ifdef SMP
 4596         if (oldpmap)    /* XXX FIXME */
 4597                 atomic_clear_int(&oldpmap->pm_active, PCPU_GET(cpumask));
 4598         atomic_set_int(&pmap->pm_active, PCPU_GET(cpumask));
 4599 #else
 4600         if (oldpmap)    /* XXX FIXME */
 4601                 oldpmap->pm_active &= ~PCPU_GET(cpumask);
 4602         pmap->pm_active |= PCPU_GET(cpumask);
 4603 #endif
 4604         cr3 = DMAP_TO_PHYS((vm_offset_t)pmap->pm_pml4);
 4605         td->td_pcb->pcb_cr3 = cr3;
 4606         load_cr3(cr3);
 4607         critical_exit();
 4608 }
 4609 
 4610 vm_offset_t
 4611 pmap_addr_hint(vm_object_t obj, vm_offset_t addr, vm_size_t size)
 4612 {
 4613 
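              /*
               * For device- and SG-backed objects of at least 2MB, suggest a
               * 2MB-aligned address so that the mapping is eligible for
               * superpage (PG_PS) mappings; otherwise leave the hint unchanged.
               */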
 4614         if ((obj == NULL) || (size < NBPDR) ||
 4615             (obj->type != OBJT_DEVICE && obj->type != OBJT_SG)) {
 4616                 return (addr);
 4617         }
 4618 
 4619         addr = (addr + (NBPDR - 1)) & ~(NBPDR - 1);
 4620         return (addr);
 4621 }
 4622 
 4623 /*
 4624  *      Increase the starting virtual address of the given mapping if a
 4625  *      different alignment might result in more superpage mappings.
 4626  */
 4627 void
 4628 pmap_align_superpage(vm_object_t object, vm_ooffset_t offset,
 4629     vm_offset_t *addr, vm_size_t size)
 4630 {
 4631         vm_offset_t superpage_offset;
 4632 
 4633         if (size < NBPDR)
 4634                 return;
 4635         if (object != NULL && (object->flags & OBJ_COLORED) != 0)
 4636                 offset += ptoa(object->pg_color);
 4637         superpage_offset = offset & PDRMASK;
 4638         if (size - ((NBPDR - superpage_offset) & PDRMASK) < NBPDR ||
 4639             (*addr & PDRMASK) == superpage_offset)
 4640                 return;
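              /*
               * Advance "*addr" (never move it backward) to the nearest address
               * whose offset within a 2MB page equals "superpage_offset", so that
               * the mapping can line up with the object's pages on 2MB boundaries
               * and later be promoted.
               */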
 4641         if ((*addr & PDRMASK) < superpage_offset)
 4642                 *addr = (*addr & ~PDRMASK) + superpage_offset;
 4643         else
 4644                 *addr = ((*addr + PDRMASK) & ~PDRMASK) + superpage_offset;
 4645 }
