FreeBSD/Linux Kernel Cross Reference
sys/amd64/amd64/pmap.c


    1 /*-
    2  * Copyright (c) 1991 Regents of the University of California.
    3  * All rights reserved.
    4  * Copyright (c) 1994 John S. Dyson
    5  * All rights reserved.
    6  * Copyright (c) 1994 David Greenman
    7  * All rights reserved.
    8  * Copyright (c) 2003 Peter Wemm
    9  * All rights reserved.
   10  * Copyright (c) 2005-2008 Alan L. Cox <alc@cs.rice.edu>
   11  * All rights reserved.
   12  *
   13  * This code is derived from software contributed to Berkeley by
   14  * the Systems Programming Group of the University of Utah Computer
   15  * Science Department and William Jolitz of UUNET Technologies Inc.
   16  *
   17  * Redistribution and use in source and binary forms, with or without
   18  * modification, are permitted provided that the following conditions
   19  * are met:
   20  * 1. Redistributions of source code must retain the above copyright
   21  *    notice, this list of conditions and the following disclaimer.
   22  * 2. Redistributions in binary form must reproduce the above copyright
   23  *    notice, this list of conditions and the following disclaimer in the
   24  *    documentation and/or other materials provided with the distribution.
   25  * 3. All advertising materials mentioning features or use of this software
   26  *    must display the following acknowledgement:
   27  *      This product includes software developed by the University of
   28  *      California, Berkeley and its contributors.
   29  * 4. Neither the name of the University nor the names of its contributors
   30  *    may be used to endorse or promote products derived from this software
   31  *    without specific prior written permission.
   32  *
   33  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
   34  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   35  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   36  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
   37  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   38  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
   39  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   40  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   41  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   42  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   43  * SUCH DAMAGE.
   44  *
   45  *      from:   @(#)pmap.c      7.7 (Berkeley)  5/12/91
   46  */
   47 /*-
   48  * Copyright (c) 2003 Networks Associates Technology, Inc.
   49  * All rights reserved.
   50  *
   51  * This software was developed for the FreeBSD Project by Jake Burkholder,
   52  * Safeport Network Services, and Network Associates Laboratories, the
   53  * Security Research Division of Network Associates, Inc. under
   54  * DARPA/SPAWAR contract N66001-01-C-8035 ("CBOSS"), as part of the DARPA
   55  * CHATS research program.
   56  *
   57  * Redistribution and use in source and binary forms, with or without
   58  * modification, are permitted provided that the following conditions
   59  * are met:
   60  * 1. Redistributions of source code must retain the above copyright
   61  *    notice, this list of conditions and the following disclaimer.
   62  * 2. Redistributions in binary form must reproduce the above copyright
   63  *    notice, this list of conditions and the following disclaimer in the
   64  *    documentation and/or other materials provided with the distribution.
   65  *
   66  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
   67  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   68  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   69  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
   70  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   71  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
   72  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   73  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   74  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   75  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   76  * SUCH DAMAGE.
   77  */
   78 
   79 #include <sys/cdefs.h>
   80 __FBSDID("$FreeBSD: releng/8.0/sys/amd64/amd64/pmap.c 197700 2009-10-02 05:11:46Z alc $");
   81 
   82 /*
   83  *      Manages physical address maps.
   84  *
   85  *      In addition to hardware address maps, this
   86  *      module is called upon to provide software-use-only
   87  *      maps which may or may not be stored in the same
   88  *      form as hardware maps.  These pseudo-maps are
   89  *      used to store intermediate results from copy
   90  *      operations to and from address spaces.
   91  *
   92  *      Since the information managed by this module is
   93  *      also stored by the logical address mapping module,
   94  *      this module may throw away valid virtual-to-physical
   95  *      mappings at almost any time.  However, invalidations
   96  *      of virtual-to-physical mappings must be done as
   97  *      requested.
   98  *
   99  *      In order to cope with hardware architectures which
  100  *      make virtual-to-physical map invalidates expensive,
  101  *      this module may delay invalidate or reduced protection
  102  *      operations until such time as they are actually
  103  *      necessary.  This module is given full information as
  104  *      to which processors are currently using which maps,
  105  *      and to when physical maps must be made correct.
  106  */
  107 
  108 #include "opt_msgbuf.h"
  109 #include "opt_pmap.h"
  110 #include "opt_vm.h"
  111 
  112 #include <sys/param.h>
  113 #include <sys/systm.h>
  114 #include <sys/kernel.h>
  115 #include <sys/ktr.h>
  116 #include <sys/lock.h>
  117 #include <sys/malloc.h>
  118 #include <sys/mman.h>
  119 #include <sys/msgbuf.h>
  120 #include <sys/mutex.h>
  121 #include <sys/proc.h>
  122 #include <sys/sx.h>
  123 #include <sys/vmmeter.h>
  124 #include <sys/sched.h>
  125 #include <sys/sysctl.h>
  126 #ifdef SMP
  127 #include <sys/smp.h>
  128 #endif
  129 
  130 #include <vm/vm.h>
  131 #include <vm/vm_param.h>
  132 #include <vm/vm_kern.h>
  133 #include <vm/vm_page.h>
  134 #include <vm/vm_map.h>
  135 #include <vm/vm_object.h>
  136 #include <vm/vm_extern.h>
  137 #include <vm/vm_pageout.h>
  138 #include <vm/vm_pager.h>
  139 #include <vm/vm_reserv.h>
  140 #include <vm/uma.h>
  141 
  142 #include <machine/cpu.h>
  143 #include <machine/cputypes.h>
  144 #include <machine/md_var.h>
  145 #include <machine/pcb.h>
  146 #include <machine/specialreg.h>
  147 #ifdef SMP
  148 #include <machine/smp.h>
  149 #endif
  150 
  151 #ifndef PMAP_SHPGPERPROC
  152 #define PMAP_SHPGPERPROC 200
  153 #endif
  154 
  155 #if !defined(DIAGNOSTIC)
  156 #define PMAP_INLINE     __gnu89_inline
  157 #else
  158 #define PMAP_INLINE
  159 #endif
  160 
  161 #define PV_STATS
  162 #ifdef PV_STATS
  163 #define PV_STAT(x)      do { x ; } while (0)
  164 #else
  165 #define PV_STAT(x)      do { } while (0)
  166 #endif
  167 
  168 #define pa_index(pa)    ((pa) >> PDRSHIFT)
  169 #define pa_to_pvh(pa)   (&pv_table[pa_index(pa)])
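/*
 * pa_index() names the 2MB superpage frame that contains a physical
 * address, and pa_to_pvh() returns that frame's entry in pv_table.
 * A small worked example, assuming the usual PDRSHIFT of 21:
 *
 *      pa_index(0x40321000) == 0x40321000 >> 21 == 513
 *
 * so every physical address in [0x40200000, 0x40400000) shares
 * pv_table[513].
 */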
  170 
  171 struct pmap kernel_pmap_store;
  172 
  173 vm_offset_t virtual_avail;      /* VA of first avail page (after kernel bss) */
  174 vm_offset_t virtual_end;        /* VA of last avail page (end of kernel AS) */
  175 
  176 static int ndmpdp;
  177 static vm_paddr_t dmaplimit;
  178 vm_offset_t kernel_vm_end = VM_MIN_KERNEL_ADDRESS;
  179 pt_entry_t pg_nx;
  180 
  181 SYSCTL_NODE(_vm, OID_AUTO, pmap, CTLFLAG_RD, 0, "VM/pmap parameters");
  182 
  183 static int pg_ps_enabled = 1;
  184 SYSCTL_INT(_vm_pmap, OID_AUTO, pg_ps_enabled, CTLFLAG_RD, &pg_ps_enabled, 0,
  185     "Are large page mappings enabled?");
  186 
  187 static u_int64_t        KPTphys;        /* phys addr of kernel level 1 */
  188 static u_int64_t        KPDphys;        /* phys addr of kernel level 2 */
  189 u_int64_t               KPDPphys;       /* phys addr of kernel level 3 */
  190 u_int64_t               KPML4phys;      /* phys addr of kernel level 4 */
  191 
  192 static u_int64_t        DMPDphys;       /* phys addr of direct mapped level 2 */
  193 static u_int64_t        DMPDPphys;      /* phys addr of direct mapped level 3 */
  194 
  195 /*
  196  * Data for the pv entry allocation mechanism
  197  */
  198 static int pv_entry_count = 0, pv_entry_max = 0, pv_entry_high_water = 0;
  199 static struct md_page *pv_table;
  200 static int shpgperproc = PMAP_SHPGPERPROC;
  201 
  202 /*
  203  * All those kernel PT submaps that BSD is so fond of
  204  */
  205 pt_entry_t *CMAP1 = 0;
  206 caddr_t CADDR1 = 0;
  207 struct msgbuf *msgbufp = 0;
  208 
  209 /*
  210  * Crashdump maps.
  211  */
  212 static caddr_t crashdumpmap;
  213 
  214 static void     free_pv_entry(pmap_t pmap, pv_entry_t pv);
  215 static pv_entry_t get_pv_entry(pmap_t locked_pmap, int try);
  216 static void     pmap_pv_demote_pde(pmap_t pmap, vm_offset_t va, vm_paddr_t pa);
  217 static boolean_t pmap_pv_insert_pde(pmap_t pmap, vm_offset_t va, vm_paddr_t pa);
  218 static void     pmap_pv_promote_pde(pmap_t pmap, vm_offset_t va, vm_paddr_t pa);
  219 static void     pmap_pvh_free(struct md_page *pvh, pmap_t pmap, vm_offset_t va);
  220 static pv_entry_t pmap_pvh_remove(struct md_page *pvh, pmap_t pmap,
  221                     vm_offset_t va);
  222 static int      pmap_pvh_wired_mappings(struct md_page *pvh, int count);
  223 
  224 static int pmap_change_attr_locked(vm_offset_t va, vm_size_t size, int mode);
  225 static boolean_t pmap_demote_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t va);
  226 static boolean_t pmap_demote_pdpe(pmap_t pmap, pdp_entry_t *pdpe,
  227     vm_offset_t va);
  228 static boolean_t pmap_enter_pde(pmap_t pmap, vm_offset_t va, vm_page_t m,
  229     vm_prot_t prot);
  230 static vm_page_t pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va,
  231     vm_page_t m, vm_prot_t prot, vm_page_t mpte);
  232 static void pmap_fill_ptp(pt_entry_t *firstpte, pt_entry_t newpte);
  233 static void pmap_insert_pt_page(pmap_t pmap, vm_page_t mpte);
  234 static void pmap_invalidate_cache_range(vm_offset_t sva, vm_offset_t eva);
  235 static boolean_t pmap_is_modified_pvh(struct md_page *pvh);
  236 static void pmap_kenter_attr(vm_offset_t va, vm_paddr_t pa, int mode);
  237 static vm_page_t pmap_lookup_pt_page(pmap_t pmap, vm_offset_t va);
  238 static void pmap_pde_attr(pd_entry_t *pde, int cache_bits);
  239 static void pmap_promote_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t va);
  240 static boolean_t pmap_protect_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t sva,
  241     vm_prot_t prot);
  242 static void pmap_pte_attr(pt_entry_t *pte, int cache_bits);
  243 static int pmap_remove_pde(pmap_t pmap, pd_entry_t *pdq, vm_offset_t sva,
  244                 vm_page_t *free);
  245 static int pmap_remove_pte(pmap_t pmap, pt_entry_t *ptq,
  246                 vm_offset_t sva, pd_entry_t ptepde, vm_page_t *free);
  247 static void pmap_remove_pt_page(pmap_t pmap, vm_page_t mpte);
  248 static void pmap_remove_page(pmap_t pmap, vm_offset_t va, pd_entry_t *pde,
  249     vm_page_t *free);
  250 static void pmap_remove_entry(struct pmap *pmap, vm_page_t m,
  251                 vm_offset_t va);
  252 static void pmap_insert_entry(pmap_t pmap, vm_offset_t va, vm_page_t m);
  253 static boolean_t pmap_try_insert_pv_entry(pmap_t pmap, vm_offset_t va,
  254     vm_page_t m);
  255 
  256 static vm_page_t pmap_allocpde(pmap_t pmap, vm_offset_t va, int flags);
  257 static vm_page_t pmap_allocpte(pmap_t pmap, vm_offset_t va, int flags);
  258 
  259 static vm_page_t _pmap_allocpte(pmap_t pmap, vm_pindex_t ptepindex, int flags);
  260 static int _pmap_unwire_pte_hold(pmap_t pmap, vm_offset_t va, vm_page_t m,
  261                 vm_page_t* free);
  262 static int pmap_unuse_pt(pmap_t, vm_offset_t, pd_entry_t, vm_page_t *);
  263 static vm_offset_t pmap_kmem_choose(vm_offset_t addr);
  264 
  265 CTASSERT(1 << PDESHIFT == sizeof(pd_entry_t));
  266 CTASSERT(1 << PTESHIFT == sizeof(pt_entry_t));
  267 
  268 /*
  269  * Move the kernel virtual free pointer to the next
  270  * 2MB.  This is used to help improve performance
  271  * by using a large (2MB) page for much of the kernel
  272  * (.text, .data, .bss)
  273  */
  274 static vm_offset_t
  275 pmap_kmem_choose(vm_offset_t addr)
  276 {
  277         vm_offset_t newaddr = addr;
  278 
  279         newaddr = (addr + (NBPDR - 1)) & ~(NBPDR - 1);
  280         return newaddr;
  281 }
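/*
 * For example, with NBPDR == 2MB (0x200000), the rounding above maps
 * 0xffffffff80612345 to 0xffffffff80800000, while an address that is
 * already 2MB-aligned is returned unchanged.
 */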
  282 
  283 /********************/
  284 /* Inline functions */
  285 /********************/
  286 
  287 /* Return a non-clipped PD index for a given VA */
  288 static __inline vm_pindex_t
  289 pmap_pde_pindex(vm_offset_t va)
  290 {
  291         return va >> PDRSHIFT;
  292 }
  293 
  294 
  295 /* Return various clipped indexes for a given VA */
  296 static __inline vm_pindex_t
  297 pmap_pte_index(vm_offset_t va)
  298 {
  299 
  300         return ((va >> PAGE_SHIFT) & ((1ul << NPTEPGSHIFT) - 1));
  301 }
  302 
  303 static __inline vm_pindex_t
  304 pmap_pde_index(vm_offset_t va)
  305 {
  306 
  307         return ((va >> PDRSHIFT) & ((1ul << NPDEPGSHIFT) - 1));
  308 }
  309 
  310 static __inline vm_pindex_t
  311 pmap_pdpe_index(vm_offset_t va)
  312 {
  313 
  314         return ((va >> PDPSHIFT) & ((1ul << NPDPEPGSHIFT) - 1));
  315 }
  316 
  317 static __inline vm_pindex_t
  318 pmap_pml4e_index(vm_offset_t va)
  319 {
  320 
  321         return ((va >> PML4SHIFT) & ((1ul << NPML4EPGSHIFT) - 1));
  322 }
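/*
 * Worked example of the clipping above, assuming 9-bit indices at each
 * level (NPTEPGSHIFT == NPDEPGSHIFT == NPDPEPGSHIFT == NPML4EPGSHIFT == 9)
 * and 4KB base pages: the canonical address va == 0xffffffff80000000
 * (KERNBASE) decomposes into
 *
 *      pmap_pml4e_index(va) == 511     (bits 47..39)
 *      pmap_pdpe_index(va)  == 510     (bits 38..30)
 *      pmap_pde_index(va)   == 0       (bits 29..21)
 *      pmap_pte_index(va)   == 0       (bits 20..12)
 *
 * with the remaining low 12 bits selecting the byte within the page.
 */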
  323 
  324 /* Return a pointer to the PML4 slot that corresponds to a VA */
  325 static __inline pml4_entry_t *
  326 pmap_pml4e(pmap_t pmap, vm_offset_t va)
  327 {
  328 
  329         return (&pmap->pm_pml4[pmap_pml4e_index(va)]);
  330 }
  331 
  332 /* Return a pointer to the PDP slot that corresponds to a VA */
  333 static __inline pdp_entry_t *
  334 pmap_pml4e_to_pdpe(pml4_entry_t *pml4e, vm_offset_t va)
  335 {
  336         pdp_entry_t *pdpe;
  337 
  338         pdpe = (pdp_entry_t *)PHYS_TO_DMAP(*pml4e & PG_FRAME);
  339         return (&pdpe[pmap_pdpe_index(va)]);
  340 }
  341 
  342 /* Return a pointer to the PDP slot that corresponds to a VA */
  343 static __inline pdp_entry_t *
  344 pmap_pdpe(pmap_t pmap, vm_offset_t va)
  345 {
  346         pml4_entry_t *pml4e;
  347 
  348         pml4e = pmap_pml4e(pmap, va);
  349         if ((*pml4e & PG_V) == 0)
  350                 return NULL;
  351         return (pmap_pml4e_to_pdpe(pml4e, va));
  352 }
  353 
  354 /* Return a pointer to the PD slot that corresponds to a VA */
  355 static __inline pd_entry_t *
  356 pmap_pdpe_to_pde(pdp_entry_t *pdpe, vm_offset_t va)
  357 {
  358         pd_entry_t *pde;
  359 
  360         pde = (pd_entry_t *)PHYS_TO_DMAP(*pdpe & PG_FRAME);
  361         return (&pde[pmap_pde_index(va)]);
  362 }
  363 
  364 /* Return a pointer to the PD slot that corresponds to a VA */
  365 static __inline pd_entry_t *
  366 pmap_pde(pmap_t pmap, vm_offset_t va)
  367 {
  368         pdp_entry_t *pdpe;
  369 
  370         pdpe = pmap_pdpe(pmap, va);
  371         if (pdpe == NULL || (*pdpe & PG_V) == 0)
  372                  return NULL;
  373         return (pmap_pdpe_to_pde(pdpe, va));
  374 }
  375 
  376 /* Return a pointer to the PT slot that corresponds to a VA */
  377 static __inline pt_entry_t *
  378 pmap_pde_to_pte(pd_entry_t *pde, vm_offset_t va)
  379 {
  380         pt_entry_t *pte;
  381 
  382         pte = (pt_entry_t *)PHYS_TO_DMAP(*pde & PG_FRAME);
  383         return (&pte[pmap_pte_index(va)]);
  384 }
  385 
  386 /* Return a pointer to the PT slot that corresponds to a VA */
  387 static __inline pt_entry_t *
  388 pmap_pte(pmap_t pmap, vm_offset_t va)
  389 {
  390         pd_entry_t *pde;
  391 
  392         pde = pmap_pde(pmap, va);
  393         if (pde == NULL || (*pde & PG_V) == 0)
  394                 return NULL;
  395         if ((*pde & PG_PS) != 0)        /* compat with i386 pmap_pte() */
  396                 return ((pt_entry_t *)pde);
  397         return (pmap_pde_to_pte(pde, va));
  398 }
  399 
  400 
  401 PMAP_INLINE pt_entry_t *
  402 vtopte(vm_offset_t va)
  403 {
  404         u_int64_t mask = ((1ul << (NPTEPGSHIFT + NPDEPGSHIFT + NPDPEPGSHIFT + NPML4EPGSHIFT)) - 1);
  405 
  406         return (PTmap + ((va >> PAGE_SHIFT) & mask));
  407 }
  408 
  409 static __inline pd_entry_t *
  410 vtopde(vm_offset_t va)
  411 {
  412         u_int64_t mask = ((1ul << (NPDEPGSHIFT + NPDPEPGSHIFT + NPML4EPGSHIFT)) - 1);
  413 
  414         return (PDmap + ((va >> PDRSHIFT) & mask));
  415 }
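/*
 * vtopte() and vtopde() rely on the recursive PML4 slot that
 * create_pagetables() installs below ("recursively map PML4 to itself"):
 * through that slot the page-table pages appear as one flat array of
 * PTEs at PTmap (and of PDEs at PDmap).  With the usual 9-bit shifts the
 * vtopte() mask is
 *
 *      mask == (1ul << (9 + 9 + 9 + 9)) - 1
 *
 * which selects bits 47..12 of va as an index into that array.
 */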
  416 
  417 static u_int64_t
  418 allocpages(vm_paddr_t *firstaddr, int n)
  419 {
  420         u_int64_t ret;
  421 
  422         ret = *firstaddr;
  423         bzero((void *)ret, n * PAGE_SIZE);
  424         *firstaddr += n * PAGE_SIZE;
  425         return (ret);
  426 }
  427 
  428 static void
  429 create_pagetables(vm_paddr_t *firstaddr)
  430 {
  431         int i;
  432 
  433         /* Allocate pages */
  434         KPTphys = allocpages(firstaddr, NKPT);
  435         KPML4phys = allocpages(firstaddr, 1);
  436         KPDPphys = allocpages(firstaddr, NKPML4E);
  437         KPDphys = allocpages(firstaddr, NKPDPE);
  438 
  439         ndmpdp = (ptoa(Maxmem) + NBPDP - 1) >> PDPSHIFT;
  440         if (ndmpdp < 4)         /* Minimum 4GB of dirmap */
  441                 ndmpdp = 4;
  442         DMPDPphys = allocpages(firstaddr, NDMPML4E);
  443         if (TRUE || (amd_feature & AMDID_PAGE1GB) == 0)
  444                 DMPDphys = allocpages(firstaddr, ndmpdp);
  445         dmaplimit = (vm_paddr_t)ndmpdp << PDPSHIFT;
  446 
  447         /* Fill in the underlying page table pages */
  448         /* Read-only from zero to physfree */
  449         /* XXX not fully used, underneath 2M pages */
  450         for (i = 0; (i << PAGE_SHIFT) < *firstaddr; i++) {
  451                 ((pt_entry_t *)KPTphys)[i] = i << PAGE_SHIFT;
  452                 ((pt_entry_t *)KPTphys)[i] |= PG_RW | PG_V | PG_G;
  453         }
  454 
  455         /* Now map the page tables at their location within PTmap */
  456         for (i = 0; i < NKPT; i++) {
  457                 ((pd_entry_t *)KPDphys)[i] = KPTphys + (i << PAGE_SHIFT);
  458                 ((pd_entry_t *)KPDphys)[i] |= PG_RW | PG_V;
  459         }
  460 
  461         /* Map from zero to end of allocations under 2M pages */
  462         /* This replaces some of the KPTphys entries above */
  463         for (i = 0; (i << PDRSHIFT) < *firstaddr; i++) {
  464                 ((pd_entry_t *)KPDphys)[i] = i << PDRSHIFT;
  465                 ((pd_entry_t *)KPDphys)[i] |= PG_RW | PG_V | PG_PS | PG_G;
  466         }
  467 
  468         /* And connect up the PD to the PDP */
  469         for (i = 0; i < NKPDPE; i++) {
  470                 ((pdp_entry_t *)KPDPphys)[i + KPDPI] = KPDphys +
  471                     (i << PAGE_SHIFT);
  472                 ((pdp_entry_t *)KPDPphys)[i + KPDPI] |= PG_RW | PG_V | PG_U;
  473         }
  474 
  475         /* Now set up the direct map space using either 2MB or 1GB pages */
  476         /* Preset PG_M and PG_A because demotion expects it */
  477         if (TRUE || (amd_feature & AMDID_PAGE1GB) == 0) {
  478                 for (i = 0; i < NPDEPG * ndmpdp; i++) {
  479                         ((pd_entry_t *)DMPDphys)[i] = (vm_paddr_t)i << PDRSHIFT;
  480                         ((pd_entry_t *)DMPDphys)[i] |= PG_RW | PG_V | PG_PS |
  481                             PG_G | PG_M | PG_A;
  482                 }
  483                 /* And the direct map space's PDP */
  484                 for (i = 0; i < ndmpdp; i++) {
  485                         ((pdp_entry_t *)DMPDPphys)[i] = DMPDphys +
  486                             (i << PAGE_SHIFT);
  487                         ((pdp_entry_t *)DMPDPphys)[i] |= PG_RW | PG_V | PG_U;
  488                 }
  489         } else {
  490                 for (i = 0; i < ndmpdp; i++) {
  491                         ((pdp_entry_t *)DMPDPphys)[i] =
  492                             (vm_paddr_t)i << PDPSHIFT;
  493                         ((pdp_entry_t *)DMPDPphys)[i] |= PG_RW | PG_V | PG_PS |
  494                             PG_G | PG_M | PG_A;
  495                 }
  496         }
  497 
  498         /* And recursively map PML4 to itself in order to get PTmap */
  499         ((pdp_entry_t *)KPML4phys)[PML4PML4I] = KPML4phys;
  500         ((pdp_entry_t *)KPML4phys)[PML4PML4I] |= PG_RW | PG_V | PG_U;
  501 
  502         /* Connect the Direct Map slot up to the PML4 */
  503         ((pdp_entry_t *)KPML4phys)[DMPML4I] = DMPDPphys;
  504         ((pdp_entry_t *)KPML4phys)[DMPML4I] |= PG_RW | PG_V | PG_U;
  505 
  506         /* Connect the KVA slot up to the PML4 */
  507         ((pdp_entry_t *)KPML4phys)[KPML4I] = KPDPphys;
  508         ((pdp_entry_t *)KPML4phys)[KPML4I] |= PG_RW | PG_V | PG_U;
  509 }
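/*
 * In outline, the bootstrap tables built above are wired together as:
 *
 *      KPML4phys[KPML4I]    -> KPDPphys -> KPDphys -> KPTphys (4KB PTEs),
 *                              with 2MB PG_PS entries overlaying the
 *                              range already allocated below *firstaddr;
 *      KPML4phys[DMPML4I]   -> DMPDPphys -> DMPDphys (2MB direct-map
 *                              pages covering at least 4GB, up to
 *                              dmaplimit);
 *      KPML4phys[PML4PML4I] -> KPML4phys (the recursive mapping that
 *                              makes PTmap and PDmap work).
 */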
  510 
  511 /*
  512  *      Bootstrap the system enough to run with virtual memory.
  513  *
  514  *      On amd64 this is called after mapping has already been enabled
  515  *      and just syncs the pmap module with what has already been done.
  516  *      [We can't call it easily with mapping off since the kernel is not
  517  *      mapped with PA == VA, hence we would have to relocate every address
  518  *      from the linked base (virtual) address "KERNBASE" to the actual
  519  *      (physical) address starting relative to 0]
  520  */
  521 void
  522 pmap_bootstrap(vm_paddr_t *firstaddr)
  523 {
  524         vm_offset_t va;
  525         pt_entry_t *pte, *unused;
  526 
  527         /*
  528          * Create an initial set of page tables to run the kernel in.
  529          */
  530         create_pagetables(firstaddr);
  531 
  532         virtual_avail = (vm_offset_t) KERNBASE + *firstaddr;
  533         virtual_avail = pmap_kmem_choose(virtual_avail);
  534 
  535         virtual_end = VM_MAX_KERNEL_ADDRESS;
  536 
  537 
  538         /* XXX do %cr0 as well */
  539         load_cr4(rcr4() | CR4_PGE | CR4_PSE);
  540         load_cr3(KPML4phys);
  541 
  542         /*
  543          * Initialize the kernel pmap (which is statically allocated).
  544          */
  545         PMAP_LOCK_INIT(kernel_pmap);
  546         kernel_pmap->pm_pml4 = (pdp_entry_t *)PHYS_TO_DMAP(KPML4phys);
  547         kernel_pmap->pm_root = NULL;
  548         kernel_pmap->pm_active = -1;    /* don't allow deactivation */
  549         TAILQ_INIT(&kernel_pmap->pm_pvchunk);
  550 
  551         /*
  552          * Reserve some special page table entries/VA space for temporary
  553          * mapping of pages.
  554          */
  555 #define SYSMAP(c, p, v, n)      \
  556         v = (c)va; va += ((n)*PAGE_SIZE); p = pte; pte += (n);
  557 
  558         va = virtual_avail;
  559         pte = vtopte(va);
  560 
  561         /*
  562          * CMAP1 is only used for the memory test.
  563          */
  564         SYSMAP(caddr_t, CMAP1, CADDR1, 1)
  565 
  566         /*
  567          * Crashdump maps.
  568          */
  569         SYSMAP(caddr_t, unused, crashdumpmap, MAXDUMPPGS)
  570 
  571         /*
  572          * msgbufp is used to map the system message buffer.
  573          */
  574         SYSMAP(struct msgbuf *, unused, msgbufp, atop(round_page(MSGBUF_SIZE)))
  575 
  576         virtual_avail = va;
  577 
  578         *CMAP1 = 0;
  579 
  580         invltlb();
  581 
  582         /* Initialize the PAT MSR. */
  583         pmap_init_pat();
  584 }
  585 
  586 /*
  587  * Setup the PAT MSR.
  588  */
  589 void
  590 pmap_init_pat(void)
  591 {
  592         uint64_t pat_msr;
  593 
  594         /* Bail if this CPU doesn't implement PAT. */
  595         if (!(cpu_feature & CPUID_PAT))
  596                 panic("no PAT??");
  597 
  598         /*
  599          * Leave the indices 0-3 at the default of WB, WT, UC, and UC-.
  600          * Program 4 and 5 as WP and WC.
  601          * Leave 6 and 7 as UC and UC-.
  602          */
  603         pat_msr = rdmsr(MSR_PAT);
  604         pat_msr &= ~(PAT_MASK(4) | PAT_MASK(5));
  605         pat_msr |= PAT_VALUE(4, PAT_WRITE_PROTECTED) |
  606             PAT_VALUE(5, PAT_WRITE_COMBINING);
  607         wrmsr(MSR_PAT, pat_msr);
  608 }
  609 
  610 /*
  611  *      Initialize a vm_page's machine-dependent fields.
  612  */
  613 void
  614 pmap_page_init(vm_page_t m)
  615 {
  616 
  617         TAILQ_INIT(&m->md.pv_list);
  618         m->md.pat_mode = PAT_WRITE_BACK;
  619 }
  620 
  621 /*
  622  *      Initialize the pmap module.
  623  *      Called by vm_init, to initialize any structures that the pmap
  624  *      system needs to map virtual memory.
  625  */
  626 void
  627 pmap_init(void)
  628 {
  629         pd_entry_t *pd;
  630         vm_page_t mpte;
  631         vm_size_t s;
  632         int i, pv_npg;
  633 
  634         /*
  635          * Initialize the vm page array entries for the kernel pmap's
  636          * page table pages.
  637          */ 
  638         pd = pmap_pde(kernel_pmap, KERNBASE);
  639         for (i = 0; i < NKPT; i++) {
  640                 if ((pd[i] & (PG_PS | PG_V)) == (PG_PS | PG_V))
  641                         continue;
  642                 KASSERT((pd[i] & PG_V) != 0,
  643                     ("pmap_init: page table page is missing"));
  644                 mpte = PHYS_TO_VM_PAGE(pd[i] & PG_FRAME);
  645                 KASSERT(mpte >= vm_page_array &&
  646                     mpte < &vm_page_array[vm_page_array_size],
  647                     ("pmap_init: page table page is out of range"));
  648                 mpte->pindex = pmap_pde_pindex(KERNBASE) + i;
  649                 mpte->phys_addr = pd[i] & PG_FRAME;
  650         }
  651 
  652         /*
  653          * Initialize the address space (zone) for the pv entries.  Set a
  654          * high water mark so that the system can recover from excessive
  655          * numbers of pv entries.
  656          */
  657         TUNABLE_INT_FETCH("vm.pmap.shpgperproc", &shpgperproc);
  658         pv_entry_max = shpgperproc * maxproc + cnt.v_page_count;
  659         TUNABLE_INT_FETCH("vm.pmap.pv_entries", &pv_entry_max);
  660         pv_entry_high_water = 9 * (pv_entry_max / 10);
  661 
  662         /*
  663          * Are large page mappings enabled?
  664          */
  665         TUNABLE_INT_FETCH("vm.pmap.pg_ps_enabled", &pg_ps_enabled);
  666 
  667         /*
  668          * Calculate the size of the pv head table for superpages.
  669          */
  670         for (i = 0; phys_avail[i + 1]; i += 2);
  671         pv_npg = round_2mpage(phys_avail[(i - 2) + 1]) / NBPDR;
  672 
  673         /*
  674          * Allocate memory for the pv head table for superpages.
  675          */
  676         s = (vm_size_t)(pv_npg * sizeof(struct md_page));
  677         s = round_page(s);
  678         pv_table = (struct md_page *)kmem_alloc(kernel_map, s);
  679         for (i = 0; i < pv_npg; i++)
  680                 TAILQ_INIT(&pv_table[i].pv_list);
  681 }
  682 
  683 static int
  684 pmap_pventry_proc(SYSCTL_HANDLER_ARGS)
  685 {
  686         int error;
  687 
  688         error = sysctl_handle_int(oidp, oidp->oid_arg1, oidp->oid_arg2, req);
  689         if (error == 0 && req->newptr) {
  690                 shpgperproc = (pv_entry_max - cnt.v_page_count) / maxproc;
  691                 pv_entry_high_water = 9 * (pv_entry_max / 10);
  692         }
  693         return (error);
  694 }
  695 SYSCTL_PROC(_vm_pmap, OID_AUTO, pv_entry_max, CTLTYPE_INT|CTLFLAG_RW, 
  696     &pv_entry_max, 0, pmap_pventry_proc, "IU", "Max number of PV entries");
  697 
  698 static int
  699 pmap_shpgperproc_proc(SYSCTL_HANDLER_ARGS)
  700 {
  701         int error;
  702 
  703         error = sysctl_handle_int(oidp, oidp->oid_arg1, oidp->oid_arg2, req);
  704         if (error == 0 && req->newptr) {
  705                 pv_entry_max = shpgperproc * maxproc + cnt.v_page_count;
  706                 pv_entry_high_water = 9 * (pv_entry_max / 10);
  707         }
  708         return (error);
  709 }
  710 SYSCTL_PROC(_vm_pmap, OID_AUTO, shpgperproc, CTLTYPE_INT|CTLFLAG_RW, 
  711     &shpgperproc, 0, pmap_shpgperproc_proc, "IU", "Page share factor per proc");
  712 
  713 SYSCTL_NODE(_vm_pmap, OID_AUTO, pde, CTLFLAG_RD, 0,
  714     "2MB page mapping counters");
  715 
  716 static u_long pmap_pde_demotions;
  717 SYSCTL_ULONG(_vm_pmap_pde, OID_AUTO, demotions, CTLFLAG_RD,
  718     &pmap_pde_demotions, 0, "2MB page demotions");
  719 
  720 static u_long pmap_pde_mappings;
  721 SYSCTL_ULONG(_vm_pmap_pde, OID_AUTO, mappings, CTLFLAG_RD,
  722     &pmap_pde_mappings, 0, "2MB page mappings");
  723 
  724 static u_long pmap_pde_p_failures;
  725 SYSCTL_ULONG(_vm_pmap_pde, OID_AUTO, p_failures, CTLFLAG_RD,
  726     &pmap_pde_p_failures, 0, "2MB page promotion failures");
  727 
  728 static u_long pmap_pde_promotions;
  729 SYSCTL_ULONG(_vm_pmap_pde, OID_AUTO, promotions, CTLFLAG_RD,
  730     &pmap_pde_promotions, 0, "2MB page promotions");
  731 
  732 SYSCTL_NODE(_vm_pmap, OID_AUTO, pdpe, CTLFLAG_RD, 0,
  733     "1GB page mapping counters");
  734 
  735 static u_long pmap_pdpe_demotions;
  736 SYSCTL_ULONG(_vm_pmap_pdpe, OID_AUTO, demotions, CTLFLAG_RD,
  737     &pmap_pdpe_demotions, 0, "1GB page demotions");
  738 
  739 
  740 /***************************************************
  741  * Low level helper routines.....
  742  ***************************************************/
  743 
  744 /*
  745  * Determine the appropriate bits to set in a PTE or PDE for a specified
  746  * caching mode.
  747  */
  748 static int
  749 pmap_cache_bits(int mode, boolean_t is_pde)
  750 {
  751         int pat_flag, pat_index, cache_bits;
  752 
  753         /* The PAT bit is different for PTE's and PDE's. */
  754         pat_flag = is_pde ? PG_PDE_PAT : PG_PTE_PAT;
  755 
  756         /* Map the caching mode to a PAT index. */
  757         switch (mode) {
  758         case PAT_UNCACHEABLE:
  759                 pat_index = 3;
  760                 break;
  761         case PAT_WRITE_THROUGH:
  762                 pat_index = 1;
  763                 break;
  764         case PAT_WRITE_BACK:
  765                 pat_index = 0;
  766                 break;
  767         case PAT_UNCACHED:
  768                 pat_index = 2;
  769                 break;
  770         case PAT_WRITE_COMBINING:
  771                 pat_index = 5;
  772                 break;
  773         case PAT_WRITE_PROTECTED:
  774                 pat_index = 4;
  775                 break;
  776         default:
  777                 panic("Unknown caching mode %d\n", mode);
  778         }
  779 
  780         /* Map the 3-bit index value into the PAT, PCD, and PWT bits. */
  781         cache_bits = 0;
  782         if (pat_index & 0x4)
  783                 cache_bits |= pat_flag;
  784         if (pat_index & 0x2)
  785                 cache_bits |= PG_NC_PCD;
  786         if (pat_index & 0x1)
  787                 cache_bits |= PG_NC_PWT;
  788         return (cache_bits);
  789 }
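/*
 * For example, pmap_cache_bits(PAT_WRITE_COMBINING, 0) selects PAT
 * index 5 (binary 101) and therefore returns PG_PTE_PAT | PG_NC_PWT;
 * the same mode for a 2MB mapping returns PG_PDE_PAT | PG_NC_PWT,
 * because the PAT bit sits at bit 7 of a PTE but at bit 12 of a
 * PG_PS page directory entry.
 */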
  790 #ifdef SMP
  791 /*
  792  * For SMP, these functions have to use the IPI mechanism for coherence.
  793  *
  794  * N.B.: Before calling any of the following TLB invalidation functions,
  795  * the calling processor must ensure that all stores updating a non-
  796  * kernel page table are globally performed.  Otherwise, another
  797  * processor could cache an old, pre-update entry without being
  798  * invalidated.  This can happen one of two ways: (1) The pmap becomes
  799  * active on another processor after its pm_active field is checked by
  800  * one of the following functions but before a store updating the page
  801  * table is globally performed. (2) The pmap becomes active on another
  802  * processor before its pm_active field is checked but due to
   803  * speculative loads one of the following functions still reads the
  804  * pmap as inactive on the other processor.
  805  * 
  806  * The kernel page table is exempt because its pm_active field is
  807  * immutable.  The kernel page table is always active on every
  808  * processor.
  809  */
  810 void
  811 pmap_invalidate_page(pmap_t pmap, vm_offset_t va)
  812 {
  813         u_int cpumask;
  814         u_int other_cpus;
  815 
  816         sched_pin();
  817         if (pmap == kernel_pmap || pmap->pm_active == all_cpus) {
  818                 invlpg(va);
  819                 smp_invlpg(va);
  820         } else {
  821                 cpumask = PCPU_GET(cpumask);
  822                 other_cpus = PCPU_GET(other_cpus);
  823                 if (pmap->pm_active & cpumask)
  824                         invlpg(va);
  825                 if (pmap->pm_active & other_cpus)
  826                         smp_masked_invlpg(pmap->pm_active & other_cpus, va);
  827         }
  828         sched_unpin();
  829 }
  830 
  831 void
  832 pmap_invalidate_range(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
  833 {
  834         u_int cpumask;
  835         u_int other_cpus;
  836         vm_offset_t addr;
  837 
  838         sched_pin();
  839         if (pmap == kernel_pmap || pmap->pm_active == all_cpus) {
  840                 for (addr = sva; addr < eva; addr += PAGE_SIZE)
  841                         invlpg(addr);
  842                 smp_invlpg_range(sva, eva);
  843         } else {
  844                 cpumask = PCPU_GET(cpumask);
  845                 other_cpus = PCPU_GET(other_cpus);
  846                 if (pmap->pm_active & cpumask)
  847                         for (addr = sva; addr < eva; addr += PAGE_SIZE)
  848                                 invlpg(addr);
  849                 if (pmap->pm_active & other_cpus)
  850                         smp_masked_invlpg_range(pmap->pm_active & other_cpus,
  851                             sva, eva);
  852         }
  853         sched_unpin();
  854 }
  855 
  856 void
  857 pmap_invalidate_all(pmap_t pmap)
  858 {
  859         u_int cpumask;
  860         u_int other_cpus;
  861 
  862         sched_pin();
  863         if (pmap == kernel_pmap || pmap->pm_active == all_cpus) {
  864                 invltlb();
  865                 smp_invltlb();
  866         } else {
  867                 cpumask = PCPU_GET(cpumask);
  868                 other_cpus = PCPU_GET(other_cpus);
  869                 if (pmap->pm_active & cpumask)
  870                         invltlb();
  871                 if (pmap->pm_active & other_cpus)
  872                         smp_masked_invltlb(pmap->pm_active & other_cpus);
  873         }
  874         sched_unpin();
  875 }
  876 
  877 void
  878 pmap_invalidate_cache(void)
  879 {
  880 
  881         sched_pin();
  882         wbinvd();
  883         smp_cache_flush();
  884         sched_unpin();
  885 }
  886 #else /* !SMP */
  887 /*
  888  * Normal, non-SMP, invalidation functions.
  889  * We inline these within pmap.c for speed.
  890  */
  891 PMAP_INLINE void
  892 pmap_invalidate_page(pmap_t pmap, vm_offset_t va)
  893 {
  894 
  895         if (pmap == kernel_pmap || pmap->pm_active)
  896                 invlpg(va);
  897 }
  898 
  899 PMAP_INLINE void
  900 pmap_invalidate_range(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
  901 {
  902         vm_offset_t addr;
  903 
  904         if (pmap == kernel_pmap || pmap->pm_active)
  905                 for (addr = sva; addr < eva; addr += PAGE_SIZE)
  906                         invlpg(addr);
  907 }
  908 
  909 PMAP_INLINE void
  910 pmap_invalidate_all(pmap_t pmap)
  911 {
  912 
  913         if (pmap == kernel_pmap || pmap->pm_active)
  914                 invltlb();
  915 }
  916 
  917 PMAP_INLINE void
  918 pmap_invalidate_cache(void)
  919 {
  920 
  921         wbinvd();
  922 }
  923 #endif /* !SMP */
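/*
 * A minimal sketch of the ordering that the N.B. above requires: the
 * store updating the page table must be globally performed before the
 * invalidation is issued.  For a single kernel page that is roughly
 *
 *      pte = vtopte(va);
 *      pte_store(pte, pa | PG_RW | PG_V | PG_G);
 *      pmap_invalidate_page(kernel_pmap, va);
 *
 * (compare pmap_qenter() below, which stores a run of PTEs and only
 * then calls pmap_invalidate_range()).
 */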
  924 
  925 static void
  926 pmap_invalidate_cache_range(vm_offset_t sva, vm_offset_t eva)
  927 {
  928 
  929         KASSERT((sva & PAGE_MASK) == 0,
  930             ("pmap_invalidate_cache_range: sva not page-aligned"));
  931         KASSERT((eva & PAGE_MASK) == 0,
  932             ("pmap_invalidate_cache_range: eva not page-aligned"));
  933 
  934         if (cpu_feature & CPUID_SS)
  935                 ; /* If "Self Snoop" is supported, do nothing. */
  936         else if (cpu_feature & CPUID_CLFSH) {
  937 
  938                 /*
  939                  * Otherwise, do per-cache line flush.  Use the mfence
  940                  * instruction to insure that previous stores are
  941                  * included in the write-back.  The processor
  942                  * propagates flush to other processors in the cache
  943                  * coherence domain.
  944                  */
  945                 mfence();
  946                 for (; sva < eva; sva += cpu_clflush_line_size)
  947                         clflush(sva);
  948                 mfence();
  949         } else {
  950 
  951                 /*
  952                  * No targeted cache flush methods are supported by CPU,
  953                  * globally invalidate cache as a last resort.
  954                  */
  955                 pmap_invalidate_cache();
  956         }
  957 }
  958 
  959 /*
  960  * Are we current address space or kernel?
  961  */
  962 static __inline int
  963 pmap_is_current(pmap_t pmap)
  964 {
  965         return (pmap == kernel_pmap ||
  966             (pmap->pm_pml4[PML4PML4I] & PG_FRAME) == (PML4pml4e[0] & PG_FRAME));
  967 }
  968 
  969 /*
  970  *      Routine:        pmap_extract
  971  *      Function:
  972  *              Extract the physical page address associated
  973  *              with the given map/virtual_address pair.
  974  */
  975 vm_paddr_t 
  976 pmap_extract(pmap_t pmap, vm_offset_t va)
  977 {
  978         vm_paddr_t rtval;
  979         pt_entry_t *pte;
  980         pd_entry_t pde, *pdep;
  981 
  982         rtval = 0;
  983         PMAP_LOCK(pmap);
  984         pdep = pmap_pde(pmap, va);
  985         if (pdep != NULL) {
  986                 pde = *pdep;
  987                 if (pde) {
  988                         if ((pde & PG_PS) != 0)
  989                                 rtval = (pde & PG_PS_FRAME) | (va & PDRMASK);
  990                         else {
  991                                 pte = pmap_pde_to_pte(pdep, va);
  992                                 rtval = (*pte & PG_FRAME) | (va & PAGE_MASK);
  993                         }
  994                 }
  995         }
  996         PMAP_UNLOCK(pmap);
  997         return (rtval);
  998 }
  999 
 1000 /*
 1001  *      Routine:        pmap_extract_and_hold
 1002  *      Function:
 1003  *              Atomically extract and hold the physical page
 1004  *              with the given pmap and virtual address pair
 1005  *              if that mapping permits the given protection.
 1006  */
 1007 vm_page_t
 1008 pmap_extract_and_hold(pmap_t pmap, vm_offset_t va, vm_prot_t prot)
 1009 {
 1010         pd_entry_t pde, *pdep;
 1011         pt_entry_t pte;
 1012         vm_page_t m;
 1013 
 1014         m = NULL;
 1015         vm_page_lock_queues();
 1016         PMAP_LOCK(pmap);
 1017         pdep = pmap_pde(pmap, va);
 1018         if (pdep != NULL && (pde = *pdep)) {
 1019                 if (pde & PG_PS) {
 1020                         if ((pde & PG_RW) || (prot & VM_PROT_WRITE) == 0) {
 1021                                 m = PHYS_TO_VM_PAGE((pde & PG_PS_FRAME) |
 1022                                     (va & PDRMASK));
 1023                                 vm_page_hold(m);
 1024                         }
 1025                 } else {
 1026                         pte = *pmap_pde_to_pte(pdep, va);
 1027                         if ((pte & PG_V) &&
 1028                             ((pte & PG_RW) || (prot & VM_PROT_WRITE) == 0)) {
 1029                                 m = PHYS_TO_VM_PAGE(pte & PG_FRAME);
 1030                                 vm_page_hold(m);
 1031                         }
 1032                 }
 1033         }
 1034         vm_page_unlock_queues();
 1035         PMAP_UNLOCK(pmap);
 1036         return (m);
 1037 }
 1038 
 1039 vm_paddr_t
 1040 pmap_kextract(vm_offset_t va)
 1041 {
 1042         pd_entry_t pde;
 1043         vm_paddr_t pa;
 1044 
 1045         if (va >= DMAP_MIN_ADDRESS && va < DMAP_MAX_ADDRESS) {
 1046                 pa = DMAP_TO_PHYS(va);
 1047         } else {
 1048                 pde = *vtopde(va);
 1049                 if (pde & PG_PS) {
 1050                         pa = (pde & PG_PS_FRAME) | (va & PDRMASK);
 1051                 } else {
 1052                         /*
 1053                          * Beware of a concurrent promotion that changes the
 1054                          * PDE at this point!  For example, vtopte() must not
 1055                          * be used to access the PTE because it would use the
 1056                          * new PDE.  It is, however, safe to use the old PDE
 1057                          * because the page table page is preserved by the
 1058                          * promotion.
 1059                          */
 1060                         pa = *pmap_pde_to_pte(&pde, va);
 1061                         pa = (pa & PG_FRAME) | (va & PAGE_MASK);
 1062                 }
 1063         }
 1064         return pa;
 1065 }
 1066 
 1067 /***************************************************
 1068  * Low level mapping routines.....
 1069  ***************************************************/
 1070 
 1071 /*
 1072  * Add a wired page to the kva.
 1073  * Note: not SMP coherent.
 1074  */
 1075 PMAP_INLINE void 
 1076 pmap_kenter(vm_offset_t va, vm_paddr_t pa)
 1077 {
 1078         pt_entry_t *pte;
 1079 
 1080         pte = vtopte(va);
 1081         pte_store(pte, pa | PG_RW | PG_V | PG_G);
 1082 }
 1083 
 1084 static __inline void
 1085 pmap_kenter_attr(vm_offset_t va, vm_paddr_t pa, int mode)
 1086 {
 1087         pt_entry_t *pte;
 1088 
 1089         pte = vtopte(va);
 1090         pte_store(pte, pa | PG_RW | PG_V | PG_G | pmap_cache_bits(mode, 0));
 1091 }
 1092 
 1093 /*
 1094  * Remove a page from the kernel pagetables.
 1095  * Note: not SMP coherent.
 1096  */
 1097 PMAP_INLINE void
 1098 pmap_kremove(vm_offset_t va)
 1099 {
 1100         pt_entry_t *pte;
 1101 
 1102         pte = vtopte(va);
 1103         pte_clear(pte);
 1104 }
 1105 
 1106 /*
 1107  *      Used to map a range of physical addresses into kernel
 1108  *      virtual address space.
 1109  *
 1110  *      The value passed in '*virt' is a suggested virtual address for
 1111  *      the mapping. Architectures which can support a direct-mapped
 1112  *      physical to virtual region can return the appropriate address
 1113  *      within that region, leaving '*virt' unchanged. Other
 1114  *      architectures should map the pages starting at '*virt' and
 1115  *      update '*virt' with the first usable address after the mapped
 1116  *      region.
 1117  */
 1118 vm_offset_t
 1119 pmap_map(vm_offset_t *virt, vm_paddr_t start, vm_paddr_t end, int prot)
 1120 {
 1121         return PHYS_TO_DMAP(start);
 1122 }
 1123 
 1124 
 1125 /*
 1126  * Add a list of wired pages to the kva
 1127  * this routine is only used for temporary
 1128  * kernel mappings that do not need to have
 1129  * page modification or references recorded.
 1130  * Note that old mappings are simply written
 1131  * over.  The page *must* be wired.
 1132  * Note: SMP coherent.  Uses a ranged shootdown IPI.
 1133  */
 1134 void
 1135 pmap_qenter(vm_offset_t sva, vm_page_t *ma, int count)
 1136 {
 1137         pt_entry_t *endpte, oldpte, *pte;
 1138 
 1139         oldpte = 0;
 1140         pte = vtopte(sva);
 1141         endpte = pte + count;
 1142         while (pte < endpte) {
 1143                 oldpte |= *pte;
 1144                 pte_store(pte, VM_PAGE_TO_PHYS(*ma) | PG_G |
 1145                     pmap_cache_bits((*ma)->md.pat_mode, 0) | PG_RW | PG_V);
 1146                 pte++;
 1147                 ma++;
 1148         }
 1149         if ((oldpte & PG_V) != 0)
 1150                 pmap_invalidate_range(kernel_pmap, sva, sva + count *
 1151                     PAGE_SIZE);
 1152 }
 1153 
 1154 /*
 1155  * This routine tears out page mappings from the
 1156  * kernel -- it is meant only for temporary mappings.
 1157  * Note: SMP coherent.  Uses a ranged shootdown IPI.
 1158  */
 1159 void
 1160 pmap_qremove(vm_offset_t sva, int count)
 1161 {
 1162         vm_offset_t va;
 1163 
 1164         va = sva;
 1165         while (count-- > 0) {
 1166                 pmap_kremove(va);
 1167                 va += PAGE_SIZE;
 1168         }
 1169         pmap_invalidate_range(kernel_pmap, sva, va);
 1170 }
 1171 
 1172 /***************************************************
 1173  * Page table page management routines.....
 1174  ***************************************************/
 1175 static __inline void
 1176 pmap_free_zero_pages(vm_page_t free)
 1177 {
 1178         vm_page_t m;
 1179 
 1180         while (free != NULL) {
 1181                 m = free;
 1182                 free = m->right;
 1183                 /* Preserve the page's PG_ZERO setting. */
 1184                 vm_page_free_toq(m);
 1185         }
 1186 }
 1187 
 1188 /*
 1189  * Schedule the specified unused page table page to be freed.  Specifically,
 1190  * add the page to the specified list of pages that will be released to the
 1191  * physical memory manager after the TLB has been updated.
 1192  */
 1193 static __inline void
 1194 pmap_add_delayed_free_list(vm_page_t m, vm_page_t *free, boolean_t set_PG_ZERO)
 1195 {
 1196 
 1197         if (set_PG_ZERO)
 1198                 m->flags |= PG_ZERO;
 1199         else
 1200                 m->flags &= ~PG_ZERO;
 1201         m->right = *free;
 1202         *free = m;
 1203 }
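/*
 * The two helpers above implement the deferred-release discipline used
 * by the remove paths later in this file, roughly:
 *
 *      vm_page_t free = NULL;
 *      ...
 *      pmap_unuse_pt(pmap, va, ptepde, &free);   (may queue PT pages)
 *      pmap_invalidate_page(pmap, va);           (TLB shootdown first)
 *      pmap_free_zero_pages(free);               (only then free them)
 */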
 1204         
 1205 /*
 1206  * Inserts the specified page table page into the specified pmap's collection
 1207  * of idle page table pages.  Each of a pmap's page table pages is responsible
 1208  * for mapping a distinct range of virtual addresses.  The pmap's collection is
 1209  * ordered by this virtual address range.
 1210  */
 1211 static void
 1212 pmap_insert_pt_page(pmap_t pmap, vm_page_t mpte)
 1213 {
 1214         vm_page_t root;
 1215 
 1216         PMAP_LOCK_ASSERT(pmap, MA_OWNED);
 1217         root = pmap->pm_root;
 1218         if (root == NULL) {
 1219                 mpte->left = NULL;
 1220                 mpte->right = NULL;
 1221         } else {
 1222                 root = vm_page_splay(mpte->pindex, root);
 1223                 if (mpte->pindex < root->pindex) {
 1224                         mpte->left = root->left;
 1225                         mpte->right = root;
 1226                         root->left = NULL;
 1227                 } else if (mpte->pindex == root->pindex)
 1228                         panic("pmap_insert_pt_page: pindex already inserted");
 1229                 else {
 1230                         mpte->right = root->right;
 1231                         mpte->left = root;
 1232                         root->right = NULL;
 1233                 }
 1234         }
 1235         pmap->pm_root = mpte;
 1236 }
 1237 
 1238 /*
 1239  * Looks for a page table page mapping the specified virtual address in the
 1240  * specified pmap's collection of idle page table pages.  Returns NULL if there
 1241  * is no page table page corresponding to the specified virtual address.
 1242  */
 1243 static vm_page_t
 1244 pmap_lookup_pt_page(pmap_t pmap, vm_offset_t va)
 1245 {
 1246         vm_page_t mpte;
 1247         vm_pindex_t pindex = pmap_pde_pindex(va);
 1248 
 1249         PMAP_LOCK_ASSERT(pmap, MA_OWNED);
 1250         if ((mpte = pmap->pm_root) != NULL && mpte->pindex != pindex) {
 1251                 mpte = vm_page_splay(pindex, mpte);
 1252                 if ((pmap->pm_root = mpte)->pindex != pindex)
 1253                         mpte = NULL;
 1254         }
 1255         return (mpte);
 1256 }
 1257 
 1258 /*
 1259  * Removes the specified page table page from the specified pmap's collection
 1260  * of idle page table pages.  The specified page table page must be a member of
 1261  * the pmap's collection.
 1262  */
 1263 static void
 1264 pmap_remove_pt_page(pmap_t pmap, vm_page_t mpte)
 1265 {
 1266         vm_page_t root;
 1267 
 1268         PMAP_LOCK_ASSERT(pmap, MA_OWNED);
 1269         if (mpte != pmap->pm_root) {
 1270                 root = vm_page_splay(mpte->pindex, pmap->pm_root);
 1271                 KASSERT(mpte == root,
 1272                     ("pmap_remove_pt_page: mpte %p is missing from pmap %p",
 1273                     mpte, pmap));
 1274         }
 1275         if (mpte->left == NULL)
 1276                 root = mpte->right;
 1277         else {
 1278                 root = vm_page_splay(mpte->pindex, mpte->left);
 1279                 root->right = mpte->right;
 1280         }
 1281         pmap->pm_root = root;
 1282 }
 1283 
 1284 /*
 1285  * This routine unholds page table pages, and if the hold count
 1286  * drops to zero, then it decrements the wire count.
 1287  */
 1288 static __inline int
 1289 pmap_unwire_pte_hold(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_page_t *free)
 1290 {
 1291 
 1292         --m->wire_count;
 1293         if (m->wire_count == 0)
 1294                 return _pmap_unwire_pte_hold(pmap, va, m, free);
 1295         else
 1296                 return 0;
 1297 }
 1298 
 1299 static int 
 1300 _pmap_unwire_pte_hold(pmap_t pmap, vm_offset_t va, vm_page_t m, 
 1301     vm_page_t *free)
 1302 {
 1303 
 1304         /*
 1305          * unmap the page table page
 1306          */
 1307         if (m->pindex >= (NUPDE + NUPDPE)) {
 1308                 /* PDP page */
 1309                 pml4_entry_t *pml4;
 1310                 pml4 = pmap_pml4e(pmap, va);
 1311                 *pml4 = 0;
 1312         } else if (m->pindex >= NUPDE) {
 1313                 /* PD page */
 1314                 pdp_entry_t *pdp;
 1315                 pdp = pmap_pdpe(pmap, va);
 1316                 *pdp = 0;
 1317         } else {
 1318                 /* PTE page */
 1319                 pd_entry_t *pd;
 1320                 pd = pmap_pde(pmap, va);
 1321                 *pd = 0;
 1322         }
 1323         --pmap->pm_stats.resident_count;
 1324         if (m->pindex < NUPDE) {
 1325                 /* We just released a PT, unhold the matching PD */
 1326                 vm_page_t pdpg;
 1327 
 1328                 pdpg = PHYS_TO_VM_PAGE(*pmap_pdpe(pmap, va) & PG_FRAME);
 1329                 pmap_unwire_pte_hold(pmap, va, pdpg, free);
 1330         }
 1331         if (m->pindex >= NUPDE && m->pindex < (NUPDE + NUPDPE)) {
 1332                 /* We just released a PD, unhold the matching PDP */
 1333                 vm_page_t pdppg;
 1334 
 1335                 pdppg = PHYS_TO_VM_PAGE(*pmap_pml4e(pmap, va) & PG_FRAME);
 1336                 pmap_unwire_pte_hold(pmap, va, pdppg, free);
 1337         }
 1338 
 1339         /*
 1340          * This is a release store so that the ordinary store unmapping
 1341          * the page table page is globally performed before TLB shoot-
 1342          * down is begun.
 1343          */
 1344         atomic_subtract_rel_int(&cnt.v_wire_count, 1);
 1345 
 1346         /* 
 1347          * Put page on a list so that it is released after
 1348          * *ALL* TLB shootdown is done
 1349          */
 1350         pmap_add_delayed_free_list(m, free, TRUE);
 1351         
 1352         return 1;
 1353 }
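/*
 * The pindex tests above rely on how page table pages are numbered:
 * indices 0 .. NUPDE-1 identify page table (PT) pages, NUPDE ..
 * NUPDE+NUPDPE-1 identify page directory (PD) pages, and indices at or
 * above NUPDE+NUPDPE identify PDP pages.  _pmap_allocpte() below uses
 * the same encoding when it wires new levels into the tree.
 */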
 1354 
 1355 /*
 1356  * After removing a page table entry, this routine is used to
 1357  * conditionally free the page, and manage the hold/wire counts.
 1358  */
 1359 static int
 1360 pmap_unuse_pt(pmap_t pmap, vm_offset_t va, pd_entry_t ptepde, vm_page_t *free)
 1361 {
 1362         vm_page_t mpte;
 1363 
 1364         if (va >= VM_MAXUSER_ADDRESS)
 1365                 return 0;
 1366         KASSERT(ptepde != 0, ("pmap_unuse_pt: ptepde != 0"));
 1367         mpte = PHYS_TO_VM_PAGE(ptepde & PG_FRAME);
 1368         return pmap_unwire_pte_hold(pmap, va, mpte, free);
 1369 }
 1370 
 1371 void
 1372 pmap_pinit0(pmap_t pmap)
 1373 {
 1374 
 1375         PMAP_LOCK_INIT(pmap);
 1376         pmap->pm_pml4 = (pml4_entry_t *)PHYS_TO_DMAP(KPML4phys);
 1377         pmap->pm_root = NULL;
 1378         pmap->pm_active = 0;
 1379         TAILQ_INIT(&pmap->pm_pvchunk);
 1380         bzero(&pmap->pm_stats, sizeof pmap->pm_stats);
 1381 }
 1382 
 1383 /*
 1384  * Initialize a preallocated and zeroed pmap structure,
 1385  * such as one in a vmspace structure.
 1386  */
 1387 int
 1388 pmap_pinit(pmap_t pmap)
 1389 {
 1390         vm_page_t pml4pg;
 1391         static vm_pindex_t color;
 1392 
 1393         PMAP_LOCK_INIT(pmap);
 1394 
 1395         /*
 1396          * allocate the page directory page
 1397          */
 1398         while ((pml4pg = vm_page_alloc(NULL, color++, VM_ALLOC_NOOBJ |
 1399             VM_ALLOC_NORMAL | VM_ALLOC_WIRED | VM_ALLOC_ZERO)) == NULL)
 1400                 VM_WAIT;
 1401 
 1402         pmap->pm_pml4 = (pml4_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(pml4pg));
 1403 
 1404         if ((pml4pg->flags & PG_ZERO) == 0)
 1405                 pagezero(pmap->pm_pml4);
 1406 
 1407         /* Wire in kernel global address entries. */
 1408         pmap->pm_pml4[KPML4I] = KPDPphys | PG_RW | PG_V | PG_U;
 1409         pmap->pm_pml4[DMPML4I] = DMPDPphys | PG_RW | PG_V | PG_U;
 1410 
 1411         /* install self-referential address mapping entry(s) */
 1412         pmap->pm_pml4[PML4PML4I] = VM_PAGE_TO_PHYS(pml4pg) | PG_V | PG_RW | PG_A | PG_M;
 1413 
 1414         pmap->pm_root = NULL;
 1415         pmap->pm_active = 0;
 1416         TAILQ_INIT(&pmap->pm_pvchunk);
 1417         bzero(&pmap->pm_stats, sizeof pmap->pm_stats);
 1418 
 1419         return (1);
 1420 }
 1421 
 1422 /*
 1423  * this routine is called if the page table page is not
 1424  * mapped correctly.
 1425  *
 1426  * Note: If a page allocation fails at page table level two or three,
 1427  * one or two pages may be held during the wait, only to be released
 1428  * afterwards.  This conservative approach is easily argued to avoid
 1429  * race conditions.
 1430  */
 1431 static vm_page_t
 1432 _pmap_allocpte(pmap_t pmap, vm_pindex_t ptepindex, int flags)
 1433 {
 1434         vm_page_t m, pdppg, pdpg;
 1435 
 1436         KASSERT((flags & (M_NOWAIT | M_WAITOK)) == M_NOWAIT ||
 1437             (flags & (M_NOWAIT | M_WAITOK)) == M_WAITOK,
 1438             ("_pmap_allocpte: flags is neither M_NOWAIT nor M_WAITOK"));
 1439 
 1440         /*
 1441          * Allocate a page table page.
 1442          */
 1443         if ((m = vm_page_alloc(NULL, ptepindex, VM_ALLOC_NOOBJ |
 1444             VM_ALLOC_WIRED | VM_ALLOC_ZERO)) == NULL) {
 1445                 if (flags & M_WAITOK) {
 1446                         PMAP_UNLOCK(pmap);
 1447                         vm_page_unlock_queues();
 1448                         VM_WAIT;
 1449                         vm_page_lock_queues();
 1450                         PMAP_LOCK(pmap);
 1451                 }
 1452 
 1453                 /*
 1454                  * Indicate the need to retry.  While waiting, the page table
 1455                  * page may have been allocated.
 1456                  */
 1457                 return (NULL);
 1458         }
 1459         if ((m->flags & PG_ZERO) == 0)
 1460                 pmap_zero_page(m);
 1461 
 1462         /*
 1463          * Map the pagetable page into the process address space, if
 1464          * it isn't already there.
 1465          */
 1466 
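              /*
               * The page table page index space is partitioned: indices below
               * NUPDE name 4KB page table pages, indices in [NUPDE, NUPDE +
               * NUPDPE) name page directory pages, and indices at or above
               * NUPDE + NUPDPE name page-directory-pointer table pages.
               */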
 1467         if (ptepindex >= (NUPDE + NUPDPE)) {
 1468                 pml4_entry_t *pml4;
 1469                 vm_pindex_t pml4index;
 1470 
 1471                 /* Wire up a new PDPE page */
 1472                 pml4index = ptepindex - (NUPDE + NUPDPE);
 1473                 pml4 = &pmap->pm_pml4[pml4index];
 1474                 *pml4 = VM_PAGE_TO_PHYS(m) | PG_U | PG_RW | PG_V | PG_A | PG_M;
 1475 
 1476         } else if (ptepindex >= NUPDE) {
 1477                 vm_pindex_t pml4index;
 1478                 vm_pindex_t pdpindex;
 1479                 pml4_entry_t *pml4;
 1480                 pdp_entry_t *pdp;
 1481 
 1482                 /* Wire up a new PDE page */
 1483                 pdpindex = ptepindex - NUPDE;
 1484                 pml4index = pdpindex >> NPML4EPGSHIFT;
 1485 
 1486                 pml4 = &pmap->pm_pml4[pml4index];
 1487                 if ((*pml4 & PG_V) == 0) {
 1488                         /* Have to allocate a new pdp, recurse */
 1489                         if (_pmap_allocpte(pmap, NUPDE + NUPDPE + pml4index,
 1490                             flags) == NULL) {
 1491                                 --m->wire_count;
 1492                                 atomic_subtract_int(&cnt.v_wire_count, 1);
 1493                                 vm_page_free_zero(m);
 1494                                 return (NULL);
 1495                         }
 1496                 } else {
 1497                         /* Add reference to pdp page */
 1498                         pdppg = PHYS_TO_VM_PAGE(*pml4 & PG_FRAME);
 1499                         pdppg->wire_count++;
 1500                 }
 1501                 pdp = (pdp_entry_t *)PHYS_TO_DMAP(*pml4 & PG_FRAME);
 1502 
 1503                 /* Now find the pdp page */
 1504                 pdp = &pdp[pdpindex & ((1ul << NPDPEPGSHIFT) - 1)];
 1505                 *pdp = VM_PAGE_TO_PHYS(m) | PG_U | PG_RW | PG_V | PG_A | PG_M;
 1506 
 1507         } else {
 1508                 vm_pindex_t pml4index;
 1509                 vm_pindex_t pdpindex;
 1510                 pml4_entry_t *pml4;
 1511                 pdp_entry_t *pdp;
 1512                 pd_entry_t *pd;
 1513 
 1514                 /* Wire up a new PTE page */
 1515                 pdpindex = ptepindex >> NPDPEPGSHIFT;
 1516                 pml4index = pdpindex >> NPML4EPGSHIFT;
 1517 
 1518                 /* First, find the pdp and check that it's valid. */
 1519                 pml4 = &pmap->pm_pml4[pml4index];
 1520                 if ((*pml4 & PG_V) == 0) {
 1521                         /* Have to allocate a new pd, recurse */
 1522                         if (_pmap_allocpte(pmap, NUPDE + pdpindex,
 1523                             flags) == NULL) {
 1524                                 --m->wire_count;
 1525                                 atomic_subtract_int(&cnt.v_wire_count, 1);
 1526                                 vm_page_free_zero(m);
 1527                                 return (NULL);
 1528                         }
 1529                         pdp = (pdp_entry_t *)PHYS_TO_DMAP(*pml4 & PG_FRAME);
 1530                         pdp = &pdp[pdpindex & ((1ul << NPDPEPGSHIFT) - 1)];
 1531                 } else {
 1532                         pdp = (pdp_entry_t *)PHYS_TO_DMAP(*pml4 & PG_FRAME);
 1533                         pdp = &pdp[pdpindex & ((1ul << NPDPEPGSHIFT) - 1)];
 1534                         if ((*pdp & PG_V) == 0) {
 1535                                 /* Have to allocate a new pd, recurse */
 1536                                 if (_pmap_allocpte(pmap, NUPDE + pdpindex,
 1537                                     flags) == NULL) {
 1538                                         --m->wire_count;
 1539                                         atomic_subtract_int(&cnt.v_wire_count,
 1540                                             1);
 1541                                         vm_page_free_zero(m);
 1542                                         return (NULL);
 1543                                 }
 1544                         } else {
 1545                                 /* Add reference to the pd page */
 1546                                 pdpg = PHYS_TO_VM_PAGE(*pdp & PG_FRAME);
 1547                                 pdpg->wire_count++;
 1548                         }
 1549                 }
 1550                 pd = (pd_entry_t *)PHYS_TO_DMAP(*pdp & PG_FRAME);
 1551 
 1552                 /* Now we know where the page directory page is */
 1553                 pd = &pd[ptepindex & ((1ul << NPDEPGSHIFT) - 1)];
 1554                 *pd = VM_PAGE_TO_PHYS(m) | PG_U | PG_RW | PG_V | PG_A | PG_M;
 1555         }
 1556 
 1557         pmap->pm_stats.resident_count++;
 1558 
 1559         return m;
 1560 }
 1561 
 1562 static vm_page_t
 1563 pmap_allocpde(pmap_t pmap, vm_offset_t va, int flags)
 1564 {
 1565         vm_pindex_t pdpindex, ptepindex;
 1566         pdp_entry_t *pdpe;
 1567         vm_page_t pdpg;
 1568 
 1569         KASSERT((flags & (M_NOWAIT | M_WAITOK)) == M_NOWAIT ||
 1570             (flags & (M_NOWAIT | M_WAITOK)) == M_WAITOK,
 1571             ("pmap_allocpde: flags is neither M_NOWAIT nor M_WAITOK"));
 1572 retry:
 1573         pdpe = pmap_pdpe(pmap, va);
 1574         if (pdpe != NULL && (*pdpe & PG_V) != 0) {
 1575                 /* Add a reference to the pd page. */
 1576                 pdpg = PHYS_TO_VM_PAGE(*pdpe & PG_FRAME);
 1577                 pdpg->wire_count++;
 1578         } else {
 1579                 /* Allocate a pd page. */
 1580                 ptepindex = pmap_pde_pindex(va);
 1581                 pdpindex = ptepindex >> NPDPEPGSHIFT;
 1582                 pdpg = _pmap_allocpte(pmap, NUPDE + pdpindex, flags);
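                      /*
                       * A NULL return with M_WAITOK set means that
                       * _pmap_allocpte() slept and dropped the locks, so the
                       * lookup must be retried.
                       */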
 1583                 if (pdpg == NULL && (flags & M_WAITOK))
 1584                         goto retry;
 1585         }
 1586         return (pdpg);
 1587 }
 1588 
 1589 static vm_page_t
 1590 pmap_allocpte(pmap_t pmap, vm_offset_t va, int flags)
 1591 {
 1592         vm_pindex_t ptepindex;
 1593         pd_entry_t *pd;
 1594         vm_page_t m;
 1595 
 1596         KASSERT((flags & (M_NOWAIT | M_WAITOK)) == M_NOWAIT ||
 1597             (flags & (M_NOWAIT | M_WAITOK)) == M_WAITOK,
 1598             ("pmap_allocpte: flags is neither M_NOWAIT nor M_WAITOK"));
 1599 
 1600         /*
 1601          * Calculate pagetable page index
 1602          */
 1603         ptepindex = pmap_pde_pindex(va);
 1604 retry:
 1605         /*
 1606          * Get the page directory entry
 1607          */
 1608         pd = pmap_pde(pmap, va);
 1609 
 1610         /*
 1611          * This supports switching from a 2MB page to a
 1612          * normal 4K page.
 1613          */
 1614         if (pd != NULL && (*pd & (PG_PS | PG_V)) == (PG_PS | PG_V)) {
 1615                 if (!pmap_demote_pde(pmap, pd, va)) {
 1616                         /*
 1617                          * Invalidation of the 2MB page mapping may have caused
 1618                          * the deallocation of the underlying PD page.
 1619                          */
 1620                         pd = NULL;
 1621                 }
 1622         }
 1623 
 1624         /*
 1625          * If the page table page is mapped, we just increment the
 1626          * hold count, and activate it.
 1627          */
 1628         if (pd != NULL && (*pd & PG_V) != 0) {
 1629                 m = PHYS_TO_VM_PAGE(*pd & PG_FRAME);
 1630                 m->wire_count++;
 1631         } else {
 1632                 /*
 1633                  * Here if the pte page isn't mapped, or if it has been
 1634                  * deallocated.
 1635                  */
 1636                 m = _pmap_allocpte(pmap, ptepindex, flags);
 1637                 if (m == NULL && (flags & M_WAITOK))
 1638                         goto retry;
 1639         }
 1640         return (m);
 1641 }
 1642 
 1643 
 1644 /***************************************************
 1645  * Pmap allocation/deallocation routines.
 1646  ***************************************************/
 1647 
 1648 /*
 1649  * Release any resources held by the given physical map.
 1650  * Called when a pmap initialized by pmap_pinit is being released.
 1651  * Should only be called if the map contains no valid mappings.
 1652  */
 1653 void
 1654 pmap_release(pmap_t pmap)
 1655 {
 1656         vm_page_t m;
 1657 
 1658         KASSERT(pmap->pm_stats.resident_count == 0,
 1659             ("pmap_release: pmap resident count %ld != 0",
 1660             pmap->pm_stats.resident_count));
 1661         KASSERT(pmap->pm_root == NULL,
 1662             ("pmap_release: pmap has reserved page table page(s)"));
 1663 
 1664         m = PHYS_TO_VM_PAGE(pmap->pm_pml4[PML4PML4I] & PG_FRAME);
 1665 
 1666         pmap->pm_pml4[KPML4I] = 0;      /* KVA */
 1667         pmap->pm_pml4[DMPML4I] = 0;     /* Direct Map */
 1668         pmap->pm_pml4[PML4PML4I] = 0;   /* Recursive Mapping */
 1669 
 1670         m->wire_count--;
 1671         atomic_subtract_int(&cnt.v_wire_count, 1);
 1672         vm_page_free_zero(m);
 1673         PMAP_LOCK_DESTROY(pmap);
 1674 }
 1675 
 1676 static int
 1677 kvm_size(SYSCTL_HANDLER_ARGS)
 1678 {
 1679         unsigned long ksize = VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS;
 1680 
 1681         return sysctl_handle_long(oidp, &ksize, 0, req);
 1682 }
 1683 SYSCTL_PROC(_vm, OID_AUTO, kvm_size, CTLTYPE_LONG|CTLFLAG_RD, 
 1684     0, 0, kvm_size, "LU", "Size of KVM");
 1685 
 1686 static int
 1687 kvm_free(SYSCTL_HANDLER_ARGS)
 1688 {
 1689         unsigned long kfree = VM_MAX_KERNEL_ADDRESS - kernel_vm_end;
 1690 
 1691         return sysctl_handle_long(oidp, &kfree, 0, req);
 1692 }
 1693 SYSCTL_PROC(_vm, OID_AUTO, kvm_free, CTLTYPE_LONG|CTLFLAG_RD, 
 1694     0, 0, kvm_free, "LU", "Amount of KVM free");
 1695 
 1696 /*
 1697  * grow the number of kernel page table entries, if needed
 1698  */
 1699 void
 1700 pmap_growkernel(vm_offset_t addr)
 1701 {
 1702         vm_paddr_t paddr;
 1703         vm_page_t nkpg;
 1704         pd_entry_t *pde, newpdir;
 1705         pdp_entry_t *pdpe;
 1706 
 1707         mtx_assert(&kernel_map->system_mtx, MA_OWNED);
 1708 
 1709         /*
 1710          * Return if "addr" is within the range of kernel page table pages
 1711          * that were preallocated during pmap bootstrap.  Moreover, leave
 1712          * "kernel_vm_end" and the kernel page table as they were.
 1713          *
 1714          * The correctness of this action is based on the following
 1715          * argument: vm_map_findspace() allocates contiguous ranges of the
 1716          * kernel virtual address space.  It calls this function if a range
 1717          * ends after "kernel_vm_end".  If the kernel is mapped between
 1718          * "kernel_vm_end" and "addr", then the range cannot begin at
 1719          * "kernel_vm_end".  In fact, its beginning address cannot be less
 1720          * than the kernel.  Thus, there is no immediate need to allocate
 1721          * any new kernel page table pages between "kernel_vm_end" and
 1722          * "KERNBASE".
 1723          */
 1724         if (KERNBASE < addr && addr <= KERNBASE + NKPT * NBPDR)
 1725                 return;
 1726 
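              /*
               * Round the request up to a 2MB page directory boundary and
               * clamp it to the end of the kernel map before extending the
               * kernel page tables.
               */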
 1727         addr = roundup2(addr, NBPDR);
 1728         if (addr - 1 >= kernel_map->max_offset)
 1729                 addr = kernel_map->max_offset;
 1730         while (kernel_vm_end < addr) {
 1731                 pdpe = pmap_pdpe(kernel_pmap, kernel_vm_end);
 1732                 if ((*pdpe & PG_V) == 0) {
 1733                         /* We need a new PDP entry */
 1734                         nkpg = vm_page_alloc(NULL, kernel_vm_end >> PDPSHIFT,
 1735                             VM_ALLOC_INTERRUPT | VM_ALLOC_NOOBJ |
 1736                             VM_ALLOC_WIRED | VM_ALLOC_ZERO);
 1737                         if (nkpg == NULL)
 1738                                 panic("pmap_growkernel: no memory to grow kernel");
 1739                         if ((nkpg->flags & PG_ZERO) == 0)
 1740                                 pmap_zero_page(nkpg);
 1741                         paddr = VM_PAGE_TO_PHYS(nkpg);
 1742                         *pdpe = (pdp_entry_t)
 1743                                 (paddr | PG_V | PG_RW | PG_A | PG_M);
 1744                         continue; /* try again */
 1745                 }
 1746                 pde = pmap_pdpe_to_pde(pdpe, kernel_vm_end);
 1747                 if ((*pde & PG_V) != 0) {
 1748                         kernel_vm_end = (kernel_vm_end + NBPDR) & ~PDRMASK;
 1749                         if (kernel_vm_end - 1 >= kernel_map->max_offset) {
 1750                                 kernel_vm_end = kernel_map->max_offset;
 1751                                 break;                       
 1752                         }
 1753                         continue;
 1754                 }
 1755 
 1756                 nkpg = vm_page_alloc(NULL, pmap_pde_pindex(kernel_vm_end),
 1757                     VM_ALLOC_INTERRUPT | VM_ALLOC_NOOBJ | VM_ALLOC_WIRED |
 1758                     VM_ALLOC_ZERO);
 1759                 if (nkpg == NULL)
 1760                         panic("pmap_growkernel: no memory to grow kernel");
 1761                 if ((nkpg->flags & PG_ZERO) == 0)
 1762                         pmap_zero_page(nkpg);
 1763                 paddr = VM_PAGE_TO_PHYS(nkpg);
 1764                 newpdir = (pd_entry_t) (paddr | PG_V | PG_RW | PG_A | PG_M);
 1765                 pde_store(pde, newpdir);
 1766 
 1767                 kernel_vm_end = (kernel_vm_end + NBPDR) & ~PDRMASK;
 1768                 if (kernel_vm_end - 1 >= kernel_map->max_offset) {
 1769                         kernel_vm_end = kernel_map->max_offset;
 1770                         break;                       
 1771                 }
 1772         }
 1773 }
 1774 
 1775 
 1776 /***************************************************
 1777  * page management routines.
 1778  ***************************************************/
 1779 
 1780 CTASSERT(sizeof(struct pv_chunk) == PAGE_SIZE);
 1781 CTASSERT(_NPCM == 3);
 1782 CTASSERT(_NPCPV == 168);
 1783 
 1784 static __inline struct pv_chunk *
 1785 pv_to_chunk(pv_entry_t pv)
 1786 {
 1787 
 1788         return (struct pv_chunk *)((uintptr_t)pv & ~(uintptr_t)PAGE_MASK);
 1789 }
 1790 
 1791 #define PV_PMAP(pv) (pv_to_chunk(pv)->pc_pmap)
 1792 
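      /*
       * Each pv chunk holds _NPCPV (168) pv entries, tracked by a three-word
       * free bitmap; the third word therefore uses only its low 40 bits.
       */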
 1793 #define PC_FREE0        0xfffffffffffffffful
 1794 #define PC_FREE1        0xfffffffffffffffful
 1795 #define PC_FREE2        0x000000fffffffffful
 1796 
 1797 static uint64_t pc_freemask[_NPCM] = { PC_FREE0, PC_FREE1, PC_FREE2 };
 1798 
 1799 SYSCTL_INT(_vm_pmap, OID_AUTO, pv_entry_count, CTLFLAG_RD, &pv_entry_count, 0,
 1800         "Current number of pv entries");
 1801 
 1802 #ifdef PV_STATS
 1803 static int pc_chunk_count, pc_chunk_allocs, pc_chunk_frees, pc_chunk_tryfail;
 1804 
 1805 SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_count, CTLFLAG_RD, &pc_chunk_count, 0,
 1806         "Current number of pv entry chunks");
 1807 SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_allocs, CTLFLAG_RD, &pc_chunk_allocs, 0,
 1808         "Total number of pv entry chunks allocated");
 1809 SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_frees, CTLFLAG_RD, &pc_chunk_frees, 0,
 1810         "Total number of pv entry chunks freed");
 1811 SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_tryfail, CTLFLAG_RD, &pc_chunk_tryfail, 0,
 1812         "Number of failed attempts to allocate a pv entry chunk page");
 1813 
 1814 static long pv_entry_frees, pv_entry_allocs;
 1815 static int pv_entry_spare;
 1816 
 1817 SYSCTL_LONG(_vm_pmap, OID_AUTO, pv_entry_frees, CTLFLAG_RD, &pv_entry_frees, 0,
 1818         "Total number of pv entries freed");
 1819 SYSCTL_LONG(_vm_pmap, OID_AUTO, pv_entry_allocs, CTLFLAG_RD, &pv_entry_allocs, 0,
 1820         "Total number of pv entries allocated");
 1821 SYSCTL_INT(_vm_pmap, OID_AUTO, pv_entry_spare, CTLFLAG_RD, &pv_entry_spare, 0,
 1822         "Current number of spare pv entries");
 1823 
 1824 static int pmap_collect_inactive, pmap_collect_active;
 1825 
 1826 SYSCTL_INT(_vm_pmap, OID_AUTO, pmap_collect_inactive, CTLFLAG_RD, &pmap_collect_inactive, 0,
 1827         "Number of times pmap_collect() was called on the inactive queue");
 1828 SYSCTL_INT(_vm_pmap, OID_AUTO, pmap_collect_active, CTLFLAG_RD, &pmap_collect_active, 0,
 1829         "Number of times pmap_collect() was called on the active queue");
 1830 #endif
 1831 
 1832 /*
 1833  * We are in a serious low memory condition.  Resort to
 1834  * drastic measures to free some pages so we can allocate
 1835  * another pv entry chunk.  This is normally called to
 1836  * unmap inactive pages, and if necessary, active pages.
 1837  *
 1838  * We do not, however, unmap 2mpages because subsequent accesses will
 1839  * allocate per-page pv entries until repromotion occurs, thereby
 1840  * exacerbating the shortage of free pv entries.
 1841  */
 1842 static void
 1843 pmap_collect(pmap_t locked_pmap, struct vpgqueues *vpq)
 1844 {
 1845         struct md_page *pvh;
 1846         pd_entry_t *pde;
 1847         pmap_t pmap;
 1848         pt_entry_t *pte, tpte;
 1849         pv_entry_t next_pv, pv;
 1850         vm_offset_t va;
 1851         vm_page_t m, free;
 1852 
 1853         TAILQ_FOREACH(m, &vpq->pl, pageq) {
 1854                 if (m->hold_count || m->busy)
 1855                         continue;
 1856                 TAILQ_FOREACH_SAFE(pv, &m->md.pv_list, pv_list, next_pv) {
 1857                         va = pv->pv_va;
 1858                         pmap = PV_PMAP(pv);
 1859                         /* Avoid deadlock and lock recursion. */
 1860                         if (pmap > locked_pmap)
 1861                                 PMAP_LOCK(pmap);
 1862                         else if (pmap != locked_pmap && !PMAP_TRYLOCK(pmap))
 1863                                 continue;
 1864                         pmap->pm_stats.resident_count--;
 1865                         pde = pmap_pde(pmap, va);
 1866                         KASSERT((*pde & PG_PS) == 0, ("pmap_collect: found"
 1867                             " a 2mpage in page %p's pv list", m));
 1868                         pte = pmap_pde_to_pte(pde, va);
 1869                         tpte = pte_load_clear(pte);
 1870                         KASSERT((tpte & PG_W) == 0,
 1871                             ("pmap_collect: wired pte %#lx", tpte));
 1872                         if (tpte & PG_A)
 1873                                 vm_page_flag_set(m, PG_REFERENCED);
 1874                         if ((tpte & (PG_M | PG_RW)) == (PG_M | PG_RW))
 1875                                 vm_page_dirty(m);
 1876                         free = NULL;
 1877                         pmap_unuse_pt(pmap, va, *pde, &free);
 1878                         pmap_invalidate_page(pmap, va);
 1879                         pmap_free_zero_pages(free);
 1880                         TAILQ_REMOVE(&m->md.pv_list, pv, pv_list);
 1881                         if (TAILQ_EMPTY(&m->md.pv_list)) {
 1882                                 pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
 1883                                 if (TAILQ_EMPTY(&pvh->pv_list))
 1884                                         vm_page_flag_clear(m, PG_WRITEABLE);
 1885                         }
 1886                         free_pv_entry(pmap, pv);
 1887                         if (pmap != locked_pmap)
 1888                                 PMAP_UNLOCK(pmap);
 1889                 }
 1890         }
 1891 }
 1892 
 1893 
 1894 /*
 1895  * Free the pv_entry, returning its slot to the containing pv chunk.
 1896  */
 1897 static void
 1898 free_pv_entry(pmap_t pmap, pv_entry_t pv)
 1899 {
 1900         vm_page_t m;
 1901         struct pv_chunk *pc;
 1902         int idx, field, bit;
 1903 
 1904         mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 1905         PMAP_LOCK_ASSERT(pmap, MA_OWNED);
 1906         PV_STAT(pv_entry_frees++);
 1907         PV_STAT(pv_entry_spare++);
 1908         pv_entry_count--;
 1909         pc = pv_to_chunk(pv);
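              /*
               * Mark the entry's slot free in the chunk's bitmap; each 64-bit
               * word of pc_map covers 64 of the chunk's pv entries.
               */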
 1910         idx = pv - &pc->pc_pventry[0];
 1911         field = idx / 64;
 1912         bit = idx % 64;
 1913         pc->pc_map[field] |= 1ul << bit;
 1914         /* move to head of list */
 1915         TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
 1916         if (pc->pc_map[0] != PC_FREE0 || pc->pc_map[1] != PC_FREE1 ||
 1917             pc->pc_map[2] != PC_FREE2) {
 1918                 TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc, pc_list);
 1919                 return;
 1920         }
 1921         PV_STAT(pv_entry_spare -= _NPCPV);
 1922         PV_STAT(pc_chunk_count--);
 1923         PV_STAT(pc_chunk_frees++);
 1924         /* entire chunk is free, return it */
 1925         m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS((vm_offset_t)pc));
 1926         dump_drop_page(m->phys_addr);
 1927         vm_page_unwire(m, 0);
 1928         vm_page_free(m);
 1929 }
 1930 
 1931 /*
 1932  * Get a new pv_entry, allocating a whole pv chunk from the system
 1933  * when needed.
 1934  */
 1935 static pv_entry_t
 1936 get_pv_entry(pmap_t pmap, int try)
 1937 {
 1938         static const struct timeval printinterval = { 60, 0 };
 1939         static struct timeval lastprint;
 1940         static vm_pindex_t colour;
 1941         struct vpgqueues *pq;
 1942         int bit, field;
 1943         pv_entry_t pv;
 1944         struct pv_chunk *pc;
 1945         vm_page_t m;
 1946 
 1947         PMAP_LOCK_ASSERT(pmap, MA_OWNED);
 1948         mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 1949         PV_STAT(pv_entry_allocs++);
 1950         pv_entry_count++;
 1951         if (pv_entry_count > pv_entry_high_water)
 1952                 if (ratecheck(&lastprint, &printinterval))
 1953                         printf("Approaching the limit on PV entries, consider "
 1954                             "increasing either the vm.pmap.shpgperproc or the "
 1955                             "vm.pmap.pv_entry_max sysctl.\n");
 1956         pq = NULL;
 1957 retry:
 1958         pc = TAILQ_FIRST(&pmap->pm_pvchunk);
 1959         if (pc != NULL) {
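                      /* Scan the chunk's bitmap for a free pv entry slot. */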
 1960                 for (field = 0; field < _NPCM; field++) {
 1961                         if (pc->pc_map[field]) {
 1962                                 bit = bsfq(pc->pc_map[field]);
 1963                                 break;
 1964                         }
 1965                 }
 1966                 if (field < _NPCM) {
 1967                         pv = &pc->pc_pventry[field * 64 + bit];
 1968                         pc->pc_map[field] &= ~(1ul << bit);
 1969                         /* If this was the chunk's last free entry, move it to the tail */
 1970                         if (pc->pc_map[0] == 0 && pc->pc_map[1] == 0 &&
 1971                             pc->pc_map[2] == 0) {
 1972                                 TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
 1973                                 TAILQ_INSERT_TAIL(&pmap->pm_pvchunk, pc, pc_list);
 1974                         }
 1975                         PV_STAT(pv_entry_spare--);
 1976                         return (pv);
 1977                 }
 1978         }
 1979         /* No free items, allocate another chunk */
 1980         m = vm_page_alloc(NULL, colour, (pq == &vm_page_queues[PQ_ACTIVE] ?
 1981             VM_ALLOC_SYSTEM : VM_ALLOC_NORMAL) | VM_ALLOC_NOOBJ |
 1982             VM_ALLOC_WIRED);
 1983         if (m == NULL) {
 1984                 if (try) {
 1985                         pv_entry_count--;
 1986                         PV_STAT(pc_chunk_tryfail++);
 1987                         return (NULL);
 1988                 }
 1989                 /*
 1990                  * Reclaim pv entries: At first, destroy mappings to inactive
 1991                  * pages.  After that, if a pv chunk entry is still needed,
 1992                  * destroy mappings to active pages.
 1993                  */
 1994                 if (pq == NULL) {
 1995                         PV_STAT(pmap_collect_inactive++);
 1996                         pq = &vm_page_queues[PQ_INACTIVE];
 1997                 } else if (pq == &vm_page_queues[PQ_INACTIVE]) {
 1998                         PV_STAT(pmap_collect_active++);
 1999                         pq = &vm_page_queues[PQ_ACTIVE];
 2000                 } else
 2001                         panic("get_pv_entry: increase vm.pmap.shpgperproc");
 2002                 pmap_collect(pmap, pq);
 2003                 goto retry;
 2004         }
 2005         PV_STAT(pc_chunk_count++);
 2006         PV_STAT(pc_chunk_allocs++);
 2007         colour++;
 2008         dump_add_page(m->phys_addr);
 2009         pc = (void *)PHYS_TO_DMAP(m->phys_addr);
 2010         pc->pc_pmap = pmap;
 2011         pc->pc_map[0] = PC_FREE0 & ~1ul;        /* preallocated bit 0 */
 2012         pc->pc_map[1] = PC_FREE1;
 2013         pc->pc_map[2] = PC_FREE2;
 2014         pv = &pc->pc_pventry[0];
 2015         TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc, pc_list);
 2016         PV_STAT(pv_entry_spare += _NPCPV - 1);
 2017         return (pv);
 2018 }
 2019 
 2020 /*
 2021  * First find and then remove the pv entry for the specified pmap and virtual
 2022  * address from the specified pv list.  Returns the pv entry if found and NULL
 2023  * otherwise.  This operation can be performed on pv lists for either 4KB or
 2024  * 2MB page mappings.
 2025  */
 2026 static __inline pv_entry_t
 2027 pmap_pvh_remove(struct md_page *pvh, pmap_t pmap, vm_offset_t va)
 2028 {
 2029         pv_entry_t pv;
 2030 
 2031         mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 2032         TAILQ_FOREACH(pv, &pvh->pv_list, pv_list) {
 2033                 if (pmap == PV_PMAP(pv) && va == pv->pv_va) {
 2034                         TAILQ_REMOVE(&pvh->pv_list, pv, pv_list);
 2035                         break;
 2036                 }
 2037         }
 2038         return (pv);
 2039 }
 2040 
 2041 /*
 2042  * After demotion from a 2MB page mapping to 512 4KB page mappings,
 2043  * destroy the pv entry for the 2MB page mapping and reinstantiate the pv
 2044  * entries for each of the 4KB page mappings.
 2045  */
 2046 static void
 2047 pmap_pv_demote_pde(pmap_t pmap, vm_offset_t va, vm_paddr_t pa)
 2048 {
 2049         struct md_page *pvh;
 2050         pv_entry_t pv;
 2051         vm_offset_t va_last;
 2052         vm_page_t m;
 2053 
 2054         mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 2055         KASSERT((pa & PDRMASK) == 0,
 2056             ("pmap_pv_demote_pde: pa is not 2mpage aligned"));
 2057 
 2058         /*
 2059          * Transfer the 2mpage's pv entry for this mapping to the first
 2060          * page's pv list.
 2061          */
 2062         pvh = pa_to_pvh(pa);
 2063         va = trunc_2mpage(va);
 2064         pv = pmap_pvh_remove(pvh, pmap, va);
 2065         KASSERT(pv != NULL, ("pmap_pv_demote_pde: pv not found"));
 2066         m = PHYS_TO_VM_PAGE(pa);
 2067         TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_list);
 2068         /* Instantiate the remaining NPTEPG - 1 pv entries. */
 2069         va_last = va + NBPDR - PAGE_SIZE;
 2070         do {
 2071                 m++;
 2072                 KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
 2073                     ("pmap_pv_demote_pde: page %p is not managed", m));
 2074                 va += PAGE_SIZE;
 2075                 pmap_insert_entry(pmap, va, m);
 2076         } while (va < va_last);
 2077 }
 2078 
 2079 /*
 2080  * After promotion from 512 4KB page mappings to a single 2MB page mapping,
 2081  * replace the many pv entries for the 4KB page mappings by a single pv entry
 2082  * for the 2MB page mapping.
 2083  */
 2084 static void
 2085 pmap_pv_promote_pde(pmap_t pmap, vm_offset_t va, vm_paddr_t pa)
 2086 {
 2087         struct md_page *pvh;
 2088         pv_entry_t pv;
 2089         vm_offset_t va_last;
 2090         vm_page_t m;
 2091 
 2092         mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 2093         KASSERT((pa & PDRMASK) == 0,
 2094             ("pmap_pv_promote_pde: pa is not 2mpage aligned"));
 2095 
 2096         /*
 2097          * Transfer the first page's pv entry for this mapping to the
 2098          * 2mpage's pv list.  Aside from avoiding the cost of a call
 2099          * to get_pv_entry(), a transfer avoids the possibility that
 2100          * get_pv_entry() calls pmap_collect() and that pmap_collect()
 2101          * removes one of the mappings that is being promoted.
 2102          */
 2103         m = PHYS_TO_VM_PAGE(pa);
 2104         va = trunc_2mpage(va);
 2105         pv = pmap_pvh_remove(&m->md, pmap, va);
 2106         KASSERT(pv != NULL, ("pmap_pv_promote_pde: pv not found"));
 2107         pvh = pa_to_pvh(pa);
 2108         TAILQ_INSERT_TAIL(&pvh->pv_list, pv, pv_list);
 2109         /* Free the remaining NPTEPG - 1 pv entries. */
 2110         va_last = va + NBPDR - PAGE_SIZE;
 2111         do {
 2112                 m++;
 2113                 va += PAGE_SIZE;
 2114                 pmap_pvh_free(&m->md, pmap, va);
 2115         } while (va < va_last);
 2116 }
 2117 
 2118 /*
 2119  * First find and then destroy the pv entry for the specified pmap and virtual
 2120  * address.  This operation can be performed on pv lists for either 4KB or 2MB
 2121  * page mappings.
 2122  */
 2123 static void
 2124 pmap_pvh_free(struct md_page *pvh, pmap_t pmap, vm_offset_t va)
 2125 {
 2126         pv_entry_t pv;
 2127 
 2128         pv = pmap_pvh_remove(pvh, pmap, va);
 2129         KASSERT(pv != NULL, ("pmap_pvh_free: pv not found"));
 2130         free_pv_entry(pmap, pv);
 2131 }
 2132 
 2133 static void
 2134 pmap_remove_entry(pmap_t pmap, vm_page_t m, vm_offset_t va)
 2135 {
 2136         struct md_page *pvh;
 2137 
 2138         mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 2139         pmap_pvh_free(&m->md, pmap, va);
 2140         if (TAILQ_EMPTY(&m->md.pv_list)) {
 2141                 pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
 2142                 if (TAILQ_EMPTY(&pvh->pv_list))
 2143                         vm_page_flag_clear(m, PG_WRITEABLE);
 2144         }
 2145 }
 2146 
 2147 /*
 2148  * Create a pv entry for the mapping of page m at
 2149  * (pmap, va).
 2150  */
 2151 static void
 2152 pmap_insert_entry(pmap_t pmap, vm_offset_t va, vm_page_t m)
 2153 {
 2154         pv_entry_t pv;
 2155 
 2156         PMAP_LOCK_ASSERT(pmap, MA_OWNED);
 2157         mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 2158         pv = get_pv_entry(pmap, FALSE);
 2159         pv->pv_va = va;
 2160         TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_list);
 2161 }
 2162 
 2163 /*
 2164  * Conditionally create a pv entry.
 2165  */
 2166 static boolean_t
 2167 pmap_try_insert_pv_entry(pmap_t pmap, vm_offset_t va, vm_page_t m)
 2168 {
 2169         pv_entry_t pv;
 2170 
 2171         PMAP_LOCK_ASSERT(pmap, MA_OWNED);
 2172         mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 2173         if (pv_entry_count < pv_entry_high_water && 
 2174             (pv = get_pv_entry(pmap, TRUE)) != NULL) {
 2175                 pv->pv_va = va;
 2176                 TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_list);
 2177                 return (TRUE);
 2178         } else
 2179                 return (FALSE);
 2180 }
 2181 
 2182 /*
 2183  * Create the pv entry for a 2MB page mapping.
 2184  */
 2185 static boolean_t
 2186 pmap_pv_insert_pde(pmap_t pmap, vm_offset_t va, vm_paddr_t pa)
 2187 {
 2188         struct md_page *pvh;
 2189         pv_entry_t pv;
 2190 
 2191         mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 2192         if (pv_entry_count < pv_entry_high_water && 
 2193             (pv = get_pv_entry(pmap, TRUE)) != NULL) {
 2194                 pv->pv_va = va;
 2195                 pvh = pa_to_pvh(pa);
 2196                 TAILQ_INSERT_TAIL(&pvh->pv_list, pv, pv_list);
 2197                 return (TRUE);
 2198         } else
 2199                 return (FALSE);
 2200 }
 2201 
 2202 /*
 2203  * Fills a page table page with mappings to consecutive physical pages.
 2204  */
 2205 static void
 2206 pmap_fill_ptp(pt_entry_t *firstpte, pt_entry_t newpte)
 2207 {
 2208         pt_entry_t *pte;
 2209 
 2210         for (pte = firstpte; pte < firstpte + NPTEPG; pte++) {
 2211                 *pte = newpte;
 2212                 newpte += PAGE_SIZE;
 2213         }
 2214 }
 2215 
 2216 /*
 2217  * Tries to demote a 2MB page mapping.  If demotion fails, the 2MB page
 2218  * mapping is invalidated.
 2219  */
 2220 static boolean_t
 2221 pmap_demote_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t va)
 2222 {
 2223         pd_entry_t newpde, oldpde;
 2224         pt_entry_t *firstpte, newpte;
 2225         vm_paddr_t mptepa;
 2226         vm_page_t free, mpte;
 2227 
 2228         PMAP_LOCK_ASSERT(pmap, MA_OWNED);
 2229         oldpde = *pde;
 2230         KASSERT((oldpde & (PG_PS | PG_V)) == (PG_PS | PG_V),
 2231             ("pmap_demote_pde: oldpde is missing PG_PS and/or PG_V"));
 2232         mpte = pmap_lookup_pt_page(pmap, va);
 2233         if (mpte != NULL)
 2234                 pmap_remove_pt_page(pmap, mpte);
 2235         else {
 2236                 KASSERT((oldpde & PG_W) == 0,
 2237                     ("pmap_demote_pde: page table page for a wired mapping"
 2238                     " is missing"));
 2239 
 2240                 /*
 2241                  * Invalidate the 2MB page mapping and return "failure" if the
 2242                  * mapping was never accessed or the allocation of the new
 2243                  * page table page fails.  If the 2MB page mapping belongs to
 2244                  * the direct map region of the kernel's address space, then
 2245                  * the page allocation request specifies the highest possible
 2246                  * priority (VM_ALLOC_INTERRUPT).  Otherwise, the priority is
 2247                  * normal.  Page table pages are preallocated for every other
 2248                  * part of the kernel address space, so the direct map region
 2249                  * is the only part of the kernel address space that must be
 2250                  * handled here.
 2251                  */
 2252                 if ((oldpde & PG_A) == 0 || (mpte = vm_page_alloc(NULL,
 2253                     pmap_pde_pindex(va), (va >= DMAP_MIN_ADDRESS && va <
 2254                     DMAP_MAX_ADDRESS ? VM_ALLOC_INTERRUPT : VM_ALLOC_NORMAL) |
 2255                     VM_ALLOC_NOOBJ | VM_ALLOC_WIRED)) == NULL) {
 2256                         free = NULL;
 2257                         pmap_remove_pde(pmap, pde, trunc_2mpage(va), &free);
 2258                         pmap_invalidate_page(pmap, trunc_2mpage(va));
 2259                         pmap_free_zero_pages(free);
 2260                         CTR2(KTR_PMAP, "pmap_demote_pde: failure for va %#lx"
 2261                             " in pmap %p", va, pmap);
 2262                         return (FALSE);
 2263                 }
 2264                 if (va < VM_MAXUSER_ADDRESS)
 2265                         pmap->pm_stats.resident_count++;
 2266         }
 2267         mptepa = VM_PAGE_TO_PHYS(mpte);
 2268         firstpte = (pt_entry_t *)PHYS_TO_DMAP(mptepa);
 2269         newpde = mptepa | PG_M | PG_A | (oldpde & PG_U) | PG_RW | PG_V;
 2270         KASSERT((oldpde & PG_A) != 0,
 2271             ("pmap_demote_pde: oldpde is missing PG_A"));
 2272         KASSERT((oldpde & (PG_M | PG_RW)) != PG_RW,
 2273             ("pmap_demote_pde: oldpde is missing PG_M"));
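              /*
               * Compute the 4KB PTE template from the old 2MB PDE.  The PAT
               * bit occupies bit 12 in a PDE but bit 7 in a PTE, so transfer
               * it if it is set.
               */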
 2274         newpte = oldpde & ~PG_PS;
 2275         if ((newpte & PG_PDE_PAT) != 0)
 2276                 newpte ^= PG_PDE_PAT | PG_PTE_PAT;
 2277 
 2278         /*
 2279          * If the page table page is new, initialize it.
 2280          */
 2281         if (mpte->wire_count == 1) {
 2282                 mpte->wire_count = NPTEPG;
 2283                 pmap_fill_ptp(firstpte, newpte);
 2284         }
 2285         KASSERT((*firstpte & PG_FRAME) == (newpte & PG_FRAME),
 2286             ("pmap_demote_pde: firstpte and newpte map different physical"
 2287             " addresses"));
 2288 
 2289         /*
 2290          * If the mapping has changed attributes, update the page table
 2291          * entries.
 2292          */
 2293         if ((*firstpte & PG_PTE_PROMOTE) != (newpte & PG_PTE_PROMOTE))
 2294                 pmap_fill_ptp(firstpte, newpte);
 2295 
 2296         /*
 2297          * Demote the mapping.  This pmap is locked.  The old PDE has
 2298          * PG_A set.  If the old PDE has PG_RW set, it also has PG_M
 2299          * set.  Thus, there is no danger of a race with another
 2300          * processor changing the setting of PG_A and/or PG_M between
 2301          * the read above and the store below. 
 2302          */
 2303         pde_store(pde, newpde); 
 2304 
 2305         /*
 2306          * Invalidate a stale recursive mapping of the page table page.
 2307          */
 2308         if (va >= VM_MAXUSER_ADDRESS)
 2309                 pmap_invalidate_page(pmap, (vm_offset_t)vtopte(va));
 2310 
 2311         /*
 2312          * Demote the pv entry.  This depends on the earlier demotion
 2313          * of the mapping.  Specifically, the (re)creation of a per-
 2314          * page pv entry might trigger the execution of pmap_collect(),
 2315          * which might reclaim a newly (re)created per-page pv entry
 2316          * and destroy the associated mapping.  In order to destroy
 2317          * the mapping, the PDE must have already changed from mapping
 2318          * the 2mpage to referencing the page table page.
 2319          */
 2320         if ((oldpde & PG_MANAGED) != 0)
 2321                 pmap_pv_demote_pde(pmap, va, oldpde & PG_PS_FRAME);
 2322 
 2323         pmap_pde_demotions++;
 2324         CTR2(KTR_PMAP, "pmap_demote_pde: success for va %#lx"
 2325             " in pmap %p", va, pmap);
 2326         return (TRUE);
 2327 }
 2328 
 2329 /*
 2330  * pmap_remove_pde: unmap a 2MB superpage from a process's address space
 2331  */
 2332 static int
 2333 pmap_remove_pde(pmap_t pmap, pd_entry_t *pdq, vm_offset_t sva,
 2334     vm_page_t *free)
 2335 {
 2336         struct md_page *pvh;
 2337         pd_entry_t oldpde;
 2338         vm_offset_t eva, va;
 2339         vm_page_t m, mpte;
 2340 
 2341         PMAP_LOCK_ASSERT(pmap, MA_OWNED);
 2342         KASSERT((sva & PDRMASK) == 0,
 2343             ("pmap_remove_pde: sva is not 2mpage aligned"));
 2344         oldpde = pte_load_clear(pdq);
 2345         if (oldpde & PG_W)
 2346                 pmap->pm_stats.wired_count -= NBPDR / PAGE_SIZE;
 2347 
 2348         /*
 2349          * Machines that don't support invlpg also don't support
 2350          * PG_G.
 2351          */
 2352         if (oldpde & PG_G)
 2353                 pmap_invalidate_page(kernel_pmap, sva);
 2354         pmap->pm_stats.resident_count -= NBPDR / PAGE_SIZE;
 2355         if (oldpde & PG_MANAGED) {
 2356                 pvh = pa_to_pvh(oldpde & PG_PS_FRAME);
 2357                 pmap_pvh_free(pvh, pmap, sva);
 2358                 eva = sva + NBPDR;
 2359                 for (va = sva, m = PHYS_TO_VM_PAGE(oldpde & PG_PS_FRAME);
 2360                     va < eva; va += PAGE_SIZE, m++) {
 2361                         if ((oldpde & (PG_M | PG_RW)) == (PG_M | PG_RW))
 2362                                 vm_page_dirty(m);
 2363                         if (oldpde & PG_A)
 2364                                 vm_page_flag_set(m, PG_REFERENCED);
 2365                         if (TAILQ_EMPTY(&m->md.pv_list) &&
 2366                             TAILQ_EMPTY(&pvh->pv_list))
 2367                                 vm_page_flag_clear(m, PG_WRITEABLE);
 2368                 }
 2369         }
 2370         if (pmap == kernel_pmap) {
 2371                 if (!pmap_demote_pde(pmap, pdq, sva))
 2372                         panic("pmap_remove_pde: failed demotion");
 2373         } else {
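                      /*
                       * For a user pmap, reclaim the page table page, if any,
                       * that was saved when this 2MB mapping was created by
                       * promotion.
                       */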
 2374                 mpte = pmap_lookup_pt_page(pmap, sva);
 2375                 if (mpte != NULL) {
 2376                         pmap_remove_pt_page(pmap, mpte);
 2377                         pmap->pm_stats.resident_count--;
 2378                         KASSERT(mpte->wire_count == NPTEPG,
 2379                             ("pmap_remove_pde: pte page wire count error"));
 2380                         mpte->wire_count = 0;
 2381                         pmap_add_delayed_free_list(mpte, free, FALSE);
 2382                         atomic_subtract_int(&cnt.v_wire_count, 1);
 2383                 }
 2384         }
 2385         return (pmap_unuse_pt(pmap, sva, *pmap_pdpe(pmap, sva), free));
 2386 }
 2387 
 2388 /*
 2389  * pmap_remove_pte: unmap a single 4KB page from a process's address space
 2390  */
 2391 static int
 2392 pmap_remove_pte(pmap_t pmap, pt_entry_t *ptq, vm_offset_t va, 
 2393     pd_entry_t ptepde, vm_page_t *free)
 2394 {
 2395         pt_entry_t oldpte;
 2396         vm_page_t m;
 2397 
 2398         PMAP_LOCK_ASSERT(pmap, MA_OWNED);
 2399         oldpte = pte_load_clear(ptq);
 2400         if (oldpte & PG_W)
 2401                 pmap->pm_stats.wired_count -= 1;
 2402         /*
 2403          * Machines that don't support invlpg also don't support
 2404          * PG_G.
 2405          */
 2406         if (oldpte & PG_G)
 2407                 pmap_invalidate_page(kernel_pmap, va);
 2408         pmap->pm_stats.resident_count -= 1;
 2409         if (oldpte & PG_MANAGED) {
 2410                 m = PHYS_TO_VM_PAGE(oldpte & PG_FRAME);
 2411                 if ((oldpte & (PG_M | PG_RW)) == (PG_M | PG_RW))
 2412                         vm_page_dirty(m);
 2413                 if (oldpte & PG_A)
 2414                         vm_page_flag_set(m, PG_REFERENCED);
 2415                 pmap_remove_entry(pmap, m, va);
 2416         }
 2417         return (pmap_unuse_pt(pmap, va, ptepde, free));
 2418 }
 2419 
 2420 /*
 2421  * Remove a single page from a process address space
 2422  */
 2423 static void
 2424 pmap_remove_page(pmap_t pmap, vm_offset_t va, pd_entry_t *pde, vm_page_t *free)
 2425 {
 2426         pt_entry_t *pte;
 2427 
 2428         PMAP_LOCK_ASSERT(pmap, MA_OWNED);
 2429         if ((*pde & PG_V) == 0)
 2430                 return;
 2431         pte = pmap_pde_to_pte(pde, va);
 2432         if ((*pte & PG_V) == 0)
 2433                 return;
 2434         pmap_remove_pte(pmap, pte, va, *pde, free);
 2435         pmap_invalidate_page(pmap, va);
 2436 }
 2437 
 2438 /*
 2439  *      Remove the given range of addresses from the specified map.
 2440  *
 2441  *      It is assumed that the start and end are properly
 2442  *      rounded to the page size.
 2443  */
 2444 void
 2445 pmap_remove(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
 2446 {
 2447         vm_offset_t va_next;
 2448         pml4_entry_t *pml4e;
 2449         pdp_entry_t *pdpe;
 2450         pd_entry_t ptpaddr, *pde;
 2451         pt_entry_t *pte;
 2452         vm_page_t free = NULL;
 2453         int anyvalid;
 2454 
 2455         /*
 2456          * Perform an unsynchronized read.  This is, however, safe.
 2457          */
 2458         if (pmap->pm_stats.resident_count == 0)
 2459                 return;
 2460 
 2461         anyvalid = 0;
 2462 
 2463         vm_page_lock_queues();
 2464         PMAP_LOCK(pmap);
 2465 
 2466         /*
 2467          * Special handling for removing a single page.  This is a very
 2468          * common operation, so it is worth short-circuiting the general
 2469          * loop below.
 2470          */
 2471         if (sva + PAGE_SIZE == eva) {
 2472                 pde = pmap_pde(pmap, sva);
 2473                 if (pde && (*pde & PG_PS) == 0) {
 2474                         pmap_remove_page(pmap, sva, pde, &free);
 2475                         goto out;
 2476                 }
 2477         }
 2478 
 2479         for (; sva < eva; sva = va_next) {
 2480 
 2481                 if (pmap->pm_stats.resident_count == 0)
 2482                         break;
 2483 
 2484                 pml4e = pmap_pml4e(pmap, sva);
 2485                 if ((*pml4e & PG_V) == 0) {
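                              /*
                               * Skip to the next PML4 entry boundary; the check
                               * below guards against address wraparound at the
                               * top of the address space.
                               */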
 2486                         va_next = (sva + NBPML4) & ~PML4MASK;
 2487                         if (va_next < sva)
 2488                                 va_next = eva;
 2489                         continue;
 2490                 }
 2491 
 2492                 pdpe = pmap_pml4e_to_pdpe(pml4e, sva);
 2493                 if ((*pdpe & PG_V) == 0) {
 2494                         va_next = (sva + NBPDP) & ~PDPMASK;
 2495                         if (va_next < sva)
 2496                                 va_next = eva;
 2497                         continue;
 2498                 }
 2499 
 2500                 /*
 2501                  * Calculate index for next page table.
 2502                  */
 2503                 va_next = (sva + NBPDR) & ~PDRMASK;
 2504                 if (va_next < sva)
 2505                         va_next = eva;
 2506 
 2507                 pde = pmap_pdpe_to_pde(pdpe, sva);
 2508                 ptpaddr = *pde;
 2509 
 2510                 /*
 2511                  * Weed out invalid mappings.
 2512                  */
 2513                 if (ptpaddr == 0)
 2514                         continue;
 2515 
 2516                 /*
 2517                  * Check for large page.
 2518                  */
 2519                 if ((ptpaddr & PG_PS) != 0) {
 2520                         /*
 2521                          * Are we removing the entire large page?  If not,
 2522                          * demote the mapping and fall through.
 2523                          */
 2524                         if (sva + NBPDR == va_next && eva >= va_next) {
 2525                                 /*
 2526                                  * The TLB entry for a PG_G mapping is
 2527                                  * invalidated by pmap_remove_pde().
 2528                                  */
 2529                                 if ((ptpaddr & PG_G) == 0)
 2530                                         anyvalid = 1;
 2531                                 pmap_remove_pde(pmap, pde, sva, &free);
 2532                                 continue;
 2533                         } else if (!pmap_demote_pde(pmap, pde, sva)) {
 2534                                 /* The large page mapping was destroyed. */
 2535                                 continue;
 2536                         } else
 2537                                 ptpaddr = *pde;
 2538                 }
 2539 
 2540                 /*
 2541                  * Limit our scan to either the end of the va represented
 2542                  * by the current page table page, or to the end of the
 2543                  * range being removed.
 2544                  */
 2545                 if (va_next > eva)
 2546                         va_next = eva;
 2547 
 2548                 for (pte = pmap_pde_to_pte(pde, sva); sva != va_next; pte++,
 2549                     sva += PAGE_SIZE) {
 2550                         if (*pte == 0)
 2551                                 continue;
 2552 
 2553                         /*
 2554                          * The TLB entry for a PG_G mapping is invalidated
 2555                          * by pmap_remove_pte().
 2556                          */
 2557                         if ((*pte & PG_G) == 0)
 2558                                 anyvalid = 1;
 2559                         if (pmap_remove_pte(pmap, pte, sva, ptpaddr, &free))
 2560                                 break;
 2561                 }
 2562         }
 2563 out:
 2564         if (anyvalid)
 2565                 pmap_invalidate_all(pmap);
 2566         vm_page_unlock_queues();        
 2567         PMAP_UNLOCK(pmap);
 2568         pmap_free_zero_pages(free);
 2569 }
 2570 
 2571 /*
 2572  *      Routine:        pmap_remove_all
 2573  *      Function:
 2574  *              Removes this physical page from
 2575  *              all physical maps in which it resides.
 2576  *              Reflects back modify bits to the pager.
 2577  *
 2578  *      Notes:
 2579  *              Original versions of this routine were very
 2580  *              inefficient because they iteratively called
 2581  *              pmap_remove(), which is slow for this purpose.
 2582  */
 2583 
 2584 void
 2585 pmap_remove_all(vm_page_t m)
 2586 {
 2587         struct md_page *pvh;
 2588         pv_entry_t pv;
 2589         pmap_t pmap;
 2590         pt_entry_t *pte, tpte;
 2591         pd_entry_t *pde;
 2592         vm_offset_t va;
 2593         vm_page_t free;
 2594 
 2595         KASSERT((m->flags & PG_FICTITIOUS) == 0,
 2596             ("pmap_remove_all: page %p is fictitious", m));
 2597         mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 2598         pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
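              /*
               * First demote any 2MB mappings of the page so that only 4KB
               * mappings remain on the page's own pv list, which is emptied
               * by the loop below.
               */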
 2599         while ((pv = TAILQ_FIRST(&pvh->pv_list)) != NULL) {
 2600                 va = pv->pv_va;
 2601                 pmap = PV_PMAP(pv);
 2602                 PMAP_LOCK(pmap);
 2603                 pde = pmap_pde(pmap, va);
 2604                 (void)pmap_demote_pde(pmap, pde, va);
 2605                 PMAP_UNLOCK(pmap);
 2606         }
 2607         while ((pv = TAILQ_FIRST(&m->md.pv_list)) != NULL) {
 2608                 pmap = PV_PMAP(pv);
 2609                 PMAP_LOCK(pmap);
 2610                 pmap->pm_stats.resident_count--;
 2611                 pde = pmap_pde(pmap, pv->pv_va);
 2612                 KASSERT((*pde & PG_PS) == 0, ("pmap_remove_all: found"
 2613                     " a 2mpage in page %p's pv list", m));
 2614                 pte = pmap_pde_to_pte(pde, pv->pv_va);
 2615                 tpte = pte_load_clear(pte);
 2616                 if (tpte & PG_W)
 2617                         pmap->pm_stats.wired_count--;
 2618                 if (tpte & PG_A)
 2619                         vm_page_flag_set(m, PG_REFERENCED);
 2620 
 2621                 /*
 2622                  * Update the vm_page_t clean and reference bits.
 2623                  */
 2624                 if ((tpte & (PG_M | PG_RW)) == (PG_M | PG_RW))
 2625                         vm_page_dirty(m);
 2626                 free = NULL;
 2627                 pmap_unuse_pt(pmap, pv->pv_va, *pde, &free);
 2628                 pmap_invalidate_page(pmap, pv->pv_va);
 2629                 pmap_free_zero_pages(free);
 2630                 TAILQ_REMOVE(&m->md.pv_list, pv, pv_list);
 2631                 free_pv_entry(pmap, pv);
 2632                 PMAP_UNLOCK(pmap);
 2633         }
 2634         vm_page_flag_clear(m, PG_WRITEABLE);
 2635 }
 2636 
 2637 /*
 2638  * pmap_protect_pde: change the protection on a 2mpage in a process
 2639  */
 2640 static boolean_t
 2641 pmap_protect_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t sva, vm_prot_t prot)
 2642 {
 2643         pd_entry_t newpde, oldpde;
 2644         vm_offset_t eva, va;
 2645         vm_page_t m;
 2646         boolean_t anychanged;
 2647 
 2648         PMAP_LOCK_ASSERT(pmap, MA_OWNED);
 2649         KASSERT((sva & PDRMASK) == 0,
 2650             ("pmap_protect_pde: sva is not 2mpage aligned"));
 2651         anychanged = FALSE;
 2652 retry:
 2653         oldpde = newpde = *pde;
 2654         if (oldpde & PG_MANAGED) {
 2655                 eva = sva + NBPDR;
 2656                 for (va = sva, m = PHYS_TO_VM_PAGE(oldpde & PG_PS_FRAME);
 2657                     va < eva; va += PAGE_SIZE, m++) {
 2658                         /*
 2659                          * In contrast to the analogous operation on a 4KB page
 2660                          * mapping, the mapping's PG_A flag is not cleared and
 2661                          * the page's PG_REFERENCED flag is not set.  The
 2662                          * reason is that pmap_demote_pde() expects that a 2MB
 2663                          * page mapping with a stored page table page has PG_A
 2664                          * set.
 2665                          */
 2666                         if ((oldpde & (PG_M | PG_RW)) == (PG_M | PG_RW))
 2667                                 vm_page_dirty(m);
 2668                 }
 2669         }
 2670         if ((prot & VM_PROT_WRITE) == 0)
 2671                 newpde &= ~(PG_RW | PG_M);
 2672         if ((prot & VM_PROT_EXECUTE) == 0)
 2673                 newpde |= pg_nx;
 2674         if (newpde != oldpde) {
 2675                 if (!atomic_cmpset_long(pde, oldpde, newpde))
 2676                         goto retry;
 2677                 if (oldpde & PG_G)
 2678                         pmap_invalidate_page(pmap, sva);
 2679                 else
 2680                         anychanged = TRUE;
 2681         }
 2682         return (anychanged);
 2683 }
 2684 
 2685 /*
 2686  *      Set the physical protection on the
 2687  *      specified range of this map as requested.
 2688  */
 2689 void
 2690 pmap_protect(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot)
 2691 {
 2692         vm_offset_t va_next;
 2693         pml4_entry_t *pml4e;
 2694         pdp_entry_t *pdpe;
 2695         pd_entry_t ptpaddr, *pde;
 2696         pt_entry_t *pte;
 2697         int anychanged;
 2698 
 2699         if ((prot & VM_PROT_READ) == VM_PROT_NONE) {
 2700                 pmap_remove(pmap, sva, eva);
 2701                 return;
 2702         }
 2703 
 2704         if ((prot & (VM_PROT_WRITE|VM_PROT_EXECUTE)) ==
 2705             (VM_PROT_WRITE|VM_PROT_EXECUTE))
 2706                 return;
 2707 
 2708         anychanged = 0;
 2709 
 2710         vm_page_lock_queues();
 2711         PMAP_LOCK(pmap);
 2712         for (; sva < eva; sva = va_next) {
 2713 
 2714                 pml4e = pmap_pml4e(pmap, sva);
 2715                 if ((*pml4e & PG_V) == 0) {
 2716                         va_next = (sva + NBPML4) & ~PML4MASK;
 2717                         if (va_next < sva)
 2718                                 va_next = eva;
 2719                         continue;
 2720                 }
 2721 
 2722                 pdpe = pmap_pml4e_to_pdpe(pml4e, sva);
 2723                 if ((*pdpe & PG_V) == 0) {
 2724                         va_next = (sva + NBPDP) & ~PDPMASK;
 2725                         if (va_next < sva)
 2726                                 va_next = eva;
 2727                         continue;
 2728                 }
 2729 
 2730                 va_next = (sva + NBPDR) & ~PDRMASK;
 2731                 if (va_next < sva)
 2732                         va_next = eva;
 2733 
 2734                 pde = pmap_pdpe_to_pde(pdpe, sva);
 2735                 ptpaddr = *pde;
 2736 
 2737                 /*
 2738                  * Weed out invalid mappings.
 2739                  */
 2740                 if (ptpaddr == 0)
 2741                         continue;
 2742 
 2743                 /*
 2744                  * Check for large page.
 2745                  */
 2746                 if ((ptpaddr & PG_PS) != 0) {
 2747                         /*
 2748                          * Are we protecting the entire large page?  If not,
 2749                          * demote the mapping and fall through.
 2750                          */
 2751                         if (sva + NBPDR == va_next && eva >= va_next) {
 2752                                 /*
 2753                                  * The TLB entry for a PG_G mapping is
 2754                                  * invalidated by pmap_protect_pde().
 2755                                  */
 2756                                 if (pmap_protect_pde(pmap, pde, sva, prot))
 2757                                         anychanged = 1;
 2758                                 continue;
 2759                         } else if (!pmap_demote_pde(pmap, pde, sva)) {
 2760                                 /* The large page mapping was destroyed. */
 2761                                 continue;
 2762                         }
 2763                 }
 2764 
 2765                 if (va_next > eva)
 2766                         va_next = eva;
 2767 
 2768                 for (pte = pmap_pde_to_pte(pde, sva); sva != va_next; pte++,
 2769                     sva += PAGE_SIZE) {
 2770                         pt_entry_t obits, pbits;
 2771                         vm_page_t m;
 2772 
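                              /*
                               * Another CPU may concurrently set PG_A or PG_M
                               * in the PTE, so update it with an atomic cmpset
                               * and retry if the entry changed underneath us.
                               */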
 2773 retry:
 2774                         obits = pbits = *pte;
 2775                         if ((pbits & PG_V) == 0)
 2776                                 continue;
 2777                         if (pbits & PG_MANAGED) {
 2778                                 m = NULL;
 2779                                 if (pbits & PG_A) {
 2780                                         m = PHYS_TO_VM_PAGE(pbits & PG_FRAME);
 2781                                         vm_page_flag_set(m, PG_REFERENCED);
 2782                                         pbits &= ~PG_A;
 2783                                 }
 2784                                 if ((pbits & (PG_M | PG_RW)) == (PG_M | PG_RW)) {
 2785                                         if (m == NULL)
 2786                                                 m = PHYS_TO_VM_PAGE(pbits &
 2787                                                     PG_FRAME);
 2788                                         vm_page_dirty(m);
 2789                                 }
 2790                         }
 2791 
 2792                         if ((prot & VM_PROT_WRITE) == 0)
 2793                                 pbits &= ~(PG_RW | PG_M);
 2794                         if ((prot & VM_PROT_EXECUTE) == 0)
 2795                                 pbits |= pg_nx;
 2796 
 2797                         if (pbits != obits) {
 2798                                 if (!atomic_cmpset_long(pte, obits, pbits))
 2799                                         goto retry;
 2800                                 if (obits & PG_G)
 2801                                         pmap_invalidate_page(pmap, sva);
 2802                                 else
 2803                                         anychanged = 1;
 2804                         }
 2805                 }
 2806         }
 2807         if (anychanged)
 2808                 pmap_invalidate_all(pmap);
 2809         vm_page_unlock_queues();
 2810         PMAP_UNLOCK(pmap);
 2811 }
 2812 
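/*
 * [Editorial example -- not part of the original pmap.c.]  The inner loop of
 * pmap_protect() above downgrades each PTE with a compare-and-set retry so
 * that PG_A/PG_M updates made concurrently by the MMU are never lost.  A
 * minimal user-space sketch of that idiom, using C11 atomics and invented
 * X_PG_* constants in place of the kernel's PG_* macros:
 */
#include <stdatomic.h>
#include <stdint.h>

#define X_PG_RW 0x002ULL                /* hypothetical writable bit */
#define X_PG_M  0x040ULL                /* hypothetical modified bit */

/* Clear the write and modified bits without losing concurrent updates. */
static inline void
pte_downgrade(_Atomic uint64_t *pte)
{
        uint64_t obits, pbits;

        do {
                obits = atomic_load(pte);
                pbits = obits & ~(X_PG_RW | X_PG_M);
                if (pbits == obits)
                        return;         /* nothing to change */
        } while (!atomic_compare_exchange_weak(pte, &obits, pbits));
}
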
 2813 /*
 2814  * Tries to promote the 512 contiguous 4KB page mappings that are within a
 2815  * single page table page (PTP) to a single 2MB page mapping.  For promotion
 2816  * to occur, two conditions must be met: (1) the 4KB page mappings must map
 2817  * aligned, contiguous physical memory and (2) the 4KB page mappings must have
 2818  * identical characteristics. 
 2819  */
 2820 static void
 2821 pmap_promote_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t va)
 2822 {
 2823         pd_entry_t newpde;
 2824         pt_entry_t *firstpte, oldpte, pa, *pte;
 2825         vm_offset_t oldpteva;
 2826         vm_page_t mpte;
 2827 
 2828         PMAP_LOCK_ASSERT(pmap, MA_OWNED);
 2829 
 2830         /*
 2831          * Examine the first PTE in the specified PTP.  Abort if this PTE is
 2832          * either invalid, unused, or does not map the first 4KB physical page
 2833          * within a 2MB page. 
 2834          */
 2835         firstpte = (pt_entry_t *)PHYS_TO_DMAP(*pde & PG_FRAME);
 2836 setpde:
 2837         newpde = *firstpte;
 2838         if ((newpde & ((PG_FRAME & PDRMASK) | PG_A | PG_V)) != (PG_A | PG_V)) {
 2839                 pmap_pde_p_failures++;
 2840                 CTR2(KTR_PMAP, "pmap_promote_pde: failure for va %#lx"
 2841                     " in pmap %p", va, pmap);
 2842                 return;
 2843         }
 2844         if ((newpde & (PG_M | PG_RW)) == PG_RW) {
 2845                 /*
 2846                  * When PG_M is already clear, PG_RW can be cleared without
 2847                  * a TLB invalidation.
 2848                  */
 2849                 if (!atomic_cmpset_long(firstpte, newpde, newpde & ~PG_RW))
 2850                         goto setpde;
 2851                 newpde &= ~PG_RW;
 2852         }
 2853 
 2854         /*
 2855          * Examine each of the other PTEs in the specified PTP.  Abort if this
 2856          * PTE maps an unexpected 4KB physical page or does not have identical
 2857          * characteristics to the first PTE.
 2858          */
 2859         pa = (newpde & (PG_PS_FRAME | PG_A | PG_V)) + NBPDR - PAGE_SIZE;
 2860         for (pte = firstpte + NPTEPG - 1; pte > firstpte; pte--) {
 2861 setpte:
 2862                 oldpte = *pte;
 2863                 if ((oldpte & (PG_FRAME | PG_A | PG_V)) != pa) {
 2864                         pmap_pde_p_failures++;
 2865                         CTR2(KTR_PMAP, "pmap_promote_pde: failure for va %#lx"
 2866                             " in pmap %p", va, pmap);
 2867                         return;
 2868                 }
 2869                 if ((oldpte & (PG_M | PG_RW)) == PG_RW) {
 2870                         /*
 2871                          * When PG_M is already clear, PG_RW can be cleared
 2872                          * without a TLB invalidation.
 2873                          */
 2874                         if (!atomic_cmpset_long(pte, oldpte, oldpte & ~PG_RW))
 2875                                 goto setpte;
 2876                         oldpte &= ~PG_RW;
 2877                         oldpteva = (oldpte & PG_FRAME & PDRMASK) |
 2878                             (va & ~PDRMASK);
 2879                         CTR2(KTR_PMAP, "pmap_promote_pde: protect for va %#lx"
 2880                             " in pmap %p", oldpteva, pmap);
 2881                 }
 2882                 if ((oldpte & PG_PTE_PROMOTE) != (newpde & PG_PTE_PROMOTE)) {
 2883                         pmap_pde_p_failures++;
 2884                         CTR2(KTR_PMAP, "pmap_promote_pde: failure for va %#lx"
 2885                             " in pmap %p", va, pmap);
 2886                         return;
 2887                 }
 2888                 pa -= PAGE_SIZE;
 2889         }
 2890 
 2891         /*
 2892          * Save the page table page in its current state until the PDE
 2893          * mapping the superpage is demoted by pmap_demote_pde() or
 2894          * destroyed by pmap_remove_pde(). 
 2895          */
 2896         mpte = PHYS_TO_VM_PAGE(*pde & PG_FRAME);
 2897         KASSERT(mpte >= vm_page_array &&
 2898             mpte < &vm_page_array[vm_page_array_size],
 2899             ("pmap_promote_pde: page table page is out of range"));
 2900         KASSERT(mpte->pindex == pmap_pde_pindex(va),
 2901             ("pmap_promote_pde: page table page's pindex is wrong"));
 2902         pmap_insert_pt_page(pmap, mpte);
 2903 
 2904         /*
 2905          * Promote the pv entries.
 2906          */
 2907         if ((newpde & PG_MANAGED) != 0)
 2908                 pmap_pv_promote_pde(pmap, va, newpde & PG_PS_FRAME);
 2909 
 2910         /*
 2911          * Propagate the PAT index to its proper position.
 2912          */
 2913         if ((newpde & PG_PTE_PAT) != 0)
 2914                 newpde ^= PG_PDE_PAT | PG_PTE_PAT;
 2915 
 2916         /*
 2917          * Map the superpage.
 2918          */
 2919         pde_store(pde, PG_PS | newpde);
 2920 
 2921         pmap_pde_promotions++;
 2922         CTR2(KTR_PMAP, "pmap_promote_pde: success for va %#lx"
 2923             " in pmap %p", va, pmap);
 2924 }
 2925 
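/*
 * [Editorial example -- not part of the original pmap.c.]  The promotion test
 * above walks the page table page backwards, requiring that the first PTE map
 * a 2MB-aligned frame and that every other PTE map the expected contiguous
 * frame with identical attribute bits (it also atomically clears PG_RW on
 * clean-but-writable PTEs, which this sketch omits).  A simplified user-space
 * model of the check, with invented X_* constants standing in for the PG_*
 * macros:
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

#define X_PAGE_SIZE   4096ULL
#define X_NPTEPG      512                      /* PTEs per page table page */
#define X_PG_FRAME    0x000ffffffffff000ULL    /* physical frame mask */
#define X_PG_ATTRS    0x19fULL                 /* bits that must match (made up) */

static bool
promotion_candidate(const uint64_t pte[X_NPTEPG])
{
        uint64_t base = pte[0] & X_PG_FRAME;
        uint64_t attrs = pte[0] & X_PG_ATTRS;

        /* The first frame must be 2MB-aligned (512 * 4KB). */
        if (base % (X_NPTEPG * X_PAGE_SIZE) != 0)
                return (false);
        for (size_t i = 1; i < X_NPTEPG; i++) {
                if ((pte[i] & X_PG_FRAME) != base + i * X_PAGE_SIZE)
                        return (false);
                if ((pte[i] & X_PG_ATTRS) != attrs)
                        return (false);
        }
        return (true);
}
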
 2926 /*
 2927  *      Insert the given physical page (p) at
 2928  *      the specified virtual address (v) in the
 2929  *      target physical map with the protection requested.
 2930  *
 2931  *      If specified, the page will be wired down, meaning
 2932  *      that the related pte can not be reclaimed.
 2933  *
 2934  *      NB:  This is the only routine which MAY NOT lazy-evaluate
 2935  *      or lose information.  That is, this routine must actually
 2936  *      insert this page into the given map NOW.
 2937  */
 2938 void
 2939 pmap_enter(pmap_t pmap, vm_offset_t va, vm_prot_t access, vm_page_t m,
 2940     vm_prot_t prot, boolean_t wired)
 2941 {
 2942         vm_paddr_t pa;
 2943         pd_entry_t *pde;
 2944         pt_entry_t *pte;
 2945         vm_paddr_t opa;
 2946         pt_entry_t origpte, newpte;
 2947         vm_page_t mpte, om;
 2948         boolean_t invlva;
 2949 
 2950         va = trunc_page(va);
 2951         KASSERT(va <= VM_MAX_KERNEL_ADDRESS, ("pmap_enter: toobig"));
 2952         KASSERT(va < UPT_MIN_ADDRESS || va >= UPT_MAX_ADDRESS,
 2953             ("pmap_enter: invalid to pmap_enter page table pages (va: 0x%lx)", va));
 2954 
 2955         mpte = NULL;
 2956 
 2957         vm_page_lock_queues();
 2958         PMAP_LOCK(pmap);
 2959 
 2960         /*
 2961          * In the case that a page table page is not
 2962          * resident, we are creating it here.
 2963          */
 2964         if (va < VM_MAXUSER_ADDRESS) {
 2965                 mpte = pmap_allocpte(pmap, va, M_WAITOK);
 2966         }
 2967 
 2968         pde = pmap_pde(pmap, va);
 2969         if (pde != NULL && (*pde & PG_V) != 0) {
 2970                 if ((*pde & PG_PS) != 0)
 2971                         panic("pmap_enter: attempted pmap_enter on 2MB page");
 2972                 pte = pmap_pde_to_pte(pde, va);
 2973         } else
 2974                 panic("pmap_enter: invalid page directory va=%#lx", va);
 2975 
 2976         pa = VM_PAGE_TO_PHYS(m);
 2977         om = NULL;
 2978         origpte = *pte;
 2979         opa = origpte & PG_FRAME;
 2980 
 2981         /*
 2982          * Mapping has not changed, must be protection or wiring change.
 2983          */
 2984         if (origpte && (opa == pa)) {
 2985                 /*
 2986                  * Wiring change, just update stats. We don't worry about
 2987                  * wiring PT pages as they remain resident as long as there
 2988                  * are valid mappings in them. Hence, if a user page is wired,
 2989                  * the PT page will be also.
 2990                  */
 2991                 if (wired && ((origpte & PG_W) == 0))
 2992                         pmap->pm_stats.wired_count++;
 2993                 else if (!wired && (origpte & PG_W))
 2994                         pmap->pm_stats.wired_count--;
 2995 
 2996                 /*
 2997                  * Remove extra pte reference
 2998                  */
 2999                 if (mpte)
 3000                         mpte->wire_count--;
 3001 
 3002                 /*
 3003                  * We might be turning off write access to the page,
 3004                  * so we go ahead and sense modify status.
 3005                  */
 3006                 if (origpte & PG_MANAGED) {
 3007                         om = m;
 3008                         pa |= PG_MANAGED;
 3009                 }
 3010                 goto validate;
 3011         } 
 3012         /*
 3013          * Mapping has changed, invalidate old range and fall through to
 3014          * handle validating new mapping.
 3015          */
 3016         if (opa) {
 3017                 if (origpte & PG_W)
 3018                         pmap->pm_stats.wired_count--;
 3019                 if (origpte & PG_MANAGED) {
 3020                         om = PHYS_TO_VM_PAGE(opa);
 3021                         pmap_remove_entry(pmap, om, va);
 3022                 }
 3023                 if (mpte != NULL) {
 3024                         mpte->wire_count--;
 3025                         KASSERT(mpte->wire_count > 0,
 3026                             ("pmap_enter: missing reference to page table page,"
 3027                              " va: 0x%lx", va));
 3028                 }
 3029         } else
 3030                 pmap->pm_stats.resident_count++;
 3031 
 3032         /*
 3033          * Enter on the PV list if part of our managed memory.
 3034          */
 3035         if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0) {
 3036                 KASSERT(va < kmi.clean_sva || va >= kmi.clean_eva,
 3037                     ("pmap_enter: managed mapping within the clean submap"));
 3038                 pmap_insert_entry(pmap, va, m);
 3039                 pa |= PG_MANAGED;
 3040         }
 3041 
 3042         /*
 3043          * Increment counters
 3044          */
 3045         if (wired)
 3046                 pmap->pm_stats.wired_count++;
 3047 
 3048 validate:
 3049         /*
 3050          * Now validate mapping with desired protection/wiring.
 3051          */
 3052         newpte = (pt_entry_t)(pa | pmap_cache_bits(m->md.pat_mode, 0) | PG_V);
 3053         if ((prot & VM_PROT_WRITE) != 0) {
 3054                 newpte |= PG_RW;
 3055                 vm_page_flag_set(m, PG_WRITEABLE);
 3056         }
 3057         if ((prot & VM_PROT_EXECUTE) == 0)
 3058                 newpte |= pg_nx;
 3059         if (wired)
 3060                 newpte |= PG_W;
 3061         if (va < VM_MAXUSER_ADDRESS)
 3062                 newpte |= PG_U;
 3063         if (pmap == kernel_pmap)
 3064                 newpte |= PG_G;
 3065 
 3066         /*
 3067          * if the mapping or permission bits are different, we need
 3068          * to update the pte.
 3069          */
 3070         if ((origpte & ~(PG_M|PG_A)) != newpte) {
 3071                 newpte |= PG_A;
 3072                 if ((access & VM_PROT_WRITE) != 0)
 3073                         newpte |= PG_M;
 3074                 if (origpte & PG_V) {
 3075                         invlva = FALSE;
 3076                         origpte = pte_load_store(pte, newpte);
 3077                         if (origpte & PG_A) {
 3078                                 if (origpte & PG_MANAGED)
 3079                                         vm_page_flag_set(om, PG_REFERENCED);
 3080                                 if (opa != VM_PAGE_TO_PHYS(m) || ((origpte &
 3081                                     PG_NX) == 0 && (newpte & PG_NX)))
 3082                                         invlva = TRUE;
 3083                         }
 3084                         if ((origpte & (PG_M | PG_RW)) == (PG_M | PG_RW)) {
 3085                                 if ((origpte & PG_MANAGED) != 0)
 3086                                         vm_page_dirty(om);
 3087                                 if ((newpte & PG_RW) == 0)
 3088                                         invlva = TRUE;
 3089                         }
 3090                         if (invlva)
 3091                                 pmap_invalidate_page(pmap, va);
 3092                 } else
 3093                         pte_store(pte, newpte);
 3094         }
 3095 
 3096         /*
 3097          * If both the page table page and the reservation are fully
 3098          * populated, then attempt promotion.
 3099          */
 3100         if ((mpte == NULL || mpte->wire_count == NPTEPG) &&
 3101             pg_ps_enabled && vm_reserv_level_iffullpop(m) == 0)
 3102                 pmap_promote_pde(pmap, pde, va);
 3103 
 3104         vm_page_unlock_queues();
 3105         PMAP_UNLOCK(pmap);
 3106 }
 3107 
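/*
 * [Editorial example -- not part of the original pmap.c.]  pmap_enter() above
 * builds the new PTE from the physical address plus flag bits derived from
 * the protection, wiring and address range (the real code also folds in PAT
 * cache bits and seeds PG_A/PG_M, which this sketch omits).  Reduced to its
 * essentials, with hypothetical X_* constants in place of the PG_* macros and
 * pg_nx, the construction is roughly:
 */
#include <stdbool.h>
#include <stdint.h>

#define X_PG_V   0x001ULL               /* valid */
#define X_PG_RW  0x002ULL               /* writable */
#define X_PG_U   0x004ULL               /* user accessible */
#define X_PG_G   0x100ULL               /* global (kernel mappings) */
#define X_PG_W   0x200ULL               /* software "wired" bit */
#define X_PG_NX  (1ULL << 63)           /* no-execute */

#define X_PROT_WRITE 0x2
#define X_PROT_EXEC  0x4

static uint64_t
make_pte(uint64_t pa, int prot, bool wired, bool user, bool kernel_pmap)
{
        uint64_t pte = pa | X_PG_V;

        if (prot & X_PROT_WRITE)
                pte |= X_PG_RW;
        if ((prot & X_PROT_EXEC) == 0)
                pte |= X_PG_NX;
        if (wired)
                pte |= X_PG_W;
        if (user)
                pte |= X_PG_U;
        if (kernel_pmap)
                pte |= X_PG_G;
        return (pte);
}
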
 3108 /*
 3109  * Tries to create a 2MB page mapping.  Returns TRUE if successful and FALSE
 3110  * otherwise.  Fails if (1) a page table page cannot be allocated without
 3111  * blocking, (2) a mapping already exists at the specified virtual address, or
 3112  * (3) a pv entry cannot be allocated without reclaiming another pv entry. 
 3113  */
 3114 static boolean_t
 3115 pmap_enter_pde(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot)
 3116 {
 3117         pd_entry_t *pde, newpde;
 3118         vm_page_t free, mpde;
 3119 
 3120         mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 3121         PMAP_LOCK_ASSERT(pmap, MA_OWNED);
 3122         if ((mpde = pmap_allocpde(pmap, va, M_NOWAIT)) == NULL) {
 3123                 CTR2(KTR_PMAP, "pmap_enter_pde: failure for va %#lx"
 3124                     " in pmap %p", va, pmap);
 3125                 return (FALSE);
 3126         }
 3127         pde = (pd_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(mpde));
 3128         pde = &pde[pmap_pde_index(va)];
 3129         if ((*pde & PG_V) != 0) {
 3130                 KASSERT(mpde->wire_count > 1,
 3131                     ("pmap_enter_pde: mpde's wire count is too low"));
 3132                 mpde->wire_count--;
 3133                 CTR2(KTR_PMAP, "pmap_enter_pde: failure for va %#lx"
 3134                     " in pmap %p", va, pmap);
 3135                 return (FALSE);
 3136         }
 3137         newpde = VM_PAGE_TO_PHYS(m) | pmap_cache_bits(m->md.pat_mode, 1) |
 3138             PG_PS | PG_V;
 3139         if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0) {
 3140                 newpde |= PG_MANAGED;
 3141 
 3142                 /*
 3143                  * Abort this mapping if its PV entry could not be created.
 3144                  */
 3145                 if (!pmap_pv_insert_pde(pmap, va, VM_PAGE_TO_PHYS(m))) {
 3146                         free = NULL;
 3147                         if (pmap_unwire_pte_hold(pmap, va, mpde, &free)) {
 3148                                 pmap_invalidate_page(pmap, va);
 3149                                 pmap_free_zero_pages(free);
 3150                         }
 3151                         CTR2(KTR_PMAP, "pmap_enter_pde: failure for va %#lx"
 3152                             " in pmap %p", va, pmap);
 3153                         return (FALSE);
 3154                 }
 3155         }
 3156         if ((prot & VM_PROT_EXECUTE) == 0)
 3157                 newpde |= pg_nx;
 3158         if (va < VM_MAXUSER_ADDRESS)
 3159                 newpde |= PG_U;
 3160 
 3161         /*
 3162          * Increment counters.
 3163          */
 3164         pmap->pm_stats.resident_count += NBPDR / PAGE_SIZE;
 3165 
 3166         /*
 3167          * Map the superpage.
 3168          */
 3169         pde_store(pde, newpde);
 3170 
 3171         pmap_pde_mappings++;
 3172         CTR2(KTR_PMAP, "pmap_enter_pde: success for va %#lx"
 3173             " in pmap %p", va, pmap);
 3174         return (TRUE);
 3175 }
 3176 
 3177 /*
 3178  * Maps a sequence of resident pages belonging to the same object.
 3179  * The sequence begins with the given page m_start.  This page is
 3180  * mapped at the given virtual address start.  Each subsequent page is
 3181  * mapped at a virtual address that is offset from start by the same
 3182  * amount as the page is offset from m_start within the object.  The
 3183  * last page in the sequence is the page with the largest offset from
 3184  * m_start that can be mapped at a virtual address less than the given
 3185  * virtual address end.  Not every virtual page between start and end
 3186  * is mapped; only those for which a resident page exists with the
 3187  * corresponding offset from m_start are mapped.
 3188  */
 3189 void
 3190 pmap_enter_object(pmap_t pmap, vm_offset_t start, vm_offset_t end,
 3191     vm_page_t m_start, vm_prot_t prot)
 3192 {
 3193         vm_offset_t va;
 3194         vm_page_t m, mpte;
 3195         vm_pindex_t diff, psize;
 3196 
 3197         VM_OBJECT_LOCK_ASSERT(m_start->object, MA_OWNED);
 3198         psize = atop(end - start);
 3199         mpte = NULL;
 3200         m = m_start;
 3201         PMAP_LOCK(pmap);
 3202         while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) {
 3203                 va = start + ptoa(diff);
 3204                 if ((va & PDRMASK) == 0 && va + NBPDR <= end &&
 3205                     (VM_PAGE_TO_PHYS(m) & PDRMASK) == 0 &&
 3206                     pg_ps_enabled && vm_reserv_level_iffullpop(m) == 0 &&
 3207                     pmap_enter_pde(pmap, va, m, prot))
 3208                         m = &m[NBPDR / PAGE_SIZE - 1];
 3209                 else
 3210                         mpte = pmap_enter_quick_locked(pmap, va, m, prot,
 3211                             mpte);
 3212                 m = TAILQ_NEXT(m, listq);
 3213         }
 3214         PMAP_UNLOCK(pmap);
 3215 }
 3216 
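/*
 * [Editorial example -- not part of the original pmap.c.]  The superpage test
 * in pmap_enter_object() above only attempts a 2MB mapping when both the
 * virtual and the physical address are 2MB-aligned and a whole 2MB fits below
 * "end"; the real code additionally requires pg_ps_enabled and a fully
 * populated reservation.  In isolation, with a made-up X_NBPDR constant, the
 * alignment test is simply:
 */
#include <stdbool.h>
#include <stdint.h>

#define X_NBPDR   (2ULL * 1024 * 1024)  /* bytes mapped by one PDE */
#define X_PDRMASK (X_NBPDR - 1)

static bool
can_map_2mb(uint64_t va, uint64_t pa, uint64_t end)
{
        return ((va & X_PDRMASK) == 0 &&
            va + X_NBPDR <= end &&
            (pa & X_PDRMASK) == 0);
}
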
 3217 /*
 3218  * this code makes some *MAJOR* assumptions:
 3219  * 1. The current pmap and the given pmap exist.
 3220  * 2. Not wired.
 3221  * 3. Read access.
 3222  * 4. No page table pages.
 3223  * but is *MUCH* faster than pmap_enter...
 3224  */
 3225 
 3226 void
 3227 pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot)
 3228 {
 3229 
 3230         PMAP_LOCK(pmap);
 3231         (void) pmap_enter_quick_locked(pmap, va, m, prot, NULL);
 3232         PMAP_UNLOCK(pmap);
 3233 }
 3234 
 3235 static vm_page_t
 3236 pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va, vm_page_t m,
 3237     vm_prot_t prot, vm_page_t mpte)
 3238 {
 3239         vm_page_t free;
 3240         pt_entry_t *pte;
 3241         vm_paddr_t pa;
 3242 
 3243         KASSERT(va < kmi.clean_sva || va >= kmi.clean_eva ||
 3244             (m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0,
 3245             ("pmap_enter_quick_locked: managed mapping within the clean submap"));
 3246         mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 3247         PMAP_LOCK_ASSERT(pmap, MA_OWNED);
 3248 
 3249         /*
 3250          * In the case that a page table page is not
 3251          * resident, we are creating it here.
 3252          */
 3253         if (va < VM_MAXUSER_ADDRESS) {
 3254                 vm_pindex_t ptepindex;
 3255                 pd_entry_t *ptepa;
 3256 
 3257                 /*
 3258                  * Calculate pagetable page index
 3259                  */
 3260                 ptepindex = pmap_pde_pindex(va);
 3261                 if (mpte && (mpte->pindex == ptepindex)) {
 3262                         mpte->wire_count++;
 3263                 } else {
 3264                         /*
 3265                          * Get the page directory entry
 3266                          */
 3267                         ptepa = pmap_pde(pmap, va);
 3268 
 3269                         /*
 3270                          * If the page table page is mapped, we just increment
 3271                          * the hold count, and activate it.
 3272                          */
 3273                         if (ptepa && (*ptepa & PG_V) != 0) {
 3274                                 if (*ptepa & PG_PS)
 3275                                         return (NULL);
 3276                                 mpte = PHYS_TO_VM_PAGE(*ptepa & PG_FRAME);
 3277                                 mpte->wire_count++;
 3278                         } else {
 3279                                 mpte = _pmap_allocpte(pmap, ptepindex,
 3280                                     M_NOWAIT);
 3281                                 if (mpte == NULL)
 3282                                         return (mpte);
 3283                         }
 3284                 }
 3285                 pte = (pt_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(mpte));
 3286                 pte = &pte[pmap_pte_index(va)];
 3287         } else {
 3288                 mpte = NULL;
 3289                 pte = vtopte(va);
 3290         }
 3291         if (*pte) {
 3292                 if (mpte != NULL) {
 3293                         mpte->wire_count--;
 3294                         mpte = NULL;
 3295                 }
 3296                 return (mpte);
 3297         }
 3298 
 3299         /*
 3300          * Enter on the PV list if part of our managed memory.
 3301          */
 3302         if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0 &&
 3303             !pmap_try_insert_pv_entry(pmap, va, m)) {
 3304                 if (mpte != NULL) {
 3305                         free = NULL;
 3306                         if (pmap_unwire_pte_hold(pmap, va, mpte, &free)) {
 3307                                 pmap_invalidate_page(pmap, va);
 3308                                 pmap_free_zero_pages(free);
 3309                         }
 3310                         mpte = NULL;
 3311                 }
 3312                 return (mpte);
 3313         }
 3314 
 3315         /*
 3316          * Increment counters
 3317          */
 3318         pmap->pm_stats.resident_count++;
 3319 
 3320         pa = VM_PAGE_TO_PHYS(m) | pmap_cache_bits(m->md.pat_mode, 0);
 3321         if ((prot & VM_PROT_EXECUTE) == 0)
 3322                 pa |= pg_nx;
 3323 
 3324         /*
 3325          * Now validate mapping with RO protection
 3326          */
 3327         if (m->flags & (PG_FICTITIOUS|PG_UNMANAGED))
 3328                 pte_store(pte, pa | PG_V | PG_U);
 3329         else
 3330                 pte_store(pte, pa | PG_V | PG_U | PG_MANAGED);
 3331         return mpte;
 3332 }
 3333 
 3334 /*
 3335  * Make a temporary mapping for a physical address.  This is only intended
 3336  * to be used for panic dumps.
 3337  */
 3338 void *
 3339 pmap_kenter_temporary(vm_paddr_t pa, int i)
 3340 {
 3341         vm_offset_t va;
 3342 
 3343         va = (vm_offset_t)crashdumpmap + (i * PAGE_SIZE);
 3344         pmap_kenter(va, pa);
 3345         invlpg(va);
 3346         return ((void *)crashdumpmap);
 3347 }
 3348 
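/*
 * [Editorial note -- not part of the original pmap.c.]  pmap_kenter_temporary()
 * maps the physical page "pa" at slot "i" of crashdumpmap but always returns
 * the base of crashdumpmap, so a dump routine is expected to fill consecutive
 * slots and then address slot i at (char *)base + i * PAGE_SIZE.  A
 * hypothetical caller (names invented) might look like:
 *
 *      void *base = NULL;
 *      for (int i = 0; i < n; i++)
 *              base = pmap_kenter_temporary(pa + (vm_paddr_t)i * PAGE_SIZE, i);
 *      dump_write((char *)base, n * PAGE_SIZE);    (invented consumer)
 */
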
 3349 /*
 3350  * This code maps large physical mmap regions into the
 3351  * processor address space.  Note that some shortcuts
 3352  * are taken, but the code works.
 3353  */
 3354 void
 3355 pmap_object_init_pt(pmap_t pmap, vm_offset_t addr, vm_object_t object,
 3356     vm_pindex_t pindex, vm_size_t size)
 3357 {
 3358         pd_entry_t *pde;
 3359         vm_paddr_t pa, ptepa;
 3360         vm_page_t p, pdpg;
 3361         int pat_mode;
 3362 
 3363         VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
 3364         KASSERT(object->type == OBJT_DEVICE || object->type == OBJT_SG,
 3365             ("pmap_object_init_pt: non-device object"));
 3366         if ((addr & (NBPDR - 1)) == 0 && (size & (NBPDR - 1)) == 0) {
 3367                 if (!vm_object_populate(object, pindex, pindex + atop(size)))
 3368                         return;
 3369                 p = vm_page_lookup(object, pindex);
 3370                 KASSERT(p->valid == VM_PAGE_BITS_ALL,
 3371                     ("pmap_object_init_pt: invalid page %p", p));
 3372                 pat_mode = p->md.pat_mode;
 3373 
 3374                 /*
 3375                  * Abort the mapping if the first page is not physically
 3376                  * aligned to a 2MB page boundary.
 3377                  */
 3378                 ptepa = VM_PAGE_TO_PHYS(p);
 3379                 if (ptepa & (NBPDR - 1))
 3380                         return;
 3381 
 3382                 /*
 3383                  * Skip the first page.  Abort the mapping if the rest of
 3384                  * the pages are not physically contiguous or have differing
 3385                  * memory attributes.
 3386                  */
 3387                 p = TAILQ_NEXT(p, listq);
 3388                 for (pa = ptepa + PAGE_SIZE; pa < ptepa + size;
 3389                     pa += PAGE_SIZE) {
 3390                         KASSERT(p->valid == VM_PAGE_BITS_ALL,
 3391                             ("pmap_object_init_pt: invalid page %p", p));
 3392                         if (pa != VM_PAGE_TO_PHYS(p) ||
 3393                             pat_mode != p->md.pat_mode)
 3394                                 return;
 3395                         p = TAILQ_NEXT(p, listq);
 3396                 }
 3397 
 3398                 /*
 3399                  * Map using 2MB pages.  Since "ptepa" is 2M aligned and
 3400                  * "size" is a multiple of 2M, adding the PAT setting to "pa"
 3401                  * will not affect the termination of this loop.
 3402                  */ 
 3403                 PMAP_LOCK(pmap);
 3404                 for (pa = ptepa | pmap_cache_bits(pat_mode, 1); pa < ptepa +
 3405                     size; pa += NBPDR) {
 3406                         pdpg = pmap_allocpde(pmap, addr, M_NOWAIT);
 3407                         if (pdpg == NULL) {
 3408                                 /*
 3409                                  * The creation of mappings below is only an
 3410                                  * optimization.  If a page directory page
 3411                                  * cannot be allocated without blocking,
 3412                                  * continue on to the next mapping rather than
 3413                                  * blocking.
 3414                                  */
 3415                                 addr += NBPDR;
 3416                                 continue;
 3417                         }
 3418                         pde = (pd_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(pdpg));
 3419                         pde = &pde[pmap_pde_index(addr)];
 3420                         if ((*pde & PG_V) == 0) {
 3421                                 pde_store(pde, pa | PG_PS | PG_M | PG_A |
 3422                                     PG_U | PG_RW | PG_V);
 3423                                 pmap->pm_stats.resident_count += NBPDR /
 3424                                     PAGE_SIZE;
 3425                                 pmap_pde_mappings++;
 3426                         } else {
 3427                                 /* Continue on if the PDE is already valid. */
 3428                                 pdpg->wire_count--;
 3429                                 KASSERT(pdpg->wire_count > 0,
 3430                                     ("pmap_object_init_pt: missing reference "
 3431                                     "to page directory page, va: 0x%lx", addr));
 3432                         }
 3433                         addr += NBPDR;
 3434                 }
 3435                 PMAP_UNLOCK(pmap);
 3436         }
 3437 }
 3438 
 3439 /*
 3440  *      Routine:        pmap_change_wiring
 3441  *      Function:       Change the wiring attribute for a map/virtual-address
 3442  *                      pair.
 3443  *      In/out conditions:
 3444  *                      The mapping must already exist in the pmap.
 3445  */
 3446 void
 3447 pmap_change_wiring(pmap_t pmap, vm_offset_t va, boolean_t wired)
 3448 {
 3449         pd_entry_t *pde;
 3450         pt_entry_t *pte;
 3451         boolean_t are_queues_locked;
 3452 
 3453         are_queues_locked = FALSE;
 3454 
 3455         /*
 3456          * Wiring is not a hardware characteristic so there is no need to
 3457          * invalidate TLB.
 3458          */
 3459 retry:
 3460         PMAP_LOCK(pmap);
 3461         pde = pmap_pde(pmap, va);
 3462         if ((*pde & PG_PS) != 0) {
 3463                 if (!wired != ((*pde & PG_W) == 0)) {
 3464                         if (!are_queues_locked) {
 3465                                 are_queues_locked = TRUE;
 3466                                 if (!mtx_trylock(&vm_page_queue_mtx)) {
 3467                                         PMAP_UNLOCK(pmap);
 3468                                         vm_page_lock_queues();
 3469                                         goto retry;
 3470                                 }
 3471                         }
 3472                         if (!pmap_demote_pde(pmap, pde, va))
 3473                                 panic("pmap_change_wiring: demotion failed");
 3474                 } else
 3475                         goto out;
 3476         }
 3477         pte = pmap_pde_to_pte(pde, va);
 3478         if (wired && (*pte & PG_W) == 0) {
 3479                 pmap->pm_stats.wired_count++;
 3480                 atomic_set_long(pte, PG_W);
 3481         } else if (!wired && (*pte & PG_W) != 0) {
 3482                 pmap->pm_stats.wired_count--;
 3483                 atomic_clear_long(pte, PG_W);
 3484         }
 3485 out:
 3486         if (are_queues_locked)
 3487                 vm_page_unlock_queues();
 3488         PMAP_UNLOCK(pmap);
 3489 }
 3490 
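/*
 * [Editorial example -- not part of the original pmap.c.]  The retry dance in
 * pmap_change_wiring() above (taken only on the 2MB demotion path) exists to
 * respect the lock order "page queue mutex before pmap lock": with the pmap
 * lock already held, the queue mutex may only be try-locked, and on failure
 * everything is dropped and reacquired in the documented order before the
 * state is revalidated.  A user-space pthreads sketch of the same idiom,
 * where lock_a must always be taken before lock_b:
 */
#include <pthread.h>

static pthread_mutex_t lock_a = PTHREAD_MUTEX_INITIALIZER;  /* "page queues" */
static pthread_mutex_t lock_b = PTHREAD_MUTEX_INITIALIZER;  /* "pmap lock"   */

static void
lock_both_in_order(void)
{
        int have_a = 0;
retry:
        pthread_mutex_lock(&lock_b);
        if (!have_a && pthread_mutex_trylock(&lock_a) != 0) {
                /* Blocking on lock_a here would invert the order; back off. */
                pthread_mutex_unlock(&lock_b);
                pthread_mutex_lock(&lock_a);
                have_a = 1;
                goto retry;             /* revalidate state under both locks */
        }
        have_a = 1;
        /* ... both locks are held here, in the documented order ... */
        pthread_mutex_unlock(&lock_a);
        pthread_mutex_unlock(&lock_b);
}
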
 3491 
 3492 
 3493 /*
 3494  *      Copy the range specified by src_addr/len
 3495  *      from the source map to the range dst_addr/len
 3496  *      in the destination map.
 3497  *
 3498  *      This routine is only advisory and need not do anything.
 3499  */
 3500 
 3501 void
 3502 pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr, vm_size_t len,
 3503     vm_offset_t src_addr)
 3504 {
 3505         vm_page_t   free;
 3506         vm_offset_t addr;
 3507         vm_offset_t end_addr = src_addr + len;
 3508         vm_offset_t va_next;
 3509 
 3510         if (dst_addr != src_addr)
 3511                 return;
 3512 
 3513         vm_page_lock_queues();
 3514         if (dst_pmap < src_pmap) {
 3515                 PMAP_LOCK(dst_pmap);
 3516                 PMAP_LOCK(src_pmap);
 3517         } else {
 3518                 PMAP_LOCK(src_pmap);
 3519                 PMAP_LOCK(dst_pmap);
 3520         }
 3521         for (addr = src_addr; addr < end_addr; addr = va_next) {
 3522                 pt_entry_t *src_pte, *dst_pte;
 3523                 vm_page_t dstmpde, dstmpte, srcmpte;
 3524                 pml4_entry_t *pml4e;
 3525                 pdp_entry_t *pdpe;
 3526                 pd_entry_t srcptepaddr, *pde;
 3527 
 3528                 KASSERT(addr < UPT_MIN_ADDRESS,
 3529                     ("pmap_copy: invalid to pmap_copy page tables"));
 3530 
 3531                 pml4e = pmap_pml4e(src_pmap, addr);
 3532                 if ((*pml4e & PG_V) == 0) {
 3533                         va_next = (addr + NBPML4) & ~PML4MASK;
 3534                         if (va_next < addr)
 3535                                 va_next = end_addr;
 3536                         continue;
 3537                 }
 3538 
 3539                 pdpe = pmap_pml4e_to_pdpe(pml4e, addr);
 3540                 if ((*pdpe & PG_V) == 0) {
 3541                         va_next = (addr + NBPDP) & ~PDPMASK;
 3542                         if (va_next < addr)
 3543                                 va_next = end_addr;
 3544                         continue;
 3545                 }
 3546 
 3547                 va_next = (addr + NBPDR) & ~PDRMASK;
 3548                 if (va_next < addr)
 3549                         va_next = end_addr;
 3550 
 3551                 pde = pmap_pdpe_to_pde(pdpe, addr);
 3552                 srcptepaddr = *pde;
 3553                 if (srcptepaddr == 0)
 3554                         continue;
 3555                         
 3556                 if (srcptepaddr & PG_PS) {
 3557                         dstmpde = pmap_allocpde(dst_pmap, addr, M_NOWAIT);
 3558                         if (dstmpde == NULL)
 3559                                 break;
 3560                         pde = (pd_entry_t *)
 3561                             PHYS_TO_DMAP(VM_PAGE_TO_PHYS(dstmpde));
 3562                         pde = &pde[pmap_pde_index(addr)];
 3563                         if (*pde == 0 && ((srcptepaddr & PG_MANAGED) == 0 ||
 3564                             pmap_pv_insert_pde(dst_pmap, addr, srcptepaddr &
 3565                             PG_PS_FRAME))) {
 3566                                 *pde = srcptepaddr & ~PG_W;
 3567                                 dst_pmap->pm_stats.resident_count +=
 3568                                     NBPDR / PAGE_SIZE;
 3569                         } else
 3570                                 dstmpde->wire_count--;
 3571                         continue;
 3572                 }
 3573 
 3574                 srcptepaddr &= PG_FRAME;
 3575                 srcmpte = PHYS_TO_VM_PAGE(srcptepaddr);
 3576                 KASSERT(srcmpte->wire_count > 0,
 3577                     ("pmap_copy: source page table page is unused"));
 3578 
 3579                 if (va_next > end_addr)
 3580                         va_next = end_addr;
 3581 
 3582                 src_pte = (pt_entry_t *)PHYS_TO_DMAP(srcptepaddr);
 3583                 src_pte = &src_pte[pmap_pte_index(addr)];
 3584                 dstmpte = NULL;
 3585                 while (addr < va_next) {
 3586                         pt_entry_t ptetemp;
 3587                         ptetemp = *src_pte;
 3588                         /*
 3589                          * we only virtual copy managed pages
 3590                          */
 3591                         if ((ptetemp & PG_MANAGED) != 0) {
 3592                                 if (dstmpte != NULL &&
 3593                                     dstmpte->pindex == pmap_pde_pindex(addr))
 3594                                         dstmpte->wire_count++;
 3595                                 else if ((dstmpte = pmap_allocpte(dst_pmap,
 3596                                     addr, M_NOWAIT)) == NULL)
 3597                                         goto out;
 3598                                 dst_pte = (pt_entry_t *)
 3599                                     PHYS_TO_DMAP(VM_PAGE_TO_PHYS(dstmpte));
 3600                                 dst_pte = &dst_pte[pmap_pte_index(addr)];
 3601                                 if (*dst_pte == 0 &&
 3602                                     pmap_try_insert_pv_entry(dst_pmap, addr,
 3603                                     PHYS_TO_VM_PAGE(ptetemp & PG_FRAME))) {
 3604                                         /*
 3605                                          * Clear the wired, modified, and
 3606                                          * accessed (referenced) bits
 3607                                          * during the copy.
 3608                                          */
 3609                                         *dst_pte = ptetemp & ~(PG_W | PG_M |
 3610                                             PG_A);
 3611                                         dst_pmap->pm_stats.resident_count++;
 3612                                 } else {
 3613                                         free = NULL;
 3614                                         if (pmap_unwire_pte_hold(dst_pmap,
 3615                                             addr, dstmpte, &free)) {
 3616                                                 pmap_invalidate_page(dst_pmap,
 3617                                                     addr);
 3618                                                 pmap_free_zero_pages(free);
 3619                                         }
 3620                                         goto out;
 3621                                 }
 3622                                 if (dstmpte->wire_count >= srcmpte->wire_count)
 3623                                         break;
 3624                         }
 3625                         addr += PAGE_SIZE;
 3626                         src_pte++;
 3627                 }
 3628         }
 3629 out:
 3630         vm_page_unlock_queues();
 3631         PMAP_UNLOCK(src_pmap);
 3632         PMAP_UNLOCK(dst_pmap);
 3633 }       
 3634 
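/*
 * [Editorial example -- not part of the original pmap.c.]  pmap_copy() above
 * avoids deadlock between two concurrent copies running in opposite
 * directions by always taking the two pmap locks in address order
 * (dst_pmap < src_pmap).  The same idea for any pair of distinct locks, in
 * user-space pthreads:
 */
#include <pthread.h>

/* Lock two mutexes in a global (address-based) order; assumes a != b. */
static void
lock_pair_ordered(pthread_mutex_t *a, pthread_mutex_t *b)
{
        if (a < b) {
                pthread_mutex_lock(a);
                pthread_mutex_lock(b);
        } else {
                pthread_mutex_lock(b);
                pthread_mutex_lock(a);
        }
}
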
 3635 /*
 3636  *      pmap_zero_page zeros the specified hardware page.  On amd64 the page
 3637  *      is addressed through the direct map and cleared with pagezero().
 3638  */
 3639 void
 3640 pmap_zero_page(vm_page_t m)
 3641 {
 3642         vm_offset_t va = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m));
 3643 
 3644         pagezero((void *)va);
 3645 }
 3646 
 3647 /*
 3648  *      pmap_zero_page_area zeros the given range of the specified hardware
 3649  *      page through the direct map (pagezero() for a full page, else bzero()).
 3650  *
 3651  *      off and size may not cover an area beyond a single hardware page.
 3652  */
 3653 void
 3654 pmap_zero_page_area(vm_page_t m, int off, int size)
 3655 {
 3656         vm_offset_t va = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m));
 3657 
 3658         if (off == 0 && size == PAGE_SIZE)
 3659                 pagezero((void *)va);
 3660         else
 3661                 bzero((char *)va + off, size);
 3662 }
 3663 
 3664 /*
 3665  *      pmap_zero_page_idle zeros the specified hardware page through the
 3666  *      direct map using pagezero() to clear its contents.  This
 3667  *      is intended to be called from the vm_pagezero process only and
 3668  *      outside of Giant.
 3669  */
 3670 void
 3671 pmap_zero_page_idle(vm_page_t m)
 3672 {
 3673         vm_offset_t va = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m));
 3674 
 3675         pagezero((void *)va);
 3676 }
 3677 
 3678 /*
 3679  *      pmap_copy_page copies the specified (machine independent)
 3680  *      page.  On amd64 both the source and destination pages are addressed
 3681  *      through the direct map and copied with pagecopy(), one machine
 3682  *      dependent page at a time.
 3683  */
 3684 void
 3685 pmap_copy_page(vm_page_t msrc, vm_page_t mdst)
 3686 {
 3687         vm_offset_t src = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(msrc));
 3688         vm_offset_t dst = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(mdst));
 3689 
 3690         pagecopy((void *)src, (void *)dst);
 3691 }
 3692 
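/*
 * [Editorial example -- not part of the original pmap.c.]  All four routines
 * above rely on the amd64 direct map: every physical page is permanently
 * mapped at a fixed virtual offset, so PHYS_TO_DMAP() is just an addition and
 * no temporary mapping or TLB shootdown is needed.  Modeled in user space
 * with an invented base address:
 */
#include <stdint.h>

#define X_DMAP_MIN_ADDRESS 0xfffff80000000000ULL   /* hypothetical base */

/* Translate a physical address to its direct-map virtual address. */
static inline void *
phys_to_dmap(uint64_t pa)
{
        return ((void *)(uintptr_t)(X_DMAP_MIN_ADDRESS + pa));
}
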
 3693 /*
 3694  * Returns true if the pmap's pv is one of the first
 3695  * 16 pvs linked to from this page.  This count may
 3696  * be changed upwards or downwards in the future; it
 3697  * is only necessary that true be returned for a small
 3698  * subset of pmaps for proper page aging.
 3699  */
 3700 boolean_t
 3701 pmap_page_exists_quick(pmap_t pmap, vm_page_t m)
 3702 {
 3703         struct md_page *pvh;
 3704         pv_entry_t pv;
 3705         int loops = 0;
 3706 
 3707         if (m->flags & PG_FICTITIOUS)
 3708                 return FALSE;
 3709 
 3710         mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 3711         TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
 3712                 if (PV_PMAP(pv) == pmap) {
 3713                         return TRUE;
 3714                 }
 3715                 loops++;
 3716                 if (loops >= 16)
 3717                         break;
 3718         }
 3719         if (loops < 16) {
 3720                 pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
 3721                 TAILQ_FOREACH(pv, &pvh->pv_list, pv_list) {
 3722                         if (PV_PMAP(pv) == pmap)
 3723                                 return (TRUE);
 3724                         loops++;
 3725                         if (loops >= 16)
 3726                                 break;
 3727                 }
 3728         }
 3729         return (FALSE);
 3730 }
 3731 
 3732 /*
 3733  *      pmap_page_wired_mappings:
 3734  *
 3735  *      Return the number of managed mappings to the given physical page
 3736  *      that are wired.
 3737  */
 3738 int
 3739 pmap_page_wired_mappings(vm_page_t m)
 3740 {
 3741         int count;
 3742 
 3743         count = 0;
 3744         if ((m->flags & PG_FICTITIOUS) != 0)
 3745                 return (count);
 3746         count = pmap_pvh_wired_mappings(&m->md, count);
 3747         return (pmap_pvh_wired_mappings(pa_to_pvh(VM_PAGE_TO_PHYS(m)), count));
 3748 }
 3749 
 3750 /*
 3751  *      pmap_pvh_wired_mappings:
 3752  *
 3753  *      Return the updated number "count" of managed mappings that are wired.
 3754  */
 3755 static int
 3756 pmap_pvh_wired_mappings(struct md_page *pvh, int count)
 3757 {
 3758         pmap_t pmap;
 3759         pt_entry_t *pte;
 3760         pv_entry_t pv;
 3761 
 3762         mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 3763         TAILQ_FOREACH(pv, &pvh->pv_list, pv_list) {
 3764                 pmap = PV_PMAP(pv);
 3765                 PMAP_LOCK(pmap);
 3766                 pte = pmap_pte(pmap, pv->pv_va);
 3767                 if ((*pte & PG_W) != 0)
 3768                         count++;
 3769                 PMAP_UNLOCK(pmap);
 3770         }
 3771         return (count);
 3772 }
 3773 
 3774 /*
 3775  * Returns TRUE if the given page is mapped individually or as part of
 3776  * a 2mpage.  Otherwise, returns FALSE.
 3777  */
 3778 boolean_t
 3779 pmap_page_is_mapped(vm_page_t m)
 3780 {
 3781         struct md_page *pvh;
 3782 
 3783         if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0)
 3784                 return (FALSE);
 3785         mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 3786         if (TAILQ_EMPTY(&m->md.pv_list)) {
 3787                 pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
 3788                 return (!TAILQ_EMPTY(&pvh->pv_list));
 3789         } else
 3790                 return (TRUE);
 3791 }
 3792 
 3793 /*
 3794  * Remove all pages from the specified address space;
 3795  * this aids process exit speeds.  Also, this code
 3796  * is special cased for current process only, but
 3797  * can have the more generic (and slightly slower)
 3798  * mode enabled.  This is much faster than pmap_remove
 3799  * in the case of running down an entire address space.
 3800  */
 3801 void
 3802 pmap_remove_pages(pmap_t pmap)
 3803 {
 3804         pd_entry_t ptepde;
 3805         pt_entry_t *pte, tpte;
 3806         vm_page_t free = NULL;
 3807         vm_page_t m, mpte, mt;
 3808         pv_entry_t pv;
 3809         struct md_page *pvh;
 3810         struct pv_chunk *pc, *npc;
 3811         int field, idx;
 3812         int64_t bit;
 3813         uint64_t inuse, bitmask;
 3814         int allfree;
 3815 
 3816         if (pmap != vmspace_pmap(curthread->td_proc->p_vmspace)) {
 3817                 printf("warning: pmap_remove_pages called with non-current pmap\n");
 3818                 return;
 3819         }
 3820         vm_page_lock_queues();
 3821         PMAP_LOCK(pmap);
 3822         TAILQ_FOREACH_SAFE(pc, &pmap->pm_pvchunk, pc_list, npc) {
 3823                 allfree = 1;
 3824                 for (field = 0; field < _NPCM; field++) {
 3825                         inuse = (~(pc->pc_map[field])) & pc_freemask[field];
 3826                         while (inuse != 0) {
 3827                                 bit = bsfq(inuse);
 3828                                 bitmask = 1UL << bit;
 3829                                 idx = field * 64 + bit;
 3830                                 pv = &pc->pc_pventry[idx];
 3831                                 inuse &= ~bitmask;
 3832 
 3833                                 pte = pmap_pdpe(pmap, pv->pv_va);
 3834                                 ptepde = *pte;
 3835                                 pte = pmap_pdpe_to_pde(pte, pv->pv_va);
 3836                                 tpte = *pte;
 3837                                 if ((tpte & (PG_PS | PG_V)) == PG_V) {
 3838                                         ptepde = tpte;
 3839                                         pte = (pt_entry_t *)PHYS_TO_DMAP(tpte &
 3840                                             PG_FRAME);
 3841                                         pte = &pte[pmap_pte_index(pv->pv_va)];
 3842                                         tpte = *pte & ~PG_PTE_PAT;
 3843                                 }
 3844                                 if ((tpte & PG_V) == 0)
 3845                                         panic("bad pte");
 3846 
 3847 /*
 3848  * We cannot remove wired pages from a process' mapping at this time
 3849  */
 3850                                 if (tpte & PG_W) {
 3851                                         allfree = 0;
 3852                                         continue;
 3853                                 }
 3854 
 3855                                 m = PHYS_TO_VM_PAGE(tpte & PG_FRAME);
 3856                                 KASSERT(m->phys_addr == (tpte & PG_FRAME),
 3857                                     ("vm_page_t %p phys_addr mismatch %016jx %016jx",
 3858                                     m, (uintmax_t)m->phys_addr,
 3859                                     (uintmax_t)tpte));
 3860 
 3861                                 KASSERT(m < &vm_page_array[vm_page_array_size],
 3862                                         ("pmap_remove_pages: bad tpte %#jx",
 3863                                         (uintmax_t)tpte));
 3864 
 3865                                 pte_clear(pte);
 3866 
 3867                                 /*
 3868                                  * Update the vm_page_t clean/reference bits.
 3869                                  */
 3870                                 if ((tpte & (PG_M | PG_RW)) == (PG_M | PG_RW)) {
 3871                                         if ((tpte & PG_PS) != 0) {
 3872                                                 for (mt = m; mt < &m[NBPDR / PAGE_SIZE]; mt++)
 3873                                                         vm_page_dirty(mt);
 3874                                         } else
 3875                                                 vm_page_dirty(m);
 3876                                 }
 3877 
 3878                                 /* Mark free */
 3879                                 PV_STAT(pv_entry_frees++);
 3880                                 PV_STAT(pv_entry_spare++);
 3881                                 pv_entry_count--;
 3882                                 pc->pc_map[field] |= bitmask;
 3883                                 if ((tpte & PG_PS) != 0) {
 3884                                         pmap->pm_stats.resident_count -= NBPDR / PAGE_SIZE;
 3885                                         pvh = pa_to_pvh(tpte & PG_PS_FRAME);
 3886                                         TAILQ_REMOVE(&pvh->pv_list, pv, pv_list);
 3887                                         if (TAILQ_EMPTY(&pvh->pv_list)) {
 3888                                                 for (mt = m; mt < &m[NBPDR / PAGE_SIZE]; mt++)
 3889                                                         if (TAILQ_EMPTY(&mt->md.pv_list))
 3890                                                                 vm_page_flag_clear(mt, PG_WRITEABLE);
 3891                                         }
 3892                                         mpte = pmap_lookup_pt_page(pmap, pv->pv_va);
 3893                                         if (mpte != NULL) {
 3894                                                 pmap_remove_pt_page(pmap, mpte);
 3895                                                 pmap->pm_stats.resident_count--;
 3896                                                 KASSERT(mpte->wire_count == NPTEPG,
 3897                                                     ("pmap_remove_pages: pte page wire count error"));
 3898                                                 mpte->wire_count = 0;
 3899                                                 pmap_add_delayed_free_list(mpte, &free, FALSE);
 3900                                                 atomic_subtract_int(&cnt.v_wire_count, 1);
 3901                                         }
 3902                                 } else {
 3903                                         pmap->pm_stats.resident_count--;
 3904                                         TAILQ_REMOVE(&m->md.pv_list, pv, pv_list);
 3905                                         if (TAILQ_EMPTY(&m->md.pv_list)) {
 3906                                                 pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
 3907                                                 if (TAILQ_EMPTY(&pvh->pv_list))
 3908                                                         vm_page_flag_clear(m, PG_WRITEABLE);
 3909                                         }
 3910                                 }
 3911                                 pmap_unuse_pt(pmap, pv->pv_va, ptepde, &free);
 3912                         }
 3913                 }
 3914                 if (allfree) {
 3915                         PV_STAT(pv_entry_spare -= _NPCPV);
 3916                         PV_STAT(pc_chunk_count--);
 3917                         PV_STAT(pc_chunk_frees++);
 3918                         TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
 3919                         m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS((vm_offset_t)pc));
 3920                         dump_drop_page(m->phys_addr);
 3921                         vm_page_unwire(m, 0);
 3922                         vm_page_free(m);
 3923                 }
 3924         }
 3925         pmap_invalidate_all(pmap);
 3926         vm_page_unlock_queues();
 3927         PMAP_UNLOCK(pmap);
 3928         pmap_free_zero_pages(free);
 3929 }
 3930 
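/*
 * [Editorial example -- not part of the original pmap.c.]  The chunk scan in
 * pmap_remove_pages() above visits only the in-use pv entries by repeatedly
 * extracting the lowest set bit of the inverted free mask with bsfq().  A
 * user-space rendering of that bit-scan loop, using the GCC/Clang builtin in
 * place of the kernel's bsfq():
 */
#include <stdint.h>
#include <stdio.h>

/* Visit the index of every set bit in a 64-bit in-use mask. */
static void
visit_inuse(uint64_t inuse)
{
        while (inuse != 0) {
                int bit = __builtin_ctzll(inuse);       /* lowest set bit */
                uint64_t bitmask = 1ULL << bit;
                printf("pv entry index %d is in use\n", bit);
                inuse &= ~bitmask;                      /* clear and continue */
        }
}
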
 3931 /*
 3932  *      pmap_is_modified:
 3933  *
 3934  *      Return whether or not the specified physical page was modified
 3935  *      in any physical maps.
 3936  */
 3937 boolean_t
 3938 pmap_is_modified(vm_page_t m)
 3939 {
 3940 
 3941         if (m->flags & PG_FICTITIOUS)
 3942                 return (FALSE);
 3943         if (pmap_is_modified_pvh(&m->md))
 3944                 return (TRUE);
 3945         return (pmap_is_modified_pvh(pa_to_pvh(VM_PAGE_TO_PHYS(m))));
 3946 }
 3947 
 3948 /*
 3949  * Returns TRUE if any of the given mappings were used to modify
 3950  * physical memory.  Otherwise, returns FALSE.  Both page and 2mpage
 3951  * mappings are supported.
 3952  */
 3953 static boolean_t
 3954 pmap_is_modified_pvh(struct md_page *pvh)
 3955 {
 3956         pv_entry_t pv;
 3957         pt_entry_t *pte;
 3958         pmap_t pmap;
 3959         boolean_t rv;
 3960 
 3961         mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 3962         rv = FALSE;
 3963         TAILQ_FOREACH(pv, &pvh->pv_list, pv_list) {
 3964                 pmap = PV_PMAP(pv);
 3965                 PMAP_LOCK(pmap);
 3966                 pte = pmap_pte(pmap, pv->pv_va);
 3967                 rv = (*pte & (PG_M | PG_RW)) == (PG_M | PG_RW);
 3968                 PMAP_UNLOCK(pmap);
 3969                 if (rv)
 3970                         break;
 3971         }
 3972         return (rv);
 3973 }
 3974 
 3975 /*
 3976  *      pmap_is_prefaultable:
 3977  *
 3978  *      Return whether or not the specified virtual address is eligible
 3979  *      for prefault.
 3980  */
 3981 boolean_t
 3982 pmap_is_prefaultable(pmap_t pmap, vm_offset_t addr)
 3983 {
 3984         pd_entry_t *pde;
 3985         pt_entry_t *pte;
 3986         boolean_t rv;
 3987 
 3988         rv = FALSE;
 3989         PMAP_LOCK(pmap);
 3990         pde = pmap_pde(pmap, addr);
 3991         if (pde != NULL && (*pde & (PG_PS | PG_V)) == PG_V) {
 3992                 pte = pmap_pde_to_pte(pde, addr);
 3993                 rv = (*pte & PG_V) == 0;
 3994         }
 3995         PMAP_UNLOCK(pmap);
 3996         return (rv);
 3997 }
 3998 
 3999 /*
 4000  * Clear the write and modified bits in each of the given page's mappings.
 4001  */
 4002 void
 4003 pmap_remove_write(vm_page_t m)
 4004 {
 4005         struct md_page *pvh;
 4006         pmap_t pmap;
 4007         pv_entry_t next_pv, pv;
 4008         pd_entry_t *pde;
 4009         pt_entry_t oldpte, *pte;
 4010         vm_offset_t va;
 4011 
 4012         if ((m->flags & PG_FICTITIOUS) != 0 ||
 4013             (m->flags & PG_WRITEABLE) == 0)
 4014                 return;
 4015         mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 4016         pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
 4017         TAILQ_FOREACH_SAFE(pv, &pvh->pv_list, pv_list, next_pv) {
 4018                 va = pv->pv_va;
 4019                 pmap = PV_PMAP(pv);
 4020                 PMAP_LOCK(pmap);
 4021                 pde = pmap_pde(pmap, va);
 4022                 if ((*pde & PG_RW) != 0)
 4023                         (void)pmap_demote_pde(pmap, pde, va);
 4024                 PMAP_UNLOCK(pmap);
 4025         }
 4026         TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
 4027                 pmap = PV_PMAP(pv);
 4028                 PMAP_LOCK(pmap);
 4029                 pde = pmap_pde(pmap, pv->pv_va);
 4030                 KASSERT((*pde & PG_PS) == 0, ("pmap_remove_write: found"
 4031                     " a 2mpage in page %p's pv list", m));
 4032                 pte = pmap_pde_to_pte(pde, pv->pv_va);
 4033 retry:
 4034                 oldpte = *pte;
 4035                 if (oldpte & PG_RW) {
 4036                         if (!atomic_cmpset_long(pte, oldpte, oldpte &
 4037                             ~(PG_RW | PG_M)))
 4038                                 goto retry;
 4039                         if ((oldpte & PG_M) != 0)
 4040                                 vm_page_dirty(m);
 4041                         pmap_invalidate_page(pmap, pv->pv_va);
 4042                 }
 4043                 PMAP_UNLOCK(pmap);
 4044         }
 4045         vm_page_flag_clear(m, PG_WRITEABLE);
 4046 }
 4047 
 4048 /*
 4049  *      pmap_ts_referenced:
 4050  *
 4051  *      Return a count of reference bits for a page, clearing those bits.
 4052  *      It is not necessary for every reference bit to be cleared, but it
 4053  *      is necessary that 0 only be returned when there are truly no
 4054  *      reference bits set.
 4055  *
 4056  *      XXX: The exact number of bits to check and clear is a matter that
 4057  *      should be tested and standardized at some point in the future for
 4058  *      optimal aging of shared pages.
 4059  */
 4060 int
 4061 pmap_ts_referenced(vm_page_t m)
 4062 {
 4063         struct md_page *pvh;
 4064         pv_entry_t pv, pvf, pvn;
 4065         pmap_t pmap;
 4066         pd_entry_t oldpde, *pde;
 4067         pt_entry_t *pte;
 4068         vm_offset_t va;
 4069         int rtval = 0;
 4070 
 4071         if (m->flags & PG_FICTITIOUS)
 4072                 return (rtval);
 4073         mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 4074         pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
 4075         TAILQ_FOREACH_SAFE(pv, &pvh->pv_list, pv_list, pvn) {
 4076                 va = pv->pv_va;
 4077                 pmap = PV_PMAP(pv);
 4078                 PMAP_LOCK(pmap);
 4079                 pde = pmap_pde(pmap, va);
 4080                 oldpde = *pde;
 4081                 if ((oldpde & PG_A) != 0) {
 4082                         if (pmap_demote_pde(pmap, pde, va)) {
 4083                                 if ((oldpde & PG_W) == 0) {
 4084                                         /*
 4085                                          * Remove the mapping to a single page
 4086                                          * so that a subsequent access may
 4087                                          * repromote.  Since the underlying
 4088                                          * page table page is fully populated,
 4089                                          * this removal never frees a page
 4090                                          * table page.
 4091                                          */
 4092                                         va += VM_PAGE_TO_PHYS(m) - (oldpde &
 4093                                             PG_PS_FRAME);
 4094                                         pmap_remove_page(pmap, va, pde, NULL);
 4095                                         rtval++;
 4096                                         if (rtval > 4) {
 4097                                                 PMAP_UNLOCK(pmap);
 4098                                                 return (rtval);
 4099                                         }
 4100                                 }
 4101                         }
 4102                 }
 4103                 PMAP_UNLOCK(pmap);
 4104         }
 4105         if ((pv = TAILQ_FIRST(&m->md.pv_list)) != NULL) {
 4106                 pvf = pv;
 4107                 do {
 4108                         pvn = TAILQ_NEXT(pv, pv_list);
 4109                         TAILQ_REMOVE(&m->md.pv_list, pv, pv_list);
 4110                         TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_list);
 4111                         pmap = PV_PMAP(pv);
 4112                         PMAP_LOCK(pmap);
 4113                         pde = pmap_pde(pmap, pv->pv_va);
 4114                         KASSERT((*pde & PG_PS) == 0, ("pmap_ts_referenced:"
 4115                             " found a 2mpage in page %p's pv list", m));
 4116                         pte = pmap_pde_to_pte(pde, pv->pv_va);
 4117                         if ((*pte & PG_A) != 0) {
 4118                                 atomic_clear_long(pte, PG_A);
 4119                                 pmap_invalidate_page(pmap, pv->pv_va);
 4120                                 rtval++;
 4121                                 if (rtval > 4)
 4122                                         pvn = NULL;
 4123                         }
 4124                         PMAP_UNLOCK(pmap);
 4125                 } while ((pv = pvn) != NULL && pv != pvf);
 4126         }
 4127         return (rtval);
 4128 }
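
/*
 * Example: a minimal sketch (hypothetical helper, not part of this file) of
 * how an active-queue scanner might consume the count returned by
 * pmap_ts_referenced() when aging a page.  ACT_ADVANCE, ACT_MAX, and
 * ACT_DECLINE are the aging constants from <vm/vm_page.h>; the page queues
 * lock is assumed to be held, as the mtx_assert() above requires.
 */
static void
example_age_page(vm_page_t m)
{
        int actcount;

        actcount = pmap_ts_referenced(m);
        if (actcount != 0) {
                /* Recently referenced: move the page up the active queue. */
                m->act_count += ACT_ADVANCE + actcount;
                if (m->act_count > ACT_MAX)
                        m->act_count = ACT_MAX;
        } else if (m->act_count > 0) {
                /* Not referenced since the last scan: let it decay. */
                m->act_count -= min(m->act_count, ACT_DECLINE);
        }
}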
 4129 
 4130 /*
 4131  *      Clear the modify bits on the specified physical page.
 4132  */
 4133 void
 4134 pmap_clear_modify(vm_page_t m)
 4135 {
 4136         struct md_page *pvh;
 4137         pmap_t pmap;
 4138         pv_entry_t next_pv, pv;
 4139         pd_entry_t oldpde, *pde;
 4140         pt_entry_t oldpte, *pte;
 4141         vm_offset_t va;
 4142 
 4143         if ((m->flags & PG_FICTITIOUS) != 0)
 4144                 return;
 4145         mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 4146         pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
 4147         TAILQ_FOREACH_SAFE(pv, &pvh->pv_list, pv_list, next_pv) {
 4148                 va = pv->pv_va;
 4149                 pmap = PV_PMAP(pv);
 4150                 PMAP_LOCK(pmap);
 4151                 pde = pmap_pde(pmap, va);
 4152                 oldpde = *pde;
 4153                 if ((oldpde & PG_RW) != 0) {
 4154                         if (pmap_demote_pde(pmap, pde, va)) {
 4155                                 if ((oldpde & PG_W) == 0) {
 4156                                         /*
 4157                                          * Write protect the mapping to a
 4158                                          * single page so that a subsequent
 4159                                          * write access may repromote.
 4160                                          */
 4161                                         va += VM_PAGE_TO_PHYS(m) - (oldpde &
 4162                                             PG_PS_FRAME);
 4163                                         pte = pmap_pde_to_pte(pde, va);
 4164                                         oldpte = *pte;
 4165                                         if ((oldpte & PG_V) != 0) {
 4166                                                 while (!atomic_cmpset_long(pte,
 4167                                                     oldpte,
 4168                                                     oldpte & ~(PG_M | PG_RW)))
 4169                                                         oldpte = *pte;
 4170                                                 vm_page_dirty(m);
 4171                                                 pmap_invalidate_page(pmap, va);
 4172                                         }
 4173                                 }
 4174                         }
 4175                 }
 4176                 PMAP_UNLOCK(pmap);
 4177         }
 4178         TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
 4179                 pmap = PV_PMAP(pv);
 4180                 PMAP_LOCK(pmap);
 4181                 pde = pmap_pde(pmap, pv->pv_va);
 4182                 KASSERT((*pde & PG_PS) == 0, ("pmap_clear_modify: found"
 4183                     " a 2mpage in page %p's pv list", m));
 4184                 pte = pmap_pde_to_pte(pde, pv->pv_va);
 4185                 if ((*pte & (PG_M | PG_RW)) == (PG_M | PG_RW)) {
 4186                         atomic_clear_long(pte, PG_M);
 4187                         pmap_invalidate_page(pmap, pv->pv_va);
 4188                 }
 4189                 PMAP_UNLOCK(pmap);
 4190         }
 4191 }
 4192 
 4193 /*
 4194  *      pmap_clear_reference:
 4195  *
 4196  *      Clear the reference bit on the specified physical page.
 4197  */
 4198 void
 4199 pmap_clear_reference(vm_page_t m)
 4200 {
 4201         struct md_page *pvh;
 4202         pmap_t pmap;
 4203         pv_entry_t next_pv, pv;
 4204         pd_entry_t oldpde, *pde;
 4205         pt_entry_t *pte;
 4206         vm_offset_t va;
 4207 
 4208         if ((m->flags & PG_FICTITIOUS) != 0)
 4209                 return;
 4210         mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 4211         pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
 4212         TAILQ_FOREACH_SAFE(pv, &pvh->pv_list, pv_list, next_pv) {
 4213                 va = pv->pv_va;
 4214                 pmap = PV_PMAP(pv);
 4215                 PMAP_LOCK(pmap);
 4216                 pde = pmap_pde(pmap, va);
 4217                 oldpde = *pde;
 4218                 if ((oldpde & PG_A) != 0) {
 4219                         if (pmap_demote_pde(pmap, pde, va)) {
 4220                                 /*
 4221                                  * Remove the mapping to a single page so
 4222                                  * that a subsequent access may repromote.
 4223                                  * Since the underlying page table page is
 4224                                  * fully populated, this removal never frees
 4225                                  * a page table page.
 4226                                  */
 4227                                 va += VM_PAGE_TO_PHYS(m) - (oldpde &
 4228                                     PG_PS_FRAME);
 4229                                 pmap_remove_page(pmap, va, pde, NULL);
 4230                         }
 4231                 }
 4232                 PMAP_UNLOCK(pmap);
 4233         }
 4234         TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
 4235                 pmap = PV_PMAP(pv);
 4236                 PMAP_LOCK(pmap);
 4237                 pde = pmap_pde(pmap, pv->pv_va);
 4238                 KASSERT((*pde & PG_PS) == 0, ("pmap_clear_reference: found"
 4239                     " a 2mpage in page %p's pv list", m));
 4240                 pte = pmap_pde_to_pte(pde, pv->pv_va);
 4241                 if (*pte & PG_A) {
 4242                         atomic_clear_long(pte, PG_A);
 4243                         pmap_invalidate_page(pmap, pv->pv_va);
 4244                 }
 4245                 PMAP_UNLOCK(pmap);
 4246         }
 4247 }
 4248 
 4249 /*
 4250  * Miscellaneous support routines follow
 4251  */
 4252 
 4253 /* Adjust the cache mode for a 4KB page mapped via a PTE. */
 4254 static __inline void
 4255 pmap_pte_attr(pt_entry_t *pte, int cache_bits)
 4256 {
 4257         u_int opte, npte;
 4258 
 4259         /*
 4260          * The cache mode bits are all in the low 32-bits of the
 4261          * PTE, so we can just spin on updating the low 32-bits.
 4262          */
 4263         do {
 4264                 opte = *(u_int *)pte;
 4265                 npte = opte & ~PG_PTE_CACHE;
 4266                 npte |= cache_bits;
 4267         } while (npte != opte && !atomic_cmpset_int((u_int *)pte, opte, npte));
 4268 }
 4269 
 4270 /* Adjust the cache mode for a 2MB page mapped via a PDE. */
 4271 static __inline void
 4272 pmap_pde_attr(pd_entry_t *pde, int cache_bits)
 4273 {
 4274         u_int opde, npde;
 4275 
 4276         /*
 4277          * The cache mode bits are all in the low 32-bits of the
 4278          * PDE, so we can just spin on updating the low 32-bits.
 4279          */
 4280         do {
 4281                 opde = *(u_int *)pde;
 4282                 npde = opde & ~PG_PDE_CACHE;
 4283                 npde |= cache_bits;
 4284         } while (npde != opde && !atomic_cmpset_int((u_int *)pde, opde, npde));
 4285 }
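
/*
 * Illustration (standalone userland sketch, not kernel code): the "update
 * only the low 32 bits" trick used by pmap_pte_attr() and pmap_pde_attr()
 * above, expressed with C11 atomics.  The mask 0x98 corresponds to
 * PG_NC_PWT | PG_NC_PCD | PG_PTE_PAT, which all sit below bit 32, so a
 * 32-bit compare-and-swap on the low word never disturbs the frame bits in
 * the high word.  The cast relies on amd64's little-endian layout, exactly
 * as the kernel code does.
 */
#include <assert.h>
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#define EX_CACHE_MASK   0x98u           /* PG_NC_PWT | PG_NC_PCD | PG_PTE_PAT */

static void
example_set_cache_bits(uint64_t *entry, uint32_t cache_bits)
{
        _Atomic uint32_t *low = (_Atomic uint32_t *)entry;
        uint32_t oldlow, newlow;

        do {
                oldlow = atomic_load(low);
                newlow = (oldlow & ~EX_CACHE_MASK) | cache_bits;
        } while (newlow != oldlow &&
            !atomic_compare_exchange_weak(low, &oldlow, newlow));
}

int
main(void)
{
        /* Hypothetical PTE: frame bits in the high word, V | RW | PWT low. */
        uint64_t entry = 0x000000012345600bULL;

        example_set_cache_bits(&entry, 0x10);   /* switch PWT to PCD */
        assert((entry >> 32) == 0x1);           /* high word untouched */
        assert((entry & EX_CACHE_MASK) == 0x10);
        printf("entry = %#jx\n", (uintmax_t)entry);
        return (0);
}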
 4286 
 4287 /*
 4288  * Map a set of physical memory pages into the kernel virtual
 4289  * address space. Return a pointer to where it is mapped. This
 4290  * routine is intended to be used for mapping device memory,
 4291  * NOT real memory.
 4292  */
 4293 void *
 4294 pmap_mapdev_attr(vm_paddr_t pa, vm_size_t size, int mode)
 4295 {
 4296         vm_offset_t va, offset;
 4297         vm_size_t tmpsize;
 4298 
 4299         /*
 4300          * If the specified range of physical addresses fits within the direct
 4301          * map window, use the direct map. 
 4302          */
 4303         if (pa < dmaplimit && pa + size < dmaplimit) {
 4304                 va = PHYS_TO_DMAP(pa);
 4305                 if (!pmap_change_attr(va, size, mode))
 4306                         return ((void *)va);
 4307         }
 4308         offset = pa & PAGE_MASK;
 4309         size = roundup(offset + size, PAGE_SIZE);
 4310         va = kmem_alloc_nofault(kernel_map, size);
 4311         if (!va)
 4312                 panic("pmap_mapdev: Couldn't alloc kernel virtual memory");
 4313         pa = trunc_page(pa);
 4314         for (tmpsize = 0; tmpsize < size; tmpsize += PAGE_SIZE)
 4315                 pmap_kenter_attr(va + tmpsize, pa + tmpsize, mode);
 4316         pmap_invalidate_range(kernel_pmap, va, va + tmpsize);
 4317         pmap_invalidate_cache_range(va, va + tmpsize);
 4318         return ((void *)(va + offset));
 4319 }
 4320 
 4321 void *
 4322 pmap_mapdev(vm_paddr_t pa, vm_size_t size)
 4323 {
 4324 
 4325         return (pmap_mapdev_attr(pa, size, PAT_UNCACHEABLE));
 4326 }
 4327 
 4328 void *
 4329 pmap_mapbios(vm_paddr_t pa, vm_size_t size)
 4330 {
 4331 
 4332         return (pmap_mapdev_attr(pa, size, PAT_WRITE_BACK));
 4333 }
 4334 
 4335 void
 4336 pmap_unmapdev(vm_offset_t va, vm_size_t size)
 4337 {
 4338         vm_offset_t base, offset, tmpva;
 4339 
  4340         /* If pmap_mapdev() handed out a direct map address, do nothing. */
 4341         if (va >= DMAP_MIN_ADDRESS && va < DMAP_MAX_ADDRESS)
 4342                 return;
 4343         base = trunc_page(va);
 4344         offset = va & PAGE_MASK;
 4345         size = roundup(offset + size, PAGE_SIZE);
 4346         for (tmpva = base; tmpva < (base + size); tmpva += PAGE_SIZE)
 4347                 pmap_kremove(tmpva);
 4348         pmap_invalidate_range(kernel_pmap, va, tmpva);
 4349         kmem_free(kernel_map, base, size);
 4350 }
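
/*
 * Example: a minimal sketch (hypothetical driver code, not part of this
 * file) of the intended pmap_mapdev()/pmap_unmapdev() pairing for a
 * memory-mapped device region.  EXAMPLE_REG_BASE and EXAMPLE_REG_SIZE are
 * made-up placeholders for a device's physical register window; real
 * drivers normally reach these routines indirectly through bus resource
 * activation.  The usual kernel and pmap headers are assumed.
 */
#define EXAMPLE_REG_BASE        0xfebf0000UL    /* hypothetical MMIO base */
#define EXAMPLE_REG_SIZE        0x1000UL

static uint32_t
example_read_device_id(void)
{
        volatile uint32_t *regs;
        uint32_t id;

        /*
         * Device registers want an uncacheable mapping; pmap_mapdev() uses
         * PAT_UNCACHEABLE and, when the range fits below dmaplimit, simply
         * hands back a direct-map address.
         */
        regs = pmap_mapdev(EXAMPLE_REG_BASE, EXAMPLE_REG_SIZE);
        id = regs[0];
        pmap_unmapdev((vm_offset_t)regs, EXAMPLE_REG_SIZE);
        return (id);
}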
 4351 
 4352 /*
 4353  * Tries to demote a 1GB page mapping.
 4354  */
 4355 static boolean_t
 4356 pmap_demote_pdpe(pmap_t pmap, pdp_entry_t *pdpe, vm_offset_t va)
 4357 {
 4358         pdp_entry_t newpdpe, oldpdpe;
 4359         pd_entry_t *firstpde, newpde, *pde;
 4360         vm_paddr_t mpdepa;
 4361         vm_page_t mpde;
 4362 
 4363         PMAP_LOCK_ASSERT(pmap, MA_OWNED);
 4364         oldpdpe = *pdpe;
 4365         KASSERT((oldpdpe & (PG_PS | PG_V)) == (PG_PS | PG_V),
 4366             ("pmap_demote_pdpe: oldpdpe is missing PG_PS and/or PG_V"));
 4367         if ((mpde = vm_page_alloc(NULL, va >> PDPSHIFT, VM_ALLOC_INTERRUPT |
 4368             VM_ALLOC_NOOBJ | VM_ALLOC_WIRED)) == NULL) {
 4369                 CTR2(KTR_PMAP, "pmap_demote_pdpe: failure for va %#lx"
 4370                     " in pmap %p", va, pmap);
 4371                 return (FALSE);
 4372         }
 4373         mpdepa = VM_PAGE_TO_PHYS(mpde);
 4374         firstpde = (pd_entry_t *)PHYS_TO_DMAP(mpdepa);
 4375         newpdpe = mpdepa | PG_M | PG_A | (oldpdpe & PG_U) | PG_RW | PG_V;
 4376         KASSERT((oldpdpe & PG_A) != 0,
 4377             ("pmap_demote_pdpe: oldpdpe is missing PG_A"));
 4378         KASSERT((oldpdpe & (PG_M | PG_RW)) != PG_RW,
 4379             ("pmap_demote_pdpe: oldpdpe is missing PG_M"));
 4380         newpde = oldpdpe;
 4381 
 4382         /*
 4383          * Initialize the page directory page.
 4384          */
 4385         for (pde = firstpde; pde < firstpde + NPDEPG; pde++) {
 4386                 *pde = newpde;
 4387                 newpde += NBPDR;
 4388         }
 4389 
 4390         /*
 4391          * Demote the mapping.
 4392          */
 4393         *pdpe = newpdpe;
 4394 
 4395         /*
 4396          * Invalidate a stale recursive mapping of the page directory page.
 4397          */
 4398         pmap_invalidate_page(pmap, (vm_offset_t)vtopde(va));
 4399 
 4400         pmap_pdpe_demotions++;
 4401         CTR2(KTR_PMAP, "pmap_demote_pdpe: success for va %#lx"
 4402             " in pmap %p", va, pmap);
 4403         return (TRUE);
 4404 }
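
/*
 * Worked example (standalone sketch): the demotion above replaces a single
 * 1GB mapping with a page-directory page of NPDEPG (512) 2MB mappings.
 * Each new PDE inherits the old attribute bits, including PG_PS, and only
 * the frame advances by NBPDR, so the 512 entries tile the original 1GB
 * frame exactly (512 * 2MB == 1GB).  The constants and bit values below
 * are illustrative stand-ins for the real pmap definitions.
 */
#include <assert.h>
#include <stdint.h>

#define EX_NPDEPG       512
#define EX_NBPDR        (2ULL << 20)    /* bytes mapped by one PDE */
#define EX_NBPDP        (1ULL << 30)    /* bytes mapped by one PDPE */

int
main(void)
{
        static uint64_t pd[EX_NPDEPG];  /* stand-in page-directory page */
        uint64_t oldpdpe = 0x40000000ULL | 0x1e3;       /* frame | G,PS,D,A,RW,V */
        uint64_t newpde = oldpdpe;
        int i;

        assert((uint64_t)EX_NPDEPG * EX_NBPDR == EX_NBPDP);
        for (i = 0; i < EX_NPDEPG; i++) {
                pd[i] = newpde;
                newpde += EX_NBPDR;
        }
        /* The last entry maps the final 2MB of the original 1GB frame. */
        assert((pd[EX_NPDEPG - 1] & ~0xfffULL) ==
            0x40000000ULL + EX_NBPDP - EX_NBPDR);
        /* The attribute bits are carried over unchanged. */
        assert((pd[EX_NPDEPG - 1] & 0xfffULL) == 0x1e3);
        return (0);
}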
 4405 
 4406 /*
 4407  * Sets the memory attribute for the specified page.
 4408  */
 4409 void
 4410 pmap_page_set_memattr(vm_page_t m, vm_memattr_t ma)
 4411 {
 4412 
 4413         m->md.pat_mode = ma;
 4414 
 4415         /*
 4416          * If "m" is a normal page, update its direct mapping.  This update
 4417          * can be relied upon to perform any cache operations that are
 4418          * required for data coherence.
 4419          */
 4420         if ((m->flags & PG_FICTITIOUS) == 0 &&
 4421             pmap_change_attr(PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m)), PAGE_SIZE,
 4422             m->md.pat_mode))
 4423                 panic("memory attribute change on the direct map failed");
 4424 }
 4425 
 4426 /*
 4427  * Changes the specified virtual address range's memory type to that given by
 4428  * the parameter "mode".  The specified virtual address range must be
 4429  * completely contained within either the direct map or the kernel map.  If
 4430  * the virtual address range is contained within the kernel map, then the
 4431  * memory type for each of the corresponding ranges of the direct map is also
 4432  * changed.  (The corresponding ranges of the direct map are those ranges that
 4433  * map the same physical pages as the specified virtual address range.)  These
 4434  * changes to the direct map are necessary because Intel describes the
 4435  * behavior of their processors as "undefined" if two or more mappings to the
 4436  * same physical page have different memory types.
 4437  *
 4438  * Returns zero if the change completed successfully, and either EINVAL or
 4439  * ENOMEM if the change failed.  Specifically, EINVAL is returned if some part
 4440  * of the virtual address range was not mapped, and ENOMEM is returned if
 4441  * there was insufficient memory available to complete the change.  In the
 4442  * latter case, the memory type may have been changed on some part of the
 4443  * virtual address range or the direct map.
 4444  */
 4445 int
 4446 pmap_change_attr(vm_offset_t va, vm_size_t size, int mode)
 4447 {
 4448         int error;
 4449 
 4450         PMAP_LOCK(kernel_pmap);
 4451         error = pmap_change_attr_locked(va, size, mode);
 4452         PMAP_UNLOCK(kernel_pmap);
 4453         return (error);
 4454 }
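
/*
 * Example: a minimal sketch (hypothetical caller, not part of this file)
 * of the contract described above.  A driver that has already mapped a
 * prefetchable region might switch it to write-combining and must be
 * prepared for EINVAL (part of the range was not mapped) or ENOMEM (a
 * large-page demotion failed, possibly after changing part of the range).
 */
static int
example_make_write_combining(vm_offset_t va, vm_size_t size)
{
        int error;

        error = pmap_change_attr(va, size, PAT_WRITE_COMBINING);
        if (error != 0)
                printf("example: pmap_change_attr failed: %d\n", error);
        return (error);
}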
 4455 
 4456 static int
 4457 pmap_change_attr_locked(vm_offset_t va, vm_size_t size, int mode)
 4458 {
 4459         vm_offset_t base, offset, tmpva;
 4460         vm_paddr_t pa_start, pa_end;
 4461         pdp_entry_t *pdpe;
 4462         pd_entry_t *pde;
 4463         pt_entry_t *pte;
 4464         int cache_bits_pte, cache_bits_pde, error;
 4465         boolean_t changed;
 4466 
 4467         PMAP_LOCK_ASSERT(kernel_pmap, MA_OWNED);
 4468         base = trunc_page(va);
 4469         offset = va & PAGE_MASK;
 4470         size = roundup(offset + size, PAGE_SIZE);
 4471 
 4472         /*
 4473          * Only supported on kernel virtual addresses, including the direct
 4474          * map but excluding the recursive map.
 4475          */
 4476         if (base < DMAP_MIN_ADDRESS)
 4477                 return (EINVAL);
 4478 
 4479         cache_bits_pde = pmap_cache_bits(mode, 1);
 4480         cache_bits_pte = pmap_cache_bits(mode, 0);
 4481         changed = FALSE;
 4482 
 4483         /*
 4484          * Pages that aren't mapped aren't supported.  Also break down 2MB pages
 4485          * into 4KB pages if required.
 4486          */
 4487         for (tmpva = base; tmpva < base + size; ) {
 4488                 pdpe = pmap_pdpe(kernel_pmap, tmpva);
 4489                 if (*pdpe == 0)
 4490                         return (EINVAL);
 4491                 if (*pdpe & PG_PS) {
 4492                         /*
 4493                          * If the current 1GB page already has the required
 4494                          * memory type, then we need not demote this page. Just
 4495                          * increment tmpva to the next 1GB page frame.
 4496                          */
 4497                         if ((*pdpe & PG_PDE_CACHE) == cache_bits_pde) {
 4498                                 tmpva = trunc_1gpage(tmpva) + NBPDP;
 4499                                 continue;
 4500                         }
 4501 
 4502                         /*
 4503                          * If the current offset aligns with a 1GB page frame
 4504                          * and there is at least 1GB left within the range, then
 4505                          * we need not break down this page into 2MB pages.
 4506                          */
 4507                         if ((tmpva & PDPMASK) == 0 &&
 4508                             tmpva + PDPMASK < base + size) {
 4509                                 tmpva += NBPDP;
 4510                                 continue;
 4511                         }
 4512                         if (!pmap_demote_pdpe(kernel_pmap, pdpe, tmpva))
 4513                                 return (ENOMEM);
 4514                 }
 4515                 pde = pmap_pdpe_to_pde(pdpe, tmpva);
 4516                 if (*pde == 0)
 4517                         return (EINVAL);
 4518                 if (*pde & PG_PS) {
 4519                         /*
 4520                          * If the current 2MB page already has the required
 4521                          * memory type, then we need not demote this page. Just
 4522                          * increment tmpva to the next 2MB page frame.
 4523                          */
 4524                         if ((*pde & PG_PDE_CACHE) == cache_bits_pde) {
 4525                                 tmpva = trunc_2mpage(tmpva) + NBPDR;
 4526                                 continue;
 4527                         }
 4528 
 4529                         /*
 4530                          * If the current offset aligns with a 2MB page frame
 4531                          * and there is at least 2MB left within the range, then
 4532                          * we need not break down this page into 4KB pages.
 4533                          */
 4534                         if ((tmpva & PDRMASK) == 0 &&
 4535                             tmpva + PDRMASK < base + size) {
 4536                                 tmpva += NBPDR;
 4537                                 continue;
 4538                         }
 4539                         if (!pmap_demote_pde(kernel_pmap, pde, tmpva))
 4540                                 return (ENOMEM);
 4541                 }
 4542                 pte = pmap_pde_to_pte(pde, tmpva);
 4543                 if (*pte == 0)
 4544                         return (EINVAL);
 4545                 tmpva += PAGE_SIZE;
 4546         }
 4547         error = 0;
 4548 
 4549         /*
 4550          * Ok, all the pages exist, so run through them updating their
 4551          * cache mode if required.
 4552          */
 4553         pa_start = pa_end = 0;
 4554         for (tmpva = base; tmpva < base + size; ) {
 4555                 pdpe = pmap_pdpe(kernel_pmap, tmpva);
 4556                 if (*pdpe & PG_PS) {
 4557                         if ((*pdpe & PG_PDE_CACHE) != cache_bits_pde) {
 4558                                 pmap_pde_attr(pdpe, cache_bits_pde);
 4559                                 changed = TRUE;
 4560                         }
 4561                         if (tmpva >= VM_MIN_KERNEL_ADDRESS) {
 4562                                 if (pa_start == pa_end) {
 4563                                         /* Start physical address run. */
 4564                                         pa_start = *pdpe & PG_PS_FRAME;
 4565                                         pa_end = pa_start + NBPDP;
 4566                                 } else if (pa_end == (*pdpe & PG_PS_FRAME))
 4567                                         pa_end += NBPDP;
 4568                                 else {
 4569                                         /* Run ended, update direct map. */
 4570                                         error = pmap_change_attr_locked(
 4571                                             PHYS_TO_DMAP(pa_start),
 4572                                             pa_end - pa_start, mode);
 4573                                         if (error != 0)
 4574                                                 break;
 4575                                         /* Start physical address run. */
 4576                                         pa_start = *pdpe & PG_PS_FRAME;
 4577                                         pa_end = pa_start + NBPDP;
 4578                                 }
 4579                         }
 4580                         tmpva = trunc_1gpage(tmpva) + NBPDP;
 4581                         continue;
 4582                 }
 4583                 pde = pmap_pdpe_to_pde(pdpe, tmpva);
 4584                 if (*pde & PG_PS) {
 4585                         if ((*pde & PG_PDE_CACHE) != cache_bits_pde) {
 4586                                 pmap_pde_attr(pde, cache_bits_pde);
 4587                                 changed = TRUE;
 4588                         }
 4589                         if (tmpva >= VM_MIN_KERNEL_ADDRESS) {
 4590                                 if (pa_start == pa_end) {
 4591                                         /* Start physical address run. */
 4592                                         pa_start = *pde & PG_PS_FRAME;
 4593                                         pa_end = pa_start + NBPDR;
 4594                                 } else if (pa_end == (*pde & PG_PS_FRAME))
 4595                                         pa_end += NBPDR;
 4596                                 else {
 4597                                         /* Run ended, update direct map. */
 4598                                         error = pmap_change_attr_locked(
 4599                                             PHYS_TO_DMAP(pa_start),
 4600                                             pa_end - pa_start, mode);
 4601                                         if (error != 0)
 4602                                                 break;
 4603                                         /* Start physical address run. */
 4604                                         pa_start = *pde & PG_PS_FRAME;
 4605                                         pa_end = pa_start + NBPDR;
 4606                                 }
 4607                         }
 4608                         tmpva = trunc_2mpage(tmpva) + NBPDR;
 4609                 } else {
 4610                         pte = pmap_pde_to_pte(pde, tmpva);
 4611                         if ((*pte & PG_PTE_CACHE) != cache_bits_pte) {
 4612                                 pmap_pte_attr(pte, cache_bits_pte);
 4613                                 changed = TRUE;
 4614                         }
 4615                         if (tmpva >= VM_MIN_KERNEL_ADDRESS) {
 4616                                 if (pa_start == pa_end) {
 4617                                         /* Start physical address run. */
 4618                                         pa_start = *pte & PG_FRAME;
 4619                                         pa_end = pa_start + PAGE_SIZE;
 4620                                 } else if (pa_end == (*pte & PG_FRAME))
 4621                                         pa_end += PAGE_SIZE;
 4622                                 else {
 4623                                         /* Run ended, update direct map. */
 4624                                         error = pmap_change_attr_locked(
 4625                                             PHYS_TO_DMAP(pa_start),
 4626                                             pa_end - pa_start, mode);
 4627                                         if (error != 0)
 4628                                                 break;
 4629                                         /* Start physical address run. */
 4630                                         pa_start = *pte & PG_FRAME;
 4631                                         pa_end = pa_start + PAGE_SIZE;
 4632                                 }
 4633                         }
 4634                         tmpva += PAGE_SIZE;
 4635                 }
 4636         }
 4637         if (error == 0 && pa_start != pa_end)
 4638                 error = pmap_change_attr_locked(PHYS_TO_DMAP(pa_start),
 4639                     pa_end - pa_start, mode);
 4640 
 4641         /*
 4642          * Flush CPU caches if required to make sure any data isn't cached that
 4643          * shouldn't be, etc.
 4644          */
 4645         if (changed) {
 4646                 pmap_invalidate_range(kernel_pmap, base, tmpva);
 4647                 pmap_invalidate_cache_range(base, tmpva);
 4648         }
 4649         return (error);
 4650 }
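
/*
 * Illustration (standalone sketch): the "physical address run" bookkeeping
 * in pmap_change_attr_locked() coalesces physically contiguous pages so
 * that the recursive direct-map update is issued once per contiguous run
 * rather than once per page.  The same pattern, reduced to its essentials:
 */
#include <stdint.h>
#include <stdio.h>

#define EX_PAGE_SIZE    4096ULL

/* Hypothetical stand-in for the recursive pmap_change_attr_locked() call. */
static void
example_update_run(uint64_t start, uint64_t end)
{
        printf("update direct map for [%#jx, %#jx)\n", (uintmax_t)start,
            (uintmax_t)end);
}

static void
example_coalesce(const uint64_t *pa, int npages)
{
        uint64_t pa_start, pa_end;
        int i;

        pa_start = pa_end = 0;
        for (i = 0; i < npages; i++) {
                if (pa_start == pa_end) {       /* no run is open yet */
                        pa_start = pa[i];
                        pa_end = pa_start + EX_PAGE_SIZE;
                } else if (pa_end == pa[i]) {   /* extends the current run */
                        pa_end += EX_PAGE_SIZE;
                } else {                        /* run ended, flush it */
                        example_update_run(pa_start, pa_end);
                        pa_start = pa[i];
                        pa_end = pa_start + EX_PAGE_SIZE;
                }
        }
        if (pa_start != pa_end)                 /* flush the final run */
                example_update_run(pa_start, pa_end);
}

int
main(void)
{
        /* Two contiguous pages, then a gap, then one more page. */
        uint64_t pa[] = { 0x1000, 0x2000, 0x8000 };

        example_coalesce(pa, 3);        /* prints two runs */
        return (0);
}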
 4651 
 4652 /*
  4653  * Perform the pmap work for the mincore(2) system call.
 4654  */
 4655 int
 4656 pmap_mincore(pmap_t pmap, vm_offset_t addr)
 4657 {
 4658         pd_entry_t *pdep;
 4659         pt_entry_t pte;
 4660         vm_paddr_t pa;
 4661         vm_page_t m;
 4662         int val = 0;
 4663         
 4664         PMAP_LOCK(pmap);
 4665         pdep = pmap_pde(pmap, addr);
 4666         if (pdep != NULL && (*pdep & PG_V)) {
 4667                 if (*pdep & PG_PS) {
 4668                         pte = *pdep;
 4669                         val = MINCORE_SUPER;
 4670                         /* Compute the physical address of the 4KB page. */
 4671                         pa = ((*pdep & PG_PS_FRAME) | (addr & PDRMASK)) &
 4672                             PG_FRAME;
 4673                 } else {
 4674                         pte = *pmap_pde_to_pte(pdep, addr);
 4675                         pa = pte & PG_FRAME;
 4676                 }
 4677         } else {
 4678                 pte = 0;
 4679                 pa = 0;
 4680         }
 4681         PMAP_UNLOCK(pmap);
 4682 
 4683         if (pte != 0) {
 4684                 val |= MINCORE_INCORE;
 4685                 if ((pte & PG_MANAGED) == 0)
  4686                         return (val);
 4687 
 4688                 m = PHYS_TO_VM_PAGE(pa);
 4689 
 4690                 /*
 4691                  * Modified by us
 4692                  */
 4693                 if ((pte & (PG_M | PG_RW)) == (PG_M | PG_RW))
  4694                         val |= MINCORE_MODIFIED | MINCORE_MODIFIED_OTHER;
 4695                 else {
 4696                         /*
 4697                          * Modified by someone else
 4698                          */
 4699                         vm_page_lock_queues();
 4700                         if (m->dirty || pmap_is_modified(m))
 4701                                 val |= MINCORE_MODIFIED_OTHER;
 4702                         vm_page_unlock_queues();
 4703                 }
 4704                 /*
 4705                  * Referenced by us
 4706                  */
 4707                 if (pte & PG_A)
  4708                         val |= MINCORE_REFERENCED | MINCORE_REFERENCED_OTHER;
 4709                 else {
 4710                         /*
 4711                          * Referenced by someone else
 4712                          */
 4713                         vm_page_lock_queues();
 4714                         if ((m->flags & PG_REFERENCED) ||
 4715                             pmap_ts_referenced(m)) {
 4716                                 val |= MINCORE_REFERENCED_OTHER;
 4717                                 vm_page_flag_set(m, PG_REFERENCED);
 4718                         }
 4719                         vm_page_unlock_queues();
 4720                 }
  4721         }
  4722         return (val);
 4723 }
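
/*
 * Example: the MINCORE_* bits computed above reach userland through
 * mincore(2).  A minimal FreeBSD userland program (sketch) that maps and
 * dirties one anonymous page and then inspects its status vector:
 */
#include <sys/mman.h>
#include <err.h>
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
        size_t pagesz = (size_t)getpagesize();
        char *p, vec;

        p = mmap(NULL, pagesz, PROT_READ | PROT_WRITE,
            MAP_ANON | MAP_PRIVATE, -1, 0);
        if (p == MAP_FAILED)
                err(1, "mmap");
        p[0] = 1;                       /* fault the page in and dirty it */
        if (mincore(p, pagesz, &vec) == -1)
                err(1, "mincore");
        printf("incore=%d modified=%d referenced=%d super=%d\n",
            (vec & MINCORE_INCORE) != 0, (vec & MINCORE_MODIFIED) != 0,
            (vec & MINCORE_REFERENCED) != 0, (vec & MINCORE_SUPER) != 0);
        munmap(p, pagesz);
        return (0);
}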
 4724 
 4725 void
 4726 pmap_activate(struct thread *td)
 4727 {
 4728         pmap_t  pmap, oldpmap;
 4729         u_int64_t  cr3;
 4730 
 4731         critical_enter();
 4732         pmap = vmspace_pmap(td->td_proc->p_vmspace);
 4733         oldpmap = PCPU_GET(curpmap);
 4734 #ifdef SMP
  4735         if (oldpmap)    /* XXX FIXME */
  4736                 atomic_clear_int(&oldpmap->pm_active, PCPU_GET(cpumask));
  4737         atomic_set_int(&pmap->pm_active, PCPU_GET(cpumask));
  4738 #else
  4739         if (oldpmap)    /* XXX FIXME */
  4740                 oldpmap->pm_active &= ~PCPU_GET(cpumask);
  4741         pmap->pm_active |= PCPU_GET(cpumask);
 4742 #endif
 4743         cr3 = DMAP_TO_PHYS((vm_offset_t)pmap->pm_pml4);
 4744         td->td_pcb->pcb_cr3 = cr3;
 4745         load_cr3(cr3);
 4746         critical_exit();
 4747 }
 4748 
 4749 /*
 4750  *      Increase the starting virtual address of the given mapping if a
 4751  *      different alignment might result in more superpage mappings.
 4752  */
 4753 void
 4754 pmap_align_superpage(vm_object_t object, vm_ooffset_t offset,
 4755     vm_offset_t *addr, vm_size_t size)
 4756 {
 4757         vm_offset_t superpage_offset;
 4758 
 4759         if (size < NBPDR)
 4760                 return;
 4761         if (object != NULL && (object->flags & OBJ_COLORED) != 0)
 4762                 offset += ptoa(object->pg_color);
 4763         superpage_offset = offset & PDRMASK;
 4764         if (size - ((NBPDR - superpage_offset) & PDRMASK) < NBPDR ||
 4765             (*addr & PDRMASK) == superpage_offset)
 4766                 return;
 4767         if ((*addr & PDRMASK) < superpage_offset)
 4768                 *addr = (*addr & ~PDRMASK) + superpage_offset;
 4769         else
 4770                 *addr = ((*addr + PDRMASK) & ~PDRMASK) + superpage_offset;
 4771 }
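
/*
 * Worked example (standalone sketch) of the alignment rule above, with
 * NBPDR = 2MB and PDRMASK = NBPDR - 1.  If a mapping starts at object
 * offset 0x123000, the hint address is advanced (never moved backwards)
 * until (addr & PDRMASK) == 0x123000, so that 2MB-aligned offsets in the
 * object land on 2MB-aligned virtual addresses and can later be promoted
 * to superpages.  The address and offset below are made-up values.
 */
#include <assert.h>
#include <stdint.h>

#define EX_NBPDR        0x200000ULL
#define EX_PDRMASK      (EX_NBPDR - 1)

static uint64_t
example_align_superpage(uint64_t addr, uint64_t offset)
{
        uint64_t superpage_offset = offset & EX_PDRMASK;

        if ((addr & EX_PDRMASK) == superpage_offset)
                return (addr);
        if ((addr & EX_PDRMASK) < superpage_offset)
                return ((addr & ~EX_PDRMASK) + superpage_offset);
        return (((addr + EX_PDRMASK) & ~EX_PDRMASK) + superpage_offset);
}

int
main(void)
{
        uint64_t addr = example_align_superpage(0x800600000ULL, 0x123000ULL);

        assert(addr == 0x800723000ULL);
        assert((addr & EX_PDRMASK) == 0x123000);
        return (0);
}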
