FreeBSD/Linux Kernel Cross Reference
sys/amd64/amd64/pmap.c


    1 /*-
    2  * Copyright (c) 1991 Regents of the University of California.
    3  * All rights reserved.
    4  * Copyright (c) 1994 John S. Dyson
    5  * All rights reserved.
    6  * Copyright (c) 1994 David Greenman
    7  * All rights reserved.
    8  * Copyright (c) 2003 Peter Wemm
    9  * All rights reserved.
   10  * Copyright (c) 2005-2010 Alan L. Cox <alc@cs.rice.edu>
   11  * All rights reserved.
   12  *
   13  * This code is derived from software contributed to Berkeley by
   14  * the Systems Programming Group of the University of Utah Computer
   15  * Science Department and William Jolitz of UUNET Technologies Inc.
   16  *
   17  * Redistribution and use in source and binary forms, with or without
   18  * modification, are permitted provided that the following conditions
   19  * are met:
   20  * 1. Redistributions of source code must retain the above copyright
   21  *    notice, this list of conditions and the following disclaimer.
   22  * 2. Redistributions in binary form must reproduce the above copyright
   23  *    notice, this list of conditions and the following disclaimer in the
   24  *    documentation and/or other materials provided with the distribution.
   25  * 3. All advertising materials mentioning features or use of this software
   26  *    must display the following acknowledgement:
   27  *      This product includes software developed by the University of
   28  *      California, Berkeley and its contributors.
   29  * 4. Neither the name of the University nor the names of its contributors
   30  *    may be used to endorse or promote products derived from this software
   31  *    without specific prior written permission.
   32  *
   33  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
   34  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   35  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   36  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
   37  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   38  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
   39  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   40  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   41  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   42  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   43  * SUCH DAMAGE.
   44  *
   45  *      from:   @(#)pmap.c      7.7 (Berkeley)  5/12/91
   46  */
   47 /*-
   48  * Copyright (c) 2003 Networks Associates Technology, Inc.
   49  * All rights reserved.
   50  *
   51  * This software was developed for the FreeBSD Project by Jake Burkholder,
   52  * Safeport Network Services, and Network Associates Laboratories, the
   53  * Security Research Division of Network Associates, Inc. under
   54  * DARPA/SPAWAR contract N66001-01-C-8035 ("CBOSS"), as part of the DARPA
   55  * CHATS research program.
   56  *
   57  * Redistribution and use in source and binary forms, with or without
   58  * modification, are permitted provided that the following conditions
   59  * are met:
   60  * 1. Redistributions of source code must retain the above copyright
   61  *    notice, this list of conditions and the following disclaimer.
   62  * 2. Redistributions in binary form must reproduce the above copyright
   63  *    notice, this list of conditions and the following disclaimer in the
   64  *    documentation and/or other materials provided with the distribution.
   65  *
   66  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
   67  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   68  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   69  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
   70  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   71  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
   72  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   73  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   74  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   75  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   76  * SUCH DAMAGE.
   77  */
   78 
   79 #include <sys/cdefs.h>
   80 __FBSDID("$FreeBSD$");
   81 
   82 /*
   83  *      Manages physical address maps.
   84  *
   85  *      In addition to hardware address maps, this
   86  *      module is called upon to provide software-use-only
   87  *      maps which may or may not be stored in the same
   88  *      form as hardware maps.  These pseudo-maps are
   89  *      used to store intermediate results from copy
   90  *      operations to and from address spaces.
   91  *
   92  *      Since the information managed by this module is
   93  *      also stored by the logical address mapping module,
   94  *      this module may throw away valid virtual-to-physical
   95  *      mappings at almost any time.  However, invalidations
   96  *      of virtual-to-physical mappings must be done as
   97  *      requested.
   98  *
   99  *      In order to cope with hardware architectures that make
  100  *      virtual-to-physical map invalidation expensive, this
  101  *      module may delay invalidation or protection-reduction
  102  *      operations until such time as they are actually
  103  *      necessary.  This module is given full information as
  104  *      to which processors are currently using which maps,
  105  *      and as to when physical maps must be made correct.
  106  */
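      /*
       * Illustrative note (editorial sketch, not part of the original source):
       * on amd64 this module manages a four-level page-table hierarchy.
       * Assuming the standard 48-bit virtual address format, a VA decomposes
       * as
       *
       *      bits 47-39: PML4 index (512 pml4_entry_t per PML4 page)
       *      bits 38-30: PDP index  (512 pdp_entry_t per PDP page)
       *      bits 29-21: PD index   (512 pd_entry_t per PD page; PG_PS here
       *                              selects a 2MB mapping)
       *      bits 20-12: PT index   (512 pt_entry_t per PT page)
       *      bits 11-0:  byte offset within the 4KB page
       *
       * The inline helpers below (pmap_pml4e_index() and friends) simply
       * extract these fields.
       */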
  107 
  108 #include "opt_pmap.h"
  109 #include "opt_vm.h"
  110 
  111 #include <sys/param.h>
  112 #include <sys/bus.h>
  113 #include <sys/systm.h>
  114 #include <sys/kernel.h>
  115 #include <sys/ktr.h>
  116 #include <sys/lock.h>
  117 #include <sys/malloc.h>
  118 #include <sys/mman.h>
  119 #include <sys/mutex.h>
  120 #include <sys/proc.h>
  121 #include <sys/rwlock.h>
  122 #include <sys/sx.h>
  123 #include <sys/vmmeter.h>
  124 #include <sys/sched.h>
  125 #include <sys/sysctl.h>
  126 #ifdef SMP
  127 #include <sys/smp.h>
  128 #else
  129 #include <sys/cpuset.h>
  130 #endif
  131 
  132 #include <vm/vm.h>
  133 #include <vm/vm_param.h>
  134 #include <vm/vm_kern.h>
  135 #include <vm/vm_page.h>
  136 #include <vm/vm_map.h>
  137 #include <vm/vm_object.h>
  138 #include <vm/vm_extern.h>
  139 #include <vm/vm_pageout.h>
  140 #include <vm/vm_pager.h>
  141 #include <vm/vm_reserv.h>
  142 #include <vm/uma.h>
  143 
  144 #include <machine/intr_machdep.h>
  145 #include <machine/apicvar.h>
  146 #include <machine/cpu.h>
  147 #include <machine/cputypes.h>
  148 #include <machine/md_var.h>
  149 #include <machine/pcb.h>
  150 #include <machine/specialreg.h>
  151 #ifdef SMP
  152 #include <machine/smp.h>
  153 #endif
  154 
  155 #if !defined(DIAGNOSTIC)
  156 #ifdef __GNUC_GNU_INLINE__
  157 #define PMAP_INLINE     __attribute__((__gnu_inline__)) inline
  158 #else
  159 #define PMAP_INLINE     extern inline
  160 #endif
  161 #else
  162 #define PMAP_INLINE
  163 #endif
  164 
  165 #ifdef PV_STATS
  166 #define PV_STAT(x)      do { x ; } while (0)
  167 #else
  168 #define PV_STAT(x)      do { } while (0)
  169 #endif
  170 
  171 #define pa_index(pa)    ((pa) >> PDRSHIFT)
  172 #define pa_to_pvh(pa)   (&pv_table[pa_index(pa)])
  173 
  174 #define NPV_LIST_LOCKS  MAXCPU
  175 
  176 #define PHYS_TO_PV_LIST_LOCK(pa)        \
  177                         (&pv_list_locks[pa_index(pa) % NPV_LIST_LOCKS])
  178 
  179 #define CHANGE_PV_LIST_LOCK_TO_PHYS(lockp, pa)  do {    \
  180         struct rwlock **_lockp = (lockp);               \
  181         struct rwlock *_new_lock;                       \
  182                                                         \
  183         _new_lock = PHYS_TO_PV_LIST_LOCK(pa);           \
  184         if (_new_lock != *_lockp) {                     \
  185                 if (*_lockp != NULL)                    \
  186                         rw_wunlock(*_lockp);            \
  187                 *_lockp = _new_lock;                    \
  188                 rw_wlock(*_lockp);                      \
  189         }                                               \
  190 } while (0)
  191 
  192 #define CHANGE_PV_LIST_LOCK_TO_VM_PAGE(lockp, m)        \
  193                         CHANGE_PV_LIST_LOCK_TO_PHYS(lockp, VM_PAGE_TO_PHYS(m))
  194 
  195 #define RELEASE_PV_LIST_LOCK(lockp)             do {    \
  196         struct rwlock **_lockp = (lockp);               \
  197                                                         \
  198         if (*_lockp != NULL) {                          \
  199                 rw_wunlock(*_lockp);                    \
  200                 *_lockp = NULL;                         \
  201         }                                               \
  202 } while (0)
  203 
  204 #define VM_PAGE_TO_PV_LIST_LOCK(m)      \
  205                         PHYS_TO_PV_LIST_LOCK(VM_PAGE_TO_PHYS(m))
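      /*
       * Illustrative usage sketch (editorial, not part of the original
       * source): callers that walk pv lists for several pages typically
       * start with a NULL lock pointer and let the macros above switch
       * locks as the physical address changes, along the lines of
       *
       *      struct rwlock *lock = NULL;
       *      ...
       *      CHANGE_PV_LIST_LOCK_TO_VM_PAGE(&lock, m);
       *      ... manipulate m's pv_list ...
       *      RELEASE_PV_LIST_LOCK(&lock);
       *
       * Because the lock is chosen by pa_index(pa) % NPV_LIST_LOCKS, all
       * 4KB pages within the same 2MB physical frame share one pv list lock.
       */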
  206 
  207 struct pmap kernel_pmap_store;
  208 
  209 vm_offset_t virtual_avail;      /* VA of first avail page (after kernel bss) */
  210 vm_offset_t virtual_end;        /* VA of last avail page (end of kernel AS) */
  211 
  212 static int ndmpdp;
  213 vm_paddr_t dmaplimit;
  214 vm_offset_t kernel_vm_end = VM_MIN_KERNEL_ADDRESS;
  215 pt_entry_t pg_nx;
  216 
  217 static SYSCTL_NODE(_vm, OID_AUTO, pmap, CTLFLAG_RD, 0, "VM/pmap parameters");
  218 
  219 static int pat_works = 1;
  220 SYSCTL_INT(_vm_pmap, OID_AUTO, pat_works, CTLFLAG_RD, &pat_works, 1,
  221     "Is page attribute table fully functional?");
  222 
  223 static int pg_ps_enabled = 1;
  224 SYSCTL_INT(_vm_pmap, OID_AUTO, pg_ps_enabled, CTLFLAG_RDTUN, &pg_ps_enabled, 0,
  225     "Are large page mappings enabled?");
  226 
  227 #define PAT_INDEX_SIZE  8
  228 static int pat_index[PAT_INDEX_SIZE];   /* cache mode to PAT index conversion */
  229 
  230 static u_int64_t        KPTphys;        /* phys addr of kernel level 1 */
  231 static u_int64_t        KPDphys;        /* phys addr of kernel level 2 */
  232 u_int64_t               KPDPphys;       /* phys addr of kernel level 3 */
  233 u_int64_t               KPML4phys;      /* phys addr of kernel level 4 */
  234 
  235 static u_int64_t        DMPDphys;       /* phys addr of direct mapped level 2 */
  236 static u_int64_t        DMPDPphys;      /* phys addr of direct mapped level 3 */
  237 
  238 /*
  239  * Isolate the global pv list lock from data and other locks to prevent false
  240  * sharing within the cache.
  241  */
  242 static struct {
  243         struct rwlock   lock;
  244         char            padding[CACHE_LINE_SIZE - sizeof(struct rwlock)];
  245 } pvh_global __aligned(CACHE_LINE_SIZE);
  246 
  247 #define pvh_global_lock pvh_global.lock
  248 
  249 /*
  250  * Data for the pv entry allocation mechanism
  251  */
  252 static TAILQ_HEAD(pch, pv_chunk) pv_chunks = TAILQ_HEAD_INITIALIZER(pv_chunks);
  253 static struct mtx pv_chunks_mutex;
  254 static struct rwlock pv_list_locks[NPV_LIST_LOCKS];
  255 static struct md_page *pv_table;
  256 
  257 /*
  258  * All those kernel PT submaps that BSD is so fond of
  259  */
  260 pt_entry_t *CMAP1 = 0;
  261 caddr_t CADDR1 = 0;
  262 
  263 /*
  264  * Crashdump maps.
  265  */
  266 static caddr_t crashdumpmap;
  267 
  268 static void     free_pv_chunk(struct pv_chunk *pc);
  269 static void     free_pv_entry(pmap_t pmap, pv_entry_t pv);
  270 static pv_entry_t get_pv_entry(pmap_t pmap, struct rwlock **lockp);
  271 static int      popcnt_pc_map_elem(uint64_t elem);
  272 static vm_page_t reclaim_pv_chunk(pmap_t locked_pmap, struct rwlock **lockp);
  273 static void     reserve_pv_entries(pmap_t pmap, int needed,
  274                     struct rwlock **lockp);
  275 static void     pmap_pv_demote_pde(pmap_t pmap, vm_offset_t va, vm_paddr_t pa,
  276                     struct rwlock **lockp);
  277 static boolean_t pmap_pv_insert_pde(pmap_t pmap, vm_offset_t va, vm_paddr_t pa,
  278                     struct rwlock **lockp);
  279 static void     pmap_pv_promote_pde(pmap_t pmap, vm_offset_t va, vm_paddr_t pa,
  280                     struct rwlock **lockp);
  281 static void     pmap_pvh_free(struct md_page *pvh, pmap_t pmap, vm_offset_t va);
  282 static pv_entry_t pmap_pvh_remove(struct md_page *pvh, pmap_t pmap,
  283                     vm_offset_t va);
  284 static int      pmap_pvh_wired_mappings(struct md_page *pvh, int count);
  285 
  286 static int pmap_change_attr_locked(vm_offset_t va, vm_size_t size, int mode);
  287 static boolean_t pmap_demote_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t va);
  288 static boolean_t pmap_demote_pde_locked(pmap_t pmap, pd_entry_t *pde,
  289     vm_offset_t va, struct rwlock **lockp);
  290 static boolean_t pmap_demote_pdpe(pmap_t pmap, pdp_entry_t *pdpe,
  291     vm_offset_t va);
  292 static boolean_t pmap_enter_pde(pmap_t pmap, vm_offset_t va, vm_page_t m,
  293     vm_prot_t prot, struct rwlock **lockp);
  294 static vm_page_t pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va,
  295     vm_page_t m, vm_prot_t prot, vm_page_t mpte, struct rwlock **lockp);
  296 static void pmap_fill_ptp(pt_entry_t *firstpte, pt_entry_t newpte);
  297 static void pmap_insert_pt_page(pmap_t pmap, vm_page_t mpte);
  298 static boolean_t pmap_is_modified_pvh(struct md_page *pvh);
  299 static boolean_t pmap_is_referenced_pvh(struct md_page *pvh);
  300 static void pmap_kenter_attr(vm_offset_t va, vm_paddr_t pa, int mode);
  301 static vm_page_t pmap_lookup_pt_page(pmap_t pmap, vm_offset_t va);
  302 static void pmap_pde_attr(pd_entry_t *pde, int cache_bits);
  303 static void pmap_promote_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t va,
  304     struct rwlock **lockp);
  305 static boolean_t pmap_protect_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t sva,
  306     vm_prot_t prot);
  307 static void pmap_pte_attr(pt_entry_t *pte, int cache_bits);
  308 static int pmap_remove_pde(pmap_t pmap, pd_entry_t *pdq, vm_offset_t sva,
  309                 vm_page_t *free, struct rwlock **lockp);
  310 static int pmap_remove_pte(pmap_t pmap, pt_entry_t *ptq,
  311                 vm_offset_t sva, pd_entry_t ptepde, vm_page_t *free,
  312                 struct rwlock **lockp);
  313 static void pmap_remove_pt_page(pmap_t pmap, vm_page_t mpte);
  314 static void pmap_remove_page(pmap_t pmap, vm_offset_t va, pd_entry_t *pde,
  315     vm_page_t *free);
  316 static boolean_t pmap_try_insert_pv_entry(pmap_t pmap, vm_offset_t va,
  317     vm_page_t m, struct rwlock **lockp);
  318 static void pmap_update_pde(pmap_t pmap, vm_offset_t va, pd_entry_t *pde,
  319     pd_entry_t newpde);
  320 static void pmap_update_pde_invalidate(vm_offset_t va, pd_entry_t newpde);
  321 
  322 static vm_page_t _pmap_allocpte(pmap_t pmap, vm_pindex_t ptepindex,
  323                 struct rwlock **lockp);
  324 static vm_page_t pmap_allocpde(pmap_t pmap, vm_offset_t va,
  325                 struct rwlock **lockp);
  326 static vm_page_t pmap_allocpte(pmap_t pmap, vm_offset_t va,
  327                 struct rwlock **lockp);
  328 
  329 static void _pmap_unwire_ptp(pmap_t pmap, vm_offset_t va, vm_page_t m,
  330                 vm_page_t *free);
  331 static int pmap_unuse_pt(pmap_t, vm_offset_t, pd_entry_t, vm_page_t *);
  332 static vm_offset_t pmap_kmem_choose(vm_offset_t addr);
  333 
  334 CTASSERT(1 << PDESHIFT == sizeof(pd_entry_t));
  335 CTASSERT(1 << PTESHIFT == sizeof(pt_entry_t));
  336 
  337 /*
  338  * Move the kernel virtual free pointer to the next
  339  * 2MB.  This is used to help improve performance
  340  * by using a large (2MB) page for much of the kernel
  341  * (.text, .data, .bss)
  342  */
  343 static vm_offset_t
  344 pmap_kmem_choose(vm_offset_t addr)
  345 {
  346         vm_offset_t newaddr = addr;
  347 
  348         newaddr = (addr + (NBPDR - 1)) & ~(NBPDR - 1);
  349         return (newaddr);
  350 }
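      /*
       * Worked example (editorial, assuming NBPDR == 2MB): an addr of
       * KERNBASE + 0x1234567 rounds up to KERNBASE + 0x1400000, the next
       * 2MB boundary, because (addr + (NBPDR - 1)) & ~(NBPDR - 1) adds
       * NBPDR - 1 and then clears the low 21 bits.
       */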
  351 
  352 /********************/
  353 /* Inline functions */
  354 /********************/
  355 
  356 /* Return a non-clipped PD index for a given VA */
  357 static __inline vm_pindex_t
  358 pmap_pde_pindex(vm_offset_t va)
  359 {
  360         return (va >> PDRSHIFT);
  361 }
  362 
  363 
  364 /* Return various clipped indexes for a given VA */
  365 static __inline vm_pindex_t
  366 pmap_pte_index(vm_offset_t va)
  367 {
  368 
  369         return ((va >> PAGE_SHIFT) & ((1ul << NPTEPGSHIFT) - 1));
  370 }
  371 
  372 static __inline vm_pindex_t
  373 pmap_pde_index(vm_offset_t va)
  374 {
  375 
  376         return ((va >> PDRSHIFT) & ((1ul << NPDEPGSHIFT) - 1));
  377 }
  378 
  379 static __inline vm_pindex_t
  380 pmap_pdpe_index(vm_offset_t va)
  381 {
  382 
  383         return ((va >> PDPSHIFT) & ((1ul << NPDPEPGSHIFT) - 1));
  384 }
  385 
  386 static __inline vm_pindex_t
  387 pmap_pml4e_index(vm_offset_t va)
  388 {
  389 
  390         return ((va >> PML4SHIFT) & ((1ul << NPML4EPGSHIFT) - 1));
  391 }
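      /*
       * Worked example (editorial, assuming the stock amd64 constants
       * PAGE_SHIFT == 12, PDRSHIFT == 21, PDPSHIFT == 30 and
       * PML4SHIFT == 39): for va == KERNBASE == 0xffffffff80000000,
       *
       *      pmap_pml4e_index(va) == 511
       *      pmap_pdpe_index(va)  == 510
       *      pmap_pde_index(va)   == 0
       *      pmap_pte_index(va)   == 0
       *
       * which is why create_pagetables() below installs the kernel PDP in
       * the last PML4 slot (KPML4I) and the kernel PDs near the top of
       * that PDP (KPDPI).
       */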
  392 
  393 /* Return a pointer to the PML4 slot that corresponds to a VA */
  394 static __inline pml4_entry_t *
  395 pmap_pml4e(pmap_t pmap, vm_offset_t va)
  396 {
  397 
  398         return (&pmap->pm_pml4[pmap_pml4e_index(va)]);
  399 }
  400 
  401 /* Return a pointer to the PDP slot that corresponds to a VA */
  402 static __inline pdp_entry_t *
  403 pmap_pml4e_to_pdpe(pml4_entry_t *pml4e, vm_offset_t va)
  404 {
  405         pdp_entry_t *pdpe;
  406 
  407         pdpe = (pdp_entry_t *)PHYS_TO_DMAP(*pml4e & PG_FRAME);
  408         return (&pdpe[pmap_pdpe_index(va)]);
  409 }
  410 
  411 /* Return a pointer to the PDP slot that corresponds to a VA */
  412 static __inline pdp_entry_t *
  413 pmap_pdpe(pmap_t pmap, vm_offset_t va)
  414 {
  415         pml4_entry_t *pml4e;
  416 
  417         pml4e = pmap_pml4e(pmap, va);
  418         if ((*pml4e & PG_V) == 0)
  419                 return (NULL);
  420         return (pmap_pml4e_to_pdpe(pml4e, va));
  421 }
  422 
  423 /* Return a pointer to the PD slot that corresponds to a VA */
  424 static __inline pd_entry_t *
  425 pmap_pdpe_to_pde(pdp_entry_t *pdpe, vm_offset_t va)
  426 {
  427         pd_entry_t *pde;
  428 
  429         pde = (pd_entry_t *)PHYS_TO_DMAP(*pdpe & PG_FRAME);
  430         return (&pde[pmap_pde_index(va)]);
  431 }
  432 
  433 /* Return a pointer to the PD slot that corresponds to a VA */
  434 static __inline pd_entry_t *
  435 pmap_pde(pmap_t pmap, vm_offset_t va)
  436 {
  437         pdp_entry_t *pdpe;
  438 
  439         pdpe = pmap_pdpe(pmap, va);
  440         if (pdpe == NULL || (*pdpe & PG_V) == 0)
  441                 return (NULL);
  442         return (pmap_pdpe_to_pde(pdpe, va));
  443 }
  444 
  445 /* Return a pointer to the PT slot that corresponds to a VA */
  446 static __inline pt_entry_t *
  447 pmap_pde_to_pte(pd_entry_t *pde, vm_offset_t va)
  448 {
  449         pt_entry_t *pte;
  450 
  451         pte = (pt_entry_t *)PHYS_TO_DMAP(*pde & PG_FRAME);
  452         return (&pte[pmap_pte_index(va)]);
  453 }
  454 
  455 /* Return a pointer to the PT slot that corresponds to a VA */
  456 static __inline pt_entry_t *
  457 pmap_pte(pmap_t pmap, vm_offset_t va)
  458 {
  459         pd_entry_t *pde;
  460 
  461         pde = pmap_pde(pmap, va);
  462         if (pde == NULL || (*pde & PG_V) == 0)
  463                 return (NULL);
  464         if ((*pde & PG_PS) != 0)        /* compat with i386 pmap_pte() */
  465                 return ((pt_entry_t *)pde);
  466         return (pmap_pde_to_pte(pde, va));
  467 }
  468 
  469 static __inline void
  470 pmap_resident_count_inc(pmap_t pmap, int count)
  471 {
  472 
  473         PMAP_LOCK_ASSERT(pmap, MA_OWNED);
  474         pmap->pm_stats.resident_count += count;
  475 }
  476 
  477 static __inline void
  478 pmap_resident_count_dec(pmap_t pmap, int count)
  479 {
  480 
  481         PMAP_LOCK_ASSERT(pmap, MA_OWNED);
  482         KASSERT(pmap->pm_stats.resident_count >= count,
  483             ("pmap %p resident count underflow %ld %d", pmap,
  484             pmap->pm_stats.resident_count, count));
  485         pmap->pm_stats.resident_count -= count;
  486 }
  487 
  488 PMAP_INLINE pt_entry_t *
  489 vtopte(vm_offset_t va)
  490 {
  491         u_int64_t mask = ((1ul << (NPTEPGSHIFT + NPDEPGSHIFT + NPDPEPGSHIFT + NPML4EPGSHIFT)) - 1);
  492 
  493         return (PTmap + ((va >> PAGE_SHIFT) & mask));
  494 }
  495 
  496 static __inline pd_entry_t *
  497 vtopde(vm_offset_t va)
  498 {
  499         u_int64_t mask = ((1ul << (NPDEPGSHIFT + NPDPEPGSHIFT + NPML4EPGSHIFT)) - 1);
  500 
  501         return (PDmap + ((va >> PDRSHIFT) & mask));
  502 }
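      /*
       * Illustrative note (editorial): vtopte() relies on the recursive
       * PML4 slot (PML4PML4I) that create_pagetables() installs, which
       * exposes the page-table pages themselves through the PTmap region.
       * The 36-bit mask covers the four 9-bit page-table indices of a
       * 48-bit VA, so indexing PTmap by that virtual page number yields
       * the address at which the PTE mapping 'va' is itself mapped.
       * vtopde() works the same way one level up, via PDmap and a 27-bit
       * mask.
       */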
  503 
  504 static u_int64_t
  505 allocpages(vm_paddr_t *firstaddr, int n)
  506 {
  507         u_int64_t ret;
  508 
  509         ret = *firstaddr;
  510         bzero((void *)ret, n * PAGE_SIZE);
  511         *firstaddr += n * PAGE_SIZE;
  512         return (ret);
  513 }
  514 
  515 CTASSERT(powerof2(NDMPML4E));
  516 
  517 static void
  518 create_pagetables(vm_paddr_t *firstaddr)
  519 {
  520         int i, j, ndm1g;
  521 
  522         /* Allocate pages */
  523         KPTphys = allocpages(firstaddr, NKPT);
  524         KPML4phys = allocpages(firstaddr, 1);
  525         KPDPphys = allocpages(firstaddr, NKPML4E);
  526         KPDphys = allocpages(firstaddr, NKPDPE);
  527 
  528         ndmpdp = (ptoa(Maxmem) + NBPDP - 1) >> PDPSHIFT;
  529         if (ndmpdp < 4)         /* Minimum 4GB of dirmap */
  530                 ndmpdp = 4;
  531         DMPDPphys = allocpages(firstaddr, NDMPML4E);
  532         ndm1g = 0;
  533         if ((amd_feature & AMDID_PAGE1GB) != 0)
  534                 ndm1g = ptoa(Maxmem) >> PDPSHIFT;
  535         if (ndm1g < ndmpdp)
  536                 DMPDphys = allocpages(firstaddr, ndmpdp - ndm1g);
  537         dmaplimit = (vm_paddr_t)ndmpdp << PDPSHIFT;
  538 
  539         /* Fill in the underlying page table pages */
  540         /* Read-only from zero to physfree */
  541         /* XXX not fully used, underneath 2M pages */
  542         for (i = 0; (i << PAGE_SHIFT) < *firstaddr; i++) {
  543                 ((pt_entry_t *)KPTphys)[i] = i << PAGE_SHIFT;
  544                 ((pt_entry_t *)KPTphys)[i] |= PG_RW | PG_V | PG_G;
  545         }
  546 
  547         /* Now map the page tables at their location within PTmap */
  548         for (i = 0; i < NKPT; i++) {
  549                 ((pd_entry_t *)KPDphys)[i] = KPTphys + (i << PAGE_SHIFT);
  550                 ((pd_entry_t *)KPDphys)[i] |= PG_RW | PG_V;
  551         }
  552 
  553         /* Map from zero to end of allocations under 2M pages */
  554         /* This replaces some of the KPTphys entries above */
  555         for (i = 0; (i << PDRSHIFT) < *firstaddr; i++) {
  556                 ((pd_entry_t *)KPDphys)[i] = i << PDRSHIFT;
  557                 ((pd_entry_t *)KPDphys)[i] |= PG_RW | PG_V | PG_PS | PG_G;
  558         }
  559 
  560         /* And connect up the PD to the PDP */
  561         for (i = 0; i < NKPDPE; i++) {
  562                 ((pdp_entry_t *)KPDPphys)[i + KPDPI] = KPDphys +
  563                     (i << PAGE_SHIFT);
  564                 ((pdp_entry_t *)KPDPphys)[i + KPDPI] |= PG_RW | PG_V | PG_U;
  565         }
  566 
  567         /*
  568          * Now, set up the direct map region using 2MB and/or 1GB pages.  If
  569          * the end of physical memory is not aligned to a 1GB page boundary,
  570          * then the residual physical memory is mapped with 2MB pages.  Later,
  571          * if pmap_mapdev{_attr}() uses the direct map for non-write-back
  572          * memory, pmap_change_attr() will demote any 2MB or 1GB page mappings
  573          * that are partially used. 
  574          */
  575         for (i = NPDEPG * ndm1g, j = 0; i < NPDEPG * ndmpdp; i++, j++) {
  576                 ((pd_entry_t *)DMPDphys)[j] = (vm_paddr_t)i << PDRSHIFT;
  577                 /* Preset PG_M and PG_A because demotion expects it. */
  578                 ((pd_entry_t *)DMPDphys)[j] |= PG_RW | PG_V | PG_PS | PG_G |
  579                     PG_M | PG_A;
  580         }
  581         for (i = 0; i < ndm1g; i++) {
  582                 ((pdp_entry_t *)DMPDPphys)[i] = (vm_paddr_t)i << PDPSHIFT;
  583                 /* Preset PG_M and PG_A because demotion expects it. */
  584                 ((pdp_entry_t *)DMPDPphys)[i] |= PG_RW | PG_V | PG_PS | PG_G |
  585                     PG_M | PG_A;
  586         }
  587         for (j = 0; i < ndmpdp; i++, j++) {
  588                 ((pdp_entry_t *)DMPDPphys)[i] = DMPDphys + (j << PAGE_SHIFT);
  589                 ((pdp_entry_t *)DMPDPphys)[i] |= PG_RW | PG_V | PG_U;
  590         }
  591 
  592         /* And recursively map PML4 to itself in order to get PTmap */
  593         ((pdp_entry_t *)KPML4phys)[PML4PML4I] = KPML4phys;
  594         ((pdp_entry_t *)KPML4phys)[PML4PML4I] |= PG_RW | PG_V | PG_U;
  595 
  596         /* Connect the Direct Map slot(s) up to the PML4. */
  597         for (i = 0; i < NDMPML4E; i++) {
  598                 ((pdp_entry_t *)KPML4phys)[DMPML4I + i] = DMPDPphys +
  599                     (i << PAGE_SHIFT);
  600                 ((pdp_entry_t *)KPML4phys)[DMPML4I + i] |= PG_RW | PG_V | PG_U;
  601         }
  602 
  603         /* Connect the KVA slot up to the PML4 */
  604         ((pdp_entry_t *)KPML4phys)[KPML4I] = KPDPphys;
  605         ((pdp_entry_t *)KPML4phys)[KPML4I] |= PG_RW | PG_V | PG_U;
  606 }
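      /*
       * Worked sizing example (editorial, assuming a machine whose
       * physical memory tops out at 16GB and that supports 1GB pages):
       * ptoa(Maxmem) is 2^34, so ndmpdp == 16 and ndm1g == 16; the direct
       * map then needs no 2MB DMPD pages at all, just 16 1GB PDP entries,
       * and dmaplimit ends up at 16GB.  Without AMDID_PAGE1GB the same
       * machine would instead allocate 16 DMPD pages and map the direct
       * map with 8192 2MB entries.
       */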
  607 
  608 /*
  609  *      Bootstrap the system enough to run with virtual memory.
  610  *
  611  *      On amd64 this is called after mapping has already been enabled
  612  *      and just syncs the pmap module with what has already been done.
  613  *      [We can't call it easily with mapping off since the kernel is not
  614  *      mapped with PA == VA, hence we would have to relocate every address
  615  *      from the linked base (virtual) address "KERNBASE" to the actual
  616  *      (physical) address starting relative to 0]
  617  */
  618 void
  619 pmap_bootstrap(vm_paddr_t *firstaddr)
  620 {
  621         vm_offset_t va;
  622         pt_entry_t *pte;
  623 
  624         /*
  625          * Create an initial set of page tables to run the kernel in.
  626          */
  627         create_pagetables(firstaddr);
  628 
  629         virtual_avail = (vm_offset_t) KERNBASE + *firstaddr;
  630         virtual_avail = pmap_kmem_choose(virtual_avail);
  631 
  632         virtual_end = VM_MAX_KERNEL_ADDRESS;
  633 
  634 
  635         /* XXX do %cr0 as well */
  636         load_cr4(rcr4() | CR4_PGE | CR4_PSE);
  637         load_cr3(KPML4phys);
  638         if (cpu_stdext_feature & CPUID_STDEXT_SMEP)
  639                 load_cr4(rcr4() | CR4_SMEP);
  640 
  641         /*
  642          * Initialize the kernel pmap (which is statically allocated).
  643          */
  644         PMAP_LOCK_INIT(kernel_pmap);
  645         kernel_pmap->pm_pml4 = (pdp_entry_t *)PHYS_TO_DMAP(KPML4phys);
  646         kernel_pmap->pm_root = NULL;
  647         CPU_FILL(&kernel_pmap->pm_active);      /* don't allow deactivation */
  648         TAILQ_INIT(&kernel_pmap->pm_pvchunk);
  649 
  650         /*
  651          * Initialize the global pv list lock.
  652          */
  653         rw_init(&pvh_global_lock, "pmap pv global");
  654 
  655         /*
  656          * Reserve some special page table entries/VA space for temporary
  657          * mapping of pages.
  658          */
  659 #define SYSMAP(c, p, v, n)      \
  660         v = (c)va; va += ((n)*PAGE_SIZE); p = pte; pte += (n);
  661 
  662         va = virtual_avail;
  663         pte = vtopte(va);
  664 
  665         /*
  666          * Crashdump maps.  The first page is reused as CMAP1 for the
  667          * memory test.
  668          */
  669         SYSMAP(caddr_t, CMAP1, crashdumpmap, MAXDUMPPGS)
  670         CADDR1 = crashdumpmap;
  671 
  672         virtual_avail = va;
  673 
  674         /* Initialize the PAT MSR. */
  675         pmap_init_pat();
  676 }
  677 
  678 /*
  679  * Setup the PAT MSR.
  680  */
  681 void
  682 pmap_init_pat(void)
  683 {
  684         int pat_table[PAT_INDEX_SIZE];
  685         uint64_t pat_msr;
  686         u_long cr0, cr4;
  687         int i;
  688 
  689         /* Bail if this CPU doesn't implement PAT. */
  690         if ((cpu_feature & CPUID_PAT) == 0)
  691                 panic("no PAT??");
  692 
  693         /* Set default PAT index table. */
  694         for (i = 0; i < PAT_INDEX_SIZE; i++)
  695                 pat_table[i] = -1;
  696         pat_table[PAT_WRITE_BACK] = 0;
  697         pat_table[PAT_WRITE_THROUGH] = 1;
  698         pat_table[PAT_UNCACHEABLE] = 3;
  699         pat_table[PAT_WRITE_COMBINING] = 3;
  700         pat_table[PAT_WRITE_PROTECTED] = 3;
  701         pat_table[PAT_UNCACHED] = 3;
  702 
  703         /* Initialize default PAT entries. */
  704         pat_msr = PAT_VALUE(0, PAT_WRITE_BACK) |
  705             PAT_VALUE(1, PAT_WRITE_THROUGH) |
  706             PAT_VALUE(2, PAT_UNCACHED) |
  707             PAT_VALUE(3, PAT_UNCACHEABLE) |
  708             PAT_VALUE(4, PAT_WRITE_BACK) |
  709             PAT_VALUE(5, PAT_WRITE_THROUGH) |
  710             PAT_VALUE(6, PAT_UNCACHED) |
  711             PAT_VALUE(7, PAT_UNCACHEABLE);
  712 
  713         if (pat_works) {
  714                 /*
  715                  * Leave the indices 0-3 at the default of WB, WT, UC-, and UC.
  716                  * Program 5 and 6 as WP and WC.
  717                  * Leave 4 and 7 as WB and UC.
  718                  */
  719                 pat_msr &= ~(PAT_MASK(5) | PAT_MASK(6));
  720                 pat_msr |= PAT_VALUE(5, PAT_WRITE_PROTECTED) |
  721                     PAT_VALUE(6, PAT_WRITE_COMBINING);
  722                 pat_table[PAT_UNCACHED] = 2;
  723                 pat_table[PAT_WRITE_PROTECTED] = 5;
  724                 pat_table[PAT_WRITE_COMBINING] = 6;
  725         } else {
  726                 /*
  727                  * Just replace PAT Index 2 with WC instead of UC-.
  728                  */
  729                 pat_msr &= ~PAT_MASK(2);
  730                 pat_msr |= PAT_VALUE(2, PAT_WRITE_COMBINING);
  731                 pat_table[PAT_WRITE_COMBINING] = 2;
  732         }
  733 
  734         /* Disable PGE. */
  735         cr4 = rcr4();
  736         load_cr4(cr4 & ~CR4_PGE);
  737 
  738         /* Disable caches (CD = 1, NW = 0). */
  739         cr0 = rcr0();
  740         load_cr0((cr0 & ~CR0_NW) | CR0_CD);
  741 
  742         /* Flushes caches and TLBs. */
  743         wbinvd();
  744         invltlb();
  745 
  746         /* Update PAT and index table. */
  747         wrmsr(MSR_PAT, pat_msr);
  748         for (i = 0; i < PAT_INDEX_SIZE; i++)
  749                 pat_index[i] = pat_table[i];
  750 
  751         /* Flush caches and TLBs again. */
  752         wbinvd();
  753         invltlb();
  754 
  755         /* Restore caches and PGE. */
  756         load_cr0(cr0);
  757         load_cr4(cr4);
  758 }
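      /*
       * Resulting layout (editorial summary): when pat_works is nonzero
       * the PAT MSR ends up programmed as
       *
       *      index:  0   1   2    3   4   5   6   7
       *      type:   WB  WT  UC-  UC  WB  WP  WC  UC
       *
       * and pat_index[] maps each PAT_* caching mode onto one of these
       * slots.  When pat_works is zero, only entry 2 is reprogrammed (to
       * WC) and write-protected requests fall back to UC.
       */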
  759 
  760 /*
  761  *      Initialize a vm_page's machine-dependent fields.
  762  */
  763 void
  764 pmap_page_init(vm_page_t m)
  765 {
  766 
  767         TAILQ_INIT(&m->md.pv_list);
  768         m->md.pat_mode = PAT_WRITE_BACK;
  769 }
  770 
  771 /*
  772  *      Initialize the pmap module.
  773  *      Called by vm_init, to initialize any structures that the pmap
  774  *      system needs to map virtual memory.
  775  */
  776 void
  777 pmap_init(void)
  778 {
  779         vm_page_t mpte;
  780         vm_size_t s;
  781         int i, pv_npg;
  782 
  783         /*
  784          * Initialize the vm page array entries for the kernel pmap's
  785          * page table pages.
  786          */ 
  787         for (i = 0; i < NKPT; i++) {
  788                 mpte = PHYS_TO_VM_PAGE(KPTphys + (i << PAGE_SHIFT));
  789                 KASSERT(mpte >= vm_page_array &&
  790                     mpte < &vm_page_array[vm_page_array_size],
  791                     ("pmap_init: page table page is out of range"));
  792                 mpte->pindex = pmap_pde_pindex(KERNBASE) + i;
  793                 mpte->phys_addr = KPTphys + (i << PAGE_SHIFT);
  794         }
  795 
  796         /*
  797          * If the kernel is running on a virtual machine, then it must assume
  798          * that MCA is enabled by the hypervisor.  Moreover, the kernel must
  799          * be prepared for the hypervisor changing the vendor and family that
  800          * are reported by CPUID.  Consequently, the workaround for AMD Family
  801          * 10h Erratum 383 is enabled if the processor's feature set does not
  802          * include at least one feature that is only supported by older Intel
  803          * or newer AMD processors.
  804          */
  805         if (vm_guest == VM_GUEST_VM && (cpu_feature & CPUID_SS) == 0 &&
  806             (cpu_feature2 & (CPUID2_SSSE3 | CPUID2_SSE41 | CPUID2_AESNI |
  807             CPUID2_AVX | CPUID2_XSAVE)) == 0 && (amd_feature2 & (AMDID2_XOP |
  808             AMDID2_FMA4)) == 0)
  809                 workaround_erratum383 = 1;
  810 
  811         /*
  812          * Are large page mappings enabled?
  813          */
  814         TUNABLE_INT_FETCH("vm.pmap.pg_ps_enabled", &pg_ps_enabled);
  815         if (pg_ps_enabled) {
  816                 KASSERT(MAXPAGESIZES > 1 && pagesizes[1] == 0,
  817                     ("pmap_init: can't assign to pagesizes[1]"));
  818                 pagesizes[1] = NBPDR;
  819         }
  820 
  821         /*
  822          * Initialize the pv chunk list mutex.
  823          */
  824         mtx_init(&pv_chunks_mutex, "pmap pv chunk list", NULL, MTX_DEF);
  825 
  826         /*
  827          * Initialize the pool of pv list locks.
  828          */
  829         for (i = 0; i < NPV_LIST_LOCKS; i++)
  830                 rw_init(&pv_list_locks[i], "pmap pv list");
  831 
  832         /*
  833          * Calculate the size of the pv head table for superpages.
  834          */
  835         for (i = 0; phys_avail[i + 1]; i += 2);
  836         pv_npg = round_2mpage(phys_avail[(i - 2) + 1]) / NBPDR;
  837 
  838         /*
  839          * Allocate memory for the pv head table for superpages.
  840          */
  841         s = (vm_size_t)(pv_npg * sizeof(struct md_page));
  842         s = round_page(s);
  843         pv_table = (struct md_page *)kmem_alloc(kernel_map, s);
  844         for (i = 0; i < pv_npg; i++)
  845                 TAILQ_INIT(&pv_table[i].pv_list);
  846 }
  847 
  848 static SYSCTL_NODE(_vm_pmap, OID_AUTO, pde, CTLFLAG_RD, 0,
  849     "2MB page mapping counters");
  850 
  851 static u_long pmap_pde_demotions;
  852 SYSCTL_ULONG(_vm_pmap_pde, OID_AUTO, demotions, CTLFLAG_RD,
  853     &pmap_pde_demotions, 0, "2MB page demotions");
  854 
  855 static u_long pmap_pde_mappings;
  856 SYSCTL_ULONG(_vm_pmap_pde, OID_AUTO, mappings, CTLFLAG_RD,
  857     &pmap_pde_mappings, 0, "2MB page mappings");
  858 
  859 static u_long pmap_pde_p_failures;
  860 SYSCTL_ULONG(_vm_pmap_pde, OID_AUTO, p_failures, CTLFLAG_RD,
  861     &pmap_pde_p_failures, 0, "2MB page promotion failures");
  862 
  863 static u_long pmap_pde_promotions;
  864 SYSCTL_ULONG(_vm_pmap_pde, OID_AUTO, promotions, CTLFLAG_RD,
  865     &pmap_pde_promotions, 0, "2MB page promotions");
  866 
  867 static SYSCTL_NODE(_vm_pmap, OID_AUTO, pdpe, CTLFLAG_RD, 0,
  868     "1GB page mapping counters");
  869 
  870 static u_long pmap_pdpe_demotions;
  871 SYSCTL_ULONG(_vm_pmap_pdpe, OID_AUTO, demotions, CTLFLAG_RD,
  872     &pmap_pdpe_demotions, 0, "1GB page demotions");
  873 
  874 /***************************************************
  875  * Low level helper routines.....
  876  ***************************************************/
  877 
  878 /*
  879  * Determine the appropriate bits to set in a PTE or PDE for a specified
  880  * caching mode.
  881  */
  882 static int
  883 pmap_cache_bits(int mode, boolean_t is_pde)
  884 {
  885         int cache_bits, pat_flag, pat_idx;
  886 
  887         if (mode < 0 || mode >= PAT_INDEX_SIZE || pat_index[mode] < 0)
  888                 panic("Unknown caching mode %d\n", mode);
  889 
  890         /* The PAT bit is different for PTE's and PDE's. */
  891         pat_flag = is_pde ? PG_PDE_PAT : PG_PTE_PAT;
  892 
  893         /* Map the caching mode to a PAT index. */
  894         pat_idx = pat_index[mode];
  895 
  896         /* Map the 3-bit index value into the PAT, PCD, and PWT bits. */
  897         cache_bits = 0;
  898         if (pat_idx & 0x4)
  899                 cache_bits |= pat_flag;
  900         if (pat_idx & 0x2)
  901                 cache_bits |= PG_NC_PCD;
  902         if (pat_idx & 0x1)
  903                 cache_bits |= PG_NC_PWT;
  904         return (cache_bits);
  905 }
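      /*
       * Worked example (editorial): with the pat_works layout programmed
       * above, PAT_WRITE_COMBINING maps to pat_idx 6 (binary 110), so a
       * 4KB WC mapping gets PG_PTE_PAT | PG_NC_PCD and a 2MB WC mapping
       * gets PG_PDE_PAT | PG_NC_PCD, while plain write-back (pat_idx 0)
       * sets no extra bits at all.
       */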
  906 
  907 /*
  908  * After changing the page size for the specified virtual address in the page
  909  * table, flush the corresponding entries from the processor's TLB.  Only the
  910  * calling processor's TLB is affected.
  911  *
  912  * The calling thread must be pinned to a processor.
  913  */
  914 static void
  915 pmap_update_pde_invalidate(vm_offset_t va, pd_entry_t newpde)
  916 {
  917         u_long cr4;
  918 
  919         if ((newpde & PG_PS) == 0)
  920                 /* Demotion: flush a specific 2MB page mapping. */
  921                 invlpg(va);
  922         else if ((newpde & PG_G) == 0)
  923                 /*
  924                  * Promotion: flush every 4KB page mapping from the TLB
  925                  * because there are too many to flush individually.
  926                  */
  927                 invltlb();
  928         else {
  929                 /*
  930                  * Promotion: flush every 4KB page mapping from the TLB,
  931                  * including any global (PG_G) mappings.
  932                  */
  933                 cr4 = rcr4();
  934                 load_cr4(cr4 & ~CR4_PGE);
  935                 /*
  936                  * Although preemption at this point could be detrimental to
  937                  * performance, it would not lead to an error.  PG_G is simply
  938                  * ignored if CR4.PGE is clear.  Moreover, in case this block
  939                  * is re-entered, the load_cr4() either above or below will
  940                  * modify CR4.PGE flushing the TLB.
  941                  */
  942                 load_cr4(cr4 | CR4_PGE);
  943         }
  944 }
  945 #ifdef SMP
  946 /*
  947  * For SMP, these functions have to use the IPI mechanism for coherence.
  948  *
  949  * N.B.: Before calling any of the following TLB invalidation functions,
  950  * the calling processor must ensure that all stores updating a non-
  951  * kernel page table are globally performed.  Otherwise, another
  952  * processor could cache an old, pre-update entry without being
  953  * invalidated.  This can happen one of two ways: (1) The pmap becomes
  954  * active on another processor after its pm_active field is checked by
  955  * one of the following functions but before a store updating the page
  956  * table is globally performed. (2) The pmap becomes active on another
  957  * processor before its pm_active field is checked but due to
  958  * speculative loads one of the following functions still reads the
  959  * pmap as inactive on the other processor.
  960  * 
  961  * The kernel page table is exempt because its pm_active field is
  962  * immutable.  The kernel page table is always active on every
  963  * processor.
  964  */
  965 void
  966 pmap_invalidate_page(pmap_t pmap, vm_offset_t va)
  967 {
  968         cpuset_t other_cpus;
  969         u_int cpuid;
  970 
  971         sched_pin();
  972         if (pmap == kernel_pmap || !CPU_CMP(&pmap->pm_active, &all_cpus)) {
  973                 invlpg(va);
  974                 smp_invlpg(va);
  975         } else {
  976                 cpuid = PCPU_GET(cpuid);
  977                 other_cpus = all_cpus;
  978                 CPU_CLR(cpuid, &other_cpus);
  979                 if (CPU_ISSET(cpuid, &pmap->pm_active))
  980                         invlpg(va);
  981                 CPU_AND(&other_cpus, &pmap->pm_active);
  982                 if (!CPU_EMPTY(&other_cpus))
  983                         smp_masked_invlpg(other_cpus, va);
  984         }
  985         sched_unpin();
  986 }
  987 
  988 void
  989 pmap_invalidate_range(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
  990 {
  991         cpuset_t other_cpus;
  992         vm_offset_t addr;
  993         u_int cpuid;
  994 
  995         sched_pin();
  996         if (pmap == kernel_pmap || !CPU_CMP(&pmap->pm_active, &all_cpus)) {
  997                 for (addr = sva; addr < eva; addr += PAGE_SIZE)
  998                         invlpg(addr);
  999                 smp_invlpg_range(sva, eva);
 1000         } else {
 1001                 cpuid = PCPU_GET(cpuid);
 1002                 other_cpus = all_cpus;
 1003                 CPU_CLR(cpuid, &other_cpus);
 1004                 if (CPU_ISSET(cpuid, &pmap->pm_active))
 1005                         for (addr = sva; addr < eva; addr += PAGE_SIZE)
 1006                                 invlpg(addr);
 1007                 CPU_AND(&other_cpus, &pmap->pm_active);
 1008                 if (!CPU_EMPTY(&other_cpus))
 1009                         smp_masked_invlpg_range(other_cpus, sva, eva);
 1010         }
 1011         sched_unpin();
 1012 }
 1013 
 1014 void
 1015 pmap_invalidate_all(pmap_t pmap)
 1016 {
 1017         cpuset_t other_cpus;
 1018         u_int cpuid;
 1019 
 1020         sched_pin();
 1021         if (pmap == kernel_pmap || !CPU_CMP(&pmap->pm_active, &all_cpus)) {
 1022                 invltlb();
 1023                 smp_invltlb();
 1024         } else {
 1025                 cpuid = PCPU_GET(cpuid);
 1026                 other_cpus = all_cpus;
 1027                 CPU_CLR(cpuid, &other_cpus);
 1028                 if (CPU_ISSET(cpuid, &pmap->pm_active))
 1029                         invltlb();
 1030                 CPU_AND(&other_cpus, &pmap->pm_active);
 1031                 if (!CPU_EMPTY(&other_cpus))
 1032                         smp_masked_invltlb(other_cpus);
 1033         }
 1034         sched_unpin();
 1035 }
 1036 
 1037 void
 1038 pmap_invalidate_cache(void)
 1039 {
 1040 
 1041         sched_pin();
 1042         wbinvd();
 1043         smp_cache_flush();
 1044         sched_unpin();
 1045 }
 1046 
 1047 struct pde_action {
 1048         cpuset_t invalidate;    /* processors that invalidate their TLB */
 1049         vm_offset_t va;
 1050         pd_entry_t *pde;
 1051         pd_entry_t newpde;
 1052         u_int store;            /* processor that updates the PDE */
 1053 };
 1054 
 1055 static void
 1056 pmap_update_pde_action(void *arg)
 1057 {
 1058         struct pde_action *act = arg;
 1059 
 1060         if (act->store == PCPU_GET(cpuid))
 1061                 pde_store(act->pde, act->newpde);
 1062 }
 1063 
 1064 static void
 1065 pmap_update_pde_teardown(void *arg)
 1066 {
 1067         struct pde_action *act = arg;
 1068 
 1069         if (CPU_ISSET(PCPU_GET(cpuid), &act->invalidate))
 1070                 pmap_update_pde_invalidate(act->va, act->newpde);
 1071 }
 1072 
 1073 /*
 1074  * Change the page size for the specified virtual address in a way that
 1075  * prevents any possibility of the TLB ever having two entries that map the
 1076  * same virtual address using different page sizes.  This is the recommended
 1077  * workaround for Erratum 383 on AMD Family 10h processors.  It prevents a
 1078  * machine check exception for a TLB state that is improperly diagnosed as a
 1079  * hardware error.
 1080  */
 1081 static void
 1082 pmap_update_pde(pmap_t pmap, vm_offset_t va, pd_entry_t *pde, pd_entry_t newpde)
 1083 {
 1084         struct pde_action act;
 1085         cpuset_t active, other_cpus;
 1086         u_int cpuid;
 1087 
 1088         sched_pin();
 1089         cpuid = PCPU_GET(cpuid);
 1090         other_cpus = all_cpus;
 1091         CPU_CLR(cpuid, &other_cpus);
 1092         if (pmap == kernel_pmap)
 1093                 active = all_cpus;
 1094         else
 1095                 active = pmap->pm_active;
 1096         if (CPU_OVERLAP(&active, &other_cpus)) { 
 1097                 act.store = cpuid;
 1098                 act.invalidate = active;
 1099                 act.va = va;
 1100                 act.pde = pde;
 1101                 act.newpde = newpde;
 1102                 CPU_SET(cpuid, &active);
 1103                 smp_rendezvous_cpus(active,
 1104                     smp_no_rendevous_barrier, pmap_update_pde_action,
 1105                     pmap_update_pde_teardown, &act);
 1106         } else {
 1107                 pde_store(pde, newpde);
 1108                 if (CPU_ISSET(cpuid, &active))
 1109                         pmap_update_pde_invalidate(va, newpde);
 1110         }
 1111         sched_unpin();
 1112 }
 1113 #else /* !SMP */
 1114 /*
 1115  * Normal, non-SMP, invalidation functions.
 1116  * We inline these within pmap.c for speed.
 1117  */
 1118 PMAP_INLINE void
 1119 pmap_invalidate_page(pmap_t pmap, vm_offset_t va)
 1120 {
 1121 
 1122         if (pmap == kernel_pmap || !CPU_EMPTY(&pmap->pm_active))
 1123                 invlpg(va);
 1124 }
 1125 
 1126 PMAP_INLINE void
 1127 pmap_invalidate_range(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
 1128 {
 1129         vm_offset_t addr;
 1130 
 1131         if (pmap == kernel_pmap || !CPU_EMPTY(&pmap->pm_active))
 1132                 for (addr = sva; addr < eva; addr += PAGE_SIZE)
 1133                         invlpg(addr);
 1134 }
 1135 
 1136 PMAP_INLINE void
 1137 pmap_invalidate_all(pmap_t pmap)
 1138 {
 1139 
 1140         if (pmap == kernel_pmap || !CPU_EMPTY(&pmap->pm_active))
 1141                 invltlb();
 1142 }
 1143 
 1144 PMAP_INLINE void
 1145 pmap_invalidate_cache(void)
 1146 {
 1147 
 1148         wbinvd();
 1149 }
 1150 
 1151 static void
 1152 pmap_update_pde(pmap_t pmap, vm_offset_t va, pd_entry_t *pde, pd_entry_t newpde)
 1153 {
 1154 
 1155         pde_store(pde, newpde);
 1156         if (pmap == kernel_pmap || !CPU_EMPTY(&pmap->pm_active))
 1157                 pmap_update_pde_invalidate(va, newpde);
 1158 }
 1159 #endif /* !SMP */
 1160 
 1161 #define PMAP_CLFLUSH_THRESHOLD   (2 * 1024 * 1024)
 1162 
 1163 void
 1164 pmap_invalidate_cache_range(vm_offset_t sva, vm_offset_t eva)
 1165 {
 1166 
 1167         KASSERT((sva & PAGE_MASK) == 0,
 1168             ("pmap_invalidate_cache_range: sva not page-aligned"));
 1169         KASSERT((eva & PAGE_MASK) == 0,
 1170             ("pmap_invalidate_cache_range: eva not page-aligned"));
 1171 
 1172         if (cpu_feature & CPUID_SS)
 1173                 ; /* If "Self Snoop" is supported, do nothing. */
 1174         else if ((cpu_feature & CPUID_CLFSH) != 0 &&
 1175             eva - sva < PMAP_CLFLUSH_THRESHOLD) {
 1176 
 1177                 /*
 1178                  * XXX: Some CPUs fault, hang, or trash the local APIC
 1179                  * registers if we use CLFLUSH on the local APIC
 1180                  * range.  The local APIC is always uncached, so we
 1181                  * don't need to flush for that range anyway.
 1182                  */
 1183                 if (pmap_kextract(sva) == lapic_paddr)
 1184                         return;
 1185 
 1186                 /*
 1187                  * Otherwise, do per-cache line flush.  Use the mfence
 1188                  * instruction to ensure that previous stores are
 1189                  * included in the write-back.  The processor
 1190                  * propagates flush to other processors in the cache
 1191                  * coherence domain.
 1192                  */
 1193                 mfence();
 1194                 for (; sva < eva; sva += cpu_clflush_line_size)
 1195                         clflush(sva);
 1196                 mfence();
 1197         } else {
 1198 
 1199                 /*
 1200                  * No targeted cache flush methods are supported by CPU,
 1201                  * or the supplied range is bigger than 2MB.
 1202                  * Globally invalidate cache.
 1203                  */
 1204                 pmap_invalidate_cache();
 1205         }
 1206 }
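      /*
       * Scale note (editorial): flushing a single 4KB page on a CPU with
       * 64-byte cache lines takes 64 CLFLUSH operations, so the 2MB
       * PMAP_CLFLUSH_THRESHOLD above corresponds to roughly 32768 line
       * flushes; past that point a full WBINVD via pmap_invalidate_cache()
       * is assumed to be the cheaper option.
       */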
 1207 
 1208 /*
 1209  * Remove the specified set of pages from the data and instruction caches.
 1210  *
 1211  * In contrast to pmap_invalidate_cache_range(), this function does not
 1212  * rely on the CPU's self-snoop feature, because it is intended for use
 1213  * when moving pages into a different cache domain.
 1214  */
 1215 void
 1216 pmap_invalidate_cache_pages(vm_page_t *pages, int count)
 1217 {
 1218         vm_offset_t daddr, eva;
 1219         int i;
 1220 
 1221         if (count >= PMAP_CLFLUSH_THRESHOLD / PAGE_SIZE ||
 1222             (cpu_feature & CPUID_CLFSH) == 0)
 1223                 pmap_invalidate_cache();
 1224         else {
 1225                 mfence();
 1226                 for (i = 0; i < count; i++) {
 1227                         daddr = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(pages[i]));
 1228                         eva = daddr + PAGE_SIZE;
 1229                         for (; daddr < eva; daddr += cpu_clflush_line_size)
 1230                                 clflush(daddr);
 1231                 }
 1232                 mfence();
 1233         }
 1234 }
 1235 
 1236 /*
 1237  *      Routine:        pmap_extract
 1238  *      Function:
 1239  *              Extract the physical page address associated
 1240  *              with the given map/virtual_address pair.
 1241  */
 1242 vm_paddr_t 
 1243 pmap_extract(pmap_t pmap, vm_offset_t va)
 1244 {
 1245         pdp_entry_t *pdpe;
 1246         pd_entry_t *pde;
 1247         pt_entry_t *pte;
 1248         vm_paddr_t pa;
 1249 
 1250         pa = 0;
 1251         PMAP_LOCK(pmap);
 1252         pdpe = pmap_pdpe(pmap, va);
 1253         if (pdpe != NULL && (*pdpe & PG_V) != 0) {
 1254                 if ((*pdpe & PG_PS) != 0)
 1255                         pa = (*pdpe & PG_PS_FRAME) | (va & PDPMASK);
 1256                 else {
 1257                         pde = pmap_pdpe_to_pde(pdpe, va);
 1258                         if ((*pde & PG_V) != 0) {
 1259                                 if ((*pde & PG_PS) != 0) {
 1260                                         pa = (*pde & PG_PS_FRAME) |
 1261                                             (va & PDRMASK);
 1262                                 } else {
 1263                                         pte = pmap_pde_to_pte(pde, va);
 1264                                         pa = (*pte & PG_FRAME) |
 1265                                             (va & PAGE_MASK);
 1266                                 }
 1267                         }
 1268                 }
 1269         }
 1270         PMAP_UNLOCK(pmap);
 1271         return (pa);
 1272 }
 1273 
 1274 /*
 1275  *      Routine:        pmap_extract_and_hold
 1276  *      Function:
 1277  *              Atomically extract and hold the physical page
 1278  *              with the given pmap and virtual address pair
 1279  *              if that mapping permits the given protection.
 1280  */
 1281 vm_page_t
 1282 pmap_extract_and_hold(pmap_t pmap, vm_offset_t va, vm_prot_t prot)
 1283 {
 1284         pd_entry_t pde, *pdep;
 1285         pt_entry_t pte;
 1286         vm_paddr_t pa;
 1287         vm_page_t m;
 1288 
 1289         pa = 0;
 1290         m = NULL;
 1291         PMAP_LOCK(pmap);
 1292 retry:
 1293         pdep = pmap_pde(pmap, va);
 1294         if (pdep != NULL && (pde = *pdep)) {
 1295                 if (pde & PG_PS) {
 1296                         if ((pde & PG_RW) || (prot & VM_PROT_WRITE) == 0) {
 1297                                 if (vm_page_pa_tryrelock(pmap, (pde &
 1298                                     PG_PS_FRAME) | (va & PDRMASK), &pa))
 1299                                         goto retry;
 1300                                 m = PHYS_TO_VM_PAGE((pde & PG_PS_FRAME) |
 1301                                     (va & PDRMASK));
 1302                                 vm_page_hold(m);
 1303                         }
 1304                 } else {
 1305                         pte = *pmap_pde_to_pte(pdep, va);
 1306                         if ((pte & PG_V) &&
 1307                             ((pte & PG_RW) || (prot & VM_PROT_WRITE) == 0)) {
 1308                                 if (vm_page_pa_tryrelock(pmap, pte & PG_FRAME,
 1309                                     &pa))
 1310                                         goto retry;
 1311                                 m = PHYS_TO_VM_PAGE(pte & PG_FRAME);
 1312                                 vm_page_hold(m);
 1313                         }
 1314                 }
 1315         }
 1316         PA_UNLOCK_COND(pa);
 1317         PMAP_UNLOCK(pmap);
 1318         return (m);
 1319 }
 1320 
 1321 vm_paddr_t
 1322 pmap_kextract(vm_offset_t va)
 1323 {
 1324         pd_entry_t pde;
 1325         vm_paddr_t pa;
 1326 
 1327         if (va >= DMAP_MIN_ADDRESS && va < DMAP_MAX_ADDRESS) {
 1328                 pa = DMAP_TO_PHYS(va);
 1329         } else {
 1330                 pde = *vtopde(va);
 1331                 if (pde & PG_PS) {
 1332                         pa = (pde & PG_PS_FRAME) | (va & PDRMASK);
 1333                 } else {
 1334                         /*
 1335                          * Beware of a concurrent promotion that changes the
 1336                          * PDE at this point!  For example, vtopte() must not
 1337                          * be used to access the PTE because it would use the
 1338                          * new PDE.  It is, however, safe to use the old PDE
 1339                          * because the page table page is preserved by the
 1340                          * promotion.
 1341                          */
 1342                         pa = *pmap_pde_to_pte(&pde, va);
 1343                         pa = (pa & PG_FRAME) | (va & PAGE_MASK);
 1344                 }
 1345         }
 1346         return (pa);
 1347 }
 1348 
 1349 /***************************************************
 1350  * Low level mapping routines.....
 1351  ***************************************************/
 1352 
 1353 /*
 1354  * Add a wired page to the kva.
 1355  * Note: not SMP coherent.
 1356  */
 1357 PMAP_INLINE void 
 1358 pmap_kenter(vm_offset_t va, vm_paddr_t pa)
 1359 {
 1360         pt_entry_t *pte;
 1361 
 1362         pte = vtopte(va);
 1363         pte_store(pte, pa | PG_RW | PG_V | PG_G);
 1364 }
 1365 
 1366 static __inline void
 1367 pmap_kenter_attr(vm_offset_t va, vm_paddr_t pa, int mode)
 1368 {
 1369         pt_entry_t *pte;
 1370 
 1371         pte = vtopte(va);
 1372         pte_store(pte, pa | PG_RW | PG_V | PG_G | pmap_cache_bits(mode, 0));
 1373 }
 1374 
 1375 /*
 1376  * Remove a page from the kernel pagetables.
 1377  * Note: not SMP coherent.
 1378  */
 1379 PMAP_INLINE void
 1380 pmap_kremove(vm_offset_t va)
 1381 {
 1382         pt_entry_t *pte;
 1383 
 1384         pte = vtopte(va);
 1385         pte_clear(pte);
 1386 }
 1387 
 1388 /*
 1389  *      Used to map a range of physical addresses into kernel
 1390  *      virtual address space.
 1391  *
 1392  *      The value passed in '*virt' is a suggested virtual address for
 1393  *      the mapping. Architectures which can support a direct-mapped
 1394  *      physical to virtual region can return the appropriate address
 1395  *      within that region, leaving '*virt' unchanged. Other
 1396  *      architectures should map the pages starting at '*virt' and
 1397  *      update '*virt' with the first usable address after the mapped
 1398  *      region.
 1399  */
 1400 vm_offset_t
 1401 pmap_map(vm_offset_t *virt, vm_paddr_t start, vm_paddr_t end, int prot)
 1402 {
 1403         return PHYS_TO_DMAP(start);
 1404 }
 1405 
 1406 
 1407 /*
  1408  * Add a list of wired pages to the kva.  This routine is only
  1409  * used for temporary
 1410  * kernel mappings that do not need to have
 1411  * page modification or references recorded.
 1412  * Note that old mappings are simply written
 1413  * over.  The page *must* be wired.
 1414  * Note: SMP coherent.  Uses a ranged shootdown IPI.
 1415  */
 1416 void
 1417 pmap_qenter(vm_offset_t sva, vm_page_t *ma, int count)
 1418 {
 1419         pt_entry_t *endpte, oldpte, pa, *pte;
 1420         vm_page_t m;
 1421 
 1422         oldpte = 0;
 1423         pte = vtopte(sva);
 1424         endpte = pte + count;
 1425         while (pte < endpte) {
 1426                 m = *ma++;
 1427                 pa = VM_PAGE_TO_PHYS(m) | pmap_cache_bits(m->md.pat_mode, 0);
 1428                 if ((*pte & (PG_FRAME | PG_PTE_CACHE)) != pa) {
 1429                         oldpte |= *pte;
 1430                         pte_store(pte, pa | PG_G | PG_RW | PG_V);
 1431                 }
 1432                 pte++;
 1433         }
 1434         if (__predict_false((oldpte & PG_V) != 0))
 1435                 pmap_invalidate_range(kernel_pmap, sva, sva + count *
 1436                     PAGE_SIZE);
 1437 }
 1438 
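/*
 * Editor's sketch (illustrative, not part of pmap.c): pmap_qenter() above
 * accumulates the PTEs it overwrites in "oldpte" and issues a single ranged
 * invalidation only when at least one of them was valid.  The demo below
 * models that "batch, then flush once if needed" pattern on a plain array;
 * fake_invalidate() and the PTE values are made up, and the "skip unchanged
 * entries" check from the real code is omitted for brevity.
 */
#if 0	/* standalone userland demo; build separately with cc */
#include <stdint.h>
#include <stdio.h>

#define PG_V	0x001ULL

static void
fake_invalidate(int first, int count)
{
	printf("invalidate entries [%d, %d)\n", first, first + count);
}

int
main(void)
{
	uint64_t ptes[8] = { 0, PG_V | 0x1000, 0, 0, 0, 0, 0, 0 };
	uint64_t oldpte = 0;
	int i;

	for (i = 0; i < 8; i++) {
		oldpte |= ptes[i];		/* remember what was there */
		ptes[i] = (uint64_t)(i + 2) * 0x1000 | PG_V;
	}
	if ((oldpte & PG_V) != 0)		/* entry 1 was valid */
		fake_invalidate(0, 8);
	return (0);
}
#endif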
 1439 /*
 1440  * This routine tears out page mappings from the
 1441  * kernel -- it is meant only for temporary mappings.
 1442  * Note: SMP coherent.  Uses a ranged shootdown IPI.
 1443  */
 1444 void
 1445 pmap_qremove(vm_offset_t sva, int count)
 1446 {
 1447         vm_offset_t va;
 1448 
 1449         va = sva;
 1450         while (count-- > 0) {
 1451                 KASSERT(va >= VM_MIN_KERNEL_ADDRESS, ("usermode va %lx", va));
 1452                 pmap_kremove(va);
 1453                 va += PAGE_SIZE;
 1454         }
 1455         pmap_invalidate_range(kernel_pmap, sva, va);
 1456 }
 1457 
 1458 /***************************************************
 1459  * Page table page management routines.....
 1460  ***************************************************/
 1461 static __inline void
 1462 pmap_free_zero_pages(vm_page_t free)
 1463 {
 1464         vm_page_t m;
 1465 
 1466         while (free != NULL) {
 1467                 m = free;
 1468                 free = m->right;
 1469                 /* Preserve the page's PG_ZERO setting. */
 1470                 vm_page_free_toq(m);
 1471         }
 1472 }
 1473 
 1474 /*
 1475  * Schedule the specified unused page table page to be freed.  Specifically,
 1476  * add the page to the specified list of pages that will be released to the
 1477  * physical memory manager after the TLB has been updated.
 1478  */
 1479 static __inline void
 1480 pmap_add_delayed_free_list(vm_page_t m, vm_page_t *free, boolean_t set_PG_ZERO)
 1481 {
 1482 
 1483         if (set_PG_ZERO)
 1484                 m->flags |= PG_ZERO;
 1485         else
 1486                 m->flags &= ~PG_ZERO;
 1487         m->right = *free;
 1488         *free = m;
 1489 }
 1490         
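/*
 * Editor's sketch (illustrative, not part of pmap.c): the two helpers above
 * chain page table pages through the vm_page "right" pointer so that they
 * are handed back to the allocator only after the TLB shootdown.  The demo
 * below models that intrusive LIFO with a made-up struct fake_page.
 */
#if 0	/* standalone userland demo; build separately with cc */
#include <stdio.h>

struct fake_page {
	struct fake_page *right;	/* plays the role of m->right */
	int id;
};

/* Model of pmap_add_delayed_free_list(): push onto the list head. */
static void
delayed_free_push(struct fake_page *m, struct fake_page **freelist)
{
	m->right = *freelist;
	*freelist = m;
}

/* Model of pmap_free_zero_pages(): drain the list afterwards. */
static void
delayed_free_drain(struct fake_page *freelist)
{
	struct fake_page *m;

	while (freelist != NULL) {
		m = freelist;
		freelist = m->right;
		printf("releasing page table page %d\n", m->id);
	}
}

int
main(void)
{
	struct fake_page pages[3] = { { NULL, 0 }, { NULL, 1 }, { NULL, 2 } };
	struct fake_page *freelist = NULL;
	int i;

	for (i = 0; i < 3; i++)
		delayed_free_push(&pages[i], &freelist);
	/* ...the TLB shootdown would happen here... */
	delayed_free_drain(freelist);	/* prints 2, 1, 0: LIFO order */
	return (0);
}
#endif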
 1491 /*
 1492  * Inserts the specified page table page into the specified pmap's collection
 1493  * of idle page table pages.  Each of a pmap's page table pages is responsible
 1494  * for mapping a distinct range of virtual addresses.  The pmap's collection is
 1495  * ordered by this virtual address range.
 1496  */
 1497 static void
 1498 pmap_insert_pt_page(pmap_t pmap, vm_page_t mpte)
 1499 {
 1500         vm_page_t root;
 1501 
 1502         PMAP_LOCK_ASSERT(pmap, MA_OWNED);
 1503         root = pmap->pm_root;
 1504         if (root == NULL) {
 1505                 mpte->left = NULL;
 1506                 mpte->right = NULL;
 1507         } else {
 1508                 root = vm_page_splay(mpte->pindex, root);
 1509                 if (mpte->pindex < root->pindex) {
 1510                         mpte->left = root->left;
 1511                         mpte->right = root;
 1512                         root->left = NULL;
 1513                 } else if (mpte->pindex == root->pindex)
 1514                         panic("pmap_insert_pt_page: pindex already inserted");
 1515                 else {
 1516                         mpte->right = root->right;
 1517                         mpte->left = root;
 1518                         root->right = NULL;
 1519                 }
 1520         }
 1521         pmap->pm_root = mpte;
 1522 }
 1523 
 1524 /*
 1525  * Looks for a page table page mapping the specified virtual address in the
 1526  * specified pmap's collection of idle page table pages.  Returns NULL if there
 1527  * is no page table page corresponding to the specified virtual address.
 1528  */
 1529 static vm_page_t
 1530 pmap_lookup_pt_page(pmap_t pmap, vm_offset_t va)
 1531 {
 1532         vm_page_t mpte;
 1533         vm_pindex_t pindex = pmap_pde_pindex(va);
 1534 
 1535         PMAP_LOCK_ASSERT(pmap, MA_OWNED);
 1536         if ((mpte = pmap->pm_root) != NULL && mpte->pindex != pindex) {
 1537                 mpte = vm_page_splay(pindex, mpte);
 1538                 if ((pmap->pm_root = mpte)->pindex != pindex)
 1539                         mpte = NULL;
 1540         }
 1541         return (mpte);
 1542 }
 1543 
 1544 /*
 1545  * Removes the specified page table page from the specified pmap's collection
 1546  * of idle page table pages.  The specified page table page must be a member of
 1547  * the pmap's collection.
 1548  */
 1549 static void
 1550 pmap_remove_pt_page(pmap_t pmap, vm_page_t mpte)
 1551 {
 1552         vm_page_t root;
 1553 
 1554         PMAP_LOCK_ASSERT(pmap, MA_OWNED);
 1555         if (mpte != pmap->pm_root) {
 1556                 root = vm_page_splay(mpte->pindex, pmap->pm_root);
 1557                 KASSERT(mpte == root,
 1558                     ("pmap_remove_pt_page: mpte %p is missing from pmap %p",
 1559                     mpte, pmap));
 1560         }
 1561         if (mpte->left == NULL)
 1562                 root = mpte->right;
 1563         else {
 1564                 root = vm_page_splay(mpte->pindex, mpte->left);
 1565                 root->right = mpte->right;
 1566         }
 1567         pmap->pm_root = root;
 1568 }
 1569 
 1570 /*
 1571  * Decrements a page table page's wire count, which is used to record the
 1572  * number of valid page table entries within the page.  If the wire count
 1573  * drops to zero, then the page table page is unmapped.  Returns TRUE if the
 1574  * page table page was unmapped and FALSE otherwise.
 1575  */
 1576 static inline boolean_t
 1577 pmap_unwire_ptp(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_page_t *free)
 1578 {
 1579 
 1580         --m->wire_count;
 1581         if (m->wire_count == 0) {
 1582                 _pmap_unwire_ptp(pmap, va, m, free);
 1583                 return (TRUE);
 1584         } else
 1585                 return (FALSE);
 1586 }
 1587 
 1588 static void
 1589 _pmap_unwire_ptp(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_page_t *free)
 1590 {
 1591 
 1592         PMAP_LOCK_ASSERT(pmap, MA_OWNED);
 1593         /*
 1594          * unmap the page table page
 1595          */
 1596         if (m->pindex >= (NUPDE + NUPDPE)) {
 1597                 /* PDP page */
 1598                 pml4_entry_t *pml4;
 1599                 pml4 = pmap_pml4e(pmap, va);
 1600                 *pml4 = 0;
 1601         } else if (m->pindex >= NUPDE) {
 1602                 /* PD page */
 1603                 pdp_entry_t *pdp;
 1604                 pdp = pmap_pdpe(pmap, va);
 1605                 *pdp = 0;
 1606         } else {
 1607                 /* PTE page */
 1608                 pd_entry_t *pd;
 1609                 pd = pmap_pde(pmap, va);
 1610                 *pd = 0;
 1611         }
 1612         pmap_resident_count_dec(pmap, 1);
 1613         if (m->pindex < NUPDE) {
 1614                 /* We just released a PT, unhold the matching PD */
 1615                 vm_page_t pdpg;
 1616 
 1617                 pdpg = PHYS_TO_VM_PAGE(*pmap_pdpe(pmap, va) & PG_FRAME);
 1618                 pmap_unwire_ptp(pmap, va, pdpg, free);
 1619         }
 1620         if (m->pindex >= NUPDE && m->pindex < (NUPDE + NUPDPE)) {
 1621                 /* We just released a PD, unhold the matching PDP */
 1622                 vm_page_t pdppg;
 1623 
 1624                 pdppg = PHYS_TO_VM_PAGE(*pmap_pml4e(pmap, va) & PG_FRAME);
 1625                 pmap_unwire_ptp(pmap, va, pdppg, free);
 1626         }
 1627 
 1628         /*
 1629          * This is a release store so that the ordinary store unmapping
 1630          * the page table page is globally performed before TLB shoot-
 1631          * down is begun.
 1632          */
 1633         atomic_subtract_rel_int(&cnt.v_wire_count, 1);
 1634 
 1635         /* 
 1636          * Put page on a list so that it is released after
 1637          * *ALL* TLB shootdown is done
 1638          */
 1639         pmap_add_delayed_free_list(m, free, TRUE);
 1640 }
 1641 
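/*
 * Editor's sketch (illustrative, not part of pmap.c): _pmap_unwire_ptp()
 * above decides which level of the paging hierarchy a page table page
 * belongs to purely from its pindex: [0, NUPDE) are PT pages, [NUPDE,
 * NUPDE + NUPDPE) are PD pages, and anything above that is a PDP page.
 * The constants in the demo below are assumptions matching the usual amd64
 * layout (512 entries per page, 256 user PML4 slots); treat them as
 * illustrative rather than authoritative.
 */
#if 0	/* standalone userland demo; build separately with cc */
#include <stdint.h>
#include <stdio.h>

#define NPTEPG		512ULL		/* entries per page table page */
#define NUPML4E		256ULL		/* assumed user PML4 slots */
#define NUPDPE		(NUPML4E * NPTEPG)
#define NUPDE		(NUPDPE * NPTEPG)

static const char *
ptp_level(uint64_t pindex)
{
	if (pindex >= NUPDE + NUPDPE)
		return ("PDP page (its entry lives in the PML4)");
	else if (pindex >= NUPDE)
		return ("PD page (its entry lives in a PDP)");
	else
		return ("PT page (its entry lives in a PD)");
}

int
main(void)
{
	uint64_t samples[] = { 0, NUPDE - 1, NUPDE, NUPDE + NUPDPE };
	unsigned i;

	for (i = 0; i < 4; i++)
		printf("pindex %ju: %s\n", (uintmax_t)samples[i],
		    ptp_level(samples[i]));
	return (0);
}
#endif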
 1642 /*
 1643  * After removing a page table entry, this routine is used to
 1644  * conditionally free the page, and manage the hold/wire counts.
 1645  */
 1646 static int
 1647 pmap_unuse_pt(pmap_t pmap, vm_offset_t va, pd_entry_t ptepde, vm_page_t *free)
 1648 {
 1649         vm_page_t mpte;
 1650 
 1651         if (va >= VM_MAXUSER_ADDRESS)
 1652                 return (0);
 1653         KASSERT(ptepde != 0, ("pmap_unuse_pt: ptepde != 0"));
 1654         mpte = PHYS_TO_VM_PAGE(ptepde & PG_FRAME);
 1655         return (pmap_unwire_ptp(pmap, va, mpte, free));
 1656 }
 1657 
 1658 void
 1659 pmap_pinit0(pmap_t pmap)
 1660 {
 1661 
 1662         PMAP_LOCK_INIT(pmap);
 1663         pmap->pm_pml4 = (pml4_entry_t *)PHYS_TO_DMAP(KPML4phys);
 1664         pmap->pm_root = NULL;
 1665         CPU_ZERO(&pmap->pm_active);
 1666         PCPU_SET(curpmap, pmap);
 1667         TAILQ_INIT(&pmap->pm_pvchunk);
 1668         bzero(&pmap->pm_stats, sizeof pmap->pm_stats);
 1669 }
 1670 
 1671 /*
 1672  * Initialize a preallocated and zeroed pmap structure,
 1673  * such as one in a vmspace structure.
 1674  */
 1675 int
 1676 pmap_pinit(pmap_t pmap)
 1677 {
 1678         vm_page_t pml4pg;
 1679         int i;
 1680 
 1681         PMAP_LOCK_INIT(pmap);
 1682 
 1683         /*
 1684          * allocate the page directory page
 1685          */
 1686         while ((pml4pg = vm_page_alloc(NULL, 0, VM_ALLOC_NORMAL |
 1687             VM_ALLOC_NOOBJ | VM_ALLOC_WIRED | VM_ALLOC_ZERO)) == NULL)
 1688                 VM_WAIT;
 1689 
 1690         pmap->pm_pml4 = (pml4_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(pml4pg));
 1691 
 1692         if ((pml4pg->flags & PG_ZERO) == 0)
 1693                 pagezero(pmap->pm_pml4);
 1694 
 1695         /* Wire in kernel global address entries. */
 1696         pmap->pm_pml4[KPML4I] = KPDPphys | PG_RW | PG_V | PG_U;
 1697         for (i = 0; i < NDMPML4E; i++) {
 1698                 pmap->pm_pml4[DMPML4I + i] = (DMPDPphys + (i << PAGE_SHIFT)) |
 1699                     PG_RW | PG_V | PG_U;
 1700         }
 1701 
 1702         /* install self-referential address mapping entry(s) */
 1703         pmap->pm_pml4[PML4PML4I] = VM_PAGE_TO_PHYS(pml4pg) | PG_V | PG_RW | PG_A | PG_M;
 1704 
 1705         pmap->pm_root = NULL;
 1706         CPU_ZERO(&pmap->pm_active);
 1707         TAILQ_INIT(&pmap->pm_pvchunk);
 1708         bzero(&pmap->pm_stats, sizeof pmap->pm_stats);
 1709 
 1710         return (1);
 1711 }
 1712 
 1713 /*
 1714  * This routine is called if the desired page table page does not exist.
 1715  *
 1716  * If page table page allocation fails, this routine may sleep before
 1717  * returning NULL.  It sleeps only if a lock pointer was given.
 1718  *
 1719  * Note: If a page allocation fails at page table level two or three,
 1720  * one or two pages may be held during the wait, only to be released
 1721  * afterwards.  This conservative approach is easily argued to avoid
 1722  * race conditions.
 1723  */
 1724 static vm_page_t
 1725 _pmap_allocpte(pmap_t pmap, vm_pindex_t ptepindex, struct rwlock **lockp)
 1726 {
 1727         vm_page_t m, pdppg, pdpg;
 1728 
 1729         PMAP_LOCK_ASSERT(pmap, MA_OWNED);
 1730 
 1731         /*
 1732          * Allocate a page table page.
 1733          */
 1734         if ((m = vm_page_alloc(NULL, ptepindex, VM_ALLOC_NOOBJ |
 1735             VM_ALLOC_WIRED | VM_ALLOC_ZERO)) == NULL) {
 1736                 if (lockp != NULL) {
 1737                         RELEASE_PV_LIST_LOCK(lockp);
 1738                         PMAP_UNLOCK(pmap);
 1739                         rw_runlock(&pvh_global_lock);
 1740                         VM_WAIT;
 1741                         rw_rlock(&pvh_global_lock);
 1742                         PMAP_LOCK(pmap);
 1743                 }
 1744 
 1745                 /*
 1746                  * Indicate the need to retry.  While waiting, the page table
 1747                  * page may have been allocated.
 1748                  */
 1749                 return (NULL);
 1750         }
 1751         if ((m->flags & PG_ZERO) == 0)
 1752                 pmap_zero_page(m);
 1753 
 1754         /*
 1755          * Map the pagetable page into the process address space, if
 1756          * it isn't already there.
 1757          */
 1758 
 1759         if (ptepindex >= (NUPDE + NUPDPE)) {
 1760                 pml4_entry_t *pml4;
 1761                 vm_pindex_t pml4index;
 1762 
 1763                 /* Wire up a new PDPE page */
 1764                 pml4index = ptepindex - (NUPDE + NUPDPE);
 1765                 pml4 = &pmap->pm_pml4[pml4index];
 1766                 *pml4 = VM_PAGE_TO_PHYS(m) | PG_U | PG_RW | PG_V | PG_A | PG_M;
 1767 
 1768         } else if (ptepindex >= NUPDE) {
 1769                 vm_pindex_t pml4index;
 1770                 vm_pindex_t pdpindex;
 1771                 pml4_entry_t *pml4;
 1772                 pdp_entry_t *pdp;
 1773 
 1774                 /* Wire up a new PDE page */
 1775                 pdpindex = ptepindex - NUPDE;
 1776                 pml4index = pdpindex >> NPML4EPGSHIFT;
 1777 
 1778                 pml4 = &pmap->pm_pml4[pml4index];
 1779                 if ((*pml4 & PG_V) == 0) {
 1780                         /* Have to allocate a new pdp, recurse */
 1781                         if (_pmap_allocpte(pmap, NUPDE + NUPDPE + pml4index,
 1782                             lockp) == NULL) {
 1783                                 --m->wire_count;
 1784                                 atomic_subtract_int(&cnt.v_wire_count, 1);
 1785                                 vm_page_free_zero(m);
 1786                                 return (NULL);
 1787                         }
 1788                 } else {
 1789                         /* Add reference to pdp page */
 1790                         pdppg = PHYS_TO_VM_PAGE(*pml4 & PG_FRAME);
 1791                         pdppg->wire_count++;
 1792                 }
 1793                 pdp = (pdp_entry_t *)PHYS_TO_DMAP(*pml4 & PG_FRAME);
 1794 
 1795                 /* Now find the pdp page */
 1796                 pdp = &pdp[pdpindex & ((1ul << NPDPEPGSHIFT) - 1)];
 1797                 *pdp = VM_PAGE_TO_PHYS(m) | PG_U | PG_RW | PG_V | PG_A | PG_M;
 1798 
 1799         } else {
 1800                 vm_pindex_t pml4index;
 1801                 vm_pindex_t pdpindex;
 1802                 pml4_entry_t *pml4;
 1803                 pdp_entry_t *pdp;
 1804                 pd_entry_t *pd;
 1805 
 1806                 /* Wire up a new PTE page */
 1807                 pdpindex = ptepindex >> NPDPEPGSHIFT;
 1808                 pml4index = pdpindex >> NPML4EPGSHIFT;
 1809 
  1810                 /* First, find the pdp and check that it's valid. */
 1811                 pml4 = &pmap->pm_pml4[pml4index];
 1812                 if ((*pml4 & PG_V) == 0) {
 1813                         /* Have to allocate a new pd, recurse */
 1814                         if (_pmap_allocpte(pmap, NUPDE + pdpindex,
 1815                             lockp) == NULL) {
 1816                                 --m->wire_count;
 1817                                 atomic_subtract_int(&cnt.v_wire_count, 1);
 1818                                 vm_page_free_zero(m);
 1819                                 return (NULL);
 1820                         }
 1821                         pdp = (pdp_entry_t *)PHYS_TO_DMAP(*pml4 & PG_FRAME);
 1822                         pdp = &pdp[pdpindex & ((1ul << NPDPEPGSHIFT) - 1)];
 1823                 } else {
 1824                         pdp = (pdp_entry_t *)PHYS_TO_DMAP(*pml4 & PG_FRAME);
 1825                         pdp = &pdp[pdpindex & ((1ul << NPDPEPGSHIFT) - 1)];
 1826                         if ((*pdp & PG_V) == 0) {
 1827                                 /* Have to allocate a new pd, recurse */
 1828                                 if (_pmap_allocpte(pmap, NUPDE + pdpindex,
 1829                                     lockp) == NULL) {
 1830                                         --m->wire_count;
 1831                                         atomic_subtract_int(&cnt.v_wire_count,
 1832                                             1);
 1833                                         vm_page_free_zero(m);
 1834                                         return (NULL);
 1835                                 }
 1836                         } else {
 1837                                 /* Add reference to the pd page */
 1838                                 pdpg = PHYS_TO_VM_PAGE(*pdp & PG_FRAME);
 1839                                 pdpg->wire_count++;
 1840                         }
 1841                 }
 1842                 pd = (pd_entry_t *)PHYS_TO_DMAP(*pdp & PG_FRAME);
 1843 
 1844                 /* Now we know where the page directory page is */
 1845                 pd = &pd[ptepindex & ((1ul << NPDEPGSHIFT) - 1)];
 1846                 *pd = VM_PAGE_TO_PHYS(m) | PG_U | PG_RW | PG_V | PG_A | PG_M;
 1847         }
 1848 
 1849         pmap_resident_count_inc(pmap, 1);
 1850 
 1851         return (m);
 1852 }
 1853 
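/*
 * Editor's sketch (illustrative, not part of pmap.c): _pmap_allocpte()
 * above recovers the PDP and PML4 indices from a page table page index by
 * shifting right 9 bits per level, which is the same decomposition a
 * canonical 48-bit VA undergoes: four 9-bit indices plus a 12-bit offset.
 * The shift constants below are the standard amd64 values; the sample VA
 * is made up.
 */
#if 0	/* standalone userland demo; build separately with cc */
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT	12
#define PDRSHIFT	21	/* one PD entry covers 2MB */
#define PDPSHIFT	30
#define PML4SHIFT	39
#define IDXMASK		0x1ffULL	/* 9-bit table index */

int
main(void)
{
	uint64_t va = 0x00007f1234567890ULL;	/* arbitrary user VA */
	uint64_t ptepindex;

	printf("pml4 %ju, pdp %ju, pd %ju, pt %ju, offset %#jx\n",
	    (uintmax_t)((va >> PML4SHIFT) & IDXMASK),
	    (uintmax_t)((va >> PDPSHIFT) & IDXMASK),
	    (uintmax_t)((va >> PDRSHIFT) & IDXMASK),
	    (uintmax_t)((va >> PAGE_SHIFT) & IDXMASK),
	    (uintmax_t)(va & 0xfffULL));

	/* Cf. pmap_pde_pindex() and the shifts in _pmap_allocpte(). */
	ptepindex = va >> PDRSHIFT;
	printf("pdp index %ju, pml4 index %ju\n",
	    (uintmax_t)((ptepindex >> 9) & IDXMASK),
	    (uintmax_t)((ptepindex >> 18) & IDXMASK));
	return (0);
}
#endif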
 1854 static vm_page_t
 1855 pmap_allocpde(pmap_t pmap, vm_offset_t va, struct rwlock **lockp)
 1856 {
 1857         vm_pindex_t pdpindex, ptepindex;
 1858         pdp_entry_t *pdpe;
 1859         vm_page_t pdpg;
 1860 
 1861 retry:
 1862         pdpe = pmap_pdpe(pmap, va);
 1863         if (pdpe != NULL && (*pdpe & PG_V) != 0) {
 1864                 /* Add a reference to the pd page. */
 1865                 pdpg = PHYS_TO_VM_PAGE(*pdpe & PG_FRAME);
 1866                 pdpg->wire_count++;
 1867         } else {
 1868                 /* Allocate a pd page. */
 1869                 ptepindex = pmap_pde_pindex(va);
 1870                 pdpindex = ptepindex >> NPDPEPGSHIFT;
 1871                 pdpg = _pmap_allocpte(pmap, NUPDE + pdpindex, lockp);
 1872                 if (pdpg == NULL && lockp != NULL)
 1873                         goto retry;
 1874         }
 1875         return (pdpg);
 1876 }
 1877 
 1878 static vm_page_t
 1879 pmap_allocpte(pmap_t pmap, vm_offset_t va, struct rwlock **lockp)
 1880 {
 1881         vm_pindex_t ptepindex;
 1882         pd_entry_t *pd;
 1883         vm_page_t m;
 1884 
 1885         /*
 1886          * Calculate pagetable page index
 1887          */
 1888         ptepindex = pmap_pde_pindex(va);
 1889 retry:
 1890         /*
 1891          * Get the page directory entry
 1892          */
 1893         pd = pmap_pde(pmap, va);
 1894 
 1895         /*
 1896          * This supports switching from a 2MB page to a
 1897          * normal 4K page.
 1898          */
 1899         if (pd != NULL && (*pd & (PG_PS | PG_V)) == (PG_PS | PG_V)) {
 1900                 if (!pmap_demote_pde_locked(pmap, pd, va, lockp)) {
 1901                         /*
 1902                          * Invalidation of the 2MB page mapping may have caused
 1903                          * the deallocation of the underlying PD page.
 1904                          */
 1905                         pd = NULL;
 1906                 }
 1907         }
 1908 
 1909         /*
 1910          * If the page table page is mapped, we just increment the
  1911          * wire count, and activate it.
 1912          */
 1913         if (pd != NULL && (*pd & PG_V) != 0) {
 1914                 m = PHYS_TO_VM_PAGE(*pd & PG_FRAME);
 1915                 m->wire_count++;
 1916         } else {
 1917                 /*
 1918                  * Here if the pte page isn't mapped, or if it has been
 1919                  * deallocated.
 1920                  */
 1921                 m = _pmap_allocpte(pmap, ptepindex, lockp);
 1922                 if (m == NULL && lockp != NULL)
 1923                         goto retry;
 1924         }
 1925         return (m);
 1926 }
 1927 
 1928 
 1929 /***************************************************
 1930  * Pmap allocation/deallocation routines.
 1931  ***************************************************/
 1932 
 1933 /*
 1934  * Release any resources held by the given physical map.
 1935  * Called when a pmap initialized by pmap_pinit is being released.
 1936  * Should only be called if the map contains no valid mappings.
 1937  */
 1938 void
 1939 pmap_release(pmap_t pmap)
 1940 {
 1941         vm_page_t m;
 1942         int i;
 1943 
 1944         KASSERT(pmap->pm_stats.resident_count == 0,
 1945             ("pmap_release: pmap resident count %ld != 0",
 1946             pmap->pm_stats.resident_count));
 1947         KASSERT(pmap->pm_root == NULL,
 1948             ("pmap_release: pmap has reserved page table page(s)"));
 1949 
 1950         m = PHYS_TO_VM_PAGE(pmap->pm_pml4[PML4PML4I] & PG_FRAME);
 1951 
 1952         pmap->pm_pml4[KPML4I] = 0;      /* KVA */
 1953         for (i = 0; i < NDMPML4E; i++)  /* Direct Map */
 1954                 pmap->pm_pml4[DMPML4I + i] = 0;
 1955         pmap->pm_pml4[PML4PML4I] = 0;   /* Recursive Mapping */
 1956 
 1957         m->wire_count--;
 1958         atomic_subtract_int(&cnt.v_wire_count, 1);
 1959         vm_page_free_zero(m);
 1960         PMAP_LOCK_DESTROY(pmap);
 1961 }
 1962 
 1963 static int
 1964 kvm_size(SYSCTL_HANDLER_ARGS)
 1965 {
 1966         unsigned long ksize = VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS;
 1967 
 1968         return sysctl_handle_long(oidp, &ksize, 0, req);
 1969 }
 1970 SYSCTL_PROC(_vm, OID_AUTO, kvm_size, CTLTYPE_LONG|CTLFLAG_RD, 
 1971     0, 0, kvm_size, "LU", "Size of KVM");
 1972 
 1973 static int
 1974 kvm_free(SYSCTL_HANDLER_ARGS)
 1975 {
 1976         unsigned long kfree = VM_MAX_KERNEL_ADDRESS - kernel_vm_end;
 1977 
 1978         return sysctl_handle_long(oidp, &kfree, 0, req);
 1979 }
 1980 SYSCTL_PROC(_vm, OID_AUTO, kvm_free, CTLTYPE_LONG|CTLFLAG_RD, 
 1981     0, 0, kvm_free, "LU", "Amount of KVM free");
 1982 
 1983 /*
 1984  * grow the number of kernel page table entries, if needed
 1985  */
 1986 void
 1987 pmap_growkernel(vm_offset_t addr)
 1988 {
 1989         vm_paddr_t paddr;
 1990         vm_page_t nkpg;
 1991         pd_entry_t *pde, newpdir;
 1992         pdp_entry_t *pdpe;
 1993 
 1994         mtx_assert(&kernel_map->system_mtx, MA_OWNED);
 1995 
 1996         /*
 1997          * Return if "addr" is within the range of kernel page table pages
 1998          * that were preallocated during pmap bootstrap.  Moreover, leave
 1999          * "kernel_vm_end" and the kernel page table as they were.
 2000          *
 2001          * The correctness of this action is based on the following
 2002          * argument: vm_map_insert() allocates contiguous ranges of the
 2003          * kernel virtual address space.  It calls this function if a range
 2004          * ends after "kernel_vm_end".  If the kernel is mapped between
 2005          * "kernel_vm_end" and "addr", then the range cannot begin at
 2006          * "kernel_vm_end".  In fact, its beginning address cannot be less
 2007          * than the kernel.  Thus, there is no immediate need to allocate
 2008          * any new kernel page table pages between "kernel_vm_end" and
 2009          * "KERNBASE".
 2010          */
 2011         if (KERNBASE < addr && addr <= KERNBASE + NKPT * NBPDR)
 2012                 return;
 2013 
 2014         addr = roundup2(addr, NBPDR);
 2015         if (addr - 1 >= kernel_map->max_offset)
 2016                 addr = kernel_map->max_offset;
 2017         while (kernel_vm_end < addr) {
 2018                 pdpe = pmap_pdpe(kernel_pmap, kernel_vm_end);
 2019                 if ((*pdpe & PG_V) == 0) {
 2020                         /* We need a new PDP entry */
 2021                         nkpg = vm_page_alloc(NULL, kernel_vm_end >> PDPSHIFT,
 2022                             VM_ALLOC_INTERRUPT | VM_ALLOC_NOOBJ |
 2023                             VM_ALLOC_WIRED | VM_ALLOC_ZERO);
 2024                         if (nkpg == NULL)
 2025                                 panic("pmap_growkernel: no memory to grow kernel");
 2026                         if ((nkpg->flags & PG_ZERO) == 0)
 2027                                 pmap_zero_page(nkpg);
 2028                         paddr = VM_PAGE_TO_PHYS(nkpg);
 2029                         *pdpe = (pdp_entry_t)
 2030                                 (paddr | PG_V | PG_RW | PG_A | PG_M);
 2031                         continue; /* try again */
 2032                 }
 2033                 pde = pmap_pdpe_to_pde(pdpe, kernel_vm_end);
 2034                 if ((*pde & PG_V) != 0) {
 2035                         kernel_vm_end = (kernel_vm_end + NBPDR) & ~PDRMASK;
 2036                         if (kernel_vm_end - 1 >= kernel_map->max_offset) {
 2037                                 kernel_vm_end = kernel_map->max_offset;
 2038                                 break;                       
 2039                         }
 2040                         continue;
 2041                 }
 2042 
 2043                 nkpg = vm_page_alloc(NULL, pmap_pde_pindex(kernel_vm_end),
 2044                     VM_ALLOC_INTERRUPT | VM_ALLOC_NOOBJ | VM_ALLOC_WIRED |
 2045                     VM_ALLOC_ZERO);
 2046                 if (nkpg == NULL)
 2047                         panic("pmap_growkernel: no memory to grow kernel");
 2048                 if ((nkpg->flags & PG_ZERO) == 0)
 2049                         pmap_zero_page(nkpg);
 2050                 paddr = VM_PAGE_TO_PHYS(nkpg);
 2051                 newpdir = (pd_entry_t) (paddr | PG_V | PG_RW | PG_A | PG_M);
 2052                 pde_store(pde, newpdir);
 2053 
 2054                 kernel_vm_end = (kernel_vm_end + NBPDR) & ~PDRMASK;
 2055                 if (kernel_vm_end - 1 >= kernel_map->max_offset) {
 2056                         kernel_vm_end = kernel_map->max_offset;
 2057                         break;                       
 2058                 }
 2059         }
 2060 }
 2061 
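/*
 * Editor's sketch (illustrative, not part of pmap.c): pmap_growkernel()
 * above extends the kernel page tables one 2MB PDE at a time, rounding the
 * requested address up to a 2MB boundary and stepping kernel_vm_end with
 * (x + NBPDR) & ~PDRMASK.  The demo below shows only that arithmetic; the
 * addresses are made up and no page tables are touched.
 */
#if 0	/* standalone userland demo; build separately with cc */
#include <stdint.h>
#include <stdio.h>

#define NBPDR	(1ULL << 21)		/* bytes mapped by one PDE: 2MB */
#define PDRMASK	(NBPDR - 1)

/* FreeBSD's roundup2(): round up to the next multiple of a power of two. */
#define roundup2(x, y)	(((x) + ((y) - 1)) & ~((y) - 1))

int
main(void)
{
	uint64_t addr = 0xffffffff81e34567ULL;		/* requested end */
	uint64_t kernel_vm_end = 0xffffffff81c00000ULL;	/* current end */

	addr = roundup2(addr, NBPDR);			/* -> ...82000000 */
	while (kernel_vm_end < addr) {
		/* ...allocate and install one PDE here... */
		kernel_vm_end = (kernel_vm_end + NBPDR) & ~PDRMASK;
		printf("kernel_vm_end -> %#jx\n", (uintmax_t)kernel_vm_end);
	}
	return (0);
}
#endif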
 2062 
 2063 /***************************************************
  2064  * Page management routines.
 2065  ***************************************************/
 2066 
 2067 CTASSERT(sizeof(struct pv_chunk) == PAGE_SIZE);
 2068 CTASSERT(_NPCM == 3);
 2069 CTASSERT(_NPCPV == 168);
 2070 
 2071 static __inline struct pv_chunk *
 2072 pv_to_chunk(pv_entry_t pv)
 2073 {
 2074 
 2075         return ((struct pv_chunk *)((uintptr_t)pv & ~(uintptr_t)PAGE_MASK));
 2076 }
 2077 
 2078 #define PV_PMAP(pv) (pv_to_chunk(pv)->pc_pmap)
 2079 
 2080 #define PC_FREE0        0xfffffffffffffffful
 2081 #define PC_FREE1        0xfffffffffffffffful
 2082 #define PC_FREE2        0x000000fffffffffful
 2083 
 2084 static const uint64_t pc_freemask[_NPCM] = { PC_FREE0, PC_FREE1, PC_FREE2 };
 2085 
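/*
 * Editor's sketch (illustrative, not part of pmap.c): the free masks above
 * describe one pv_chunk's 168 slots as three 64-bit bitmaps, two full words
 * plus 40 bits (64 + 64 + 40 = 168), which is what CTASSERT(_NPCPV == 168)
 * checks.  The exact figure follows from how many pv entries fit in a 4KB
 * page after the chunk header, a layout not shown here.  The demo below
 * just popcounts the masks using the GCC/Clang builtin.
 */
#if 0	/* standalone userland demo; build separately with cc */
#include <stdint.h>
#include <stdio.h>

#define PC_FREE0	0xfffffffffffffffful
#define PC_FREE1	0xfffffffffffffffful
#define PC_FREE2	0x000000fffffffffful

int
main(void)
{
	int n = __builtin_popcountll(PC_FREE0) +
	    __builtin_popcountll(PC_FREE1) +
	    __builtin_popcountll(PC_FREE2);

	printf("free bits per chunk: %d\n", n);	/* prints 168 */
	return (0);
}
#endif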
 2086 #ifdef PV_STATS
 2087 static int pc_chunk_count, pc_chunk_allocs, pc_chunk_frees, pc_chunk_tryfail;
 2088 
 2089 SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_count, CTLFLAG_RD, &pc_chunk_count, 0,
 2090         "Current number of pv entry chunks");
 2091 SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_allocs, CTLFLAG_RD, &pc_chunk_allocs, 0,
 2092         "Current number of pv entry chunks allocated");
 2093 SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_frees, CTLFLAG_RD, &pc_chunk_frees, 0,
  2094         "Total number of pv entry chunks freed");
 2095 SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_tryfail, CTLFLAG_RD, &pc_chunk_tryfail, 0,
 2096         "Number of times tried to get a chunk page but failed.");
 2097 
 2098 static long pv_entry_frees, pv_entry_allocs, pv_entry_count;
 2099 static int pv_entry_spare;
 2100 
 2101 SYSCTL_LONG(_vm_pmap, OID_AUTO, pv_entry_frees, CTLFLAG_RD, &pv_entry_frees, 0,
 2102         "Current number of pv entry frees");
 2103 SYSCTL_LONG(_vm_pmap, OID_AUTO, pv_entry_allocs, CTLFLAG_RD, &pv_entry_allocs, 0,
 2104         "Current number of pv entry allocs");
 2105 SYSCTL_LONG(_vm_pmap, OID_AUTO, pv_entry_count, CTLFLAG_RD, &pv_entry_count, 0,
 2106         "Current number of pv entries");
 2107 SYSCTL_INT(_vm_pmap, OID_AUTO, pv_entry_spare, CTLFLAG_RD, &pv_entry_spare, 0,
 2108         "Current number of spare pv entries");
 2109 #endif
 2110 
 2111 /*
 2112  * We are in a serious low memory condition.  Resort to
 2113  * drastic measures to free some pages so we can allocate
 2114  * another pv entry chunk.
 2115  *
 2116  * Returns NULL if PV entries were reclaimed from the specified pmap.
 2117  *
 2118  * We do not, however, unmap 2mpages because subsequent accesses will
 2119  * allocate per-page pv entries until repromotion occurs, thereby
 2120  * exacerbating the shortage of free pv entries.
 2121  */
 2122 static vm_page_t
 2123 reclaim_pv_chunk(pmap_t locked_pmap, struct rwlock **lockp)
 2124 {
 2125         struct pch new_tail;
 2126         struct pv_chunk *pc;
 2127         struct md_page *pvh;
 2128         pd_entry_t *pde;
 2129         pmap_t pmap;
 2130         pt_entry_t *pte, tpte;
 2131         pv_entry_t pv;
 2132         vm_offset_t va;
 2133         vm_page_t free, m, m_pc;
 2134         uint64_t inuse;
 2135         int bit, field, freed;
 2136         
 2137         rw_assert(&pvh_global_lock, RA_LOCKED);
 2138         PMAP_LOCK_ASSERT(locked_pmap, MA_OWNED);
 2139         KASSERT(lockp != NULL, ("reclaim_pv_chunk: lockp is NULL"));
 2140         pmap = NULL;
 2141         free = m_pc = NULL;
 2142         TAILQ_INIT(&new_tail);
 2143         mtx_lock(&pv_chunks_mutex);
 2144         while ((pc = TAILQ_FIRST(&pv_chunks)) != NULL && free == NULL) {
 2145                 TAILQ_REMOVE(&pv_chunks, pc, pc_lru);
 2146                 mtx_unlock(&pv_chunks_mutex);
 2147                 if (pmap != pc->pc_pmap) {
 2148                         if (pmap != NULL) {
 2149                                 pmap_invalidate_all(pmap);
 2150                                 if (pmap != locked_pmap)
 2151                                         PMAP_UNLOCK(pmap);
 2152                         }
 2153                         pmap = pc->pc_pmap;
 2154                         /* Avoid deadlock and lock recursion. */
 2155                         if (pmap > locked_pmap) {
 2156                                 RELEASE_PV_LIST_LOCK(lockp);
 2157                                 PMAP_LOCK(pmap);
 2158                         } else if (pmap != locked_pmap &&
 2159                             !PMAP_TRYLOCK(pmap)) {
 2160                                 pmap = NULL;
 2161                                 TAILQ_INSERT_TAIL(&new_tail, pc, pc_lru);
 2162                                 mtx_lock(&pv_chunks_mutex);
 2163                                 continue;
 2164                         }
 2165                 }
 2166 
 2167                 /*
 2168                  * Destroy every non-wired, 4 KB page mapping in the chunk.
 2169                  */
 2170                 freed = 0;
 2171                 for (field = 0; field < _NPCM; field++) {
 2172                         for (inuse = ~pc->pc_map[field] & pc_freemask[field];
 2173                             inuse != 0; inuse &= ~(1UL << bit)) {
 2174                                 bit = bsfq(inuse);
 2175                                 pv = &pc->pc_pventry[field * 64 + bit];
 2176                                 va = pv->pv_va;
 2177                                 pde = pmap_pde(pmap, va);
 2178                                 if ((*pde & PG_PS) != 0)
 2179                                         continue;
 2180                                 pte = pmap_pde_to_pte(pde, va);
 2181                                 if ((*pte & PG_W) != 0)
 2182                                         continue;
 2183                                 tpte = pte_load_clear(pte);
 2184                                 if ((tpte & PG_G) != 0)
 2185                                         pmap_invalidate_page(pmap, va);
 2186                                 m = PHYS_TO_VM_PAGE(tpte & PG_FRAME);
 2187                                 if ((tpte & (PG_M | PG_RW)) == (PG_M | PG_RW))
 2188                                         vm_page_dirty(m);
 2189                                 if ((tpte & PG_A) != 0)
 2190                                         vm_page_aflag_set(m, PGA_REFERENCED);
 2191                                 CHANGE_PV_LIST_LOCK_TO_VM_PAGE(lockp, m);
 2192                                 TAILQ_REMOVE(&m->md.pv_list, pv, pv_list);
 2193                                 if (TAILQ_EMPTY(&m->md.pv_list) &&
 2194                                     (m->flags & PG_FICTITIOUS) == 0) {
 2195                                         pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
 2196                                         if (TAILQ_EMPTY(&pvh->pv_list)) {
 2197                                                 vm_page_aflag_clear(m,
 2198                                                     PGA_WRITEABLE);
 2199                                         }
 2200                                 }
 2201                                 pc->pc_map[field] |= 1UL << bit;
 2202                                 pmap_unuse_pt(pmap, va, *pde, &free);   
 2203                                 freed++;
 2204                         }
 2205                 }
 2206                 if (freed == 0) {
 2207                         TAILQ_INSERT_TAIL(&new_tail, pc, pc_lru);
 2208                         mtx_lock(&pv_chunks_mutex);
 2209                         continue;
 2210                 }
 2211                 /* Every freed mapping is for a 4 KB page. */
 2212                 pmap_resident_count_dec(pmap, freed);
 2213                 PV_STAT(atomic_add_long(&pv_entry_frees, freed));
 2214                 PV_STAT(atomic_add_int(&pv_entry_spare, freed));
 2215                 PV_STAT(atomic_subtract_long(&pv_entry_count, freed));
 2216                 TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
 2217                 if (pc->pc_map[0] == PC_FREE0 && pc->pc_map[1] == PC_FREE1 &&
 2218                     pc->pc_map[2] == PC_FREE2) {
 2219                         PV_STAT(atomic_subtract_int(&pv_entry_spare, _NPCPV));
 2220                         PV_STAT(atomic_subtract_int(&pc_chunk_count, 1));
 2221                         PV_STAT(atomic_add_int(&pc_chunk_frees, 1));
 2222                         /* Entire chunk is free; return it. */
 2223                         m_pc = PHYS_TO_VM_PAGE(DMAP_TO_PHYS((vm_offset_t)pc));
 2224                         dump_drop_page(m_pc->phys_addr);
 2225                         mtx_lock(&pv_chunks_mutex);
 2226                         break;
 2227                 }
 2228                 TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc, pc_list);
 2229                 TAILQ_INSERT_TAIL(&new_tail, pc, pc_lru);
 2230                 mtx_lock(&pv_chunks_mutex);
 2231                 /* One freed pv entry in locked_pmap is sufficient. */
 2232                 if (pmap == locked_pmap)
 2233                         break;
 2234         }
 2235         TAILQ_CONCAT(&pv_chunks, &new_tail, pc_lru);
 2236         mtx_unlock(&pv_chunks_mutex);
 2237         if (pmap != NULL) {
 2238                 pmap_invalidate_all(pmap);
 2239                 if (pmap != locked_pmap)
 2240                         PMAP_UNLOCK(pmap);
 2241         }
 2242         if (m_pc == NULL && free != NULL) {
 2243                 m_pc = free;
 2244                 free = m_pc->right;
 2245                 /* Recycle a freed page table page. */
 2246                 m_pc->wire_count = 1;
 2247                 atomic_add_int(&cnt.v_wire_count, 1);
 2248         }
 2249         pmap_free_zero_pages(free);
 2250         return (m_pc);
 2251 }
 2252 
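/*
 * Editor's sketch (illustrative, not part of pmap.c): reclaim_pv_chunk()
 * above walks the allocated slots of a chunk by inverting pc_map against
 * the free mask and repeatedly taking the lowest set bit with bsfq().  The
 * demo below uses __builtin_ctzll() as a portable stand-in for bsfq(); the
 * sample bitmap is made up.
 */
#if 0	/* standalone userland demo; build separately with cc */
#include <stdint.h>
#include <stdio.h>

#define PC_FREE2	0x000000fffffffffful	/* valid bits of map word 2 */

int
main(void)
{
	/* Hypothetical map word: a set bit means the slot is free. */
	uint64_t pc_map = PC_FREE2 &
	    ~((1ULL << 3) | (1ULL << 17) | (1ULL << 38));
	uint64_t inuse;
	int bit;

	/* Allocated slots are the clear bits, restricted to valid ones. */
	for (inuse = ~pc_map & PC_FREE2; inuse != 0; inuse &= ~(1ULL << bit)) {
		bit = __builtin_ctzll(inuse);	/* stand-in for bsfq() */
		printf("slot %d is in use\n", bit);	/* 3, 17, 38 */
	}
	return (0);
}
#endif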
 2253 /*
 2254  * free the pv_entry back to the free list
 2255  */
 2256 static void
 2257 free_pv_entry(pmap_t pmap, pv_entry_t pv)
 2258 {
 2259         struct pv_chunk *pc;
 2260         int idx, field, bit;
 2261 
 2262         rw_assert(&pvh_global_lock, RA_LOCKED);
 2263         PMAP_LOCK_ASSERT(pmap, MA_OWNED);
 2264         PV_STAT(atomic_add_long(&pv_entry_frees, 1));
 2265         PV_STAT(atomic_add_int(&pv_entry_spare, 1));
 2266         PV_STAT(atomic_subtract_long(&pv_entry_count, 1));
 2267         pc = pv_to_chunk(pv);
 2268         idx = pv - &pc->pc_pventry[0];
 2269         field = idx / 64;
 2270         bit = idx % 64;
 2271         pc->pc_map[field] |= 1ul << bit;
 2272         if (pc->pc_map[0] != PC_FREE0 || pc->pc_map[1] != PC_FREE1 ||
 2273             pc->pc_map[2] != PC_FREE2) {
 2274                 /* 98% of the time, pc is already at the head of the list. */
 2275                 if (__predict_false(pc != TAILQ_FIRST(&pmap->pm_pvchunk))) {
 2276                         TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
 2277                         TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc, pc_list);
 2278                 }
 2279                 return;
 2280         }
 2281         TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
 2282         free_pv_chunk(pc);
 2283 }
 2284 
 2285 static void
 2286 free_pv_chunk(struct pv_chunk *pc)
 2287 {
 2288         vm_page_t m;
 2289 
 2290         mtx_lock(&pv_chunks_mutex);
 2291         TAILQ_REMOVE(&pv_chunks, pc, pc_lru);
 2292         mtx_unlock(&pv_chunks_mutex);
 2293         PV_STAT(atomic_subtract_int(&pv_entry_spare, _NPCPV));
 2294         PV_STAT(atomic_subtract_int(&pc_chunk_count, 1));
 2295         PV_STAT(atomic_add_int(&pc_chunk_frees, 1));
 2296         /* entire chunk is free, return it */
 2297         m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS((vm_offset_t)pc));
 2298         dump_drop_page(m->phys_addr);
 2299         vm_page_unwire(m, 0);
 2300         vm_page_free(m);
 2301 }
 2302 
 2303 /*
 2304  * Returns a new PV entry, allocating a new PV chunk from the system when
 2305  * needed.  If this PV chunk allocation fails and a PV list lock pointer was
 2306  * given, a PV chunk is reclaimed from an arbitrary pmap.  Otherwise, NULL is
 2307  * returned.
 2308  *
 2309  * The given PV list lock may be released.
 2310  */
 2311 static pv_entry_t
 2312 get_pv_entry(pmap_t pmap, struct rwlock **lockp)
 2313 {
 2314         int bit, field;
 2315         pv_entry_t pv;
 2316         struct pv_chunk *pc;
 2317         vm_page_t m;
 2318 
 2319         rw_assert(&pvh_global_lock, RA_LOCKED);
 2320         PMAP_LOCK_ASSERT(pmap, MA_OWNED);
 2321         PV_STAT(atomic_add_long(&pv_entry_allocs, 1));
 2322 retry:
 2323         pc = TAILQ_FIRST(&pmap->pm_pvchunk);
 2324         if (pc != NULL) {
 2325                 for (field = 0; field < _NPCM; field++) {
 2326                         if (pc->pc_map[field]) {
 2327                                 bit = bsfq(pc->pc_map[field]);
 2328                                 break;
 2329                         }
 2330                 }
 2331                 if (field < _NPCM) {
 2332                         pv = &pc->pc_pventry[field * 64 + bit];
 2333                         pc->pc_map[field] &= ~(1ul << bit);
 2334                         /* If this was the last item, move it to tail */
 2335                         if (pc->pc_map[0] == 0 && pc->pc_map[1] == 0 &&
 2336                             pc->pc_map[2] == 0) {
 2337                                 TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
 2338                                 TAILQ_INSERT_TAIL(&pmap->pm_pvchunk, pc,
 2339                                     pc_list);
 2340                         }
 2341                         PV_STAT(atomic_add_long(&pv_entry_count, 1));
 2342                         PV_STAT(atomic_subtract_int(&pv_entry_spare, 1));
 2343                         return (pv);
 2344                 }
 2345         }
 2346         /* No free items, allocate another chunk */
 2347         m = vm_page_alloc(NULL, 0, VM_ALLOC_NORMAL | VM_ALLOC_NOOBJ |
 2348             VM_ALLOC_WIRED);
 2349         if (m == NULL) {
 2350                 if (lockp == NULL) {
 2351                         PV_STAT(pc_chunk_tryfail++);
 2352                         return (NULL);
 2353                 }
 2354                 m = reclaim_pv_chunk(pmap, lockp);
 2355                 if (m == NULL)
 2356                         goto retry;
 2357         }
 2358         PV_STAT(atomic_add_int(&pc_chunk_count, 1));
 2359         PV_STAT(atomic_add_int(&pc_chunk_allocs, 1));
 2360         dump_add_page(m->phys_addr);
 2361         pc = (void *)PHYS_TO_DMAP(m->phys_addr);
 2362         pc->pc_pmap = pmap;
 2363         pc->pc_map[0] = PC_FREE0 & ~1ul;        /* preallocated bit 0 */
 2364         pc->pc_map[1] = PC_FREE1;
 2365         pc->pc_map[2] = PC_FREE2;
 2366         mtx_lock(&pv_chunks_mutex);
 2367         TAILQ_INSERT_TAIL(&pv_chunks, pc, pc_lru);
 2368         mtx_unlock(&pv_chunks_mutex);
 2369         pv = &pc->pc_pventry[0];
 2370         TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc, pc_list);
 2371         PV_STAT(atomic_add_long(&pv_entry_count, 1));
 2372         PV_STAT(atomic_add_int(&pv_entry_spare, _NPCPV - 1));
 2373         return (pv);
 2374 }
 2375 
 2376 /*
 2377  * Returns the number of one bits within the given PV chunk map element.
 2378  */
 2379 static int
 2380 popcnt_pc_map_elem(uint64_t elem)
 2381 {
 2382         int count;
 2383 
 2384         /*
 2385          * This simple method of counting the one bits performs well because
 2386          * the given element typically contains more zero bits than one bits.
 2387          */
 2388         count = 0;
 2389         for (; elem != 0; elem &= elem - 1)
 2390                 count++;
 2391         return (count);
 2392 }
 2393 
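/*
 * Editor's sketch (illustrative, not part of pmap.c): popcnt_pc_map_elem()
 * above clears the lowest set bit on every iteration (elem &= elem - 1), so
 * the loop runs once per one bit, which is cheap when the element is mostly
 * zero.  The demo below checks that count against the GCC/Clang builtin.
 */
#if 0	/* standalone userland demo; build separately with cc */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

static int
popcnt_elem(uint64_t elem)
{
	int count;

	for (count = 0; elem != 0; elem &= elem - 1)
		count++;
	return (count);
}

int
main(void)
{
	uint64_t samples[] = { 0, 1, 0x000000fffffffffful,
	    0xfffffffffffffffful, 0x8000000000000001ul };
	unsigned i;

	for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		assert(popcnt_elem(samples[i]) ==
		    __builtin_popcountll(samples[i]));
	printf("Kernighan count matches the builtin\n");
	return (0);
}
#endif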
 2394 /*
 2395  * Ensure that the number of spare PV entries in the specified pmap meets or
 2396  * exceeds the given count, "needed".
 2397  *
 2398  * The given PV list lock may be released.
 2399  */
 2400 static void
 2401 reserve_pv_entries(pmap_t pmap, int needed, struct rwlock **lockp)
 2402 {
 2403         struct pch new_tail;
 2404         struct pv_chunk *pc;
 2405         int avail, free;
 2406         vm_page_t m;
 2407 
 2408         rw_assert(&pvh_global_lock, RA_LOCKED);
 2409         PMAP_LOCK_ASSERT(pmap, MA_OWNED);
 2410         KASSERT(lockp != NULL, ("reserve_pv_entries: lockp is NULL"));
 2411 
 2412         /*
 2413          * Newly allocated PV chunks must be stored in a private list until
 2414          * the required number of PV chunks have been allocated.  Otherwise,
 2415          * reclaim_pv_chunk() could recycle one of these chunks.  In
 2416          * contrast, these chunks must be added to the pmap upon allocation.
 2417          */
 2418         TAILQ_INIT(&new_tail);
 2419 retry:
 2420         avail = 0;
 2421         TAILQ_FOREACH(pc, &pmap->pm_pvchunk, pc_list) {
 2422                 if ((cpu_feature2 & CPUID2_POPCNT) == 0) {
 2423                         free = popcnt_pc_map_elem(pc->pc_map[0]);
 2424                         free += popcnt_pc_map_elem(pc->pc_map[1]);
 2425                         free += popcnt_pc_map_elem(pc->pc_map[2]);
 2426                 } else {
 2427                         free = popcntq(pc->pc_map[0]);
 2428                         free += popcntq(pc->pc_map[1]);
 2429                         free += popcntq(pc->pc_map[2]);
 2430                 }
 2431                 if (free == 0)
 2432                         break;
 2433                 avail += free;
 2434                 if (avail >= needed)
 2435                         break;
 2436         }
 2437         for (; avail < needed; avail += _NPCPV) {
 2438                 m = vm_page_alloc(NULL, 0, VM_ALLOC_NORMAL | VM_ALLOC_NOOBJ |
 2439                     VM_ALLOC_WIRED);
 2440                 if (m == NULL) {
 2441                         m = reclaim_pv_chunk(pmap, lockp);
 2442                         if (m == NULL)
 2443                                 goto retry;
 2444                 }
 2445                 PV_STAT(atomic_add_int(&pc_chunk_count, 1));
 2446                 PV_STAT(atomic_add_int(&pc_chunk_allocs, 1));
 2447                 dump_add_page(m->phys_addr);
 2448                 pc = (void *)PHYS_TO_DMAP(m->phys_addr);
 2449                 pc->pc_pmap = pmap;
 2450                 pc->pc_map[0] = PC_FREE0;
 2451                 pc->pc_map[1] = PC_FREE1;
 2452                 pc->pc_map[2] = PC_FREE2;
 2453                 TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc, pc_list);
 2454                 TAILQ_INSERT_TAIL(&new_tail, pc, pc_lru);
 2455                 PV_STAT(atomic_add_int(&pv_entry_spare, _NPCPV));
 2456         }
 2457         if (!TAILQ_EMPTY(&new_tail)) {
 2458                 mtx_lock(&pv_chunks_mutex);
 2459                 TAILQ_CONCAT(&pv_chunks, &new_tail, pc_lru);
 2460                 mtx_unlock(&pv_chunks_mutex);
 2461         }
 2462 }
 2463 
 2464 /*
 2465  * First find and then remove the pv entry for the specified pmap and virtual
 2466  * address from the specified pv list.  Returns the pv entry if found and NULL
 2467  * otherwise.  This operation can be performed on pv lists for either 4KB or
 2468  * 2MB page mappings.
 2469  */
 2470 static __inline pv_entry_t
 2471 pmap_pvh_remove(struct md_page *pvh, pmap_t pmap, vm_offset_t va)
 2472 {
 2473         pv_entry_t pv;
 2474 
 2475         rw_assert(&pvh_global_lock, RA_LOCKED);
 2476         TAILQ_FOREACH(pv, &pvh->pv_list, pv_list) {
 2477                 if (pmap == PV_PMAP(pv) && va == pv->pv_va) {
 2478                         TAILQ_REMOVE(&pvh->pv_list, pv, pv_list);
 2479                         break;
 2480                 }
 2481         }
 2482         return (pv);
 2483 }
 2484 
 2485 /*
 2486  * After demotion from a 2MB page mapping to 512 4KB page mappings,
 2487  * destroy the pv entry for the 2MB page mapping and reinstantiate the pv
 2488  * entries for each of the 4KB page mappings.
 2489  */
 2490 static void
 2491 pmap_pv_demote_pde(pmap_t pmap, vm_offset_t va, vm_paddr_t pa,
 2492     struct rwlock **lockp)
 2493 {
 2494         struct md_page *pvh;
 2495         struct pv_chunk *pc;
 2496         pv_entry_t pv;
 2497         vm_offset_t va_last;
 2498         vm_page_t m;
 2499         int bit, field;
 2500 
 2501         rw_assert(&pvh_global_lock, RA_LOCKED);
 2502         PMAP_LOCK_ASSERT(pmap, MA_OWNED);
 2503         KASSERT((pa & PDRMASK) == 0,
 2504             ("pmap_pv_demote_pde: pa is not 2mpage aligned"));
 2505         CHANGE_PV_LIST_LOCK_TO_PHYS(lockp, pa);
 2506 
 2507         /*
 2508          * Transfer the 2mpage's pv entry for this mapping to the first
 2509          * page's pv list.  Once this transfer begins, the pv list lock
 2510          * must not be released until the last pv entry is reinstantiated.
 2511          */
 2512         pvh = pa_to_pvh(pa);
 2513         va = trunc_2mpage(va);
 2514         pv = pmap_pvh_remove(pvh, pmap, va);
 2515         KASSERT(pv != NULL, ("pmap_pv_demote_pde: pv not found"));
 2516         m = PHYS_TO_VM_PAGE(pa);
 2517         TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_list);
 2518         /* Instantiate the remaining NPTEPG - 1 pv entries. */
 2519         PV_STAT(atomic_add_long(&pv_entry_allocs, NPTEPG - 1));
 2520         va_last = va + NBPDR - PAGE_SIZE;
 2521         for (;;) {
 2522                 pc = TAILQ_FIRST(&pmap->pm_pvchunk);
 2523                 KASSERT(pc->pc_map[0] != 0 || pc->pc_map[1] != 0 ||
 2524                     pc->pc_map[2] != 0, ("pmap_pv_demote_pde: missing spare"));
 2525                 for (field = 0; field < _NPCM; field++) {
 2526                         while (pc->pc_map[field]) {
 2527                                 bit = bsfq(pc->pc_map[field]);
 2528                                 pc->pc_map[field] &= ~(1ul << bit);
 2529                                 pv = &pc->pc_pventry[field * 64 + bit];
 2530                                 va += PAGE_SIZE;
 2531                                 pv->pv_va = va;
 2532                                 m++;
 2533                                 KASSERT((m->oflags & VPO_UNMANAGED) == 0,
 2534                             ("pmap_pv_demote_pde: page %p is not managed", m));
 2535                                 TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_list);
 2536                                 if (va == va_last)
 2537                                         goto out;
 2538                         }
 2539                 }
 2540                 TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
 2541                 TAILQ_INSERT_TAIL(&pmap->pm_pvchunk, pc, pc_list);
 2542         }
 2543 out:
 2544         if (pc->pc_map[0] == 0 && pc->pc_map[1] == 0 && pc->pc_map[2] == 0) {
 2545                 TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
 2546                 TAILQ_INSERT_TAIL(&pmap->pm_pvchunk, pc, pc_list);
 2547         }
 2548         PV_STAT(atomic_add_long(&pv_entry_count, NPTEPG - 1));
 2549         PV_STAT(atomic_subtract_int(&pv_entry_spare, NPTEPG - 1));
 2550 }
 2551 
 2552 /*
 2553  * After promotion from 512 4KB page mappings to a single 2MB page mapping,
 2554  * replace the many pv entries for the 4KB page mappings by a single pv entry
 2555  * for the 2MB page mapping.
 2556  */
 2557 static void
 2558 pmap_pv_promote_pde(pmap_t pmap, vm_offset_t va, vm_paddr_t pa,
 2559     struct rwlock **lockp)
 2560 {
 2561         struct md_page *pvh;
 2562         pv_entry_t pv;
 2563         vm_offset_t va_last;
 2564         vm_page_t m;
 2565 
 2566         rw_assert(&pvh_global_lock, RA_LOCKED);
 2567         KASSERT((pa & PDRMASK) == 0,
 2568             ("pmap_pv_promote_pde: pa is not 2mpage aligned"));
 2569         CHANGE_PV_LIST_LOCK_TO_PHYS(lockp, pa);
 2570 
 2571         /*
 2572          * Transfer the first page's pv entry for this mapping to the 2mpage's
 2573          * pv list.  Aside from avoiding the cost of a call to get_pv_entry(),
 2574          * a transfer avoids the possibility that get_pv_entry() calls
 2575          * reclaim_pv_chunk() and that reclaim_pv_chunk() removes one of the
 2576          * mappings that is being promoted.
 2577          */
 2578         m = PHYS_TO_VM_PAGE(pa);
 2579         va = trunc_2mpage(va);
 2580         pv = pmap_pvh_remove(&m->md, pmap, va);
 2581         KASSERT(pv != NULL, ("pmap_pv_promote_pde: pv not found"));
 2582         pvh = pa_to_pvh(pa);
 2583         TAILQ_INSERT_TAIL(&pvh->pv_list, pv, pv_list);
 2584         /* Free the remaining NPTEPG - 1 pv entries. */
 2585         va_last = va + NBPDR - PAGE_SIZE;
 2586         do {
 2587                 m++;
 2588                 va += PAGE_SIZE;
 2589                 pmap_pvh_free(&m->md, pmap, va);
 2590         } while (va < va_last);
 2591 }
 2592 
 2593 /*
 2594  * First find and then destroy the pv entry for the specified pmap and virtual
 2595  * address.  This operation can be performed on pv lists for either 4KB or 2MB
 2596  * page mappings.
 2597  */
 2598 static void
 2599 pmap_pvh_free(struct md_page *pvh, pmap_t pmap, vm_offset_t va)
 2600 {
 2601         pv_entry_t pv;
 2602 
 2603         pv = pmap_pvh_remove(pvh, pmap, va);
 2604         KASSERT(pv != NULL, ("pmap_pvh_free: pv not found"));
 2605         free_pv_entry(pmap, pv);
 2606 }
 2607 
 2608 /*
 2609  * Conditionally create the PV entry for a 4KB page mapping if the required
 2610  * memory can be allocated without resorting to reclamation.
 2611  */
 2612 static boolean_t
 2613 pmap_try_insert_pv_entry(pmap_t pmap, vm_offset_t va, vm_page_t m,
 2614     struct rwlock **lockp)
 2615 {
 2616         pv_entry_t pv;
 2617 
 2618         rw_assert(&pvh_global_lock, RA_LOCKED);
 2619         PMAP_LOCK_ASSERT(pmap, MA_OWNED);
 2620         /* Pass NULL instead of the lock pointer to disable reclamation. */
 2621         if ((pv = get_pv_entry(pmap, NULL)) != NULL) {
 2622                 pv->pv_va = va;
 2623                 CHANGE_PV_LIST_LOCK_TO_VM_PAGE(lockp, m);
 2624                 TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_list);
 2625                 return (TRUE);
 2626         } else
 2627                 return (FALSE);
 2628 }
 2629 
 2630 /*
 2631  * Conditionally create the PV entry for a 2MB page mapping if the required
 2632  * memory can be allocated without resorting to reclamation.
 2633  */
 2634 static boolean_t
 2635 pmap_pv_insert_pde(pmap_t pmap, vm_offset_t va, vm_paddr_t pa,
 2636     struct rwlock **lockp)
 2637 {
 2638         struct md_page *pvh;
 2639         pv_entry_t pv;
 2640 
 2641         rw_assert(&pvh_global_lock, RA_LOCKED);
 2642         PMAP_LOCK_ASSERT(pmap, MA_OWNED);
 2643         /* Pass NULL instead of the lock pointer to disable reclamation. */
 2644         if ((pv = get_pv_entry(pmap, NULL)) != NULL) {
 2645                 pv->pv_va = va;
 2646                 CHANGE_PV_LIST_LOCK_TO_PHYS(lockp, pa);
 2647                 pvh = pa_to_pvh(pa);
 2648                 TAILQ_INSERT_TAIL(&pvh->pv_list, pv, pv_list);
 2649                 return (TRUE);
 2650         } else
 2651                 return (FALSE);
 2652 }
 2653 
 2654 /*
 2655  * Fills a page table page with mappings to consecutive physical pages.
 2656  */
 2657 static void
 2658 pmap_fill_ptp(pt_entry_t *firstpte, pt_entry_t newpte)
 2659 {
 2660         pt_entry_t *pte;
 2661 
 2662         for (pte = firstpte; pte < firstpte + NPTEPG; pte++) {
 2663                 *pte = newpte;
 2664                 newpte += PAGE_SIZE;
 2665         }
 2666 }
 2667 
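/*
 * Editor's sketch (illustrative, not part of pmap.c): pmap_fill_ptp() above
 * writes 512 PTEs that differ only in their frame, covering exactly one 2MB
 * region; adding PAGE_SIZE to the template advances the frame field (bits
 * 12 and up) while leaving the low flag bits untouched.  The frame and flag
 * values in the demo below are made up.
 */
#if 0	/* standalone userland demo; build separately with cc */
#include <stdint.h>
#include <stdio.h>

#define NPTEPG		512
#define PAGE_SIZE	4096ULL
#define PG_FRAME	0x000ffffffffff000ULL

int
main(void)
{
	uint64_t ptes[NPTEPG];
	uint64_t newpte = 0x40000000ULL | 0x163ULL;	/* frame | flags */
	int i;

	for (i = 0; i < NPTEPG; i++) {
		ptes[i] = newpte;
		newpte += PAGE_SIZE;
	}
	/* Frames span 0x40000000..0x401ff000 (2MB); flags stay 0x163. */
	printf("first %#jx, last %#jx, flags %#jx\n",
	    (uintmax_t)(ptes[0] & PG_FRAME),
	    (uintmax_t)(ptes[NPTEPG - 1] & PG_FRAME),
	    (uintmax_t)(ptes[NPTEPG - 1] & ~PG_FRAME));
	return (0);
}
#endif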
 2668 /*
 2669  * Tries to demote a 2MB page mapping.  If demotion fails, the 2MB page
 2670  * mapping is invalidated.
 2671  */
 2672 static boolean_t
 2673 pmap_demote_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t va)
 2674 {
 2675         struct rwlock *lock;
 2676         boolean_t rv;
 2677 
 2678         lock = NULL;
 2679         rv = pmap_demote_pde_locked(pmap, pde, va, &lock);
 2680         if (lock != NULL)
 2681                 rw_wunlock(lock);
 2682         return (rv);
 2683 }
 2684 
 2685 static boolean_t
 2686 pmap_demote_pde_locked(pmap_t pmap, pd_entry_t *pde, vm_offset_t va,
 2687     struct rwlock **lockp)
 2688 {
 2689         pd_entry_t newpde, oldpde;
 2690         pt_entry_t *firstpte, newpte;
 2691         vm_paddr_t mptepa;
 2692         vm_page_t free, mpte;
 2693 
 2694         PMAP_LOCK_ASSERT(pmap, MA_OWNED);
 2695         oldpde = *pde;
 2696         KASSERT((oldpde & (PG_PS | PG_V)) == (PG_PS | PG_V),
 2697             ("pmap_demote_pde: oldpde is missing PG_PS and/or PG_V"));
 2698         mpte = pmap_lookup_pt_page(pmap, va);
 2699         if (mpte != NULL)
 2700                 pmap_remove_pt_page(pmap, mpte);
 2701         else {
 2702                 KASSERT((oldpde & PG_W) == 0,
 2703                     ("pmap_demote_pde: page table page for a wired mapping"
 2704                     " is missing"));
 2705 
 2706                 /*
 2707                  * Invalidate the 2MB page mapping and return "failure" if the
 2708                  * mapping was never accessed or the allocation of the new
 2709                  * page table page fails.  If the 2MB page mapping belongs to
 2710                  * the direct map region of the kernel's address space, then
 2711                  * the page allocation request specifies the highest possible
 2712                  * priority (VM_ALLOC_INTERRUPT).  Otherwise, the priority is
 2713                  * normal.  Page table pages are preallocated for every other
 2714                  * part of the kernel address space, so the direct map region
 2715                  * is the only part of the kernel address space that must be
 2716                  * handled here.
 2717                  */
 2718                 if ((oldpde & PG_A) == 0 || (mpte = vm_page_alloc(NULL,
 2719                     pmap_pde_pindex(va), (va >= DMAP_MIN_ADDRESS && va <
 2720                     DMAP_MAX_ADDRESS ? VM_ALLOC_INTERRUPT : VM_ALLOC_NORMAL) |
 2721                     VM_ALLOC_NOOBJ | VM_ALLOC_WIRED)) == NULL) {
 2722                         free = NULL;
 2723                         pmap_remove_pde(pmap, pde, trunc_2mpage(va), &free,
 2724                             lockp);
 2725                         pmap_invalidate_page(pmap, trunc_2mpage(va));
 2726                         pmap_free_zero_pages(free);
 2727                         CTR2(KTR_PMAP, "pmap_demote_pde: failure for va %#lx"
 2728                             " in pmap %p", va, pmap);
 2729                         return (FALSE);
 2730                 }
 2731                 if (va < VM_MAXUSER_ADDRESS)
 2732                         pmap_resident_count_inc(pmap, 1);
 2733         }
 2734         mptepa = VM_PAGE_TO_PHYS(mpte);
 2735         firstpte = (pt_entry_t *)PHYS_TO_DMAP(mptepa);
 2736         newpde = mptepa | PG_M | PG_A | (oldpde & PG_U) | PG_RW | PG_V;
 2737         KASSERT((oldpde & PG_A) != 0,
 2738             ("pmap_demote_pde: oldpde is missing PG_A"));
 2739         KASSERT((oldpde & (PG_M | PG_RW)) != PG_RW,
 2740             ("pmap_demote_pde: oldpde is missing PG_M"));
 2741         newpte = oldpde & ~PG_PS;
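              /*
               * Relocate the PAT bit: in a 2MB PDE the PAT bit is
               * PG_PDE_PAT (bit 12), because bit 7 is PG_PS, whereas in a
               * 4KB PTE it is PG_PTE_PAT (bit 7).  The XOR below clears
               * the former and sets the latter, preserving the PAT index.
               */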
 2742         if ((newpte & PG_PDE_PAT) != 0)
 2743                 newpte ^= PG_PDE_PAT | PG_PTE_PAT;
 2744 
 2745         /*
 2746          * If the page table page is new, initialize it.
 2747          */
 2748         if (mpte->wire_count == 1) {
 2749                 mpte->wire_count = NPTEPG;
 2750                 pmap_fill_ptp(firstpte, newpte);
 2751         }
 2752         KASSERT((*firstpte & PG_FRAME) == (newpte & PG_FRAME),
 2753             ("pmap_demote_pde: firstpte and newpte map different physical"
 2754             " addresses"));
 2755 
 2756         /*
 2757          * If the mapping has changed attributes, update the page table
 2758          * entries.
 2759          */
 2760         if ((*firstpte & PG_PTE_PROMOTE) != (newpte & PG_PTE_PROMOTE))
 2761                 pmap_fill_ptp(firstpte, newpte);
 2762 
 2763         /*
 2764          * The spare PV entries must be reserved prior to demoting the
 2765          * mapping, that is, prior to changing the PDE.  Otherwise, the state
 2766          * of the PDE and the PV lists will be inconsistent, which can result
 2767          * in reclaim_pv_chunk() attempting to remove a PV entry from the
 2768          * wrong PV list and pmap_pv_demote_pde() failing to find the expected
 2769          * PV entry for the 2MB page mapping that is being demoted.
 2770          */
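              /*
               * Only NPTEPG - 1 new PV entries are needed because
               * pmap_pv_demote_pde() reuses the 2MB mapping's own PV
               * entry for the first of the 4KB page mappings.
               */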
 2771         if ((oldpde & PG_MANAGED) != 0)
 2772                 reserve_pv_entries(pmap, NPTEPG - 1, lockp);
 2773 
 2774         /*
 2775          * Demote the mapping.  This pmap is locked.  The old PDE has
 2776          * PG_A set.  If the old PDE has PG_RW set, it also has PG_M
 2777          * set.  Thus, there is no danger of a race with another
 2778          * processor changing the setting of PG_A and/or PG_M between
 2779          * the read above and the store below. 
 2780          */
 2781         if (workaround_erratum383)
 2782                 pmap_update_pde(pmap, va, pde, newpde);
 2783         else
 2784                 pde_store(pde, newpde);
 2785 
 2786         /*
 2787          * Invalidate a stale recursive mapping of the page table page.
 2788          */
 2789         if (va >= VM_MAXUSER_ADDRESS)
 2790                 pmap_invalidate_page(pmap, (vm_offset_t)vtopte(va));
 2791 
 2792         /*
 2793          * Demote the PV entry.
 2794          */
 2795         if ((oldpde & PG_MANAGED) != 0)
 2796                 pmap_pv_demote_pde(pmap, va, oldpde & PG_PS_FRAME, lockp);
 2797 
 2798         atomic_add_long(&pmap_pde_demotions, 1);
 2799         CTR2(KTR_PMAP, "pmap_demote_pde: success for va %#lx"
 2800             " in pmap %p", va, pmap);
 2801         return (TRUE);
 2802 }
 2803 
 2804 /*
 2805  * pmap_remove_pde: Unmap a 2MB (superpage) mapping from a process.
 2806  */
 2807 static int
 2808 pmap_remove_pde(pmap_t pmap, pd_entry_t *pdq, vm_offset_t sva,
 2809     vm_page_t *free, struct rwlock **lockp)
 2810 {
 2811         struct md_page *pvh;
 2812         pd_entry_t oldpde;
 2813         vm_offset_t eva, va;
 2814         vm_page_t m, mpte;
 2815 
 2816         PMAP_LOCK_ASSERT(pmap, MA_OWNED);
 2817         KASSERT((sva & PDRMASK) == 0,
 2818             ("pmap_remove_pde: sva is not 2mpage aligned"));
 2819         oldpde = pte_load_clear(pdq);
 2820         if (oldpde & PG_W)
 2821                 pmap->pm_stats.wired_count -= NBPDR / PAGE_SIZE;
 2822 
 2823         /*
 2824          * Machines that don't support invlpg also don't support
 2825          * PG_G.
 2826          */
 2827         if (oldpde & PG_G)
 2828                 pmap_invalidate_page(kernel_pmap, sva);
 2829         pmap_resident_count_dec(pmap, NBPDR / PAGE_SIZE);
 2830         if (oldpde & PG_MANAGED) {
 2831                 CHANGE_PV_LIST_LOCK_TO_PHYS(lockp, oldpde & PG_PS_FRAME);
 2832                 pvh = pa_to_pvh(oldpde & PG_PS_FRAME);
 2833                 pmap_pvh_free(pvh, pmap, sva);
 2834                 eva = sva + NBPDR;
 2835                 for (va = sva, m = PHYS_TO_VM_PAGE(oldpde & PG_PS_FRAME);
 2836                     va < eva; va += PAGE_SIZE, m++) {
 2837                         if ((oldpde & (PG_M | PG_RW)) == (PG_M | PG_RW))
 2838                                 vm_page_dirty(m);
 2839                         if (oldpde & PG_A)
 2840                                 vm_page_aflag_set(m, PGA_REFERENCED);
 2841                         if (TAILQ_EMPTY(&m->md.pv_list) &&
 2842                             TAILQ_EMPTY(&pvh->pv_list))
 2843                                 vm_page_aflag_clear(m, PGA_WRITEABLE);
 2844                 }
 2845         }
 2846         if (pmap == kernel_pmap) {
 2847                 if (!pmap_demote_pde_locked(pmap, pdq, sva, lockp))
 2848                         panic("pmap_remove_pde: failed demotion");
 2849         } else {
 2850                 mpte = pmap_lookup_pt_page(pmap, sva);
 2851                 if (mpte != NULL) {
 2852                         pmap_remove_pt_page(pmap, mpte);
 2853                         pmap_resident_count_dec(pmap, 1);
 2854                         KASSERT(mpte->wire_count == NPTEPG,
 2855                             ("pmap_remove_pde: pte page wire count error"));
 2856                         mpte->wire_count = 0;
 2857                         pmap_add_delayed_free_list(mpte, free, FALSE);
 2858                         atomic_subtract_int(&cnt.v_wire_count, 1);
 2859                 }
 2860         }
 2861         return (pmap_unuse_pt(pmap, sva, *pmap_pdpe(pmap, sva), free));
 2862 }
 2863 
 2864 /*
 2865  * pmap_remove_pte: Unmap a single 4KB page mapping from a process.
 2866  */
 2867 static int
 2868 pmap_remove_pte(pmap_t pmap, pt_entry_t *ptq, vm_offset_t va, 
 2869     pd_entry_t ptepde, vm_page_t *free, struct rwlock **lockp)
 2870 {
 2871         struct md_page *pvh;
 2872         pt_entry_t oldpte;
 2873         vm_page_t m;
 2874 
 2875         PMAP_LOCK_ASSERT(pmap, MA_OWNED);
 2876         oldpte = pte_load_clear(ptq);
 2877         if (oldpte & PG_W)
 2878                 pmap->pm_stats.wired_count -= 1;
 2879         pmap_resident_count_dec(pmap, 1);
 2880         if (oldpte & PG_MANAGED) {
 2881                 m = PHYS_TO_VM_PAGE(oldpte & PG_FRAME);
 2882                 if ((oldpte & (PG_M | PG_RW)) == (PG_M | PG_RW))
 2883                         vm_page_dirty(m);
 2884                 if (oldpte & PG_A)
 2885                         vm_page_aflag_set(m, PGA_REFERENCED);
 2886                 CHANGE_PV_LIST_LOCK_TO_VM_PAGE(lockp, m);
 2887                 pmap_pvh_free(&m->md, pmap, va);
 2888                 if (TAILQ_EMPTY(&m->md.pv_list) &&
 2889                     (m->flags & PG_FICTITIOUS) == 0) {
 2890                         pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
 2891                         if (TAILQ_EMPTY(&pvh->pv_list))
 2892                                 vm_page_aflag_clear(m, PGA_WRITEABLE);
 2893                 }
 2894         }
 2895         return (pmap_unuse_pt(pmap, va, ptepde, free));
 2896 }
 2897 
 2898 /*
 2899  * Remove a single page from a process address space
 2900  */
 2901 static void
 2902 pmap_remove_page(pmap_t pmap, vm_offset_t va, pd_entry_t *pde, vm_page_t *free)
 2903 {
 2904         struct rwlock *lock;
 2905         pt_entry_t *pte;
 2906 
 2907         PMAP_LOCK_ASSERT(pmap, MA_OWNED);
 2908         if ((*pde & PG_V) == 0)
 2909                 return;
 2910         pte = pmap_pde_to_pte(pde, va);
 2911         if ((*pte & PG_V) == 0)
 2912                 return;
 2913         lock = NULL;
 2914         pmap_remove_pte(pmap, pte, va, *pde, free, &lock);
 2915         if (lock != NULL)
 2916                 rw_wunlock(lock);
 2917         pmap_invalidate_page(pmap, va);
 2918 }
 2919 
 2920 /*
 2921  *      Remove the given range of addresses from the specified map.
 2922  *
 2923  *      It is assumed that the start and end are properly
 2924  *      rounded to the page size.
 2925  */
 2926 void
 2927 pmap_remove(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
 2928 {
 2929         struct rwlock *lock;
 2930         vm_offset_t va, va_next;
 2931         pml4_entry_t *pml4e;
 2932         pdp_entry_t *pdpe;
 2933         pd_entry_t ptpaddr, *pde;
 2934         pt_entry_t *pte;
 2935         vm_page_t free = NULL;
 2936         int anyvalid;
 2937 
 2938         /*
 2939          * This unsynchronized read is safe: zero means nothing to remove.
 2940          */
 2941         if (pmap->pm_stats.resident_count == 0)
 2942                 return;
 2943 
 2944         anyvalid = 0;
 2945 
 2946         rw_rlock(&pvh_global_lock);
 2947         PMAP_LOCK(pmap);
 2948 
 2949         /*
 2950          * Special case: removing a single page.  This is a very
 2951          * common operation, so it is worth short-circuiting the
 2952          * general loop below.
 2953          */
 2954         if (sva + PAGE_SIZE == eva) {
 2955                 pde = pmap_pde(pmap, sva);
 2956                 if (pde && (*pde & PG_PS) == 0) {
 2957                         pmap_remove_page(pmap, sva, pde, &free);
 2958                         goto out;
 2959                 }
 2960         }
 2961 
 2962         lock = NULL;
 2963         for (; sva < eva; sva = va_next) {
 2964 
 2965                 if (pmap->pm_stats.resident_count == 0)
 2966                         break;
 2967 
 2968                 pml4e = pmap_pml4e(pmap, sva);
 2969                 if ((*pml4e & PG_V) == 0) {
 2970                         va_next = (sva + NBPML4) & ~PML4MASK;
 2971                         if (va_next < sva)
 2972                                 va_next = eva;
 2973                         continue;
 2974                 }
 2975 
 2976                 pdpe = pmap_pml4e_to_pdpe(pml4e, sva);
 2977                 if ((*pdpe & PG_V) == 0) {
 2978                         va_next = (sva + NBPDP) & ~PDPMASK;
 2979                         if (va_next < sva)
 2980                                 va_next = eva;
 2981                         continue;
 2982                 }
 2983 
 2984                 /*
 2985                  * Calculate index for next page table.
 2986                  */
 2987                 va_next = (sva + NBPDR) & ~PDRMASK;
 2988                 if (va_next < sva)
 2989                         va_next = eva;
 2990 
 2991                 pde = pmap_pdpe_to_pde(pdpe, sva);
 2992                 ptpaddr = *pde;
 2993 
 2994                 /*
 2995                  * Weed out invalid mappings.
 2996                  */
 2997                 if (ptpaddr == 0)
 2998                         continue;
 2999 
 3000                 /*
 3001                  * Check for large page.
 3002                  */
 3003                 if ((ptpaddr & PG_PS) != 0) {
 3004                         /*
 3005                          * Are we removing the entire large page?  If not,
 3006                          * demote the mapping and fall through.
 3007                          */
 3008                         if (sva + NBPDR == va_next && eva >= va_next) {
 3009                                 /*
 3010                                  * The TLB entry for a PG_G mapping is
 3011                                  * invalidated by pmap_remove_pde().
 3012                                  */
 3013                                 if ((ptpaddr & PG_G) == 0)
 3014                                         anyvalid = 1;
 3015                                 pmap_remove_pde(pmap, pde, sva, &free, &lock);
 3016                                 continue;
 3017                         } else if (!pmap_demote_pde_locked(pmap, pde, sva,
 3018                             &lock)) {
 3019                                 /* The large page mapping was destroyed. */
 3020                                 continue;
 3021                         } else
 3022                                 ptpaddr = *pde;
 3023                 }
 3024 
 3025                 /*
 3026                  * Limit our scan to either the end of the va represented
 3027                  * by the current page table page, or to the end of the
 3028                  * range being removed.
 3029                  */
 3030                 if (va_next > eva)
 3031                         va_next = eva;
 3032 
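                      /*
                       * "va" tracks the start of a run of removed global
                       * (PG_G) mappings so that each run can be flushed
                       * with a single pmap_invalidate_range() call.
                       * Non-global mappings only set "anyvalid" and are
                       * flushed by pmap_invalidate_all() at "out".
                       */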
 3033                 va = va_next;
 3034                 for (pte = pmap_pde_to_pte(pde, sva); sva != va_next; pte++,
 3035                     sva += PAGE_SIZE) {
 3036                         if (*pte == 0) {
 3037                                 if (va != va_next) {
 3038                                         pmap_invalidate_range(pmap, va, sva);
 3039                                         va = va_next;
 3040                                 }
 3041                                 continue;
 3042                         }
 3043                         if ((*pte & PG_G) == 0)
 3044                                 anyvalid = 1;
 3045                         else if (va == va_next)
 3046                                 va = sva;
 3047                         if (pmap_remove_pte(pmap, pte, sva, ptpaddr, &free,
 3048                             &lock)) {
 3049                                 sva += PAGE_SIZE;
 3050                                 break;
 3051                         }
 3052                 }
 3053                 if (va != va_next)
 3054                         pmap_invalidate_range(pmap, va, sva);
 3055         }
 3056         if (lock != NULL)
 3057                 rw_wunlock(lock);
 3058 out:
 3059         if (anyvalid)
 3060                 pmap_invalidate_all(pmap);
 3061         rw_runlock(&pvh_global_lock);   
 3062         PMAP_UNLOCK(pmap);
 3063         pmap_free_zero_pages(free);
 3064 }
 3065 
 3066 /*
 3067  *      Routine:        pmap_remove_all
 3068  *      Function:
 3069  *              Removes this physical page from
 3070  *              all physical maps in which it resides.
 3071  *              Reflects back modify bits to the pager.
 3072  *
 3073  *      Notes:
 3074  *              Original versions of this routine were very
 3075  *              inefficient because they iteratively called
 3076  *              pmap_remove (slow...)
 3077  */
 3078 
 3079 void
 3080 pmap_remove_all(vm_page_t m)
 3081 {
 3082         struct md_page *pvh;
 3083         pv_entry_t pv;
 3084         pmap_t pmap;
 3085         pt_entry_t *pte, tpte;
 3086         pd_entry_t *pde;
 3087         vm_offset_t va;
 3088         vm_page_t free;
 3089 
 3090         KASSERT((m->oflags & VPO_UNMANAGED) == 0,
 3091             ("pmap_remove_all: page %p is not managed", m));
 3092         free = NULL;
 3093         rw_wlock(&pvh_global_lock);
 3094         if ((m->flags & PG_FICTITIOUS) != 0)
 3095                 goto small_mappings;
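              /*
               * First demote every 2MB mapping of the page; the resulting
               * 4KB mappings are then removed by the loop below.
               */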
 3096         pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
 3097         while ((pv = TAILQ_FIRST(&pvh->pv_list)) != NULL) {
 3098                 pmap = PV_PMAP(pv);
 3099                 PMAP_LOCK(pmap);
 3100                 va = pv->pv_va;
 3101                 pde = pmap_pde(pmap, va);
 3102                 (void)pmap_demote_pde(pmap, pde, va);
 3103                 PMAP_UNLOCK(pmap);
 3104         }
 3105 small_mappings:
 3106         while ((pv = TAILQ_FIRST(&m->md.pv_list)) != NULL) {
 3107                 pmap = PV_PMAP(pv);
 3108                 PMAP_LOCK(pmap);
 3109                 pmap_resident_count_dec(pmap, 1);
 3110                 pde = pmap_pde(pmap, pv->pv_va);
 3111                 KASSERT((*pde & PG_PS) == 0, ("pmap_remove_all: found"
 3112                     " a 2mpage in page %p's pv list", m));
 3113                 pte = pmap_pde_to_pte(pde, pv->pv_va);
 3114                 tpte = pte_load_clear(pte);
 3115                 if (tpte & PG_W)
 3116                         pmap->pm_stats.wired_count--;
 3117                 if (tpte & PG_A)
 3118                         vm_page_aflag_set(m, PGA_REFERENCED);
 3119 
 3120                 /*
 3121                  * Update the vm_page_t clean and reference bits.
 3122                  */
 3123                 if ((tpte & (PG_M | PG_RW)) == (PG_M | PG_RW))
 3124                         vm_page_dirty(m);
 3125                 pmap_unuse_pt(pmap, pv->pv_va, *pde, &free);
 3126                 pmap_invalidate_page(pmap, pv->pv_va);
 3127                 TAILQ_REMOVE(&m->md.pv_list, pv, pv_list);
 3128                 free_pv_entry(pmap, pv);
 3129                 PMAP_UNLOCK(pmap);
 3130         }
 3131         vm_page_aflag_clear(m, PGA_WRITEABLE);
 3132         rw_wunlock(&pvh_global_lock);
 3133         pmap_free_zero_pages(free);
 3134 }
 3135 
 3136 /*
 3137  * pmap_protect_pde: Apply the given protection to a 2MB page mapping.
 3138  */
 3139 static boolean_t
 3140 pmap_protect_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t sva, vm_prot_t prot)
 3141 {
 3142         pd_entry_t newpde, oldpde;
 3143         vm_offset_t eva, va;
 3144         vm_page_t m;
 3145         boolean_t anychanged;
 3146 
 3147         PMAP_LOCK_ASSERT(pmap, MA_OWNED);
 3148         KASSERT((sva & PDRMASK) == 0,
 3149             ("pmap_protect_pde: sva is not 2mpage aligned"));
 3150         anychanged = FALSE;
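              /*
               * Update the PDE with a compare-and-set loop so that a
               * concurrent hardware update of PG_A or PG_M between the
               * read and the write is not lost.
               */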
 3151 retry:
 3152         oldpde = newpde = *pde;
 3153         if (oldpde & PG_MANAGED) {
 3154                 eva = sva + NBPDR;
 3155                 for (va = sva, m = PHYS_TO_VM_PAGE(oldpde & PG_PS_FRAME);
 3156                     va < eva; va += PAGE_SIZE, m++)
 3157                         if ((oldpde & (PG_M | PG_RW)) == (PG_M | PG_RW))
 3158                                 vm_page_dirty(m);
 3159         }
 3160         if ((prot & VM_PROT_WRITE) == 0)
 3161                 newpde &= ~(PG_RW | PG_M);
 3162         if ((prot & VM_PROT_EXECUTE) == 0)
 3163                 newpde |= pg_nx;
 3164         if (newpde != oldpde) {
 3165                 if (!atomic_cmpset_long(pde, oldpde, newpde))
 3166                         goto retry;
 3167                 if (oldpde & PG_G)
 3168                         pmap_invalidate_page(pmap, sva);
 3169                 else
 3170                         anychanged = TRUE;
 3171         }
 3172         return (anychanged);
 3173 }
 3174 
 3175 /*
 3176  *      Set the physical protection on the
 3177  *      specified range of this map as requested.
 3178  */
 3179 void
 3180 pmap_protect(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot)
 3181 {
 3182         vm_offset_t va_next;
 3183         pml4_entry_t *pml4e;
 3184         pdp_entry_t *pdpe;
 3185         pd_entry_t ptpaddr, *pde;
 3186         pt_entry_t *pte;
 3187         boolean_t anychanged, pv_lists_locked;
 3188 
 3189         if ((prot & VM_PROT_READ) == VM_PROT_NONE) {
 3190                 pmap_remove(pmap, sva, eva);
 3191                 return;
 3192         }
 3193 
 3194         if ((prot & (VM_PROT_WRITE|VM_PROT_EXECUTE)) ==
 3195             (VM_PROT_WRITE|VM_PROT_EXECUTE))
 3196                 return;
 3197 
 3198         pv_lists_locked = FALSE;
 3199 resume:
 3200         anychanged = FALSE;
 3201 
 3202         PMAP_LOCK(pmap);
 3203         for (; sva < eva; sva = va_next) {
 3204 
 3205                 pml4e = pmap_pml4e(pmap, sva);
 3206                 if ((*pml4e & PG_V) == 0) {
 3207                         va_next = (sva + NBPML4) & ~PML4MASK;
 3208                         if (va_next < sva)
 3209                                 va_next = eva;
 3210                         continue;
 3211                 }
 3212 
 3213                 pdpe = pmap_pml4e_to_pdpe(pml4e, sva);
 3214                 if ((*pdpe & PG_V) == 0) {
 3215                         va_next = (sva + NBPDP) & ~PDPMASK;
 3216                         if (va_next < sva)
 3217                                 va_next = eva;
 3218                         continue;
 3219                 }
 3220 
 3221                 va_next = (sva + NBPDR) & ~PDRMASK;
 3222                 if (va_next < sva)
 3223                         va_next = eva;
 3224 
 3225                 pde = pmap_pdpe_to_pde(pdpe, sva);
 3226                 ptpaddr = *pde;
 3227 
 3228                 /*
 3229                  * Weed out invalid mappings.
 3230                  */
 3231                 if (ptpaddr == 0)
 3232                         continue;
 3233 
 3234                 /*
 3235                  * Check for large page.
 3236                  */
 3237                 if ((ptpaddr & PG_PS) != 0) {
 3238                         /*
 3239                          * Are we protecting the entire large page?  If not,
 3240                          * demote the mapping and fall through.
 3241                          */
 3242                         if (sva + NBPDR == va_next && eva >= va_next) {
 3243                                 /*
 3244                                  * The TLB entry for a PG_G mapping is
 3245                                  * invalidated by pmap_protect_pde().
 3246                                  */
 3247                                 if (pmap_protect_pde(pmap, pde, sva, prot))
 3248                                         anychanged = TRUE;
 3249                                 continue;
 3250                         } else {
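                                      /*
                                       * Demotion may allocate PV entries
                                       * and so needs the pv list lock.
                                       * If it cannot be taken without
                                       * blocking, drop the pmap lock
                                       * (flushing pending invalidations
                                       * first) and restart at "resume"
                                       * to preserve the lock order.
                                       */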
 3251                                 if (!pv_lists_locked) {
 3252                                         pv_lists_locked = TRUE;
 3253                                         if (!rw_try_rlock(&pvh_global_lock)) {
 3254                                                 if (anychanged)
 3255                                                         pmap_invalidate_all(
 3256                                                             pmap);
 3257                                                 PMAP_UNLOCK(pmap);
 3258                                                 rw_rlock(&pvh_global_lock);
 3259                                                 goto resume;
 3260                                         }
 3261                                 }
 3262                                 if (!pmap_demote_pde(pmap, pde, sva)) {
 3263                                         /*
 3264                                          * The large page mapping was
 3265                                          * destroyed.
 3266                                          */
 3267                                         continue;
 3268                                 }
 3269                         }
 3270                 }
 3271 
 3272                 if (va_next > eva)
 3273                         va_next = eva;
 3274 
 3275                 for (pte = pmap_pde_to_pte(pde, sva); sva != va_next; pte++,
 3276                     sva += PAGE_SIZE) {
 3277                         pt_entry_t obits, pbits;
 3278                         vm_page_t m;
 3279 
 3280 retry:
 3281                         obits = pbits = *pte;
 3282                         if ((pbits & PG_V) == 0)
 3283                                 continue;
 3284 
 3285                         if ((prot & VM_PROT_WRITE) == 0) {
 3286                                 if ((pbits & (PG_MANAGED | PG_M | PG_RW)) ==
 3287                                     (PG_MANAGED | PG_M | PG_RW)) {
 3288                                         m = PHYS_TO_VM_PAGE(pbits & PG_FRAME);
 3289                                         vm_page_dirty(m);
 3290                                 }
 3291                                 pbits &= ~(PG_RW | PG_M);
 3292                         }
 3293                         if ((prot & VM_PROT_EXECUTE) == 0)
 3294                                 pbits |= pg_nx;
 3295 
 3296                         if (pbits != obits) {
 3297                                 if (!atomic_cmpset_long(pte, obits, pbits))
 3298                                         goto retry;
 3299                                 if (obits & PG_G)
 3300                                         pmap_invalidate_page(pmap, sva);
 3301                                 else
 3302                                         anychanged = TRUE;
 3303                         }
 3304                 }
 3305         }
 3306         if (anychanged)
 3307                 pmap_invalidate_all(pmap);
 3308         if (pv_lists_locked)
 3309                 rw_runlock(&pvh_global_lock);
 3310         PMAP_UNLOCK(pmap);
 3311 }
 3312 
 3313 /*
 3314  * Tries to promote the 512, contiguous 4KB page mappings that are within a
 3315  * single page table page (PTP) to a single 2MB page mapping.  For promotion
 3316  * to occur, two conditions must be met: (1) the 4KB page mappings must map
 3317  * aligned, contiguous physical memory and (2) the 4KB page mappings must have
 3318  * identical characteristics. 
 3319  */
 3320 static void
 3321 pmap_promote_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t va,
 3322     struct rwlock **lockp)
 3323 {
 3324         pd_entry_t newpde;
 3325         pt_entry_t *firstpte, oldpte, pa, *pte;
 3326         vm_offset_t oldpteva;
 3327         vm_page_t mpte;
 3328 
 3329         PMAP_LOCK_ASSERT(pmap, MA_OWNED);
 3330 
 3331         /*
 3332          * Examine the first PTE in the specified PTP.  Abort if this PTE is
 3333          * either invalid, unused, or does not map the first 4KB physical page
 3334          * within a 2MB page. 
 3335          */
 3336         firstpte = (pt_entry_t *)PHYS_TO_DMAP(*pde & PG_FRAME);
 3337 setpde:
 3338         newpde = *firstpte;
 3339         if ((newpde & ((PG_FRAME & PDRMASK) | PG_A | PG_V)) != (PG_A | PG_V)) {
 3340                 atomic_add_long(&pmap_pde_p_failures, 1);
 3341                 CTR2(KTR_PMAP, "pmap_promote_pde: failure for va %#lx"
 3342                     " in pmap %p", va, pmap);
 3343                 return;
 3344         }
 3345         if ((newpde & (PG_M | PG_RW)) == PG_RW) {
 3346                 /*
 3347                  * When PG_M is already clear, PG_RW can be cleared without
 3348                  * a TLB invalidation.
 3349                  */
 3350                 if (!atomic_cmpset_long(firstpte, newpde, newpde & ~PG_RW))
 3351                         goto setpde;
 3352                 newpde &= ~PG_RW;
 3353         }
 3354 
 3355         /*
 3356          * Examine each of the other PTEs in the specified PTP.  Abort if this
 3357          * PTE maps an unexpected 4KB physical page or does not have identical
 3358          * characteristics to the first PTE.
 3359          */
 3360         pa = (newpde & (PG_PS_FRAME | PG_A | PG_V)) + NBPDR - PAGE_SIZE;
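              /*
               * Scan the remaining PTEs from last to first.  "pa" holds
               * the physical address (plus PG_A and PG_V) that the last
               * PTE must contain and is decremented by PAGE_SIZE on each
               * iteration.
               */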
 3361         for (pte = firstpte + NPTEPG - 1; pte > firstpte; pte--) {
 3362 setpte:
 3363                 oldpte = *pte;
 3364                 if ((oldpte & (PG_FRAME | PG_A | PG_V)) != pa) {
 3365                         atomic_add_long(&pmap_pde_p_failures, 1);
 3366                         CTR2(KTR_PMAP, "pmap_promote_pde: failure for va %#lx"
 3367                             " in pmap %p", va, pmap);
 3368                         return;
 3369                 }
 3370                 if ((oldpte & (PG_M | PG_RW)) == PG_RW) {
 3371                         /*
 3372                          * When PG_M is already clear, PG_RW can be cleared
 3373                          * without a TLB invalidation.
 3374                          */
 3375                         if (!atomic_cmpset_long(pte, oldpte, oldpte & ~PG_RW))
 3376                                 goto setpte;
 3377                         oldpte &= ~PG_RW;
 3378                         oldpteva = (oldpte & PG_FRAME & PDRMASK) |
 3379                             (va & ~PDRMASK);
 3380                         CTR2(KTR_PMAP, "pmap_promote_pde: protect for va %#lx"
 3381                             " in pmap %p", oldpteva, pmap);
 3382                 }
 3383                 if ((oldpte & PG_PTE_PROMOTE) != (newpde & PG_PTE_PROMOTE)) {
 3384                         atomic_add_long(&pmap_pde_p_failures, 1);
 3385                         CTR2(KTR_PMAP, "pmap_promote_pde: failure for va %#lx"
 3386                             " in pmap %p", va, pmap);
 3387                         return;
 3388                 }
 3389                 pa -= PAGE_SIZE;
 3390         }
 3391 
 3392         /*
 3393          * Save the page table page in its current state until the PDE
 3394          * mapping the superpage is demoted by pmap_demote_pde() or
 3395          * destroyed by pmap_remove_pde(). 
 3396          */
 3397         mpte = PHYS_TO_VM_PAGE(*pde & PG_FRAME);
 3398         KASSERT(mpte >= vm_page_array &&
 3399             mpte < &vm_page_array[vm_page_array_size],
 3400             ("pmap_promote_pde: page table page is out of range"));
 3401         KASSERT(mpte->pindex == pmap_pde_pindex(va),
 3402             ("pmap_promote_pde: page table page's pindex is wrong"));
 3403         pmap_insert_pt_page(pmap, mpte);
 3404 
 3405         /*
 3406          * Promote the pv entries.
 3407          */
 3408         if ((newpde & PG_MANAGED) != 0)
 3409                 pmap_pv_promote_pde(pmap, va, newpde & PG_PS_FRAME, lockp);
 3410 
 3411         /*
 3412          * Propagate the PAT index to its proper position.
 3413          */
 3414         if ((newpde & PG_PTE_PAT) != 0)
 3415                 newpde ^= PG_PDE_PAT | PG_PTE_PAT;
 3416 
 3417         /*
 3418          * Map the superpage.
 3419          */
 3420         if (workaround_erratum383)
 3421                 pmap_update_pde(pmap, va, pde, PG_PS | newpde);
 3422         else
 3423                 pde_store(pde, PG_PS | newpde);
 3424 
 3425         atomic_add_long(&pmap_pde_promotions, 1);
 3426         CTR2(KTR_PMAP, "pmap_promote_pde: success for va %#lx"
 3427             " in pmap %p", va, pmap);
 3428 }
 3429 
 3430 /*
 3431  *      Insert the given physical page (p) at
 3432  *      the specified virtual address (v) in the
 3433  *      target physical map with the protection requested.
 3434  *
 3435  *      If specified, the page will be wired down, meaning
 3436  *      that the related pte can not be reclaimed.
 3437  *
 3438  *      NB:  This is the only routine which MAY NOT lazy-evaluate
 3439  *      or lose information.  That is, this routine must actually
 3440  *      insert this page into the given map NOW.
 3441  */
 3442 void
 3443 pmap_enter(pmap_t pmap, vm_offset_t va, vm_prot_t access, vm_page_t m,
 3444     vm_prot_t prot, boolean_t wired)
 3445 {
 3446         struct rwlock *lock;
 3447         pd_entry_t *pde;
 3448         pt_entry_t *pte;
 3449         pt_entry_t newpte, origpte;
 3450         pv_entry_t pv;
 3451         vm_paddr_t opa, pa;
 3452         vm_page_t mpte, om;
 3453 
 3454         va = trunc_page(va);
 3455         KASSERT(va <= VM_MAX_KERNEL_ADDRESS, ("pmap_enter: toobig"));
 3456         KASSERT(va < UPT_MIN_ADDRESS || va >= UPT_MAX_ADDRESS,
 3457             ("pmap_enter: invalid to pmap_enter page table pages (va: 0x%lx)",
 3458             va));
 3459         KASSERT((m->oflags & VPO_UNMANAGED) != 0 || va < kmi.clean_sva ||
 3460             va >= kmi.clean_eva,
 3461             ("pmap_enter: managed mapping within the clean submap"));
 3462         KASSERT((m->oflags & (VPO_UNMANAGED | VPO_BUSY)) != 0 ||
 3463             VM_OBJECT_LOCKED(m->object),
 3464             ("pmap_enter: page %p is not busy", m));
 3465         pa = VM_PAGE_TO_PHYS(m);
 3466         newpte = (pt_entry_t)(pa | PG_A | PG_V);
 3467         if ((access & VM_PROT_WRITE) != 0)
 3468                 newpte |= PG_M;
 3469         if ((prot & VM_PROT_WRITE) != 0)
 3470                 newpte |= PG_RW;
 3471         KASSERT((newpte & (PG_M | PG_RW)) != PG_M,
 3472             ("pmap_enter: access includes VM_PROT_WRITE but prot doesn't"));
 3473         if ((prot & VM_PROT_EXECUTE) == 0)
 3474                 newpte |= pg_nx;
 3475         if (wired)
 3476                 newpte |= PG_W;
 3477         if (va < VM_MAXUSER_ADDRESS)
 3478                 newpte |= PG_U;
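              /*
               * Kernel mappings are marked global (PG_G) so that they are
               * not flushed from the TLB by the CR3 reload on a context
               * switch.
               */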
 3479         if (pmap == kernel_pmap)
 3480                 newpte |= PG_G;
 3481         newpte |= pmap_cache_bits(m->md.pat_mode, 0);
 3482 
 3483         mpte = NULL;
 3484 
 3485         lock = NULL;
 3486         rw_rlock(&pvh_global_lock);
 3487         PMAP_LOCK(pmap);
 3488 
 3489         /*
 3490          * In the case that a page table page is not
 3491          * resident, we are creating it here.
 3492          */
 3493 retry:
 3494         pde = pmap_pde(pmap, va);
 3495         if (pde != NULL && (*pde & PG_V) != 0 && ((*pde & PG_PS) == 0 ||
 3496             pmap_demote_pde_locked(pmap, pde, va, &lock))) {
 3497                 pte = pmap_pde_to_pte(pde, va);
 3498                 if (va < VM_MAXUSER_ADDRESS && mpte == NULL) {
 3499                         mpte = PHYS_TO_VM_PAGE(*pde & PG_FRAME);
 3500                         mpte->wire_count++;
 3501                 }
 3502         } else if (va < VM_MAXUSER_ADDRESS) {
 3503                 /*
 3504                  * Here if the pte page isn't mapped, or if it has been
 3505                  * deallocated.
 3506                  */
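                      /*
                       * Unlike pmap_enter_quick_locked(), a PV list lock
                       * pointer is passed here, so _pmap_allocpte() may
                       * sleep for memory, temporarily dropping the locks;
                       * re-evaluate the page directory after it returns.
                       */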
 3507                 mpte = _pmap_allocpte(pmap, pmap_pde_pindex(va), &lock);
 3508                 goto retry;
 3509         } else
 3510                 panic("pmap_enter: invalid page directory va=%#lx", va);
 3511 
 3512         origpte = *pte;
 3513 
 3514         /*
 3515          * Is the specified virtual address already mapped?
 3516          */
 3517         if ((origpte & PG_V) != 0) {
 3518                 /*
 3519                  * Wiring change, just update stats. We don't worry about
 3520                  * wiring PT pages as they remain resident as long as there
 3521                  * are valid mappings in them. Hence, if a user page is wired,
 3522                  * the PT page will be also.
 3523                  */
 3524                 if ((newpte & PG_W) != 0 && (origpte & PG_W) == 0)
 3525                         pmap->pm_stats.wired_count++;
 3526                 else if ((newpte & PG_W) == 0 && (origpte & PG_W) != 0)
 3527                         pmap->pm_stats.wired_count--;
 3528 
 3529                 /*
 3530                  * Remove the extra PT page reference.
 3531                  */
 3532                 if (mpte != NULL) {
 3533                         mpte->wire_count--;
 3534                         KASSERT(mpte->wire_count > 0,
 3535                             ("pmap_enter: missing reference to page table page,"
 3536                              " va: 0x%lx", va));
 3537                 }
 3538 
 3539                 /*
 3540                  * Has the physical page changed?
 3541                  */
 3542                 opa = origpte & PG_FRAME;
 3543                 if (opa == pa) {
 3544                         /*
 3545                          * No, might be a protection or wiring change.
 3546                          */
 3547                         if ((origpte & PG_MANAGED) != 0) {
 3548                                 newpte |= PG_MANAGED;
 3549                                 if ((newpte & PG_RW) != 0)
 3550                                         vm_page_aflag_set(m, PGA_WRITEABLE);
 3551                         }
 3552                         if (((origpte ^ newpte) & ~(PG_M | PG_A)) == 0)
 3553                                 goto unchanged;
 3554                         goto validate;
 3555                 }
 3556         } else {
 3557                 /*
 3558                  * Increment the counters.
 3559                  */
 3560                 if ((newpte & PG_W) != 0)
 3561                         pmap->pm_stats.wired_count++;
 3562                 pmap_resident_count_inc(pmap, 1);
 3563         }
 3564 
 3565         /*
 3566          * Enter on the PV list if part of our managed memory.
 3567          */
 3568         if ((m->oflags & VPO_UNMANAGED) == 0) {
 3569                 newpte |= PG_MANAGED;
 3570                 pv = get_pv_entry(pmap, &lock);
 3571                 pv->pv_va = va;
 3572                 CHANGE_PV_LIST_LOCK_TO_PHYS(&lock, pa);
 3573                 TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_list);
 3574                 if ((newpte & PG_RW) != 0)
 3575                         vm_page_aflag_set(m, PGA_WRITEABLE);
 3576         }
 3577 
 3578         /*
 3579          * Update the PTE.
 3580          */
 3581         if ((origpte & PG_V) != 0) {
 3582 validate:
 3583                 origpte = pte_load_store(pte, newpte);
 3584                 opa = origpte & PG_FRAME;
 3585                 if (opa != pa) {
 3586                         if ((origpte & PG_MANAGED) != 0) {
 3587                                 om = PHYS_TO_VM_PAGE(opa);
 3588                                 if ((origpte & (PG_M | PG_RW)) == (PG_M |
 3589                                     PG_RW))
 3590                                         vm_page_dirty(om);
 3591                                 if ((origpte & PG_A) != 0)
 3592                                         vm_page_aflag_set(om, PGA_REFERENCED);
 3593                                 CHANGE_PV_LIST_LOCK_TO_PHYS(&lock, opa);
 3594                                 pmap_pvh_free(&om->md, pmap, va);
 3595                                 if ((om->aflags & PGA_WRITEABLE) != 0 &&
 3596                                     TAILQ_EMPTY(&om->md.pv_list) &&
 3597                                     ((om->flags & PG_FICTITIOUS) != 0 ||
 3598                                     TAILQ_EMPTY(&pa_to_pvh(opa)->pv_list)))
 3599                                         vm_page_aflag_clear(om, PGA_WRITEABLE);
 3600                         }
 3601                 } else if ((newpte & PG_M) == 0 && (origpte & (PG_M |
 3602                     PG_RW)) == (PG_M | PG_RW)) {
 3603                         if ((origpte & PG_MANAGED) != 0)
 3604                                 vm_page_dirty(m);
 3605 
 3606                         /*
 3607                          * Although the PTE may still have PG_RW set, TLB
 3608                          * invalidation may nonetheless be required because
 3609                          * the PTE no longer has PG_M set.
 3610                          */
 3611                 } else if ((origpte & PG_NX) != 0 || (newpte & PG_NX) == 0) {
 3612                         /*
 3613                          * This PTE change does not require TLB invalidation.
 3614                          */
 3615                         goto unchanged;
 3616                 }
 3617                 if ((origpte & PG_A) != 0)
 3618                         pmap_invalidate_page(pmap, va);
 3619         } else
 3620                 pte_store(pte, newpte);
 3621 
 3622 unchanged:
 3623 
 3624         /*
 3625          * If both the page table page and the reservation are fully
 3626          * populated, then attempt promotion.
 3627          */
 3628         if ((mpte == NULL || mpte->wire_count == NPTEPG) &&
 3629             pg_ps_enabled && (m->flags & PG_FICTITIOUS) == 0 &&
 3630             vm_reserv_level_iffullpop(m) == 0)
 3631                 pmap_promote_pde(pmap, pde, va, &lock);
 3632 
 3633         if (lock != NULL)
 3634                 rw_wunlock(lock);
 3635         rw_runlock(&pvh_global_lock);
 3636         PMAP_UNLOCK(pmap);
 3637 }
 3638 
 3639 /*
 3640  * Tries to create a 2MB page mapping.  Returns TRUE if successful and FALSE
 3641  * otherwise.  Fails if (1) a page table page cannot be allocated without
 3642  * blocking, (2) a mapping already exists at the specified virtual address, or
 3643  * (3) a pv entry cannot be allocated without reclaiming another pv entry. 
 3644  */
 3645 static boolean_t
 3646 pmap_enter_pde(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
 3647     struct rwlock **lockp)
 3648 {
 3649         pd_entry_t *pde, newpde;
 3650         vm_page_t free, mpde;
 3651 
 3652         rw_assert(&pvh_global_lock, RA_LOCKED);
 3653         PMAP_LOCK_ASSERT(pmap, MA_OWNED);
 3654         if ((mpde = pmap_allocpde(pmap, va, NULL)) == NULL) {
 3655                 CTR2(KTR_PMAP, "pmap_enter_pde: failure for va %#lx"
 3656                     " in pmap %p", va, pmap);
 3657                 return (FALSE);
 3658         }
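              /*
               * pmap_allocpde() returned the page directory page; locate
               * the PDE for "va" within it through the direct map.
               */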
 3659         pde = (pd_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(mpde));
 3660         pde = &pde[pmap_pde_index(va)];
 3661         if ((*pde & PG_V) != 0) {
 3662                 KASSERT(mpde->wire_count > 1,
 3663                     ("pmap_enter_pde: mpde's wire count is too low"));
 3664                 mpde->wire_count--;
 3665                 CTR2(KTR_PMAP, "pmap_enter_pde: failure for va %#lx"
 3666                     " in pmap %p", va, pmap);
 3667                 return (FALSE);
 3668         }
 3669         newpde = VM_PAGE_TO_PHYS(m) | pmap_cache_bits(m->md.pat_mode, 1) |
 3670             PG_PS | PG_V;
 3671         if ((m->oflags & VPO_UNMANAGED) == 0) {
 3672                 newpde |= PG_MANAGED;
 3673 
 3674                 /*
 3675                  * Abort this mapping if its PV entry could not be created.
 3676                  */
 3677                 if (!pmap_pv_insert_pde(pmap, va, VM_PAGE_TO_PHYS(m),
 3678                     lockp)) {
 3679                         free = NULL;
 3680                         if (pmap_unwire_ptp(pmap, va, mpde, &free)) {
 3681                                 pmap_invalidate_page(pmap, va);
 3682                                 pmap_free_zero_pages(free);
 3683                         }
 3684                         CTR2(KTR_PMAP, "pmap_enter_pde: failure for va %#lx"
 3685                             " in pmap %p", va, pmap);
 3686                         return (FALSE);
 3687                 }
 3688         }
 3689         if ((prot & VM_PROT_EXECUTE) == 0)
 3690                 newpde |= pg_nx;
 3691         if (va < VM_MAXUSER_ADDRESS)
 3692                 newpde |= PG_U;
 3693 
 3694         /*
 3695          * Increment counters.
 3696          */
 3697         pmap_resident_count_inc(pmap, NBPDR / PAGE_SIZE);
 3698 
 3699         /*
 3700          * Map the superpage.
 3701          */
 3702         pde_store(pde, newpde);
 3703 
 3704         atomic_add_long(&pmap_pde_mappings, 1);
 3705         CTR2(KTR_PMAP, "pmap_enter_pde: success for va %#lx"
 3706             " in pmap %p", va, pmap);
 3707         return (TRUE);
 3708 }
 3709 
 3710 /*
 3711  * Maps a sequence of resident pages belonging to the same object.
 3712  * The sequence begins with the given page m_start.  This page is
 3713  * mapped at the given virtual address start.  Each subsequent page is
 3714  * mapped at a virtual address that is offset from start by the same
 3715  * amount as the page is offset from m_start within the object.  The
 3716  * last page in the sequence is the page with the largest offset from
 3717  * m_start that can be mapped at a virtual address less than the given
 3718  * virtual address end.  Not every virtual page between start and end
 3719  * is mapped; only those for which a resident page exists with the
 3720  * corresponding offset from m_start are mapped.
 3721  */
 3722 void
 3723 pmap_enter_object(pmap_t pmap, vm_offset_t start, vm_offset_t end,
 3724     vm_page_t m_start, vm_prot_t prot)
 3725 {
 3726         struct rwlock *lock;
 3727         vm_offset_t va;
 3728         vm_page_t m, mpte;
 3729         vm_pindex_t diff, psize;
 3730 
 3731         VM_OBJECT_LOCK_ASSERT(m_start->object, MA_OWNED);
 3732         psize = atop(end - start);
 3733         mpte = NULL;
 3734         m = m_start;
 3735         lock = NULL;
 3736         rw_rlock(&pvh_global_lock);
 3737         PMAP_LOCK(pmap);
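              /*
               * Create a 2MB page mapping whenever the virtual and
               * physical addresses are 2MB aligned, the 2MB range ends at
               * or before "end", superpages are enabled, and the page's
               * reservation is fully populated; otherwise fall back to a
               * 4KB mapping via pmap_enter_quick_locked().
               */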
 3738         while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) {
 3739                 va = start + ptoa(diff);
 3740                 if ((va & PDRMASK) == 0 && va + NBPDR <= end &&
 3741                     (VM_PAGE_TO_PHYS(m) & PDRMASK) == 0 &&
 3742                     pg_ps_enabled && vm_reserv_level_iffullpop(m) == 0 &&
 3743                     pmap_enter_pde(pmap, va, m, prot, &lock))
 3744                         m = &m[NBPDR / PAGE_SIZE - 1];
 3745                 else
 3746                         mpte = pmap_enter_quick_locked(pmap, va, m, prot,
 3747                             mpte, &lock);
 3748                 m = TAILQ_NEXT(m, listq);
 3749         }
 3750         if (lock != NULL)
 3751                 rw_wunlock(lock);
 3752         rw_runlock(&pvh_global_lock);
 3753         PMAP_UNLOCK(pmap);
 3754 }
 3755 
 3756 /*
 3757  * This code makes some *MAJOR* assumptions:
 3758  * 1. The current pmap and the target pmap exist.
 3759  * 2. The mapping is not wired.
 3760  * 3. Read access only.
 3761  * 4. No page table pages.
 3762  * but it is *MUCH* faster than pmap_enter()...
 3763  */
 3764 
 3765 void
 3766 pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot)
 3767 {
 3768         struct rwlock *lock;
 3769 
 3770         lock = NULL;
 3771         rw_rlock(&pvh_global_lock);
 3772         PMAP_LOCK(pmap);
 3773         (void)pmap_enter_quick_locked(pmap, va, m, prot, NULL, &lock);
 3774         if (lock != NULL)
 3775                 rw_wunlock(lock);
 3776         rw_runlock(&pvh_global_lock);
 3777         PMAP_UNLOCK(pmap);
 3778 }
 3779 
 3780 static vm_page_t
 3781 pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va, vm_page_t m,
 3782     vm_prot_t prot, vm_page_t mpte, struct rwlock **lockp)
 3783 {
 3784         vm_page_t free;
 3785         pt_entry_t *pte;
 3786         vm_paddr_t pa;
 3787 
 3788         KASSERT(va < kmi.clean_sva || va >= kmi.clean_eva ||
 3789             (m->oflags & VPO_UNMANAGED) != 0,
 3790             ("pmap_enter_quick_locked: managed mapping within the clean submap"));
 3791         rw_assert(&pvh_global_lock, RA_LOCKED);
 3792         PMAP_LOCK_ASSERT(pmap, MA_OWNED);
 3793 
 3794         /*
 3795          * In the case that a page table page is not
 3796          * resident, we are creating it here.
 3797          */
 3798         if (va < VM_MAXUSER_ADDRESS) {
 3799                 vm_pindex_t ptepindex;
 3800                 pd_entry_t *ptepa;
 3801 
 3802                 /*
 3803                  * Calculate pagetable page index
 3804                  */
 3805                 ptepindex = pmap_pde_pindex(va);
 3806                 if (mpte && (mpte->pindex == ptepindex)) {
 3807                         mpte->wire_count++;
 3808                 } else {
 3809                         /*
 3810                          * Get the page directory entry
 3811                          */
 3812                         ptepa = pmap_pde(pmap, va);
 3813 
 3814                         /*
 3815                          * If the page table page is mapped, we just increment
 3816                          * the hold count, and activate it.  Otherwise, we
 3817                          * attempt to allocate a page table page.  If this
 3818                          * attempt fails, we don't retry.  Instead, we give up.
 3819                          */
 3820                         if (ptepa && (*ptepa & PG_V) != 0) {
 3821                                 if (*ptepa & PG_PS)
 3822                                         return (NULL);
 3823                                 mpte = PHYS_TO_VM_PAGE(*ptepa & PG_FRAME);
 3824                                 mpte->wire_count++;
 3825                         } else {
 3826                                 /*
 3827                                  * Pass NULL instead of the PV list lock
 3828                                  * pointer, because we don't intend to sleep.
 3829                                  */
 3830                                 mpte = _pmap_allocpte(pmap, ptepindex, NULL);
 3831                                 if (mpte == NULL)
 3832                                         return (mpte);
 3833                         }
 3834                 }
 3835                 pte = (pt_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(mpte));
 3836                 pte = &pte[pmap_pte_index(va)];
 3837         } else {
 3838                 mpte = NULL;
 3839                 pte = vtopte(va);
 3840         }
 3841         if (*pte) {
 3842                 if (mpte != NULL) {
 3843                         mpte->wire_count--;
 3844                         mpte = NULL;
 3845                 }
 3846                 return (mpte);
 3847         }
 3848 
 3849         /*
 3850          * Enter on the PV list if part of our managed memory.
 3851          */
 3852         if ((m->oflags & VPO_UNMANAGED) == 0 &&
 3853             !pmap_try_insert_pv_entry(pmap, va, m, lockp)) {
 3854                 if (mpte != NULL) {
 3855                         free = NULL;
 3856                         if (pmap_unwire_ptp(pmap, va, mpte, &free)) {
 3857                                 pmap_invalidate_page(pmap, va);
 3858                                 pmap_free_zero_pages(free);
 3859                         }
 3860                         mpte = NULL;
 3861                 }
 3862                 return (mpte);
 3863         }
 3864 
 3865         /*
 3866          * Increment counters
 3867          */
 3868         pmap_resident_count_inc(pmap, 1);
 3869 
 3870         pa = VM_PAGE_TO_PHYS(m) | pmap_cache_bits(m->md.pat_mode, 0);
 3871         if ((prot & VM_PROT_EXECUTE) == 0)
 3872                 pa |= pg_nx;
 3873 
 3874         /*
 3875          * Now validate mapping with RO protection
 3876          */
 3877         if ((m->oflags & VPO_UNMANAGED) != 0)
 3878                 pte_store(pte, pa | PG_V | PG_U);
 3879         else
 3880                 pte_store(pte, pa | PG_V | PG_U | PG_MANAGED);
 3881         return (mpte);
 3882 }
 3883 
 3884 /*
 3885  * Make a temporary mapping for a physical address.  This is only intended
 3886  * to be used for panic dumps.
 3887  */
 3888 void *
 3889 pmap_kenter_temporary(vm_paddr_t pa, int i)
 3890 {
 3891         vm_offset_t va;
 3892 
 3893         va = (vm_offset_t)crashdumpmap + (i * PAGE_SIZE);
 3894         pmap_kenter(va, pa);
 3895         invlpg(va);
 3896         return ((void *)crashdumpmap);
 3897 }
 3898 
 3899 /*
 3900  * This code maps large physical mmap regions into the
 3901  * processor address space using 2MB page mappings.  Note that
 3902  * some shortcuts are taken, but the code works.
 3903  */
 3904 void
 3905 pmap_object_init_pt(pmap_t pmap, vm_offset_t addr, vm_object_t object,
 3906     vm_pindex_t pindex, vm_size_t size)
 3907 {
 3908         pd_entry_t *pde;
 3909         vm_paddr_t pa, ptepa;
 3910         vm_page_t p, pdpg;
 3911         int pat_mode;
 3912 
 3913         VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
 3914         KASSERT(object->type == OBJT_DEVICE || object->type == OBJT_SG,
 3915             ("pmap_object_init_pt: non-device object"));
 3916         if ((addr & (NBPDR - 1)) == 0 && (size & (NBPDR - 1)) == 0) {
 3917                 if (!vm_object_populate(object, pindex, pindex + atop(size)))
 3918                         return;
 3919                 p = vm_page_lookup(object, pindex);
 3920                 KASSERT(p->valid == VM_PAGE_BITS_ALL,
 3921                     ("pmap_object_init_pt: invalid page %p", p));
 3922                 pat_mode = p->md.pat_mode;
 3923 
 3924                 /*
 3925                  * Abort the mapping if the first page is not physically
 3926                  * aligned to a 2MB page boundary.
 3927                  */
 3928                 ptepa = VM_PAGE_TO_PHYS(p);
 3929                 if (ptepa & (NBPDR - 1))
 3930                         return;
 3931 
 3932                 /*
 3933                  * Skip the first page.  Abort the mapping if the rest of
 3934                  * the pages are not physically contiguous or have differing
 3935                  * memory attributes.
 3936                  */
 3937                 p = TAILQ_NEXT(p, listq);
 3938                 for (pa = ptepa + PAGE_SIZE; pa < ptepa + size;
 3939                     pa += PAGE_SIZE) {
 3940                         KASSERT(p->valid == VM_PAGE_BITS_ALL,
 3941                             ("pmap_object_init_pt: invalid page %p", p));
 3942                         if (pa != VM_PAGE_TO_PHYS(p) ||
 3943                             pat_mode != p->md.pat_mode)
 3944                                 return;
 3945                         p = TAILQ_NEXT(p, listq);
 3946                 }
 3947 
 3948                 /*
 3949                  * Map using 2MB pages.  Since "ptepa" is 2M aligned and
 3950                  * "size" is a multiple of 2M, adding the PAT setting to "pa"
 3951                  * will not affect the termination of this loop.
 3952                  */ 
 3953                 PMAP_LOCK(pmap);
 3954                 for (pa = ptepa | pmap_cache_bits(pat_mode, 1); pa < ptepa +
 3955                     size; pa += NBPDR) {
 3956                         pdpg = pmap_allocpde(pmap, addr, NULL);
 3957                         if (pdpg == NULL) {
 3958                                 /*
 3959                                  * The creation of mappings below is only an
 3960                                  * optimization.  If a page directory page
 3961                                  * cannot be allocated without blocking,
 3962                                  * continue on to the next mapping rather than
 3963                                  * blocking.
 3964                                  */
 3965                                 addr += NBPDR;
 3966                                 continue;
 3967                         }
 3968                         pde = (pd_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(pdpg));
 3969                         pde = &pde[pmap_pde_index(addr)];
 3970                         if ((*pde & PG_V) == 0) {
 3971                                 pde_store(pde, pa | PG_PS | PG_M | PG_A |
 3972                                     PG_U | PG_RW | PG_V);
 3973                                 pmap_resident_count_inc(pmap, NBPDR / PAGE_SIZE);
 3974                                 atomic_add_long(&pmap_pde_mappings, 1);
 3975                         } else {
 3976                                 /* Continue on if the PDE is already valid. */
 3977                                 pdpg->wire_count--;
 3978                                 KASSERT(pdpg->wire_count > 0,
 3979                                     ("pmap_object_init_pt: missing reference "
 3980                                     "to page directory page, va: 0x%lx", addr));
 3981                         }
 3982                         addr += NBPDR;
 3983                 }
 3984                 PMAP_UNLOCK(pmap);
 3985         }
 3986 }
 3987 
 3988 /*
 3989  *      Routine:        pmap_change_wiring
 3990  *      Function:       Change the wiring attribute for a map/virtual-address
 3991  *                      pair.
 3992  *      In/out conditions:
 3993  *                      The mapping must already exist in the pmap.
 3994  */
 3995 void
 3996 pmap_change_wiring(pmap_t pmap, vm_offset_t va, boolean_t wired)
 3997 {
 3998         pd_entry_t *pde;
 3999         pt_entry_t *pte;
 4000         boolean_t pv_lists_locked;
 4001 
 4002         pv_lists_locked = FALSE;
 4003 
 4004         /*
 4005          * Wiring is not a hardware characteristic so there is no need to
 4006          * invalidate TLB.
 4007          */
 4008 retry:
 4009         PMAP_LOCK(pmap);
 4010         pde = pmap_pde(pmap, va);
 4011         if ((*pde & PG_PS) != 0) {
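                      /*
                       * Both operands are booleans, so this is true exactly
                       * when the requested wiring state differs from the
                       * mapping's current PG_W state.  The 2MB mapping must
                       * then be demoted so that only the single 4KB page at
                       * "va" has its wiring changed.
                       */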
 4012                 if (!wired != ((*pde & PG_W) == 0)) {
 4013                         if (!pv_lists_locked) {
 4014                                 pv_lists_locked = TRUE;
 4015                                 if (!rw_try_rlock(&pvh_global_lock)) {
 4016                                         PMAP_UNLOCK(pmap);
 4017                                         rw_rlock(&pvh_global_lock);
 4018                                         goto retry;
 4019                                 }
 4020                         }
 4021                         if (!pmap_demote_pde(pmap, pde, va))
 4022                                 panic("pmap_change_wiring: demotion failed");
 4023                 } else
 4024                         goto out;
 4025         }
 4026         pte = pmap_pde_to_pte(pde, va);
 4027         if (wired && (*pte & PG_W) == 0) {
 4028                 pmap->pm_stats.wired_count++;
 4029                 atomic_set_long(pte, PG_W);
 4030         } else if (!wired && (*pte & PG_W) != 0) {
 4031                 pmap->pm_stats.wired_count--;
 4032                 atomic_clear_long(pte, PG_W);
 4033         }
 4034 out:
 4035         if (pv_lists_locked)
 4036                 rw_runlock(&pvh_global_lock);
 4037         PMAP_UNLOCK(pmap);
 4038 }
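
/*
 * Editorial example (not part of pmap.c): pmap_change_wiring() above toggles
 * only the PG_W bit of a live PTE with atomic set/clear operations, so a
 * concurrent hardware update of the accessed/dirty bits cannot be lost.  A
 * minimal user-space sketch of that read-modify-write idiom using C11
 * atomics; WIRED_BIT is a stand-in value chosen for illustration only.
 */
#include <stdatomic.h>
#include <stdint.h>

#define WIRED_BIT       0x200ULL                /* stand-in for PG_W */

static void
set_wired_bit(_Atomic uint64_t *entry, int wired)
{
        if (wired)
                atomic_fetch_or(entry, WIRED_BIT);      /* like atomic_set_long() */
        else
                atomic_fetch_and(entry, ~WIRED_BIT);    /* like atomic_clear_long() */
}
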
 4039 
 4040 /*
 4041  *      Copy the range specified by src_addr/len
 4042  *      from the source map to the range dst_addr/len
 4043  *      in the destination map.
 4044  *
 4045  *      This routine is only advisory and need not do anything.
 4046  */
 4047 
 4048 void
 4049 pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr, vm_size_t len,
 4050     vm_offset_t src_addr)
 4051 {
 4052         struct rwlock *lock;
 4053         vm_page_t   free;
 4054         vm_offset_t addr;
 4055         vm_offset_t end_addr = src_addr + len;
 4056         vm_offset_t va_next;
 4057 
 4058         if (dst_addr != src_addr)
 4059                 return;
 4060 
 4061         lock = NULL;
 4062         rw_rlock(&pvh_global_lock);
 4063         if (dst_pmap < src_pmap) {
 4064                 PMAP_LOCK(dst_pmap);
 4065                 PMAP_LOCK(src_pmap);
 4066         } else {
 4067                 PMAP_LOCK(src_pmap);
 4068                 PMAP_LOCK(dst_pmap);
 4069         }
 4070         for (addr = src_addr; addr < end_addr; addr = va_next) {
 4071                 pt_entry_t *src_pte, *dst_pte;
 4072                 vm_page_t dstmpde, dstmpte, srcmpte;
 4073                 pml4_entry_t *pml4e;
 4074                 pdp_entry_t *pdpe;
 4075                 pd_entry_t srcptepaddr, *pde;
 4076 
 4077                 KASSERT(addr < UPT_MIN_ADDRESS,
 4078                     ("pmap_copy: invalid to pmap_copy page tables"));
 4079 
 4080                 pml4e = pmap_pml4e(src_pmap, addr);
 4081                 if ((*pml4e & PG_V) == 0) {
 4082                         va_next = (addr + NBPML4) & ~PML4MASK;
 4083                         if (va_next < addr)
 4084                                 va_next = end_addr;
 4085                         continue;
 4086                 }
 4087 
 4088                 pdpe = pmap_pml4e_to_pdpe(pml4e, addr);
 4089                 if ((*pdpe & PG_V) == 0) {
 4090                         va_next = (addr + NBPDP) & ~PDPMASK;
 4091                         if (va_next < addr)
 4092                                 va_next = end_addr;
 4093                         continue;
 4094                 }
 4095 
 4096                 va_next = (addr + NBPDR) & ~PDRMASK;
 4097                 if (va_next < addr)
 4098                         va_next = end_addr;
 4099 
 4100                 pde = pmap_pdpe_to_pde(pdpe, addr);
 4101                 srcptepaddr = *pde;
 4102                 if (srcptepaddr == 0)
 4103                         continue;
 4104 
 4105                 if (srcptepaddr & PG_PS) {
 4106                         if ((addr & PDRMASK) != 0 || addr + NBPDR > end_addr)
 4107                                 continue;
 4108                         dstmpde = pmap_allocpde(dst_pmap, addr, NULL);
 4109                         if (dstmpde == NULL)
 4110                                 break;
 4111                         pde = (pd_entry_t *)
 4112                             PHYS_TO_DMAP(VM_PAGE_TO_PHYS(dstmpde));
 4113                         pde = &pde[pmap_pde_index(addr)];
 4114                         if (*pde == 0 && ((srcptepaddr & PG_MANAGED) == 0 ||
 4115                             pmap_pv_insert_pde(dst_pmap, addr, srcptepaddr &
 4116                             PG_PS_FRAME, &lock))) {
 4117                                 *pde = srcptepaddr & ~PG_W;
 4118                                 pmap_resident_count_inc(dst_pmap, NBPDR / PAGE_SIZE);
 4119                         } else
 4120                                 dstmpde->wire_count--;
 4121                         continue;
 4122                 }
 4123 
 4124                 srcptepaddr &= PG_FRAME;
 4125                 srcmpte = PHYS_TO_VM_PAGE(srcptepaddr);
 4126                 KASSERT(srcmpte->wire_count > 0,
 4127                     ("pmap_copy: source page table page is unused"));
 4128 
 4129                 if (va_next > end_addr)
 4130                         va_next = end_addr;
 4131 
 4132                 src_pte = (pt_entry_t *)PHYS_TO_DMAP(srcptepaddr);
 4133                 src_pte = &src_pte[pmap_pte_index(addr)];
 4134                 dstmpte = NULL;
 4135                 while (addr < va_next) {
 4136                         pt_entry_t ptetemp;
 4137                         ptetemp = *src_pte;
 4138                         /*
 4139                          * Only copy mappings of managed pages.
 4140                          */
 4141                         if ((ptetemp & PG_MANAGED) != 0) {
 4142                                 if (dstmpte != NULL &&
 4143                                     dstmpte->pindex == pmap_pde_pindex(addr))
 4144                                         dstmpte->wire_count++;
 4145                                 else if ((dstmpte = pmap_allocpte(dst_pmap,
 4146                                     addr, NULL)) == NULL)
 4147                                         goto out;
 4148                                 dst_pte = (pt_entry_t *)
 4149                                     PHYS_TO_DMAP(VM_PAGE_TO_PHYS(dstmpte));
 4150                                 dst_pte = &dst_pte[pmap_pte_index(addr)];
 4151                                 if (*dst_pte == 0 &&
 4152                                     pmap_try_insert_pv_entry(dst_pmap, addr,
 4153                                     PHYS_TO_VM_PAGE(ptetemp & PG_FRAME),
 4154                                     &lock)) {
 4155                                         /*
 4156                                          * Clear the wired, modified, and
 4157                                          * accessed (referenced) bits
 4158                                          * during the copy.
 4159                                          */
 4160                                         *dst_pte = ptetemp & ~(PG_W | PG_M |
 4161                                             PG_A);
 4162                                         pmap_resident_count_inc(dst_pmap, 1);
 4163                                 } else {
 4164                                         free = NULL;
 4165                                         if (pmap_unwire_ptp(dst_pmap, addr,
 4166                                             dstmpte, &free)) {
 4167                                                 pmap_invalidate_page(dst_pmap,
 4168                                                     addr);
 4169                                                 pmap_free_zero_pages(free);
 4170                                         }
 4171                                         goto out;
 4172                                 }
 4173                                 if (dstmpte->wire_count >= srcmpte->wire_count)
 4174                                         break;
 4175                         }
 4176                         addr += PAGE_SIZE;
 4177                         src_pte++;
 4178                 }
 4179         }
 4180 out:
 4181         if (lock != NULL)
 4182                 rw_wunlock(lock);
 4183         rw_runlock(&pvh_global_lock);
 4184         PMAP_UNLOCK(src_pmap);
 4185         PMAP_UNLOCK(dst_pmap);
 4186 }
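
/*
 * Editorial example (not part of pmap.c): pmap_copy() above always acquires
 * the lower-addressed pmap lock first, so two threads copying between the
 * same pair of pmaps in opposite directions cannot deadlock.  A minimal
 * user-space sketch of that lock-ordering rule with POSIX mutexes (the two
 * locks are assumed to be distinct):
 */
#include <pthread.h>
#include <stdint.h>

static void
lock_pair_in_order(pthread_mutex_t *a, pthread_mutex_t *b)
{
        /* Acquire in a single global order (by address). */
        if ((uintptr_t)a < (uintptr_t)b) {
                pthread_mutex_lock(a);
                pthread_mutex_lock(b);
        } else {
                pthread_mutex_lock(b);
                pthread_mutex_lock(a);
        }
}
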
 4187 
 4188 /*
 4189  *      pmap_zero_page zeros the specified hardware page through its
 4190  *      direct map address, using pagezero() to clear its contents.
 4191  */
 4192 void
 4193 pmap_zero_page(vm_page_t m)
 4194 {
 4195         vm_offset_t va = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m));
 4196 
 4197         pagezero((void *)va);
 4198 }
 4199 
 4200 /*
 4201  *      pmap_zero_page_area zeros the specified portion of a hardware page
 4202  *      through its direct map address, using pagezero() or bzero().
 4203  *
 4204  *      off and size may not cover an area beyond a single hardware page.
 4205  */
 4206 void
 4207 pmap_zero_page_area(vm_page_t m, int off, int size)
 4208 {
 4209         vm_offset_t va = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m));
 4210 
 4211         if (off == 0 && size == PAGE_SIZE)
 4212                 pagezero((void *)va);
 4213         else
 4214                 bzero((char *)va + off, size);
 4215 }
 4216 
 4217 /*
 4218  *      pmap_zero_page_idle zeros the specified hardware page through its
 4219  *      direct map address, using pagezero() to clear its contents.  This
 4220  *      is intended to be called from the vm_pagezero process only and
 4221  *      outside of Giant.
 4222  */
 4223 void
 4224 pmap_zero_page_idle(vm_page_t m)
 4225 {
 4226         vm_offset_t va = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m));
 4227 
 4228         pagezero((void *)va);
 4229 }
 4230 
 4231 /*
 4232  *      pmap_copy_page copies the specified (machine independent)
 4233  *      page by accessing the source and destination through the direct
 4234  *      map and using pagecopy() to copy the contents, one machine
 4235  *      dependent page at a time.
 4236  */
 4237 void
 4238 pmap_copy_page(vm_page_t msrc, vm_page_t mdst)
 4239 {
 4240         vm_offset_t src = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(msrc));
 4241         vm_offset_t dst = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(mdst));
 4242 
 4243         pagecopy((void *)src, (void *)dst);
 4244 }
 4245 
 4246 int unmapped_buf_allowed = 1;
 4247 
 4248 void
 4249 pmap_copy_pages(vm_page_t ma[], vm_offset_t a_offset, vm_page_t mb[],
 4250     vm_offset_t b_offset, int xfersize)
 4251 {
 4252         void *a_cp, *b_cp;
 4253         vm_offset_t a_pg_offset, b_pg_offset;
 4254         int cnt;
 4255 
 4256         while (xfersize > 0) {
 4257                 a_pg_offset = a_offset & PAGE_MASK;
 4258                 cnt = min(xfersize, PAGE_SIZE - a_pg_offset);
 4259                 a_cp = (char *)PHYS_TO_DMAP(ma[a_offset >> PAGE_SHIFT]->
 4260                     phys_addr) + a_pg_offset;
 4261                 b_pg_offset = b_offset & PAGE_MASK;
 4262                 cnt = min(cnt, PAGE_SIZE - b_pg_offset);
 4263                 b_cp = (char *)PHYS_TO_DMAP(mb[b_offset >> PAGE_SHIFT]->
 4264                     phys_addr) + b_pg_offset;
 4265                 bcopy(a_cp, b_cp, cnt);
 4266                 a_offset += cnt;
 4267                 b_offset += cnt;
 4268                 xfersize -= cnt;
 4269         }
 4270 }
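
/*
 * Editorial example (not part of pmap.c): pmap_copy_pages() above walks two
 * independently aligned page arrays and copies at most the distance to the
 * nearer page boundary on each iteration.  The same chunking logic in plain
 * user-space C, with 4 KB "pages" and hypothetical names:
 */
#include <stddef.h>
#include <string.h>

#define PG_SZ   4096u

static void
copy_sg(char **a_pages, size_t a_off, char **b_pages, size_t b_off, size_t len)
{
        while (len > 0) {
                size_t a_room = PG_SZ - (a_off % PG_SZ);
                size_t b_room = PG_SZ - (b_off % PG_SZ);
                size_t cnt = len;

                /* Copy no more than remains in either current page. */
                if (cnt > a_room)
                        cnt = a_room;
                if (cnt > b_room)
                        cnt = b_room;
                memcpy(b_pages[b_off / PG_SZ] + b_off % PG_SZ,
                    a_pages[a_off / PG_SZ] + a_off % PG_SZ, cnt);
                a_off += cnt;
                b_off += cnt;
                len -= cnt;
        }
}
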
 4271 
 4272 /*
 4273  * Returns true if the pmap's pv is one of the first
 4274  * 16 pvs linked to from this page.  This count may
 4275  * be changed upwards or downwards in the future; it
 4276  * is only necessary that true be returned for a small
 4277  * subset of pmaps for proper page aging.
 4278  */
 4279 boolean_t
 4280 pmap_page_exists_quick(pmap_t pmap, vm_page_t m)
 4281 {
 4282         struct md_page *pvh;
 4283         struct rwlock *lock;
 4284         pv_entry_t pv;
 4285         int loops = 0;
 4286         boolean_t rv;
 4287 
 4288         KASSERT((m->oflags & VPO_UNMANAGED) == 0,
 4289             ("pmap_page_exists_quick: page %p is not managed", m));
 4290         rv = FALSE;
 4291         rw_rlock(&pvh_global_lock);
 4292         lock = VM_PAGE_TO_PV_LIST_LOCK(m);
 4293         rw_rlock(lock);
 4294         TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
 4295                 if (PV_PMAP(pv) == pmap) {
 4296                         rv = TRUE;
 4297                         break;
 4298                 }
 4299                 loops++;
 4300                 if (loops >= 16)
 4301                         break;
 4302         }
 4303         if (!rv && loops < 16 && (m->flags & PG_FICTITIOUS) == 0) {
 4304                 pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
 4305                 TAILQ_FOREACH(pv, &pvh->pv_list, pv_list) {
 4306                         if (PV_PMAP(pv) == pmap) {
 4307                                 rv = TRUE;
 4308                                 break;
 4309                         }
 4310                         loops++;
 4311                         if (loops >= 16)
 4312                                 break;
 4313                 }
 4314         }
 4315         rw_runlock(lock);
 4316         rw_runlock(&pvh_global_lock);
 4317         return (rv);
 4318 }
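
/*
 * Editorial example (not part of pmap.c): pmap_page_exists_quick() above
 * examines at most 16 pv entries so that page aging stays cheap even for
 * heavily shared pages.  A minimal sketch of that bounded list scan using
 * the <sys/queue.h> TAILQ macros; the structure names are hypothetical.
 */
#include <sys/queue.h>
#include <stdbool.h>

struct ent {
        TAILQ_ENTRY(ent) link;
        const void *owner;
};
TAILQ_HEAD(entlist, ent);

static bool
owner_in_first_16(struct entlist *list, const void *owner)
{
        struct ent *e;
        int loops = 0;

        TAILQ_FOREACH(e, list, link) {
                if (e->owner == owner)
                        return (true);
                if (++loops >= 16)
                        break;          /* give up; a miss here is acceptable */
        }
        return (false);
}
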
 4319 
 4320 /*
 4321  *      pmap_page_wired_mappings:
 4322  *
 4323  *      Return the number of managed mappings to the given physical page
 4324  *      that are wired.
 4325  */
 4326 int
 4327 pmap_page_wired_mappings(vm_page_t m)
 4328 {
 4329         int count;
 4330 
 4331         count = 0;
 4332         if ((m->oflags & VPO_UNMANAGED) != 0)
 4333                 return (count);
 4334         rw_wlock(&pvh_global_lock);
 4335         count = pmap_pvh_wired_mappings(&m->md, count);
 4336         if ((m->flags & PG_FICTITIOUS) == 0) {
 4337             count = pmap_pvh_wired_mappings(pa_to_pvh(VM_PAGE_TO_PHYS(m)),
 4338                 count);
 4339         }
 4340         rw_wunlock(&pvh_global_lock);
 4341         return (count);
 4342 }
 4343 
 4344 /*
 4345  *      pmap_pvh_wired_mappings:
 4346  *
 4347  *      Return the updated number "count" of managed mappings that are wired.
 4348  */
 4349 static int
 4350 pmap_pvh_wired_mappings(struct md_page *pvh, int count)
 4351 {
 4352         pmap_t pmap;
 4353         pt_entry_t *pte;
 4354         pv_entry_t pv;
 4355 
 4356         rw_assert(&pvh_global_lock, RA_WLOCKED);
 4357         TAILQ_FOREACH(pv, &pvh->pv_list, pv_list) {
 4358                 pmap = PV_PMAP(pv);
 4359                 PMAP_LOCK(pmap);
 4360                 pte = pmap_pte(pmap, pv->pv_va);
 4361                 if ((*pte & PG_W) != 0)
 4362                         count++;
 4363                 PMAP_UNLOCK(pmap);
 4364         }
 4365         return (count);
 4366 }
 4367 
 4368 /*
 4369  * Returns TRUE if the given page is mapped individually or as part of
 4370  * a 2mpage.  Otherwise, returns FALSE.
 4371  */
 4372 boolean_t
 4373 pmap_page_is_mapped(vm_page_t m)
 4374 {
 4375         struct rwlock *lock;
 4376         boolean_t rv;
 4377 
 4378         if ((m->oflags & VPO_UNMANAGED) != 0)
 4379                 return (FALSE);
 4380         rw_rlock(&pvh_global_lock);
 4381         lock = VM_PAGE_TO_PV_LIST_LOCK(m);
 4382         rw_rlock(lock);
 4383         rv = !TAILQ_EMPTY(&m->md.pv_list) ||
 4384             ((m->flags & PG_FICTITIOUS) == 0 &&
 4385             !TAILQ_EMPTY(&pa_to_pvh(VM_PAGE_TO_PHYS(m))->pv_list));
 4386         rw_runlock(lock);
 4387         rw_runlock(&pvh_global_lock);
 4388         return (rv);
 4389 }
 4390 
 4391 /*
 4392  * Destroy all managed, non-wired mappings in the given user-space
 4393  * pmap.  This pmap cannot be active on any processor besides the
 4394  * caller.
 4395  *
 4396  * This function cannot be applied to the kernel pmap.  Moreover, it
 4397  * is not intended for general use.  It is only to be used during
 4398  * process termination.  Consequently, it can be implemented in ways
 4399  * that make it faster than pmap_remove().  First, it can more quickly
 4400  * destroy mappings by iterating over the pmap's collection of PV
 4401  * entries, rather than searching the page table.  Second, it doesn't
 4402  * have to test and clear the page table entries atomically, because
 4403  * no processor is currently accessing the user address space.  In
 4404  * particular, a page table entry's dirty bit won't change state once
 4405  * this function starts.
 4406  */
 4407 void
 4408 pmap_remove_pages(pmap_t pmap)
 4409 {
 4410         pd_entry_t ptepde;
 4411         pt_entry_t *pte, tpte;
 4412         vm_page_t free = NULL;
 4413         vm_page_t m, mpte, mt;
 4414         pv_entry_t pv;
 4415         struct md_page *pvh;
 4416         struct pv_chunk *pc, *npc;
 4417         struct rwlock *lock;
 4418         int64_t bit;
 4419         uint64_t inuse, bitmask;
 4420         int allfree, field, freed, idx;
 4421 
 4422         /*
 4423          * Assert that the given pmap is only active on the current
 4424          * CPU.  Unfortunately, we cannot block another CPU from
 4425          * activating the pmap while this function is executing.
 4426          */
 4427         KASSERT(pmap == PCPU_GET(curpmap), ("non-current pmap %p", pmap));
 4428 #ifdef INVARIANTS
 4429         {
 4430                 cpuset_t other_cpus;
 4431 
 4432                 other_cpus = all_cpus;
 4433                 critical_enter();
 4434                 CPU_CLR(PCPU_GET(cpuid), &other_cpus);
 4435                 CPU_AND(&other_cpus, &pmap->pm_active);
 4436                 critical_exit();
 4437                 KASSERT(CPU_EMPTY(&other_cpus), ("pmap active %p", pmap));
 4438         }
 4439 #endif
 4440 
 4441         lock = NULL;
 4442         rw_rlock(&pvh_global_lock);
 4443         PMAP_LOCK(pmap);
 4444         TAILQ_FOREACH_SAFE(pc, &pmap->pm_pvchunk, pc_list, npc) {
 4445                 allfree = 1;
 4446                 freed = 0;
 4447                 for (field = 0; field < _NPCM; field++) {
 4448                         inuse = ~pc->pc_map[field] & pc_freemask[field];
 4449                         while (inuse != 0) {
 4450                                 bit = bsfq(inuse);
 4451                                 bitmask = 1UL << bit;
 4452                                 idx = field * 64 + bit;
 4453                                 pv = &pc->pc_pventry[idx];
 4454                                 inuse &= ~bitmask;
 4455 
 4456                                 pte = pmap_pdpe(pmap, pv->pv_va);
 4457                                 ptepde = *pte;
 4458                                 pte = pmap_pdpe_to_pde(pte, pv->pv_va);
 4459                                 tpte = *pte;
 4460                                 if ((tpte & (PG_PS | PG_V)) == PG_V) {
 4461                                         ptepde = tpte;
 4462                                         pte = (pt_entry_t *)PHYS_TO_DMAP(tpte &
 4463                                             PG_FRAME);
 4464                                         pte = &pte[pmap_pte_index(pv->pv_va)];
 4465                                         tpte = *pte & ~PG_PTE_PAT;
 4466                                 }
 4467                                 if ((tpte & PG_V) == 0) {
 4468                                         panic("bad pte va %lx pte %lx",
 4469                                             pv->pv_va, tpte);
 4470                                 }
 4471 
 4472 /*
 4473  * We cannot remove wired pages from a process' mapping at this time
 4474  */
 4475                                 if (tpte & PG_W) {
 4476                                         allfree = 0;
 4477                                         continue;
 4478                                 }
 4479 
 4480                                 m = PHYS_TO_VM_PAGE(tpte & PG_FRAME);
 4481                                 KASSERT(m->phys_addr == (tpte & PG_FRAME),
 4482                                     ("vm_page_t %p phys_addr mismatch %016jx %016jx",
 4483                                     m, (uintmax_t)m->phys_addr,
 4484                                     (uintmax_t)tpte));
 4485 
 4486                                 KASSERT((m->flags & PG_FICTITIOUS) != 0 ||
 4487                                     m < &vm_page_array[vm_page_array_size],
 4488                                     ("pmap_remove_pages: bad tpte %#jx",
 4489                                     (uintmax_t)tpte));
 4490 
 4491                                 pte_clear(pte);
 4492 
 4493                                 /*
 4494                                  * Update the vm_page_t clean/reference bits.
 4495                                  */
 4496                                 if ((tpte & (PG_M | PG_RW)) == (PG_M | PG_RW)) {
 4497                                         if ((tpte & PG_PS) != 0) {
 4498                                                 for (mt = m; mt < &m[NBPDR / PAGE_SIZE]; mt++)
 4499                                                         vm_page_dirty(mt);
 4500                                         } else
 4501                                                 vm_page_dirty(m);
 4502                                 }
 4503 
 4504                                 CHANGE_PV_LIST_LOCK_TO_VM_PAGE(&lock, m);
 4505 
 4506                                 /* Mark free */
 4507                                 pc->pc_map[field] |= bitmask;
 4508                                 if ((tpte & PG_PS) != 0) {
 4509                                         pmap_resident_count_dec(pmap, NBPDR / PAGE_SIZE);
 4510                                         pvh = pa_to_pvh(tpte & PG_PS_FRAME);
 4511                                         TAILQ_REMOVE(&pvh->pv_list, pv, pv_list);
 4512                                         if (TAILQ_EMPTY(&pvh->pv_list)) {
 4513                                                 for (mt = m; mt < &m[NBPDR / PAGE_SIZE]; mt++)
 4514                                                         if ((mt->aflags & PGA_WRITEABLE) != 0 &&
 4515                                                             TAILQ_EMPTY(&mt->md.pv_list))
 4516                                                                 vm_page_aflag_clear(mt, PGA_WRITEABLE);
 4517                                         }
 4518                                         mpte = pmap_lookup_pt_page(pmap, pv->pv_va);
 4519                                         if (mpte != NULL) {
 4520                                                 pmap_remove_pt_page(pmap, mpte);
 4521                                                 pmap_resident_count_dec(pmap, 1);
 4522                                                 KASSERT(mpte->wire_count == NPTEPG,
 4523                                                     ("pmap_remove_pages: pte page wire count error"));
 4524                                                 mpte->wire_count = 0;
 4525                                                 pmap_add_delayed_free_list(mpte, &free, FALSE);
 4526                                                 atomic_subtract_int(&cnt.v_wire_count, 1);
 4527                                         }
 4528                                 } else {
 4529                                         pmap_resident_count_dec(pmap, 1);
 4530                                         TAILQ_REMOVE(&m->md.pv_list, pv, pv_list);
 4531                                         if ((m->aflags & PGA_WRITEABLE) != 0 &&
 4532                                             TAILQ_EMPTY(&m->md.pv_list) &&
 4533                                             (m->flags & PG_FICTITIOUS) == 0) {
 4534                                                 pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
 4535                                                 if (TAILQ_EMPTY(&pvh->pv_list))
 4536                                                         vm_page_aflag_clear(m, PGA_WRITEABLE);
 4537                                         }
 4538                                 }
 4539                                 pmap_unuse_pt(pmap, pv->pv_va, ptepde, &free);
 4540                                 freed++;
 4541                         }
 4542                 }
 4543                 PV_STAT(atomic_add_long(&pv_entry_frees, freed));
 4544                 PV_STAT(atomic_add_int(&pv_entry_spare, freed));
 4545                 PV_STAT(atomic_subtract_long(&pv_entry_count, freed));
 4546                 if (allfree) {
 4547                         TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
 4548                         free_pv_chunk(pc);
 4549                 }
 4550         }
 4551         if (lock != NULL)
 4552                 rw_wunlock(lock);
 4553         pmap_invalidate_all(pmap);
 4554         rw_runlock(&pvh_global_lock);
 4555         PMAP_UNLOCK(pmap);
 4556         pmap_free_zero_pages(free);
 4557 }
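
/*
 * Editorial example (not part of pmap.c): the chunk scan in
 * pmap_remove_pages() above visits each allocated pv entry by inverting the
 * free bitmap and repeatedly taking the lowest set bit (bsfq).  The same
 * idiom in portable C with the GCC/Clang count-trailing-zeros builtin; each
 * 64-bit word covers 64 entries, like one pc_map[] word above.
 */
#include <stdint.h>

static void
visit_allocated(uint64_t free_map, uint64_t full_mask, void (*visit)(int))
{
        uint64_t inuse = ~free_map & full_mask;

        while (inuse != 0) {
                int bit = __builtin_ctzll(inuse);       /* lowest set bit */

                visit(bit);
                inuse &= ~(1ULL << bit);                /* clear it and continue */
        }
}
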
 4558 
 4559 /*
 4560  *      pmap_is_modified:
 4561  *
 4562  *      Return whether or not the specified physical page was modified
 4563  *      in any physical maps.
 4564  */
 4565 boolean_t
 4566 pmap_is_modified(vm_page_t m)
 4567 {
 4568         boolean_t rv;
 4569 
 4570         KASSERT((m->oflags & VPO_UNMANAGED) == 0,
 4571             ("pmap_is_modified: page %p is not managed", m));
 4572 
 4573         /*
 4574          * If the page is not VPO_BUSY, then PGA_WRITEABLE cannot be
 4575          * concurrently set while the object is locked.  Thus, if PGA_WRITEABLE
 4576          * is clear, no PTEs can have PG_M set.
 4577          */
 4578         VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
 4579         if ((m->oflags & VPO_BUSY) == 0 &&
 4580             (m->aflags & PGA_WRITEABLE) == 0)
 4581                 return (FALSE);
 4582         rw_wlock(&pvh_global_lock);
 4583         rv = pmap_is_modified_pvh(&m->md) ||
 4584             ((m->flags & PG_FICTITIOUS) == 0 &&
 4585             pmap_is_modified_pvh(pa_to_pvh(VM_PAGE_TO_PHYS(m))));
 4586         rw_wunlock(&pvh_global_lock);
 4587         return (rv);
 4588 }
 4589 
 4590 /*
 4591  * Returns TRUE if any of the given mappings were used to modify
 4592  * physical memory.  Otherwise, returns FALSE.  Both page and 2mpage
 4593  * mappings are supported.
 4594  */
 4595 static boolean_t
 4596 pmap_is_modified_pvh(struct md_page *pvh)
 4597 {
 4598         pv_entry_t pv;
 4599         pt_entry_t *pte;
 4600         pmap_t pmap;
 4601         boolean_t rv;
 4602 
 4603         rw_assert(&pvh_global_lock, RA_WLOCKED);
 4604         rv = FALSE;
 4605         TAILQ_FOREACH(pv, &pvh->pv_list, pv_list) {
 4606                 pmap = PV_PMAP(pv);
 4607                 PMAP_LOCK(pmap);
 4608                 pte = pmap_pte(pmap, pv->pv_va);
 4609                 rv = (*pte & (PG_M | PG_RW)) == (PG_M | PG_RW);
 4610                 PMAP_UNLOCK(pmap);
 4611                 if (rv)
 4612                         break;
 4613         }
 4614         return (rv);
 4615 }
 4616 
 4617 /*
 4618  *      pmap_is_prefaultable:
 4619  *
 4620  *      Return whether or not the specified virtual address is eligible
 4621  *      for prefault.
 4622  */
 4623 boolean_t
 4624 pmap_is_prefaultable(pmap_t pmap, vm_offset_t addr)
 4625 {
 4626         pd_entry_t *pde;
 4627         pt_entry_t *pte;
 4628         boolean_t rv;
 4629 
 4630         rv = FALSE;
 4631         PMAP_LOCK(pmap);
 4632         pde = pmap_pde(pmap, addr);
 4633         if (pde != NULL && (*pde & (PG_PS | PG_V)) == PG_V) {
 4634                 pte = pmap_pde_to_pte(pde, addr);
 4635                 rv = (*pte & PG_V) == 0;
 4636         }
 4637         PMAP_UNLOCK(pmap);
 4638         return (rv);
 4639 }
 4640 
 4641 /*
 4642  *      pmap_is_referenced:
 4643  *
 4644  *      Return whether or not the specified physical page was referenced
 4645  *      in any physical maps.
 4646  */
 4647 boolean_t
 4648 pmap_is_referenced(vm_page_t m)
 4649 {
 4650         boolean_t rv;
 4651 
 4652         KASSERT((m->oflags & VPO_UNMANAGED) == 0,
 4653             ("pmap_is_referenced: page %p is not managed", m));
 4654         rw_wlock(&pvh_global_lock);
 4655         rv = pmap_is_referenced_pvh(&m->md) ||
 4656             ((m->flags & PG_FICTITIOUS) == 0 &&
 4657             pmap_is_referenced_pvh(pa_to_pvh(VM_PAGE_TO_PHYS(m))));
 4658         rw_wunlock(&pvh_global_lock);
 4659         return (rv);
 4660 }
 4661 
 4662 /*
 4663  * Returns TRUE if any of the given mappings were referenced and FALSE
 4664  * otherwise.  Both page and 2mpage mappings are supported.
 4665  */
 4666 static boolean_t
 4667 pmap_is_referenced_pvh(struct md_page *pvh)
 4668 {
 4669         pv_entry_t pv;
 4670         pt_entry_t *pte;
 4671         pmap_t pmap;
 4672         boolean_t rv;
 4673 
 4674         rw_assert(&pvh_global_lock, RA_WLOCKED);
 4675         rv = FALSE;
 4676         TAILQ_FOREACH(pv, &pvh->pv_list, pv_list) {
 4677                 pmap = PV_PMAP(pv);
 4678                 PMAP_LOCK(pmap);
 4679                 pte = pmap_pte(pmap, pv->pv_va);
 4680                 rv = (*pte & (PG_A | PG_V)) == (PG_A | PG_V);
 4681                 PMAP_UNLOCK(pmap);
 4682                 if (rv)
 4683                         break;
 4684         }
 4685         return (rv);
 4686 }
 4687 
 4688 /*
 4689  * Clear the write and modified bits in each of the given page's mappings.
 4690  */
 4691 void
 4692 pmap_remove_write(vm_page_t m)
 4693 {
 4694         struct md_page *pvh;
 4695         pmap_t pmap;
 4696         pv_entry_t next_pv, pv;
 4697         pd_entry_t *pde;
 4698         pt_entry_t oldpte, *pte;
 4699         vm_offset_t va;
 4700 
 4701         KASSERT((m->oflags & VPO_UNMANAGED) == 0,
 4702             ("pmap_remove_write: page %p is not managed", m));
 4703 
 4704         /*
 4705          * If the page is not VPO_BUSY, then PGA_WRITEABLE cannot be set by
 4706          * another thread while the object is locked.  Thus, if PGA_WRITEABLE
 4707          * is clear, no page table entries need updating.
 4708          */
 4709         VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
 4710         if ((m->oflags & VPO_BUSY) == 0 &&
 4711             (m->aflags & PGA_WRITEABLE) == 0)
 4712                 return;
 4713         rw_wlock(&pvh_global_lock);
 4714         if ((m->flags & PG_FICTITIOUS) != 0)
 4715                 goto small_mappings;
 4716         pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
 4717         TAILQ_FOREACH_SAFE(pv, &pvh->pv_list, pv_list, next_pv) {
 4718                 pmap = PV_PMAP(pv);
 4719                 PMAP_LOCK(pmap);
 4720                 va = pv->pv_va;
 4721                 pde = pmap_pde(pmap, va);
 4722                 if ((*pde & PG_RW) != 0)
 4723                         (void)pmap_demote_pde(pmap, pde, va);
 4724                 PMAP_UNLOCK(pmap);
 4725         }
 4726 small_mappings:
 4727         TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
 4728                 pmap = PV_PMAP(pv);
 4729                 PMAP_LOCK(pmap);
 4730                 pde = pmap_pde(pmap, pv->pv_va);
 4731                 KASSERT((*pde & PG_PS) == 0,
 4732                     ("pmap_remove_write: found a 2mpage in page %p's pv list",
 4733                     m));
 4734                 pte = pmap_pde_to_pte(pde, pv->pv_va);
 4735 retry:
 4736                 oldpte = *pte;
 4737                 if (oldpte & PG_RW) {
 4738                         if (!atomic_cmpset_long(pte, oldpte, oldpte &
 4739                             ~(PG_RW | PG_M)))
 4740                                 goto retry;
 4741                         if ((oldpte & PG_M) != 0)
 4742                                 vm_page_dirty(m);
 4743                         pmap_invalidate_page(pmap, pv->pv_va);
 4744                 }
 4745                 PMAP_UNLOCK(pmap);
 4746         }
 4747         vm_page_aflag_clear(m, PGA_WRITEABLE);
 4748         rw_wunlock(&pvh_global_lock);
 4749 }
 4750 
 4751 /*
 4752  *      pmap_ts_referenced:
 4753  *
 4754  *      Return a count of reference bits for a page, clearing those bits.
 4755  *      It is not necessary for every reference bit to be cleared, but it
 4756  *      is necessary that 0 only be returned when there are truly no
 4757  *      reference bits set.
 4758  *
 4759  *      XXX: The exact number of bits to check and clear is a matter that
 4760  *      should be tested and standardized at some point in the future for
 4761  *      optimal aging of shared pages.
 4762  */
 4763 int
 4764 pmap_ts_referenced(vm_page_t m)
 4765 {
 4766         struct md_page *pvh;
 4767         pv_entry_t pv, pvf, pvn;
 4768         pmap_t pmap;
 4769         pd_entry_t oldpde, *pde;
 4770         pt_entry_t *pte;
 4771         vm_offset_t va;
 4772         int rtval = 0;
 4773 
 4774         KASSERT((m->oflags & VPO_UNMANAGED) == 0,
 4775             ("pmap_ts_referenced: page %p is not managed", m));
 4776         rw_wlock(&pvh_global_lock);
 4777         if ((m->flags & PG_FICTITIOUS) != 0)
 4778                 goto small_mappings;
 4779         pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
 4780         TAILQ_FOREACH_SAFE(pv, &pvh->pv_list, pv_list, pvn) {
 4781                 pmap = PV_PMAP(pv);
 4782                 PMAP_LOCK(pmap);
 4783                 va = pv->pv_va;
 4784                 pde = pmap_pde(pmap, va);
 4785                 oldpde = *pde;
 4786                 if ((oldpde & PG_A) != 0) {
 4787                         if (pmap_demote_pde(pmap, pde, va)) {
 4788                                 if ((oldpde & PG_W) == 0) {
 4789                                         /*
 4790                                          * Remove the mapping to a single page
 4791                                          * so that a subsequent access may
 4792                                          * repromote.  Since the underlying
 4793                                          * page table page is fully populated,
 4794                                          * this removal never frees a page
 4795                                          * table page.
 4796                                          */
 4797                                         va += VM_PAGE_TO_PHYS(m) - (oldpde &
 4798                                             PG_PS_FRAME);
 4799                                         pmap_remove_page(pmap, va, pde, NULL);
 4800                                         rtval++;
 4801                                         if (rtval > 4) {
 4802                                                 PMAP_UNLOCK(pmap);
 4803                                                 goto out;
 4804                                         }
 4805                                 }
 4806                         }
 4807                 }
 4808                 PMAP_UNLOCK(pmap);
 4809         }
 4810 small_mappings:
 4811         if ((pv = TAILQ_FIRST(&m->md.pv_list)) != NULL) {
 4812                 pvf = pv;
 4813                 do {
 4814                         pvn = TAILQ_NEXT(pv, pv_list);
 4815                         TAILQ_REMOVE(&m->md.pv_list, pv, pv_list);
 4816                         TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_list);
 4817                         pmap = PV_PMAP(pv);
 4818                         PMAP_LOCK(pmap);
 4819                         pde = pmap_pde(pmap, pv->pv_va);
 4820                         KASSERT((*pde & PG_PS) == 0, ("pmap_ts_referenced:"
 4821                             " found a 2mpage in page %p's pv list", m));
 4822                         pte = pmap_pde_to_pte(pde, pv->pv_va);
 4823                         if ((*pte & PG_A) != 0) {
 4824                                 atomic_clear_long(pte, PG_A);
 4825                                 pmap_invalidate_page(pmap, pv->pv_va);
 4826                                 rtval++;
 4827                                 if (rtval > 4)
 4828                                         pvn = NULL;
 4829                         }
 4830                         PMAP_UNLOCK(pmap);
 4831                 } while ((pv = pvn) != NULL && pv != pvf);
 4832         }
 4833 out:
 4834         rw_wunlock(&pvh_global_lock);
 4835         return (rtval);
 4836 }
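
/*
 * Editorial example (not part of pmap.c): the small-mappings loop in
 * pmap_ts_referenced() above rotates each examined pv entry to the tail of
 * its list and remembers the first entry so that the scan makes at most one
 * full pass; a later call therefore starts with entries that were not just
 * examined.  A stripped-down analogue with <sys/queue.h> (pmap_ts_referenced()
 * stops once the count exceeds 4):
 */
#include <sys/queue.h>

struct node {
        TAILQ_ENTRY(node) link;
        int referenced;                 /* stands in for PG_A */
};
TAILQ_HEAD(nodelist, node);

static int
count_and_rotate(struct nodelist *list, int cap)
{
        struct node *n, *next, *first;
        int count = 0;

        if ((n = TAILQ_FIRST(list)) == NULL)
                return (0);
        first = n;
        do {
                next = TAILQ_NEXT(n, link);
                /* Move the examined entry to the tail so the next scan
                 * starts with a different entry. */
                TAILQ_REMOVE(list, n, link);
                TAILQ_INSERT_TAIL(list, n, link);
                if (n->referenced) {
                        n->referenced = 0;
                        if (++count > cap)
                                next = NULL;    /* stop early */
                }
        } while ((n = next) != NULL && n != first);
        return (count);
}
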
 4837 
 4838 /*
 4839  *      Clear the modify bits on the specified physical page.
 4840  */
 4841 void
 4842 pmap_clear_modify(vm_page_t m)
 4843 {
 4844         struct md_page *pvh;
 4845         pmap_t pmap;
 4846         pv_entry_t next_pv, pv;
 4847         pd_entry_t oldpde, *pde;
 4848         pt_entry_t oldpte, *pte;
 4849         vm_offset_t va;
 4850 
 4851         KASSERT((m->oflags & VPO_UNMANAGED) == 0,
 4852             ("pmap_clear_modify: page %p is not managed", m));
 4853         VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
 4854         KASSERT((m->oflags & VPO_BUSY) == 0,
 4855             ("pmap_clear_modify: page %p is busy", m));
 4856 
 4857         /*
 4858          * If the page is not PGA_WRITEABLE, then no PTEs can have PG_M set.
 4859          * If the object containing the page is locked and the page is not
 4860          * VPO_BUSY, then PGA_WRITEABLE cannot be concurrently set.
 4861          */
 4862         if ((m->aflags & PGA_WRITEABLE) == 0)
 4863                 return;
 4864         rw_wlock(&pvh_global_lock);
 4865         if ((m->flags & PG_FICTITIOUS) != 0)
 4866                 goto small_mappings;
 4867         pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
 4868         TAILQ_FOREACH_SAFE(pv, &pvh->pv_list, pv_list, next_pv) {
 4869                 pmap = PV_PMAP(pv);
 4870                 PMAP_LOCK(pmap);
 4871                 va = pv->pv_va;
 4872                 pde = pmap_pde(pmap, va);
 4873                 oldpde = *pde;
 4874                 if ((oldpde & PG_RW) != 0) {
 4875                         if (pmap_demote_pde(pmap, pde, va)) {
 4876                                 if ((oldpde & PG_W) == 0) {
 4877                                         /*
 4878                                          * Write protect the mapping to a
 4879                                          * single page so that a subsequent
 4880                                          * write access may repromote.
 4881                                          */
 4882                                         va += VM_PAGE_TO_PHYS(m) - (oldpde &
 4883                                             PG_PS_FRAME);
 4884                                         pte = pmap_pde_to_pte(pde, va);
 4885                                         oldpte = *pte;
 4886                                         if ((oldpte & PG_V) != 0) {
 4887                                                 while (!atomic_cmpset_long(pte,
 4888                                                     oldpte,
 4889                                                     oldpte & ~(PG_M | PG_RW)))
 4890                                                         oldpte = *pte;
 4891                                                 vm_page_dirty(m);
 4892                                                 pmap_invalidate_page(pmap, va);
 4893                                         }
 4894                                 }
 4895                         }
 4896                 }
 4897                 PMAP_UNLOCK(pmap);
 4898         }
 4899 small_mappings:
 4900         TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
 4901                 pmap = PV_PMAP(pv);
 4902                 PMAP_LOCK(pmap);
 4903                 pde = pmap_pde(pmap, pv->pv_va);
 4904                 KASSERT((*pde & PG_PS) == 0, ("pmap_clear_modify: found"
 4905                     " a 2mpage in page %p's pv list", m));
 4906                 pte = pmap_pde_to_pte(pde, pv->pv_va);
 4907                 if ((*pte & (PG_M | PG_RW)) == (PG_M | PG_RW)) {
 4908                         atomic_clear_long(pte, PG_M);
 4909                         pmap_invalidate_page(pmap, pv->pv_va);
 4910                 }
 4911                 PMAP_UNLOCK(pmap);
 4912         }
 4913         rw_wunlock(&pvh_global_lock);
 4914 }
 4915 
 4916 /*
 4917  *      pmap_clear_reference:
 4918  *
 4919  *      Clear the reference bit on the specified physical page.
 4920  */
 4921 void
 4922 pmap_clear_reference(vm_page_t m)
 4923 {
 4924         struct md_page *pvh;
 4925         pmap_t pmap;
 4926         pv_entry_t next_pv, pv;
 4927         pd_entry_t oldpde, *pde;
 4928         pt_entry_t *pte;
 4929         vm_offset_t va;
 4930 
 4931         KASSERT((m->oflags & VPO_UNMANAGED) == 0,
 4932             ("pmap_clear_reference: page %p is not managed", m));
 4933         rw_wlock(&pvh_global_lock);
 4934         if ((m->flags & PG_FICTITIOUS) != 0)
 4935                 goto small_mappings;
 4936         pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
 4937         TAILQ_FOREACH_SAFE(pv, &pvh->pv_list, pv_list, next_pv) {
 4938                 pmap = PV_PMAP(pv);
 4939                 PMAP_LOCK(pmap);
 4940                 va = pv->pv_va;
 4941                 pde = pmap_pde(pmap, va);
 4942                 oldpde = *pde;
 4943                 if ((oldpde & PG_A) != 0) {
 4944                         if (pmap_demote_pde(pmap, pde, va)) {
 4945                                 /*
 4946                                  * Remove the mapping to a single page so
 4947                                  * that a subsequent access may repromote.
 4948                                  * Since the underlying page table page is
 4949                                  * fully populated, this removal never frees
 4950                                  * a page table page.
 4951                                  */
 4952                                 va += VM_PAGE_TO_PHYS(m) - (oldpde &
 4953                                     PG_PS_FRAME);
 4954                                 pmap_remove_page(pmap, va, pde, NULL);
 4955                         }
 4956                 }
 4957                 PMAP_UNLOCK(pmap);
 4958         }
 4959 small_mappings:
 4960         TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
 4961                 pmap = PV_PMAP(pv);
 4962                 PMAP_LOCK(pmap);
 4963                 pde = pmap_pde(pmap, pv->pv_va);
 4964                 KASSERT((*pde & PG_PS) == 0, ("pmap_clear_reference: found"
 4965                     " a 2mpage in page %p's pv list", m));
 4966                 pte = pmap_pde_to_pte(pde, pv->pv_va);
 4967                 if (*pte & PG_A) {
 4968                         atomic_clear_long(pte, PG_A);
 4969                         pmap_invalidate_page(pmap, pv->pv_va);
 4970                 }
 4971                 PMAP_UNLOCK(pmap);
 4972         }
 4973         rw_wunlock(&pvh_global_lock);
 4974 }
 4975 
 4976 /*
 4977  * Miscellaneous support routines follow
 4978  */
 4979 
 4980 /* Adjust the cache mode for a 4KB page mapped via a PTE. */
 4981 static __inline void
 4982 pmap_pte_attr(pt_entry_t *pte, int cache_bits)
 4983 {
 4984         u_int opte, npte;
 4985 
 4986         /*
 4987          * The cache mode bits are all in the low 32-bits of the
 4988          * PTE, so we can just spin on updating the low 32-bits.
 4989          */
 4990         do {
 4991                 opte = *(u_int *)pte;
 4992                 npte = opte & ~PG_PTE_CACHE;
 4993                 npte |= cache_bits;
 4994         } while (npte != opte && !atomic_cmpset_int((u_int *)pte, opte, npte));
 4995 }
 4996 
 4997 /* Adjust the cache mode for a 2MB page mapped via a PDE. */
 4998 static __inline void
 4999 pmap_pde_attr(pd_entry_t *pde, int cache_bits)
 5000 {
 5001         u_int opde, npde;
 5002 
 5003         /*
 5004          * The cache mode bits are all in the low 32-bits of the
 5005          * PDE, so we can just spin on updating the low 32-bits.
 5006          */
 5007         do {
 5008                 opde = *(u_int *)pde;
 5009                 npde = opde & ~PG_PDE_CACHE;
 5010                 npde |= cache_bits;
 5011         } while (npde != opde && !atomic_cmpset_int((u_int *)pde, opde, npde));
 5012 }
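
/*
 * Editorial example (not part of pmap.c): pmap_pte_attr() and pmap_pde_attr()
 * above update only the cache-control field of a live entry with a
 * compare-and-swap loop, so an update that races with a concurrent change to
 * other bits is simply retried (and skipped entirely if the field already
 * has the requested value).  They spin on a 32-bit word because all cache
 * bits sit in the low 32 bits; the same pattern with C11 atomics, using a
 * hypothetical field mask:
 */
#include <stdatomic.h>
#include <stdint.h>

#define EX_CACHE_MASK   0x18u           /* assumed cache-control field */

static void
update_cache_field(_Atomic uint32_t *entry, uint32_t cache_bits)
{
        uint32_t oent = atomic_load(entry);
        uint32_t nent;

        do {
                nent = (oent & ~EX_CACHE_MASK) | cache_bits;
                if (nent == oent)
                        return;         /* already has the requested mode */
                /* On failure, oent is reloaded with the current value. */
        } while (!atomic_compare_exchange_weak(entry, &oent, nent));
}
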
 5013 
 5014 /*
 5015  * Map a set of physical memory pages into the kernel virtual
 5016  * address space. Return a pointer to where it is mapped. This
 5017  * routine is intended to be used for mapping device memory,
 5018  * NOT real memory.
 5019  */
 5020 void *
 5021 pmap_mapdev_attr(vm_paddr_t pa, vm_size_t size, int mode)
 5022 {
 5023         vm_offset_t va, offset;
 5024         vm_size_t tmpsize;
 5025 
 5026         /*
 5027          * If the specified range of physical addresses fits within the direct
 5028          * map window, use the direct map. 
 5029          */
 5030         if (pa < dmaplimit && pa + size < dmaplimit) {
 5031                 va = PHYS_TO_DMAP(pa);
 5032                 if (!pmap_change_attr(va, size, mode))
 5033                         return ((void *)va);
 5034         }
 5035         offset = pa & PAGE_MASK;
 5036         size = roundup(offset + size, PAGE_SIZE);
 5037         va = kmem_alloc_nofault(kernel_map, size);
 5038         if (!va)
 5039                 panic("pmap_mapdev: Couldn't alloc kernel virtual memory");
 5040         pa = trunc_page(pa);
 5041         for (tmpsize = 0; tmpsize < size; tmpsize += PAGE_SIZE)
 5042                 pmap_kenter_attr(va + tmpsize, pa + tmpsize, mode);
 5043         PMAP_LOCK(kernel_pmap);
 5044         pmap_resident_count_inc(kernel_pmap, OFF_TO_IDX(size));
 5045         PMAP_UNLOCK(kernel_pmap);
 5046         pmap_invalidate_range(kernel_pmap, va, va + tmpsize);
 5047         pmap_invalidate_cache_range(va, va + tmpsize);
 5048         return ((void *)(va + offset));
 5049 }
 5050 
 5051 void *
 5052 pmap_mapdev(vm_paddr_t pa, vm_size_t size)
 5053 {
 5054 
 5055         return (pmap_mapdev_attr(pa, size, PAT_UNCACHEABLE));
 5056 }
 5057 
 5058 void *
 5059 pmap_mapbios(vm_paddr_t pa, vm_size_t size)
 5060 {
 5061 
 5062         return (pmap_mapdev_attr(pa, size, PAT_WRITE_BACK));
 5063 }
 5064 
 5065 void
 5066 pmap_unmapdev(vm_offset_t va, vm_size_t size)
 5067 {
 5068         vm_offset_t base, offset;
 5069 
 5070         /* If pmap_mapdev() handed out a direct map address, do nothing. */
 5071         if (va >= DMAP_MIN_ADDRESS && va < DMAP_MAX_ADDRESS)
 5072                 return;
 5073         base = trunc_page(va);
 5074         offset = va & PAGE_MASK;
 5075         size = roundup(offset + size, PAGE_SIZE);
 5076         kmem_free(kernel_map, base, size);
 5077 }
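
/*
 * Editorial example (not part of pmap.c): a typical driver-style use of the
 * interfaces above -- map a device register window with an uncacheable
 * memory type, touch a register, and unmap it again.  This is only a
 * fragment that assumes the usual kernel pmap headers; the physical address
 * and register layout are purely illustrative.
 */
static void
example_map_device_registers(void)
{
        vm_paddr_t regbase = 0xfebf0000UL;      /* hypothetical register BAR */
        vm_size_t regsize = PAGE_SIZE;
        volatile uint32_t *regs;

        regs = pmap_mapdev_attr(regbase, regsize, PAT_UNCACHEABLE);
        (void)regs[0];                          /* read a (made-up) status word */
        pmap_unmapdev((vm_offset_t)regs, regsize);
}
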
 5078 
 5079 /*
 5080  * Tries to demote a 1GB page mapping.
 5081  */
 5082 static boolean_t
 5083 pmap_demote_pdpe(pmap_t pmap, pdp_entry_t *pdpe, vm_offset_t va)
 5084 {
 5085         pdp_entry_t newpdpe, oldpdpe;
 5086         pd_entry_t *firstpde, newpde, *pde;
 5087         vm_paddr_t mpdepa;
 5088         vm_page_t mpde;
 5089 
 5090         PMAP_LOCK_ASSERT(pmap, MA_OWNED);
 5091         oldpdpe = *pdpe;
 5092         KASSERT((oldpdpe & (PG_PS | PG_V)) == (PG_PS | PG_V),
 5093             ("pmap_demote_pdpe: oldpdpe is missing PG_PS and/or PG_V"));
 5094         if ((mpde = vm_page_alloc(NULL, va >> PDPSHIFT, VM_ALLOC_INTERRUPT |
 5095             VM_ALLOC_NOOBJ | VM_ALLOC_WIRED)) == NULL) {
 5096                 CTR2(KTR_PMAP, "pmap_demote_pdpe: failure for va %#lx"
 5097                     " in pmap %p", va, pmap);
 5098                 return (FALSE);
 5099         }
 5100         mpdepa = VM_PAGE_TO_PHYS(mpde);
 5101         firstpde = (pd_entry_t *)PHYS_TO_DMAP(mpdepa);
 5102         newpdpe = mpdepa | PG_M | PG_A | (oldpdpe & PG_U) | PG_RW | PG_V;
 5103         KASSERT((oldpdpe & PG_A) != 0,
 5104             ("pmap_demote_pdpe: oldpdpe is missing PG_A"));
 5105         KASSERT((oldpdpe & (PG_M | PG_RW)) != PG_RW,
 5106             ("pmap_demote_pdpe: oldpdpe is missing PG_M"));
 5107         newpde = oldpdpe;
 5108 
 5109         /*
 5110          * Initialize the page directory page.
 5111          */
 5112         for (pde = firstpde; pde < firstpde + NPDEPG; pde++) {
 5113                 *pde = newpde;
 5114                 newpde += NBPDR;
 5115         }
 5116 
 5117         /*
 5118          * Demote the mapping.
 5119          */
 5120         *pdpe = newpdpe;
 5121 
 5122         /*
 5123          * Invalidate a stale recursive mapping of the page directory page.
 5124          */
 5125         pmap_invalidate_page(pmap, (vm_offset_t)vtopde(va));
 5126 
 5127         pmap_pdpe_demotions++;
 5128         CTR2(KTR_PMAP, "pmap_demote_pdpe: success for va %#lx"
 5129             " in pmap %p", va, pmap);
 5130         return (TRUE);
 5131 }
 5132 
 5133 /*
 5134  * Sets the memory attribute for the specified page.
 5135  */
 5136 void
 5137 pmap_page_set_memattr(vm_page_t m, vm_memattr_t ma)
 5138 {
 5139 
 5140         m->md.pat_mode = ma;
 5141 
 5142         /*
 5143          * If "m" is a normal page, update its direct mapping.  This update
 5144          * can be relied upon to perform any cache operations that are
 5145          * required for data coherence.
 5146          */
 5147         if ((m->flags & PG_FICTITIOUS) == 0 &&
 5148             pmap_change_attr(PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m)), PAGE_SIZE,
 5149             m->md.pat_mode))
 5150                 panic("memory attribute change on the direct map failed");
 5151 }
 5152 
 5153 /*
 5154  * Changes the specified virtual address range's memory type to that given by
 5155  * the parameter "mode".  The specified virtual address range must be
 5156  * completely contained within either the direct map or the kernel map.  If
 5157  * the virtual address range is contained within the kernel map, then the
 5158  * memory type for each of the corresponding ranges of the direct map is also
 5159  * changed.  (The corresponding ranges of the direct map are those ranges that
 5160  * map the same physical pages as the specified virtual address range.)  These
 5161  * changes to the direct map are necessary because Intel describes the
 5162  * behavior of their processors as "undefined" if two or more mappings to the
 5163  * same physical page have different memory types.
 5164  *
 5165  * Returns zero if the change completed successfully, and either EINVAL or
 5166  * ENOMEM if the change failed.  Specifically, EINVAL is returned if some part
 5167  * of the virtual address range was not mapped, and ENOMEM is returned if
 5168  * there was insufficient memory available to complete the change.  In the
 5169  * latter case, the memory type may have been changed on some part of the
 5170  * virtual address range or the direct map.
 5171  */
 5172 int
 5173 pmap_change_attr(vm_offset_t va, vm_size_t size, int mode)
 5174 {
 5175         int error;
 5176 
 5177         PMAP_LOCK(kernel_pmap);
 5178         error = pmap_change_attr_locked(va, size, mode);
 5179         PMAP_UNLOCK(kernel_pmap);
 5180         return (error);
 5181 }
 5182 
 5183 static int
 5184 pmap_change_attr_locked(vm_offset_t va, vm_size_t size, int mode)
 5185 {
 5186         vm_offset_t base, offset, tmpva;
 5187         vm_paddr_t pa_start, pa_end;
 5188         pdp_entry_t *pdpe;
 5189         pd_entry_t *pde;
 5190         pt_entry_t *pte;
 5191         int cache_bits_pte, cache_bits_pde, error;
 5192         boolean_t changed;
 5193 
 5194         PMAP_LOCK_ASSERT(kernel_pmap, MA_OWNED);
 5195         base = trunc_page(va);
 5196         offset = va & PAGE_MASK;
 5197         size = roundup(offset + size, PAGE_SIZE);
 5198 
 5199         /*
 5200          * Only supported on kernel virtual addresses, including the direct
 5201          * map but excluding the recursive map.
 5202          */
 5203         if (base < DMAP_MIN_ADDRESS)
 5204                 return (EINVAL);
 5205 
 5206         cache_bits_pde = pmap_cache_bits(mode, 1);
 5207         cache_bits_pte = pmap_cache_bits(mode, 0);
 5208         changed = FALSE;
 5209 
 5210         /*
 5211          * Pages that aren't mapped aren't supported.  Also break down 2MB pages
 5212          * into 4KB pages if required.
 5213          */
 5214         for (tmpva = base; tmpva < base + size; ) {
 5215                 pdpe = pmap_pdpe(kernel_pmap, tmpva);
 5216                 if (*pdpe == 0)
 5217                         return (EINVAL);
 5218                 if (*pdpe & PG_PS) {
 5219                         /*
 5220                          * If the current 1GB page already has the required
 5221                          * memory type, then we need not demote this page. Just
 5222                          * increment tmpva to the next 1GB page frame.
 5223                          */
 5224                         if ((*pdpe & PG_PDE_CACHE) == cache_bits_pde) {
 5225                                 tmpva = trunc_1gpage(tmpva) + NBPDP;
 5226                                 continue;
 5227                         }
 5228 
 5229                         /*
 5230                          * If the current offset aligns with a 1GB page frame
 5231                          * and there is at least 1GB left within the range, then
 5232                          * we need not break down this page into 2MB pages.
 5233                          */
 5234                         if ((tmpva & PDPMASK) == 0 &&
 5235                             tmpva + PDPMASK < base + size) {
 5236                                 tmpva += NBPDP;
 5237                                 continue;
 5238                         }
 5239                         if (!pmap_demote_pdpe(kernel_pmap, pdpe, tmpva))
 5240                                 return (ENOMEM);
 5241                 }
 5242                 pde = pmap_pdpe_to_pde(pdpe, tmpva);
 5243                 if (*pde == 0)
 5244                         return (EINVAL);
 5245                 if (*pde & PG_PS) {
 5246                         /*
 5247                          * If the current 2MB page already has the required
 5248                          * memory type, then we need not demote this page. Just
 5249                          * increment tmpva to the next 2MB page frame.
 5250                          */
 5251                         if ((*pde & PG_PDE_CACHE) == cache_bits_pde) {
 5252                                 tmpva = trunc_2mpage(tmpva) + NBPDR;
 5253                                 continue;
 5254                         }
 5255 
 5256                         /*
 5257                          * If the current offset aligns with a 2MB page frame
 5258                          * and there is at least 2MB left within the range, then
 5259                          * we need not break down this page into 4KB pages.
 5260                          */
 5261                         if ((tmpva & PDRMASK) == 0 &&
 5262                             tmpva + PDRMASK < base + size) {
 5263                                 tmpva += NBPDR;
 5264                                 continue;
 5265                         }
 5266                         if (!pmap_demote_pde(kernel_pmap, pde, tmpva))
 5267                                 return (ENOMEM);
 5268                 }
 5269                 pte = pmap_pde_to_pte(pde, tmpva);
 5270                 if (*pte == 0)
 5271                         return (EINVAL);
 5272                 tmpva += PAGE_SIZE;
 5273         }
 5274         error = 0;
 5275 
 5276         /*
 5277          * Ok, all the pages exist, so run through them updating their
 5278          * cache mode if required.
 5279          */
 5280         pa_start = pa_end = 0;
 5281         for (tmpva = base; tmpva < base + size; ) {
 5282                 pdpe = pmap_pdpe(kernel_pmap, tmpva);
 5283                 if (*pdpe & PG_PS) {
 5284                         if ((*pdpe & PG_PDE_CACHE) != cache_bits_pde) {
 5285                                 pmap_pde_attr(pdpe, cache_bits_pde);
 5286                                 changed = TRUE;
 5287                         }
 5288                         if (tmpva >= VM_MIN_KERNEL_ADDRESS) {
 5289                                 if (pa_start == pa_end) {
 5290                                         /* Start physical address run. */
 5291                                         pa_start = *pdpe & PG_PS_FRAME;
 5292                                         pa_end = pa_start + NBPDP;
 5293                                 } else if (pa_end == (*pdpe & PG_PS_FRAME))
 5294                                         pa_end += NBPDP;
 5295                                 else {
 5296                                         /* Run ended, update direct map. */
 5297                                         error = pmap_change_attr_locked(
 5298                                             PHYS_TO_DMAP(pa_start),
 5299                                             pa_end - pa_start, mode);
 5300                                         if (error != 0)
 5301                                                 break;
 5302                                         /* Start physical address run. */
 5303                                         pa_start = *pdpe & PG_PS_FRAME;
 5304                                         pa_end = pa_start + NBPDP;
 5305                                 }
 5306                         }
 5307                         tmpva = trunc_1gpage(tmpva) + NBPDP;
 5308                         continue;
 5309                 }
 5310                 pde = pmap_pdpe_to_pde(pdpe, tmpva);
 5311                 if (*pde & PG_PS) {
 5312                         if ((*pde & PG_PDE_CACHE) != cache_bits_pde) {
 5313                                 pmap_pde_attr(pde, cache_bits_pde);
 5314                                 changed = TRUE;
 5315                         }
 5316                         if (tmpva >= VM_MIN_KERNEL_ADDRESS) {
 5317                                 if (pa_start == pa_end) {
 5318                                         /* Start physical address run. */
 5319                                         pa_start = *pde & PG_PS_FRAME;
 5320                                         pa_end = pa_start + NBPDR;
 5321                                 } else if (pa_end == (*pde & PG_PS_FRAME))
 5322                                         pa_end += NBPDR;
 5323                                 else {
 5324                                         /* Run ended, update direct map. */
 5325                                         error = pmap_change_attr_locked(
 5326                                             PHYS_TO_DMAP(pa_start),
 5327                                             pa_end - pa_start, mode);
 5328                                         if (error != 0)
 5329                                                 break;
 5330                                         /* Start physical address run. */
 5331                                         pa_start = *pde & PG_PS_FRAME;
 5332                                         pa_end = pa_start + NBPDR;
 5333                                 }
 5334                         }
 5335                         tmpva = trunc_2mpage(tmpva) + NBPDR;
 5336                 } else {
 5337                         pte = pmap_pde_to_pte(pde, tmpva);
 5338                         if ((*pte & PG_PTE_CACHE) != cache_bits_pte) {
 5339                                 pmap_pte_attr(pte, cache_bits_pte);
 5340                                 changed = TRUE;
 5341                         }
 5342                         if (tmpva >= VM_MIN_KERNEL_ADDRESS) {
 5343                                 if (pa_start == pa_end) {
 5344                                         /* Start physical address run. */
 5345                                         pa_start = *pte & PG_FRAME;
 5346                                         pa_end = pa_start + PAGE_SIZE;
 5347                                 } else if (pa_end == (*pte & PG_FRAME))
 5348                                         pa_end += PAGE_SIZE;
 5349                                 else {
 5350                                         /* Run ended, update direct map. */
 5351                                         error = pmap_change_attr_locked(
 5352                                             PHYS_TO_DMAP(pa_start),
 5353                                             pa_end - pa_start, mode);
 5354                                         if (error != 0)
 5355                                                 break;
 5356                                         /* Start physical address run. */
 5357                                         pa_start = *pte & PG_FRAME;
 5358                                         pa_end = pa_start + PAGE_SIZE;
 5359                                 }
 5360                         }
 5361                         tmpva += PAGE_SIZE;
 5362                 }
 5363         }
 5364         if (error == 0 && pa_start != pa_end)
 5365                 error = pmap_change_attr_locked(PHYS_TO_DMAP(pa_start),
 5366                     pa_end - pa_start, mode);
 5367 
 5368         /*
 5369          * Flush CPU caches if required, to ensure that no stale data remains
 5370          * cached under the old memory attributes.
 5371          */
 5372         if (changed) {
 5373                 pmap_invalidate_range(kernel_pmap, base, tmpva);
 5374                 pmap_invalidate_cache_range(base, tmpva);
 5375         }
 5376         return (error);
 5377 }
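
      /*
       * Example (illustrative): a driver that has already mapped a frame
       * buffer into the kernel address space could switch that mapping to
       * write-combining through the unlocked wrapper, e.g.
       *
       *      error = pmap_change_attr(fb_va, fb_size, PAT_WRITE_COMBINING);
       *
       * where fb_va and fb_size are hypothetical names for the mapping's
       * base address and length.  The wrapper acquires the kernel pmap lock
       * and invokes pmap_change_attr_locked() above.
       */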
 5378 
 5379 /*
 5380  * Demotes any mapping within the direct map region that covers more than the
 5381  * specified range of physical addresses.  This range's size must be a power
 5382  * of two and its starting address must be a multiple of its size.  Since the
 5383  * demotion does not change any attributes of the mapping, a TLB invalidation
 5384  * is not mandatory.  The caller may, however, request a TLB invalidation.
 5385  */
 5386 void
 5387 pmap_demote_DMAP(vm_paddr_t base, vm_size_t len, boolean_t invalidate)
 5388 {
 5389         pdp_entry_t *pdpe;
 5390         pd_entry_t *pde;
 5391         vm_offset_t va;
 5392         boolean_t changed;
 5393 
 5394         if (len == 0)
 5395                 return;
 5396         KASSERT(powerof2(len), ("pmap_demote_DMAP: len is not a power of 2"));
 5397         KASSERT((base & (len - 1)) == 0,
 5398             ("pmap_demote_DMAP: base is not a multiple of len"));
 5399         if (len < NBPDP && base < dmaplimit) {
 5400                 va = PHYS_TO_DMAP(base);
 5401                 changed = FALSE;
 5402                 PMAP_LOCK(kernel_pmap);
 5403                 pdpe = pmap_pdpe(kernel_pmap, va);
 5404                 if ((*pdpe & PG_V) == 0)
 5405                         panic("pmap_demote_DMAP: invalid PDPE");
 5406                 if ((*pdpe & PG_PS) != 0) {
 5407                         if (!pmap_demote_pdpe(kernel_pmap, pdpe, va))
 5408                                 panic("pmap_demote_DMAP: PDPE failed");
 5409                         changed = TRUE;
 5410                 }
 5411                 if (len < NBPDR) {
 5412                         pde = pmap_pdpe_to_pde(pdpe, va);
 5413                         if ((*pde & PG_V) == 0)
 5414                                 panic("pmap_demote_DMAP: invalid PDE");
 5415                         if ((*pde & PG_PS) != 0) {
 5416                                 if (!pmap_demote_pde(kernel_pmap, pde, va))
 5417                                         panic("pmap_demote_DMAP: PDE failed");
 5418                                 changed = TRUE;
 5419                         }
 5420                 }
 5421                 if (changed && invalidate)
 5422                         pmap_invalidate_page(kernel_pmap, va);
 5423                 PMAP_UNLOCK(kernel_pmap);
 5424         }
 5425 }
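
      /*
       * Example (illustrative, with a page-aligned physical address pa):
       * demoting the direct map around a single 4KB page before its
       * attributes are changed would look like
       *
       *      pmap_demote_DMAP(pa, PAGE_SIZE, TRUE);
       *
       * which guarantees that no 1GB or 2MB direct-map mapping covers more
       * than that page, invalidating the TLB entry if a demotion occurred.
       */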
 5426 
 5427 /*
 5428  * Perform the pmap work for the mincore(2) system call.
 5429  */
 5430 int
 5431 pmap_mincore(pmap_t pmap, vm_offset_t addr, vm_paddr_t *locked_pa)
 5432 {
 5433         pd_entry_t *pdep;
 5434         pt_entry_t pte;
 5435         vm_paddr_t pa;
 5436         int val;
 5437 
 5438         PMAP_LOCK(pmap);
 5439 retry:
 5440         pdep = pmap_pde(pmap, addr);
 5441         if (pdep != NULL && (*pdep & PG_V)) {
 5442                 if (*pdep & PG_PS) {
 5443                         pte = *pdep;
 5444                         /* Compute the physical address of the 4KB page. */
 5445                         pa = ((*pdep & PG_PS_FRAME) | (addr & PDRMASK)) &
 5446                             PG_FRAME;
 5447                         val = MINCORE_SUPER;
 5448                 } else {
 5449                         pte = *pmap_pde_to_pte(pdep, addr);
 5450                         pa = pte & PG_FRAME;
 5451                         val = 0;
 5452                 }
 5453         } else {
 5454                 pte = 0;
 5455                 pa = 0;
 5456                 val = 0;
 5457         }
 5458         if ((pte & PG_V) != 0) {
 5459                 val |= MINCORE_INCORE;
 5460                 if ((pte & (PG_M | PG_RW)) == (PG_M | PG_RW))
 5461                         val |= MINCORE_MODIFIED | MINCORE_MODIFIED_OTHER;
 5462                 if ((pte & PG_A) != 0)
 5463                         val |= MINCORE_REFERENCED | MINCORE_REFERENCED_OTHER;
 5464         }
 5465         if ((val & (MINCORE_MODIFIED_OTHER | MINCORE_REFERENCED_OTHER)) !=
 5466             (MINCORE_MODIFIED_OTHER | MINCORE_REFERENCED_OTHER) &&
 5467             (pte & (PG_MANAGED | PG_V)) == (PG_MANAGED | PG_V)) {
 5468                 /* Ensure that "PHYS_TO_VM_PAGE(pa)->object" doesn't change. */
 5469                 if (vm_page_pa_tryrelock(pmap, pa, locked_pa))
 5470                         goto retry;
 5471         } else
 5472                 PA_UNLOCK_COND(*locked_pa);
 5473         PMAP_UNLOCK(pmap);
 5474         return (val);
 5475 }
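
      /*
       * Example (illustrative): the result reaches userland through
       * mincore(2), which reports one status byte per page; an address
       * backed here by a 2MB mapping is flagged with MINCORE_SUPER, e.g.
       *
       *      char vec[1];
       *
       *      if (mincore(addr, PAGE_SIZE, vec) == 0 &&
       *          (vec[0] & MINCORE_SUPER) != 0)
       *              printf("addr is mapped by a superpage\n");
       */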
 5476 
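      /*
       * Make the given thread's pmap the active pmap on the current CPU:
       * move the CPU from the old pmap's set of active CPUs to the new
       * pmap's, record the new page-table root in the thread's PCB, and
       * load it into %cr3.
       */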
 5477 void
 5478 pmap_activate(struct thread *td)
 5479 {
 5480         pmap_t  pmap, oldpmap;
 5481         u_int   cpuid;
 5482         u_int64_t  cr3;
 5483 
 5484         critical_enter();
 5485         pmap = vmspace_pmap(td->td_proc->p_vmspace);
 5486         oldpmap = PCPU_GET(curpmap);
 5487         cpuid = PCPU_GET(cpuid);
 5488 #ifdef SMP
 5489         CPU_CLR_ATOMIC(cpuid, &oldpmap->pm_active);
 5490         CPU_SET_ATOMIC(cpuid, &pmap->pm_active);
 5491 #else
 5492         CPU_CLR(cpuid, &oldpmap->pm_active);
 5493         CPU_SET(cpuid, &pmap->pm_active);
 5494 #endif
 5495         cr3 = DMAP_TO_PHYS((vm_offset_t)pmap->pm_pml4);
 5496         td->td_pcb->pcb_cr3 = cr3;
 5497         load_cr3(cr3);
 5498         PCPU_SET(curpmap, pmap);
 5499         critical_exit();
 5500 }
 5501 
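      /*
       * Nothing to do here: on amd64 the instruction cache is kept coherent
       * with data stores by the hardware, so the pmap need not perform any
       * explicit synchronization after instructions are written.
       */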
 5502 void
 5503 pmap_sync_icache(pmap_t pm, vm_offset_t va, vm_size_t sz)
 5504 {
 5505 }
 5506 
 5507 /*
 5508  *      Increase the starting virtual address of the given mapping if a
 5509  *      different alignment might result in more superpage mappings.
 5510  */
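      /*
       * For example (illustrative values): with a 4MB request, an object
       * offset of 0, and a proposed *addr of 0x800034000, superpage_offset
       * is 0 and *addr is rounded up to the next 2MB boundary, 0x800200000,
       * allowing two superpage mappings instead of one.
       */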
 5511 void
 5512 pmap_align_superpage(vm_object_t object, vm_ooffset_t offset,
 5513     vm_offset_t *addr, vm_size_t size)
 5514 {
 5515         vm_offset_t superpage_offset;
 5516 
 5517         if (size < NBPDR)
 5518                 return;
 5519         if (object != NULL && (object->flags & OBJ_COLORED) != 0)
 5520                 offset += ptoa(object->pg_color);
 5521         superpage_offset = offset & PDRMASK;
 5522         if (size - ((NBPDR - superpage_offset) & PDRMASK) < NBPDR ||
 5523             (*addr & PDRMASK) == superpage_offset)
 5524                 return;
 5525         if ((*addr & PDRMASK) < superpage_offset)
 5526                 *addr = (*addr & ~PDRMASK) + superpage_offset;
 5527         else
 5528                 *addr = ((*addr + PDRMASK) & ~PDRMASK) + superpage_offset;
 5529 }
 5530 
 5531 #include "opt_ddb.h"
 5532 #ifdef DDB
 5533 #include <ddb/ddb.h>
 5534 
 5535 DB_SHOW_COMMAND(pte, pmap_print_pte)
 5536 {
 5537         pmap_t pmap;
 5538         pml4_entry_t *pml4;
 5539         pdp_entry_t *pdp;
 5540         pd_entry_t *pde;
 5541         pt_entry_t *pte;
 5542         vm_offset_t va;
 5543 
 5544         if (have_addr) {
 5545                 va = (vm_offset_t)addr;
 5546                 pmap = PCPU_GET(curpmap); /* XXX */
 5547         } else {
 5548                 db_printf("show pte addr\n");
 5549                 return;
 5550         }
 5551         pml4 = pmap_pml4e(pmap, va);
 5552         db_printf("VA %#016lx pml4e %#016lx", va, *pml4);
 5553         if ((*pml4 & PG_V) == 0) {
 5554                 db_printf("\n");
 5555                 return;
 5556         }
 5557         pdp = pmap_pml4e_to_pdpe(pml4, va);
 5558         db_printf(" pdpe %#016lx", *pdp);
 5559         if ((*pdp & PG_V) == 0 || (*pdp & PG_PS) != 0) {
 5560                 db_printf("\n");
 5561                 return;
 5562         }
 5563         pde = pmap_pdpe_to_pde(pdp, va);
 5564         db_printf(" pde %#016lx", *pde);
 5565         if ((*pde & PG_V) == 0 || (*pde & PG_PS) != 0) {
 5566                 db_printf("\n");
 5567                 return;
 5568         }
 5569         pte = pmap_pde_to_pte(pde, va);
 5570         db_printf(" pte %#016lx\n", *pte);
 5571 }
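
      /*
       * Example (illustrative address): from the DDB prompt,
       *
       *      db> show pte 0xffffffff80201000
       *
       * walks the current pmap's page tables and prints the PML4, PDP, PD,
       * and PT entries that map that virtual address, stopping early at an
       * invalid or large-page entry.
       */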
 5572 
 5573 DB_SHOW_COMMAND(phys2dmap, pmap_phys2dmap)
 5574 {
 5575         vm_paddr_t a;
 5576 
 5577         if (have_addr) {
 5578                 a = (vm_paddr_t)addr;
 5579                 db_printf("0x%jx\n", (uintmax_t)PHYS_TO_DMAP(a));
 5580         } else {
 5581                 db_printf("show phys2dmap addr\n");
 5582         }
 5583 }
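
      /*
       * Example (illustrative address): "show phys2dmap 0x200000" prints
       * the direct-map virtual address that aliases physical address
       * 0x200000.
       */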
 5584 #endif
