The Design and Implementation of the FreeBSD Operating System, Second Edition
Now available: The Design and Implementation of the FreeBSD Operating System (Second Edition)


[ source navigation ] [ diff markup ] [ identifier search ] [ freetext search ] [ file search ] [ list types ] [ track identifier ]

FreeBSD/Linux Kernel Cross Reference
sys/amd64/amd64/pmap.c

Version: -  FREEBSD  -  FREEBSD-12-STABLE  -  FREEBSD-12-0  -  FREEBSD-11-STABLE  -  FREEBSD-11-2  -  FREEBSD-11-1  -  FREEBSD-11-0  -  FREEBSD-10-STABLE  -  FREEBSD-10-4  -  FREEBSD-10-3  -  FREEBSD-10-2  -  FREEBSD-10-1  -  FREEBSD-10-0  -  FREEBSD-9-STABLE  -  FREEBSD-9-3  -  FREEBSD-9-2  -  FREEBSD-9-1  -  FREEBSD-9-0  -  FREEBSD-8-STABLE  -  FREEBSD-8-4  -  FREEBSD-8-3  -  FREEBSD-8-2  -  FREEBSD-8-1  -  FREEBSD-8-0  -  FREEBSD-7-STABLE  -  FREEBSD-7-4  -  FREEBSD-7-3  -  FREEBSD-7-2  -  FREEBSD-7-1  -  FREEBSD-7-0  -  FREEBSD-6-STABLE  -  FREEBSD-6-4  -  FREEBSD-6-3  -  FREEBSD-6-2  -  FREEBSD-6-1  -  FREEBSD-6-0  -  FREEBSD-5-STABLE  -  FREEBSD-5-5  -  FREEBSD-5-4  -  FREEBSD-5-3  -  FREEBSD-5-2  -  FREEBSD-5-1  -  FREEBSD-5-0  -  FREEBSD-4-STABLE  -  FREEBSD-3-STABLE  -  FREEBSD22  -  linux-2.6  -  linux-2.4.22  -  MK83  -  MK84  -  PLAN9  -  DFBSD  -  NETBSD  -  NETBSD5  -  NETBSD4  -  NETBSD3  -  NETBSD20  -  OPENBSD  -  xnu-517  -  xnu-792  -  xnu-792.6.70  -  xnu-1228  -  xnu-1456.1.26  -  xnu-1699.24.8  -  xnu-2050.18.24  -  OPENSOLARIS  -  minix-3-1-1 
SearchContext: -  none  -  3  -  10 

    1 /*-
    2  * Copyright (c) 1991 Regents of the University of California.
    3  * All rights reserved.
    4  * Copyright (c) 1994 John S. Dyson
    5  * All rights reserved.
    6  * Copyright (c) 1994 David Greenman
    7  * All rights reserved.
    8  * Copyright (c) 2003 Peter Wemm
    9  * All rights reserved.
   10  * Copyright (c) 2005-2010 Alan L. Cox <alc@cs.rice.edu>
   11  * All rights reserved.
   12  *
   13  * This code is derived from software contributed to Berkeley by
   14  * the Systems Programming Group of the University of Utah Computer
   15  * Science Department and William Jolitz of UUNET Technologies Inc.
   16  *
   17  * Redistribution and use in source and binary forms, with or without
   18  * modification, are permitted provided that the following conditions
   19  * are met:
   20  * 1. Redistributions of source code must retain the above copyright
   21  *    notice, this list of conditions and the following disclaimer.
   22  * 2. Redistributions in binary form must reproduce the above copyright
   23  *    notice, this list of conditions and the following disclaimer in the
   24  *    documentation and/or other materials provided with the distribution.
   25  * 3. All advertising materials mentioning features or use of this software
   26  *    must display the following acknowledgement:
   27  *      This product includes software developed by the University of
   28  *      California, Berkeley and its contributors.
   29  * 4. Neither the name of the University nor the names of its contributors
   30  *    may be used to endorse or promote products derived from this software
   31  *    without specific prior written permission.
   32  *
   33  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
   34  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   35  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   36  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
   37  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   38  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
   39  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   40  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   41  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   42  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   43  * SUCH DAMAGE.
   44  *
   45  *      from:   @(#)pmap.c      7.7 (Berkeley)  5/12/91
   46  */
   47 /*-
   48  * Copyright (c) 2003 Networks Associates Technology, Inc.
   49  * All rights reserved.
   50  *
   51  * This software was developed for the FreeBSD Project by Jake Burkholder,
   52  * Safeport Network Services, and Network Associates Laboratories, the
   53  * Security Research Division of Network Associates, Inc. under
   54  * DARPA/SPAWAR contract N66001-01-C-8035 ("CBOSS"), as part of the DARPA
   55  * CHATS research program.
   56  *
   57  * Redistribution and use in source and binary forms, with or without
   58  * modification, are permitted provided that the following conditions
   59  * are met:
   60  * 1. Redistributions of source code must retain the above copyright
   61  *    notice, this list of conditions and the following disclaimer.
   62  * 2. Redistributions in binary form must reproduce the above copyright
   63  *    notice, this list of conditions and the following disclaimer in the
   64  *    documentation and/or other materials provided with the distribution.
   65  *
   66  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
   67  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   68  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   69  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
   70  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   71  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
   72  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   73  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   74  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   75  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   76  * SUCH DAMAGE.
   77  */
   78 
   79 #include <sys/cdefs.h>
   80 __FBSDID("$FreeBSD: releng/9.1/sys/amd64/amd64/pmap.c 238006 2012-07-02 17:37:40Z alc $");
   81 
   82 /*
   83  *      Manages physical address maps.
   84  *
   85  *      In addition to hardware address maps, this
   86  *      module is called upon to provide software-use-only
   87  *      maps which may or may not be stored in the same
   88  *      form as hardware maps.  These pseudo-maps are
   89  *      used to store intermediate results from copy
   90  *      operations to and from address spaces.
   91  *
   92  *      Since the information managed by this module is
   93  *      also stored by the logical address mapping module,
   94  *      this module may throw away valid virtual-to-physical
   95  *      mappings at almost any time.  However, invalidations
   96  *      of virtual-to-physical mappings must be done as
   97  *      requested.
   98  *
   99  *      In order to cope with hardware architectures which
  100  *      make virtual-to-physical map invalidates expensive,
  101  *      this module may delay invalidate or reduced protection
  102  *      operations until such time as they are actually
  103  *      necessary.  This module is given full information as
  104  *      to which processors are currently using which maps,
  105  *      and to when physical maps must be made correct.
  106  */
  107 
  108 #include "opt_pmap.h"
  109 #include "opt_vm.h"
  110 
  111 #include <sys/param.h>
  112 #include <sys/systm.h>
  113 #include <sys/kernel.h>
  114 #include <sys/ktr.h>
  115 #include <sys/lock.h>
  116 #include <sys/malloc.h>
  117 #include <sys/mman.h>
  118 #include <sys/mutex.h>
  119 #include <sys/proc.h>
  120 #include <sys/rwlock.h>
  121 #include <sys/sx.h>
  122 #include <sys/vmmeter.h>
  123 #include <sys/sched.h>
  124 #include <sys/sysctl.h>
  125 #ifdef SMP
  126 #include <sys/smp.h>
  127 #else
  128 #include <sys/cpuset.h>
  129 #endif
  130 
  131 #include <vm/vm.h>
  132 #include <vm/vm_param.h>
  133 #include <vm/vm_kern.h>
  134 #include <vm/vm_page.h>
  135 #include <vm/vm_map.h>
  136 #include <vm/vm_object.h>
  137 #include <vm/vm_extern.h>
  138 #include <vm/vm_pageout.h>
  139 #include <vm/vm_pager.h>
  140 #include <vm/vm_reserv.h>
  141 #include <vm/uma.h>
  142 
  143 #include <machine/cpu.h>
  144 #include <machine/cputypes.h>
  145 #include <machine/md_var.h>
  146 #include <machine/pcb.h>
  147 #include <machine/specialreg.h>
  148 #ifdef SMP
  149 #include <machine/smp.h>
  150 #endif
  151 
/*
 * PMAP_INLINE controls how the small pmap helper functions are emitted.
 * In normal (non-DIAGNOSTIC) builds they are declared extern inline —
 * using GNU inlining semantics when the compiler advertises them — so
 * hot paths pay no call overhead.  With DIAGNOSTIC defined they become
 * ordinary out-of-line functions, which keeps them debuggable.
 */
#if !defined(DIAGNOSTIC)
#ifdef __GNUC_GNU_INLINE__
#define PMAP_INLINE     __attribute__((__gnu_inline__)) inline
#else
#define PMAP_INLINE     extern inline
#endif
#else
#define PMAP_INLINE
#endif
  161 
/*
 * PV_STAT() wraps statements that only maintain pv-entry statistics;
 * unless the kernel is built with PV_STATS they expand to nothing.
 */
#ifdef PV_STATS
#define PV_STAT(x)      do { x ; } while (0)
#else
#define PV_STAT(x)      do { } while (0)
#endif
  167 
/*
 * pa_index() converts a physical address into its 2MB-superpage frame
 * number; pa_to_pvh() returns the pv list head in pv_table that tracks
 * mappings of that superpage.
 */
#define pa_index(pa)    ((pa) >> PDRSHIFT)
#define pa_to_pvh(pa)   (&pv_table[pa_index(pa)])
  170 
/* Backing storage for the statically allocated kernel pmap. */
struct pmap kernel_pmap_store;

vm_offset_t virtual_avail;      /* VA of first avail page (after kernel bss) */
vm_offset_t virtual_end;        /* VA of last avail page (end of kernel AS) */

static int ndmpdp;              /* number of direct-map PDP entries */
static vm_paddr_t dmaplimit;    /* highest PA covered by the direct map */
vm_offset_t kernel_vm_end = VM_MIN_KERNEL_ADDRESS;
pt_entry_t pg_nx;               /* PG_NX bit, if no-execute is supported */

SYSCTL_NODE(_vm, OID_AUTO, pmap, CTLFLAG_RD, 0, "VM/pmap parameters");

static int pat_works = 1;
SYSCTL_INT(_vm_pmap, OID_AUTO, pat_works, CTLFLAG_RD, &pat_works, 1,
    "Is page attribute table fully functional?");

static int pg_ps_enabled = 1;
SYSCTL_INT(_vm_pmap, OID_AUTO, pg_ps_enabled, CTLFLAG_RDTUN, &pg_ps_enabled, 0,
    "Are large page mappings enabled?");

#define PAT_INDEX_SIZE  8
static int pat_index[PAT_INDEX_SIZE];   /* cache mode to PAT index conversion */

/* Physical addresses of the kernel's page-table levels (1 = leaf PTs). */
static u_int64_t        KPTphys;        /* phys addr of kernel level 1 */
static u_int64_t        KPDphys;        /* phys addr of kernel level 2 */
u_int64_t               KPDPphys;       /* phys addr of kernel level 3 */
u_int64_t               KPML4phys;      /* phys addr of kernel level 4 */

/* Physical addresses of the direct map's page-table levels. */
static u_int64_t        DMPDphys;       /* phys addr of direct mapped level 2 */
static u_int64_t        DMPDPphys;      /* phys addr of direct mapped level 3 */
  201 
/*
 * Isolate the global pv list lock from data and other locks to prevent false
 * sharing within the cache.  The lock is padded out to a full cache line
 * and accessed through the pvh_global_lock alias below.
 */
static struct {
        struct rwlock   lock;
        char            padding[CACHE_LINE_SIZE - sizeof(struct rwlock)];
} pvh_global __aligned(CACHE_LINE_SIZE);

#define pvh_global_lock pvh_global.lock

/*
 * Data for the pv entry allocation mechanism: the global list of pv
 * chunks, the count of allocated pv entries, and the per-superpage pv
 * list head table (sized in pmap_init()).
 */
static TAILQ_HEAD(pch, pv_chunk) pv_chunks = TAILQ_HEAD_INITIALIZER(pv_chunks);
static long pv_entry_count;
static struct md_page *pv_table;
  219 
/*
 * All those kernel PT submaps that BSD is so fond of.  CMAP1/CADDR1 is a
 * single-page temporary mapping (used only by the early memory test; see
 * pmap_bootstrap()).
 */
pt_entry_t *CMAP1 = 0;
caddr_t CADDR1 = 0;

/*
 * Crashdump maps: a MAXDUMPPGS-page VA window reserved at bootstrap for
 * dumping physical memory.
 */
static caddr_t crashdumpmap;
  230 
/*
 * Prototypes for this file's internal (static) helpers, grouped by
 * purpose: pv entry management, attribute/demotion/promotion helpers,
 * mapping insertion/removal, and page-table-page allocation.
 */
static void     free_pv_chunk(struct pv_chunk *pc);
static void     free_pv_entry(pmap_t pmap, pv_entry_t pv);
static pv_entry_t get_pv_entry(pmap_t pmap, boolean_t try);
static void     pmap_pv_demote_pde(pmap_t pmap, vm_offset_t va, vm_paddr_t pa);
static boolean_t pmap_pv_insert_pde(pmap_t pmap, vm_offset_t va, vm_paddr_t pa);
static void     pmap_pv_promote_pde(pmap_t pmap, vm_offset_t va, vm_paddr_t pa);
static void     pmap_pvh_free(struct md_page *pvh, pmap_t pmap, vm_offset_t va);
static pv_entry_t pmap_pvh_remove(struct md_page *pvh, pmap_t pmap,
                    vm_offset_t va);
static int      pmap_pvh_wired_mappings(struct md_page *pvh, int count);

static int pmap_change_attr_locked(vm_offset_t va, vm_size_t size, int mode);
static boolean_t pmap_demote_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t va);
static boolean_t pmap_demote_pdpe(pmap_t pmap, pdp_entry_t *pdpe,
    vm_offset_t va);
static boolean_t pmap_enter_pde(pmap_t pmap, vm_offset_t va, vm_page_t m,
    vm_prot_t prot);
static vm_page_t pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va,
    vm_page_t m, vm_prot_t prot, vm_page_t mpte);
static void pmap_fill_ptp(pt_entry_t *firstpte, pt_entry_t newpte);
static void pmap_insert_pt_page(pmap_t pmap, vm_page_t mpte);
static boolean_t pmap_is_modified_pvh(struct md_page *pvh);
static boolean_t pmap_is_referenced_pvh(struct md_page *pvh);
static void pmap_kenter_attr(vm_offset_t va, vm_paddr_t pa, int mode);
static vm_page_t pmap_lookup_pt_page(pmap_t pmap, vm_offset_t va);
static void pmap_pde_attr(pd_entry_t *pde, int cache_bits);
static void pmap_promote_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t va);
static boolean_t pmap_protect_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t sva,
    vm_prot_t prot);
static void pmap_pte_attr(pt_entry_t *pte, int cache_bits);
static int pmap_remove_pde(pmap_t pmap, pd_entry_t *pdq, vm_offset_t sva,
                vm_page_t *free);
static int pmap_remove_pte(pmap_t pmap, pt_entry_t *ptq,
                vm_offset_t sva, pd_entry_t ptepde, vm_page_t *free);
static void pmap_remove_pt_page(pmap_t pmap, vm_page_t mpte);
static void pmap_remove_page(pmap_t pmap, vm_offset_t va, pd_entry_t *pde,
    vm_page_t *free);
static void pmap_remove_entry(struct pmap *pmap, vm_page_t m,
                vm_offset_t va);
static void pmap_insert_entry(pmap_t pmap, vm_offset_t va, vm_page_t m);
static boolean_t pmap_try_insert_pv_entry(pmap_t pmap, vm_offset_t va,
    vm_page_t m);
static void pmap_update_pde(pmap_t pmap, vm_offset_t va, pd_entry_t *pde,
    pd_entry_t newpde);
static void pmap_update_pde_invalidate(vm_offset_t va, pd_entry_t newpde);

static vm_page_t pmap_allocpde(pmap_t pmap, vm_offset_t va, int flags);
static vm_page_t pmap_allocpte(pmap_t pmap, vm_offset_t va, int flags);

static vm_page_t _pmap_allocpte(pmap_t pmap, vm_pindex_t ptepindex, int flags);
static int _pmap_unwire_pte_hold(pmap_t pmap, vm_offset_t va, vm_page_t m,
                vm_page_t* free);
static int pmap_unuse_pt(pmap_t, vm_offset_t, pd_entry_t, vm_page_t *);
static vm_offset_t pmap_kmem_choose(vm_offset_t addr);

/* Sanity-check that the entry sizes match the shift constants. */
CTASSERT(1 << PDESHIFT == sizeof(pd_entry_t));
CTASSERT(1 << PTESHIFT == sizeof(pt_entry_t));
  288 
  289 /*
  290  * Move the kernel virtual free pointer to the next
  291  * 2MB.  This is used to help improve performance
  292  * by using a large (2MB) page for much of the kernel
  293  * (.text, .data, .bss)
  294  */
  295 static vm_offset_t
  296 pmap_kmem_choose(vm_offset_t addr)
  297 {
  298         vm_offset_t newaddr = addr;
  299 
  300         newaddr = (addr + (NBPDR - 1)) & ~(NBPDR - 1);
  301         return (newaddr);
  302 }
  303 
  304 /********************/
  305 /* Inline functions */
  306 /********************/
  307 
  308 /* Return a non-clipped PD index for a given VA */
  309 static __inline vm_pindex_t
  310 pmap_pde_pindex(vm_offset_t va)
  311 {
  312         return (va >> PDRSHIFT);
  313 }
  314 
  315 
  316 /* Return various clipped indexes for a given VA */
  317 static __inline vm_pindex_t
  318 pmap_pte_index(vm_offset_t va)
  319 {
  320 
  321         return ((va >> PAGE_SHIFT) & ((1ul << NPTEPGSHIFT) - 1));
  322 }
  323 
  324 static __inline vm_pindex_t
  325 pmap_pde_index(vm_offset_t va)
  326 {
  327 
  328         return ((va >> PDRSHIFT) & ((1ul << NPDEPGSHIFT) - 1));
  329 }
  330 
  331 static __inline vm_pindex_t
  332 pmap_pdpe_index(vm_offset_t va)
  333 {
  334 
  335         return ((va >> PDPSHIFT) & ((1ul << NPDPEPGSHIFT) - 1));
  336 }
  337 
  338 static __inline vm_pindex_t
  339 pmap_pml4e_index(vm_offset_t va)
  340 {
  341 
  342         return ((va >> PML4SHIFT) & ((1ul << NPML4EPGSHIFT) - 1));
  343 }
  344 
  345 /* Return a pointer to the PML4 slot that corresponds to a VA */
  346 static __inline pml4_entry_t *
  347 pmap_pml4e(pmap_t pmap, vm_offset_t va)
  348 {
  349 
  350         return (&pmap->pm_pml4[pmap_pml4e_index(va)]);
  351 }
  352 
  353 /* Return a pointer to the PDP slot that corresponds to a VA */
  354 static __inline pdp_entry_t *
  355 pmap_pml4e_to_pdpe(pml4_entry_t *pml4e, vm_offset_t va)
  356 {
  357         pdp_entry_t *pdpe;
  358 
  359         pdpe = (pdp_entry_t *)PHYS_TO_DMAP(*pml4e & PG_FRAME);
  360         return (&pdpe[pmap_pdpe_index(va)]);
  361 }
  362 
  363 /* Return a pointer to the PDP slot that corresponds to a VA */
  364 static __inline pdp_entry_t *
  365 pmap_pdpe(pmap_t pmap, vm_offset_t va)
  366 {
  367         pml4_entry_t *pml4e;
  368 
  369         pml4e = pmap_pml4e(pmap, va);
  370         if ((*pml4e & PG_V) == 0)
  371                 return (NULL);
  372         return (pmap_pml4e_to_pdpe(pml4e, va));
  373 }
  374 
  375 /* Return a pointer to the PD slot that corresponds to a VA */
  376 static __inline pd_entry_t *
  377 pmap_pdpe_to_pde(pdp_entry_t *pdpe, vm_offset_t va)
  378 {
  379         pd_entry_t *pde;
  380 
  381         pde = (pd_entry_t *)PHYS_TO_DMAP(*pdpe & PG_FRAME);
  382         return (&pde[pmap_pde_index(va)]);
  383 }
  384 
  385 /* Return a pointer to the PD slot that corresponds to a VA */
  386 static __inline pd_entry_t *
  387 pmap_pde(pmap_t pmap, vm_offset_t va)
  388 {
  389         pdp_entry_t *pdpe;
  390 
  391         pdpe = pmap_pdpe(pmap, va);
  392         if (pdpe == NULL || (*pdpe & PG_V) == 0)
  393                 return (NULL);
  394         return (pmap_pdpe_to_pde(pdpe, va));
  395 }
  396 
  397 /* Return a pointer to the PT slot that corresponds to a VA */
  398 static __inline pt_entry_t *
  399 pmap_pde_to_pte(pd_entry_t *pde, vm_offset_t va)
  400 {
  401         pt_entry_t *pte;
  402 
  403         pte = (pt_entry_t *)PHYS_TO_DMAP(*pde & PG_FRAME);
  404         return (&pte[pmap_pte_index(va)]);
  405 }
  406 
  407 /* Return a pointer to the PT slot that corresponds to a VA */
  408 static __inline pt_entry_t *
  409 pmap_pte(pmap_t pmap, vm_offset_t va)
  410 {
  411         pd_entry_t *pde;
  412 
  413         pde = pmap_pde(pmap, va);
  414         if (pde == NULL || (*pde & PG_V) == 0)
  415                 return (NULL);
  416         if ((*pde & PG_PS) != 0)        /* compat with i386 pmap_pte() */
  417                 return ((pt_entry_t *)pde);
  418         return (pmap_pde_to_pte(pde, va));
  419 }
  420 
  421 static __inline void
  422 pmap_resident_count_inc(pmap_t pmap, int count)
  423 {
  424 
  425         PMAP_LOCK_ASSERT(pmap, MA_OWNED);
  426         pmap->pm_stats.resident_count += count;
  427 }
  428 
  429 static __inline void
  430 pmap_resident_count_dec(pmap_t pmap, int count)
  431 {
  432 
  433         PMAP_LOCK_ASSERT(pmap, MA_OWNED);
  434         pmap->pm_stats.resident_count -= count;
  435 }
  436 
  437 PMAP_INLINE pt_entry_t *
  438 vtopte(vm_offset_t va)
  439 {
  440         u_int64_t mask = ((1ul << (NPTEPGSHIFT + NPDEPGSHIFT + NPDPEPGSHIFT + NPML4EPGSHIFT)) - 1);
  441 
  442         return (PTmap + ((va >> PAGE_SHIFT) & mask));
  443 }
  444 
  445 static __inline pd_entry_t *
  446 vtopde(vm_offset_t va)
  447 {
  448         u_int64_t mask = ((1ul << (NPDEPGSHIFT + NPDPEPGSHIFT + NPML4EPGSHIFT)) - 1);
  449 
  450         return (PDmap + ((va >> PDRSHIFT) & mask));
  451 }
  452 
  453 static u_int64_t
  454 allocpages(vm_paddr_t *firstaddr, int n)
  455 {
  456         u_int64_t ret;
  457 
  458         ret = *firstaddr;
  459         bzero((void *)ret, n * PAGE_SIZE);
  460         *firstaddr += n * PAGE_SIZE;
  461         return (ret);
  462 }
  463 
/* The direct-map PML4 slots are assumed to be a power-of-two run. */
CTASSERT(powerof2(NDMPML4E));

/*
 * Build the initial kernel page tables: the kernel map proper (KPT/KPD/
 * KPDP under KPML4), the physical-memory direct map (DMPD/DMPDP), and
 * the recursive PML4 self-mapping that backs PTmap/PDmap.  Runs before
 * paging is fully enabled, so page-table pages are written through their
 * physical addresses.  Advances *firstaddr past all allocations.
 */
static void
create_pagetables(vm_paddr_t *firstaddr)
{
        int i, j, ndm1g;

        /* Allocate pages */
        KPTphys = allocpages(firstaddr, NKPT);
        KPML4phys = allocpages(firstaddr, 1);
        KPDPphys = allocpages(firstaddr, NKPML4E);
        KPDphys = allocpages(firstaddr, NKPDPE);

        /* Size the direct map in 1GB (PDP-entry) units. */
        ndmpdp = (ptoa(Maxmem) + NBPDP - 1) >> PDPSHIFT;
        if (ndmpdp < 4)         /* Minimum 4GB of dirmap */
                ndmpdp = 4;
        DMPDPphys = allocpages(firstaddr, NDMPML4E);
        ndm1g = 0;
        /* ndm1g counts how many direct-map PDP entries can be 1GB pages. */
        if ((amd_feature & AMDID_PAGE1GB) != 0)
                ndm1g = ptoa(Maxmem) >> PDPSHIFT;
        if (ndm1g < ndmpdp)
                DMPDphys = allocpages(firstaddr, ndmpdp - ndm1g);
        dmaplimit = (vm_paddr_t)ndmpdp << PDPSHIFT;

        /* Fill in the underlying page table pages */
        /* Read-only from zero to physfree */
        /* XXX not fully used, underneath 2M pages */
        /* NOTE(review): "i << PAGE_SHIFT" is an int shift; fine only while
         * *firstaddr stays below 2GB at boot — confirm if that changes. */
        for (i = 0; (i << PAGE_SHIFT) < *firstaddr; i++) {
                ((pt_entry_t *)KPTphys)[i] = i << PAGE_SHIFT;
                ((pt_entry_t *)KPTphys)[i] |= PG_RW | PG_V | PG_G;
        }

        /* Now map the page tables at their location within PTmap */
        for (i = 0; i < NKPT; i++) {
                ((pd_entry_t *)KPDphys)[i] = KPTphys + (i << PAGE_SHIFT);
                ((pd_entry_t *)KPDphys)[i] |= PG_RW | PG_V;
        }

        /* Map from zero to end of allocations under 2M pages */
        /* This replaces some of the KPTphys entries above */
        for (i = 0; (i << PDRSHIFT) < *firstaddr; i++) {
                ((pd_entry_t *)KPDphys)[i] = i << PDRSHIFT;
                ((pd_entry_t *)KPDphys)[i] |= PG_RW | PG_V | PG_PS | PG_G;
        }

        /* And connect up the PD to the PDP */
        for (i = 0; i < NKPDPE; i++) {
                ((pdp_entry_t *)KPDPphys)[i + KPDPI] = KPDphys +
                    (i << PAGE_SHIFT);
                ((pdp_entry_t *)KPDPphys)[i + KPDPI] |= PG_RW | PG_V | PG_U;
        }

        /*
         * Now, set up the direct map region using 2MB and/or 1GB pages.  If
         * the end of physical memory is not aligned to a 1GB page boundary,
         * then the residual physical memory is mapped with 2MB pages.  Later,
         * if pmap_mapdev{_attr}() uses the direct map for non-write-back
         * memory, pmap_change_attr() will demote any 2MB or 1GB page mappings
         * that are partially used. 
         */
        for (i = NPDEPG * ndm1g, j = 0; i < NPDEPG * ndmpdp; i++, j++) {
                ((pd_entry_t *)DMPDphys)[j] = (vm_paddr_t)i << PDRSHIFT;
                /* Preset PG_M and PG_A because demotion expects it. */
                ((pd_entry_t *)DMPDphys)[j] |= PG_RW | PG_V | PG_PS | PG_G |
                    PG_M | PG_A;
        }
        /* First ndm1g PDP entries are 1GB superpages... */
        for (i = 0; i < ndm1g; i++) {
                ((pdp_entry_t *)DMPDPphys)[i] = (vm_paddr_t)i << PDPSHIFT;
                /* Preset PG_M and PG_A because demotion expects it. */
                ((pdp_entry_t *)DMPDPphys)[i] |= PG_RW | PG_V | PG_PS | PG_G |
                    PG_M | PG_A;
        }
        /* ...the remainder point at the 2MB-page PDs filled above. */
        for (j = 0; i < ndmpdp; i++, j++) {
                ((pdp_entry_t *)DMPDPphys)[i] = DMPDphys + (j << PAGE_SHIFT);
                ((pdp_entry_t *)DMPDPphys)[i] |= PG_RW | PG_V | PG_U;
        }

        /* And recursively map PML4 to itself in order to get PTmap */
        ((pdp_entry_t *)KPML4phys)[PML4PML4I] = KPML4phys;
        ((pdp_entry_t *)KPML4phys)[PML4PML4I] |= PG_RW | PG_V | PG_U;

        /* Connect the Direct Map slot(s) up to the PML4. */
        for (i = 0; i < NDMPML4E; i++) {
                ((pdp_entry_t *)KPML4phys)[DMPML4I + i] = DMPDPphys +
                    (i << PAGE_SHIFT);
                ((pdp_entry_t *)KPML4phys)[DMPML4I + i] |= PG_RW | PG_V | PG_U;
        }

        /* Connect the KVA slot up to the PML4 */
        ((pdp_entry_t *)KPML4phys)[KPML4I] = KPDPphys;
        ((pdp_entry_t *)KPML4phys)[KPML4I] |= PG_RW | PG_V | PG_U;
}
  556 
/*
 *      Bootstrap the system enough to run with virtual memory.
 *
 *      On amd64 this is called after mapping has already been enabled
 *      and just syncs the pmap module with what has already been done.
 *      [We can't call it easily with mapping off since the kernel is not
 *      mapped with PA == VA, hence we would have to relocate every address
 *      from the linked base (virtual) address "KERNBASE" to the actual
 *      (physical) address starting relative to 0]
 */
void
pmap_bootstrap(vm_paddr_t *firstaddr)
{
        vm_offset_t va;
        pt_entry_t *pte, *unused;

        /*
         * Create an initial set of page tables to run the kernel in.
         */
        create_pagetables(firstaddr);

        /* KVA starts just past the boot allocations, 2MB-aligned. */
        virtual_avail = (vm_offset_t) KERNBASE + *firstaddr;
        virtual_avail = pmap_kmem_choose(virtual_avail);

        virtual_end = VM_MAX_KERNEL_ADDRESS;


        /* XXX do %cr0 as well */
        /* Enable global pages and 4MB/PSE, then switch to the new PML4. */
        load_cr4(rcr4() | CR4_PGE | CR4_PSE);
        load_cr3(KPML4phys);

        /*
         * Initialize the kernel pmap (which is statically allocated).
         */
        PMAP_LOCK_INIT(kernel_pmap);
        kernel_pmap->pm_pml4 = (pdp_entry_t *)PHYS_TO_DMAP(KPML4phys);
        kernel_pmap->pm_root = NULL;
        CPU_FILL(&kernel_pmap->pm_active);      /* don't allow deactivation */
        TAILQ_INIT(&kernel_pmap->pm_pvchunk);

        /*
         * Initialize the global pv list lock.
         */
        rw_init(&pvh_global_lock, "pvh global");

        /*
         * Reserve some special page table entries/VA space for temporary
         * mapping of pages.  SYSMAP hands out "n" pages of KVA starting at
         * "va" and records the first PTE in "p".
         */
#define SYSMAP(c, p, v, n)      \
        v = (c)va; va += ((n)*PAGE_SIZE); p = pte; pte += (n);

        va = virtual_avail;
        pte = vtopte(va);

        /*
         * CMAP1 is only used for the memory test.
         */
        SYSMAP(caddr_t, CMAP1, CADDR1, 1)

        /*
         * Crashdump maps.
         */
        SYSMAP(caddr_t, unused, crashdumpmap, MAXDUMPPGS)

        virtual_avail = va;

        /* Initialize the PAT MSR. */
        pmap_init_pat();
}
  627 
/*
 * Setup the PAT MSR: program the page attribute table entries and build
 * pat_index[], which maps each PAT_* caching mode to the PAT entry that
 * provides it (-1 if unavailable).  The caches/PGE are temporarily
 * disabled around the MSR write, per the required reprogramming sequence.
 */
void
pmap_init_pat(void)
{
        int pat_table[PAT_INDEX_SIZE];
        uint64_t pat_msr;
        u_long cr0, cr4;
        int i;

        /* Bail if this CPU doesn't implement PAT. */
        if ((cpu_feature & CPUID_PAT) == 0)
                panic("no PAT??");

        /* Set default PAT index table. */
        for (i = 0; i < PAT_INDEX_SIZE; i++)
                pat_table[i] = -1;
        pat_table[PAT_WRITE_BACK] = 0;
        pat_table[PAT_WRITE_THROUGH] = 1;
        /* Modes without a dedicated entry fall back to UC (entry 3). */
        pat_table[PAT_UNCACHEABLE] = 3;
        pat_table[PAT_WRITE_COMBINING] = 3;
        pat_table[PAT_WRITE_PROTECTED] = 3;
        pat_table[PAT_UNCACHED] = 3;

        /* Initialize default PAT entries. */
        pat_msr = PAT_VALUE(0, PAT_WRITE_BACK) |
            PAT_VALUE(1, PAT_WRITE_THROUGH) |
            PAT_VALUE(2, PAT_UNCACHED) |
            PAT_VALUE(3, PAT_UNCACHEABLE) |
            PAT_VALUE(4, PAT_WRITE_BACK) |
            PAT_VALUE(5, PAT_WRITE_THROUGH) |
            PAT_VALUE(6, PAT_UNCACHED) |
            PAT_VALUE(7, PAT_UNCACHEABLE);

        if (pat_works) {
                /*
                 * Leave the indices 0-3 at the default of WB, WT, UC-, and UC.
                 * Program 5 and 6 as WP and WC.
                 * Leave 4 and 7 as WB and UC.
                 */
                pat_msr &= ~(PAT_MASK(5) | PAT_MASK(6));
                pat_msr |= PAT_VALUE(5, PAT_WRITE_PROTECTED) |
                    PAT_VALUE(6, PAT_WRITE_COMBINING);
                pat_table[PAT_UNCACHED] = 2;
                pat_table[PAT_WRITE_PROTECTED] = 5;
                pat_table[PAT_WRITE_COMBINING] = 6;
        } else {
                /*
                 * Just replace PAT Index 2 with WC instead of UC-.
                 */
                pat_msr &= ~PAT_MASK(2);
                pat_msr |= PAT_VALUE(2, PAT_WRITE_COMBINING);
                pat_table[PAT_WRITE_COMBINING] = 2;
        }

        /* Disable PGE. */
        cr4 = rcr4();
        load_cr4(cr4 & ~CR4_PGE);

        /* Disable caches (CD = 1, NW = 0). */
        cr0 = rcr0();
        load_cr0((cr0 & ~CR0_NW) | CR0_CD);

        /* Flushes caches and TLBs. */
        wbinvd();
        invltlb();

        /* Update PAT and index table. */
        wrmsr(MSR_PAT, pat_msr);
        for (i = 0; i < PAT_INDEX_SIZE; i++)
                pat_index[i] = pat_table[i];

        /* Flush caches and TLBs again. */
        wbinvd();
        invltlb();

        /* Restore caches and PGE. */
        load_cr0(cr0);
        load_cr4(cr4);
}
  709 
  710 /*
  711  *      Initialize a vm_page's machine-dependent fields.
  712  */
  713 void
  714 pmap_page_init(vm_page_t m)
  715 {
  716 
  717         TAILQ_INIT(&m->md.pv_list);
  718         m->md.pat_mode = PAT_WRITE_BACK;
  719 }
  720 
/*
 *      Initialize the pmap module.
 *      Called by vm_init, to initialize any structures that the pmap
 *      system needs to map virtual memory.
 */
void
pmap_init(void)
{
        vm_page_t mpte;
        vm_size_t s;
        int i, pv_npg;

        /*
         * Initialize the vm page array entries for the kernel pmap's
         * page table pages.
         */ 
        for (i = 0; i < NKPT; i++) {
                mpte = PHYS_TO_VM_PAGE(KPTphys + (i << PAGE_SHIFT));
                KASSERT(mpte >= vm_page_array &&
                    mpte < &vm_page_array[vm_page_array_size],
                    ("pmap_init: page table page is out of range"));
                mpte->pindex = pmap_pde_pindex(KERNBASE) + i;
                mpte->phys_addr = KPTphys + (i << PAGE_SHIFT);
        }

        /*
         * If the kernel is running in a virtual machine on an AMD Family 10h
         * processor, then it must assume that MCA is enabled by the virtual
         * machine monitor.
         */
        if (vm_guest == VM_GUEST_VM && cpu_vendor_id == CPU_VENDOR_AMD &&
            CPUID_TO_FAMILY(cpu_id) == 0x10)
                workaround_erratum383 = 1;

        /*
         * Are large page mappings enabled?
         */
        TUNABLE_INT_FETCH("vm.pmap.pg_ps_enabled", &pg_ps_enabled);
        if (pg_ps_enabled) {
                KASSERT(MAXPAGESIZES > 1 && pagesizes[1] == 0,
                    ("pmap_init: can't assign to pagesizes[1]"));
                pagesizes[1] = NBPDR;
        }

        /*
         * Calculate the size of the pv head table for superpages.
         * Walk phys_avail[] (pairs terminated by a zero entry) to find
         * the end of physical memory.
         */
        for (i = 0; phys_avail[i + 1]; i += 2);
        pv_npg = round_2mpage(phys_avail[(i - 2) + 1]) / NBPDR;

        /*
         * Allocate memory for the pv head table for superpages.
         */
        s = (vm_size_t)(pv_npg * sizeof(struct md_page));
        s = round_page(s);
        pv_table = (struct md_page *)kmem_alloc(kernel_map, s);
        for (i = 0; i < pv_npg; i++)
                TAILQ_INIT(&pv_table[i].pv_list);
}
  780 
/*
 * Event counters for 2MB (PDE) and 1GB (PDPE) superpage mappings,
 * exported read-only under vm.pmap.pde and vm.pmap.pdpe.
 */
SYSCTL_NODE(_vm_pmap, OID_AUTO, pde, CTLFLAG_RD, 0,
    "2MB page mapping counters");

static u_long pmap_pde_demotions;
SYSCTL_ULONG(_vm_pmap_pde, OID_AUTO, demotions, CTLFLAG_RD,
    &pmap_pde_demotions, 0, "2MB page demotions");

static u_long pmap_pde_mappings;
SYSCTL_ULONG(_vm_pmap_pde, OID_AUTO, mappings, CTLFLAG_RD,
    &pmap_pde_mappings, 0, "2MB page mappings");

static u_long pmap_pde_p_failures;
SYSCTL_ULONG(_vm_pmap_pde, OID_AUTO, p_failures, CTLFLAG_RD,
    &pmap_pde_p_failures, 0, "2MB page promotion failures");

static u_long pmap_pde_promotions;
SYSCTL_ULONG(_vm_pmap_pde, OID_AUTO, promotions, CTLFLAG_RD,
    &pmap_pde_promotions, 0, "2MB page promotions");

SYSCTL_NODE(_vm_pmap, OID_AUTO, pdpe, CTLFLAG_RD, 0,
    "1GB page mapping counters");

static u_long pmap_pdpe_demotions;
SYSCTL_ULONG(_vm_pmap_pdpe, OID_AUTO, demotions, CTLFLAG_RD,
    &pmap_pdpe_demotions, 0, "1GB page demotions");
  806 
  807 /***************************************************
  808  * Low level helper routines.....
  809  ***************************************************/
  810 
  811 /*
  812  * Determine the appropriate bits to set in a PTE or PDE for a specified
  813  * caching mode.
  814  */
  815 static int
  816 pmap_cache_bits(int mode, boolean_t is_pde)
  817 {
  818         int cache_bits, pat_flag, pat_idx;
  819 
  820         if (mode < 0 || mode >= PAT_INDEX_SIZE || pat_index[mode] < 0)
  821                 panic("Unknown caching mode %d\n", mode);
  822 
  823         /* The PAT bit is different for PTE's and PDE's. */
  824         pat_flag = is_pde ? PG_PDE_PAT : PG_PTE_PAT;
  825 
  826         /* Map the caching mode to a PAT index. */
  827         pat_idx = pat_index[mode];
  828 
  829         /* Map the 3-bit index value into the PAT, PCD, and PWT bits. */
  830         cache_bits = 0;
  831         if (pat_idx & 0x4)
  832                 cache_bits |= pat_flag;
  833         if (pat_idx & 0x2)
  834                 cache_bits |= PG_NC_PCD;
  835         if (pat_idx & 0x1)
  836                 cache_bits |= PG_NC_PWT;
  837         return (cache_bits);
  838 }
  839 
/*
 * After changing the page size for the specified virtual address in the page
 * table, flush the corresponding entries from the processor's TLB.  Only the
 * calling processor's TLB is affected.
 *
 * The calling thread must be pinned to a processor.
 */
static void
pmap_update_pde_invalidate(vm_offset_t va, pd_entry_t newpde)
{
	u_long cr4;

	if ((newpde & PG_PS) == 0)
		/* Demotion: flush a specific 2MB page mapping. */
		invlpg(va);
	else if ((newpde & PG_G) == 0)
		/*
		 * Promotion: flush every 4KB page mapping from the TLB
		 * because there are too many to flush individually.
		 */
		invltlb();
	else {
		/*
		 * Promotion of a global mapping: flush every 4KB page
		 * mapping from the TLB, including global (PG_G) mappings,
		 * by toggling CR4.PGE off and back on.
		 */
		cr4 = rcr4();
		load_cr4(cr4 & ~CR4_PGE);
		/*
		 * Although preemption at this point could be detrimental to
		 * performance, it would not lead to an error.  PG_G is simply
		 * ignored if CR4.PGE is clear.  Moreover, in case this block
		 * is re-entered, the load_cr4() either above or below will
		 * modify CR4.PGE flushing the TLB.
		 */
		load_cr4(cr4 | CR4_PGE);
	}
}
  878 #ifdef SMP
  879 /*
  880  * For SMP, these functions have to use the IPI mechanism for coherence.
  881  *
  882  * N.B.: Before calling any of the following TLB invalidation functions,
  883  * the calling processor must ensure that all stores updating a non-
  884  * kernel page table are globally performed.  Otherwise, another
  885  * processor could cache an old, pre-update entry without being
  886  * invalidated.  This can happen one of two ways: (1) The pmap becomes
  887  * active on another processor after its pm_active field is checked by
  888  * one of the following functions but before a store updating the page
  889  * table is globally performed. (2) The pmap becomes active on another
  890  * processor before its pm_active field is checked but due to
  891  * speculative loads one of the following functions stills reads the
  892  * pmap as inactive on the other processor.
  893  * 
  894  * The kernel page table is exempt because its pm_active field is
  895  * immutable.  The kernel page table is always active on every
  896  * processor.
  897  */
/*
 * Invalidate the TLB entry for a single page on every processor on
 * which the pmap is active.  The kernel pmap, or a pmap active on all
 * CPUs, takes the broadcast path; otherwise the shootdown IPI is
 * restricted to the CPUs in pm_active.
 */
void
pmap_invalidate_page(pmap_t pmap, vm_offset_t va)
{
	cpuset_t other_cpus;
	u_int cpuid;

	/* Pin so that cpuid and the IPI target set stay consistent. */
	sched_pin();
	if (pmap == kernel_pmap || !CPU_CMP(&pmap->pm_active, &all_cpus)) {
		invlpg(va);
		smp_invlpg(va);
	} else {
		cpuid = PCPU_GET(cpuid);
		other_cpus = all_cpus;
		CPU_CLR(cpuid, &other_cpus);
		/* Flush locally only if this pmap is active here. */
		if (CPU_ISSET(cpuid, &pmap->pm_active))
			invlpg(va);
		CPU_AND(&other_cpus, &pmap->pm_active);
		if (!CPU_EMPTY(&other_cpus))
			smp_masked_invlpg(other_cpus, va);
	}
	sched_unpin();
}
  920 
/*
 * Invalidate the TLB entries for the given virtual address range on
 * every processor on which the pmap is active, one page at a time.
 * Callers are expected to keep the range small; see pmap_invalidate_all()
 * for full flushes.
 */
void
pmap_invalidate_range(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
{
	cpuset_t other_cpus;
	vm_offset_t addr;
	u_int cpuid;

	/* Pin so that cpuid and the IPI target set stay consistent. */
	sched_pin();
	if (pmap == kernel_pmap || !CPU_CMP(&pmap->pm_active, &all_cpus)) {
		for (addr = sva; addr < eva; addr += PAGE_SIZE)
			invlpg(addr);
		smp_invlpg_range(sva, eva);
	} else {
		cpuid = PCPU_GET(cpuid);
		other_cpus = all_cpus;
		CPU_CLR(cpuid, &other_cpus);
		/* Flush locally only if this pmap is active here. */
		if (CPU_ISSET(cpuid, &pmap->pm_active))
			for (addr = sva; addr < eva; addr += PAGE_SIZE)
				invlpg(addr);
		CPU_AND(&other_cpus, &pmap->pm_active);
		if (!CPU_EMPTY(&other_cpus))
			smp_masked_invlpg_range(other_cpus, sva, eva);
	}
	sched_unpin();
}
  946 
/*
 * Invalidate the entire TLB (non-global entries) on every processor on
 * which the pmap is active.
 */
void
pmap_invalidate_all(pmap_t pmap)
{
	cpuset_t other_cpus;
	u_int cpuid;

	/* Pin so that cpuid and the IPI target set stay consistent. */
	sched_pin();
	if (pmap == kernel_pmap || !CPU_CMP(&pmap->pm_active, &all_cpus)) {
		invltlb();
		smp_invltlb();
	} else {
		cpuid = PCPU_GET(cpuid);
		other_cpus = all_cpus;
		CPU_CLR(cpuid, &other_cpus);
		/* Flush locally only if this pmap is active here. */
		if (CPU_ISSET(cpuid, &pmap->pm_active))
			invltlb();
		CPU_AND(&other_cpus, &pmap->pm_active);
		if (!CPU_EMPTY(&other_cpus))
			smp_masked_invltlb(other_cpus);
	}
	sched_unpin();
}
  969 
/*
 * Write back and invalidate the data caches on all processors: locally
 * via wbinvd and remotely via a cache-flush IPI.
 */
void
pmap_invalidate_cache(void)
{

	sched_pin();
	wbinvd();
	smp_cache_flush();
	sched_unpin();
}
  979 
/*
 * Argument block for the smp_rendezvous_cpus() call in pmap_update_pde():
 * a single processor performs the PDE store while every processor in the
 * invalidate set flushes its TLB afterwards.
 */
struct pde_action {
	cpuset_t invalidate;	/* processors that invalidate their TLB */
	vm_offset_t va;		/* virtual address mapped by the PDE */
	pd_entry_t *pde;	/* PDE to rewrite */
	pd_entry_t newpde;	/* new PDE value */
	u_int store;		/* processor that updates the PDE */
};
  987 
  988 static void
  989 pmap_update_pde_action(void *arg)
  990 {
  991         struct pde_action *act = arg;
  992 
  993         if (act->store == PCPU_GET(cpuid))
  994                 pde_store(act->pde, act->newpde);
  995 }
  996 
/*
 * Rendezvous teardown: runs on every participating processor after the
 * PDE store in pmap_update_pde_action() and flushes the local TLB for
 * those CPUs named in the invalidate set.
 */
static void
pmap_update_pde_teardown(void *arg)
{
	struct pde_action *act = arg;

	if (CPU_ISSET(PCPU_GET(cpuid), &act->invalidate))
		pmap_update_pde_invalidate(act->va, act->newpde);
}
 1005 
/*
 * Change the page size for the specified virtual address in a way that
 * prevents any possibility of the TLB ever having two entries that map the
 * same virtual address using different page sizes.  This is the recommended
 * workaround for Erratum 383 on AMD Family 10h processors.  It prevents a
 * machine check exception for a TLB state that is improperly diagnosed as a
 * hardware error.
 */
static void
pmap_update_pde(pmap_t pmap, vm_offset_t va, pd_entry_t *pde, pd_entry_t newpde)
{
	struct pde_action act;
	cpuset_t active, other_cpus;
	u_int cpuid;

	sched_pin();
	cpuid = PCPU_GET(cpuid);
	other_cpus = all_cpus;
	CPU_CLR(cpuid, &other_cpus);
	/* Compute the set of CPUs that must observe the update. */
	if (pmap == kernel_pmap)
		active = all_cpus;
	else
		active = pmap->pm_active;
	if (CPU_OVERLAP(&active, &other_cpus)) {
		/*
		 * Remote CPUs have the pmap active: rendezvous so that the
		 * store and all TLB invalidations happen while every
		 * participant is held in the rendezvous, never exposing a
		 * mixed-size TLB state.
		 */
		act.store = cpuid;
		act.invalidate = active;
		act.va = va;
		act.pde = pde;
		act.newpde = newpde;
		CPU_SET(cpuid, &active);
		smp_rendezvous_cpus(active,
		    smp_no_rendevous_barrier, pmap_update_pde_action,
		    pmap_update_pde_teardown, &act);
	} else {
		/* Only this CPU (at most) can see the mapping. */
		pde_store(pde, newpde);
		if (CPU_ISSET(cpuid, &active))
			pmap_update_pde_invalidate(va, newpde);
	}
	sched_unpin();
}
 1046 #else /* !SMP */
 1047 /*
 1048  * Normal, non-SMP, invalidation functions.
 1049  * We inline these within pmap.c for speed.
 1050  */
/* Uniprocessor: flush one page's TLB entry if the pmap is in use. */
PMAP_INLINE void
pmap_invalidate_page(pmap_t pmap, vm_offset_t va)
{

	if (pmap == kernel_pmap || !CPU_EMPTY(&pmap->pm_active))
		invlpg(va);
}
 1058 
/* Uniprocessor: flush a range of TLB entries, page by page. */
PMAP_INLINE void
pmap_invalidate_range(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
{
	vm_offset_t addr;

	if (pmap == kernel_pmap || !CPU_EMPTY(&pmap->pm_active))
		for (addr = sva; addr < eva; addr += PAGE_SIZE)
			invlpg(addr);
}
 1068 
/* Uniprocessor: flush all non-global TLB entries if the pmap is in use. */
PMAP_INLINE void
pmap_invalidate_all(pmap_t pmap)
{

	if (pmap == kernel_pmap || !CPU_EMPTY(&pmap->pm_active))
		invltlb();
}
 1076 
/* Uniprocessor: write back and invalidate the local data caches. */
PMAP_INLINE void
pmap_invalidate_cache(void)
{

	wbinvd();
}
 1083 
/*
 * Uniprocessor version of the Erratum 383 PDE update: with no other
 * CPUs, a plain store followed by a local TLB flush suffices.
 */
static void
pmap_update_pde(pmap_t pmap, vm_offset_t va, pd_entry_t *pde, pd_entry_t newpde)
{

	pde_store(pde, newpde);
	if (pmap == kernel_pmap || !CPU_EMPTY(&pmap->pm_active))
		pmap_update_pde_invalidate(va, newpde);
}
 1092 #endif /* !SMP */
 1093 
/*
 * Ranges of at least this many bytes are flushed with a full cache
 * invalidation instead of per-line clflush.
 */
#define PMAP_CLFLUSH_THRESHOLD   (2 * 1024 * 1024)
 1095 
/*
 * Make the cache contents for the given page-aligned KVA range coherent
 * with memory, choosing the cheapest supported mechanism: nothing if the
 * CPU self-snoops, per-line clflush for small ranges, or a global cache
 * flush otherwise.
 */
void
pmap_invalidate_cache_range(vm_offset_t sva, vm_offset_t eva)
{

	KASSERT((sva & PAGE_MASK) == 0,
	    ("pmap_invalidate_cache_range: sva not page-aligned"));
	KASSERT((eva & PAGE_MASK) == 0,
	    ("pmap_invalidate_cache_range: eva not page-aligned"));

	if (cpu_feature & CPUID_SS)
		; /* If "Self Snoop" is supported, do nothing. */
	else if ((cpu_feature & CPUID_CLFSH) != 0 &&
	    eva - sva < PMAP_CLFLUSH_THRESHOLD) {

		/*
		 * Otherwise, do per-cache line flush.  Use the mfence
		 * instruction to insure that previous stores are
		 * included in the write-back.  The processor
		 * propagates flush to other processors in the cache
		 * coherence domain.
		 */
		mfence();
		for (; sva < eva; sva += cpu_clflush_line_size)
			clflush(sva);
		mfence();
	} else {

		/*
		 * No targeted cache flush methods are supported by CPU,
		 * or the supplied range is bigger than 2MB.
		 * Globally invalidate cache.
		 */
		pmap_invalidate_cache();
	}
}
 1131 
/*
 * Remove the specified set of pages from the data and instruction caches.
 *
 * In contrast to pmap_invalidate_cache_range(), this function does not
 * rely on the CPU's self-snoop feature, because it is intended for use
 * when moving pages into a different cache domain.
 */
void
pmap_invalidate_cache_pages(vm_page_t *pages, int count)
{
	vm_offset_t daddr, eva;
	int i;

	/* Fall back to a global flush for large sets or without clflush. */
	if (count >= PMAP_CLFLUSH_THRESHOLD / PAGE_SIZE ||
	    (cpu_feature & CPUID_CLFSH) == 0)
		pmap_invalidate_cache();
	else {
		/* mfence brackets the clflushes to order prior stores. */
		mfence();
		for (i = 0; i < count; i++) {
			/* Flush each page through its direct-map address. */
			daddr = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(pages[i]));
			eva = daddr + PAGE_SIZE;
			for (; daddr < eva; daddr += cpu_clflush_line_size)
				clflush(daddr);
		}
		mfence();
	}
}
 1159 
 1160 /*
 1161  * Are we current address space or kernel?
 1162  */
 1163 static __inline int
 1164 pmap_is_current(pmap_t pmap)
 1165 {
 1166         return (pmap == kernel_pmap ||
 1167             (pmap->pm_pml4[PML4PML4I] & PG_FRAME) == (PML4pml4e[0] & PG_FRAME));
 1168 }
 1169 
/*
 *	Routine:	pmap_extract
 *	Function:
 *		Extract the physical page address associated
 *		with the given map/virtual_address pair.  Returns 0
 *		if there is no valid mapping.  Handles 1GB, 2MB, and
 *		4KB mappings.
 */
vm_paddr_t
pmap_extract(pmap_t pmap, vm_offset_t va)
{
	pdp_entry_t *pdpe;
	pd_entry_t *pde;
	pt_entry_t *pte;
	vm_paddr_t pa;

	pa = 0;
	PMAP_LOCK(pmap);
	pdpe = pmap_pdpe(pmap, va);
	if (pdpe != NULL && (*pdpe & PG_V) != 0) {
		if ((*pdpe & PG_PS) != 0)
			/* 1GB page: offset within the 1GB frame. */
			pa = (*pdpe & PG_PS_FRAME) | (va & PDPMASK);
		else {
			pde = pmap_pdpe_to_pde(pdpe, va);
			if ((*pde & PG_V) != 0) {
				if ((*pde & PG_PS) != 0) {
					/* 2MB page. */
					pa = (*pde & PG_PS_FRAME) |
					    (va & PDRMASK);
				} else {
					/* 4KB page. */
					pte = pmap_pde_to_pte(pde, va);
					pa = (*pte & PG_FRAME) |
					    (va & PAGE_MASK);
				}
			}
		}
	}
	PMAP_UNLOCK(pmap);
	return (pa);
}
 1207 
/*
 *	Routine:	pmap_extract_and_hold
 *	Function:
 *		Atomically extract and hold the physical page
 *		with the given pmap and virtual address pair
 *		if that mapping permits the given protection.
 *		Returns NULL if there is no such mapping or if write
 *		access was requested on a read-only mapping.
 */
vm_page_t
pmap_extract_and_hold(pmap_t pmap, vm_offset_t va, vm_prot_t prot)
{
	pd_entry_t pde, *pdep;
	pt_entry_t pte;
	vm_paddr_t pa;
	vm_page_t m;

	pa = 0;
	m = NULL;
	PMAP_LOCK(pmap);
retry:
	pdep = pmap_pde(pmap, va);
	if (pdep != NULL && (pde = *pdep)) {
		if (pde & PG_PS) {
			/* 2MB mapping. */
			if ((pde & PG_RW) || (prot & VM_PROT_WRITE) == 0) {
				/*
				 * tryrelock drops and reacquires the page
				 * lock; if it had to sleep, the mapping may
				 * have changed, so restart the lookup.
				 */
				if (vm_page_pa_tryrelock(pmap, (pde &
				    PG_PS_FRAME) | (va & PDRMASK), &pa))
					goto retry;
				m = PHYS_TO_VM_PAGE((pde & PG_PS_FRAME) |
				    (va & PDRMASK));
				vm_page_hold(m);
			}
		} else {
			/* 4KB mapping. */
			pte = *pmap_pde_to_pte(pdep, va);
			if ((pte & PG_V) &&
			    ((pte & PG_RW) || (prot & VM_PROT_WRITE) == 0)) {
				if (vm_page_pa_tryrelock(pmap, pte & PG_FRAME,
				    &pa))
					goto retry;
				m = PHYS_TO_VM_PAGE(pte & PG_FRAME);
				vm_page_hold(m);
			}
		}
	}
	PA_UNLOCK_COND(pa);
	PMAP_UNLOCK(pmap);
	return (m);
}
 1254 
/*
 * Extract the physical address for a kernel virtual address without
 * locking.  Direct-map addresses are translated arithmetically;
 * others are looked up in the kernel page table.
 */
vm_paddr_t
pmap_kextract(vm_offset_t va)
{
	pd_entry_t pde;
	vm_paddr_t pa;

	if (va >= DMAP_MIN_ADDRESS && va < DMAP_MAX_ADDRESS) {
		pa = DMAP_TO_PHYS(va);
	} else {
		pde = *vtopde(va);
		if (pde & PG_PS) {
			/* 2MB mapping. */
			pa = (pde & PG_PS_FRAME) | (va & PDRMASK);
		} else {
			/*
			 * Beware of a concurrent promotion that changes the
			 * PDE at this point!  For example, vtopte() must not
			 * be used to access the PTE because it would use the
			 * new PDE.  It is, however, safe to use the old PDE
			 * because the page table page is preserved by the
			 * promotion.
			 */
			pa = *pmap_pde_to_pte(&pde, va);
			pa = (pa & PG_FRAME) | (va & PAGE_MASK);
		}
	}
	return (pa);
}
 1282 
 1283 /***************************************************
 1284  * Low level mapping routines.....
 1285  ***************************************************/
 1286 
 1287 /*
 1288  * Add a wired page to the kva.
 1289  * Note: not SMP coherent.
 1290  */
 1291 PMAP_INLINE void 
 1292 pmap_kenter(vm_offset_t va, vm_paddr_t pa)
 1293 {
 1294         pt_entry_t *pte;
 1295 
 1296         pte = vtopte(va);
 1297         pte_store(pte, pa | PG_RW | PG_V | PG_G);
 1298 }
 1299 
 1300 static __inline void
 1301 pmap_kenter_attr(vm_offset_t va, vm_paddr_t pa, int mode)
 1302 {
 1303         pt_entry_t *pte;
 1304 
 1305         pte = vtopte(va);
 1306         pte_store(pte, pa | PG_RW | PG_V | PG_G | pmap_cache_bits(mode, 0));
 1307 }
 1308 
 1309 /*
 1310  * Remove a page from the kernel pagetables.
 1311  * Note: not SMP coherent.
 1312  */
 1313 PMAP_INLINE void
 1314 pmap_kremove(vm_offset_t va)
 1315 {
 1316         pt_entry_t *pte;
 1317 
 1318         pte = vtopte(va);
 1319         pte_clear(pte);
 1320 }
 1321 
 1322 /*
 1323  *      Used to map a range of physical addresses into kernel
 1324  *      virtual address space.
 1325  *
 1326  *      The value passed in '*virt' is a suggested virtual address for
 1327  *      the mapping. Architectures which can support a direct-mapped
 1328  *      physical to virtual region can return the appropriate address
 1329  *      within that region, leaving '*virt' unchanged. Other
 1330  *      architectures should map the pages starting at '*virt' and
 1331  *      update '*virt' with the first usable address after the mapped
 1332  *      region.
 1333  */
 1334 vm_offset_t
 1335 pmap_map(vm_offset_t *virt, vm_paddr_t start, vm_paddr_t end, int prot)
 1336 {
 1337         return PHYS_TO_DMAP(start);
 1338 }
 1339 
 1340 
/*
 * Add a list of wired pages to the kva
 * this routine is only used for temporary
 * kernel mappings that do not need to have
 * page modification or references recorded.
 * Note that old mappings are simply written
 * over.  The page *must* be wired.
 * Note: SMP coherent.  Uses a ranged shootdown IPI.
 */
void
pmap_qenter(vm_offset_t sva, vm_page_t *ma, int count)
{
	pt_entry_t *endpte, oldpte, pa, *pte;
	vm_page_t m;

	oldpte = 0;
	pte = vtopte(sva);
	endpte = pte + count;
	while (pte < endpte) {
		m = *ma++;
		pa = VM_PAGE_TO_PHYS(m) | pmap_cache_bits(m->md.pat_mode, 0);
		/* Skip the store if the PTE already maps this page. */
		if ((*pte & (PG_FRAME | PG_PTE_CACHE)) != pa) {
			/* Accumulate old bits to detect prior valid PTEs. */
			oldpte |= *pte;
			pte_store(pte, pa | PG_G | PG_RW | PG_V);
		}
		pte++;
	}
	/*
	 * A TLB shootdown is needed only if some replaced PTE was valid;
	 * newly-populated invalid PTEs cannot be cached in any TLB.
	 */
	if (__predict_false((oldpte & PG_V) != 0))
		pmap_invalidate_range(kernel_pmap, sva, sva + count *
		    PAGE_SIZE);
}
 1372 
 1373 /*
 1374  * This routine tears out page mappings from the
 1375  * kernel -- it is meant only for temporary mappings.
 1376  * Note: SMP coherent.  Uses a ranged shootdown IPI.
 1377  */
 1378 void
 1379 pmap_qremove(vm_offset_t sva, int count)
 1380 {
 1381         vm_offset_t va;
 1382 
 1383         va = sva;
 1384         while (count-- > 0) {
 1385                 pmap_kremove(va);
 1386                 va += PAGE_SIZE;
 1387         }
 1388         pmap_invalidate_range(kernel_pmap, sva, va);
 1389 }
 1390 
 1391 /***************************************************
 1392  * Page table page management routines.....
 1393  ***************************************************/
 1394 static __inline void
 1395 pmap_free_zero_pages(vm_page_t free)
 1396 {
 1397         vm_page_t m;
 1398 
 1399         while (free != NULL) {
 1400                 m = free;
 1401                 free = m->right;
 1402                 /* Preserve the page's PG_ZERO setting. */
 1403                 vm_page_free_toq(m);
 1404         }
 1405 }
 1406 
/*
 * Schedule the specified unused page table page to be freed.  Specifically,
 * add the page to the specified list of pages that will be released to the
 * physical memory manager after the TLB has been updated.  The list is
 * singly linked through m->right.
 */
static __inline void
pmap_add_delayed_free_list(vm_page_t m, vm_page_t *free, boolean_t set_PG_ZERO)
{

	/* Record whether the page's contents are known to be zeroed. */
	if (set_PG_ZERO)
		m->flags |= PG_ZERO;
	else
		m->flags &= ~PG_ZERO;
	m->right = *free;
	*free = m;
}
 1423         
/*
 * Inserts the specified page table page into the specified pmap's collection
 * of idle page table pages.  Each of a pmap's page table pages is responsible
 * for mapping a distinct range of virtual addresses.  The pmap's collection is
 * ordered by this virtual address range.  The collection is a splay tree
 * keyed on pindex, rooted at pm_root and linked via left/right.
 */
static void
pmap_insert_pt_page(pmap_t pmap, vm_page_t mpte)
{
	vm_page_t root;

	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
	root = pmap->pm_root;
	if (root == NULL) {
		/* Empty tree: mpte becomes the sole node. */
		mpte->left = NULL;
		mpte->right = NULL;
	} else {
		/* Splay the closest pindex to the root, then attach. */
		root = vm_page_splay(mpte->pindex, root);
		if (mpte->pindex < root->pindex) {
			mpte->left = root->left;
			mpte->right = root;
			root->left = NULL;
		} else if (mpte->pindex == root->pindex)
			panic("pmap_insert_pt_page: pindex already inserted");
		else {
			mpte->right = root->right;
			mpte->left = root;
			root->right = NULL;
		}
	}
	/* mpte is the new root. */
	pmap->pm_root = mpte;
}
 1456 
/*
 * Looks for a page table page mapping the specified virtual address in the
 * specified pmap's collection of idle page table pages.  Returns NULL if there
 * is no page table page corresponding to the specified virtual address.
 * As a side effect, the splay moves the returned (or nearest) page to
 * the root of the tree.
 */
static vm_page_t
pmap_lookup_pt_page(pmap_t pmap, vm_offset_t va)
{
	vm_page_t mpte;
	vm_pindex_t pindex = pmap_pde_pindex(va);

	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
	if ((mpte = pmap->pm_root) != NULL && mpte->pindex != pindex) {
		mpte = vm_page_splay(pindex, mpte);
		if ((pmap->pm_root = mpte)->pindex != pindex)
			mpte = NULL;
	}
	return (mpte);
}
 1476 
/*
 * Removes the specified page table page from the specified pmap's collection
 * of idle page table pages.  The specified page table page must be a member of
 * the pmap's collection.
 */
static void
pmap_remove_pt_page(pmap_t pmap, vm_page_t mpte)
{
	vm_page_t root;

	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
	/* Splay mpte to the root if it is not already there. */
	if (mpte != pmap->pm_root) {
		root = vm_page_splay(mpte->pindex, pmap->pm_root);
		KASSERT(mpte == root,
		    ("pmap_remove_pt_page: mpte %p is missing from pmap %p",
		    mpte, pmap));
	}
	/* Join mpte's subtrees to form the new tree. */
	if (mpte->left == NULL)
		root = mpte->right;
	else {
		root = vm_page_splay(mpte->pindex, mpte->left);
		root->right = mpte->right;
	}
	pmap->pm_root = root;
}
 1502 
 1503 /*
 1504  * This routine unholds page table pages, and if the hold count
 1505  * drops to zero, then it decrements the wire count.
 1506  */
 1507 static __inline int
 1508 pmap_unwire_pte_hold(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_page_t *free)
 1509 {
 1510 
 1511         --m->wire_count;
 1512         if (m->wire_count == 0)
 1513                 return (_pmap_unwire_pte_hold(pmap, va, m, free));
 1514         else
 1515                 return (0);
 1516 }
 1517 
/*
 * Free a page table page whose wire count has dropped to zero: clear
 * the paging-structure entry that maps it, recursively unhold its
 * parent page table page, and queue the page for release after the TLB
 * shootdown.  Always returns 1.  The pindex encodes the level: PT pages
 * are below NUPDE, PD pages below NUPDE + NUPDPE, PDP pages above.
 */
static int
_pmap_unwire_pte_hold(pmap_t pmap, vm_offset_t va, vm_page_t m,
    vm_page_t *free)
{

	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
	/*
	 * unmap the page table page
	 */
	if (m->pindex >= (NUPDE + NUPDPE)) {
		/* PDP page */
		pml4_entry_t *pml4;
		pml4 = pmap_pml4e(pmap, va);
		*pml4 = 0;
	} else if (m->pindex >= NUPDE) {
		/* PD page */
		pdp_entry_t *pdp;
		pdp = pmap_pdpe(pmap, va);
		*pdp = 0;
	} else {
		/* PTE page */
		pd_entry_t *pd;
		pd = pmap_pde(pmap, va);
		*pd = 0;
	}
	pmap_resident_count_dec(pmap, 1);
	if (m->pindex < NUPDE) {
		/* We just released a PT, unhold the matching PD */
		vm_page_t pdpg;

		pdpg = PHYS_TO_VM_PAGE(*pmap_pdpe(pmap, va) & PG_FRAME);
		pmap_unwire_pte_hold(pmap, va, pdpg, free);
	}
	if (m->pindex >= NUPDE && m->pindex < (NUPDE + NUPDPE)) {
		/* We just released a PD, unhold the matching PDP */
		vm_page_t pdppg;

		pdppg = PHYS_TO_VM_PAGE(*pmap_pml4e(pmap, va) & PG_FRAME);
		pmap_unwire_pte_hold(pmap, va, pdppg, free);
	}

	/*
	 * This is a release store so that the ordinary store unmapping
	 * the page table page is globally performed before TLB shoot-
	 * down is begun.
	 */
	atomic_subtract_rel_int(&cnt.v_wire_count, 1);

	/*
	 * Put page on a list so that it is released after
	 * *ALL* TLB shootdown is done
	 */
	pmap_add_delayed_free_list(m, free, TRUE);

	return (1);
}
 1574 
 1575 /*
 1576  * After removing a page table entry, this routine is used to
 1577  * conditionally free the page, and manage the hold/wire counts.
 1578  */
 1579 static int
 1580 pmap_unuse_pt(pmap_t pmap, vm_offset_t va, pd_entry_t ptepde, vm_page_t *free)
 1581 {
 1582         vm_page_t mpte;
 1583 
 1584         if (va >= VM_MAXUSER_ADDRESS)
 1585                 return (0);
 1586         KASSERT(ptepde != 0, ("pmap_unuse_pt: ptepde != 0"));
 1587         mpte = PHYS_TO_VM_PAGE(ptepde & PG_FRAME);
 1588         return (pmap_unwire_pte_hold(pmap, va, mpte, free));
 1589 }
 1590 
/*
 * Initialize the kernel's pmap (pmap 0), reusing the PML4 page that was
 * constructed during bootstrap (KPML4phys), and make it the current
 * pmap on this CPU.
 */
void
pmap_pinit0(pmap_t pmap)
{

	PMAP_LOCK_INIT(pmap);
	/* Access the bootstrap PML4 through the direct map. */
	pmap->pm_pml4 = (pml4_entry_t *)PHYS_TO_DMAP(KPML4phys);
	pmap->pm_root = NULL;
	CPU_ZERO(&pmap->pm_active);
	PCPU_SET(curpmap, pmap);
	TAILQ_INIT(&pmap->pm_pvchunk);
	bzero(&pmap->pm_stats, sizeof pmap->pm_stats);
}
 1603 
/*
 * Initialize a preallocated and zeroed pmap structure,
 * such as one in a vmspace structure.  Returns 1 (success); the
 * allocation loop waits rather than failing.
 */
int
pmap_pinit(pmap_t pmap)
{
	vm_page_t pml4pg;
	int i;

	PMAP_LOCK_INIT(pmap);

	/*
	 * allocate the page directory page, sleeping in VM_WAIT until a
	 * page becomes available
	 */
	while ((pml4pg = vm_page_alloc(NULL, 0, VM_ALLOC_NORMAL |
	    VM_ALLOC_NOOBJ | VM_ALLOC_WIRED | VM_ALLOC_ZERO)) == NULL)
		VM_WAIT;

	pmap->pm_pml4 = (pml4_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(pml4pg));

	/* Zero the page unless the allocator already did. */
	if ((pml4pg->flags & PG_ZERO) == 0)
		pagezero(pmap->pm_pml4);

	/* Wire in kernel global address entries. */
	pmap->pm_pml4[KPML4I] = KPDPphys | PG_RW | PG_V | PG_U;
	for (i = 0; i < NDMPML4E; i++) {
		pmap->pm_pml4[DMPML4I + i] = (DMPDPphys + (i << PAGE_SHIFT)) |
		    PG_RW | PG_V | PG_U;
	}

	/* install self-referential address mapping entry(s) */
	pmap->pm_pml4[PML4PML4I] = VM_PAGE_TO_PHYS(pml4pg) | PG_V | PG_RW | PG_A | PG_M;

	pmap->pm_root = NULL;
	CPU_ZERO(&pmap->pm_active);
	TAILQ_INIT(&pmap->pm_pvchunk);
	bzero(&pmap->pm_stats, sizeof pmap->pm_stats);

	return (1);
}
 1645 
 1646 /*
 1647  * this routine is called if the page table page is not
 1648  * mapped correctly.
 1649  *
 1650  * Note: If a page allocation fails at page table level two or three,
 1651  * one or two pages may be held during the wait, only to be released
 1652  * afterwards.  This conservative approach is easily argued to avoid
 1653  * race conditions.
 1654  */
 1655 static vm_page_t
 1656 _pmap_allocpte(pmap_t pmap, vm_pindex_t ptepindex, int flags)
 1657 {
 1658         vm_page_t m, pdppg, pdpg;
 1659 
 1660         KASSERT((flags & (M_NOWAIT | M_WAITOK)) == M_NOWAIT ||
 1661             (flags & (M_NOWAIT | M_WAITOK)) == M_WAITOK,
 1662             ("_pmap_allocpte: flags is neither M_NOWAIT nor M_WAITOK"));
 1663 
 1664         PMAP_LOCK_ASSERT(pmap, MA_OWNED);
 1665         /*
 1666          * Allocate a page table page.
 1667          */
 1668         if ((m = vm_page_alloc(NULL, ptepindex, VM_ALLOC_NOOBJ |
 1669             VM_ALLOC_WIRED | VM_ALLOC_ZERO)) == NULL) {
 1670                 if (flags & M_WAITOK) {
 1671                         PMAP_UNLOCK(pmap);
 1672                         rw_wunlock(&pvh_global_lock);
 1673                         VM_WAIT;
 1674                         rw_wlock(&pvh_global_lock);
 1675                         PMAP_LOCK(pmap);
 1676                 }
 1677 
 1678                 /*
 1679                  * Indicate the need to retry.  While waiting, the page table
 1680                  * page may have been allocated.
 1681                  */
 1682                 return (NULL);
 1683         }
 1684         if ((m->flags & PG_ZERO) == 0)
 1685                 pmap_zero_page(m);
 1686 
 1687         /*
 1688          * Map the pagetable page into the process address space, if
 1689          * it isn't already there.
 1690          */
 1691 
 1692         if (ptepindex >= (NUPDE + NUPDPE)) {
 1693                 pml4_entry_t *pml4;
 1694                 vm_pindex_t pml4index;
 1695 
 1696                 /* Wire up a new PDPE page */
 1697                 pml4index = ptepindex - (NUPDE + NUPDPE);
 1698                 pml4 = &pmap->pm_pml4[pml4index];
 1699                 *pml4 = VM_PAGE_TO_PHYS(m) | PG_U | PG_RW | PG_V | PG_A | PG_M;
 1700 
 1701         } else if (ptepindex >= NUPDE) {
 1702                 vm_pindex_t pml4index;
 1703                 vm_pindex_t pdpindex;
 1704                 pml4_entry_t *pml4;
 1705                 pdp_entry_t *pdp;
 1706 
 1707                 /* Wire up a new PDE page */
 1708                 pdpindex = ptepindex - NUPDE;
 1709                 pml4index = pdpindex >> NPML4EPGSHIFT;
 1710 
 1711                 pml4 = &pmap->pm_pml4[pml4index];
 1712                 if ((*pml4 & PG_V) == 0) {
 1713                         /* Have to allocate a new pdp, recurse */
 1714                         if (_pmap_allocpte(pmap, NUPDE + NUPDPE + pml4index,
 1715                             flags) == NULL) {
 1716                                 --m->wire_count;
 1717                                 atomic_subtract_int(&cnt.v_wire_count, 1);
 1718                                 vm_page_free_zero(m);
 1719                                 return (NULL);
 1720                         }
 1721                 } else {
 1722                         /* Add reference to pdp page */
 1723                         pdppg = PHYS_TO_VM_PAGE(*pml4 & PG_FRAME);
 1724                         pdppg->wire_count++;
 1725                 }
 1726                 pdp = (pdp_entry_t *)PHYS_TO_DMAP(*pml4 & PG_FRAME);
 1727 
 1728                 /* Now find the pdp page */
 1729                 pdp = &pdp[pdpindex & ((1ul << NPDPEPGSHIFT) - 1)];
 1730                 *pdp = VM_PAGE_TO_PHYS(m) | PG_U | PG_RW | PG_V | PG_A | PG_M;
 1731 
 1732         } else {
 1733                 vm_pindex_t pml4index;
 1734                 vm_pindex_t pdpindex;
 1735                 pml4_entry_t *pml4;
 1736                 pdp_entry_t *pdp;
 1737                 pd_entry_t *pd;
 1738 
 1739                 /* Wire up a new PTE page */
 1740                 pdpindex = ptepindex >> NPDPEPGSHIFT;
 1741                 pml4index = pdpindex >> NPML4EPGSHIFT;
 1742 
 1743                 /* First, find the pdp and check that its valid. */
 1744                 pml4 = &pmap->pm_pml4[pml4index];
 1745                 if ((*pml4 & PG_V) == 0) {
 1746                         /* Have to allocate a new pd, recurse */
 1747                         if (_pmap_allocpte(pmap, NUPDE + pdpindex,
 1748                             flags) == NULL) {
 1749                                 --m->wire_count;
 1750                                 atomic_subtract_int(&cnt.v_wire_count, 1);
 1751                                 vm_page_free_zero(m);
 1752                                 return (NULL);
 1753                         }
 1754                         pdp = (pdp_entry_t *)PHYS_TO_DMAP(*pml4 & PG_FRAME);
 1755                         pdp = &pdp[pdpindex & ((1ul << NPDPEPGSHIFT) - 1)];
 1756                 } else {
 1757                         pdp = (pdp_entry_t *)PHYS_TO_DMAP(*pml4 & PG_FRAME);
 1758                         pdp = &pdp[pdpindex & ((1ul << NPDPEPGSHIFT) - 1)];
 1759                         if ((*pdp & PG_V) == 0) {
 1760                                 /* Have to allocate a new pd, recurse */
 1761                                 if (_pmap_allocpte(pmap, NUPDE + pdpindex,
 1762                                     flags) == NULL) {
 1763                                         --m->wire_count;
 1764                                         atomic_subtract_int(&cnt.v_wire_count,
 1765                                             1);
 1766                                         vm_page_free_zero(m);
 1767                                         return (NULL);
 1768                                 }
 1769                         } else {
 1770                                 /* Add reference to the pd page */
 1771                                 pdpg = PHYS_TO_VM_PAGE(*pdp & PG_FRAME);
 1772                                 pdpg->wire_count++;
 1773                         }
 1774                 }
 1775                 pd = (pd_entry_t *)PHYS_TO_DMAP(*pdp & PG_FRAME);
 1776 
 1777                 /* Now we know where the page directory page is */
 1778                 pd = &pd[ptepindex & ((1ul << NPDEPGSHIFT) - 1)];
 1779                 *pd = VM_PAGE_TO_PHYS(m) | PG_U | PG_RW | PG_V | PG_A | PG_M;
 1780         }
 1781 
 1782         pmap_resident_count_inc(pmap, 1);
 1783 
 1784         return (m);
 1785 }
 1786 
 1787 static vm_page_t
 1788 pmap_allocpde(pmap_t pmap, vm_offset_t va, int flags)
 1789 {
 1790         vm_pindex_t pdpindex, ptepindex;
 1791         pdp_entry_t *pdpe;
 1792         vm_page_t pdpg;
 1793 
 1794         KASSERT((flags & (M_NOWAIT | M_WAITOK)) == M_NOWAIT ||
 1795             (flags & (M_NOWAIT | M_WAITOK)) == M_WAITOK,
 1796             ("pmap_allocpde: flags is neither M_NOWAIT nor M_WAITOK"));
 1797 retry:
 1798         pdpe = pmap_pdpe(pmap, va);
 1799         if (pdpe != NULL && (*pdpe & PG_V) != 0) {
 1800                 /* Add a reference to the pd page. */
 1801                 pdpg = PHYS_TO_VM_PAGE(*pdpe & PG_FRAME);
 1802                 pdpg->wire_count++;
 1803         } else {
 1804                 /* Allocate a pd page. */
 1805                 ptepindex = pmap_pde_pindex(va);
 1806                 pdpindex = ptepindex >> NPDPEPGSHIFT;
 1807                 pdpg = _pmap_allocpte(pmap, NUPDE + pdpindex, flags);
 1808                 if (pdpg == NULL && (flags & M_WAITOK))
 1809                         goto retry;
 1810         }
 1811         return (pdpg);
 1812 }
 1813 
 1814 static vm_page_t
 1815 pmap_allocpte(pmap_t pmap, vm_offset_t va, int flags)
 1816 {
 1817         vm_pindex_t ptepindex;
 1818         pd_entry_t *pd;
 1819         vm_page_t m;
 1820 
 1821         KASSERT((flags & (M_NOWAIT | M_WAITOK)) == M_NOWAIT ||
 1822             (flags & (M_NOWAIT | M_WAITOK)) == M_WAITOK,
 1823             ("pmap_allocpte: flags is neither M_NOWAIT nor M_WAITOK"));
 1824 
 1825         /*
 1826          * Calculate pagetable page index
 1827          */
 1828         ptepindex = pmap_pde_pindex(va);
 1829 retry:
 1830         /*
 1831          * Get the page directory entry
 1832          */
 1833         pd = pmap_pde(pmap, va);
 1834 
 1835         /*
 1836          * This supports switching from a 2MB page to a
 1837          * normal 4K page.
 1838          */
 1839         if (pd != NULL && (*pd & (PG_PS | PG_V)) == (PG_PS | PG_V)) {
 1840                 if (!pmap_demote_pde(pmap, pd, va)) {
 1841                         /*
 1842                          * Invalidation of the 2MB page mapping may have caused
 1843                          * the deallocation of the underlying PD page.
 1844                          */
 1845                         pd = NULL;
 1846                 }
 1847         }
 1848 
 1849         /*
 1850          * If the page table page is mapped, we just increment the
 1851          * hold count, and activate it.
 1852          */
 1853         if (pd != NULL && (*pd & PG_V) != 0) {
 1854                 m = PHYS_TO_VM_PAGE(*pd & PG_FRAME);
 1855                 m->wire_count++;
 1856         } else {
 1857                 /*
 1858                  * Here if the pte page isn't mapped, or if it has been
 1859                  * deallocated.
 1860                  */
 1861                 m = _pmap_allocpte(pmap, ptepindex, flags);
 1862                 if (m == NULL && (flags & M_WAITOK))
 1863                         goto retry;
 1864         }
 1865         return (m);
 1866 }
 1867 
 1868 
 1869 /***************************************************
 1870  * Pmap allocation/deallocation routines.
 1871  ***************************************************/
 1872 
 1873 /*
 1874  * Release any resources held by the given physical map.
 1875  * Called when a pmap initialized by pmap_pinit is being released.
 1876  * Should only be called if the map contains no valid mappings.
 1877  */
 1878 void
 1879 pmap_release(pmap_t pmap)
 1880 {
 1881         vm_page_t m;
 1882         int i;
 1883 
 1884         KASSERT(pmap->pm_stats.resident_count == 0,
 1885             ("pmap_release: pmap resident count %ld != 0",
 1886             pmap->pm_stats.resident_count));
 1887         KASSERT(pmap->pm_root == NULL,
 1888             ("pmap_release: pmap has reserved page table page(s)"));
 1889 
 1890         m = PHYS_TO_VM_PAGE(pmap->pm_pml4[PML4PML4I] & PG_FRAME);
 1891 
 1892         pmap->pm_pml4[KPML4I] = 0;      /* KVA */
 1893         for (i = 0; i < NDMPML4E; i++)  /* Direct Map */
 1894                 pmap->pm_pml4[DMPML4I + i] = 0;
 1895         pmap->pm_pml4[PML4PML4I] = 0;   /* Recursive Mapping */
 1896 
 1897         m->wire_count--;
 1898         atomic_subtract_int(&cnt.v_wire_count, 1);
 1899         vm_page_free_zero(m);
 1900         PMAP_LOCK_DESTROY(pmap);
 1901 }
 1902 
 1903 static int
 1904 kvm_size(SYSCTL_HANDLER_ARGS)
 1905 {
 1906         unsigned long ksize = VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS;
 1907 
 1908         return sysctl_handle_long(oidp, &ksize, 0, req);
 1909 }
 1910 SYSCTL_PROC(_vm, OID_AUTO, kvm_size, CTLTYPE_LONG|CTLFLAG_RD, 
 1911     0, 0, kvm_size, "LU", "Size of KVM");
 1912 
 1913 static int
 1914 kvm_free(SYSCTL_HANDLER_ARGS)
 1915 {
 1916         unsigned long kfree = VM_MAX_KERNEL_ADDRESS - kernel_vm_end;
 1917 
 1918         return sysctl_handle_long(oidp, &kfree, 0, req);
 1919 }
 1920 SYSCTL_PROC(_vm, OID_AUTO, kvm_free, CTLTYPE_LONG|CTLFLAG_RD, 
 1921     0, 0, kvm_free, "LU", "Amount of KVM free");
 1922 
 1923 /*
 1924  * grow the number of kernel page table entries, if needed
 1925  */
 1926 void
 1927 pmap_growkernel(vm_offset_t addr)
 1928 {
 1929         vm_paddr_t paddr;
 1930         vm_page_t nkpg;
 1931         pd_entry_t *pde, newpdir;
 1932         pdp_entry_t *pdpe;
 1933 
 1934         mtx_assert(&kernel_map->system_mtx, MA_OWNED);
 1935 
 1936         /*
 1937          * Return if "addr" is within the range of kernel page table pages
 1938          * that were preallocated during pmap bootstrap.  Moreover, leave
 1939          * "kernel_vm_end" and the kernel page table as they were.
 1940          *
 1941          * The correctness of this action is based on the following
 1942          * argument: vm_map_findspace() allocates contiguous ranges of the
 1943          * kernel virtual address space.  It calls this function if a range
 1944          * ends after "kernel_vm_end".  If the kernel is mapped between
 1945          * "kernel_vm_end" and "addr", then the range cannot begin at
 1946          * "kernel_vm_end".  In fact, its beginning address cannot be less
 1947          * than the kernel.  Thus, there is no immediate need to allocate
 1948          * any new kernel page table pages between "kernel_vm_end" and
 1949          * "KERNBASE".
 1950          */
 1951         if (KERNBASE < addr && addr <= KERNBASE + NKPT * NBPDR)
 1952                 return;
 1953 
 1954         addr = roundup2(addr, NBPDR);
 1955         if (addr - 1 >= kernel_map->max_offset)
 1956                 addr = kernel_map->max_offset;
 1957         while (kernel_vm_end < addr) {
 1958                 pdpe = pmap_pdpe(kernel_pmap, kernel_vm_end);
 1959                 if ((*pdpe & PG_V) == 0) {
 1960                         /* We need a new PDP entry */
 1961                         nkpg = vm_page_alloc(NULL, kernel_vm_end >> PDPSHIFT,
 1962                             VM_ALLOC_INTERRUPT | VM_ALLOC_NOOBJ |
 1963                             VM_ALLOC_WIRED | VM_ALLOC_ZERO);
 1964                         if (nkpg == NULL)
 1965                                 panic("pmap_growkernel: no memory to grow kernel");
 1966                         if ((nkpg->flags & PG_ZERO) == 0)
 1967                                 pmap_zero_page(nkpg);
 1968                         paddr = VM_PAGE_TO_PHYS(nkpg);
 1969                         *pdpe = (pdp_entry_t)
 1970                                 (paddr | PG_V | PG_RW | PG_A | PG_M);
 1971                         continue; /* try again */
 1972                 }
 1973                 pde = pmap_pdpe_to_pde(pdpe, kernel_vm_end);
 1974                 if ((*pde & PG_V) != 0) {
 1975                         kernel_vm_end = (kernel_vm_end + NBPDR) & ~PDRMASK;
 1976                         if (kernel_vm_end - 1 >= kernel_map->max_offset) {
 1977                                 kernel_vm_end = kernel_map->max_offset;
 1978                                 break;                       
 1979                         }
 1980                         continue;
 1981                 }
 1982 
 1983                 nkpg = vm_page_alloc(NULL, pmap_pde_pindex(kernel_vm_end),
 1984                     VM_ALLOC_INTERRUPT | VM_ALLOC_NOOBJ | VM_ALLOC_WIRED |
 1985                     VM_ALLOC_ZERO);
 1986                 if (nkpg == NULL)
 1987                         panic("pmap_growkernel: no memory to grow kernel");
 1988                 if ((nkpg->flags & PG_ZERO) == 0)
 1989                         pmap_zero_page(nkpg);
 1990                 paddr = VM_PAGE_TO_PHYS(nkpg);
 1991                 newpdir = (pd_entry_t) (paddr | PG_V | PG_RW | PG_A | PG_M);
 1992                 pde_store(pde, newpdir);
 1993 
 1994                 kernel_vm_end = (kernel_vm_end + NBPDR) & ~PDRMASK;
 1995                 if (kernel_vm_end - 1 >= kernel_map->max_offset) {
 1996                         kernel_vm_end = kernel_map->max_offset;
 1997                         break;                       
 1998                 }
 1999         }
 2000 }
 2001 
 2002 
 2003 /***************************************************
 2004  * page management routines.
 2005  ***************************************************/
 2006 
/* A pv_chunk must occupy exactly one page so chunk<->page math works. */
CTASSERT(sizeof(struct pv_chunk) == PAGE_SIZE);
CTASSERT(_NPCM == 3);
CTASSERT(_NPCPV == 168);

/*
 * Recover the containing pv_chunk from a pv_entry pointer.  Chunks are
 * page-sized and page-aligned, so masking off the page offset suffices.
 */
static __inline struct pv_chunk *
pv_to_chunk(pv_entry_t pv)
{

        return ((struct pv_chunk *)((uintptr_t)pv & ~(uintptr_t)PAGE_MASK));
}

/* The pmap that owns the given pv entry. */
#define PV_PMAP(pv) (pv_to_chunk(pv)->pc_pmap)

/*
 * Free-bitmap values for a chunk with all _NPCPV (64 + 64 + 40 = 168)
 * pv entries free; a set bit means the corresponding entry is free.
 */
#define PC_FREE0        0xfffffffffffffffful
#define PC_FREE1        0xfffffffffffffffful
#define PC_FREE2        0x000000fffffffffful

static const uint64_t pc_freemask[_NPCM] = { PC_FREE0, PC_FREE1, PC_FREE2 };
 2025 
/* Instantaneous system-wide count of allocated pv entries. */
SYSCTL_LONG(_vm_pmap, OID_AUTO, pv_entry_count, CTLFLAG_RD, &pv_entry_count, 0,
        "Current number of pv entries");

#ifdef PV_STATS
/* Extra pv chunk/entry bookkeeping, compiled in only with PV_STATS. */
static int pc_chunk_count, pc_chunk_allocs, pc_chunk_frees, pc_chunk_tryfail;

SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_count, CTLFLAG_RD, &pc_chunk_count, 0,
        "Current number of pv entry chunks");
SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_allocs, CTLFLAG_RD, &pc_chunk_allocs, 0,
        "Current number of pv entry chunks allocated");
SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_frees, CTLFLAG_RD, &pc_chunk_frees, 0,
        "Current number of pv entry chunks frees");
SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_tryfail, CTLFLAG_RD, &pc_chunk_tryfail, 0,
        "Number of times tried to get a chunk page but failed.");

static long pv_entry_frees, pv_entry_allocs;
static int pv_entry_spare;

SYSCTL_LONG(_vm_pmap, OID_AUTO, pv_entry_frees, CTLFLAG_RD, &pv_entry_frees, 0,
        "Current number of pv entry frees");
SYSCTL_LONG(_vm_pmap, OID_AUTO, pv_entry_allocs, CTLFLAG_RD, &pv_entry_allocs, 0,
        "Current number of pv entry allocs");
SYSCTL_INT(_vm_pmap, OID_AUTO, pv_entry_spare, CTLFLAG_RD, &pv_entry_spare, 0,
        "Current number of spare pv entries");
#endif
 2051 
 2052 /*
 2053  * We are in a serious low memory condition.  Resort to
 2054  * drastic measures to free some pages so we can allocate
 2055  * another pv entry chunk.
 2056  *
 2057  * We do not, however, unmap 2mpages because subsequent accesses will
 2058  * allocate per-page pv entries until repromotion occurs, thereby
 2059  * exacerbating the shortage of free pv entries.
 2060  */
 2061 static vm_page_t
 2062 pmap_pv_reclaim(pmap_t locked_pmap)
 2063 {
 2064         struct pch newtail;
 2065         struct pv_chunk *pc;
 2066         struct md_page *pvh;
 2067         pd_entry_t *pde;
 2068         pmap_t pmap;
 2069         pt_entry_t *pte, tpte;
 2070         pv_entry_t pv;
 2071         vm_offset_t va;
 2072         vm_page_t free, m, m_pc;
 2073         uint64_t inuse;
 2074         int bit, field, freed;
 2075         
 2076         rw_assert(&pvh_global_lock, RA_WLOCKED);
 2077         PMAP_LOCK_ASSERT(locked_pmap, MA_OWNED);
 2078         pmap = NULL;
 2079         free = m_pc = NULL;
 2080         TAILQ_INIT(&newtail);
 2081         while ((pc = TAILQ_FIRST(&pv_chunks)) != NULL && free == NULL) {
 2082                 TAILQ_REMOVE(&pv_chunks, pc, pc_lru);
 2083                 if (pmap != pc->pc_pmap) {
 2084                         if (pmap != NULL) {
 2085                                 pmap_invalidate_all(pmap);
 2086                                 if (pmap != locked_pmap)
 2087                                         PMAP_UNLOCK(pmap);
 2088                         }
 2089                         pmap = pc->pc_pmap;
 2090                         /* Avoid deadlock and lock recursion. */
 2091                         if (pmap > locked_pmap)
 2092                                 PMAP_LOCK(pmap);
 2093                         else if (pmap != locked_pmap && !PMAP_TRYLOCK(pmap)) {
 2094                                 pmap = NULL;
 2095                                 TAILQ_INSERT_TAIL(&newtail, pc, pc_lru);
 2096                                 continue;
 2097                         }
 2098                 }
 2099 
 2100                 /*
 2101                  * Destroy every non-wired, 4 KB page mapping in the chunk.
 2102                  */
 2103                 freed = 0;
 2104                 for (field = 0; field < _NPCM; field++) {
 2105                         for (inuse = ~pc->pc_map[field] & pc_freemask[field];
 2106                             inuse != 0; inuse &= ~(1UL << bit)) {
 2107                                 bit = bsfq(inuse);
 2108                                 pv = &pc->pc_pventry[field * 64 + bit];
 2109                                 va = pv->pv_va;
 2110                                 pde = pmap_pde(pmap, va);
 2111                                 if ((*pde & PG_PS) != 0)
 2112                                         continue;
 2113                                 pte = pmap_pde_to_pte(pde, va);
 2114                                 if ((*pte & PG_W) != 0)
 2115                                         continue;
 2116                                 tpte = pte_load_clear(pte);
 2117                                 if ((tpte & PG_G) != 0)
 2118                                         pmap_invalidate_page(pmap, va);
 2119                                 m = PHYS_TO_VM_PAGE(tpte & PG_FRAME);
 2120                                 if ((tpte & (PG_M | PG_RW)) == (PG_M | PG_RW))
 2121                                         vm_page_dirty(m);
 2122                                 if ((tpte & PG_A) != 0)
 2123                                         vm_page_aflag_set(m, PGA_REFERENCED);
 2124                                 TAILQ_REMOVE(&m->md.pv_list, pv, pv_list);
 2125                                 if (TAILQ_EMPTY(&m->md.pv_list) &&
 2126                                     (m->flags & PG_FICTITIOUS) == 0) {
 2127                                         pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
 2128                                         if (TAILQ_EMPTY(&pvh->pv_list)) {
 2129                                                 vm_page_aflag_clear(m,
 2130                                                     PGA_WRITEABLE);
 2131                                         }
 2132                                 }
 2133                                 pc->pc_map[field] |= 1UL << bit;
 2134                                 pmap_unuse_pt(pmap, va, *pde, &free);   
 2135                                 freed++;
 2136                         }
 2137                 }
 2138                 if (freed == 0) {
 2139                         TAILQ_INSERT_TAIL(&newtail, pc, pc_lru);
 2140                         continue;
 2141                 }
 2142                 /* Every freed mapping is for a 4 KB page. */
 2143                 pmap_resident_count_dec(pmap, freed);
 2144                 PV_STAT(pv_entry_frees += freed);
 2145                 PV_STAT(pv_entry_spare += freed);
 2146                 pv_entry_count -= freed;
 2147                 TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
 2148                 if (pc->pc_map[0] == PC_FREE0 && pc->pc_map[1] == PC_FREE1 &&
 2149                     pc->pc_map[2] == PC_FREE2) {
 2150                         PV_STAT(pv_entry_spare -= _NPCPV);
 2151                         PV_STAT(pc_chunk_count--);
 2152                         PV_STAT(pc_chunk_frees++);
 2153                         /* Entire chunk is free; return it. */
 2154                         m_pc = PHYS_TO_VM_PAGE(DMAP_TO_PHYS((vm_offset_t)pc));
 2155                         dump_drop_page(m_pc->phys_addr);
 2156                         break;
 2157                 }
 2158                 TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc, pc_list);
 2159                 TAILQ_INSERT_TAIL(&newtail, pc, pc_lru);
 2160                 /* One freed pv entry in locked_pmap is sufficient. */
 2161                 if (pmap == locked_pmap)
 2162                         break;
 2163         }
 2164         TAILQ_CONCAT(&pv_chunks, &newtail, pc_lru);
 2165         if (pmap != NULL) {
 2166                 pmap_invalidate_all(pmap);
 2167                 if (pmap != locked_pmap)
 2168                         PMAP_UNLOCK(pmap);
 2169         }
 2170         if (m_pc == NULL && free != NULL) {
 2171                 m_pc = free;
 2172                 free = m_pc->right;
 2173                 /* Recycle a freed page table page. */
 2174                 m_pc->wire_count = 1;
 2175                 atomic_add_int(&cnt.v_wire_count, 1);
 2176         }
 2177         pmap_free_zero_pages(free);
 2178         return (m_pc);
 2179 }
 2180 
 2181 /*
 2182  * free the pv_entry back to the free list
 2183  */
 2184 static void
 2185 free_pv_entry(pmap_t pmap, pv_entry_t pv)
 2186 {
 2187         struct pv_chunk *pc;
 2188         int idx, field, bit;
 2189 
 2190         rw_assert(&pvh_global_lock, RA_WLOCKED);
 2191         PMAP_LOCK_ASSERT(pmap, MA_OWNED);
 2192         PV_STAT(pv_entry_frees++);
 2193         PV_STAT(pv_entry_spare++);
 2194         pv_entry_count--;
 2195         pc = pv_to_chunk(pv);
 2196         idx = pv - &pc->pc_pventry[0];
 2197         field = idx / 64;
 2198         bit = idx % 64;
 2199         pc->pc_map[field] |= 1ul << bit;
 2200         if (pc->pc_map[0] != PC_FREE0 || pc->pc_map[1] != PC_FREE1 ||
 2201             pc->pc_map[2] != PC_FREE2) {
 2202                 /* 98% of the time, pc is already at the head of the list. */
 2203                 if (__predict_false(pc != TAILQ_FIRST(&pmap->pm_pvchunk))) {
 2204                         TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
 2205                         TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc, pc_list);
 2206                 }
 2207                 return;
 2208         }
 2209         TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
 2210         free_pv_chunk(pc);
 2211 }
 2212 
 2213 static void
 2214 free_pv_chunk(struct pv_chunk *pc)
 2215 {
 2216         vm_page_t m;
 2217 
 2218         TAILQ_REMOVE(&pv_chunks, pc, pc_lru);
 2219         PV_STAT(pv_entry_spare -= _NPCPV);
 2220         PV_STAT(pc_chunk_count--);
 2221         PV_STAT(pc_chunk_frees++);
 2222         /* entire chunk is free, return it */
 2223         m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS((vm_offset_t)pc));
 2224         dump_drop_page(m->phys_addr);
 2225         vm_page_unwire(m, 0);
 2226         vm_page_free(m);
 2227 }
 2228 
 2229 /*
 2230  * get a new pv_entry, allocating a block from the system
 2231  * when needed.
 2232  */
 2233 static pv_entry_t
 2234 get_pv_entry(pmap_t pmap, boolean_t try)
 2235 {
 2236         int bit, field;
 2237         pv_entry_t pv;
 2238         struct pv_chunk *pc;
 2239         vm_page_t m;
 2240 
 2241         rw_assert(&pvh_global_lock, RA_WLOCKED);
 2242         PMAP_LOCK_ASSERT(pmap, MA_OWNED);
 2243         PV_STAT(pv_entry_allocs++);
 2244 retry:
 2245         pc = TAILQ_FIRST(&pmap->pm_pvchunk);
 2246         if (pc != NULL) {
 2247                 for (field = 0; field < _NPCM; field++) {
 2248                         if (pc->pc_map[field]) {
 2249                                 bit = bsfq(pc->pc_map[field]);
 2250                                 break;
 2251                         }
 2252                 }
 2253                 if (field < _NPCM) {
 2254                         pv = &pc->pc_pventry[field * 64 + bit];
 2255                         pc->pc_map[field] &= ~(1ul << bit);
 2256                         /* If this was the last item, move it to tail */
 2257                         if (pc->pc_map[0] == 0 && pc->pc_map[1] == 0 &&
 2258                             pc->pc_map[2] == 0) {
 2259                                 TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
 2260                                 TAILQ_INSERT_TAIL(&pmap->pm_pvchunk, pc,
 2261                                     pc_list);
 2262                         }
 2263                         pv_entry_count++;
 2264                         PV_STAT(pv_entry_spare--);
 2265                         return (pv);
 2266                 }
 2267         }
 2268         /* No free items, allocate another chunk */
 2269         m = vm_page_alloc(NULL, 0, VM_ALLOC_NORMAL | VM_ALLOC_NOOBJ |
 2270             VM_ALLOC_WIRED);
 2271         if (m == NULL) {
 2272                 if (try) {
 2273                         PV_STAT(pc_chunk_tryfail++);
 2274                         return (NULL);
 2275                 }
 2276                 m = pmap_pv_reclaim(pmap);
 2277                 if (m == NULL)
 2278                         goto retry;
 2279         }
 2280         PV_STAT(pc_chunk_count++);
 2281         PV_STAT(pc_chunk_allocs++);
 2282         dump_add_page(m->phys_addr);
 2283         pc = (void *)PHYS_TO_DMAP(m->phys_addr);
 2284         pc->pc_pmap = pmap;
 2285         pc->pc_map[0] = PC_FREE0 & ~1ul;        /* preallocated bit 0 */
 2286         pc->pc_map[1] = PC_FREE1;
 2287         pc->pc_map[2] = PC_FREE2;
 2288         TAILQ_INSERT_TAIL(&pv_chunks, pc, pc_lru);
 2289         pv = &pc->pc_pventry[0];
 2290         TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc, pc_list);
 2291         pv_entry_count++;
 2292         PV_STAT(pv_entry_spare += _NPCPV - 1);
 2293         return (pv);
 2294 }
 2295 
 2296 /*
 2297  * First find and then remove the pv entry for the specified pmap and virtual
 2298  * address from the specified pv list.  Returns the pv entry if found and NULL
 2299  * otherwise.  This operation can be performed on pv lists for either 4KB or
 2300  * 2MB page mappings.
 2301  */
 2302 static __inline pv_entry_t
 2303 pmap_pvh_remove(struct md_page *pvh, pmap_t pmap, vm_offset_t va)
 2304 {
 2305         pv_entry_t pv;
 2306 
 2307         rw_assert(&pvh_global_lock, RA_WLOCKED);
 2308         TAILQ_FOREACH(pv, &pvh->pv_list, pv_list) {
 2309                 if (pmap == PV_PMAP(pv) && va == pv->pv_va) {
 2310                         TAILQ_REMOVE(&pvh->pv_list, pv, pv_list);
 2311                         break;
 2312                 }
 2313         }
 2314         return (pv);
 2315 }
 2316 
/*
 * After demotion from a 2MB page mapping to 512 4KB page mappings,
 * destroy the pv entry for the 2MB page mapping and reinstantiate the pv
 * entries for each of the 4KB page mappings.
 *
 * "pa" is the 2MB-aligned physical address of the superpage and "va" may
 * be any address within the 2MB region; it is truncated below.  Requires
 * the pv list lock (write) to be held.
 */
static void
pmap_pv_demote_pde(pmap_t pmap, vm_offset_t va, vm_paddr_t pa)
{
        struct md_page *pvh;
        pv_entry_t pv;
        vm_offset_t va_last;
        vm_page_t m;

        rw_assert(&pvh_global_lock, RA_WLOCKED);
        KASSERT((pa & PDRMASK) == 0,
            ("pmap_pv_demote_pde: pa is not 2mpage aligned"));

        /*
         * Transfer the 2mpage's pv entry for this mapping to the first
         * page's pv list.  This avoids allocating a pv entry for the first
         * 4KB page.
         */
        pvh = pa_to_pvh(pa);
        va = trunc_2mpage(va);
        pv = pmap_pvh_remove(pvh, pmap, va);
        KASSERT(pv != NULL, ("pmap_pv_demote_pde: pv not found"));
        m = PHYS_TO_VM_PAGE(pa);
        TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_list);
        /* Instantiate the remaining NPTEPG - 1 pv entries. */
        /* va_last is the base address of the last 4KB page in the 2mpage. */
        va_last = va + NBPDR - PAGE_SIZE;
        do {
                m++;
                KASSERT((m->oflags & VPO_UNMANAGED) == 0,
                    ("pmap_pv_demote_pde: page %p is not managed", m));
                va += PAGE_SIZE;
                pmap_insert_entry(pmap, va, m);
        } while (va < va_last);
}
 2354 
/*
 * After promotion from 512 4KB page mappings to a single 2MB page mapping,
 * replace the many pv entries for the 4KB page mappings by a single pv entry
 * for the 2MB page mapping.
 *
 * "pa" is the 2MB-aligned physical address of the superpage and "va" may
 * be any address within the 2MB region; it is truncated below.  Requires
 * the pv list lock (write) to be held.
 */
static void
pmap_pv_promote_pde(pmap_t pmap, vm_offset_t va, vm_paddr_t pa)
{
        struct md_page *pvh;
        pv_entry_t pv;
        vm_offset_t va_last;
        vm_page_t m;

        rw_assert(&pvh_global_lock, RA_WLOCKED);
        KASSERT((pa & PDRMASK) == 0,
            ("pmap_pv_promote_pde: pa is not 2mpage aligned"));

        /*
         * Transfer the first page's pv entry for this mapping to the
         * 2mpage's pv list.  Aside from avoiding the cost of a call
         * to get_pv_entry(), a transfer avoids the possibility that
         * get_pv_entry() calls pmap_collect() and that pmap_collect()
         * removes one of the mappings that is being promoted.
         */
        m = PHYS_TO_VM_PAGE(pa);
        va = trunc_2mpage(va);
        pv = pmap_pvh_remove(&m->md, pmap, va);
        KASSERT(pv != NULL, ("pmap_pv_promote_pde: pv not found"));
        pvh = pa_to_pvh(pa);
        TAILQ_INSERT_TAIL(&pvh->pv_list, pv, pv_list);
        /* Free the remaining NPTEPG - 1 pv entries. */
        /* va_last is the base address of the last 4KB page in the 2mpage. */
        va_last = va + NBPDR - PAGE_SIZE;
        do {
                m++;
                va += PAGE_SIZE;
                pmap_pvh_free(&m->md, pmap, va);
        } while (va < va_last);
}
 2393 
 2394 /*
 2395  * First find and then destroy the pv entry for the specified pmap and virtual
 2396  * address.  This operation can be performed on pv lists for either 4KB or 2MB
 2397  * page mappings.
 2398  */
 2399 static void
 2400 pmap_pvh_free(struct md_page *pvh, pmap_t pmap, vm_offset_t va)
 2401 {
 2402         pv_entry_t pv;
 2403 
 2404         pv = pmap_pvh_remove(pvh, pmap, va);
 2405         KASSERT(pv != NULL, ("pmap_pvh_free: pv not found"));
 2406         free_pv_entry(pmap, pv);
 2407 }
 2408 
 2409 static void
 2410 pmap_remove_entry(pmap_t pmap, vm_page_t m, vm_offset_t va)
 2411 {
 2412         struct md_page *pvh;
 2413 
 2414         rw_assert(&pvh_global_lock, RA_WLOCKED);
 2415         pmap_pvh_free(&m->md, pmap, va);
 2416         if (TAILQ_EMPTY(&m->md.pv_list) && (m->flags & PG_FICTITIOUS) == 0) {
 2417                 pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
 2418                 if (TAILQ_EMPTY(&pvh->pv_list))
 2419                         vm_page_aflag_clear(m, PGA_WRITEABLE);
 2420         }
 2421 }
 2422 
 2423 /*
 2424  * Create a pv entry for page at pa for
 2425  * (pmap, va).
 2426  */
 2427 static void
 2428 pmap_insert_entry(pmap_t pmap, vm_offset_t va, vm_page_t m)
 2429 {
 2430         pv_entry_t pv;
 2431 
 2432         rw_assert(&pvh_global_lock, RA_WLOCKED);
 2433         PMAP_LOCK_ASSERT(pmap, MA_OWNED);
 2434         pv = get_pv_entry(pmap, FALSE);
 2435         pv->pv_va = va;
 2436         TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_list);
 2437 }
 2438 
 2439 /*
 2440  * Conditionally create a pv entry.
 2441  */
 2442 static boolean_t
 2443 pmap_try_insert_pv_entry(pmap_t pmap, vm_offset_t va, vm_page_t m)
 2444 {
 2445         pv_entry_t pv;
 2446 
 2447         rw_assert(&pvh_global_lock, RA_WLOCKED);
 2448         PMAP_LOCK_ASSERT(pmap, MA_OWNED);
 2449         if ((pv = get_pv_entry(pmap, TRUE)) != NULL) {
 2450                 pv->pv_va = va;
 2451                 TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_list);
 2452                 return (TRUE);
 2453         } else
 2454                 return (FALSE);
 2455 }
 2456 
 2457 /*
 2458  * Create the pv entry for a 2MB page mapping.
 2459  */
 2460 static boolean_t
 2461 pmap_pv_insert_pde(pmap_t pmap, vm_offset_t va, vm_paddr_t pa)
 2462 {
 2463         struct md_page *pvh;
 2464         pv_entry_t pv;
 2465 
 2466         rw_assert(&pvh_global_lock, RA_WLOCKED);
 2467         if ((pv = get_pv_entry(pmap, TRUE)) != NULL) {
 2468                 pv->pv_va = va;
 2469                 pvh = pa_to_pvh(pa);
 2470                 TAILQ_INSERT_TAIL(&pvh->pv_list, pv, pv_list);
 2471                 return (TRUE);
 2472         } else
 2473                 return (FALSE);
 2474 }
 2475 
 2476 /*
 2477  * Fills a page table page with mappings to consecutive physical pages.
 2478  */
 2479 static void
 2480 pmap_fill_ptp(pt_entry_t *firstpte, pt_entry_t newpte)
 2481 {
 2482         pt_entry_t *pte;
 2483 
 2484         for (pte = firstpte; pte < firstpte + NPTEPG; pte++) {
 2485                 *pte = newpte;
 2486                 newpte += PAGE_SIZE;
 2487         }
 2488 }
 2489 
/*
 * Tries to demote a 2MB page mapping.  If demotion fails, the 2MB page
 * mapping is invalidated.
 *
 * Returns TRUE if the PDE now references a filled page table page and
 * FALSE if the 2MB mapping was destroyed instead (only possible when the
 * mapping was never accessed or a page table page could not be allocated).
 */
static boolean_t
pmap_demote_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t va)
{
        pd_entry_t newpde, oldpde;
        pt_entry_t *firstpte, newpte;
        vm_paddr_t mptepa;
        vm_page_t free, mpte;

        PMAP_LOCK_ASSERT(pmap, MA_OWNED);
        oldpde = *pde;
        KASSERT((oldpde & (PG_PS | PG_V)) == (PG_PS | PG_V),
            ("pmap_demote_pde: oldpde is missing PG_PS and/or PG_V"));
        /* Reuse the saved page table page for this 2MB range if one exists. */
        mpte = pmap_lookup_pt_page(pmap, va);
        if (mpte != NULL)
                pmap_remove_pt_page(pmap, mpte);
        else {
                KASSERT((oldpde & PG_W) == 0,
                    ("pmap_demote_pde: page table page for a wired mapping"
                    " is missing"));

                /*
                 * Invalidate the 2MB page mapping and return "failure" if the
                 * mapping was never accessed or the allocation of the new
                 * page table page fails.  If the 2MB page mapping belongs to
                 * the direct map region of the kernel's address space, then
                 * the page allocation request specifies the highest possible
                 * priority (VM_ALLOC_INTERRUPT).  Otherwise, the priority is
                 * normal.  Page table pages are preallocated for every other
                 * part of the kernel address space, so the direct map region
                 * is the only part of the kernel address space that must be
                 * handled here.
                 */
                if ((oldpde & PG_A) == 0 || (mpte = vm_page_alloc(NULL,
                    pmap_pde_pindex(va), (va >= DMAP_MIN_ADDRESS && va <
                    DMAP_MAX_ADDRESS ? VM_ALLOC_INTERRUPT : VM_ALLOC_NORMAL) |
                    VM_ALLOC_NOOBJ | VM_ALLOC_WIRED)) == NULL) {
                        free = NULL;
                        pmap_remove_pde(pmap, pde, trunc_2mpage(va), &free);
                        pmap_invalidate_page(pmap, trunc_2mpage(va));
                        pmap_free_zero_pages(free);
                        CTR2(KTR_PMAP, "pmap_demote_pde: failure for va %#lx"
                            " in pmap %p", va, pmap);
                        return (FALSE);
                }
                /* The new page table page is counted against user pmaps. */
                if (va < VM_MAXUSER_ADDRESS)
                        pmap_resident_count_inc(pmap, 1);
        }
        mptepa = VM_PAGE_TO_PHYS(mpte);
        /* Access the new page table page through the direct map. */
        firstpte = (pt_entry_t *)PHYS_TO_DMAP(mptepa);
        newpde = mptepa | PG_M | PG_A | (oldpde & PG_U) | PG_RW | PG_V;
        KASSERT((oldpde & PG_A) != 0,
            ("pmap_demote_pde: oldpde is missing PG_A"));
        KASSERT((oldpde & (PG_M | PG_RW)) != PG_RW,
            ("pmap_demote_pde: oldpde is missing PG_M"));
        newpte = oldpde & ~PG_PS;
        /* The PAT bit sits in different positions in a PDE and a PTE. */
        if ((newpte & PG_PDE_PAT) != 0)
                newpte ^= PG_PDE_PAT | PG_PTE_PAT;

        /*
         * If the page table page is new, initialize it.
         * (A wire_count of 1 is what vm_page_alloc() returned above; a
         * recycled page table page already holds NPTEPG valid entries.)
         */
        if (mpte->wire_count == 1) {
                mpte->wire_count = NPTEPG;
                pmap_fill_ptp(firstpte, newpte);
        }
        KASSERT((*firstpte & PG_FRAME) == (newpte & PG_FRAME),
            ("pmap_demote_pde: firstpte and newpte map different physical"
            " addresses"));

        /*
         * If the mapping has changed attributes, update the page table
         * entries.
         */
        if ((*firstpte & PG_PTE_PROMOTE) != (newpte & PG_PTE_PROMOTE))
                pmap_fill_ptp(firstpte, newpte);

        /*
         * Demote the mapping.  This pmap is locked.  The old PDE has
         * PG_A set.  If the old PDE has PG_RW set, it also has PG_M
         * set.  Thus, there is no danger of a race with another
         * processor changing the setting of PG_A and/or PG_M between
         * the read above and the store below.
         */
        if (workaround_erratum383)
                pmap_update_pde(pmap, va, pde, newpde);
        else
                pde_store(pde, newpde);

        /*
         * Invalidate a stale recursive mapping of the page table page.
         */
        if (va >= VM_MAXUSER_ADDRESS)
                pmap_invalidate_page(pmap, (vm_offset_t)vtopte(va));

        /*
         * Demote the pv entry.  This depends on the earlier demotion
         * of the mapping.  Specifically, the (re)creation of a per-
         * page pv entry might trigger the execution of pmap_collect(),
         * which might reclaim a newly (re)created per-page pv entry
         * and destroy the associated mapping.  In order to destroy
         * the mapping, the PDE must have already changed from mapping
         * the 2mpage to referencing the page table page.
         */
        if ((oldpde & PG_MANAGED) != 0)
                pmap_pv_demote_pde(pmap, va, oldpde & PG_PS_FRAME);

        pmap_pde_demotions++;
        CTR2(KTR_PMAP, "pmap_demote_pde: success for va %#lx"
            " in pmap %p", va, pmap);
        return (TRUE);
}
 2605 
/*
 * pmap_remove_pde: do the things to unmap a superpage in a process
 *
 * Clears the PDE at "pdq", adjusts the wired and resident counts, and, for
 * a managed superpage, destroys its pv entry and pushes the dirty and
 * referenced bits down to each constituent vm_page.  Except for a PG_G
 * mapping, TLB invalidation is left to the caller.  Returns the result of
 * pmap_unuse_pt() on the enclosing page directory page.
 */
static int
pmap_remove_pde(pmap_t pmap, pd_entry_t *pdq, vm_offset_t sva,
    vm_page_t *free)
{
        struct md_page *pvh;
        pd_entry_t oldpde;
        vm_offset_t eva, va;
        vm_page_t m, mpte;

        PMAP_LOCK_ASSERT(pmap, MA_OWNED);
        KASSERT((sva & PDRMASK) == 0,
            ("pmap_remove_pde: sva is not 2mpage aligned"));
        /* Read and clear the PDE in a single operation. */
        oldpde = pte_load_clear(pdq);
        if (oldpde & PG_W)
                pmap->pm_stats.wired_count -= NBPDR / PAGE_SIZE;

        /*
         * Machines that don't support invlpg, also don't support
         * PG_G.
         */
        if (oldpde & PG_G)
                pmap_invalidate_page(kernel_pmap, sva);
        pmap_resident_count_dec(pmap, NBPDR / PAGE_SIZE);
        if (oldpde & PG_MANAGED) {
                pvh = pa_to_pvh(oldpde & PG_PS_FRAME);
                pmap_pvh_free(pvh, pmap, sva);
                /* Propagate dirty/referenced state to all 512 pages. */
                eva = sva + NBPDR;
                for (va = sva, m = PHYS_TO_VM_PAGE(oldpde & PG_PS_FRAME);
                    va < eva; va += PAGE_SIZE, m++) {
                        if ((oldpde & (PG_M | PG_RW)) == (PG_M | PG_RW))
                                vm_page_dirty(m);
                        if (oldpde & PG_A)
                                vm_page_aflag_set(m, PGA_REFERENCED);
                        if (TAILQ_EMPTY(&m->md.pv_list) &&
                            TAILQ_EMPTY(&pvh->pv_list))
                                vm_page_aflag_clear(m, PGA_WRITEABLE);
                }
        }
        if (pmap == kernel_pmap) {
                /*
                 * Kernel page table pages are never freed; reinstall the
                 * page table page by demoting the now-cleared mapping.
                 */
                if (!pmap_demote_pde(pmap, pdq, sva))
                        panic("pmap_remove_pde: failed demotion");
        } else {
                /* Release the saved page table page, if any. */
                mpte = pmap_lookup_pt_page(pmap, sva);
                if (mpte != NULL) {
                        pmap_remove_pt_page(pmap, mpte);
                        pmap_resident_count_dec(pmap, 1);
                        KASSERT(mpte->wire_count == NPTEPG,
                            ("pmap_remove_pde: pte page wire count error"));
                        mpte->wire_count = 0;
                        pmap_add_delayed_free_list(mpte, free, FALSE);
                        atomic_subtract_int(&cnt.v_wire_count, 1);
                }
        }
        return (pmap_unuse_pt(pmap, sva, *pmap_pdpe(pmap, sva), free));
}
 2664 
/*
 * pmap_remove_pte: do the things to unmap a page in a process
 *
 * Clears the PTE at "ptq" for the 4KB mapping at "va", adjusts the wired
 * and resident counts, and, for a managed mapping, transfers the dirty and
 * referenced bits to the vm_page and destroys its pv entry.  TLB
 * invalidation is left to the caller.  Returns the result of
 * pmap_unuse_pt() ("ptepde" identifies the enclosing page table page).
 */
static int
pmap_remove_pte(pmap_t pmap, pt_entry_t *ptq, vm_offset_t va, 
    pd_entry_t ptepde, vm_page_t *free)
{
        pt_entry_t oldpte;
        vm_page_t m;

        PMAP_LOCK_ASSERT(pmap, MA_OWNED);
        /* Read and clear the PTE in a single operation. */
        oldpte = pte_load_clear(ptq);
        if (oldpte & PG_W)
                pmap->pm_stats.wired_count -= 1;
        pmap_resident_count_dec(pmap, 1);
        if (oldpte & PG_MANAGED) {
                m = PHYS_TO_VM_PAGE(oldpte & PG_FRAME);
                /* A writable mapping is only dirty if PG_M is also set. */
                if ((oldpte & (PG_M | PG_RW)) == (PG_M | PG_RW))
                        vm_page_dirty(m);
                if (oldpte & PG_A)
                        vm_page_aflag_set(m, PGA_REFERENCED);
                pmap_remove_entry(pmap, m, va);
        }
        return (pmap_unuse_pt(pmap, va, ptepde, free));
}
 2690 
 2691 /*
 2692  * Remove a single page from a process address space
 2693  */
 2694 static void
 2695 pmap_remove_page(pmap_t pmap, vm_offset_t va, pd_entry_t *pde, vm_page_t *free)
 2696 {
 2697         pt_entry_t *pte;
 2698 
 2699         PMAP_LOCK_ASSERT(pmap, MA_OWNED);
 2700         if ((*pde & PG_V) == 0)
 2701                 return;
 2702         pte = pmap_pde_to_pte(pde, va);
 2703         if ((*pte & PG_V) == 0)
 2704                 return;
 2705         pmap_remove_pte(pmap, pte, va, *pde, free);
 2706         pmap_invalidate_page(pmap, va);
 2707 }
 2708 
/*
 *      Remove the given range of addresses from the specified map.
 *
 *      It is assumed that the start and end are properly
 *      rounded to the page size.
 *
 *      Pages whose last reference is dropped here are collected on the
 *      local "free" list and released by pmap_free_zero_pages() after the
 *      locks are dropped.
 */
void
pmap_remove(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
{
        vm_offset_t va, va_next;
        pml4_entry_t *pml4e;
        pdp_entry_t *pdpe;
        pd_entry_t ptpaddr, *pde;
        pt_entry_t *pte;
        vm_page_t free = NULL;
        int anyvalid;

        /*
         * Perform an unsynchronized read.  This is, however, safe.
         */
        if (pmap->pm_stats.resident_count == 0)
                return;

        /*
         * anyvalid is set when a non-global mapping has been removed and
         * triggers a full TLB invalidation at "out".  PG_G mappings are
         * instead invalidated individually as they are removed.
         */
        anyvalid = 0;

        rw_wlock(&pvh_global_lock);
        PMAP_LOCK(pmap);

        /*
         * special handling of removing one page.  a very
         * common operation and easy to short circuit some
         * code.
         */
        if (sva + PAGE_SIZE == eva) {
                pde = pmap_pde(pmap, sva);
                if (pde && (*pde & PG_PS) == 0) {
                        pmap_remove_page(pmap, sva, pde, &free);
                        goto out;
                }
        }

        for (; sva < eva; sva = va_next) {

                if (pmap->pm_stats.resident_count == 0)
                        break;

                /* Skip an entire 512GB region if the PML4E is invalid. */
                pml4e = pmap_pml4e(pmap, sva);
                if ((*pml4e & PG_V) == 0) {
                        va_next = (sva + NBPML4) & ~PML4MASK;
                        if (va_next < sva)
                                va_next = eva;
                        continue;
                }

                /* Skip an entire 1GB region if the PDPE is invalid. */
                pdpe = pmap_pml4e_to_pdpe(pml4e, sva);
                if ((*pdpe & PG_V) == 0) {
                        va_next = (sva + NBPDP) & ~PDPMASK;
                        if (va_next < sva)
                                va_next = eva;
                        continue;
                }

                /*
                 * Calculate index for next page table.
                 */
                va_next = (sva + NBPDR) & ~PDRMASK;
                if (va_next < sva)
                        va_next = eva;

                pde = pmap_pdpe_to_pde(pdpe, sva);
                ptpaddr = *pde;

                /*
                 * Weed out invalid mappings.
                 */
                if (ptpaddr == 0)
                        continue;

                /*
                 * Check for large page.
                 */
                if ((ptpaddr & PG_PS) != 0) {
                        /*
                         * Are we removing the entire large page?  If not,
                         * demote the mapping and fall through.
                         */
                        if (sva + NBPDR == va_next && eva >= va_next) {
                                /*
                                 * The TLB entry for a PG_G mapping is
                                 * invalidated by pmap_remove_pde().
                                 */
                                if ((ptpaddr & PG_G) == 0)
                                        anyvalid = 1;
                                pmap_remove_pde(pmap, pde, sva, &free);
                                continue;
                        } else if (!pmap_demote_pde(pmap, pde, sva)) {
                                /* The large page mapping was destroyed. */
                                continue;
                        } else
                                ptpaddr = *pde;
                }

                /*
                 * Limit our scan to either the end of the va represented
                 * by the current page table page, or to the end of the
                 * range being removed.
                 */
                if (va_next > eva)
                        va_next = eva;

                /*
                 * "va" tracks the start of the current run of removed
                 * mappings awaiting invalidation; va == va_next means no
                 * run is pending.
                 */
                va = va_next;
                for (pte = pmap_pde_to_pte(pde, sva); sva != va_next; pte++,
                    sva += PAGE_SIZE) {
                        if (*pte == 0) {
                                /* A hole ends the pending run; flush it. */
                                if (va != va_next) {
                                        pmap_invalidate_range(pmap, va, sva);
                                        va = va_next;
                                }
                                continue;
                        }
                        if ((*pte & PG_G) == 0)
                                anyvalid = 1;
                        else if (va == va_next)
                                /* Start a run at this PG_G mapping. */
                                va = sva;
                        if (pmap_remove_pte(pmap, pte, sva, ptpaddr, &free)) {
                                /* The page table page was freed; stop here. */
                                sva += PAGE_SIZE;
                                break;
                        }
                }
                /* Flush any run left pending at the end of this PTP. */
                if (va != va_next)
                        pmap_invalidate_range(pmap, va, sva);
        }
out:
        if (anyvalid)
                pmap_invalidate_all(pmap);
        rw_wunlock(&pvh_global_lock);
        PMAP_UNLOCK(pmap);
        pmap_free_zero_pages(free);
}
 2848 
/*
 *      Routine:        pmap_remove_all
 *      Function:
 *              Removes this physical page from
 *              all physical maps in which it resides.
 *              Reflects back modify bits to the pager.
 *
 *      Notes:
 *              Original versions of this routine were very
 *              inefficient because they iteratively called
 *              pmap_remove (slow...)
 *
 *              Any 2MB mappings of the containing superpage are first
 *              demoted to 4KB mappings, and then every 4KB mapping of the
 *              page is destroyed.  PGA_WRITEABLE is cleared once no
 *              mappings remain.
 */

void
pmap_remove_all(vm_page_t m)
{
        struct md_page *pvh;
        pv_entry_t pv;
        pmap_t pmap;
        pt_entry_t *pte, tpte;
        pd_entry_t *pde;
        vm_offset_t va;
        vm_page_t free;

        KASSERT((m->oflags & VPO_UNMANAGED) == 0,
            ("pmap_remove_all: page %p is not managed", m));
        free = NULL;
        rw_wlock(&pvh_global_lock);
        /* Fictitious pages have no superpage pv list. */
        if ((m->flags & PG_FICTITIOUS) != 0)
                goto small_mappings;
        /* Demote every 2MB mapping of the enclosing superpage. */
        pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
        while ((pv = TAILQ_FIRST(&pvh->pv_list)) != NULL) {
                pmap = PV_PMAP(pv);
                PMAP_LOCK(pmap);
                va = pv->pv_va;
                pde = pmap_pde(pmap, va);
                (void)pmap_demote_pde(pmap, pde, va);
                PMAP_UNLOCK(pmap);
        }
small_mappings:
        /* Remove each remaining 4KB mapping of the page. */
        while ((pv = TAILQ_FIRST(&m->md.pv_list)) != NULL) {
                pmap = PV_PMAP(pv);
                PMAP_LOCK(pmap);
                pmap_resident_count_dec(pmap, 1);
                pde = pmap_pde(pmap, pv->pv_va);
                KASSERT((*pde & PG_PS) == 0, ("pmap_remove_all: found"
                    " a 2mpage in page %p's pv list", m));
                pte = pmap_pde_to_pte(pde, pv->pv_va);
                tpte = pte_load_clear(pte);
                if (tpte & PG_W)
                        pmap->pm_stats.wired_count--;
                if (tpte & PG_A)
                        vm_page_aflag_set(m, PGA_REFERENCED);

                /*
                 * Update the vm_page_t clean and reference bits.
                 */
                if ((tpte & (PG_M | PG_RW)) == (PG_M | PG_RW))
                        vm_page_dirty(m);
                pmap_unuse_pt(pmap, pv->pv_va, *pde, &free);
                pmap_invalidate_page(pmap, pv->pv_va);
                TAILQ_REMOVE(&m->md.pv_list, pv, pv_list);
                free_pv_entry(pmap, pv);
                PMAP_UNLOCK(pmap);
        }
        vm_page_aflag_clear(m, PGA_WRITEABLE);
        rw_wunlock(&pvh_global_lock);
        pmap_free_zero_pages(free);
}
 2918 
/*
 * pmap_protect_pde: do the things to protect a 2mpage in a process
 *
 * Rewrites the PDE at "pde" so that the 2MB mapping at "sva" honors "prot"
 * (write and/or execute removal only).  The TLB entry for a PG_G mapping
 * is invalidated here; otherwise TRUE is returned so that the caller can
 * batch the invalidation.
 */
static boolean_t
pmap_protect_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t sva, vm_prot_t prot)
{
        pd_entry_t newpde, oldpde;
        vm_offset_t eva, va;
        vm_page_t m;
        boolean_t anychanged;

        PMAP_LOCK_ASSERT(pmap, MA_OWNED);
        KASSERT((sva & PDRMASK) == 0,
            ("pmap_protect_pde: sva is not 2mpage aligned"));
        anychanged = FALSE;
retry:
        oldpde = newpde = *pde;
        if (oldpde & PG_MANAGED) {
                /*
                 * Before write access is removed, push a dirty superpage's
                 * modified state down to every constituent vm_page.
                 */
                eva = sva + NBPDR;
                for (va = sva, m = PHYS_TO_VM_PAGE(oldpde & PG_PS_FRAME);
                    va < eva; va += PAGE_SIZE, m++)
                        if ((oldpde & (PG_M | PG_RW)) == (PG_M | PG_RW))
                                vm_page_dirty(m);
        }
        if ((prot & VM_PROT_WRITE) == 0)
                newpde &= ~(PG_RW | PG_M);
        if ((prot & VM_PROT_EXECUTE) == 0)
                newpde |= pg_nx;
        if (newpde != oldpde) {
                /* Retry if another processor changed the PDE meanwhile. */
                if (!atomic_cmpset_long(pde, oldpde, newpde))
                        goto retry;
                if (oldpde & PG_G)
                        pmap_invalidate_page(pmap, sva);
                else
                        anychanged = TRUE;
        }
        return (anychanged);
}
 2957 
/*
 *      Set the physical protection on the
 *      specified range of this map as requested.
 *
 *      Only the removal of write and/or execute permission is handled
 *      here; removing read permission removes the mappings entirely via
 *      pmap_remove().
 */
void
pmap_protect(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot)
{
        vm_offset_t va_next;
        pml4_entry_t *pml4e;
        pdp_entry_t *pdpe;
        pd_entry_t ptpaddr, *pde;
        pt_entry_t *pte;
        int anychanged;
        boolean_t pv_lists_locked;

        if ((prot & VM_PROT_READ) == VM_PROT_NONE) {
                pmap_remove(pmap, sva, eva);
                return;
        }

        /* Nothing to do if neither write nor execute is being removed. */
        if ((prot & (VM_PROT_WRITE|VM_PROT_EXECUTE)) ==
            (VM_PROT_WRITE|VM_PROT_EXECUTE))
                return;

        /*
         * The pv list lock is acquired lazily, only when a demotion is
         * needed.  If it cannot be acquired without blocking, the pmap
         * lock is dropped and the whole range is reprocessed ("resume").
         */
        pv_lists_locked = FALSE;
resume:
        anychanged = 0;

        PMAP_LOCK(pmap);
        for (; sva < eva; sva = va_next) {

                /* Skip an entire 512GB region if the PML4E is invalid. */
                pml4e = pmap_pml4e(pmap, sva);
                if ((*pml4e & PG_V) == 0) {
                        va_next = (sva + NBPML4) & ~PML4MASK;
                        if (va_next < sva)
                                va_next = eva;
                        continue;
                }

                /* Skip an entire 1GB region if the PDPE is invalid. */
                pdpe = pmap_pml4e_to_pdpe(pml4e, sva);
                if ((*pdpe & PG_V) == 0) {
                        va_next = (sva + NBPDP) & ~PDPMASK;
                        if (va_next < sva)
                                va_next = eva;
                        continue;
                }

                va_next = (sva + NBPDR) & ~PDRMASK;
                if (va_next < sva)
                        va_next = eva;

                pde = pmap_pdpe_to_pde(pdpe, sva);
                ptpaddr = *pde;

                /*
                 * Weed out invalid mappings.
                 */
                if (ptpaddr == 0)
                        continue;

                /*
                 * Check for large page.
                 */
                if ((ptpaddr & PG_PS) != 0) {
                        /*
                         * Are we protecting the entire large page?  If not,
                         * demote the mapping and fall through.
                         */
                        if (sva + NBPDR == va_next && eva >= va_next) {
                                /*
                                 * The TLB entry for a PG_G mapping is
                                 * invalidated by pmap_protect_pde().
                                 */
                                if (pmap_protect_pde(pmap, pde, sva, prot))
                                        anychanged = 1;
                                continue;
                        } else {
                                if (!pv_lists_locked) {
                                        pv_lists_locked = TRUE;
                                        if (!rw_try_wlock(&pvh_global_lock)) {
                                                /*
                                                 * Flush pending changes,
                                                 * drop the pmap lock, block
                                                 * for the pv list lock, and
                                                 * start over.
                                                 */
                                                if (anychanged)
                                                        pmap_invalidate_all(
                                                            pmap);
                                                PMAP_UNLOCK(pmap);
                                                rw_wlock(&pvh_global_lock);
                                                goto resume;
                                        }
                                }
                                if (!pmap_demote_pde(pmap, pde, sva)) {
                                        /*
                                         * The large page mapping was
                                         * destroyed.
                                         */
                                        continue;
                                }
                        }
                }

                if (va_next > eva)
                        va_next = eva;

                for (pte = pmap_pde_to_pte(pde, sva); sva != va_next; pte++,
                    sva += PAGE_SIZE) {
                        pt_entry_t obits, pbits;
                        vm_page_t m;

retry:
                        obits = pbits = *pte;
                        if ((pbits & PG_V) == 0)
                                continue;

                        if ((prot & VM_PROT_WRITE) == 0) {
                                /*
                                 * Record a dirty page before write access
                                 * and PG_M are cleared.
                                 */
                                if ((pbits & (PG_MANAGED | PG_M | PG_RW)) ==
                                    (PG_MANAGED | PG_M | PG_RW)) {
                                        m = PHYS_TO_VM_PAGE(pbits & PG_FRAME);
                                        vm_page_dirty(m);
                                }
                                pbits &= ~(PG_RW | PG_M);
                        }
                        if ((prot & VM_PROT_EXECUTE) == 0)
                                pbits |= pg_nx;

                        if (pbits != obits) {
                                /* Retry if the PTE changed concurrently. */
                                if (!atomic_cmpset_long(pte, obits, pbits))
                                        goto retry;
                                if (obits & PG_G)
                                        pmap_invalidate_page(pmap, sva);
                                else
                                        anychanged = 1;
                        }
                }
        }
        if (anychanged)
                pmap_invalidate_all(pmap);
        if (pv_lists_locked)
                rw_wunlock(&pvh_global_lock);
        PMAP_UNLOCK(pmap);
}
 3096 
 3097 /*
 3098  * Tries to promote the 512, contiguous 4KB page mappings that are within a
 3099  * single page table page (PTP) to a single 2MB page mapping.  For promotion
 3100  * to occur, two conditions must be met: (1) the 4KB page mappings must map
 3101  * aligned, contiguous physical memory and (2) the 4KB page mappings must have
 3102  * identical characteristics. 
 3103  */
static void
pmap_promote_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t va)
{
	pd_entry_t newpde;
	pt_entry_t *firstpte, oldpte, pa, *pte;
	vm_offset_t oldpteva;
	vm_page_t mpte;

	PMAP_LOCK_ASSERT(pmap, MA_OWNED);

	/*
	 * Examine the first PTE in the specified PTP.  Abort if this PTE is
	 * either invalid, unused, or does not map the first 4KB physical page
	 * within a 2MB page.  (The "PG_FRAME & PDRMASK" term selects the
	 * physical-frame bits below the 2MB boundary; they must all be zero
	 * for the frame to begin a 2MB-aligned run.)
	 */
	firstpte = (pt_entry_t *)PHYS_TO_DMAP(*pde & PG_FRAME);
setpde:
	newpde = *firstpte;
	if ((newpde & ((PG_FRAME & PDRMASK) | PG_A | PG_V)) != (PG_A | PG_V)) {
		pmap_pde_p_failures++;
		CTR2(KTR_PMAP, "pmap_promote_pde: failure for va %#lx"
		    " in pmap %p", va, pmap);
		return;
	}
	if ((newpde & (PG_M | PG_RW)) == PG_RW) {
		/*
		 * When PG_M is already clear, PG_RW can be cleared without
		 * a TLB invalidation.  If the cmpset fails, the PTE changed
		 * concurrently; re-read it and re-apply the checks above.
		 */
		if (!atomic_cmpset_long(firstpte, newpde, newpde & ~PG_RW))
			goto setpde;
		newpde &= ~PG_RW;
	}

	/*
	 * Examine each of the other PTEs in the specified PTP.  Abort if this
	 * PTE maps an unexpected 4KB physical page or does not have identical
	 * characteristics to the first PTE.  "pa" holds the PTE value that is
	 * expected for the last 4KB page in the run and is walked downward in
	 * lockstep with the descending PTE scan.
	 */
	pa = (newpde & (PG_PS_FRAME | PG_A | PG_V)) + NBPDR - PAGE_SIZE;
	for (pte = firstpte + NPTEPG - 1; pte > firstpte; pte--) {
setpte:
		oldpte = *pte;
		if ((oldpte & (PG_FRAME | PG_A | PG_V)) != pa) {
			pmap_pde_p_failures++;
			CTR2(KTR_PMAP, "pmap_promote_pde: failure for va %#lx"
			    " in pmap %p", va, pmap);
			return;
		}
		if ((oldpte & (PG_M | PG_RW)) == PG_RW) {
			/*
			 * When PG_M is already clear, PG_RW can be cleared
			 * without a TLB invalidation.
			 */
			if (!atomic_cmpset_long(pte, oldpte, oldpte & ~PG_RW))
				goto setpte;
			oldpte &= ~PG_RW;
			oldpteva = (oldpte & PG_FRAME & PDRMASK) |
			    (va & ~PDRMASK);
			CTR2(KTR_PMAP, "pmap_promote_pde: protect for va %#lx"
			    " in pmap %p", oldpteva, pmap);
		}
		if ((oldpte & PG_PTE_PROMOTE) != (newpde & PG_PTE_PROMOTE)) {
			pmap_pde_p_failures++;
			CTR2(KTR_PMAP, "pmap_promote_pde: failure for va %#lx"
			    " in pmap %p", va, pmap);
			return;
		}
		pa -= PAGE_SIZE;
	}

	/*
	 * Save the page table page in its current state until the PDE
	 * mapping the superpage is demoted by pmap_demote_pde() or
	 * destroyed by pmap_remove_pde().
	 */
	mpte = PHYS_TO_VM_PAGE(*pde & PG_FRAME);
	KASSERT(mpte >= vm_page_array &&
	    mpte < &vm_page_array[vm_page_array_size],
	    ("pmap_promote_pde: page table page is out of range"));
	KASSERT(mpte->pindex == pmap_pde_pindex(va),
	    ("pmap_promote_pde: page table page's pindex is wrong"));
	pmap_insert_pt_page(pmap, mpte);

	/*
	 * Promote the pv entries.
	 */
	if ((newpde & PG_MANAGED) != 0)
		pmap_pv_promote_pde(pmap, va, newpde & PG_PS_FRAME);

	/*
	 * Propagate the PAT index to its proper position.  The PAT bit
	 * occupies a different bit position in a PDE than in a PTE.
	 */
	if ((newpde & PG_PTE_PAT) != 0)
		newpde ^= PG_PDE_PAT | PG_PTE_PAT;

	/*
	 * Map the superpage.  On CPUs subject to the AMD erratum 383
	 * workaround, the PDE update must be performed via
	 * pmap_update_pde() rather than a direct store.
	 */
	if (workaround_erratum383)
		pmap_update_pde(pmap, va, pde, PG_PS | newpde);
	else
		pde_store(pde, PG_PS | newpde);

	pmap_pde_promotions++;
	CTR2(KTR_PMAP, "pmap_promote_pde: success for va %#lx"
	    " in pmap %p", va, pmap);
}
 3212 
 3213 /*
 3214  *      Insert the given physical page (p) at
 3215  *      the specified virtual address (v) in the
 3216  *      target physical map with the protection requested.
 3217  *
 *      If specified, the page will be wired down, meaning
 *      that the related pte cannot be reclaimed.
 3220  *
 3221  *      NB:  This is the only routine which MAY NOT lazy-evaluate
 3222  *      or lose information.  That is, this routine must actually
 3223  *      insert this page into the given map NOW.
 3224  */
void
pmap_enter(pmap_t pmap, vm_offset_t va, vm_prot_t access, vm_page_t m,
    vm_prot_t prot, boolean_t wired)
{
	pd_entry_t *pde;
	pt_entry_t *pte;
	pt_entry_t newpte, origpte;
	pv_entry_t pv;
	vm_paddr_t opa, pa;
	vm_page_t mpte, om;
	boolean_t invlva;

	va = trunc_page(va);
	KASSERT(va <= VM_MAX_KERNEL_ADDRESS, ("pmap_enter: toobig"));
	KASSERT(va < UPT_MIN_ADDRESS || va >= UPT_MAX_ADDRESS,
	    ("pmap_enter: invalid to pmap_enter page table pages (va: 0x%lx)",
	    va));
	KASSERT((m->oflags & (VPO_UNMANAGED | VPO_BUSY)) != 0 ||
	    VM_OBJECT_LOCKED(m->object),
	    ("pmap_enter: page %p is not busy", m));

	mpte = NULL;

	rw_wlock(&pvh_global_lock);
	PMAP_LOCK(pmap);

	/*
	 * In the case that a page table page is not
	 * resident, we are creating it here.  M_WAITOK means this
	 * allocation may sleep.
	 */
	if (va < VM_MAXUSER_ADDRESS)
		mpte = pmap_allocpte(pmap, va, M_WAITOK);

	pde = pmap_pde(pmap, va);
	if (pde != NULL && (*pde & PG_V) != 0) {
		if ((*pde & PG_PS) != 0)
			panic("pmap_enter: attempted pmap_enter on 2MB page");
		pte = pmap_pde_to_pte(pde, va);
	} else
		panic("pmap_enter: invalid page directory va=%#lx", va);

	pa = VM_PAGE_TO_PHYS(m);
	om = NULL;
	origpte = *pte;
	opa = origpte & PG_FRAME;

	/*
	 * Mapping has not changed, must be protection or wiring change.
	 */
	if (origpte && (opa == pa)) {
		/*
		 * Wiring change, just update stats. We don't worry about
		 * wiring PT pages as they remain resident as long as there
		 * are valid mappings in them. Hence, if a user page is wired,
		 * the PT page will be also.
		 */
		if (wired && ((origpte & PG_W) == 0))
			pmap->pm_stats.wired_count++;
		else if (!wired && (origpte & PG_W))
			pmap->pm_stats.wired_count--;

		/*
		 * Remove extra pte reference
		 */
		if (mpte)
			mpte->wire_count--;

		if (origpte & PG_MANAGED) {
			om = m;
			pa |= PG_MANAGED;
		}
		goto validate;
	}

	pv = NULL;

	/*
	 * Mapping has changed, invalidate old range and fall through to
	 * handle validating new mapping.  Try to recycle the old mapping's
	 * pv entry ("pv") for the new mapping.
	 */
	if (opa) {
		if (origpte & PG_W)
			pmap->pm_stats.wired_count--;
		if (origpte & PG_MANAGED) {
			om = PHYS_TO_VM_PAGE(opa);
			pv = pmap_pvh_remove(&om->md, pmap, va);
		}
		if (mpte != NULL) {
			mpte->wire_count--;
			KASSERT(mpte->wire_count > 0,
			    ("pmap_enter: missing reference to page table page,"
			     " va: 0x%lx", va));
		}
	} else
		pmap_resident_count_inc(pmap, 1);

	/*
	 * Enter on the PV list if part of our managed memory.
	 */
	if ((m->oflags & VPO_UNMANAGED) == 0) {
		KASSERT(va < kmi.clean_sva || va >= kmi.clean_eva,
		    ("pmap_enter: managed mapping within the clean submap"));
		if (pv == NULL)
			pv = get_pv_entry(pmap, FALSE);
		pv->pv_va = va;
		TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_list);
		pa |= PG_MANAGED;
	} else if (pv != NULL)
		free_pv_entry(pmap, pv);

	/*
	 * Increment counters
	 */
	if (wired)
		pmap->pm_stats.wired_count++;

validate:
	/*
	 * Now validate mapping with desired protection/wiring.
	 */
	newpte = (pt_entry_t)(pa | pmap_cache_bits(m->md.pat_mode, 0) | PG_V);
	if ((prot & VM_PROT_WRITE) != 0) {
		newpte |= PG_RW;
		if ((newpte & PG_MANAGED) != 0)
			vm_page_aflag_set(m, PGA_WRITEABLE);
	}
	if ((prot & VM_PROT_EXECUTE) == 0)
		newpte |= pg_nx;
	if (wired)
		newpte |= PG_W;
	if (va < VM_MAXUSER_ADDRESS)
		newpte |= PG_U;
	if (pmap == kernel_pmap)
		newpte |= PG_G;

	/*
	 * if the mapping or permission bits are different, we need
	 * to update the pte.  (PG_M and PG_A are ignored in the
	 * comparison because the hardware may set them at any time.)
	 */
	if ((origpte & ~(PG_M|PG_A)) != newpte) {
		newpte |= PG_A;
		if ((access & VM_PROT_WRITE) != 0)
			newpte |= PG_M;
		if (origpte & PG_V) {
			/*
			 * Atomically install the new PTE and fetch the old
			 * one.  "invlva" records whether the old mapping
			 * could still be cached in the TLB and therefore
			 * requires an explicit invalidation.
			 */
			invlva = FALSE;
			origpte = pte_load_store(pte, newpte);
			if (origpte & PG_A) {
				if (origpte & PG_MANAGED)
					vm_page_aflag_set(om, PGA_REFERENCED);
				/*
				 * An accessed mapping may be in the TLB;
				 * invalidate when the frame changed or
				 * execute permission was revoked.
				 */
				if (opa != VM_PAGE_TO_PHYS(m) || ((origpte &
				    PG_NX) == 0 && (newpte & PG_NX)))
					invlva = TRUE;
			}
			if ((origpte & (PG_M | PG_RW)) == (PG_M | PG_RW)) {
				/*
				 * Record the modification of a dirty,
				 * writable old mapping; invalidate if write
				 * permission is being removed.
				 */
				if ((origpte & PG_MANAGED) != 0)
					vm_page_dirty(om);
				if ((newpte & PG_RW) == 0)
					invlva = TRUE;
			}
			/*
			 * If the old page has no remaining managed mappings,
			 * neither 4KB nor 2MB, clear PGA_WRITEABLE.
			 */
			if ((origpte & PG_MANAGED) != 0 &&
			    TAILQ_EMPTY(&om->md.pv_list) &&
			    ((om->flags & PG_FICTITIOUS) != 0 ||
			    TAILQ_EMPTY(&pa_to_pvh(opa)->pv_list)))
				vm_page_aflag_clear(om, PGA_WRITEABLE);
			if (invlva)
				pmap_invalidate_page(pmap, va);
		} else
			pte_store(pte, newpte);
	}

	/*
	 * If both the page table page and the reservation are fully
	 * populated, then attempt promotion.
	 */
	if ((mpte == NULL || mpte->wire_count == NPTEPG) &&
	    pg_ps_enabled && (m->flags & PG_FICTITIOUS) == 0 &&
	    vm_reserv_level_iffullpop(m) == 0)
		pmap_promote_pde(pmap, pde, va);

	rw_wunlock(&pvh_global_lock);
	PMAP_UNLOCK(pmap);
}
 3407 
 3408 /*
 3409  * Tries to create a 2MB page mapping.  Returns TRUE if successful and FALSE
 3410  * otherwise.  Fails if (1) a page table page cannot be allocated without
 3411  * blocking, (2) a mapping already exists at the specified virtual address, or
 3412  * (3) a pv entry cannot be allocated without reclaiming another pv entry. 
 3413  */
static boolean_t
pmap_enter_pde(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot)
{
	pd_entry_t *pde, newpde;
	vm_page_t free, mpde;

	rw_assert(&pvh_global_lock, RA_WLOCKED);
	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
	/*
	 * Allocate (or find) the page directory page without sleeping.
	 */
	if ((mpde = pmap_allocpde(pmap, va, M_NOWAIT)) == NULL) {
		CTR2(KTR_PMAP, "pmap_enter_pde: failure for va %#lx"
		    " in pmap %p", va, pmap);
		return (FALSE);
	}
	/* Locate the PDE for "va" within the page directory page. */
	pde = (pd_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(mpde));
	pde = &pde[pmap_pde_index(va)];
	if ((*pde & PG_V) != 0) {
		/*
		 * A mapping already exists; release the reference that
		 * pmap_allocpde() took on the page directory page.
		 */
		KASSERT(mpde->wire_count > 1,
		    ("pmap_enter_pde: mpde's wire count is too low"));
		mpde->wire_count--;
		CTR2(KTR_PMAP, "pmap_enter_pde: failure for va %#lx"
		    " in pmap %p", va, pmap);
		return (FALSE);
	}
	newpde = VM_PAGE_TO_PHYS(m) | pmap_cache_bits(m->md.pat_mode, 1) |
	    PG_PS | PG_V;
	if ((m->oflags & VPO_UNMANAGED) == 0) {
		newpde |= PG_MANAGED;

		/*
		 * Abort this mapping if its PV entry could not be created.
		 */
		if (!pmap_pv_insert_pde(pmap, va, VM_PAGE_TO_PHYS(m))) {
			free = NULL;
			if (pmap_unwire_pte_hold(pmap, va, mpde, &free)) {
				pmap_invalidate_page(pmap, va);
				pmap_free_zero_pages(free);
			}
			CTR2(KTR_PMAP, "pmap_enter_pde: failure for va %#lx"
			    " in pmap %p", va, pmap);
			return (FALSE);
		}
	}
	if ((prot & VM_PROT_EXECUTE) == 0)
		newpde |= pg_nx;
	if (va < VM_MAXUSER_ADDRESS)
		newpde |= PG_U;

	/*
	 * Increment counters.
	 */
	pmap_resident_count_inc(pmap, NBPDR / PAGE_SIZE);

	/*
	 * Map the superpage.
	 */
	pde_store(pde, newpde);

	pmap_pde_mappings++;
	CTR2(KTR_PMAP, "pmap_enter_pde: success for va %#lx"
	    " in pmap %p", va, pmap);
	return (TRUE);
}
 3476 
 3477 /*
 3478  * Maps a sequence of resident pages belonging to the same object.
 3479  * The sequence begins with the given page m_start.  This page is
 3480  * mapped at the given virtual address start.  Each subsequent page is
 3481  * mapped at a virtual address that is offset from start by the same
 3482  * amount as the page is offset from m_start within the object.  The
 3483  * last page in the sequence is the page with the largest offset from
 3484  * m_start that can be mapped at a virtual address less than the given
 3485  * virtual address end.  Not every virtual page between start and end
 3486  * is mapped; only those for which a resident page exists with the
 3487  * corresponding offset from m_start are mapped.
 3488  */
void
pmap_enter_object(pmap_t pmap, vm_offset_t start, vm_offset_t end,
    vm_page_t m_start, vm_prot_t prot)
{
	vm_offset_t va;
	vm_page_t m, mpte;
	vm_pindex_t diff, psize;

	VM_OBJECT_LOCK_ASSERT(m_start->object, MA_OWNED);
	psize = atop(end - start);
	mpte = NULL;
	m = m_start;
	rw_wlock(&pvh_global_lock);
	PMAP_LOCK(pmap);
	while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) {
		va = start + ptoa(diff);
		/*
		 * Create a 2MB mapping when the virtual and physical
		 * addresses are both superpage-aligned, the 2MB run fits
		 * below "end", superpages are enabled, and the page's
		 * reservation is fully populated; otherwise fall back to
		 * a 4KB mapping.
		 */
		if ((va & PDRMASK) == 0 && va + NBPDR <= end &&
		    (VM_PAGE_TO_PHYS(m) & PDRMASK) == 0 &&
		    pg_ps_enabled && vm_reserv_level_iffullpop(m) == 0 &&
		    pmap_enter_pde(pmap, va, m, prot))
			/*
			 * Advance to the last 4KB page of the superpage;
			 * the TAILQ_NEXT() below then steps past it.
			 */
			m = &m[NBPDR / PAGE_SIZE - 1];
		else
			mpte = pmap_enter_quick_locked(pmap, va, m, prot,
			    mpte);
		m = TAILQ_NEXT(m, listq);
	}
	rw_wunlock(&pvh_global_lock);
	PMAP_UNLOCK(pmap);
}
 3518 
 3519 /*
 3520  * this code makes some *MAJOR* assumptions:
 3521  * 1. Current pmap & pmap exists.
 3522  * 2. Not wired.
 3523  * 3. Read access.
 3524  * 4. No page table pages.
 3525  * but is *MUCH* faster than pmap_enter...
 3526  */
 3527 
void
pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot)
{

	/*
	 * Acquire the locks required by pmap_enter_quick_locked() and
	 * delegate to it; the return value (the page table page) is
	 * intentionally discarded.
	 */
	rw_wlock(&pvh_global_lock);
	PMAP_LOCK(pmap);
	(void)pmap_enter_quick_locked(pmap, va, m, prot, NULL);
	rw_wunlock(&pvh_global_lock);
	PMAP_UNLOCK(pmap);
}
 3538 
static vm_page_t
pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va, vm_page_t m,
    vm_prot_t prot, vm_page_t mpte)
{
	vm_page_t free;
	pt_entry_t *pte;
	vm_paddr_t pa;

	KASSERT(va < kmi.clean_sva || va >= kmi.clean_eva ||
	    (m->oflags & VPO_UNMANAGED) != 0,
	    ("pmap_enter_quick_locked: managed mapping within the clean submap"));
	rw_assert(&pvh_global_lock, RA_WLOCKED);
	PMAP_LOCK_ASSERT(pmap, MA_OWNED);

	/*
	 * In the case that a page table page is not
	 * resident, we are creating it here.
	 */
	if (va < VM_MAXUSER_ADDRESS) {
		vm_pindex_t ptepindex;
		pd_entry_t *ptepa;

		/*
		 * Calculate pagetable page index
		 */
		ptepindex = pmap_pde_pindex(va);
		if (mpte && (mpte->pindex == ptepindex)) {
			/* Reuse the caller-supplied page table page. */
			mpte->wire_count++;
		} else {
			/*
			 * Get the page directory entry
			 */
			ptepa = pmap_pde(pmap, va);

			/*
			 * If the page table page is mapped, we just increment
			 * the hold count, and activate it.  A 2MB mapping
			 * here means no 4KB PTE can be entered, so give up.
			 */
			if (ptepa && (*ptepa & PG_V) != 0) {
				if (*ptepa & PG_PS)
					return (NULL);
				mpte = PHYS_TO_VM_PAGE(*ptepa & PG_FRAME);
				mpte->wire_count++;
			} else {
				/* Allocate a PT page without sleeping. */
				mpte = _pmap_allocpte(pmap, ptepindex,
				    M_NOWAIT);
				if (mpte == NULL)
					return (mpte);
			}
		}
		pte = (pt_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(mpte));
		pte = &pte[pmap_pte_index(va)];
	} else {
		/* Kernel addresses use the statically mapped page tables. */
		mpte = NULL;
		pte = vtopte(va);
	}
	/*
	 * Abort if a mapping already exists at "va"; drop the reference
	 * taken on the page table page above.
	 */
	if (*pte) {
		if (mpte != NULL) {
			mpte->wire_count--;
			mpte = NULL;
		}
		return (mpte);
	}

	/*
	 * Enter on the PV list if part of our managed memory.  On failure,
	 * undo the page table page reference and invalidate if the PT page
	 * was freed.
	 */
	if ((m->oflags & VPO_UNMANAGED) == 0 &&
	    !pmap_try_insert_pv_entry(pmap, va, m)) {
		if (mpte != NULL) {
			free = NULL;
			if (pmap_unwire_pte_hold(pmap, va, mpte, &free)) {
				pmap_invalidate_page(pmap, va);
				pmap_free_zero_pages(free);
			}
			mpte = NULL;
		}
		return (mpte);
	}

	/*
	 * Increment counters
	 */
	pmap_resident_count_inc(pmap, 1);

	pa = VM_PAGE_TO_PHYS(m) | pmap_cache_bits(m->md.pat_mode, 0);
	if ((prot & VM_PROT_EXECUTE) == 0)
		pa |= pg_nx;

	/*
	 * Now validate mapping with RO protection
	 */
	if ((m->oflags & VPO_UNMANAGED) != 0)
		pte_store(pte, pa | PG_V | PG_U);
	else
		pte_store(pte, pa | PG_V | PG_U | PG_MANAGED);
	return (mpte);
}
 3637 
 3638 /*
 3639  * Make a temporary mapping for a physical address.  This is only intended
 3640  * to be used for panic dumps.
 3641  */
 3642 void *
 3643 pmap_kenter_temporary(vm_paddr_t pa, int i)
 3644 {
 3645         vm_offset_t va;
 3646 
 3647         va = (vm_offset_t)crashdumpmap + (i * PAGE_SIZE);
 3648         pmap_kenter(va, pa);
 3649         invlpg(va);
 3650         return ((void *)crashdumpmap);
 3651 }
 3652 
 3653 /*
 3654  * This code maps large physical mmap regions into the
 3655  * processor address space.  Note that some shortcuts
 3656  * are taken, but the code works.
 3657  */
void
pmap_object_init_pt(pmap_t pmap, vm_offset_t addr, vm_object_t object,
    vm_pindex_t pindex, vm_size_t size)
{
	pd_entry_t *pde;
	vm_paddr_t pa, ptepa;
	vm_page_t p, pdpg;
	int pat_mode;

	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
	KASSERT(object->type == OBJT_DEVICE || object->type == OBJT_SG,
	    ("pmap_object_init_pt: non-device object"));
	/* Only 2MB-aligned, 2MB-multiple regions are mapped here. */
	if ((addr & (NBPDR - 1)) == 0 && (size & (NBPDR - 1)) == 0) {
		if (!vm_object_populate(object, pindex, pindex + atop(size)))
			return;
		p = vm_page_lookup(object, pindex);
		KASSERT(p->valid == VM_PAGE_BITS_ALL,
		    ("pmap_object_init_pt: invalid page %p", p));
		pat_mode = p->md.pat_mode;

		/*
		 * Abort the mapping if the first page is not physically
		 * aligned to a 2MB page boundary.
		 */
		ptepa = VM_PAGE_TO_PHYS(p);
		if (ptepa & (NBPDR - 1))
			return;

		/*
		 * Skip the first page.  Abort the mapping if the rest of
		 * the pages are not physically contiguous or have differing
		 * memory attributes.
		 */
		p = TAILQ_NEXT(p, listq);
		for (pa = ptepa + PAGE_SIZE; pa < ptepa + size;
		    pa += PAGE_SIZE) {
			KASSERT(p->valid == VM_PAGE_BITS_ALL,
			    ("pmap_object_init_pt: invalid page %p", p));
			if (pa != VM_PAGE_TO_PHYS(p) ||
			    pat_mode != p->md.pat_mode)
				return;
			p = TAILQ_NEXT(p, listq);
		}

		/*
		 * Map using 2MB pages.  Since "ptepa" is 2M aligned and
		 * "size" is a multiple of 2M, adding the PAT setting to "pa"
		 * will not affect the termination of this loop.
		 */
		PMAP_LOCK(pmap);
		for (pa = ptepa | pmap_cache_bits(pat_mode, 1); pa < ptepa +
		    size; pa += NBPDR) {
			pdpg = pmap_allocpde(pmap, addr, M_NOWAIT);
			if (pdpg == NULL) {
				/*
				 * The creation of mappings below is only an
				 * optimization.  If a page directory page
				 * cannot be allocated without blocking,
				 * continue on to the next mapping rather than
				 * blocking.
				 */
				addr += NBPDR;
				continue;
			}
			pde = (pd_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(pdpg));
			pde = &pde[pmap_pde_index(addr)];
			if ((*pde & PG_V) == 0) {
				/* Install a writable, accessed 2MB mapping. */
				pde_store(pde, pa | PG_PS | PG_M | PG_A |
				    PG_U | PG_RW | PG_V);
				pmap_resident_count_inc(pmap, NBPDR / PAGE_SIZE);
				pmap_pde_mappings++;
			} else {
				/* Continue on if the PDE is already valid. */
				pdpg->wire_count--;
				KASSERT(pdpg->wire_count > 0,
				    ("pmap_object_init_pt: missing reference "
				    "to page directory page, va: 0x%lx", addr));
			}
			addr += NBPDR;
		}
		PMAP_UNLOCK(pmap);
	}
}
 3741 
 3742 /*
 3743  *      Routine:        pmap_change_wiring
 3744  *      Function:       Change the wiring attribute for a map/virtual-address
 3745  *                      pair.
 3746  *      In/out conditions:
 3747  *                      The mapping must already exist in the pmap.
 3748  */
void
pmap_change_wiring(pmap_t pmap, vm_offset_t va, boolean_t wired)
{
	pd_entry_t *pde;
	pt_entry_t *pte;
	boolean_t are_queues_locked;

	are_queues_locked = FALSE;

	/*
	 * Wiring is not a hardware characteristic so there is no need to
	 * invalidate TLB.
	 */
retry:
	PMAP_LOCK(pmap);
	pde = pmap_pde(pmap, va);
	if ((*pde & PG_PS) != 0) {
		/*
		 * The wiring attribute of this 2MB mapping is changing;
		 * demote it so that the change applies to only the
		 * requested 4KB page.
		 */
		if (!wired != ((*pde & PG_W) == 0)) {
			/*
			 * Demotion needs the pv list lock.  If it cannot be
			 * acquired without blocking, drop the pmap lock
			 * first — preserving the pvh-before-pmap lock order
			 * used elsewhere in this file — and retry.
			 */
			if (!are_queues_locked) {
				are_queues_locked = TRUE;
				if (!rw_try_wlock(&pvh_global_lock)) {
					PMAP_UNLOCK(pmap);
					rw_wlock(&pvh_global_lock);
					goto retry;
				}
			}
			if (!pmap_demote_pde(pmap, pde, va))
				panic("pmap_change_wiring: demotion failed");
		} else
			goto out;
	}
	/* Update PG_W and the wired-page statistics on the 4KB PTE. */
	pte = pmap_pde_to_pte(pde, va);
	if (wired && (*pte & PG_W) == 0) {
		pmap->pm_stats.wired_count++;
		atomic_set_long(pte, PG_W);
	} else if (!wired && (*pte & PG_W) != 0) {
		pmap->pm_stats.wired_count--;
		atomic_clear_long(pte, PG_W);
	}
out:
	if (are_queues_locked)
		rw_wunlock(&pvh_global_lock);
	PMAP_UNLOCK(pmap);
}
 3793 
 3794 /*
 3795  *      Copy the range specified by src_addr/len
 3796  *      from the source map to the range dst_addr/len
 3797  *      in the destination map.
 3798  *
 3799  *      This routine is only advisory and need not do anything.
 3800  */
 3801 
/*
 * Copy the page table entries covering the virtual address range
 * [src_addr, src_addr + len) from src_pmap into dst_pmap.  The copy is
 * only an optimization (the destination can always fault the pages in
 * later), so it is best-effort: nothing is copied unless the source and
 * destination ranges are identical (the fork() case), and the copy
 * terminates early if a destination page table page or pv entry cannot
 * be allocated without sleeping.  Wired, modified, and accessed bits
 * are cleared in the destination entries.
 */
void
pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr, vm_size_t len,
    vm_offset_t src_addr)
{
        vm_page_t   free;
        vm_offset_t addr;
        vm_offset_t end_addr = src_addr + len;
        vm_offset_t va_next;

        /* Only the fork() case, dst_addr == src_addr, is handled. */
        if (dst_addr != src_addr)
                return;

        rw_wlock(&pvh_global_lock);
        /*
         * Acquire both pmap locks in a canonical (address) order to avoid
         * deadlock against a concurrent pmap_copy() in the other direction.
         */
        if (dst_pmap < src_pmap) {
                PMAP_LOCK(dst_pmap);
                PMAP_LOCK(src_pmap);
        } else {
                PMAP_LOCK(src_pmap);
                PMAP_LOCK(dst_pmap);
        }
        for (addr = src_addr; addr < end_addr; addr = va_next) {
                pt_entry_t *src_pte, *dst_pte;
                vm_page_t dstmpde, dstmpte, srcmpte;
                pml4_entry_t *pml4e;
                pdp_entry_t *pdpe;
                pd_entry_t srcptepaddr, *pde;

                KASSERT(addr < UPT_MIN_ADDRESS,
                    ("pmap_copy: invalid to pmap_copy page tables"));

                /* Skip an entire unmapped 512GB (PML4) slot at once. */
                pml4e = pmap_pml4e(src_pmap, addr);
                if ((*pml4e & PG_V) == 0) {
                        va_next = (addr + NBPML4) & ~PML4MASK;
                        if (va_next < addr)     /* wrapped past the end of VA */
                                va_next = end_addr;
                        continue;
                }

                /* Skip an entire unmapped 1GB (PDP) slot at once. */
                pdpe = pmap_pml4e_to_pdpe(pml4e, addr);
                if ((*pdpe & PG_V) == 0) {
                        va_next = (addr + NBPDP) & ~PDPMASK;
                        if (va_next < addr)
                                va_next = end_addr;
                        continue;
                }

                va_next = (addr + NBPDR) & ~PDRMASK;
                if (va_next < addr)
                        va_next = end_addr;

                pde = pmap_pdpe_to_pde(pdpe, addr);
                srcptepaddr = *pde;
                if (srcptepaddr == 0)
                        continue;
                        
                if (srcptepaddr & PG_PS) {
                        /*
                         * A 2MB superpage: duplicate the whole PDE in the
                         * destination, clearing only the wired bit.  A pv
                         * entry is required first for managed mappings.
                         */
                        dstmpde = pmap_allocpde(dst_pmap, addr, M_NOWAIT);
                        if (dstmpde == NULL)
                                break;
                        pde = (pd_entry_t *)
                            PHYS_TO_DMAP(VM_PAGE_TO_PHYS(dstmpde));
                        pde = &pde[pmap_pde_index(addr)];
                        if (*pde == 0 && ((srcptepaddr & PG_MANAGED) == 0 ||
                            pmap_pv_insert_pde(dst_pmap, addr, srcptepaddr &
                            PG_PS_FRAME))) {
                                *pde = srcptepaddr & ~PG_W;
                                pmap_resident_count_inc(dst_pmap, NBPDR / PAGE_SIZE);
                        } else
                                /* Drop the reference from pmap_allocpde(). */
                                dstmpde->wire_count--;
                        continue;
                }

                srcptepaddr &= PG_FRAME;
                srcmpte = PHYS_TO_VM_PAGE(srcptepaddr);
                KASSERT(srcmpte->wire_count > 0,
                    ("pmap_copy: source page table page is unused"));

                if (va_next > end_addr)
                        va_next = end_addr;

                src_pte = (pt_entry_t *)PHYS_TO_DMAP(srcptepaddr);
                src_pte = &src_pte[pmap_pte_index(addr)];
                dstmpte = NULL;
                /* Copy the 4KB mappings within this 2MB-aligned region. */
                while (addr < va_next) {
                        pt_entry_t ptetemp;
                        ptetemp = *src_pte;
                        /*
                         * we only virtual copy managed pages
                         */
                        if ((ptetemp & PG_MANAGED) != 0) {
                                /*
                                 * Reuse the destination page table page from
                                 * the previous iteration when possible,
                                 * bumping its wire count per new entry.
                                 */
                                if (dstmpte != NULL &&
                                    dstmpte->pindex == pmap_pde_pindex(addr))
                                        dstmpte->wire_count++;
                                else if ((dstmpte = pmap_allocpte(dst_pmap,
                                    addr, M_NOWAIT)) == NULL)
                                        goto out;
                                dst_pte = (pt_entry_t *)
                                    PHYS_TO_DMAP(VM_PAGE_TO_PHYS(dstmpte));
                                dst_pte = &dst_pte[pmap_pte_index(addr)];
                                if (*dst_pte == 0 &&
                                    pmap_try_insert_pv_entry(dst_pmap, addr,
                                    PHYS_TO_VM_PAGE(ptetemp & PG_FRAME))) {
                                        /*
                                         * Clear the wired, modified, and
                                         * accessed (referenced) bits
                                         * during the copy.
                                         */
                                        *dst_pte = ptetemp & ~(PG_W | PG_M |
                                            PG_A);
                                        pmap_resident_count_inc(dst_pmap, 1);
                                } else {
                                        /*
                                         * No pv entry could be allocated:
                                         * unwire the destination page table
                                         * page and abandon the copy.
                                         */
                                        free = NULL;
                                        if (pmap_unwire_pte_hold(dst_pmap,
                                            addr, dstmpte, &free)) {
                                                pmap_invalidate_page(dst_pmap,
                                                    addr);
                                                pmap_free_zero_pages(free);
                                        }
                                        goto out;
                                }
                                /*
                                 * The destination cannot hold more entries
                                 * than the source page table page does.
                                 */
                                if (dstmpte->wire_count >= srcmpte->wire_count)
                                        break;
                        }
                        addr += PAGE_SIZE;
                        src_pte++;
                }
        }
out:
        rw_wunlock(&pvh_global_lock);
        PMAP_UNLOCK(src_pmap);
        PMAP_UNLOCK(dst_pmap);
}
 3934 
 3935 /*
 3936  *      pmap_zero_page zeros the specified hardware page by mapping 
 3937  *      the page into KVM and using bzero to clear its contents.
 3938  */
 3939 void
 3940 pmap_zero_page(vm_page_t m)
 3941 {
 3942         vm_offset_t va = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m));
 3943 
 3944         pagezero((void *)va);
 3945 }
 3946 
 3947 /*
 3948  *      pmap_zero_page_area zeros the specified hardware page by mapping 
 3949  *      the page into KVM and using bzero to clear its contents.
 3950  *
 3951  *      off and size may not cover an area beyond a single hardware page.
 3952  */
 3953 void
 3954 pmap_zero_page_area(vm_page_t m, int off, int size)
 3955 {
 3956         vm_offset_t va = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m));
 3957 
 3958         if (off == 0 && size == PAGE_SIZE)
 3959                 pagezero((void *)va);
 3960         else
 3961                 bzero((char *)va + off, size);
 3962 }
 3963 
 3964 /*
 3965  *      pmap_zero_page_idle zeros the specified hardware page by mapping 
 3966  *      the page into KVM and using bzero to clear its contents.  This
 3967  *      is intended to be called from the vm_pagezero process only and
 3968  *      outside of Giant.
 3969  */
 3970 void
 3971 pmap_zero_page_idle(vm_page_t m)
 3972 {
 3973         vm_offset_t va = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m));
 3974 
 3975         pagezero((void *)va);
 3976 }
 3977 
 3978 /*
 3979  *      pmap_copy_page copies the specified (machine independent)
 3980  *      page by mapping the page into virtual memory and using
 3981  *      bcopy to copy the page, one machine dependent page at a
 3982  *      time.
 3983  */
 3984 void
 3985 pmap_copy_page(vm_page_t msrc, vm_page_t mdst)
 3986 {
 3987         vm_offset_t src = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(msrc));
 3988         vm_offset_t dst = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(mdst));
 3989 
 3990         pagecopy((void *)src, (void *)dst);
 3991 }
 3992 
 3993 /*
 3994  * Returns true if the pmap's pv is one of the first
 3995  * 16 pvs linked to from this page.  This count may
 3996  * be changed upwards or downwards in the future; it
 3997  * is only necessary that true be returned for a small
 3998  * subset of pmaps for proper page aging.
 3999  */
 4000 boolean_t
 4001 pmap_page_exists_quick(pmap_t pmap, vm_page_t m)
 4002 {
 4003         struct md_page *pvh;
 4004         pv_entry_t pv;
 4005         int loops = 0;
 4006         boolean_t rv;
 4007 
 4008         KASSERT((m->oflags & VPO_UNMANAGED) == 0,
 4009             ("pmap_page_exists_quick: page %p is not managed", m));
 4010         rv = FALSE;
 4011         rw_wlock(&pvh_global_lock);
 4012         TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
 4013                 if (PV_PMAP(pv) == pmap) {
 4014                         rv = TRUE;
 4015                         break;
 4016                 }
 4017                 loops++;
 4018                 if (loops >= 16)
 4019                         break;
 4020         }
 4021         if (!rv && loops < 16 && (m->flags & PG_FICTITIOUS) == 0) {
 4022                 pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
 4023                 TAILQ_FOREACH(pv, &pvh->pv_list, pv_list) {
 4024                         if (PV_PMAP(pv) == pmap) {
 4025                                 rv = TRUE;
 4026                                 break;
 4027                         }
 4028                         loops++;
 4029                         if (loops >= 16)
 4030                                 break;
 4031                 }
 4032         }
 4033         rw_wunlock(&pvh_global_lock);
 4034         return (rv);
 4035 }
 4036 
 4037 /*
 4038  *      pmap_page_wired_mappings:
 4039  *
 4040  *      Return the number of managed mappings to the given physical page
 4041  *      that are wired.
 4042  */
 4043 int
 4044 pmap_page_wired_mappings(vm_page_t m)
 4045 {
 4046         int count;
 4047 
 4048         count = 0;
 4049         if ((m->oflags & VPO_UNMANAGED) != 0)
 4050                 return (count);
 4051         rw_wlock(&pvh_global_lock);
 4052         count = pmap_pvh_wired_mappings(&m->md, count);
 4053         if ((m->flags & PG_FICTITIOUS) == 0) {
 4054             count = pmap_pvh_wired_mappings(pa_to_pvh(VM_PAGE_TO_PHYS(m)),
 4055                 count);
 4056         }
 4057         rw_wunlock(&pvh_global_lock);
 4058         return (count);
 4059 }
 4060 
 4061 /*
 4062  *      pmap_pvh_wired_mappings:
 4063  *
 4064  *      Return the updated number "count" of managed mappings that are wired.
 4065  */
 4066 static int
 4067 pmap_pvh_wired_mappings(struct md_page *pvh, int count)
 4068 {
 4069         pmap_t pmap;
 4070         pt_entry_t *pte;
 4071         pv_entry_t pv;
 4072 
 4073         rw_assert(&pvh_global_lock, RA_WLOCKED);
 4074         TAILQ_FOREACH(pv, &pvh->pv_list, pv_list) {
 4075                 pmap = PV_PMAP(pv);
 4076                 PMAP_LOCK(pmap);
 4077                 pte = pmap_pte(pmap, pv->pv_va);
 4078                 if ((*pte & PG_W) != 0)
 4079                         count++;
 4080                 PMAP_UNLOCK(pmap);
 4081         }
 4082         return (count);
 4083 }
 4084 
 4085 /*
 4086  * Returns TRUE if the given page is mapped individually or as part of
 4087  * a 2mpage.  Otherwise, returns FALSE.
 4088  */
 4089 boolean_t
 4090 pmap_page_is_mapped(vm_page_t m)
 4091 {
 4092         boolean_t rv;
 4093 
 4094         if ((m->oflags & VPO_UNMANAGED) != 0)
 4095                 return (FALSE);
 4096         rw_wlock(&pvh_global_lock);
 4097         rv = !TAILQ_EMPTY(&m->md.pv_list) ||
 4098             ((m->flags & PG_FICTITIOUS) == 0 &&
 4099             !TAILQ_EMPTY(&pa_to_pvh(VM_PAGE_TO_PHYS(m))->pv_list));
 4100         rw_wunlock(&pvh_global_lock);
 4101         return (rv);
 4102 }
 4103 
/*
 * Remove all pages from specified address space
 * this aids process exit speeds.  Also, this code
 * is special cased for current process only, but
 * can have the more generic (and slightly slower)
 * mode enabled.  This is much faster than pmap_remove
 * in the case of running down an entire address space.
 *
 * Rather than scanning the page tables, this walks the pmap's pv
 * chunks: every managed, unwired mapping is represented by an
 * allocated pv entry in some chunk.  Wired mappings are skipped and
 * keep their chunk alive.  A single pmap_invalidate_all() at the end
 * replaces per-page TLB shootdowns.
 */
void
pmap_remove_pages(pmap_t pmap)
{
        pd_entry_t ptepde;
        pt_entry_t *pte, tpte;
        vm_page_t free = NULL;          /* delayed-free page table pages */
        vm_page_t m, mpte, mt;
        pv_entry_t pv;
        struct md_page *pvh;
        struct pv_chunk *pc, *npc;
        int field, idx;
        int64_t bit;
        uint64_t inuse, bitmask;
        int allfree;

        /* Only the current pmap is supported; see function comment. */
        if (pmap != PCPU_GET(curpmap)) {
                printf("warning: pmap_remove_pages called with non-current pmap\n");
                return;
        }
        rw_wlock(&pvh_global_lock);
        PMAP_LOCK(pmap);
        TAILQ_FOREACH_SAFE(pc, &pmap->pm_pvchunk, pc_list, npc) {
                allfree = 1;
                for (field = 0; field < _NPCM; field++) {
                        /* A clear bit in pc_map marks an allocated entry. */
                        inuse = ~pc->pc_map[field] & pc_freemask[field];
                        while (inuse != 0) {
                                bit = bsfq(inuse);
                                bitmask = 1UL << bit;
                                idx = field * 64 + bit;
                                pv = &pc->pc_pventry[idx];
                                inuse &= ~bitmask;

                                /*
                                 * Locate the mapping.  If the PDE maps a
                                 * 2MB page (PG_PS set), tpte remains the
                                 * PDE itself; otherwise descend to the 4KB
                                 * PTE and remember the PDE in ptepde for
                                 * pmap_unuse_pt() below.
                                 */
                                pte = pmap_pdpe(pmap, pv->pv_va);
                                ptepde = *pte;
                                pte = pmap_pdpe_to_pde(pte, pv->pv_va);
                                tpte = *pte;
                                if ((tpte & (PG_PS | PG_V)) == PG_V) {
                                        ptepde = tpte;
                                        pte = (pt_entry_t *)PHYS_TO_DMAP(tpte &
                                            PG_FRAME);
                                        pte = &pte[pmap_pte_index(pv->pv_va)];
                                        tpte = *pte & ~PG_PTE_PAT;
                                }
                                if ((tpte & PG_V) == 0)
                                        panic("bad pte");

/*
 * We cannot remove wired pages from a process' mapping at this time
 */
                                if (tpte & PG_W) {
                                        allfree = 0;    /* keep this chunk */
                                        continue;
                                }

                                m = PHYS_TO_VM_PAGE(tpte & PG_FRAME);
                                KASSERT(m->phys_addr == (tpte & PG_FRAME),
                                    ("vm_page_t %p phys_addr mismatch %016jx %016jx",
                                    m, (uintmax_t)m->phys_addr,
                                    (uintmax_t)tpte));

                                KASSERT((m->flags & PG_FICTITIOUS) != 0 ||
                                    m < &vm_page_array[vm_page_array_size],
                                    ("pmap_remove_pages: bad tpte %#jx",
                                    (uintmax_t)tpte));

                                pte_clear(pte);

                                /*
                                 * Update the vm_page_t clean/reference bits.
                                 */
                                if ((tpte & (PG_M | PG_RW)) == (PG_M | PG_RW)) {
                                        if ((tpte & PG_PS) != 0) {
                                                /* Dirty every 4KB page of a 2MB mapping. */
                                                for (mt = m; mt < &m[NBPDR / PAGE_SIZE]; mt++)
                                                        vm_page_dirty(mt);
                                        } else
                                                vm_page_dirty(m);
                                }

                                /* Mark free */
                                PV_STAT(pv_entry_frees++);
                                PV_STAT(pv_entry_spare++);
                                pv_entry_count--;
                                pc->pc_map[field] |= bitmask;
                                if ((tpte & PG_PS) != 0) {
                                        /*
                                         * A 2MB mapping: remove its pv entry
                                         * and, if this was the last mapping,
                                         * clear PGA_WRITEABLE on each
                                         * constituent page that has no 4KB
                                         * mappings of its own.  Also reclaim
                                         * the page table page saved at
                                         * promotion time, if any.
                                         */
                                        pmap_resident_count_dec(pmap, NBPDR / PAGE_SIZE);
                                        pvh = pa_to_pvh(tpte & PG_PS_FRAME);
                                        TAILQ_REMOVE(&pvh->pv_list, pv, pv_list);
                                        if (TAILQ_EMPTY(&pvh->pv_list)) {
                                                for (mt = m; mt < &m[NBPDR / PAGE_SIZE]; mt++)
                                                        if ((mt->aflags & PGA_WRITEABLE) != 0 &&
                                                            TAILQ_EMPTY(&mt->md.pv_list))
                                                                vm_page_aflag_clear(mt, PGA_WRITEABLE);
                                        }
                                        mpte = pmap_lookup_pt_page(pmap, pv->pv_va);
                                        if (mpte != NULL) {
                                                pmap_remove_pt_page(pmap, mpte);
                                                pmap_resident_count_dec(pmap, 1);
                                                KASSERT(mpte->wire_count == NPTEPG,
                                                    ("pmap_remove_pages: pte page wire count error"));
                                                mpte->wire_count = 0;
                                                pmap_add_delayed_free_list(mpte, &free, FALSE);
                                                atomic_subtract_int(&cnt.v_wire_count, 1);
                                        }
                                } else {
                                        /*
                                         * A 4KB mapping: clear PGA_WRITEABLE
                                         * only if neither a 4KB nor a 2MB
                                         * mapping of the page remains.
                                         */
                                        pmap_resident_count_dec(pmap, 1);
                                        TAILQ_REMOVE(&m->md.pv_list, pv, pv_list);
                                        if ((m->aflags & PGA_WRITEABLE) != 0 &&
                                            TAILQ_EMPTY(&m->md.pv_list) &&
                                            (m->flags & PG_FICTITIOUS) == 0) {
                                                pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
                                                if (TAILQ_EMPTY(&pvh->pv_list))
                                                        vm_page_aflag_clear(m, PGA_WRITEABLE);
                                        }
                                }
                                pmap_unuse_pt(pmap, pv->pv_va, ptepde, &free);
                        }
                }
                /* Free the chunk once every entry in it has been released. */
                if (allfree) {
                        TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
                        free_pv_chunk(pc);
                }
        }
        pmap_invalidate_all(pmap);
        rw_wunlock(&pvh_global_lock);
        PMAP_UNLOCK(pmap);
        pmap_free_zero_pages(free);
}
 4239 
 4240 /*
 4241  *      pmap_is_modified:
 4242  *
 4243  *      Return whether or not the specified physical page was modified
 4244  *      in any physical maps.
 4245  */
 4246 boolean_t
 4247 pmap_is_modified(vm_page_t m)
 4248 {
 4249         boolean_t rv;
 4250 
 4251         KASSERT((m->oflags & VPO_UNMANAGED) == 0,
 4252             ("pmap_is_modified: page %p is not managed", m));
 4253 
 4254         /*
 4255          * If the page is not VPO_BUSY, then PGA_WRITEABLE cannot be
 4256          * concurrently set while the object is locked.  Thus, if PGA_WRITEABLE
 4257          * is clear, no PTEs can have PG_M set.
 4258          */
 4259         VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
 4260         if ((m->oflags & VPO_BUSY) == 0 &&
 4261             (m->aflags & PGA_WRITEABLE) == 0)
 4262                 return (FALSE);
 4263         rw_wlock(&pvh_global_lock);
 4264         rv = pmap_is_modified_pvh(&m->md) ||
 4265             ((m->flags & PG_FICTITIOUS) == 0 &&
 4266             pmap_is_modified_pvh(pa_to_pvh(VM_PAGE_TO_PHYS(m))));
 4267         rw_wunlock(&pvh_global_lock);
 4268         return (rv);
 4269 }
 4270 
 4271 /*
 4272  * Returns TRUE if any of the given mappings were used to modify
 4273  * physical memory.  Otherwise, returns FALSE.  Both page and 2mpage
 4274  * mappings are supported.
 4275  */
 4276 static boolean_t
 4277 pmap_is_modified_pvh(struct md_page *pvh)
 4278 {
 4279         pv_entry_t pv;
 4280         pt_entry_t *pte;
 4281         pmap_t pmap;
 4282         boolean_t rv;
 4283 
 4284         rw_assert(&pvh_global_lock, RA_WLOCKED);
 4285         rv = FALSE;
 4286         TAILQ_FOREACH(pv, &pvh->pv_list, pv_list) {
 4287                 pmap = PV_PMAP(pv);
 4288                 PMAP_LOCK(pmap);
 4289                 pte = pmap_pte(pmap, pv->pv_va);
 4290                 rv = (*pte & (PG_M | PG_RW)) == (PG_M | PG_RW);
 4291                 PMAP_UNLOCK(pmap);
 4292                 if (rv)
 4293                         break;
 4294         }
 4295         return (rv);
 4296 }
 4297 
 4298 /*
 4299  *      pmap_is_prefaultable:
 4300  *
 4301  *      Return whether or not the specified virtual address is elgible
 4302  *      for prefault.
 4303  */
 4304 boolean_t
 4305 pmap_is_prefaultable(pmap_t pmap, vm_offset_t addr)
 4306 {
 4307         pd_entry_t *pde;
 4308         pt_entry_t *pte;
 4309         boolean_t rv;
 4310 
 4311         rv = FALSE;
 4312         PMAP_LOCK(pmap);
 4313         pde = pmap_pde(pmap, addr);
 4314         if (pde != NULL && (*pde & (PG_PS | PG_V)) == PG_V) {
 4315                 pte = pmap_pde_to_pte(pde, addr);
 4316                 rv = (*pte & PG_V) == 0;
 4317         }
 4318         PMAP_UNLOCK(pmap);
 4319         return (rv);
 4320 }
 4321 
 4322 /*
 4323  *      pmap_is_referenced:
 4324  *
 4325  *      Return whether or not the specified physical page was referenced
 4326  *      in any physical maps.
 4327  */
 4328 boolean_t
 4329 pmap_is_referenced(vm_page_t m)
 4330 {
 4331         boolean_t rv;
 4332 
 4333         KASSERT((m->oflags & VPO_UNMANAGED) == 0,
 4334             ("pmap_is_referenced: page %p is not managed", m));
 4335         rw_wlock(&pvh_global_lock);
 4336         rv = pmap_is_referenced_pvh(&m->md) ||
 4337             ((m->flags & PG_FICTITIOUS) == 0 &&
 4338             pmap_is_referenced_pvh(pa_to_pvh(VM_PAGE_TO_PHYS(m))));
 4339         rw_wunlock(&pvh_global_lock);
 4340         return (rv);
 4341 }
 4342 
 4343 /*
 4344  * Returns TRUE if any of the given mappings were referenced and FALSE
 4345  * otherwise.  Both page and 2mpage mappings are supported.
 4346  */
 4347 static boolean_t
 4348 pmap_is_referenced_pvh(struct md_page *pvh)
 4349 {
 4350         pv_entry_t pv;
 4351         pt_entry_t *pte;
 4352         pmap_t pmap;
 4353         boolean_t rv;
 4354 
 4355         rw_assert(&pvh_global_lock, RA_WLOCKED);
 4356         rv = FALSE;
 4357         TAILQ_FOREACH(pv, &pvh->pv_list, pv_list) {
 4358                 pmap = PV_PMAP(pv);
 4359                 PMAP_LOCK(pmap);
 4360                 pte = pmap_pte(pmap, pv->pv_va);
 4361                 rv = (*pte & (PG_A | PG_V)) == (PG_A | PG_V);
 4362                 PMAP_UNLOCK(pmap);
 4363                 if (rv)
 4364                         break;
 4365         }
 4366         return (rv);
 4367 }
 4368 
/*
 * Clear the write and modified bits in each of the given page's mappings.
 * Any 2MB mapping that includes the page is first demoted so that write
 * access can be revoked at 4KB granularity.
 */
void
pmap_remove_write(vm_page_t m)
{
        struct md_page *pvh;
        pmap_t pmap;
        pv_entry_t next_pv, pv;
        pd_entry_t *pde;
        pt_entry_t oldpte, *pte;
        vm_offset_t va;

        KASSERT((m->oflags & VPO_UNMANAGED) == 0,
            ("pmap_remove_write: page %p is not managed", m));

        /*
         * If the page is not VPO_BUSY, then PGA_WRITEABLE cannot be set by
         * another thread while the object is locked.  Thus, if PGA_WRITEABLE
         * is clear, no page table entries need updating.
         */
        VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
        if ((m->oflags & VPO_BUSY) == 0 &&
            (m->aflags & PGA_WRITEABLE) == 0)
                return;
        rw_wlock(&pvh_global_lock);
        if ((m->flags & PG_FICTITIOUS) != 0)
                goto small_mappings;
        /*
         * Demote every writeable 2MB mapping of the page.  The demotion
         * moves this page's entry from the 2MB pv list onto its own 4KB
         * pv list, where the loop below will write-protect it.
         */
        pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
        TAILQ_FOREACH_SAFE(pv, &pvh->pv_list, pv_list, next_pv) {
                pmap = PV_PMAP(pv);
                PMAP_LOCK(pmap);
                va = pv->pv_va;
                pde = pmap_pde(pmap, va);
                if ((*pde & PG_RW) != 0)
                        (void)pmap_demote_pde(pmap, pde, va);
                PMAP_UNLOCK(pmap);
        }
small_mappings:
        TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
                pmap = PV_PMAP(pv);
                PMAP_LOCK(pmap);
                pde = pmap_pde(pmap, pv->pv_va);
                KASSERT((*pde & PG_PS) == 0,
                    ("pmap_remove_write: found a 2mpage in page %p's pv list",
                    m));
                pte = pmap_pde_to_pte(pde, pv->pv_va);
retry:
                oldpte = *pte;
                if (oldpte & PG_RW) {
                        /*
                         * Atomically clear PG_RW and PG_M together; retry
                         * if the PTE changed underneath us (e.g. the CPU
                         * set PG_M concurrently).
                         */
                        if (!atomic_cmpset_long(pte, oldpte, oldpte &
                            ~(PG_RW | PG_M)))
                                goto retry;
                        /* Propagate a hardware dirty bit to the vm_page. */
                        if ((oldpte & PG_M) != 0)
                                vm_page_dirty(m);
                        pmap_invalidate_page(pmap, pv->pv_va);
                }
                PMAP_UNLOCK(pmap);
        }
        vm_page_aflag_clear(m, PGA_WRITEABLE);
        rw_wunlock(&pvh_global_lock);
}
 4431 
/*
 *      pmap_ts_referenced:
 *
 *      Return a count of reference bits for a page, clearing those bits.
 *      It is not necessary for every reference bit to be cleared, but it
 *      is necessary that 0 only be returned when there are truly no
 *      reference bits set.
 *
 *      XXX: The exact number of bits to check and clear is a matter that
 *      should be tested and standardized at some point in the future for
 *      optimal aging of shared pages.
 */
int
pmap_ts_referenced(vm_page_t m)
{
        struct md_page *pvh;
        pv_entry_t pv, pvf, pvn;
        pmap_t pmap;
        pd_entry_t oldpde, *pde;
        pt_entry_t *pte;
        vm_offset_t va;
        int rtval = 0;

        KASSERT((m->oflags & VPO_UNMANAGED) == 0,
            ("pmap_ts_referenced: page %p is not managed", m));
        rw_wlock(&pvh_global_lock);
        if ((m->flags & PG_FICTITIOUS) != 0)
                goto small_mappings;
        /*
         * First handle 2MB mappings: a referenced, unwired superpage is
         * demoted and this page's resulting 4KB mapping removed, so a
         * later access faults it back in and may trigger repromotion.
         */
        pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
        TAILQ_FOREACH_SAFE(pv, &pvh->pv_list, pv_list, pvn) {
                pmap = PV_PMAP(pv);
                PMAP_LOCK(pmap);
                va = pv->pv_va;
                pde = pmap_pde(pmap, va);
                oldpde = *pde;
                if ((oldpde & PG_A) != 0) {
                        if (pmap_demote_pde(pmap, pde, va)) {
                                if ((oldpde & PG_W) == 0) {
                                        /*
                                         * Remove the mapping to a single page
                                         * so that a subsequent access may
                                         * repromote.  Since the underlying
                                         * page table page is fully populated,
                                         * this removal never frees a page
                                         * table page.
                                         */
                                        va += VM_PAGE_TO_PHYS(m) - (oldpde &
                                            PG_PS_FRAME);
                                        pmap_remove_page(pmap, va, pde, NULL);
                                        rtval++;
                                        /* Stop after a handful of references. */
                                        if (rtval > 4) {
                                                PMAP_UNLOCK(pmap);
                                                goto out;
                                        }
                                }
                        }
                }
                PMAP_UNLOCK(pmap);
        }
small_mappings:
        if ((pv = TAILQ_FIRST(&m->md.pv_list)) != NULL) {
                pvf = pv;
                do {
                        /*
                         * Rotate each examined pv entry to the tail of the
                         * list so that successive calls start with mappings
                         * that have not been checked recently.
                         */
                        pvn = TAILQ_NEXT(pv, pv_list);
                        TAILQ_REMOVE(&m->md.pv_list, pv, pv_list);
                        TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_list);
                        pmap = PV_PMAP(pv);
                        PMAP_LOCK(pmap);
                        pde = pmap_pde(pmap, pv->pv_va);
                        KASSERT((*pde & PG_PS) == 0, ("pmap_ts_referenced:"
                            " found a 2mpage in page %p's pv list", m));
                        pte = pmap_pde_to_pte(pde, pv->pv_va);
                        if ((*pte & PG_A) != 0) {
                                atomic_clear_long(pte, PG_A);
                                pmap_invalidate_page(pmap, pv->pv_va);
                                rtval++;
                                if (rtval > 4)
                                        pvn = NULL;     /* terminate the scan */
                        }
                        PMAP_UNLOCK(pmap);
                } while ((pv = pvn) != NULL && pv != pvf);
        }
out:
        rw_wunlock(&pvh_global_lock);
        return (rtval);
}
 4518 
/*
 *      Clear the modify bits on the specified physical page.  Writeable
 *      2MB mappings are demoted and only this page's 4KB mapping is
 *      write-protected, so a subsequent write may repromote.
 */
void
pmap_clear_modify(vm_page_t m)
{
        struct md_page *pvh;
        pmap_t pmap;
        pv_entry_t next_pv, pv;
        pd_entry_t oldpde, *pde;
        pt_entry_t oldpte, *pte;
        vm_offset_t va;

        KASSERT((m->oflags & VPO_UNMANAGED) == 0,
            ("pmap_clear_modify: page %p is not managed", m));
        VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
        KASSERT((m->oflags & VPO_BUSY) == 0,
            ("pmap_clear_modify: page %p is busy", m));

        /*
         * If the page is not PGA_WRITEABLE, then no PTEs can have PG_M set.
         * If the object containing the page is locked and the page is not
         * VPO_BUSY, then PGA_WRITEABLE cannot be concurrently set.
         */
        if ((m->aflags & PGA_WRITEABLE) == 0)
                return;
        rw_wlock(&pvh_global_lock);
        if ((m->flags & PG_FICTITIOUS) != 0)
                goto small_mappings;
        /* Handle 2MB mappings of the page first, by demotion. */
        pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
        TAILQ_FOREACH_SAFE(pv, &pvh->pv_list, pv_list, next_pv) {
                pmap = PV_PMAP(pv);
                PMAP_LOCK(pmap);
                va = pv->pv_va;
                pde = pmap_pde(pmap, va);
                oldpde = *pde;
                if ((oldpde & PG_RW) != 0) {
                        if (pmap_demote_pde(pmap, pde, va)) {
                                if ((oldpde & PG_W) == 0) {
                                        /*
                                         * Write protect the mapping to a
                                         * single page so that a subsequent
                                         * write access may repromote.
                                         */
                                        va += VM_PAGE_TO_PHYS(m) - (oldpde &
                                            PG_PS_FRAME);
                                        pte = pmap_pde_to_pte(pde, va);
                                        oldpte = *pte;
                                        if ((oldpte & PG_V) != 0) {
                                                /*
                                                 * Atomically clear PG_M and
                                                 * PG_RW, retrying if the PTE
                                                 * changes underneath us.
                                                 */
                                                while (!atomic_cmpset_long(pte,
                                                    oldpte,
                                                    oldpte & ~(PG_M | PG_RW)))
                                                        oldpte = *pte;
                                                vm_page_dirty(m);
                                                pmap_invalidate_page(pmap, va);
                                        }
                                }
                        }
                }
                PMAP_UNLOCK(pmap);
        }
small_mappings:
        TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
                pmap = PV_PMAP(pv);
                PMAP_LOCK(pmap);
                pde = pmap_pde(pmap, pv->pv_va);
                KASSERT((*pde & PG_PS) == 0, ("pmap_clear_modify: found"
                    " a 2mpage in page %p's pv list", m));
                pte = pmap_pde_to_pte(pde, pv->pv_va);
                if ((*pte & (PG_M | PG_RW)) == (PG_M | PG_RW)) {
                        /* Clear PG_M only; the mapping stays writeable. */
                        atomic_clear_long(pte, PG_M);
                        pmap_invalidate_page(pmap, pv->pv_va);
                }
                PMAP_UNLOCK(pmap);
        }
        rw_wunlock(&pvh_global_lock);
}
 4596 
/*
 *      pmap_clear_reference:
 *
 *      Clear the reference bit on the specified physical page.
 */
void
pmap_clear_reference(vm_page_t m)
{
        struct md_page *pvh;
        pmap_t pmap;
        pv_entry_t next_pv, pv;
        pd_entry_t oldpde, *pde;
        pt_entry_t *pte;
        vm_offset_t va;

        KASSERT((m->oflags & VPO_UNMANAGED) == 0,
            ("pmap_clear_reference: page %p is not managed", m));
        rw_wlock(&pvh_global_lock);
        /* Fictitious pages carry no 2MB mappings; go straight to the 4KB list. */
        if ((m->flags & PG_FICTITIOUS) != 0)
                goto small_mappings;
        /*
         * First pass: for every 2MB mapping that covers this page and has
         * PG_A set, demote the mapping and then destroy the resulting 4KB
         * mapping of this page, so that a later access can repromote.
         * TAILQ_FOREACH_SAFE is required because demotion/removal mutates
         * the pv lists being traversed.
         */
        pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
        TAILQ_FOREACH_SAFE(pv, &pvh->pv_list, pv_list, next_pv) {
                pmap = PV_PMAP(pv);
                PMAP_LOCK(pmap);
                va = pv->pv_va;
                pde = pmap_pde(pmap, va);
                oldpde = *pde;
                if ((oldpde & PG_A) != 0) {
                        if (pmap_demote_pde(pmap, pde, va)) {
                                /*
                                 * Remove the mapping to a single page so
                                 * that a subsequent access may repromote.
                                 * Since the underlying page table page is
                                 * fully populated, this removal never frees
                                 * a page table page.
                                 */
                                va += VM_PAGE_TO_PHYS(m) - (oldpde &
                                    PG_PS_FRAME);
                                pmap_remove_page(pmap, va, pde, NULL);
                        }
                }
                PMAP_UNLOCK(pmap);
        }
small_mappings:
        /*
         * Second pass: clear PG_A in each remaining 4KB mapping and
         * invalidate its TLB entry so the hardware can set PG_A again.
         */
        TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
                pmap = PV_PMAP(pv);
                PMAP_LOCK(pmap);
                pde = pmap_pde(pmap, pv->pv_va);
                KASSERT((*pde & PG_PS) == 0, ("pmap_clear_reference: found"
                    " a 2mpage in page %p's pv list", m));
                pte = pmap_pde_to_pte(pde, pv->pv_va);
                if (*pte & PG_A) {
                        atomic_clear_long(pte, PG_A);
                        pmap_invalidate_page(pmap, pv->pv_va);
                }
                PMAP_UNLOCK(pmap);
        }
        rw_wunlock(&pvh_global_lock);
}
 4656 
 4657 /*
 4658  * Miscellaneous support routines follow
 4659  */
 4660 
 4661 /* Adjust the cache mode for a 4KB page mapped via a PTE. */
 4662 static __inline void
 4663 pmap_pte_attr(pt_entry_t *pte, int cache_bits)
 4664 {
 4665         u_int opte, npte;
 4666 
 4667         /*
 4668          * The cache mode bits are all in the low 32-bits of the
 4669          * PTE, so we can just spin on updating the low 32-bits.
 4670          */
 4671         do {
 4672                 opte = *(u_int *)pte;
 4673                 npte = opte & ~PG_PTE_CACHE;
 4674                 npte |= cache_bits;
 4675         } while (npte != opte && !atomic_cmpset_int((u_int *)pte, opte, npte));
 4676 }
 4677 
 4678 /* Adjust the cache mode for a 2MB page mapped via a PDE. */
 4679 static __inline void
 4680 pmap_pde_attr(pd_entry_t *pde, int cache_bits)
 4681 {
 4682         u_int opde, npde;
 4683 
 4684         /*
 4685          * The cache mode bits are all in the low 32-bits of the
 4686          * PDE, so we can just spin on updating the low 32-bits.
 4687          */
 4688         do {
 4689                 opde = *(u_int *)pde;
 4690                 npde = opde & ~PG_PDE_CACHE;
 4691                 npde |= cache_bits;
 4692         } while (npde != opde && !atomic_cmpset_int((u_int *)pde, opde, npde));
 4693 }
 4694 
 4695 /*
 4696  * Map a set of physical memory pages into the kernel virtual
 4697  * address space. Return a pointer to where it is mapped. This
 4698  * routine is intended to be used for mapping device memory,
 4699  * NOT real memory.
 4700  */
 4701 void *
 4702 pmap_mapdev_attr(vm_paddr_t pa, vm_size_t size, int mode)
 4703 {
 4704         vm_offset_t va, offset;
 4705         vm_size_t tmpsize;
 4706 
 4707         /*
 4708          * If the specified range of physical addresses fits within the direct
 4709          * map window, use the direct map. 
 4710          */
 4711         if (pa < dmaplimit && pa + size < dmaplimit) {
 4712                 va = PHYS_TO_DMAP(pa);
 4713                 if (!pmap_change_attr(va, size, mode))
 4714                         return ((void *)va);
 4715         }
 4716         offset = pa & PAGE_MASK;
 4717         size = roundup(offset + size, PAGE_SIZE);
 4718         va = kmem_alloc_nofault(kernel_map, size);
 4719         if (!va)
 4720                 panic("pmap_mapdev: Couldn't alloc kernel virtual memory");
 4721         pa = trunc_page(pa);
 4722         for (tmpsize = 0; tmpsize < size; tmpsize += PAGE_SIZE)
 4723                 pmap_kenter_attr(va + tmpsize, pa + tmpsize, mode);
 4724         pmap_invalidate_range(kernel_pmap, va, va + tmpsize);
 4725         pmap_invalidate_cache_range(va, va + tmpsize);
 4726         return ((void *)(va + offset));
 4727 }
 4728 
void *
pmap_mapdev(vm_paddr_t pa, vm_size_t size)
{

        /* Device registers default to an uncacheable mapping. */
        return (pmap_mapdev_attr(pa, size, PAT_UNCACHEABLE));
}
 4735 
void *
pmap_mapbios(vm_paddr_t pa, vm_size_t size)
{

        /* BIOS/firmware tables live in ordinary RAM; map them write-back. */
        return (pmap_mapdev_attr(pa, size, PAT_WRITE_BACK));
}
 4742 
 4743 void
 4744 pmap_unmapdev(vm_offset_t va, vm_size_t size)
 4745 {
 4746         vm_offset_t base, offset, tmpva;
 4747 
 4748         /* If we gave a direct map region in pmap_mapdev, do nothing */
 4749         if (va >= DMAP_MIN_ADDRESS && va < DMAP_MAX_ADDRESS)
 4750                 return;
 4751         base = trunc_page(va);
 4752         offset = va & PAGE_MASK;
 4753         size = roundup(offset + size, PAGE_SIZE);
 4754         for (tmpva = base; tmpva < (base + size); tmpva += PAGE_SIZE)
 4755                 pmap_kremove(tmpva);
 4756         pmap_invalidate_range(kernel_pmap, va, tmpva);
 4757         kmem_free(kernel_map, base, size);
 4758 }
 4759 
/*
 * Tries to demote a 1GB page mapping.  Returns FALSE only if a page
 * directory page cannot be allocated to hold the replacement 2MB entries.
 */
static boolean_t
pmap_demote_pdpe(pmap_t pmap, pdp_entry_t *pdpe, vm_offset_t va)
{
        pdp_entry_t newpdpe, oldpdpe;
        pd_entry_t *firstpde, newpde, *pde;
        vm_paddr_t mpdepa;
        vm_page_t mpde;

        PMAP_LOCK_ASSERT(pmap, MA_OWNED);
        oldpdpe = *pdpe;
        KASSERT((oldpdpe & (PG_PS | PG_V)) == (PG_PS | PG_V),
            ("pmap_demote_pdpe: oldpdpe is missing PG_PS and/or PG_V"));
        /*
         * Allocate the page directory page that will hold the NPDEPG 2MB
         * mappings replacing the single 1GB mapping.
         */
        if ((mpde = vm_page_alloc(NULL, va >> PDPSHIFT, VM_ALLOC_INTERRUPT |
            VM_ALLOC_NOOBJ | VM_ALLOC_WIRED)) == NULL) {
                CTR2(KTR_PMAP, "pmap_demote_pdpe: failure for va %#lx"
                    " in pmap %p", va, pmap);
                return (FALSE);
        }
        mpdepa = VM_PAGE_TO_PHYS(mpde);
        firstpde = (pd_entry_t *)PHYS_TO_DMAP(mpdepa);
        newpdpe = mpdepa | PG_M | PG_A | (oldpdpe & PG_U) | PG_RW | PG_V;
        KASSERT((oldpdpe & PG_A) != 0,
            ("pmap_demote_pdpe: oldpdpe is missing PG_A"));
        KASSERT((oldpdpe & (PG_M | PG_RW)) != PG_RW,
            ("pmap_demote_pdpe: oldpdpe is missing PG_M"));
        /*
         * Each derived PDE starts as a copy of the old PDPE; PG_PS is
         * retained, which at the PDE level denotes a 2MB page, and only the
         * physical frame advances by NBPDR per entry.
         */
        newpde = oldpdpe;

        /*
         * Initialize the page directory page.  This must be complete before
         * the new PDPE is published below, so that other CPUs never see a
         * partially filled page directory.
         */
        for (pde = firstpde; pde < firstpde + NPDEPG; pde++) {
                *pde = newpde;
                newpde += NBPDR;
        }

        /*
         * Demote the mapping.
         */
        *pdpe = newpdpe;

        /*
         * Invalidate a stale recursive mapping of the page directory page.
         */
        pmap_invalidate_page(pmap, (vm_offset_t)vtopde(va));

        pmap_pdpe_demotions++;
        CTR2(KTR_PMAP, "pmap_demote_pdpe: success for va %#lx"
            " in pmap %p", va, pmap);
        return (TRUE);
}
 4813 
 4814 /*
 4815  * Sets the memory attribute for the specified page.
 4816  */
 4817 void
 4818 pmap_page_set_memattr(vm_page_t m, vm_memattr_t ma)
 4819 {
 4820 
 4821         m->md.pat_mode = ma;
 4822 
 4823         /*
 4824          * If "m" is a normal page, update its direct mapping.  This update
 4825          * can be relied upon to perform any cache operations that are
 4826          * required for data coherence.
 4827          */
 4828         if ((m->flags & PG_FICTITIOUS) == 0 &&
 4829             pmap_change_attr(PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m)), PAGE_SIZE,
 4830             m->md.pat_mode))
 4831                 panic("memory attribute change on the direct map failed");
 4832 }
 4833 
/*
 * Changes the specified virtual address range's memory type to that given by
 * the parameter "mode".  The specified virtual address range must be
 * completely contained within either the direct map or the kernel map.  If
 * the virtual address range is contained within the kernel map, then the
 * memory type for each of the corresponding ranges of the direct map is also
 * changed.  (The corresponding ranges of the direct map are those ranges that
 * map the same physical pages as the specified virtual address range.)  These
 * changes to the direct map are necessary because Intel describes the
 * behavior of their processors as "undefined" if two or more mappings to the
 * same physical page have different memory types.
 *
 * Returns zero if the change completed successfully, and either EINVAL or
 * ENOMEM if the change failed.  Specifically, EINVAL is returned if some part
 * of the virtual address range was not mapped, and ENOMEM is returned if
 * there was insufficient memory available to complete the change.  In the
 * latter case, the memory type may have been changed on some part of the
 * virtual address range or the direct map.
 */
int
pmap_change_attr(vm_offset_t va, vm_size_t size, int mode)
{
        int error;

        /* All the work happens under the kernel pmap lock. */
        PMAP_LOCK(kernel_pmap);
        error = pmap_change_attr_locked(va, size, mode);
        PMAP_UNLOCK(kernel_pmap);
        return (error);
}
 4863 
/*
 * Locked worker for pmap_change_attr().  Performs two passes over the
 * range: the first validates that every page is mapped and demotes 1GB/2MB
 * pages that only partially overlap the range; the second rewrites the
 * cache-mode bits and, for kernel-map addresses, recursively applies the
 * same change to the corresponding direct-map ranges.  Recursion terminates
 * because a direct-map range never satisfies tmpva >= VM_MIN_KERNEL_ADDRESS.
 */
static int
pmap_change_attr_locked(vm_offset_t va, vm_size_t size, int mode)
{
        vm_offset_t base, offset, tmpva;
        vm_paddr_t pa_start, pa_end;
        pdp_entry_t *pdpe;
        pd_entry_t *pde;
        pt_entry_t *pte;
        int cache_bits_pte, cache_bits_pde, error;
        boolean_t changed;

        PMAP_LOCK_ASSERT(kernel_pmap, MA_OWNED);
        base = trunc_page(va);
        offset = va & PAGE_MASK;
        size = roundup(offset + size, PAGE_SIZE);

        /*
         * Only supported on kernel virtual addresses, including the direct
         * map but excluding the recursive map.
         */
        if (base < DMAP_MIN_ADDRESS)
                return (EINVAL);

        cache_bits_pde = pmap_cache_bits(mode, 1);
        cache_bits_pte = pmap_cache_bits(mode, 0);
        changed = FALSE;

        /*
         * Pages that aren't mapped aren't supported.  Also break down 2MB pages
         * into 4KB pages if required.
         */
        for (tmpva = base; tmpva < base + size; ) {
                pdpe = pmap_pdpe(kernel_pmap, tmpva);
                if (*pdpe == 0)
                        return (EINVAL);
                if (*pdpe & PG_PS) {
                        /*
                         * If the current 1GB page already has the required
                         * memory type, then we need not demote this page. Just
                         * increment tmpva to the next 1GB page frame.
                         */
                        if ((*pdpe & PG_PDE_CACHE) == cache_bits_pde) {
                                tmpva = trunc_1gpage(tmpva) + NBPDP;
                                continue;
                        }

                        /*
                         * If the current offset aligns with a 1GB page frame
                         * and there is at least 1GB left within the range, then
                         * we need not break down this page into 2MB pages.
                         */
                        if ((tmpva & PDPMASK) == 0 &&
                            tmpva + PDPMASK < base + size) {
                                tmpva += NBPDP;
                                continue;
                        }
                        if (!pmap_demote_pdpe(kernel_pmap, pdpe, tmpva))
                                return (ENOMEM);
                }
                pde = pmap_pdpe_to_pde(pdpe, tmpva);
                if (*pde == 0)
                        return (EINVAL);
                if (*pde & PG_PS) {
                        /*
                         * If the current 2MB page already has the required
                         * memory type, then we need not demote this page. Just
                         * increment tmpva to the next 2MB page frame.
                         */
                        if ((*pde & PG_PDE_CACHE) == cache_bits_pde) {
                                tmpva = trunc_2mpage(tmpva) + NBPDR;
                                continue;
                        }

                        /*
                         * If the current offset aligns with a 2MB page frame
                         * and there is at least 2MB left within the range, then
                         * we need not break down this page into 4KB pages.
                         */
                        if ((tmpva & PDRMASK) == 0 &&
                            tmpva + PDRMASK < base + size) {
                                tmpva += NBPDR;
                                continue;
                        }
                        if (!pmap_demote_pde(kernel_pmap, pde, tmpva))
                                return (ENOMEM);
                }
                pte = pmap_pde_to_pte(pde, tmpva);
                if (*pte == 0)
                        return (EINVAL);
                tmpva += PAGE_SIZE;
        }
        error = 0;

        /*
         * Ok, all the pages exist, so run through them updating their
         * cache mode if required.  For kernel-map addresses, contiguous
         * physical pages are accumulated into [pa_start, pa_end) runs so
         * that the direct map is updated with one recursive call per run.
         */
        pa_start = pa_end = 0;
        for (tmpva = base; tmpva < base + size; ) {
                pdpe = pmap_pdpe(kernel_pmap, tmpva);
                if (*pdpe & PG_PS) {
                        if ((*pdpe & PG_PDE_CACHE) != cache_bits_pde) {
                                pmap_pde_attr(pdpe, cache_bits_pde);
                                changed = TRUE;
                        }
                        if (tmpva >= VM_MIN_KERNEL_ADDRESS) {
                                if (pa_start == pa_end) {
                                        /* Start physical address run. */
                                        pa_start = *pdpe & PG_PS_FRAME;
                                        pa_end = pa_start + NBPDP;
                                } else if (pa_end == (*pdpe & PG_PS_FRAME))
                                        pa_end += NBPDP;
                                else {
                                        /* Run ended, update direct map. */
                                        error = pmap_change_attr_locked(
                                            PHYS_TO_DMAP(pa_start),
                                            pa_end - pa_start, mode);
                                        if (error != 0)
                                                break;
                                        /* Start physical address run. */
                                        pa_start = *pdpe & PG_PS_FRAME;
                                        pa_end = pa_start + NBPDP;
                                }
                        }
                        tmpva = trunc_1gpage(tmpva) + NBPDP;
                        continue;
                }
                pde = pmap_pdpe_to_pde(pdpe, tmpva);
                if (*pde & PG_PS) {
                        if ((*pde & PG_PDE_CACHE) != cache_bits_pde) {
                                pmap_pde_attr(pde, cache_bits_pde);
                                changed = TRUE;
                        }
                        if (tmpva >= VM_MIN_KERNEL_ADDRESS) {
                                if (pa_start == pa_end) {
                                        /* Start physical address run. */
                                        pa_start = *pde & PG_PS_FRAME;
                                        pa_end = pa_start + NBPDR;
                                } else if (pa_end == (*pde & PG_PS_FRAME))
                                        pa_end += NBPDR;
                                else {
                                        /* Run ended, update direct map. */
                                        error = pmap_change_attr_locked(
                                            PHYS_TO_DMAP(pa_start),
                                            pa_end - pa_start, mode);
                                        if (error != 0)
                                                break;
                                        /* Start physical address run. */
                                        pa_start = *pde & PG_PS_FRAME;
                                        pa_end = pa_start + NBPDR;
                                }
                        }
                        tmpva = trunc_2mpage(tmpva) + NBPDR;
                } else {
                        pte = pmap_pde_to_pte(pde, tmpva);
                        if ((*pte & PG_PTE_CACHE) != cache_bits_pte) {
                                pmap_pte_attr(pte, cache_bits_pte);
                                changed = TRUE;
                        }
                        if (tmpva >= VM_MIN_KERNEL_ADDRESS) {
                                if (pa_start == pa_end) {
                                        /* Start physical address run. */
                                        pa_start = *pte & PG_FRAME;
                                        pa_end = pa_start + PAGE_SIZE;
                                } else if (pa_end == (*pte & PG_FRAME))
                                        pa_end += PAGE_SIZE;
                                else {
                                        /* Run ended, update direct map. */
                                        error = pmap_change_attr_locked(
                                            PHYS_TO_DMAP(pa_start),
                                            pa_end - pa_start, mode);
                                        if (error != 0)
                                                break;
                                        /* Start physical address run. */
                                        pa_start = *pte & PG_FRAME;
                                        pa_end = pa_start + PAGE_SIZE;
                                }
                        }
                        tmpva += PAGE_SIZE;
                }
        }
        /* Flush the final physical address run, if any. */
        if (error == 0 && pa_start != pa_end)
                error = pmap_change_attr_locked(PHYS_TO_DMAP(pa_start),
                    pa_end - pa_start, mode);

        /*
         * Flush CPU caches if required to make sure any data isn't cached that
         * shouldn't be, etc.
         */
        if (changed) {
                pmap_invalidate_range(kernel_pmap, base, tmpva);
                pmap_invalidate_cache_range(base, tmpva);
        }
        return (error);
}
 5059 
/*
 * Demotes any mapping within the direct map region that covers more than the
 * specified range of physical addresses.  This range's size must be a power
 * of two and its starting address must be a multiple of its size.  Since the
 * demotion does not change any attributes of the mapping, a TLB invalidation
 * is not mandatory.  The caller may, however, request a TLB invalidation.
 */
void
pmap_demote_DMAP(vm_paddr_t base, vm_size_t len, boolean_t invalidate)
{
        pdp_entry_t *pdpe;
        pd_entry_t *pde;
        vm_offset_t va;
        boolean_t changed;

        if (len == 0)
                return;
        KASSERT(powerof2(len), ("pmap_demote_DMAP: len is not a power of 2"));
        KASSERT((base & (len - 1)) == 0,
            ("pmap_demote_DMAP: base is not a multiple of len"));
        /* A range of NBPDP or more is already covered by whole 1GB pages. */
        if (len < NBPDP && base < dmaplimit) {
                va = PHYS_TO_DMAP(base);
                changed = FALSE;
                PMAP_LOCK(kernel_pmap);
                /* Break a covering 1GB page down to 2MB pages. */
                pdpe = pmap_pdpe(kernel_pmap, va);
                if ((*pdpe & PG_V) == 0)
                        panic("pmap_demote_DMAP: invalid PDPE");
                if ((*pdpe & PG_PS) != 0) {
                        if (!pmap_demote_pdpe(kernel_pmap, pdpe, va))
                                panic("pmap_demote_DMAP: PDPE failed");
                        changed = TRUE;
                }
                /* If the range is smaller still, break down to 4KB pages. */
                if (len < NBPDR) {
                        pde = pmap_pdpe_to_pde(pdpe, va);
                        if ((*pde & PG_V) == 0)
                                panic("pmap_demote_DMAP: invalid PDE");
                        if ((*pde & PG_PS) != 0) {
                                if (!pmap_demote_pde(kernel_pmap, pde, va))
                                        panic("pmap_demote_DMAP: PDE failed");
                                changed = TRUE;
                        }
                }
                if (changed && invalidate)
                        pmap_invalidate_page(kernel_pmap, va);
                PMAP_UNLOCK(kernel_pmap);
        }
}
 5107 
/*
 * perform the pmap work for mincore
 */
int
pmap_mincore(pmap_t pmap, vm_offset_t addr, vm_paddr_t *locked_pa)
{
        pd_entry_t *pdep;
        pt_entry_t pte;
        vm_paddr_t pa;
        int val;

        PMAP_LOCK(pmap);
retry:
        pdep = pmap_pde(pmap, addr);
        if (pdep != NULL && (*pdep & PG_V)) {
                if (*pdep & PG_PS) {
                        /*
                         * A 2MB mapping: the PDE itself carries the PG_M/PG_A
                         * bits examined below.
                         */
                        pte = *pdep;
                        /* Compute the physical address of the 4KB page. */
                        pa = ((*pdep & PG_PS_FRAME) | (addr & PDRMASK)) &
                            PG_FRAME;
                        val = MINCORE_SUPER;
                } else {
                        pte = *pmap_pde_to_pte(pdep, addr);
                        pa = pte & PG_FRAME;
                        val = 0;
                }
        } else {
                pte = 0;
                pa = 0;
                val = 0;
        }
        if ((pte & PG_V) != 0) {
                val |= MINCORE_INCORE;
                if ((pte & (PG_M | PG_RW)) == (PG_M | PG_RW))
                        val |= MINCORE_MODIFIED | MINCORE_MODIFIED_OTHER;
                if ((pte & PG_A) != 0)
                        val |= MINCORE_REFERENCED | MINCORE_REFERENCED_OTHER;
        }
        /*
         * For a managed page whose *_OTHER status is still incomplete, the
         * caller needs the page lock; if tryrelock had to drop and reacquire
         * the pmap lock, the mapping may have changed, so start over.
         */
        if ((val & (MINCORE_MODIFIED_OTHER | MINCORE_REFERENCED_OTHER)) !=
            (MINCORE_MODIFIED_OTHER | MINCORE_REFERENCED_OTHER) &&
            (pte & (PG_MANAGED | PG_V)) == (PG_MANAGED | PG_V)) {
                /* Ensure that "PHYS_TO_VM_PAGE(pa)->object" doesn't change. */
                if (vm_page_pa_tryrelock(pmap, pa, locked_pa))
                        goto retry;
        } else
                PA_UNLOCK_COND(*locked_pa);
        PMAP_UNLOCK(pmap);
        return (val);
}
 5157 
/*
 * Make the given thread's pmap the one that is active on this CPU: move
 * this CPU from the old pmap's active set to the new one's and load the
 * new pmap's page-table root into %cr3.
 */
void
pmap_activate(struct thread *td)
{
        pmap_t  pmap, oldpmap;
        u_int   cpuid;
        u_int64_t  cr3;

        /* Block preemption so the cpuid and active-set updates stay paired. */
        critical_enter();
        pmap = vmspace_pmap(td->td_proc->p_vmspace);
        oldpmap = PCPU_GET(curpmap);
        cpuid = PCPU_GET(cpuid);
#ifdef SMP
        CPU_CLR_ATOMIC(cpuid, &oldpmap->pm_active);
        CPU_SET_ATOMIC(cpuid, &pmap->pm_active);
#else
        CPU_CLR(cpuid, &oldpmap->pm_active);
        CPU_SET(cpuid, &pmap->pm_active);
#endif
        /* Record the new root in the PCB so context switches reload it. */
        cr3 = DMAP_TO_PHYS((vm_offset_t)pmap->pm_pml4);
        td->td_pcb->pcb_cr3 = cr3;
        load_cr3(cr3);
        PCPU_SET(curpmap, pmap);
        critical_exit();
}
 5182 
/*
 * Synchronize the instruction cache with recently written code.
 * Intentionally empty on amd64 — presumably because the hardware keeps the
 * instruction cache coherent with data stores, so no explicit maintenance
 * is required (NOTE(review): confirm against the architecture manual).
 */
void
pmap_sync_icache(pmap_t pm, vm_offset_t va, vm_size_t sz)
{
}
 5187 
 5188 /*
 5189  *      Increase the starting virtual address of the given mapping if a
 5190  *      different alignment might result in more superpage mappings.
 5191  */
 5192 void
 5193 pmap_align_superpage(vm_object_t object, vm_ooffset_t offset,
 5194     vm_offset_t *addr, vm_size_t size)
 5195 {
 5196         vm_offset_t superpage_offset;
 5197 
 5198         if (size < NBPDR)
 5199                 return;
 5200         if (object != NULL && (object->flags & OBJ_COLORED) != 0)
 5201                 offset += ptoa(object->pg_color);
 5202         superpage_offset = offset & PDRMASK;
 5203         if (size - ((NBPDR - superpage_offset) & PDRMASK) < NBPDR ||
 5204             (*addr & PDRMASK) == superpage_offset)
 5205                 return;
 5206         if ((*addr & PDRMASK) < superpage_offset)
 5207                 *addr = (*addr & ~PDRMASK) + superpage_offset;
 5208         else
 5209                 *addr = ((*addr + PDRMASK) & ~PDRMASK) + superpage_offset;
 5210 }

Cache object: 34f94827688eed5586a2f1af95eefd9d


[ source navigation ] [ diff markup ] [ identifier search ] [ freetext search ] [ file search ] [ list types ] [ track identifier ]


This page is part of the FreeBSD/Linux Linux Kernel Cross-Reference, and was automatically generated using a modified version of the LXR engine.