The Design and Implementation of the FreeBSD Operating System, Second Edition
Now available: The Design and Implementation of the FreeBSD Operating System (Second Edition)


[ source navigation ] [ diff markup ] [ identifier search ] [ freetext search ] [ file search ] [ list types ] [ track identifier ]

FreeBSD/Linux Kernel Cross Reference
sys/i386/i386/pmap.c

Version: -  FREEBSD  -  FREEBSD-13-STABLE  -  FREEBSD-13-0  -  FREEBSD-12-STABLE  -  FREEBSD-12-0  -  FREEBSD-11-STABLE  -  FREEBSD-11-0  -  FREEBSD-10-STABLE  -  FREEBSD-10-0  -  FREEBSD-9-STABLE  -  FREEBSD-9-0  -  FREEBSD-8-STABLE  -  FREEBSD-8-0  -  FREEBSD-7-STABLE  -  FREEBSD-7-0  -  FREEBSD-6-STABLE  -  FREEBSD-6-0  -  FREEBSD-5-STABLE  -  FREEBSD-5-0  -  FREEBSD-4-STABLE  -  FREEBSD-3-STABLE  -  FREEBSD22  -  l41  -  OPENBSD  -  linux-2.6  -  MK84  -  PLAN9  -  xnu-8792 
SearchContext: -  none  -  3  -  10 

    1 /*-
    2  * Copyright (c) 1991 Regents of the University of California.
    3  * All rights reserved.
    4  * Copyright (c) 1994 John S. Dyson
    5  * All rights reserved.
    6  * Copyright (c) 1994 David Greenman
    7  * All rights reserved.
    8  * Copyright (c) 2005-2010 Alan L. Cox <alc@cs.rice.edu>
    9  * All rights reserved.
   10  *
   11  * This code is derived from software contributed to Berkeley by
   12  * the Systems Programming Group of the University of Utah Computer
   13  * Science Department and William Jolitz of UUNET Technologies Inc.
   14  *
   15  * Redistribution and use in source and binary forms, with or without
   16  * modification, are permitted provided that the following conditions
   17  * are met:
   18  * 1. Redistributions of source code must retain the above copyright
   19  *    notice, this list of conditions and the following disclaimer.
   20  * 2. Redistributions in binary form must reproduce the above copyright
   21  *    notice, this list of conditions and the following disclaimer in the
   22  *    documentation and/or other materials provided with the distribution.
   23  * 3. All advertising materials mentioning features or use of this software
   24  *    must display the following acknowledgement:
   25  *      This product includes software developed by the University of
   26  *      California, Berkeley and its contributors.
   27  * 4. Neither the name of the University nor the names of its contributors
   28  *    may be used to endorse or promote products derived from this software
   29  *    without specific prior written permission.
   30  *
   31  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
   32  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   33  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   34  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
   35  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   36  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
   37  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   38  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   39  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   40  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   41  * SUCH DAMAGE.
   42  *
   43  *      from:   @(#)pmap.c      7.7 (Berkeley)  5/12/91
   44  */
   45 /*-
   46  * Copyright (c) 2003 Networks Associates Technology, Inc.
   47  * All rights reserved.
   48  *
   49  * This software was developed for the FreeBSD Project by Jake Burkholder,
   50  * Safeport Network Services, and Network Associates Laboratories, the
   51  * Security Research Division of Network Associates, Inc. under
   52  * DARPA/SPAWAR contract N66001-01-C-8035 ("CBOSS"), as part of the DARPA
   53  * CHATS research program.
   54  *
   55  * Redistribution and use in source and binary forms, with or without
   56  * modification, are permitted provided that the following conditions
   57  * are met:
   58  * 1. Redistributions of source code must retain the above copyright
   59  *    notice, this list of conditions and the following disclaimer.
   60  * 2. Redistributions in binary form must reproduce the above copyright
   61  *    notice, this list of conditions and the following disclaimer in the
   62  *    documentation and/or other materials provided with the distribution.
   63  *
   64  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
   65  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   66  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   67  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
   68  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   69  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
   70  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   71  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   72  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   73  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   74  * SUCH DAMAGE.
   75  */
   76 
   77 #include <sys/cdefs.h>
   78 __FBSDID("$FreeBSD$");
   79 
   80 /*
   81  *      Manages physical address maps.
   82  *
   83  *      In addition to hardware address maps, this
   84  *      module is called upon to provide software-use-only
   85  *      maps which may or may not be stored in the same
   86  *      form as hardware maps.  These pseudo-maps are
   87  *      used to store intermediate results from copy
   88  *      operations to and from address spaces.
   89  *
   90  *      Since the information managed by this module is
   91  *      also stored by the logical address mapping module,
   92  *      this module may throw away valid virtual-to-physical
   93  *      mappings at almost any time.  However, invalidations
   94  *      of virtual-to-physical mappings must be done as
   95  *      requested.
   96  *
   97  *      In order to cope with hardware architectures which
   98  *      make virtual-to-physical map invalidates expensive,
   99  *      this module may delay invalidate or reduced protection
  100  *      operations until such time as they are actually
  101  *      necessary.  This module is given full information as
  102  *      to which processors are currently using which maps,
  103  *      and to when physical maps must be made correct.
  104  */
  105 
  106 #include "opt_apic.h"
  107 #include "opt_cpu.h"
  108 #include "opt_pmap.h"
  109 #include "opt_smp.h"
  110 #include "opt_xbox.h"
  111 
  112 #include <sys/param.h>
  113 #include <sys/systm.h>
  114 #include <sys/kernel.h>
  115 #include <sys/ktr.h>
  116 #include <sys/lock.h>
  117 #include <sys/malloc.h>
  118 #include <sys/mman.h>
  119 #include <sys/msgbuf.h>
  120 #include <sys/mutex.h>
  121 #include <sys/proc.h>
  122 #include <sys/sf_buf.h>
  123 #include <sys/sx.h>
  124 #include <sys/vmmeter.h>
  125 #include <sys/sched.h>
  126 #include <sys/sysctl.h>
  127 #ifdef SMP
  128 #include <sys/smp.h>
  129 #endif
  130 
  131 #include <vm/vm.h>
  132 #include <vm/vm_param.h>
  133 #include <vm/vm_kern.h>
  134 #include <vm/vm_page.h>
  135 #include <vm/vm_map.h>
  136 #include <vm/vm_object.h>
  137 #include <vm/vm_extern.h>
  138 #include <vm/vm_pageout.h>
  139 #include <vm/vm_pager.h>
  140 #include <vm/vm_reserv.h>
  141 #include <vm/uma.h>
  142 
  143 #ifdef DEV_APIC
  144 #include <sys/bus.h>
  145 #include <machine/intr_machdep.h>
  146 #include <machine/apicvar.h>
  147 #endif
  148 #include <machine/cpu.h>
  149 #include <machine/cputypes.h>
  150 #include <machine/md_var.h>
  151 #include <machine/pcb.h>
  152 #include <machine/specialreg.h>
  153 #ifdef SMP
  154 #include <machine/smp.h>
  155 #endif
  156 
  157 #ifdef XBOX
  158 #include <machine/xbox.h>
  159 #endif
  160 
  161 #if !defined(CPU_DISABLE_SSE) && defined(I686_CPU)
  162 #define CPU_ENABLE_SSE
  163 #endif
  164 
  165 #ifndef PMAP_SHPGPERPROC
  166 #define PMAP_SHPGPERPROC 200
  167 #endif
  168 
  169 #if !defined(DIAGNOSTIC)
  170 #define PMAP_INLINE     __gnu89_inline
  171 #else
  172 #define PMAP_INLINE
  173 #endif
  174 
  175 #ifdef PV_STATS
  176 #define PV_STAT(x)      do { x ; } while (0)
  177 #else
  178 #define PV_STAT(x)      do { } while (0)
  179 #endif
  180 
  181 #define pa_index(pa)    ((pa) >> PDRSHIFT)
  182 #define pa_to_pvh(pa)   (&pv_table[pa_index(pa)])
  183 
  184 /*
  185  * Get PDEs and PTEs for user/kernel address space
  186  */
  187 #define pmap_pde(m, v)  (&((m)->pm_pdir[(vm_offset_t)(v) >> PDRSHIFT]))
  188 #define pdir_pde(m, v) (m[(vm_offset_t)(v) >> PDRSHIFT])
  189 
  190 #define pmap_pde_v(pte)         ((*(int *)pte & PG_V) != 0)
  191 #define pmap_pte_w(pte)         ((*(int *)pte & PG_W) != 0)
  192 #define pmap_pte_m(pte)         ((*(int *)pte & PG_M) != 0)
  193 #define pmap_pte_u(pte)         ((*(int *)pte & PG_A) != 0)
  194 #define pmap_pte_v(pte)         ((*(int *)pte & PG_V) != 0)
  195 
  196 #define pmap_pte_set_w(pte, v)  ((v) ? atomic_set_int((u_int *)(pte), PG_W) : \
  197     atomic_clear_int((u_int *)(pte), PG_W))
  198 #define pmap_pte_set_prot(pte, v) ((*(int *)pte &= ~PG_PROT), (*(int *)pte |= (v)))
  199 
  200 struct pmap kernel_pmap_store;
  201 LIST_HEAD(pmaplist, pmap);
  202 static struct pmaplist allpmaps;
  203 static struct mtx allpmaps_lock;
  204 
  205 vm_offset_t virtual_avail;      /* VA of first avail page (after kernel bss) */
  206 vm_offset_t virtual_end;        /* VA of last avail page (end of kernel AS) */
  207 int pgeflag = 0;                /* PG_G or-in */
  208 int pseflag = 0;                /* PG_PS or-in */
  209 
  210 static int nkpt;
  211 vm_offset_t kernel_vm_end;
  212 extern u_int32_t KERNend;
  213 extern u_int32_t KPTphys;
  214 
  215 #ifdef PAE
  216 pt_entry_t pg_nx;
  217 static uma_zone_t pdptzone;
  218 #endif
  219 
  220 SYSCTL_NODE(_vm, OID_AUTO, pmap, CTLFLAG_RD, 0, "VM/pmap parameters");
  221 
  222 static int pat_works = 1;
  223 SYSCTL_INT(_vm_pmap, OID_AUTO, pat_works, CTLFLAG_RD, &pat_works, 1,
  224     "Is page attribute table fully functional?");
  225 
  226 static int pg_ps_enabled;
  227 SYSCTL_INT(_vm_pmap, OID_AUTO, pg_ps_enabled, CTLFLAG_RDTUN, &pg_ps_enabled, 0,
  228     "Are large page mappings enabled?");
  229 
  230 #define PAT_INDEX_SIZE  8
  231 static int pat_index[PAT_INDEX_SIZE];   /* cache mode to PAT index conversion */
  232 
  233 /*
  234  * Data for the pv entry allocation mechanism
  235  */
  236 static int pv_entry_count = 0, pv_entry_max = 0, pv_entry_high_water = 0;
  237 static struct md_page *pv_table;
  238 static int shpgperproc = PMAP_SHPGPERPROC;
  239 
  240 struct pv_chunk *pv_chunkbase;          /* KVA block for pv_chunks */
  241 int pv_maxchunks;                       /* How many chunks we have KVA for */
  242 vm_offset_t pv_vafree;                  /* freelist stored in the PTE */
  243 
  244 /*
  245  * All those kernel PT submaps that BSD is so fond of
  246  */
  247 struct sysmaps {
  248         struct  mtx lock;
  249         pt_entry_t *CMAP1;
  250         pt_entry_t *CMAP2;
  251         caddr_t CADDR1;
  252         caddr_t CADDR2;
  253 };
  254 static struct sysmaps sysmaps_pcpu[MAXCPU];
  255 pt_entry_t *CMAP1 = 0;
  256 static pt_entry_t *CMAP3;
  257 static pd_entry_t *KPTD;
  258 caddr_t CADDR1 = 0, ptvmmap = 0;
  259 static caddr_t CADDR3;
  260 struct msgbuf *msgbufp = 0;
  261 
  262 /*
  263  * Crashdump maps.
  264  */
  265 static caddr_t crashdumpmap;
  266 
  267 static pt_entry_t *PMAP1 = 0, *PMAP2;
  268 static pt_entry_t *PADDR1 = 0, *PADDR2;
  269 #ifdef SMP
  270 static int PMAP1cpu;
  271 static int PMAP1changedcpu;
  272 SYSCTL_INT(_debug, OID_AUTO, PMAP1changedcpu, CTLFLAG_RD, 
  273            &PMAP1changedcpu, 0,
  274            "Number of times pmap_pte_quick changed CPU with same PMAP1");
  275 #endif
  276 static int PMAP1changed;
  277 SYSCTL_INT(_debug, OID_AUTO, PMAP1changed, CTLFLAG_RD, 
  278            &PMAP1changed, 0,
  279            "Number of times pmap_pte_quick changed PMAP1");
  280 static int PMAP1unchanged;
  281 SYSCTL_INT(_debug, OID_AUTO, PMAP1unchanged, CTLFLAG_RD, 
  282            &PMAP1unchanged, 0,
  283            "Number of times pmap_pte_quick didn't change PMAP1");
  284 static struct mtx PMAP2mutex;
  285 
  286 static void     free_pv_entry(pmap_t pmap, pv_entry_t pv);
  287 static pv_entry_t get_pv_entry(pmap_t locked_pmap, int try);
  288 static void     pmap_pv_demote_pde(pmap_t pmap, vm_offset_t va, vm_paddr_t pa);
  289 static boolean_t pmap_pv_insert_pde(pmap_t pmap, vm_offset_t va, vm_paddr_t pa);
  290 static void     pmap_pv_promote_pde(pmap_t pmap, vm_offset_t va, vm_paddr_t pa);
  291 static void     pmap_pvh_free(struct md_page *pvh, pmap_t pmap, vm_offset_t va);
  292 static pv_entry_t pmap_pvh_remove(struct md_page *pvh, pmap_t pmap,
  293                     vm_offset_t va);
  294 static int      pmap_pvh_wired_mappings(struct md_page *pvh, int count);
  295 
  296 static boolean_t pmap_demote_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t va);
  297 static boolean_t pmap_enter_pde(pmap_t pmap, vm_offset_t va, vm_page_t m,
  298     vm_prot_t prot);
  299 static vm_page_t pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va,
  300     vm_page_t m, vm_prot_t prot, vm_page_t mpte);
  301 static void pmap_insert_pt_page(pmap_t pmap, vm_page_t mpte);
  302 static void pmap_fill_ptp(pt_entry_t *firstpte, pt_entry_t newpte);
  303 static boolean_t pmap_is_modified_pvh(struct md_page *pvh);
  304 static void pmap_kenter_attr(vm_offset_t va, vm_paddr_t pa, int mode);
  305 static void pmap_kenter_pde(vm_offset_t va, pd_entry_t newpde);
  306 static vm_page_t pmap_lookup_pt_page(pmap_t pmap, vm_offset_t va);
  307 static void pmap_pde_attr(pd_entry_t *pde, int cache_bits);
  308 static void pmap_promote_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t va);
  309 static boolean_t pmap_protect_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t sva,
  310     vm_prot_t prot);
  311 static void pmap_pte_attr(pt_entry_t *pte, int cache_bits);
  312 static void pmap_remove_pde(pmap_t pmap, pd_entry_t *pdq, vm_offset_t sva,
  313     vm_page_t *free);
  314 static int pmap_remove_pte(pmap_t pmap, pt_entry_t *ptq, vm_offset_t sva,
  315     vm_page_t *free);
  316 static void pmap_remove_pt_page(pmap_t pmap, vm_page_t mpte);
  317 static void pmap_remove_page(struct pmap *pmap, vm_offset_t va,
  318     vm_page_t *free);
  319 static void pmap_remove_entry(struct pmap *pmap, vm_page_t m,
  320                                         vm_offset_t va);
  321 static void pmap_insert_entry(pmap_t pmap, vm_offset_t va, vm_page_t m);
  322 static boolean_t pmap_try_insert_pv_entry(pmap_t pmap, vm_offset_t va,
  323     vm_page_t m);
  324 static void pmap_update_pde(pmap_t pmap, vm_offset_t va, pd_entry_t *pde,
  325     pd_entry_t newpde);
  326 static void pmap_update_pde_invalidate(vm_offset_t va, pd_entry_t newpde);
  327 
  328 static vm_page_t pmap_allocpte(pmap_t pmap, vm_offset_t va, int flags);
  329 
  330 static vm_page_t _pmap_allocpte(pmap_t pmap, unsigned ptepindex, int flags);
  331 static int _pmap_unwire_pte_hold(pmap_t pmap, vm_page_t m, vm_page_t *free);
  332 static pt_entry_t *pmap_pte_quick(pmap_t pmap, vm_offset_t va);
  333 static void pmap_pte_release(pt_entry_t *pte);
  334 static int pmap_unuse_pt(pmap_t, vm_offset_t, vm_page_t *);
  335 static vm_offset_t pmap_kmem_choose(vm_offset_t addr);
  336 #ifdef PAE
  337 static void *pmap_pdpt_allocf(uma_zone_t zone, int bytes, u_int8_t *flags, int wait);
  338 #endif
  339 
  340 CTASSERT(1 << PDESHIFT == sizeof(pd_entry_t));
  341 CTASSERT(1 << PTESHIFT == sizeof(pt_entry_t));
  342 
  343 /*
  344  * If you get an error here, then you set KVA_PAGES wrong! See the
  345  * description of KVA_PAGES in sys/i386/include/pmap.h. It must be
  346  * multiple of 4 for a normal kernel, or a multiple of 8 for a PAE.
  347  */
  348 CTASSERT(KERNBASE % (1 << 24) == 0);
  349 
  350 /*
  351  * Move the kernel virtual free pointer to the next
  352  * 4MB.  This is used to help improve performance
  353  * by using a large (4MB) page for much of the kernel
  354  * (.text, .data, .bss)
  355  */
  356 static vm_offset_t
  357 pmap_kmem_choose(vm_offset_t addr)
  358 {
  359         vm_offset_t newaddr = addr;
  360 
  361 #ifndef DISABLE_PSE
  362         if (cpu_feature & CPUID_PSE)
  363                 newaddr = (addr + PDRMASK) & ~PDRMASK;
  364 #endif
  365         return newaddr;
  366 }
  367 
  368 /*
  369  *      Bootstrap the system enough to run with virtual memory.
  370  *
  371  *      On the i386 this is called after mapping has already been enabled
  372  *      and just syncs the pmap module with what has already been done.
  373  *      [We can't call it easily with mapping off since the kernel is not
  374  *      mapped with PA == VA, hence we would have to relocate every address
  375  *      from the linked base (virtual) address "KERNBASE" to the actual
  376  *      (physical) address starting relative to 0]
  377  */
  378 void
  379 pmap_bootstrap(vm_paddr_t firstaddr)
  380 {
  381         vm_offset_t va;
  382         pt_entry_t *pte, *unused;
  383         struct sysmaps *sysmaps;
  384         int i;
  385 
  386         /*
  387          * Initialize the first available kernel virtual address.  However,
  388          * using "firstaddr" may waste a few pages of the kernel virtual
  389          * address space, because locore may not have mapped every physical
  390          * page that it allocated.  Preferably, locore would provide a first
  391          * unused virtual address in addition to "firstaddr".
  392          */
  393         virtual_avail = (vm_offset_t) KERNBASE + firstaddr;
  394         virtual_avail = pmap_kmem_choose(virtual_avail);
  395 
  396         virtual_end = VM_MAX_KERNEL_ADDRESS;
  397 
  398         /*
  399          * Initialize the kernel pmap (which is statically allocated).
  400          */
  401         PMAP_LOCK_INIT(kernel_pmap);
  402         kernel_pmap->pm_pdir = (pd_entry_t *) (KERNBASE + (u_int)IdlePTD);
  403 #ifdef PAE
  404         kernel_pmap->pm_pdpt = (pdpt_entry_t *) (KERNBASE + (u_int)IdlePDPT);
  405 #endif
  406         kernel_pmap->pm_root = NULL;
  407         kernel_pmap->pm_active = -1;    /* don't allow deactivation */
  408         TAILQ_INIT(&kernel_pmap->pm_pvchunk);
  409         LIST_INIT(&allpmaps);
  410 
  411         /*
  412          * Request a spin mutex so that changes to allpmaps cannot be
  413          * preempted by smp_rendezvous_cpus().  Otherwise,
  414          * pmap_update_pde_kernel() could access allpmaps while it is
  415          * being changed.
  416          */
  417         mtx_init(&allpmaps_lock, "allpmaps", NULL, MTX_SPIN);
  418         mtx_lock_spin(&allpmaps_lock);
  419         LIST_INSERT_HEAD(&allpmaps, kernel_pmap, pm_list);
  420         mtx_unlock_spin(&allpmaps_lock);
  421         nkpt = NKPT;
  422 
  423         /*
  424          * Reserve some special page table entries/VA space for temporary
  425          * mapping of pages.
  426          */
  427 #define SYSMAP(c, p, v, n)      \
  428         v = (c)va; va += ((n)*PAGE_SIZE); p = pte; pte += (n);
  429 
  430         va = virtual_avail;
  431         pte = vtopte(va);
  432 
  433         /*
  434          * CMAP1/CMAP2 are used for zeroing and copying pages.
  435          * CMAP3 is used for the idle process page zeroing.
  436          */
  437         for (i = 0; i < MAXCPU; i++) {
  438                 sysmaps = &sysmaps_pcpu[i];
  439                 mtx_init(&sysmaps->lock, "SYSMAPS", NULL, MTX_DEF);
  440                 SYSMAP(caddr_t, sysmaps->CMAP1, sysmaps->CADDR1, 1)
  441                 SYSMAP(caddr_t, sysmaps->CMAP2, sysmaps->CADDR2, 1)
  442         }
  443         SYSMAP(caddr_t, CMAP1, CADDR1, 1)
  444         SYSMAP(caddr_t, CMAP3, CADDR3, 1)
  445 
  446         /*
  447          * Crashdump maps.
  448          */
  449         SYSMAP(caddr_t, unused, crashdumpmap, MAXDUMPPGS)
  450 
  451         /*
  452          * ptvmmap is used for reading arbitrary physical pages via /dev/mem.
  453          */
  454         SYSMAP(caddr_t, unused, ptvmmap, 1)
  455 
  456         /*
  457          * msgbufp is used to map the system message buffer.
  458          */
  459         SYSMAP(struct msgbuf *, unused, msgbufp, atop(round_page(msgbufsize)))
  460 
  461         /*
  462          * KPTmap is used by pmap_kextract().
  463          *
  464          * KPTmap is first initialized by locore.  However, that initial
  465          * KPTmap can only support NKPT page table pages.  Here, a larger
  466          * KPTmap is created that can support KVA_PAGES page table pages.
  467          */
  468         SYSMAP(pt_entry_t *, KPTD, KPTmap, KVA_PAGES)
  469 
  470         for (i = 0; i < NKPT; i++)
  471                 KPTD[i] = (KPTphys + (i << PAGE_SHIFT)) | pgeflag | PG_RW | PG_V;
  472 
  473         /*
  474          * Adjust the start of the KPTD and KPTmap so that the implementation
  475          * of pmap_kextract() and pmap_growkernel() can be made simpler.
  476          */
  477         KPTD -= KPTDI;
  478         KPTmap -= i386_btop(KPTDI << PDRSHIFT);
  479 
  480         /*
  481          * ptemap is used for pmap_pte_quick
  482          */
  483         SYSMAP(pt_entry_t *, PMAP1, PADDR1, 1)
  484         SYSMAP(pt_entry_t *, PMAP2, PADDR2, 1)
  485 
  486         mtx_init(&PMAP2mutex, "PMAP2", NULL, MTX_DEF);
  487 
  488         virtual_avail = va;
  489 
  490         /*
  491          * Leave in place an identity mapping (virt == phys) for the low 1 MB
  492          * physical memory region that is used by the ACPI wakeup code.  This
  493          * mapping must not have PG_G set. 
  494          */
  495 #ifdef XBOX
  496         /* FIXME: This is gross, but needed for the XBOX. Since we are in such
  497          * an early stadium, we cannot yet neatly map video memory ... :-(
  498          * Better fixes are very welcome! */
  499         if (!arch_i386_is_xbox)
  500 #endif
  501         for (i = 1; i < NKPT; i++)
  502                 PTD[i] = 0;
  503 
  504         /* Initialize the PAT MSR if present. */
  505         pmap_init_pat();
  506 
  507         /* Turn on PG_G on kernel page(s) */
  508         pmap_set_pg();
  509 }
  510 
  511 /*
  512  * Setup the PAT MSR.
  513  */
  514 void
  515 pmap_init_pat(void)
  516 {
  517         int pat_table[PAT_INDEX_SIZE];
  518         uint64_t pat_msr;
  519         u_long cr0, cr4;
  520         int i;
  521 
  522         /* Set default PAT index table. */
  523         for (i = 0; i < PAT_INDEX_SIZE; i++)
  524                 pat_table[i] = -1;
  525         pat_table[PAT_WRITE_BACK] = 0;
  526         pat_table[PAT_WRITE_THROUGH] = 1;
  527         pat_table[PAT_UNCACHEABLE] = 3;
  528         pat_table[PAT_WRITE_COMBINING] = 3;
  529         pat_table[PAT_WRITE_PROTECTED] = 3;
  530         pat_table[PAT_UNCACHED] = 3;
  531 
  532         /* Bail if this CPU doesn't implement PAT. */
  533         if ((cpu_feature & CPUID_PAT) == 0) {
  534                 for (i = 0; i < PAT_INDEX_SIZE; i++)
  535                         pat_index[i] = pat_table[i];
  536                 pat_works = 0;
  537                 return;
  538         }
  539 
  540         /*
  541          * Due to some Intel errata, we can only safely use the lower 4
  542          * PAT entries.
  543          *
  544          *   Intel Pentium III Processor Specification Update
  545          * Errata E.27 (Upper Four PAT Entries Not Usable With Mode B
  546          * or Mode C Paging)
  547          *
  548          *   Intel Pentium IV  Processor Specification Update
  549          * Errata N46 (PAT Index MSB May Be Calculated Incorrectly)
  550          */
  551         if (cpu_vendor_id == CPU_VENDOR_INTEL &&
  552             !(CPUID_TO_FAMILY(cpu_id) == 6 && CPUID_TO_MODEL(cpu_id) >= 0xe))
  553                 pat_works = 0;
  554 
  555         /* Initialize default PAT entries. */
  556         pat_msr = PAT_VALUE(0, PAT_WRITE_BACK) |
  557             PAT_VALUE(1, PAT_WRITE_THROUGH) |
  558             PAT_VALUE(2, PAT_UNCACHED) |
  559             PAT_VALUE(3, PAT_UNCACHEABLE) |
  560             PAT_VALUE(4, PAT_WRITE_BACK) |
  561             PAT_VALUE(5, PAT_WRITE_THROUGH) |
  562             PAT_VALUE(6, PAT_UNCACHED) |
  563             PAT_VALUE(7, PAT_UNCACHEABLE);
  564 
  565         if (pat_works) {
  566                 /*
  567                  * Leave the indices 0-3 at the default of WB, WT, UC-, and UC.
  568                  * Program 5 and 6 as WP and WC.
  569                  * Leave 4 and 7 as WB and UC.
  570                  */
  571                 pat_msr &= ~(PAT_MASK(5) | PAT_MASK(6));
  572                 pat_msr |= PAT_VALUE(5, PAT_WRITE_PROTECTED) |
  573                     PAT_VALUE(6, PAT_WRITE_COMBINING);
  574                 pat_table[PAT_UNCACHED] = 2;
  575                 pat_table[PAT_WRITE_PROTECTED] = 5;
  576                 pat_table[PAT_WRITE_COMBINING] = 6;
  577         } else {
  578                 /*
  579                  * Just replace PAT Index 2 with WC instead of UC-.
  580                  */
  581                 pat_msr &= ~PAT_MASK(2);
  582                 pat_msr |= PAT_VALUE(2, PAT_WRITE_COMBINING);
  583                 pat_table[PAT_WRITE_COMBINING] = 2;
  584         }
  585 
  586         /* Disable PGE. */
  587         cr4 = rcr4();
  588         load_cr4(cr4 & ~CR4_PGE);
  589 
  590         /* Disable caches (CD = 1, NW = 0). */
  591         cr0 = rcr0();
  592         load_cr0((cr0 & ~CR0_NW) | CR0_CD);
  593 
  594         /* Flushes caches and TLBs. */
  595         wbinvd();
  596         invltlb();
  597 
  598         /* Update PAT and index table. */
  599         wrmsr(MSR_PAT, pat_msr);
  600         for (i = 0; i < PAT_INDEX_SIZE; i++)
  601                 pat_index[i] = pat_table[i];
  602 
  603         /* Flush caches and TLBs again. */
  604         wbinvd();
  605         invltlb();
  606 
  607         /* Restore caches and PGE. */
  608         load_cr0(cr0);
  609         load_cr4(cr4);
  610 }
  611 
  612 /*
  613  * Set PG_G on kernel pages.  Only the BSP calls this when SMP is turned on.
  614  */
  615 void
  616 pmap_set_pg(void)
  617 {
  618         pt_entry_t *pte;
  619         vm_offset_t va, endva;
  620 
  621         if (pgeflag == 0)
  622                 return;
  623 
  624         endva = KERNBASE + KERNend;
  625 
  626         if (pseflag) {
  627                 va = KERNBASE + KERNLOAD;
  628                 while (va  < endva) {
  629                         pdir_pde(PTD, va) |= pgeflag;
  630                         invltlb();      /* Play it safe, invltlb() every time */
  631                         va += NBPDR;
  632                 }
  633         } else {
  634                 va = (vm_offset_t)btext;
  635                 while (va < endva) {
  636                         pte = vtopte(va);
  637                         if (*pte)
  638                                 *pte |= pgeflag;
  639                         invltlb();      /* Play it safe, invltlb() every time */
  640                         va += PAGE_SIZE;
  641                 }
  642         }
  643 }
  644 
  645 /*
  646  * Initialize a vm_page's machine-dependent fields.
  647  */
  648 void
  649 pmap_page_init(vm_page_t m)
  650 {
  651 
  652         TAILQ_INIT(&m->md.pv_list);
  653         m->md.pat_mode = PAT_WRITE_BACK;
  654 }
  655 
  656 #ifdef PAE
  657 static void *
  658 pmap_pdpt_allocf(uma_zone_t zone, int bytes, u_int8_t *flags, int wait)
  659 {
  660 
  661         /* Inform UMA that this allocator uses kernel_map/object. */
  662         *flags = UMA_SLAB_KERNEL;
  663         return ((void *)kmem_alloc_contig(kernel_map, bytes, wait, 0x0ULL,
  664             0xffffffffULL, 1, 0, VM_MEMATTR_DEFAULT));
  665 }
  666 #endif
  667 
  668 /*
  669  * ABuse the pte nodes for unmapped kva to thread a kva freelist through.
  670  * Requirements:
  671  *  - Must deal with pages in order to ensure that none of the PG_* bits
  672  *    are ever set, PG_V in particular.
  673  *  - Assumes we can write to ptes without pte_store() atomic ops, even
  674  *    on PAE systems.  This should be ok.
  675  *  - Assumes nothing will ever test these addresses for 0 to indicate
  676  *    no mapping instead of correctly checking PG_V.
  677  *  - Assumes a vm_offset_t will fit in a pte (true for i386).
  678  * Because PG_V is never set, there can be no mappings to invalidate.
  679  */
  680 static vm_offset_t
  681 pmap_ptelist_alloc(vm_offset_t *head)
  682 {
  683         pt_entry_t *pte;
  684         vm_offset_t va;
  685 
  686         va = *head;
  687         if (va == 0)
  688                 return (va);    /* Out of memory */
  689         pte = vtopte(va);
  690         *head = *pte;
  691         if (*head & PG_V)
  692                 panic("pmap_ptelist_alloc: va with PG_V set!");
  693         *pte = 0;
  694         return (va);
  695 }
  696 
  697 static void
  698 pmap_ptelist_free(vm_offset_t *head, vm_offset_t va)
  699 {
  700         pt_entry_t *pte;
  701 
  702         if (va & PG_V)
  703                 panic("pmap_ptelist_free: freeing va with PG_V set!");
  704         pte = vtopte(va);
  705         *pte = *head;           /* virtual! PG_V is 0 though */
  706         *head = va;
  707 }
  708 
  709 static void
  710 pmap_ptelist_init(vm_offset_t *head, void *base, int npages)
  711 {
  712         int i;
  713         vm_offset_t va;
  714 
  715         *head = 0;
  716         for (i = npages - 1; i >= 0; i--) {
  717                 va = (vm_offset_t)base + i * PAGE_SIZE;
  718                 pmap_ptelist_free(head, va);
  719         }
  720 }
  721 
  722 
  723 /*
  724  *      Initialize the pmap module.
  725  *      Called by vm_init, to initialize any structures that the pmap
  726  *      system needs to map virtual memory.
  727  */
  728 void
  729 pmap_init(void)
  730 {
  731         vm_page_t mpte;
  732         vm_size_t s;
  733         int i, pv_npg;
  734 
  735         /*
  736          * Initialize the vm page array entries for the kernel pmap's
  737          * page table pages.
  738          */ 
  739         for (i = 0; i < NKPT; i++) {
  740                 mpte = PHYS_TO_VM_PAGE(KPTphys + (i << PAGE_SHIFT));
  741                 KASSERT(mpte >= vm_page_array &&
  742                     mpte < &vm_page_array[vm_page_array_size],
  743                     ("pmap_init: page table page is out of range"));
  744                 mpte->pindex = i + KPTDI;
  745                 mpte->phys_addr = KPTphys + (i << PAGE_SHIFT);
  746         }
  747 
  748         /*
  749          * Initialize the address space (zone) for the pv entries.  Set a
  750          * high water mark so that the system can recover from excessive
  751          * numbers of pv entries.
  752          */
  753         TUNABLE_INT_FETCH("vm.pmap.shpgperproc", &shpgperproc);
  754         pv_entry_max = shpgperproc * maxproc + cnt.v_page_count;
  755         TUNABLE_INT_FETCH("vm.pmap.pv_entries", &pv_entry_max);
  756         pv_entry_max = roundup(pv_entry_max, _NPCPV);
  757         pv_entry_high_water = 9 * (pv_entry_max / 10);
  758 
  759         /*
  760          * If the kernel is running on a virtual machine, then it must assume
  761          * that MCA is enabled by the hypervisor.  Moreover, the kernel must
  762          * be prepared for the hypervisor changing the vendor and family that
  763          * are reported by CPUID.  Consequently, the workaround for AMD Family
  764          * 10h Erratum 383 is enabled if the processor's feature set does not
  765          * include at least one feature that is only supported by older Intel
  766          * or newer AMD processors.
  767          */
  768         if (vm_guest == VM_GUEST_VM && (cpu_feature & CPUID_SS) == 0 &&
  769             (cpu_feature2 & (CPUID2_SSSE3 | CPUID2_SSE41 | CPUID2_AESNI |
  770             CPUID2_AVX | CPUID2_XSAVE)) == 0 && (amd_feature2 & (AMDID2_XOP |
  771             AMDID2_FMA4)) == 0)
  772                 workaround_erratum383 = 1;
  773 
  774         /*
  775          * Are large page mappings supported and enabled?
  776          */
  777         TUNABLE_INT_FETCH("vm.pmap.pg_ps_enabled", &pg_ps_enabled);
  778         if (pseflag == 0)
  779                 pg_ps_enabled = 0;
  780         else if (pg_ps_enabled) {
  781                 KASSERT(MAXPAGESIZES > 1 && pagesizes[1] == 0,
  782                     ("pmap_init: can't assign to pagesizes[1]"));
  783                 pagesizes[1] = NBPDR;
  784         }
  785 
  786         /*
  787          * Calculate the size of the pv head table for superpages.
  788          */
  789         for (i = 0; phys_avail[i + 1]; i += 2);
  790         pv_npg = round_4mpage(phys_avail[(i - 2) + 1]) / NBPDR;
  791 
  792         /*
  793          * Allocate memory for the pv head table for superpages.
  794          */
  795         s = (vm_size_t)(pv_npg * sizeof(struct md_page));
  796         s = round_page(s);
  797         pv_table = (struct md_page *)kmem_alloc(kernel_map, s);
  798         for (i = 0; i < pv_npg; i++)
  799                 TAILQ_INIT(&pv_table[i].pv_list);
  800 
  801         pv_maxchunks = MAX(pv_entry_max / _NPCPV, maxproc);
  802         pv_chunkbase = (struct pv_chunk *)kmem_alloc_nofault(kernel_map,
  803             PAGE_SIZE * pv_maxchunks);
  804         if (pv_chunkbase == NULL)
  805                 panic("pmap_init: not enough kvm for pv chunks");
  806         pmap_ptelist_init(&pv_vafree, pv_chunkbase, pv_maxchunks);
  807 #ifdef PAE
  808         pdptzone = uma_zcreate("PDPT", NPGPTD * sizeof(pdpt_entry_t), NULL,
  809             NULL, NULL, NULL, (NPGPTD * sizeof(pdpt_entry_t)) - 1,
  810             UMA_ZONE_VM | UMA_ZONE_NOFREE);
  811         uma_zone_set_allocf(pdptzone, pmap_pdpt_allocf);
  812 #endif
  813 }
  814 
  815 
  816 SYSCTL_INT(_vm_pmap, OID_AUTO, pv_entry_max, CTLFLAG_RD, &pv_entry_max, 0,
  817         "Max number of PV entries");
  818 SYSCTL_INT(_vm_pmap, OID_AUTO, shpgperproc, CTLFLAG_RD, &shpgperproc, 0,
  819         "Page share factor per proc");
  820 
  821 SYSCTL_NODE(_vm_pmap, OID_AUTO, pde, CTLFLAG_RD, 0,
  822     "2/4MB page mapping counters");
  823 
  824 static u_long pmap_pde_demotions;
  825 SYSCTL_ULONG(_vm_pmap_pde, OID_AUTO, demotions, CTLFLAG_RD,
  826     &pmap_pde_demotions, 0, "2/4MB page demotions");
  827 
  828 static u_long pmap_pde_mappings;
  829 SYSCTL_ULONG(_vm_pmap_pde, OID_AUTO, mappings, CTLFLAG_RD,
  830     &pmap_pde_mappings, 0, "2/4MB page mappings");
  831 
  832 static u_long pmap_pde_p_failures;
  833 SYSCTL_ULONG(_vm_pmap_pde, OID_AUTO, p_failures, CTLFLAG_RD,
  834     &pmap_pde_p_failures, 0, "2/4MB page promotion failures");
  835 
  836 static u_long pmap_pde_promotions;
  837 SYSCTL_ULONG(_vm_pmap_pde, OID_AUTO, promotions, CTLFLAG_RD,
  838     &pmap_pde_promotions, 0, "2/4MB page promotions");
  839 
  840 /***************************************************
  841  * Low level helper routines.....
  842  ***************************************************/
  843 
  844 /*
  845  * Determine the appropriate bits to set in a PTE or PDE for a specified
  846  * caching mode.
  847  */
  848 int
  849 pmap_cache_bits(int mode, boolean_t is_pde)
  850 {
  851         int cache_bits, pat_flag, pat_idx;
  852 
  853         if (mode < 0 || mode >= PAT_INDEX_SIZE || pat_index[mode] < 0)
  854                 panic("Unknown caching mode %d\n", mode);
  855 
  856         /* The PAT bit is different for PTE's and PDE's. */
  857         pat_flag = is_pde ? PG_PDE_PAT : PG_PTE_PAT;
  858 
  859         /* Map the caching mode to a PAT index. */
  860         pat_idx = pat_index[mode];
  861 
  862         /* Map the 3-bit index value into the PAT, PCD, and PWT bits. */
  863         cache_bits = 0;
  864         if (pat_idx & 0x4)
  865                 cache_bits |= pat_flag;
  866         if (pat_idx & 0x2)
  867                 cache_bits |= PG_NC_PCD;
  868         if (pat_idx & 0x1)
  869                 cache_bits |= PG_NC_PWT;
  870         return (cache_bits);
  871 }
  872 
  873 /*
  874  * The caller is responsible for maintaining TLB consistency.
  875  */
  876 static void
  877 pmap_kenter_pde(vm_offset_t va, pd_entry_t newpde)
  878 {
  879         pd_entry_t *pde;
  880         pmap_t pmap;
  881         boolean_t PTD_updated;
  882 
  883         PTD_updated = FALSE;
  884         mtx_lock_spin(&allpmaps_lock);
  885         LIST_FOREACH(pmap, &allpmaps, pm_list) {
  886                 if ((pmap->pm_pdir[PTDPTDI] & PG_FRAME) == (PTDpde[0] &
  887                     PG_FRAME))
  888                         PTD_updated = TRUE;
  889                 pde = pmap_pde(pmap, va);
  890                 pde_store(pde, newpde);
  891         }
  892         mtx_unlock_spin(&allpmaps_lock);
  893         KASSERT(PTD_updated,
  894             ("pmap_kenter_pde: current page table is not in allpmaps"));
  895 }
  896 
  897 /*
  898  * After changing the page size for the specified virtual address in the page
  899  * table, flush the corresponding entries from the processor's TLB.  Only the
  900  * calling processor's TLB is affected.
  901  *
  902  * The calling thread must be pinned to a processor.
  903  */
  904 static void
  905 pmap_update_pde_invalidate(vm_offset_t va, pd_entry_t newpde)
  906 {
  907         u_long cr4;
  908 
  909         if ((newpde & PG_PS) == 0)
  910                 /* Demotion: flush a specific 2MB page mapping. */
  911                 invlpg(va);
  912         else if ((newpde & PG_G) == 0)
  913                 /*
  914                  * Promotion: flush every 4KB page mapping from the TLB
  915                  * because there are too many to flush individually.
  916                  */
  917                 invltlb();
  918         else {
  919                 /*
  920                  * Promotion: flush every 4KB page mapping from the TLB,
  921                  * including any global (PG_G) mappings.
  922                  */
  923                 cr4 = rcr4();
  924                 load_cr4(cr4 & ~CR4_PGE);
  925                 /*
  926                  * Although preemption at this point could be detrimental to
  927                  * performance, it would not lead to an error.  PG_G is simply
  928                  * ignored if CR4.PGE is clear.  Moreover, in case this block
  929                  * is re-entered, the load_cr4() either above or below will
  930                  * modify CR4.PGE flushing the TLB.
  931                  */
  932                 load_cr4(cr4 | CR4_PGE);
  933         }
  934 }
  935 #ifdef SMP
  936 /*
  937  * For SMP, these functions have to use the IPI mechanism for coherence.
  938  *
  939  * N.B.: Before calling any of the following TLB invalidation functions,
  940  * the calling processor must ensure that all stores updating a non-
  941  * kernel page table are globally performed.  Otherwise, another
  942  * processor could cache an old, pre-update entry without being
  943  * invalidated.  This can happen one of two ways: (1) The pmap becomes
  944  * active on another processor after its pm_active field is checked by
  945  * one of the following functions but before a store updating the page
  946  * table is globally performed. (2) The pmap becomes active on another
  947  * processor before its pm_active field is checked but due to
  948  * speculative loads one of the following functions stills reads the
  949  * pmap as inactive on the other processor.
  950  * 
  951  * The kernel page table is exempt because its pm_active field is
  952  * immutable.  The kernel page table is always active on every
  953  * processor.
  954  */
  955 void
  956 pmap_invalidate_page(pmap_t pmap, vm_offset_t va)
  957 {
  958         cpumask_t cpumask, other_cpus;
  959 
  960         sched_pin();
  961         if (pmap == kernel_pmap || pmap->pm_active == all_cpus) {
  962                 invlpg(va);
  963                 smp_invlpg(va);
  964         } else {
  965                 cpumask = PCPU_GET(cpumask);
  966                 other_cpus = PCPU_GET(other_cpus);
  967                 if (pmap->pm_active & cpumask)
  968                         invlpg(va);
  969                 if (pmap->pm_active & other_cpus)
  970                         smp_masked_invlpg(pmap->pm_active & other_cpus, va);
  971         }
  972         sched_unpin();
  973 }
  974 
  975 void
  976 pmap_invalidate_range(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
  977 {
  978         cpumask_t cpumask, other_cpus;
  979         vm_offset_t addr;
  980 
  981         sched_pin();
  982         if (pmap == kernel_pmap || pmap->pm_active == all_cpus) {
  983                 for (addr = sva; addr < eva; addr += PAGE_SIZE)
  984                         invlpg(addr);
  985                 smp_invlpg_range(sva, eva);
  986         } else {
  987                 cpumask = PCPU_GET(cpumask);
  988                 other_cpus = PCPU_GET(other_cpus);
  989                 if (pmap->pm_active & cpumask)
  990                         for (addr = sva; addr < eva; addr += PAGE_SIZE)
  991                                 invlpg(addr);
  992                 if (pmap->pm_active & other_cpus)
  993                         smp_masked_invlpg_range(pmap->pm_active & other_cpus,
  994                             sva, eva);
  995         }
  996         sched_unpin();
  997 }
  998 
  999 void
 1000 pmap_invalidate_all(pmap_t pmap)
 1001 {
 1002         cpumask_t cpumask, other_cpus;
 1003 
 1004         sched_pin();
 1005         if (pmap == kernel_pmap || pmap->pm_active == all_cpus) {
 1006                 invltlb();
 1007                 smp_invltlb();
 1008         } else {
 1009                 cpumask = PCPU_GET(cpumask);
 1010                 other_cpus = PCPU_GET(other_cpus);
 1011                 if (pmap->pm_active & cpumask)
 1012                         invltlb();
 1013                 if (pmap->pm_active & other_cpus)
 1014                         smp_masked_invltlb(pmap->pm_active & other_cpus);
 1015         }
 1016         sched_unpin();
 1017 }
 1018 
 1019 void
 1020 pmap_invalidate_cache(void)
 1021 {
 1022 
 1023         sched_pin();
 1024         wbinvd();
 1025         smp_cache_flush();
 1026         sched_unpin();
 1027 }
 1028 
 1029 struct pde_action {
 1030         cpumask_t store;        /* processor that updates the PDE */
 1031         cpumask_t invalidate;   /* processors that invalidate their TLB */
 1032         vm_offset_t va;
 1033         pd_entry_t *pde;
 1034         pd_entry_t newpde;
 1035 };
 1036 
 1037 static void
 1038 pmap_update_pde_kernel(void *arg)
 1039 {
 1040         struct pde_action *act = arg;
 1041         pd_entry_t *pde;
 1042         pmap_t pmap;
 1043 
 1044         if (act->store == PCPU_GET(cpumask))
 1045                 /*
 1046                  * Elsewhere, this operation requires allpmaps_lock for
 1047                  * synchronization.  Here, it does not because it is being
 1048                  * performed in the context of an all_cpus rendezvous.
 1049                  */
 1050                 LIST_FOREACH(pmap, &allpmaps, pm_list) {
 1051                         pde = pmap_pde(pmap, act->va);
 1052                         pde_store(pde, act->newpde);
 1053                 }
 1054 }
 1055 
 1056 static void
 1057 pmap_update_pde_user(void *arg)
 1058 {
 1059         struct pde_action *act = arg;
 1060 
 1061         if (act->store == PCPU_GET(cpumask))
 1062                 pde_store(act->pde, act->newpde);
 1063 }
 1064 
 1065 static void
 1066 pmap_update_pde_teardown(void *arg)
 1067 {
 1068         struct pde_action *act = arg;
 1069 
 1070         if ((act->invalidate & PCPU_GET(cpumask)) != 0)
 1071                 pmap_update_pde_invalidate(act->va, act->newpde);
 1072 }
 1073 
 1074 /*
 1075  * Change the page size for the specified virtual address in a way that
 1076  * prevents any possibility of the TLB ever having two entries that map the
 1077  * same virtual address using different page sizes.  This is the recommended
 1078  * workaround for Erratum 383 on AMD Family 10h processors.  It prevents a
 1079  * machine check exception for a TLB state that is improperly diagnosed as a
 1080  * hardware error.
 1081  */
 1082 static void
 1083 pmap_update_pde(pmap_t pmap, vm_offset_t va, pd_entry_t *pde, pd_entry_t newpde)
 1084 {
 1085         struct pde_action act;
 1086         cpumask_t active, cpumask;
 1087 
 1088         sched_pin();
 1089         cpumask = PCPU_GET(cpumask);
 1090         if (pmap == kernel_pmap)
 1091                 active = all_cpus;
 1092         else
 1093                 active = pmap->pm_active;
 1094         if ((active & PCPU_GET(other_cpus)) != 0) {
 1095                 act.store = cpumask;
 1096                 act.invalidate = active;
 1097                 act.va = va;
 1098                 act.pde = pde;
 1099                 act.newpde = newpde;
 1100                 smp_rendezvous_cpus(cpumask | active,
 1101                     smp_no_rendevous_barrier, pmap == kernel_pmap ?
 1102                     pmap_update_pde_kernel : pmap_update_pde_user,
 1103                     pmap_update_pde_teardown, &act);
 1104         } else {
 1105                 if (pmap == kernel_pmap)
 1106                         pmap_kenter_pde(va, newpde);
 1107                 else
 1108                         pde_store(pde, newpde);
 1109                 if ((active & cpumask) != 0)
 1110                         pmap_update_pde_invalidate(va, newpde);
 1111         }
 1112         sched_unpin();
 1113 }
 1114 #else /* !SMP */
 1115 /*
 1116  * Normal, non-SMP, 486+ invalidation functions.
 1117  * We inline these within pmap.c for speed.
 1118  */
 1119 PMAP_INLINE void
 1120 pmap_invalidate_page(pmap_t pmap, vm_offset_t va)
 1121 {
 1122 
 1123         if (pmap == kernel_pmap || pmap->pm_active)
 1124                 invlpg(va);
 1125 }
 1126 
 1127 PMAP_INLINE void
 1128 pmap_invalidate_range(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
 1129 {
 1130         vm_offset_t addr;
 1131 
 1132         if (pmap == kernel_pmap || pmap->pm_active)
 1133                 for (addr = sva; addr < eva; addr += PAGE_SIZE)
 1134                         invlpg(addr);
 1135 }
 1136 
 1137 PMAP_INLINE void
 1138 pmap_invalidate_all(pmap_t pmap)
 1139 {
 1140 
 1141         if (pmap == kernel_pmap || pmap->pm_active)
 1142                 invltlb();
 1143 }
 1144 
 1145 PMAP_INLINE void
 1146 pmap_invalidate_cache(void)
 1147 {
 1148 
 1149         wbinvd();
 1150 }
 1151 
 1152 static void
 1153 pmap_update_pde(pmap_t pmap, vm_offset_t va, pd_entry_t *pde, pd_entry_t newpde)
 1154 {
 1155 
 1156         if (pmap == kernel_pmap)
 1157                 pmap_kenter_pde(va, newpde);
 1158         else
 1159                 pde_store(pde, newpde);
 1160         if (pmap == kernel_pmap || pmap->pm_active)
 1161                 pmap_update_pde_invalidate(va, newpde);
 1162 }
 1163 #endif /* !SMP */
 1164 
 1165 void
 1166 pmap_invalidate_cache_range(vm_offset_t sva, vm_offset_t eva)
 1167 {
 1168 
 1169         KASSERT((sva & PAGE_MASK) == 0,
 1170             ("pmap_invalidate_cache_range: sva not page-aligned"));
 1171         KASSERT((eva & PAGE_MASK) == 0,
 1172             ("pmap_invalidate_cache_range: eva not page-aligned"));
 1173 
 1174         if (cpu_feature & CPUID_SS)
 1175                 ; /* If "Self Snoop" is supported, do nothing. */
 1176         else if ((cpu_feature & CPUID_CLFSH) != 0 &&
 1177                  eva - sva < 2 * 1024 * 1024) {
 1178 
 1179 #ifdef DEV_APIC
 1180                 /*
 1181                  * XXX: Some CPUs fault, hang, or trash the local APIC
 1182                  * registers if we use CLFLUSH on the local APIC
 1183                  * range.  The local APIC is always uncached, so we
 1184                  * don't need to flush for that range anyway.
 1185                  */
 1186                 if (pmap_kextract(sva) == lapic_paddr)
 1187                         return;
 1188 #endif
 1189                 /*
 1190                  * Otherwise, do per-cache line flush.  Use the mfence
 1191                  * instruction to insure that previous stores are
 1192                  * included in the write-back.  The processor
 1193                  * propagates flush to other processors in the cache
 1194                  * coherence domain.
 1195                  */
 1196                 mfence();
 1197                 for (; sva < eva; sva += cpu_clflush_line_size)
 1198                         clflush(sva);
 1199                 mfence();
 1200         } else {
 1201 
 1202                 /*
 1203                  * No targeted cache flush methods are supported by CPU,
 1204                  * or the supplied range is bigger than 2MB.
 1205                  * Globally invalidate cache.
 1206                  */
 1207                 pmap_invalidate_cache();
 1208         }
 1209 }
 1210 
 1211 /*
 1212  * Are we current address space or kernel?  N.B. We return FALSE when
 1213  * a pmap's page table is in use because a kernel thread is borrowing
 1214  * it.  The borrowed page table can change spontaneously, making any
 1215  * dependence on its continued use subject to a race condition.
 1216  */
 1217 static __inline int
 1218 pmap_is_current(pmap_t pmap)
 1219 {
 1220 
 1221         return (pmap == kernel_pmap ||
 1222                 (pmap == vmspace_pmap(curthread->td_proc->p_vmspace) &&
 1223             (pmap->pm_pdir[PTDPTDI] & PG_FRAME) == (PTDpde[0] & PG_FRAME)));
 1224 }
 1225 
 1226 /*
 1227  * If the given pmap is not the current or kernel pmap, the returned pte must
 1228  * be released by passing it to pmap_pte_release().
 1229  */
 1230 pt_entry_t *
 1231 pmap_pte(pmap_t pmap, vm_offset_t va)
 1232 {
 1233         pd_entry_t newpf;
 1234         pd_entry_t *pde;
 1235 
 1236         pde = pmap_pde(pmap, va);
 1237         if (*pde & PG_PS)
 1238                 return (pde);
 1239         if (*pde != 0) {
 1240                 /* are we current address space or kernel? */
 1241                 if (pmap_is_current(pmap))
 1242                         return (vtopte(va));
 1243                 mtx_lock(&PMAP2mutex);
 1244                 newpf = *pde & PG_FRAME;
 1245                 if ((*PMAP2 & PG_FRAME) != newpf) {
 1246                         *PMAP2 = newpf | PG_RW | PG_V | PG_A | PG_M;
 1247                         pmap_invalidate_page(kernel_pmap, (vm_offset_t)PADDR2);
 1248                 }
 1249                 return (PADDR2 + (i386_btop(va) & (NPTEPG - 1)));
 1250         }
 1251         return (0);
 1252 }
 1253 
 1254 /*
 1255  * Releases a pte that was obtained from pmap_pte().  Be prepared for the pte
 1256  * being NULL.
 1257  */
 1258 static __inline void
 1259 pmap_pte_release(pt_entry_t *pte)
 1260 {
 1261 
 1262         if ((pt_entry_t *)((vm_offset_t)pte & ~PAGE_MASK) == PADDR2)
 1263                 mtx_unlock(&PMAP2mutex);
 1264 }
 1265 
 1266 /*
 1267  * NB:  The sequence of updating a page table followed by accesses to the
 1268  * corresponding pages is subject to the situation described in the "AMD64
 1269  * Architecture Programmer's Manual Volume 2: System Programming" rev. 3.23,
 1270  * "7.3.1 Special Coherency Considerations".  Therefore, issuing the INVLPG
 1271  * right after modifying the PTE bits is crucial.
 1272  */
 1273 static __inline void
 1274 invlcaddr(void *caddr)
 1275 {
 1276 
 1277         invlpg((u_int)caddr);
 1278 }
 1279 
 1280 /*
 1281  * Super fast pmap_pte routine best used when scanning
 1282  * the pv lists.  This eliminates many coarse-grained
 1283  * invltlb calls.  Note that many of the pv list
 1284  * scans are across different pmaps.  It is very wasteful
 1285  * to do an entire invltlb for checking a single mapping.
 1286  *
 1287  * If the given pmap is not the current pmap, vm_page_queue_mtx
 1288  * must be held and curthread pinned to a CPU.
 1289  */
 1290 static pt_entry_t *
 1291 pmap_pte_quick(pmap_t pmap, vm_offset_t va)
 1292 {
 1293         pd_entry_t newpf;
 1294         pd_entry_t *pde;
 1295 
 1296         pde = pmap_pde(pmap, va);
 1297         if (*pde & PG_PS)
 1298                 return (pde);
 1299         if (*pde != 0) {
 1300                 /* are we current address space or kernel? */
 1301                 if (pmap_is_current(pmap))
 1302                         return (vtopte(va));
 1303                 mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 1304                 KASSERT(curthread->td_pinned > 0, ("curthread not pinned"));
 1305                 newpf = *pde & PG_FRAME;
 1306                 if ((*PMAP1 & PG_FRAME) != newpf) {
 1307                         *PMAP1 = newpf | PG_RW | PG_V | PG_A | PG_M;
 1308 #ifdef SMP
 1309                         PMAP1cpu = PCPU_GET(cpuid);
 1310 #endif
 1311                         invlcaddr(PADDR1);
 1312                         PMAP1changed++;
 1313                 } else
 1314 #ifdef SMP
 1315                 if (PMAP1cpu != PCPU_GET(cpuid)) {
 1316                         PMAP1cpu = PCPU_GET(cpuid);
 1317                         invlcaddr(PADDR1);
 1318                         PMAP1changedcpu++;
 1319                 } else
 1320 #endif
 1321                         PMAP1unchanged++;
 1322                 return (PADDR1 + (i386_btop(va) & (NPTEPG - 1)));
 1323         }
 1324         return (0);
 1325 }
 1326 
 1327 /*
 1328  *      Routine:        pmap_extract
 1329  *      Function:
 1330  *              Extract the physical page address associated
 1331  *              with the given map/virtual_address pair.
 1332  */
 1333 vm_paddr_t 
 1334 pmap_extract(pmap_t pmap, vm_offset_t va)
 1335 {
 1336         vm_paddr_t rtval;
 1337         pt_entry_t *pte;
 1338         pd_entry_t pde;
 1339 
 1340         rtval = 0;
 1341         PMAP_LOCK(pmap);
 1342         pde = pmap->pm_pdir[va >> PDRSHIFT];
 1343         if (pde != 0) {
 1344                 if ((pde & PG_PS) != 0)
 1345                         rtval = (pde & PG_PS_FRAME) | (va & PDRMASK);
 1346                 else {
 1347                         pte = pmap_pte(pmap, va);
 1348                         rtval = (*pte & PG_FRAME) | (va & PAGE_MASK);
 1349                         pmap_pte_release(pte);
 1350                 }
 1351         }
 1352         PMAP_UNLOCK(pmap);
 1353         return (rtval);
 1354 }
 1355 
 1356 /*
 1357  *      Routine:        pmap_extract_and_hold
 1358  *      Function:
 1359  *              Atomically extract and hold the physical page
 1360  *              with the given pmap and virtual address pair
 1361  *              if that mapping permits the given protection.
 1362  */
 1363 vm_page_t
 1364 pmap_extract_and_hold(pmap_t pmap, vm_offset_t va, vm_prot_t prot)
 1365 {
 1366         pd_entry_t pde;
 1367         pt_entry_t pte;
 1368         vm_page_t m;
 1369 
 1370         m = NULL;
 1371         vm_page_lock_queues();
 1372         PMAP_LOCK(pmap);
 1373         pde = *pmap_pde(pmap, va);
 1374         if (pde != 0) {
 1375                 if (pde & PG_PS) {
 1376                         if ((pde & PG_RW) || (prot & VM_PROT_WRITE) == 0) {
 1377                                 m = PHYS_TO_VM_PAGE((pde & PG_PS_FRAME) |
 1378                                     (va & PDRMASK));
 1379                                 vm_page_hold(m);
 1380                         }
 1381                 } else {
 1382                         sched_pin();
 1383                         pte = *pmap_pte_quick(pmap, va);
 1384                         if (pte != 0 &&
 1385                             ((pte & PG_RW) || (prot & VM_PROT_WRITE) == 0)) {
 1386                                 m = PHYS_TO_VM_PAGE(pte & PG_FRAME);
 1387                                 vm_page_hold(m);
 1388                         }
 1389                         sched_unpin();
 1390                 }
 1391         }
 1392         vm_page_unlock_queues();
 1393         PMAP_UNLOCK(pmap);
 1394         return (m);
 1395 }
 1396 
 1397 /***************************************************
 1398  * Low level mapping routines.....
 1399  ***************************************************/
 1400 
 1401 /*
 1402  * Add a wired page to the kva.
 1403  * Note: not SMP coherent.
 1404  *
 1405  * This function may be used before pmap_bootstrap() is called.
 1406  */
 1407 PMAP_INLINE void 
 1408 pmap_kenter(vm_offset_t va, vm_paddr_t pa)
 1409 {
 1410         pt_entry_t *pte;
 1411 
 1412         pte = vtopte(va);
 1413         pte_store(pte, pa | PG_RW | PG_V | pgeflag);
 1414 }
 1415 
 1416 static __inline void
 1417 pmap_kenter_attr(vm_offset_t va, vm_paddr_t pa, int mode)
 1418 {
 1419         pt_entry_t *pte;
 1420 
 1421         pte = vtopte(va);
 1422         pte_store(pte, pa | PG_RW | PG_V | pgeflag | pmap_cache_bits(mode, 0));
 1423 }
 1424 
 1425 /*
 1426  * Remove a page from the kernel pagetables.
 1427  * Note: not SMP coherent.
 1428  *
 1429  * This function may be used before pmap_bootstrap() is called.
 1430  */
 1431 PMAP_INLINE void
 1432 pmap_kremove(vm_offset_t va)
 1433 {
 1434         pt_entry_t *pte;
 1435 
 1436         pte = vtopte(va);
 1437         pte_clear(pte);
 1438 }
 1439 
 1440 /*
 1441  *      Used to map a range of physical addresses into kernel
 1442  *      virtual address space.
 1443  *
 1444  *      The value passed in '*virt' is a suggested virtual address for
 1445  *      the mapping. Architectures which can support a direct-mapped
 1446  *      physical to virtual region can return the appropriate address
 1447  *      within that region, leaving '*virt' unchanged. Other
 1448  *      architectures should map the pages starting at '*virt' and
 1449  *      update '*virt' with the first usable address after the mapped
 1450  *      region.
 1451  */
 1452 vm_offset_t
 1453 pmap_map(vm_offset_t *virt, vm_paddr_t start, vm_paddr_t end, int prot)
 1454 {
 1455         vm_offset_t va, sva;
 1456         vm_paddr_t superpage_offset;
 1457         pd_entry_t newpde;
 1458 
 1459         va = *virt;
 1460         /*
 1461          * Does the physical address range's size and alignment permit at
 1462          * least one superpage mapping to be created?
 1463          */ 
 1464         superpage_offset = start & PDRMASK;
 1465         if ((end - start) - ((NBPDR - superpage_offset) & PDRMASK) >= NBPDR) {
 1466                 /*
 1467                  * Increase the starting virtual address so that its alignment
 1468                  * does not preclude the use of superpage mappings.
 1469                  */
 1470                 if ((va & PDRMASK) < superpage_offset)
 1471                         va = (va & ~PDRMASK) + superpage_offset;
 1472                 else if ((va & PDRMASK) > superpage_offset)
 1473                         va = ((va + PDRMASK) & ~PDRMASK) + superpage_offset;
 1474         }
 1475         sva = va;
 1476         while (start < end) {
 1477                 if ((start & PDRMASK) == 0 && end - start >= NBPDR &&
 1478                     pseflag) {
 1479                         KASSERT((va & PDRMASK) == 0,
 1480                             ("pmap_map: misaligned va %#x", va));
 1481                         newpde = start | PG_PS | pgeflag | PG_RW | PG_V;
 1482                         pmap_kenter_pde(va, newpde);
 1483                         va += NBPDR;
 1484                         start += NBPDR;
 1485                 } else {
 1486                         pmap_kenter(va, start);
 1487                         va += PAGE_SIZE;
 1488                         start += PAGE_SIZE;
 1489                 }
 1490         }
 1491         pmap_invalidate_range(kernel_pmap, sva, va);
 1492         *virt = va;
 1493         return (sva);
 1494 }
 1495 
 1496 
 1497 /*
 1498  * Add a list of wired pages to the kva
 1499  * this routine is only used for temporary
 1500  * kernel mappings that do not need to have
 1501  * page modification or references recorded.
 1502  * Note that old mappings are simply written
 1503  * over.  The page *must* be wired.
 1504  * Note: SMP coherent.  Uses a ranged shootdown IPI.
 1505  */
 1506 void
 1507 pmap_qenter(vm_offset_t sva, vm_page_t *ma, int count)
 1508 {
 1509         pt_entry_t *endpte, oldpte, pa, *pte;
 1510         vm_page_t m;
 1511 
 1512         oldpte = 0;
 1513         pte = vtopte(sva);
 1514         endpte = pte + count;
 1515         while (pte < endpte) {
 1516                 m = *ma++;
 1517                 pa = VM_PAGE_TO_PHYS(m) | pmap_cache_bits(m->md.pat_mode, 0);
 1518                 if ((*pte & (PG_FRAME | PG_PTE_CACHE)) != pa) {
 1519                         oldpte |= *pte;
 1520                         pte_store(pte, pa | pgeflag | PG_RW | PG_V);
 1521                 }
 1522                 pte++;
 1523         }
 1524         if (__predict_false((oldpte & PG_V) != 0))
 1525                 pmap_invalidate_range(kernel_pmap, sva, sva + count *
 1526                     PAGE_SIZE);
 1527 }
 1528 
 1529 /*
 1530  * This routine tears out page mappings from the
 1531  * kernel -- it is meant only for temporary mappings.
 1532  * Note: SMP coherent.  Uses a ranged shootdown IPI.
 1533  */
 1534 void
 1535 pmap_qremove(vm_offset_t sva, int count)
 1536 {
 1537         vm_offset_t va;
 1538 
 1539         va = sva;
 1540         while (count-- > 0) {
 1541                 pmap_kremove(va);
 1542                 va += PAGE_SIZE;
 1543         }
 1544         pmap_invalidate_range(kernel_pmap, sva, va);
 1545 }
 1546 
 1547 /***************************************************
 1548  * Page table page management routines.....
 1549  ***************************************************/
 1550 static __inline void
 1551 pmap_free_zero_pages(vm_page_t free)
 1552 {
 1553         vm_page_t m;
 1554 
 1555         while (free != NULL) {
 1556                 m = free;
 1557                 free = m->right;
 1558                 /* Preserve the page's PG_ZERO setting. */
 1559                 vm_page_free_toq(m);
 1560         }
 1561 }
 1562 
 1563 /*
 1564  * Schedule the specified unused page table page to be freed.  Specifically,
 1565  * add the page to the specified list of pages that will be released to the
 1566  * physical memory manager after the TLB has been updated.
 1567  */
 1568 static __inline void
 1569 pmap_add_delayed_free_list(vm_page_t m, vm_page_t *free, boolean_t set_PG_ZERO)
 1570 {
 1571 
 1572         if (set_PG_ZERO)
 1573                 m->flags |= PG_ZERO;
 1574         else
 1575                 m->flags &= ~PG_ZERO;
 1576         m->right = *free;
 1577         *free = m;
 1578 }
 1579 
 1580 /*
 1581  * Inserts the specified page table page into the specified pmap's collection
 1582  * of idle page table pages.  Each of a pmap's page table pages is responsible
 1583  * for mapping a distinct range of virtual addresses.  The pmap's collection is
 1584  * ordered by this virtual address range.
 1585  */
 1586 static void
 1587 pmap_insert_pt_page(pmap_t pmap, vm_page_t mpte)
 1588 {
 1589         vm_page_t root;
 1590 
 1591         PMAP_LOCK_ASSERT(pmap, MA_OWNED);
 1592         root = pmap->pm_root;
 1593         if (root == NULL) {
 1594                 mpte->left = NULL;
 1595                 mpte->right = NULL;
 1596         } else {
 1597                 root = vm_page_splay(mpte->pindex, root);
 1598                 if (mpte->pindex < root->pindex) {
 1599                         mpte->left = root->left;
 1600                         mpte->right = root;
 1601                         root->left = NULL;
 1602                 } else if (mpte->pindex == root->pindex)
 1603                         panic("pmap_insert_pt_page: pindex already inserted");
 1604                 else {
 1605                         mpte->right = root->right;
 1606                         mpte->left = root;
 1607                         root->right = NULL;
 1608                 }
 1609         }
 1610         pmap->pm_root = mpte;
 1611 }
 1612 
 1613 /*
 1614  * Looks for a page table page mapping the specified virtual address in the
 1615  * specified pmap's collection of idle page table pages.  Returns NULL if there
 1616  * is no page table page corresponding to the specified virtual address.
 1617  */
 1618 static vm_page_t
 1619 pmap_lookup_pt_page(pmap_t pmap, vm_offset_t va)
 1620 {
 1621         vm_page_t mpte;
 1622         vm_pindex_t pindex = va >> PDRSHIFT;
 1623 
 1624         PMAP_LOCK_ASSERT(pmap, MA_OWNED);
 1625         if ((mpte = pmap->pm_root) != NULL && mpte->pindex != pindex) {
 1626                 mpte = vm_page_splay(pindex, mpte);
 1627                 if ((pmap->pm_root = mpte)->pindex != pindex)
 1628                         mpte = NULL;
 1629         }
 1630         return (mpte);
 1631 }
 1632 
 1633 /*
 1634  * Removes the specified page table page from the specified pmap's collection
 1635  * of idle page table pages.  The specified page table page must be a member of
 1636  * the pmap's collection.
 1637  */
 1638 static void
 1639 pmap_remove_pt_page(pmap_t pmap, vm_page_t mpte)
 1640 {
 1641         vm_page_t root;
 1642 
 1643         PMAP_LOCK_ASSERT(pmap, MA_OWNED);
 1644         if (mpte != pmap->pm_root)
 1645                 vm_page_splay(mpte->pindex, pmap->pm_root);
 1646         if (mpte->left == NULL)
 1647                 root = mpte->right;
 1648         else {
 1649                 root = vm_page_splay(mpte->pindex, mpte->left);
 1650                 root->right = mpte->right;
 1651         }
 1652         pmap->pm_root = root;
 1653 }
 1654 
 1655 /*
 1656  * This routine unholds page table pages, and if the hold count
 1657  * drops to zero, then it decrements the wire count.
 1658  */
 1659 static __inline int
 1660 pmap_unwire_pte_hold(pmap_t pmap, vm_page_t m, vm_page_t *free)
 1661 {
 1662 
 1663         --m->wire_count;
 1664         if (m->wire_count == 0)
 1665                 return _pmap_unwire_pte_hold(pmap, m, free);
 1666         else
 1667                 return 0;
 1668 }
 1669 
 1670 static int 
 1671 _pmap_unwire_pte_hold(pmap_t pmap, vm_page_t m, vm_page_t *free)
 1672 {
 1673         vm_offset_t pteva;
 1674 
 1675         /*
 1676          * unmap the page table page
 1677          */
 1678         pmap->pm_pdir[m->pindex] = 0;
 1679         --pmap->pm_stats.resident_count;
 1680 
 1681         /*
 1682          * This is a release store so that the ordinary store unmapping
 1683          * the page table page is globally performed before TLB shoot-
 1684          * down is begun.
 1685          */
 1686         atomic_subtract_rel_int(&cnt.v_wire_count, 1);
 1687 
 1688         /*
 1689          * Do an invltlb to make the invalidated mapping
 1690          * take effect immediately.
 1691          */
 1692         pteva = VM_MAXUSER_ADDRESS + i386_ptob(m->pindex);
 1693         pmap_invalidate_page(pmap, pteva);
 1694 
 1695         /* 
 1696          * Put page on a list so that it is released after
 1697          * *ALL* TLB shootdown is done
 1698          */
 1699         pmap_add_delayed_free_list(m, free, TRUE);
 1700 
 1701         return 1;
 1702 }
 1703 
 1704 /*
 1705  * After removing a page table entry, this routine is used to
 1706  * conditionally free the page, and manage the hold/wire counts.
 1707  */
 1708 static int
 1709 pmap_unuse_pt(pmap_t pmap, vm_offset_t va, vm_page_t *free)
 1710 {
 1711         pd_entry_t ptepde;
 1712         vm_page_t mpte;
 1713 
 1714         if (va >= VM_MAXUSER_ADDRESS)
 1715                 return 0;
 1716         ptepde = *pmap_pde(pmap, va);
 1717         mpte = PHYS_TO_VM_PAGE(ptepde & PG_FRAME);
 1718         return pmap_unwire_pte_hold(pmap, mpte, free);
 1719 }
 1720 
 1721 /*
 1722  * Initialize the pmap for the swapper process.
 1723  */
 1724 void
 1725 pmap_pinit0(pmap_t pmap)
 1726 {
 1727 
 1728         PMAP_LOCK_INIT(pmap);
 1729         /*
 1730          * Since the page table directory is shared with the kernel pmap,
 1731          * which is already included in the list "allpmaps", this pmap does
 1732          * not need to be inserted into that list.
 1733          */
 1734         pmap->pm_pdir = (pd_entry_t *)(KERNBASE + (vm_offset_t)IdlePTD);
 1735 #ifdef PAE
 1736         pmap->pm_pdpt = (pdpt_entry_t *)(KERNBASE + (vm_offset_t)IdlePDPT);
 1737 #endif
 1738         pmap->pm_root = NULL;
 1739         pmap->pm_active = 0;
 1740         PCPU_SET(curpmap, pmap);
 1741         TAILQ_INIT(&pmap->pm_pvchunk);
 1742         bzero(&pmap->pm_stats, sizeof pmap->pm_stats);
 1743 }
 1744 
 1745 /*
 1746  * Initialize a preallocated and zeroed pmap structure,
 1747  * such as one in a vmspace structure.
 1748  */
 1749 int
 1750 pmap_pinit(pmap_t pmap)
 1751 {
 1752         vm_page_t m, ptdpg[NPGPTD];
 1753         vm_paddr_t pa;
 1754         static int color;
 1755         int i;
 1756 
 1757         PMAP_LOCK_INIT(pmap);
 1758 
 1759         /*
 1760          * No need to allocate page table space yet but we do need a valid
 1761          * page directory table.
 1762          */
 1763         if (pmap->pm_pdir == NULL) {
 1764                 pmap->pm_pdir = (pd_entry_t *)kmem_alloc_nofault(kernel_map,
 1765                     NBPTD);
 1766 
 1767                 if (pmap->pm_pdir == NULL) {
 1768                         PMAP_LOCK_DESTROY(pmap);
 1769                         return (0);
 1770                 }
 1771 #ifdef PAE
 1772                 pmap->pm_pdpt = uma_zalloc(pdptzone, M_WAITOK | M_ZERO);
 1773                 KASSERT(((vm_offset_t)pmap->pm_pdpt &
 1774                     ((NPGPTD * sizeof(pdpt_entry_t)) - 1)) == 0,
 1775                     ("pmap_pinit: pdpt misaligned"));
 1776                 KASSERT(pmap_kextract((vm_offset_t)pmap->pm_pdpt) < (4ULL<<30),
 1777                     ("pmap_pinit: pdpt above 4g"));
 1778 #endif
 1779                 pmap->pm_root = NULL;
 1780         }
 1781         KASSERT(pmap->pm_root == NULL,
 1782             ("pmap_pinit: pmap has reserved page table page(s)"));
 1783 
 1784         /*
 1785          * allocate the page directory page(s)
 1786          */
 1787         for (i = 0; i < NPGPTD;) {
 1788                 m = vm_page_alloc(NULL, color++,
 1789                     VM_ALLOC_NORMAL | VM_ALLOC_NOOBJ | VM_ALLOC_WIRED |
 1790                     VM_ALLOC_ZERO);
 1791                 if (m == NULL)
 1792                         VM_WAIT;
 1793                 else {
 1794                         ptdpg[i++] = m;
 1795                 }
 1796         }
 1797 
 1798         pmap_qenter((vm_offset_t)pmap->pm_pdir, ptdpg, NPGPTD);
 1799 
 1800         for (i = 0; i < NPGPTD; i++) {
 1801                 if ((ptdpg[i]->flags & PG_ZERO) == 0)
 1802                         bzero(pmap->pm_pdir + (i * NPDEPG), PAGE_SIZE);
 1803         }
 1804 
 1805         mtx_lock_spin(&allpmaps_lock);
 1806         LIST_INSERT_HEAD(&allpmaps, pmap, pm_list);
 1807         /* Copy the kernel page table directory entries. */
 1808         bcopy(PTD + KPTDI, pmap->pm_pdir + KPTDI, nkpt * sizeof(pd_entry_t));
 1809         mtx_unlock_spin(&allpmaps_lock);
 1810 
 1811         /* install self-referential address mapping entry(s) */
 1812         for (i = 0; i < NPGPTD; i++) {
 1813                 pa = VM_PAGE_TO_PHYS(ptdpg[i]);
 1814                 pmap->pm_pdir[PTDPTDI + i] = pa | PG_V | PG_RW | PG_A | PG_M;
 1815 #ifdef PAE
 1816                 pmap->pm_pdpt[i] = pa | PG_V;
 1817 #endif
 1818         }
 1819 
 1820         pmap->pm_active = 0;
 1821         TAILQ_INIT(&pmap->pm_pvchunk);
 1822         bzero(&pmap->pm_stats, sizeof pmap->pm_stats);
 1823 
 1824         return (1);
 1825 }
 1826 
 1827 /*
 1828  * this routine is called if the page table page is not
 1829  * mapped correctly.
 1830  */
 1831 static vm_page_t
 1832 _pmap_allocpte(pmap_t pmap, unsigned ptepindex, int flags)
 1833 {
 1834         vm_paddr_t ptepa;
 1835         vm_page_t m;
 1836 
 1837         KASSERT((flags & (M_NOWAIT | M_WAITOK)) == M_NOWAIT ||
 1838             (flags & (M_NOWAIT | M_WAITOK)) == M_WAITOK,
 1839             ("_pmap_allocpte: flags is neither M_NOWAIT nor M_WAITOK"));
 1840 
 1841         /*
 1842          * Allocate a page table page.
 1843          */
 1844         if ((m = vm_page_alloc(NULL, ptepindex, VM_ALLOC_NOOBJ |
 1845             VM_ALLOC_WIRED | VM_ALLOC_ZERO)) == NULL) {
 1846                 if (flags & M_WAITOK) {
 1847                         PMAP_UNLOCK(pmap);
 1848                         vm_page_unlock_queues();
 1849                         VM_WAIT;
 1850                         vm_page_lock_queues();
 1851                         PMAP_LOCK(pmap);
 1852                 }
 1853 
 1854                 /*
 1855                  * Indicate the need to retry.  While waiting, the page table
 1856                  * page may have been allocated.
 1857                  */
 1858                 return (NULL);
 1859         }
 1860         if ((m->flags & PG_ZERO) == 0)
 1861                 pmap_zero_page(m);
 1862 
 1863         /*
 1864          * Map the pagetable page into the process address space, if
 1865          * it isn't already there.
 1866          */
 1867 
 1868         pmap->pm_stats.resident_count++;
 1869 
 1870         ptepa = VM_PAGE_TO_PHYS(m);
 1871         pmap->pm_pdir[ptepindex] =
 1872                 (pd_entry_t) (ptepa | PG_U | PG_RW | PG_V | PG_A | PG_M);
 1873 
 1874         return m;
 1875 }
 1876 
 1877 static vm_page_t
 1878 pmap_allocpte(pmap_t pmap, vm_offset_t va, int flags)
 1879 {
 1880         unsigned ptepindex;
 1881         pd_entry_t ptepa;
 1882         vm_page_t m;
 1883 
 1884         KASSERT((flags & (M_NOWAIT | M_WAITOK)) == M_NOWAIT ||
 1885             (flags & (M_NOWAIT | M_WAITOK)) == M_WAITOK,
 1886             ("pmap_allocpte: flags is neither M_NOWAIT nor M_WAITOK"));
 1887 
 1888         /*
 1889          * Calculate pagetable page index
 1890          */
 1891         ptepindex = va >> PDRSHIFT;
 1892 retry:
 1893         /*
 1894          * Get the page directory entry
 1895          */
 1896         ptepa = pmap->pm_pdir[ptepindex];
 1897 
 1898         /*
 1899          * This supports switching from a 4MB page to a
 1900          * normal 4K page.
 1901          */
 1902         if (ptepa & PG_PS) {
 1903                 (void)pmap_demote_pde(pmap, &pmap->pm_pdir[ptepindex], va);
 1904                 ptepa = pmap->pm_pdir[ptepindex];
 1905         }
 1906 
 1907         /*
 1908          * If the page table page is mapped, we just increment the
 1909          * hold count, and activate it.
 1910          */
 1911         if (ptepa) {
 1912                 m = PHYS_TO_VM_PAGE(ptepa & PG_FRAME);
 1913                 m->wire_count++;
 1914         } else {
 1915                 /*
 1916                  * Here if the pte page isn't mapped, or if it has
 1917                  * been deallocated. 
 1918                  */
 1919                 m = _pmap_allocpte(pmap, ptepindex, flags);
 1920                 if (m == NULL && (flags & M_WAITOK))
 1921                         goto retry;
 1922         }
 1923         return (m);
 1924 }
 1925 
 1926 
 1927 /***************************************************
 1928 * Pmap allocation/deallocation routines.
 1929  ***************************************************/
 1930 
 1931 #ifdef SMP
 1932 /*
 1933  * Deal with a SMP shootdown of other users of the pmap that we are
 1934  * trying to dispose of.  This can be a bit hairy.
 1935  */
 1936 static cpumask_t *lazymask;
 1937 static u_int lazyptd;
 1938 static volatile u_int lazywait;
 1939 
 1940 void pmap_lazyfix_action(void);
 1941 
 1942 void
 1943 pmap_lazyfix_action(void)
 1944 {
 1945         cpumask_t mymask = PCPU_GET(cpumask);
 1946 
 1947 #ifdef COUNT_IPIS
 1948         (*ipi_lazypmap_counts[PCPU_GET(cpuid)])++;
 1949 #endif
 1950         if (rcr3() == lazyptd)
 1951                 load_cr3(PCPU_GET(curpcb)->pcb_cr3);
 1952         atomic_clear_int(lazymask, mymask);
 1953         atomic_store_rel_int(&lazywait, 1);
 1954 }
 1955 
 1956 static void
 1957 pmap_lazyfix_self(cpumask_t mymask)
 1958 {
 1959 
 1960         if (rcr3() == lazyptd)
 1961                 load_cr3(PCPU_GET(curpcb)->pcb_cr3);
 1962         atomic_clear_int(lazymask, mymask);
 1963 }
 1964 
 1965 
 1966 static void
 1967 pmap_lazyfix(pmap_t pmap)
 1968 {
 1969         cpumask_t mymask, mask;
 1970         u_int spins;
 1971 
 1972         while ((mask = pmap->pm_active) != 0) {
 1973                 spins = 50000000;
 1974                 mask = mask & -mask;    /* Find least significant set bit */
 1975                 mtx_lock_spin(&smp_ipi_mtx);
 1976 #ifdef PAE
 1977                 lazyptd = vtophys(pmap->pm_pdpt);
 1978 #else
 1979                 lazyptd = vtophys(pmap->pm_pdir);
 1980 #endif
 1981                 mymask = PCPU_GET(cpumask);
 1982                 if (mask == mymask) {
 1983                         lazymask = &pmap->pm_active;
 1984                         pmap_lazyfix_self(mymask);
 1985                 } else {
 1986                         atomic_store_rel_int((u_int *)&lazymask,
 1987                             (u_int)&pmap->pm_active);
 1988                         atomic_store_rel_int(&lazywait, 0);
 1989                         ipi_selected(mask, IPI_LAZYPMAP);
 1990                         while (lazywait == 0) {
 1991                                 ia32_pause();
 1992                                 if (--spins == 0)
 1993                                         break;
 1994                         }
 1995                 }
 1996                 mtx_unlock_spin(&smp_ipi_mtx);
 1997                 if (spins == 0)
 1998                         printf("pmap_lazyfix: spun for 50000000\n");
 1999         }
 2000 }
 2001 
 2002 #else   /* SMP */
 2003 
 2004 /*
 2005  * Cleaning up on uniprocessor is easy.  For various reasons, we're
 2006  * unlikely to have to even execute this code, including the fact
 2007  * that the cleanup is deferred until the parent does a wait(2), which
 2008  * means that another userland process has run.
 2009  */
 2010 static void
 2011 pmap_lazyfix(pmap_t pmap)
 2012 {
 2013         u_int cr3;
 2014 
 2015         cr3 = vtophys(pmap->pm_pdir);
 2016         if (cr3 == rcr3()) {
 2017                 load_cr3(PCPU_GET(curpcb)->pcb_cr3);
 2018                 pmap->pm_active &= ~(PCPU_GET(cpumask));
 2019         }
 2020 }
 2021 #endif  /* SMP */
 2022 
 2023 /*
 2024  * Release any resources held by the given physical map.
 2025  * Called when a pmap initialized by pmap_pinit is being released.
 2026  * Should only be called if the map contains no valid mappings.
 2027  */
 2028 void
 2029 pmap_release(pmap_t pmap)
 2030 {
 2031         vm_page_t m, ptdpg[NPGPTD];
 2032         int i;
 2033 
 2034         KASSERT(pmap->pm_stats.resident_count == 0,
 2035             ("pmap_release: pmap resident count %ld != 0",
 2036             pmap->pm_stats.resident_count));
 2037         KASSERT(pmap->pm_root == NULL,
 2038             ("pmap_release: pmap has reserved page table page(s)"));
 2039 
 2040         pmap_lazyfix(pmap);
 2041         mtx_lock_spin(&allpmaps_lock);
 2042         LIST_REMOVE(pmap, pm_list);
 2043         mtx_unlock_spin(&allpmaps_lock);
 2044 
 2045         for (i = 0; i < NPGPTD; i++)
 2046                 ptdpg[i] = PHYS_TO_VM_PAGE(pmap->pm_pdir[PTDPTDI + i] &
 2047                     PG_FRAME);
 2048 
 2049         bzero(pmap->pm_pdir + PTDPTDI, (nkpt + NPGPTD) *
 2050             sizeof(*pmap->pm_pdir));
 2051 
 2052         pmap_qremove((vm_offset_t)pmap->pm_pdir, NPGPTD);
 2053 
 2054         for (i = 0; i < NPGPTD; i++) {
 2055                 m = ptdpg[i];
 2056 #ifdef PAE
 2057                 KASSERT(VM_PAGE_TO_PHYS(m) == (pmap->pm_pdpt[i] & PG_FRAME),
 2058                     ("pmap_release: got wrong ptd page"));
 2059 #endif
 2060                 m->wire_count--;
 2061                 atomic_subtract_int(&cnt.v_wire_count, 1);
 2062                 vm_page_free_zero(m);
 2063         }
 2064         PMAP_LOCK_DESTROY(pmap);
 2065 }
 2066 
 2067 static int
 2068 kvm_size(SYSCTL_HANDLER_ARGS)
 2069 {
 2070         unsigned long ksize = VM_MAX_KERNEL_ADDRESS - KERNBASE;
 2071 
 2072         return sysctl_handle_long(oidp, &ksize, 0, req);
 2073 }
 2074 SYSCTL_PROC(_vm, OID_AUTO, kvm_size, CTLTYPE_LONG|CTLFLAG_RD, 
 2075     0, 0, kvm_size, "IU", "Size of KVM");
 2076 
 2077 static int
 2078 kvm_free(SYSCTL_HANDLER_ARGS)
 2079 {
 2080         unsigned long kfree = VM_MAX_KERNEL_ADDRESS - kernel_vm_end;
 2081 
 2082         return sysctl_handle_long(oidp, &kfree, 0, req);
 2083 }
 2084 SYSCTL_PROC(_vm, OID_AUTO, kvm_free, CTLTYPE_LONG|CTLFLAG_RD, 
 2085     0, 0, kvm_free, "IU", "Amount of KVM free");
 2086 
 2087 /*
 2088  * grow the number of kernel page table entries, if needed
 2089  */
 2090 void
 2091 pmap_growkernel(vm_offset_t addr)
 2092 {
 2093         vm_paddr_t ptppaddr;
 2094         vm_page_t nkpg;
 2095         pd_entry_t newpdir;
 2096 
 2097         mtx_assert(&kernel_map->system_mtx, MA_OWNED);
 2098         if (kernel_vm_end == 0) {
 2099                 kernel_vm_end = KERNBASE;
 2100                 nkpt = 0;
 2101                 while (pdir_pde(PTD, kernel_vm_end)) {
 2102                         kernel_vm_end = (kernel_vm_end + PAGE_SIZE * NPTEPG) & ~(PAGE_SIZE * NPTEPG - 1);
 2103                         nkpt++;
 2104                         if (kernel_vm_end - 1 >= kernel_map->max_offset) {
 2105                                 kernel_vm_end = kernel_map->max_offset;
 2106                                 break;
 2107                         }
 2108                 }
 2109         }
 2110         addr = roundup2(addr, PAGE_SIZE * NPTEPG);
 2111         if (addr - 1 >= kernel_map->max_offset)
 2112                 addr = kernel_map->max_offset;
 2113         while (kernel_vm_end < addr) {
 2114                 if (pdir_pde(PTD, kernel_vm_end)) {
 2115                         kernel_vm_end = (kernel_vm_end + PAGE_SIZE * NPTEPG) & ~(PAGE_SIZE * NPTEPG - 1);
 2116                         if (kernel_vm_end - 1 >= kernel_map->max_offset) {
 2117                                 kernel_vm_end = kernel_map->max_offset;
 2118                                 break;
 2119                         }
 2120                         continue;
 2121                 }
 2122 
 2123                 nkpg = vm_page_alloc(NULL, kernel_vm_end >> PDRSHIFT,
 2124                     VM_ALLOC_INTERRUPT | VM_ALLOC_NOOBJ | VM_ALLOC_WIRED |
 2125                     VM_ALLOC_ZERO);
 2126                 if (nkpg == NULL)
 2127                         panic("pmap_growkernel: no memory to grow kernel");
 2128 
 2129                 nkpt++;
 2130 
 2131                 if ((nkpg->flags & PG_ZERO) == 0)
 2132                         pmap_zero_page(nkpg);
 2133                 ptppaddr = VM_PAGE_TO_PHYS(nkpg);
 2134                 newpdir = (pd_entry_t) (ptppaddr | PG_V | PG_RW | PG_A | PG_M);
 2135                 pdir_pde(KPTD, kernel_vm_end) = pgeflag | newpdir;
 2136 
 2137                 pmap_kenter_pde(kernel_vm_end, newpdir);
 2138                 kernel_vm_end = (kernel_vm_end + PAGE_SIZE * NPTEPG) & ~(PAGE_SIZE * NPTEPG - 1);
 2139                 if (kernel_vm_end - 1 >= kernel_map->max_offset) {
 2140                         kernel_vm_end = kernel_map->max_offset;
 2141                         break;
 2142                 }
 2143         }
 2144 }
 2145 
 2146 
 2147 /***************************************************
 2148  * page management routines.
 2149  ***************************************************/
 2150 
 2151 CTASSERT(sizeof(struct pv_chunk) == PAGE_SIZE);
 2152 CTASSERT(_NPCM == 11);
 2153 
 2154 static __inline struct pv_chunk *
 2155 pv_to_chunk(pv_entry_t pv)
 2156 {
 2157 
 2158         return (struct pv_chunk *)((uintptr_t)pv & ~(uintptr_t)PAGE_MASK);
 2159 }
 2160 
 2161 #define PV_PMAP(pv) (pv_to_chunk(pv)->pc_pmap)
 2162 
 2163 #define PC_FREE0_9      0xfffffffful    /* Free values for index 0 through 9 */
 2164 #define PC_FREE10       0x0000fffful    /* Free values for index 10 */
 2165 
 2166 static uint32_t pc_freemask[11] = {
 2167         PC_FREE0_9, PC_FREE0_9, PC_FREE0_9,
 2168         PC_FREE0_9, PC_FREE0_9, PC_FREE0_9,
 2169         PC_FREE0_9, PC_FREE0_9, PC_FREE0_9,
 2170         PC_FREE0_9, PC_FREE10
 2171 };
 2172 
 2173 SYSCTL_INT(_vm_pmap, OID_AUTO, pv_entry_count, CTLFLAG_RD, &pv_entry_count, 0,
 2174         "Current number of pv entries");
 2175 
 2176 #ifdef PV_STATS
 2177 static int pc_chunk_count, pc_chunk_allocs, pc_chunk_frees, pc_chunk_tryfail;
 2178 
 2179 SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_count, CTLFLAG_RD, &pc_chunk_count, 0,
 2180         "Current number of pv entry chunks");
 2181 SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_allocs, CTLFLAG_RD, &pc_chunk_allocs, 0,
 2182         "Current number of pv entry chunks allocated");
 2183 SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_frees, CTLFLAG_RD, &pc_chunk_frees, 0,
 2184         "Current number of pv entry chunks frees");
 2185 SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_tryfail, CTLFLAG_RD, &pc_chunk_tryfail, 0,
 2186         "Number of times tried to get a chunk page but failed.");
 2187 
 2188 static long pv_entry_frees, pv_entry_allocs;
 2189 static int pv_entry_spare;
 2190 
 2191 SYSCTL_LONG(_vm_pmap, OID_AUTO, pv_entry_frees, CTLFLAG_RD, &pv_entry_frees, 0,
 2192         "Current number of pv entry frees");
 2193 SYSCTL_LONG(_vm_pmap, OID_AUTO, pv_entry_allocs, CTLFLAG_RD, &pv_entry_allocs, 0,
 2194         "Current number of pv entry allocs");
 2195 SYSCTL_INT(_vm_pmap, OID_AUTO, pv_entry_spare, CTLFLAG_RD, &pv_entry_spare, 0,
 2196         "Current number of spare pv entries");
 2197 
 2198 static int pmap_collect_inactive, pmap_collect_active;
 2199 
 2200 SYSCTL_INT(_vm_pmap, OID_AUTO, pmap_collect_inactive, CTLFLAG_RD, &pmap_collect_inactive, 0,
 2201         "Current number times pmap_collect called on inactive queue");
 2202 SYSCTL_INT(_vm_pmap, OID_AUTO, pmap_collect_active, CTLFLAG_RD, &pmap_collect_active, 0,
 2203         "Current number times pmap_collect called on active queue");
 2204 #endif
 2205 
 2206 /*
 2207  * We are in a serious low memory condition.  Resort to
 2208  * drastic measures to free some pages so we can allocate
 2209  * another pv entry chunk.  This is normally called to
 2210  * unmap inactive pages, and if necessary, active pages.
 2211  */
 2212 static void
 2213 pmap_collect(pmap_t locked_pmap, struct vpgqueues *vpq)
 2214 {
 2215         struct md_page *pvh;
 2216         pd_entry_t *pde;
 2217         pmap_t pmap;
 2218         pt_entry_t *pte, tpte;
 2219         pv_entry_t next_pv, pv;
 2220         vm_offset_t va;
 2221         vm_page_t m, free;
 2222 
 2223         sched_pin();
 2224         TAILQ_FOREACH(m, &vpq->pl, pageq) {
 2225                 if ((m->flags & PG_MARKER) != 0 || m->hold_count || m->busy)
 2226                         continue;
 2227                 TAILQ_FOREACH_SAFE(pv, &m->md.pv_list, pv_list, next_pv) {
 2228                         va = pv->pv_va;
 2229                         pmap = PV_PMAP(pv);
 2230                         /* Avoid deadlock and lock recursion. */
 2231                         if (pmap > locked_pmap)
 2232                                 PMAP_LOCK(pmap);
 2233                         else if (pmap != locked_pmap && !PMAP_TRYLOCK(pmap))
 2234                                 continue;
 2235                         pmap->pm_stats.resident_count--;
 2236                         pde = pmap_pde(pmap, va);
 2237                         KASSERT((*pde & PG_PS) == 0, ("pmap_collect: found"
 2238                             " a 4mpage in page %p's pv list", m));
 2239                         pte = pmap_pte_quick(pmap, va);
 2240                         tpte = pte_load_clear(pte);
 2241                         KASSERT((tpte & PG_W) == 0,
 2242                             ("pmap_collect: wired pte %#jx", (uintmax_t)tpte));
 2243                         if (tpte & PG_A)
 2244                                 vm_page_flag_set(m, PG_REFERENCED);
 2245                         if ((tpte & (PG_M | PG_RW)) == (PG_M | PG_RW))
 2246                                 vm_page_dirty(m);
 2247                         free = NULL;
 2248                         pmap_unuse_pt(pmap, va, &free);
 2249                         pmap_invalidate_page(pmap, va);
 2250                         pmap_free_zero_pages(free);
 2251                         TAILQ_REMOVE(&m->md.pv_list, pv, pv_list);
 2252                         if (TAILQ_EMPTY(&m->md.pv_list)) {
 2253                                 pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
 2254                                 if (TAILQ_EMPTY(&pvh->pv_list))
 2255                                         vm_page_flag_clear(m, PG_WRITEABLE);
 2256                         }
 2257                         free_pv_entry(pmap, pv);
 2258                         if (pmap != locked_pmap)
 2259                                 PMAP_UNLOCK(pmap);
 2260                 }
 2261         }
 2262         sched_unpin();
 2263 }
 2264 
 2265 
 2266 /*
 2267  * free the pv_entry back to the free list
 2268  */
 2269 static void
 2270 free_pv_entry(pmap_t pmap, pv_entry_t pv)
 2271 {
 2272         vm_page_t m;
 2273         struct pv_chunk *pc;
 2274         int idx, field, bit;
 2275 
 2276         mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 2277         PMAP_LOCK_ASSERT(pmap, MA_OWNED);
 2278         PV_STAT(pv_entry_frees++);
 2279         PV_STAT(pv_entry_spare++);
 2280         pv_entry_count--;
 2281         pc = pv_to_chunk(pv);
 2282         idx = pv - &pc->pc_pventry[0];
 2283         field = idx / 32;
 2284         bit = idx % 32;
 2285         pc->pc_map[field] |= 1ul << bit;
 2286         /* move to head of list */
 2287         TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
 2288         for (idx = 0; idx < _NPCM; idx++)
 2289                 if (pc->pc_map[idx] != pc_freemask[idx]) {
 2290                         TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc, pc_list);
 2291                         return;
 2292                 }
 2293         PV_STAT(pv_entry_spare -= _NPCPV);
 2294         PV_STAT(pc_chunk_count--);
 2295         PV_STAT(pc_chunk_frees++);
 2296         /* entire chunk is free, return it */
 2297         m = PHYS_TO_VM_PAGE(pmap_kextract((vm_offset_t)pc));
 2298         pmap_qremove((vm_offset_t)pc, 1);
 2299         vm_page_unwire(m, 0);
 2300         vm_page_free(m);
 2301         pmap_ptelist_free(&pv_vafree, (vm_offset_t)pc);
 2302 }
 2303 
 2304 /*
 2305  * get a new pv_entry, allocating a block from the system
 2306  * when needed.
 2307  */
 2308 static pv_entry_t
 2309 get_pv_entry(pmap_t pmap, int try)
 2310 {
 2311         static const struct timeval printinterval = { 60, 0 };
 2312         static struct timeval lastprint;
 2313         static vm_pindex_t colour;
 2314         struct vpgqueues *pq;
 2315         int bit, field;
 2316         pv_entry_t pv;
 2317         struct pv_chunk *pc;
 2318         vm_page_t m;
 2319 
 2320         PMAP_LOCK_ASSERT(pmap, MA_OWNED);
 2321         mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 2322         PV_STAT(pv_entry_allocs++);
 2323         pv_entry_count++;
 2324         if (pv_entry_count > pv_entry_high_water)
 2325                 if (ratecheck(&lastprint, &printinterval))
 2326                         printf("Approaching the limit on PV entries, consider "
 2327                             "increasing either the vm.pmap.shpgperproc or the "
 2328                             "vm.pmap.pv_entry_max tunable.\n");
 2329         pq = NULL;
 2330 retry:
 2331         pc = TAILQ_FIRST(&pmap->pm_pvchunk);
 2332         if (pc != NULL) {
 2333                 for (field = 0; field < _NPCM; field++) {
 2334                         if (pc->pc_map[field]) {
 2335                                 bit = bsfl(pc->pc_map[field]);
 2336                                 break;
 2337                         }
 2338                 }
 2339                 if (field < _NPCM) {
 2340                         pv = &pc->pc_pventry[field * 32 + bit];
 2341                         pc->pc_map[field] &= ~(1ul << bit);
 2342                         /* If this was the last item, move it to tail */
 2343                         for (field = 0; field < _NPCM; field++)
 2344                                 if (pc->pc_map[field] != 0) {
 2345                                         PV_STAT(pv_entry_spare--);
 2346                                         return (pv);    /* not full, return */
 2347                                 }
 2348                         TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
 2349                         TAILQ_INSERT_TAIL(&pmap->pm_pvchunk, pc, pc_list);
 2350                         PV_STAT(pv_entry_spare--);
 2351                         return (pv);
 2352                 }
 2353         }
 2354         /*
 2355          * Access to the ptelist "pv_vafree" is synchronized by the page
 2356          * queues lock.  If "pv_vafree" is currently non-empty, it will
 2357          * remain non-empty until pmap_ptelist_alloc() completes.
 2358          */
 2359         if (pv_vafree == 0 || (m = vm_page_alloc(NULL, colour, (pq ==
 2360             &vm_page_queues[PQ_ACTIVE] ? VM_ALLOC_SYSTEM : VM_ALLOC_NORMAL) |
 2361             VM_ALLOC_NOOBJ | VM_ALLOC_WIRED)) == NULL) {
 2362                 if (try) {
 2363                         pv_entry_count--;
 2364                         PV_STAT(pc_chunk_tryfail++);
 2365                         return (NULL);
 2366                 }
 2367                 /*
 2368                  * Reclaim pv entries: At first, destroy mappings to
 2369                  * inactive pages.  After that, if a pv chunk entry
 2370                  * is still needed, destroy mappings to active pages.
 2371                  */
 2372                 if (pq == NULL) {
 2373                         PV_STAT(pmap_collect_inactive++);
 2374                         pq = &vm_page_queues[PQ_INACTIVE];
 2375                 } else if (pq == &vm_page_queues[PQ_INACTIVE]) {
 2376                         PV_STAT(pmap_collect_active++);
 2377                         pq = &vm_page_queues[PQ_ACTIVE];
 2378                 } else
 2379                         panic("get_pv_entry: increase vm.pmap.shpgperproc");
 2380                 pmap_collect(pmap, pq);
 2381                 goto retry;
 2382         }
 2383         PV_STAT(pc_chunk_count++);
 2384         PV_STAT(pc_chunk_allocs++);
 2385         colour++;
 2386         pc = (struct pv_chunk *)pmap_ptelist_alloc(&pv_vafree);
 2387         pmap_qenter((vm_offset_t)pc, &m, 1);
 2388         pc->pc_pmap = pmap;
 2389         pc->pc_map[0] = pc_freemask[0] & ~1ul;  /* preallocated bit 0 */
 2390         for (field = 1; field < _NPCM; field++)
 2391                 pc->pc_map[field] = pc_freemask[field];
 2392         pv = &pc->pc_pventry[0];
 2393         TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc, pc_list);
 2394         PV_STAT(pv_entry_spare += _NPCPV - 1);
 2395         return (pv);
 2396 }
 2397 
 2398 static __inline pv_entry_t
 2399 pmap_pvh_remove(struct md_page *pvh, pmap_t pmap, vm_offset_t va)
 2400 {
 2401         pv_entry_t pv;
 2402 
 2403         mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 2404         TAILQ_FOREACH(pv, &pvh->pv_list, pv_list) {
 2405                 if (pmap == PV_PMAP(pv) && va == pv->pv_va) {
 2406                         TAILQ_REMOVE(&pvh->pv_list, pv, pv_list);
 2407                         break;
 2408                 }
 2409         }
 2410         return (pv);
 2411 }
 2412 
 2413 static void
 2414 pmap_pv_demote_pde(pmap_t pmap, vm_offset_t va, vm_paddr_t pa)
 2415 {
 2416         struct md_page *pvh;
 2417         pv_entry_t pv;
 2418         vm_offset_t va_last;
 2419         vm_page_t m;
 2420 
 2421         mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 2422         KASSERT((pa & PDRMASK) == 0,
 2423             ("pmap_pv_demote_pde: pa is not 4mpage aligned"));
 2424 
 2425         /*
 2426          * Transfer the 4mpage's pv entry for this mapping to the first
 2427          * page's pv list.
 2428          */
 2429         pvh = pa_to_pvh(pa);
 2430         va = trunc_4mpage(va);
 2431         pv = pmap_pvh_remove(pvh, pmap, va);
 2432         KASSERT(pv != NULL, ("pmap_pv_demote_pde: pv not found"));
 2433         m = PHYS_TO_VM_PAGE(pa);
 2434         TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_list);
 2435         /* Instantiate the remaining NPTEPG - 1 pv entries. */
 2436         va_last = va + NBPDR - PAGE_SIZE;
 2437         do {
 2438                 m++;
 2439                 KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
 2440                     ("pmap_pv_demote_pde: page %p is not managed", m));
 2441                 va += PAGE_SIZE;
 2442                 pmap_insert_entry(pmap, va, m);
 2443         } while (va < va_last);
 2444 }
 2445 
 2446 static void
 2447 pmap_pv_promote_pde(pmap_t pmap, vm_offset_t va, vm_paddr_t pa)
 2448 {
 2449         struct md_page *pvh;
 2450         pv_entry_t pv;
 2451         vm_offset_t va_last;
 2452         vm_page_t m;
 2453 
 2454         mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 2455         KASSERT((pa & PDRMASK) == 0,
 2456             ("pmap_pv_promote_pde: pa is not 4mpage aligned"));
 2457 
 2458         /*
 2459          * Transfer the first page's pv entry for this mapping to the
 2460          * 4mpage's pv list.  Aside from avoiding the cost of a call
 2461          * to get_pv_entry(), a transfer avoids the possibility that
 2462          * get_pv_entry() calls pmap_collect() and that pmap_collect()
 2463          * removes one of the mappings that is being promoted.
 2464          */
 2465         m = PHYS_TO_VM_PAGE(pa);
 2466         va = trunc_4mpage(va);
 2467         pv = pmap_pvh_remove(&m->md, pmap, va);
 2468         KASSERT(pv != NULL, ("pmap_pv_promote_pde: pv not found"));
 2469         pvh = pa_to_pvh(pa);
 2470         TAILQ_INSERT_TAIL(&pvh->pv_list, pv, pv_list);
 2471         /* Free the remaining NPTEPG - 1 pv entries. */
 2472         va_last = va + NBPDR - PAGE_SIZE;
 2473         do {
 2474                 m++;
 2475                 va += PAGE_SIZE;
 2476                 pmap_pvh_free(&m->md, pmap, va);
 2477         } while (va < va_last);
 2478 }
 2479 
 2480 static void
 2481 pmap_pvh_free(struct md_page *pvh, pmap_t pmap, vm_offset_t va)
 2482 {
 2483         pv_entry_t pv;
 2484 
 2485         pv = pmap_pvh_remove(pvh, pmap, va);
 2486         KASSERT(pv != NULL, ("pmap_pvh_free: pv not found"));
 2487         free_pv_entry(pmap, pv);
 2488 }
 2489 
 2490 static void
 2491 pmap_remove_entry(pmap_t pmap, vm_page_t m, vm_offset_t va)
 2492 {
 2493         struct md_page *pvh;
 2494 
 2495         mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 2496         pmap_pvh_free(&m->md, pmap, va);
 2497         if (TAILQ_EMPTY(&m->md.pv_list)) {
 2498                 pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
 2499                 if (TAILQ_EMPTY(&pvh->pv_list))
 2500                         vm_page_flag_clear(m, PG_WRITEABLE);
 2501         }
 2502 }
 2503 
 2504 /*
 2505  * Create a pv entry for page at pa for
 2506  * (pmap, va).
 2507  */
 2508 static void
 2509 pmap_insert_entry(pmap_t pmap, vm_offset_t va, vm_page_t m)
 2510 {
 2511         pv_entry_t pv;
 2512 
 2513         PMAP_LOCK_ASSERT(pmap, MA_OWNED);
 2514         mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 2515         pv = get_pv_entry(pmap, FALSE);
 2516         pv->pv_va = va;
 2517         TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_list);
 2518 }
 2519 
 2520 /*
 2521  * Conditionally create a pv entry.
 2522  */
 2523 static boolean_t
 2524 pmap_try_insert_pv_entry(pmap_t pmap, vm_offset_t va, vm_page_t m)
 2525 {
 2526         pv_entry_t pv;
 2527 
 2528         PMAP_LOCK_ASSERT(pmap, MA_OWNED);
 2529         mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 2530         if (pv_entry_count < pv_entry_high_water && 
 2531             (pv = get_pv_entry(pmap, TRUE)) != NULL) {
 2532                 pv->pv_va = va;
 2533                 TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_list);
 2534                 return (TRUE);
 2535         } else
 2536                 return (FALSE);
 2537 }
 2538 
 2539 /*
 2540  * Create the pv entries for each of the pages within a superpage.
 2541  */
 2542 static boolean_t
 2543 pmap_pv_insert_pde(pmap_t pmap, vm_offset_t va, vm_paddr_t pa)
 2544 {
 2545         struct md_page *pvh;
 2546         pv_entry_t pv;
 2547 
 2548         mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 2549         if (pv_entry_count < pv_entry_high_water && 
 2550             (pv = get_pv_entry(pmap, TRUE)) != NULL) {
 2551                 pv->pv_va = va;
 2552                 pvh = pa_to_pvh(pa);
 2553                 TAILQ_INSERT_TAIL(&pvh->pv_list, pv, pv_list);
 2554                 return (TRUE);
 2555         } else
 2556                 return (FALSE);
 2557 }
 2558 
 2559 /*
 2560  * Fills a page table page with mappings to consecutive physical pages.
 2561  */
 2562 static void
 2563 pmap_fill_ptp(pt_entry_t *firstpte, pt_entry_t newpte)
 2564 {
 2565         pt_entry_t *pte;
 2566 
 2567         for (pte = firstpte; pte < firstpte + NPTEPG; pte++) {
 2568                 *pte = newpte;  
 2569                 newpte += PAGE_SIZE;
 2570         }
 2571 }
 2572 
 2573 /*
 2574  * Tries to demote a 2- or 4MB page mapping.  If demotion fails, the
 2575  * 2- or 4MB page mapping is invalidated.
 2576  */
 2577 static boolean_t
 2578 pmap_demote_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t va)
 2579 {
 2580         pd_entry_t newpde, oldpde;
 2581         pt_entry_t *firstpte, newpte;
 2582         vm_paddr_t mptepa;
 2583         vm_page_t free, mpte;
 2584 
 2585         PMAP_LOCK_ASSERT(pmap, MA_OWNED);
 2586         oldpde = *pde;
 2587         KASSERT((oldpde & (PG_PS | PG_V)) == (PG_PS | PG_V),
 2588             ("pmap_demote_pde: oldpde is missing PG_PS and/or PG_V"));
 2589         mpte = pmap_lookup_pt_page(pmap, va);
 2590         if (mpte != NULL)
 2591                 pmap_remove_pt_page(pmap, mpte);
 2592         else {
 2593                 KASSERT((oldpde & PG_W) == 0,
 2594                     ("pmap_demote_pde: page table page for a wired mapping"
 2595                     " is missing"));
 2596 
 2597                 /*
 2598                  * Invalidate the 2- or 4MB page mapping and return
 2599                  * "failure" if the mapping was never accessed or the
 2600                  * allocation of the new page table page fails.
 2601                  */
 2602                 if ((oldpde & PG_A) == 0 || (mpte = vm_page_alloc(NULL,
 2603                     va >> PDRSHIFT, VM_ALLOC_NOOBJ | VM_ALLOC_NORMAL |
 2604                     VM_ALLOC_WIRED)) == NULL) {
 2605                         free = NULL;
 2606                         pmap_remove_pde(pmap, pde, trunc_4mpage(va), &free);
 2607                         pmap_invalidate_page(pmap, trunc_4mpage(va));
 2608                         pmap_free_zero_pages(free);
 2609                         CTR2(KTR_PMAP, "pmap_demote_pde: failure for va %#x"
 2610                             " in pmap %p", va, pmap);
 2611                         return (FALSE);
 2612                 }
 2613                 if (va < VM_MAXUSER_ADDRESS)
 2614                         pmap->pm_stats.resident_count++;
 2615         }
 2616         mptepa = VM_PAGE_TO_PHYS(mpte);
 2617 
 2618         /*
 2619          * If the page mapping is in the kernel's address space, then the
 2620          * KPTmap can provide access to the page table page.  Otherwise,
 2621          * temporarily map the page table page (mpte) into the kernel's
 2622          * address space at either PADDR1 or PADDR2. 
 2623          */
 2624         if (va >= KERNBASE)
 2625                 firstpte = &KPTmap[i386_btop(trunc_4mpage(va))];
 2626         else if (curthread->td_pinned > 0 && mtx_owned(&vm_page_queue_mtx)) {
 2627                 if ((*PMAP1 & PG_FRAME) != mptepa) {
 2628                         *PMAP1 = mptepa | PG_RW | PG_V | PG_A | PG_M;
 2629 #ifdef SMP
 2630                         PMAP1cpu = PCPU_GET(cpuid);
 2631 #endif
 2632                         invlcaddr(PADDR1);
 2633                         PMAP1changed++;
 2634                 } else
 2635 #ifdef SMP
 2636                 if (PMAP1cpu != PCPU_GET(cpuid)) {
 2637                         PMAP1cpu = PCPU_GET(cpuid);
 2638                         invlcaddr(PADDR1);
 2639                         PMAP1changedcpu++;
 2640                 } else
 2641 #endif
 2642                         PMAP1unchanged++;
 2643                 firstpte = PADDR1;
 2644         } else {
 2645                 mtx_lock(&PMAP2mutex);
 2646                 if ((*PMAP2 & PG_FRAME) != mptepa) {
 2647                         *PMAP2 = mptepa | PG_RW | PG_V | PG_A | PG_M;
 2648                         pmap_invalidate_page(kernel_pmap, (vm_offset_t)PADDR2);
 2649                 }
 2650                 firstpte = PADDR2;
 2651         }
 2652         newpde = mptepa | PG_M | PG_A | (oldpde & PG_U) | PG_RW | PG_V;
 2653         KASSERT((oldpde & PG_A) != 0,
 2654             ("pmap_demote_pde: oldpde is missing PG_A"));
 2655         KASSERT((oldpde & (PG_M | PG_RW)) != PG_RW,
 2656             ("pmap_demote_pde: oldpde is missing PG_M"));
 2657         newpte = oldpde & ~PG_PS;
 2658         if ((newpte & PG_PDE_PAT) != 0)
 2659                 newpte ^= PG_PDE_PAT | PG_PTE_PAT;
 2660 
 2661         /*
 2662          * If the page table page is new, initialize it.
 2663          */
 2664         if (mpte->wire_count == 1) {
 2665                 mpte->wire_count = NPTEPG;
 2666                 pmap_fill_ptp(firstpte, newpte);
 2667         }
 2668         KASSERT((*firstpte & PG_FRAME) == (newpte & PG_FRAME),
 2669             ("pmap_demote_pde: firstpte and newpte map different physical"
 2670             " addresses"));
 2671 
 2672         /*
 2673          * If the mapping has changed attributes, update the page table
 2674          * entries.
 2675          */ 
 2676         if ((*firstpte & PG_PTE_PROMOTE) != (newpte & PG_PTE_PROMOTE))
 2677                 pmap_fill_ptp(firstpte, newpte);
 2678         
 2679         /*
 2680          * Demote the mapping.  This pmap is locked.  The old PDE has
 2681          * PG_A set.  If the old PDE has PG_RW set, it also has PG_M
 2682          * set.  Thus, there is no danger of a race with another
 2683          * processor changing the setting of PG_A and/or PG_M between
 2684          * the read above and the store below. 
 2685          */
 2686         if (workaround_erratum383)
 2687                 pmap_update_pde(pmap, va, pde, newpde);
 2688         else if (pmap == kernel_pmap)
 2689                 pmap_kenter_pde(va, newpde);
 2690         else
 2691                 pde_store(pde, newpde); 
 2692         if (firstpte == PADDR2)
 2693                 mtx_unlock(&PMAP2mutex);
 2694 
 2695         /*
 2696          * Invalidate the recursive mapping of the page table page.
 2697          */
 2698         pmap_invalidate_page(pmap, (vm_offset_t)vtopte(va));
 2699 
 2700         /*
 2701          * Demote the pv entry.  This depends on the earlier demotion
 2702          * of the mapping.  Specifically, the (re)creation of a per-
 2703          * page pv entry might trigger the execution of pmap_collect(),
 2704          * which might reclaim a newly (re)created per-page pv entry
 2705          * and destroy the associated mapping.  In order to destroy
 2706          * the mapping, the PDE must have already changed from mapping
 2707          * the 2mpage to referencing the page table page.
 2708          */
 2709         if ((oldpde & PG_MANAGED) != 0)
 2710                 pmap_pv_demote_pde(pmap, va, oldpde & PG_PS_FRAME);
 2711 
 2712         pmap_pde_demotions++;
 2713         CTR2(KTR_PMAP, "pmap_demote_pde: success for va %#x"
 2714             " in pmap %p", va, pmap);
 2715         return (TRUE);
 2716 }
 2717 
 2718 /*
 2719  * pmap_remove_pde: do the things to unmap a superpage in a process
 2720  */
 2721 static void
 2722 pmap_remove_pde(pmap_t pmap, pd_entry_t *pdq, vm_offset_t sva,
 2723     vm_page_t *free)
 2724 {
 2725         struct md_page *pvh;
 2726         pd_entry_t oldpde;
 2727         vm_offset_t eva, va;
 2728         vm_page_t m, mpte;
 2729 
 2730         PMAP_LOCK_ASSERT(pmap, MA_OWNED);
 2731         KASSERT((sva & PDRMASK) == 0,
 2732             ("pmap_remove_pde: sva is not 4mpage aligned"));
 2733         oldpde = pte_load_clear(pdq);
 2734         if (oldpde & PG_W)
 2735                 pmap->pm_stats.wired_count -= NBPDR / PAGE_SIZE;
 2736 
 2737         /*
 2738          * Machines that don't support invlpg, also don't support
 2739          * PG_G.
 2740          */
 2741         if (oldpde & PG_G)
 2742                 pmap_invalidate_page(kernel_pmap, sva);
 2743         pmap->pm_stats.resident_count -= NBPDR / PAGE_SIZE;
 2744         if (oldpde & PG_MANAGED) {
 2745                 pvh = pa_to_pvh(oldpde & PG_PS_FRAME);
 2746                 pmap_pvh_free(pvh, pmap, sva);
 2747                 eva = sva + NBPDR;
 2748                 for (va = sva, m = PHYS_TO_VM_PAGE(oldpde & PG_PS_FRAME);
 2749                     va < eva; va += PAGE_SIZE, m++) {
 2750                         if ((oldpde & (PG_M | PG_RW)) == (PG_M | PG_RW))
 2751                                 vm_page_dirty(m);
 2752                         if (oldpde & PG_A)
 2753                                 vm_page_flag_set(m, PG_REFERENCED);
 2754                         if (TAILQ_EMPTY(&m->md.pv_list) &&
 2755                             TAILQ_EMPTY(&pvh->pv_list))
 2756                                 vm_page_flag_clear(m, PG_WRITEABLE);
 2757                 }
 2758         }
 2759         if (pmap == kernel_pmap) {
 2760                 if (!pmap_demote_pde(pmap, pdq, sva))
 2761                         panic("pmap_remove_pde: failed demotion");
 2762         } else {
 2763                 mpte = pmap_lookup_pt_page(pmap, sva);
 2764                 if (mpte != NULL) {
 2765                         pmap_remove_pt_page(pmap, mpte);
 2766                         pmap->pm_stats.resident_count--;
 2767                         KASSERT(mpte->wire_count == NPTEPG,
 2768                             ("pmap_remove_pde: pte page wire count error"));
 2769                         mpte->wire_count = 0;
 2770                         pmap_add_delayed_free_list(mpte, free, FALSE);
 2771                         atomic_subtract_int(&cnt.v_wire_count, 1);
 2772                 }
 2773         }
 2774 }
 2775 
 2776 /*
 2777  * pmap_remove_pte: do the things to unmap a page in a process
 2778  */
 2779 static int
 2780 pmap_remove_pte(pmap_t pmap, pt_entry_t *ptq, vm_offset_t va, vm_page_t *free)
 2781 {
 2782         pt_entry_t oldpte;
 2783         vm_page_t m;
 2784 
 2785         mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 2786         PMAP_LOCK_ASSERT(pmap, MA_OWNED);
 2787         oldpte = pte_load_clear(ptq);
 2788         if (oldpte & PG_W)
 2789                 pmap->pm_stats.wired_count -= 1;
 2790         /*
 2791          * Machines that don't support invlpg, also don't support
 2792          * PG_G.
 2793          */
 2794         if (oldpte & PG_G)
 2795                 pmap_invalidate_page(kernel_pmap, va);
 2796         pmap->pm_stats.resident_count -= 1;
 2797         if (oldpte & PG_MANAGED) {
 2798                 m = PHYS_TO_VM_PAGE(oldpte & PG_FRAME);
 2799                 if ((oldpte & (PG_M | PG_RW)) == (PG_M | PG_RW))
 2800                         vm_page_dirty(m);
 2801                 if (oldpte & PG_A)
 2802                         vm_page_flag_set(m, PG_REFERENCED);
 2803                 pmap_remove_entry(pmap, m, va);
 2804         }
 2805         return (pmap_unuse_pt(pmap, va, free));
 2806 }
 2807 
 2808 /*
 2809  * Remove a single page from a process address space
 2810  */
 2811 static void
 2812 pmap_remove_page(pmap_t pmap, vm_offset_t va, vm_page_t *free)
 2813 {
 2814         pt_entry_t *pte;
 2815 
 2816         mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 2817         KASSERT(curthread->td_pinned > 0, ("curthread not pinned"));
 2818         PMAP_LOCK_ASSERT(pmap, MA_OWNED);
 2819         if ((pte = pmap_pte_quick(pmap, va)) == NULL || *pte == 0)
 2820                 return;
 2821         pmap_remove_pte(pmap, pte, va, free);
 2822         pmap_invalidate_page(pmap, va);
 2823 }
 2824 
 2825 /*
 2826  *      Remove the given range of addresses from the specified map.
 2827  *
 2828  *      It is assumed that the start and end are properly
 2829  *      rounded to the page size.
 2830  */
 2831 void
 2832 pmap_remove(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
 2833 {
 2834         vm_offset_t pdnxt;
 2835         pd_entry_t ptpaddr;
 2836         pt_entry_t *pte;
 2837         vm_page_t free = NULL;
 2838         int anyvalid;
 2839 
 2840         /*
 2841          * Perform an unsynchronized read.  This is, however, safe.
 2842          */
 2843         if (pmap->pm_stats.resident_count == 0)
 2844                 return;
 2845 
 2846         anyvalid = 0;
 2847 
 2848         vm_page_lock_queues();
 2849         sched_pin();
 2850         PMAP_LOCK(pmap);
 2851 
 2852         /*
 2853          * special handling of removing one page.  a very
 2854          * common operation and easy to short circuit some
 2855          * code.
 2856          */
 2857         if ((sva + PAGE_SIZE == eva) && 
 2858             ((pmap->pm_pdir[(sva >> PDRSHIFT)] & PG_PS) == 0)) {
 2859                 pmap_remove_page(pmap, sva, &free);
 2860                 goto out;
 2861         }
 2862 
 2863         for (; sva < eva; sva = pdnxt) {
 2864                 unsigned pdirindex;
 2865 
 2866                 /*
 2867                  * Calculate index for next page table.
 2868                  */
 2869                 pdnxt = (sva + NBPDR) & ~PDRMASK;
 2870                 if (pdnxt < sva)
 2871                         pdnxt = eva;
 2872                 if (pmap->pm_stats.resident_count == 0)
 2873                         break;
 2874 
 2875                 pdirindex = sva >> PDRSHIFT;
 2876                 ptpaddr = pmap->pm_pdir[pdirindex];
 2877 
 2878                 /*
 2879                  * Weed out invalid mappings. Note: we assume that the page
 2880                  * directory table is always allocated, and in kernel virtual.
 2881                  */
 2882                 if (ptpaddr == 0)
 2883                         continue;
 2884 
 2885                 /*
 2886                  * Check for large page.
 2887                  */
 2888                 if ((ptpaddr & PG_PS) != 0) {
 2889                         /*
 2890                          * Are we removing the entire large page?  If not,
 2891                          * demote the mapping and fall through.
 2892                          */
 2893                         if (sva + NBPDR == pdnxt && eva >= pdnxt) {
 2894                                 /*
 2895                                  * The TLB entry for a PG_G mapping is
 2896                                  * invalidated by pmap_remove_pde().
 2897                                  */
 2898                                 if ((ptpaddr & PG_G) == 0)
 2899                                         anyvalid = 1;
 2900                                 pmap_remove_pde(pmap,
 2901                                     &pmap->pm_pdir[pdirindex], sva, &free);
 2902                                 continue;
 2903                         } else if (!pmap_demote_pde(pmap,
 2904                             &pmap->pm_pdir[pdirindex], sva)) {
 2905                                 /* The large page mapping was destroyed. */
 2906                                 continue;
 2907                         }
 2908                 }
 2909 
 2910                 /*
 2911                  * Limit our scan to either the end of the va represented
 2912                  * by the current page table page, or to the end of the
 2913                  * range being removed.
 2914                  */
 2915                 if (pdnxt > eva)
 2916                         pdnxt = eva;
 2917 
 2918                 for (pte = pmap_pte_quick(pmap, sva); sva != pdnxt; pte++,
 2919                     sva += PAGE_SIZE) {
 2920                         if (*pte == 0)
 2921                                 continue;
 2922 
 2923                         /*
 2924                          * The TLB entry for a PG_G mapping is invalidated
 2925                          * by pmap_remove_pte().
 2926                          */
 2927                         if ((*pte & PG_G) == 0)
 2928                                 anyvalid = 1;
 2929                         if (pmap_remove_pte(pmap, pte, sva, &free))
 2930                                 break;
 2931                 }
 2932         }
 2933 out:
 2934         sched_unpin();
 2935         if (anyvalid)
 2936                 pmap_invalidate_all(pmap);
 2937         vm_page_unlock_queues();
 2938         PMAP_UNLOCK(pmap);
 2939         pmap_free_zero_pages(free);
 2940 }
 2941 
 2942 /*
 2943  *      Routine:        pmap_remove_all
 2944  *      Function:
 2945  *              Removes this physical page from
 2946  *              all physical maps in which it resides.
 2947  *              Reflects back modify bits to the pager.
 2948  *
 2949  *      Notes:
 2950  *              Original versions of this routine were very
 2951  *              inefficient because they iteratively called
 2952  *              pmap_remove (slow...)
 2953  */
 2954 
 2955 void
 2956 pmap_remove_all(vm_page_t m)
 2957 {
 2958         struct md_page *pvh;
 2959         pv_entry_t pv;
 2960         pmap_t pmap;
 2961         pt_entry_t *pte, tpte;
 2962         pd_entry_t *pde;
 2963         vm_offset_t va;
 2964         vm_page_t free;
 2965 
 2966         KASSERT((m->flags & PG_FICTITIOUS) == 0,
 2967             ("pmap_remove_all: page %p is fictitious", m));
 2968         mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 2969         sched_pin();
 2970         pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
 2971         while ((pv = TAILQ_FIRST(&pvh->pv_list)) != NULL) {
 2972                 va = pv->pv_va;
 2973                 pmap = PV_PMAP(pv);
 2974                 PMAP_LOCK(pmap);
 2975                 pde = pmap_pde(pmap, va);
 2976                 (void)pmap_demote_pde(pmap, pde, va);
 2977                 PMAP_UNLOCK(pmap);
 2978         }
 2979         while ((pv = TAILQ_FIRST(&m->md.pv_list)) != NULL) {
 2980                 pmap = PV_PMAP(pv);
 2981                 PMAP_LOCK(pmap);
 2982                 pmap->pm_stats.resident_count--;
 2983                 pde = pmap_pde(pmap, pv->pv_va);
 2984                 KASSERT((*pde & PG_PS) == 0, ("pmap_remove_all: found"
 2985                     " a 4mpage in page %p's pv list", m));
 2986                 pte = pmap_pte_quick(pmap, pv->pv_va);
 2987                 tpte = pte_load_clear(pte);
 2988                 if (tpte & PG_W)
 2989                         pmap->pm_stats.wired_count--;
 2990                 if (tpte & PG_A)
 2991                         vm_page_flag_set(m, PG_REFERENCED);
 2992 
 2993                 /*
 2994                  * Update the vm_page_t clean and reference bits.
 2995                  */
 2996                 if ((tpte & (PG_M | PG_RW)) == (PG_M | PG_RW))
 2997                         vm_page_dirty(m);
 2998                 free = NULL;
 2999                 pmap_unuse_pt(pmap, pv->pv_va, &free);
 3000                 pmap_invalidate_page(pmap, pv->pv_va);
 3001                 pmap_free_zero_pages(free);
 3002                 TAILQ_REMOVE(&m->md.pv_list, pv, pv_list);
 3003                 free_pv_entry(pmap, pv);
 3004                 PMAP_UNLOCK(pmap);
 3005         }
 3006         vm_page_flag_clear(m, PG_WRITEABLE);
 3007         sched_unpin();
 3008 }
 3009 
 3010 /*
 3011  * pmap_protect_pde: do the things to protect a 4mpage in a process
 3012  */
 3013 static boolean_t
 3014 pmap_protect_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t sva, vm_prot_t prot)
 3015 {
 3016         pd_entry_t newpde, oldpde;
 3017         vm_offset_t eva, va;
 3018         vm_page_t m;
 3019         boolean_t anychanged;
 3020 
 3021         PMAP_LOCK_ASSERT(pmap, MA_OWNED);
 3022         KASSERT((sva & PDRMASK) == 0,
 3023             ("pmap_protect_pde: sva is not 4mpage aligned"));
 3024         anychanged = FALSE;
 3025 retry:
 3026         oldpde = newpde = *pde;
 3027         if (oldpde & PG_MANAGED) {
 3028                 eva = sva + NBPDR;
 3029                 for (va = sva, m = PHYS_TO_VM_PAGE(oldpde & PG_PS_FRAME);
 3030                     va < eva; va += PAGE_SIZE, m++) {
 3031                         /*
 3032                          * In contrast to the analogous operation on a 4KB page
 3033                          * mapping, the mapping's PG_A flag is not cleared and
 3034                          * the page's PG_REFERENCED flag is not set.  The
 3035                          * reason is that pmap_demote_pde() expects that a 2/4MB
 3036                          * page mapping with a stored page table page has PG_A
 3037                          * set.
 3038                          */
 3039                         if ((oldpde & (PG_M | PG_RW)) == (PG_M | PG_RW))
 3040                                 vm_page_dirty(m);
 3041                 }
 3042         }
 3043         if ((prot & VM_PROT_WRITE) == 0)
 3044                 newpde &= ~(PG_RW | PG_M);
 3045 #ifdef PAE
 3046         if ((prot & VM_PROT_EXECUTE) == 0)
 3047                 newpde |= pg_nx;
 3048 #endif
 3049         if (newpde != oldpde) {
 3050                 if (!pde_cmpset(pde, oldpde, newpde))
 3051                         goto retry;
 3052                 if (oldpde & PG_G)
 3053                         pmap_invalidate_page(pmap, sva);
 3054                 else
 3055                         anychanged = TRUE;
 3056         }
 3057         return (anychanged);
 3058 }
 3059 
 3060 /*
 3061  *      Set the physical protection on the
 3062  *      specified range of this map as requested.
 3063  */
 3064 void
 3065 pmap_protect(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot)
 3066 {
 3067         vm_offset_t pdnxt;
 3068         pd_entry_t ptpaddr;
 3069         pt_entry_t *pte;
 3070         int anychanged;
 3071 
 3072         if ((prot & VM_PROT_READ) == VM_PROT_NONE) {
 3073                 pmap_remove(pmap, sva, eva);
 3074                 return;
 3075         }
 3076 
 3077 #ifdef PAE
 3078         if ((prot & (VM_PROT_WRITE|VM_PROT_EXECUTE)) ==
 3079             (VM_PROT_WRITE|VM_PROT_EXECUTE))
 3080                 return;
 3081 #else
 3082         if (prot & VM_PROT_WRITE)
 3083                 return;
 3084 #endif
 3085 
 3086         anychanged = 0;
 3087 
 3088         vm_page_lock_queues();
 3089         sched_pin();
 3090         PMAP_LOCK(pmap);
 3091         for (; sva < eva; sva = pdnxt) {
 3092                 pt_entry_t obits, pbits;
 3093                 unsigned pdirindex;
 3094 
 3095                 pdnxt = (sva + NBPDR) & ~PDRMASK;
 3096                 if (pdnxt < sva)
 3097                         pdnxt = eva;
 3098 
 3099                 pdirindex = sva >> PDRSHIFT;
 3100                 ptpaddr = pmap->pm_pdir[pdirindex];
 3101 
 3102                 /*
 3103                  * Weed out invalid mappings. Note: we assume that the page
 3104                  * directory table is always allocated, and in kernel virtual.
 3105                  */
 3106                 if (ptpaddr == 0)
 3107                         continue;
 3108 
 3109                 /*
 3110                  * Check for large page.
 3111                  */
 3112                 if ((ptpaddr & PG_PS) != 0) {
 3113                         /*
 3114                          * Are we protecting the entire large page?  If not,
 3115                          * demote the mapping and fall through.
 3116                          */
 3117                         if (sva + NBPDR == pdnxt && eva >= pdnxt) {
 3118                                 /*
 3119                                  * The TLB entry for a PG_G mapping is
 3120                                  * invalidated by pmap_protect_pde().
 3121                                  */
 3122                                 if (pmap_protect_pde(pmap,
 3123                                     &pmap->pm_pdir[pdirindex], sva, prot))
 3124                                         anychanged = 1;
 3125                                 continue;
 3126                         } else if (!pmap_demote_pde(pmap,
 3127                             &pmap->pm_pdir[pdirindex], sva)) {
 3128                                 /* The large page mapping was destroyed. */
 3129                                 continue;
 3130                         }
 3131                 }
 3132 
 3133                 if (pdnxt > eva)
 3134                         pdnxt = eva;
 3135 
 3136                 for (pte = pmap_pte_quick(pmap, sva); sva != pdnxt; pte++,
 3137                     sva += PAGE_SIZE) {
 3138                         vm_page_t m;
 3139 
 3140 retry:
 3141                         /*
 3142                          * Regardless of whether a pte is 32 or 64 bits in
 3143                          * size, PG_RW, PG_A, and PG_M are among the least
 3144                          * significant 32 bits.
 3145                          */
 3146                         obits = pbits = *pte;
 3147                         if ((pbits & PG_V) == 0)
 3148                                 continue;
 3149                         if (pbits & PG_MANAGED) {
 3150                                 m = NULL;
 3151                                 if (pbits & PG_A) {
 3152                                         m = PHYS_TO_VM_PAGE(pbits & PG_FRAME);
 3153                                         vm_page_flag_set(m, PG_REFERENCED);
 3154                                         pbits &= ~PG_A;
 3155                                 }
 3156                                 if ((pbits & (PG_M | PG_RW)) == (PG_M | PG_RW)) {
 3157                                         if (m == NULL)
 3158                                                 m = PHYS_TO_VM_PAGE(pbits & PG_FRAME);
 3159                                         vm_page_dirty(m);
 3160                                 }
 3161                         }
 3162 
 3163                         if ((prot & VM_PROT_WRITE) == 0)
 3164                                 pbits &= ~(PG_RW | PG_M);
 3165 #ifdef PAE
 3166                         if ((prot & VM_PROT_EXECUTE) == 0)
 3167                                 pbits |= pg_nx;
 3168 #endif
 3169 
 3170                         if (pbits != obits) {
 3171 #ifdef PAE
 3172                                 if (!atomic_cmpset_64(pte, obits, pbits))
 3173                                         goto retry;
 3174 #else
 3175                                 if (!atomic_cmpset_int((u_int *)pte, obits,
 3176                                     pbits))
 3177                                         goto retry;
 3178 #endif
 3179                                 if (obits & PG_G)
 3180                                         pmap_invalidate_page(pmap, sva);
 3181                                 else
 3182                                         anychanged = 1;
 3183                         }
 3184                 }
 3185         }
 3186         sched_unpin();
 3187         if (anychanged)
 3188                 pmap_invalidate_all(pmap);
 3189         vm_page_unlock_queues();
 3190         PMAP_UNLOCK(pmap);
 3191 }
 3192 
 3193 /*
 3194  * Tries to promote the 512 or 1024, contiguous 4KB page mappings that are
 3195  * within a single page table page (PTP) to a single 2- or 4MB page mapping.
 3196  * For promotion to occur, two conditions must be met: (1) the 4KB page
 3197  * mappings must map aligned, contiguous physical memory and (2) the 4KB page
 3198  * mappings must have identical characteristics.
 3199  *
 3200  * Managed (PG_MANAGED) mappings within the kernel address space are not
 3201  * promoted.  The reason is that kernel PDEs are replicated in each pmap but
 3202  * pmap_clear_ptes() and pmap_ts_referenced() only read the PDE from the kernel
 3203  * pmap.
 3204  */
 3205 static void
 3206 pmap_promote_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t va)
 3207 {
 3208         pd_entry_t newpde;
 3209         pt_entry_t *firstpte, oldpte, pa, *pte;
 3210         vm_offset_t oldpteva;
 3211         vm_page_t mpte;
 3212 
 3213         PMAP_LOCK_ASSERT(pmap, MA_OWNED);
 3214 
 3215         /*
 3216          * Examine the first PTE in the specified PTP.  Abort if this PTE is
 3217          * either invalid, unused, or does not map the first 4KB physical page
 3218          * within a 2- or 4MB page.
 3219          */
 3220         firstpte = pmap_pte_quick(pmap, trunc_4mpage(va));
 3221 setpde:
 3222         newpde = *firstpte;
 3223         if ((newpde & ((PG_FRAME & PDRMASK) | PG_A | PG_V)) != (PG_A | PG_V)) {
 3224                 pmap_pde_p_failures++;
 3225                 CTR2(KTR_PMAP, "pmap_promote_pde: failure for va %#x"
 3226                     " in pmap %p", va, pmap);
 3227                 return;
 3228         }
 3229         if ((*firstpte & PG_MANAGED) != 0 && pmap == kernel_pmap) {
 3230                 pmap_pde_p_failures++;
 3231                 CTR2(KTR_PMAP, "pmap_promote_pde: failure for va %#x"
 3232                     " in pmap %p", va, pmap);
 3233                 return;
 3234         }
 3235         if ((newpde & (PG_M | PG_RW)) == PG_RW) {
 3236                 /*
 3237                  * When PG_M is already clear, PG_RW can be cleared without
 3238                  * a TLB invalidation.
 3239                  */
 3240                 if (!atomic_cmpset_int((u_int *)firstpte, newpde, newpde &
 3241                     ~PG_RW))  
 3242                         goto setpde;
 3243                 newpde &= ~PG_RW;
 3244         }
 3245 
 3246         /* 
 3247          * Examine each of the other PTEs in the specified PTP.  Abort if this
 3248          * PTE maps an unexpected 4KB physical page or does not have identical
 3249          * characteristics to the first PTE.
 3250          */
 3251         pa = (newpde & (PG_PS_FRAME | PG_A | PG_V)) + NBPDR - PAGE_SIZE;
 3252         for (pte = firstpte + NPTEPG - 1; pte > firstpte; pte--) {
 3253 setpte:
 3254                 oldpte = *pte;
 3255                 if ((oldpte & (PG_FRAME | PG_A | PG_V)) != pa) {
 3256                         pmap_pde_p_failures++;
 3257                         CTR2(KTR_PMAP, "pmap_promote_pde: failure for va %#x"
 3258                             " in pmap %p", va, pmap);
 3259                         return;
 3260                 }
 3261                 if ((oldpte & (PG_M | PG_RW)) == PG_RW) {
 3262                         /*
 3263                          * When PG_M is already clear, PG_RW can be cleared
 3264                          * without a TLB invalidation.
 3265                          */
 3266                         if (!atomic_cmpset_int((u_int *)pte, oldpte,
 3267                             oldpte & ~PG_RW))
 3268                                 goto setpte;
 3269                         oldpte &= ~PG_RW;
 3270                         oldpteva = (oldpte & PG_FRAME & PDRMASK) |
 3271                             (va & ~PDRMASK);
 3272                         CTR2(KTR_PMAP, "pmap_promote_pde: protect for va %#x"
 3273                             " in pmap %p", oldpteva, pmap);
 3274                 }
 3275                 if ((oldpte & PG_PTE_PROMOTE) != (newpde & PG_PTE_PROMOTE)) {
 3276                         pmap_pde_p_failures++;
 3277                         CTR2(KTR_PMAP, "pmap_promote_pde: failure for va %#x"
 3278                             " in pmap %p", va, pmap);
 3279                         return;
 3280                 }
 3281                 pa -= PAGE_SIZE;
 3282         }
 3283 
 3284         /*
 3285          * Save the page table page in its current state until the PDE
 3286          * mapping the superpage is demoted by pmap_demote_pde() or
 3287          * destroyed by pmap_remove_pde(). 
 3288          */
 3289         mpte = PHYS_TO_VM_PAGE(*pde & PG_FRAME);
 3290         KASSERT(mpte >= vm_page_array &&
 3291             mpte < &vm_page_array[vm_page_array_size],
 3292             ("pmap_promote_pde: page table page is out of range"));
 3293         KASSERT(mpte->pindex == va >> PDRSHIFT,
 3294             ("pmap_promote_pde: page table page's pindex is wrong"));
 3295         pmap_insert_pt_page(pmap, mpte);
 3296 
 3297         /*
 3298          * Promote the pv entries.
 3299          */
 3300         if ((newpde & PG_MANAGED) != 0)
 3301                 pmap_pv_promote_pde(pmap, va, newpde & PG_PS_FRAME);
 3302 
 3303         /*
 3304          * Propagate the PAT index to its proper position.
 3305          */
 3306         if ((newpde & PG_PTE_PAT) != 0)
 3307                 newpde ^= PG_PDE_PAT | PG_PTE_PAT;
 3308 
 3309         /*
 3310          * Map the superpage.
 3311          */
 3312         if (workaround_erratum383)
 3313                 pmap_update_pde(pmap, va, pde, PG_PS | newpde);
 3314         else if (pmap == kernel_pmap)
 3315                 pmap_kenter_pde(va, PG_PS | newpde);
 3316         else
 3317                 pde_store(pde, PG_PS | newpde);
 3318 
 3319         pmap_pde_promotions++;
 3320         CTR2(KTR_PMAP, "pmap_promote_pde: success for va %#x"
 3321             " in pmap %p", va, pmap);
 3322 }
 3323 
 3324 /*
 3325  *      Insert the given physical page (p) at
 3326  *      the specified virtual address (v) in the
 3327  *      target physical map with the protection requested.
 3328  *
 3329  *      If specified, the page will be wired down, meaning
 3330  *      that the related pte can not be reclaimed.
 3331  *
 3332  *      NB:  This is the only routine which MAY NOT lazy-evaluate
 3333  *      or lose information.  That is, this routine must actually
 3334  *      insert this page into the given map NOW.
 3335  */
 3336 void
 3337 pmap_enter(pmap_t pmap, vm_offset_t va, vm_prot_t access, vm_page_t m,
 3338     vm_prot_t prot, boolean_t wired)
 3339 {
 3340         vm_paddr_t pa;
 3341         pd_entry_t *pde;
 3342         pt_entry_t *pte;
 3343         vm_paddr_t opa;
 3344         pt_entry_t origpte, newpte;
 3345         vm_page_t mpte, om;
 3346         boolean_t invlva;
 3347 
 3348         va = trunc_page(va);
 3349         KASSERT(va <= VM_MAX_KERNEL_ADDRESS, ("pmap_enter: toobig"));
 3350         KASSERT(va < UPT_MIN_ADDRESS || va >= UPT_MAX_ADDRESS,
 3351             ("pmap_enter: invalid to pmap_enter page table pages (va: 0x%x)", va));
 3352 
 3353         mpte = NULL;
 3354 
 3355         vm_page_lock_queues();
 3356         PMAP_LOCK(pmap);
 3357         sched_pin();
 3358 
 3359         /*
 3360          * In the case that a page table page is not
 3361          * resident, we are creating it here.
 3362          */
 3363         if (va < VM_MAXUSER_ADDRESS) {
 3364                 mpte = pmap_allocpte(pmap, va, M_WAITOK);
 3365         }
 3366 
 3367         pde = pmap_pde(pmap, va);
 3368         if ((*pde & PG_PS) != 0)
 3369                 panic("pmap_enter: attempted pmap_enter on 4MB page");
 3370         pte = pmap_pte_quick(pmap, va);
 3371 
 3372         /*
 3373          * Page Directory table entry not valid, we need a new PT page
 3374          */
 3375         if (pte == NULL) {
 3376                 panic("pmap_enter: invalid page directory pdir=%#jx, va=%#x",
 3377                         (uintmax_t)pmap->pm_pdir[PTDPTDI], va);
 3378         }
 3379 
 3380         pa = VM_PAGE_TO_PHYS(m);
 3381         om = NULL;
 3382         origpte = *pte;
 3383         opa = origpte & PG_FRAME;
 3384 
 3385         /*
 3386          * Mapping has not changed, must be protection or wiring change.
 3387          */
 3388         if (origpte && (opa == pa)) {
 3389                 /*
 3390                  * Wiring change, just update stats. We don't worry about
 3391                  * wiring PT pages as they remain resident as long as there
 3392                  * are valid mappings in them. Hence, if a user page is wired,
 3393                  * the PT page will be also.
 3394                  */
 3395                 if (wired && ((origpte & PG_W) == 0))
 3396                         pmap->pm_stats.wired_count++;
 3397                 else if (!wired && (origpte & PG_W))
 3398                         pmap->pm_stats.wired_count--;
 3399 
 3400                 /*
 3401                  * Remove extra pte reference
 3402                  */
 3403                 if (mpte)
 3404                         mpte->wire_count--;
 3405 
 3406                 /*
 3407                  * We might be turning off write access to the page,
 3408                  * so we go ahead and sense modify status.
 3409                  */
 3410                 if (origpte & PG_MANAGED) {
 3411                         om = m;
 3412                         pa |= PG_MANAGED;
 3413                 }
 3414                 goto validate;
 3415         } 
 3416         /*
 3417          * Mapping has changed, invalidate old range and fall through to
 3418          * handle validating new mapping.
 3419          */
 3420         if (opa) {
 3421                 if (origpte & PG_W)
 3422                         pmap->pm_stats.wired_count--;
 3423                 if (origpte & PG_MANAGED) {
 3424                         om = PHYS_TO_VM_PAGE(opa);
 3425                         pmap_remove_entry(pmap, om, va);
 3426                 }
 3427                 if (mpte != NULL) {
 3428                         mpte->wire_count--;
 3429                         KASSERT(mpte->wire_count > 0,
 3430                             ("pmap_enter: missing reference to page table page,"
 3431                              " va: 0x%x", va));
 3432                 }
 3433         } else
 3434                 pmap->pm_stats.resident_count++;
 3435 
 3436         /*
 3437          * Enter on the PV list if part of our managed memory.
 3438          */
 3439         if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0) {
 3440                 KASSERT(va < kmi.clean_sva || va >= kmi.clean_eva,
 3441                     ("pmap_enter: managed mapping within the clean submap"));
 3442                 pmap_insert_entry(pmap, va, m);
 3443                 pa |= PG_MANAGED;
 3444         }
 3445 
 3446         /*
 3447          * Increment counters
 3448          */
 3449         if (wired)
 3450                 pmap->pm_stats.wired_count++;
 3451 
 3452 validate:
 3453         /*
 3454          * Now validate mapping with desired protection/wiring.
 3455          */
 3456         newpte = (pt_entry_t)(pa | pmap_cache_bits(m->md.pat_mode, 0) | PG_V);
 3457         if ((prot & VM_PROT_WRITE) != 0) {
 3458                 newpte |= PG_RW;
 3459                 vm_page_flag_set(m, PG_WRITEABLE);
 3460         }
 3461 #ifdef PAE
 3462         if ((prot & VM_PROT_EXECUTE) == 0)
 3463                 newpte |= pg_nx;
 3464 #endif
 3465         if (wired)
 3466                 newpte |= PG_W;
 3467         if (va < VM_MAXUSER_ADDRESS)
 3468                 newpte |= PG_U;
 3469         if (pmap == kernel_pmap)
 3470                 newpte |= pgeflag;
 3471 
 3472         /*
 3473          * if the mapping or permission bits are different, we need
 3474          * to update the pte.
 3475          */
 3476         if ((origpte & ~(PG_M|PG_A)) != newpte) {
 3477                 newpte |= PG_A;
 3478                 if ((access & VM_PROT_WRITE) != 0)
 3479                         newpte |= PG_M;
 3480                 if (origpte & PG_V) {
 3481                         invlva = FALSE;
 3482                         origpte = pte_load_store(pte, newpte);
 3483                         if (origpte & PG_A) {
 3484                                 if (origpte & PG_MANAGED)
 3485                                         vm_page_flag_set(om, PG_REFERENCED);
 3486                                 if (opa != VM_PAGE_TO_PHYS(m))
 3487                                         invlva = TRUE;
 3488 #ifdef PAE
 3489                                 if ((origpte & PG_NX) == 0 &&
 3490                                     (newpte & PG_NX) != 0)
 3491                                         invlva = TRUE;
 3492 #endif
 3493                         }
 3494                         if ((origpte & (PG_M | PG_RW)) == (PG_M | PG_RW)) {
 3495                                 if ((origpte & PG_MANAGED) != 0)
 3496                                         vm_page_dirty(om);
 3497                                 if ((prot & VM_PROT_WRITE) == 0)
 3498                                         invlva = TRUE;
 3499                         }
 3500                         if (invlva)
 3501                                 pmap_invalidate_page(pmap, va);
 3502                 } else
 3503                         pte_store(pte, newpte);
 3504         }
 3505 
 3506         /*
 3507          * If both the page table page and the reservation are fully
 3508          * populated, then attempt promotion.
 3509          */
 3510         if ((mpte == NULL || mpte->wire_count == NPTEPG) &&
 3511             pg_ps_enabled && vm_reserv_level_iffullpop(m) == 0)
 3512                 pmap_promote_pde(pmap, pde, va);
 3513 
 3514         sched_unpin();
 3515         vm_page_unlock_queues();
 3516         PMAP_UNLOCK(pmap);
 3517 }
 3518 
 3519 /*
 3520  * Tries to create a 2- or 4MB page mapping.  Returns TRUE if successful and
 3521  * FALSE otherwise.  Fails if (1) a page table page cannot be allocated without
 3522  * blocking, (2) a mapping already exists at the specified virtual address, or
 3523  * (3) a pv entry cannot be allocated without reclaiming another pv entry. 
 3524  */
 3525 static boolean_t
 3526 pmap_enter_pde(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot)
 3527 {
 3528         pd_entry_t *pde, newpde;
 3529 
 3530         mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 3531         PMAP_LOCK_ASSERT(pmap, MA_OWNED);
 3532         pde = pmap_pde(pmap, va);
 3533         if (*pde != 0) {
 3534                 CTR2(KTR_PMAP, "pmap_enter_pde: failure for va %#lx"
 3535                     " in pmap %p", va, pmap);
 3536                 return (FALSE);
 3537         }
 3538         newpde = VM_PAGE_TO_PHYS(m) | pmap_cache_bits(m->md.pat_mode, 1) |
 3539             PG_PS | PG_V;
 3540         if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0) {
 3541                 newpde |= PG_MANAGED;
 3542 
 3543                 /*
 3544                  * Abort this mapping if its PV entry could not be created.
 3545                  */
 3546                 if (!pmap_pv_insert_pde(pmap, va, VM_PAGE_TO_PHYS(m))) {
 3547                         CTR2(KTR_PMAP, "pmap_enter_pde: failure for va %#lx"
 3548                             " in pmap %p", va, pmap);
 3549                         return (FALSE);
 3550                 }
 3551         }
 3552 #ifdef PAE
 3553         if ((prot & VM_PROT_EXECUTE) == 0)
 3554                 newpde |= pg_nx;
 3555 #endif
 3556         if (va < VM_MAXUSER_ADDRESS)
 3557                 newpde |= PG_U;
 3558 
 3559         /*
 3560          * Increment counters.
 3561          */
 3562         pmap->pm_stats.resident_count += NBPDR / PAGE_SIZE;
 3563 
 3564         /*
 3565          * Map the superpage.
 3566          */
 3567         pde_store(pde, newpde);
 3568 
 3569         pmap_pde_mappings++;
 3570         CTR2(KTR_PMAP, "pmap_enter_pde: success for va %#lx"
 3571             " in pmap %p", va, pmap);
 3572         return (TRUE);
 3573 }
 3574 
 3575 /*
 3576  * Maps a sequence of resident pages belonging to the same object.
 3577  * The sequence begins with the given page m_start.  This page is
 3578  * mapped at the given virtual address start.  Each subsequent page is
 3579  * mapped at a virtual address that is offset from start by the same
 3580  * amount as the page is offset from m_start within the object.  The
 3581  * last page in the sequence is the page with the largest offset from
 3582  * m_start that can be mapped at a virtual address less than the given
 3583  * virtual address end.  Not every virtual page between start and end
 3584  * is mapped; only those for which a resident page exists with the
 3585  * corresponding offset from m_start are mapped.
 3586  */
 3587 void
 3588 pmap_enter_object(pmap_t pmap, vm_offset_t start, vm_offset_t end,
 3589     vm_page_t m_start, vm_prot_t prot)
 3590 {
 3591         vm_offset_t va;
 3592         vm_page_t m, mpte;
 3593         vm_pindex_t diff, psize;
 3594 
 3595         VM_OBJECT_LOCK_ASSERT(m_start->object, MA_OWNED);
 3596         psize = atop(end - start);
 3597         mpte = NULL;
 3598         m = m_start;
 3599         PMAP_LOCK(pmap);
 3600         while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) {
 3601                 va = start + ptoa(diff);
 3602                 if ((va & PDRMASK) == 0 && va + NBPDR <= end &&
 3603                     (VM_PAGE_TO_PHYS(m) & PDRMASK) == 0 &&
 3604                     pg_ps_enabled && vm_reserv_level_iffullpop(m) == 0 &&
 3605                     pmap_enter_pde(pmap, va, m, prot))
 3606                         m = &m[NBPDR / PAGE_SIZE - 1];
 3607                 else
 3608                         mpte = pmap_enter_quick_locked(pmap, va, m, prot,
 3609                             mpte);
 3610                 m = TAILQ_NEXT(m, listq);
 3611         }
 3612         PMAP_UNLOCK(pmap);
 3613 }
 3614 
 3615 /*
 3616  * this code makes some *MAJOR* assumptions:
 3617  * 1. Current pmap & pmap exists.
 3618  * 2. Not wired.
 3619  * 3. Read access.
 3620  * 4. No page table pages.
 3621  * but is *MUCH* faster than pmap_enter...
 3622  */
 3623 
 3624 void
 3625 pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot)
 3626 {
 3627 
 3628         PMAP_LOCK(pmap);
 3629         (void) pmap_enter_quick_locked(pmap, va, m, prot, NULL);
 3630         PMAP_UNLOCK(pmap);
 3631 }
 3632 
 3633 static vm_page_t
 3634 pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va, vm_page_t m,
 3635     vm_prot_t prot, vm_page_t mpte)
 3636 {
 3637         pt_entry_t *pte;
 3638         vm_paddr_t pa;
 3639         vm_page_t free;
 3640 
 3641         KASSERT(va < kmi.clean_sva || va >= kmi.clean_eva ||
 3642             (m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0,
 3643             ("pmap_enter_quick_locked: managed mapping within the clean submap"));
 3644         mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 3645         PMAP_LOCK_ASSERT(pmap, MA_OWNED);
 3646 
 3647         /*
 3648          * In the case that a page table page is not
 3649          * resident, we are creating it here.
 3650          */
 3651         if (va < VM_MAXUSER_ADDRESS) {
 3652                 unsigned ptepindex;
 3653                 pd_entry_t ptepa;
 3654 
 3655                 /*
 3656                  * Calculate pagetable page index
 3657                  */
 3658                 ptepindex = va >> PDRSHIFT;
 3659                 if (mpte && (mpte->pindex == ptepindex)) {
 3660                         mpte->wire_count++;
 3661                 } else {
 3662                         /*
 3663                          * Get the page directory entry
 3664                          */
 3665                         ptepa = pmap->pm_pdir[ptepindex];
 3666 
 3667                         /*
 3668                          * If the page table page is mapped, we just increment
 3669                          * the hold count, and activate it.
 3670                          */
 3671                         if (ptepa) {
 3672                                 if (ptepa & PG_PS)
 3673                                         return (NULL);
 3674                                 mpte = PHYS_TO_VM_PAGE(ptepa & PG_FRAME);
 3675                                 mpte->wire_count++;
 3676                         } else {
 3677                                 mpte = _pmap_allocpte(pmap, ptepindex,
 3678                                     M_NOWAIT);
 3679                                 if (mpte == NULL)
 3680                                         return (mpte);
 3681                         }
 3682                 }
 3683         } else {
 3684                 mpte = NULL;
 3685         }
 3686 
 3687         /*
 3688          * This call to vtopte makes the assumption that we are
 3689          * entering the page into the current pmap.  In order to support
 3690          * quick entry into any pmap, one would likely use pmap_pte_quick.
 3691          * But that isn't as quick as vtopte.
 3692          */
 3693         pte = vtopte(va);
 3694         if (*pte) {
 3695                 if (mpte != NULL) {
 3696                         mpte->wire_count--;
 3697                         mpte = NULL;
 3698                 }
 3699                 return (mpte);
 3700         }
 3701 
 3702         /*
 3703          * Enter on the PV list if part of our managed memory.
 3704          */
 3705         if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0 &&
 3706             !pmap_try_insert_pv_entry(pmap, va, m)) {
 3707                 if (mpte != NULL) {
 3708                         free = NULL;
 3709                         if (pmap_unwire_pte_hold(pmap, mpte, &free)) {
 3710                                 pmap_invalidate_page(pmap, va);
 3711                                 pmap_free_zero_pages(free);
 3712                         }
 3713                         
 3714                         mpte = NULL;
 3715                 }
 3716                 return (mpte);
 3717         }
 3718 
 3719         /*
 3720          * Increment counters
 3721          */
 3722         pmap->pm_stats.resident_count++;
 3723 
 3724         pa = VM_PAGE_TO_PHYS(m) | pmap_cache_bits(m->md.pat_mode, 0);
 3725 #ifdef PAE
 3726         if ((prot & VM_PROT_EXECUTE) == 0)
 3727                 pa |= pg_nx;
 3728 #endif
 3729 
 3730         /*
 3731          * Now validate mapping with RO protection
 3732          */
 3733         if (m->flags & (PG_FICTITIOUS|PG_UNMANAGED))
 3734                 pte_store(pte, pa | PG_V | PG_U);
 3735         else
 3736                 pte_store(pte, pa | PG_V | PG_U | PG_MANAGED);
 3737         return mpte;
 3738 }
 3739 
 3740 /*
 3741  * Make a temporary mapping for a physical address.  This is only intended
 3742  * to be used for panic dumps.
 3743  */
 3744 void *
 3745 pmap_kenter_temporary(vm_paddr_t pa, int i)
 3746 {
 3747         vm_offset_t va;
 3748 
 3749         va = (vm_offset_t)crashdumpmap + (i * PAGE_SIZE);
 3750         pmap_kenter(va, pa);
 3751         invlpg(va);
 3752         return ((void *)crashdumpmap);
 3753 }
 3754 
 3755 /*
 3756  * This code maps large physical mmap regions into the
 3757  * processor address space.  Note that some shortcuts
 3758  * are taken, but the code works.
 3759  */
 3760 void
 3761 pmap_object_init_pt(pmap_t pmap, vm_offset_t addr, vm_object_t object,
 3762     vm_pindex_t pindex, vm_size_t size)
 3763 {
 3764         pd_entry_t *pde;
 3765         vm_paddr_t pa, ptepa;
 3766         vm_page_t p;
 3767         int pat_mode;
 3768 
 3769         VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
 3770         KASSERT(object->type == OBJT_DEVICE || object->type == OBJT_SG,
 3771             ("pmap_object_init_pt: non-device object"));
 3772         if (pseflag && 
 3773             (addr & (NBPDR - 1)) == 0 && (size & (NBPDR - 1)) == 0) {
 3774                 if (!vm_object_populate(object, pindex, pindex + atop(size)))
 3775                         return;
 3776                 p = vm_page_lookup(object, pindex);
 3777                 KASSERT(p->valid == VM_PAGE_BITS_ALL,
 3778                     ("pmap_object_init_pt: invalid page %p", p));
 3779                 pat_mode = p->md.pat_mode;
 3780 
 3781                 /*
 3782                  * Abort the mapping if the first page is not physically
 3783                  * aligned to a 2/4MB page boundary.
 3784                  */
 3785                 ptepa = VM_PAGE_TO_PHYS(p);
 3786                 if (ptepa & (NBPDR - 1))
 3787                         return;
 3788 
 3789                 /*
 3790                  * Skip the first page.  Abort the mapping if the rest of
 3791                  * the pages are not physically contiguous or have differing
 3792                  * memory attributes.
 3793                  */
 3794                 p = TAILQ_NEXT(p, listq);
 3795                 for (pa = ptepa + PAGE_SIZE; pa < ptepa + size;
 3796                     pa += PAGE_SIZE) {
 3797                         KASSERT(p->valid == VM_PAGE_BITS_ALL,
 3798                             ("pmap_object_init_pt: invalid page %p", p));
 3799                         if (pa != VM_PAGE_TO_PHYS(p) ||
 3800                             pat_mode != p->md.pat_mode)
 3801                                 return;
 3802                         p = TAILQ_NEXT(p, listq);
 3803                 }
 3804 
 3805                 /*
 3806                  * Map using 2/4MB pages.  Since "ptepa" is 2/4M aligned and
 3807                  * "size" is a multiple of 2/4M, adding the PAT setting to
 3808                  * "pa" will not affect the termination of this loop.
 3809                  */
 3810                 PMAP_LOCK(pmap);
 3811                 for (pa = ptepa | pmap_cache_bits(pat_mode, 1); pa < ptepa +
 3812                     size; pa += NBPDR) {
 3813                         pde = pmap_pde(pmap, addr);
 3814                         if (*pde == 0) {
 3815                                 pde_store(pde, pa | PG_PS | PG_M | PG_A |
 3816                                     PG_U | PG_RW | PG_V);
 3817                                 pmap->pm_stats.resident_count += NBPDR /
 3818                                     PAGE_SIZE;
 3819                                 pmap_pde_mappings++;
 3820                         }
 3821                         /* Else continue on if the PDE is already valid. */
 3822                         addr += NBPDR;
 3823                 }
 3824                 PMAP_UNLOCK(pmap);
 3825         }
 3826 }
 3827 
 3828 /*
 3829  *      Routine:        pmap_change_wiring
 3830  *      Function:       Change the wiring attribute for a map/virtual-address
 3831  *                      pair.
 3832  *      In/out conditions:
 3833  *                      The mapping must already exist in the pmap.
 3834  */
 3835 void
 3836 pmap_change_wiring(pmap_t pmap, vm_offset_t va, boolean_t wired)
 3837 {
 3838         pd_entry_t *pde;
 3839         pt_entry_t *pte;
 3840         boolean_t are_queues_locked;
 3841 
 3842         are_queues_locked = FALSE;
 3843 retry:
 3844         PMAP_LOCK(pmap);
 3845         pde = pmap_pde(pmap, va);
 3846         if ((*pde & PG_PS) != 0) {
 3847                 if (!wired != ((*pde & PG_W) == 0)) {
 3848                         if (!are_queues_locked) {
 3849                                 are_queues_locked = TRUE;
 3850                                 if (!mtx_trylock(&vm_page_queue_mtx)) {
 3851                                         PMAP_UNLOCK(pmap);
 3852                                         vm_page_lock_queues();
 3853                                         goto retry;
 3854                                 }
 3855                         }
 3856                         if (!pmap_demote_pde(pmap, pde, va))
 3857                                 panic("pmap_change_wiring: demotion failed");
 3858                 } else
 3859                         goto out;
 3860         }
 3861         pte = pmap_pte(pmap, va);
 3862 
 3863         if (wired && !pmap_pte_w(pte))
 3864                 pmap->pm_stats.wired_count++;
 3865         else if (!wired && pmap_pte_w(pte))
 3866                 pmap->pm_stats.wired_count--;
 3867 
 3868         /*
 3869          * Wiring is not a hardware characteristic so there is no need to
 3870          * invalidate TLB.
 3871          */
 3872         pmap_pte_set_w(pte, wired);
 3873         pmap_pte_release(pte);
 3874 out:
 3875         if (are_queues_locked)
 3876                 vm_page_unlock_queues();
 3877         PMAP_UNLOCK(pmap);
 3878 }
 3879 
 3880 
 3881 
 3882 /*
 3883  *      Copy the range specified by src_addr/len
 3884  *      from the source map to the range dst_addr/len
 3885  *      in the destination map.
 3886  *
 3887  *      This routine is only advisory and need not do anything.
 3888  */
 3889 
 3890 void
 3891 pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr, vm_size_t len,
 3892     vm_offset_t src_addr)
 3893 {
 3894         vm_page_t   free;
 3895         vm_offset_t addr;
 3896         vm_offset_t end_addr = src_addr + len;
 3897         vm_offset_t pdnxt;
 3898 
 3899         if (dst_addr != src_addr)
 3900                 return;
 3901 
 3902         if (!pmap_is_current(src_pmap))
 3903                 return;
 3904 
 3905         vm_page_lock_queues();
 3906         if (dst_pmap < src_pmap) {
 3907                 PMAP_LOCK(dst_pmap);
 3908                 PMAP_LOCK(src_pmap);
 3909         } else {
 3910                 PMAP_LOCK(src_pmap);
 3911                 PMAP_LOCK(dst_pmap);
 3912         }
 3913         sched_pin();
 3914         for (addr = src_addr; addr < end_addr; addr = pdnxt) {
 3915                 pt_entry_t *src_pte, *dst_pte;
 3916                 vm_page_t dstmpte, srcmpte;
 3917                 pd_entry_t srcptepaddr;
 3918                 unsigned ptepindex;
 3919 
 3920                 KASSERT(addr < UPT_MIN_ADDRESS,
 3921                     ("pmap_copy: invalid to pmap_copy page tables"));
 3922 
 3923                 pdnxt = (addr + NBPDR) & ~PDRMASK;
 3924                 if (pdnxt < addr)
 3925                         pdnxt = end_addr;
 3926                 ptepindex = addr >> PDRSHIFT;
 3927 
 3928                 srcptepaddr = src_pmap->pm_pdir[ptepindex];
 3929                 if (srcptepaddr == 0)
 3930                         continue;
 3931                         
 3932                 if (srcptepaddr & PG_PS) {
 3933                         if (dst_pmap->pm_pdir[ptepindex] == 0 &&
 3934                             ((srcptepaddr & PG_MANAGED) == 0 ||
 3935                             pmap_pv_insert_pde(dst_pmap, addr, srcptepaddr &
 3936                             PG_PS_FRAME))) {
 3937                                 dst_pmap->pm_pdir[ptepindex] = srcptepaddr &
 3938                                     ~PG_W;
 3939                                 dst_pmap->pm_stats.resident_count +=
 3940                                     NBPDR / PAGE_SIZE;
 3941                         }
 3942                         continue;
 3943                 }
 3944 
 3945                 srcmpte = PHYS_TO_VM_PAGE(srcptepaddr & PG_FRAME);
 3946                 KASSERT(srcmpte->wire_count > 0,
 3947                     ("pmap_copy: source page table page is unused"));
 3948 
 3949                 if (pdnxt > end_addr)
 3950                         pdnxt = end_addr;
 3951 
 3952                 src_pte = vtopte(addr);
 3953                 while (addr < pdnxt) {
 3954                         pt_entry_t ptetemp;
 3955                         ptetemp = *src_pte;
 3956                         /*
 3957                          * we only virtual copy managed pages
 3958                          */
 3959                         if ((ptetemp & PG_MANAGED) != 0) {
 3960                                 dstmpte = pmap_allocpte(dst_pmap, addr,
 3961                                     M_NOWAIT);
 3962                                 if (dstmpte == NULL)
 3963                                         goto out;
 3964                                 dst_pte = pmap_pte_quick(dst_pmap, addr);
 3965                                 if (*dst_pte == 0 &&
 3966                                     pmap_try_insert_pv_entry(dst_pmap, addr,
 3967                                     PHYS_TO_VM_PAGE(ptetemp & PG_FRAME))) {
 3968                                         /*
 3969                                          * Clear the wired, modified, and
 3970                                          * accessed (referenced) bits
 3971                                          * during the copy.
 3972                                          */
 3973                                         *dst_pte = ptetemp & ~(PG_W | PG_M |
 3974                                             PG_A);
 3975                                         dst_pmap->pm_stats.resident_count++;
 3976                                 } else {
 3977                                         free = NULL;
 3978                                         if (pmap_unwire_pte_hold(dst_pmap,
 3979                                             dstmpte, &free)) {
 3980                                                 pmap_invalidate_page(dst_pmap,
 3981                                                     addr);
 3982                                                 pmap_free_zero_pages(free);
 3983                                         }
 3984                                         goto out;
 3985                                 }
 3986                                 if (dstmpte->wire_count >= srcmpte->wire_count)
 3987                                         break;
 3988                         }
 3989                         addr += PAGE_SIZE;
 3990                         src_pte++;
 3991                 }
 3992         }
 3993 out:
 3994         sched_unpin();
 3995         vm_page_unlock_queues();
 3996         PMAP_UNLOCK(src_pmap);
 3997         PMAP_UNLOCK(dst_pmap);
 3998 }       
 3999 
 4000 static __inline void
 4001 pagezero(void *page)
 4002 {
 4003 #if defined(I686_CPU)
 4004         if (cpu_class == CPUCLASS_686) {
 4005 #if defined(CPU_ENABLE_SSE)
 4006                 if (cpu_feature & CPUID_SSE2)
 4007                         sse2_pagezero(page);
 4008                 else
 4009 #endif
 4010                         i686_pagezero(page);
 4011         } else
 4012 #endif
 4013                 bzero(page, PAGE_SIZE);
 4014 }
 4015 
 4016 /*
 4017  *      pmap_zero_page zeros the specified hardware page by mapping 
 4018  *      the page into KVM and using bzero to clear its contents.
 4019  */
 4020 void
 4021 pmap_zero_page(vm_page_t m)
 4022 {
 4023         struct sysmaps *sysmaps;
 4024 
 4025         sysmaps = &sysmaps_pcpu[PCPU_GET(cpuid)];
 4026         mtx_lock(&sysmaps->lock);
 4027         if (*sysmaps->CMAP2)
 4028                 panic("pmap_zero_page: CMAP2 busy");
 4029         sched_pin();
 4030         *sysmaps->CMAP2 = PG_V | PG_RW | VM_PAGE_TO_PHYS(m) | PG_A | PG_M |
 4031             pmap_cache_bits(m->md.pat_mode, 0);
 4032         invlcaddr(sysmaps->CADDR2);
 4033         pagezero(sysmaps->CADDR2);
 4034         *sysmaps->CMAP2 = 0;
 4035         sched_unpin();
 4036         mtx_unlock(&sysmaps->lock);
 4037 }
 4038 
 4039 /*
 4040  *      pmap_zero_page_area zeros the specified hardware page by mapping 
 4041  *      the page into KVM and using bzero to clear its contents.
 4042  *
 4043  *      off and size may not cover an area beyond a single hardware page.
 4044  */
 4045 void
 4046 pmap_zero_page_area(vm_page_t m, int off, int size)
 4047 {
 4048         struct sysmaps *sysmaps;
 4049 
 4050         sysmaps = &sysmaps_pcpu[PCPU_GET(cpuid)];
 4051         mtx_lock(&sysmaps->lock);
 4052         if (*sysmaps->CMAP2)
 4053                 panic("pmap_zero_page_area: CMAP2 busy");
 4054         sched_pin();
 4055         *sysmaps->CMAP2 = PG_V | PG_RW | VM_PAGE_TO_PHYS(m) | PG_A | PG_M |
 4056             pmap_cache_bits(m->md.pat_mode, 0);
 4057         invlcaddr(sysmaps->CADDR2);
 4058         if (off == 0 && size == PAGE_SIZE) 
 4059                 pagezero(sysmaps->CADDR2);
 4060         else
 4061                 bzero((char *)sysmaps->CADDR2 + off, size);
 4062         *sysmaps->CMAP2 = 0;
 4063         sched_unpin();
 4064         mtx_unlock(&sysmaps->lock);
 4065 }
 4066 
 4067 /*
 4068  *      pmap_zero_page_idle zeros the specified hardware page by mapping 
 4069  *      the page into KVM and using bzero to clear its contents.  This
 4070  *      is intended to be called from the vm_pagezero process only and
 4071  *      outside of Giant.
 4072  */
 4073 void
 4074 pmap_zero_page_idle(vm_page_t m)
 4075 {
 4076 
 4077         if (*CMAP3)
 4078                 panic("pmap_zero_page_idle: CMAP3 busy");
 4079         sched_pin();
 4080         *CMAP3 = PG_V | PG_RW | VM_PAGE_TO_PHYS(m) | PG_A | PG_M |
 4081             pmap_cache_bits(m->md.pat_mode, 0);
 4082         invlcaddr(CADDR3);
 4083         pagezero(CADDR3);
 4084         *CMAP3 = 0;
 4085         sched_unpin();
 4086 }
 4087 
 4088 /*
 4089  *      pmap_copy_page copies the specified (machine independent)
 4090  *      page by mapping the page into virtual memory and using
 4091  *      bcopy to copy the page, one machine dependent page at a
 4092  *      time.
 4093  */
 4094 void
 4095 pmap_copy_page(vm_page_t src, vm_page_t dst)
 4096 {
 4097         struct sysmaps *sysmaps;
 4098 
 4099         sysmaps = &sysmaps_pcpu[PCPU_GET(cpuid)];
 4100         mtx_lock(&sysmaps->lock);
 4101         if (*sysmaps->CMAP1)
 4102                 panic("pmap_copy_page: CMAP1 busy");
 4103         if (*sysmaps->CMAP2)
 4104                 panic("pmap_copy_page: CMAP2 busy");
 4105         sched_pin();
 4106         *sysmaps->CMAP1 = PG_V | VM_PAGE_TO_PHYS(src) | PG_A |
 4107             pmap_cache_bits(src->md.pat_mode, 0);
 4108         invlcaddr(sysmaps->CADDR1);
 4109         *sysmaps->CMAP2 = PG_V | PG_RW | VM_PAGE_TO_PHYS(dst) | PG_A | PG_M |
 4110             pmap_cache_bits(dst->md.pat_mode, 0);
 4111         invlcaddr(sysmaps->CADDR2);
 4112         bcopy(sysmaps->CADDR1, sysmaps->CADDR2, PAGE_SIZE);
 4113         *sysmaps->CMAP1 = 0;
 4114         *sysmaps->CMAP2 = 0;
 4115         sched_unpin();
 4116         mtx_unlock(&sysmaps->lock);
 4117 }
 4118 
 4119 /*
 4120  * Returns true if the pmap's pv is one of the first
 4121  * 16 pvs linked to from this page.  This count may
 4122  * be changed upwards or downwards in the future; it
 4123  * is only necessary that true be returned for a small
 4124  * subset of pmaps for proper page aging.
 4125  */
 4126 boolean_t
 4127 pmap_page_exists_quick(pmap_t pmap, vm_page_t m)
 4128 {
 4129         struct md_page *pvh;
 4130         pv_entry_t pv;
 4131         int loops = 0;
 4132 
 4133         if (m->flags & PG_FICTITIOUS)
 4134                 return FALSE;
 4135 
 4136         mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 4137         TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
 4138                 if (PV_PMAP(pv) == pmap) {
 4139                         return TRUE;
 4140                 }
 4141                 loops++;
 4142                 if (loops >= 16)
 4143                         break;
 4144         }
 4145         if (loops < 16) {
 4146                 pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
 4147                 TAILQ_FOREACH(pv, &pvh->pv_list, pv_list) {
 4148                         if (PV_PMAP(pv) == pmap)
 4149                                 return (TRUE);
 4150                         loops++;
 4151                         if (loops >= 16)
 4152                                 break;
 4153                 }
 4154         }
 4155         return (FALSE);
 4156 }
 4157 
 4158 /*
 4159  *      pmap_page_wired_mappings:
 4160  *
 4161  *      Return the number of managed mappings to the given physical page
 4162  *      that are wired.
 4163  */
 4164 int
 4165 pmap_page_wired_mappings(vm_page_t m)
 4166 {
 4167         int count;
 4168 
 4169         count = 0;
 4170         if ((m->flags & PG_FICTITIOUS) != 0)
 4171                 return (count);
 4172         count = pmap_pvh_wired_mappings(&m->md, count);
 4173         return (pmap_pvh_wired_mappings(pa_to_pvh(VM_PAGE_TO_PHYS(m)), count));
 4174 }
 4175 
 4176 /*
 4177  *      pmap_pvh_wired_mappings:
 4178  *
 4179  *      Return the updated number "count" of managed mappings that are wired.
 4180  */
 4181 static int
 4182 pmap_pvh_wired_mappings(struct md_page *pvh, int count)
 4183 {
 4184         pmap_t pmap;
 4185         pt_entry_t *pte;
 4186         pv_entry_t pv;
 4187 
 4188         mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 4189         sched_pin();
 4190         TAILQ_FOREACH(pv, &pvh->pv_list, pv_list) {
 4191                 pmap = PV_PMAP(pv);
 4192                 PMAP_LOCK(pmap);
 4193                 pte = pmap_pte_quick(pmap, pv->pv_va);
 4194                 if ((*pte & PG_W) != 0)
 4195                         count++;
 4196                 PMAP_UNLOCK(pmap);
 4197         }
 4198         sched_unpin();
 4199         return (count);
 4200 }
 4201 
 4202 /*
 4203  * Returns TRUE if the given page is mapped individually or as part of
 4204  * a 4mpage.  Otherwise, returns FALSE.
 4205  */
 4206 boolean_t
 4207 pmap_page_is_mapped(vm_page_t m)
 4208 {
 4209         struct md_page *pvh;
 4210 
 4211         if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0)
 4212                 return (FALSE);
 4213         mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 4214         if (TAILQ_EMPTY(&m->md.pv_list)) {
 4215                 pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
 4216                 return (!TAILQ_EMPTY(&pvh->pv_list));
 4217         } else
 4218                 return (TRUE);
 4219 }
 4220 
 4221 /*
 4222  * Remove all pages from specified address space
 4223  * this aids process exit speeds.  Also, this code
 4224  * is special cased for current process only, but
 4225  * can have the more generic (and slightly slower)
 4226  * mode enabled.  This is much faster than pmap_remove
 4227  * in the case of running down an entire address space.
 4228  */
 4229 void
 4230 pmap_remove_pages(pmap_t pmap)
 4231 {
 4232         pt_entry_t *pte, tpte;
 4233         vm_page_t free = NULL;
 4234         vm_page_t m, mpte, mt;
 4235         pv_entry_t pv;
 4236         struct md_page *pvh;
 4237         struct pv_chunk *pc, *npc;
 4238         int field, idx;
 4239         int32_t bit;
 4240         uint32_t inuse, bitmask;
 4241         int allfree;
 4242 
 4243         if (pmap != PCPU_GET(curpmap)) {
 4244                 printf("warning: pmap_remove_pages called with non-current pmap\n");
 4245                 return;
 4246         }
 4247         vm_page_lock_queues();
 4248         PMAP_LOCK(pmap);
 4249         sched_pin();
 4250         TAILQ_FOREACH_SAFE(pc, &pmap->pm_pvchunk, pc_list, npc) {
 4251                 allfree = 1;
 4252                 for (field = 0; field < _NPCM; field++) {
 4253                         inuse = (~(pc->pc_map[field])) & pc_freemask[field];
 4254                         while (inuse != 0) {
 4255                                 bit = bsfl(inuse);
 4256                                 bitmask = 1UL << bit;
 4257                                 idx = field * 32 + bit;
 4258                                 pv = &pc->pc_pventry[idx];
 4259                                 inuse &= ~bitmask;
 4260 
 4261                                 pte = pmap_pde(pmap, pv->pv_va);
 4262                                 tpte = *pte;
 4263                                 if ((tpte & PG_PS) == 0) {
 4264                                         pte = vtopte(pv->pv_va);
 4265                                         tpte = *pte & ~PG_PTE_PAT;
 4266                                 }
 4267 
 4268                                 if (tpte == 0) {
 4269                                         printf(
 4270                                             "TPTE at %p  IS ZERO @ VA %08x\n",
 4271                                             pte, pv->pv_va);
 4272                                         panic("bad pte");
 4273                                 }
 4274 
 4275 /*
 4276  * We cannot remove wired pages from a process' mapping at this time
 4277  */
 4278                                 if (tpte & PG_W) {
 4279                                         allfree = 0;
 4280                                         continue;
 4281                                 }
 4282 
 4283                                 m = PHYS_TO_VM_PAGE(tpte & PG_FRAME);
 4284                                 KASSERT(m->phys_addr == (tpte & PG_FRAME),
 4285                                     ("vm_page_t %p phys_addr mismatch %016jx %016jx",
 4286                                     m, (uintmax_t)m->phys_addr,
 4287                                     (uintmax_t)tpte));
 4288 
 4289                                 KASSERT(m < &vm_page_array[vm_page_array_size],
 4290                                         ("pmap_remove_pages: bad tpte %#jx",
 4291                                         (uintmax_t)tpte));
 4292 
 4293                                 pte_clear(pte);
 4294 
 4295                                 /*
 4296                                  * Update the vm_page_t clean/reference bits.
 4297                                  */
 4298                                 if ((tpte & (PG_M | PG_RW)) == (PG_M | PG_RW)) {
 4299                                         if ((tpte & PG_PS) != 0) {
 4300                                                 for (mt = m; mt < &m[NBPDR / PAGE_SIZE]; mt++)
 4301                                                         vm_page_dirty(mt);
 4302                                         } else
 4303                                                 vm_page_dirty(m);
 4304                                 }
 4305 
 4306                                 /* Mark free */
 4307                                 PV_STAT(pv_entry_frees++);
 4308                                 PV_STAT(pv_entry_spare++);
 4309                                 pv_entry_count--;
 4310                                 pc->pc_map[field] |= bitmask;
 4311                                 if ((tpte & PG_PS) != 0) {
 4312                                         pmap->pm_stats.resident_count -= NBPDR / PAGE_SIZE;
 4313                                         pvh = pa_to_pvh(tpte & PG_PS_FRAME);
 4314                                         TAILQ_REMOVE(&pvh->pv_list, pv, pv_list);
 4315                                         if (TAILQ_EMPTY(&pvh->pv_list)) {
 4316                                                 for (mt = m; mt < &m[NBPDR / PAGE_SIZE]; mt++)
 4317                                                         if (TAILQ_EMPTY(&mt->md.pv_list))
 4318                                                                 vm_page_flag_clear(mt, PG_WRITEABLE);
 4319                                         }
 4320                                         mpte = pmap_lookup_pt_page(pmap, pv->pv_va);
 4321                                         if (mpte != NULL) {
 4322                                                 pmap_remove_pt_page(pmap, mpte);
 4323                                                 pmap->pm_stats.resident_count--;
 4324                                                 KASSERT(mpte->wire_count == NPTEPG,
 4325                                                     ("pmap_remove_pages: pte page wire count error"));
 4326                                                 mpte->wire_count = 0;
 4327                                                 pmap_add_delayed_free_list(mpte, &free, FALSE);
 4328                                                 atomic_subtract_int(&cnt.v_wire_count, 1);
 4329                                         }
 4330                                 } else {
 4331                                         pmap->pm_stats.resident_count--;
 4332                                         TAILQ_REMOVE(&m->md.pv_list, pv, pv_list);
 4333                                         if (TAILQ_EMPTY(&m->md.pv_list)) {
 4334                                                 pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
 4335                                                 if (TAILQ_EMPTY(&pvh->pv_list))
 4336                                                         vm_page_flag_clear(m, PG_WRITEABLE);
 4337                                         }
 4338                                         pmap_unuse_pt(pmap, pv->pv_va, &free);
 4339                                 }
 4340                         }
 4341                 }
 4342                 if (allfree) {
 4343                         PV_STAT(pv_entry_spare -= _NPCPV);
 4344                         PV_STAT(pc_chunk_count--);
 4345                         PV_STAT(pc_chunk_frees++);
 4346                         TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
 4347                         m = PHYS_TO_VM_PAGE(pmap_kextract((vm_offset_t)pc));
 4348                         pmap_qremove((vm_offset_t)pc, 1);
 4349                         vm_page_unwire(m, 0);
 4350                         vm_page_free(m);
 4351                         pmap_ptelist_free(&pv_vafree, (vm_offset_t)pc);
 4352                 }
 4353         }
 4354         sched_unpin();
 4355         pmap_invalidate_all(pmap);
 4356         vm_page_unlock_queues();
 4357         PMAP_UNLOCK(pmap);
 4358         pmap_free_zero_pages(free);
 4359 }
 4360 
 4361 /*
 4362  *      pmap_is_modified:
 4363  *
 4364  *      Return whether or not the specified physical page was modified
 4365  *      in any physical maps.
 4366  */
 4367 boolean_t
 4368 pmap_is_modified(vm_page_t m)
 4369 {
 4370 
 4371         if (m->flags & PG_FICTITIOUS)
 4372                 return (FALSE);
 4373         if (pmap_is_modified_pvh(&m->md))
 4374                 return (TRUE);
 4375         return (pmap_is_modified_pvh(pa_to_pvh(VM_PAGE_TO_PHYS(m))));
 4376 }
 4377 
 4378 /*
 4379  * Returns TRUE if any of the given mappings were used to modify
 4380  * physical memory.  Otherwise, returns FALSE.  Both page and 2mpage
 4381  * mappings are supported.
 4382  */
 4383 static boolean_t
 4384 pmap_is_modified_pvh(struct md_page *pvh)
 4385 {
 4386         pv_entry_t pv;
 4387         pt_entry_t *pte;
 4388         pmap_t pmap;
 4389         boolean_t rv;
 4390 
 4391         mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 4392         rv = FALSE;
 4393         sched_pin();
 4394         TAILQ_FOREACH(pv, &pvh->pv_list, pv_list) {
 4395                 pmap = PV_PMAP(pv);
 4396                 PMAP_LOCK(pmap);
 4397                 pte = pmap_pte_quick(pmap, pv->pv_va);
 4398                 rv = (*pte & (PG_M | PG_RW)) == (PG_M | PG_RW);
 4399                 PMAP_UNLOCK(pmap);
 4400                 if (rv)
 4401                         break;
 4402         }
 4403         sched_unpin();
 4404         return (rv);
 4405 }
 4406 
 4407 /*
 4408  *      pmap_is_prefaultable:
 4409  *
 4410  *      Return whether or not the specified virtual address is elgible
 4411  *      for prefault.
 4412  */
 4413 boolean_t
 4414 pmap_is_prefaultable(pmap_t pmap, vm_offset_t addr)
 4415 {
 4416         pd_entry_t *pde;
 4417         pt_entry_t *pte;
 4418         boolean_t rv;
 4419 
 4420         rv = FALSE;
 4421         PMAP_LOCK(pmap);
 4422         pde = pmap_pde(pmap, addr);
 4423         if (*pde != 0 && (*pde & PG_PS) == 0) {
 4424                 pte = vtopte(addr);
 4425                 rv = *pte == 0;
 4426         }
 4427         PMAP_UNLOCK(pmap);
 4428         return (rv);
 4429 }
 4430 
 4431 /*
 4432  * Clear the write and modified bits in each of the given page's mappings.
 4433  */
 4434 void
 4435 pmap_remove_write(vm_page_t m)
 4436 {
 4437         struct md_page *pvh;
 4438         pv_entry_t next_pv, pv;
 4439         pmap_t pmap;
 4440         pd_entry_t *pde;
 4441         pt_entry_t oldpte, *pte;
 4442         vm_offset_t va;
 4443 
 4444         mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 4445         if ((m->flags & PG_FICTITIOUS) != 0 ||
 4446             (m->flags & PG_WRITEABLE) == 0)
 4447                 return;
 4448         sched_pin();
 4449         pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
 4450         TAILQ_FOREACH_SAFE(pv, &pvh->pv_list, pv_list, next_pv) {
 4451                 va = pv->pv_va;
 4452                 pmap = PV_PMAP(pv);
 4453                 PMAP_LOCK(pmap);
 4454                 pde = pmap_pde(pmap, va);
 4455                 if ((*pde & PG_RW) != 0)
 4456                         (void)pmap_demote_pde(pmap, pde, va);
 4457                 PMAP_UNLOCK(pmap);
 4458         }
 4459         TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
 4460                 pmap = PV_PMAP(pv);
 4461                 PMAP_LOCK(pmap);
 4462                 pde = pmap_pde(pmap, pv->pv_va);
 4463                 KASSERT((*pde & PG_PS) == 0, ("pmap_clear_write: found"
 4464                     " a 4mpage in page %p's pv list", m));
 4465                 pte = pmap_pte_quick(pmap, pv->pv_va);
 4466 retry:
 4467                 oldpte = *pte;
 4468                 if ((oldpte & PG_RW) != 0) {
 4469                         /*
 4470                          * Regardless of whether a pte is 32 or 64 bits
 4471                          * in size, PG_RW and PG_M are among the least
 4472                          * significant 32 bits.
 4473                          */
 4474                         if (!atomic_cmpset_int((u_int *)pte, oldpte,
 4475                             oldpte & ~(PG_RW | PG_M)))
 4476                                 goto retry;
 4477                         if ((oldpte & PG_M) != 0)
 4478                                 vm_page_dirty(m);
 4479                         pmap_invalidate_page(pmap, pv->pv_va);
 4480                 }
 4481                 PMAP_UNLOCK(pmap);
 4482         }
 4483         vm_page_flag_clear(m, PG_WRITEABLE);
 4484         sched_unpin();
 4485 }
 4486 
 4487 /*
 4488  *      pmap_ts_referenced:
 4489  *
 4490  *      Return a count of reference bits for a page, clearing those bits.
 4491  *      It is not necessary for every reference bit to be cleared, but it
 4492  *      is necessary that 0 only be returned when there are truly no
 4493  *      reference bits set.
 4494  *
 4495  *      XXX: The exact number of bits to check and clear is a matter that
 4496  *      should be tested and standardized at some point in the future for
 4497  *      optimal aging of shared pages.
 4498  */
 4499 int
 4500 pmap_ts_referenced(vm_page_t m)
 4501 {
 4502         struct md_page *pvh;
 4503         pv_entry_t pv, pvf, pvn;
 4504         pmap_t pmap;
 4505         pd_entry_t oldpde, *pde;
 4506         pt_entry_t *pte;
 4507         vm_offset_t va;
 4508         int rtval = 0;
 4509 
 4510         if (m->flags & PG_FICTITIOUS)
 4511                 return (rtval);
 4512         sched_pin();
 4513         mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 4514         pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
 4515         TAILQ_FOREACH_SAFE(pv, &pvh->pv_list, pv_list, pvn) {
 4516                 va = pv->pv_va;
 4517                 pmap = PV_PMAP(pv);
 4518                 PMAP_LOCK(pmap);
 4519                 pde = pmap_pde(pmap, va);
 4520                 oldpde = *pde;
 4521                 if ((oldpde & PG_A) != 0) {
 4522                         if (pmap_demote_pde(pmap, pde, va)) {
 4523                                 if ((oldpde & PG_W) == 0) {
 4524                                         /*
 4525                                          * Remove the mapping to a single page
 4526                                          * so that a subsequent access may
 4527                                          * repromote.  Since the underlying
 4528                                          * page table page is fully populated,
 4529                                          * this removal never frees a page
 4530                                          * table page.
 4531                                          */
 4532                                         va += VM_PAGE_TO_PHYS(m) - (oldpde &
 4533                                             PG_PS_FRAME);
 4534                                         pmap_remove_page(pmap, va, NULL);
 4535                                         rtval++;
 4536                                         if (rtval > 4) {
 4537                                                 PMAP_UNLOCK(pmap);
 4538                                                 goto out;
 4539                                         }
 4540                                 }
 4541                         }
 4542                 }
 4543                 PMAP_UNLOCK(pmap);
 4544         }
 4545         if ((pv = TAILQ_FIRST(&m->md.pv_list)) != NULL) {
 4546                 pvf = pv;
 4547                 do {
 4548                         pvn = TAILQ_NEXT(pv, pv_list);
 4549                         TAILQ_REMOVE(&m->md.pv_list, pv, pv_list);
 4550                         TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_list);
 4551                         pmap = PV_PMAP(pv);
 4552                         PMAP_LOCK(pmap);
 4553                         pde = pmap_pde(pmap, pv->pv_va);
 4554                         KASSERT((*pde & PG_PS) == 0, ("pmap_ts_referenced:"
 4555                             " found a 4mpage in page %p's pv list", m));
 4556                         pte = pmap_pte_quick(pmap, pv->pv_va);
 4557                         if ((*pte & PG_A) != 0) {
 4558                                 atomic_clear_int((u_int *)pte, PG_A);
 4559                                 pmap_invalidate_page(pmap, pv->pv_va);
 4560                                 rtval++;
 4561                                 if (rtval > 4)
 4562                                         pvn = NULL;
 4563                         }
 4564                         PMAP_UNLOCK(pmap);
 4565                 } while ((pv = pvn) != NULL && pv != pvf);
 4566         }
 4567 out:
 4568         sched_unpin();
 4569         return (rtval);
 4570 }
 4571 
 4572 /*
 4573  *      Clear the modify bits on the specified physical page.
 4574  */
 4575 void
 4576 pmap_clear_modify(vm_page_t m)
 4577 {
 4578         struct md_page *pvh;
 4579         pv_entry_t next_pv, pv;
 4580         pmap_t pmap;
 4581         pd_entry_t oldpde, *pde;
 4582         pt_entry_t oldpte, *pte;
 4583         vm_offset_t va;
 4584 
 4585         mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 4586         if ((m->flags & PG_FICTITIOUS) != 0)
 4587                 return;
 4588         sched_pin();
 4589         pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
 4590         TAILQ_FOREACH_SAFE(pv, &pvh->pv_list, pv_list, next_pv) {
 4591                 va = pv->pv_va;
 4592                 pmap = PV_PMAP(pv);
 4593                 PMAP_LOCK(pmap);
 4594                 pde = pmap_pde(pmap, va);
 4595                 oldpde = *pde;
 4596                 if ((oldpde & PG_RW) != 0) {
 4597                         if (pmap_demote_pde(pmap, pde, va)) {
 4598                                 if ((oldpde & PG_W) == 0) {
 4599                                         /*
 4600                                          * Write protect the mapping to a
 4601                                          * single page so that a subsequent
 4602                                          * write access may repromote.
 4603                                          */
 4604                                         va += VM_PAGE_TO_PHYS(m) - (oldpde &
 4605                                             PG_PS_FRAME);
 4606                                         pte = pmap_pte_quick(pmap, va);
 4607                                         oldpte = *pte;
 4608                                         if ((oldpte & PG_V) != 0) {
 4609                                                 /*
 4610                                                  * Regardless of whether a pte is 32 or 64 bits
 4611                                                  * in size, PG_RW and PG_M are among the least
 4612                                                  * significant 32 bits.
 4613                                                  */
 4614                                                 while (!atomic_cmpset_int((u_int *)pte,
 4615                                                     oldpte,
 4616                                                     oldpte & ~(PG_M | PG_RW)))
 4617                                                         oldpte = *pte;
 4618                                                 vm_page_dirty(m);
 4619                                                 pmap_invalidate_page(pmap, va);
 4620                                         }
 4621                                 }
 4622                         }
 4623                 }
 4624                 PMAP_UNLOCK(pmap);
 4625         }
 4626         TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
 4627                 pmap = PV_PMAP(pv);
 4628                 PMAP_LOCK(pmap);
 4629                 pde = pmap_pde(pmap, pv->pv_va);
 4630                 KASSERT((*pde & PG_PS) == 0, ("pmap_clear_modify: found"
 4631                     " a 4mpage in page %p's pv list", m));
 4632                 pte = pmap_pte_quick(pmap, pv->pv_va);
 4633                 if ((*pte & (PG_M | PG_RW)) == (PG_M | PG_RW)) {
 4634                         /*
 4635                          * Regardless of whether a pte is 32 or 64 bits
 4636                          * in size, PG_M is among the least significant
 4637                          * 32 bits. 
 4638                          */
 4639                         atomic_clear_int((u_int *)pte, PG_M);
 4640                         pmap_invalidate_page(pmap, pv->pv_va);
 4641                 }
 4642                 PMAP_UNLOCK(pmap);
 4643         }
 4644         sched_unpin();
 4645 }
 4646 
 4647 /*
 4648  *      pmap_clear_reference:
 4649  *
 4650  *      Clear the reference bit on the specified physical page.
 4651  */
 4652 void
 4653 pmap_clear_reference(vm_page_t m)
 4654 {
 4655         struct md_page *pvh;
 4656         pv_entry_t next_pv, pv;
 4657         pmap_t pmap;
 4658         pd_entry_t oldpde, *pde;
 4659         pt_entry_t *pte;
 4660         vm_offset_t va;
 4661 
 4662         mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 4663         if ((m->flags & PG_FICTITIOUS) != 0)
 4664                 return;
 4665         sched_pin();
 4666         pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
 4667         TAILQ_FOREACH_SAFE(pv, &pvh->pv_list, pv_list, next_pv) {
 4668                 va = pv->pv_va;
 4669                 pmap = PV_PMAP(pv);
 4670                 PMAP_LOCK(pmap);
 4671                 pde = pmap_pde(pmap, va);
 4672                 oldpde = *pde;
 4673                 if ((oldpde & PG_A) != 0) {
 4674                         if (pmap_demote_pde(pmap, pde, va)) {
 4675                                 /*
 4676                                  * Remove the mapping to a single page so
 4677                                  * that a subsequent access may repromote.
 4678                                  * Since the underlying page table page is
 4679                                  * fully populated, this removal never frees
 4680                                  * a page table page.
 4681                                  */
 4682                                 va += VM_PAGE_TO_PHYS(m) - (oldpde &
 4683                                     PG_PS_FRAME);
 4684                                 pmap_remove_page(pmap, va, NULL);
 4685                         }
 4686                 }
 4687                 PMAP_UNLOCK(pmap);
 4688         }
 4689         TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
 4690                 pmap = PV_PMAP(pv);
 4691                 PMAP_LOCK(pmap);
 4692                 pde = pmap_pde(pmap, pv->pv_va);
 4693                 KASSERT((*pde & PG_PS) == 0, ("pmap_clear_reference: found"
 4694                     " a 4mpage in page %p's pv list", m));
 4695                 pte = pmap_pte_quick(pmap, pv->pv_va);
 4696                 if ((*pte & PG_A) != 0) {
 4697                         /*
 4698                          * Regardless of whether a pte is 32 or 64 bits
 4699                          * in size, PG_A is among the least significant
 4700                          * 32 bits. 
 4701                          */
 4702                         atomic_clear_int((u_int *)pte, PG_A);
 4703                         pmap_invalidate_page(pmap, pv->pv_va);
 4704                 }
 4705                 PMAP_UNLOCK(pmap);
 4706         }
 4707         sched_unpin();
 4708 }
 4709 
 4710 /*
 4711  * Miscellaneous support routines follow
 4712  */
 4713 
 4714 /* Adjust the cache mode for a 4KB page mapped via a PTE. */
 4715 static __inline void
 4716 pmap_pte_attr(pt_entry_t *pte, int cache_bits)
 4717 {
 4718         u_int opte, npte;
 4719 
 4720         /*
 4721          * The cache mode bits are all in the low 32-bits of the
 4722          * PTE, so we can just spin on updating the low 32-bits.
 4723          */
 4724         do {
 4725                 opte = *(u_int *)pte;
 4726                 npte = opte & ~PG_PTE_CACHE;
 4727                 npte |= cache_bits;
 4728         } while (npte != opte && !atomic_cmpset_int((u_int *)pte, opte, npte));
 4729 }
 4730 
 4731 /* Adjust the cache mode for a 2/4MB page mapped via a PDE. */
 4732 static __inline void
 4733 pmap_pde_attr(pd_entry_t *pde, int cache_bits)
 4734 {
 4735         u_int opde, npde;
 4736 
 4737         /*
 4738          * The cache mode bits are all in the low 32-bits of the
 4739          * PDE, so we can just spin on updating the low 32-bits.
 4740          */
 4741         do {
 4742                 opde = *(u_int *)pde;
 4743                 npde = opde & ~PG_PDE_CACHE;
 4744                 npde |= cache_bits;
 4745         } while (npde != opde && !atomic_cmpset_int((u_int *)pde, opde, npde));
 4746 }
 4747 
 4748 /*
 4749  * Map a set of physical memory pages into the kernel virtual
 4750  * address space. Return a pointer to where it is mapped. This
 4751  * routine is intended to be used for mapping device memory,
 4752  * NOT real memory.
 4753  */
 4754 void *
 4755 pmap_mapdev_attr(vm_paddr_t pa, vm_size_t size, int mode)
 4756 {
 4757         vm_offset_t va, offset;
 4758         vm_size_t tmpsize;
 4759 
 4760         offset = pa & PAGE_MASK;
 4761         size = roundup(offset + size, PAGE_SIZE);
 4762         pa = pa & PG_FRAME;
 4763 
 4764         if (pa < KERNLOAD && pa + size <= KERNLOAD)
 4765                 va = KERNBASE + pa;
 4766         else
 4767                 va = kmem_alloc_nofault(kernel_map, size);
 4768         if (!va)
 4769                 panic("pmap_mapdev: Couldn't alloc kernel virtual memory");
 4770 
 4771         for (tmpsize = 0; tmpsize < size; tmpsize += PAGE_SIZE)
 4772                 pmap_kenter_attr(va + tmpsize, pa + tmpsize, mode);
 4773         pmap_invalidate_range(kernel_pmap, va, va + tmpsize);
 4774         pmap_invalidate_cache_range(va, va + size);
 4775         return ((void *)(va + offset));
 4776 }
 4777 
 4778 void *
 4779 pmap_mapdev(vm_paddr_t pa, vm_size_t size)
 4780 {
 4781 
 4782         return (pmap_mapdev_attr(pa, size, PAT_UNCACHEABLE));
 4783 }
 4784 
 4785 void *
 4786 pmap_mapbios(vm_paddr_t pa, vm_size_t size)
 4787 {
 4788 
 4789         return (pmap_mapdev_attr(pa, size, PAT_WRITE_BACK));
 4790 }
 4791 
 4792 void
 4793 pmap_unmapdev(vm_offset_t va, vm_size_t size)
 4794 {
 4795         vm_offset_t base, offset, tmpva;
 4796 
 4797         if (va >= KERNBASE && va + size <= KERNBASE + KERNLOAD)
 4798                 return;
 4799         base = trunc_page(va);
 4800         offset = va & PAGE_MASK;
 4801         size = roundup(offset + size, PAGE_SIZE);
 4802         for (tmpva = base; tmpva < (base + size); tmpva += PAGE_SIZE)
 4803                 pmap_kremove(tmpva);
 4804         pmap_invalidate_range(kernel_pmap, va, tmpva);
 4805         kmem_free(kernel_map, base, size);
 4806 }
 4807 
 4808 /*
 4809  * Sets the memory attribute for the specified page.
 4810  */
 4811 void
 4812 pmap_page_set_memattr(vm_page_t m, vm_memattr_t ma)
 4813 {
 4814         struct sysmaps *sysmaps;
 4815         vm_offset_t sva, eva;
 4816 
 4817         m->md.pat_mode = ma;
 4818         if ((m->flags & PG_FICTITIOUS) != 0)
 4819                 return;
 4820 
 4821         /*
 4822          * If "m" is a normal page, flush it from the cache.
 4823          * See pmap_invalidate_cache_range().
 4824          *
 4825          * First, try to find an existing mapping of the page by sf
 4826          * buffer. sf_buf_invalidate_cache() modifies mapping and
 4827          * flushes the cache.
 4828          */    
 4829         if (sf_buf_invalidate_cache(m))
 4830                 return;
 4831 
 4832         /*
 4833          * If page is not mapped by sf buffer, but CPU does not
 4834          * support self snoop, map the page transient and do
 4835          * invalidation. In the worst case, whole cache is flushed by
 4836          * pmap_invalidate_cache_range().
 4837          */
 4838         if ((cpu_feature & (CPUID_SS|CPUID_CLFSH)) == CPUID_CLFSH) {
 4839                 sysmaps = &sysmaps_pcpu[PCPU_GET(cpuid)];
 4840                 mtx_lock(&sysmaps->lock);
 4841                 if (*sysmaps->CMAP2)
 4842                         panic("pmap_page_set_memattr: CMAP2 busy");
 4843                 sched_pin();
 4844                 *sysmaps->CMAP2 = PG_V | PG_RW | VM_PAGE_TO_PHYS(m) |
 4845                     PG_A | PG_M | pmap_cache_bits(m->md.pat_mode, 0);
 4846                 invlcaddr(sysmaps->CADDR2);
 4847                 sva = (vm_offset_t)sysmaps->CADDR2;
 4848                 eva = sva + PAGE_SIZE;
 4849         } else
 4850                 sva = eva = 0; /* gcc */
 4851         pmap_invalidate_cache_range(sva, eva);
 4852         if (sva != 0) {
 4853                 *sysmaps->CMAP2 = 0;
 4854                 sched_unpin();
 4855                 mtx_unlock(&sysmaps->lock);
 4856         }
 4857 }
 4858 
 4859 /*
 4860  * Changes the specified virtual address range's memory type to that given by
 4861  * the parameter "mode".  The specified virtual address range must be
 4862  * completely contained within either the kernel map.
 4863  *
 4864  * Returns zero if the change completed successfully, and either EINVAL or
 4865  * ENOMEM if the change failed.  Specifically, EINVAL is returned if some part
 4866  * of the virtual address range was not mapped, and ENOMEM is returned if
 4867  * there was insufficient memory available to complete the change.
 4868  */
 4869 int
 4870 pmap_change_attr(vm_offset_t va, vm_size_t size, int mode)
 4871 {
 4872         vm_offset_t base, offset, tmpva;
 4873         pd_entry_t *pde;
 4874         pt_entry_t *pte;
 4875         int cache_bits_pte, cache_bits_pde;
 4876         boolean_t changed;
 4877 
 4878         base = trunc_page(va);
 4879         offset = va & PAGE_MASK;
 4880         size = roundup(offset + size, PAGE_SIZE);
 4881 
 4882         /*
 4883          * Only supported on kernel virtual addresses above the recursive map.
 4884          */
 4885         if (base < VM_MIN_KERNEL_ADDRESS)
 4886                 return (EINVAL);
 4887 
 4888         cache_bits_pde = pmap_cache_bits(mode, 1);
 4889         cache_bits_pte = pmap_cache_bits(mode, 0);
 4890         changed = FALSE;
 4891 
 4892         /*
 4893          * Pages that aren't mapped aren't supported.  Also break down
 4894          * 2/4MB pages into 4KB pages if required.
 4895          */
 4896         PMAP_LOCK(kernel_pmap);
 4897         for (tmpva = base; tmpva < base + size; ) {
 4898                 pde = pmap_pde(kernel_pmap, tmpva);
 4899                 if (*pde == 0) {
 4900                         PMAP_UNLOCK(kernel_pmap);
 4901                         return (EINVAL);
 4902                 }
 4903                 if (*pde & PG_PS) {
 4904                         /*
 4905                          * If the current 2/4MB page already has
 4906                          * the required memory type, then we need not
 4907                          * demote this page.  Just increment tmpva to
 4908                          * the next 2/4MB page frame.
 4909                          */
 4910                         if ((*pde & PG_PDE_CACHE) == cache_bits_pde) {
 4911                                 tmpva = trunc_4mpage(tmpva) + NBPDR;
 4912                                 continue;
 4913                         }
 4914 
 4915                         /*
 4916                          * If the current offset aligns with a 2/4MB
 4917                          * page frame and there is at least 2/4MB left
 4918                          * within the range, then we need not break
 4919                          * down this page into 4KB pages.
 4920                          */
 4921                         if ((tmpva & PDRMASK) == 0 &&
 4922                             tmpva + PDRMASK < base + size) {
 4923                                 tmpva += NBPDR;
 4924                                 continue;
 4925                         }
 4926                         if (!pmap_demote_pde(kernel_pmap, pde, tmpva)) {
 4927                                 PMAP_UNLOCK(kernel_pmap);
 4928                                 return (ENOMEM);
 4929                         }
 4930                 }
 4931                 pte = vtopte(tmpva);
 4932                 if (*pte == 0) {
 4933                         PMAP_UNLOCK(kernel_pmap);
 4934                         return (EINVAL);
 4935                 }
 4936                 tmpva += PAGE_SIZE;
 4937         }
 4938         PMAP_UNLOCK(kernel_pmap);
 4939 
 4940         /*
 4941          * Ok, all the pages exist, so run through them updating their
 4942          * cache mode if required.
 4943          */
 4944         for (tmpva = base; tmpva < base + size; ) {
 4945                 pde = pmap_pde(kernel_pmap, tmpva);
 4946                 if (*pde & PG_PS) {
 4947                         if ((*pde & PG_PDE_CACHE) != cache_bits_pde) {
 4948                                 pmap_pde_attr(pde, cache_bits_pde);
 4949                                 changed = TRUE;
 4950                         }
 4951                         tmpva = trunc_4mpage(tmpva) + NBPDR;
 4952                 } else {
 4953                         pte = vtopte(tmpva);
 4954                         if ((*pte & PG_PTE_CACHE) != cache_bits_pte) {
 4955                                 pmap_pte_attr(pte, cache_bits_pte);
 4956                                 changed = TRUE;
 4957                         }
 4958                         tmpva += PAGE_SIZE;
 4959                 }
 4960         }
 4961 
 4962         /*
 4963          * Flush CPU caches to make sure any data isn't cached that
 4964          * shouldn't be, etc.
 4965          */
 4966         if (changed) {
 4967                 pmap_invalidate_range(kernel_pmap, base, tmpva);
 4968                 pmap_invalidate_cache_range(base, tmpva);
 4969         }
 4970         return (0);
 4971 }
 4972 
 4973 /*
 4974  * perform the pmap work for mincore
 4975  */
 4976 int
 4977 pmap_mincore(pmap_t pmap, vm_offset_t addr)
 4978 {
 4979         pd_entry_t *pdep;
 4980         pt_entry_t *ptep, pte;
 4981         vm_paddr_t pa;
 4982         vm_page_t m;
 4983         int val = 0;
 4984         
 4985         PMAP_LOCK(pmap);
 4986         pdep = pmap_pde(pmap, addr);
 4987         if (*pdep != 0) {
 4988                 if (*pdep & PG_PS) {
 4989                         pte = *pdep;
 4990                         val = MINCORE_SUPER;
 4991                         /* Compute the physical address of the 4KB page. */
 4992                         pa = ((*pdep & PG_PS_FRAME) | (addr & PDRMASK)) &
 4993                             PG_FRAME;
 4994                 } else {
 4995                         ptep = pmap_pte(pmap, addr);
 4996                         pte = *ptep;
 4997                         pmap_pte_release(ptep);
 4998                         pa = pte & PG_FRAME;
 4999                 }
 5000         } else {
 5001                 pte = 0;
 5002                 pa = 0;
 5003         }
 5004         PMAP_UNLOCK(pmap);
 5005 
 5006         if (pte != 0) {
 5007                 val |= MINCORE_INCORE;
 5008                 if ((pte & PG_MANAGED) == 0)
 5009                         return val;
 5010 
 5011                 m = PHYS_TO_VM_PAGE(pa);
 5012 
 5013                 /*
 5014                  * Modified by us
 5015                  */
 5016                 if ((pte & (PG_M | PG_RW)) == (PG_M | PG_RW))
 5017                         val |= MINCORE_MODIFIED|MINCORE_MODIFIED_OTHER;
 5018                 else {
 5019                         /*
 5020                          * Modified by someone else
 5021                          */
 5022                         vm_page_lock_queues();
 5023                         if (m->dirty || pmap_is_modified(m))
 5024                                 val |= MINCORE_MODIFIED_OTHER;
 5025                         vm_page_unlock_queues();
 5026                 }
 5027                 /*
 5028                  * Referenced by us
 5029                  */
 5030                 if (pte & PG_A)
 5031                         val |= MINCORE_REFERENCED|MINCORE_REFERENCED_OTHER;
 5032                 else {
 5033                         /*
 5034                          * Referenced by someone else
 5035                          */
 5036                         vm_page_lock_queues();
 5037                         if ((m->flags & PG_REFERENCED) ||
 5038                             pmap_ts_referenced(m)) {
 5039                                 val |= MINCORE_REFERENCED_OTHER;
 5040                                 vm_page_flag_set(m, PG_REFERENCED);
 5041                         }
 5042                         vm_page_unlock_queues();
 5043                 }
 5044         } 
 5045         return val;
 5046 }
 5047 
 5048 void
 5049 pmap_activate(struct thread *td)
 5050 {
 5051         pmap_t  pmap, oldpmap;
 5052         u_int32_t  cr3;
 5053 
 5054         critical_enter();
 5055         pmap = vmspace_pmap(td->td_proc->p_vmspace);
 5056         oldpmap = PCPU_GET(curpmap);
 5057 #if defined(SMP)
 5058         atomic_clear_int(&oldpmap->pm_active, PCPU_GET(cpumask));
 5059         atomic_set_int(&pmap->pm_active, PCPU_GET(cpumask));
 5060 #else
 5061         oldpmap->pm_active &= ~1;
 5062         pmap->pm_active |= 1;
 5063 #endif
 5064 #ifdef PAE
 5065         cr3 = vtophys(pmap->pm_pdpt);
 5066 #else
 5067         cr3 = vtophys(pmap->pm_pdir);
 5068 #endif
 5069         /*
 5070          * pmap_activate is for the current thread on the current cpu
 5071          */
 5072         td->td_pcb->pcb_cr3 = cr3;
 5073         load_cr3(cr3);
 5074         PCPU_SET(curpmap, pmap);
 5075         critical_exit();
 5076 }
 5077 
 5078 void
 5079 pmap_sync_icache(pmap_t pm, vm_offset_t va, vm_size_t sz)
 5080 {
 5081 }
 5082 
 5083 /*
 5084  *      Increase the starting virtual address of the given mapping if a
 5085  *      different alignment might result in more superpage mappings.
 5086  */
 5087 void
 5088 pmap_align_superpage(vm_object_t object, vm_ooffset_t offset,
 5089     vm_offset_t *addr, vm_size_t size)
 5090 {
 5091         vm_offset_t superpage_offset;
 5092 
 5093         if (size < NBPDR)
 5094                 return;
 5095         if (object != NULL && (object->flags & OBJ_COLORED) != 0)
 5096                 offset += ptoa(object->pg_color);
 5097         superpage_offset = offset & PDRMASK;
 5098         if (size - ((NBPDR - superpage_offset) & PDRMASK) < NBPDR ||
 5099             (*addr & PDRMASK) == superpage_offset)
 5100                 return;
 5101         if ((*addr & PDRMASK) < superpage_offset)
 5102                 *addr = (*addr & ~PDRMASK) + superpage_offset;
 5103         else
 5104                 *addr = ((*addr + PDRMASK) & ~PDRMASK) + superpage_offset;
 5105 }
 5106 
 5107 
 5108 #if defined(PMAP_DEBUG)
 5109 pmap_pid_dump(int pid)
 5110 {
 5111         pmap_t pmap;
 5112         struct proc *p;
 5113         int npte = 0;
 5114         int index;
 5115 
 5116         sx_slock(&allproc_lock);
 5117         FOREACH_PROC_IN_SYSTEM(p) {
 5118                 if (p->p_pid != pid)
 5119                         continue;
 5120 
 5121                 if (p->p_vmspace) {
 5122                         int i,j;
 5123                         index = 0;
 5124                         pmap = vmspace_pmap(p->p_vmspace);
 5125                         for (i = 0; i < NPDEPTD; i++) {
 5126                                 pd_entry_t *pde;
 5127                                 pt_entry_t *pte;
 5128                                 vm_offset_t base = i << PDRSHIFT;
 5129                                 
 5130                                 pde = &pmap->pm_pdir[i];
 5131                                 if (pde && pmap_pde_v(pde)) {
 5132                                         for (j = 0; j < NPTEPG; j++) {
 5133                                                 vm_offset_t va = base + (j << PAGE_SHIFT);
 5134                                                 if (va >= (vm_offset_t) VM_MIN_KERNEL_ADDRESS) {
 5135                                                         if (index) {
 5136                                                                 index = 0;
 5137                                                                 printf("\n");
 5138                                                         }
 5139                                                         sx_sunlock(&allproc_lock);
 5140                                                         return npte;
 5141                                                 }
 5142                                                 pte = pmap_pte(pmap, va);
 5143                                                 if (pte && pmap_pte_v(pte)) {
 5144                                                         pt_entry_t pa;
 5145                                                         vm_page_t m;
 5146                                                         pa = *pte;
 5147                                                         m = PHYS_TO_VM_PAGE(pa & PG_FRAME);
 5148                                                         printf("va: 0x%x, pt: 0x%x, h: %d, w: %d, f: 0x%x",
 5149                                                                 va, pa, m->hold_count, m->wire_count, m->flags);
 5150                                                         npte++;
 5151                                                         index++;
 5152                                                         if (index >= 2) {
 5153                                                                 index = 0;
 5154                                                                 printf("\n");
 5155                                                         } else {
 5156                                                                 printf(" ");
 5157                                                         }
 5158                                                 }
 5159                                         }
 5160                                 }
 5161                         }
 5162                 }
 5163         }
 5164         sx_sunlock(&allproc_lock);
 5165         return npte;
 5166 }
 5167 #endif
 5168 
 5169 #if defined(DEBUG)
 5170 
 5171 static void     pads(pmap_t pm);
 5172 void            pmap_pvdump(vm_offset_t pa);
 5173 
 5174 /* print address space of pmap*/
 5175 static void
 5176 pads(pmap_t pm)
 5177 {
 5178         int i, j;
 5179         vm_paddr_t va;
 5180         pt_entry_t *ptep;
 5181 
 5182         if (pm == kernel_pmap)
 5183                 return;
 5184         for (i = 0; i < NPDEPTD; i++)
 5185                 if (pm->pm_pdir[i])
 5186                         for (j = 0; j < NPTEPG; j++) {
 5187                                 va = (i << PDRSHIFT) + (j << PAGE_SHIFT);
 5188                                 if (pm == kernel_pmap && va < KERNBASE)
 5189                                         continue;
 5190                                 if (pm != kernel_pmap && va > UPT_MAX_ADDRESS)
 5191                                         continue;
 5192                                 ptep = pmap_pte(pm, va);
 5193                                 if (pmap_pte_v(ptep))
 5194                                         printf("%x:%x ", va, *ptep);
 5195                         };
 5196 
 5197 }
 5198 
 5199 void
 5200 pmap_pvdump(vm_paddr_t pa)
 5201 {
 5202         pv_entry_t pv;
 5203         pmap_t pmap;
 5204         vm_page_t m;
 5205 
 5206         printf("pa %x", pa);
 5207         m = PHYS_TO_VM_PAGE(pa);
 5208         TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
 5209                 pmap = PV_PMAP(pv);
 5210                 printf(" -> pmap %p, va %x", (void *)pmap, pv->pv_va);
 5211                 pads(pmap);
 5212         }
 5213         printf(" ");
 5214 }
 5215 #endif

Cache object: 39e45877a25e7c803abbcfaa90843c7a


[ source navigation ] [ diff markup ] [ identifier search ] [ freetext search ] [ file search ] [ list types ] [ track identifier ]


This page is part of the FreeBSD/Linux Linux Kernel Cross-Reference, and was automatically generated using a modified version of the LXR engine.