FreeBSD/Linux Kernel Cross Reference
sys/i386/i386/pmap.c


    1 /*-
    2  * Copyright (c) 1991 Regents of the University of California.
    3  * All rights reserved.
    4  * Copyright (c) 1994 John S. Dyson
    5  * All rights reserved.
    6  * Copyright (c) 1994 David Greenman
    7  * All rights reserved.
    8  * Copyright (c) 2005-2008 Alan L. Cox <alc@cs.rice.edu>
    9  * All rights reserved.
   10  *
   11  * This code is derived from software contributed to Berkeley by
   12  * the Systems Programming Group of the University of Utah Computer
   13  * Science Department and William Jolitz of UUNET Technologies Inc.
   14  *
   15  * Redistribution and use in source and binary forms, with or without
   16  * modification, are permitted provided that the following conditions
   17  * are met:
   18  * 1. Redistributions of source code must retain the above copyright
   19  *    notice, this list of conditions and the following disclaimer.
   20  * 2. Redistributions in binary form must reproduce the above copyright
   21  *    notice, this list of conditions and the following disclaimer in the
   22  *    documentation and/or other materials provided with the distribution.
   23  * 3. All advertising materials mentioning features or use of this software
   24  *    must display the following acknowledgement:
   25  *      This product includes software developed by the University of
   26  *      California, Berkeley and its contributors.
   27  * 4. Neither the name of the University nor the names of its contributors
   28  *    may be used to endorse or promote products derived from this software
   29  *    without specific prior written permission.
   30  *
   31  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
   32  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   33  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   34  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
   35  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   36  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
   37  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   38  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   39  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   40  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   41  * SUCH DAMAGE.
   42  *
   43  *      from:   @(#)pmap.c      7.7 (Berkeley)  5/12/91
   44  */
   45 /*-
   46  * Copyright (c) 2003 Networks Associates Technology, Inc.
   47  * All rights reserved.
   48  *
   49  * This software was developed for the FreeBSD Project by Jake Burkholder,
   50  * Safeport Network Services, and Network Associates Laboratories, the
   51  * Security Research Division of Network Associates, Inc. under
   52  * DARPA/SPAWAR contract N66001-01-C-8035 ("CBOSS"), as part of the DARPA
   53  * CHATS research program.
   54  *
   55  * Redistribution and use in source and binary forms, with or without
   56  * modification, are permitted provided that the following conditions
   57  * are met:
   58  * 1. Redistributions of source code must retain the above copyright
   59  *    notice, this list of conditions and the following disclaimer.
   60  * 2. Redistributions in binary form must reproduce the above copyright
   61  *    notice, this list of conditions and the following disclaimer in the
   62  *    documentation and/or other materials provided with the distribution.
   63  *
   64  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
   65  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   66  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   67  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
   68  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   69  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
   70  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   71  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   72  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   73  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   74  * SUCH DAMAGE.
   75  */
   76 
   77 #include <sys/cdefs.h>
   78 __FBSDID("$FreeBSD: releng/7.3/sys/i386/i386/pmap.c 203208 2010-01-30 18:17:43Z alc $");
   79 
   80 /*
   81  *      Manages physical address maps.
   82  *
   83  *      In addition to hardware address maps, this
   84  *      module is called upon to provide software-use-only
   85  *      maps which may or may not be stored in the same
   86  *      form as hardware maps.  These pseudo-maps are
   87  *      used to store intermediate results from copy
   88  *      operations to and from address spaces.
   89  *
   90  *      Since the information managed by this module is
   91  *      also stored by the logical address mapping module,
   92  *      this module may throw away valid virtual-to-physical
   93  *      mappings at almost any time.  However, invalidations
   94  *      of virtual-to-physical mappings must be done as
   95  *      requested.
   96  *
   97  *      In order to cope with hardware architectures which
   98  *      make virtual-to-physical map invalidates expensive,
   99  *      this module may delay invalidation or protection-reduction
  100  *      operations until such time as they are actually
  101  *      necessary.  This module is given full information as
  102  *      to which processors are currently using which maps,
  103  *      and to when physical maps must be made correct.
  104  */
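
/*
 * Illustrative sketch (stand-alone user-space toy, not part of pmap.c): the
 * "delayed invalidation" idea described above, reduced to queueing the range
 * that became stale and flushing it once, only when the map must actually be
 * made correct.  All names here are invented; the real mechanism is the TLB
 * shootdown code later in this file.
 */
#include <stdint.h>
#include <stdio.h>

struct ex_pending {
        uint32_t sva, eva;              /* eva == 0 means nothing pending */
};

static void
ex_defer_invalidate(struct ex_pending *p, uint32_t sva, uint32_t eva)
{
        if (p->eva == 0) {
                p->sva = sva;
                p->eva = eva;
        } else {
                if (sva < p->sva)       /* widen the pending range */
                        p->sva = sva;
                if (eva > p->eva)
                        p->eva = eva;
        }
}

static void
ex_make_correct(struct ex_pending *p)
{
        if (p->eva != 0)
                printf("invalidate %#x-%#x now\n", p->sva, p->eva);
        p->sva = p->eva = 0;
}

int
main(void)
{
        struct ex_pending p = { 0, 0 };

        ex_defer_invalidate(&p, 0x1000, 0x3000);
        ex_defer_invalidate(&p, 0x8000, 0x9000);
        ex_make_correct(&p);            /* one flush covers both ranges */
        return (0);
}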
  105 
  106 #include "opt_cpu.h"
  107 #include "opt_pmap.h"
  108 #include "opt_msgbuf.h"
  109 #include "opt_smp.h"
  110 #include "opt_xbox.h"
  111 
  112 #include <sys/param.h>
  113 #include <sys/systm.h>
  114 #include <sys/kernel.h>
  115 #include <sys/ktr.h>
  116 #include <sys/lock.h>
  117 #include <sys/malloc.h>
  118 #include <sys/mman.h>
  119 #include <sys/msgbuf.h>
  120 #include <sys/mutex.h>
  121 #include <sys/proc.h>
  122 #include <sys/sf_buf.h>
  123 #include <sys/sx.h>
  124 #include <sys/vmmeter.h>
  125 #include <sys/sched.h>
  126 #include <sys/sysctl.h>
  127 #ifdef SMP
  128 #include <sys/smp.h>
  129 #endif
  130 
  131 #include <vm/vm.h>
  132 #include <vm/vm_param.h>
  133 #include <vm/vm_kern.h>
  134 #include <vm/vm_page.h>
  135 #include <vm/vm_map.h>
  136 #include <vm/vm_object.h>
  137 #include <vm/vm_extern.h>
  138 #include <vm/vm_pageout.h>
  139 #include <vm/vm_pager.h>
  140 #include <vm/vm_reserv.h>
  141 #include <vm/uma.h>
  142 
  143 #include <machine/cpu.h>
  144 #include <machine/cputypes.h>
  145 #include <machine/md_var.h>
  146 #include <machine/pcb.h>
  147 #include <machine/specialreg.h>
  148 #ifdef SMP
  149 #include <machine/smp.h>
  150 #endif
  151 
  152 #ifdef XBOX
  153 #include <machine/xbox.h>
  154 #endif
  155 
  156 #if !defined(CPU_DISABLE_SSE) && defined(I686_CPU)
  157 #define CPU_ENABLE_SSE
  158 #endif
  159 
  160 #ifndef PMAP_SHPGPERPROC
  161 #define PMAP_SHPGPERPROC 200
  162 #endif
  163 
  164 #if !defined(DIAGNOSTIC)
  165 #define PMAP_INLINE     __gnu89_inline
  166 #else
  167 #define PMAP_INLINE
  168 #endif
  169 
  170 #define PV_STATS
  171 #ifdef PV_STATS
  172 #define PV_STAT(x)      do { x ; } while (0)
  173 #else
  174 #define PV_STAT(x)      do { } while (0)
  175 #endif
  176 
  177 #define pa_index(pa)    ((pa) >> PDRSHIFT)
  178 #define pa_to_pvh(pa)   (&pv_table[pa_index(pa)])
  179 
  180 /*
  181  * Get PDEs and PTEs for user/kernel address space
  182  */
  183 #define pmap_pde(m, v)  (&((m)->pm_pdir[(vm_offset_t)(v) >> PDRSHIFT]))
  184 #define pdir_pde(m, v) (m[(vm_offset_t)(v) >> PDRSHIFT])
  185 
  186 #define pmap_pde_v(pte)         ((*(int *)pte & PG_V) != 0)
  187 #define pmap_pte_w(pte)         ((*(int *)pte & PG_W) != 0)
  188 #define pmap_pte_m(pte)         ((*(int *)pte & PG_M) != 0)
  189 #define pmap_pte_u(pte)         ((*(int *)pte & PG_A) != 0)
  190 #define pmap_pte_v(pte)         ((*(int *)pte & PG_V) != 0)
  191 
  192 #define pmap_pte_set_w(pte, v)  ((v) ? atomic_set_int((u_int *)(pte), PG_W) : \
  193     atomic_clear_int((u_int *)(pte), PG_W))
  194 #define pmap_pte_set_prot(pte, v) ((*(int *)pte &= ~PG_PROT), (*(int *)pte |= (v)))
  195 
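
/*
 * Illustrative sketch (stand-alone user-space example, not part of pmap.c):
 * how an i386 virtual address decomposes under the macros above, assuming
 * the classic non-PAE constants PAGE_SHIFT = 12, PDRSHIFT = 22 and
 * NPTEPG = 1024.  pa_index() applies the same PDRSHIFT to a physical
 * address to pick a 4 MB superpage's slot in pv_table.
 */
#include <stdint.h>
#include <stdio.h>

#define EX_PAGE_SHIFT   12              /* 4 KB pages */
#define EX_PDRSHIFT     22              /* 4 MB page directory entries */
#define EX_NPTEPG       1024            /* PTEs per page table page */

int
main(void)
{
        uint32_t va = 0xbfbfe123;       /* arbitrary example address */

        printf("PD index    = %u\n", va >> EX_PDRSHIFT);
        printf("PT index    = %u\n", (va >> EX_PAGE_SHIFT) & (EX_NPTEPG - 1));
        printf("page offset = %#x\n", va & ((1u << EX_PAGE_SHIFT) - 1));
        return (0);
}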
  196 struct pmap kernel_pmap_store;
  197 LIST_HEAD(pmaplist, pmap);
  198 static struct pmaplist allpmaps;
  199 static struct mtx allpmaps_lock;
  200 
  201 vm_offset_t virtual_avail;      /* VA of first avail page (after kernel bss) */
  202 vm_offset_t virtual_end;        /* VA of last avail page (end of kernel AS) */
  203 int pgeflag = 0;                /* PG_G or-in */
  204 int pseflag = 0;                /* PG_PS or-in */
  205 
  206 static int nkpt;
  207 vm_offset_t kernel_vm_end;
  208 extern u_int32_t KERNend;
  209 extern u_int32_t KPTphys;
  210 
  211 #ifdef PAE
  212 pt_entry_t pg_nx;
  213 static uma_zone_t pdptzone;
  214 #endif
  215 
  216 SYSCTL_NODE(_vm, OID_AUTO, pmap, CTLFLAG_RD, 0, "VM/pmap parameters");
  217 
  218 static int pg_ps_enabled;
  219 SYSCTL_INT(_vm_pmap, OID_AUTO, pg_ps_enabled, CTLFLAG_RDTUN, &pg_ps_enabled, 0,
  220     "Are large page mappings enabled?");
  221 
  222 /*
  223  * Data for the pv entry allocation mechanism
  224  */
  225 static int pv_entry_count = 0, pv_entry_max = 0, pv_entry_high_water = 0;
  226 static struct md_page *pv_table;
  227 static int shpgperproc = PMAP_SHPGPERPROC;
  228 
  229 struct pv_chunk *pv_chunkbase;          /* KVA block for pv_chunks */
  230 int pv_maxchunks;                       /* How many chunks we have KVA for */
  231 vm_offset_t pv_vafree;                  /* freelist stored in the PTE */
  232 
  233 /*
  234  * All those kernel PT submaps that BSD is so fond of
  235  */
  236 struct sysmaps {
  237         struct  mtx lock;
  238         pt_entry_t *CMAP1;
  239         pt_entry_t *CMAP2;
  240         caddr_t CADDR1;
  241         caddr_t CADDR2;
  242 };
  243 static struct sysmaps sysmaps_pcpu[MAXCPU];
  244 pt_entry_t *CMAP1 = 0, *KPTmap;
  245 static pt_entry_t *CMAP3;
  246 static pd_entry_t *KPTD;
  247 caddr_t CADDR1 = 0, ptvmmap = 0;
  248 static caddr_t CADDR3;
  249 struct msgbuf *msgbufp = 0;
  250 
  251 /*
  252  * Crashdump maps.
  253  */
  254 static caddr_t crashdumpmap;
  255 
  256 static pt_entry_t *PMAP1 = 0, *PMAP2;
  257 static pt_entry_t *PADDR1 = 0, *PADDR2;
  258 #ifdef SMP
  259 static int PMAP1cpu;
  260 static int PMAP1changedcpu;
  261 SYSCTL_INT(_debug, OID_AUTO, PMAP1changedcpu, CTLFLAG_RD, 
  262            &PMAP1changedcpu, 0,
  263            "Number of times pmap_pte_quick changed CPU with same PMAP1");
  264 #endif
  265 static int PMAP1changed;
  266 SYSCTL_INT(_debug, OID_AUTO, PMAP1changed, CTLFLAG_RD, 
  267            &PMAP1changed, 0,
  268            "Number of times pmap_pte_quick changed PMAP1");
  269 static int PMAP1unchanged;
  270 SYSCTL_INT(_debug, OID_AUTO, PMAP1unchanged, CTLFLAG_RD, 
  271            &PMAP1unchanged, 0,
  272            "Number of times pmap_pte_quick didn't change PMAP1");
  273 static struct mtx PMAP2mutex;
  274 
  275 static void     free_pv_entry(pmap_t pmap, pv_entry_t pv);
  276 static pv_entry_t get_pv_entry(pmap_t locked_pmap, int try);
  277 static void     pmap_pv_demote_pde(pmap_t pmap, vm_offset_t va, vm_paddr_t pa);
  278 static boolean_t pmap_pv_insert_pde(pmap_t pmap, vm_offset_t va, vm_paddr_t pa);
  279 static void     pmap_pv_promote_pde(pmap_t pmap, vm_offset_t va, vm_paddr_t pa);
  280 static void     pmap_pvh_free(struct md_page *pvh, pmap_t pmap, vm_offset_t va);
  281 static pv_entry_t pmap_pvh_remove(struct md_page *pvh, pmap_t pmap,
  282                     vm_offset_t va);
  283 
  284 static boolean_t pmap_demote_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t va);
  285 static boolean_t pmap_enter_pde(pmap_t pmap, vm_offset_t va, vm_page_t m,
  286     vm_prot_t prot);
  287 static vm_page_t pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va,
  288     vm_page_t m, vm_prot_t prot, vm_page_t mpte);
  289 static void pmap_insert_pt_page(pmap_t pmap, vm_page_t mpte);
  290 static void pmap_fill_ptp(pt_entry_t *firstpte, pt_entry_t newpte);
  291 static boolean_t pmap_is_modified_pvh(struct md_page *pvh);
  292 static void pmap_kenter_attr(vm_offset_t va, vm_paddr_t pa, int mode);
  293 static vm_page_t pmap_lookup_pt_page(pmap_t pmap, vm_offset_t va);
  294 static void pmap_pde_attr(pd_entry_t *pde, int cache_bits);
  295 static void pmap_promote_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t va);
  296 static boolean_t pmap_protect_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t sva,
  297     vm_prot_t prot);
  298 static void pmap_pte_attr(pt_entry_t *pte, int cache_bits);
  299 static void pmap_remove_pde(pmap_t pmap, pd_entry_t *pdq, vm_offset_t sva,
  300     vm_page_t *free);
  301 static int pmap_remove_pte(pmap_t pmap, pt_entry_t *ptq, vm_offset_t sva,
  302     vm_page_t *free);
  303 static void pmap_remove_pt_page(pmap_t pmap, vm_page_t mpte);
  304 static void pmap_remove_page(struct pmap *pmap, vm_offset_t va,
  305     vm_page_t *free);
  306 static void pmap_remove_entry(struct pmap *pmap, vm_page_t m,
  307                                         vm_offset_t va);
  308 static void pmap_insert_entry(pmap_t pmap, vm_offset_t va, vm_page_t m);
  309 static boolean_t pmap_try_insert_pv_entry(pmap_t pmap, vm_offset_t va,
  310     vm_page_t m);
  311 
  312 static vm_page_t pmap_allocpte(pmap_t pmap, vm_offset_t va, int flags);
  313 
  314 static vm_page_t _pmap_allocpte(pmap_t pmap, unsigned ptepindex, int flags);
  315 static int _pmap_unwire_pte_hold(pmap_t pmap, vm_page_t m, vm_page_t *free);
  316 static pt_entry_t *pmap_pte_quick(pmap_t pmap, vm_offset_t va);
  317 static void pmap_pte_release(pt_entry_t *pte);
  318 static int pmap_unuse_pt(pmap_t, vm_offset_t, vm_page_t *);
  319 static vm_offset_t pmap_kmem_choose(vm_offset_t addr);
  320 #ifdef PAE
  321 static void *pmap_pdpt_allocf(uma_zone_t zone, int bytes, u_int8_t *flags, int wait);
  322 #endif
  323 
  324 CTASSERT(1 << PDESHIFT == sizeof(pd_entry_t));
  325 CTASSERT(1 << PTESHIFT == sizeof(pt_entry_t));
  326 
  327 /*
  328  * If you get an error here, then you set KVA_PAGES wrong! See the
  329  * description of KVA_PAGES in sys/i386/include/pmap.h. It must be
   330  * a multiple of 4 for a normal kernel, or a multiple of 8 for a PAE kernel.
  331  */
  332 CTASSERT(KERNBASE % (1 << 24) == 0);
  333 
  334 /*
  335  * Move the kernel virtual free pointer to the next
  336  * 4MB.  This is used to help improve performance
  337  * by using a large (4MB) page for much of the kernel
  338  * (.text, .data, .bss)
  339  */
  340 static vm_offset_t
  341 pmap_kmem_choose(vm_offset_t addr)
  342 {
  343         vm_offset_t newaddr = addr;
  344 
  345 #ifndef DISABLE_PSE
  346         if (cpu_feature & CPUID_PSE)
  347                 newaddr = (addr + PDRMASK) & ~PDRMASK;
  348 #endif
  349         return newaddr;
  350 }
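
/*
 * Illustrative sketch (stand-alone user-space example, not part of pmap.c):
 * the round-up that pmap_kmem_choose() performs when PSE is available,
 * assuming NBPDR = 4 MB and PDRMASK = NBPDR - 1.
 */
#include <stdint.h>
#include <stdio.h>

#define EX_NBPDR        (1u << 22)      /* 4 MB */
#define EX_PDRMASK      (EX_NBPDR - 1)

int
main(void)
{
        uint32_t addr = 0x00c12345;     /* just above 12 MB */

        /* Round up to the next 4 MB boundary, as the PSE case above does. */
        printf("%#x rounds up to %#x\n", addr,
            (addr + EX_PDRMASK) & ~EX_PDRMASK);
        return (0);
}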
  351 
  352 /*
  353  *      Bootstrap the system enough to run with virtual memory.
  354  *
  355  *      On the i386 this is called after mapping has already been enabled
  356  *      and just syncs the pmap module with what has already been done.
  357  *      [We can't call it easily with mapping off since the kernel is not
  358  *      mapped with PA == VA, hence we would have to relocate every address
  359  *      from the linked base (virtual) address "KERNBASE" to the actual
  360  *      (physical) address starting relative to 0]
  361  */
  362 void
  363 pmap_bootstrap(vm_paddr_t firstaddr)
  364 {
  365         vm_offset_t va;
  366         pt_entry_t *pte, *unused;
  367         struct sysmaps *sysmaps;
  368         int i;
  369 
  370         /*
  371          * XXX The calculation of virtual_avail is wrong. It's NKPT*PAGE_SIZE too
  372          * large. It should instead be correctly calculated in locore.s and
  373          * not based on 'first' (which is a physical address, not a virtual
  374          * address, for the start of unused physical memory). The kernel
  375          * page tables are NOT double mapped and thus should not be included
  376          * in this calculation.
  377          */
  378         virtual_avail = (vm_offset_t) KERNBASE + firstaddr;
  379         virtual_avail = pmap_kmem_choose(virtual_avail);
  380 
  381         virtual_end = VM_MAX_KERNEL_ADDRESS;
  382 
  383         /*
  384          * Initialize the kernel pmap (which is statically allocated).
  385          */
  386         PMAP_LOCK_INIT(kernel_pmap);
  387         kernel_pmap->pm_pdir = (pd_entry_t *) (KERNBASE + (u_int)IdlePTD);
  388 #ifdef PAE
  389         kernel_pmap->pm_pdpt = (pdpt_entry_t *) (KERNBASE + (u_int)IdlePDPT);
  390 #endif
  391         kernel_pmap->pm_root = NULL;
  392         kernel_pmap->pm_active = -1;    /* don't allow deactivation */
  393         TAILQ_INIT(&kernel_pmap->pm_pvchunk);
  394         LIST_INIT(&allpmaps);
  395         mtx_init(&allpmaps_lock, "allpmaps", NULL, MTX_SPIN);
  396         mtx_lock_spin(&allpmaps_lock);
  397         LIST_INSERT_HEAD(&allpmaps, kernel_pmap, pm_list);
  398         mtx_unlock_spin(&allpmaps_lock);
  399         nkpt = NKPT;
  400 
  401         /*
  402          * Reserve some special page table entries/VA space for temporary
  403          * mapping of pages.
  404          */
  405 #define SYSMAP(c, p, v, n)      \
  406         v = (c)va; va += ((n)*PAGE_SIZE); p = pte; pte += (n);
  407 
  408         va = virtual_avail;
  409         pte = vtopte(va);
  410 
  411         /*
  412          * CMAP1/CMAP2 are used for zeroing and copying pages.
  413          * CMAP3 is used for the idle process page zeroing.
  414          */
  415         for (i = 0; i < MAXCPU; i++) {
  416                 sysmaps = &sysmaps_pcpu[i];
  417                 mtx_init(&sysmaps->lock, "SYSMAPS", NULL, MTX_DEF);
  418                 SYSMAP(caddr_t, sysmaps->CMAP1, sysmaps->CADDR1, 1)
  419                 SYSMAP(caddr_t, sysmaps->CMAP2, sysmaps->CADDR2, 1)
  420         }
  421         SYSMAP(caddr_t, CMAP1, CADDR1, 1)
  422         SYSMAP(caddr_t, CMAP3, CADDR3, 1)
  423         *CMAP3 = 0;
  424 
  425         /*
  426          * Crashdump maps.
  427          */
  428         SYSMAP(caddr_t, unused, crashdumpmap, MAXDUMPPGS)
  429 
  430         /*
  431          * ptvmmap is used for reading arbitrary physical pages via /dev/mem.
  432          */
  433         SYSMAP(caddr_t, unused, ptvmmap, 1)
  434 
  435         /*
  436          * msgbufp is used to map the system message buffer.
  437          */
  438         SYSMAP(struct msgbuf *, unused, msgbufp, atop(round_page(MSGBUF_SIZE)))
  439 
  440         /*
  441          * KPTmap is used by pmap_kextract().
  442          */
  443         SYSMAP(pt_entry_t *, KPTD, KPTmap, KVA_PAGES)
  444 
  445         for (i = 0; i < NKPT; i++)
  446                 KPTD[i] = (KPTphys + (i << PAGE_SHIFT)) | PG_RW | PG_V;
  447 
  448         /*
  449          * Adjust the start of the KPTD and KPTmap so that the implementation
  450          * of pmap_kextract() and pmap_growkernel() can be made simpler.
  451          */
  452         KPTD -= KPTDI;
  453         KPTmap -= i386_btop(KPTDI << PDRSHIFT);
  454 
  455         /*
  456          * ptemap is used for pmap_pte_quick
  457          */
  458         SYSMAP(pt_entry_t *, PMAP1, PADDR1, 1);
  459         SYSMAP(pt_entry_t *, PMAP2, PADDR2, 1);
  460 
  461         mtx_init(&PMAP2mutex, "PMAP2", NULL, MTX_DEF);
  462 
  463         virtual_avail = va;
  464 
  465         *CMAP1 = 0;
  466 
  467         /*
  468          * Leave in place an identity mapping (virt == phys) for the low 1 MB
  469          * physical memory region that is used by the ACPI wakeup code.  This
  470          * mapping must not have PG_G set. 
  471          */
  472 #ifdef XBOX
   473 /* FIXME: This is gross, but needed for the XBOX. Since we are at such
   474  * an early stage, we cannot yet neatly map video memory ... :-(
  475          * Better fixes are very welcome! */
  476         if (!arch_i386_is_xbox)
  477 #endif
  478         for (i = 1; i < NKPT; i++)
  479                 PTD[i] = 0;
  480 
  481         /* Initialize the PAT MSR if present. */
  482         pmap_init_pat();
  483 
  484         /* Turn on PG_G on kernel page(s) */
  485         pmap_set_pg();
  486 }
  487 
  488 /*
  489  * Setup the PAT MSR.
  490  */
  491 void
  492 pmap_init_pat(void)
  493 {
  494         uint64_t pat_msr;
  495 
  496         /* Bail if this CPU doesn't implement PAT. */
  497         if (!(cpu_feature & CPUID_PAT))
  498                 return;
  499 
  500 #ifdef PAT_WORKS
  501         /*
  502          * Leave the indices 0-3 at the default of WB, WT, UC, and UC-.
  503          * Program 4 and 5 as WP and WC.
  504          * Leave 6 and 7 as UC and UC-.
  505          */
  506         pat_msr = rdmsr(MSR_PAT);
  507         pat_msr &= ~(PAT_MASK(4) | PAT_MASK(5));
  508         pat_msr |= PAT_VALUE(4, PAT_WRITE_PROTECTED) |
  509             PAT_VALUE(5, PAT_WRITE_COMBINING);
  510 #else
  511         /*
  512          * Due to some Intel errata, we can only safely use the lower 4
  513          * PAT entries.  Thus, just replace PAT Index 2 with WC instead
  514          * of UC-.
  515          *
  516          *   Intel Pentium III Processor Specification Update
  517          * Errata E.27 (Upper Four PAT Entries Not Usable With Mode B
  518          * or Mode C Paging)
  519          *
  520          *   Intel Pentium IV  Processor Specification Update
  521          * Errata N46 (PAT Index MSB May Be Calculated Incorrectly)
  522          */
  523         pat_msr = rdmsr(MSR_PAT);
  524         pat_msr &= ~PAT_MASK(2);
  525         pat_msr |= PAT_VALUE(2, PAT_WRITE_COMBINING);
  526 #endif
  527         wrmsr(MSR_PAT, pat_msr);
  528 }
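
/*
 * Illustrative sketch (stand-alone user-space example, not part of pmap.c):
 * how the 64-bit PAT MSR packs eight memory types, one per byte, and what
 * the !PAT_WORKS branch above does to index 2.  The type encodings used
 * here (UC = 0x00, WC = 0x01, WT = 0x04, WP = 0x05, WB = 0x06, UC- = 0x07)
 * and the power-on default value are the architectural ones.
 */
#include <stdint.h>
#include <stdio.h>

#define EX_PAT_MASK(i)          (0xffULL << ((i) * 8))
#define EX_PAT_VALUE(i, v)      ((uint64_t)(v) << ((i) * 8))
#define EX_PAT_WRITE_COMBINING  0x01

int
main(void)
{
        /* Power-on default: WB, WT, UC-, UC in entries 0-3 and again 4-7. */
        uint64_t pat_msr = 0x0007040600070406ULL;

        /* Replace index 2 (UC-) with WC, as pmap_init_pat() does above. */
        pat_msr &= ~EX_PAT_MASK(2);
        pat_msr |= EX_PAT_VALUE(2, EX_PAT_WRITE_COMBINING);
        printf("new PAT MSR = %#018llx\n", (unsigned long long)pat_msr);
        return (0);
}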
  529 
  530 /*
  531  * Set PG_G on kernel pages.  Only the BSP calls this when SMP is turned on.
  532  */
  533 void
  534 pmap_set_pg(void)
  535 {
  536         pd_entry_t pdir;
  537         pt_entry_t *pte;
  538         vm_offset_t va, endva;
  539         int i; 
  540 
  541         if (pgeflag == 0)
  542                 return;
  543 
  544         i = KERNLOAD/NBPDR;
  545         endva = KERNBASE + KERNend;
  546 
  547         if (pseflag) {
  548                 va = KERNBASE + KERNLOAD;
  549                 while (va  < endva) {
  550                         pdir = kernel_pmap->pm_pdir[KPTDI+i];
  551                         pdir |= pgeflag;
  552                         kernel_pmap->pm_pdir[KPTDI+i] = PTD[KPTDI+i] = pdir;
  553                         invltlb();      /* Play it safe, invltlb() every time */
  554                         i++;
  555                         va += NBPDR;
  556                 }
  557         } else {
  558                 va = (vm_offset_t)btext;
  559                 while (va < endva) {
  560                         pte = vtopte(va);
  561                         if (*pte)
  562                                 *pte |= pgeflag;
  563                         invltlb();      /* Play it safe, invltlb() every time */
  564                         va += PAGE_SIZE;
  565                 }
  566         }
  567 }
  568 
  569 /*
  570  * Initialize a vm_page's machine-dependent fields.
  571  */
  572 void
  573 pmap_page_init(vm_page_t m)
  574 {
  575 
  576         TAILQ_INIT(&m->md.pv_list);
  577         m->md.pat_mode = PAT_WRITE_BACK;
  578 }
  579 
  580 #ifdef PAE
  581 static void *
  582 pmap_pdpt_allocf(uma_zone_t zone, int bytes, u_int8_t *flags, int wait)
  583 {
  584 
  585         /* Inform UMA that this allocator uses kernel_map/object. */
  586         *flags = UMA_SLAB_KERNEL;
  587         return ((void *)kmem_alloc_contig(kernel_map, bytes, wait, 0x0ULL,
  588             0xffffffffULL, 1, 0, VM_MEMATTR_DEFAULT));
  589 }
  590 #endif
  591 
  592 /*
   593  * Abuse the pte nodes for unmapped kva to thread a kva freelist through.
  594  * Requirements:
  595  *  - Must deal with pages in order to ensure that none of the PG_* bits
  596  *    are ever set, PG_V in particular.
  597  *  - Assumes we can write to ptes without pte_store() atomic ops, even
  598  *    on PAE systems.  This should be ok.
  599  *  - Assumes nothing will ever test these addresses for 0 to indicate
  600  *    no mapping instead of correctly checking PG_V.
  601  *  - Assumes a vm_offset_t will fit in a pte (true for i386).
  602  * Because PG_V is never set, there can be no mappings to invalidate.
  603  */
  604 static vm_offset_t
  605 pmap_ptelist_alloc(vm_offset_t *head)
  606 {
  607         pt_entry_t *pte;
  608         vm_offset_t va;
  609 
  610         va = *head;
  611         if (va == 0)
  612                 return (va);    /* Out of memory */
  613         pte = vtopte(va);
  614         *head = *pte;
  615         if (*head & PG_V)
  616                 panic("pmap_ptelist_alloc: va with PG_V set!");
  617         *pte = 0;
  618         return (va);
  619 }
  620 
  621 static void
  622 pmap_ptelist_free(vm_offset_t *head, vm_offset_t va)
  623 {
  624         pt_entry_t *pte;
  625 
  626         if (va & PG_V)
  627                 panic("pmap_ptelist_free: freeing va with PG_V set!");
  628         pte = vtopte(va);
  629         *pte = *head;           /* virtual! PG_V is 0 though */
  630         *head = va;
  631 }
  632 
  633 static void
  634 pmap_ptelist_init(vm_offset_t *head, void *base, int npages)
  635 {
  636         int i;
  637         vm_offset_t va;
  638 
  639         *head = 0;
  640         for (i = npages - 1; i >= 0; i--) {
  641                 va = (vm_offset_t)base + i * PAGE_SIZE;
  642                 pmap_ptelist_free(head, va);
  643         }
  644 }
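
/*
 * Illustrative sketch (stand-alone user-space example, not part of pmap.c):
 * the idea behind pmap_ptelist_alloc/free above -- a LIFO freelist threaded
 * through the otherwise-unused slots themselves, with 0 meaning "empty".
 * The array stands in for the PTE slots and all names are invented; the
 * real code finds a VA's slot with vtopte().
 */
#include <stdint.h>
#include <stdio.h>

#define EX_PAGE_SIZE    4096u
#define EX_NPAGES       4

static uint32_t ex_slot[EX_NPAGES];     /* stands in for the unused PTEs */
static uint32_t ex_head;                /* stands in for pv_vafree */

static void
ex_ptelist_free(uint32_t va)
{
        ex_slot[va / EX_PAGE_SIZE] = ex_head;
        ex_head = va;
}

static uint32_t
ex_ptelist_alloc(void)
{
        uint32_t va = ex_head;

        if (va != 0) {
                ex_head = ex_slot[va / EX_PAGE_SIZE];
                ex_slot[va / EX_PAGE_SIZE] = 0;
        }
        return (va);
}

int
main(void)
{
        uint32_t i;

        /* Initialize like pmap_ptelist_init(): push the pages in reverse. */
        for (i = EX_NPAGES - 1; i >= 1; i--)
                ex_ptelist_free(i * EX_PAGE_SIZE);
        printf("first alloc = %#x\n", ex_ptelist_alloc());
        return (0);
}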
  645 
  646 
  647 /*
  648  *      Initialize the pmap module.
  649  *      Called by vm_init, to initialize any structures that the pmap
  650  *      system needs to map virtual memory.
  651  */
  652 void
  653 pmap_init(void)
  654 {
  655         vm_page_t mpte;
  656         vm_size_t s;
  657         int i, pv_npg;
  658 
  659         /*
  660          * Initialize the vm page array entries for the kernel pmap's
  661          * page table pages.
  662          */ 
  663         for (i = 0; i < NKPT; i++) {
  664                 mpte = PHYS_TO_VM_PAGE(KPTphys + (i << PAGE_SHIFT));
  665                 KASSERT(mpte >= vm_page_array &&
  666                     mpte < &vm_page_array[vm_page_array_size],
  667                     ("pmap_init: page table page is out of range"));
  668                 mpte->pindex = i + KPTDI;
  669                 mpte->phys_addr = KPTphys + (i << PAGE_SHIFT);
  670         }
  671 
  672         /*
  673          * Initialize the address space (zone) for the pv entries.  Set a
  674          * high water mark so that the system can recover from excessive
  675          * numbers of pv entries.
  676          */
  677         TUNABLE_INT_FETCH("vm.pmap.shpgperproc", &shpgperproc);
  678         pv_entry_max = shpgperproc * maxproc + cnt.v_page_count;
  679         TUNABLE_INT_FETCH("vm.pmap.pv_entries", &pv_entry_max);
  680         pv_entry_max = roundup(pv_entry_max, _NPCPV);
  681         pv_entry_high_water = 9 * (pv_entry_max / 10);
  682 
  683         /*
  684          * Are large page mappings enabled?
  685          */
  686         TUNABLE_INT_FETCH("vm.pmap.pg_ps_enabled", &pg_ps_enabled);
  687         if (pg_ps_enabled) {
  688                 KASSERT(MAXPAGESIZES > 1 && pagesizes[1] == 0,
  689                     ("pmap_init: can't assign to pagesizes[1]"));
  690                 pagesizes[1] = NBPDR;
  691         }
  692 
  693         /*
  694          * Calculate the size of the pv head table for superpages.
  695          */
  696         for (i = 0; phys_avail[i + 1]; i += 2);
  697         pv_npg = round_4mpage(phys_avail[(i - 2) + 1]) / NBPDR;
  698 
  699         /*
  700          * Allocate memory for the pv head table for superpages.
  701          */
  702         s = (vm_size_t)(pv_npg * sizeof(struct md_page));
  703         s = round_page(s);
  704         pv_table = (struct md_page *)kmem_alloc(kernel_map, s);
  705         for (i = 0; i < pv_npg; i++)
  706                 TAILQ_INIT(&pv_table[i].pv_list);
  707 
  708         pv_maxchunks = MAX(pv_entry_max / _NPCPV, maxproc);
  709         pv_chunkbase = (struct pv_chunk *)kmem_alloc_nofault(kernel_map,
  710             PAGE_SIZE * pv_maxchunks);
  711         if (pv_chunkbase == NULL)
  712                 panic("pmap_init: not enough kvm for pv chunks");
  713         pmap_ptelist_init(&pv_vafree, pv_chunkbase, pv_maxchunks);
  714 #ifdef PAE
  715         pdptzone = uma_zcreate("PDPT", NPGPTD * sizeof(pdpt_entry_t), NULL,
  716             NULL, NULL, NULL, (NPGPTD * sizeof(pdpt_entry_t)) - 1,
  717             UMA_ZONE_VM | UMA_ZONE_NOFREE);
  718         uma_zone_set_allocf(pdptzone, pmap_pdpt_allocf);
  719 #endif
  720 }
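
/*
 * Illustrative sketch (stand-alone user-space example, not part of pmap.c):
 * the pv-entry sizing arithmetic performed by pmap_init() above.  The input
 * numbers are hypothetical, and the pv-entries-per-chunk value (336) is an
 * assumption made only for this example.
 */
#include <stdio.h>

#define EX_NPCPV        336             /* assumed pv entries per chunk */

static int
ex_roundup(int x, int y)
{
        return (((x + y - 1) / y) * y);
}

int
main(void)
{
        int shpgperproc = 200;          /* PMAP_SHPGPERPROC default */
        int maxproc = 2000;             /* hypothetical */
        int v_page_count = 250000;      /* hypothetical: ~1 GB of 4 KB pages */
        int pv_entry_max, pv_entry_high_water, pv_maxchunks;

        pv_entry_max = shpgperproc * maxproc + v_page_count;
        pv_entry_max = ex_roundup(pv_entry_max, EX_NPCPV);
        pv_entry_high_water = 9 * (pv_entry_max / 10);
        pv_maxchunks = pv_entry_max / EX_NPCPV;
        if (pv_maxchunks < maxproc)
                pv_maxchunks = maxproc;
        printf("max %d, high water %d, chunks %d\n",
            pv_entry_max, pv_entry_high_water, pv_maxchunks);
        return (0);
}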
  721 
  722 
  723 SYSCTL_INT(_vm_pmap, OID_AUTO, pv_entry_max, CTLFLAG_RD, &pv_entry_max, 0,
  724         "Max number of PV entries");
  725 SYSCTL_INT(_vm_pmap, OID_AUTO, shpgperproc, CTLFLAG_RD, &shpgperproc, 0,
  726         "Page share factor per proc");
  727 
  728 SYSCTL_NODE(_vm_pmap, OID_AUTO, pde, CTLFLAG_RD, 0,
  729     "2/4MB page mapping counters");
  730 
  731 static u_long pmap_pde_demotions;
  732 SYSCTL_ULONG(_vm_pmap_pde, OID_AUTO, demotions, CTLFLAG_RD,
  733     &pmap_pde_demotions, 0, "2/4MB page demotions");
  734 
  735 static u_long pmap_pde_mappings;
  736 SYSCTL_ULONG(_vm_pmap_pde, OID_AUTO, mappings, CTLFLAG_RD,
  737     &pmap_pde_mappings, 0, "2/4MB page mappings");
  738 
  739 static u_long pmap_pde_p_failures;
  740 SYSCTL_ULONG(_vm_pmap_pde, OID_AUTO, p_failures, CTLFLAG_RD,
  741     &pmap_pde_p_failures, 0, "2/4MB page promotion failures");
  742 
  743 static u_long pmap_pde_promotions;
  744 SYSCTL_ULONG(_vm_pmap_pde, OID_AUTO, promotions, CTLFLAG_RD,
  745     &pmap_pde_promotions, 0, "2/4MB page promotions");
  746 
  747 /***************************************************
  748  * Low level helper routines.....
  749  ***************************************************/
  750 
  751 /*
  752  * Determine the appropriate bits to set in a PTE or PDE for a specified
  753  * caching mode.
  754  */
  755 int
  756 pmap_cache_bits(int mode, boolean_t is_pde)
  757 {
  758         int pat_flag, pat_index, cache_bits;
  759 
  760         /* The PAT bit is different for PTE's and PDE's. */
  761         pat_flag = is_pde ? PG_PDE_PAT : PG_PTE_PAT;
  762 
  763         /* If we don't support PAT, map extended modes to older ones. */
  764         if (!(cpu_feature & CPUID_PAT)) {
  765                 switch (mode) {
  766                 case PAT_UNCACHEABLE:
  767                 case PAT_WRITE_THROUGH:
  768                 case PAT_WRITE_BACK:
  769                         break;
  770                 case PAT_UNCACHED:
  771                 case PAT_WRITE_COMBINING:
  772                 case PAT_WRITE_PROTECTED:
  773                         mode = PAT_UNCACHEABLE;
  774                         break;
  775                 }
  776         }
  777         
  778         /* Map the caching mode to a PAT index. */
  779         switch (mode) {
  780 #ifdef PAT_WORKS
  781         case PAT_UNCACHEABLE:
  782                 pat_index = 3;
  783                 break;
  784         case PAT_WRITE_THROUGH:
  785                 pat_index = 1;
  786                 break;
  787         case PAT_WRITE_BACK:
  788                 pat_index = 0;
  789                 break;
  790         case PAT_UNCACHED:
  791                 pat_index = 2;
  792                 break;
  793         case PAT_WRITE_COMBINING:
  794                 pat_index = 5;
  795                 break;
  796         case PAT_WRITE_PROTECTED:
  797                 pat_index = 4;
  798                 break;
  799 #else
  800         case PAT_UNCACHED:
  801         case PAT_UNCACHEABLE:
  802         case PAT_WRITE_PROTECTED:
  803                 pat_index = 3;
  804                 break;
  805         case PAT_WRITE_THROUGH:
  806                 pat_index = 1;
  807                 break;
  808         case PAT_WRITE_BACK:
  809                 pat_index = 0;
  810                 break;
  811         case PAT_WRITE_COMBINING:
  812                 pat_index = 2;
  813                 break;
  814 #endif
  815         default:
  816                 panic("Unknown caching mode %d\n", mode);
  817         }       
  818 
  819         /* Map the 3-bit index value into the PAT, PCD, and PWT bits. */
  820         cache_bits = 0;
  821         if (pat_index & 0x4)
  822                 cache_bits |= pat_flag;
  823         if (pat_index & 0x2)
  824                 cache_bits |= PG_NC_PCD;
  825         if (pat_index & 0x1)
  826                 cache_bits |= PG_NC_PWT;
  827         return (cache_bits);
  828 }
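
/*
 * Illustrative sketch (stand-alone user-space example, not part of pmap.c):
 * where the 3-bit PAT index chosen by pmap_cache_bits() lands in a PTE,
 * assuming the architectural bit positions PWT = bit 3, PCD = bit 4 and
 * PAT = bit 7 (the PAT bit moves to bit 12 in a 4 MB PDE).
 */
#include <stdint.h>
#include <stdio.h>

#define EX_PG_NC_PWT    0x008u
#define EX_PG_NC_PCD    0x010u
#define EX_PG_PTE_PAT   0x080u

static uint32_t
ex_cache_bits(int pat_index)
{
        uint32_t bits = 0;

        if (pat_index & 0x4)
                bits |= EX_PG_PTE_PAT;
        if (pat_index & 0x2)
                bits |= EX_PG_NC_PCD;
        if (pat_index & 0x1)
                bits |= EX_PG_NC_PWT;
        return (bits);
}

int
main(void)
{
        /* Index 2 is write-combining in the !PAT_WORKS layout above. */
        printf("WC PTE bits = %#x\n", ex_cache_bits(2));  /* PCD only */
        printf("UC PTE bits = %#x\n", ex_cache_bits(3));  /* PCD | PWT */
        return (0);
}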
  829 #ifdef SMP
  830 /*
  831  * For SMP, these functions have to use the IPI mechanism for coherence.
  832  *
  833  * N.B.: Before calling any of the following TLB invalidation functions,
  834  * the calling processor must ensure that all stores updating a non-
  835  * kernel page table are globally performed.  Otherwise, another
  836  * processor could cache an old, pre-update entry without being
  837  * invalidated.  This can happen one of two ways: (1) The pmap becomes
  838  * active on another processor after its pm_active field is checked by
  839  * one of the following functions but before a store updating the page
  840  * table is globally performed. (2) The pmap becomes active on another
  841  * processor before its pm_active field is checked but due to
   842  * speculative loads one of the following functions still reads the
  843  * pmap as inactive on the other processor.
  844  * 
  845  * The kernel page table is exempt because its pm_active field is
  846  * immutable.  The kernel page table is always active on every
  847  * processor.
  848  */
  849 void
  850 pmap_invalidate_page(pmap_t pmap, vm_offset_t va)
  851 {
  852         u_int cpumask;
  853         u_int other_cpus;
  854 
  855         sched_pin();
  856         if (pmap == kernel_pmap || pmap->pm_active == all_cpus) {
  857                 invlpg(va);
  858                 smp_invlpg(va);
  859         } else {
  860                 cpumask = PCPU_GET(cpumask);
  861                 other_cpus = PCPU_GET(other_cpus);
  862                 if (pmap->pm_active & cpumask)
  863                         invlpg(va);
  864                 if (pmap->pm_active & other_cpus)
  865                         smp_masked_invlpg(pmap->pm_active & other_cpus, va);
  866         }
  867         sched_unpin();
  868 }
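
/*
 * Illustrative sketch (stand-alone user-space example, not part of pmap.c):
 * the decision the SMP invalidation routines above make with the pm_active
 * CPU mask.  The masks and names here are invented and no IPIs are sent;
 * this only shows which case each mask value falls into.
 */
#include <stdio.h>

static void
ex_shootdown(unsigned pm_active, unsigned self, unsigned all_cpus)
{
        unsigned other_cpus = all_cpus & ~self;

        if (pm_active == all_cpus) {
                printf("invalidate locally and IPI every other CPU\n");
        } else {
                if (pm_active & self)
                        printf("invalidate locally\n");
                if (pm_active & other_cpus)
                        printf("IPI only CPUs in mask %#x\n",
                            pm_active & other_cpus);
        }
}

int
main(void)
{
        /* CPU 0 runs this; the pmap is active on CPUs 0 and 2 of 4. */
        ex_shootdown(0x5, 0x1, 0xf);
        return (0);
}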
  869 
  870 void
  871 pmap_invalidate_range(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
  872 {
  873         u_int cpumask;
  874         u_int other_cpus;
  875         vm_offset_t addr;
  876 
  877         sched_pin();
  878         if (pmap == kernel_pmap || pmap->pm_active == all_cpus) {
  879                 for (addr = sva; addr < eva; addr += PAGE_SIZE)
  880                         invlpg(addr);
  881                 smp_invlpg_range(sva, eva);
  882         } else {
  883                 cpumask = PCPU_GET(cpumask);
  884                 other_cpus = PCPU_GET(other_cpus);
  885                 if (pmap->pm_active & cpumask)
  886                         for (addr = sva; addr < eva; addr += PAGE_SIZE)
  887                                 invlpg(addr);
  888                 if (pmap->pm_active & other_cpus)
  889                         smp_masked_invlpg_range(pmap->pm_active & other_cpus,
  890                             sva, eva);
  891         }
  892         sched_unpin();
  893 }
  894 
  895 void
  896 pmap_invalidate_all(pmap_t pmap)
  897 {
  898         u_int cpumask;
  899         u_int other_cpus;
  900 
  901         sched_pin();
  902         if (pmap == kernel_pmap || pmap->pm_active == all_cpus) {
  903                 invltlb();
  904                 smp_invltlb();
  905         } else {
  906                 cpumask = PCPU_GET(cpumask);
  907                 other_cpus = PCPU_GET(other_cpus);
  908                 if (pmap->pm_active & cpumask)
  909                         invltlb();
  910                 if (pmap->pm_active & other_cpus)
  911                         smp_masked_invltlb(pmap->pm_active & other_cpus);
  912         }
  913         sched_unpin();
  914 }
  915 
  916 void
  917 pmap_invalidate_cache(void)
  918 {
  919 
  920         sched_pin();
  921         wbinvd();
  922         smp_cache_flush();
  923         sched_unpin();
  924 }
  925 #else /* !SMP */
  926 /*
  927  * Normal, non-SMP, 486+ invalidation functions.
  928  * We inline these within pmap.c for speed.
  929  */
  930 PMAP_INLINE void
  931 pmap_invalidate_page(pmap_t pmap, vm_offset_t va)
  932 {
  933 
  934         if (pmap == kernel_pmap || pmap->pm_active)
  935                 invlpg(va);
  936 }
  937 
  938 PMAP_INLINE void
  939 pmap_invalidate_range(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
  940 {
  941         vm_offset_t addr;
  942 
  943         if (pmap == kernel_pmap || pmap->pm_active)
  944                 for (addr = sva; addr < eva; addr += PAGE_SIZE)
  945                         invlpg(addr);
  946 }
  947 
  948 PMAP_INLINE void
  949 pmap_invalidate_all(pmap_t pmap)
  950 {
  951 
  952         if (pmap == kernel_pmap || pmap->pm_active)
  953                 invltlb();
  954 }
  955 
  956 PMAP_INLINE void
  957 pmap_invalidate_cache(void)
  958 {
  959 
  960         wbinvd();
  961 }
  962 #endif /* !SMP */
  963 
  964 void
  965 pmap_invalidate_cache_range(vm_offset_t sva, vm_offset_t eva)
  966 {
  967 
  968         KASSERT((sva & PAGE_MASK) == 0,
  969             ("pmap_invalidate_cache_range: sva not page-aligned"));
  970         KASSERT((eva & PAGE_MASK) == 0,
  971             ("pmap_invalidate_cache_range: eva not page-aligned"));
  972 
  973         if (cpu_feature & CPUID_SS)
  974                 ; /* If "Self Snoop" is supported, do nothing. */
  975         else if (cpu_feature & CPUID_CLFSH) {
  976 
  977                 /*
   978                  * Otherwise, do a per-cache-line flush.  Use the mfence
   979                  * instruction to ensure that previous stores are
   980                  * included in the write-back.  The processor
   981                  * propagates the flush to other processors in the cache
  982                  * coherence domain.
  983                  */
  984                 mfence();
  985                 for (; sva < eva; sva += cpu_clflush_line_size)
  986                         clflush(sva);
  987                 mfence();
  988         } else {
  989 
  990                 /*
  991                  * No targeted cache flush methods are supported by CPU,
  992                  * globally invalidate cache as a last resort.
  993                  */
  994                 pmap_invalidate_cache();
  995         }
  996 }
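
/*
 * Illustrative sketch (stand-alone user-space example, not part of pmap.c):
 * flushing a buffer one cache line at a time, as the CLFLUSH branch above
 * does, using the SSE2 intrinsics.  The 64-byte line size is assumed here;
 * the kernel reads the real value from CPUID (cpu_clflush_line_size).
 */
#include <emmintrin.h>
#include <stddef.h>

#define EX_LINE_SIZE    64u

static void
ex_flush_range(const char *p, size_t len)
{
        const char *end = p + len;

        _mm_mfence();                   /* order prior stores first */
        for (; p < end; p += EX_LINE_SIZE)
                _mm_clflush(p);
        _mm_mfence();
}

int
main(void)
{
        static char buf[4096];

        buf[0] = 1;
        ex_flush_range(buf, sizeof(buf));
        return (0);
}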
  997 
  998 /*
  999  * Are we current address space or kernel?  N.B. We return FALSE when
 1000  * a pmap's page table is in use because a kernel thread is borrowing
 1001  * it.  The borrowed page table can change spontaneously, making any
 1002  * dependence on its continued use subject to a race condition.
 1003  */
 1004 static __inline int
 1005 pmap_is_current(pmap_t pmap)
 1006 {
 1007 
 1008         return (pmap == kernel_pmap ||
 1009                 (pmap == vmspace_pmap(curthread->td_proc->p_vmspace) &&
 1010             (pmap->pm_pdir[PTDPTDI] & PG_FRAME) == (PTDpde[0] & PG_FRAME)));
 1011 }
 1012 
 1013 /*
 1014  * If the given pmap is not the current or kernel pmap, the returned pte must
 1015  * be released by passing it to pmap_pte_release().
 1016  */
 1017 pt_entry_t *
 1018 pmap_pte(pmap_t pmap, vm_offset_t va)
 1019 {
 1020         pd_entry_t newpf;
 1021         pd_entry_t *pde;
 1022 
 1023         pde = pmap_pde(pmap, va);
 1024         if (*pde & PG_PS)
 1025                 return (pde);
 1026         if (*pde != 0) {
 1027                 /* are we current address space or kernel? */
 1028                 if (pmap_is_current(pmap))
 1029                         return (vtopte(va));
 1030                 mtx_lock(&PMAP2mutex);
 1031                 newpf = *pde & PG_FRAME;
 1032                 if ((*PMAP2 & PG_FRAME) != newpf) {
 1033                         *PMAP2 = newpf | PG_RW | PG_V | PG_A | PG_M;
 1034                         pmap_invalidate_page(kernel_pmap, (vm_offset_t)PADDR2);
 1035                 }
 1036                 return (PADDR2 + (i386_btop(va) & (NPTEPG - 1)));
 1037         }
 1038         return (0);
 1039 }
 1040 
 1041 /*
 1042  * Releases a pte that was obtained from pmap_pte().  Be prepared for the pte
 1043  * being NULL.
 1044  */
 1045 static __inline void
 1046 pmap_pte_release(pt_entry_t *pte)
 1047 {
 1048 
 1049         if ((pt_entry_t *)((vm_offset_t)pte & ~PAGE_MASK) == PADDR2)
 1050                 mtx_unlock(&PMAP2mutex);
 1051 }
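
/*
 * Illustrative usage sketch (hypothetical caller, not in the original file):
 * the calling convention the comments above describe.  pmap_pte() may hand
 * back a PADDR2-based temporary mapping for a foreign pmap, so every lookup
 * is paired with pmap_pte_release(); the surrounding PMAP_LOCK shown here
 * follows the pattern used by pmap_extract() below and is an assumption,
 * not a complete recipe.
 */
static int
ex_pte_is_valid(pmap_t pmap, vm_offset_t va)
{
        pt_entry_t *pte;
        int valid;

        PMAP_LOCK(pmap);
        pte = pmap_pte(pmap, va);
        valid = (pte != NULL && (*pte & PG_V) != 0);
        pmap_pte_release(pte);          /* harmless if pte is NULL or local */
        PMAP_UNLOCK(pmap);
        return (valid);
}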
 1052 
 1053 static __inline void
 1054 invlcaddr(void *caddr)
 1055 {
 1056 
 1057         invlpg((u_int)caddr);
 1058 }
 1059 
 1060 /*
 1061  * Super fast pmap_pte routine best used when scanning
 1062  * the pv lists.  This eliminates many coarse-grained
 1063  * invltlb calls.  Note that many of the pv list
 1064  * scans are across different pmaps.  It is very wasteful
 1065  * to do an entire invltlb for checking a single mapping.
 1066  *
 1067  * If the given pmap is not the current pmap, vm_page_queue_mtx
 1068  * must be held and curthread pinned to a CPU.
 1069  */
 1070 static pt_entry_t *
 1071 pmap_pte_quick(pmap_t pmap, vm_offset_t va)
 1072 {
 1073         pd_entry_t newpf;
 1074         pd_entry_t *pde;
 1075 
 1076         pde = pmap_pde(pmap, va);
 1077         if (*pde & PG_PS)
 1078                 return (pde);
 1079         if (*pde != 0) {
 1080                 /* are we current address space or kernel? */
 1081                 if (pmap_is_current(pmap))
 1082                         return (vtopte(va));
 1083                 mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 1084                 KASSERT(curthread->td_pinned > 0, ("curthread not pinned"));
 1085                 newpf = *pde & PG_FRAME;
 1086                 if ((*PMAP1 & PG_FRAME) != newpf) {
 1087                         *PMAP1 = newpf | PG_RW | PG_V | PG_A | PG_M;
 1088 #ifdef SMP
 1089                         PMAP1cpu = PCPU_GET(cpuid);
 1090 #endif
 1091                         invlcaddr(PADDR1);
 1092                         PMAP1changed++;
 1093                 } else
 1094 #ifdef SMP
 1095                 if (PMAP1cpu != PCPU_GET(cpuid)) {
 1096                         PMAP1cpu = PCPU_GET(cpuid);
 1097                         invlcaddr(PADDR1);
 1098                         PMAP1changedcpu++;
 1099                 } else
 1100 #endif
 1101                         PMAP1unchanged++;
 1102                 return (PADDR1 + (i386_btop(va) & (NPTEPG - 1)));
 1103         }
 1104         return (0);
 1105 }
 1106 
 1107 /*
 1108  *      Routine:        pmap_extract
 1109  *      Function:
 1110  *              Extract the physical page address associated
 1111  *              with the given map/virtual_address pair.
 1112  */
 1113 vm_paddr_t 
 1114 pmap_extract(pmap_t pmap, vm_offset_t va)
 1115 {
 1116         vm_paddr_t rtval;
 1117         pt_entry_t *pte;
 1118         pd_entry_t pde;
 1119 
 1120         rtval = 0;
 1121         PMAP_LOCK(pmap);
 1122         pde = pmap->pm_pdir[va >> PDRSHIFT];
 1123         if (pde != 0) {
 1124                 if ((pde & PG_PS) != 0)
 1125                         rtval = (pde & PG_PS_FRAME) | (va & PDRMASK);
 1126                 else {
 1127                         pte = pmap_pte(pmap, va);
 1128                         rtval = (*pte & PG_FRAME) | (va & PAGE_MASK);
 1129                         pmap_pte_release(pte);
 1130                 }
 1131         }
 1132         PMAP_UNLOCK(pmap);
 1133         return (rtval);
 1134 }
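
/*
 * Illustrative sketch (stand-alone user-space example, not part of pmap.c):
 * the address arithmetic pmap_extract() performs, assuming the non-PAE
 * masks PG_FRAME = 0xfffff000, PG_PS_FRAME = 0xffc00000, PDRMASK = 0x3fffff
 * and PAGE_MASK = 0xfff.  The PTE/PDE values are hypothetical.
 */
#include <stdint.h>
#include <stdio.h>

int
main(void)
{
        uint32_t va = 0x0804a123;

        /* 4 KB mapping: frame from the PTE, low 12 bits from the VA. */
        uint32_t pte = 0x1234f067;      /* hypothetical PTE */
        printf("4K  pa = %#x\n", (pte & 0xfffff000u) | (va & 0xfffu));

        /* 4 MB mapping: frame from the PDE, low 22 bits from the VA. */
        uint32_t pde = 0x100000e3;      /* hypothetical PDE with PG_PS set */
        printf("4MB pa = %#x\n", (pde & 0xffc00000u) | (va & 0x3fffffu));
        return (0);
}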
 1135 
 1136 /*
 1137  *      Routine:        pmap_extract_and_hold
 1138  *      Function:
 1139  *              Atomically extract and hold the physical page
 1140  *              with the given pmap and virtual address pair
 1141  *              if that mapping permits the given protection.
 1142  */
 1143 vm_page_t
 1144 pmap_extract_and_hold(pmap_t pmap, vm_offset_t va, vm_prot_t prot)
 1145 {
 1146         pd_entry_t pde;
 1147         pt_entry_t pte;
 1148         vm_page_t m;
 1149 
 1150         m = NULL;
 1151         vm_page_lock_queues();
 1152         PMAP_LOCK(pmap);
 1153         pde = *pmap_pde(pmap, va);
 1154         if (pde != 0) {
 1155                 if (pde & PG_PS) {
 1156                         if ((pde & PG_RW) || (prot & VM_PROT_WRITE) == 0) {
 1157                                 m = PHYS_TO_VM_PAGE((pde & PG_PS_FRAME) |
 1158                                     (va & PDRMASK));
 1159                                 vm_page_hold(m);
 1160                         }
 1161                 } else {
 1162                         sched_pin();
 1163                         pte = *pmap_pte_quick(pmap, va);
 1164                         if (pte != 0 &&
 1165                             ((pte & PG_RW) || (prot & VM_PROT_WRITE) == 0)) {
 1166                                 m = PHYS_TO_VM_PAGE(pte & PG_FRAME);
 1167                                 vm_page_hold(m);
 1168                         }
 1169                         sched_unpin();
 1170                 }
 1171         }
 1172         vm_page_unlock_queues();
 1173         PMAP_UNLOCK(pmap);
 1174         return (m);
 1175 }
 1176 
 1177 /***************************************************
 1178  * Low level mapping routines.....
 1179  ***************************************************/
 1180 
 1181 /*
 1182  * Add a wired page to the kva.
 1183  * Note: not SMP coherent.
 1184  */
 1185 PMAP_INLINE void 
 1186 pmap_kenter(vm_offset_t va, vm_paddr_t pa)
 1187 {
 1188         pt_entry_t *pte;
 1189 
 1190         pte = vtopte(va);
 1191         pte_store(pte, pa | PG_RW | PG_V | pgeflag);
 1192 }
 1193 
 1194 static __inline void
 1195 pmap_kenter_attr(vm_offset_t va, vm_paddr_t pa, int mode)
 1196 {
 1197         pt_entry_t *pte;
 1198 
 1199         pte = vtopte(va);
 1200         pte_store(pte, pa | PG_RW | PG_V | pgeflag | pmap_cache_bits(mode, 0));
 1201 }
 1202 
 1203 /*
 1204  * Remove a page from the kernel pagetables.
 1205  * Note: not SMP coherent.
 1206  */
 1207 PMAP_INLINE void
 1208 pmap_kremove(vm_offset_t va)
 1209 {
 1210         pt_entry_t *pte;
 1211 
 1212         pte = vtopte(va);
 1213         pte_clear(pte);
 1214 }
 1215 
 1216 /*
 1217  *      Used to map a range of physical addresses into kernel
 1218  *      virtual address space.
 1219  *
 1220  *      The value passed in '*virt' is a suggested virtual address for
 1221  *      the mapping. Architectures which can support a direct-mapped
 1222  *      physical to virtual region can return the appropriate address
 1223  *      within that region, leaving '*virt' unchanged. Other
 1224  *      architectures should map the pages starting at '*virt' and
 1225  *      update '*virt' with the first usable address after the mapped
 1226  *      region.
 1227  */
 1228 vm_offset_t
 1229 pmap_map(vm_offset_t *virt, vm_paddr_t start, vm_paddr_t end, int prot)
 1230 {
 1231         vm_offset_t va, sva;
 1232 
 1233         va = sva = *virt;
 1234         while (start < end) {
 1235                 pmap_kenter(va, start);
 1236                 va += PAGE_SIZE;
 1237                 start += PAGE_SIZE;
 1238         }
 1239         pmap_invalidate_range(kernel_pmap, sva, va);
 1240         *virt = va;
 1241         return (sva);
 1242 }
 1243 
 1244 
 1245 /*
  1246  * Add a list of wired pages to the kva.
  1247  * This routine is only used for temporary
 1248  * kernel mappings that do not need to have
 1249  * page modification or references recorded.
 1250  * Note that old mappings are simply written
 1251  * over.  The page *must* be wired.
 1252  * Note: SMP coherent.  Uses a ranged shootdown IPI.
 1253  */
 1254 void
 1255 pmap_qenter(vm_offset_t sva, vm_page_t *ma, int count)
 1256 {
 1257         pt_entry_t *endpte, oldpte, *pte;
 1258 
 1259         oldpte = 0;
 1260         pte = vtopte(sva);
 1261         endpte = pte + count;
 1262         while (pte < endpte) {
 1263                 oldpte |= *pte;
 1264                 pte_store(pte, VM_PAGE_TO_PHYS(*ma) | pgeflag |
 1265                     pmap_cache_bits((*ma)->md.pat_mode, 0) | PG_RW | PG_V);
 1266                 pte++;
 1267                 ma++;
 1268         }
 1269         if ((oldpte & PG_V) != 0)
 1270                 pmap_invalidate_range(kernel_pmap, sva, sva + count *
 1271                     PAGE_SIZE);
 1272 }
 1273 
 1274 /*
 1275  * This routine tears out page mappings from the
 1276  * kernel -- it is meant only for temporary mappings.
 1277  * Note: SMP coherent.  Uses a ranged shootdown IPI.
 1278  */
 1279 void
 1280 pmap_qremove(vm_offset_t sva, int count)
 1281 {
 1282         vm_offset_t va;
 1283 
 1284         va = sva;
 1285         while (count-- > 0) {
 1286                 pmap_kremove(va);
 1287                 va += PAGE_SIZE;
 1288         }
 1289         pmap_invalidate_range(kernel_pmap, sva, va);
 1290 }
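
/*
 * Illustrative usage sketch (hypothetical caller, not in the original file):
 * the temporary mapping pattern that pmap_qenter()/pmap_qremove() support.
 * "kva" is assumed to be kernel VA already reserved by the caller (for
 * example with kmem_alloc_nofault()) and "pages" an array of wired pages;
 * both names are invented for this example.
 */
static void
ex_with_temporary_mapping(vm_offset_t kva, vm_page_t *pages, int npages)
{
        /* Map the wired pages; one ranged shootdown covers all of them. */
        pmap_qenter(kva, pages, npages);

        /* ... access the pages through kva ... */

        /* Tear the mappings down again; old mappings are simply discarded. */
        pmap_qremove(kva, npages);
}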
 1291 
 1292 /***************************************************
 1293  * Page table page management routines.....
 1294  ***************************************************/
 1295 static __inline void
 1296 pmap_free_zero_pages(vm_page_t free)
 1297 {
 1298         vm_page_t m;
 1299 
 1300         while (free != NULL) {
 1301                 m = free;
 1302                 free = m->right;
 1303                 /* Preserve the page's PG_ZERO setting. */
 1304                 vm_page_free_toq(m);
 1305         }
 1306 }
 1307 
 1308 /*
 1309  * Schedule the specified unused page table page to be freed.  Specifically,
 1310  * add the page to the specified list of pages that will be released to the
 1311  * physical memory manager after the TLB has been updated.
 1312  */
 1313 static __inline void
 1314 pmap_add_delayed_free_list(vm_page_t m, vm_page_t *free, boolean_t set_PG_ZERO)
 1315 {
 1316 
 1317         if (set_PG_ZERO)
 1318                 m->flags |= PG_ZERO;
 1319         else
 1320                 m->flags &= ~PG_ZERO;
 1321         m->right = *free;
 1322         *free = m;
 1323 }
 1324 
 1325 /*
 1326  * Inserts the specified page table page into the specified pmap's collection
 1327  * of idle page table pages.  Each of a pmap's page table pages is responsible
 1328  * for mapping a distinct range of virtual addresses.  The pmap's collection is
 1329  * ordered by this virtual address range.
 1330  */
 1331 static void
 1332 pmap_insert_pt_page(pmap_t pmap, vm_page_t mpte)
 1333 {
 1334         vm_page_t root;
 1335 
 1336         PMAP_LOCK_ASSERT(pmap, MA_OWNED);
 1337         root = pmap->pm_root;
 1338         if (root == NULL) {
 1339                 mpte->left = NULL;
 1340                 mpte->right = NULL;
 1341         } else {
 1342                 root = vm_page_splay(mpte->pindex, root);
 1343                 if (mpte->pindex < root->pindex) {
 1344                         mpte->left = root->left;
 1345                         mpte->right = root;
 1346                         root->left = NULL;
 1347                 } else if (mpte->pindex == root->pindex)
 1348                         panic("pmap_insert_pt_page: pindex already inserted");
 1349                 else {
 1350                         mpte->right = root->right;
 1351                         mpte->left = root;
 1352                         root->right = NULL;
 1353                 }
 1354         }
 1355         pmap->pm_root = mpte;
 1356 }
 1357 
 1358 /*
 1359  * Looks for a page table page mapping the specified virtual address in the
 1360  * specified pmap's collection of idle page table pages.  Returns NULL if there
 1361  * is no page table page corresponding to the specified virtual address.
 1362  */
 1363 static vm_page_t
 1364 pmap_lookup_pt_page(pmap_t pmap, vm_offset_t va)
 1365 {
 1366         vm_page_t mpte;
 1367         vm_pindex_t pindex = va >> PDRSHIFT;
 1368 
 1369         PMAP_LOCK_ASSERT(pmap, MA_OWNED);
 1370         if ((mpte = pmap->pm_root) != NULL && mpte->pindex != pindex) {
 1371                 mpte = vm_page_splay(pindex, mpte);
 1372                 if ((pmap->pm_root = mpte)->pindex != pindex)
 1373                         mpte = NULL;
 1374         }
 1375         return (mpte);
 1376 }
 1377 
 1378 /*
 1379  * Removes the specified page table page from the specified pmap's collection
 1380  * of idle page table pages.  The specified page table page must be a member of
 1381  * the pmap's collection.
 1382  */
 1383 static void
 1384 pmap_remove_pt_page(pmap_t pmap, vm_page_t mpte)
 1385 {
 1386         vm_page_t root;
 1387 
 1388         PMAP_LOCK_ASSERT(pmap, MA_OWNED);
 1389         if (mpte != pmap->pm_root)
 1390                 vm_page_splay(mpte->pindex, pmap->pm_root);
 1391         if (mpte->left == NULL)
 1392                 root = mpte->right;
 1393         else {
 1394                 root = vm_page_splay(mpte->pindex, mpte->left);
 1395                 root->right = mpte->right;
 1396         }
 1397         pmap->pm_root = root;
 1398 }
 1399 
 1400 /*
  1401  * This routine decrements a page table page's wire count, and when
  1402  * the count drops to zero, the page is unmapped and freed.
 1403  */
 1404 static __inline int
 1405 pmap_unwire_pte_hold(pmap_t pmap, vm_page_t m, vm_page_t *free)
 1406 {
 1407 
 1408         --m->wire_count;
 1409         if (m->wire_count == 0)
 1410                 return _pmap_unwire_pte_hold(pmap, m, free);
 1411         else
 1412                 return 0;
 1413 }
 1414 
 1415 static int 
 1416 _pmap_unwire_pte_hold(pmap_t pmap, vm_page_t m, vm_page_t *free)
 1417 {
 1418         vm_offset_t pteva;
 1419 
 1420         /*
 1421          * unmap the page table page
 1422          */
 1423         pmap->pm_pdir[m->pindex] = 0;
 1424         --pmap->pm_stats.resident_count;
 1425 
 1426         /*
 1427          * This is a release store so that the ordinary store unmapping
 1428          * the page table page is globally performed before TLB shoot-
 1429          * down is begun.
 1430          */
 1431         atomic_subtract_rel_int(&cnt.v_wire_count, 1);
 1432 
 1433         /*
  1434          * Invalidate the page table page's mapping so that the
  1435          * removal takes effect immediately.
 1436          */
 1437         pteva = VM_MAXUSER_ADDRESS + i386_ptob(m->pindex);
 1438         pmap_invalidate_page(pmap, pteva);
 1439 
 1440         /* 
 1441          * Put page on a list so that it is released after
 1442          * *ALL* TLB shootdown is done
 1443          */
 1444         pmap_add_delayed_free_list(m, free, TRUE);
 1445 
 1446         return 1;
 1447 }
 1448 
 1449 /*
 1450  * After removing a page table entry, this routine is used to
 1451  * conditionally free the page, and manage the hold/wire counts.
 1452  */
 1453 static int
 1454 pmap_unuse_pt(pmap_t pmap, vm_offset_t va, vm_page_t *free)
 1455 {
 1456         pd_entry_t ptepde;
 1457         vm_page_t mpte;
 1458 
 1459         if (va >= VM_MAXUSER_ADDRESS)
 1460                 return 0;
 1461         ptepde = *pmap_pde(pmap, va);
 1462         mpte = PHYS_TO_VM_PAGE(ptepde & PG_FRAME);
 1463         return pmap_unwire_pte_hold(pmap, mpte, free);
 1464 }
 1465 
 1466 void
 1467 pmap_pinit0(pmap_t pmap)
 1468 {
 1469 
 1470         PMAP_LOCK_INIT(pmap);
 1471         pmap->pm_pdir = (pd_entry_t *)(KERNBASE + (vm_offset_t)IdlePTD);
 1472 #ifdef PAE
 1473         pmap->pm_pdpt = (pdpt_entry_t *)(KERNBASE + (vm_offset_t)IdlePDPT);
 1474 #endif
 1475         pmap->pm_root = NULL;
 1476         pmap->pm_active = 0;
 1477         PCPU_SET(curpmap, pmap);
 1478         TAILQ_INIT(&pmap->pm_pvchunk);
 1479         bzero(&pmap->pm_stats, sizeof pmap->pm_stats);
 1480         mtx_lock_spin(&allpmaps_lock);
 1481         LIST_INSERT_HEAD(&allpmaps, pmap, pm_list);
 1482         mtx_unlock_spin(&allpmaps_lock);
 1483 }
 1484 
 1485 /*
 1486  * Initialize a preallocated and zeroed pmap structure,
 1487  * such as one in a vmspace structure.
 1488  */
 1489 int
 1490 pmap_pinit(pmap_t pmap)
 1491 {
 1492         vm_page_t m, ptdpg[NPGPTD];
 1493         vm_paddr_t pa;
 1494         static int color;
 1495         int i;
 1496 
 1497         PMAP_LOCK_INIT(pmap);
 1498 
 1499         /*
 1500          * No need to allocate page table space yet but we do need a valid
 1501          * page directory table.
 1502          */
 1503         if (pmap->pm_pdir == NULL) {
 1504                 pmap->pm_pdir = (pd_entry_t *)kmem_alloc_nofault(kernel_map,
 1505                     NBPTD);
 1506 
 1507                 if (pmap->pm_pdir == NULL) {
 1508                         PMAP_LOCK_DESTROY(pmap);
 1509                         return (0);
 1510                 }
 1511 #ifdef PAE
 1512                 pmap->pm_pdpt = uma_zalloc(pdptzone, M_WAITOK | M_ZERO);
 1513                 KASSERT(((vm_offset_t)pmap->pm_pdpt &
 1514                     ((NPGPTD * sizeof(pdpt_entry_t)) - 1)) == 0,
 1515                     ("pmap_pinit: pdpt misaligned"));
 1516                 KASSERT(pmap_kextract((vm_offset_t)pmap->pm_pdpt) < (4ULL<<30),
 1517                     ("pmap_pinit: pdpt above 4g"));
 1518 #endif
 1519                 pmap->pm_root = NULL;
 1520         }
 1521         KASSERT(pmap->pm_root == NULL,
 1522             ("pmap_pinit: pmap has reserved page table page(s)"));
 1523 
 1524         /*
 1525          * allocate the page directory page(s)
 1526          */
 1527         for (i = 0; i < NPGPTD;) {
 1528                 m = vm_page_alloc(NULL, color++,
 1529                     VM_ALLOC_NORMAL | VM_ALLOC_NOOBJ | VM_ALLOC_WIRED |
 1530                     VM_ALLOC_ZERO);
 1531                 if (m == NULL)
 1532                         VM_WAIT;
 1533                 else {
 1534                         ptdpg[i++] = m;
 1535                 }
 1536         }
 1537 
 1538         pmap_qenter((vm_offset_t)pmap->pm_pdir, ptdpg, NPGPTD);
 1539 
 1540         for (i = 0; i < NPGPTD; i++) {
 1541                 if ((ptdpg[i]->flags & PG_ZERO) == 0)
 1542                         bzero(pmap->pm_pdir + (i * NPDEPG), PAGE_SIZE);
 1543         }
 1544 
 1545         mtx_lock_spin(&allpmaps_lock);
 1546         LIST_INSERT_HEAD(&allpmaps, pmap, pm_list);
 1547         mtx_unlock_spin(&allpmaps_lock);
 1548         /* Wire in kernel global address entries. */
 1549         bcopy(PTD + KPTDI, pmap->pm_pdir + KPTDI, nkpt * sizeof(pd_entry_t));
 1550 
 1551         /* install self-referential address mapping entry(s) */
 1552         for (i = 0; i < NPGPTD; i++) {
 1553                 pa = VM_PAGE_TO_PHYS(ptdpg[i]);
 1554                 pmap->pm_pdir[PTDPTDI + i] = pa | PG_V | PG_RW | PG_A | PG_M;
 1555 #ifdef PAE
 1556                 pmap->pm_pdpt[i] = pa | PG_V;
 1557 #endif
 1558         }
 1559 
 1560         pmap->pm_active = 0;
 1561         TAILQ_INIT(&pmap->pm_pvchunk);
 1562         bzero(&pmap->pm_stats, sizeof pmap->pm_stats);
 1563 
 1564         return (1);
 1565 }
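/*
 * Added note: the "self-referential" entries installed above make the page
 * directory page(s) double as page table pages for the PTDPTDI slot(s).
 * That recursion is what lets the rest of this file address a process's page
 * tables as a flat array of PTEs in the recursive-mapping region, without
 * explicitly mapping each page table page into kernel virtual address space.
 */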
 1566 
 1567 /*
 1568  * This routine is called when the page table page for the given index
 1569  * is not present; it allocates and maps a new page table page.
 1570  */
 1571 static vm_page_t
 1572 _pmap_allocpte(pmap_t pmap, unsigned ptepindex, int flags)
 1573 {
 1574         vm_paddr_t ptepa;
 1575         vm_page_t m;
 1576 
 1577         KASSERT((flags & (M_NOWAIT | M_WAITOK)) == M_NOWAIT ||
 1578             (flags & (M_NOWAIT | M_WAITOK)) == M_WAITOK,
 1579             ("_pmap_allocpte: flags is neither M_NOWAIT nor M_WAITOK"));
 1580 
 1581         /*
 1582          * Allocate a page table page.
 1583          */
 1584         if ((m = vm_page_alloc(NULL, ptepindex, VM_ALLOC_NOOBJ |
 1585             VM_ALLOC_WIRED | VM_ALLOC_ZERO)) == NULL) {
 1586                 if (flags & M_WAITOK) {
 1587                         PMAP_UNLOCK(pmap);
 1588                         vm_page_unlock_queues();
 1589                         VM_WAIT;
 1590                         vm_page_lock_queues();
 1591                         PMAP_LOCK(pmap);
 1592                 }
 1593 
 1594                 /*
 1595                  * Indicate the need to retry.  While waiting, the page table
 1596                  * page may have been allocated.
 1597                  */
 1598                 return (NULL);
 1599         }
 1600         if ((m->flags & PG_ZERO) == 0)
 1601                 pmap_zero_page(m);
 1602 
 1603         /*
 1604          * Map the page table page into the process address space by
 1605          * installing its physical address in the page directory.
 1606          */
 1607 
 1608         pmap->pm_stats.resident_count++;
 1609 
 1610         ptepa = VM_PAGE_TO_PHYS(m);
 1611         pmap->pm_pdir[ptepindex] =
 1612                 (pd_entry_t) (ptepa | PG_U | PG_RW | PG_V | PG_A | PG_M);
 1613 
 1614         return m;
 1615 }
 1616 
 1617 static vm_page_t
 1618 pmap_allocpte(pmap_t pmap, vm_offset_t va, int flags)
 1619 {
 1620         unsigned ptepindex;
 1621         pd_entry_t ptepa;
 1622         vm_page_t m;
 1623 
 1624         KASSERT((flags & (M_NOWAIT | M_WAITOK)) == M_NOWAIT ||
 1625             (flags & (M_NOWAIT | M_WAITOK)) == M_WAITOK,
 1626             ("pmap_allocpte: flags is neither M_NOWAIT nor M_WAITOK"));
 1627 
 1628         /*
 1629          * Calculate the page table page index.
 1630          */
 1631         ptepindex = va >> PDRSHIFT;
 1632 retry:
 1633         /*
 1634          * Get the page directory entry
 1635          */
 1636         ptepa = pmap->pm_pdir[ptepindex];
 1637 
 1638         /*
 1639          * This supports switching from a 4MB page to a
 1640          * normal 4K page.
 1641          */
 1642         if (ptepa & PG_PS) {
 1643                 (void)pmap_demote_pde(pmap, &pmap->pm_pdir[ptepindex], va);
 1644                 ptepa = pmap->pm_pdir[ptepindex];
 1645         }
 1646 
 1647         /*
 1648          * If the page table page is already mapped, just increment
 1649          * its wire count.
 1650          */
 1651         if (ptepa) {
 1652                 m = PHYS_TO_VM_PAGE(ptepa & PG_FRAME);
 1653                 m->wire_count++;
 1654         } else {
 1655                 /*
 1656                  * The page table page is not mapped or has been
 1657                  * deallocated; allocate a new one.
 1658                  */
 1659                 m = _pmap_allocpte(pmap, ptepindex, flags);
 1660                 if (m == NULL && (flags & M_WAITOK))
 1661                         goto retry;
 1662         }
 1663         return (m);
 1664 }
 1665 
 1666 
 1667 /***************************************************
 1668  * Pmap allocation/deallocation routines.
 1669  ***************************************************/
 1670 
 1671 #ifdef SMP
 1672 /*
 1673  * Deal with an SMP shootdown of other users of the pmap that we are
 1674  * trying to dispose of.  This can be a bit hairy.
 1675  */
 1676 static u_int *lazymask;
 1677 static u_int lazyptd;
 1678 static volatile u_int lazywait;
 1679 
 1680 void pmap_lazyfix_action(void);
 1681 
 1682 void
 1683 pmap_lazyfix_action(void)
 1684 {
 1685         u_int mymask = PCPU_GET(cpumask);
 1686 
 1687 #ifdef COUNT_IPIS
 1688         (*ipi_lazypmap_counts[PCPU_GET(cpuid)])++;
 1689 #endif
 1690         if (rcr3() == lazyptd)
 1691                 load_cr3(PCPU_GET(curpcb)->pcb_cr3);
 1692         atomic_clear_int(lazymask, mymask);
 1693         atomic_store_rel_int(&lazywait, 1);
 1694 }
 1695 
 1696 static void
 1697 pmap_lazyfix_self(u_int mymask)
 1698 {
 1699 
 1700         if (rcr3() == lazyptd)
 1701                 load_cr3(PCPU_GET(curpcb)->pcb_cr3);
 1702         atomic_clear_int(lazymask, mymask);
 1703 }
 1704 
 1705 
 1706 static void
 1707 pmap_lazyfix(pmap_t pmap)
 1708 {
 1709         u_int mymask;
 1710         u_int mask;
 1711         u_int spins;
 1712 
 1713         while ((mask = pmap->pm_active) != 0) {
 1714                 spins = 50000000;
 1715                 mask = mask & -mask;    /* Find least significant set bit */
 1716                 mtx_lock_spin(&smp_ipi_mtx);
 1717 #ifdef PAE
 1718                 lazyptd = vtophys(pmap->pm_pdpt);
 1719 #else
 1720                 lazyptd = vtophys(pmap->pm_pdir);
 1721 #endif
 1722                 mymask = PCPU_GET(cpumask);
 1723                 if (mask == mymask) {
 1724                         lazymask = &pmap->pm_active;
 1725                         pmap_lazyfix_self(mymask);
 1726                 } else {
 1727                         atomic_store_rel_int((u_int *)&lazymask,
 1728                             (u_int)&pmap->pm_active);
 1729                         atomic_store_rel_int(&lazywait, 0);
 1730                         ipi_selected(mask, IPI_LAZYPMAP);
 1731                         while (lazywait == 0) {
 1732                                 ia32_pause();
 1733                                 if (--spins == 0)
 1734                                         break;
 1735                         }
 1736                 }
 1737                 mtx_unlock_spin(&smp_ipi_mtx);
 1738                 if (spins == 0)
 1739                         printf("pmap_lazyfix: spun for 50000000\n");
 1740         }
 1741 }
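/*
 * Added note: pm_active can still have bits set here because a CPU that
 * switched to a kernel thread may keep running on the dying pmap's page
 * tables ("lazy" context switching).  The loop above handles one such CPU at
 * a time; the IPI handler makes it reload %cr3 from its pcb if it is still
 * using this pmap (under PAE, %cr3 holds the PDPT's physical address, hence
 * the two cases when computing lazyptd) and then clears its pm_active bit.
 */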
 1742 
 1743 #else   /* SMP */
 1744 
 1745 /*
 1746  * Cleaning up on uniprocessor is easy.  For various reasons, we're
 1747  * unlikely to have to even execute this code, including the fact
 1748  * that the cleanup is deferred until the parent does a wait(2), which
 1749  * means that another userland process has run.
 1750  */
 1751 static void
 1752 pmap_lazyfix(pmap_t pmap)
 1753 {
 1754         u_int cr3;
 1755 
 1756         cr3 = vtophys(pmap->pm_pdir);
 1757         if (cr3 == rcr3()) {
 1758                 load_cr3(PCPU_GET(curpcb)->pcb_cr3);
 1759                 pmap->pm_active &= ~(PCPU_GET(cpumask));
 1760         }
 1761 }
 1762 #endif  /* SMP */
 1763 
 1764 /*
 1765  * Release any resources held by the given physical map.
 1766  * Called when a pmap initialized by pmap_pinit is being released.
 1767  * Should only be called if the map contains no valid mappings.
 1768  */
 1769 void
 1770 pmap_release(pmap_t pmap)
 1771 {
 1772         vm_page_t m, ptdpg[NPGPTD];
 1773         int i;
 1774 
 1775         KASSERT(pmap->pm_stats.resident_count == 0,
 1776             ("pmap_release: pmap resident count %ld != 0",
 1777             pmap->pm_stats.resident_count));
 1778         KASSERT(pmap->pm_root == NULL,
 1779             ("pmap_release: pmap has reserved page table page(s)"));
 1780 
 1781         pmap_lazyfix(pmap);
 1782         mtx_lock_spin(&allpmaps_lock);
 1783         LIST_REMOVE(pmap, pm_list);
 1784         mtx_unlock_spin(&allpmaps_lock);
 1785 
 1786         for (i = 0; i < NPGPTD; i++)
 1787                 ptdpg[i] = PHYS_TO_VM_PAGE(pmap->pm_pdir[PTDPTDI + i] &
 1788                     PG_FRAME);
 1789 
 1790         bzero(pmap->pm_pdir + PTDPTDI, (nkpt + NPGPTD) *
 1791             sizeof(*pmap->pm_pdir));
 1792 
 1793         pmap_qremove((vm_offset_t)pmap->pm_pdir, NPGPTD);
 1794 
 1795         for (i = 0; i < NPGPTD; i++) {
 1796                 m = ptdpg[i];
 1797 #ifdef PAE
 1798                 KASSERT(VM_PAGE_TO_PHYS(m) == (pmap->pm_pdpt[i] & PG_FRAME),
 1799                     ("pmap_release: got wrong ptd page"));
 1800 #endif
 1801                 m->wire_count--;
 1802                 atomic_subtract_int(&cnt.v_wire_count, 1);
 1803                 vm_page_free_zero(m);
 1804         }
 1805         PMAP_LOCK_DESTROY(pmap);
 1806 }
 1807 
 1808 static int
 1809 kvm_size(SYSCTL_HANDLER_ARGS)
 1810 {
 1811         unsigned long ksize = VM_MAX_KERNEL_ADDRESS - KERNBASE;
 1812 
 1813         return sysctl_handle_long(oidp, &ksize, 0, req);
 1814 }
 1815 SYSCTL_PROC(_vm, OID_AUTO, kvm_size, CTLTYPE_LONG|CTLFLAG_RD, 
 1816     0, 0, kvm_size, "IU", "Size of KVM");
 1817 
 1818 static int
 1819 kvm_free(SYSCTL_HANDLER_ARGS)
 1820 {
 1821         unsigned long kfree = VM_MAX_KERNEL_ADDRESS - kernel_vm_end;
 1822 
 1823         return sysctl_handle_long(oidp, &kfree, 0, req);
 1824 }
 1825 SYSCTL_PROC(_vm, OID_AUTO, kvm_free, CTLTYPE_LONG|CTLFLAG_RD, 
 1826     0, 0, kvm_free, "IU", "Amount of KVM free");
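/*
 * An illustrative aside (not from pmap.c itself): the two read-only sysctls
 * declared above surface as "vm.kvm_size" and "vm.kvm_free".  A minimal
 * user-space reader using sysctlbyname(3) might look like the sketch below.
 */
#if 0
#include <sys/types.h>
#include <sys/sysctl.h>
#include <stdio.h>

int
main(void)
{
        unsigned long ksize, kfree;
        size_t len;

        len = sizeof(ksize);
        if (sysctlbyname("vm.kvm_size", &ksize, &len, NULL, 0) == -1)
                return (1);
        len = sizeof(kfree);
        if (sysctlbyname("vm.kvm_free", &kfree, &len, NULL, 0) == -1)
                return (1);
        printf("kvm: %lu bytes total, %lu bytes free\n", ksize, kfree);
        return (0);
}
#endif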
 1827 
 1828 /*
 1829  * grow the number of kernel page table entries, if needed
 1830  */
 1831 void
 1832 pmap_growkernel(vm_offset_t addr)
 1833 {
 1834         struct pmap *pmap;
 1835         vm_paddr_t ptppaddr;
 1836         vm_page_t nkpg;
 1837         pd_entry_t newpdir;
 1838         pt_entry_t *pde;
 1839         boolean_t updated_PTD;
 1840 
 1841         mtx_assert(&kernel_map->system_mtx, MA_OWNED);
 1842         if (kernel_vm_end == 0) {
 1843                 kernel_vm_end = KERNBASE;
 1844                 nkpt = 0;
 1845                 while (pdir_pde(PTD, kernel_vm_end)) {
 1846                         kernel_vm_end = (kernel_vm_end + PAGE_SIZE * NPTEPG) & ~(PAGE_SIZE * NPTEPG - 1);
 1847                         nkpt++;
 1848                         if (kernel_vm_end - 1 >= kernel_map->max_offset) {
 1849                                 kernel_vm_end = kernel_map->max_offset;
 1850                                 break;
 1851                         }
 1852                 }
 1853         }
 1854         addr = roundup2(addr, PAGE_SIZE * NPTEPG);
 1855         if (addr - 1 >= kernel_map->max_offset)
 1856                 addr = kernel_map->max_offset;
 1857         while (kernel_vm_end < addr) {
 1858                 if (pdir_pde(PTD, kernel_vm_end)) {
 1859                         kernel_vm_end = (kernel_vm_end + PAGE_SIZE * NPTEPG) & ~(PAGE_SIZE * NPTEPG - 1);
 1860                         if (kernel_vm_end - 1 >= kernel_map->max_offset) {
 1861                                 kernel_vm_end = kernel_map->max_offset;
 1862                                 break;
 1863                         }
 1864                         continue;
 1865                 }
 1866 
 1867                 nkpg = vm_page_alloc(NULL, kernel_vm_end >> PDRSHIFT,
 1868                     VM_ALLOC_INTERRUPT | VM_ALLOC_NOOBJ | VM_ALLOC_WIRED |
 1869                     VM_ALLOC_ZERO);
 1870                 if (nkpg == NULL)
 1871                         panic("pmap_growkernel: no memory to grow kernel");
 1872 
 1873                 nkpt++;
 1874 
 1875                 if ((nkpg->flags & PG_ZERO) == 0)
 1876                         pmap_zero_page(nkpg);
 1877                 ptppaddr = VM_PAGE_TO_PHYS(nkpg);
 1878                 newpdir = (pd_entry_t) (ptppaddr | PG_V | PG_RW | PG_A | PG_M);
 1879                 pdir_pde(KPTD, kernel_vm_end) = newpdir;
 1880 
 1881                 updated_PTD = FALSE;
 1882                 mtx_lock_spin(&allpmaps_lock);
 1883                 LIST_FOREACH(pmap, &allpmaps, pm_list) {
 1884                         if ((pmap->pm_pdir[PTDPTDI] & PG_FRAME) == (PTDpde[0] &
 1885                             PG_FRAME))
 1886                                 updated_PTD = TRUE;
 1887                         pde = pmap_pde(pmap, kernel_vm_end);
 1888                         pde_store(pde, newpdir);
 1889                 }
 1890                 mtx_unlock_spin(&allpmaps_lock);
 1891                 KASSERT(updated_PTD,
 1892                     ("pmap_growkernel: current page table is not in allpmaps"));
 1893                 kernel_vm_end = (kernel_vm_end + PAGE_SIZE * NPTEPG) & ~(PAGE_SIZE * NPTEPG - 1);
 1894                 if (kernel_vm_end - 1 >= kernel_map->max_offset) {
 1895                         kernel_vm_end = kernel_map->max_offset;
 1896                         break;
 1897                 }
 1898         }
 1899 }
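/*
 * Added note: each pass through the loop above adds one kernel page table
 * page and advances kernel_vm_end by PAGE_SIZE * NPTEPG bytes -- 4096 * 1024
 * = 4MB without PAE (2MB with PAE's 512-entry page tables).  The new PDE is
 * copied into every pmap on the allpmaps list because each process's page
 * directory carries its own copy of the kernel PDEs (see the bcopy() in
 * pmap_pinit()).
 */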
 1900 
 1901 
 1902 /***************************************************
 1903  * page management routines.
 1904  ***************************************************/
 1905 
 1906 CTASSERT(sizeof(struct pv_chunk) == PAGE_SIZE);
 1907 CTASSERT(_NPCM == 11);
 1908 
 1909 static __inline struct pv_chunk *
 1910 pv_to_chunk(pv_entry_t pv)
 1911 {
 1912 
 1913         return (struct pv_chunk *)((uintptr_t)pv & ~(uintptr_t)PAGE_MASK);
 1914 }
 1915 
 1916 #define PV_PMAP(pv) (pv_to_chunk(pv)->pc_pmap)
 1917 
 1918 #define PC_FREE0_9      0xfffffffful    /* Free values for index 0 through 9 */
 1919 #define PC_FREE10       0x0000fffful    /* Free values for index 10 */
 1920 
 1921 static uint32_t pc_freemask[11] = {
 1922         PC_FREE0_9, PC_FREE0_9, PC_FREE0_9,
 1923         PC_FREE0_9, PC_FREE0_9, PC_FREE0_9,
 1924         PC_FREE0_9, PC_FREE0_9, PC_FREE0_9,
 1925         PC_FREE0_9, PC_FREE10
 1926 };
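/*
 * Added note: the free masks above imply the chunk capacity.  Ten full
 * 32-bit words plus one 16-bit word give 10 * 32 + 16 = 336 pv entries per
 * PAGE_SIZE chunk, the assumed i386 value of _NPCPV, which is consistent
 * with the CTASSERTs above (sizeof(struct pv_chunk) == PAGE_SIZE and
 * _NPCM == 11).
 */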
 1927 
 1928 SYSCTL_INT(_vm_pmap, OID_AUTO, pv_entry_count, CTLFLAG_RD, &pv_entry_count, 0,
 1929         "Current number of pv entries");
 1930 
 1931 #ifdef PV_STATS
 1932 static int pc_chunk_count, pc_chunk_allocs, pc_chunk_frees, pc_chunk_tryfail;
 1933 
 1934 SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_count, CTLFLAG_RD, &pc_chunk_count, 0,
 1935         "Current number of pv entry chunks");
 1936 SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_allocs, CTLFLAG_RD, &pc_chunk_allocs, 0,
 1937         "Current number of pv entry chunks allocated");
 1938 SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_frees, CTLFLAG_RD, &pc_chunk_frees, 0,
 1939         "Total number of pv entry chunk frees");
 1940 SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_tryfail, CTLFLAG_RD, &pc_chunk_tryfail, 0,
 1941         "Number of times tried to get a chunk page but failed.");
 1942 
 1943 static long pv_entry_frees, pv_entry_allocs;
 1944 static int pv_entry_spare;
 1945 
 1946 SYSCTL_LONG(_vm_pmap, OID_AUTO, pv_entry_frees, CTLFLAG_RD, &pv_entry_frees, 0,
 1947         "Current number of pv entry frees");
 1948 SYSCTL_LONG(_vm_pmap, OID_AUTO, pv_entry_allocs, CTLFLAG_RD, &pv_entry_allocs, 0,
 1949         "Current number of pv entry allocs");
 1950 SYSCTL_INT(_vm_pmap, OID_AUTO, pv_entry_spare, CTLFLAG_RD, &pv_entry_spare, 0,
 1951         "Current number of spare pv entries");
 1952 
 1953 static int pmap_collect_inactive, pmap_collect_active;
 1954 
 1955 SYSCTL_INT(_vm_pmap, OID_AUTO, pmap_collect_inactive, CTLFLAG_RD, &pmap_collect_inactive, 0,
 1956         "Current number of times pmap_collect called on inactive queue");
 1957 SYSCTL_INT(_vm_pmap, OID_AUTO, pmap_collect_active, CTLFLAG_RD, &pmap_collect_active, 0,
 1958         "Current number of times pmap_collect called on active queue");
 1959 #endif
 1960 
 1961 /*
 1962  * We are in a serious low memory condition.  Resort to
 1963  * drastic measures to free some pages so we can allocate
 1964  * another pv entry chunk.  This is normally called to
 1965  * unmap inactive pages, and if necessary, active pages.
 1966  */
 1967 static void
 1968 pmap_collect(pmap_t locked_pmap, struct vpgqueues *vpq)
 1969 {
 1970         struct md_page *pvh;
 1971         pd_entry_t *pde;
 1972         pmap_t pmap;
 1973         pt_entry_t *pte, tpte;
 1974         pv_entry_t next_pv, pv;
 1975         vm_offset_t va;
 1976         vm_page_t m, free;
 1977 
 1978         sched_pin();
 1979         TAILQ_FOREACH(m, &vpq->pl, pageq) {
 1980                 if (m->hold_count || m->busy)
 1981                         continue;
 1982                 TAILQ_FOREACH_SAFE(pv, &m->md.pv_list, pv_list, next_pv) {
 1983                         va = pv->pv_va;
 1984                         pmap = PV_PMAP(pv);
 1985                         /* Avoid deadlock and lock recursion. */
 1986                         if (pmap > locked_pmap)
 1987                                 PMAP_LOCK(pmap);
 1988                         else if (pmap != locked_pmap && !PMAP_TRYLOCK(pmap))
 1989                                 continue;
 1990                         pmap->pm_stats.resident_count--;
 1991                         pde = pmap_pde(pmap, va);
 1992                         KASSERT((*pde & PG_PS) == 0, ("pmap_collect: found"
 1993                             " a 4mpage in page %p's pv list", m));
 1994                         pte = pmap_pte_quick(pmap, va);
 1995                         tpte = pte_load_clear(pte);
 1996                         KASSERT((tpte & PG_W) == 0,
 1997                             ("pmap_collect: wired pte %#jx", (uintmax_t)tpte));
 1998                         if (tpte & PG_A)
 1999                                 vm_page_flag_set(m, PG_REFERENCED);
 2000                         if ((tpte & (PG_M | PG_RW)) == (PG_M | PG_RW))
 2001                                 vm_page_dirty(m);
 2002                         free = NULL;
 2003                         pmap_unuse_pt(pmap, va, &free);
 2004                         pmap_invalidate_page(pmap, va);
 2005                         pmap_free_zero_pages(free);
 2006                         TAILQ_REMOVE(&m->md.pv_list, pv, pv_list);
 2007                         if (TAILQ_EMPTY(&m->md.pv_list)) {
 2008                                 pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
 2009                                 if (TAILQ_EMPTY(&pvh->pv_list))
 2010                                         vm_page_flag_clear(m, PG_WRITEABLE);
 2011                         }
 2012                         free_pv_entry(pmap, pv);
 2013                         if (pmap != locked_pmap)
 2014                                 PMAP_UNLOCK(pmap);
 2015                 }
 2016         }
 2017         sched_unpin();
 2018 }
 2019 
 2020 
 2021 /*
 2022  * free the pv_entry back to the free list
 2023  */
 2024 static void
 2025 free_pv_entry(pmap_t pmap, pv_entry_t pv)
 2026 {
 2027         vm_page_t m;
 2028         struct pv_chunk *pc;
 2029         int idx, field, bit;
 2030 
 2031         mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 2032         PMAP_LOCK_ASSERT(pmap, MA_OWNED);
 2033         PV_STAT(pv_entry_frees++);
 2034         PV_STAT(pv_entry_spare++);
 2035         pv_entry_count--;
 2036         pc = pv_to_chunk(pv);
 2037         idx = pv - &pc->pc_pventry[0];
 2038         field = idx / 32;
 2039         bit = idx % 32;
 2040         pc->pc_map[field] |= 1ul << bit;
 2041         /* move to head of list */
 2042         TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
 2043         for (idx = 0; idx < _NPCM; idx++)
 2044                 if (pc->pc_map[idx] != pc_freemask[idx]) {
 2045                         TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc, pc_list);
 2046                         return;
 2047                 }
 2048         PV_STAT(pv_entry_spare -= _NPCPV);
 2049         PV_STAT(pc_chunk_count--);
 2050         PV_STAT(pc_chunk_frees++);
 2051         /* entire chunk is free, return it */
 2052         m = PHYS_TO_VM_PAGE(pmap_kextract((vm_offset_t)pc));
 2053         pmap_qremove((vm_offset_t)pc, 1);
 2054         vm_page_unwire(m, 0);
 2055         vm_page_free(m);
 2056         pmap_ptelist_free(&pv_vafree, (vm_offset_t)pc);
 2057 }
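/*
 * An illustrative aside (not from pmap.c itself): the field/bit arithmetic
 * used by free_pv_entry() above and get_pv_entry() below treats pc_map[] as
 * one bitmap in which a set bit means "this pv entry is free".  A standalone
 * sketch of the same index math:
 */
#if 0
#include <stdio.h>
#include <stdint.h>

int
main(void)
{
        uint32_t map[11] = { 0 };       /* every entry currently in use */
        int idx = 45;                   /* hypothetical pv entry index */
        int field = idx / 32;           /* word 1 */
        int bit = idx % 32;             /* bit 13 */

        map[field] |= 1ul << bit;       /* mark entry 45 free again */
        printf("field %d bit %d -> map[%d] = 0x%08x\n",
            field, bit, field, (unsigned)map[field]);
        return (0);
}
#endif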
 2058 
 2059 /*
 2060  * get a new pv_entry, allocating a block from the system
 2061  * when needed.
 2062  */
 2063 static pv_entry_t
 2064 get_pv_entry(pmap_t pmap, int try)
 2065 {
 2066         static const struct timeval printinterval = { 60, 0 };
 2067         static struct timeval lastprint;
 2068         static vm_pindex_t colour;
 2069         struct vpgqueues *pq;
 2070         int bit, field;
 2071         pv_entry_t pv;
 2072         struct pv_chunk *pc;
 2073         vm_page_t m;
 2074 
 2075         PMAP_LOCK_ASSERT(pmap, MA_OWNED);
 2076         mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 2077         PV_STAT(pv_entry_allocs++);
 2078         pv_entry_count++;
 2079         if (pv_entry_count > pv_entry_high_water)
 2080                 if (ratecheck(&lastprint, &printinterval))
 2081                         printf("Approaching the limit on PV entries, consider "
 2082                             "increasing either the vm.pmap.shpgperproc or the "
 2083                             "vm.pmap.pv_entry_max tunable.\n");
 2084         pq = NULL;
 2085 retry:
 2086         pc = TAILQ_FIRST(&pmap->pm_pvchunk);
 2087         if (pc != NULL) {
 2088                 for (field = 0; field < _NPCM; field++) {
 2089                         if (pc->pc_map[field]) {
 2090                                 bit = bsfl(pc->pc_map[field]);
 2091                                 break;
 2092                         }
 2093                 }
 2094                 if (field < _NPCM) {
 2095                         pv = &pc->pc_pventry[field * 32 + bit];
 2096                         pc->pc_map[field] &= ~(1ul << bit);
 2097                         /* If this was the last free entry, move the chunk to the tail. */
 2098                         for (field = 0; field < _NPCM; field++)
 2099                                 if (pc->pc_map[field] != 0) {
 2100                                         PV_STAT(pv_entry_spare--);
 2101                                         return (pv);    /* not full, return */
 2102                                 }
 2103                         TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
 2104                         TAILQ_INSERT_TAIL(&pmap->pm_pvchunk, pc, pc_list);
 2105                         PV_STAT(pv_entry_spare--);
 2106                         return (pv);
 2107                 }
 2108         }
 2109         /*
 2110          * Access to the ptelist "pv_vafree" is synchronized by the page
 2111          * queues lock.  If "pv_vafree" is currently non-empty, it will
 2112          * remain non-empty until pmap_ptelist_alloc() completes.
 2113          */
 2114         if (pv_vafree == 0 || (m = vm_page_alloc(NULL, colour, (pq ==
 2115             &vm_page_queues[PQ_ACTIVE] ? VM_ALLOC_SYSTEM : VM_ALLOC_NORMAL) |
 2116             VM_ALLOC_NOOBJ | VM_ALLOC_WIRED)) == NULL) {
 2117                 if (try) {
 2118                         pv_entry_count--;
 2119                         PV_STAT(pc_chunk_tryfail++);
 2120                         return (NULL);
 2121                 }
 2122                 /*
 2123                  * Reclaim pv entries: At first, destroy mappings to
 2124                  * inactive pages.  After that, if a pv chunk entry
 2125                  * is still needed, destroy mappings to active pages.
 2126                  */
 2127                 if (pq == NULL) {
 2128                         PV_STAT(pmap_collect_inactive++);
 2129                         pq = &vm_page_queues[PQ_INACTIVE];
 2130                 } else if (pq == &vm_page_queues[PQ_INACTIVE]) {
 2131                         PV_STAT(pmap_collect_active++);
 2132                         pq = &vm_page_queues[PQ_ACTIVE];
 2133                 } else
 2134                         panic("get_pv_entry: increase vm.pmap.shpgperproc");
 2135                 pmap_collect(pmap, pq);
 2136                 goto retry;
 2137         }
 2138         PV_STAT(pc_chunk_count++);
 2139         PV_STAT(pc_chunk_allocs++);
 2140         colour++;
 2141         pc = (struct pv_chunk *)pmap_ptelist_alloc(&pv_vafree);
 2142         pmap_qenter((vm_offset_t)pc, &m, 1);
 2143         pc->pc_pmap = pmap;
 2144         pc->pc_map[0] = pc_freemask[0] & ~1ul;  /* preallocated bit 0 */
 2145         for (field = 1; field < _NPCM; field++)
 2146                 pc->pc_map[field] = pc_freemask[field];
 2147         pv = &pc->pc_pventry[0];
 2148         TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc, pc_list);
 2149         PV_STAT(pv_entry_spare += _NPCPV - 1);
 2150         return (pv);
 2151 }
 2152 
 2153 static __inline pv_entry_t
 2154 pmap_pvh_remove(struct md_page *pvh, pmap_t pmap, vm_offset_t va)
 2155 {
 2156         pv_entry_t pv;
 2157 
 2158         mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 2159         TAILQ_FOREACH(pv, &pvh->pv_list, pv_list) {
 2160                 if (pmap == PV_PMAP(pv) && va == pv->pv_va) {
 2161                         TAILQ_REMOVE(&pvh->pv_list, pv, pv_list);
 2162                         break;
 2163                 }
 2164         }
 2165         return (pv);
 2166 }
 2167 
 2168 static void
 2169 pmap_pv_demote_pde(pmap_t pmap, vm_offset_t va, vm_paddr_t pa)
 2170 {
 2171         struct md_page *pvh;
 2172         pv_entry_t pv;
 2173         vm_offset_t va_last;
 2174         vm_page_t m;
 2175 
 2176         mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 2177         KASSERT((pa & PDRMASK) == 0,
 2178             ("pmap_pv_demote_pde: pa is not 4mpage aligned"));
 2179 
 2180         /*
 2181          * Transfer the 4mpage's pv entry for this mapping to the first
 2182          * page's pv list.
 2183          */
 2184         pvh = pa_to_pvh(pa);
 2185         va = trunc_4mpage(va);
 2186         pv = pmap_pvh_remove(pvh, pmap, va);
 2187         KASSERT(pv != NULL, ("pmap_pv_demote_pde: pv not found"));
 2188         m = PHYS_TO_VM_PAGE(pa);
 2189         TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_list);
 2190         /* Instantiate the remaining NPTEPG - 1 pv entries. */
 2191         va_last = va + NBPDR - PAGE_SIZE;
 2192         do {
 2193                 m++;
 2194                 KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
 2195                     ("pmap_pv_demote_pde: page %p is not managed", m));
 2196                 va += PAGE_SIZE;
 2197                 pmap_insert_entry(pmap, va, m);
 2198         } while (va < va_last);
 2199 }
 2200 
 2201 static void
 2202 pmap_pv_promote_pde(pmap_t pmap, vm_offset_t va, vm_paddr_t pa)
 2203 {
 2204         struct md_page *pvh;
 2205         pv_entry_t pv;
 2206         vm_offset_t va_last;
 2207         vm_page_t m;
 2208 
 2209         mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 2210         KASSERT((pa & PDRMASK) == 0,
 2211             ("pmap_pv_promote_pde: pa is not 4mpage aligned"));
 2212 
 2213         /*
 2214          * Transfer the first page's pv entry for this mapping to the
 2215          * 4mpage's pv list.  Aside from avoiding the cost of a call
 2216          * to get_pv_entry(), a transfer avoids the possibility that
 2217          * get_pv_entry() calls pmap_collect() and that pmap_collect()
 2218          * removes one of the mappings that is being promoted.
 2219          */
 2220         m = PHYS_TO_VM_PAGE(pa);
 2221         va = trunc_4mpage(va);
 2222         pv = pmap_pvh_remove(&m->md, pmap, va);
 2223         KASSERT(pv != NULL, ("pmap_pv_promote_pde: pv not found"));
 2224         pvh = pa_to_pvh(pa);
 2225         TAILQ_INSERT_TAIL(&pvh->pv_list, pv, pv_list);
 2226         /* Free the remaining NPTEPG - 1 pv entries. */
 2227         va_last = va + NBPDR - PAGE_SIZE;
 2228         do {
 2229                 m++;
 2230                 va += PAGE_SIZE;
 2231                 pmap_pvh_free(&m->md, pmap, va);
 2232         } while (va < va_last);
 2233 }
 2234 
 2235 static void
 2236 pmap_pvh_free(struct md_page *pvh, pmap_t pmap, vm_offset_t va)
 2237 {
 2238         pv_entry_t pv;
 2239 
 2240         pv = pmap_pvh_remove(pvh, pmap, va);
 2241         KASSERT(pv != NULL, ("pmap_pvh_free: pv not found"));
 2242         free_pv_entry(pmap, pv);
 2243 }
 2244 
 2245 static void
 2246 pmap_remove_entry(pmap_t pmap, vm_page_t m, vm_offset_t va)
 2247 {
 2248         struct md_page *pvh;
 2249 
 2250         mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 2251         pmap_pvh_free(&m->md, pmap, va);
 2252         if (TAILQ_EMPTY(&m->md.pv_list)) {
 2253                 pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
 2254                 if (TAILQ_EMPTY(&pvh->pv_list))
 2255                         vm_page_flag_clear(m, PG_WRITEABLE);
 2256         }
 2257 }
 2258 
 2259 /*
 2260  * Create a pv entry for page at pa for
 2261  * (pmap, va).
 2262  */
 2263 static void
 2264 pmap_insert_entry(pmap_t pmap, vm_offset_t va, vm_page_t m)
 2265 {
 2266         pv_entry_t pv;
 2267 
 2268         PMAP_LOCK_ASSERT(pmap, MA_OWNED);
 2269         mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 2270         pv = get_pv_entry(pmap, FALSE);
 2271         pv->pv_va = va;
 2272         TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_list);
 2273 }
 2274 
 2275 /*
 2276  * Conditionally create a pv entry.
 2277  */
 2278 static boolean_t
 2279 pmap_try_insert_pv_entry(pmap_t pmap, vm_offset_t va, vm_page_t m)
 2280 {
 2281         pv_entry_t pv;
 2282 
 2283         PMAP_LOCK_ASSERT(pmap, MA_OWNED);
 2284         mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 2285         if (pv_entry_count < pv_entry_high_water && 
 2286             (pv = get_pv_entry(pmap, TRUE)) != NULL) {
 2287                 pv->pv_va = va;
 2288                 TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_list);
 2289                 return (TRUE);
 2290         } else
 2291                 return (FALSE);
 2292 }
 2293 
 2294 /*
 2295  * Create the pv entries for each of the pages within a superpage.
 2296  */
 2297 static boolean_t
 2298 pmap_pv_insert_pde(pmap_t pmap, vm_offset_t va, vm_paddr_t pa)
 2299 {
 2300         struct md_page *pvh;
 2301         pv_entry_t pv;
 2302 
 2303         mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 2304         if (pv_entry_count < pv_entry_high_water && 
 2305             (pv = get_pv_entry(pmap, TRUE)) != NULL) {
 2306                 pv->pv_va = va;
 2307                 pvh = pa_to_pvh(pa);
 2308                 TAILQ_INSERT_TAIL(&pvh->pv_list, pv, pv_list);
 2309                 return (TRUE);
 2310         } else
 2311                 return (FALSE);
 2312 }
 2313 
 2314 /*
 2315  * Fills a page table page with mappings to consecutive physical pages.
 2316  */
 2317 static void
 2318 pmap_fill_ptp(pt_entry_t *firstpte, pt_entry_t newpte)
 2319 {
 2320         pt_entry_t *pte;
 2321 
 2322         for (pte = firstpte; pte < firstpte + NPTEPG; pte++) {
 2323                 *pte = newpte;  
 2324                 newpte += PAGE_SIZE;
 2325         }
 2326 }
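/*
 * Added note: the loop above works because the low 12 bits of a pte hold
 * only flag bits; adding PAGE_SIZE (0x1000) to "newpte" advances the
 * physical frame by one page while leaving the protection and attribute
 * bits untouched, so the NPTEPG entries end up mapping physically
 * consecutive pages with identical flags.
 */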
 2327 
 2328 /*
 2329  * Tries to demote a 2- or 4MB page mapping.  If demotion fails, the
 2330  * 2- or 4MB page mapping is invalidated.
 2331  */
 2332 static boolean_t
 2333 pmap_demote_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t va)
 2334 {
 2335         pd_entry_t newpde, oldpde;
 2336         pmap_t allpmaps_entry;
 2337         pt_entry_t *firstpte, newpte;
 2338         vm_paddr_t mptepa;
 2339         vm_page_t free, mpte;
 2340 
 2341         PMAP_LOCK_ASSERT(pmap, MA_OWNED);
 2342         oldpde = *pde;
 2343         KASSERT((oldpde & (PG_PS | PG_V)) == (PG_PS | PG_V),
 2344             ("pmap_demote_pde: oldpde is missing PG_PS and/or PG_V"));
 2345         mpte = pmap_lookup_pt_page(pmap, va);
 2346         if (mpte != NULL)
 2347                 pmap_remove_pt_page(pmap, mpte);
 2348         else {
 2349                 KASSERT((oldpde & PG_W) == 0,
 2350                     ("pmap_demote_pde: page table page for a wired mapping"
 2351                     " is missing"));
 2352 
 2353                 /*
 2354                  * Invalidate the 2- or 4MB page mapping and return
 2355                  * "failure" if the mapping was never accessed or the
 2356                  * allocation of the new page table page fails.
 2357                  */
 2358                 if ((oldpde & PG_A) == 0 || (mpte = vm_page_alloc(NULL,
 2359                     va >> PDRSHIFT, VM_ALLOC_NOOBJ | VM_ALLOC_NORMAL |
 2360                     VM_ALLOC_WIRED)) == NULL) {
 2361                         free = NULL;
 2362                         pmap_remove_pde(pmap, pde, trunc_4mpage(va), &free);
 2363                         pmap_invalidate_page(pmap, trunc_4mpage(va));
 2364                         pmap_free_zero_pages(free);
 2365                         CTR2(KTR_PMAP, "pmap_demote_pde: failure for va %#x"
 2366                             " in pmap %p", va, pmap);
 2367                         return (FALSE);
 2368                 }
 2369                 if (va < VM_MAXUSER_ADDRESS)
 2370                         pmap->pm_stats.resident_count++;
 2371         }
 2372         mptepa = VM_PAGE_TO_PHYS(mpte);
 2373 
 2374         /*
 2375          * Temporarily map the page table page (mpte) into the kernel's
 2376          * address space at either PADDR1 or PADDR2.
 2377          */
 2378         if (curthread->td_pinned > 0 && mtx_owned(&vm_page_queue_mtx)) {
 2379                 if ((*PMAP1 & PG_FRAME) != mptepa) {
 2380                         *PMAP1 = mptepa | PG_RW | PG_V | PG_A | PG_M;
 2381 #ifdef SMP
 2382                         PMAP1cpu = PCPU_GET(cpuid);
 2383 #endif
 2384                         invlcaddr(PADDR1);
 2385                         PMAP1changed++;
 2386                 } else
 2387 #ifdef SMP
 2388                 if (PMAP1cpu != PCPU_GET(cpuid)) {
 2389                         PMAP1cpu = PCPU_GET(cpuid);
 2390                         invlcaddr(PADDR1);
 2391                         PMAP1changedcpu++;
 2392                 } else
 2393 #endif
 2394                         PMAP1unchanged++;
 2395                 firstpte = PADDR1;
 2396         } else {
 2397                 mtx_lock(&PMAP2mutex);
 2398                 if ((*PMAP2 & PG_FRAME) != mptepa) {
 2399                         *PMAP2 = mptepa | PG_RW | PG_V | PG_A | PG_M;
 2400                         pmap_invalidate_page(kernel_pmap, (vm_offset_t)PADDR2);
 2401                 }
 2402                 firstpte = PADDR2;
 2403         }
 2404         newpde = mptepa | PG_M | PG_A | (oldpde & PG_U) | PG_RW | PG_V;
 2405         KASSERT((oldpde & PG_A) != 0,
 2406             ("pmap_demote_pde: oldpde is missing PG_A"));
 2407         KASSERT((oldpde & (PG_M | PG_RW)) != PG_RW,
 2408             ("pmap_demote_pde: oldpde is missing PG_M"));
 2409         newpte = oldpde & ~PG_PS;
 2410         if ((newpte & PG_PDE_PAT) != 0)
 2411                 newpte ^= PG_PDE_PAT | PG_PTE_PAT;
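        /*
         * Added note: in a PDE the PAT attribute lives at bit 12
         * (PG_PDE_PAT) because bit 7 is PG_PS, while in a PTE it lives at
         * bit 7 (PG_PTE_PAT).  PG_PS was already cleared just above, so the
         * exclusive-or moves the PAT bit from its PDE position to its PTE
         * position in the new template.
         */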
 2412 
 2413         /*
 2414          * If the page table page is new, initialize it.
 2415          */
 2416         if (mpte->wire_count == 1) {
 2417                 mpte->wire_count = NPTEPG;
 2418                 pmap_fill_ptp(firstpte, newpte);
 2419         }
 2420         KASSERT((*firstpte & PG_FRAME) == (newpte & PG_FRAME),
 2421             ("pmap_demote_pde: firstpte and newpte map different physical"
 2422             " addresses"));
 2423 
 2424         /*
 2425          * If the mapping has changed attributes, update the page table
 2426          * entries.
 2427          */ 
 2428         if ((*firstpte & PG_PTE_PROMOTE) != (newpte & PG_PTE_PROMOTE))
 2429                 pmap_fill_ptp(firstpte, newpte);
 2430         
 2431         /*
 2432          * Demote the mapping.  This pmap is locked.  The old PDE has
 2433          * PG_A set.  If the old PDE has PG_RW set, it also has PG_M
 2434          * set.  Thus, there is no danger of a race with another
 2435          * processor changing the setting of PG_A and/or PG_M between
 2436          * the read above and the store below. 
 2437          */
 2438         if (pmap == kernel_pmap) {
 2439                 /*
 2440                  * A harmless race exists between this loop and the bcopy()
 2441                  * in pmap_pinit() that initializes the kernel segment of
 2442                  * the new page table.  Specifically, that bcopy() may copy
 2443                  * the new PDE from the PTD, which is first in allpmaps, to
 2444                  * the new page table before this loop updates that new
 2445                  * page table.
 2446                  */
 2447                 mtx_lock_spin(&allpmaps_lock);
 2448                 LIST_FOREACH(allpmaps_entry, &allpmaps, pm_list) {
 2449                         pde = pmap_pde(allpmaps_entry, va);
 2450                         KASSERT(*pde == newpde || (*pde & PG_PTE_PROMOTE) ==
 2451                             (oldpde & PG_PTE_PROMOTE),
 2452                             ("pmap_demote_pde: pde was %#jx, expected %#jx",
 2453                             (uintmax_t)*pde, (uintmax_t)oldpde));
 2454                         pde_store(pde, newpde);
 2455                 }
 2456                 mtx_unlock_spin(&allpmaps_lock);
 2457         } else
 2458                 pde_store(pde, newpde); 
 2459         if (firstpte == PADDR2)
 2460                 mtx_unlock(&PMAP2mutex);
 2461 
 2462         /*
 2463          * Invalidate the recursive mapping of the page table page.
 2464          */
 2465         pmap_invalidate_page(pmap, (vm_offset_t)vtopte(va));
 2466 
 2467         /*
 2468          * Demote the pv entry.  This depends on the earlier demotion
 2469          * of the mapping.  Specifically, the (re)creation of a per-
 2470          * page pv entry might trigger the execution of pmap_collect(),
 2471          * which might reclaim a newly (re)created per-page pv entry
 2472          * and destroy the associated mapping.  In order to destroy
 2473          * the mapping, the PDE must have already changed from mapping
 2474          * the 2mpage to referencing the page table page.
 2475          */
 2476         if ((oldpde & PG_MANAGED) != 0)
 2477                 pmap_pv_demote_pde(pmap, va, oldpde & PG_PS_FRAME);
 2478 
 2479         pmap_pde_demotions++;
 2480         CTR2(KTR_PMAP, "pmap_demote_pde: success for va %#x"
 2481             " in pmap %p", va, pmap);
 2482         return (TRUE);
 2483 }
 2484 
 2485 /*
 2486  * pmap_remove_pde: unmap a 2- or 4MB superpage mapping from a pmap
 2487  */
 2488 static void
 2489 pmap_remove_pde(pmap_t pmap, pd_entry_t *pdq, vm_offset_t sva,
 2490     vm_page_t *free)
 2491 {
 2492         struct md_page *pvh;
 2493         pd_entry_t oldpde;
 2494         vm_offset_t eva, va;
 2495         vm_page_t m, mpte;
 2496 
 2497         PMAP_LOCK_ASSERT(pmap, MA_OWNED);
 2498         KASSERT((sva & PDRMASK) == 0,
 2499             ("pmap_remove_pde: sva is not 4mpage aligned"));
 2500         oldpde = pte_load_clear(pdq);
 2501         if (oldpde & PG_W)
 2502                 pmap->pm_stats.wired_count -= NBPDR / PAGE_SIZE;
 2503 
 2504         /*
 2505          * Machines that don't support invlpg also don't support
 2506          * PG_G.
 2507          */
 2508         if (oldpde & PG_G)
 2509                 pmap_invalidate_page(kernel_pmap, sva);
 2510         pmap->pm_stats.resident_count -= NBPDR / PAGE_SIZE;
 2511         if (oldpde & PG_MANAGED) {
 2512                 pvh = pa_to_pvh(oldpde & PG_PS_FRAME);
 2513                 pmap_pvh_free(pvh, pmap, sva);
 2514                 eva = sva + NBPDR;
 2515                 for (va = sva, m = PHYS_TO_VM_PAGE(oldpde & PG_PS_FRAME);
 2516                     va < eva; va += PAGE_SIZE, m++) {
 2517                         if ((oldpde & (PG_M | PG_RW)) == (PG_M | PG_RW))
 2518                                 vm_page_dirty(m);
 2519                         if (oldpde & PG_A)
 2520                                 vm_page_flag_set(m, PG_REFERENCED);
 2521                         if (TAILQ_EMPTY(&m->md.pv_list) &&
 2522                             TAILQ_EMPTY(&pvh->pv_list))
 2523                                 vm_page_flag_clear(m, PG_WRITEABLE);
 2524                 }
 2525         }
 2526         if (pmap == kernel_pmap) {
 2527                 if (!pmap_demote_pde(pmap, pdq, sva))
 2528                         panic("pmap_remove_pde: failed demotion");
 2529         } else {
 2530                 mpte = pmap_lookup_pt_page(pmap, sva);
 2531                 if (mpte != NULL) {
 2532                         pmap_remove_pt_page(pmap, mpte);
 2533                         pmap->pm_stats.resident_count--;
 2534                         KASSERT(mpte->wire_count == NPTEPG,
 2535                             ("pmap_remove_pde: pte page wire count error"));
 2536                         mpte->wire_count = 0;
 2537                         pmap_add_delayed_free_list(mpte, free, FALSE);
 2538                         atomic_subtract_int(&cnt.v_wire_count, 1);
 2539                 }
 2540         }
 2541 }
 2542 
 2543 /*
 2544  * pmap_remove_pte: unmap a single 4KB page from a pmap
 2545  */
 2546 static int
 2547 pmap_remove_pte(pmap_t pmap, pt_entry_t *ptq, vm_offset_t va, vm_page_t *free)
 2548 {
 2549         pt_entry_t oldpte;
 2550         vm_page_t m;
 2551 
 2552         mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 2553         PMAP_LOCK_ASSERT(pmap, MA_OWNED);
 2554         oldpte = pte_load_clear(ptq);
 2555         if (oldpte & PG_W)
 2556                 pmap->pm_stats.wired_count -= 1;
 2557         /*
 2558          * Machines that don't support invlpg also don't support
 2559          * PG_G.
 2560          */
 2561         if (oldpte & PG_G)
 2562                 pmap_invalidate_page(kernel_pmap, va);
 2563         pmap->pm_stats.resident_count -= 1;
 2564         if (oldpte & PG_MANAGED) {
 2565                 m = PHYS_TO_VM_PAGE(oldpte & PG_FRAME);
 2566                 if ((oldpte & (PG_M | PG_RW)) == (PG_M | PG_RW))
 2567                         vm_page_dirty(m);
 2568                 if (oldpte & PG_A)
 2569                         vm_page_flag_set(m, PG_REFERENCED);
 2570                 pmap_remove_entry(pmap, m, va);
 2571         }
 2572         return (pmap_unuse_pt(pmap, va, free));
 2573 }
 2574 
 2575 /*
 2576  * Remove a single page from a process address space
 2577  */
 2578 static void
 2579 pmap_remove_page(pmap_t pmap, vm_offset_t va, vm_page_t *free)
 2580 {
 2581         pt_entry_t *pte;
 2582 
 2583         mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 2584         KASSERT(curthread->td_pinned > 0, ("curthread not pinned"));
 2585         PMAP_LOCK_ASSERT(pmap, MA_OWNED);
 2586         if ((pte = pmap_pte_quick(pmap, va)) == NULL || *pte == 0)
 2587                 return;
 2588         pmap_remove_pte(pmap, pte, va, free);
 2589         pmap_invalidate_page(pmap, va);
 2590 }
 2591 
 2592 /*
 2593  *      Remove the given range of addresses from the specified map.
 2594  *
 2595  *      It is assumed that the start and end are properly
 2596  *      rounded to the page size.
 2597  */
 2598 void
 2599 pmap_remove(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
 2600 {
 2601         vm_offset_t pdnxt;
 2602         pd_entry_t ptpaddr;
 2603         pt_entry_t *pte;
 2604         vm_page_t free = NULL;
 2605         int anyvalid;
 2606 
 2607         /*
 2608          * Perform an unsynchronized read.  This is, however, safe.
 2609          */
 2610         if (pmap->pm_stats.resident_count == 0)
 2611                 return;
 2612 
 2613         anyvalid = 0;
 2614 
 2615         vm_page_lock_queues();
 2616         sched_pin();
 2617         PMAP_LOCK(pmap);
 2618 
 2619         /*
 2620          * Special handling for removing a single page: it is a very
 2621          * common operation, so it is worth short-circuiting the
 2622          * general code below.
 2623          */
 2624         if ((sva + PAGE_SIZE == eva) && 
 2625             ((pmap->pm_pdir[(sva >> PDRSHIFT)] & PG_PS) == 0)) {
 2626                 pmap_remove_page(pmap, sva, &free);
 2627                 goto out;
 2628         }
 2629 
 2630         for (; sva < eva; sva = pdnxt) {
 2631                 unsigned pdirindex;
 2632 
 2633                 /*
 2634                  * Calculate index for next page table.
 2635                  */
 2636                 pdnxt = (sva + NBPDR) & ~PDRMASK;
 2637                 if (pdnxt < sva)
 2638                         pdnxt = eva;
 2639                 if (pmap->pm_stats.resident_count == 0)
 2640                         break;
 2641 
 2642                 pdirindex = sva >> PDRSHIFT;
 2643                 ptpaddr = pmap->pm_pdir[pdirindex];
 2644 
 2645                 /*
 2646                  * Weed out invalid mappings.  Note: we assume that the page
 2647                  * directory table is always allocated and mapped in kernel VA.
 2648                  */
 2649                 if (ptpaddr == 0)
 2650                         continue;
 2651 
 2652                 /*
 2653                  * Check for large page.
 2654                  */
 2655                 if ((ptpaddr & PG_PS) != 0) {
 2656                         /*
 2657                          * Are we removing the entire large page?  If not,
 2658                          * demote the mapping and fall through.
 2659                          */
 2660                         if (sva + NBPDR == pdnxt && eva >= pdnxt) {
 2661                                 /*
 2662                                  * The TLB entry for a PG_G mapping is
 2663                                  * invalidated by pmap_remove_pde().
 2664                                  */
 2665                                 if ((ptpaddr & PG_G) == 0)
 2666                                         anyvalid = 1;
 2667                                 pmap_remove_pde(pmap,
 2668                                     &pmap->pm_pdir[pdirindex], sva, &free);
 2669                                 continue;
 2670                         } else if (!pmap_demote_pde(pmap,
 2671                             &pmap->pm_pdir[pdirindex], sva)) {
 2672                                 /* The large page mapping was destroyed. */
 2673                                 continue;
 2674                         }
 2675                 }
 2676 
 2677                 /*
 2678                  * Limit our scan to either the end of the va represented
 2679                  * by the current page table page, or to the end of the
 2680                  * range being removed.
 2681                  */
 2682                 if (pdnxt > eva)
 2683                         pdnxt = eva;
 2684 
 2685                 for (pte = pmap_pte_quick(pmap, sva); sva != pdnxt; pte++,
 2686                     sva += PAGE_SIZE) {
 2687                         if (*pte == 0)
 2688                                 continue;
 2689 
 2690                         /*
 2691                          * The TLB entry for a PG_G mapping is invalidated
 2692                          * by pmap_remove_pte().
 2693                          */
 2694                         if ((*pte & PG_G) == 0)
 2695                                 anyvalid = 1;
 2696                         if (pmap_remove_pte(pmap, pte, sva, &free))
 2697                                 break;
 2698                 }
 2699         }
 2700 out:
 2701         sched_unpin();
 2702         if (anyvalid)
 2703                 pmap_invalidate_all(pmap);
 2704         vm_page_unlock_queues();
 2705         PMAP_UNLOCK(pmap);
 2706         pmap_free_zero_pages(free);
 2707 }
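/*
 * Added note: pmap_remove() batches TLB invalidation.  Non-global entries
 * are not invalidated page by page; instead "anyvalid" records that at
 * least one was removed and a single pmap_invalidate_all() is issued at the
 * end.  PG_G (global) mappings are the exception: they survive a %cr3
 * reload, so pmap_remove_pte()/pmap_remove_pde() invalidate them
 * immediately.
 */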
 2708 
 2709 /*
 2710  *      Routine:        pmap_remove_all
 2711  *      Function:
 2712  *              Removes this physical page from
 2713  *              all physical maps in which it resides.
 2714  *              Reflects back modify bits to the pager.
 2715  *
 2716  *      Notes:
 2717  *              Original versions of this routine were very
 2718  *              inefficient because they iteratively called
 2719  *              pmap_remove (slow...)
 2720  */
 2721 
 2722 void
 2723 pmap_remove_all(vm_page_t m)
 2724 {
 2725         struct md_page *pvh;
 2726         pv_entry_t pv;
 2727         pmap_t pmap;
 2728         pt_entry_t *pte, tpte;
 2729         pd_entry_t *pde;
 2730         vm_offset_t va;
 2731         vm_page_t free;
 2732 
 2733         KASSERT((m->flags & PG_FICTITIOUS) == 0,
 2734             ("pmap_remove_all: page %p is fictitious", m));
 2735         mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 2736         sched_pin();
 2737         pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
 2738         while ((pv = TAILQ_FIRST(&pvh->pv_list)) != NULL) {
 2739                 va = pv->pv_va;
 2740                 pmap = PV_PMAP(pv);
 2741                 PMAP_LOCK(pmap);
 2742                 pde = pmap_pde(pmap, va);
 2743                 (void)pmap_demote_pde(pmap, pde, va);
 2744                 PMAP_UNLOCK(pmap);
 2745         }
 2746         while ((pv = TAILQ_FIRST(&m->md.pv_list)) != NULL) {
 2747                 pmap = PV_PMAP(pv);
 2748                 PMAP_LOCK(pmap);
 2749                 pmap->pm_stats.resident_count--;
 2750                 pde = pmap_pde(pmap, pv->pv_va);
 2751                 KASSERT((*pde & PG_PS) == 0, ("pmap_remove_all: found"
 2752                     " a 4mpage in page %p's pv list", m));
 2753                 pte = pmap_pte_quick(pmap, pv->pv_va);
 2754                 tpte = pte_load_clear(pte);
 2755                 if (tpte & PG_W)
 2756                         pmap->pm_stats.wired_count--;
 2757                 if (tpte & PG_A)
 2758                         vm_page_flag_set(m, PG_REFERENCED);
 2759 
 2760                 /*
 2761                  * Update the vm_page_t clean and reference bits.
 2762                  */
 2763                 if ((tpte & (PG_M | PG_RW)) == (PG_M | PG_RW))
 2764                         vm_page_dirty(m);
 2765                 free = NULL;
 2766                 pmap_unuse_pt(pmap, pv->pv_va, &free);
 2767                 pmap_invalidate_page(pmap, pv->pv_va);
 2768                 pmap_free_zero_pages(free);
 2769                 TAILQ_REMOVE(&m->md.pv_list, pv, pv_list);
 2770                 free_pv_entry(pmap, pv);
 2771                 PMAP_UNLOCK(pmap);
 2772         }
 2773         vm_page_flag_clear(m, PG_WRITEABLE);
 2774         sched_unpin();
 2775 }
 2776 
 2777 /*
 2778  * pmap_protect_pde: apply the given protection to a 4mpage mapping
 2779  */
 2780 static boolean_t
 2781 pmap_protect_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t sva, vm_prot_t prot)
 2782 {
 2783         pd_entry_t newpde, oldpde;
 2784         vm_offset_t eva, va;
 2785         vm_page_t m;
 2786         boolean_t anychanged;
 2787 
 2788         PMAP_LOCK_ASSERT(pmap, MA_OWNED);
 2789         KASSERT((sva & PDRMASK) == 0,
 2790             ("pmap_protect_pde: sva is not 4mpage aligned"));
 2791         anychanged = FALSE;
 2792 retry:
 2793         oldpde = newpde = *pde;
 2794         if (oldpde & PG_MANAGED) {
 2795                 eva = sva + NBPDR;
 2796                 for (va = sva, m = PHYS_TO_VM_PAGE(oldpde & PG_PS_FRAME);
 2797                     va < eva; va += PAGE_SIZE, m++) {
 2798                         /*
 2799                          * In contrast to the analogous operation on a 4KB page
 2800                          * mapping, the mapping's PG_A flag is not cleared and
 2801                          * the page's PG_REFERENCED flag is not set.  The
 2802                          * reason is that pmap_demote_pde() expects that a 2/4MB
 2803                          * page mapping with a stored page table page has PG_A
 2804                          * set.
 2805                          */
 2806                         if ((oldpde & (PG_M | PG_RW)) == (PG_M | PG_RW))
 2807                                 vm_page_dirty(m);
 2808                 }
 2809         }
 2810         if ((prot & VM_PROT_WRITE) == 0)
 2811                 newpde &= ~(PG_RW | PG_M);
 2812 #ifdef PAE
 2813         if ((prot & VM_PROT_EXECUTE) == 0)
 2814                 newpde |= pg_nx;
 2815 #endif
 2816         if (newpde != oldpde) {
 2817                 if (!pde_cmpset(pde, oldpde, newpde))
 2818                         goto retry;
 2819                 if (oldpde & PG_G)
 2820                         pmap_invalidate_page(pmap, sva);
 2821                 else
 2822                         anychanged = TRUE;
 2823         }
 2824         return (anychanged);
 2825 }
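/*
 * Added note: the PDE above is rewritten with pde_cmpset() in a retry loop
 * rather than a plain store because the MMU can set PG_A or PG_M in the
 * entry concurrently; if the compare-and-set fails, the function re-reads
 * the PDE so those hardware-updated bits are not silently lost.
 */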
 2826 
 2827 /*
 2828  *      Set the physical protection on the
 2829  *      specified range of this map as requested.
 2830  */
 2831 void
 2832 pmap_protect(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot)
 2833 {
 2834         vm_offset_t pdnxt;
 2835         pd_entry_t ptpaddr;
 2836         pt_entry_t *pte;
 2837         int anychanged;
 2838 
 2839         if ((prot & VM_PROT_READ) == VM_PROT_NONE) {
 2840                 pmap_remove(pmap, sva, eva);
 2841                 return;
 2842         }
 2843 
 2844 #ifdef PAE
 2845         if ((prot & (VM_PROT_WRITE|VM_PROT_EXECUTE)) ==
 2846             (VM_PROT_WRITE|VM_PROT_EXECUTE))
 2847                 return;
 2848 #else
 2849         if (prot & VM_PROT_WRITE)
 2850                 return;
 2851 #endif
 2852 
 2853         anychanged = 0;
 2854 
 2855         vm_page_lock_queues();
 2856         sched_pin();
 2857         PMAP_LOCK(pmap);
 2858         for (; sva < eva; sva = pdnxt) {
 2859                 pt_entry_t obits, pbits;
 2860                 unsigned pdirindex;
 2861 
 2862                 pdnxt = (sva + NBPDR) & ~PDRMASK;
 2863                 if (pdnxt < sva)
 2864                         pdnxt = eva;
 2865 
 2866                 pdirindex = sva >> PDRSHIFT;
 2867                 ptpaddr = pmap->pm_pdir[pdirindex];
 2868 
 2869                 /*
 2870                  * Weed out invalid mappings. Note: we assume that the page
 2871                  * directory table is always allocated and mapped in kernel virtual address space.
 2872                  */
 2873                 if (ptpaddr == 0)
 2874                         continue;
 2875 
 2876                 /*
 2877                  * Check for large page.
 2878                  */
 2879                 if ((ptpaddr & PG_PS) != 0) {
 2880                         /*
 2881                          * Are we protecting the entire large page?  If not,
 2882                          * demote the mapping and fall through.
 2883                          */
 2884                         if (sva + NBPDR == pdnxt && eva >= pdnxt) {
 2885                                 /*
 2886                                  * The TLB entry for a PG_G mapping is
 2887                                  * invalidated by pmap_protect_pde().
 2888                                  */
 2889                                 if (pmap_protect_pde(pmap,
 2890                                     &pmap->pm_pdir[pdirindex], sva, prot))
 2891                                         anychanged = 1;
 2892                                 continue;
 2893                         } else if (!pmap_demote_pde(pmap,
 2894                             &pmap->pm_pdir[pdirindex], sva)) {
 2895                                 /* The large page mapping was destroyed. */
 2896                                 continue;
 2897                         }
 2898                 }
 2899 
 2900                 if (pdnxt > eva)
 2901                         pdnxt = eva;
 2902 
 2903                 for (pte = pmap_pte_quick(pmap, sva); sva != pdnxt; pte++,
 2904                     sva += PAGE_SIZE) {
 2905                         vm_page_t m;
 2906 
 2907 retry:
 2908                         /*
 2909                          * Regardless of whether a pte is 32 or 64 bits in
 2910                          * size, PG_RW, PG_A, and PG_M are among the least
 2911                          * significant 32 bits.
 2912                          */
 2913                         obits = pbits = *pte;
 2914                         if ((pbits & PG_V) == 0)
 2915                                 continue;
 2916                         if (pbits & PG_MANAGED) {
 2917                                 m = NULL;
 2918                                 if (pbits & PG_A) {
 2919                                         m = PHYS_TO_VM_PAGE(pbits & PG_FRAME);
 2920                                         vm_page_flag_set(m, PG_REFERENCED);
 2921                                         pbits &= ~PG_A;
 2922                                 }
 2923                                 if ((pbits & (PG_M | PG_RW)) == (PG_M | PG_RW)) {
 2924                                         if (m == NULL)
 2925                                                 m = PHYS_TO_VM_PAGE(pbits & PG_FRAME);
 2926                                         vm_page_dirty(m);
 2927                                 }
 2928                         }
 2929 
 2930                         if ((prot & VM_PROT_WRITE) == 0)
 2931                                 pbits &= ~(PG_RW | PG_M);
 2932 #ifdef PAE
 2933                         if ((prot & VM_PROT_EXECUTE) == 0)
 2934                                 pbits |= pg_nx;
 2935 #endif
 2936 
 2937                         if (pbits != obits) {
 2938 #ifdef PAE
 2939                                 if (!atomic_cmpset_64(pte, obits, pbits))
 2940                                         goto retry;
 2941 #else
 2942                                 if (!atomic_cmpset_int((u_int *)pte, obits,
 2943                                     pbits))
 2944                                         goto retry;
 2945 #endif
 2946                                 if (obits & PG_G)
 2947                                         pmap_invalidate_page(pmap, sva);
 2948                                 else
 2949                                         anychanged = 1;
 2950                         }
 2951                 }
 2952         }
 2953         sched_unpin();
 2954         if (anychanged)
 2955                 pmap_invalidate_all(pmap);
 2956         vm_page_unlock_queues();
 2957         PMAP_UNLOCK(pmap);
 2958 }
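
/*
 * The outer loop above advances one page-directory entry at a time:
 * "pdnxt" is rounded up to the next 4MB boundary, with a wrap-around
 * check for ranges that reach the top of the address space and a clamp
 * to "eva".  A small user-space sketch of that stride arithmetic, using
 * a made-up SP_SIZE constant and walk_by_superpage() function:
 *
 *      #include <inttypes.h>
 *      #include <stdint.h>
 *      #include <stdio.h>
 *
 *      #define SP_SIZE (4u * 1024 * 1024)      // bytes per superpage
 *      #define SP_MASK (SP_SIZE - 1)
 *
 *      static void
 *      walk_by_superpage(uint32_t sva, uint32_t eva)
 *      {
 *              uint32_t next;
 *
 *              for (; sva < eva; sva = next) {
 *                      // Start of the next superpage-aligned region.
 *                      next = (sva + SP_SIZE) & ~SP_MASK;
 *                      if (next < sva)         // wrapped past 2^32 - 1
 *                              next = eva;
 *                      printf("chunk %#" PRIx32 " .. %#" PRIx32 "\n", sva,
 *                          next < eva ? next : eva);
 *              }
 *      }
 */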
 2959 
 2960 /*
 2961  * Tries to promote the 512 or 1024 contiguous 4KB page mappings that are
 2962  * within a single page table page (PTP) to a single 2- or 4MB page mapping.
 2963  * For promotion to occur, two conditions must be met: (1) the 4KB page
 2964  * mappings must map aligned, contiguous physical memory and (2) the 4KB page
 2965  * mappings must have identical characteristics.
 2966  *
 2967  * Managed (PG_MANAGED) mappings within the kernel address space are not
 2968  * promoted.  The reason is that kernel PDEs are replicated in each pmap but
 2969  * pmap_clear_ptes() and pmap_ts_referenced() only read the PDE from the kernel
 2970  * pmap.
 2971  */
 2972 static void
 2973 pmap_promote_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t va)
 2974 {
 2975         pd_entry_t newpde;
 2976         pmap_t allpmaps_entry;
 2977         pt_entry_t *firstpte, oldpte, pa, *pte;
 2978         vm_offset_t oldpteva;
 2979         vm_page_t mpte;
 2980 
 2981         PMAP_LOCK_ASSERT(pmap, MA_OWNED);
 2982 
 2983         /*
 2984          * Examine the first PTE in the specified PTP.  Abort if this PTE is
 2985          * either invalid, unused, or does not map the first 4KB physical page
 2986          * within a 2- or 4MB page.
 2987          */
 2988         firstpte = vtopte(trunc_4mpage(va));
 2989 setpde:
 2990         newpde = *firstpte;
 2991         if ((newpde & ((PG_FRAME & PDRMASK) | PG_A | PG_V)) != (PG_A | PG_V)) {
 2992                 pmap_pde_p_failures++;
 2993                 CTR2(KTR_PMAP, "pmap_promote_pde: failure for va %#x"
 2994                     " in pmap %p", va, pmap);
 2995                 return;
 2996         }
 2997         if ((*firstpte & PG_MANAGED) != 0 && pmap == kernel_pmap) {
 2998                 pmap_pde_p_failures++;
 2999                 CTR2(KTR_PMAP, "pmap_promote_pde: failure for va %#x"
 3000                     " in pmap %p", va, pmap);
 3001                 return;
 3002         }
 3003         if ((newpde & (PG_M | PG_RW)) == PG_RW) {
 3004                 /*
 3005                  * When PG_M is already clear, PG_RW can be cleared without
 3006                  * a TLB invalidation.
 3007                  */
 3008                 if (!atomic_cmpset_int((u_int *)firstpte, newpde, newpde &
 3009                     ~PG_RW))  
 3010                         goto setpde;
 3011                 newpde &= ~PG_RW;
 3012         }
 3013 
 3014         /* 
 3015          * Examine each of the other PTEs in the specified PTP.  Abort if this
 3016          * PTE maps an unexpected 4KB physical page or does not have identical
 3017          * characteristics to the first PTE.
 3018          */
 3019         pa = (newpde & (PG_PS_FRAME | PG_A | PG_V)) + NBPDR - PAGE_SIZE;
 3020         for (pte = firstpte + NPTEPG - 1; pte > firstpte; pte--) {
 3021 setpte:
 3022                 oldpte = *pte;
 3023                 if ((oldpte & (PG_FRAME | PG_A | PG_V)) != pa) {
 3024                         pmap_pde_p_failures++;
 3025                         CTR2(KTR_PMAP, "pmap_promote_pde: failure for va %#x"
 3026                             " in pmap %p", va, pmap);
 3027                         return;
 3028                 }
 3029                 if ((oldpte & (PG_M | PG_RW)) == PG_RW) {
 3030                         /*
 3031                          * When PG_M is already clear, PG_RW can be cleared
 3032                          * without a TLB invalidation.
 3033                          */
 3034                         if (!atomic_cmpset_int((u_int *)pte, oldpte,
 3035                             oldpte & ~PG_RW))
 3036                                 goto setpte;
 3037                         oldpte &= ~PG_RW;
 3038                         oldpteva = (oldpte & PG_FRAME & PDRMASK) |
 3039                             (va & ~PDRMASK);
 3040                         CTR2(KTR_PMAP, "pmap_promote_pde: protect for va %#x"
 3041                             " in pmap %p", oldpteva, pmap);
 3042                 }
 3043                 if ((oldpte & PG_PTE_PROMOTE) != (newpde & PG_PTE_PROMOTE)) {
 3044                         pmap_pde_p_failures++;
 3045                         CTR2(KTR_PMAP, "pmap_promote_pde: failure for va %#x"
 3046                             " in pmap %p", va, pmap);
 3047                         return;
 3048                 }
 3049                 pa -= PAGE_SIZE;
 3050         }
 3051 
 3052         /*
 3053          * Save the page table page in its current state until the PDE
 3054          * mapping the superpage is demoted by pmap_demote_pde() or
 3055          * destroyed by pmap_remove_pde(). 
 3056          */
 3057         mpte = PHYS_TO_VM_PAGE(*pde & PG_FRAME);
 3058         KASSERT(mpte >= vm_page_array &&
 3059             mpte < &vm_page_array[vm_page_array_size],
 3060             ("pmap_promote_pde: page table page is out of range"));
 3061         KASSERT(mpte->pindex == va >> PDRSHIFT,
 3062             ("pmap_promote_pde: page table page's pindex is wrong"));
 3063         pmap_insert_pt_page(pmap, mpte);
 3064 
 3065         /*
 3066          * Promote the pv entries.
 3067          */
 3068         if ((newpde & PG_MANAGED) != 0)
 3069                 pmap_pv_promote_pde(pmap, va, newpde & PG_PS_FRAME);
 3070 
 3071         /*
 3072          * Propagate the PAT index to its proper position.
 3073          */
 3074         if ((newpde & PG_PTE_PAT) != 0)
 3075                 newpde ^= PG_PDE_PAT | PG_PTE_PAT;
 3076 
 3077         /*
 3078          * Map the superpage.
 3079          */
 3080         if (pmap == kernel_pmap) {
 3081                 mtx_lock_spin(&allpmaps_lock);
 3082                 LIST_FOREACH(allpmaps_entry, &allpmaps, pm_list) {
 3083                         pde = pmap_pde(allpmaps_entry, va);
 3084                         pde_store(pde, PG_PS | newpde);
 3085                 }
 3086                 mtx_unlock_spin(&allpmaps_lock);
 3087         } else
 3088                 pde_store(pde, PG_PS | newpde);
 3089 
 3090         pmap_pde_promotions++;
 3091         CTR2(KTR_PMAP, "pmap_promote_pde: success for va %#x"
 3092             " in pmap %p", va, pmap);
 3093 }
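
/*
 * Promotion succeeds above only when every PTE in the page table page
 * maps the expected, physically contiguous 4KB frame and agrees in all
 * of the attribute bits compared via PG_PTE_PROMOTE.  A simplified
 * user-space check of those two conditions; FRAME_MASK, ATTR_MASK,
 * NPTE, and promotable() are hypothetical stand-ins for the kernel's
 * PTE layout, not its actual constants.
 *
 *      #include <stdbool.h>
 *      #include <stdint.h>
 *
 *      #define PAGE_SZ         4096u
 *      #define NPTE            1024u           // PTEs per page table page
 *      #define FRAME_MASK      0xfffff000u     // physical frame bits
 *      #define ATTR_MASK       0x00000fffu     // bits that must match
 *
 *      static bool
 *      promotable(const uint32_t pte[NPTE])
 *      {
 *              uint32_t attrs, frame;
 *              unsigned i;
 *
 *              frame = pte[0] & FRAME_MASK;
 *              attrs = pte[0] & ATTR_MASK;
 *              if ((frame & (NPTE * PAGE_SZ - 1)) != 0)
 *                      return (false);         // not superpage-aligned
 *              for (i = 1; i < NPTE; i++) {
 *                      if ((pte[i] & FRAME_MASK) != frame + i * PAGE_SZ)
 *                              return (false); // not contiguous
 *                      if ((pte[i] & ATTR_MASK) != attrs)
 *                              return (false); // differing attributes
 *              }
 *              return (true);
 *      }
 */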
 3094 
 3095 /*
 3096  *      Insert the given physical page (p) at
 3097  *      the specified virtual address (v) in the
 3098  *      target physical map with the protection requested.
 3099  *
 3100  *      If specified, the page will be wired down, meaning
 3101  *      that the related pte can not be reclaimed.
 3102  *
 3103  *      NB:  This is the only routine which MAY NOT lazy-evaluate
 3104  *      or lose information.  That is, this routine must actually
 3105  *      insert this page into the given map NOW.
 3106  */
 3107 void
 3108 pmap_enter(pmap_t pmap, vm_offset_t va, vm_prot_t access, vm_page_t m,
 3109     vm_prot_t prot, boolean_t wired)
 3110 {
 3111         vm_paddr_t pa;
 3112         pd_entry_t *pde;
 3113         pt_entry_t *pte;
 3114         vm_paddr_t opa;
 3115         pt_entry_t origpte, newpte;
 3116         vm_page_t mpte, om;
 3117         boolean_t invlva;
 3118 
 3119         va = trunc_page(va);
 3120         KASSERT(va <= VM_MAX_KERNEL_ADDRESS, ("pmap_enter: toobig"));
 3121         KASSERT(va < UPT_MIN_ADDRESS || va >= UPT_MAX_ADDRESS,
 3122             ("pmap_enter: invalid to pmap_enter page table pages (va: 0x%x)", va));
 3123 
 3124         mpte = NULL;
 3125 
 3126         vm_page_lock_queues();
 3127         PMAP_LOCK(pmap);
 3128         sched_pin();
 3129 
 3130         /*
 3131          * In the case that a page table page is not
 3132          * resident, we are creating it here.
 3133          */
 3134         if (va < VM_MAXUSER_ADDRESS) {
 3135                 mpte = pmap_allocpte(pmap, va, M_WAITOK);
 3136         }
 3137 
 3138         pde = pmap_pde(pmap, va);
 3139         if ((*pde & PG_PS) != 0)
 3140                 panic("pmap_enter: attempted pmap_enter on 4MB page");
 3141         pte = pmap_pte_quick(pmap, va);
 3142 
 3143         /*
 3144          * Page Directory table entry not valid, we need a new PT page
 3145          */
 3146         if (pte == NULL) {
 3147                 panic("pmap_enter: invalid page directory pdir=%#jx, va=%#x",
 3148                         (uintmax_t)pmap->pm_pdir[PTDPTDI], va);
 3149         }
 3150 
 3151         pa = VM_PAGE_TO_PHYS(m);
 3152         om = NULL;
 3153         origpte = *pte;
 3154         opa = origpte & PG_FRAME;
 3155 
 3156         /*
 3157          * Mapping has not changed, must be protection or wiring change.
 3158          */
 3159         if (origpte && (opa == pa)) {
 3160                 /*
 3161                  * Wiring change, just update stats. We don't worry about
 3162                  * wiring PT pages as they remain resident as long as there
 3163                  * are valid mappings in them. Hence, if a user page is wired,
 3164                  * the PT page will be also.
 3165                  */
 3166                 if (wired && ((origpte & PG_W) == 0))
 3167                         pmap->pm_stats.wired_count++;
 3168                 else if (!wired && (origpte & PG_W))
 3169                         pmap->pm_stats.wired_count--;
 3170 
 3171                 /*
 3172                  * Remove extra pte reference
 3173                  */
 3174                 if (mpte)
 3175                         mpte->wire_count--;
 3176 
 3177                 /*
 3178                  * We might be turning off write access to the page,
 3179                  * so we go ahead and sense modify status.
 3180                  */
 3181                 if (origpte & PG_MANAGED) {
 3182                         om = m;
 3183                         pa |= PG_MANAGED;
 3184                 }
 3185                 goto validate;
 3186         } 
 3187         /*
 3188          * Mapping has changed, invalidate old range and fall through to
 3189          * handle validating new mapping.
 3190          */
 3191         if (opa) {
 3192                 if (origpte & PG_W)
 3193                         pmap->pm_stats.wired_count--;
 3194                 if (origpte & PG_MANAGED) {
 3195                         om = PHYS_TO_VM_PAGE(opa);
 3196                         pmap_remove_entry(pmap, om, va);
 3197                 }
 3198                 if (mpte != NULL) {
 3199                         mpte->wire_count--;
 3200                         KASSERT(mpte->wire_count > 0,
 3201                             ("pmap_enter: missing reference to page table page,"
 3202                              " va: 0x%x", va));
 3203                 }
 3204         } else
 3205                 pmap->pm_stats.resident_count++;
 3206 
 3207         /*
 3208          * Enter on the PV list if part of our managed memory.
 3209          */
 3210         if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0) {
 3211                 KASSERT(va < kmi.clean_sva || va >= kmi.clean_eva,
 3212                     ("pmap_enter: managed mapping within the clean submap"));
 3213                 pmap_insert_entry(pmap, va, m);
 3214                 pa |= PG_MANAGED;
 3215         }
 3216 
 3217         /*
 3218          * Increment counters
 3219          */
 3220         if (wired)
 3221                 pmap->pm_stats.wired_count++;
 3222 
 3223 validate:
 3224         /*
 3225          * Now validate mapping with desired protection/wiring.
 3226          */
 3227         newpte = (pt_entry_t)(pa | pmap_cache_bits(m->md.pat_mode, 0) | PG_V);
 3228         if ((prot & VM_PROT_WRITE) != 0) {
 3229                 newpte |= PG_RW;
 3230                 vm_page_flag_set(m, PG_WRITEABLE);
 3231         }
 3232 #ifdef PAE
 3233         if ((prot & VM_PROT_EXECUTE) == 0)
 3234                 newpte |= pg_nx;
 3235 #endif
 3236         if (wired)
 3237                 newpte |= PG_W;
 3238         if (va < VM_MAXUSER_ADDRESS)
 3239                 newpte |= PG_U;
 3240         if (pmap == kernel_pmap)
 3241                 newpte |= pgeflag;
 3242 
 3243         /*
 3244          * if the mapping or permission bits are different, we need
 3245          * to update the pte.
 3246          */
 3247         if ((origpte & ~(PG_M|PG_A)) != newpte) {
 3248                 newpte |= PG_A;
 3249                 if ((access & VM_PROT_WRITE) != 0)
 3250                         newpte |= PG_M;
 3251                 if (origpte & PG_V) {
 3252                         invlva = FALSE;
 3253                         origpte = pte_load_store(pte, newpte);
 3254                         if (origpte & PG_A) {
 3255                                 if (origpte & PG_MANAGED)
 3256                                         vm_page_flag_set(om, PG_REFERENCED);
 3257                                 if (opa != VM_PAGE_TO_PHYS(m))
 3258                                         invlva = TRUE;
 3259 #ifdef PAE
 3260                                 if ((origpte & PG_NX) == 0 &&
 3261                                     (newpte & PG_NX) != 0)
 3262                                         invlva = TRUE;
 3263 #endif
 3264                         }
 3265                         if ((origpte & (PG_M | PG_RW)) == (PG_M | PG_RW)) {
 3266                                 if ((origpte & PG_MANAGED) != 0)
 3267                                         vm_page_dirty(om);
 3268                                 if ((prot & VM_PROT_WRITE) == 0)
 3269                                         invlva = TRUE;
 3270                         }
 3271                         if (invlva)
 3272                                 pmap_invalidate_page(pmap, va);
 3273                 } else
 3274                         pte_store(pte, newpte);
 3275         }
 3276 
 3277         /*
 3278          * If both the page table page and the reservation are fully
 3279          * populated, then attempt promotion.
 3280          */
 3281         if ((mpte == NULL || mpte->wire_count == NPTEPG) &&
 3282             pg_ps_enabled && vm_reserv_level_iffullpop(m) == 0)
 3283                 pmap_promote_pde(pmap, pde, va);
 3284 
 3285         sched_unpin();
 3286         vm_page_unlock_queues();
 3287         PMAP_UNLOCK(pmap);
 3288 }
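
/*
 * "newpte" above is assembled from the physical address, the PAT cache
 * bits, and one flag per requested property (writable, no-execute under
 * PAE, wired, user, global).  The sketch below shows the same idea in
 * user space with invented F_* flag names and a make_pte() helper; it
 * is a reduced illustration, not the kernel's actual PTE encoding.
 *
 *      #include <stdbool.h>
 *      #include <stdint.h>
 *
 *      #define F_V     0x001u  // valid
 *      #define F_RW    0x002u  // writable
 *      #define F_U     0x004u  // user accessible
 *      #define F_W     0x200u  // wired (software-defined bit)
 *
 *      static uint32_t
 *      make_pte(uint32_t pa, bool writable, bool user, bool wired)
 *      {
 *              // Keep only the frame number; the low 12 bits carry flags.
 *              uint32_t pte = (pa & ~0xfffu) | F_V;
 *
 *              if (writable)
 *                      pte |= F_RW;
 *              if (user)
 *                      pte |= F_U;
 *              if (wired)
 *                      pte |= F_W;
 *              return (pte);
 *      }
 */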
 3289 
 3290 /*
 3291  * Tries to create a 2- or 4MB page mapping.  Returns TRUE if successful and
 3292  * FALSE otherwise.  Fails if (1) a page table page cannot be allocated without
 3293  * blocking, (2) a mapping already exists at the specified virtual address, or
 3294  * (3) a pv entry cannot be allocated without reclaiming another pv entry. 
 3295  */
 3296 static boolean_t
 3297 pmap_enter_pde(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot)
 3298 {
 3299         pd_entry_t *pde, newpde;
 3300 
 3301         mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 3302         PMAP_LOCK_ASSERT(pmap, MA_OWNED);
 3303         pde = pmap_pde(pmap, va);
 3304         if (*pde != 0) {
 3305                 CTR2(KTR_PMAP, "pmap_enter_pde: failure for va %#lx"
 3306                     " in pmap %p", va, pmap);
 3307                 return (FALSE);
 3308         }
 3309         newpde = VM_PAGE_TO_PHYS(m) | pmap_cache_bits(m->md.pat_mode, 1) |
 3310             PG_PS | PG_V;
 3311         if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0) {
 3312                 newpde |= PG_MANAGED;
 3313 
 3314                 /*
 3315                  * Abort this mapping if its PV entry could not be created.
 3316                  */
 3317                 if (!pmap_pv_insert_pde(pmap, va, VM_PAGE_TO_PHYS(m))) {
 3318                         CTR2(KTR_PMAP, "pmap_enter_pde: failure for va %#lx"
 3319                             " in pmap %p", va, pmap);
 3320                         return (FALSE);
 3321                 }
 3322         }
 3323 #ifdef PAE
 3324         if ((prot & VM_PROT_EXECUTE) == 0)
 3325                 newpde |= pg_nx;
 3326 #endif
 3327         if (va < VM_MAXUSER_ADDRESS)
 3328                 newpde |= PG_U;
 3329 
 3330         /*
 3331          * Increment counters.
 3332          */
 3333         pmap->pm_stats.resident_count += NBPDR / PAGE_SIZE;
 3334 
 3335         /*
 3336          * Map the superpage.
 3337          */
 3338         pde_store(pde, newpde);
 3339 
 3340         pmap_pde_mappings++;
 3341         CTR2(KTR_PMAP, "pmap_enter_pde: success for va %#lx"
 3342             " in pmap %p", va, pmap);
 3343         return (TRUE);
 3344 }
 3345 
 3346 /*
 3347  * Maps a sequence of resident pages belonging to the same object.
 3348  * The sequence begins with the given page m_start.  This page is
 3349  * mapped at the given virtual address start.  Each subsequent page is
 3350  * mapped at a virtual address that is offset from start by the same
 3351  * amount as the page is offset from m_start within the object.  The
 3352  * last page in the sequence is the page with the largest offset from
 3353  * m_start that can be mapped at a virtual address less than the given
 3354  * virtual address end.  Not every virtual page between start and end
 3355  * is mapped; only those for which a resident page exists with the
 3356  * corresponding offset from m_start are mapped.
 3357  */
 3358 void
 3359 pmap_enter_object(pmap_t pmap, vm_offset_t start, vm_offset_t end,
 3360     vm_page_t m_start, vm_prot_t prot)
 3361 {
 3362         vm_offset_t va;
 3363         vm_page_t m, mpte;
 3364         vm_pindex_t diff, psize;
 3365 
 3366         VM_OBJECT_LOCK_ASSERT(m_start->object, MA_OWNED);
 3367         psize = atop(end - start);
 3368         mpte = NULL;
 3369         m = m_start;
 3370         PMAP_LOCK(pmap);
 3371         while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) {
 3372                 va = start + ptoa(diff);
 3373                 if ((va & PDRMASK) == 0 && va + NBPDR <= end &&
 3374                     (VM_PAGE_TO_PHYS(m) & PDRMASK) == 0 &&
 3375                     pg_ps_enabled && vm_reserv_level_iffullpop(m) == 0 &&
 3376                     pmap_enter_pde(pmap, va, m, prot))
 3377                         m = &m[NBPDR / PAGE_SIZE - 1];
 3378                 else
 3379                         mpte = pmap_enter_quick_locked(pmap, va, m, prot,
 3380                             mpte);
 3381                 m = TAILQ_NEXT(m, listq);
 3382         }
 3383         PMAP_UNLOCK(pmap);
 3384 }
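
/*
 * A 4MB mapping is attempted above only when the virtual address, the
 * physical address, and the remaining length all line up on a superpage
 * boundary (and the reservation is fully populated).  A compact
 * user-space predicate for the alignment part of that test, assuming
 * va < end and using hypothetical SP_SIZE/superpage_ok() names:
 *
 *      #include <stdbool.h>
 *      #include <stdint.h>
 *
 *      #define SP_SIZE (4u * 1024 * 1024)
 *      #define SP_MASK (SP_SIZE - 1)
 *
 *      static bool
 *      superpage_ok(uint32_t va, uint32_t pa, uint32_t end)
 *      {
 *              // Both addresses aligned and at least one full
 *              // superpage left before "end".
 *              return ((va & SP_MASK) == 0 && (pa & SP_MASK) == 0 &&
 *                  end - va >= SP_SIZE);
 *      }
 */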
 3385 
 3386 /*
 3387  * this code makes some *MAJOR* assumptions:
 3388  * 1. The pmap is the current pmap and it exists.
 3389  * 2. Not wired.
 3390  * 3. Read access.
 3391  * 4. No page table pages.
 3392  * but is *MUCH* faster than pmap_enter...
 3393  */
 3394 
 3395 void
 3396 pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot)
 3397 {
 3398 
 3399         PMAP_LOCK(pmap);
 3400         (void) pmap_enter_quick_locked(pmap, va, m, prot, NULL);
 3401         PMAP_UNLOCK(pmap);
 3402 }
 3403 
 3404 static vm_page_t
 3405 pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va, vm_page_t m,
 3406     vm_prot_t prot, vm_page_t mpte)
 3407 {
 3408         pt_entry_t *pte;
 3409         vm_paddr_t pa;
 3410         vm_page_t free;
 3411 
 3412         KASSERT(va < kmi.clean_sva || va >= kmi.clean_eva ||
 3413             (m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0,
 3414             ("pmap_enter_quick_locked: managed mapping within the clean submap"));
 3415         mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 3416         PMAP_LOCK_ASSERT(pmap, MA_OWNED);
 3417 
 3418         /*
 3419          * In the case that a page table page is not
 3420          * resident, we are creating it here.
 3421          */
 3422         if (va < VM_MAXUSER_ADDRESS) {
 3423                 unsigned ptepindex;
 3424                 pd_entry_t ptepa;
 3425 
 3426                 /*
 3427                  * Calculate pagetable page index
 3428                  */
 3429                 ptepindex = va >> PDRSHIFT;
 3430                 if (mpte && (mpte->pindex == ptepindex)) {
 3431                         mpte->wire_count++;
 3432                 } else {
 3433                         /*
 3434                          * Get the page directory entry
 3435                          */
 3436                         ptepa = pmap->pm_pdir[ptepindex];
 3437 
 3438                         /*
 3439                          * If the page table page is mapped, we just increment
 3440                          * the hold count, and activate it.
 3441                          */
 3442                         if (ptepa) {
 3443                                 if (ptepa & PG_PS)
 3444                                         return (NULL);
 3445                                 mpte = PHYS_TO_VM_PAGE(ptepa & PG_FRAME);
 3446                                 mpte->wire_count++;
 3447                         } else {
 3448                                 mpte = _pmap_allocpte(pmap, ptepindex,
 3449                                     M_NOWAIT);
 3450                                 if (mpte == NULL)
 3451                                         return (mpte);
 3452                         }
 3453                 }
 3454         } else {
 3455                 mpte = NULL;
 3456         }
 3457 
 3458         /*
 3459          * This call to vtopte makes the assumption that we are
 3460          * entering the page into the current pmap.  In order to support
 3461          * quick entry into any pmap, one would likely use pmap_pte_quick.
 3462          * But that isn't as quick as vtopte.
 3463          */
 3464         pte = vtopte(va);
 3465         if (*pte) {
 3466                 if (mpte != NULL) {
 3467                         mpte->wire_count--;
 3468                         mpte = NULL;
 3469                 }
 3470                 return (mpte);
 3471         }
 3472 
 3473         /*
 3474          * Enter on the PV list if part of our managed memory.
 3475          */
 3476         if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0 &&
 3477             !pmap_try_insert_pv_entry(pmap, va, m)) {
 3478                 if (mpte != NULL) {
 3479                         free = NULL;
 3480                         if (pmap_unwire_pte_hold(pmap, mpte, &free)) {
 3481                                 pmap_invalidate_page(pmap, va);
 3482                                 pmap_free_zero_pages(free);
 3483                         }
 3484                         
 3485                         mpte = NULL;
 3486                 }
 3487                 return (mpte);
 3488         }
 3489 
 3490         /*
 3491          * Increment counters
 3492          */
 3493         pmap->pm_stats.resident_count++;
 3494 
 3495         pa = VM_PAGE_TO_PHYS(m) | pmap_cache_bits(m->md.pat_mode, 0);
 3496 #ifdef PAE
 3497         if ((prot & VM_PROT_EXECUTE) == 0)
 3498                 pa |= pg_nx;
 3499 #endif
 3500 
 3501         /*
 3502          * Now validate mapping with RO protection
 3503          */
 3504         if (m->flags & (PG_FICTITIOUS|PG_UNMANAGED))
 3505                 pte_store(pte, pa | PG_V | PG_U);
 3506         else
 3507                 pte_store(pte, pa | PG_V | PG_U | PG_MANAGED);
 3508         return (mpte);
 3509 }
 3510 
 3511 /*
 3512  * Make a temporary mapping for a physical address.  This is only intended
 3513  * to be used for panic dumps.
 3514  */
 3515 void *
 3516 pmap_kenter_temporary(vm_paddr_t pa, int i)
 3517 {
 3518         vm_offset_t va;
 3519 
 3520         va = (vm_offset_t)crashdumpmap + (i * PAGE_SIZE);
 3521         pmap_kenter(va, pa);
 3522         invlpg(va);
 3523         return ((void *)crashdumpmap);
 3524 }
 3525 
 3526 /*
 3527  * This code maps large physical mmap regions into the
 3528  * processor address space.  Note that some shortcuts
 3529  * are taken, but the code works.
 3530  */
 3531 void
 3532 pmap_object_init_pt(pmap_t pmap, vm_offset_t addr, vm_object_t object,
 3533     vm_pindex_t pindex, vm_size_t size)
 3534 {
 3535         pd_entry_t *pde;
 3536         vm_paddr_t pa, ptepa;
 3537         vm_page_t p;
 3538         int pat_mode;
 3539 
 3540         VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
 3541         KASSERT(object->type == OBJT_DEVICE || object->type == OBJT_SG,
 3542             ("pmap_object_init_pt: non-device object"));
 3543         if (pseflag && 
 3544             (addr & (NBPDR - 1)) == 0 && (size & (NBPDR - 1)) == 0) {
 3545                 if (!vm_object_populate(object, pindex, pindex + atop(size)))
 3546                         return;
 3547                 p = vm_page_lookup(object, pindex);
 3548                 KASSERT(p->valid == VM_PAGE_BITS_ALL,
 3549                     ("pmap_object_init_pt: invalid page %p", p));
 3550                 pat_mode = p->md.pat_mode;
 3551 
 3552                 /*
 3553                  * Abort the mapping if the first page is not physically
 3554                  * aligned to a 2/4MB page boundary.
 3555                  */
 3556                 ptepa = VM_PAGE_TO_PHYS(p);
 3557                 if (ptepa & (NBPDR - 1))
 3558                         return;
 3559 
 3560                 /*
 3561                  * Skip the first page.  Abort the mapping if the rest of
 3562                  * the pages are not physically contiguous or have differing
 3563                  * memory attributes.
 3564                  */
 3565                 p = TAILQ_NEXT(p, listq);
 3566                 for (pa = ptepa + PAGE_SIZE; pa < ptepa + size;
 3567                     pa += PAGE_SIZE) {
 3568                         KASSERT(p->valid == VM_PAGE_BITS_ALL,
 3569                             ("pmap_object_init_pt: invalid page %p", p));
 3570                         if (pa != VM_PAGE_TO_PHYS(p) ||
 3571                             pat_mode != p->md.pat_mode)
 3572                                 return;
 3573                         p = TAILQ_NEXT(p, listq);
 3574                 }
 3575 
 3576                 /*
 3577                  * Map using 2/4MB pages.  Since "ptepa" is 2/4M aligned and
 3578                  * "size" is a multiple of 2/4M, adding the PAT setting to
 3579                  * "pa" will not affect the termination of this loop.
 3580                  */
 3581                 PMAP_LOCK(pmap);
 3582                 for (pa = ptepa | pmap_cache_bits(pat_mode, 1); pa < ptepa +
 3583                     size; pa += NBPDR) {
 3584                         pde = pmap_pde(pmap, addr);
 3585                         if (*pde == 0) {
 3586                                 pde_store(pde, pa | PG_PS | PG_M | PG_A |
 3587                                     PG_U | PG_RW | PG_V);
 3588                                 pmap->pm_stats.resident_count += NBPDR /
 3589                                     PAGE_SIZE;
 3590                                 pmap_pde_mappings++;
 3591                         }
 3592                         /* Else continue on if the PDE is already valid. */
 3593                         addr += NBPDR;
 3594                 }
 3595                 PMAP_UNLOCK(pmap);
 3596         }
 3597 }
 3598 
 3599 /*
 3600  *      Routine:        pmap_change_wiring
 3601  *      Function:       Change the wiring attribute for a map/virtual-address
 3602  *                      pair.
 3603  *      In/out conditions:
 3604  *                      The mapping must already exist in the pmap.
 3605  */
 3606 void
 3607 pmap_change_wiring(pmap_t pmap, vm_offset_t va, boolean_t wired)
 3608 {
 3609         pd_entry_t *pde;
 3610         pt_entry_t *pte;
 3611         boolean_t are_queues_locked;
 3612 
 3613         are_queues_locked = FALSE;
 3614 retry:
 3615         PMAP_LOCK(pmap);
 3616         pde = pmap_pde(pmap, va);
 3617         if ((*pde & PG_PS) != 0) {
 3618                 if (!wired != ((*pde & PG_W) == 0)) {
 3619                         if (!are_queues_locked) {
 3620                                 are_queues_locked = TRUE;
 3621                                 if (!mtx_trylock(&vm_page_queue_mtx)) {
 3622                                         PMAP_UNLOCK(pmap);
 3623                                         vm_page_lock_queues();
 3624                                         goto retry;
 3625                                 }
 3626                         }
 3627                         if (!pmap_demote_pde(pmap, pde, va))
 3628                                 panic("pmap_change_wiring: demotion failed");
 3629                 } else
 3630                         goto out;
 3631         }
 3632         pte = pmap_pte(pmap, va);
 3633 
 3634         if (wired && !pmap_pte_w(pte))
 3635                 pmap->pm_stats.wired_count++;
 3636         else if (!wired && pmap_pte_w(pte))
 3637                 pmap->pm_stats.wired_count--;
 3638 
 3639         /*
 3640          * Wiring is not a hardware characteristic so there is no need to
 3641          * invalidate TLB.
 3642          */
 3643         pmap_pte_set_w(pte, wired);
 3644         pmap_pte_release(pte);
 3645 out:
 3646         if (are_queues_locked)
 3647                 vm_page_unlock_queues();
 3648         PMAP_UNLOCK(pmap);
 3649 }
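
/*
 * The trylock/retry dance above is a lock-ordering workaround: the page
 * queues lock must be acquired before the pmap lock, so when it cannot
 * be trylocked while the pmap lock is held, the pmap lock is dropped,
 * the queues lock is taken the slow way, and the whole operation is
 * restarted.  A user-space sketch of the same pattern with POSIX
 * threads; the two mutexes, with_both_locks(), and do_work() are
 * illustrative only.
 *
 *      #include <pthread.h>
 *
 *      static pthread_mutex_t queues_lock = PTHREAD_MUTEX_INITIALIZER;
 *      static pthread_mutex_t pmap_lock = PTHREAD_MUTEX_INITIALIZER;
 *
 *      static void
 *      with_both_locks(void (*do_work)(void))
 *      {
 *              int have_queues = 0;
 *
 *      retry:
 *              pthread_mutex_lock(&pmap_lock);
 *              if (!have_queues &&
 *                  pthread_mutex_trylock(&queues_lock) != 0) {
 *                      // Taking queues_lock after pmap_lock risks
 *                      // deadlock: back off and retry in order.
 *                      pthread_mutex_unlock(&pmap_lock);
 *                      pthread_mutex_lock(&queues_lock);
 *                      have_queues = 1;
 *                      goto retry;
 *              }
 *              do_work();
 *              pthread_mutex_unlock(&queues_lock);
 *              pthread_mutex_unlock(&pmap_lock);
 *      }
 */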
 3650 
 3651 
 3652 
 3653 /*
 3654  *      Copy the range specified by src_addr/len
 3655  *      from the source map to the range dst_addr/len
 3656  *      in the destination map.
 3657  *
 3658  *      This routine is only advisory and need not do anything.
 3659  */
 3660 
 3661 void
 3662 pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr, vm_size_t len,
 3663     vm_offset_t src_addr)
 3664 {
 3665         vm_page_t   free;
 3666         vm_offset_t addr;
 3667         vm_offset_t end_addr = src_addr + len;
 3668         vm_offset_t pdnxt;
 3669 
 3670         if (dst_addr != src_addr)
 3671                 return;
 3672 
 3673         if (!pmap_is_current(src_pmap))
 3674                 return;
 3675 
 3676         vm_page_lock_queues();
 3677         if (dst_pmap < src_pmap) {
 3678                 PMAP_LOCK(dst_pmap);
 3679                 PMAP_LOCK(src_pmap);
 3680         } else {
 3681                 PMAP_LOCK(src_pmap);
 3682                 PMAP_LOCK(dst_pmap);
 3683         }
 3684         sched_pin();
 3685         for (addr = src_addr; addr < end_addr; addr = pdnxt) {
 3686                 pt_entry_t *src_pte, *dst_pte;
 3687                 vm_page_t dstmpte, srcmpte;
 3688                 pd_entry_t srcptepaddr;
 3689                 unsigned ptepindex;
 3690 
 3691                 KASSERT(addr < UPT_MIN_ADDRESS,
 3692                     ("pmap_copy: invalid to pmap_copy page tables"));
 3693 
 3694                 pdnxt = (addr + NBPDR) & ~PDRMASK;
 3695                 if (pdnxt < addr)
 3696                         pdnxt = end_addr;
 3697                 ptepindex = addr >> PDRSHIFT;
 3698 
 3699                 srcptepaddr = src_pmap->pm_pdir[ptepindex];
 3700                 if (srcptepaddr == 0)
 3701                         continue;
 3702                         
 3703                 if (srcptepaddr & PG_PS) {
 3704                         if (dst_pmap->pm_pdir[ptepindex] == 0 &&
 3705                             ((srcptepaddr & PG_MANAGED) == 0 ||
 3706                             pmap_pv_insert_pde(dst_pmap, addr, srcptepaddr &
 3707                             PG_PS_FRAME))) {
 3708                                 dst_pmap->pm_pdir[ptepindex] = srcptepaddr &
 3709                                     ~PG_W;
 3710                                 dst_pmap->pm_stats.resident_count +=
 3711                                     NBPDR / PAGE_SIZE;
 3712                         }
 3713                         continue;
 3714                 }
 3715 
 3716                 srcmpte = PHYS_TO_VM_PAGE(srcptepaddr & PG_FRAME);
 3717                 KASSERT(srcmpte->wire_count > 0,
 3718                     ("pmap_copy: source page table page is unused"));
 3719 
 3720                 if (pdnxt > end_addr)
 3721                         pdnxt = end_addr;
 3722 
 3723                 src_pte = vtopte(addr);
 3724                 while (addr < pdnxt) {
 3725                         pt_entry_t ptetemp;
 3726                         ptetemp = *src_pte;
 3727                         /*
 3728                          * we only virtual copy managed pages
 3729                          */
 3730                         if ((ptetemp & PG_MANAGED) != 0) {
 3731                                 dstmpte = pmap_allocpte(dst_pmap, addr,
 3732                                     M_NOWAIT);
 3733                                 if (dstmpte == NULL)
 3734                                         goto out;
 3735                                 dst_pte = pmap_pte_quick(dst_pmap, addr);
 3736                                 if (*dst_pte == 0 &&
 3737                                     pmap_try_insert_pv_entry(dst_pmap, addr,
 3738                                     PHYS_TO_VM_PAGE(ptetemp & PG_FRAME))) {
 3739                                         /*
 3740                                          * Clear the wired, modified, and
 3741                                          * accessed (referenced) bits
 3742                                          * during the copy.
 3743                                          */
 3744                                         *dst_pte = ptetemp & ~(PG_W | PG_M |
 3745                                             PG_A);
 3746                                         dst_pmap->pm_stats.resident_count++;
 3747                                 } else {
 3748                                         free = NULL;
 3749                                         if (pmap_unwire_pte_hold(dst_pmap,
 3750                                             dstmpte, &free)) {
 3751                                                 pmap_invalidate_page(dst_pmap,
 3752                                                     addr);
 3753                                                 pmap_free_zero_pages(free);
 3754                                         }
 3755                                         goto out;
 3756                                 }
 3757                                 if (dstmpte->wire_count >= srcmpte->wire_count)
 3758                                         break;
 3759                         }
 3760                         addr += PAGE_SIZE;
 3761                         src_pte++;
 3762                 }
 3763         }
 3764 out:
 3765         sched_unpin();
 3766         vm_page_unlock_queues();
 3767         PMAP_UNLOCK(src_pmap);
 3768         PMAP_UNLOCK(dst_pmap);
 3769 }       
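
/*
 * The two pmaps are locked above in address order so that concurrent
 * pmap_copy() calls with the arguments swapped cannot deadlock.  The
 * same idea in a small POSIX threads sketch; struct obj and lock_pair()
 * are hypothetical, and distinct objects are assumed.
 *
 *      #include <pthread.h>
 *      #include <stdint.h>
 *
 *      struct obj {
 *              pthread_mutex_t lock;
 *      };
 *
 *      static void
 *      lock_pair(struct obj *a, struct obj *b)
 *      {
 *              // Always take the lower-addressed lock first.
 *              if ((uintptr_t)a < (uintptr_t)b) {
 *                      pthread_mutex_lock(&a->lock);
 *                      pthread_mutex_lock(&b->lock);
 *              } else {
 *                      pthread_mutex_lock(&b->lock);
 *                      pthread_mutex_lock(&a->lock);
 *              }
 *      }
 */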
 3770 
 3771 static __inline void
 3772 pagezero(void *page)
 3773 {
 3774 #if defined(I686_CPU)
 3775         if (cpu_class == CPUCLASS_686) {
 3776 #if defined(CPU_ENABLE_SSE)
 3777                 if (cpu_feature & CPUID_SSE2)
 3778                         sse2_pagezero(page);
 3779                 else
 3780 #endif
 3781                         i686_pagezero(page);
 3782         } else
 3783 #endif
 3784                 bzero(page, PAGE_SIZE);
 3785 }
 3786 
 3787 /*
 3788  *      pmap_zero_page zeros the specified hardware page by mapping 
 3789  *      the page into KVM and using bzero to clear its contents.
 3790  */
 3791 void
 3792 pmap_zero_page(vm_page_t m)
 3793 {
 3794         struct sysmaps *sysmaps;
 3795 
 3796         sysmaps = &sysmaps_pcpu[PCPU_GET(cpuid)];
 3797         mtx_lock(&sysmaps->lock);
 3798         if (*sysmaps->CMAP2)
 3799                 panic("pmap_zero_page: CMAP2 busy");
 3800         sched_pin();
 3801         *sysmaps->CMAP2 = PG_V | PG_RW | VM_PAGE_TO_PHYS(m) | PG_A | PG_M |
 3802             pmap_cache_bits(m->md.pat_mode, 0);
 3803         invlcaddr(sysmaps->CADDR2);
 3804         pagezero(sysmaps->CADDR2);
 3805         *sysmaps->CMAP2 = 0;
 3806         sched_unpin();
 3807         mtx_unlock(&sysmaps->lock);
 3808 }
 3809 
 3810 /*
 3811  *      pmap_zero_page_area zeros the specified hardware page by mapping 
 3812  *      the page into KVM and using bzero to clear its contents.
 3813  *
 3814  *      off and size may not cover an area beyond a single hardware page.
 3815  */
 3816 void
 3817 pmap_zero_page_area(vm_page_t m, int off, int size)
 3818 {
 3819         struct sysmaps *sysmaps;
 3820 
 3821         sysmaps = &sysmaps_pcpu[PCPU_GET(cpuid)];
 3822         mtx_lock(&sysmaps->lock);
 3823         if (*sysmaps->CMAP2)
 3824                 panic("pmap_zero_page_area: CMAP2 busy");
 3825         sched_pin();
 3826         *sysmaps->CMAP2 = PG_V | PG_RW | VM_PAGE_TO_PHYS(m) | PG_A | PG_M |
 3827             pmap_cache_bits(m->md.pat_mode, 0);
 3828         invlcaddr(sysmaps->CADDR2);
 3829         if (off == 0 && size == PAGE_SIZE) 
 3830                 pagezero(sysmaps->CADDR2);
 3831         else
 3832                 bzero((char *)sysmaps->CADDR2 + off, size);
 3833         *sysmaps->CMAP2 = 0;
 3834         sched_unpin();
 3835         mtx_unlock(&sysmaps->lock);
 3836 }
 3837 
 3838 /*
 3839  *      pmap_zero_page_idle zeros the specified hardware page by mapping 
 3840  *      the page into KVM and using bzero to clear its contents.  This
 3841  *      is intended to be called from the vm_pagezero process only and
 3842  *      outside of Giant.
 3843  */
 3844 void
 3845 pmap_zero_page_idle(vm_page_t m)
 3846 {
 3847 
 3848         if (*CMAP3)
 3849                 panic("pmap_zero_page_idle: CMAP3 busy");
 3850         sched_pin();
 3851         *CMAP3 = PG_V | PG_RW | VM_PAGE_TO_PHYS(m) | PG_A | PG_M |
 3852             pmap_cache_bits(m->md.pat_mode, 0);
 3853         invlcaddr(CADDR3);
 3854         pagezero(CADDR3);
 3855         *CMAP3 = 0;
 3856         sched_unpin();
 3857 }
 3858 
 3859 /*
 3860  *      pmap_copy_page copies the specified (machine independent)
 3861  *      page by mapping the page into virtual memory and using
 3862  *      bcopy to copy the page, one machine dependent page at a
 3863  *      time.
 3864  */
 3865 void
 3866 pmap_copy_page(vm_page_t src, vm_page_t dst)
 3867 {
 3868         struct sysmaps *sysmaps;
 3869 
 3870         sysmaps = &sysmaps_pcpu[PCPU_GET(cpuid)];
 3871         mtx_lock(&sysmaps->lock);
 3872         if (*sysmaps->CMAP1)
 3873                 panic("pmap_copy_page: CMAP1 busy");
 3874         if (*sysmaps->CMAP2)
 3875                 panic("pmap_copy_page: CMAP2 busy");
 3876         sched_pin();
 3877         invlpg((u_int)sysmaps->CADDR1);
 3878         invlpg((u_int)sysmaps->CADDR2);
 3879         *sysmaps->CMAP1 = PG_V | VM_PAGE_TO_PHYS(src) | PG_A |
 3880             pmap_cache_bits(src->md.pat_mode, 0);
 3881         *sysmaps->CMAP2 = PG_V | PG_RW | VM_PAGE_TO_PHYS(dst) | PG_A | PG_M |
 3882             pmap_cache_bits(dst->md.pat_mode, 0);
 3883         bcopy(sysmaps->CADDR1, sysmaps->CADDR2, PAGE_SIZE);
 3884         *sysmaps->CMAP1 = 0;
 3885         *sysmaps->CMAP2 = 0;
 3886         sched_unpin();
 3887         mtx_unlock(&sysmaps->lock);
 3888 }
 3889 
 3890 /*
 3891  * Returns true if the pmap's pv is one of the first
 3892  * 16 pvs linked to from this page.  This count may
 3893  * be changed upwards or downwards in the future; it
 3894  * is only necessary that true be returned for a small
 3895  * subset of pmaps for proper page aging.
 3896  */
 3897 boolean_t
 3898 pmap_page_exists_quick(pmap_t pmap, vm_page_t m)
 3899 {
 3900         struct md_page *pvh;
 3901         pv_entry_t pv;
 3902         int loops = 0;
 3903 
 3904         if (m->flags & PG_FICTITIOUS)
 3905                 return (FALSE);
 3906 
 3907         mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 3908         TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
 3909                 if (PV_PMAP(pv) == pmap) {
 3910                         return (TRUE);
 3911                 }
 3912                 loops++;
 3913                 if (loops >= 16)
 3914                         break;
 3915         }
 3916         if (loops < 16) {
 3917                 pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
 3918                 TAILQ_FOREACH(pv, &pvh->pv_list, pv_list) {
 3919                         if (PV_PMAP(pv) == pmap)
 3920                                 return (TRUE);
 3921                         loops++;
 3922                         if (loops >= 16)
 3923                                 break;
 3924                 }
 3925         }
 3926         return (FALSE);
 3927 }
 3928 
 3929 /*
 3930  * Returns TRUE if the given page is mapped individually or as part of
 3931  * a 4mpage.  Otherwise, returns FALSE.
 3932  */
 3933 boolean_t
 3934 pmap_page_is_mapped(vm_page_t m)
 3935 {
 3936         struct md_page *pvh;
 3937 
 3938         if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0)
 3939                 return (FALSE);
 3940         mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 3941         if (TAILQ_EMPTY(&m->md.pv_list)) {
 3942                 pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
 3943                 return (!TAILQ_EMPTY(&pvh->pv_list));
 3944         } else
 3945                 return (TRUE);
 3946 }
 3947 
 3948 /*
 3949  * Remove all pages from the specified address space;
 3950  * this aids process exit speeds.  Also, this code
 3951  * is special cased for current process only, but
 3952  * can have the more generic (and slightly slower)
 3953  * mode enabled.  This is much faster than pmap_remove
 3954  * in the case of running down an entire address space.
 3955  */
 3956 void
 3957 pmap_remove_pages(pmap_t pmap)
 3958 {
 3959         pt_entry_t *pte, tpte;
 3960         vm_page_t free = NULL;
 3961         vm_page_t m, mpte, mt;
 3962         pv_entry_t pv;
 3963         struct md_page *pvh;
 3964         struct pv_chunk *pc, *npc;
 3965         int field, idx;
 3966         int32_t bit;
 3967         uint32_t inuse, bitmask;
 3968         int allfree;
 3969 
 3970         if (pmap != vmspace_pmap(curthread->td_proc->p_vmspace)) {
 3971                 printf("warning: pmap_remove_pages called with non-current pmap\n");
 3972                 return;
 3973         }
 3974         vm_page_lock_queues();
 3975         PMAP_LOCK(pmap);
 3976         sched_pin();
 3977         TAILQ_FOREACH_SAFE(pc, &pmap->pm_pvchunk, pc_list, npc) {
 3978                 allfree = 1;
 3979                 for (field = 0; field < _NPCM; field++) {
 3980                         inuse = (~(pc->pc_map[field])) & pc_freemask[field];
 3981                         while (inuse != 0) {
 3982                                 bit = bsfl(inuse);
 3983                                 bitmask = 1UL << bit;
 3984                                 idx = field * 32 + bit;
 3985                                 pv = &pc->pc_pventry[idx];
 3986                                 inuse &= ~bitmask;
 3987 
 3988                                 pte = pmap_pde(pmap, pv->pv_va);
 3989                                 tpte = *pte;
 3990                                 if ((tpte & PG_PS) == 0) {
 3991                                         pte = vtopte(pv->pv_va);
 3992                                         tpte = *pte & ~PG_PTE_PAT;
 3993                                 }
 3994 
 3995                                 if (tpte == 0) {
 3996                                         printf(
 3997                                             "TPTE at %p  IS ZERO @ VA %08x\n",
 3998                                             pte, pv->pv_va);
 3999                                         panic("bad pte");
 4000                                 }
 4001 
 4002 /*
 4003  * We cannot remove wired pages from a process' mapping at this time
 4004  */
 4005                                 if (tpte & PG_W) {
 4006                                         allfree = 0;
 4007                                         continue;
 4008                                 }
 4009 
 4010                                 m = PHYS_TO_VM_PAGE(tpte & PG_FRAME);
 4011                                 KASSERT(m->phys_addr == (tpte & PG_FRAME),
 4012                                     ("vm_page_t %p phys_addr mismatch %016jx %016jx",
 4013                                     m, (uintmax_t)m->phys_addr,
 4014                                     (uintmax_t)tpte));
 4015 
 4016                                 KASSERT(m < &vm_page_array[vm_page_array_size],
 4017                                         ("pmap_remove_pages: bad tpte %#jx",
 4018                                         (uintmax_t)tpte));
 4019 
 4020                                 pte_clear(pte);
 4021 
 4022                                 /*
 4023                                  * Update the vm_page_t clean/reference bits.
 4024                                  */
 4025                                 if ((tpte & (PG_M | PG_RW)) == (PG_M | PG_RW)) {
 4026                                         if ((tpte & PG_PS) != 0) {
 4027                                                 for (mt = m; mt < &m[NBPDR / PAGE_SIZE]; mt++)
 4028                                                         vm_page_dirty(mt);
 4029                                         } else
 4030                                                 vm_page_dirty(m);
 4031                                 }
 4032 
 4033                                 /* Mark free */
 4034                                 PV_STAT(pv_entry_frees++);
 4035                                 PV_STAT(pv_entry_spare++);
 4036                                 pv_entry_count--;
 4037                                 pc->pc_map[field] |= bitmask;
 4038                                 if ((tpte & PG_PS) != 0) {
 4039                                         pmap->pm_stats.resident_count -= NBPDR / PAGE_SIZE;
 4040                                         pvh = pa_to_pvh(tpte & PG_PS_FRAME);
 4041                                         TAILQ_REMOVE(&pvh->pv_list, pv, pv_list);
 4042                                         if (TAILQ_EMPTY(&pvh->pv_list)) {
 4043                                                 for (mt = m; mt < &m[NBPDR / PAGE_SIZE]; mt++)
 4044                                                         if (TAILQ_EMPTY(&mt->md.pv_list))
 4045                                                                 vm_page_flag_clear(mt, PG_WRITEABLE);
 4046                                         }
 4047                                         mpte = pmap_lookup_pt_page(pmap, pv->pv_va);
 4048                                         if (mpte != NULL) {
 4049                                                 pmap_remove_pt_page(pmap, mpte);
 4050                                                 pmap->pm_stats.resident_count--;
 4051                                                 KASSERT(mpte->wire_count == NPTEPG,
 4052                                                     ("pmap_remove_pages: pte page wire count error"));
 4053                                                 mpte->wire_count = 0;
 4054                                                 pmap_add_delayed_free_list(mpte, &free, FALSE);
 4055                                                 atomic_subtract_int(&cnt.v_wire_count, 1);
 4056                                         }
 4057                                 } else {
 4058                                         pmap->pm_stats.resident_count--;
 4059                                         TAILQ_REMOVE(&m->md.pv_list, pv, pv_list);
 4060                                         if (TAILQ_EMPTY(&m->md.pv_list)) {
 4061                                                 pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
 4062                                                 if (TAILQ_EMPTY(&pvh->pv_list))
 4063                                                         vm_page_flag_clear(m, PG_WRITEABLE);
 4064                                         }
 4065                                         pmap_unuse_pt(pmap, pv->pv_va, &free);
 4066                                 }
 4067                         }
 4068                 }
 4069                 if (allfree) {
 4070                         PV_STAT(pv_entry_spare -= _NPCPV);
 4071                         PV_STAT(pc_chunk_count--);
 4072                         PV_STAT(pc_chunk_frees++);
 4073                         TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
 4074                         m = PHYS_TO_VM_PAGE(pmap_kextract((vm_offset_t)pc));
 4075                         pmap_qremove((vm_offset_t)pc, 1);
 4076                         vm_page_unwire(m, 0);
 4077                         vm_page_free(m);
 4078                         pmap_ptelist_free(&pv_vafree, (vm_offset_t)pc);
 4079                 }
 4080         }
 4081         sched_unpin();
 4082         pmap_invalidate_all(pmap);
 4083         vm_page_unlock_queues();
 4084         PMAP_UNLOCK(pmap);
 4085         pmap_free_zero_pages(free);
 4086 }
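
/*
 * The inner loop above walks the in-use bits of each pv chunk with
 * bsfl() (find first set), clearing each bit as the corresponding pv
 * entry is processed.  A user-space rendering of that bitmap walk;
 * for_each_set_bit() and handle_index() are invented names, and
 * __builtin_ctz() (GCC/Clang) stands in for bsfl().
 *
 *      #include <stdint.h>
 *
 *      static void
 *      for_each_set_bit(const uint32_t map[], int nwords,
 *          void (*handle_index)(int))
 *      {
 *              uint32_t inuse;
 *              int bit, word;
 *
 *              for (word = 0; word < nwords; word++) {
 *                      inuse = map[word];
 *                      while (inuse != 0) {
 *                              bit = __builtin_ctz(inuse);
 *                              handle_index(word * 32 + bit);
 *                              inuse &= ~(1u << bit);  // mark processed
 *                      }
 *              }
 *      }
 */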
 4087 
 4088 /*
 4089  *      pmap_is_modified:
 4090  *
 4091  *      Return whether or not the specified physical page was modified
 4092  *      in any physical maps.
 4093  */
 4094 boolean_t
 4095 pmap_is_modified(vm_page_t m)
 4096 {
 4097 
 4098         if (m->flags & PG_FICTITIOUS)
 4099                 return (FALSE);
 4100         if (pmap_is_modified_pvh(&m->md))
 4101                 return (TRUE);
 4102         return (pmap_is_modified_pvh(pa_to_pvh(VM_PAGE_TO_PHYS(m))));
 4103 }
 4104 
 4105 /*
 4106  * Returns TRUE if any of the given mappings were used to modify
 4107  * physical memory.  Otherwise, returns FALSE.  Both page and 2/4mpage
 4108  * mappings are supported.
 4109  */
 4110 static boolean_t
 4111 pmap_is_modified_pvh(struct md_page *pvh)
 4112 {
 4113         pv_entry_t pv;
 4114         pt_entry_t *pte;
 4115         pmap_t pmap;
 4116         boolean_t rv;
 4117 
 4118         mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 4119         rv = FALSE;
 4120         sched_pin();
 4121         TAILQ_FOREACH(pv, &pvh->pv_list, pv_list) {
 4122                 pmap = PV_PMAP(pv);
 4123                 PMAP_LOCK(pmap);
 4124                 pte = pmap_pte_quick(pmap, pv->pv_va);
 4125                 rv = (*pte & (PG_M | PG_RW)) == (PG_M | PG_RW);
 4126                 PMAP_UNLOCK(pmap);
 4127                 if (rv)
 4128                         break;
 4129         }
 4130         sched_unpin();
 4131         return (rv);
 4132 }
 4133 
 4134 /*
 4135  *      pmap_is_prefaultable:
 4136  *
 4137  *      Return whether or not the specified virtual address is eligible
 4138  *      for prefault.
 4139  */
 4140 boolean_t
 4141 pmap_is_prefaultable(pmap_t pmap, vm_offset_t addr)
 4142 {
 4143         pd_entry_t *pde;
 4144         pt_entry_t *pte;
 4145         boolean_t rv;
 4146 
 4147         rv = FALSE;
 4148         PMAP_LOCK(pmap);
 4149         pde = pmap_pde(pmap, addr);
 4150         if (*pde != 0 && (*pde & PG_PS) == 0) {
 4151                 pte = vtopte(addr);
 4152                 rv = *pte == 0;
 4153         }
 4154         PMAP_UNLOCK(pmap);
 4155         return (rv);
 4156 }
 4157 
 4158 /*
 4159  * Clear the write and modified bits in each of the given page's mappings.
 4160  */
 4161 void
 4162 pmap_remove_write(vm_page_t m)
 4163 {
 4164         struct md_page *pvh;
 4165         pv_entry_t next_pv, pv;
 4166         pmap_t pmap;
 4167         pd_entry_t *pde;
 4168         pt_entry_t oldpte, *pte;
 4169         vm_offset_t va;
 4170 
 4171         mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 4172         if ((m->flags & PG_FICTITIOUS) != 0 ||
 4173             (m->flags & PG_WRITEABLE) == 0)
 4174                 return;
 4175         sched_pin();
 4176         pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
 4177         TAILQ_FOREACH_SAFE(pv, &pvh->pv_list, pv_list, next_pv) {
 4178                 va = pv->pv_va;
 4179                 pmap = PV_PMAP(pv);
 4180                 PMAP_LOCK(pmap);
 4181                 pde = pmap_pde(pmap, va);
 4182                 if ((*pde & PG_RW) != 0)
 4183                         (void)pmap_demote_pde(pmap, pde, va);
 4184                 PMAP_UNLOCK(pmap);
 4185         }
 4186         TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
 4187                 pmap = PV_PMAP(pv);
 4188                 PMAP_LOCK(pmap);
 4189                 pde = pmap_pde(pmap, pv->pv_va);
 4190                 KASSERT((*pde & PG_PS) == 0, ("pmap_remove_write: found"
 4191                     " a 4mpage in page %p's pv list", m));
 4192                 pte = pmap_pte_quick(pmap, pv->pv_va);
 4193 retry:
 4194                 oldpte = *pte;
 4195                 if ((oldpte & PG_RW) != 0) {
 4196                         /*
 4197                          * Regardless of whether a pte is 32 or 64 bits
 4198                          * in size, PG_RW and PG_M are among the least
 4199                          * significant 32 bits.
 4200                          */
 4201                         if (!atomic_cmpset_int((u_int *)pte, oldpte,
 4202                             oldpte & ~(PG_RW | PG_M)))
 4203                                 goto retry;
 4204                         if ((oldpte & PG_M) != 0)
 4205                                 vm_page_dirty(m);
 4206                         pmap_invalidate_page(pmap, pv->pv_va);
 4207                 }
 4208                 PMAP_UNLOCK(pmap);
 4209         }
 4210         vm_page_flag_clear(m, PG_WRITEABLE);
 4211         sched_unpin();
 4212 }
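
/*
 * Editor's sketch (not part of pmap.c): the lock-free compare-and-set retry
 * pattern used above, isolated for clarity.  Because PG_RW and PG_M both
 * live in the low 32 bits of a PTE even under PAE, a 32-bit cmpset is
 * sufficient; the helper name is hypothetical.
 */
static __inline void
pte_clear_bits_example(pt_entry_t *pte, u_int bits)
{
        u_int opte;

        do {
                opte = *(u_int *)pte;   /* snapshot the low 32 bits */
        } while ((opte & bits) != 0 &&
            !atomic_cmpset_int((u_int *)pte, opte, opte & ~bits));
}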
 4213 
 4214 /*
 4215  *      pmap_ts_referenced:
 4216  *
 4217  *      Return a count of reference bits for a page, clearing those bits.
 4218  *      It is not necessary for every reference bit to be cleared, but it
 4219  *      is necessary that 0 only be returned when there are truly no
 4220  *      reference bits set.
 4221  *
 4222  *      XXX: The exact number of bits to check and clear is a matter that
 4223  *      should be tested and standardized at some point in the future for
 4224  *      optimal aging of shared pages.
 4225  */
 4226 int
 4227 pmap_ts_referenced(vm_page_t m)
 4228 {
 4229         struct md_page *pvh;
 4230         pv_entry_t pv, pvf, pvn;
 4231         pmap_t pmap;
 4232         pd_entry_t oldpde, *pde;
 4233         pt_entry_t *pte;
 4234         vm_offset_t va;
 4235         int rtval = 0;
 4236 
 4237         if (m->flags & PG_FICTITIOUS)
 4238                 return (rtval);
 4239         sched_pin();
 4240         mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 4241         pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
 4242         TAILQ_FOREACH_SAFE(pv, &pvh->pv_list, pv_list, pvn) {
 4243                 va = pv->pv_va;
 4244                 pmap = PV_PMAP(pv);
 4245                 PMAP_LOCK(pmap);
 4246                 pde = pmap_pde(pmap, va);
 4247                 oldpde = *pde;
 4248                 if ((oldpde & PG_A) != 0) {
 4249                         if (pmap_demote_pde(pmap, pde, va)) {
 4250                                 if ((oldpde & PG_W) == 0) {
 4251                                         /*
 4252                                          * Remove the mapping to a single page
 4253                                          * so that a subsequent access may
 4254                                          * repromote.  Since the underlying
 4255                                          * page table page is fully populated,
 4256                                          * this removal never frees a page
 4257                                          * table page.
 4258                                          */
 4259                                         va += VM_PAGE_TO_PHYS(m) - (oldpde &
 4260                                             PG_PS_FRAME);
 4261                                         pmap_remove_page(pmap, va, NULL);
 4262                                         rtval++;
 4263                                         if (rtval > 4) {
 4264                                                 PMAP_UNLOCK(pmap);
 4265                                                 return (rtval);
 4266                                         }
 4267                                 }
 4268                         }
 4269                 }
 4270                 PMAP_UNLOCK(pmap);
 4271         }
 4272         if ((pv = TAILQ_FIRST(&m->md.pv_list)) != NULL) {
 4273                 pvf = pv;
 4274                 do {
 4275                         pvn = TAILQ_NEXT(pv, pv_list);
 4276                         TAILQ_REMOVE(&m->md.pv_list, pv, pv_list);
 4277                         TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_list);
 4278                         pmap = PV_PMAP(pv);
 4279                         PMAP_LOCK(pmap);
 4280                         pde = pmap_pde(pmap, pv->pv_va);
 4281                         KASSERT((*pde & PG_PS) == 0, ("pmap_ts_referenced:"
 4282                             " found a 4mpage in page %p's pv list", m));
 4283                         pte = pmap_pte_quick(pmap, pv->pv_va);
 4284                         if ((*pte & PG_A) != 0) {
 4285                                 atomic_clear_int((u_int *)pte, PG_A);
 4286                                 pmap_invalidate_page(pmap, pv->pv_va);
 4287                                 rtval++;
 4288                                 if (rtval > 4)
 4289                                         pvn = NULL;
 4290                         }
 4291                         PMAP_UNLOCK(pmap);
 4292                 } while ((pv = pvn) != NULL && pv != pvf);
 4293         }
 4294         sched_unpin();
 4295         return (rtval);
 4296 }
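
/*
 * Editor's sketch (not part of pmap.c): roughly how a page-aging loop
 * (cf. vm_pageout_scan() in vm/vm_pageout.c) consumes the count returned
 * by pmap_ts_referenced().  The act_count arithmetic is illustrative only.
 */
static void
age_page_example(vm_page_t m)
{
        int refs;

        vm_page_lock_queues();                  /* vm_page_queue_mtx */
        refs = pmap_ts_referenced(m);           /* counts and clears PG_A */
        if (refs > 0)
                m->act_count = imin(m->act_count + refs, ACT_MAX);
        else if (m->act_count > 0)
                m->act_count--;
        vm_page_unlock_queues();
}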
 4297 
 4298 /*
 4299  *      Clear the modify bits on the specified physical page.
 4300  */
 4301 void
 4302 pmap_clear_modify(vm_page_t m)
 4303 {
 4304         struct md_page *pvh;
 4305         pv_entry_t next_pv, pv;
 4306         pmap_t pmap;
 4307         pd_entry_t oldpde, *pde;
 4308         pt_entry_t oldpte, *pte;
 4309         vm_offset_t va;
 4310 
 4311         mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 4312         if ((m->flags & PG_FICTITIOUS) != 0)
 4313                 return;
 4314         sched_pin();
 4315         pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
 4316         TAILQ_FOREACH_SAFE(pv, &pvh->pv_list, pv_list, next_pv) {
 4317                 va = pv->pv_va;
 4318                 pmap = PV_PMAP(pv);
 4319                 PMAP_LOCK(pmap);
 4320                 pde = pmap_pde(pmap, va);
 4321                 oldpde = *pde;
 4322                 if ((oldpde & PG_RW) != 0) {
 4323                         if (pmap_demote_pde(pmap, pde, va)) {
 4324                                 if ((oldpde & PG_W) == 0) {
 4325                                         /*
 4326                                          * Write protect the mapping to a
 4327                                          * single page so that a subsequent
 4328                                          * write access may repromote.
 4329                                          */
 4330                                         va += VM_PAGE_TO_PHYS(m) - (oldpde &
 4331                                             PG_PS_FRAME);
 4332                                         pte = pmap_pte_quick(pmap, va);
 4333                                         oldpte = *pte;
 4334                                         if ((oldpte & PG_V) != 0) {
 4335                                                 /*
 4336                                                  * Regardless of whether a pte is 32 or 64 bits
 4337                                                  * in size, PG_RW and PG_M are among the least
 4338                                                  * significant 32 bits.
 4339                                                  */
 4340                                                 while (!atomic_cmpset_int((u_int *)pte,
 4341                                                     oldpte,
 4342                                                     oldpte & ~(PG_M | PG_RW)))
 4343                                                         oldpte = *pte;
 4344                                                 vm_page_dirty(m);
 4345                                                 pmap_invalidate_page(pmap, va);
 4346                                         }
 4347                                 }
 4348                         }
 4349                 }
 4350                 PMAP_UNLOCK(pmap);
 4351         }
 4352         TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
 4353                 pmap = PV_PMAP(pv);
 4354                 PMAP_LOCK(pmap);
 4355                 pde = pmap_pde(pmap, pv->pv_va);
 4356                 KASSERT((*pde & PG_PS) == 0, ("pmap_clear_modify: found"
 4357                     " a 4mpage in page %p's pv list", m));
 4358                 pte = pmap_pte_quick(pmap, pv->pv_va);
 4359                 if ((*pte & (PG_M | PG_RW)) == (PG_M | PG_RW)) {
 4360                         /*
 4361                          * Regardless of whether a pte is 32 or 64 bits
 4362                          * in size, PG_M is among the least significant
 4363                          * 32 bits. 
 4364                          */
 4365                         atomic_clear_int((u_int *)pte, PG_M);
 4366                         pmap_invalidate_page(pmap, pv->pv_va);
 4367                 }
 4368                 PMAP_UNLOCK(pmap);
 4369         }
 4370         sched_unpin();
 4371 }
 4372 
 4373 /*
 4374  *      pmap_clear_reference:
 4375  *
 4376  *      Clear the reference bit on the specified physical page.
 4377  */
 4378 void
 4379 pmap_clear_reference(vm_page_t m)
 4380 {
 4381         struct md_page *pvh;
 4382         pv_entry_t next_pv, pv;
 4383         pmap_t pmap;
 4384         pd_entry_t oldpde, *pde;
 4385         pt_entry_t *pte;
 4386         vm_offset_t va;
 4387 
 4388         mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 4389         if ((m->flags & PG_FICTITIOUS) != 0)
 4390                 return;
 4391         sched_pin();
 4392         pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
 4393         TAILQ_FOREACH_SAFE(pv, &pvh->pv_list, pv_list, next_pv) {
 4394                 va = pv->pv_va;
 4395                 pmap = PV_PMAP(pv);
 4396                 PMAP_LOCK(pmap);
 4397                 pde = pmap_pde(pmap, va);
 4398                 oldpde = *pde;
 4399                 if ((oldpde & PG_A) != 0) {
 4400                         if (pmap_demote_pde(pmap, pde, va)) {
 4401                                 /*
 4402                                  * Remove the mapping to a single page so
 4403                                  * that a subsequent access may repromote.
 4404                                  * Since the underlying page table page is
 4405                                  * fully populated, this removal never frees
 4406                                  * a page table page.
 4407                                  */
 4408                                 va += VM_PAGE_TO_PHYS(m) - (oldpde &
 4409                                     PG_PS_FRAME);
 4410                                 pmap_remove_page(pmap, va, NULL);
 4411                         }
 4412                 }
 4413                 PMAP_UNLOCK(pmap);
 4414         }
 4415         TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
 4416                 pmap = PV_PMAP(pv);
 4417                 PMAP_LOCK(pmap);
 4418                 pde = pmap_pde(pmap, pv->pv_va);
 4419                 KASSERT((*pde & PG_PS) == 0, ("pmap_clear_reference: found"
 4420                     " a 4mpage in page %p's pv list", m));
 4421                 pte = pmap_pte_quick(pmap, pv->pv_va);
 4422                 if ((*pte & PG_A) != 0) {
 4423                         /*
 4424                          * Regardless of whether a pte is 32 or 64 bits
 4425                          * in size, PG_A is among the least significant
 4426                          * 32 bits. 
 4427                          */
 4428                         atomic_clear_int((u_int *)pte, PG_A);
 4429                         pmap_invalidate_page(pmap, pv->pv_va);
 4430                 }
 4431                 PMAP_UNLOCK(pmap);
 4432         }
 4433         sched_unpin();
 4434 }
 4435 
 4436 /*
 4437  * Miscellaneous support routines follow
 4438  */
 4439 
 4440 /* Adjust the cache mode for a 4KB page mapped via a PTE. */
 4441 static __inline void
 4442 pmap_pte_attr(pt_entry_t *pte, int cache_bits)
 4443 {
 4444         u_int opte, npte;
 4445 
 4446         /*
 4447          * The cache mode bits are all in the low 32-bits of the
 4448          * PTE, so we can just spin on updating the low 32-bits.
 4449          */
 4450         do {
 4451                 opte = *(u_int *)pte;
 4452                 npte = opte & ~PG_PTE_CACHE;
 4453                 npte |= cache_bits;
 4454         } while (npte != opte && !atomic_cmpset_int((u_int *)pte, opte, npte));
 4455 }
 4456 
 4457 /* Adjust the cache mode for a 2/4MB page mapped via a PDE. */
 4458 static __inline void
 4459 pmap_pde_attr(pd_entry_t *pde, int cache_bits)
 4460 {
 4461         u_int opde, npde;
 4462 
 4463         /*
 4464          * The cache mode bits are all in the low 32-bits of the
 4465          * PDE, so we can just spin on updating the low 32-bits.
 4466          */
 4467         do {
 4468                 opde = *(u_int *)pde;
 4469                 npde = opde & ~PG_PDE_CACHE;
 4470                 npde |= cache_bits;
 4471         } while (npde != opde && !atomic_cmpset_int((u_int *)pde, opde, npde));
 4472 }
 4473 
 4474 /*
 4475  * Map a set of physical memory pages into the kernel virtual
 4476  * address space. Return a pointer to where it is mapped. This
 4477  * routine is intended to be used for mapping device memory,
 4478  * NOT real memory.
 4479  */
 4480 void *
 4481 pmap_mapdev_attr(vm_paddr_t pa, vm_size_t size, int mode)
 4482 {
 4483         vm_offset_t va, offset;
 4484         vm_size_t tmpsize;
 4485 
 4486         offset = pa & PAGE_MASK;
 4487         size = roundup(offset + size, PAGE_SIZE);
 4488         pa = pa & PG_FRAME;
 4489 
 4490         if (pa < KERNLOAD && pa + size <= KERNLOAD)
 4491                 va = KERNBASE + pa;
 4492         else
 4493                 va = kmem_alloc_nofault(kernel_map, size);
 4494         if (!va)
 4495                 panic("pmap_mapdev: Couldn't alloc kernel virtual memory");
 4496 
 4497         for (tmpsize = 0; tmpsize < size; tmpsize += PAGE_SIZE)
 4498                 pmap_kenter_attr(va + tmpsize, pa + tmpsize, mode);
 4499         pmap_invalidate_range(kernel_pmap, va, va + tmpsize);
 4500         pmap_invalidate_cache_range(va, va + size);
 4501         return ((void *)(va + offset));
 4502 }
 4503 
 4504 void *
 4505 pmap_mapdev(vm_paddr_t pa, vm_size_t size)
 4506 {
 4507 
 4508         return (pmap_mapdev_attr(pa, size, PAT_UNCACHEABLE));
 4509 }
 4510 
 4511 void *
 4512 pmap_mapbios(vm_paddr_t pa, vm_size_t size)
 4513 {
 4514 
 4515         return (pmap_mapdev_attr(pa, size, PAT_WRITE_BACK));
 4516 }
 4517 
 4518 void
 4519 pmap_unmapdev(vm_offset_t va, vm_size_t size)
 4520 {
 4521         vm_offset_t base, offset, tmpva;
 4522 
 4523         if (va >= KERNBASE && va + size <= KERNBASE + KERNLOAD)
 4524                 return;
 4525         base = trunc_page(va);
 4526         offset = va & PAGE_MASK;
 4527         size = roundup(offset + size, PAGE_SIZE);
 4528         for (tmpva = base; tmpva < (base + size); tmpva += PAGE_SIZE)
 4529                 pmap_kremove(tmpva);
 4530         pmap_invalidate_range(kernel_pmap, va, tmpva);
 4531         kmem_free(kernel_map, base, size);
 4532 }
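
/*
 * Editor's sketch (not part of pmap.c): typical device-memory usage of the
 * routines above.  The physical address and the single register read are
 * made up; a real driver would normally go through bus_space(9) rather than
 * calling these directly.
 */
static void
mapdev_example(void)
{
        volatile uint32_t *regs;
        void *va;

        /* Map one page of (hypothetical) device registers uncacheable. */
        va = pmap_mapdev(0xfebf0000, PAGE_SIZE);
        regs = (volatile uint32_t *)va;
        (void)regs[0];                          /* touch a register */
        pmap_unmapdev((vm_offset_t)va, PAGE_SIZE);
}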
 4533 
 4534 /*
 4535  * Sets the memory attribute for the specified page.
 4536  */
 4537 void
 4538 pmap_page_set_memattr(vm_page_t m, vm_memattr_t ma)
 4539 {
 4540         struct sysmaps *sysmaps;
 4541         vm_offset_t sva, eva;
 4542 
 4543         m->md.pat_mode = ma;
 4544         if ((m->flags & PG_FICTITIOUS) != 0)
 4545                 return;
 4546 
 4547         /*
 4548          * If "m" is a normal page, flush it from the cache.
 4549          * See pmap_invalidate_cache_range().
 4550          *
 4551          * First, try to find an existing mapping of the page by sf
 4552          * buffer. sf_buf_invalidate_cache() modifies mapping and
 4553          * flushes the cache.
 4554          */    
 4555         if (sf_buf_invalidate_cache(m))
 4556                 return;
 4557 
 4558         /*
 4559          * If page is not mapped by sf buffer, but CPU does not
 4560          * support self snoop, map the page transient and do
 4561          * invalidation. In the worst case, whole cache is flushed by
 4562          * pmap_invalidate_cache_range().
 4563          */
 4564         if ((cpu_feature & (CPUID_SS|CPUID_CLFSH)) == CPUID_CLFSH) {
 4565                 sysmaps = &sysmaps_pcpu[PCPU_GET(cpuid)];
 4566                 mtx_lock(&sysmaps->lock);
 4567                 if (*sysmaps->CMAP2)
 4568                         panic("pmap_page_set_memattr: CMAP2 busy");
 4569                 sched_pin();
 4570                 *sysmaps->CMAP2 = PG_V | PG_RW | VM_PAGE_TO_PHYS(m) |
 4571                     PG_A | PG_M | pmap_cache_bits(m->md.pat_mode, 0);
 4572                 invlcaddr(sysmaps->CADDR2);
 4573                 sva = (vm_offset_t)sysmaps->CADDR2;
 4574                 eva = sva + PAGE_SIZE;
 4575         } else
 4576                 sva = eva = 0; /* quiet gcc's uninitialized-variable warning */
 4577         pmap_invalidate_cache_range(sva, eva);
 4578         if (sva != 0) {
 4579                 *sysmaps->CMAP2 = 0;
 4580                 sched_unpin();
 4581                 mtx_unlock(&sysmaps->lock);
 4582         }
 4583 }
 4584 
 4585 /*
 4586  * Changes the specified virtual address range's memory type to that given by
 4587  * the parameter "mode".  The specified virtual address range must be
 4588  * completely contained within the kernel map.
 4589  *
 4590  * Returns zero if the change completed successfully, and either EINVAL or
 4591  * ENOMEM if the change failed.  Specifically, EINVAL is returned if some part
 4592  * of the virtual address range was not mapped, and ENOMEM is returned if
 4593  * there was insufficient memory available to complete the change.
 4594  */
 4595 int
 4596 pmap_change_attr(vm_offset_t va, vm_size_t size, int mode)
 4597 {
 4598         vm_offset_t base, offset, tmpva;
 4599         pd_entry_t *pde;
 4600         pt_entry_t *pte;
 4601         int cache_bits_pte, cache_bits_pde;
 4602         boolean_t changed;
 4603 
 4604         base = trunc_page(va);
 4605         offset = va & PAGE_MASK;
 4606         size = roundup(offset + size, PAGE_SIZE);
 4607 
 4608         /*
 4609          * Only supported on kernel virtual addresses above the recursive map.
 4610          */
 4611         if (base < VM_MIN_KERNEL_ADDRESS)
 4612                 return (EINVAL);
 4613 
 4614         cache_bits_pde = pmap_cache_bits(mode, 1);
 4615         cache_bits_pte = pmap_cache_bits(mode, 0);
 4616         changed = FALSE;
 4617 
 4618         /*
 4619          * Pages that aren't mapped aren't supported.  Also break down
 4620          * 2/4MB pages into 4KB pages if required.
 4621          */
 4622         PMAP_LOCK(kernel_pmap);
 4623         for (tmpva = base; tmpva < base + size; ) {
 4624                 pde = pmap_pde(kernel_pmap, tmpva);
 4625                 if (*pde == 0) {
 4626                         PMAP_UNLOCK(kernel_pmap);
 4627                         return (EINVAL);
 4628                 }
 4629                 if (*pde & PG_PS) {
 4630                         /*
 4631                          * If the current 2/4MB page already has
 4632                          * the required memory type, then we need not
 4633                          * demote this page.  Just increment tmpva to
 4634                          * the next 2/4MB page frame.
 4635                          */
 4636                         if ((*pde & PG_PDE_CACHE) == cache_bits_pde) {
 4637                                 tmpva = trunc_4mpage(tmpva) + NBPDR;
 4638                                 continue;
 4639                         }
 4640 
 4641                         /*
 4642                          * If the current offset aligns with a 2/4MB
 4643                          * page frame and there is at least 2/4MB left
 4644                          * within the range, then we need not break
 4645                          * down this page into 4KB pages.
 4646                          */
 4647                         if ((tmpva & PDRMASK) == 0 &&
 4648                             tmpva + PDRMASK < base + size) {
 4649                                 tmpva += NBPDR;
 4650                                 continue;
 4651                         }
 4652                         if (!pmap_demote_pde(kernel_pmap, pde, tmpva)) {
 4653                                 PMAP_UNLOCK(kernel_pmap);
 4654                                 return (ENOMEM);
 4655                         }
 4656                 }
 4657                 pte = vtopte(tmpva);
 4658                 if (*pte == 0) {
 4659                         PMAP_UNLOCK(kernel_pmap);
 4660                         return (EINVAL);
 4661                 }
 4662                 tmpva += PAGE_SIZE;
 4663         }
 4664         PMAP_UNLOCK(kernel_pmap);
 4665 
 4666         /*
 4667          * Ok, all the pages exist, so run through them updating their
 4668          * cache mode if required.
 4669          */
 4670         for (tmpva = base; tmpva < base + size; ) {
 4671                 pde = pmap_pde(kernel_pmap, tmpva);
 4672                 if (*pde & PG_PS) {
 4673                         if ((*pde & PG_PDE_CACHE) != cache_bits_pde) {
 4674                                 pmap_pde_attr(pde, cache_bits_pde);
 4675                                 changed = TRUE;
 4676                         }
 4677                         tmpva = trunc_4mpage(tmpva) + NBPDR;
 4678                 } else {
 4679                         pte = vtopte(tmpva);
 4680                         if ((*pte & PG_PTE_CACHE) != cache_bits_pte) {
 4681                                 pmap_pte_attr(pte, cache_bits_pte);
 4682                                 changed = TRUE;
 4683                         }
 4684                         tmpva += PAGE_SIZE;
 4685                 }
 4686         }
 4687 
 4688         /*
 4689          * Flush CPU caches to make sure any data isn't cached that
 4690          * shouldn't be, etc.
 4691          */
 4692         if (changed) {
 4693                 pmap_invalidate_range(kernel_pmap, base, tmpva);
 4694                 pmap_invalidate_cache_range(base, tmpva);
 4695         }
 4696         return (0);
 4697 }
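
/*
 * Editor's sketch (not part of pmap.c): a caller checking the return values
 * documented above (0, EINVAL, or ENOMEM).  The write-combining request is
 * just an example; "va" and "size" would come from an earlier
 * pmap_mapdev_attr() or similar kernel mapping.
 */
static int
set_wc_example(vm_offset_t va, vm_size_t size)
{
        int error;

        error = pmap_change_attr(va, size, PAT_WRITE_COMBINING);
        if (error != 0)
                printf("pmap_change_attr: %d\n", error);
        return (error);
}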
 4698 
 4699 /*
 4700  * perform the pmap work for mincore
 4701  */
 4702 int
 4703 pmap_mincore(pmap_t pmap, vm_offset_t addr)
 4704 {
 4705         pd_entry_t *pdep;
 4706         pt_entry_t *ptep, pte;
 4707         vm_paddr_t pa;
 4708         vm_page_t m;
 4709         int val = 0;
 4710         
 4711         PMAP_LOCK(pmap);
 4712         pdep = pmap_pde(pmap, addr);
 4713         if (*pdep != 0) {
 4714                 if (*pdep & PG_PS) {
 4715                         pte = *pdep;
 4716                         val = MINCORE_SUPER;
 4717                         /* Compute the physical address of the 4KB page. */
 4718                         pa = ((*pdep & PG_PS_FRAME) | (addr & PDRMASK)) &
 4719                             PG_FRAME;
 4720                 } else {
 4721                         ptep = pmap_pte(pmap, addr);
 4722                         pte = *ptep;
 4723                         pmap_pte_release(ptep);
 4724                         pa = pte & PG_FRAME;
 4725                 }
 4726         } else {
 4727                 pte = 0;
 4728                 pa = 0;
 4729         }
 4730         PMAP_UNLOCK(pmap);
 4731 
 4732         if (pte != 0) {
 4733                 val |= MINCORE_INCORE;
 4734                 if ((pte & PG_MANAGED) == 0)
 4735                         return val;
 4736 
 4737                 m = PHYS_TO_VM_PAGE(pa);
 4738 
 4739                 /*
 4740                  * Modified by us
 4741                  */
 4742                 if ((pte & (PG_M | PG_RW)) == (PG_M | PG_RW))
 4743                         val |= MINCORE_MODIFIED|MINCORE_MODIFIED_OTHER;
 4744                 else {
 4745                         /*
 4746                          * Modified by someone else
 4747                          */
 4748                         vm_page_lock_queues();
 4749                         if (m->dirty || pmap_is_modified(m))
 4750                                 val |= MINCORE_MODIFIED_OTHER;
 4751                         vm_page_unlock_queues();
 4752                 }
 4753                 /*
 4754                  * Referenced by us
 4755                  */
 4756                 if (pte & PG_A)
 4757                         val |= MINCORE_REFERENCED|MINCORE_REFERENCED_OTHER;
 4758                 else {
 4759                         /*
 4760                          * Referenced by someone else
 4761                          */
 4762                         vm_page_lock_queues();
 4763                         if ((m->flags & PG_REFERENCED) ||
 4764                             pmap_ts_referenced(m)) {
 4765                                 val |= MINCORE_REFERENCED_OTHER;
 4766                                 vm_page_flag_set(m, PG_REFERENCED);
 4767                         }
 4768                         vm_page_unlock_queues();
 4769                 }
 4770         } 
 4771         return val;
 4772 }
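
/*
 * Editor's sketch (not part of pmap.c): decoding the bit mask built above.
 * The real consumer is the mincore(2) implementation in vm/vm_mmap.c; the
 * single-character encoding here is purely illustrative.
 */
static char
mincore_char_example(pmap_t pmap, vm_offset_t addr)
{
        int val;

        val = pmap_mincore(pmap, addr);
        if ((val & MINCORE_INCORE) == 0)
                return ('.');           /* not resident */
        if ((val & (MINCORE_MODIFIED | MINCORE_MODIFIED_OTHER)) != 0)
                return ('d');           /* dirty somewhere */
        if ((val & (MINCORE_REFERENCED | MINCORE_REFERENCED_OTHER)) != 0)
                return ('r');           /* referenced */
        return ('i');                   /* resident and clean */
}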
 4773 
 4774 void
 4775 pmap_activate(struct thread *td)
 4776 {
 4777         pmap_t  pmap, oldpmap;
 4778         u_int32_t  cr3;
 4779 
 4780         critical_enter();
 4781         pmap = vmspace_pmap(td->td_proc->p_vmspace);
 4782         oldpmap = PCPU_GET(curpmap);
 4783 #if defined(SMP)
 4784         atomic_clear_int(&oldpmap->pm_active, PCPU_GET(cpumask));
 4785         atomic_set_int(&pmap->pm_active, PCPU_GET(cpumask));
 4786 #else
 4787         oldpmap->pm_active &= ~1;
 4788         pmap->pm_active |= 1;
 4789 #endif
 4790 #ifdef PAE
 4791         cr3 = vtophys(pmap->pm_pdpt);
 4792 #else
 4793         cr3 = vtophys(pmap->pm_pdir);
 4794 #endif
 4795         /*
 4796          * pmap_activate is for the current thread on the current cpu
 4797          */
 4798         td->td_pcb->pcb_cr3 = cr3;
 4799         load_cr3(cr3);
 4800         PCPU_SET(curpmap, pmap);
 4801         critical_exit();
 4802 }
 4803 
 4804 vm_offset_t
 4805 pmap_addr_hint(vm_object_t obj, vm_offset_t addr, vm_size_t size)
 4806 {
 4807 
 4808         if ((obj == NULL) || (size < NBPDR) ||
 4809             (obj->type != OBJT_DEVICE && obj->type != OBJT_SG)) {
 4810                 return addr;
 4811         }
 4812 
 4813         addr = (addr + PDRMASK) & ~PDRMASK;
 4814         return addr;
 4815 }
 4816 
 4817 /*
 4818  *      Increase the starting virtual address of the given mapping if a
 4819  *      different alignment might result in more superpage mappings.
 4820  */
 4821 void
 4822 pmap_align_superpage(vm_object_t object, vm_ooffset_t offset,
 4823     vm_offset_t *addr, vm_size_t size)
 4824 {
 4825         vm_offset_t superpage_offset;
 4826 
 4827         if (size < NBPDR)
 4828                 return;
 4829         if (object != NULL && (object->flags & OBJ_COLORED) != 0)
 4830                 offset += ptoa(object->pg_color);
 4831         superpage_offset = offset & PDRMASK;
 4832         if (size - ((NBPDR - superpage_offset) & PDRMASK) < NBPDR ||
 4833             (*addr & PDRMASK) == superpage_offset)
 4834                 return;
 4835         if ((*addr & PDRMASK) < superpage_offset)
 4836                 *addr = (*addr & ~PDRMASK) + superpage_offset;
 4837         else
 4838                 *addr = ((*addr + PDRMASK) & ~PDRMASK) + superpage_offset;
 4839 }
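
/*
 * Editor's note: a worked example of the adjustment above with made-up
 * numbers.  For 4 MB superpages (non-PAE i386), NBPDR = 0x400000 and
 * PDRMASK = 0x3fffff.  If the backing object offset is 0x00123000,
 * superpage_offset is 0x123000; a hinted *addr of 0x20000000 has
 * (*addr & PDRMASK) == 0, which is less than superpage_offset, so *addr
 * becomes 0x20000000 + 0x123000 = 0x20123000.  Assuming the mapping is
 * large enough that a full superpage remains after the shift, the virtual
 * address then has the same offset within a superpage as the object,
 * which is what later promotion to a 2/4 MB mapping requires.
 */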
 4840 
 4841 
 4842 #if defined(PMAP_DEBUG)
 4843 int pmap_pid_dump(int pid)
 4844 {
 4845         pmap_t pmap;
 4846         struct proc *p;
 4847         int npte = 0;
 4848         int index;
 4849 
 4850         sx_slock(&allproc_lock);
 4851         FOREACH_PROC_IN_SYSTEM(p) {
 4852                 if (p->p_pid != pid)
 4853                         continue;
 4854 
 4855                 if (p->p_vmspace) {
 4856                         int i,j;
 4857                         index = 0;
 4858                         pmap = vmspace_pmap(p->p_vmspace);
 4859                         for (i = 0; i < NPDEPTD; i++) {
 4860                                 pd_entry_t *pde;
 4861                                 pt_entry_t *pte;
 4862                                 vm_offset_t base = i << PDRSHIFT;
 4863                                 
 4864                                 pde = &pmap->pm_pdir[i];
 4865                                 if (pde && pmap_pde_v(pde)) {
 4866                                         for (j = 0; j < NPTEPG; j++) {
 4867                                                 vm_offset_t va = base + (j << PAGE_SHIFT);
 4868                                                 if (va >= (vm_offset_t) VM_MIN_KERNEL_ADDRESS) {
 4869                                                         if (index) {
 4870                                                                 index = 0;
 4871                                                                 printf("\n");
 4872                                                         }
 4873                                                         sx_sunlock(&allproc_lock);
 4874                                                         return npte;
 4875                                                 }
 4876                                                 pte = pmap_pte(pmap, va);
 4877                                                 if (pte && pmap_pte_v(pte)) {
 4878                                                         pt_entry_t pa;
 4879                                                         vm_page_t m;
 4880                                                         pa = *pte;
 4881                                                         m = PHYS_TO_VM_PAGE(pa & PG_FRAME);
 4882                                                         printf("va: 0x%x, pt: 0x%x, h: %d, w: %d, f: 0x%x",
 4883                                                                 va, pa, m->hold_count, m->wire_count, m->flags);
 4884                                                         npte++;
 4885                                                         index++;
 4886                                                         if (index >= 2) {
 4887                                                                 index = 0;
 4888                                                                 printf("\n");
 4889                                                         } else {
 4890                                                                 printf(" ");
 4891                                                         }
 4892                                                 }
 4893                                         }
 4894                                 }
 4895                         }
 4896                 }
 4897         }
 4898         sx_sunlock(&allproc_lock);
 4899         return npte;
 4900 }
 4901 #endif
 4902 
 4903 #if defined(DEBUG)
 4904 
 4905 static void     pads(pmap_t pm);
 4906 void            pmap_pvdump(vm_paddr_t pa);
 4907 
 4908 /* print address space of pmap*/
 4909 static void
 4910 pads(pmap_t pm)
 4911 {
 4912         int i, j;
 4913         vm_offset_t va;
 4914         pt_entry_t *ptep;
 4915 
 4916         if (pm == kernel_pmap)
 4917                 return;
 4918         for (i = 0; i < NPDEPTD; i++)
 4919                 if (pm->pm_pdir[i])
 4920                         for (j = 0; j < NPTEPG; j++) {
 4921                                 va = (i << PDRSHIFT) + (j << PAGE_SHIFT);
 4922                                 if (pm == kernel_pmap && va < KERNBASE)
 4923                                         continue;
 4924                                 if (pm != kernel_pmap && va > UPT_MAX_ADDRESS)
 4925                                         continue;
 4926                                 ptep = pmap_pte(pm, va);
 4927                                 if (pmap_pte_v(ptep))
 4928                                         printf("%x:%x ", va, *ptep);
 4929                         }
 4930 
 4931 }
 4932 
 4933 void
 4934 pmap_pvdump(vm_paddr_t pa)
 4935 {
 4936         pv_entry_t pv;
 4937         pmap_t pmap;
 4938         vm_page_t m;
 4939 
 4940         printf("pa %x", pa);
 4941         m = PHYS_TO_VM_PAGE(pa);
 4942         TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
 4943                 pmap = PV_PMAP(pv);
 4944                 printf(" -> pmap %p, va %x", (void *)pmap, pv->pv_va);
 4945                 pads(pmap);
 4946         }
 4947         printf(" ");
 4948 }
 4949 #endif

This page is part of the FreeBSD/Linux Kernel Cross-Reference, and was automatically generated using a modified version of the LXR engine.