FreeBSD/Linux Kernel Cross Reference
sys/i386/i386/pmap.c


    1 /*-
    2  * Copyright (c) 1991 Regents of the University of California.
    3  * All rights reserved.
    4  * Copyright (c) 1994 John S. Dyson
    5  * All rights reserved.
    6  * Copyright (c) 1994 David Greenman
    7  * All rights reserved.
    8  * Copyright (c) 2005-2008 Alan L. Cox <alc@cs.rice.edu>
    9  * All rights reserved.
   10  *
   11  * This code is derived from software contributed to Berkeley by
   12  * the Systems Programming Group of the University of Utah Computer
   13  * Science Department and William Jolitz of UUNET Technologies Inc.
   14  *
   15  * Redistribution and use in source and binary forms, with or without
   16  * modification, are permitted provided that the following conditions
   17  * are met:
   18  * 1. Redistributions of source code must retain the above copyright
   19  *    notice, this list of conditions and the following disclaimer.
   20  * 2. Redistributions in binary form must reproduce the above copyright
   21  *    notice, this list of conditions and the following disclaimer in the
   22  *    documentation and/or other materials provided with the distribution.
   23  * 3. All advertising materials mentioning features or use of this software
   24  *    must display the following acknowledgement:
   25  *      This product includes software developed by the University of
   26  *      California, Berkeley and its contributors.
   27  * 4. Neither the name of the University nor the names of its contributors
   28  *    may be used to endorse or promote products derived from this software
   29  *    without specific prior written permission.
   30  *
   31  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
   32  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   33  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   34  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
   35  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   36  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
   37  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   38  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   39  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   40  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   41  * SUCH DAMAGE.
   42  *
   43  *      from:   @(#)pmap.c      7.7 (Berkeley)  5/12/91
   44  */
   45 /*-
   46  * Copyright (c) 2003 Networks Associates Technology, Inc.
   47  * All rights reserved.
   48  *
   49  * This software was developed for the FreeBSD Project by Jake Burkholder,
   50  * Safeport Network Services, and Network Associates Laboratories, the
   51  * Security Research Division of Network Associates, Inc. under
   52  * DARPA/SPAWAR contract N66001-01-C-8035 ("CBOSS"), as part of the DARPA
   53  * CHATS research program.
   54  *
   55  * Redistribution and use in source and binary forms, with or without
   56  * modification, are permitted provided that the following conditions
   57  * are met:
   58  * 1. Redistributions of source code must retain the above copyright
   59  *    notice, this list of conditions and the following disclaimer.
   60  * 2. Redistributions in binary form must reproduce the above copyright
   61  *    notice, this list of conditions and the following disclaimer in the
   62  *    documentation and/or other materials provided with the distribution.
   63  *
   64  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
   65  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   66  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   67  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
   68  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   69  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
   70  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   71  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   72  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   73  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   74  * SUCH DAMAGE.
   75  */
   76 
   77 #include <sys/cdefs.h>
   78 __FBSDID("$FreeBSD$");
   79 
   80 /*
   81  *      Manages physical address maps.
   82  *
   83  *      In addition to hardware address maps, this
   84  *      module is called upon to provide software-use-only
   85  *      maps which may or may not be stored in the same
   86  *      form as hardware maps.  These pseudo-maps are
   87  *      used to store intermediate results from copy
   88  *      operations to and from address spaces.
   89  *
   90  *      Since the information managed by this module is
   91  *      also stored by the logical address mapping module,
   92  *      this module may throw away valid virtual-to-physical
   93  *      mappings at almost any time.  However, invalidations
   94  *      of virtual-to-physical mappings must be done as
   95  *      requested.
   96  *
   97  *      In order to cope with hardware architectures which
   98  *      make virtual-to-physical map invalidates expensive,
   99  *      this module may delay invalidation or protection-reduction
  100  *      operations until such time as they are actually
  101  *      necessary.  This module is given full information as
  102  *      to which processors are currently using which maps,
  103  *      and to when physical maps must be made correct.
  104  */
  105 
  106 #include "opt_cpu.h"
  107 #include "opt_pmap.h"
  108 #include "opt_msgbuf.h"
  109 #include "opt_smp.h"
  110 #include "opt_xbox.h"
  111 
  112 #include <sys/param.h>
  113 #include <sys/systm.h>
  114 #include <sys/kernel.h>
  115 #include <sys/ktr.h>
  116 #include <sys/lock.h>
  117 #include <sys/malloc.h>
  118 #include <sys/mman.h>
  119 #include <sys/msgbuf.h>
  120 #include <sys/mutex.h>
  121 #include <sys/proc.h>
  122 #include <sys/sx.h>
  123 #include <sys/vmmeter.h>
  124 #include <sys/sched.h>
  125 #include <sys/sysctl.h>
  126 #ifdef SMP
  127 #include <sys/smp.h>
  128 #endif
  129 
  130 #include <vm/vm.h>
  131 #include <vm/vm_param.h>
  132 #include <vm/vm_kern.h>
  133 #include <vm/vm_page.h>
  134 #include <vm/vm_map.h>
  135 #include <vm/vm_object.h>
  136 #include <vm/vm_extern.h>
  137 #include <vm/vm_pageout.h>
  138 #include <vm/vm_pager.h>
  139 #include <vm/vm_reserv.h>
  140 #include <vm/uma.h>
  141 
  142 #include <machine/cpu.h>
  143 #include <machine/cputypes.h>
  144 #include <machine/md_var.h>
  145 #include <machine/pcb.h>
  146 #include <machine/specialreg.h>
  147 #ifdef SMP
  148 #include <machine/smp.h>
  149 #endif
  150 
  151 #ifdef XBOX
  152 #include <machine/xbox.h>
  153 #endif
  154 
  155 #if !defined(CPU_DISABLE_SSE) && defined(I686_CPU)
  156 #define CPU_ENABLE_SSE
  157 #endif
  158 
  159 #ifndef PMAP_SHPGPERPROC
  160 #define PMAP_SHPGPERPROC 200
  161 #endif
  162 
  163 #if !defined(DIAGNOSTIC)
  164 #define PMAP_INLINE     __gnu89_inline
  165 #else
  166 #define PMAP_INLINE
  167 #endif
  168 
  169 #define PV_STATS
  170 #ifdef PV_STATS
  171 #define PV_STAT(x)      do { x ; } while (0)
  172 #else
  173 #define PV_STAT(x)      do { } while (0)
  174 #endif
  175 
  176 #define pa_index(pa)    ((pa) >> PDRSHIFT)
  177 #define pa_to_pvh(pa)   (&pv_table[pa_index(pa)])
  178 
  179 /*
  180  * Get PDEs and PTEs for user/kernel address space
  181  */
  182 #define pmap_pde(m, v)  (&((m)->pm_pdir[(vm_offset_t)(v) >> PDRSHIFT]))
  183 #define pdir_pde(m, v) (m[(vm_offset_t)(v) >> PDRSHIFT])
  184 
  185 #define pmap_pde_v(pte)         ((*(int *)pte & PG_V) != 0)
  186 #define pmap_pte_w(pte)         ((*(int *)pte & PG_W) != 0)
  187 #define pmap_pte_m(pte)         ((*(int *)pte & PG_M) != 0)
  188 #define pmap_pte_u(pte)         ((*(int *)pte & PG_A) != 0)
  189 #define pmap_pte_v(pte)         ((*(int *)pte & PG_V) != 0)
  190 
  191 #define pmap_pte_set_w(pte, v)  ((v) ? atomic_set_int((u_int *)(pte), PG_W) : \
  192     atomic_clear_int((u_int *)(pte), PG_W))
  193 #define pmap_pte_set_prot(pte, v) ((*(int *)pte &= ~PG_PROT), (*(int *)pte |= (v)))
  194 
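A minimal userland sketch of the address-splitting arithmetic behind pmap_pde(), vtopte()-style lookups, and pa_index(), assuming the classic non-PAE i386 constants (4 KB pages, 4 MB page-directory entries); with PAE the directory span is 2 MB and entries are 64 bits wide:

#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT 12                                   /* 4 KB pages */
#define PDRSHIFT   22                                   /* one PDE maps 4 MB (non-PAE) */
#define PAGE_MASK  ((1u << PAGE_SHIFT) - 1)
#define NPTEPG     (1u << (PDRSHIFT - PAGE_SHIFT))      /* 1024 PTEs per page table */

int
main(void)
{
	uint32_t va = 0xc0a1b123;       /* arbitrary kernel VA, for illustration only */
	uint32_t pa = 0x01234567;       /* arbitrary physical address */

	printf("va 0x%08x -> pde index %u, pte index %u, offset 0x%03x\n", va,
	    va >> PDRSHIFT,                     /* pmap_pde()/pdir_pde() index */
	    (va >> PAGE_SHIFT) & (NPTEPG - 1),  /* index into that page table */
	    va & PAGE_MASK);                    /* byte within the 4 KB page */

	/* pa_index()/pa_to_pvh() use the same top bits, but of a physical
	 * address, to pick the 2/4 MB superpage's slot in pv_table. */
	printf("pa 0x%08x -> pv_table[%u]\n", pa, pa >> PDRSHIFT);
	return (0);
}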
  195 struct pmap kernel_pmap_store;
  196 LIST_HEAD(pmaplist, pmap);
  197 static struct pmaplist allpmaps;
  198 static struct mtx allpmaps_lock;
  199 
  200 vm_offset_t virtual_avail;      /* VA of first avail page (after kernel bss) */
  201 vm_offset_t virtual_end;        /* VA of last avail page (end of kernel AS) */
  202 int pgeflag = 0;                /* PG_G or-in */
  203 int pseflag = 0;                /* PG_PS or-in */
  204 
  205 static int nkpt;
  206 vm_offset_t kernel_vm_end;
  207 extern u_int32_t KERNend;
  208 
  209 #ifdef PAE
  210 pt_entry_t pg_nx;
  211 static uma_zone_t pdptzone;
  212 #endif
  213 
  214 SYSCTL_NODE(_vm, OID_AUTO, pmap, CTLFLAG_RD, 0, "VM/pmap parameters");
  215 
  216 static int pg_ps_enabled;
  217 SYSCTL_INT(_vm_pmap, OID_AUTO, pg_ps_enabled, CTLFLAG_RD, &pg_ps_enabled, 0,
  218     "Are large page mappings enabled?");
  219 
  220 /*
  221  * Data for the pv entry allocation mechanism
  222  */
  223 static int pv_entry_count = 0, pv_entry_max = 0, pv_entry_high_water = 0;
  224 static struct md_page *pv_table;
  225 static int shpgperproc = PMAP_SHPGPERPROC;
  226 
  227 struct pv_chunk *pv_chunkbase;          /* KVA block for pv_chunks */
  228 int pv_maxchunks;                       /* How many chunks we have KVA for */
  229 vm_offset_t pv_vafree;                  /* freelist stored in the PTE */
  230 
  231 /*
  232  * All those kernel PT submaps that BSD is so fond of
  233  */
  234 struct sysmaps {
  235         struct  mtx lock;
  236         pt_entry_t *CMAP1;
  237         pt_entry_t *CMAP2;
  238         caddr_t CADDR1;
  239         caddr_t CADDR2;
  240 };
  241 static struct sysmaps sysmaps_pcpu[MAXCPU];
  242 pt_entry_t *CMAP1 = 0;
  243 static pt_entry_t *CMAP3;
  244 caddr_t CADDR1 = 0, ptvmmap = 0;
  245 static caddr_t CADDR3;
  246 struct msgbuf *msgbufp = 0;
  247 
  248 /*
  249  * Crashdump maps.
  250  */
  251 static caddr_t crashdumpmap;
  252 
  253 static pt_entry_t *PMAP1 = 0, *PMAP2;
  254 static pt_entry_t *PADDR1 = 0, *PADDR2;
  255 #ifdef SMP
  256 static int PMAP1cpu;
  257 static int PMAP1changedcpu;
  258 SYSCTL_INT(_debug, OID_AUTO, PMAP1changedcpu, CTLFLAG_RD, 
  259            &PMAP1changedcpu, 0,
  260            "Number of times pmap_pte_quick changed CPU with same PMAP1");
  261 #endif
  262 static int PMAP1changed;
  263 SYSCTL_INT(_debug, OID_AUTO, PMAP1changed, CTLFLAG_RD, 
  264            &PMAP1changed, 0,
  265            "Number of times pmap_pte_quick changed PMAP1");
  266 static int PMAP1unchanged;
  267 SYSCTL_INT(_debug, OID_AUTO, PMAP1unchanged, CTLFLAG_RD, 
  268            &PMAP1unchanged, 0,
  269            "Number of times pmap_pte_quick didn't change PMAP1");
  270 static struct mtx PMAP2mutex;
  271 
  272 static void     free_pv_entry(pmap_t pmap, pv_entry_t pv);
  273 static pv_entry_t get_pv_entry(pmap_t locked_pmap, int try);
  274 static void     pmap_pv_demote_pde(pmap_t pmap, vm_offset_t va, vm_paddr_t pa);
  275 static boolean_t pmap_pv_insert_pde(pmap_t pmap, vm_offset_t va, vm_paddr_t pa);
  276 static void     pmap_pv_promote_pde(pmap_t pmap, vm_offset_t va, vm_paddr_t pa);
  277 static void     pmap_pvh_free(struct md_page *pvh, pmap_t pmap, vm_offset_t va);
  278 static pv_entry_t pmap_pvh_remove(struct md_page *pvh, pmap_t pmap,
  279                     vm_offset_t va);
  280 
  281 static boolean_t pmap_demote_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t va);
  282 static boolean_t pmap_enter_pde(pmap_t pmap, vm_offset_t va, vm_page_t m,
  283     vm_prot_t prot);
  284 static vm_page_t pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va,
  285     vm_page_t m, vm_prot_t prot, vm_page_t mpte);
  286 static void pmap_insert_pt_page(pmap_t pmap, vm_page_t mpte);
  287 static boolean_t pmap_is_modified_pvh(struct md_page *pvh);
  288 static vm_page_t pmap_lookup_pt_page(pmap_t pmap, vm_offset_t va);
  289 static void pmap_promote_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t va);
  290 static boolean_t pmap_protect_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t sva,
  291     vm_prot_t prot);
  292 static void pmap_remove_pde(pmap_t pmap, pd_entry_t *pdq, vm_offset_t sva,
  293     vm_page_t *free);
  294 static int pmap_remove_pte(pmap_t pmap, pt_entry_t *ptq, vm_offset_t sva,
  295     vm_page_t *free);
  296 static void pmap_remove_pt_page(pmap_t pmap, vm_page_t mpte);
  297 static void pmap_remove_page(struct pmap *pmap, vm_offset_t va,
  298     vm_page_t *free);
  299 static void pmap_remove_entry(struct pmap *pmap, vm_page_t m,
  300                                         vm_offset_t va);
  301 static void pmap_insert_entry(pmap_t pmap, vm_offset_t va, vm_page_t m);
  302 static boolean_t pmap_try_insert_pv_entry(pmap_t pmap, vm_offset_t va,
  303     vm_page_t m);
  304 
  305 static vm_page_t pmap_allocpte(pmap_t pmap, vm_offset_t va, int flags);
  306 
  307 static vm_page_t _pmap_allocpte(pmap_t pmap, unsigned ptepindex, int flags);
  308 static int _pmap_unwire_pte_hold(pmap_t pmap, vm_page_t m, vm_page_t *free);
  309 static pt_entry_t *pmap_pte_quick(pmap_t pmap, vm_offset_t va);
  310 static void pmap_pte_release(pt_entry_t *pte);
  311 static int pmap_unuse_pt(pmap_t, vm_offset_t, vm_page_t *);
  312 static vm_offset_t pmap_kmem_choose(vm_offset_t addr);
  313 #ifdef PAE
  314 static void *pmap_pdpt_allocf(uma_zone_t zone, int bytes, u_int8_t *flags, int wait);
  315 #endif
  316 
  317 CTASSERT(1 << PDESHIFT == sizeof(pd_entry_t));
  318 CTASSERT(1 << PTESHIFT == sizeof(pt_entry_t));
  319 
  320 /*
  321  * If you get an error here, then you set KVA_PAGES wrong! See the
  322  * description of KVA_PAGES in sys/i386/include/pmap.h. It must be
   323  * a multiple of 4 for a normal kernel, or a multiple of 8 for a PAE kernel.
  324  */
  325 CTASSERT(KERNBASE % (1 << 24) == 0);
  326 
  327 /*
  328  * Move the kernel virtual free pointer to the next
  329  * 4MB.  This is used to help improve performance
  330  * by using a large (4MB) page for much of the kernel
  331  * (.text, .data, .bss)
  332  */
  333 static vm_offset_t
  334 pmap_kmem_choose(vm_offset_t addr)
  335 {
  336         vm_offset_t newaddr = addr;
  337 
  338 #ifndef DISABLE_PSE
  339         if (cpu_feature & CPUID_PSE)
  340                 newaddr = (addr + PDRMASK) & ~PDRMASK;
  341 #endif
  342         return newaddr;
  343 }
  344 
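pmap_kmem_choose() rounds the free virtual address up to the next superpage boundary with (addr + PDRMASK) & ~PDRMASK. A quick standalone check of that idiom, assuming the non-PAE value PDRMASK = 4 MB - 1 (round_to_pdr is an illustrative name, not a kernel function):

#include <stdio.h>
#include <stdint.h>

#define NBPDR   (1u << 22)              /* 4 MB: bytes mapped by one PDE (non-PAE) */
#define PDRMASK (NBPDR - 1)

static uint32_t
round_to_pdr(uint32_t addr)
{
	/* Round up to the next 4 MB boundary; aligned values are unchanged. */
	return ((addr + PDRMASK) & ~PDRMASK);
}

int
main(void)
{
	printf("0x%08x -> 0x%08x\n", 0xc0523000u, round_to_pdr(0xc0523000u));
	printf("0x%08x -> 0x%08x\n", 0xc0800000u, round_to_pdr(0xc0800000u));
	return (0);
}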
  345 /*
  346  *      Bootstrap the system enough to run with virtual memory.
  347  *
  348  *      On the i386 this is called after mapping has already been enabled
  349  *      and just syncs the pmap module with what has already been done.
  350  *      [We can't call it easily with mapping off since the kernel is not
  351  *      mapped with PA == VA, hence we would have to relocate every address
  352  *      from the linked base (virtual) address "KERNBASE" to the actual
  353  *      (physical) address starting relative to 0]
  354  */
  355 void
  356 pmap_bootstrap(vm_paddr_t firstaddr)
  357 {
  358         vm_offset_t va;
  359         pt_entry_t *pte, *unused;
  360         struct sysmaps *sysmaps;
  361         int i;
  362 
  363         /*
  364          * XXX The calculation of virtual_avail is wrong. It's NKPT*PAGE_SIZE too
  365          * large. It should instead be correctly calculated in locore.s and
  366          * not based on 'first' (which is a physical address, not a virtual
  367          * address, for the start of unused physical memory). The kernel
  368          * page tables are NOT double mapped and thus should not be included
  369          * in this calculation.
  370          */
  371         virtual_avail = (vm_offset_t) KERNBASE + firstaddr;
  372         virtual_avail = pmap_kmem_choose(virtual_avail);
  373 
  374         virtual_end = VM_MAX_KERNEL_ADDRESS;
  375 
  376         /*
  377          * Initialize the kernel pmap (which is statically allocated).
  378          */
  379         PMAP_LOCK_INIT(kernel_pmap);
  380         kernel_pmap->pm_pdir = (pd_entry_t *) (KERNBASE + (u_int)IdlePTD);
  381 #ifdef PAE
  382         kernel_pmap->pm_pdpt = (pdpt_entry_t *) (KERNBASE + (u_int)IdlePDPT);
  383 #endif
  384         kernel_pmap->pm_root = NULL;
  385         kernel_pmap->pm_active = -1;    /* don't allow deactivation */
  386         TAILQ_INIT(&kernel_pmap->pm_pvchunk);
  387         LIST_INIT(&allpmaps);
  388         mtx_init(&allpmaps_lock, "allpmaps", NULL, MTX_SPIN);
  389         mtx_lock_spin(&allpmaps_lock);
  390         LIST_INSERT_HEAD(&allpmaps, kernel_pmap, pm_list);
  391         mtx_unlock_spin(&allpmaps_lock);
  392         nkpt = NKPT;
  393 
  394         /*
  395          * Reserve some special page table entries/VA space for temporary
  396          * mapping of pages.
  397          */
  398 #define SYSMAP(c, p, v, n)      \
  399         v = (c)va; va += ((n)*PAGE_SIZE); p = pte; pte += (n);
  400 
  401         va = virtual_avail;
  402         pte = vtopte(va);
  403 
  404         /*
  405          * CMAP1/CMAP2 are used for zeroing and copying pages.
  406          * CMAP3 is used for the idle process page zeroing.
  407          */
  408         for (i = 0; i < MAXCPU; i++) {
  409                 sysmaps = &sysmaps_pcpu[i];
  410                 mtx_init(&sysmaps->lock, "SYSMAPS", NULL, MTX_DEF);
  411                 SYSMAP(caddr_t, sysmaps->CMAP1, sysmaps->CADDR1, 1)
  412                 SYSMAP(caddr_t, sysmaps->CMAP2, sysmaps->CADDR2, 1)
  413         }
  414         SYSMAP(caddr_t, CMAP1, CADDR1, 1)
  415         SYSMAP(caddr_t, CMAP3, CADDR3, 1)
  416         *CMAP3 = 0;
  417 
  418         /*
  419          * Crashdump maps.
  420          */
  421         SYSMAP(caddr_t, unused, crashdumpmap, MAXDUMPPGS)
  422 
  423         /*
  424          * ptvmmap is used for reading arbitrary physical pages via /dev/mem.
  425          */
  426         SYSMAP(caddr_t, unused, ptvmmap, 1)
  427 
  428         /*
  429          * msgbufp is used to map the system message buffer.
  430          */
  431         SYSMAP(struct msgbuf *, unused, msgbufp, atop(round_page(MSGBUF_SIZE)))
  432 
  433         /*
  434          * ptemap is used for pmap_pte_quick
  435          */
  436         SYSMAP(pt_entry_t *, PMAP1, PADDR1, 1);
  437         SYSMAP(pt_entry_t *, PMAP2, PADDR2, 1);
  438 
  439         mtx_init(&PMAP2mutex, "PMAP2", NULL, MTX_DEF);
  440 
  441         virtual_avail = va;
  442 
  443         *CMAP1 = 0;
  444 
  445         /*
  446          * Leave in place an identity mapping (virt == phys) for the low 1 MB
  447          * physical memory region that is used by the ACPI wakeup code.  This
  448          * mapping must not have PG_G set. 
  449          */
  450 #ifdef XBOX
  451         /* FIXME: This is gross, but needed for the XBOX. Since we are in such
   452  * an early stage, we cannot yet neatly map video memory ... :-(
  453          * Better fixes are very welcome! */
  454         if (!arch_i386_is_xbox)
  455 #endif
  456         for (i = 1; i < NKPT; i++)
  457                 PTD[i] = 0;
  458 
  459         /* Initialize the PAT MSR if present. */
  460         pmap_init_pat();
  461 
  462         /* Turn on PG_G on kernel page(s) */
  463         pmap_set_pg();
  464 }
  465 
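The SYSMAP() macro used in pmap_bootstrap() above hands out fixed KVA slots by advancing a virtual-address cursor and a page-table-entry cursor in lockstep. A rough standalone model of that allocation pattern; sysmap_carve(), va_cursor, pte_cursor, and fake_ptes are invented for illustration and are not kernel interfaces:

#include <stdio.h>
#include <stdint.h>

#define PAGE_SIZE 4096u

/* Cursors that stand in for 'va' and 'pte' in pmap_bootstrap(). */
static uintptr_t va_cursor = 0xc1000000;        /* pretend start of free KVA */
static uint32_t  fake_ptes[64];                 /* pretend page-table slots */
static uint32_t *pte_cursor = fake_ptes;

/* Hand out 'n' consecutive pages of KVA plus the PTEs that back them. */
static uintptr_t
sysmap_carve(uint32_t **ptep, int n)
{
	uintptr_t va = va_cursor;

	*ptep = pte_cursor;
	va_cursor += (uintptr_t)n * PAGE_SIZE;
	pte_cursor += n;
	return (va);
}

int
main(void)
{
	uint32_t *cmap1, *cmap2;
	uintptr_t caddr1, caddr2;

	caddr1 = sysmap_carve(&cmap1, 1);   /* like SYSMAP(caddr_t, CMAP1, CADDR1, 1) */
	caddr2 = sysmap_carve(&cmap2, 1);
	printf("CADDR1=%#lx CADDR2=%#lx, PTE slot indexes %td and %td\n",
	    (unsigned long)caddr1, (unsigned long)caddr2,
	    cmap1 - fake_ptes, cmap2 - fake_ptes);
	return (0);
}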
  466 /*
  467  * Setup the PAT MSR.
  468  */
  469 void
  470 pmap_init_pat(void)
  471 {
  472         uint64_t pat_msr;
  473 
  474         /* Bail if this CPU doesn't implement PAT. */
  475         if (!(cpu_feature & CPUID_PAT))
  476                 return;
  477 
  478 #ifdef PAT_WORKS
  479         /*
  480          * Leave the indices 0-3 at the default of WB, WT, UC, and UC-.
  481          * Program 4 and 5 as WP and WC.
  482          * Leave 6 and 7 as UC and UC-.
  483          */
  484         pat_msr = rdmsr(MSR_PAT);
  485         pat_msr &= ~(PAT_MASK(4) | PAT_MASK(5));
  486         pat_msr |= PAT_VALUE(4, PAT_WRITE_PROTECTED) |
  487             PAT_VALUE(5, PAT_WRITE_COMBINING);
  488 #else
  489         /*
  490          * Due to some Intel errata, we can only safely use the lower 4
  491          * PAT entries.  Thus, just replace PAT Index 2 with WC instead
  492          * of UC-.
  493          *
  494          *   Intel Pentium III Processor Specification Update
  495          * Errata E.27 (Upper Four PAT Entries Not Usable With Mode B
  496          * or Mode C Paging)
  497          *
  498          *   Intel Pentium IV  Processor Specification Update
  499          * Errata N46 (PAT Index MSB May Be Calculated Incorrectly)
  500          */
  501         pat_msr = rdmsr(MSR_PAT);
  502         pat_msr &= ~PAT_MASK(2);
  503         pat_msr |= PAT_VALUE(2, PAT_WRITE_COMBINING);
  504 #endif
  505         wrmsr(MSR_PAT, pat_msr);
  506 }
  507 
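The PAT MSR packs eight memory-type fields, one per byte, so pmap_init_pat() rewrites entry 2 by clearing its byte and OR-ing in the write-combining type. A hedged sketch of that field update: the PAT_MASK/PAT_VALUE shifts match the byte layout, the 0x01 encoding for WC follows the Intel SDM, and the boot-time MSR contents are simply assumed to be the documented power-on default here:

#include <stdio.h>
#include <stdint.h>

#define PAT_MASK(i)          (0xffULL << ((i) * 8))
#define PAT_VALUE(i, x)      ((uint64_t)(x) << ((i) * 8))
#define PAT_WRITE_COMBINING  0x01       /* WC encoding from the SDM (assumption) */

int
main(void)
{
	/* Assumed power-on default: WB, WT, UC-, UC, repeated in the upper half. */
	uint64_t pat_msr = 0x0007040600070406ULL;

	/* Replace entry 2 (normally UC-) with write-combining, as the
	 * non-PAT_WORKS branch above does before wrmsr(MSR_PAT, ...). */
	pat_msr &= ~PAT_MASK(2);
	pat_msr |= PAT_VALUE(2, PAT_WRITE_COMBINING);

	printf("new PAT MSR = %#018llx\n", (unsigned long long)pat_msr);
	return (0);
}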
  508 /*
  509  * Set PG_G on kernel pages.  Only the BSP calls this when SMP is turned on.
  510  */
  511 void
  512 pmap_set_pg(void)
  513 {
  514         pd_entry_t pdir;
  515         pt_entry_t *pte;
  516         vm_offset_t va, endva;
  517         int i; 
  518 
  519         if (pgeflag == 0)
  520                 return;
  521 
  522         i = KERNLOAD/NBPDR;
  523         endva = KERNBASE + KERNend;
  524 
  525         if (pseflag) {
  526                 va = KERNBASE + KERNLOAD;
  527                 while (va  < endva) {
  528                         pdir = kernel_pmap->pm_pdir[KPTDI+i];
  529                         pdir |= pgeflag;
  530                         kernel_pmap->pm_pdir[KPTDI+i] = PTD[KPTDI+i] = pdir;
  531                         invltlb();      /* Play it safe, invltlb() every time */
  532                         i++;
  533                         va += NBPDR;
  534                 }
  535         } else {
  536                 va = (vm_offset_t)btext;
  537                 while (va < endva) {
  538                         pte = vtopte(va);
  539                         if (*pte)
  540                                 *pte |= pgeflag;
  541                         invltlb();      /* Play it safe, invltlb() every time */
  542                         va += PAGE_SIZE;
  543                 }
  544         }
  545 }
  546 
  547 /*
  548  * Initialize a vm_page's machine-dependent fields.
  549  */
  550 void
  551 pmap_page_init(vm_page_t m)
  552 {
  553 
  554         TAILQ_INIT(&m->md.pv_list);
  555 }
  556 
  557 #ifdef PAE
  558 
  559 static MALLOC_DEFINE(M_PMAPPDPT, "pmap", "pmap pdpt");
  560 
  561 static void *
  562 pmap_pdpt_allocf(uma_zone_t zone, int bytes, u_int8_t *flags, int wait)
  563 {
  564 
  565         /* Inform UMA that this allocator uses kernel_map/object. */
  566         *flags = UMA_SLAB_KERNEL;
  567         return (contigmalloc(PAGE_SIZE, M_PMAPPDPT, 0, 0x0ULL, 0xffffffffULL,
  568             1, 0));
  569 }
  570 #endif
  571 
  572 /*
   573  * Abuse the pte nodes for unmapped kva to thread a kva freelist through.
  574  * Requirements:
  575  *  - Must deal with pages in order to ensure that none of the PG_* bits
  576  *    are ever set, PG_V in particular.
  577  *  - Assumes we can write to ptes without pte_store() atomic ops, even
  578  *    on PAE systems.  This should be ok.
  579  *  - Assumes nothing will ever test these addresses for 0 to indicate
  580  *    no mapping instead of correctly checking PG_V.
  581  *  - Assumes a vm_offset_t will fit in a pte (true for i386).
  582  * Because PG_V is never set, there can be no mappings to invalidate.
  583  */
  584 static vm_offset_t
  585 pmap_ptelist_alloc(vm_offset_t *head)
  586 {
  587         pt_entry_t *pte;
  588         vm_offset_t va;
  589 
  590         va = *head;
  591         if (va == 0)
  592                 return (va);    /* Out of memory */
  593         pte = vtopte(va);
  594         *head = *pte;
  595         if (*head & PG_V)
  596                 panic("pmap_ptelist_alloc: va with PG_V set!");
  597         *pte = 0;
  598         return (va);
  599 }
  600 
  601 static void
  602 pmap_ptelist_free(vm_offset_t *head, vm_offset_t va)
  603 {
  604         pt_entry_t *pte;
  605 
  606         if (va & PG_V)
  607                 panic("pmap_ptelist_free: freeing va with PG_V set!");
  608         pte = vtopte(va);
  609         *pte = *head;           /* virtual! PG_V is 0 though */
  610         *head = va;
  611 }
  612 
  613 static void
  614 pmap_ptelist_init(vm_offset_t *head, void *base, int npages)
  615 {
  616         int i;
  617         vm_offset_t va;
  618 
  619         *head = 0;
  620         for (i = npages - 1; i >= 0; i--) {
  621                 va = (vm_offset_t)base + i * PAGE_SIZE;
  622                 pmap_ptelist_free(head, va);
  623         }
  624 }
  625 
  626 
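The pmap_ptelist_*() routines thread a free list of KVA pages through the otherwise-unused PTE slots themselves: each free page's PTE holds the virtual address of the next free page, and because those stored values never have PG_V set they cannot be mistaken for live mappings. A self-contained model of the same trick, with a plain array (fake_pt) standing in for the real page table and vtopte_model() standing in for vtopte():

#include <stdio.h>
#include <stdint.h>

#define PAGE_SIZE 4096u
#define NPAGES    4

static uint32_t fake_pt[NPAGES];                /* stands in for the unused PTE slots */
static const uint32_t base = 0x10000000;        /* first VA backed by fake_pt */

static uint32_t *
vtopte_model(uint32_t va)
{
	return (&fake_pt[(va - base) / PAGE_SIZE]);
}

static void
ptelist_free(uint32_t *head, uint32_t va)
{
	*vtopte_model(va) = *head;      /* old head goes into the PTE slot (PG_V stays clear) */
	*head = va;
}

static uint32_t
ptelist_alloc(uint32_t *head)
{
	uint32_t va = *head;

	if (va == 0)
		return (0);             /* out of KVA */
	*head = *vtopte_model(va);      /* next free VA was stashed in the PTE */
	*vtopte_model(va) = 0;
	return (va);
}

int
main(void)
{
	uint32_t head = 0;
	int i;

	for (i = NPAGES - 1; i >= 0; i--)       /* same order as pmap_ptelist_init() */
		ptelist_free(&head, base + i * PAGE_SIZE);
	printf("first alloc:  %#x\n", ptelist_alloc(&head));
	printf("second alloc: %#x\n", ptelist_alloc(&head));
	return (0);
}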
  627 /*
  628  *      Initialize the pmap module.
  629  *      Called by vm_init, to initialize any structures that the pmap
  630  *      system needs to map virtual memory.
  631  */
  632 void
  633 pmap_init(void)
  634 {
  635         vm_page_t mpte;
  636         vm_size_t s;
  637         int i, pv_npg;
  638 
  639         /*
  640          * Initialize the vm page array entries for the kernel pmap's
  641          * page table pages.
  642          */ 
  643         for (i = 0; i < nkpt; i++) {
  644                 mpte = PHYS_TO_VM_PAGE(PTD[i + KPTDI] & PG_FRAME);
  645                 KASSERT(mpte >= vm_page_array &&
  646                     mpte < &vm_page_array[vm_page_array_size],
  647                     ("pmap_init: page table page is out of range"));
  648                 mpte->pindex = i + KPTDI;
  649                 mpte->phys_addr = PTD[i + KPTDI] & PG_FRAME;
  650         }
  651 
  652         /*
  653          * Initialize the address space (zone) for the pv entries.  Set a
  654          * high water mark so that the system can recover from excessive
  655          * numbers of pv entries.
  656          */
  657         TUNABLE_INT_FETCH("vm.pmap.shpgperproc", &shpgperproc);
  658         pv_entry_max = shpgperproc * maxproc + cnt.v_page_count;
  659         TUNABLE_INT_FETCH("vm.pmap.pv_entries", &pv_entry_max);
  660         pv_entry_max = roundup(pv_entry_max, _NPCPV);
  661         pv_entry_high_water = 9 * (pv_entry_max / 10);
  662 
  663         /*
  664          * Are large page mappings enabled?
  665          */
  666         TUNABLE_INT_FETCH("vm.pmap.pg_ps_enabled", &pg_ps_enabled);
  667 
  668         /*
  669          * Calculate the size of the pv head table for superpages.
  670          */
  671         for (i = 0; phys_avail[i + 1]; i += 2);
  672         pv_npg = round_4mpage(phys_avail[(i - 2) + 1]) / NBPDR;
  673 
  674         /*
  675          * Allocate memory for the pv head table for superpages.
  676          */
  677         s = (vm_size_t)(pv_npg * sizeof(struct md_page));
  678         s = round_page(s);
  679         pv_table = (struct md_page *)kmem_alloc(kernel_map, s);
  680         for (i = 0; i < pv_npg; i++)
  681                 TAILQ_INIT(&pv_table[i].pv_list);
  682 
  683         pv_maxchunks = MAX(pv_entry_max / _NPCPV, maxproc);
  684         pv_chunkbase = (struct pv_chunk *)kmem_alloc_nofault(kernel_map,
  685             PAGE_SIZE * pv_maxchunks);
  686         if (pv_chunkbase == NULL)
  687                 panic("pmap_init: not enough kvm for pv chunks");
  688         pmap_ptelist_init(&pv_vafree, pv_chunkbase, pv_maxchunks);
  689 #ifdef PAE
  690         pdptzone = uma_zcreate("PDPT", NPGPTD * sizeof(pdpt_entry_t), NULL,
  691             NULL, NULL, NULL, (NPGPTD * sizeof(pdpt_entry_t)) - 1,
  692             UMA_ZONE_VM | UMA_ZONE_NOFREE);
  693         uma_zone_set_allocf(pdptzone, pmap_pdpt_allocf);
  694 #endif
  695 }
  696 
  697 
  698 SYSCTL_INT(_vm_pmap, OID_AUTO, pv_entry_max, CTLFLAG_RD, &pv_entry_max, 0,
  699         "Max number of PV entries");
  700 SYSCTL_INT(_vm_pmap, OID_AUTO, shpgperproc, CTLFLAG_RD, &shpgperproc, 0,
  701         "Page share factor per proc");
  702 
  703 SYSCTL_NODE(_vm_pmap, OID_AUTO, pde, CTLFLAG_RD, 0,
  704     "2/4MB page mapping counters");
  705 
  706 static u_long pmap_pde_demotions;
  707 SYSCTL_ULONG(_vm_pmap_pde, OID_AUTO, demotions, CTLFLAG_RD,
  708     &pmap_pde_demotions, 0, "2/4MB page demotions");
  709 
  710 static u_long pmap_pde_mappings;
  711 SYSCTL_ULONG(_vm_pmap_pde, OID_AUTO, mappings, CTLFLAG_RD,
  712     &pmap_pde_mappings, 0, "2/4MB page mappings");
  713 
  714 static u_long pmap_pde_p_failures;
  715 SYSCTL_ULONG(_vm_pmap_pde, OID_AUTO, p_failures, CTLFLAG_RD,
  716     &pmap_pde_p_failures, 0, "2/4MB page promotion failures");
  717 
  718 static u_long pmap_pde_promotions;
  719 SYSCTL_ULONG(_vm_pmap_pde, OID_AUTO, promotions, CTLFLAG_RD,
  720     &pmap_pde_promotions, 0, "2/4MB page promotions");
  721 
  722 /***************************************************
  723  * Low level helper routines.....
  724  ***************************************************/
  725 
  726 /*
  727  * Determine the appropriate bits to set in a PTE or PDE for a specified
  728  * caching mode.
  729  */
  730 static int
  731 pmap_cache_bits(int mode, boolean_t is_pde)
  732 {
  733         int pat_flag, pat_index, cache_bits;
  734 
  735         /* The PAT bit is different for PTE's and PDE's. */
  736         pat_flag = is_pde ? PG_PDE_PAT : PG_PTE_PAT;
  737 
  738         /* If we don't support PAT, map extended modes to older ones. */
  739         if (!(cpu_feature & CPUID_PAT)) {
  740                 switch (mode) {
  741                 case PAT_UNCACHEABLE:
  742                 case PAT_WRITE_THROUGH:
  743                 case PAT_WRITE_BACK:
  744                         break;
  745                 case PAT_UNCACHED:
  746                 case PAT_WRITE_COMBINING:
  747                 case PAT_WRITE_PROTECTED:
  748                         mode = PAT_UNCACHEABLE;
  749                         break;
  750                 }
  751         }
  752         
  753         /* Map the caching mode to a PAT index. */
  754         switch (mode) {
  755 #ifdef PAT_WORKS
  756         case PAT_UNCACHEABLE:
  757                 pat_index = 3;
  758                 break;
  759         case PAT_WRITE_THROUGH:
  760                 pat_index = 1;
  761                 break;
  762         case PAT_WRITE_BACK:
  763                 pat_index = 0;
  764                 break;
  765         case PAT_UNCACHED:
  766                 pat_index = 2;
  767                 break;
  768         case PAT_WRITE_COMBINING:
  769                 pat_index = 5;
  770                 break;
  771         case PAT_WRITE_PROTECTED:
  772                 pat_index = 4;
  773                 break;
  774 #else
  775         case PAT_UNCACHED:
  776         case PAT_UNCACHEABLE:
  777         case PAT_WRITE_PROTECTED:
  778                 pat_index = 3;
  779                 break;
  780         case PAT_WRITE_THROUGH:
  781                 pat_index = 1;
  782                 break;
  783         case PAT_WRITE_BACK:
  784                 pat_index = 0;
  785                 break;
  786         case PAT_WRITE_COMBINING:
  787                 pat_index = 2;
  788                 break;
  789 #endif
  790         default:
  791                 panic("Unknown caching mode %d\n", mode);
  792         }       
  793 
  794         /* Map the 3-bit index value into the PAT, PCD, and PWT bits. */
  795         cache_bits = 0;
  796         if (pat_index & 0x4)
  797                 cache_bits |= pat_flag;
  798         if (pat_index & 0x2)
  799                 cache_bits |= PG_NC_PCD;
  800         if (pat_index & 0x1)
  801                 cache_bits |= PG_NC_PWT;
  802         return (cache_bits);
  803 }
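pmap_cache_bits() ends by spreading a 3-bit PAT table index across the PAT, PCD, and PWT bits of the entry being built. A tiny standalone version of just that last step, assuming the usual i386 bit positions (PWT = bit 3, PCD = bit 4, PAT = bit 7 in a 4 KB PTE and bit 12 in a 2/4 MB PDE); cache_bits() here is an illustrative copy, not the kernel function:

#include <stdio.h>

#define PG_NC_PWT   0x008       /* page write-through */
#define PG_NC_PCD   0x010       /* page cache disable */
#define PG_PTE_PAT  0x080       /* PAT bit in a 4 KB PTE */
#define PG_PDE_PAT  0x1000      /* PAT bit in a 2/4 MB PDE */

static int
cache_bits(int pat_index, int is_pde)
{
	int pat_flag = is_pde ? PG_PDE_PAT : PG_PTE_PAT;
	int bits = 0;

	if (pat_index & 0x4)
		bits |= pat_flag;
	if (pat_index & 0x2)
		bits |= PG_NC_PCD;
	if (pat_index & 0x1)
		bits |= PG_NC_PWT;
	return (bits);
}

int
main(void)
{
	/* Index 2 is remapped to write-combining by pmap_init_pat() above. */
	printf("PTE bits for PAT index 2: %#x\n", cache_bits(2, 0));
	printf("PDE bits for PAT index 5: %#x\n", cache_bits(5, 1));
	return (0);
}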
  804 #ifdef SMP
  805 /*
  806  * For SMP, these functions have to use the IPI mechanism for coherence.
  807  *
  808  * N.B.: Before calling any of the following TLB invalidation functions,
  809  * the calling processor must ensure that all stores updating a non-
  810  * kernel page table are globally performed.  Otherwise, another
  811  * processor could cache an old, pre-update entry without being
  812  * invalidated.  This can happen one of two ways: (1) The pmap becomes
  813  * active on another processor after its pm_active field is checked by
  814  * one of the following functions but before a store updating the page
  815  * table is globally performed. (2) The pmap becomes active on another
  816  * processor before its pm_active field is checked but due to
  817  * speculative loads one of the following functions stills reads the
  818  * pmap as inactive on the other processor.
  819  * 
  820  * The kernel page table is exempt because its pm_active field is
  821  * immutable.  The kernel page table is always active on every
  822  * processor.
  823  */
  824 void
  825 pmap_invalidate_page(pmap_t pmap, vm_offset_t va)
  826 {
  827         u_int cpumask;
  828         u_int other_cpus;
  829 
  830         sched_pin();
  831         if (pmap == kernel_pmap || pmap->pm_active == all_cpus) {
  832                 invlpg(va);
  833                 smp_invlpg(va);
  834         } else {
  835                 cpumask = PCPU_GET(cpumask);
  836                 other_cpus = PCPU_GET(other_cpus);
  837                 if (pmap->pm_active & cpumask)
  838                         invlpg(va);
  839                 if (pmap->pm_active & other_cpus)
  840                         smp_masked_invlpg(pmap->pm_active & other_cpus, va);
  841         }
  842         sched_unpin();
  843 }
  844 
  845 void
  846 pmap_invalidate_range(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
  847 {
  848         u_int cpumask;
  849         u_int other_cpus;
  850         vm_offset_t addr;
  851 
  852         sched_pin();
  853         if (pmap == kernel_pmap || pmap->pm_active == all_cpus) {
  854                 for (addr = sva; addr < eva; addr += PAGE_SIZE)
  855                         invlpg(addr);
  856                 smp_invlpg_range(sva, eva);
  857         } else {
  858                 cpumask = PCPU_GET(cpumask);
  859                 other_cpus = PCPU_GET(other_cpus);
  860                 if (pmap->pm_active & cpumask)
  861                         for (addr = sva; addr < eva; addr += PAGE_SIZE)
  862                                 invlpg(addr);
  863                 if (pmap->pm_active & other_cpus)
  864                         smp_masked_invlpg_range(pmap->pm_active & other_cpus,
  865                             sva, eva);
  866         }
  867         sched_unpin();
  868 }
  869 
  870 void
  871 pmap_invalidate_all(pmap_t pmap)
  872 {
  873         u_int cpumask;
  874         u_int other_cpus;
  875 
  876         sched_pin();
  877         if (pmap == kernel_pmap || pmap->pm_active == all_cpus) {
  878                 invltlb();
  879                 smp_invltlb();
  880         } else {
  881                 cpumask = PCPU_GET(cpumask);
  882                 other_cpus = PCPU_GET(other_cpus);
  883                 if (pmap->pm_active & cpumask)
  884                         invltlb();
  885                 if (pmap->pm_active & other_cpus)
  886                         smp_masked_invltlb(pmap->pm_active & other_cpus);
  887         }
  888         sched_unpin();
  889 }
  890 
  891 void
  892 pmap_invalidate_cache(void)
  893 {
  894 
  895         sched_pin();
  896         wbinvd();
  897         smp_cache_flush();
  898         sched_unpin();
  899 }
  900 #else /* !SMP */
  901 /*
  902  * Normal, non-SMP, 486+ invalidation functions.
  903  * We inline these within pmap.c for speed.
  904  */
  905 PMAP_INLINE void
  906 pmap_invalidate_page(pmap_t pmap, vm_offset_t va)
  907 {
  908 
  909         if (pmap == kernel_pmap || pmap->pm_active)
  910                 invlpg(va);
  911 }
  912 
  913 PMAP_INLINE void
  914 pmap_invalidate_range(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
  915 {
  916         vm_offset_t addr;
  917 
  918         if (pmap == kernel_pmap || pmap->pm_active)
  919                 for (addr = sva; addr < eva; addr += PAGE_SIZE)
  920                         invlpg(addr);
  921 }
  922 
  923 PMAP_INLINE void
  924 pmap_invalidate_all(pmap_t pmap)
  925 {
  926 
  927         if (pmap == kernel_pmap || pmap->pm_active)
  928                 invltlb();
  929 }
  930 
  931 PMAP_INLINE void
  932 pmap_invalidate_cache(void)
  933 {
  934 
  935         wbinvd();
  936 }
  937 #endif /* !SMP */
  938 
  939 /*
  940  * Are we current address space or kernel?  N.B. We return FALSE when
  941  * a pmap's page table is in use because a kernel thread is borrowing
  942  * it.  The borrowed page table can change spontaneously, making any
  943  * dependence on its continued use subject to a race condition.
  944  */
  945 static __inline int
  946 pmap_is_current(pmap_t pmap)
  947 {
  948 
  949         return (pmap == kernel_pmap ||
  950                 (pmap == vmspace_pmap(curthread->td_proc->p_vmspace) &&
  951             (pmap->pm_pdir[PTDPTDI] & PG_FRAME) == (PTDpde[0] & PG_FRAME)));
  952 }
  953 
  954 /*
  955  * If the given pmap is not the current or kernel pmap, the returned pte must
  956  * be released by passing it to pmap_pte_release().
  957  */
  958 pt_entry_t *
  959 pmap_pte(pmap_t pmap, vm_offset_t va)
  960 {
  961         pd_entry_t newpf;
  962         pd_entry_t *pde;
  963 
  964         pde = pmap_pde(pmap, va);
  965         if (*pde & PG_PS)
  966                 return (pde);
  967         if (*pde != 0) {
  968                 /* are we current address space or kernel? */
  969                 if (pmap_is_current(pmap))
  970                         return (vtopte(va));
  971                 mtx_lock(&PMAP2mutex);
  972                 newpf = *pde & PG_FRAME;
  973                 if ((*PMAP2 & PG_FRAME) != newpf) {
  974                         *PMAP2 = newpf | PG_RW | PG_V | PG_A | PG_M;
  975                         pmap_invalidate_page(kernel_pmap, (vm_offset_t)PADDR2);
  976                 }
  977                 return (PADDR2 + (i386_btop(va) & (NPTEPG - 1)));
  978         }
  979         return (0);
  980 }
  981 
  982 /*
  983  * Releases a pte that was obtained from pmap_pte().  Be prepared for the pte
  984  * being NULL.
  985  */
  986 static __inline void
  987 pmap_pte_release(pt_entry_t *pte)
  988 {
  989 
  990         if ((pt_entry_t *)((vm_offset_t)pte & ~PAGE_MASK) == PADDR2)
  991                 mtx_unlock(&PMAP2mutex);
  992 }
  993 
  994 static __inline void
  995 invlcaddr(void *caddr)
  996 {
  997 
  998         invlpg((u_int)caddr);
  999 }
 1000 
 1001 /*
 1002  * Super fast pmap_pte routine best used when scanning
 1003  * the pv lists.  This eliminates many coarse-grained
 1004  * invltlb calls.  Note that many of the pv list
 1005  * scans are across different pmaps.  It is very wasteful
 1006  * to do an entire invltlb for checking a single mapping.
 1007  *
 1008  * If the given pmap is not the current pmap, vm_page_queue_mtx
 1009  * must be held and curthread pinned to a CPU.
 1010  */
 1011 static pt_entry_t *
 1012 pmap_pte_quick(pmap_t pmap, vm_offset_t va)
 1013 {
 1014         pd_entry_t newpf;
 1015         pd_entry_t *pde;
 1016 
 1017         pde = pmap_pde(pmap, va);
 1018         if (*pde & PG_PS)
 1019                 return (pde);
 1020         if (*pde != 0) {
 1021                 /* are we current address space or kernel? */
 1022                 if (pmap_is_current(pmap))
 1023                         return (vtopte(va));
 1024                 mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 1025                 KASSERT(curthread->td_pinned > 0, ("curthread not pinned"));
 1026                 newpf = *pde & PG_FRAME;
 1027                 if ((*PMAP1 & PG_FRAME) != newpf) {
 1028                         *PMAP1 = newpf | PG_RW | PG_V | PG_A | PG_M;
 1029 #ifdef SMP
 1030                         PMAP1cpu = PCPU_GET(cpuid);
 1031 #endif
 1032                         invlcaddr(PADDR1);
 1033                         PMAP1changed++;
 1034                 } else
 1035 #ifdef SMP
 1036                 if (PMAP1cpu != PCPU_GET(cpuid)) {
 1037                         PMAP1cpu = PCPU_GET(cpuid);
 1038                         invlcaddr(PADDR1);
 1039                         PMAP1changedcpu++;
 1040                 } else
 1041 #endif
 1042                         PMAP1unchanged++;
 1043                 return (PADDR1 + (i386_btop(va) & (NPTEPG - 1)));
 1044         }
 1045         return (0);
 1046 }
 1047 
 1048 /*
 1049  *      Routine:        pmap_extract
 1050  *      Function:
 1051  *              Extract the physical page address associated
 1052  *              with the given map/virtual_address pair.
 1053  */
 1054 vm_paddr_t 
 1055 pmap_extract(pmap_t pmap, vm_offset_t va)
 1056 {
 1057         vm_paddr_t rtval;
 1058         pt_entry_t *pte;
 1059         pd_entry_t pde;
 1060 
 1061         rtval = 0;
 1062         PMAP_LOCK(pmap);
 1063         pde = pmap->pm_pdir[va >> PDRSHIFT];
 1064         if (pde != 0) {
 1065                 if ((pde & PG_PS) != 0)
 1066                         rtval = (pde & PG_PS_FRAME) | (va & PDRMASK);
 1067                 else {
 1068                         pte = pmap_pte(pmap, va);
 1069                         rtval = (*pte & PG_FRAME) | (va & PAGE_MASK);
 1070                         pmap_pte_release(pte);
 1071                 }
 1072         }
 1073         PMAP_UNLOCK(pmap);
 1074         return (rtval);
 1075 }
 1076 
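pmap_extract() composes a physical address from the frame bits of the PDE (for a 4 MB mapping) or of the PTE (for a 4 KB mapping) plus the low bits of the virtual address. A worked arithmetic sketch with made-up table entries, assuming the non-PAE masks PG_FRAME = 0xfffff000 and PG_PS_FRAME = 0xffc00000:

#include <stdio.h>
#include <stdint.h>

#define PG_PS        0x080u
#define PG_FRAME     0xfffff000u        /* frame bits in a 4 KB PTE (non-PAE) */
#define PG_PS_FRAME  0xffc00000u        /* frame bits in a 4 MB PDE (non-PAE) */
#define PAGE_MASK    0x00000fffu
#define PDRMASK      0x003fffffu

int
main(void)
{
	uint32_t va = 0xc0a1b123;

	/* Case 1: va is covered by a 4 MB superpage mapping. */
	uint32_t pde = 0x04000000u | PG_PS | 0x063;     /* frame 0x04000000, PS|M|A|RW|V */
	printf("4MB: pa = %#010x\n", (pde & PG_PS_FRAME) | (va & PDRMASK));

	/* Case 2: va is covered by an ordinary 4 KB mapping. */
	uint32_t pte = 0x01234000u | 0x063;             /* frame 0x01234000, M|A|RW|V */
	printf("4KB: pa = %#010x\n", (pte & PG_FRAME) | (va & PAGE_MASK));
	return (0);
}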
 1077 /*
 1078  *      Routine:        pmap_extract_and_hold
 1079  *      Function:
 1080  *              Atomically extract and hold the physical page
 1081  *              with the given pmap and virtual address pair
 1082  *              if that mapping permits the given protection.
 1083  */
 1084 vm_page_t
 1085 pmap_extract_and_hold(pmap_t pmap, vm_offset_t va, vm_prot_t prot)
 1086 {
 1087         pd_entry_t pde;
 1088         pt_entry_t pte;
 1089         vm_page_t m;
 1090 
 1091         m = NULL;
 1092         vm_page_lock_queues();
 1093         PMAP_LOCK(pmap);
 1094         pde = *pmap_pde(pmap, va);
 1095         if (pde != 0) {
 1096                 if (pde & PG_PS) {
 1097                         if ((pde & PG_RW) || (prot & VM_PROT_WRITE) == 0) {
 1098                                 m = PHYS_TO_VM_PAGE((pde & PG_PS_FRAME) |
 1099                                     (va & PDRMASK));
 1100                                 vm_page_hold(m);
 1101                         }
 1102                 } else {
 1103                         sched_pin();
 1104                         pte = *pmap_pte_quick(pmap, va);
 1105                         if (pte != 0 &&
 1106                             ((pte & PG_RW) || (prot & VM_PROT_WRITE) == 0)) {
 1107                                 m = PHYS_TO_VM_PAGE(pte & PG_FRAME);
 1108                                 vm_page_hold(m);
 1109                         }
 1110                         sched_unpin();
 1111                 }
 1112         }
 1113         vm_page_unlock_queues();
 1114         PMAP_UNLOCK(pmap);
 1115         return (m);
 1116 }
 1117 
 1118 /***************************************************
 1119  * Low level mapping routines.....
 1120  ***************************************************/
 1121 
 1122 /*
 1123  * Add a wired page to the kva.
 1124  * Note: not SMP coherent.
 1125  */
 1126 PMAP_INLINE void 
 1127 pmap_kenter(vm_offset_t va, vm_paddr_t pa)
 1128 {
 1129         pt_entry_t *pte;
 1130 
 1131         pte = vtopte(va);
 1132         pte_store(pte, pa | PG_RW | PG_V | pgeflag);
 1133 }
 1134 
 1135 PMAP_INLINE void 
 1136 pmap_kenter_attr(vm_offset_t va, vm_paddr_t pa, int mode)
 1137 {
 1138         pt_entry_t *pte;
 1139 
 1140         pte = vtopte(va);
 1141         pte_store(pte, pa | PG_RW | PG_V | pgeflag | pmap_cache_bits(mode, 0));
 1142 }
 1143 
 1144 /*
 1145  * Remove a page from the kernel pagetables.
 1146  * Note: not SMP coherent.
 1147  */
 1148 PMAP_INLINE void
 1149 pmap_kremove(vm_offset_t va)
 1150 {
 1151         pt_entry_t *pte;
 1152 
 1153         pte = vtopte(va);
 1154         pte_clear(pte);
 1155 }
 1156 
 1157 /*
 1158  *      Used to map a range of physical addresses into kernel
 1159  *      virtual address space.
 1160  *
 1161  *      The value passed in '*virt' is a suggested virtual address for
 1162  *      the mapping. Architectures which can support a direct-mapped
 1163  *      physical to virtual region can return the appropriate address
 1164  *      within that region, leaving '*virt' unchanged. Other
 1165  *      architectures should map the pages starting at '*virt' and
 1166  *      update '*virt' with the first usable address after the mapped
 1167  *      region.
 1168  */
 1169 vm_offset_t
 1170 pmap_map(vm_offset_t *virt, vm_paddr_t start, vm_paddr_t end, int prot)
 1171 {
 1172         vm_offset_t va, sva;
 1173 
 1174         va = sva = *virt;
 1175         while (start < end) {
 1176                 pmap_kenter(va, start);
 1177                 va += PAGE_SIZE;
 1178                 start += PAGE_SIZE;
 1179         }
 1180         pmap_invalidate_range(kernel_pmap, sva, va);
 1181         *virt = va;
 1182         return (sva);
 1183 }
 1184 
 1185 
 1186 /*
 1187  * Add a list of wired pages to the kva
 1188  * this routine is only used for temporary
 1189  * kernel mappings that do not need to have
 1190  * page modification or references recorded.
 1191  * Note that old mappings are simply written
 1192  * over.  The page *must* be wired.
 1193  * Note: SMP coherent.  Uses a ranged shootdown IPI.
 1194  */
 1195 void
 1196 pmap_qenter(vm_offset_t sva, vm_page_t *ma, int count)
 1197 {
 1198         pt_entry_t *endpte, oldpte, *pte;
 1199 
 1200         oldpte = 0;
 1201         pte = vtopte(sva);
 1202         endpte = pte + count;
 1203         while (pte < endpte) {
 1204                 oldpte |= *pte;
 1205                 pte_store(pte, VM_PAGE_TO_PHYS(*ma) | pgeflag | PG_RW | PG_V);
 1206                 pte++;
 1207                 ma++;
 1208         }
 1209         if ((oldpte & PG_V) != 0)
 1210                 pmap_invalidate_range(kernel_pmap, sva, sva + count *
 1211                     PAGE_SIZE);
 1212 }
 1213 
 1214 /*
 1215  * This routine tears out page mappings from the
 1216  * kernel -- it is meant only for temporary mappings.
 1217  * Note: SMP coherent.  Uses a ranged shootdown IPI.
 1218  */
 1219 void
 1220 pmap_qremove(vm_offset_t sva, int count)
 1221 {
 1222         vm_offset_t va;
 1223 
 1224         va = sva;
 1225         while (count-- > 0) {
 1226                 pmap_kremove(va);
 1227                 va += PAGE_SIZE;
 1228         }
 1229         pmap_invalidate_range(kernel_pmap, sva, va);
 1230 }
 1231 
 1232 /***************************************************
 1233  * Page table page management routines.....
 1234  ***************************************************/
 1235 static __inline void
 1236 pmap_free_zero_pages(vm_page_t free)
 1237 {
 1238         vm_page_t m;
 1239 
 1240         while (free != NULL) {
 1241                 m = free;
 1242                 free = m->right;
 1243                 /* Preserve the page's PG_ZERO setting. */
 1244                 vm_page_free_toq(m);
 1245         }
 1246 }
 1247 
 1248 /*
 1249  * Schedule the specified unused page table page to be freed.  Specifically,
 1250  * add the page to the specified list of pages that will be released to the
 1251  * physical memory manager after the TLB has been updated.
 1252  */
 1253 static __inline void
 1254 pmap_add_delayed_free_list(vm_page_t m, vm_page_t *free, boolean_t set_PG_ZERO)
 1255 {
 1256 
 1257         if (set_PG_ZERO)
 1258                 m->flags |= PG_ZERO;
 1259         else
 1260                 m->flags &= ~PG_ZERO;
 1261         m->right = *free;
 1262         *free = m;
 1263 }
 1264 
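pmap_add_delayed_free_list() and pmap_free_zero_pages() chain page-table pages into a singly linked to-be-freed list through the pages' own right pointers, so nothing needs to be allocated while a TLB shootdown is still pending. A minimal userland model of that intrusive chaining; struct fake_page and its fields are invented for illustration:

#include <stdio.h>
#include <stddef.h>

struct fake_page {
	int               id;
	struct fake_page *right;        /* reused as the "free later" link */
};

/* Prepend a page to the caller's delayed-free list. */
static void
add_delayed_free(struct fake_page *m, struct fake_page **free)
{
	m->right = *free;
	*free = m;
}

/* Walk and release the list once the TLB shootdown has completed. */
static void
free_pages(struct fake_page *free)
{
	struct fake_page *m;

	while (free != NULL) {
		m = free;
		free = m->right;
		printf("releasing page %d\n", m->id);   /* vm_page_free_toq() in the kernel */
	}
}

int
main(void)
{
	struct fake_page pages[3] = { { 0, NULL }, { 1, NULL }, { 2, NULL } };
	struct fake_page *freelist = NULL;
	int i;

	for (i = 0; i < 3; i++)
		add_delayed_free(&pages[i], &freelist);
	free_pages(freelist);           /* releases page 2, 1, 0 (LIFO order) */
	return (0);
}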
 1265 /*
 1266  * Inserts the specified page table page into the specified pmap's collection
 1267  * of idle page table pages.  Each of a pmap's page table pages is responsible
 1268  * for mapping a distinct range of virtual addresses.  The pmap's collection is
 1269  * ordered by this virtual address range.
 1270  */
 1271 static void
 1272 pmap_insert_pt_page(pmap_t pmap, vm_page_t mpte)
 1273 {
 1274         vm_page_t root;
 1275 
 1276         PMAP_LOCK_ASSERT(pmap, MA_OWNED);
 1277         root = pmap->pm_root;
 1278         if (root == NULL) {
 1279                 mpte->left = NULL;
 1280                 mpte->right = NULL;
 1281         } else {
 1282                 root = vm_page_splay(mpte->pindex, root);
 1283                 if (mpte->pindex < root->pindex) {
 1284                         mpte->left = root->left;
 1285                         mpte->right = root;
 1286                         root->left = NULL;
 1287                 } else if (mpte->pindex == root->pindex)
 1288                         panic("pmap_insert_pt_page: pindex already inserted");
 1289                 else {
 1290                         mpte->right = root->right;
 1291                         mpte->left = root;
 1292                         root->right = NULL;
 1293                 }
 1294         }
 1295         pmap->pm_root = mpte;
 1296 }
 1297 
 1298 /*
 1299  * Looks for a page table page mapping the specified virtual address in the
 1300  * specified pmap's collection of idle page table pages.  Returns NULL if there
 1301  * is no page table page corresponding to the specified virtual address.
 1302  */
 1303 static vm_page_t
 1304 pmap_lookup_pt_page(pmap_t pmap, vm_offset_t va)
 1305 {
 1306         vm_page_t mpte;
 1307         vm_pindex_t pindex = va >> PDRSHIFT;
 1308 
 1309         PMAP_LOCK_ASSERT(pmap, MA_OWNED);
 1310         if ((mpte = pmap->pm_root) != NULL && mpte->pindex != pindex) {
 1311                 mpte = vm_page_splay(pindex, mpte);
 1312                 if ((pmap->pm_root = mpte)->pindex != pindex)
 1313                         mpte = NULL;
 1314         }
 1315         return (mpte);
 1316 }
 1317 
 1318 /*
 1319  * Removes the specified page table page from the specified pmap's collection
 1320  * of idle page table pages.  The specified page table page must be a member of
 1321  * the pmap's collection.
 1322  */
 1323 static void
 1324 pmap_remove_pt_page(pmap_t pmap, vm_page_t mpte)
 1325 {
 1326         vm_page_t root;
 1327 
 1328         PMAP_LOCK_ASSERT(pmap, MA_OWNED);
 1329         if (mpte != pmap->pm_root)
 1330                 vm_page_splay(mpte->pindex, pmap->pm_root);
 1331         if (mpte->left == NULL)
 1332                 root = mpte->right;
 1333         else {
 1334                 root = vm_page_splay(mpte->pindex, mpte->left);
 1335                 root->right = mpte->right;
 1336         }
 1337         pmap->pm_root = root;
 1338 }
 1339 
 1340 /*
  1341  * This routine decrements a page table page's wire count; when the
  1342  * count drops to zero, the page is unmapped and queued to be freed.
 1343  */
 1344 static __inline int
 1345 pmap_unwire_pte_hold(pmap_t pmap, vm_page_t m, vm_page_t *free)
 1346 {
 1347 
 1348         --m->wire_count;
 1349         if (m->wire_count == 0)
 1350                 return _pmap_unwire_pte_hold(pmap, m, free);
 1351         else
 1352                 return 0;
 1353 }
 1354 
 1355 static int 
 1356 _pmap_unwire_pte_hold(pmap_t pmap, vm_page_t m, vm_page_t *free)
 1357 {
 1358         vm_offset_t pteva;
 1359 
 1360         /*
 1361          * unmap the page table page
 1362          */
 1363         pmap->pm_pdir[m->pindex] = 0;
 1364         --pmap->pm_stats.resident_count;
 1365 
 1366         /*
 1367          * This is a release store so that the ordinary store unmapping
 1368          * the page table page is globally performed before TLB shoot-
 1369          * down is begun.
 1370          */
 1371         atomic_subtract_rel_int(&cnt.v_wire_count, 1);
 1372 
 1373         /*
 1374          * Do an invltlb to make the invalidated mapping
 1375          * take effect immediately.
 1376          */
 1377         pteva = VM_MAXUSER_ADDRESS + i386_ptob(m->pindex);
 1378         pmap_invalidate_page(pmap, pteva);
 1379 
 1380         /* 
 1381          * Put page on a list so that it is released after
 1382          * *ALL* TLB shootdown is done
 1383          */
 1384         pmap_add_delayed_free_list(m, free, TRUE);
 1385 
 1386         return 1;
 1387 }
 1388 
 1389 /*
 1390  * After removing a page table entry, this routine is used to
 1391  * conditionally free the page, and manage the hold/wire counts.
 1392  */
 1393 static int
 1394 pmap_unuse_pt(pmap_t pmap, vm_offset_t va, vm_page_t *free)
 1395 {
 1396         pd_entry_t ptepde;
 1397         vm_page_t mpte;
 1398 
 1399         if (va >= VM_MAXUSER_ADDRESS)
 1400                 return 0;
 1401         ptepde = *pmap_pde(pmap, va);
 1402         mpte = PHYS_TO_VM_PAGE(ptepde & PG_FRAME);
 1403         return pmap_unwire_pte_hold(pmap, mpte, free);
 1404 }
 1405 
 1406 void
 1407 pmap_pinit0(pmap_t pmap)
 1408 {
 1409 
 1410         PMAP_LOCK_INIT(pmap);
 1411         pmap->pm_pdir = (pd_entry_t *)(KERNBASE + (vm_offset_t)IdlePTD);
 1412 #ifdef PAE
 1413         pmap->pm_pdpt = (pdpt_entry_t *)(KERNBASE + (vm_offset_t)IdlePDPT);
 1414 #endif
 1415         pmap->pm_root = NULL;
 1416         pmap->pm_active = 0;
 1417         PCPU_SET(curpmap, pmap);
 1418         TAILQ_INIT(&pmap->pm_pvchunk);
 1419         bzero(&pmap->pm_stats, sizeof pmap->pm_stats);
 1420         mtx_lock_spin(&allpmaps_lock);
 1421         LIST_INSERT_HEAD(&allpmaps, pmap, pm_list);
 1422         mtx_unlock_spin(&allpmaps_lock);
 1423 }
 1424 
 1425 /*
 1426  * Initialize a preallocated and zeroed pmap structure,
 1427  * such as one in a vmspace structure.
 1428  */
 1429 int
 1430 pmap_pinit(pmap_t pmap)
 1431 {
 1432         vm_page_t m, ptdpg[NPGPTD];
 1433         vm_paddr_t pa;
 1434         static int color;
 1435         int i;
 1436 
 1437         PMAP_LOCK_INIT(pmap);
 1438 
 1439         /*
 1440          * No need to allocate page table space yet but we do need a valid
 1441          * page directory table.
 1442          */
 1443         if (pmap->pm_pdir == NULL) {
 1444                 pmap->pm_pdir = (pd_entry_t *)kmem_alloc_nofault(kernel_map,
 1445                     NBPTD);
 1446 
 1447                 if (pmap->pm_pdir == NULL) {
 1448                         PMAP_LOCK_DESTROY(pmap);
 1449                         return (0);
 1450                 }
 1451 #ifdef PAE
 1452                 pmap->pm_pdpt = uma_zalloc(pdptzone, M_WAITOK | M_ZERO);
 1453                 KASSERT(((vm_offset_t)pmap->pm_pdpt &
 1454                     ((NPGPTD * sizeof(pdpt_entry_t)) - 1)) == 0,
 1455                     ("pmap_pinit: pdpt misaligned"));
 1456                 KASSERT(pmap_kextract((vm_offset_t)pmap->pm_pdpt) < (4ULL<<30),
 1457                     ("pmap_pinit: pdpt above 4g"));
 1458 #endif
 1459                 pmap->pm_root = NULL;
 1460         }
 1461         KASSERT(pmap->pm_root == NULL,
 1462             ("pmap_pinit: pmap has reserved page table page(s)"));
 1463 
 1464         /*
 1465          * allocate the page directory page(s)
 1466          */
 1467         for (i = 0; i < NPGPTD;) {
 1468                 m = vm_page_alloc(NULL, color++,
 1469                     VM_ALLOC_NORMAL | VM_ALLOC_NOOBJ | VM_ALLOC_WIRED |
 1470                     VM_ALLOC_ZERO);
 1471                 if (m == NULL)
 1472                         VM_WAIT;
 1473                 else {
 1474                         ptdpg[i++] = m;
 1475                 }
 1476         }
 1477 
 1478         pmap_qenter((vm_offset_t)pmap->pm_pdir, ptdpg, NPGPTD);
 1479 
 1480         for (i = 0; i < NPGPTD; i++) {
 1481                 if ((ptdpg[i]->flags & PG_ZERO) == 0)
 1482                         bzero(pmap->pm_pdir + (i * NPDEPG), PAGE_SIZE);
 1483         }
 1484 
 1485         mtx_lock_spin(&allpmaps_lock);
 1486         LIST_INSERT_HEAD(&allpmaps, pmap, pm_list);
 1487         mtx_unlock_spin(&allpmaps_lock);
 1488         /* Wire in kernel global address entries. */
 1489         bcopy(PTD + KPTDI, pmap->pm_pdir + KPTDI, nkpt * sizeof(pd_entry_t));
 1490 
 1491         /* install self-referential address mapping entry(s) */
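              /*
               * The entries at PTDPTDI point back at the page directory
               * page(s) themselves, so that while this pmap is active all
               * of its page table pages appear in one contiguous virtual
               * window (the PTmap region indexed by vtopte()).
               */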
 1492         for (i = 0; i < NPGPTD; i++) {
 1493                 pa = VM_PAGE_TO_PHYS(ptdpg[i]);
 1494                 pmap->pm_pdir[PTDPTDI + i] = pa | PG_V | PG_RW | PG_A | PG_M;
 1495 #ifdef PAE
 1496                 pmap->pm_pdpt[i] = pa | PG_V;
 1497 #endif
 1498         }
 1499 
 1500         pmap->pm_active = 0;
 1501         TAILQ_INIT(&pmap->pm_pvchunk);
 1502         bzero(&pmap->pm_stats, sizeof pmap->pm_stats);
 1503 
 1504         return (1);
 1505 }
 1506 
 1507 /*
 1508  * This routine is called when the page table page for the given page
 1509  * directory index is not mapped; it allocates and installs a new one.
 1510  */
 1511 static vm_page_t
 1512 _pmap_allocpte(pmap_t pmap, unsigned ptepindex, int flags)
 1513 {
 1514         vm_paddr_t ptepa;
 1515         vm_page_t m;
 1516 
 1517         KASSERT((flags & (M_NOWAIT | M_WAITOK)) == M_NOWAIT ||
 1518             (flags & (M_NOWAIT | M_WAITOK)) == M_WAITOK,
 1519             ("_pmap_allocpte: flags is neither M_NOWAIT nor M_WAITOK"));
 1520 
 1521         /*
 1522          * Allocate a page table page.
 1523          */
 1524         if ((m = vm_page_alloc(NULL, ptepindex, VM_ALLOC_NOOBJ |
 1525             VM_ALLOC_WIRED | VM_ALLOC_ZERO)) == NULL) {
 1526                 if (flags & M_WAITOK) {
 1527                         PMAP_UNLOCK(pmap);
 1528                         vm_page_unlock_queues();
 1529                         VM_WAIT;
 1530                         vm_page_lock_queues();
 1531                         PMAP_LOCK(pmap);
 1532                 }
 1533 
 1534                 /*
 1535                  * Indicate the need to retry.  While waiting, the page table
 1536                  * page may have been allocated.
 1537                  */
 1538                 return (NULL);
 1539         }
 1540         if ((m->flags & PG_ZERO) == 0)
 1541                 pmap_zero_page(m);
 1542 
 1543         /*
 1544          * Map the page table page into the process address space, if
 1545          * it isn't already there.
 1546          */
 1547 
 1548         pmap->pm_stats.resident_count++;
 1549 
 1550         ptepa = VM_PAGE_TO_PHYS(m);
 1551         pmap->pm_pdir[ptepindex] =
 1552                 (pd_entry_t) (ptepa | PG_U | PG_RW | PG_V | PG_A | PG_M);
 1553 
 1554         return m;
 1555 }
 1556 
 1557 static vm_page_t
 1558 pmap_allocpte(pmap_t pmap, vm_offset_t va, int flags)
 1559 {
 1560         unsigned ptepindex;
 1561         pd_entry_t ptepa;
 1562         vm_page_t m;
 1563 
 1564         KASSERT((flags & (M_NOWAIT | M_WAITOK)) == M_NOWAIT ||
 1565             (flags & (M_NOWAIT | M_WAITOK)) == M_WAITOK,
 1566             ("pmap_allocpte: flags is neither M_NOWAIT nor M_WAITOK"));
 1567 
 1568         /*
 1569          * Calculate the page table page index.
 1570          */
 1571         ptepindex = va >> PDRSHIFT;
 1572 retry:
 1573         /*
 1574          * Get the page directory entry
 1575          */
 1576         ptepa = pmap->pm_pdir[ptepindex];
 1577 
 1578         /*
 1579          * This supports switching from a 4MB page to a
 1580          * normal 4K page.
 1581          */
 1582         if (ptepa & PG_PS) {
 1583                 (void)pmap_demote_pde(pmap, &pmap->pm_pdir[ptepindex], va);
 1584                 ptepa = pmap->pm_pdir[ptepindex];
 1585         }
 1586 
 1587         /*
 1588          * If the page table page is mapped, we just increment its
 1589          * wire count and return it.
 1590          */
 1591         if (ptepa) {
 1592                 m = PHYS_TO_VM_PAGE(ptepa & PG_FRAME);
 1593                 m->wire_count++;
 1594         } else {
 1595                 /*
 1596                  * Here if the pte page isn't mapped, or if it has
 1597                  * been deallocated. 
 1598                  */
 1599                 m = _pmap_allocpte(pmap, ptepindex, flags);
 1600                 if (m == NULL && (flags & M_WAITOK))
 1601                         goto retry;
 1602         }
 1603         return (m);
 1604 }
 1605 
 1606 
 1607 /***************************************************
 1608  * Pmap allocation/deallocation routines.
 1609  ***************************************************/
 1610 
 1611 #ifdef SMP
 1612 /*
 1613  * Deal with a SMP shootdown of other users of the pmap that we are
 1614  * trying to dispose of.  This can be a bit hairy.
 1615  */
 1616 static u_int *lazymask;
 1617 static u_int lazyptd;
 1618 static volatile u_int lazywait;
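      /*
       * lazyptd holds the physical %cr3 value of the pmap being torn down,
       * lazymask points at that pmap's pm_active CPU mask, and lazywait is
       * set by a target CPU once it has switched away from the pmap.
       */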
 1619 
 1620 void pmap_lazyfix_action(void);
 1621 
 1622 void
 1623 pmap_lazyfix_action(void)
 1624 {
 1625         u_int mymask = PCPU_GET(cpumask);
 1626 
 1627 #ifdef COUNT_IPIS
 1628         (*ipi_lazypmap_counts[PCPU_GET(cpuid)])++;
 1629 #endif
 1630         if (rcr3() == lazyptd)
 1631                 load_cr3(PCPU_GET(curpcb)->pcb_cr3);
 1632         atomic_clear_int(lazymask, mymask);
 1633         atomic_store_rel_int(&lazywait, 1);
 1634 }
 1635 
 1636 static void
 1637 pmap_lazyfix_self(u_int mymask)
 1638 {
 1639 
 1640         if (rcr3() == lazyptd)
 1641                 load_cr3(PCPU_GET(curpcb)->pcb_cr3);
 1642         atomic_clear_int(lazymask, mymask);
 1643 }
 1644 
 1645 
 1646 static void
 1647 pmap_lazyfix(pmap_t pmap)
 1648 {
 1649         u_int mymask;
 1650         u_int mask;
 1651         u_int spins;
 1652 
 1653         while ((mask = pmap->pm_active) != 0) {
 1654                 spins = 50000000;
 1655                 mask = mask & -mask;    /* Find least significant set bit */
 1656                 mtx_lock_spin(&smp_ipi_mtx);
 1657 #ifdef PAE
 1658                 lazyptd = vtophys(pmap->pm_pdpt);
 1659 #else
 1660                 lazyptd = vtophys(pmap->pm_pdir);
 1661 #endif
 1662                 mymask = PCPU_GET(cpumask);
 1663                 if (mask == mymask) {
 1664                         lazymask = &pmap->pm_active;
 1665                         pmap_lazyfix_self(mymask);
 1666                 } else {
 1667                         atomic_store_rel_int((u_int *)&lazymask,
 1668                             (u_int)&pmap->pm_active);
 1669                         atomic_store_rel_int(&lazywait, 0);
 1670                         ipi_selected(mask, IPI_LAZYPMAP);
 1671                         while (lazywait == 0) {
 1672                                 ia32_pause();
 1673                                 if (--spins == 0)
 1674                                         break;
 1675                         }
 1676                 }
 1677                 mtx_unlock_spin(&smp_ipi_mtx);
 1678                 if (spins == 0)
 1679                         printf("pmap_lazyfix: spun for 50000000\n");
 1680         }
 1681 }
 1682 
 1683 #else   /* SMP */
 1684 
 1685 /*
 1686  * Cleaning up on a uniprocessor is easy.  For various reasons we are
 1687  * unlikely ever to execute this code: the cleanup is deferred until
 1688  * the parent does a wait(2), which means that another userland process
 1689  * has run by then.
 1690  */
 1691 static void
 1692 pmap_lazyfix(pmap_t pmap)
 1693 {
 1694         u_int cr3;
 1695 
 1696         cr3 = vtophys(pmap->pm_pdir);
 1697         if (cr3 == rcr3()) {
 1698                 load_cr3(PCPU_GET(curpcb)->pcb_cr3);
 1699                 pmap->pm_active &= ~(PCPU_GET(cpumask));
 1700         }
 1701 }
 1702 #endif  /* SMP */
 1703 
 1704 /*
 1705  * Release any resources held by the given physical map.
 1706  * Called when a pmap initialized by pmap_pinit is being released.
 1707  * Should only be called if the map contains no valid mappings.
 1708  */
 1709 void
 1710 pmap_release(pmap_t pmap)
 1711 {
 1712         vm_page_t m, ptdpg[NPGPTD];
 1713         int i;
 1714 
 1715         KASSERT(pmap->pm_stats.resident_count == 0,
 1716             ("pmap_release: pmap resident count %ld != 0",
 1717             pmap->pm_stats.resident_count));
 1718         KASSERT(pmap->pm_root == NULL,
 1719             ("pmap_release: pmap has reserved page table page(s)"));
 1720 
 1721         pmap_lazyfix(pmap);
 1722         mtx_lock_spin(&allpmaps_lock);
 1723         LIST_REMOVE(pmap, pm_list);
 1724         mtx_unlock_spin(&allpmaps_lock);
 1725 
 1726         for (i = 0; i < NPGPTD; i++)
 1727                 ptdpg[i] = PHYS_TO_VM_PAGE(pmap->pm_pdir[PTDPTDI + i] &
 1728                     PG_FRAME);
 1729 
 1730         bzero(pmap->pm_pdir + PTDPTDI, (nkpt + NPGPTD) *
 1731             sizeof(*pmap->pm_pdir));
 1732 
 1733         pmap_qremove((vm_offset_t)pmap->pm_pdir, NPGPTD);
 1734 
 1735         for (i = 0; i < NPGPTD; i++) {
 1736                 m = ptdpg[i];
 1737 #ifdef PAE
 1738                 KASSERT(VM_PAGE_TO_PHYS(m) == (pmap->pm_pdpt[i] & PG_FRAME),
 1739                     ("pmap_release: got wrong ptd page"));
 1740 #endif
 1741                 m->wire_count--;
 1742                 atomic_subtract_int(&cnt.v_wire_count, 1);
 1743                 vm_page_free_zero(m);
 1744         }
 1745         PMAP_LOCK_DESTROY(pmap);
 1746 }
 1747 
 1748 static int
 1749 kvm_size(SYSCTL_HANDLER_ARGS)
 1750 {
 1751         unsigned long ksize = VM_MAX_KERNEL_ADDRESS - KERNBASE;
 1752 
 1753         return sysctl_handle_long(oidp, &ksize, 0, req);
 1754 }
 1755 SYSCTL_PROC(_vm, OID_AUTO, kvm_size, CTLTYPE_LONG|CTLFLAG_RD, 
 1756     0, 0, kvm_size, "IU", "Size of KVM");
 1757 
 1758 static int
 1759 kvm_free(SYSCTL_HANDLER_ARGS)
 1760 {
 1761         unsigned long kfree = VM_MAX_KERNEL_ADDRESS - kernel_vm_end;
 1762 
 1763         return sysctl_handle_long(oidp, &kfree, 0, req);
 1764 }
 1765 SYSCTL_PROC(_vm, OID_AUTO, kvm_free, CTLTYPE_LONG|CTLFLAG_RD, 
 1766     0, 0, kvm_free, "IU", "Amount of KVM free");
 1767 
 1768 /*
 1769  * grow the number of kernel page table entries, if needed
 1770  */
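      /*
       * Each iteration below maps one new page table page, extending the
       * kernel virtual address space by NPTEPG * PAGE_SIZE (4MB without PAE,
       * 2MB with it).  The new PDE is copied into every pmap on the allpmaps
       * list because each process page directory carries its own copy of the
       * kernel's PDEs.
       */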
 1771 void
 1772 pmap_growkernel(vm_offset_t addr)
 1773 {
 1774         struct pmap *pmap;
 1775         vm_paddr_t ptppaddr;
 1776         vm_page_t nkpg;
 1777         pd_entry_t newpdir;
 1778         pt_entry_t *pde;
 1779 
 1780         mtx_assert(&kernel_map->system_mtx, MA_OWNED);
 1781         if (kernel_vm_end == 0) {
 1782                 kernel_vm_end = KERNBASE;
 1783                 nkpt = 0;
 1784                 while (pdir_pde(PTD, kernel_vm_end)) {
 1785                         kernel_vm_end = (kernel_vm_end + PAGE_SIZE * NPTEPG) & ~(PAGE_SIZE * NPTEPG - 1);
 1786                         nkpt++;
 1787                         if (kernel_vm_end - 1 >= kernel_map->max_offset) {
 1788                                 kernel_vm_end = kernel_map->max_offset;
 1789                                 break;
 1790                         }
 1791                 }
 1792         }
 1793         addr = roundup2(addr, PAGE_SIZE * NPTEPG);
 1794         if (addr - 1 >= kernel_map->max_offset)
 1795                 addr = kernel_map->max_offset;
 1796         while (kernel_vm_end < addr) {
 1797                 if (pdir_pde(PTD, kernel_vm_end)) {
 1798                         kernel_vm_end = (kernel_vm_end + PAGE_SIZE * NPTEPG) & ~(PAGE_SIZE * NPTEPG - 1);
 1799                         if (kernel_vm_end - 1 >= kernel_map->max_offset) {
 1800                                 kernel_vm_end = kernel_map->max_offset;
 1801                                 break;
 1802                         }
 1803                         continue;
 1804                 }
 1805 
 1806                 nkpg = vm_page_alloc(NULL, kernel_vm_end >> PDRSHIFT,
 1807                     VM_ALLOC_INTERRUPT | VM_ALLOC_NOOBJ | VM_ALLOC_WIRED |
 1808                     VM_ALLOC_ZERO);
 1809                 if (nkpg == NULL)
 1810                         panic("pmap_growkernel: no memory to grow kernel");
 1811 
 1812                 nkpt++;
 1813 
 1814                 if ((nkpg->flags & PG_ZERO) == 0)
 1815                         pmap_zero_page(nkpg);
 1816                 ptppaddr = VM_PAGE_TO_PHYS(nkpg);
 1817                 newpdir = (pd_entry_t) (ptppaddr | PG_V | PG_RW | PG_A | PG_M);
 1818                 pdir_pde(PTD, kernel_vm_end) = newpdir;
 1819 
 1820                 mtx_lock_spin(&allpmaps_lock);
 1821                 LIST_FOREACH(pmap, &allpmaps, pm_list) {
 1822                         pde = pmap_pde(pmap, kernel_vm_end);
 1823                         pde_store(pde, newpdir);
 1824                 }
 1825                 mtx_unlock_spin(&allpmaps_lock);
 1826                 kernel_vm_end = (kernel_vm_end + PAGE_SIZE * NPTEPG) & ~(PAGE_SIZE * NPTEPG - 1);
 1827                 if (kernel_vm_end - 1 >= kernel_map->max_offset) {
 1828                         kernel_vm_end = kernel_map->max_offset;
 1829                         break;
 1830                 }
 1831         }
 1832 }
 1833 
 1834 
 1835 /***************************************************
 1836  * page management routines.
 1837  ***************************************************/
 1838 
 1839 CTASSERT(sizeof(struct pv_chunk) == PAGE_SIZE);
 1840 CTASSERT(_NPCM == 11);
 1841 
 1842 static __inline struct pv_chunk *
 1843 pv_to_chunk(pv_entry_t pv)
 1844 {
 1845 
 1846         return (struct pv_chunk *)((uintptr_t)pv & ~(uintptr_t)PAGE_MASK);
 1847 }
 1848 
 1849 #define PV_PMAP(pv) (pv_to_chunk(pv)->pc_pmap)
 1850 
 1851 #define PC_FREE0_9      0xfffffffful    /* Free values for index 0 through 9 */
 1852 #define PC_FREE10       0x0000fffful    /* Free values for index 10 */
 1853 
 1854 static uint32_t pc_freemask[11] = {
 1855         PC_FREE0_9, PC_FREE0_9, PC_FREE0_9,
 1856         PC_FREE0_9, PC_FREE0_9, PC_FREE0_9,
 1857         PC_FREE0_9, PC_FREE0_9, PC_FREE0_9,
 1858         PC_FREE0_9, PC_FREE10
 1859 };
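      /*
       * A pv chunk is a single page holding _NPCPV pv entries (336 on i386),
       * tracked by an 11-word free bitmap: ten full 32-bit words plus the
       * low 16 bits of the last word (10 * 32 + 16 == 336), which is why
       * PC_FREE10 is 0x0000ffff rather than 0xffffffff.
       */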
 1860 
 1861 SYSCTL_INT(_vm_pmap, OID_AUTO, pv_entry_count, CTLFLAG_RD, &pv_entry_count, 0,
 1862         "Current number of pv entries");
 1863 
 1864 #ifdef PV_STATS
 1865 static int pc_chunk_count, pc_chunk_allocs, pc_chunk_frees, pc_chunk_tryfail;
 1866 
 1867 SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_count, CTLFLAG_RD, &pc_chunk_count, 0,
 1868         "Current number of pv entry chunks");
 1869 SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_allocs, CTLFLAG_RD, &pc_chunk_allocs, 0,
 1870         "Current number of pv entry chunks allocated");
 1871 SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_frees, CTLFLAG_RD, &pc_chunk_frees, 0,
 1872         "Current number of pv entry chunks freed");
 1873 SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_tryfail, CTLFLAG_RD, &pc_chunk_tryfail, 0,
 1874         "Number of times a pv chunk page allocation was attempted but failed.");
 1875 
 1876 static long pv_entry_frees, pv_entry_allocs;
 1877 static int pv_entry_spare;
 1878 
 1879 SYSCTL_LONG(_vm_pmap, OID_AUTO, pv_entry_frees, CTLFLAG_RD, &pv_entry_frees, 0,
 1880         "Current number of pv entry frees");
 1881 SYSCTL_LONG(_vm_pmap, OID_AUTO, pv_entry_allocs, CTLFLAG_RD, &pv_entry_allocs, 0,
 1882         "Current number of pv entry allocs");
 1883 SYSCTL_INT(_vm_pmap, OID_AUTO, pv_entry_spare, CTLFLAG_RD, &pv_entry_spare, 0,
 1884         "Current number of spare pv entries");
 1885 
 1886 static int pmap_collect_inactive, pmap_collect_active;
 1887 
 1888 SYSCTL_INT(_vm_pmap, OID_AUTO, pmap_collect_inactive, CTLFLAG_RD, &pmap_collect_inactive, 0,
 1889         "Number of times pmap_collect has been called on the inactive queue");
 1890 SYSCTL_INT(_vm_pmap, OID_AUTO, pmap_collect_active, CTLFLAG_RD, &pmap_collect_active, 0,
 1891         "Number of times pmap_collect has been called on the active queue");
 1892 #endif
 1893 
 1894 /*
 1895  * We are in a serious low memory condition.  Resort to
 1896  * drastic measures to free some pages so we can allocate
 1897  * another pv entry chunk.  This is normally called to
 1898  * unmap inactive pages, and if necessary, active pages.
 1899  */
 1900 static void
 1901 pmap_collect(pmap_t locked_pmap, struct vpgqueues *vpq)
 1902 {
 1903         struct md_page *pvh;
 1904         pd_entry_t *pde;
 1905         pmap_t pmap;
 1906         pt_entry_t *pte, tpte;
 1907         pv_entry_t next_pv, pv;
 1908         vm_offset_t va;
 1909         vm_page_t m, free;
 1910 
 1911         sched_pin();
 1912         TAILQ_FOREACH(m, &vpq->pl, pageq) {
 1913                 if (m->hold_count || m->busy)
 1914                         continue;
 1915                 TAILQ_FOREACH_SAFE(pv, &m->md.pv_list, pv_list, next_pv) {
 1916                         va = pv->pv_va;
 1917                         pmap = PV_PMAP(pv);
 1918                         /* Avoid deadlock and lock recursion. */
 1919                         if (pmap > locked_pmap)
 1920                                 PMAP_LOCK(pmap);
 1921                         else if (pmap != locked_pmap && !PMAP_TRYLOCK(pmap))
 1922                                 continue;
 1923                         pmap->pm_stats.resident_count--;
 1924                         pde = pmap_pde(pmap, va);
 1925                         KASSERT((*pde & PG_PS) == 0, ("pmap_collect: found"
 1926                             " a 4mpage in page %p's pv list", m));
 1927                         pte = pmap_pte_quick(pmap, va);
 1928                         tpte = pte_load_clear(pte);
 1929                         KASSERT((tpte & PG_W) == 0,
 1930                             ("pmap_collect: wired pte %#jx", (uintmax_t)tpte));
 1931                         if (tpte & PG_A)
 1932                                 vm_page_flag_set(m, PG_REFERENCED);
 1933                         if ((tpte & (PG_M | PG_RW)) == (PG_M | PG_RW))
 1934                                 vm_page_dirty(m);
 1935                         free = NULL;
 1936                         pmap_unuse_pt(pmap, va, &free);
 1937                         pmap_invalidate_page(pmap, va);
 1938                         pmap_free_zero_pages(free);
 1939                         TAILQ_REMOVE(&m->md.pv_list, pv, pv_list);
 1940                         if (TAILQ_EMPTY(&m->md.pv_list)) {
 1941                                 pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
 1942                                 if (TAILQ_EMPTY(&pvh->pv_list))
 1943                                         vm_page_flag_clear(m, PG_WRITEABLE);
 1944                         }
 1945                         free_pv_entry(pmap, pv);
 1946                         if (pmap != locked_pmap)
 1947                                 PMAP_UNLOCK(pmap);
 1948                 }
 1949         }
 1950         sched_unpin();
 1951 }
 1952 
 1953 
 1954 /*
 1955  * Free the pv_entry back to its chunk, freeing the chunk when empty.
 1956  */
 1957 static void
 1958 free_pv_entry(pmap_t pmap, pv_entry_t pv)
 1959 {
 1960         vm_page_t m;
 1961         struct pv_chunk *pc;
 1962         int idx, field, bit;
 1963 
 1964         mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 1965         PMAP_LOCK_ASSERT(pmap, MA_OWNED);
 1966         PV_STAT(pv_entry_frees++);
 1967         PV_STAT(pv_entry_spare++);
 1968         pv_entry_count--;
 1969         pc = pv_to_chunk(pv);
 1970         idx = pv - &pc->pc_pventry[0];
 1971         field = idx / 32;
 1972         bit = idx % 32;
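              /*
               * For example, the pv entry at chunk index 70 occupies bit
               * 70 % 32 == 6 of bitmap word 70 / 32 == 2.
               */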
 1973         pc->pc_map[field] |= 1ul << bit;
 1974         /* move to head of list */
 1975         TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
 1976         for (idx = 0; idx < _NPCM; idx++)
 1977                 if (pc->pc_map[idx] != pc_freemask[idx]) {
 1978                         TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc, pc_list);
 1979                         return;
 1980                 }
 1981         PV_STAT(pv_entry_spare -= _NPCPV);
 1982         PV_STAT(pc_chunk_count--);
 1983         PV_STAT(pc_chunk_frees++);
 1984         /* entire chunk is free, return it */
 1985         m = PHYS_TO_VM_PAGE(pmap_kextract((vm_offset_t)pc));
 1986         pmap_qremove((vm_offset_t)pc, 1);
 1987         vm_page_unwire(m, 0);
 1988         vm_page_free(m);
 1989         pmap_ptelist_free(&pv_vafree, (vm_offset_t)pc);
 1990 }
 1991 
 1992 /*
 1993  * Get a new pv_entry, allocating a whole pv chunk from the system
 1994  * when needed.
 1995  */
 1996 static pv_entry_t
 1997 get_pv_entry(pmap_t pmap, int try)
 1998 {
 1999         static const struct timeval printinterval = { 60, 0 };
 2000         static struct timeval lastprint;
 2001         static vm_pindex_t colour;
 2002         struct vpgqueues *pq;
 2003         int bit, field;
 2004         pv_entry_t pv;
 2005         struct pv_chunk *pc;
 2006         vm_page_t m;
 2007 
 2008         PMAP_LOCK_ASSERT(pmap, MA_OWNED);
 2009         mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 2010         PV_STAT(pv_entry_allocs++);
 2011         pv_entry_count++;
 2012         if (pv_entry_count > pv_entry_high_water)
 2013                 if (ratecheck(&lastprint, &printinterval))
 2014                         printf("Approaching the limit on PV entries, consider "
 2015                             "increasing either the vm.pmap.shpgperproc or the "
 2016                             "vm.pmap.pv_entry_max tunable.\n");
 2017         pq = NULL;
 2018 retry:
 2019         pc = TAILQ_FIRST(&pmap->pm_pvchunk);
 2020         if (pc != NULL) {
 2021                 for (field = 0; field < _NPCM; field++) {
 2022                         if (pc->pc_map[field]) {
 2023                                 bit = bsfl(pc->pc_map[field]);
 2024                                 break;
 2025                         }
 2026                 }
 2027                 if (field < _NPCM) {
 2028                         pv = &pc->pc_pventry[field * 32 + bit];
 2029                         pc->pc_map[field] &= ~(1ul << bit);
 2030                         /* If this was the last free entry, move the chunk to the tail */
 2031                         for (field = 0; field < _NPCM; field++)
 2032                                 if (pc->pc_map[field] != 0) {
 2033                                         PV_STAT(pv_entry_spare--);
 2034                                         return (pv);    /* not full, return */
 2035                                 }
 2036                         TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
 2037                         TAILQ_INSERT_TAIL(&pmap->pm_pvchunk, pc, pc_list);
 2038                         PV_STAT(pv_entry_spare--);
 2039                         return (pv);
 2040                 }
 2041         }
 2042         /*
 2043          * Access to the ptelist "pv_vafree" is synchronized by the page
 2044          * queues lock.  If "pv_vafree" is currently non-empty, it will
 2045          * remain non-empty until pmap_ptelist_alloc() completes.
 2046          */
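              /*
               * A new chunk needs two resources: a physical page from
               * vm_page_alloc() and a page of kernel virtual address space
               * taken from pv_vafree, which is wired in below with
               * pmap_qenter() once the allocation succeeds.
               */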
 2047         if (pv_vafree == 0 || (m = vm_page_alloc(NULL, colour, (pq ==
 2048             &vm_page_queues[PQ_ACTIVE] ? VM_ALLOC_SYSTEM : VM_ALLOC_NORMAL) |
 2049             VM_ALLOC_NOOBJ | VM_ALLOC_WIRED)) == NULL) {
 2050                 if (try) {
 2051                         pv_entry_count--;
 2052                         PV_STAT(pc_chunk_tryfail++);
 2053                         return (NULL);
 2054                 }
 2055                 /*
 2056                  * Reclaim pv entries: At first, destroy mappings to
 2057                  * inactive pages.  After that, if a pv chunk entry
 2058                  * is still needed, destroy mappings to active pages.
 2059                  */
 2060                 if (pq == NULL) {
 2061                         PV_STAT(pmap_collect_inactive++);
 2062                         pq = &vm_page_queues[PQ_INACTIVE];
 2063                 } else if (pq == &vm_page_queues[PQ_INACTIVE]) {
 2064                         PV_STAT(pmap_collect_active++);
 2065                         pq = &vm_page_queues[PQ_ACTIVE];
 2066                 } else
 2067                         panic("get_pv_entry: increase vm.pmap.shpgperproc");
 2068                 pmap_collect(pmap, pq);
 2069                 goto retry;
 2070         }
 2071         PV_STAT(pc_chunk_count++);
 2072         PV_STAT(pc_chunk_allocs++);
 2073         colour++;
 2074         pc = (struct pv_chunk *)pmap_ptelist_alloc(&pv_vafree);
 2075         pmap_qenter((vm_offset_t)pc, &m, 1);
 2076         pc->pc_pmap = pmap;
 2077         pc->pc_map[0] = pc_freemask[0] & ~1ul;  /* preallocated bit 0 */
 2078         for (field = 1; field < _NPCM; field++)
 2079                 pc->pc_map[field] = pc_freemask[field];
 2080         pv = &pc->pc_pventry[0];
 2081         TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc, pc_list);
 2082         PV_STAT(pv_entry_spare += _NPCPV - 1);
 2083         return (pv);
 2084 }
 2085 
 2086 static __inline pv_entry_t
 2087 pmap_pvh_remove(struct md_page *pvh, pmap_t pmap, vm_offset_t va)
 2088 {
 2089         pv_entry_t pv;
 2090 
 2091         mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 2092         TAILQ_FOREACH(pv, &pvh->pv_list, pv_list) {
 2093                 if (pmap == PV_PMAP(pv) && va == pv->pv_va) {
 2094                         TAILQ_REMOVE(&pvh->pv_list, pv, pv_list);
 2095                         break;
 2096                 }
 2097         }
 2098         return (pv);
 2099 }
 2100 
 2101 static void
 2102 pmap_pv_demote_pde(pmap_t pmap, vm_offset_t va, vm_paddr_t pa)
 2103 {
 2104         struct md_page *pvh;
 2105         pv_entry_t pv;
 2106         vm_offset_t va_last;
 2107         vm_page_t m;
 2108 
 2109         mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 2110         KASSERT((pa & PDRMASK) == 0,
 2111             ("pmap_pv_demote_pde: pa is not 4mpage aligned"));
 2112 
 2113         /*
 2114          * Transfer the 4mpage's pv entry for this mapping to the first
 2115          * page's pv list.
 2116          */
 2117         pvh = pa_to_pvh(pa);
 2118         va = trunc_4mpage(va);
 2119         pv = pmap_pvh_remove(pvh, pmap, va);
 2120         KASSERT(pv != NULL, ("pmap_pv_demote_pde: pv not found"));
 2121         m = PHYS_TO_VM_PAGE(pa);
 2122         TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_list);
 2123         /* Instantiate the remaining NPTEPG - 1 pv entries. */
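              /* That is 1023 entries for a 4MB page, or 511 with PAE. */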
 2124         va_last = va + NBPDR - PAGE_SIZE;
 2125         do {
 2126                 m++;
 2127                 KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
 2128                     ("pmap_pv_demote_pde: page %p is not managed", m));
 2129                 va += PAGE_SIZE;
 2130                 pmap_insert_entry(pmap, va, m);
 2131         } while (va < va_last);
 2132 }
 2133 
 2134 static void
 2135 pmap_pv_promote_pde(pmap_t pmap, vm_offset_t va, vm_paddr_t pa)
 2136 {
 2137         struct md_page *pvh;
 2138         pv_entry_t pv;
 2139         vm_offset_t va_last;
 2140         vm_page_t m;
 2141 
 2142         mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 2143         KASSERT((pa & PDRMASK) == 0,
 2144             ("pmap_pv_promote_pde: pa is not 4mpage aligned"));
 2145 
 2146         /*
 2147          * Transfer the first page's pv entry for this mapping to the
 2148          * 4mpage's pv list.  Aside from avoiding the cost of a call
 2149          * to get_pv_entry(), a transfer avoids the possibility that
 2150          * get_pv_entry() calls pmap_collect() and that pmap_collect()
 2151          * removes one of the mappings that is being promoted.
 2152          */
 2153         m = PHYS_TO_VM_PAGE(pa);
 2154         va = trunc_4mpage(va);
 2155         pv = pmap_pvh_remove(&m->md, pmap, va);
 2156         KASSERT(pv != NULL, ("pmap_pv_promote_pde: pv not found"));
 2157         pvh = pa_to_pvh(pa);
 2158         TAILQ_INSERT_TAIL(&pvh->pv_list, pv, pv_list);
 2159         /* Free the remaining NPTEPG - 1 pv entries. */
 2160         va_last = va + NBPDR - PAGE_SIZE;
 2161         do {
 2162                 m++;
 2163                 va += PAGE_SIZE;
 2164                 pmap_pvh_free(&m->md, pmap, va);
 2165         } while (va < va_last);
 2166 }
 2167 
 2168 static void
 2169 pmap_pvh_free(struct md_page *pvh, pmap_t pmap, vm_offset_t va)
 2170 {
 2171         pv_entry_t pv;
 2172 
 2173         pv = pmap_pvh_remove(pvh, pmap, va);
 2174         KASSERT(pv != NULL, ("pmap_pvh_free: pv not found"));
 2175         free_pv_entry(pmap, pv);
 2176 }
 2177 
 2178 static void
 2179 pmap_remove_entry(pmap_t pmap, vm_page_t m, vm_offset_t va)
 2180 {
 2181         struct md_page *pvh;
 2182 
 2183         mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 2184         pmap_pvh_free(&m->md, pmap, va);
 2185         if (TAILQ_EMPTY(&m->md.pv_list)) {
 2186                 pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
 2187                 if (TAILQ_EMPTY(&pvh->pv_list))
 2188                         vm_page_flag_clear(m, PG_WRITEABLE);
 2189         }
 2190 }
 2191 
 2192 /*
 2193  * Create a pv entry for page at pa for
 2194  * (pmap, va).
 2195  */
 2196 static void
 2197 pmap_insert_entry(pmap_t pmap, vm_offset_t va, vm_page_t m)
 2198 {
 2199         pv_entry_t pv;
 2200 
 2201         PMAP_LOCK_ASSERT(pmap, MA_OWNED);
 2202         mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 2203         pv = get_pv_entry(pmap, FALSE);
 2204         pv->pv_va = va;
 2205         TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_list);
 2206 }
 2207 
 2208 /*
 2209  * Conditionally create a pv entry.
 2210  */
 2211 static boolean_t
 2212 pmap_try_insert_pv_entry(pmap_t pmap, vm_offset_t va, vm_page_t m)
 2213 {
 2214         pv_entry_t pv;
 2215 
 2216         PMAP_LOCK_ASSERT(pmap, MA_OWNED);
 2217         mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 2218         if (pv_entry_count < pv_entry_high_water && 
 2219             (pv = get_pv_entry(pmap, TRUE)) != NULL) {
 2220                 pv->pv_va = va;
 2221                 TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_list);
 2222                 return (TRUE);
 2223         } else
 2224                 return (FALSE);
 2225 }
 2226 
 2227 /*
 2228  * Create the pv entries for each of the pages within a superpage.
 2229  */
 2230 static boolean_t
 2231 pmap_pv_insert_pde(pmap_t pmap, vm_offset_t va, vm_paddr_t pa)
 2232 {
 2233         struct md_page *pvh;
 2234         pv_entry_t pv;
 2235 
 2236         mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 2237         if (pv_entry_count < pv_entry_high_water && 
 2238             (pv = get_pv_entry(pmap, TRUE)) != NULL) {
 2239                 pv->pv_va = va;
 2240                 pvh = pa_to_pvh(pa);
 2241                 TAILQ_INSERT_TAIL(&pvh->pv_list, pv, pv_list);
 2242                 return (TRUE);
 2243         } else
 2244                 return (FALSE);
 2245 }
 2246 
 2247 /*
 2248  * Tries to demote a 2- or 4MB page mapping.
 2249  */
 2250 static boolean_t
 2251 pmap_demote_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t va)
 2252 {
 2253         pd_entry_t newpde, oldpde;
 2254         pmap_t allpmaps_entry;
 2255         pt_entry_t *firstpte, newpte, *pte;
 2256         vm_paddr_t mptepa;
 2257         vm_page_t free, mpte;
 2258 
 2259         PMAP_LOCK_ASSERT(pmap, MA_OWNED);
 2260         mpte = pmap_lookup_pt_page(pmap, va);
 2261         if (mpte != NULL)
 2262                 pmap_remove_pt_page(pmap, mpte);
 2263         else {
 2264                 KASSERT((*pde & PG_W) == 0,
 2265                     ("pmap_demote_pde: page table page for a wired mapping"
 2266                     " is missing"));
 2267                 free = NULL;
 2268                 pmap_remove_pde(pmap, pde, trunc_4mpage(va), &free);
 2269                 pmap_invalidate_page(pmap, trunc_4mpage(va));
 2270                 pmap_free_zero_pages(free);
 2271                 CTR2(KTR_PMAP, "pmap_demote_pde: failure for va %#x"
 2272                     " in pmap %p", va, pmap);
 2273                 return (FALSE);
 2274         }
 2275         mptepa = VM_PAGE_TO_PHYS(mpte);
 2276 
 2277         /*
 2278          * Temporarily map the page table page (mpte) into the kernel's
 2279          * address space at either PADDR1 or PADDR2.
 2280          */
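              /*
               * PMAP1/PADDR1 is the cheap path: it may only be used while
               * this thread is pinned to its CPU and the page queues lock
               * is held, which serializes its users and keeps the per-CPU
               * TLB entry (tracked by PMAP1cpu) consistent.  Otherwise the
               * shared PMAP2/PADDR2 window is used under PMAP2mutex.
               */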
 2281         if (curthread->td_pinned > 0 && mtx_owned(&vm_page_queue_mtx)) {
 2282                 if ((*PMAP1 & PG_FRAME) != mptepa) {
 2283                         *PMAP1 = mptepa | PG_RW | PG_V | PG_A | PG_M;
 2284 #ifdef SMP
 2285                         PMAP1cpu = PCPU_GET(cpuid);
 2286 #endif
 2287                         invlcaddr(PADDR1);
 2288                         PMAP1changed++;
 2289                 } else
 2290 #ifdef SMP
 2291                 if (PMAP1cpu != PCPU_GET(cpuid)) {
 2292                         PMAP1cpu = PCPU_GET(cpuid);
 2293                         invlcaddr(PADDR1);
 2294                         PMAP1changedcpu++;
 2295                 } else
 2296 #endif
 2297                         PMAP1unchanged++;
 2298                 firstpte = PADDR1;
 2299         } else {
 2300                 mtx_lock(&PMAP2mutex);
 2301                 if ((*PMAP2 & PG_FRAME) != mptepa) {
 2302                         *PMAP2 = mptepa | PG_RW | PG_V | PG_A | PG_M;
 2303                         pmap_invalidate_page(kernel_pmap, (vm_offset_t)PADDR2);
 2304                 }
 2305                 firstpte = PADDR2;
 2306         }
 2307         oldpde = *pde;
 2308         newpde = mptepa | PG_M | PG_A | (oldpde & PG_U) | PG_RW | PG_V;
 2309         KASSERT((oldpde & (PG_A | PG_V)) == (PG_A | PG_V),
 2310             ("pmap_demote_pde: oldpde is missing PG_A and/or PG_V"));
 2311         KASSERT((oldpde & (PG_M | PG_RW)) != PG_RW,
 2312             ("pmap_demote_pde: oldpde is missing PG_M"));
 2313         KASSERT((oldpde & PG_PS) != 0,
 2314             ("pmap_demote_pde: oldpde is missing PG_PS"));
 2315         newpte = oldpde & ~PG_PS;
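              /*
               * The PAT bit sits at bit 12 in a 2/4MB PDE (PG_PDE_PAT) but
               * at bit 7 in a 4KB PTE (PG_PTE_PAT); when it is set, flip
               * both bits so the attribute lands in the PTE position.
               */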
 2316         if ((newpte & PG_PDE_PAT) != 0)
 2317                 newpte ^= PG_PDE_PAT | PG_PTE_PAT;
 2318 
 2319         /*
 2320          * If the mapping has changed attributes, update the page table
 2321          * entries.
 2322          */ 
 2323         KASSERT((*firstpte & PG_FRAME) == (newpte & PG_FRAME),
 2324             ("pmap_demote_pde: firstpte and newpte map different physical"
 2325             " addresses"));
 2326         if ((*firstpte & PG_PTE_PROMOTE) != (newpte & PG_PTE_PROMOTE))
 2327                 for (pte = firstpte; pte < firstpte + NPTEPG; pte++) {
 2328                         *pte = newpte;  
 2329                         newpte += PAGE_SIZE;
 2330                 }
 2331         
 2332         /*
 2333          * Demote the mapping.  This pmap is locked.  The old PDE has
 2334          * PG_A set.  If the old PDE has PG_RW set, it also has PG_M
 2335          * set.  Thus, there is no danger of a race with another
 2336          * processor changing the setting of PG_A and/or PG_M between
 2337          * the read above and the store below. 
 2338          */
 2339         if (pmap == kernel_pmap) {
 2340                 /*
 2341                  * A harmless race exists between this loop and the bcopy()
 2342                  * in pmap_pinit() that initializes the kernel segment of
 2343                  * the new page table.  Specifically, that bcopy() may copy
 2344                  * the new PDE from the PTD, which is first in allpmaps, to
 2345                  * the new page table before this loop updates that new
 2346                  * page table.
 2347                  */
 2348                 mtx_lock_spin(&allpmaps_lock);
 2349                 LIST_FOREACH(allpmaps_entry, &allpmaps, pm_list) {
 2350                         pde = pmap_pde(allpmaps_entry, va);
 2351                         KASSERT(*pde == newpde || (*pde & PG_PTE_PROMOTE) ==
 2352                             (oldpde & PG_PTE_PROMOTE),
 2353                             ("pmap_demote_pde: pde was %#jx, expected %#jx",
 2354                             (uintmax_t)*pde, (uintmax_t)oldpde));
 2355                         pde_store(pde, newpde);
 2356                 }
 2357                 mtx_unlock_spin(&allpmaps_lock);
 2358         } else
 2359                 pde_store(pde, newpde); 
 2360         if (firstpte == PADDR2)
 2361                 mtx_unlock(&PMAP2mutex);
 2362 
 2363         /*
 2364          * Invalidate the recursive mapping of the page table page.
 2365          */
 2366         pmap_invalidate_page(pmap, (vm_offset_t)vtopte(va));
 2367 
 2368         /*
 2369          * Demote the pv entry.  This depends on the earlier demotion
 2370          * of the mapping.  Specifically, the (re)creation of a per-
 2371          * page pv entry might trigger the execution of pmap_collect(),
 2372          * which might reclaim a newly (re)created per-page pv entry
 2373          * and destroy the associated mapping.  In order to destroy
 2374          * the mapping, the PDE must have already changed from mapping
 2375          * the 2mpage to referencing the page table page.
 2376          */
 2377         if ((oldpde & PG_MANAGED) != 0)
 2378                 pmap_pv_demote_pde(pmap, va, oldpde & PG_PS_FRAME);
 2379 
 2380         pmap_pde_demotions++;
 2381         CTR2(KTR_PMAP, "pmap_demote_pde: success for va %#x"
 2382             " in pmap %p", va, pmap);
 2383         return (TRUE);
 2384 }
 2385 
 2386 /*
 2387  * pmap_remove_pde: unmap a superpage (2- or 4MB) mapping in a process
 2388  */
 2389 static void
 2390 pmap_remove_pde(pmap_t pmap, pd_entry_t *pdq, vm_offset_t sva,
 2391     vm_page_t *free)
 2392 {
 2393         struct md_page *pvh;
 2394         pd_entry_t oldpde;
 2395         vm_offset_t eva, va;
 2396         vm_page_t m, mpte;
 2397 
 2398         PMAP_LOCK_ASSERT(pmap, MA_OWNED);
 2399         KASSERT((sva & PDRMASK) == 0,
 2400             ("pmap_remove_pde: sva is not 4mpage aligned"));
 2401         oldpde = pte_load_clear(pdq);
 2402         if (oldpde & PG_W)
 2403                 pmap->pm_stats.wired_count -= NBPDR / PAGE_SIZE;
 2404 
 2405         /*
 2406          * Machines that don't support invlpg also don't support
 2407          * PG_G.
 2408          */
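              /*
               * PG_G (global) mappings survive a %cr3 reload, so they must
               * be invalidated explicitly here; such mappings exist only in
               * the kernel pmap.
               */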
 2409         if (oldpde & PG_G)
 2410                 pmap_invalidate_page(kernel_pmap, sva);
 2411         pmap->pm_stats.resident_count -= NBPDR / PAGE_SIZE;
 2412         if (oldpde & PG_MANAGED) {
 2413                 pvh = pa_to_pvh(oldpde & PG_PS_FRAME);
 2414                 pmap_pvh_free(pvh, pmap, sva);
 2415                 eva = sva + NBPDR;
 2416                 for (va = sva, m = PHYS_TO_VM_PAGE(oldpde & PG_PS_FRAME);
 2417                     va < eva; va += PAGE_SIZE, m++) {
 2418                         if ((oldpde & (PG_M | PG_RW)) == (PG_M | PG_RW))
 2419                                 vm_page_dirty(m);
 2420                         if (oldpde & PG_A)
 2421                                 vm_page_flag_set(m, PG_REFERENCED);
 2422                         if (TAILQ_EMPTY(&m->md.pv_list) &&
 2423                             TAILQ_EMPTY(&pvh->pv_list))
 2424                                 vm_page_flag_clear(m, PG_WRITEABLE);
 2425                 }
 2426         }
 2427         if (pmap == kernel_pmap) {
 2428                 if (!pmap_demote_pde(pmap, pdq, sva))
 2429                         panic("pmap_remove_pde: failed demotion");
 2430         } else {
 2431                 mpte = pmap_lookup_pt_page(pmap, sva);
 2432                 if (mpte != NULL) {
 2433                         pmap_remove_pt_page(pmap, mpte);
 2434                         pmap->pm_stats.resident_count--;
 2435                         KASSERT(mpte->wire_count == NPTEPG,
 2436                             ("pmap_remove_pde: pte page wire count error"));
 2437                         mpte->wire_count = 0;
 2438                         pmap_add_delayed_free_list(mpte, free, FALSE);
 2439                         atomic_subtract_int(&cnt.v_wire_count, 1);
 2440                 }
 2441         }
 2442 }
 2443 
 2444 /*
 2445  * pmap_remove_pte: unmap a single 4KB page in a process
 2446  */
 2447 static int
 2448 pmap_remove_pte(pmap_t pmap, pt_entry_t *ptq, vm_offset_t va, vm_page_t *free)
 2449 {
 2450         pt_entry_t oldpte;
 2451         vm_page_t m;
 2452 
 2453         mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 2454         PMAP_LOCK_ASSERT(pmap, MA_OWNED);
 2455         oldpte = pte_load_clear(ptq);
 2456         if (oldpte & PG_W)
 2457                 pmap->pm_stats.wired_count -= 1;
 2458         /*
 2459          * Machines that don't support invlpg also don't support
 2460          * PG_G.
 2461          */
 2462         if (oldpte & PG_G)
 2463                 pmap_invalidate_page(kernel_pmap, va);
 2464         pmap->pm_stats.resident_count -= 1;
 2465         if (oldpte & PG_MANAGED) {
 2466                 m = PHYS_TO_VM_PAGE(oldpte & PG_FRAME);
 2467                 if ((oldpte & (PG_M | PG_RW)) == (PG_M | PG_RW))
 2468                         vm_page_dirty(m);
 2469                 if (oldpte & PG_A)
 2470                         vm_page_flag_set(m, PG_REFERENCED);
 2471                 pmap_remove_entry(pmap, m, va);
 2472         }
 2473         return (pmap_unuse_pt(pmap, va, free));
 2474 }
 2475 
 2476 /*
 2477  * Remove a single page from a process address space
 2478  */
 2479 static void
 2480 pmap_remove_page(pmap_t pmap, vm_offset_t va, vm_page_t *free)
 2481 {
 2482         pt_entry_t *pte;
 2483 
 2484         mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 2485         KASSERT(curthread->td_pinned > 0, ("curthread not pinned"));
 2486         PMAP_LOCK_ASSERT(pmap, MA_OWNED);
 2487         if ((pte = pmap_pte_quick(pmap, va)) == NULL || *pte == 0)
 2488                 return;
 2489         pmap_remove_pte(pmap, pte, va, free);
 2490         pmap_invalidate_page(pmap, va);
 2491 }
 2492 
 2493 /*
 2494  *      Remove the given range of addresses from the specified map.
 2495  *
 2496  *      It is assumed that the start and end are properly
 2497  *      rounded to the page size.
 2498  */
 2499 void
 2500 pmap_remove(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
 2501 {
 2502         vm_offset_t pdnxt;
 2503         pd_entry_t ptpaddr;
 2504         pt_entry_t *pte;
 2505         vm_page_t free = NULL;
 2506         int anyvalid;
 2507 
 2508         /*
 2509          * Perform an unsynchronized read.  This is, however, safe.
 2510          */
 2511         if (pmap->pm_stats.resident_count == 0)
 2512                 return;
 2513 
 2514         anyvalid = 0;
 2515 
 2516         vm_page_lock_queues();
 2517         sched_pin();
 2518         PMAP_LOCK(pmap);
 2519 
 2520         /*
 2521          * Special handling for removing a single page: it is a very
 2522          * common operation, so it is worth short-circuiting the
 2523          * general loop below.
 2524          */
 2525         if ((sva + PAGE_SIZE == eva) && 
 2526             ((pmap->pm_pdir[(sva >> PDRSHIFT)] & PG_PS) == 0)) {
 2527                 pmap_remove_page(pmap, sva, &free);
 2528                 goto out;
 2529         }
 2530 
 2531         for (; sva < eva; sva = pdnxt) {
 2532                 unsigned pdirindex;
 2533 
 2534                 /*
 2535                  * Calculate index for next page table.
 2536                  */
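                      /*
                       * If the addition below wraps past the top of the
                       * address space, pdnxt ends up below sva and the scan
                       * is clamped to eva.
                       */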
 2537                 pdnxt = (sva + NBPDR) & ~PDRMASK;
 2538                 if (pdnxt < sva)
 2539                         pdnxt = eva;
 2540                 if (pmap->pm_stats.resident_count == 0)
 2541                         break;
 2542 
 2543                 pdirindex = sva >> PDRSHIFT;
 2544                 ptpaddr = pmap->pm_pdir[pdirindex];
 2545 
 2546                 /*
 2547                  * Weed out invalid mappings. Note: we assume that the page
 2548                  * directory table is always allocated and mapped in kernel virtual memory.
 2549                  */
 2550                 if (ptpaddr == 0)
 2551                         continue;
 2552 
 2553                 /*
 2554                  * Check for large page.
 2555                  */
 2556                 if ((ptpaddr & PG_PS) != 0) {
 2557                         /*
 2558                          * Are we removing the entire large page?  If not,
 2559                          * demote the mapping and fall through.
 2560                          */
 2561                         if (sva + NBPDR == pdnxt && eva >= pdnxt) {
 2562                                 /*
 2563                                  * The TLB entry for a PG_G mapping is
 2564                                  * invalidated by pmap_remove_pde().
 2565                                  */
 2566                                 if ((ptpaddr & PG_G) == 0)
 2567                                         anyvalid = 1;
 2568                                 pmap_remove_pde(pmap,
 2569                                     &pmap->pm_pdir[pdirindex], sva, &free);
 2570                                 continue;
 2571                         } else if (!pmap_demote_pde(pmap,
 2572                             &pmap->pm_pdir[pdirindex], sva)) {
 2573                                 /* The large page mapping was destroyed. */
 2574                                 continue;
 2575                         }
 2576                 }
 2577 
 2578                 /*
 2579                  * Limit our scan to either the end of the va represented
 2580                  * by the current page table page, or to the end of the
 2581                  * range being removed.
 2582                  */
 2583                 if (pdnxt > eva)
 2584                         pdnxt = eva;
 2585 
 2586                 for (pte = pmap_pte_quick(pmap, sva); sva != pdnxt; pte++,
 2587                     sva += PAGE_SIZE) {
 2588                         if (*pte == 0)
 2589                                 continue;
 2590 
 2591                         /*
 2592                          * The TLB entry for a PG_G mapping is invalidated
 2593                          * by pmap_remove_pte().
 2594                          */
 2595                         if ((*pte & PG_G) == 0)
 2596                                 anyvalid = 1;
 2597                         if (pmap_remove_pte(pmap, pte, sva, &free))
 2598                                 break;
 2599                 }
 2600         }
 2601 out:
 2602         sched_unpin();
 2603         if (anyvalid)
 2604                 pmap_invalidate_all(pmap);
 2605         vm_page_unlock_queues();
 2606         PMAP_UNLOCK(pmap);
 2607         pmap_free_zero_pages(free);
 2608 }
 2609 
 2610 /*
 2611  *      Routine:        pmap_remove_all
 2612  *      Function:
 2613  *              Removes this physical page from
 2614  *              all physical maps in which it resides.
 2615  *              Reflects back modify bits to the pager.
 2616  *
 2617  *      Notes:
 2618  *              Original versions of this routine were very
 2619  *              inefficient because they iteratively called
 2620  *              pmap_remove (slow...)
 2621  */
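      /*
       *      The first loop below demotes any 2/4MB mappings of the page,
       *      so the second loop only has to handle 4KB pv entries.
       */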
 2622 
 2623 void
 2624 pmap_remove_all(vm_page_t m)
 2625 {
 2626         struct md_page *pvh;
 2627         pv_entry_t pv;
 2628         pmap_t pmap;
 2629         pt_entry_t *pte, tpte;
 2630         pd_entry_t *pde;
 2631         vm_offset_t va;
 2632         vm_page_t free;
 2633 
 2634         KASSERT((m->flags & PG_FICTITIOUS) == 0,
 2635             ("pmap_remove_all: page %p is fictitious", m));
 2636         mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 2637         sched_pin();
 2638         pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
 2639         while ((pv = TAILQ_FIRST(&pvh->pv_list)) != NULL) {
 2640                 va = pv->pv_va;
 2641                 pmap = PV_PMAP(pv);
 2642                 PMAP_LOCK(pmap);
 2643                 pde = pmap_pde(pmap, va);
 2644                 (void)pmap_demote_pde(pmap, pde, va);
 2645                 PMAP_UNLOCK(pmap);
 2646         }
 2647         while ((pv = TAILQ_FIRST(&m->md.pv_list)) != NULL) {
 2648                 pmap = PV_PMAP(pv);
 2649                 PMAP_LOCK(pmap);
 2650                 pmap->pm_stats.resident_count--;
 2651                 pde = pmap_pde(pmap, pv->pv_va);
 2652                 KASSERT((*pde & PG_PS) == 0, ("pmap_remove_all: found"
 2653                     " a 4mpage in page %p's pv list", m));
 2654                 pte = pmap_pte_quick(pmap, pv->pv_va);
 2655                 tpte = pte_load_clear(pte);
 2656                 if (tpte & PG_W)
 2657                         pmap->pm_stats.wired_count--;
 2658                 if (tpte & PG_A)
 2659                         vm_page_flag_set(m, PG_REFERENCED);
 2660 
 2661                 /*
 2662                  * Update the vm_page_t clean and reference bits.
 2663                  */
 2664                 if ((tpte & (PG_M | PG_RW)) == (PG_M | PG_RW))
 2665                         vm_page_dirty(m);
 2666                 free = NULL;
 2667                 pmap_unuse_pt(pmap, pv->pv_va, &free);
 2668                 pmap_invalidate_page(pmap, pv->pv_va);
 2669                 pmap_free_zero_pages(free);
 2670                 TAILQ_REMOVE(&m->md.pv_list, pv, pv_list);
 2671                 free_pv_entry(pmap, pv);
 2672                 PMAP_UNLOCK(pmap);
 2673         }
 2674         vm_page_flag_clear(m, PG_WRITEABLE);
 2675         sched_unpin();
 2676 }
 2677 
 2678 /*
 2679  * pmap_protect_pde: apply the given protection to a 4mpage in a process
 2680  */
 2681 static boolean_t
 2682 pmap_protect_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t sva, vm_prot_t prot)
 2683 {
 2684         pd_entry_t newpde, oldpde;
 2685         vm_offset_t eva, va;
 2686         vm_page_t m;
 2687         boolean_t anychanged;
 2688 
 2689         PMAP_LOCK_ASSERT(pmap, MA_OWNED);
 2690         KASSERT((sva & PDRMASK) == 0,
 2691             ("pmap_protect_pde: sva is not 4mpage aligned"));
 2692         anychanged = FALSE;
 2693 retry:
 2694         oldpde = newpde = *pde;
 2695         if (oldpde & PG_MANAGED) {
 2696                 eva = sva + NBPDR;
 2697                 for (va = sva, m = PHYS_TO_VM_PAGE(oldpde & PG_PS_FRAME);
 2698                     va < eva; va += PAGE_SIZE, m++) {
 2699                         /*
 2700                          * In contrast to the analogous operation on a 4KB page
 2701                          * mapping, the mapping's PG_A flag is not cleared and
 2702                          * the page's PG_REFERENCED flag is not set.  The
 2703                          * reason is that pmap_demote_pde() expects that a 2/4MB
 2704                          * page mapping with a stored page table page has PG_A
 2705                          * set.
 2706                          */
 2707                         if ((oldpde & (PG_M | PG_RW)) == (PG_M | PG_RW))
 2708                                 vm_page_dirty(m);
 2709                 }
 2710         }
 2711         if ((prot & VM_PROT_WRITE) == 0)
 2712                 newpde &= ~(PG_RW | PG_M);
 2713 #ifdef PAE
 2714         if ((prot & VM_PROT_EXECUTE) == 0)
 2715                 newpde |= pg_nx;
 2716 #endif
 2717         if (newpde != oldpde) {
 2718                 if (!pde_cmpset(pde, oldpde, newpde))
 2719                         goto retry;
 2720                 if (oldpde & PG_G)
 2721                         pmap_invalidate_page(pmap, sva);
 2722                 else
 2723                         anychanged = TRUE;
 2724         }
 2725         return (anychanged);
 2726 }
 2727 
 2728 /*
 2729  *      Set the physical protection on the
 2730  *      specified range of this map as requested.
 2731  */
 2732 void
 2733 pmap_protect(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot)
 2734 {
 2735         vm_offset_t pdnxt;
 2736         pd_entry_t ptpaddr;
 2737         pt_entry_t *pte;
 2738         int anychanged;
 2739 
 2740         if ((prot & VM_PROT_READ) == VM_PROT_NONE) {
 2741                 pmap_remove(pmap, sva, eva);
 2742                 return;
 2743         }
 2744 
 2745 #ifdef PAE
 2746         if ((prot & (VM_PROT_WRITE|VM_PROT_EXECUTE)) ==
 2747             (VM_PROT_WRITE|VM_PROT_EXECUTE))
 2748                 return;
 2749 #else
 2750         if (prot & VM_PROT_WRITE)
 2751                 return;
 2752 #endif
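              /*
               * Past this point the request actually revokes write (or, with
               * PAE, write and/or execute) permission, so the range must be
               * scanned and the PTEs updated.
               */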
 2753 
 2754         anychanged = 0;
 2755 
 2756         vm_page_lock_queues();
 2757         sched_pin();
 2758         PMAP_LOCK(pmap);
 2759         for (; sva < eva; sva = pdnxt) {
 2760                 pt_entry_t obits, pbits;
 2761                 unsigned pdirindex;
 2762 
 2763                 pdnxt = (sva + NBPDR) & ~PDRMASK;
 2764                 if (pdnxt < sva)
 2765                         pdnxt = eva;
 2766 
 2767                 pdirindex = sva >> PDRSHIFT;
 2768                 ptpaddr = pmap->pm_pdir[pdirindex];
 2769 
 2770                 /*
 2771                  * Weed out invalid mappings. Note: we assume that the page
 2772                  * directory table is always allocated and mapped in kernel virtual memory.
 2773                  */
 2774                 if (ptpaddr == 0)
 2775                         continue;
 2776 
 2777                 /*
 2778                  * Check for large page.
 2779                  */
 2780                 if ((ptpaddr & PG_PS) != 0) {
 2781                         /*
 2782                          * Are we protecting the entire large page?  If not,
 2783                          * demote the mapping and fall through.
 2784                          */
 2785                         if (sva + NBPDR == pdnxt && eva >= pdnxt) {
 2786                                 /*
 2787                                  * The TLB entry for a PG_G mapping is
 2788                                  * invalidated by pmap_protect_pde().
 2789                                  */
 2790                                 if (pmap_protect_pde(pmap,
 2791                                     &pmap->pm_pdir[pdirindex], sva, prot))
 2792                                         anychanged = 1;
 2793                                 continue;
 2794                         } else if (!pmap_demote_pde(pmap,
 2795                             &pmap->pm_pdir[pdirindex], sva)) {
 2796                                 /* The large page mapping was destroyed. */
 2797                                 continue;
 2798                         }
 2799                 }
 2800 
 2801                 if (pdnxt > eva)
 2802                         pdnxt = eva;
 2803 
 2804                 for (pte = pmap_pte_quick(pmap, sva); sva != pdnxt; pte++,
 2805                     sva += PAGE_SIZE) {
 2806                         vm_page_t m;
 2807 
 2808 retry:
 2809                         /*
 2810                          * Regardless of whether a pte is 32 or 64 bits in
 2811                          * size, PG_RW, PG_A, and PG_M are among the least
 2812                          * significant 32 bits.
 2813                          */
 2814                         obits = pbits = *pte;
 2815                         if ((pbits & PG_V) == 0)
 2816                                 continue;
 2817                         if (pbits & PG_MANAGED) {
 2818                                 m = NULL;
 2819                                 if (pbits & PG_A) {
 2820                                         m = PHYS_TO_VM_PAGE(pbits & PG_FRAME);
 2821                                         vm_page_flag_set(m, PG_REFERENCED);
 2822                                         pbits &= ~PG_A;
 2823                                 }
 2824                                 if ((pbits & (PG_M | PG_RW)) == (PG_M | PG_RW)) {
 2825                                         if (m == NULL)
 2826                                                 m = PHYS_TO_VM_PAGE(pbits & PG_FRAME);
 2827                                         vm_page_dirty(m);
 2828                                 }
 2829                         }
 2830 
 2831                         if ((prot & VM_PROT_WRITE) == 0)
 2832                                 pbits &= ~(PG_RW | PG_M);
 2833 #ifdef PAE
 2834                         if ((prot & VM_PROT_EXECUTE) == 0)
 2835                                 pbits |= pg_nx;
 2836 #endif
 2837 
 2838                         if (pbits != obits) {
 2839 #ifdef PAE
 2840                                 if (!atomic_cmpset_64(pte, obits, pbits))
 2841                                         goto retry;
 2842 #else
 2843                                 if (!atomic_cmpset_int((u_int *)pte, obits,
 2844                                     pbits))
 2845                                         goto retry;
 2846 #endif
 2847                                 if (obits & PG_G)
 2848                                         pmap_invalidate_page(pmap, sva);
 2849                                 else
 2850                                         anychanged = 1;
 2851                         }
 2852                 }
 2853         }
 2854         sched_unpin();
 2855         if (anychanged)
 2856                 pmap_invalidate_all(pmap);
 2857         vm_page_unlock_queues();
 2858         PMAP_UNLOCK(pmap);
 2859 }
 2860 
 2861 /*
 2862  * Tries to promote the 512 or 1024 contiguous 4KB page mappings that are
 2863  * within a single page table page (PTP) to a single 2- or 4MB page mapping.
 2864  * For promotion to occur, two conditions must be met: (1) the 4KB page
 2865  * mappings must map aligned, contiguous physical memory and (2) the 4KB page
 2866  * mappings must have identical characteristics.
 2867  *
 2868  * Managed (PG_MANAGED) mappings within the kernel address space are not
 2869  * promoted.  The reason is that kernel PDEs are replicated in each pmap but
 2870  * functions such as pmap_remove_write() and pmap_ts_referenced() only read
 2871  * the PDE from the kernel pmap.
 2872  */
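      /*
       * Illustrative sketch (not part of the original source): ignoring the
       * clean-but-writable special case handled below, the promotion test is
       * roughly equivalent to the following, where ptp[] holds the NPTEPG
       * (512 or 1024) PTEs of the page table page and first is ptp[0]:
       *
       *	pa = first & PG_PS_FRAME;
       *	for (i = 0; i < NPTEPG; i++)
       *		if ((ptp[i] & (PG_FRAME | PG_A | PG_V)) !=
       *		    ((pa + i * PAGE_SIZE) | PG_A | PG_V) ||
       *		    (ptp[i] & PG_PTE_PROMOTE) != (first & PG_PTE_PROMOTE))
       *			return;
       *
       * That is, every PTE must be valid and accessed, must map the i-th 4KB
       * page of a superpage-aligned physical region, and must carry the same
       * attribute bits as the first PTE.
       */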
 2873 static void
 2874 pmap_promote_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t va)
 2875 {
 2876         pd_entry_t newpde;
 2877         pmap_t allpmaps_entry;
 2878         pt_entry_t *firstpte, oldpte, pa, *pte;
 2879         vm_offset_t oldpteva;
 2880         vm_page_t mpte;
 2881 
 2882         PMAP_LOCK_ASSERT(pmap, MA_OWNED);
 2883 
 2884         /*
 2885          * Examine the first PTE in the specified PTP.  Abort if this PTE
 2886          * is invalid, has not been accessed (PG_A is clear), or does not
 2887          * map the first 4KB physical page within a 2- or 4MB page.
 2888          */
 2889         firstpte = vtopte(trunc_4mpage(va));
 2890 setpde:
 2891         newpde = *firstpte;
 2892         if ((newpde & ((PG_FRAME & PDRMASK) | PG_A | PG_V)) != (PG_A | PG_V)) {
 2893                 pmap_pde_p_failures++;
 2894                 CTR2(KTR_PMAP, "pmap_promote_pde: failure for va %#x"
 2895                     " in pmap %p", va, pmap);
 2896                 return;
 2897         }
 2898         if ((*firstpte & PG_MANAGED) != 0 && pmap == kernel_pmap) {
 2899                 pmap_pde_p_failures++;
 2900                 CTR2(KTR_PMAP, "pmap_promote_pde: failure for va %#x"
 2901                     " in pmap %p", va, pmap);
 2902                 return;
 2903         }
 2904         if ((newpde & (PG_M | PG_RW)) == PG_RW) {
 2905                 /*
 2906                  * When PG_M is already clear, PG_RW can be cleared without
 2907                  * a TLB invalidation.
 2908                  */
 2909                 if (!atomic_cmpset_int((u_int *)firstpte, newpde, newpde &
 2910                     ~PG_RW))  
 2911                         goto setpde;
 2912                 newpde &= ~PG_RW;
 2913         }
 2914 
 2915         /* 
 2916          * Examine each of the other PTEs in the specified PTP.  Abort if this
 2917          * PTE maps an unexpected 4KB physical page or does not have identical
 2918          * characteristics to the first PTE.
 2919          */
 2920         pa = (newpde & (PG_PS_FRAME | PG_A | PG_V)) + NBPDR - PAGE_SIZE;
 2921         for (pte = firstpte + NPTEPG - 1; pte > firstpte; pte--) {
 2922 setpte:
 2923                 oldpte = *pte;
 2924                 if ((oldpte & (PG_FRAME | PG_A | PG_V)) != pa) {
 2925                         pmap_pde_p_failures++;
 2926                         CTR2(KTR_PMAP, "pmap_promote_pde: failure for va %#x"
 2927                             " in pmap %p", va, pmap);
 2928                         return;
 2929                 }
 2930                 if ((oldpte & (PG_M | PG_RW)) == PG_RW) {
 2931                         /*
 2932                          * When PG_M is already clear, PG_RW can be cleared
 2933                          * without a TLB invalidation.
 2934                          */
 2935                         if (!atomic_cmpset_int((u_int *)pte, oldpte,
 2936                             oldpte & ~PG_RW))
 2937                                 goto setpte;
 2938                         oldpte &= ~PG_RW;
 2939                         oldpteva = (oldpte & PG_FRAME & PDRMASK) |
 2940                             (va & ~PDRMASK);
 2941                         CTR2(KTR_PMAP, "pmap_promote_pde: protect for va %#x"
 2942                             " in pmap %p", oldpteva, pmap);
 2943                 }
 2944                 if ((oldpte & PG_PTE_PROMOTE) != (newpde & PG_PTE_PROMOTE)) {
 2945                         pmap_pde_p_failures++;
 2946                         CTR2(KTR_PMAP, "pmap_promote_pde: failure for va %#x"
 2947                             " in pmap %p", va, pmap);
 2948                         return;
 2949                 }
 2950                 pa -= PAGE_SIZE;
 2951         }
 2952 
 2953         /*
 2954          * Save the page table page in its current state until the PDE
 2955          * mapping the superpage is demoted by pmap_demote_pde() or
 2956          * destroyed by pmap_remove_pde(). 
 2957          */
 2958         mpte = PHYS_TO_VM_PAGE(*pde & PG_FRAME);
 2959         KASSERT(mpte >= vm_page_array &&
 2960             mpte < &vm_page_array[vm_page_array_size],
 2961             ("pmap_promote_pde: page table page is out of range"));
 2962         KASSERT(mpte->pindex == va >> PDRSHIFT,
 2963             ("pmap_promote_pde: page table page's pindex is wrong"));
 2964         pmap_insert_pt_page(pmap, mpte);
 2965 
 2966         /*
 2967          * Promote the pv entries.
 2968          */
 2969         if ((newpde & PG_MANAGED) != 0)
 2970                 pmap_pv_promote_pde(pmap, va, newpde & PG_PS_FRAME);
 2971 
 2972         /*
 2973          * Propagate the PAT index to its proper position.
 2974          */
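              /*
               * In a 4KB PTE the PAT selector is bit 7 (PG_PTE_PAT = 0x080),
               * but in a 2/4MB PDE bit 7 is PG_PS, so the selector moves to
               * bit 12 (PG_PDE_PAT = 0x1000).  Because PG_PTE_PAT is known to
               * be set when this path is taken, the XOR below clears bit 7
               * and sets bit 12; for example, 0x0e7 ^ 0x1080 == 0x1067.
               */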
 2975         if ((newpde & PG_PTE_PAT) != 0)
 2976                 newpde ^= PG_PDE_PAT | PG_PTE_PAT;
 2977 
 2978         /*
 2979          * Map the superpage.
 2980          */
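              /*
               * Kernel page directory entries are replicated in every pmap,
               * so a promotion in the kernel pmap must store the new PDE in
               * all pmaps on the allpmaps list; a user pmap only needs its
               * own page directory updated.
               */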
 2981         if (pmap == kernel_pmap) {
 2982                 mtx_lock_spin(&allpmaps_lock);
 2983                 LIST_FOREACH(allpmaps_entry, &allpmaps, pm_list) {
 2984                         pde = pmap_pde(allpmaps_entry, va);
 2985                         pde_store(pde, PG_PS | newpde);
 2986                 }
 2987                 mtx_unlock_spin(&allpmaps_lock);
 2988         } else
 2989                 pde_store(pde, PG_PS | newpde);
 2990 
 2991         pmap_pde_promotions++;
 2992         CTR2(KTR_PMAP, "pmap_promote_pde: success for va %#x"
 2993             " in pmap %p", va, pmap);
 2994 }
 2995 
 2996 /*
 2997  *      Insert the given physical page (p) at
 2998  *      the specified virtual address (v) in the
 2999  *      target physical map with the protection requested.
 3000  *
 3001  *      If specified, the page will be wired down, meaning
 3002  *      that the related pte cannot be reclaimed.
 3003  *
 3004  *      NB:  This is the only routine which MAY NOT lazy-evaluate
 3005  *      or lose information.  That is, this routine must actually
 3006  *      insert this page into the given map NOW.
 3007  */
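      /*
       * Illustrative call (hypothetical caller, not taken from this file):
       * a page-fault handler that has resolved a fault at "vaddr" to the
       * page "m" would typically do something like
       *
       *	pmap_enter(vmspace_pmap(curproc->p_vmspace), trunc_page(vaddr),
       *	    fault_type, m, prot, wired);
       *
       * passing the access type that caused the fault so that PG_M can be
       * preset for write faults, along with the final protection and wiring
       * state of the mapping.
       */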
 3008 void
 3009 pmap_enter(pmap_t pmap, vm_offset_t va, vm_prot_t access, vm_page_t m,
 3010     vm_prot_t prot, boolean_t wired)
 3011 {
 3012         vm_paddr_t pa;
 3013         pd_entry_t *pde;
 3014         pt_entry_t *pte;
 3015         vm_paddr_t opa;
 3016         pt_entry_t origpte, newpte;
 3017         vm_page_t mpte, om;
 3018         boolean_t invlva;
 3019 
 3020         va = trunc_page(va);
 3021         KASSERT(va <= VM_MAX_KERNEL_ADDRESS, ("pmap_enter: toobig"));
 3022         KASSERT(va < UPT_MIN_ADDRESS || va >= UPT_MAX_ADDRESS,
 3023             ("pmap_enter: invalid to pmap_enter page table pages (va: 0x%x)", va));
 3024 
 3025         mpte = NULL;
 3026 
 3027         vm_page_lock_queues();
 3028         PMAP_LOCK(pmap);
 3029         sched_pin();
 3030 
 3031         /*
 3032          * In the case that a page table page is not
 3033          * resident, we are creating it here.
 3034          */
 3035         if (va < VM_MAXUSER_ADDRESS) {
 3036                 mpte = pmap_allocpte(pmap, va, M_WAITOK);
 3037         }
 3038 
 3039         pde = pmap_pde(pmap, va);
 3040         if ((*pde & PG_PS) != 0)
 3041                 panic("pmap_enter: attempted pmap_enter on 4MB page");
 3042         pte = pmap_pte_quick(pmap, va);
 3043 
 3044         /*
 3045          * The page directory entry is not valid: we need a new PT page.
 3046          */
 3047         if (pte == NULL) {
 3048                 panic("pmap_enter: invalid page directory pdir=%#jx, va=%#x",
 3049                         (uintmax_t)pmap->pm_pdir[PTDPTDI], va);
 3050         }
 3051 
 3052         pa = VM_PAGE_TO_PHYS(m);
 3053         om = NULL;
 3054         origpte = *pte;
 3055         opa = origpte & PG_FRAME;
 3056 
 3057         /*
 3058          * The mapping has not changed; this must be a protection or wiring change.
 3059          */
 3060         if (origpte && (opa == pa)) {
 3061                 /*
 3062                  * Wiring change, just update stats. We don't worry about
 3063                  * wiring PT pages as they remain resident as long as there
 3064                  * are valid mappings in them. Hence, if a user page is wired,
 3065                  * the PT page will be also.
 3066                  */
 3067                 if (wired && ((origpte & PG_W) == 0))
 3068                         pmap->pm_stats.wired_count++;
 3069                 else if (!wired && (origpte & PG_W))
 3070                         pmap->pm_stats.wired_count--;
 3071 
 3072                 /*
 3073                  * Remove extra pte reference
 3074                  */
 3075                 if (mpte)
 3076                         mpte->wire_count--;
 3077 
 3078                 /*
 3079                  * We might be turning off write access to the page,
 3080                  * so we go ahead and sense modify status.
 3081                  */
 3082                 if (origpte & PG_MANAGED) {
 3083                         om = m;
 3084                         pa |= PG_MANAGED;
 3085                 }
 3086                 goto validate;
 3087         } 
 3088         /*
 3089          * Mapping has changed, invalidate old range and fall through to
 3090          * handle validating new mapping.
 3091          */
 3092         if (opa) {
 3093                 if (origpte & PG_W)
 3094                         pmap->pm_stats.wired_count--;
 3095                 if (origpte & PG_MANAGED) {
 3096                         om = PHYS_TO_VM_PAGE(opa);
 3097                         pmap_remove_entry(pmap, om, va);
 3098                 }
 3099                 if (mpte != NULL) {
 3100                         mpte->wire_count--;
 3101                         KASSERT(mpte->wire_count > 0,
 3102                             ("pmap_enter: missing reference to page table page,"
 3103                              " va: 0x%x", va));
 3104                 }
 3105         } else
 3106                 pmap->pm_stats.resident_count++;
 3107 
 3108         /*
 3109          * Enter on the PV list if part of our managed memory.
 3110          */
 3111         if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0) {
 3112                 KASSERT(va < kmi.clean_sva || va >= kmi.clean_eva,
 3113                     ("pmap_enter: managed mapping within the clean submap"));
 3114                 pmap_insert_entry(pmap, va, m);
 3115                 pa |= PG_MANAGED;
 3116         }
 3117 
 3118         /*
 3119          * Increment counters
 3120          */
 3121         if (wired)
 3122                 pmap->pm_stats.wired_count++;
 3123 
 3124 validate:
 3125         /*
 3126          * Now validate mapping with desired protection/wiring.
 3127          */
 3128         newpte = (pt_entry_t)(pa | PG_V);
 3129         if ((prot & VM_PROT_WRITE) != 0) {
 3130                 newpte |= PG_RW;
 3131                 vm_page_flag_set(m, PG_WRITEABLE);
 3132         }
 3133 #ifdef PAE
 3134         if ((prot & VM_PROT_EXECUTE) == 0)
 3135                 newpte |= pg_nx;
 3136 #endif
 3137         if (wired)
 3138                 newpte |= PG_W;
 3139         if (va < VM_MAXUSER_ADDRESS)
 3140                 newpte |= PG_U;
 3141         if (pmap == kernel_pmap)
 3142                 newpte |= pgeflag;
 3143 
 3144         /*
 3145          * if the mapping or permission bits are different, we need
 3146          * to update the pte.
 3147          */
 3148         if ((origpte & ~(PG_M|PG_A)) != newpte) {
 3149                 newpte |= PG_A;
 3150                 if ((access & VM_PROT_WRITE) != 0)
 3151                         newpte |= PG_M;
 3152                 if (origpte & PG_V) {
 3153                         invlva = FALSE;
 3154                         origpte = pte_load_store(pte, newpte);
 3155                         if (origpte & PG_A) {
 3156                                 if (origpte & PG_MANAGED)
 3157                                         vm_page_flag_set(om, PG_REFERENCED);
 3158                                 if (opa != VM_PAGE_TO_PHYS(m))
 3159                                         invlva = TRUE;
 3160 #ifdef PAE
 3161                                 if ((origpte & PG_NX) == 0 &&
 3162                                     (newpte & PG_NX) != 0)
 3163                                         invlva = TRUE;
 3164 #endif
 3165                         }
 3166                         if ((origpte & (PG_M | PG_RW)) == (PG_M | PG_RW)) {
 3167                                 if ((origpte & PG_MANAGED) != 0)
 3168                                         vm_page_dirty(om);
 3169                                 if ((prot & VM_PROT_WRITE) == 0)
 3170                                         invlva = TRUE;
 3171                         }
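                              /*
                               * A TLB entry can only exist for the old
                               * mapping if it was valid, so invlva is set
                               * only when (1) PG_A was set and either the
                               * physical page changed or (under PAE) execute
                               * permission was removed, or (2) the mapping
                               * was writable and dirty and write permission
                               * is being revoked.
                               */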
 3172                         if (invlva)
 3173                                 pmap_invalidate_page(pmap, va);
 3174                 } else
 3175                         pte_store(pte, newpte);
 3176         }
 3177 
 3178         /*
 3179          * If both the page table page and the reservation are fully
 3180          * populated, then attempt promotion.
 3181          */
 3182         if ((mpte == NULL || mpte->wire_count == NPTEPG) &&
 3183             pg_ps_enabled && vm_reserv_level_iffullpop(m) == 0)
 3184                 pmap_promote_pde(pmap, pde, va);
 3185 
 3186         sched_unpin();
 3187         vm_page_unlock_queues();
 3188         PMAP_UNLOCK(pmap);
 3189 }
 3190 
 3191 /*
 3192  * Tries to create a 2- or 4MB page mapping.  Returns TRUE if successful and
 3193  * FALSE otherwise.  Fails if (1) a page table page cannot be allocated without
 3194  * blocking, (2) a mapping already exists at the specified virtual address, or
 3195  * (3) a pv entry cannot be allocated without reclaiming another pv entry. 
 3196  */
 3197 static boolean_t
 3198 pmap_enter_pde(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot)
 3199 {
 3200         pd_entry_t *pde, newpde;
 3201 
 3202         mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 3203         PMAP_LOCK_ASSERT(pmap, MA_OWNED);
 3204         pde = pmap_pde(pmap, va);
 3205         if (*pde != 0) {
 3206                 CTR2(KTR_PMAP, "pmap_enter_pde: failure for va %#lx"
 3207                     " in pmap %p", va, pmap);
 3208                 return (FALSE);
 3209         }
 3210         newpde = VM_PAGE_TO_PHYS(m) | PG_PS | PG_V;
 3211         if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0) {
 3212                 newpde |= PG_MANAGED;
 3213 
 3214                 /*
 3215                  * Abort this mapping if its PV entry could not be created.
 3216                  */
 3217                 if (!pmap_pv_insert_pde(pmap, va, VM_PAGE_TO_PHYS(m))) {
 3218                         CTR2(KTR_PMAP, "pmap_enter_pde: failure for va %#lx"
 3219                             " in pmap %p", va, pmap);
 3220                         return (FALSE);
 3221                 }
 3222         }
 3223 #ifdef PAE
 3224         if ((prot & VM_PROT_EXECUTE) == 0)
 3225                 newpde |= pg_nx;
 3226 #endif
 3227         if (va < VM_MAXUSER_ADDRESS)
 3228                 newpde |= PG_U;
 3229 
 3230         /*
 3231          * Increment counters.
 3232          */
 3233         pmap->pm_stats.resident_count += NBPDR / PAGE_SIZE;
 3234 
 3235         /*
 3236          * Map the superpage.
 3237          */
 3238         pde_store(pde, newpde);
 3239 
 3240         pmap_pde_mappings++;
 3241         CTR2(KTR_PMAP, "pmap_enter_pde: success for va %#lx"
 3242             " in pmap %p", va, pmap);
 3243         return (TRUE);
 3244 }
 3245 
 3246 /*
 3247  * Maps a sequence of resident pages belonging to the same object.
 3248  * The sequence begins with the given page m_start.  This page is
 3249  * mapped at the given virtual address start.  Each subsequent page is
 3250  * mapped at a virtual address that is offset from start by the same
 3251  * amount as the page is offset from m_start within the object.  The
 3252  * last page in the sequence is the page with the largest offset from
 3253  * m_start that can be mapped at a virtual address less than the given
 3254  * virtual address end.  Not every virtual page between start and end
 3255  * is mapped; only those for which a resident page exists with the
 3256  * corresponding offset from m_start are mapped.
 3257  */
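      /*
       * Added note: in the loop below a 2- or 4MB mapping is attempted only
       * when all of the following hold; otherwise the page is entered with
       * an ordinary 4KB mapping:
       *
       *	(va & PDRMASK) == 0		superpage-aligned virtual address
       *	va + NBPDR <= end		the whole superpage fits in the range
       *	(pa & PDRMASK) == 0		superpage-aligned physical address
       *	pg_ps_enabled != 0		superpages administratively enabled
       *	vm_reserv_level_iffullpop(m) == 0	fully populated reservation
       *
       * where pa is VM_PAGE_TO_PHYS(m).
       */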
 3258 void
 3259 pmap_enter_object(pmap_t pmap, vm_offset_t start, vm_offset_t end,
 3260     vm_page_t m_start, vm_prot_t prot)
 3261 {
 3262         vm_offset_t va;
 3263         vm_page_t m, mpte;
 3264         vm_pindex_t diff, psize;
 3265 
 3266         VM_OBJECT_LOCK_ASSERT(m_start->object, MA_OWNED);
 3267         psize = atop(end - start);
 3268         mpte = NULL;
 3269         m = m_start;
 3270         PMAP_LOCK(pmap);
 3271         while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) {
 3272                 va = start + ptoa(diff);
 3273                 if ((va & PDRMASK) == 0 && va + NBPDR <= end &&
 3274                     (VM_PAGE_TO_PHYS(m) & PDRMASK) == 0 &&
 3275                     pg_ps_enabled && vm_reserv_level_iffullpop(m) == 0 &&
 3276                     pmap_enter_pde(pmap, va, m, prot))
 3277                         m = &m[NBPDR / PAGE_SIZE - 1];
 3278                 else
 3279                         mpte = pmap_enter_quick_locked(pmap, va, m, prot,
 3280                             mpte);
 3281                 m = TAILQ_NEXT(m, listq);
 3282         }
 3283         PMAP_UNLOCK(pmap);
 3284 }
 3285 
 3286 /*
 3287  * This code makes some *MAJOR* assumptions:
 3288  * 1. The current pmap and the given pmap exist.
 3289  * 2. Not wired.
 3290  * 3. Read access.
 3291  * 4. No page table pages.
 3292  * but it is *MUCH* faster than pmap_enter...
 3293  */
 3294 
 3295 void
 3296 pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot)
 3297 {
 3298 
 3299         PMAP_LOCK(pmap);
 3300         (void) pmap_enter_quick_locked(pmap, va, m, prot, NULL);
 3301         PMAP_UNLOCK(pmap);
 3302 }
 3303 
 3304 static vm_page_t
 3305 pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va, vm_page_t m,
 3306     vm_prot_t prot, vm_page_t mpte)
 3307 {
 3308         pt_entry_t *pte;
 3309         vm_paddr_t pa;
 3310         vm_page_t free;
 3311 
 3312         KASSERT(va < kmi.clean_sva || va >= kmi.clean_eva ||
 3313             (m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0,
 3314             ("pmap_enter_quick_locked: managed mapping within the clean submap"));
 3315         mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 3316         PMAP_LOCK_ASSERT(pmap, MA_OWNED);
 3317 
 3318         /*
 3319          * In the case that a page table page is not
 3320          * resident, we are creating it here.
 3321          */
 3322         if (va < VM_MAXUSER_ADDRESS) {
 3323                 unsigned ptepindex;
 3324                 pd_entry_t ptepa;
 3325 
 3326                 /*
 3327                  * Calculate pagetable page index
 3328                  */
 3329                 ptepindex = va >> PDRSHIFT;
 3330                 if (mpte && (mpte->pindex == ptepindex)) {
 3331                         mpte->wire_count++;
 3332                 } else {
 3333                         /*
 3334                          * Get the page directory entry
 3335                          */
 3336                         ptepa = pmap->pm_pdir[ptepindex];
 3337 
 3338                         /*
 3339                          * If the page table page is mapped, we just increment
 3340                          * the hold count, and activate it.
 3341                          */
 3342                         if (ptepa) {
 3343                                 if (ptepa & PG_PS)
 3344                                         return (NULL);
 3345                                 mpte = PHYS_TO_VM_PAGE(ptepa & PG_FRAME);
 3346                                 mpte->wire_count++;
 3347                         } else {
 3348                                 mpte = _pmap_allocpte(pmap, ptepindex,
 3349                                     M_NOWAIT);
 3350                                 if (mpte == NULL)
 3351                                         return (mpte);
 3352                         }
 3353                 }
 3354         } else {
 3355                 mpte = NULL;
 3356         }
 3357 
 3358         /*
 3359          * This call to vtopte makes the assumption that we are
 3360          * entering the page into the current pmap.  In order to support
 3361          * quick entry into any pmap, one would likely use pmap_pte_quick.
 3362          * But that isn't as quick as vtopte.
 3363          */
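              /*
               * vtopte() relies on the recursive page-table mapping: the page
               * directory is installed as one of its own entries, so the PTE
               * for any address va in the current pmap is itself addressable
               * at a fixed virtual location, roughly &PTmap[va >> PAGE_SHIFT].
               */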
 3364         pte = vtopte(va);
 3365         if (*pte) {
 3366                 if (mpte != NULL) {
 3367                         mpte->wire_count--;
 3368                         mpte = NULL;
 3369                 }
 3370                 return (mpte);
 3371         }
 3372 
 3373         /*
 3374          * Enter on the PV list if part of our managed memory.
 3375          */
 3376         if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0 &&
 3377             !pmap_try_insert_pv_entry(pmap, va, m)) {
 3378                 if (mpte != NULL) {
 3379                         free = NULL;
 3380                         if (pmap_unwire_pte_hold(pmap, mpte, &free)) {
 3381                                 pmap_invalidate_page(pmap, va);
 3382                                 pmap_free_zero_pages(free);
 3383                         }
 3384                         
 3385                         mpte = NULL;
 3386                 }
 3387                 return (mpte);
 3388         }
 3389 
 3390         /*
 3391          * Increment counters
 3392          */
 3393         pmap->pm_stats.resident_count++;
 3394 
 3395         pa = VM_PAGE_TO_PHYS(m);
 3396 #ifdef PAE
 3397         if ((prot & VM_PROT_EXECUTE) == 0)
 3398                 pa |= pg_nx;
 3399 #endif
 3400 
 3401         /*
 3402          * Now validate mapping with RO protection
 3403          */
 3404         if (m->flags & (PG_FICTITIOUS|PG_UNMANAGED))
 3405                 pte_store(pte, pa | PG_V | PG_U);
 3406         else
 3407                 pte_store(pte, pa | PG_V | PG_U | PG_MANAGED);
 3408         return mpte;
 3409 }
 3410 
 3411 /*
 3412  * Make a temporary mapping for a physical address.  This is only intended
 3413  * to be used for panic dumps.
 3414  */
 3415 void *
 3416 pmap_kenter_temporary(vm_paddr_t pa, int i)
 3417 {
 3418         vm_offset_t va;
 3419 
 3420         va = (vm_offset_t)crashdumpmap + (i * PAGE_SIZE);
 3421         pmap_kenter(va, pa);
 3422         invlpg(va);
 3423         return ((void *)crashdumpmap);
 3424 }
 3425 
 3426 /*
 3427  * This code maps large physical mmap regions into the
 3428  * processor address space.  Note that some shortcuts
 3429  * are taken, but the code works.
 3430  */
 3431 void
 3432 pmap_object_init_pt(pmap_t pmap, vm_offset_t addr, vm_object_t object,
 3433     vm_pindex_t pindex, vm_size_t size)
 3434 {
 3435         vm_page_t p;
 3436 
 3437         VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
 3438         KASSERT(object->type == OBJT_DEVICE,
 3439             ("pmap_object_init_pt: non-device object"));
 3440         if (pseflag && 
 3441             ((addr & (NBPDR - 1)) == 0) && ((size & (NBPDR - 1)) == 0)) {
 3442                 int i;
 3443                 vm_page_t m[1];
 3444                 unsigned int ptepindex;
 3445                 int npdes;
 3446                 pd_entry_t ptepa;
 3447 
 3448                 PMAP_LOCK(pmap);
 3449                 if (pmap->pm_pdir[ptepindex = (addr >> PDRSHIFT)])
 3450                         goto out;
 3451                 PMAP_UNLOCK(pmap);
 3452 retry:
 3453                 p = vm_page_lookup(object, pindex);
 3454                 if (p != NULL) {
 3455                         if (vm_page_sleep_if_busy(p, FALSE, "init4p"))
 3456                                 goto retry;
 3457                 } else {
 3458                         p = vm_page_alloc(object, pindex, VM_ALLOC_NORMAL);
 3459                         if (p == NULL)
 3460                                 return;
 3461                         m[0] = p;
 3462 
 3463                         if (vm_pager_get_pages(object, m, 1, 0) != VM_PAGER_OK) {
 3464                                 vm_page_lock_queues();
 3465                                 vm_page_free(p);
 3466                                 vm_page_unlock_queues();
 3467                                 return;
 3468                         }
 3469 
 3470                         p = vm_page_lookup(object, pindex);
 3471                         vm_page_wakeup(p);
 3472                 }
 3473 
 3474                 ptepa = VM_PAGE_TO_PHYS(p);
 3475                 if (ptepa & (NBPDR - 1))
 3476                         return;
 3477 
 3478                 p->valid = VM_PAGE_BITS_ALL;
 3479 
 3480                 PMAP_LOCK(pmap);
 3481                 pmap->pm_stats.resident_count += size >> PAGE_SHIFT;
 3482                 npdes = size >> PDRSHIFT;
 3483                 for(i = 0; i < npdes; i++) {
 3484                         pde_store(&pmap->pm_pdir[ptepindex],
 3485                             ptepa | PG_U | PG_RW | PG_V | PG_PS);
 3486                         ptepa += NBPDR;
 3487                         ptepindex += 1;
 3488                 }
 3489                 pmap_invalidate_all(pmap);
 3490 out:
 3491                 PMAP_UNLOCK(pmap);
 3492         }
 3493 }
 3494 
 3495 /*
 3496  *      Routine:        pmap_change_wiring
 3497  *      Function:       Change the wiring attribute for a map/virtual-address
 3498  *                      pair.
 3499  *      In/out conditions:
 3500  *                      The mapping must already exist in the pmap.
 3501  */
 3502 void
 3503 pmap_change_wiring(pmap_t pmap, vm_offset_t va, boolean_t wired)
 3504 {
 3505         pd_entry_t *pde;
 3506         pt_entry_t *pte;
 3507         boolean_t are_queues_locked;
 3508 
 3509         are_queues_locked = FALSE;
 3510 retry:
 3511         PMAP_LOCK(pmap);
 3512         pde = pmap_pde(pmap, va);
 3513         if ((*pde & PG_PS) != 0) {
 3514                 if (!wired != ((*pde & PG_W) == 0)) {
 3515                         if (!are_queues_locked) {
 3516                                 are_queues_locked = TRUE;
 3517                                 if (!mtx_trylock(&vm_page_queue_mtx)) {
 3518                                         PMAP_UNLOCK(pmap);
 3519                                         vm_page_lock_queues();
 3520                                         goto retry;
 3521                                 }
 3522                         }
 3523                         if (!pmap_demote_pde(pmap, pde, va))
 3524                                 panic("pmap_change_wiring: demotion failed");
 3525                 } else
 3526                         goto out;
 3527         }
 3528         pte = pmap_pte(pmap, va);
 3529 
 3530         if (wired && !pmap_pte_w(pte))
 3531                 pmap->pm_stats.wired_count++;
 3532         else if (!wired && pmap_pte_w(pte))
 3533                 pmap->pm_stats.wired_count--;
 3534 
 3535         /*
 3536          * Wiring is not a hardware characteristic so there is no need to
 3537          * invalidate TLB.
 3538          */
 3539         pmap_pte_set_w(pte, wired);
 3540         pmap_pte_release(pte);
 3541 out:
 3542         if (are_queues_locked)
 3543                 vm_page_unlock_queues();
 3544         PMAP_UNLOCK(pmap);
 3545 }
 3546 
 3547 
 3548 
 3549 /*
 3550  *      Copy the range specified by src_addr/len
 3551  *      from the source map to the range dst_addr/len
 3552  *      in the destination map.
 3553  *
 3554  *      This routine is only advisory and need not do anything.
 3555  */
 3556 
 3557 void
 3558 pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr, vm_size_t len,
 3559     vm_offset_t src_addr)
 3560 {
 3561         vm_page_t   free;
 3562         vm_offset_t addr;
 3563         vm_offset_t end_addr = src_addr + len;
 3564         vm_offset_t pdnxt;
 3565 
 3566         if (dst_addr != src_addr)
 3567                 return;
 3568 
 3569         if (!pmap_is_current(src_pmap))
 3570                 return;
 3571 
 3572         vm_page_lock_queues();
 3573         if (dst_pmap < src_pmap) {
 3574                 PMAP_LOCK(dst_pmap);
 3575                 PMAP_LOCK(src_pmap);
 3576         } else {
 3577                 PMAP_LOCK(src_pmap);
 3578                 PMAP_LOCK(dst_pmap);
 3579         }
 3580         sched_pin();
 3581         for (addr = src_addr; addr < end_addr; addr = pdnxt) {
 3582                 pt_entry_t *src_pte, *dst_pte;
 3583                 vm_page_t dstmpte, srcmpte;
 3584                 pd_entry_t srcptepaddr;
 3585                 unsigned ptepindex;
 3586 
 3587                 KASSERT(addr < UPT_MIN_ADDRESS,
 3588                     ("pmap_copy: invalid to pmap_copy page tables"));
 3589 
 3590                 pdnxt = (addr + NBPDR) & ~PDRMASK;
 3591                 if (pdnxt < addr)
 3592                         pdnxt = end_addr;
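                      /*
                       * pdnxt is the start of the next superpage-aligned
                       * region; the "pdnxt < addr" test catches arithmetic
                       * overflow when addr falls in the last page-directory
                       * slot, in which case the copy stops at end_addr.
                       */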
 3593                 ptepindex = addr >> PDRSHIFT;
 3594 
 3595                 srcptepaddr = src_pmap->pm_pdir[ptepindex];
 3596                 if (srcptepaddr == 0)
 3597                         continue;
 3598                         
 3599                 if (srcptepaddr & PG_PS) {
 3600                         if (dst_pmap->pm_pdir[ptepindex] == 0 &&
 3601                             ((srcptepaddr & PG_MANAGED) == 0 ||
 3602                             pmap_pv_insert_pde(dst_pmap, addr, srcptepaddr &
 3603                             PG_PS_FRAME))) {
 3604                                 dst_pmap->pm_pdir[ptepindex] = srcptepaddr &
 3605                                     ~PG_W;
 3606                                 dst_pmap->pm_stats.resident_count +=
 3607                                     NBPDR / PAGE_SIZE;
 3608                         }
 3609                         continue;
 3610                 }
 3611 
 3612                 srcmpte = PHYS_TO_VM_PAGE(srcptepaddr & PG_FRAME);
 3613                 KASSERT(srcmpte->wire_count > 0,
 3614                     ("pmap_copy: source page table page is unused"));
 3615 
 3616                 if (pdnxt > end_addr)
 3617                         pdnxt = end_addr;
 3618 
 3619                 src_pte = vtopte(addr);
 3620                 while (addr < pdnxt) {
 3621                         pt_entry_t ptetemp;
 3622                         ptetemp = *src_pte;
 3623                         /*
 3624                          * We only virtual-copy managed pages.
 3625                          */
 3626                         if ((ptetemp & PG_MANAGED) != 0) {
 3627                                 dstmpte = pmap_allocpte(dst_pmap, addr,
 3628                                     M_NOWAIT);
 3629                                 if (dstmpte == NULL)
 3630                                         break;
 3631                                 dst_pte = pmap_pte_quick(dst_pmap, addr);
 3632                                 if (*dst_pte == 0 &&
 3633                                     pmap_try_insert_pv_entry(dst_pmap, addr,
 3634                                     PHYS_TO_VM_PAGE(ptetemp & PG_FRAME))) {
 3635                                         /*
 3636                                          * Clear the wired, modified, and
 3637                                          * accessed (referenced) bits
 3638                                          * during the copy.
 3639                                          */
 3640                                         *dst_pte = ptetemp & ~(PG_W | PG_M |
 3641                                             PG_A);
 3642                                         dst_pmap->pm_stats.resident_count++;
 3643                                 } else {
 3644                                         free = NULL;
 3645                                         if (pmap_unwire_pte_hold( dst_pmap,
 3646                                             dstmpte, &free)) {
 3647                                                 pmap_invalidate_page(dst_pmap,
 3648                                                     addr);
 3649                                                 pmap_free_zero_pages(free);
 3650                                         }
 3651                                 }
 3652                                 if (dstmpte->wire_count >= srcmpte->wire_count)
 3653                                         break;
 3654                         }
 3655                         addr += PAGE_SIZE;
 3656                         src_pte++;
 3657                 }
 3658         }
 3659         sched_unpin();
 3660         vm_page_unlock_queues();
 3661         PMAP_UNLOCK(src_pmap);
 3662         PMAP_UNLOCK(dst_pmap);
 3663 }       
 3664 
 3665 static __inline void
 3666 pagezero(void *page)
 3667 {
 3668 #if defined(I686_CPU)
 3669         if (cpu_class == CPUCLASS_686) {
 3670 #if defined(CPU_ENABLE_SSE)
 3671                 if (cpu_feature & CPUID_SSE2)
 3672                         sse2_pagezero(page);
 3673                 else
 3674 #endif
 3675                         i686_pagezero(page);
 3676         } else
 3677 #endif
 3678                 bzero(page, PAGE_SIZE);
 3679 }
 3680 
 3681 /*
 3682  *      pmap_zero_page zeros the specified hardware page by mapping 
 3683  *      the page into KVM and using bzero to clear its contents.
 3684  */
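      /*
       * Implementation note (added): each CPU owns a private one-page mapping
       * window (CMAP2/CADDR2 in its sysmaps structure).  sched_pin() keeps the
       * thread on its current CPU while the window is in use, and the sysmaps
       * mutex serializes users of that window, so only a local TLB
       * invalidation is needed when the temporary mapping is changed.
       */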
 3685 void
 3686 pmap_zero_page(vm_page_t m)
 3687 {
 3688         struct sysmaps *sysmaps;
 3689 
 3690         sysmaps = &sysmaps_pcpu[PCPU_GET(cpuid)];
 3691         mtx_lock(&sysmaps->lock);
 3692         if (*sysmaps->CMAP2)
 3693                 panic("pmap_zero_page: CMAP2 busy");
 3694         sched_pin();
 3695         *sysmaps->CMAP2 = PG_V | PG_RW | VM_PAGE_TO_PHYS(m) | PG_A | PG_M;
 3696         invlcaddr(sysmaps->CADDR2);
 3697         pagezero(sysmaps->CADDR2);
 3698         *sysmaps->CMAP2 = 0;
 3699         sched_unpin();
 3700         mtx_unlock(&sysmaps->lock);
 3701 }
 3702 
 3703 /*
 3704  *      pmap_zero_page_area zeros the specified hardware page by mapping 
 3705  *      the page into KVM and using bzero to clear its contents.
 3706  *
 3707  *      off and size may not cover an area beyond a single hardware page.
 3708  */
 3709 void
 3710 pmap_zero_page_area(vm_page_t m, int off, int size)
 3711 {
 3712         struct sysmaps *sysmaps;
 3713 
 3714         sysmaps = &sysmaps_pcpu[PCPU_GET(cpuid)];
 3715         mtx_lock(&sysmaps->lock);
 3716         if (*sysmaps->CMAP2)
 3717                 panic("pmap_zero_page_area: CMAP2 busy");
 3718         sched_pin();
 3719         *sysmaps->CMAP2 = PG_V | PG_RW | VM_PAGE_TO_PHYS(m) | PG_A | PG_M;
 3720         invlcaddr(sysmaps->CADDR2);
 3721         if (off == 0 && size == PAGE_SIZE) 
 3722                 pagezero(sysmaps->CADDR2);
 3723         else
 3724                 bzero((char *)sysmaps->CADDR2 + off, size);
 3725         *sysmaps->CMAP2 = 0;
 3726         sched_unpin();
 3727         mtx_unlock(&sysmaps->lock);
 3728 }
 3729 
 3730 /*
 3731  *      pmap_zero_page_idle zeros the specified hardware page by mapping 
 3732  *      the page into KVM and using bzero to clear its contents.  This
 3733  *      is intended to be called from the vm_pagezero process only and
 3734  *      outside of Giant.
 3735  */
 3736 void
 3737 pmap_zero_page_idle(vm_page_t m)
 3738 {
 3739 
 3740         if (*CMAP3)
 3741                 panic("pmap_zero_page_idle: CMAP3 busy");
 3742         sched_pin();
 3743         *CMAP3 = PG_V | PG_RW | VM_PAGE_TO_PHYS(m) | PG_A | PG_M;
 3744         invlcaddr(CADDR3);
 3745         pagezero(CADDR3);
 3746         *CMAP3 = 0;
 3747         sched_unpin();
 3748 }
 3749 
 3750 /*
 3751  *      pmap_copy_page copies the specified (machine independent)
 3752  *      page by mapping the page into virtual memory and using
 3753  *      bcopy to copy the page, one machine dependent page at a
 3754  *      time.
 3755  */
 3756 void
 3757 pmap_copy_page(vm_page_t src, vm_page_t dst)
 3758 {
 3759         struct sysmaps *sysmaps;
 3760 
 3761         sysmaps = &sysmaps_pcpu[PCPU_GET(cpuid)];
 3762         mtx_lock(&sysmaps->lock);
 3763         if (*sysmaps->CMAP1)
 3764                 panic("pmap_copy_page: CMAP1 busy");
 3765         if (*sysmaps->CMAP2)
 3766                 panic("pmap_copy_page: CMAP2 busy");
 3767         sched_pin();
 3768         invlpg((u_int)sysmaps->CADDR1);
 3769         invlpg((u_int)sysmaps->CADDR2);
 3770         *sysmaps->CMAP1 = PG_V | VM_PAGE_TO_PHYS(src) | PG_A;
 3771         *sysmaps->CMAP2 = PG_V | PG_RW | VM_PAGE_TO_PHYS(dst) | PG_A | PG_M;
 3772         bcopy(sysmaps->CADDR1, sysmaps->CADDR2, PAGE_SIZE);
 3773         *sysmaps->CMAP1 = 0;
 3774         *sysmaps->CMAP2 = 0;
 3775         sched_unpin();
 3776         mtx_unlock(&sysmaps->lock);
 3777 }
 3778 
 3779 /*
 3780  * Returns true if the pmap's pv is one of the first
 3781  * 16 pvs linked to from this page.  This count may
 3782  * be changed upwards or downwards in the future; it
 3783  * is only necessary that true be returned for a small
 3784  * subset of pmaps for proper page aging.
 3785  */
 3786 boolean_t
 3787 pmap_page_exists_quick(pmap_t pmap, vm_page_t m)
 3788 {
 3789         struct md_page *pvh;
 3790         pv_entry_t pv;
 3791         int loops = 0;
 3792 
 3793         if (m->flags & PG_FICTITIOUS)
 3794                 return FALSE;
 3795 
 3796         mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 3797         TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
 3798                 if (PV_PMAP(pv) == pmap) {
 3799                         return TRUE;
 3800                 }
 3801                 loops++;
 3802                 if (loops >= 16)
 3803                         break;
 3804         }
 3805         if (loops < 16) {
 3806                 pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
 3807                 TAILQ_FOREACH(pv, &pvh->pv_list, pv_list) {
 3808                         if (PV_PMAP(pv) == pmap)
 3809                                 return (TRUE);
 3810                         loops++;
 3811                         if (loops >= 16)
 3812                                 break;
 3813                 }
 3814         }
 3815         return (FALSE);
 3816 }
 3817 
 3818 /*
 3819  * Returns TRUE if the given page is mapped individually or as part of
 3820  * a 4mpage.  Otherwise, returns FALSE.
 3821  */
 3822 boolean_t
 3823 pmap_page_is_mapped(vm_page_t m)
 3824 {
 3825         struct md_page *pvh;
 3826 
 3827         if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0)
 3828                 return (FALSE);
 3829         mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 3830         if (TAILQ_EMPTY(&m->md.pv_list)) {
 3831                 pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
 3832                 return (!TAILQ_EMPTY(&pvh->pv_list));
 3833         } else
 3834                 return (TRUE);
 3835 }
 3836 
 3837 /*
 3838  * Remove all pages from the specified address space;
 3839  * this aids process exit speed.  Also, this code is
 3840  * special-cased for the current process only, but it
 3841  * can have the more generic (and slightly slower)
 3842  * mode enabled.  This is much faster than pmap_remove
 3843  * in the case of running down an entire address space.
 3844  */
 3845 void
 3846 pmap_remove_pages(pmap_t pmap)
 3847 {
 3848         pt_entry_t *pte, tpte;
 3849         vm_page_t free = NULL;
 3850         vm_page_t m, mpte, mt;
 3851         pv_entry_t pv;
 3852         struct md_page *pvh;
 3853         struct pv_chunk *pc, *npc;
 3854         int field, idx;
 3855         int32_t bit;
 3856         uint32_t inuse, bitmask;
 3857         int allfree;
 3858 
 3859         if (pmap != vmspace_pmap(curthread->td_proc->p_vmspace)) {
 3860                 printf("warning: pmap_remove_pages called with non-current pmap\n");
 3861                 return;
 3862         }
 3863         vm_page_lock_queues();
 3864         PMAP_LOCK(pmap);
 3865         sched_pin();
 3866         TAILQ_FOREACH_SAFE(pc, &pmap->pm_pvchunk, pc_list, npc) {
 3867                 allfree = 1;
 3868                 for (field = 0; field < _NPCM; field++) {
 3869                         inuse = (~(pc->pc_map[field])) & pc_freemask[field];
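                              /*
                               * A clear bit in pc_map marks a pv entry that
                               * is in use, so "inuse" has a bit set for each
                               * allocated entry in this field.  For example,
                               * pc_map[field] == 0xfffffff0 means entries
                               * 0-3 are allocated; bsfl() then yields 0, 1,
                               * 2 and 3 on successive iterations below.
                               */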
 3870                         while (inuse != 0) {
 3871                                 bit = bsfl(inuse);
 3872                                 bitmask = 1UL << bit;
 3873                                 idx = field * 32 + bit;
 3874                                 pv = &pc->pc_pventry[idx];
 3875                                 inuse &= ~bitmask;
 3876 
 3877                                 pte = pmap_pde(pmap, pv->pv_va);
 3878                                 tpte = *pte;
 3879                                 if ((tpte & PG_PS) == 0) {
 3880                                         pte = vtopte(pv->pv_va);
 3881                                         tpte = *pte & ~PG_PTE_PAT;
 3882                                 }
 3883 
 3884                                 if (tpte == 0) {
 3885                                         printf(
 3886                                             "TPTE at %p  IS ZERO @ VA %08x\n",
 3887                                             pte, pv->pv_va);
 3888                                         panic("bad pte");
 3889                                 }
 3890 
 3891 /*
 3892  * We cannot remove wired pages from a process' mapping at this time
 3893  */
 3894                                 if (tpte & PG_W) {
 3895                                         allfree = 0;
 3896                                         continue;
 3897                                 }
 3898 
 3899                                 m = PHYS_TO_VM_PAGE(tpte & PG_FRAME);
 3900                                 KASSERT(m->phys_addr == (tpte & PG_FRAME),
 3901                                     ("vm_page_t %p phys_addr mismatch %016jx %016jx",
 3902                                     m, (uintmax_t)m->phys_addr,
 3903                                     (uintmax_t)tpte));
 3904 
 3905                                 KASSERT(m < &vm_page_array[vm_page_array_size],
 3906                                         ("pmap_remove_pages: bad tpte %#jx",
 3907                                         (uintmax_t)tpte));
 3908 
 3909                                 pte_clear(pte);
 3910 
 3911                                 /*
 3912                                  * Update the vm_page_t clean/reference bits.
 3913                                  */
 3914                                 if ((tpte & (PG_M | PG_RW)) == (PG_M | PG_RW)) {
 3915                                         if ((tpte & PG_PS) != 0) {
 3916                                                 for (mt = m; mt < &m[NBPDR / PAGE_SIZE]; mt++)
 3917                                                         vm_page_dirty(mt);
 3918                                         } else
 3919                                                 vm_page_dirty(m);
 3920                                 }
 3921 
 3922                                 /* Mark free */
 3923                                 PV_STAT(pv_entry_frees++);
 3924                                 PV_STAT(pv_entry_spare++);
 3925                                 pv_entry_count--;
 3926                                 pc->pc_map[field] |= bitmask;
 3927                                 if ((tpte & PG_PS) != 0) {
 3928                                         pmap->pm_stats.resident_count -= NBPDR / PAGE_SIZE;
 3929                                         pvh = pa_to_pvh(tpte & PG_PS_FRAME);
 3930                                         TAILQ_REMOVE(&pvh->pv_list, pv, pv_list);
 3931                                         if (TAILQ_EMPTY(&pvh->pv_list)) {
 3932                                                 for (mt = m; mt < &m[NBPDR / PAGE_SIZE]; mt++)
 3933                                                         if (TAILQ_EMPTY(&mt->md.pv_list))
 3934                                                                 vm_page_flag_clear(mt, PG_WRITEABLE);
 3935                                         }
 3936                                         mpte = pmap_lookup_pt_page(pmap, pv->pv_va);
 3937                                         if (mpte != NULL) {
 3938                                                 pmap_remove_pt_page(pmap, mpte);
 3939                                                 pmap->pm_stats.resident_count--;
 3940                                                 KASSERT(mpte->wire_count == NPTEPG,
 3941                                                     ("pmap_remove_pages: pte page wire count error"));
 3942                                                 mpte->wire_count = 0;
 3943                                                 pmap_add_delayed_free_list(mpte, &free, FALSE);
 3944                                                 atomic_subtract_int(&cnt.v_wire_count, 1);
 3945                                         }
 3946                                 } else {
 3947                                         pmap->pm_stats.resident_count--;
 3948                                         TAILQ_REMOVE(&m->md.pv_list, pv, pv_list);
 3949                                         if (TAILQ_EMPTY(&m->md.pv_list)) {
 3950                                                 pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
 3951                                                 if (TAILQ_EMPTY(&pvh->pv_list))
 3952                                                         vm_page_flag_clear(m, PG_WRITEABLE);
 3953                                         }
 3954                                         pmap_unuse_pt(pmap, pv->pv_va, &free);
 3955                                 }
 3956                         }
 3957                 }
 3958                 if (allfree) {
 3959                         PV_STAT(pv_entry_spare -= _NPCPV);
 3960                         PV_STAT(pc_chunk_count--);
 3961                         PV_STAT(pc_chunk_frees++);
 3962                         TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
 3963                         m = PHYS_TO_VM_PAGE(pmap_kextract((vm_offset_t)pc));
 3964                         pmap_qremove((vm_offset_t)pc, 1);
 3965                         vm_page_unwire(m, 0);
 3966                         vm_page_free(m);
 3967                         pmap_ptelist_free(&pv_vafree, (vm_offset_t)pc);
 3968                 }
 3969         }
 3970         sched_unpin();
 3971         pmap_invalidate_all(pmap);
 3972         vm_page_unlock_queues();
 3973         PMAP_UNLOCK(pmap);
 3974         pmap_free_zero_pages(free);
 3975 }
 3976 
 3977 /*
 3978  *      pmap_is_modified:
 3979  *
 3980  *      Return whether or not the specified physical page was modified
 3981  *      in any physical maps.
 3982  */
 3983 boolean_t
 3984 pmap_is_modified(vm_page_t m)
 3985 {
 3986 
 3987         if (m->flags & PG_FICTITIOUS)
 3988                 return (FALSE);
 3989         if (pmap_is_modified_pvh(&m->md))
 3990                 return (TRUE);
 3991         return (pmap_is_modified_pvh(pa_to_pvh(VM_PAGE_TO_PHYS(m))));
 3992 }
 3993 
 3994 /*
 3995  * Returns TRUE if any of the given mappings were used to modify
 3996  * physical memory.  Otherwise, returns FALSE.  Both 4KB page and 2/4MB page
 3997  * mappings are supported.
 3998  */
 3999 static boolean_t
 4000 pmap_is_modified_pvh(struct md_page *pvh)
 4001 {
 4002         pv_entry_t pv;
 4003         pt_entry_t *pte;
 4004         pmap_t pmap;
 4005         boolean_t rv;
 4006 
 4007         mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 4008         rv = FALSE;
 4009         sched_pin();
 4010         TAILQ_FOREACH(pv, &pvh->pv_list, pv_list) {
 4011                 pmap = PV_PMAP(pv);
 4012                 PMAP_LOCK(pmap);
 4013                 pte = pmap_pte_quick(pmap, pv->pv_va);
 4014                 rv = (*pte & (PG_M | PG_RW)) == (PG_M | PG_RW);
 4015                 PMAP_UNLOCK(pmap);
 4016                 if (rv)
 4017                         break;
 4018         }
 4019         sched_unpin();
 4020         return (rv);
 4021 }
 4022 
 4023 /*
 4024  *      pmap_is_prefaultable:
 4025  *
 4026  *      Return whether or not the specified virtual address is eligible
 4027  *      for prefault.
 4028  */
 4029 boolean_t
 4030 pmap_is_prefaultable(pmap_t pmap, vm_offset_t addr)
 4031 {
 4032         pd_entry_t *pde;
 4033         pt_entry_t *pte;
 4034         boolean_t rv;
 4035 
 4036         rv = FALSE;
 4037         PMAP_LOCK(pmap);
 4038         pde = pmap_pde(pmap, addr);
 4039         if (*pde != 0 && (*pde & PG_PS) == 0) {
 4040                 pte = vtopte(addr);
 4041                 rv = *pte == 0;
 4042         }
 4043         PMAP_UNLOCK(pmap);
 4044         return (rv);
 4045 }
 4046 
 4047 /*
 4048  * Clear the write and modified bits in each of the given page's mappings.
 4049  */
 4050 void
 4051 pmap_remove_write(vm_page_t m)
 4052 {
 4053         struct md_page *pvh;
 4054         pv_entry_t next_pv, pv;
 4055         pmap_t pmap;
 4056         pd_entry_t *pde;
 4057         pt_entry_t oldpte, *pte;
 4058         vm_offset_t va;
 4059 
 4060         mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 4061         if ((m->flags & PG_FICTITIOUS) != 0 ||
 4062             (m->flags & PG_WRITEABLE) == 0)
 4063                 return;
 4064         sched_pin();
 4065         pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
 4066         TAILQ_FOREACH_SAFE(pv, &pvh->pv_list, pv_list, next_pv) {
 4067                 va = pv->pv_va;
 4068                 pmap = PV_PMAP(pv);
 4069                 PMAP_LOCK(pmap);
 4070                 pde = pmap_pde(pmap, va);
 4071                 if ((*pde & PG_RW) != 0)
 4072                         (void)pmap_demote_pde(pmap, pde, va);
 4073                 PMAP_UNLOCK(pmap);
 4074         }
 4075         TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
 4076                 pmap = PV_PMAP(pv);
 4077                 PMAP_LOCK(pmap);
 4078                 pde = pmap_pde(pmap, pv->pv_va);
 4079                 KASSERT((*pde & PG_PS) == 0, ("pmap_remove_write: found"
 4080                     " a 4mpage in page %p's pv list", m));
 4081                 pte = pmap_pte_quick(pmap, pv->pv_va);
 4082 retry:
 4083                 oldpte = *pte;
 4084                 if ((oldpte & PG_RW) != 0) {
 4085                         /*
 4086                          * Regardless of whether a pte is 32 or 64 bits
 4087                          * in size, PG_RW and PG_M are among the least
 4088                          * significant 32 bits.
 4089                          */
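                              /*
                               * The compare-and-set may fail if the CPU
                               * concurrently sets PG_A or PG_M in this PTE;
                               * retrying from the freshly reloaded value
                               * ensures that a hardware PG_M update is never
                               * lost while PG_RW and PG_M are being cleared.
                               */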
 4090                         if (!atomic_cmpset_int((u_int *)pte, oldpte,
 4091                             oldpte & ~(PG_RW | PG_M)))
 4092                                 goto retry;
 4093                         if ((oldpte & PG_M) != 0)
 4094                                 vm_page_dirty(m);
 4095                         pmap_invalidate_page(pmap, pv->pv_va);
 4096                 }
 4097                 PMAP_UNLOCK(pmap);
 4098         }
 4099         vm_page_flag_clear(m, PG_WRITEABLE);
 4100         sched_unpin();
 4101 }
 4102 
 4103 /*
 4104  *      pmap_ts_referenced:
 4105  *
 4106  *      Return a count of reference bits for a page, clearing those bits.
 4107  *      It is not necessary for every reference bit to be cleared, but it
 4108  *      is necessary that 0 only be returned when there are truly no
 4109  *      reference bits set.
 4110  *
 4111  *      XXX: The exact number of bits to check and clear is a matter that
 4112  *      should be tested and standardized at some point in the future for
 4113  *      optimal aging of shared pages.
 4114  */
 4115 int
 4116 pmap_ts_referenced(vm_page_t m)
 4117 {
 4118         struct md_page *pvh;
 4119         pv_entry_t pv, pvf, pvn;
 4120         pmap_t pmap;
 4121         pd_entry_t oldpde, *pde;
 4122         pt_entry_t *pte;
 4123         vm_offset_t va;
 4124         int rtval = 0;
 4125 
 4126         if (m->flags & PG_FICTITIOUS)
 4127                 return (rtval);
 4128         sched_pin();
 4129         mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 4130         pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
 4131         TAILQ_FOREACH_SAFE(pv, &pvh->pv_list, pv_list, pvn) {
 4132                 va = pv->pv_va;
 4133                 pmap = PV_PMAP(pv);
 4134                 PMAP_LOCK(pmap);
 4135                 pde = pmap_pde(pmap, va);
 4136                 oldpde = *pde;
 4137                 if ((oldpde & PG_A) != 0) {
 4138                         if (pmap_demote_pde(pmap, pde, va)) {
 4139                                 if ((oldpde & PG_W) == 0) {
 4140                                         /*
 4141                                          * Remove the mapping to a single page
 4142                                          * so that a subsequent access may
 4143                                          * repromote.  Since the underlying
 4144                                          * page table page is fully populated,
 4145                                          * this removal never frees a page
 4146                                          * table page.
 4147                                          */
 4148                                         va += VM_PAGE_TO_PHYS(m) - (oldpde &
 4149                                             PG_PS_FRAME);
 4150                                         pmap_remove_page(pmap, va, NULL);
 4151                                         rtval++;
 4152                                         if (rtval > 4) {
 4153                                                 PMAP_UNLOCK(pmap);
 4154                                                 return (rtval);
 4155                                         }
 4156                                 }
 4157                         }
 4158                 }
 4159                 PMAP_UNLOCK(pmap);
 4160         }
 4161         if ((pv = TAILQ_FIRST(&m->md.pv_list)) != NULL) {
 4162                 pvf = pv;
 4163                 do {
 4164                         pvn = TAILQ_NEXT(pv, pv_list);
 4165                         TAILQ_REMOVE(&m->md.pv_list, pv, pv_list);
 4166                         TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_list);
 4167                         pmap = PV_PMAP(pv);
 4168                         PMAP_LOCK(pmap);
 4169                         pde = pmap_pde(pmap, pv->pv_va);
 4170                         KASSERT((*pde & PG_PS) == 0, ("pmap_ts_referenced:"
 4171                             " found a 4mpage in page %p's pv list", m));
 4172                         pte = pmap_pte_quick(pmap, pv->pv_va);
 4173                         if ((*pte & PG_A) != 0) {
 4174                                 atomic_clear_int((u_int *)pte, PG_A);
 4175                                 pmap_invalidate_page(pmap, pv->pv_va);
 4176                                 rtval++;
 4177                                 if (rtval > 4)
 4178                                         pvn = NULL;
 4179                         }
 4180                         PMAP_UNLOCK(pmap);
 4181                 } while ((pv = pvn) != NULL && pv != pvf);
 4182         }
 4183         sched_unpin();
 4184         return (rtval);
 4185 }
 4186 
 4187 /*
 4188  *      Clear the modify bits on the specified physical page.
 4189  */
 4190 void
 4191 pmap_clear_modify(vm_page_t m)
 4192 {
 4193         struct md_page *pvh;
 4194         pv_entry_t next_pv, pv;
 4195         pmap_t pmap;
 4196         pd_entry_t oldpde, *pde;
 4197         pt_entry_t oldpte, *pte;
 4198         vm_offset_t va;
 4199 
 4200         mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 4201         if ((m->flags & PG_FICTITIOUS) != 0)
 4202                 return;
 4203         sched_pin();
 4204         pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
 4205         TAILQ_FOREACH_SAFE(pv, &pvh->pv_list, pv_list, next_pv) {
 4206                 va = pv->pv_va;
 4207                 pmap = PV_PMAP(pv);
 4208                 PMAP_LOCK(pmap);
 4209                 pde = pmap_pde(pmap, va);
 4210                 oldpde = *pde;
 4211                 if ((oldpde & PG_RW) != 0) {
 4212                         if (pmap_demote_pde(pmap, pde, va)) {
 4213                                 if ((oldpde & PG_W) == 0) {
 4214                                         /*
 4215                                          * Write protect the mapping to a
 4216                                          * single page so that a subsequent
 4217                                          * write access may repromote.
 4218                                          */
 4219                                         va += VM_PAGE_TO_PHYS(m) - (oldpde &
 4220                                             PG_PS_FRAME);
 4221                                         pte = pmap_pte_quick(pmap, va);
 4222                                         oldpte = *pte;
 4223                                         if ((oldpte & PG_V) != 0) {
 4224                                                 /*
 4225                                                  * Regardless of whether a pte is 32 or 64 bits
 4226                                                  * in size, PG_RW and PG_M are among the least
 4227                                                  * significant 32 bits.
 4228                                                  */
 4229                                                 while (!atomic_cmpset_int((u_int *)pte,
 4230                                                     oldpte,
 4231                                                     oldpte & ~(PG_M | PG_RW)))
 4232                                                         oldpte = *pte;
 4233                                                 vm_page_dirty(m);
 4234                                                 pmap_invalidate_page(pmap, va);
 4235                                         }
 4236                                 }
 4237                         }
 4238                 }
 4239                 PMAP_UNLOCK(pmap);
 4240         }
 4241         TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
 4242                 pmap = PV_PMAP(pv);
 4243                 PMAP_LOCK(pmap);
 4244                 pde = pmap_pde(pmap, pv->pv_va);
 4245                 KASSERT((*pde & PG_PS) == 0, ("pmap_clear_modify: found"
 4246                     " a 4mpage in page %p's pv list", m));
 4247                 pte = pmap_pte_quick(pmap, pv->pv_va);
 4248                 if ((*pte & (PG_M | PG_RW)) == (PG_M | PG_RW)) {
 4249                         /*
 4250                          * Regardless of whether a pte is 32 or 64 bits
 4251                          * in size, PG_M is among the least significant
 4252                          * 32 bits. 
 4253                          */
 4254                         atomic_clear_int((u_int *)pte, PG_M);
 4255                         pmap_invalidate_page(pmap, pv->pv_va);
 4256                 }
 4257                 PMAP_UNLOCK(pmap);
 4258         }
 4259         sched_unpin();
 4260 }
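
/*
 * Illustrative sketch (not from pmap.c): pmap_clear_modify() asserts
 * that the page queues lock is held, so a "mark this page clean
 * everywhere" sequence in a caller might look like the following.
 */
static void
example_mark_clean(vm_page_t m)
{
        vm_page_lock_queues();
        pmap_clear_modify(m);   /* clear PG_M in every managed mapping of m */
        vm_page_undirty(m);     /* then clear the machine-independent dirty bits */
        vm_page_unlock_queues();
}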
 4261 
 4262 /*
 4263  *      pmap_clear_reference:
 4264  *
 4265  *      Clear the reference bit on the specified physical page.
 4266  */
 4267 void
 4268 pmap_clear_reference(vm_page_t m)
 4269 {
 4270         struct md_page *pvh;
 4271         pv_entry_t next_pv, pv;
 4272         pmap_t pmap;
 4273         pd_entry_t oldpde, *pde;
 4274         pt_entry_t *pte;
 4275         vm_offset_t va;
 4276 
 4277         mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 4278         if ((m->flags & PG_FICTITIOUS) != 0)
 4279                 return;
 4280         sched_pin();
 4281         pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
 4282         TAILQ_FOREACH_SAFE(pv, &pvh->pv_list, pv_list, next_pv) {
 4283                 va = pv->pv_va;
 4284                 pmap = PV_PMAP(pv);
 4285                 PMAP_LOCK(pmap);
 4286                 pde = pmap_pde(pmap, va);
 4287                 oldpde = *pde;
 4288                 if ((oldpde & PG_A) != 0) {
 4289                         if (pmap_demote_pde(pmap, pde, va)) {
 4290                                 /*
 4291                                  * Remove the mapping to a single page so
 4292                                  * that a subsequent access may repromote.
 4293                                  * Since the underlying page table page is
 4294                                  * fully populated, this removal never frees
 4295                                  * a page table page.
 4296                                  */
 4297                                 va += VM_PAGE_TO_PHYS(m) - (oldpde &
 4298                                     PG_PS_FRAME);
 4299                                 pmap_remove_page(pmap, va, NULL);
 4300                         }
 4301                 }
 4302                 PMAP_UNLOCK(pmap);
 4303         }
 4304         TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
 4305                 pmap = PV_PMAP(pv);
 4306                 PMAP_LOCK(pmap);
 4307                 pde = pmap_pde(pmap, pv->pv_va);
 4308                 KASSERT((*pde & PG_PS) == 0, ("pmap_clear_reference: found"
 4309                     " a 4mpage in page %p's pv list", m));
 4310                 pte = pmap_pte_quick(pmap, pv->pv_va);
 4311                 if ((*pte & PG_A) != 0) {
 4312                         /*
 4313                          * Regardless of whether a pte is 32 or 64 bits
 4314                          * in size, PG_A is among the least significant
 4315                          * 32 bits. 
 4316                          */
 4317                         atomic_clear_int((u_int *)pte, PG_A);
 4318                         pmap_invalidate_page(pmap, pv->pv_va);
 4319                 }
 4320                 PMAP_UNLOCK(pmap);
 4321         }
 4322         sched_unpin();
 4323 }
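
/*
 * Illustrative sketch (not from pmap.c): clearing the reference bit is
 * typically paired with deactivation, so that only a fresh access sets
 * PG_A again and the page can prove it is still in use.  Both calls
 * assume the page queues lock, per the assertions above.
 */
static void
example_deactivate(vm_page_t m)
{
        vm_page_lock_queues();
        pmap_clear_reference(m);        /* forget past references */
        vm_page_deactivate(m);          /* move the page to the inactive queue */
        vm_page_unlock_queues();
}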
 4324 
 4325 /*
 4326  * Miscellaneous support routines follow
 4327  */
 4328 
 4329 /*
 4330  * Map a set of physical memory pages into the kernel virtual
 4331  * address space. Return a pointer to where it is mapped. This
 4332  * routine is intended to be used for mapping device memory,
 4333  * NOT real memory.
 4334  */
 4335 void *
 4336 pmap_mapdev_attr(vm_paddr_t pa, vm_size_t size, int mode)
 4337 {
 4338         vm_offset_t va, tmpva, offset;
 4339 
 4340         offset = pa & PAGE_MASK;
 4341         size = roundup(offset + size, PAGE_SIZE);
 4342         pa = pa & PG_FRAME;
 4343 
 4344         if (pa < KERNLOAD && pa + size <= KERNLOAD)
 4345                 va = KERNBASE + pa;
 4346         else
 4347                 va = kmem_alloc_nofault(kernel_map, size);
 4348         if (!va)
 4349                 panic("pmap_mapdev: Couldn't alloc kernel virtual memory");
 4350 
 4351         for (tmpva = va; size > 0; ) {
 4352                 pmap_kenter_attr(tmpva, pa, mode);
 4353                 size -= PAGE_SIZE;
 4354                 tmpva += PAGE_SIZE;
 4355                 pa += PAGE_SIZE;
 4356         }
 4357         pmap_invalidate_range(kernel_pmap, va, tmpva);
 4358         pmap_invalidate_cache();
 4359         return ((void *)(va + offset));
 4360 }
 4361 
 4362 void *
 4363 pmap_mapdev(vm_paddr_t pa, vm_size_t size)
 4364 {
 4365 
 4366         return (pmap_mapdev_attr(pa, size, PAT_UNCACHEABLE));
 4367 }
 4368 
 4369 void *
 4370 pmap_mapbios(vm_paddr_t pa, vm_size_t size)
 4371 {
 4372 
 4373         return (pmap_mapdev_attr(pa, size, PAT_WRITE_BACK));
 4374 }
 4375 
 4376 void
 4377 pmap_unmapdev(vm_offset_t va, vm_size_t size)
 4378 {
 4379         vm_offset_t base, offset, tmpva;
 4380 
 4381         if (va >= KERNBASE && va + size <= KERNBASE + KERNLOAD)
 4382                 return;
 4383         base = trunc_page(va);
 4384         offset = va & PAGE_MASK;
 4385         size = roundup(offset + size, PAGE_SIZE);
 4386         for (tmpva = base; tmpva < (base + size); tmpva += PAGE_SIZE)
 4387                 pmap_kremove(tmpva);
 4388         pmap_invalidate_range(kernel_pmap, va, tmpva);
 4389         kmem_free(kernel_map, base, size);
 4390 }
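
/*
 * Illustrative sketch (not from pmap.c): a driver-style round trip
 * through the interfaces above.  "bar_pa" and "bar_len" stand in for a
 * hypothetical device register window; pmap_mapdev() panics rather than
 * returning NULL, so no failure check is needed.
 */
static uint32_t
example_read_reg0(vm_paddr_t bar_pa, vm_size_t bar_len)
{
        volatile uint32_t *regs;
        uint32_t v;

        regs = pmap_mapdev(bar_pa, bar_len);    /* PAT_UNCACHEABLE mapping */
        v = regs[0];                            /* read device register 0 */
        pmap_unmapdev((vm_offset_t)regs, bar_len);
        return (v);
}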
 4391 
 4392 int
 4393 pmap_change_attr(vm_offset_t va, vm_size_t size, int mode)
 4394 {
 4395         vm_offset_t base, offset, tmpva;
 4396         pt_entry_t *pte;
 4397         u_int opte, npte;
 4398         pd_entry_t *pde;
 4399 
 4400         base = trunc_page(va);
 4401         offset = va & PAGE_MASK;
 4402         size = roundup(offset + size, PAGE_SIZE);
 4403 
 4404         /* Only supported on kernel virtual addresses. */
 4405         if (base <= VM_MAXUSER_ADDRESS)
 4406                 return (EINVAL);
 4407 
 4408         /* 4MB pages and pages that aren't mapped aren't supported. */
 4409         for (tmpva = base; tmpva < (base + size); tmpva += PAGE_SIZE) {
 4410                 pde = pmap_pde(kernel_pmap, tmpva);
 4411                 if (*pde & PG_PS)
 4412                         return (EINVAL);
 4413                 if (*pde == 0)
 4414                         return (EINVAL);
 4415                 pte = vtopte(tmpva);
 4416                 if (*pte == 0)
 4417                         return (EINVAL);
 4418         }
 4419 
 4420         /*
 4421          * Ok, all the pages exist and are 4k, so run through them updating
 4422          * their cache mode.
 4423          */
 4424         for (tmpva = base; size > 0; ) {
 4425                 pte = vtopte(tmpva);
 4426 
 4427                 /*
 4428                  * The cache mode bits are all in the low 32-bits of the
 4429                  * PTE, so we can just spin on updating the low 32-bits.
 4430                  */
 4431                 do {
 4432                         opte = *(u_int *)pte;
 4433                         npte = opte & ~(PG_PTE_PAT | PG_NC_PCD | PG_NC_PWT);
 4434                         npte |= pmap_cache_bits(mode, 0);
 4435                 } while (npte != opte &&
 4436                     !atomic_cmpset_int((u_int *)pte, opte, npte));
 4437                 tmpva += PAGE_SIZE;
 4438                 size -= PAGE_SIZE;
 4439         }
 4440 
 4441         /*
 4442          * Flush the CPU caches so that no stale data remains cached under
 4443          * the old memory type once the new attributes take effect.
 4444          */
 4445         pmap_invalidate_range(kernel_pmap, base, tmpva);
 4446         pmap_invalidate_cache();
 4447         return (0);
 4448 }
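
/*
 * Illustrative sketch (not from pmap.c): switching an already mapped,
 * 4 KB-backed kernel range, e.g. a hypothetical frame buffer at
 * "fb_va"/"fb_len", to write-combining.  Unmapped or 4 MB-mapped ranges
 * are rejected with EINVAL, as the checks above show.
 */
static int
example_make_write_combining(vm_offset_t fb_va, vm_size_t fb_len)
{
        int error;

        error = pmap_change_attr(fb_va, fb_len, PAT_WRITE_COMBINING);
        if (error != 0)
                printf("pmap_change_attr: error %d\n", error);
        return (error);
}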
 4449 
 4450 /*
 4451  * Perform the pmap-level work for the mincore(2) system call.
 4452  */
 4453 int
 4454 pmap_mincore(pmap_t pmap, vm_offset_t addr)
 4455 {
 4456         pd_entry_t *pdep;
 4457         pt_entry_t *ptep, pte;
 4458         vm_paddr_t pa;
 4459         vm_page_t m;
 4460         int val = 0;
 4461         
 4462         PMAP_LOCK(pmap);
 4463         pdep = pmap_pde(pmap, addr);
 4464         if (*pdep != 0) {
 4465                 if (*pdep & PG_PS) {
 4466                         pte = *pdep;
 4467                         val = MINCORE_SUPER;
 4468                         /* Compute the physical address of the 4KB page. */
 4469                         pa = ((*pdep & PG_PS_FRAME) | (addr & PDRMASK)) &
 4470                             PG_FRAME;
 4471                 } else {
 4472                         ptep = pmap_pte(pmap, addr);
 4473                         pte = *ptep;
 4474                         pmap_pte_release(ptep);
 4475                         pa = pte & PG_FRAME;
 4476                 }
 4477         } else {
 4478                 pte = 0;
 4479                 pa = 0;
 4480         }
 4481         PMAP_UNLOCK(pmap);
 4482 
 4483         if (pte != 0) {
 4484                 val |= MINCORE_INCORE;
 4485                 if ((pte & PG_MANAGED) == 0)
 4486                         return val;
 4487 
 4488                 m = PHYS_TO_VM_PAGE(pa);
 4489 
 4490                 /*
 4491                  * Modified by us
 4492                  */
 4493                 if ((pte & (PG_M | PG_RW)) == (PG_M | PG_RW))
 4494                         val |= MINCORE_MODIFIED|MINCORE_MODIFIED_OTHER;
 4495                 else {
 4496                         /*
 4497                          * Modified by someone else
 4498                          */
 4499                         vm_page_lock_queues();
 4500                         if (m->dirty || pmap_is_modified(m))
 4501                                 val |= MINCORE_MODIFIED_OTHER;
 4502                         vm_page_unlock_queues();
 4503                 }
 4504                 /*
 4505                  * Referenced by us
 4506                  */
 4507                 if (pte & PG_A)
 4508                         val |= MINCORE_REFERENCED|MINCORE_REFERENCED_OTHER;
 4509                 else {
 4510                         /*
 4511                          * Referenced by someone else
 4512                          */
 4513                         vm_page_lock_queues();
 4514                         if ((m->flags & PG_REFERENCED) ||
 4515                             pmap_ts_referenced(m)) {
 4516                                 val |= MINCORE_REFERENCED_OTHER;
 4517                                 vm_page_flag_set(m, PG_REFERENCED);
 4518                         }
 4519                         vm_page_unlock_queues();
 4520                 }
 4521         } 
 4522         return val;
 4523 }
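
/*
 * Illustrative sketch (not from pmap.c, error handling omitted): the
 * mincore(2) system call layer walks the requested range a page at a
 * time and records the status byte computed above for each page.
 */
static void
example_mincore_range(pmap_t pmap, vm_offset_t start, vm_offset_t end, char *vec)
{
        vm_offset_t addr;

        for (addr = start; addr < end; addr += PAGE_SIZE)
                vec[(addr - start) >> PAGE_SHIFT] = (char)pmap_mincore(pmap, addr);
}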
 4524 
 4525 void
 4526 pmap_activate(struct thread *td)
 4527 {
 4528         pmap_t  pmap, oldpmap;
 4529         u_int32_t  cr3;
 4530 
 4531         critical_enter();
 4532         pmap = vmspace_pmap(td->td_proc->p_vmspace);
 4533         oldpmap = PCPU_GET(curpmap);
 4534 #if defined(SMP)
 4535         atomic_clear_int(&oldpmap->pm_active, PCPU_GET(cpumask));
 4536         atomic_set_int(&pmap->pm_active, PCPU_GET(cpumask));
 4537 #else
 4538         oldpmap->pm_active &= ~1;
 4539         pmap->pm_active |= 1;
 4540 #endif
 4541 #ifdef PAE
 4542         cr3 = vtophys(pmap->pm_pdpt);
 4543 #else
 4544         cr3 = vtophys(pmap->pm_pdir);
 4545 #endif
 4546         /*
 4547          * pmap_activate is for the current thread on the current cpu
 4548          */
 4549         td->td_pcb->pcb_cr3 = cr3;
 4550         load_cr3(cr3);
 4551         PCPU_SET(curpmap, pmap);
 4552         critical_exit();
 4553 }
 4554 
 4555 vm_offset_t
 4556 pmap_addr_hint(vm_object_t obj, vm_offset_t addr, vm_size_t size)
 4557 {
 4558 
 4559         if ((obj == NULL) || (size < NBPDR) || (obj->type != OBJT_DEVICE)) {
 4560                 return addr;
 4561         }
 4562 
 4563         addr = (addr + PDRMASK) & ~PDRMASK;
 4564         return addr;
 4565 }
 4566 
 4567 /*
 4568  *      Increase the starting virtual address of the given mapping if a
 4569  *      different alignment might result in more superpage mappings.
 4570  */
 4571 void
 4572 pmap_align_superpage(vm_object_t object, vm_ooffset_t offset,
 4573     vm_offset_t *addr, vm_size_t size)
 4574 {
 4575         vm_offset_t superpage_offset;
 4576 
 4577         if (size < NBPDR)
 4578                 return;
 4579         if (object != NULL && (object->flags & OBJ_COLORED) != 0)
 4580                 offset += ptoa(object->pg_color);
 4581         superpage_offset = offset & PDRMASK;
 4582         if (size - ((NBPDR - superpage_offset) & PDRMASK) < NBPDR ||
 4583             (*addr & PDRMASK) == superpage_offset)
 4584                 return;
 4585         if ((*addr & PDRMASK) < superpage_offset)
 4586                 *addr = (*addr & ~PDRMASK) + superpage_offset;
 4587         else
 4588                 *addr = ((*addr + PDRMASK) & ~PDRMASK) + superpage_offset;
 4589 }
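
/*
 * Worked example (illustrative, assuming 4 MB superpages, so
 * NBPDR == 0x400000 and PDRMASK == 0x3fffff): aligning a 16 MB request
 * whose object offset begins 0x123000 bytes into a superpage.
 */
static void
example_align(void)
{
        vm_offset_t addr = 0x30000000;

        pmap_align_superpage(NULL, 0x123000, &addr, 16 * 1024 * 1024);
        /*
         * superpage_offset is 0x123000 and (addr & PDRMASK) was 0, so
         * addr is advanced to 0x30123000: virtual address and object
         * offset now agree modulo NBPDR, making 4 MB promotions possible.
         */
        KASSERT(addr == 0x30123000, ("example_align: unexpected result"));
}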
 4590 
 4591 
 4592 #if defined(PMAP_DEBUG)
 4593 int pmap_pid_dump(int pid)
 4594 {
 4595         pmap_t pmap;
 4596         struct proc *p;
 4597         int npte = 0;
 4598         int index;
 4599 
 4600         sx_slock(&allproc_lock);
 4601         FOREACH_PROC_IN_SYSTEM(p) {
 4602                 if (p->p_pid != pid)
 4603                         continue;
 4604 
 4605                 if (p->p_vmspace) {
 4606                         int i,j;
 4607                         index = 0;
 4608                         pmap = vmspace_pmap(p->p_vmspace);
 4609                         for (i = 0; i < NPDEPTD; i++) {
 4610                                 pd_entry_t *pde;
 4611                                 pt_entry_t *pte;
 4612                                 vm_offset_t base = i << PDRSHIFT;
 4613                                 
 4614                                 pde = &pmap->pm_pdir[i];
 4615                                 if (pde && pmap_pde_v(pde)) {
 4616                                         for (j = 0; j < NPTEPG; j++) {
 4617                                                 vm_offset_t va = base + (j << PAGE_SHIFT);
 4618                                                 if (va >= (vm_offset_t) VM_MIN_KERNEL_ADDRESS) {
 4619                                                         if (index) {
 4620                                                                 index = 0;
 4621                                                                 printf("\n");
 4622                                                         }
 4623                                                         sx_sunlock(&allproc_lock);
 4624                                                         return npte;
 4625                                                 }
 4626                                                 pte = pmap_pte(pmap, va);
 4627                                                 if (pte && pmap_pte_v(pte)) {
 4628                                                         pt_entry_t pa;
 4629                                                         vm_page_t m;
 4630                                                         pa = *pte;
 4631                                                         m = PHYS_TO_VM_PAGE(pa & PG_FRAME);
 4632                                                         printf("va: 0x%x, pt: 0x%x, h: %d, w: %d, f: 0x%x",
 4633                                                                 va, pa, m->hold_count, m->wire_count, m->flags);
 4634                                                         npte++;
 4635                                                         index++;
 4636                                                         if (index >= 2) {
 4637                                                                 index = 0;
 4638                                                                 printf("\n");
 4639                                                         } else {
 4640                                                                 printf(" ");
 4641                                                         }
 4642                                                 }
 4643                                         }
 4644                                 }
 4645                         }
 4646                 }
 4647         }
 4648         sx_sunlock(&allproc_lock);
 4649         return npte;
 4650 }
 4651 #endif
 4652 
 4653 #if defined(DEBUG)
 4654 
 4655 static void     pads(pmap_t pm);
 4656 void            pmap_pvdump(vm_paddr_t pa);
 4657 
 4658 /* print address space of pmap*/
 4659 static void
 4660 pads(pmap_t pm)
 4661 {
 4662         int i, j;
 4663         vm_offset_t va;
 4664         pt_entry_t *ptep;
 4665 
 4666         if (pm == kernel_pmap)
 4667                 return;
 4668         for (i = 0; i < NPDEPTD; i++)
 4669                 if (pm->pm_pdir[i])
 4670                         for (j = 0; j < NPTEPG; j++) {
 4671                                 va = (i << PDRSHIFT) + (j << PAGE_SHIFT);
 4672                                 if (pm == kernel_pmap && va < KERNBASE)
 4673                                         continue;
 4674                                 if (pm != kernel_pmap && va > UPT_MAX_ADDRESS)
 4675                                         continue;
 4676                                 ptep = pmap_pte(pm, va);
 4677                                 if (pmap_pte_v(ptep))
 4678                                         printf("%x:%x ", va, *ptep);
 4679                         }
 4680 
 4681 }
 4682 
 4683 void
 4684 pmap_pvdump(vm_paddr_t pa)
 4685 {
 4686         pv_entry_t pv;
 4687         pmap_t pmap;
 4688         vm_page_t m;
 4689 
 4690         printf("pa %x", pa);
 4691         m = PHYS_TO_VM_PAGE(pa);
 4692         TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
 4693                 pmap = PV_PMAP(pv);
 4694                 printf(" -> pmap %p, va %x", (void *)pmap, pv->pv_va);
 4695                 pads(pmap);
 4696         }
 4697         printf(" ");
 4698 }
 4699 #endif
