
FreeBSD/Linux Kernel Cross Reference
sys/i386/xen/pmap.c


    1 /*-
    2  * Copyright (c) 1991 Regents of the University of California.
    3  * All rights reserved.
    4  * Copyright (c) 1994 John S. Dyson
    5  * All rights reserved.
    6  * Copyright (c) 1994 David Greenman
    7  * All rights reserved.
    8  * Copyright (c) 2005 Alan L. Cox <alc@cs.rice.edu>
    9  * All rights reserved.
   10  *
   11  * This code is derived from software contributed to Berkeley by
   12  * the Systems Programming Group of the University of Utah Computer
   13  * Science Department and William Jolitz of UUNET Technologies Inc.
   14  *
   15  * Redistribution and use in source and binary forms, with or without
   16  * modification, are permitted provided that the following conditions
   17  * are met:
   18  * 1. Redistributions of source code must retain the above copyright
   19  *    notice, this list of conditions and the following disclaimer.
   20  * 2. Redistributions in binary form must reproduce the above copyright
   21  *    notice, this list of conditions and the following disclaimer in the
   22  *    documentation and/or other materials provided with the distribution.
   23  * 3. All advertising materials mentioning features or use of this software
   24  *    must display the following acknowledgement:
   25  *      This product includes software developed by the University of
   26  *      California, Berkeley and its contributors.
   27  * 4. Neither the name of the University nor the names of its contributors
   28  *    may be used to endorse or promote products derived from this software
   29  *    without specific prior written permission.
   30  *
   31  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
   32  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   33  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   34  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
   35  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   36  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
   37  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   38  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   39  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   40  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   41  * SUCH DAMAGE.
   42  *
   43  *      from:   @(#)pmap.c      7.7 (Berkeley)  5/12/91
   44  */
   45 /*-
   46  * Copyright (c) 2003 Networks Associates Technology, Inc.
   47  * All rights reserved.
   48  *
   49  * This software was developed for the FreeBSD Project by Jake Burkholder,
   50  * Safeport Network Services, and Network Associates Laboratories, the
   51  * Security Research Division of Network Associates, Inc. under
   52  * DARPA/SPAWAR contract N66001-01-C-8035 ("CBOSS"), as part of the DARPA
   53  * CHATS research program.
   54  *
   55  * Redistribution and use in source and binary forms, with or without
   56  * modification, are permitted provided that the following conditions
   57  * are met:
   58  * 1. Redistributions of source code must retain the above copyright
   59  *    notice, this list of conditions and the following disclaimer.
   60  * 2. Redistributions in binary form must reproduce the above copyright
   61  *    notice, this list of conditions and the following disclaimer in the
   62  *    documentation and/or other materials provided with the distribution.
   63  *
   64  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
   65  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   66  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   67  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
   68  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   69  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
   70  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   71  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   72  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   73  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   74  * SUCH DAMAGE.
   75  */
   76 
   77 #include <sys/cdefs.h>
   78 __FBSDID("$FreeBSD: releng/8.3/sys/i386/xen/pmap.c 230433 2012-01-21 18:38:57Z alc $");
   79 
   80 /*
   81  *      Manages physical address maps.
   82  *
   83  *      In addition to hardware address maps, this
   84  *      module is called upon to provide software-use-only
   85  *      maps which may or may not be stored in the same
   86  *      form as hardware maps.  These pseudo-maps are
   87  *      used to store intermediate results from copy
   88  *      operations to and from address spaces.
   89  *
   90  *      Since the information managed by this module is
   91  *      also stored by the logical address mapping module,
   92  *      this module may throw away valid virtual-to-physical
   93  *      mappings at almost any time.  However, invalidations
   94  *      of virtual-to-physical mappings must be done as
   95  *      requested.
   96  *
   97  *      In order to cope with hardware architectures which
   98  *      make virtual-to-physical map invalidates expensive,
    99  *      this module may delay invalidation or reduced-protection
  100  *      operations until such time as they are actually
  101  *      necessary.  This module is given full information as
  102  *      to which processors are currently using which maps,
  103  *      and to when physical maps must be made correct.
  104  */
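/*
 * [Editor's note -- illustrative addition, not part of the original file.]
 * A rough sketch of how the VM system drives this module through the
 * standard pmap(9) entry points.  Argument lists are abbreviated and the
 * call sites named are assumptions; the real callers live in the
 * machine-independent VM code (vm_map.c, vm_fault.c), not here.
 */
#if 0
	pmap_pinit(pmap);		/* vmspace creation */
	pmap_enter(pmap, va, ...);	/* install a translation on page fault */
	pmap_remove(pmap, sva, eva);	/* tear down translations (munmap) */
	pmap_release(pmap);		/* vmspace destruction */
#endif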
  105 
  106 #define PMAP_DIAGNOSTIC
  107 
  108 #include "opt_cpu.h"
  109 #include "opt_pmap.h"
  110 #include "opt_smp.h"
  111 #include "opt_xbox.h"
  112 
  113 #include <sys/param.h>
  114 #include <sys/systm.h>
  115 #include <sys/kernel.h>
  116 #include <sys/ktr.h>
  117 #include <sys/lock.h>
  118 #include <sys/malloc.h>
  119 #include <sys/mman.h>
  120 #include <sys/msgbuf.h>
  121 #include <sys/mutex.h>
  122 #include <sys/proc.h>
  123 #include <sys/sf_buf.h>
  124 #include <sys/sx.h>
  125 #include <sys/vmmeter.h>
  126 #include <sys/sched.h>
  127 #include <sys/sysctl.h>
  128 #ifdef SMP
  129 #include <sys/smp.h>
  130 #endif
  131 
  132 #include <vm/vm.h>
  133 #include <vm/vm_param.h>
  134 #include <vm/vm_kern.h>
  135 #include <vm/vm_page.h>
  136 #include <vm/vm_map.h>
  137 #include <vm/vm_object.h>
  138 #include <vm/vm_extern.h>
  139 #include <vm/vm_pageout.h>
  140 #include <vm/vm_pager.h>
  141 #include <vm/uma.h>
  142 
  143 #include <machine/cpu.h>
  144 #include <machine/cputypes.h>
  145 #include <machine/md_var.h>
  146 #include <machine/pcb.h>
  147 #include <machine/specialreg.h>
  148 #ifdef SMP
  149 #include <machine/smp.h>
  150 #endif
  151 
  152 #ifdef XBOX
  153 #include <machine/xbox.h>
  154 #endif
  155 
  156 #include <xen/interface/xen.h>
  157 #include <xen/hypervisor.h>
  158 #include <machine/xen/hypercall.h>
  159 #include <machine/xen/xenvar.h>
  160 #include <machine/xen/xenfunc.h>
  161 
  162 #if !defined(CPU_DISABLE_SSE) && defined(I686_CPU)
  163 #define CPU_ENABLE_SSE
  164 #endif
  165 
  166 #ifndef PMAP_SHPGPERPROC
  167 #define PMAP_SHPGPERPROC 200
  168 #endif
  169 
  170 #if defined(DIAGNOSTIC)
  171 #define PMAP_DIAGNOSTIC
  172 #endif
  173 
  174 #if !defined(PMAP_DIAGNOSTIC)
  175 #define PMAP_INLINE     __gnu89_inline
  176 #else
  177 #define PMAP_INLINE
  178 #endif
  179 
  180 #define PV_STATS
  181 #ifdef PV_STATS
  182 #define PV_STAT(x)      do { x ; } while (0)
  183 #else
  184 #define PV_STAT(x)      do { } while (0)
  185 #endif
  186 
  187 /*
  188  * Get PDEs and PTEs for user/kernel address space
  189  */
  190 #define pmap_pde(m, v)  (&((m)->pm_pdir[(vm_offset_t)(v) >> PDRSHIFT]))
  191 #define pdir_pde(m, v) (m[(vm_offset_t)(v) >> PDRSHIFT])
  192 
  193 #define pmap_pde_v(pte)         ((*(int *)pte & PG_V) != 0)
  194 #define pmap_pte_w(pte)         ((*(int *)pte & PG_W) != 0)
  195 #define pmap_pte_m(pte)         ((*(int *)pte & PG_M) != 0)
  196 #define pmap_pte_u(pte)         ((*(int *)pte & PG_A) != 0)
  197 #define pmap_pte_v(pte)         ((*(int *)pte & PG_V) != 0)
  198 
  199 #define pmap_pte_set_prot(pte, v) ((*(int *)pte &= ~PG_PROT), (*(int *)pte |= (v)))
  200 
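/*
 * [Editor's note -- illustrative addition.]  With the usual non-PAE value
 * PDRSHIFT == 22, pmap_pde(pmap, va) simply selects the page directory
 * entry pm_pdir[va >> 22]; e.g. va 0xc0401000 yields PDE index 0x301.
 * Under PAE the shift is 21, so each PDE covers 2MB instead of 4MB.
 */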
  201 #define HAMFISTED_LOCKING
  202 #ifdef HAMFISTED_LOCKING
  203 static struct mtx createdelete_lock;
  204 #endif
  205 
  206 struct pmap kernel_pmap_store;
  207 LIST_HEAD(pmaplist, pmap);
  208 static struct pmaplist allpmaps;
  209 static struct mtx allpmaps_lock;
  210 
  211 vm_offset_t virtual_avail;      /* VA of first avail page (after kernel bss) */
  212 vm_offset_t virtual_end;        /* VA of last avail page (end of kernel AS) */
  213 int pgeflag = 0;                /* PG_G or-in */
  214 int pseflag = 0;                /* PG_PS or-in */
  215 
  216 int nkpt;
  217 vm_offset_t kernel_vm_end;
  218 extern u_int32_t KERNend;
  219 
  220 #ifdef PAE
  221 pt_entry_t pg_nx;
  222 #if !defined(XEN) 
  223 static uma_zone_t pdptzone;
  224 #endif
  225 #endif
  226 
  227 static int pat_works;                   /* Is page attribute table sane? */
  228 
  229 /*
  230  * Data for the pv entry allocation mechanism
  231  */
  232 static int pv_entry_count = 0, pv_entry_max = 0, pv_entry_high_water = 0;
  233 static int shpgperproc = PMAP_SHPGPERPROC;
  234 
  235 struct pv_chunk *pv_chunkbase;          /* KVA block for pv_chunks */
  236 int pv_maxchunks;                       /* How many chunks we have KVA for */
  237 vm_offset_t pv_vafree;                  /* freelist stored in the PTE */
  238 
  239 /*
  240  * All those kernel PT submaps that BSD is so fond of
  241  */
  242 struct sysmaps {
  243         struct  mtx lock;
  244         pt_entry_t *CMAP1;
  245         pt_entry_t *CMAP2;
  246         caddr_t CADDR1;
  247         caddr_t CADDR2;
  248 };
  249 static struct sysmaps sysmaps_pcpu[MAXCPU];
  250 pt_entry_t *CMAP1 = 0;
  251 static pt_entry_t *CMAP3;
  252 caddr_t CADDR1 = 0, ptvmmap = 0;
  253 static caddr_t CADDR3;
  254 struct msgbuf *msgbufp = 0;
  255 
  256 /*
  257  * Crashdump maps.
  258  */
  259 static caddr_t crashdumpmap;
  260 
  261 static pt_entry_t *PMAP1 = 0, *PMAP2;
  262 static pt_entry_t *PADDR1 = 0, *PADDR2;
  263 #ifdef SMP
  264 static int PMAP1cpu;
  265 static int PMAP1changedcpu;
  266 SYSCTL_INT(_debug, OID_AUTO, PMAP1changedcpu, CTLFLAG_RD, 
  267            &PMAP1changedcpu, 0,
  268            "Number of times pmap_pte_quick changed CPU with same PMAP1");
  269 #endif
  270 static int PMAP1changed;
  271 SYSCTL_INT(_debug, OID_AUTO, PMAP1changed, CTLFLAG_RD, 
  272            &PMAP1changed, 0,
  273            "Number of times pmap_pte_quick changed PMAP1");
  274 static int PMAP1unchanged;
  275 SYSCTL_INT(_debug, OID_AUTO, PMAP1unchanged, CTLFLAG_RD, 
  276            &PMAP1unchanged, 0,
  277            "Number of times pmap_pte_quick didn't change PMAP1");
  278 static struct mtx PMAP2mutex;
  279 
  280 SYSCTL_NODE(_vm, OID_AUTO, pmap, CTLFLAG_RD, 0, "VM/pmap parameters");
  281 
  282 SYSCTL_INT(_vm_pmap, OID_AUTO, pv_entry_max, CTLFLAG_RD, &pv_entry_max, 0,
  283         "Max number of PV entries");
  284 SYSCTL_INT(_vm_pmap, OID_AUTO, shpgperproc, CTLFLAG_RD, &shpgperproc, 0,
  285         "Page share factor per proc");
  286 
  287 static void     free_pv_entry(pmap_t pmap, pv_entry_t pv);
  288 static pv_entry_t get_pv_entry(pmap_t locked_pmap, int try);
  289 
  290 static vm_page_t pmap_enter_quick_locked(multicall_entry_t **mcl, int *count, pmap_t pmap, vm_offset_t va,
  291     vm_page_t m, vm_prot_t prot, vm_page_t mpte);
  292 static int pmap_remove_pte(pmap_t pmap, pt_entry_t *ptq, vm_offset_t sva,
  293     vm_page_t *free);
  294 static void pmap_remove_page(struct pmap *pmap, vm_offset_t va,
  295     vm_page_t *free);
  296 static void pmap_remove_entry(struct pmap *pmap, vm_page_t m,
  297                                         vm_offset_t va);
  298 static void pmap_insert_entry(pmap_t pmap, vm_offset_t va, vm_page_t m);
  299 static boolean_t pmap_try_insert_pv_entry(pmap_t pmap, vm_offset_t va,
  300     vm_page_t m);
  301 
  302 static vm_page_t pmap_allocpte(pmap_t pmap, vm_offset_t va, int flags);
  303 
  304 static vm_page_t _pmap_allocpte(pmap_t pmap, unsigned ptepindex, int flags);
  305 static int _pmap_unwire_pte_hold(pmap_t pmap, vm_page_t m, vm_page_t *free);
  306 static pt_entry_t *pmap_pte_quick(pmap_t pmap, vm_offset_t va);
  307 static void pmap_pte_release(pt_entry_t *pte);
  308 static int pmap_unuse_pt(pmap_t, vm_offset_t, vm_page_t *);
  309 static vm_offset_t pmap_kmem_choose(vm_offset_t addr);
  310 static boolean_t pmap_is_prefaultable_locked(pmap_t pmap, vm_offset_t addr);
  311 static void pmap_kenter_attr(vm_offset_t va, vm_paddr_t pa, int mode);
  312 
  313 static __inline void pagezero(void *page);
  314 
  315 #if defined(PAE) && !defined(XEN)
  316 static void *pmap_pdpt_allocf(uma_zone_t zone, int bytes, u_int8_t *flags, int wait);
  317 #endif
  318 
  319 CTASSERT(1 << PDESHIFT == sizeof(pd_entry_t));
  320 CTASSERT(1 << PTESHIFT == sizeof(pt_entry_t));
  321 
  322 /*
  323  * If you get an error here, then you set KVA_PAGES wrong! See the
  324  * description of KVA_PAGES in sys/i386/include/pmap.h. It must be
   325  * a multiple of 4 for a normal kernel, or a multiple of 8 for a PAE kernel.
  326  */
  327 CTASSERT(KERNBASE % (1 << 24) == 0);
  328 
  329 
  330 
  331 void 
  332 pd_set(struct pmap *pmap, int ptepindex, vm_paddr_t val, int type)
  333 {
  334         vm_paddr_t pdir_ma = vtomach(&pmap->pm_pdir[ptepindex]);
  335         
  336         switch (type) {
  337         case SH_PD_SET_VA:
  338 #if 0           
  339                 xen_queue_pt_update(shadow_pdir_ma,
  340                                     xpmap_ptom(val & ~(PG_RW)));
  341 #endif          
  342                 xen_queue_pt_update(pdir_ma,
  343                                     xpmap_ptom(val));   
  344                 break;
  345         case SH_PD_SET_VA_MA:
  346 #if 0           
  347                 xen_queue_pt_update(shadow_pdir_ma,
  348                                     val & ~(PG_RW));
  349 #endif          
  350                 xen_queue_pt_update(pdir_ma, val);      
  351                 break;
  352         case SH_PD_SET_VA_CLEAR:
  353 #if 0
  354                 xen_queue_pt_update(shadow_pdir_ma, 0);
  355 #endif          
  356                 xen_queue_pt_update(pdir_ma, 0);        
  357                 break;
  358         }
  359 }
  360 
  361 /*
  362  * Move the kernel virtual free pointer to the next
  363  * 4MB.  This is used to help improve performance
  364  * by using a large (4MB) page for much of the kernel
  365  * (.text, .data, .bss)
  366  */
  367 static vm_offset_t
  368 pmap_kmem_choose(vm_offset_t addr)
  369 {
  370         vm_offset_t newaddr = addr;
  371 
  372 #ifndef DISABLE_PSE
  373         if (cpu_feature & CPUID_PSE)
  374                 newaddr = (addr + PDRMASK) & ~PDRMASK;
  375 #endif
  376         return newaddr;
  377 }
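/*
 * [Editor's note -- illustrative addition.]  (addr + PDRMASK) & ~PDRMASK
 * rounds addr up to the next superpage boundary.  Assuming the non-PAE
 * constants (NBPDR == 4MB, PDRMASK == 0x3fffff), an addr of 0xc0801000
 * becomes 0xc0c00000.
 */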
  378 
  379 /*
  380  *      Bootstrap the system enough to run with virtual memory.
  381  *
  382  *      On the i386 this is called after mapping has already been enabled
  383  *      and just syncs the pmap module with what has already been done.
  384  *      [We can't call it easily with mapping off since the kernel is not
  385  *      mapped with PA == VA, hence we would have to relocate every address
  386  *      from the linked base (virtual) address "KERNBASE" to the actual
  387  *      (physical) address starting relative to 0]
  388  */
  389 void
  390 pmap_bootstrap(vm_paddr_t firstaddr)
  391 {
  392         vm_offset_t va;
  393         pt_entry_t *pte, *unused;
  394         struct sysmaps *sysmaps;
  395         int i;
  396 
  397         /*
  398          * XXX The calculation of virtual_avail is wrong. It's NKPT*PAGE_SIZE too
  399          * large. It should instead be correctly calculated in locore.s and
  400          * not based on 'first' (which is a physical address, not a virtual
  401          * address, for the start of unused physical memory). The kernel
  402          * page tables are NOT double mapped and thus should not be included
  403          * in this calculation.
  404          */
  405         virtual_avail = (vm_offset_t) KERNBASE + firstaddr;
  406         virtual_avail = pmap_kmem_choose(virtual_avail);
  407 
  408         virtual_end = VM_MAX_KERNEL_ADDRESS;
  409 
  410         /*
  411          * Initialize the kernel pmap (which is statically allocated).
  412          */
  413         PMAP_LOCK_INIT(kernel_pmap);
  414         kernel_pmap->pm_pdir = (pd_entry_t *) (KERNBASE + (u_int)IdlePTD);
  415 #ifdef PAE
  416         kernel_pmap->pm_pdpt = (pdpt_entry_t *) (KERNBASE + (u_int)IdlePDPT);
  417 #endif
  418         kernel_pmap->pm_active = -1;    /* don't allow deactivation */
  419         TAILQ_INIT(&kernel_pmap->pm_pvchunk);
  420         LIST_INIT(&allpmaps);
  421         mtx_init(&allpmaps_lock, "allpmaps", NULL, MTX_SPIN);
  422         mtx_lock_spin(&allpmaps_lock);
  423         LIST_INSERT_HEAD(&allpmaps, kernel_pmap, pm_list);
  424         mtx_unlock_spin(&allpmaps_lock);
  425         if (nkpt == 0)
  426                 nkpt = NKPT;
  427 
  428         /*
  429          * Reserve some special page table entries/VA space for temporary
  430          * mapping of pages.
  431          */
  432 #define SYSMAP(c, p, v, n)      \
  433         v = (c)va; va += ((n)*PAGE_SIZE); p = pte; pte += (n);
  434 
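/*
 * [Editor's note -- illustrative addition.]  Each SYSMAP() use below
 * carves n pages of VA out of the bootstrap cursor and records the
 * matching pte pointer; e.g. SYSMAP(caddr_t, CMAP1, CADDR1, 1) expands
 * to roughly:
 *
 *	CADDR1 = (caddr_t)va; va += PAGE_SIZE; CMAP1 = pte; pte += 1;
 */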
  435         va = virtual_avail;
  436         pte = vtopte(va);
  437 
  438         /*
  439          * CMAP1/CMAP2 are used for zeroing and copying pages.
  440          * CMAP3 is used for the idle process page zeroing.
  441          */
  442         for (i = 0; i < MAXCPU; i++) {
  443                 sysmaps = &sysmaps_pcpu[i];
  444                 mtx_init(&sysmaps->lock, "SYSMAPS", NULL, MTX_DEF);
  445                 SYSMAP(caddr_t, sysmaps->CMAP1, sysmaps->CADDR1, 1)
  446                 SYSMAP(caddr_t, sysmaps->CMAP2, sysmaps->CADDR2, 1)
  447         }
  448         SYSMAP(caddr_t, CMAP1, CADDR1, 1)
  449         SYSMAP(caddr_t, CMAP3, CADDR3, 1)
  450         PT_SET_MA(CADDR3, 0);
  451 
  452         /*
  453          * Crashdump maps.
  454          */
  455         SYSMAP(caddr_t, unused, crashdumpmap, MAXDUMPPGS)
  456 
  457         /*
  458          * ptvmmap is used for reading arbitrary physical pages via /dev/mem.
  459          */
  460         SYSMAP(caddr_t, unused, ptvmmap, 1)
  461 
  462         /*
  463          * msgbufp is used to map the system message buffer.
  464          */
  465         SYSMAP(struct msgbuf *, unused, msgbufp, atop(round_page(msgbufsize)))
  466 
  467         /*
  468          * ptemap is used for pmap_pte_quick
  469          */
  470         SYSMAP(pt_entry_t *, PMAP1, PADDR1, 1);
  471         SYSMAP(pt_entry_t *, PMAP2, PADDR2, 1);
  472 
  473         mtx_init(&PMAP2mutex, "PMAP2", NULL, MTX_DEF);
  474 
  475         virtual_avail = va;
  476         PT_SET_MA(CADDR1, 0);
  477 
  478         /*
  479          * Leave in place an identity mapping (virt == phys) for the low 1 MB
  480          * physical memory region that is used by the ACPI wakeup code.  This
  481          * mapping must not have PG_G set. 
  482          */
  483 #ifndef XEN
  484         /*
  485          * leave here deliberately to show that this is not supported
  486          */
  487 #ifdef XBOX
  488         /* FIXME: This is gross, but needed for the XBOX. Since we are in such
   489  * an early stage, we cannot yet neatly map video memory ... :-(
  490          * Better fixes are very welcome! */
  491         if (!arch_i386_is_xbox)
  492 #endif
  493         for (i = 1; i < NKPT; i++)
  494                 PTD[i] = 0;
  495 
  496         /* Initialize the PAT MSR if present. */
  497         pmap_init_pat();
  498 
  499         /* Turn on PG_G on kernel page(s) */
  500         pmap_set_pg();
  501 #endif
  502 
  503 #ifdef HAMFISTED_LOCKING
  504         mtx_init(&createdelete_lock, "pmap create/delete", NULL, MTX_DEF);
  505 #endif
  506 }
  507 
  508 /*
  509  * Setup the PAT MSR.
  510  */
  511 void
  512 pmap_init_pat(void)
  513 {
  514         uint64_t pat_msr;
  515 
  516         /* Bail if this CPU doesn't implement PAT. */
  517         if (!(cpu_feature & CPUID_PAT))
  518                 return;
  519 
  520         if (cpu_vendor_id != CPU_VENDOR_INTEL ||
  521             (CPUID_TO_FAMILY(cpu_id) == 6 && CPUID_TO_MODEL(cpu_id) >= 0xe)) {
  522                 /*
   523                  * Leave the indices 0-3 at the default of WB, WT, UC-, and UC.
   524                  * Program 4 and 5 as WP and WC.
   525                  * Leave 6 and 7 as UC- and UC.
  526                  */
  527                 pat_msr = rdmsr(MSR_PAT);
  528                 pat_msr &= ~(PAT_MASK(4) | PAT_MASK(5));
  529                 pat_msr |= PAT_VALUE(4, PAT_WRITE_PROTECTED) |
  530                     PAT_VALUE(5, PAT_WRITE_COMBINING);
  531                 pat_works = 1;
  532         } else {
  533                 /*
  534                  * Due to some Intel errata, we can only safely use the lower 4
  535                  * PAT entries.  Thus, just replace PAT Index 2 with WC instead
  536                  * of UC-.
  537                  *
  538                  *   Intel Pentium III Processor Specification Update
  539                  * Errata E.27 (Upper Four PAT Entries Not Usable With Mode B
  540                  * or Mode C Paging)
  541                  *
  542                  *   Intel Pentium IV  Processor Specification Update
  543                  * Errata N46 (PAT Index MSB May Be Calculated Incorrectly)
  544                  */
  545                 pat_msr = rdmsr(MSR_PAT);
  546                 pat_msr &= ~PAT_MASK(2);
  547                 pat_msr |= PAT_VALUE(2, PAT_WRITE_COMBINING);
  548                 pat_works = 0;
  549         }
  550         wrmsr(MSR_PAT, pat_msr);
  551 }
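/*
 * [Editor's note -- illustrative addition.]  The resulting PAT layout,
 * as programmed by the code above:
 *
 *	index:     0   1   2    3   4   5   6    7
 *	pat_works: WB  WT  UC-  UC  WP  WC  UC-  UC
 *	otherwise: WB  WT  WC   UC  WB  WT  UC-  UC
 */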
  552 
  553 /*
  554  * Set PG_G on kernel pages.  Only the BSP calls this when SMP is turned on.
  555  */
  556 void
  557 pmap_set_pg(void)
  558 {
  559         pd_entry_t pdir;
  560         pt_entry_t *pte;
  561         vm_offset_t va, endva;
  562         int i; 
  563 
  564         if (pgeflag == 0)
  565                 return;
  566 
  567         i = KERNLOAD/NBPDR;
  568         endva = KERNBASE + KERNend;
  569 
  570         if (pseflag) {
  571                 va = KERNBASE + KERNLOAD;
  572                 while (va  < endva) {
  573                         pdir = kernel_pmap->pm_pdir[KPTDI+i];
  574                         pdir |= pgeflag;
  575                         kernel_pmap->pm_pdir[KPTDI+i] = PTD[KPTDI+i] = pdir;
  576                         invltlb();      /* Play it safe, invltlb() every time */
  577                         i++;
  578                         va += NBPDR;
  579                 }
  580         } else {
  581                 va = (vm_offset_t)btext;
  582                 while (va < endva) {
  583                         pte = vtopte(va);
  584                         if (*pte & PG_V)
  585                                 *pte |= pgeflag;
  586                         invltlb();      /* Play it safe, invltlb() every time */
  587                         va += PAGE_SIZE;
  588                 }
  589         }
  590 }
  591 
  592 /*
  593  * Initialize a vm_page's machine-dependent fields.
  594  */
  595 void
  596 pmap_page_init(vm_page_t m)
  597 {
  598 
  599         TAILQ_INIT(&m->md.pv_list);
  600         m->md.pat_mode = PAT_WRITE_BACK;
  601 }
  602 
  603 #if defined(PAE) && !defined(XEN)
  604 static void *
  605 pmap_pdpt_allocf(uma_zone_t zone, int bytes, u_int8_t *flags, int wait)
  606 {
  607 
  608         /* Inform UMA that this allocator uses kernel_map/object. */
  609         *flags = UMA_SLAB_KERNEL;
  610         return ((void *)kmem_alloc_contig(kernel_map, bytes, wait, 0x0ULL,
  611             0xffffffffULL, 1, 0, VM_MEMATTR_DEFAULT));
  612 }
  613 #endif
  614 
  615 /*
   616  * Abuse the pte nodes for unmapped kva to thread a kva freelist through.
  617  * Requirements:
  618  *  - Must deal with pages in order to ensure that none of the PG_* bits
  619  *    are ever set, PG_V in particular.
  620  *  - Assumes we can write to ptes without pte_store() atomic ops, even
  621  *    on PAE systems.  This should be ok.
  622  *  - Assumes nothing will ever test these addresses for 0 to indicate
  623  *    no mapping instead of correctly checking PG_V.
  624  *  - Assumes a vm_offset_t will fit in a pte (true for i386).
  625  * Because PG_V is never set, there can be no mappings to invalidate.
  626  */
  627 static int ptelist_count = 0;
  628 static vm_offset_t
  629 pmap_ptelist_alloc(vm_offset_t *head)
  630 {
  631         vm_offset_t va;
  632         vm_offset_t *phead = (vm_offset_t *)*head;
  633         
  634         if (ptelist_count == 0) {
  635                 printf("out of memory!!!!!!\n");
  636                 return (0);     /* Out of memory */
  637         }
  638         ptelist_count--;
  639         va = phead[ptelist_count];
  640         return (va);
  641 }
  642 
  643 static void
  644 pmap_ptelist_free(vm_offset_t *head, vm_offset_t va)
  645 {
  646         vm_offset_t *phead = (vm_offset_t *)*head;
  647 
  648         phead[ptelist_count++] = va;
  649 }
  650 
  651 static void
  652 pmap_ptelist_init(vm_offset_t *head, void *base, int npages)
  653 {
  654         int i, nstackpages;
  655         vm_offset_t va;
  656         vm_page_t m;
  657         
  658         nstackpages = (npages + PAGE_SIZE/sizeof(vm_offset_t) - 1)/ (PAGE_SIZE/sizeof(vm_offset_t));
  659         for (i = 0; i < nstackpages; i++) {
  660                 va = (vm_offset_t)base + i * PAGE_SIZE;
  661                 m = vm_page_alloc(NULL, i,
  662                     VM_ALLOC_NORMAL | VM_ALLOC_NOOBJ | VM_ALLOC_WIRED |
  663                     VM_ALLOC_ZERO);
  664                 pmap_qenter(va, &m, 1);
  665         }
  666 
  667         *head = (vm_offset_t)base;
  668         for (i = npages - 1; i >= nstackpages; i--) {
  669                 va = (vm_offset_t)base + i * PAGE_SIZE;
  670                 pmap_ptelist_free(head, va);
  671         }
  672 }
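/*
 * [Editor's note -- illustrative addition.]  In this Xen flavour the free
 * list is kept as a simple stack of virtual addresses: the first few pages
 * of the chunk KVA are backed by real pages (mapped above in
 * pmap_ptelist_init()) and used as an array indexed by ptelist_count;
 * pmap_ptelist_alloc() pops from that array and pmap_ptelist_free() pushes
 * onto it.
 */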
  673 
  674 
  675 /*
  676  *      Initialize the pmap module.
  677  *      Called by vm_init, to initialize any structures that the pmap
  678  *      system needs to map virtual memory.
  679  */
  680 void
  681 pmap_init(void)
  682 {
  683 
  684         /*
  685          * Initialize the address space (zone) for the pv entries.  Set a
  686          * high water mark so that the system can recover from excessive
  687          * numbers of pv entries.
  688          */
  689         TUNABLE_INT_FETCH("vm.pmap.shpgperproc", &shpgperproc);
  690         pv_entry_max = shpgperproc * maxproc + cnt.v_page_count;
  691         TUNABLE_INT_FETCH("vm.pmap.pv_entries", &pv_entry_max);
  692         pv_entry_max = roundup(pv_entry_max, _NPCPV);
  693         pv_entry_high_water = 9 * (pv_entry_max / 10);
  694 
  695         pv_maxchunks = MAX(pv_entry_max / _NPCPV, maxproc);
  696         pv_chunkbase = (struct pv_chunk *)kmem_alloc_nofault(kernel_map,
  697             PAGE_SIZE * pv_maxchunks);
  698         if (pv_chunkbase == NULL)
  699                 panic("pmap_init: not enough kvm for pv chunks");
  700         pmap_ptelist_init(&pv_vafree, pv_chunkbase, pv_maxchunks);
  701 #if defined(PAE) && !defined(XEN)
  702         pdptzone = uma_zcreate("PDPT", NPGPTD * sizeof(pdpt_entry_t), NULL,
  703             NULL, NULL, NULL, (NPGPTD * sizeof(pdpt_entry_t)) - 1,
  704             UMA_ZONE_VM | UMA_ZONE_NOFREE);
  705         uma_zone_set_allocf(pdptzone, pmap_pdpt_allocf);
  706 #endif
  707 }
  708 
  709 
  710 /***************************************************
  711  * Low level helper routines.....
  712  ***************************************************/
  713 
  714 /*
  715  * Determine the appropriate bits to set in a PTE or PDE for a specified
  716  * caching mode.
  717  */
  718 int
  719 pmap_cache_bits(int mode, boolean_t is_pde)
  720 {
  721         int pat_flag, pat_index, cache_bits;
  722 
  723         /* The PAT bit is different for PTE's and PDE's. */
  724         pat_flag = is_pde ? PG_PDE_PAT : PG_PTE_PAT;
  725 
  726         /* If we don't support PAT, map extended modes to older ones. */
  727         if (!(cpu_feature & CPUID_PAT)) {
  728                 switch (mode) {
  729                 case PAT_UNCACHEABLE:
  730                 case PAT_WRITE_THROUGH:
  731                 case PAT_WRITE_BACK:
  732                         break;
  733                 case PAT_UNCACHED:
  734                 case PAT_WRITE_COMBINING:
  735                 case PAT_WRITE_PROTECTED:
  736                         mode = PAT_UNCACHEABLE;
  737                         break;
  738                 }
  739         }
  740         
  741         /* Map the caching mode to a PAT index. */
  742         if (pat_works) {
  743                 switch (mode) {
  744                         case PAT_UNCACHEABLE:
  745                                 pat_index = 3;
  746                                 break;
  747                         case PAT_WRITE_THROUGH:
  748                                 pat_index = 1;
  749                                 break;
  750                         case PAT_WRITE_BACK:
  751                                 pat_index = 0;
  752                                 break;
  753                         case PAT_UNCACHED:
  754                                 pat_index = 2;
  755                                 break;
  756                         case PAT_WRITE_COMBINING:
  757                                 pat_index = 5;
  758                                 break;
  759                         case PAT_WRITE_PROTECTED:
  760                                 pat_index = 4;
  761                                 break;
  762                         default:
  763                                 panic("Unknown caching mode %d\n", mode);
  764                 }
  765         } else {
  766                 switch (mode) {
  767                         case PAT_UNCACHED:
  768                         case PAT_UNCACHEABLE:
  769                         case PAT_WRITE_PROTECTED:
  770                                 pat_index = 3;
  771                                 break;
  772                         case PAT_WRITE_THROUGH:
  773                                 pat_index = 1;
  774                                 break;
  775                         case PAT_WRITE_BACK:
  776                                 pat_index = 0;
  777                                 break;
  778                         case PAT_WRITE_COMBINING:
  779                                 pat_index = 2;
  780                                 break;
  781                         default:
  782                                 panic("Unknown caching mode %d\n", mode);
  783                 }
  784         }       
  785 
  786         /* Map the 3-bit index value into the PAT, PCD, and PWT bits. */
  787         cache_bits = 0;
  788         if (pat_index & 0x4)
  789                 cache_bits |= pat_flag;
  790         if (pat_index & 0x2)
  791                 cache_bits |= PG_NC_PCD;
  792         if (pat_index & 0x1)
  793                 cache_bits |= PG_NC_PWT;
  794         return (cache_bits);
  795 }
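/*
 * [Editor's note -- illustrative addition.]  Worked example: with a
 * working PAT, PAT_WRITE_COMBINING maps to index 5 (binary 101), so the
 * returned bits are the PAT flag plus PG_NC_PWT -- i.e. for a PTE,
 * PG_PTE_PAT | PG_NC_PWT.
 */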
  796 #ifdef SMP
  797 /*
  798  * For SMP, these functions have to use the IPI mechanism for coherence.
  799  *
  800  * N.B.: Before calling any of the following TLB invalidation functions,
  801  * the calling processor must ensure that all stores updating a non-
  802  * kernel page table are globally performed.  Otherwise, another
  803  * processor could cache an old, pre-update entry without being
  804  * invalidated.  This can happen one of two ways: (1) The pmap becomes
  805  * active on another processor after its pm_active field is checked by
  806  * one of the following functions but before a store updating the page
  807  * table is globally performed. (2) The pmap becomes active on another
  808  * processor before its pm_active field is checked but due to
   809  * speculative loads one of the following functions still reads the
  810  * pmap as inactive on the other processor.
  811  * 
  812  * The kernel page table is exempt because its pm_active field is
  813  * immutable.  The kernel page table is always active on every
  814  * processor.
  815  */
  816 void
  817 pmap_invalidate_page(pmap_t pmap, vm_offset_t va)
  818 {
  819         cpumask_t cpumask, other_cpus;
  820 
  821         CTR2(KTR_PMAP, "pmap_invalidate_page: pmap=%p va=0x%x",
  822             pmap, va);
  823         
  824         sched_pin();
  825         if (pmap == kernel_pmap || pmap->pm_active == all_cpus) {
  826                 invlpg(va);
  827                 smp_invlpg(va);
  828         } else {
  829                 cpumask = PCPU_GET(cpumask);
  830                 other_cpus = PCPU_GET(other_cpus);
  831                 if (pmap->pm_active & cpumask)
  832                         invlpg(va);
  833                 if (pmap->pm_active & other_cpus)
  834                         smp_masked_invlpg(pmap->pm_active & other_cpus, va);
  835         }
  836         sched_unpin();
  837         PT_UPDATES_FLUSH();
  838 }
  839 
  840 void
  841 pmap_invalidate_range(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
  842 {
  843         cpumask_t cpumask, other_cpus;
  844         vm_offset_t addr;
  845 
   846         CTR3(KTR_PMAP, "pmap_invalidate_range: pmap=%p sva=0x%x eva=0x%x",
  847             pmap, sva, eva);
  848 
  849         sched_pin();
  850         if (pmap == kernel_pmap || pmap->pm_active == all_cpus) {
  851                 for (addr = sva; addr < eva; addr += PAGE_SIZE)
  852                         invlpg(addr);
  853                 smp_invlpg_range(sva, eva);
  854         } else {
  855                 cpumask = PCPU_GET(cpumask);
  856                 other_cpus = PCPU_GET(other_cpus);
  857                 if (pmap->pm_active & cpumask)
  858                         for (addr = sva; addr < eva; addr += PAGE_SIZE)
  859                                 invlpg(addr);
  860                 if (pmap->pm_active & other_cpus)
  861                         smp_masked_invlpg_range(pmap->pm_active & other_cpus,
  862                             sva, eva);
  863         }
  864         sched_unpin();
  865         PT_UPDATES_FLUSH();
  866 }
  867 
  868 void
  869 pmap_invalidate_all(pmap_t pmap)
  870 {
  871         cpumask_t cpumask, other_cpus;
  872 
   873         CTR1(KTR_PMAP, "pmap_invalidate_all: pmap=%p", pmap);
  874 
  875         sched_pin();
  876         if (pmap == kernel_pmap || pmap->pm_active == all_cpus) {
  877                 invltlb();
  878                 smp_invltlb();
  879         } else {
  880                 cpumask = PCPU_GET(cpumask);
  881                 other_cpus = PCPU_GET(other_cpus);
  882                 if (pmap->pm_active & cpumask)
  883                         invltlb();
  884                 if (pmap->pm_active & other_cpus)
  885                         smp_masked_invltlb(pmap->pm_active & other_cpus);
  886         }
  887         sched_unpin();
  888 }
  889 
  890 void
  891 pmap_invalidate_cache(void)
  892 {
  893 
  894         sched_pin();
  895         wbinvd();
  896         smp_cache_flush();
  897         sched_unpin();
  898 }
  899 #else /* !SMP */
  900 /*
  901  * Normal, non-SMP, 486+ invalidation functions.
  902  * We inline these within pmap.c for speed.
  903  */
  904 PMAP_INLINE void
  905 pmap_invalidate_page(pmap_t pmap, vm_offset_t va)
  906 {
  907         CTR2(KTR_PMAP, "pmap_invalidate_page: pmap=%p va=0x%x",
  908             pmap, va);
  909 
  910         if (pmap == kernel_pmap || pmap->pm_active)
  911                 invlpg(va);
  912         PT_UPDATES_FLUSH();
  913 }
  914 
  915 PMAP_INLINE void
  916 pmap_invalidate_range(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
  917 {
  918         vm_offset_t addr;
  919 
  920         if (eva - sva > PAGE_SIZE)
  921                 CTR3(KTR_PMAP, "pmap_invalidate_range: pmap=%p sva=0x%x eva=0x%x",
  922                     pmap, sva, eva);
  923 
  924         if (pmap == kernel_pmap || pmap->pm_active)
  925                 for (addr = sva; addr < eva; addr += PAGE_SIZE)
  926                         invlpg(addr);
  927         PT_UPDATES_FLUSH();
  928 }
  929 
  930 PMAP_INLINE void
  931 pmap_invalidate_all(pmap_t pmap)
  932 {
  933 
  934         CTR1(KTR_PMAP, "pmap_invalidate_all: pmap=%p", pmap);
  935         
  936         if (pmap == kernel_pmap || pmap->pm_active)
  937                 invltlb();
  938 }
  939 
  940 PMAP_INLINE void
  941 pmap_invalidate_cache(void)
  942 {
  943 
  944         wbinvd();
  945 }
  946 #endif /* !SMP */
  947 
  948 void
  949 pmap_invalidate_cache_range(vm_offset_t sva, vm_offset_t eva)
  950 {
  951 
  952         KASSERT((sva & PAGE_MASK) == 0,
  953             ("pmap_invalidate_cache_range: sva not page-aligned"));
  954         KASSERT((eva & PAGE_MASK) == 0,
  955             ("pmap_invalidate_cache_range: eva not page-aligned"));
  956 
  957         if (cpu_feature & CPUID_SS)
  958                 ; /* If "Self Snoop" is supported, do nothing. */
  959         else if (cpu_feature & CPUID_CLFSH) {
  960 
  961                 /*
  962                  * Otherwise, do per-cache line flush.  Use the mfence
   963  * instruction to ensure that previous stores are
  964                  * included in the write-back.  The processor
  965                  * propagates flush to other processors in the cache
  966                  * coherence domain.
  967                  */
  968                 mfence();
  969                 for (; sva < eva; sva += cpu_clflush_line_size)
  970                         clflush(sva);
  971                 mfence();
  972         } else {
  973 
  974                 /*
   975  * No targeted cache flush methods are supported by the CPU, so
   976  * globally invalidate the cache as a last resort.
  977                  */
  978                 pmap_invalidate_cache();
  979         }
  980 }
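/*
 * [Editor's note -- illustrative addition.]  With a typical
 * cpu_clflush_line_size of 64 bytes, flushing one 4KB page issues 64
 * clflush instructions bracketed by the two mfences above.
 */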
  981 
  982 /*
  983  * Are we current address space or kernel?  N.B. We return FALSE when
  984  * a pmap's page table is in use because a kernel thread is borrowing
  985  * it.  The borrowed page table can change spontaneously, making any
  986  * dependence on its continued use subject to a race condition.
  987  */
  988 static __inline int
  989 pmap_is_current(pmap_t pmap)
  990 {
  991 
  992         return (pmap == kernel_pmap ||
  993             (pmap == vmspace_pmap(curthread->td_proc->p_vmspace) &&
  994                 (pmap->pm_pdir[PTDPTDI] & PG_FRAME) == (PTDpde[0] & PG_FRAME)));
  995 }
  996 
  997 /*
  998  * If the given pmap is not the current or kernel pmap, the returned pte must
  999  * be released by passing it to pmap_pte_release().
 1000  */
 1001 pt_entry_t *
 1002 pmap_pte(pmap_t pmap, vm_offset_t va)
 1003 {
 1004         pd_entry_t newpf;
 1005         pd_entry_t *pde;
 1006 
 1007         pde = pmap_pde(pmap, va);
 1008         if (*pde & PG_PS)
 1009                 return (pde);
 1010         if (*pde != 0) {
 1011                 /* are we current address space or kernel? */
 1012                 if (pmap_is_current(pmap))
 1013                         return (vtopte(va));
 1014                 mtx_lock(&PMAP2mutex);
 1015                 newpf = *pde & PG_FRAME;
 1016                 if ((*PMAP2 & PG_FRAME) != newpf) {
 1017                         PT_SET_MA(PADDR2, newpf | PG_V | PG_A | PG_M);
 1018                         CTR3(KTR_PMAP, "pmap_pte: pmap=%p va=0x%x newpte=0x%08x",
 1019                             pmap, va, (*PMAP2 & 0xffffffff));
 1020                 }
 1021                 
 1022                 return (PADDR2 + (i386_btop(va) & (NPTEPG - 1)));
 1023         }
 1024         return (0);
 1025 }
 1026 
 1027 /*
 1028  * Releases a pte that was obtained from pmap_pte().  Be prepared for the pte
 1029  * being NULL.
 1030  */
 1031 static __inline void
 1032 pmap_pte_release(pt_entry_t *pte)
 1033 {
 1034 
 1035         if ((pt_entry_t *)((vm_offset_t)pte & ~PAGE_MASK) == PADDR2) {
 1036                 CTR1(KTR_PMAP, "pmap_pte_release: pte=0x%jx",
 1037                     *PMAP2);
 1038                 vm_page_lock_queues();
 1039                 PT_SET_VA(PMAP2, 0, TRUE);
 1040                 vm_page_unlock_queues();
 1041                 mtx_unlock(&PMAP2mutex);
 1042         }
 1043 }
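/*
 * [Editor's note -- illustrative addition.]  Typical pairing of the two
 * routines above (a sketch; the locking shown is an assumption based on
 * the PMAP_LOCK usage elsewhere in this file):
 */
#if 0
	pt_entry_t *pte;

	PMAP_LOCK(pmap);
	pte = pmap_pte(pmap, va);
	if (pte != NULL) {
		/* ... examine or modify *pte ... */
		pmap_pte_release(pte);	/* no-op unless PADDR2 was borrowed */
	}
	PMAP_UNLOCK(pmap);
#endif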
 1044 
 1045 static __inline void
 1046 invlcaddr(void *caddr)
 1047 {
 1048 
 1049         invlpg((u_int)caddr);
 1050         PT_UPDATES_FLUSH();
 1051 }
 1052 
 1053 /*
 1054  * Super fast pmap_pte routine best used when scanning
 1055  * the pv lists.  This eliminates many coarse-grained
 1056  * invltlb calls.  Note that many of the pv list
 1057  * scans are across different pmaps.  It is very wasteful
 1058  * to do an entire invltlb for checking a single mapping.
 1059  *
 1060  * If the given pmap is not the current pmap, vm_page_queue_mtx
 1061  * must be held and curthread pinned to a CPU.
 1062  */
 1063 static pt_entry_t *
 1064 pmap_pte_quick(pmap_t pmap, vm_offset_t va)
 1065 {
 1066         pd_entry_t newpf;
 1067         pd_entry_t *pde;
 1068 
 1069         pde = pmap_pde(pmap, va);
 1070         if (*pde & PG_PS)
 1071                 return (pde);
 1072         if (*pde != 0) {
 1073                 /* are we current address space or kernel? */
 1074                 if (pmap_is_current(pmap))
 1075                         return (vtopte(va));
 1076                 mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 1077                 KASSERT(curthread->td_pinned > 0, ("curthread not pinned"));
 1078                 newpf = *pde & PG_FRAME;
 1079                 if ((*PMAP1 & PG_FRAME) != newpf) {
 1080                         PT_SET_MA(PADDR1, newpf | PG_V | PG_A | PG_M);
 1081                         CTR3(KTR_PMAP, "pmap_pte_quick: pmap=%p va=0x%x newpte=0x%08x",
 1082                             pmap, va, (u_long)*PMAP1);
 1083                         
 1084 #ifdef SMP
 1085                         PMAP1cpu = PCPU_GET(cpuid);
 1086 #endif
 1087                         PMAP1changed++;
 1088                 } else
 1089 #ifdef SMP
 1090                 if (PMAP1cpu != PCPU_GET(cpuid)) {
 1091                         PMAP1cpu = PCPU_GET(cpuid);
 1092                         invlcaddr(PADDR1);
 1093                         PMAP1changedcpu++;
 1094                 } else
 1095 #endif
 1096                         PMAP1unchanged++;
 1097                 return (PADDR1 + (i386_btop(va) & (NPTEPG - 1)));
 1098         }
 1099         return (0);
 1100 }
 1101 
 1102 /*
 1103  *      Routine:        pmap_extract
 1104  *      Function:
 1105  *              Extract the physical page address associated
 1106  *              with the given map/virtual_address pair.
 1107  */
 1108 vm_paddr_t 
 1109 pmap_extract(pmap_t pmap, vm_offset_t va)
 1110 {
 1111         vm_paddr_t rtval;
 1112         pt_entry_t *pte;
 1113         pd_entry_t pde;
 1114         pt_entry_t pteval;
 1115         
 1116         rtval = 0;
 1117         PMAP_LOCK(pmap);
 1118         pde = pmap->pm_pdir[va >> PDRSHIFT];
 1119         if (pde != 0) {
 1120                 if ((pde & PG_PS) != 0) {
 1121                         rtval = xpmap_mtop(pde & PG_PS_FRAME) | (va & PDRMASK);
 1122                         PMAP_UNLOCK(pmap);
 1123                         return rtval;
 1124                 }
 1125                 pte = pmap_pte(pmap, va);
 1126                 pteval = *pte ? xpmap_mtop(*pte) : 0;
 1127                 rtval = (pteval & PG_FRAME) | (va & PAGE_MASK);
 1128                 pmap_pte_release(pte);
 1129         }
 1130         PMAP_UNLOCK(pmap);
 1131         return (rtval);
 1132 }
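/*
 * [Editor's note -- illustrative addition.]  E.g. pmap_extract(kernel_pmap,
 * va) yields the physical address that backs a mapped kernel va, with the
 * low 12 bits of va carried through as the page offset.
 */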
 1133 
 1134 /*
 1135  *      Routine:        pmap_extract_ma
 1136  *      Function:
 1137  *              Like pmap_extract, but returns machine address
 1138  */
 1139 vm_paddr_t 
 1140 pmap_extract_ma(pmap_t pmap, vm_offset_t va)
 1141 {
 1142         vm_paddr_t rtval;
 1143         pt_entry_t *pte;
 1144         pd_entry_t pde;
 1145 
 1146         rtval = 0;
 1147         PMAP_LOCK(pmap);
 1148         pde = pmap->pm_pdir[va >> PDRSHIFT];
 1149         if (pde != 0) {
 1150                 if ((pde & PG_PS) != 0) {
 1151                         rtval = (pde & ~PDRMASK) | (va & PDRMASK);
 1152                         PMAP_UNLOCK(pmap);
 1153                         return rtval;
 1154                 }
 1155                 pte = pmap_pte(pmap, va);
 1156                 rtval = (*pte & PG_FRAME) | (va & PAGE_MASK);
 1157                 pmap_pte_release(pte);
 1158         }
 1159         PMAP_UNLOCK(pmap);
 1160         return (rtval);
 1161 }
 1162 
 1163 /*
 1164  *      Routine:        pmap_extract_and_hold
 1165  *      Function:
 1166  *              Atomically extract and hold the physical page
 1167  *              with the given pmap and virtual address pair
 1168  *              if that mapping permits the given protection.
 1169  */
 1170 vm_page_t
 1171 pmap_extract_and_hold(pmap_t pmap, vm_offset_t va, vm_prot_t prot)
 1172 {
 1173         pd_entry_t pde;
 1174         pt_entry_t pte;
 1175         vm_page_t m;
 1176 
 1177         m = NULL;
 1178         vm_page_lock_queues();
 1179         PMAP_LOCK(pmap);
 1180         pde = PT_GET(pmap_pde(pmap, va));
 1181         if (pde != 0) {
 1182                 if (pde & PG_PS) {
 1183                         if ((pde & PG_RW) || (prot & VM_PROT_WRITE) == 0) {
 1184                                 m = PHYS_TO_VM_PAGE((pde & PG_PS_FRAME) |
 1185                                     (va & PDRMASK));
 1186                                 vm_page_hold(m);
 1187                         }
 1188                 } else {
 1189                         sched_pin();
 1190                         pte = PT_GET(pmap_pte_quick(pmap, va));
 1191                         if (*PMAP1)
 1192                                 PT_SET_MA(PADDR1, 0);
 1193                         if ((pte & PG_V) &&
 1194                             ((pte & PG_RW) || (prot & VM_PROT_WRITE) == 0)) {
 1195                                 m = PHYS_TO_VM_PAGE(pte & PG_FRAME);
 1196                                 vm_page_hold(m);
 1197                         }
 1198                         sched_unpin();
 1199                 }
 1200         }
 1201         vm_page_unlock_queues();
 1202         PMAP_UNLOCK(pmap);
 1203         return (m);
 1204 }
 1205 
 1206 /***************************************************
 1207  * Low level mapping routines.....
 1208  ***************************************************/
 1209 
 1210 /*
 1211  * Add a wired page to the kva.
 1212  * Note: not SMP coherent.
 1213  */
 1214 void 
 1215 pmap_kenter(vm_offset_t va, vm_paddr_t pa)
 1216 {
 1217         PT_SET_MA(va, xpmap_ptom(pa)| PG_RW | PG_V | pgeflag);
 1218 }
 1219 
 1220 void 
 1221 pmap_kenter_ma(vm_offset_t va, vm_paddr_t ma)
 1222 {
 1223         pt_entry_t *pte;
 1224 
 1225         pte = vtopte(va);
 1226         pte_store_ma(pte, ma | PG_RW | PG_V | pgeflag);
 1227 }
 1228 
 1229 
 1230 static __inline void 
 1231 pmap_kenter_attr(vm_offset_t va, vm_paddr_t pa, int mode)
 1232 {
 1233         PT_SET_MA(va, pa | PG_RW | PG_V | pgeflag | pmap_cache_bits(mode, 0));
 1234 }
 1235 
 1236 /*
 1237  * Remove a page from the kernel pagetables.
 1238  * Note: not SMP coherent.
 1239  */
 1240 PMAP_INLINE void
 1241 pmap_kremove(vm_offset_t va)
 1242 {
 1243         pt_entry_t *pte;
 1244 
 1245         pte = vtopte(va);
 1246         PT_CLEAR_VA(pte, FALSE);
 1247 }
 1248 
 1249 /*
 1250  *      Used to map a range of physical addresses into kernel
 1251  *      virtual address space.
 1252  *
 1253  *      The value passed in '*virt' is a suggested virtual address for
 1254  *      the mapping. Architectures which can support a direct-mapped
 1255  *      physical to virtual region can return the appropriate address
 1256  *      within that region, leaving '*virt' unchanged. Other
 1257  *      architectures should map the pages starting at '*virt' and
 1258  *      update '*virt' with the first usable address after the mapped
 1259  *      region.
 1260  */
 1261 vm_offset_t
 1262 pmap_map(vm_offset_t *virt, vm_paddr_t start, vm_paddr_t end, int prot)
 1263 {
 1264         vm_offset_t va, sva;
 1265 
 1266         va = sva = *virt;
 1267         CTR4(KTR_PMAP, "pmap_map: va=0x%x start=0x%jx end=0x%jx prot=0x%x",
 1268             va, start, end, prot);
 1269         while (start < end) {
 1270                 pmap_kenter(va, start);
 1271                 va += PAGE_SIZE;
 1272                 start += PAGE_SIZE;
 1273         }
 1274         pmap_invalidate_range(kernel_pmap, sva, va);
 1275         *virt = va;
 1276         return (sva);
 1277 }
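/*
 * [Editor's note -- illustrative addition.]  A hypothetical use of
 * pmap_map() during early boot; the addresses and the some_free_kva
 * placeholder are made up for the example.
 */
#if 0
	vm_offset_t va = some_free_kva;		/* suggested VA (assumption) */
	vm_offset_t mapped;

	mapped = pmap_map(&va, 0x1000000, 0x1004000,
	    VM_PROT_READ | VM_PROT_WRITE);
	/* 'mapped' addresses the 16KB of physical memory; 'va' points past it. */
#endif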
 1278 
 1279 
 1280 /*
  1281  * Add a list of wired pages to the kva.
  1282  * This routine is only used for temporary
 1283  * kernel mappings that do not need to have
 1284  * page modification or references recorded.
 1285  * Note that old mappings are simply written
 1286  * over.  The page *must* be wired.
 1287  * Note: SMP coherent.  Uses a ranged shootdown IPI.
 1288  */
 1289 void
 1290 pmap_qenter(vm_offset_t sva, vm_page_t *ma, int count)
 1291 {
 1292         pt_entry_t *endpte, *pte;
 1293         vm_paddr_t pa;
 1294         vm_offset_t va = sva;
 1295         int mclcount = 0;
 1296         multicall_entry_t mcl[16];
 1297         multicall_entry_t *mclp = mcl;
 1298         int error;
 1299 
 1300         CTR2(KTR_PMAP, "pmap_qenter:sva=0x%x count=%d", va, count);
 1301         pte = vtopte(sva);
 1302         endpte = pte + count;
 1303         while (pte < endpte) {
 1304                 pa = xpmap_ptom(VM_PAGE_TO_PHYS(*ma)) | pgeflag | PG_RW | PG_V | PG_M | PG_A;
 1305 
 1306                 mclp->op = __HYPERVISOR_update_va_mapping;
 1307                 mclp->args[0] = va;
 1308                 mclp->args[1] = (uint32_t)(pa & 0xffffffff);
 1309                 mclp->args[2] = (uint32_t)(pa >> 32);
 1310                 mclp->args[3] = (*pte & PG_V) ? UVMF_INVLPG|UVMF_ALL : 0;
 1311         
 1312                 va += PAGE_SIZE;
 1313                 pte++;
 1314                 ma++;
 1315                 mclp++;
 1316                 mclcount++;
 1317                 if (mclcount == 16) {
 1318                         error = HYPERVISOR_multicall(mcl, mclcount);
 1319                         mclp = mcl;
 1320                         mclcount = 0;
 1321                         KASSERT(error == 0, ("bad multicall %d", error));
 1322                 }               
 1323         }
 1324         if (mclcount) {
 1325                 error = HYPERVISOR_multicall(mcl, mclcount);
 1326                 KASSERT(error == 0, ("bad multicall %d", error));
 1327         }
 1328         
 1329 #ifdef INVARIANTS
 1330         for (pte = vtopte(sva), mclcount = 0; mclcount < count; mclcount++, pte++)
 1331                 KASSERT(*pte, ("pte not set for va=0x%x", sva + mclcount*PAGE_SIZE));
 1332 #endif  
 1333 }
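/*
 * [Editor's note -- illustrative addition.]  The loop above batches the
 * Xen update_va_mapping operations sixteen at a time, so a large
 * pmap_qenter() costs count/16 hypercalls (plus one for any remainder)
 * rather than one hypercall per page.
 */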
 1334 
 1335 
 1336 /*
 1337  * This routine tears out page mappings from the
 1338  * kernel -- it is meant only for temporary mappings.
 1339  * Note: SMP coherent.  Uses a ranged shootdown IPI.
 1340  */
 1341 void
 1342 pmap_qremove(vm_offset_t sva, int count)
 1343 {
 1344         vm_offset_t va;
 1345 
 1346         CTR2(KTR_PMAP, "pmap_qremove: sva=0x%x count=%d", sva, count);
 1347         va = sva;
 1348         vm_page_lock_queues();
 1349         critical_enter();
 1350         while (count-- > 0) {
 1351                 pmap_kremove(va);
 1352                 va += PAGE_SIZE;
 1353         }
 1354         PT_UPDATES_FLUSH();
 1355         pmap_invalidate_range(kernel_pmap, sva, va);
 1356         critical_exit();
 1357         vm_page_unlock_queues();
 1358 }
 1359 
 1360 /***************************************************
 1361  * Page table page management routines.....
 1362  ***************************************************/
 1363 static __inline void
 1364 pmap_free_zero_pages(vm_page_t free)
 1365 {
 1366         vm_page_t m;
 1367 
 1368         while (free != NULL) {
 1369                 m = free;
 1370                 free = m->right;
 1371                 vm_page_free_zero(m);
 1372         }
 1373 }
 1374 
 1375 /*
 1376  * This routine unholds page table pages, and if the hold count
 1377  * drops to zero, then it decrements the wire count.
 1378  */
 1379 static __inline int
 1380 pmap_unwire_pte_hold(pmap_t pmap, vm_page_t m, vm_page_t *free)
 1381 {
 1382 
 1383         --m->wire_count;
 1384         if (m->wire_count == 0)
 1385                 return _pmap_unwire_pte_hold(pmap, m, free);
 1386         else
 1387                 return 0;
 1388 }
 1389 
 1390 static int 
 1391 _pmap_unwire_pte_hold(pmap_t pmap, vm_page_t m, vm_page_t *free)
 1392 {
 1393         vm_offset_t pteva;
 1394 
 1395         PT_UPDATES_FLUSH();
 1396         /*
 1397          * unmap the page table page
 1398          */
 1399         xen_pt_unpin(pmap->pm_pdir[m->pindex]);
 1400         /*
 1401          * page *might* contain residual mapping :-/  
 1402          */
 1403         PD_CLEAR_VA(pmap, m->pindex, TRUE);
 1404         pmap_zero_page(m);
 1405         --pmap->pm_stats.resident_count;
 1406 
 1407         /*
 1408          * This is a release store so that the ordinary store unmapping
 1409          * the page table page is globally performed before TLB shoot-
 1410          * down is begun.
 1411          */
 1412         atomic_subtract_rel_int(&cnt.v_wire_count, 1);
 1413 
 1414         /*
 1415          * Do an invltlb to make the invalidated mapping
 1416          * take effect immediately.
 1417          */
 1418         pteva = VM_MAXUSER_ADDRESS + i386_ptob(m->pindex);
 1419         pmap_invalidate_page(pmap, pteva);
 1420 
 1421         /* 
 1422          * Put page on a list so that it is released after
 1423          * *ALL* TLB shootdown is done
 1424          */
 1425         m->right = *free;
 1426         *free = m;
 1427 
 1428         return 1;
 1429 }
 1430 
 1431 /*
 1432  * After removing a page table entry, this routine is used to
 1433  * conditionally free the page, and manage the hold/wire counts.
 1434  */
 1435 static int
 1436 pmap_unuse_pt(pmap_t pmap, vm_offset_t va, vm_page_t *free)
 1437 {
 1438         pd_entry_t ptepde;
 1439         vm_page_t mpte;
 1440 
 1441         if (va >= VM_MAXUSER_ADDRESS)
 1442                 return 0;
 1443         ptepde = PT_GET(pmap_pde(pmap, va));
 1444         mpte = PHYS_TO_VM_PAGE(ptepde & PG_FRAME);
 1445         return pmap_unwire_pte_hold(pmap, mpte, free);
 1446 }
 1447 
 1448 void
 1449 pmap_pinit0(pmap_t pmap)
 1450 {
 1451 
 1452         PMAP_LOCK_INIT(pmap);
 1453         pmap->pm_pdir = (pd_entry_t *)(KERNBASE + (vm_offset_t)IdlePTD);
 1454 #ifdef PAE
 1455         pmap->pm_pdpt = (pdpt_entry_t *)(KERNBASE + (vm_offset_t)IdlePDPT);
 1456 #endif
 1457         pmap->pm_active = 0;
 1458         PCPU_SET(curpmap, pmap);
 1459         TAILQ_INIT(&pmap->pm_pvchunk);
 1460         bzero(&pmap->pm_stats, sizeof pmap->pm_stats);
 1461         mtx_lock_spin(&allpmaps_lock);
 1462         LIST_INSERT_HEAD(&allpmaps, pmap, pm_list);
 1463         mtx_unlock_spin(&allpmaps_lock);
 1464 }
 1465 
 1466 /*
 1467  * Initialize a preallocated and zeroed pmap structure,
 1468  * such as one in a vmspace structure.
 1469  */
 1470 int
 1471 pmap_pinit(pmap_t pmap)
 1472 {
 1473         vm_page_t m, ptdpg[NPGPTD + 1];
 1474         int npgptd = NPGPTD + 1;
 1475         static int color;
 1476         int i;
 1477 
 1478 #ifdef HAMFISTED_LOCKING
 1479         mtx_lock(&createdelete_lock);
 1480 #endif
 1481 
 1482         PMAP_LOCK_INIT(pmap);
 1483 
 1484         /*
 1485          * No need to allocate page table space yet but we do need a valid
 1486          * page directory table.
 1487          */
 1488         if (pmap->pm_pdir == NULL) {
 1489                 pmap->pm_pdir = (pd_entry_t *)kmem_alloc_nofault(kernel_map,
 1490                     NBPTD);
 1491                 if (pmap->pm_pdir == NULL) {
 1492                         PMAP_LOCK_DESTROY(pmap);
 1493 #ifdef HAMFISTED_LOCKING
 1494                         mtx_unlock(&createdelete_lock);
 1495 #endif
 1496                         return (0);
 1497                 }
 1498 #if defined(XEN) && defined(PAE)        
 1499                 pmap->pm_pdpt = (pd_entry_t *)kmem_alloc_nofault(kernel_map, 1);
 1500 #endif
 1501                 
 1502 #if defined(PAE) && !defined(XEN)
 1503                 pmap->pm_pdpt = uma_zalloc(pdptzone, M_WAITOK | M_ZERO);
 1504                 KASSERT(((vm_offset_t)pmap->pm_pdpt &
 1505                     ((NPGPTD * sizeof(pdpt_entry_t)) - 1)) == 0,
 1506                     ("pmap_pinit: pdpt misaligned"));
 1507                 KASSERT(pmap_kextract((vm_offset_t)pmap->pm_pdpt) < (4ULL<<30),
 1508                     ("pmap_pinit: pdpt above 4g"));
 1509 #endif
 1510         }
 1511 
 1512         /*
 1513          * allocate the page directory page(s)
 1514          */
 1515         for (i = 0; i < npgptd;) {
 1516                 m = vm_page_alloc(NULL, color++,
 1517                     VM_ALLOC_NORMAL | VM_ALLOC_NOOBJ | VM_ALLOC_WIRED |
 1518                     VM_ALLOC_ZERO);
 1519                 if (m == NULL)
 1520                         VM_WAIT;
 1521                 else {
 1522                         ptdpg[i++] = m;
 1523                 }
 1524         }
 1525         pmap_qenter((vm_offset_t)pmap->pm_pdir, ptdpg, NPGPTD);
 1526         for (i = 0; i < NPGPTD; i++) {
 1527                 if ((ptdpg[i]->flags & PG_ZERO) == 0)
 1528                         pagezero(&pmap->pm_pdir[i*NPTEPG]);
 1529         }
 1530 
 1531         mtx_lock_spin(&allpmaps_lock);
 1532         LIST_INSERT_HEAD(&allpmaps, pmap, pm_list);
 1533         mtx_unlock_spin(&allpmaps_lock);
 1534         /* Wire in kernel global address entries. */
 1535 
 1536         bcopy(PTD + KPTDI, pmap->pm_pdir + KPTDI, nkpt * sizeof(pd_entry_t));
 1537 #ifdef PAE
 1538 #ifdef XEN
 1539         pmap_qenter((vm_offset_t)pmap->pm_pdpt, &ptdpg[NPGPTD], 1);
 1540         if ((ptdpg[NPGPTD]->flags & PG_ZERO) == 0)
 1541                 bzero(pmap->pm_pdpt, PAGE_SIZE);
 1542 #endif  
 1543         for (i = 0; i < NPGPTD; i++) {
 1544                 vm_paddr_t ma;
 1545                 
 1546                 ma = xpmap_ptom(VM_PAGE_TO_PHYS(ptdpg[i]));
 1547                 pmap->pm_pdpt[i] = ma | PG_V;
 1548 
 1549         }
 1550 #endif  
 1551 #ifdef XEN
 1552         for (i = 0; i < NPGPTD; i++) {
 1553                 pt_entry_t *pd;
 1554                 vm_paddr_t ma;
 1555                 
 1556                 ma = xpmap_ptom(VM_PAGE_TO_PHYS(ptdpg[i]));
 1557                 pd = pmap->pm_pdir + (i * NPDEPG);
 1558                 PT_SET_MA(pd, *vtopte((vm_offset_t)pd) & ~(PG_M|PG_A|PG_U|PG_RW));
 1559 #if 0           
 1560                 xen_pgd_pin(ma);
 1561 #endif          
 1562         }
 1563         
 1564 #ifdef PAE      
 1565         PT_SET_MA(pmap->pm_pdpt, *vtopte((vm_offset_t)pmap->pm_pdpt) & ~PG_RW);
 1566 #endif
 1567         vm_page_lock_queues();
 1568         xen_flush_queue();
 1569         xen_pgdpt_pin(xpmap_ptom(VM_PAGE_TO_PHYS(ptdpg[NPGPTD])));
 1570         for (i = 0; i < NPGPTD; i++) {
 1571                 vm_paddr_t ma = xpmap_ptom(VM_PAGE_TO_PHYS(ptdpg[i]));
 1572                 PT_SET_VA_MA(&pmap->pm_pdir[PTDPTDI + i], ma | PG_V | PG_A, FALSE);
 1573         }
 1574         xen_flush_queue();
 1575         vm_page_unlock_queues();
 1576 #endif
 1577         pmap->pm_active = 0;
 1578         TAILQ_INIT(&pmap->pm_pvchunk);
 1579         bzero(&pmap->pm_stats, sizeof pmap->pm_stats);
 1580 
 1581 #ifdef HAMFISTED_LOCKING
 1582         mtx_unlock(&createdelete_lock);
 1583 #endif
 1584         return (1);
 1585 }
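
/*
 * Sketch of the allocate-or-wait loop used above to gather the page
 * directory pages: keep calling the allocator until every slot is
 * filled, blocking for memory whenever an allocation fails.  The
 * try_alloc() and wait_for_memory() callbacks are hypothetical
 * stand-ins for vm_page_alloc() and VM_WAIT.
 */
static void
sketch_alloc_all(void *slots[], int nslots,
    void *(*try_alloc)(void), void (*wait_for_memory)(void))
{
        int i;

        for (i = 0; i < nslots; ) {
                void *p = try_alloc();

                if (p == NULL)
                        wait_for_memory();      /* sleep, then retry slot i */
                else
                        slots[i++] = p;
        }
}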
 1586 
 1587 /*
 1588  * This routine is called when the needed page table page is not
 1589  * mapped; it allocates and installs a new page table page.
 1590  */
 1591 static vm_page_t
 1592 _pmap_allocpte(pmap_t pmap, unsigned int ptepindex, int flags)
 1593 {
 1594         vm_paddr_t ptema;
 1595         vm_page_t m;
 1596 
 1597         KASSERT((flags & (M_NOWAIT | M_WAITOK)) == M_NOWAIT ||
 1598             (flags & (M_NOWAIT | M_WAITOK)) == M_WAITOK,
 1599             ("_pmap_allocpte: flags is neither M_NOWAIT nor M_WAITOK"));
 1600 
 1601         /*
 1602          * Allocate a page table page.
 1603          */
 1604         if ((m = vm_page_alloc(NULL, ptepindex, VM_ALLOC_NOOBJ |
 1605             VM_ALLOC_WIRED | VM_ALLOC_ZERO)) == NULL) {
 1606                 if (flags & M_WAITOK) {
 1607                         PMAP_UNLOCK(pmap);
 1608                         vm_page_unlock_queues();
 1609                         VM_WAIT;
 1610                         vm_page_lock_queues();
 1611                         PMAP_LOCK(pmap);
 1612                 }
 1613 
 1614                 /*
 1615                  * Indicate the need to retry.  While waiting, the page table
 1616                  * page may have been allocated.
 1617                  */
 1618                 return (NULL);
 1619         }
 1620         if ((m->flags & PG_ZERO) == 0)
 1621                 pmap_zero_page(m);
 1622 
 1623         /*
 1624          * Map the pagetable page into the process address space, if
 1625          * it isn't already there.
 1626          */
 1627         pmap->pm_stats.resident_count++;
 1628 
 1629         ptema = xpmap_ptom(VM_PAGE_TO_PHYS(m));
 1630         xen_pt_pin(ptema);
 1631         PT_SET_VA_MA(&pmap->pm_pdir[ptepindex],
 1632                 (ptema | PG_U | PG_RW | PG_V | PG_A | PG_M), TRUE);
 1633         
 1634         KASSERT(pmap->pm_pdir[ptepindex],
 1635             ("_pmap_allocpte: ptepindex=%d did not get mapped", ptepindex));
 1636         return (m);
 1637 }
 1638 
 1639 static vm_page_t
 1640 pmap_allocpte(pmap_t pmap, vm_offset_t va, int flags)
 1641 {
 1642         unsigned ptepindex;
 1643         pd_entry_t ptema;
 1644         vm_page_t m;
 1645 
 1646         KASSERT((flags & (M_NOWAIT | M_WAITOK)) == M_NOWAIT ||
 1647             (flags & (M_NOWAIT | M_WAITOK)) == M_WAITOK,
 1648             ("pmap_allocpte: flags is neither M_NOWAIT nor M_WAITOK"));
 1649 
 1650         /*
 1651          * Calculate pagetable page index
 1652          */
 1653         ptepindex = va >> PDRSHIFT;
 1654 retry:
 1655         /*
 1656          * Get the page directory entry
 1657          */
 1658         ptema = pmap->pm_pdir[ptepindex];
 1659 
 1660         /*
 1661          * This supports switching from a 4MB page to a
 1662          * normal 4K page.
 1663          */
 1664         if (ptema & PG_PS) {
 1665                 /*
 1666                  * XXX 
 1667                  */
 1668                 pmap->pm_pdir[ptepindex] = 0;
 1669                 ptema = 0;
 1670                 pmap->pm_stats.resident_count -= NBPDR / PAGE_SIZE;
 1671                 pmap_invalidate_all(kernel_pmap);
 1672         }
 1673 
 1674         /*
 1675          * If the page table page is already mapped, just increment
 1676          * its wire count.
 1677          */
 1678         if (ptema & PG_V) {
 1679                 m = PHYS_TO_VM_PAGE(xpmap_mtop(ptema) & PG_FRAME);
 1680                 m->wire_count++;
 1681         } else {
 1682                 /*
 1683                  * Here if the pte page isn't mapped, or if it has
 1684                  * been deallocated. 
 1685                  */
 1686                 CTR3(KTR_PMAP, "pmap_allocpte: pmap=%p va=0x%08x flags=0x%x",
 1687                     pmap, va, flags);
 1688                 m = _pmap_allocpte(pmap, ptepindex, flags);
 1689                 if (m == NULL && (flags & M_WAITOK))
 1690                         goto retry;
 1691 
 1692                 KASSERT(pmap->pm_pdir[ptepindex], ("ptepindex=%d did not get mapped", ptepindex));
 1693         }
 1694         return (m);
 1695 }
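
/*
 * Sketch of the retry protocol between pmap_allocpte() and
 * _pmap_allocpte() above: when the allocator has to sleep it drops the
 * relevant locks and returns NULL, and the caller loops back to re-read
 * the table because another thread may have installed the entry in the
 * meantime.  The table type and lookup/create callbacks are
 * hypothetical.
 */
struct sketch_table;

static void *
sketch_lookup_or_create(struct sketch_table *tbl, unsigned idx,
    void *(*lookup)(struct sketch_table *, unsigned),
    void *(*create_may_sleep)(struct sketch_table *, unsigned))
{
        void *entry;

retry:
        entry = lookup(tbl, idx);
        if (entry != NULL)
                return (entry);         /* already present: reuse it */
        entry = create_may_sleep(tbl, idx);
        if (entry == NULL)
                goto retry;             /* creator slept; state may have changed */
        return (entry);
}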
 1696 
 1697 
 1698 /***************************************************
 1699  * Pmap allocation/deallocation routines.
 1700  ***************************************************/
 1701 
 1702 #ifdef SMP
 1703 /*
 1704  * Deal with an SMP shootdown of other users of the pmap that we are
 1705  * trying to dispose of.  This can be a bit hairy.
 1706  */
 1707 static cpumask_t *lazymask;
 1708 static u_int lazyptd;
 1709 static volatile u_int lazywait;
 1710 
 1711 void pmap_lazyfix_action(void);
 1712 
 1713 void
 1714 pmap_lazyfix_action(void)
 1715 {
 1716         cpumask_t mymask = PCPU_GET(cpumask);
 1717 
 1718 #ifdef COUNT_IPIS
 1719         (*ipi_lazypmap_counts[PCPU_GET(cpuid)])++;
 1720 #endif
 1721         if (rcr3() == lazyptd)
 1722                 load_cr3(PCPU_GET(curpcb)->pcb_cr3);
 1723         atomic_clear_int(lazymask, mymask);
 1724         atomic_store_rel_int(&lazywait, 1);
 1725 }
 1726 
 1727 static void
 1728 pmap_lazyfix_self(cpumask_t mymask)
 1729 {
 1730 
 1731         if (rcr3() == lazyptd)
 1732                 load_cr3(PCPU_GET(curpcb)->pcb_cr3);
 1733         atomic_clear_int(lazymask, mymask);
 1734 }
 1735 
 1736 
 1737 static void
 1738 pmap_lazyfix(pmap_t pmap)
 1739 {
 1740         cpumask_t mymask, mask;
 1741         u_int spins;
 1742 
 1743         while ((mask = pmap->pm_active) != 0) {
 1744                 spins = 50000000;
 1745                 mask = mask & -mask;    /* Find least significant set bit */
 1746                 mtx_lock_spin(&smp_ipi_mtx);
 1747 #ifdef PAE
 1748                 lazyptd = vtophys(pmap->pm_pdpt);
 1749 #else
 1750                 lazyptd = vtophys(pmap->pm_pdir);
 1751 #endif
 1752                 mymask = PCPU_GET(cpumask);
 1753                 if (mask == mymask) {
 1754                         lazymask = &pmap->pm_active;
 1755                         pmap_lazyfix_self(mymask);
 1756                 } else {
 1757                         atomic_store_rel_int((u_int *)&lazymask,
 1758                             (u_int)&pmap->pm_active);
 1759                         atomic_store_rel_int(&lazywait, 0);
 1760                         ipi_selected(mask, IPI_LAZYPMAP);
 1761                         while (lazywait == 0) {
 1762                                 ia32_pause();
 1763                                 if (--spins == 0)
 1764                                         break;
 1765                         }
 1766                 }
 1767                 mtx_unlock_spin(&smp_ipi_mtx);
 1768                 if (spins == 0)
 1769                         printf("pmap_lazyfix: spun for 50000000\n");
 1770         }
 1771 }
 1772 
 1773 #else   /* SMP */
 1774 
 1775 /*
 1776  * Cleaning up on uniprocessor is easy.  For various reasons, we're
 1777  * unlikely to have to even execute this code, including the fact
 1778  * that the cleanup is deferred until the parent does a wait(2), which
 1779  * means that another userland process has run.
 1780  */
 1781 static void
 1782 pmap_lazyfix(pmap_t pmap)
 1783 {
 1784         u_int cr3;
 1785 
 1786         cr3 = vtophys(pmap->pm_pdir);
 1787         if (cr3 == rcr3()) {
 1788                 load_cr3(PCPU_GET(curpcb)->pcb_cr3);
 1789                 pmap->pm_active &= ~(PCPU_GET(cpumask));
 1790         }
 1791 }
 1792 #endif  /* SMP */
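
/*
 * Sketch of the "mask & -mask" idiom used by pmap_lazyfix() above to
 * peel off one CPU at a time: with unsigned (two's complement)
 * arithmetic, x & -x isolates the lowest set bit of x.  The per-bit
 * handler is a hypothetical placeholder.
 */
static void
sketch_for_each_set_bit(unsigned int mask, void (*handle)(unsigned int bit))
{
        unsigned int lowest;

        while (mask != 0) {
                lowest = mask & -mask;          /* lowest set bit only */
                handle(lowest);
                mask &= ~lowest;                /* clear it and continue */
        }
}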
 1793 
 1794 /*
 1795  * Release any resources held by the given physical map.
 1796  * Called when a pmap initialized by pmap_pinit is being released.
 1797  * Should only be called if the map contains no valid mappings.
 1798  */
 1799 void
 1800 pmap_release(pmap_t pmap)
 1801 {
 1802         vm_page_t m, ptdpg[2*NPGPTD+1];
 1803         vm_paddr_t ma;
 1804         int i;
 1805 #ifdef XEN
 1806 #ifdef PAE      
 1807         int npgptd = NPGPTD + 1;
 1808 #else
 1809         int npgptd = NPGPTD;
 1810 #endif
 1811 #else 
 1812         int npgptd = NPGPTD;
 1813 #endif  
 1814         KASSERT(pmap->pm_stats.resident_count == 0,
 1815             ("pmap_release: pmap resident count %ld != 0",
 1816             pmap->pm_stats.resident_count));
 1817         PT_UPDATES_FLUSH();
 1818 
 1819 #ifdef HAMFISTED_LOCKING
 1820         mtx_lock(&createdelete_lock);
 1821 #endif
 1822 
 1823         pmap_lazyfix(pmap);
 1824         mtx_lock_spin(&allpmaps_lock);
 1825         LIST_REMOVE(pmap, pm_list);
 1826         mtx_unlock_spin(&allpmaps_lock);
 1827 
 1828         for (i = 0; i < NPGPTD; i++)
 1829                 ptdpg[i] = PHYS_TO_VM_PAGE(vtophys(pmap->pm_pdir + (i*NPDEPG)) & PG_FRAME);
 1830         pmap_qremove((vm_offset_t)pmap->pm_pdir, NPGPTD);
 1831 #if defined(PAE) && defined(XEN)
 1832         ptdpg[NPGPTD] = PHYS_TO_VM_PAGE(vtophys(pmap->pm_pdpt));
 1833 #endif  
 1834 
 1835         for (i = 0; i < npgptd; i++) {
 1836                 m = ptdpg[i];
 1837                 ma = xpmap_ptom(VM_PAGE_TO_PHYS(m));
 1838                 /* unpinning L1 and L2 treated the same */
 1839 #if 0
 1840                 xen_pgd_unpin(ma);
 1841 #else
 1842                 if (i == NPGPTD)
 1843                         xen_pgd_unpin(ma);
 1844 #endif
 1845 #ifdef PAE
 1846                 if (i < NPGPTD)
 1847                         KASSERT(xpmap_ptom(VM_PAGE_TO_PHYS(m)) == (pmap->pm_pdpt[i] & PG_FRAME),
 1848                             ("pmap_release: got wrong ptd page"));
 1849 #endif
 1850                 m->wire_count--;
 1851                 atomic_subtract_int(&cnt.v_wire_count, 1);
 1852                 vm_page_free(m);
 1853         }
 1854 #ifdef PAE
 1855         pmap_qremove((vm_offset_t)pmap->pm_pdpt, 1);
 1856 #endif
 1857         PMAP_LOCK_DESTROY(pmap);
 1858 
 1859 #ifdef HAMFISTED_LOCKING
 1860         mtx_unlock(&createdelete_lock);
 1861 #endif
 1862 }
 1863 
 1864 static int
 1865 kvm_size(SYSCTL_HANDLER_ARGS)
 1866 {
 1867         unsigned long ksize = VM_MAX_KERNEL_ADDRESS - KERNBASE;
 1868 
 1869         return sysctl_handle_long(oidp, &ksize, 0, req);
 1870 }
 1871 SYSCTL_PROC(_vm, OID_AUTO, kvm_size, CTLTYPE_LONG|CTLFLAG_RD, 
 1872     0, 0, kvm_size, "IU", "Size of KVM");
 1873 
 1874 static int
 1875 kvm_free(SYSCTL_HANDLER_ARGS)
 1876 {
 1877         unsigned long kfree = VM_MAX_KERNEL_ADDRESS - kernel_vm_end;
 1878 
 1879         return sysctl_handle_long(oidp, &kfree, 0, req);
 1880 }
 1881 SYSCTL_PROC(_vm, OID_AUTO, kvm_free, CTLTYPE_LONG|CTLFLAG_RD, 
 1882     0, 0, kvm_free, "IU", "Amount of KVM free");
 1883 
 1884 /*
 1885  * grow the number of kernel page table entries, if needed
 1886  */
 1887 void
 1888 pmap_growkernel(vm_offset_t addr)
 1889 {
 1890         struct pmap *pmap;
 1891         vm_paddr_t ptppaddr;
 1892         vm_page_t nkpg;
 1893         pd_entry_t newpdir;
 1894 
 1895         mtx_assert(&kernel_map->system_mtx, MA_OWNED);
 1896         if (kernel_vm_end == 0) {
 1897                 kernel_vm_end = KERNBASE;
 1898                 nkpt = 0;
 1899                 while (pdir_pde(PTD, kernel_vm_end)) {
 1900                         kernel_vm_end = (kernel_vm_end + PAGE_SIZE * NPTEPG) & ~(PAGE_SIZE * NPTEPG - 1);
 1901                         nkpt++;
 1902                         if (kernel_vm_end - 1 >= kernel_map->max_offset) {
 1903                                 kernel_vm_end = kernel_map->max_offset;
 1904                                 break;
 1905                         }
 1906                 }
 1907         }
 1908         addr = roundup2(addr, PAGE_SIZE * NPTEPG);
 1909         if (addr - 1 >= kernel_map->max_offset)
 1910                 addr = kernel_map->max_offset;
 1911         while (kernel_vm_end < addr) {
 1912                 if (pdir_pde(PTD, kernel_vm_end)) {
 1913                         kernel_vm_end = (kernel_vm_end + PAGE_SIZE * NPTEPG) & ~(PAGE_SIZE * NPTEPG - 1);
 1914                         if (kernel_vm_end - 1 >= kernel_map->max_offset) {
 1915                                 kernel_vm_end = kernel_map->max_offset;
 1916                                 break;
 1917                         }
 1918                         continue;
 1919                 }
 1920 
 1921                 /*
 1922                  * This index is bogus, but out of the way
 1923                  */
 1924                 nkpg = vm_page_alloc(NULL, nkpt,
 1925                     VM_ALLOC_NOOBJ | VM_ALLOC_SYSTEM | VM_ALLOC_WIRED);
 1926                 if (!nkpg)
 1927                         panic("pmap_growkernel: no memory to grow kernel");
 1928 
 1929                 nkpt++;
 1930 
 1931                 pmap_zero_page(nkpg);
 1932                 ptppaddr = VM_PAGE_TO_PHYS(nkpg);
 1933                 newpdir = (pd_entry_t) (ptppaddr | PG_V | PG_RW | PG_A | PG_M);
 1934                 vm_page_lock_queues();
 1935                 PD_SET_VA(kernel_pmap, (kernel_vm_end >> PDRSHIFT), newpdir, TRUE);
 1936                 mtx_lock_spin(&allpmaps_lock);
 1937                 LIST_FOREACH(pmap, &allpmaps, pm_list)
 1938                         PD_SET_VA(pmap, (kernel_vm_end >> PDRSHIFT), newpdir, TRUE);
 1939 
 1940                 mtx_unlock_spin(&allpmaps_lock);
 1941                 vm_page_unlock_queues();
 1942 
 1943                 kernel_vm_end = (kernel_vm_end + PAGE_SIZE * NPTEPG) & ~(PAGE_SIZE * NPTEPG - 1);
 1944                 if (kernel_vm_end - 1 >= kernel_map->max_offset) {
 1945                         kernel_vm_end = kernel_map->max_offset;
 1946                         break;
 1947                 }
 1948         }
 1949 }
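
/*
 * Sketch of the power-of-two rounding arithmetic used above, assuming
 * the alignment is a power of two.  The roundup2()-style form keeps an
 * already-aligned value, while the "advance" form used to step
 * kernel_vm_end always moves to the next boundary strictly above x.
 */
static unsigned long
sketch_roundup2(unsigned long x, unsigned long align)
{
        return ((x + align - 1) & ~(align - 1));        /* x if already aligned */
}

static unsigned long
sketch_next_boundary(unsigned long x, unsigned long align)
{
        return ((x + align) & ~(align - 1));            /* smallest multiple > x */
}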
 1950 
 1951 
 1952 /***************************************************
 1953  * page management routines.
 1954  ***************************************************/
 1955 
 1956 CTASSERT(sizeof(struct pv_chunk) == PAGE_SIZE);
 1957 CTASSERT(_NPCM == 11);
 1958 
 1959 static __inline struct pv_chunk *
 1960 pv_to_chunk(pv_entry_t pv)
 1961 {
 1962 
 1963         return (struct pv_chunk *)((uintptr_t)pv & ~(uintptr_t)PAGE_MASK);
 1964 }
 1965 
 1966 #define PV_PMAP(pv) (pv_to_chunk(pv)->pc_pmap)
 1967 
 1968 #define PC_FREE0_9      0xfffffffful    /* Free values for index 0 through 9 */
 1969 #define PC_FREE10       0x0000fffful    /* Free values for index 10 */
 1970 
 1971 static uint32_t pc_freemask[11] = {
 1972         PC_FREE0_9, PC_FREE0_9, PC_FREE0_9,
 1973         PC_FREE0_9, PC_FREE0_9, PC_FREE0_9,
 1974         PC_FREE0_9, PC_FREE0_9, PC_FREE0_9,
 1975         PC_FREE0_9, PC_FREE10
 1976 };
 1977 
 1978 SYSCTL_INT(_vm_pmap, OID_AUTO, pv_entry_count, CTLFLAG_RD, &pv_entry_count, 0,
 1979         "Current number of pv entries");
 1980 
 1981 #ifdef PV_STATS
 1982 static int pc_chunk_count, pc_chunk_allocs, pc_chunk_frees, pc_chunk_tryfail;
 1983 
 1984 SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_count, CTLFLAG_RD, &pc_chunk_count, 0,
 1985         "Current number of pv entry chunks");
 1986 SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_allocs, CTLFLAG_RD, &pc_chunk_allocs, 0,
 1987         "Current number of pv entry chunks allocated");
 1988 SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_frees, CTLFLAG_RD, &pc_chunk_frees, 0,
 1989         "Number of pv entry chunks freed");
 1990 SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_tryfail, CTLFLAG_RD, &pc_chunk_tryfail, 0,
 1991         "Number of times tried to get a chunk page but failed.");
 1992 
 1993 static long pv_entry_frees, pv_entry_allocs;
 1994 static int pv_entry_spare;
 1995 
 1996 SYSCTL_LONG(_vm_pmap, OID_AUTO, pv_entry_frees, CTLFLAG_RD, &pv_entry_frees, 0,
 1997         "Current number of pv entry frees");
 1998 SYSCTL_LONG(_vm_pmap, OID_AUTO, pv_entry_allocs, CTLFLAG_RD, &pv_entry_allocs, 0,
 1999         "Current number of pv entry allocs");
 2000 SYSCTL_INT(_vm_pmap, OID_AUTO, pv_entry_spare, CTLFLAG_RD, &pv_entry_spare, 0,
 2001         "Current number of spare pv entries");
 2002 
 2003 static int pmap_collect_inactive, pmap_collect_active;
 2004 
 2005 SYSCTL_INT(_vm_pmap, OID_AUTO, pmap_collect_inactive, CTLFLAG_RD, &pmap_collect_inactive, 0,
 2006         "Number of times pmap_collect was called on the inactive queue");
 2007 SYSCTL_INT(_vm_pmap, OID_AUTO, pmap_collect_active, CTLFLAG_RD, &pmap_collect_active, 0,
 2008         "Number of times pmap_collect was called on the active queue");
 2009 #endif
 2010 
 2011 /*
 2012  * We are in a serious low memory condition.  Resort to
 2013  * drastic measures to free some pages so we can allocate
 2014  * another pv entry chunk.  This is normally called to
 2015  * unmap inactive pages, and if necessary, active pages.
 2016  */
 2017 static void
 2018 pmap_collect(pmap_t locked_pmap, struct vpgqueues *vpq)
 2019 {
 2020         pmap_t pmap;
 2021         pt_entry_t *pte, tpte;
 2022         pv_entry_t next_pv, pv;
 2023         vm_offset_t va;
 2024         vm_page_t m, free;
 2025 
 2026         sched_pin();
 2027         TAILQ_FOREACH(m, &vpq->pl, pageq) {
 2028                 if ((m->flags & PG_MARKER) != 0 || m->hold_count || m->busy)
 2029                         continue;
 2030                 TAILQ_FOREACH_SAFE(pv, &m->md.pv_list, pv_list, next_pv) {
 2031                         va = pv->pv_va;
 2032                         pmap = PV_PMAP(pv);
 2033                         /* Avoid deadlock and lock recursion. */
 2034                         if (pmap > locked_pmap)
 2035                                 PMAP_LOCK(pmap);
 2036                         else if (pmap != locked_pmap && !PMAP_TRYLOCK(pmap))
 2037                                 continue;
 2038                         pmap->pm_stats.resident_count--;
 2039                         pte = pmap_pte_quick(pmap, va);
 2040                         tpte = pte_load_clear(pte);
 2041                         KASSERT((tpte & PG_W) == 0,
 2042                             ("pmap_collect: wired pte %#jx", (uintmax_t)tpte));
 2043                         if (tpte & PG_A)
 2044                                 vm_page_flag_set(m, PG_REFERENCED);
 2045                         if (tpte & PG_M) {
 2046                                 KASSERT((tpte & PG_RW),
 2047         ("pmap_collect: modified page not writable: va: %#x, pte: %#jx",
 2048                                     va, (uintmax_t)tpte));
 2049                                 vm_page_dirty(m);
 2050                         }
 2051                         free = NULL;
 2052                         pmap_unuse_pt(pmap, va, &free);
 2053                         pmap_invalidate_page(pmap, va);
 2054                         pmap_free_zero_pages(free);
 2055                         TAILQ_REMOVE(&m->md.pv_list, pv, pv_list);
 2056                         if (TAILQ_EMPTY(&m->md.pv_list))
 2057                                 vm_page_flag_clear(m, PG_WRITEABLE);
 2058                         free_pv_entry(pmap, pv);
 2059                         if (pmap != locked_pmap)
 2060                                 PMAP_UNLOCK(pmap);
 2061                 }
 2062         }
 2063         sched_unpin();
 2064 }
 2065 
 2066 
 2067 /*
 2068  * free the pv_entry back to the free list
 2069  */
 2070 static void
 2071 free_pv_entry(pmap_t pmap, pv_entry_t pv)
 2072 {
 2073         vm_page_t m;
 2074         struct pv_chunk *pc;
 2075         int idx, field, bit;
 2076 
 2077         mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 2078         PMAP_LOCK_ASSERT(pmap, MA_OWNED);
 2079         PV_STAT(pv_entry_frees++);
 2080         PV_STAT(pv_entry_spare++);
 2081         pv_entry_count--;
 2082         pc = pv_to_chunk(pv);
 2083         idx = pv - &pc->pc_pventry[0];
 2084         field = idx / 32;
 2085         bit = idx % 32;
 2086         pc->pc_map[field] |= 1ul << bit;
 2087         /* move to head of list */
 2088         TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
 2089         TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc, pc_list);
 2090         for (idx = 0; idx < _NPCM; idx++)
 2091                 if (pc->pc_map[idx] != pc_freemask[idx])
 2092                         return;
 2093         PV_STAT(pv_entry_spare -= _NPCPV);
 2094         PV_STAT(pc_chunk_count--);
 2095         PV_STAT(pc_chunk_frees++);
 2096         /* entire chunk is free, return it */
 2097         TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
 2098         m = PHYS_TO_VM_PAGE(pmap_kextract((vm_offset_t)pc));
 2099         pmap_qremove((vm_offset_t)pc, 1);
 2100         vm_page_unwire(m, 0);
 2101         vm_page_free(m);
 2102         pmap_ptelist_free(&pv_vafree, (vm_offset_t)pc);
 2103 }
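
/*
 * Sketch of the chunk bitmap bookkeeping in free_pv_entry() above:
 * freeing entry "idx" sets bit (idx % 32) in word (idx / 32), and the
 * chunk is completely free once every word matches its free mask (the
 * last word has a short mask because the 3 * 32 + 16 = 112 entries do
 * not fill it).  The sizes are illustrative, not the kernel's
 * _NPCM/_NPCPV values.
 */
#define SKETCH_NWORDS   4                       /* bitmap words per chunk */

static const unsigned int sketch_freemask[SKETCH_NWORDS] = {
        0xffffffffu, 0xffffffffu, 0xffffffffu, 0x0000ffffu
};

struct sketch_chunk {
        unsigned int map[SKETCH_NWORDS];        /* set bit == entry is free */
};

static int                                      /* returns 1 if chunk now all free */
sketch_chunk_free_entry(struct sketch_chunk *pc, int idx)
{
        int word;

        pc->map[idx / 32] |= 1u << (idx % 32);
        for (word = 0; word < SKETCH_NWORDS; word++)
                if (pc->map[word] != sketch_freemask[word])
                        return (0);
        return (1);
}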
 2104 
 2105 /*
 2106  * get a new pv_entry, allocating a block from the system
 2107  * when needed.
 2108  */
 2109 static pv_entry_t
 2110 get_pv_entry(pmap_t pmap, int try)
 2111 {
 2112         static const struct timeval printinterval = { 60, 0 };
 2113         static struct timeval lastprint;
 2114         static vm_pindex_t colour;
 2115         struct vpgqueues *pq;
 2116         int bit, field;
 2117         pv_entry_t pv;
 2118         struct pv_chunk *pc;
 2119         vm_page_t m;
 2120 
 2121         PMAP_LOCK_ASSERT(pmap, MA_OWNED);
 2122         mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 2123         PV_STAT(pv_entry_allocs++);
 2124         pv_entry_count++;
 2125         if (pv_entry_count > pv_entry_high_water)
 2126                 if (ratecheck(&lastprint, &printinterval))
 2127                         printf("Approaching the limit on PV entries, consider "
 2128                             "increasing either the vm.pmap.shpgperproc or the "
 2129                             "vm.pmap.pv_entry_max tunable.\n");
 2130         pq = NULL;
 2131 retry:
 2132         pc = TAILQ_FIRST(&pmap->pm_pvchunk);
 2133         if (pc != NULL) {
 2134                 for (field = 0; field < _NPCM; field++) {
 2135                         if (pc->pc_map[field]) {
 2136                                 bit = bsfl(pc->pc_map[field]);
 2137                                 break;
 2138                         }
 2139                 }
 2140                 if (field < _NPCM) {
 2141                         pv = &pc->pc_pventry[field * 32 + bit];
 2142                         pc->pc_map[field] &= ~(1ul << bit);
 2143                         /* If this was the last item, move it to tail */
 2144                         for (field = 0; field < _NPCM; field++)
 2145                                 if (pc->pc_map[field] != 0) {
 2146                                         PV_STAT(pv_entry_spare--);
 2147                                         return (pv);    /* not full, return */
 2148                                 }
 2149                         TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
 2150                         TAILQ_INSERT_TAIL(&pmap->pm_pvchunk, pc, pc_list);
 2151                         PV_STAT(pv_entry_spare--);
 2152                         return (pv);
 2153                 }
 2154         }
 2155         /*
 2156          * Access to the ptelist "pv_vafree" is synchronized by the page
 2157          * queues lock.  If "pv_vafree" is currently non-empty, it will
 2158          * remain non-empty until pmap_ptelist_alloc() completes.
 2159          */
 2160         if (pv_vafree == 0 || (m = vm_page_alloc(NULL, colour, (pq ==
 2161             &vm_page_queues[PQ_ACTIVE] ? VM_ALLOC_SYSTEM : VM_ALLOC_NORMAL) |
 2162             VM_ALLOC_NOOBJ | VM_ALLOC_WIRED)) == NULL) {
 2163                 if (try) {
 2164                         pv_entry_count--;
 2165                         PV_STAT(pc_chunk_tryfail++);
 2166                         return (NULL);
 2167                 }
 2168                 /*
 2169                  * Reclaim pv entries: At first, destroy mappings to
 2170                  * inactive pages.  After that, if a pv chunk entry
 2171                  * is still needed, destroy mappings to active pages.
 2172                  */
 2173                 if (pq == NULL) {
 2174                         PV_STAT(pmap_collect_inactive++);
 2175                         pq = &vm_page_queues[PQ_INACTIVE];
 2176                 } else if (pq == &vm_page_queues[PQ_INACTIVE]) {
 2177                         PV_STAT(pmap_collect_active++);
 2178                         pq = &vm_page_queues[PQ_ACTIVE];
 2179                 } else
 2180                         panic("get_pv_entry: increase vm.pmap.shpgperproc");
 2181                 pmap_collect(pmap, pq);
 2182                 goto retry;
 2183         }
 2184         PV_STAT(pc_chunk_count++);
 2185         PV_STAT(pc_chunk_allocs++);
 2186         colour++;
 2187         pc = (struct pv_chunk *)pmap_ptelist_alloc(&pv_vafree);
 2188         pmap_qenter((vm_offset_t)pc, &m, 1);
 2189         if ((m->flags & PG_ZERO) == 0)
 2190                 pagezero(pc);
 2191         pc->pc_pmap = pmap;
 2192         pc->pc_map[0] = pc_freemask[0] & ~1ul;  /* preallocated bit 0 */
 2193         for (field = 1; field < _NPCM; field++)
 2194                 pc->pc_map[field] = pc_freemask[field];
 2195         pv = &pc->pc_pventry[0];
 2196         TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc, pc_list);
 2197         PV_STAT(pv_entry_spare += _NPCPV - 1);
 2198         return (pv);
 2199 }
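
/*
 * Companion sketch for get_pv_entry() above: allocation scans the
 * bitmap words for one with a free bit, uses find-first-set to pick the
 * lowest such bit (bsfl in the kernel, __builtin_ctz here), clears it,
 * and converts (word, bit) back into an entry index.  It reuses the
 * hypothetical sketch_chunk layout shown after free_pv_entry() above.
 */
static int                                      /* entry index, or -1 if chunk full */
sketch_chunk_alloc_entry(struct sketch_chunk *pc)
{
        int word, bit;

        for (word = 0; word < SKETCH_NWORDS; word++) {
                if (pc->map[word] != 0) {
                        bit = __builtin_ctz(pc->map[word]);
                        pc->map[word] &= ~(1u << bit);
                        return (word * 32 + bit);
                }
        }
        return (-1);
}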
 2200 
 2201 static void
 2202 pmap_remove_entry(pmap_t pmap, vm_page_t m, vm_offset_t va)
 2203 {
 2204         pv_entry_t pv;
 2205 
 2206         PMAP_LOCK_ASSERT(pmap, MA_OWNED);
 2207         mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 2208         TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
 2209                 if (pmap == PV_PMAP(pv) && va == pv->pv_va)
 2210                         break;
 2211         }
 2212         KASSERT(pv != NULL, ("pmap_remove_entry: pv not found"));
 2213         TAILQ_REMOVE(&m->md.pv_list, pv, pv_list);
 2214         if (TAILQ_EMPTY(&m->md.pv_list))
 2215                 vm_page_flag_clear(m, PG_WRITEABLE);
 2216         free_pv_entry(pmap, pv);
 2217 }
 2218 
 2219 /*
 2220  * Create a pv entry for page at pa for
 2221  * (pmap, va).
 2222  */
 2223 static void
 2224 pmap_insert_entry(pmap_t pmap, vm_offset_t va, vm_page_t m)
 2225 {
 2226         pv_entry_t pv;
 2227 
 2228         PMAP_LOCK_ASSERT(pmap, MA_OWNED);
 2229         mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 2230         pv = get_pv_entry(pmap, FALSE);
 2231         pv->pv_va = va;
 2232         TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_list);
 2233 }
 2234 
 2235 /*
 2236  * Conditionally create a pv entry.
 2237  */
 2238 static boolean_t
 2239 pmap_try_insert_pv_entry(pmap_t pmap, vm_offset_t va, vm_page_t m)
 2240 {
 2241         pv_entry_t pv;
 2242 
 2243         PMAP_LOCK_ASSERT(pmap, MA_OWNED);
 2244         mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 2245         if (pv_entry_count < pv_entry_high_water && 
 2246             (pv = get_pv_entry(pmap, TRUE)) != NULL) {
 2247                 pv->pv_va = va;
 2248                 TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_list);
 2249                 return (TRUE);
 2250         } else
 2251                 return (FALSE);
 2252 }
 2253 
 2254 /*
 2255  * pmap_remove_pte: unmap a single page table entry for a process.
 2256  */
 2257 static int
 2258 pmap_remove_pte(pmap_t pmap, pt_entry_t *ptq, vm_offset_t va, vm_page_t *free)
 2259 {
 2260         pt_entry_t oldpte;
 2261         vm_page_t m;
 2262 
 2263         CTR3(KTR_PMAP, "pmap_remove_pte: pmap=%p *ptq=0x%x va=0x%x",
 2264             pmap, (u_long)*ptq, va);
 2265         
 2266         mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 2267         PMAP_LOCK_ASSERT(pmap, MA_OWNED);
 2268         oldpte = *ptq;
 2269         PT_SET_VA_MA(ptq, 0, TRUE);
 2270         if (oldpte & PG_W)
 2271                 pmap->pm_stats.wired_count -= 1;
 2272         /*
 2273          * Machines that don't support invlpg also don't support
 2274          * PG_G.
 2275          */
 2276         if (oldpte & PG_G)
 2277                 pmap_invalidate_page(kernel_pmap, va);
 2278         pmap->pm_stats.resident_count -= 1;
 2279         if (oldpte & PG_MANAGED) {
 2280                 m = PHYS_TO_VM_PAGE(xpmap_mtop(oldpte) & PG_FRAME);
 2281                 if (oldpte & PG_M) {
 2282                         KASSERT((oldpte & PG_RW),
 2283         ("pmap_remove_pte: modified page not writable: va: %#x, pte: %#jx",
 2284                             va, (uintmax_t)oldpte));
 2285                         vm_page_dirty(m);
 2286                 }
 2287                 if (oldpte & PG_A)
 2288                         vm_page_flag_set(m, PG_REFERENCED);
 2289                 pmap_remove_entry(pmap, m, va);
 2290         }
 2291         return (pmap_unuse_pt(pmap, va, free));
 2292 }
 2293 
 2294 /*
 2295  * Remove a single page from a process address space
 2296  */
 2297 static void
 2298 pmap_remove_page(pmap_t pmap, vm_offset_t va, vm_page_t *free)
 2299 {
 2300         pt_entry_t *pte;
 2301 
 2302         CTR2(KTR_PMAP, "pmap_remove_page: pmap=%p va=0x%x",
 2303             pmap, va);
 2304         
 2305         mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 2306         KASSERT(curthread->td_pinned > 0, ("curthread not pinned"));
 2307         PMAP_LOCK_ASSERT(pmap, MA_OWNED);
 2308         if ((pte = pmap_pte_quick(pmap, va)) == NULL || (*pte & PG_V) == 0)
 2309                 return;
 2310         pmap_remove_pte(pmap, pte, va, free);
 2311         pmap_invalidate_page(pmap, va);
 2312         if (*PMAP1)
 2313                 PT_SET_MA(PADDR1, 0);
 2314 
 2315 }
 2316 
 2317 /*
 2318  *      Remove the given range of addresses from the specified map.
 2319  *
 2320  *      It is assumed that the start and end are properly
 2321  *      rounded to the page size.
 2322  */
 2323 void
 2324 pmap_remove(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
 2325 {
 2326         vm_offset_t pdnxt;
 2327         pd_entry_t ptpaddr;
 2328         pt_entry_t *pte;
 2329         vm_page_t free = NULL;
 2330         int anyvalid;
 2331         
 2332         CTR3(KTR_PMAP, "pmap_remove: pmap=%p sva=0x%x eva=0x%x",
 2333             pmap, sva, eva);
 2334         
 2335         /*
 2336          * Perform an unsynchronized read.  This is, however, safe.
 2337          */
 2338         if (pmap->pm_stats.resident_count == 0)
 2339                 return;
 2340 
 2341         anyvalid = 0;
 2342 
 2343         vm_page_lock_queues();
 2344         sched_pin();
 2345         PMAP_LOCK(pmap);
 2346 
 2347         /*
 2348          * Special handling for removing a single page.  This is a
 2349          * very common operation, so it is worth short-circuiting
 2350          * the general loop below.
 2351          */
 2352         if ((sva + PAGE_SIZE == eva) && 
 2353             ((pmap->pm_pdir[(sva >> PDRSHIFT)] & PG_PS) == 0)) {
 2354                 pmap_remove_page(pmap, sva, &free);
 2355                 goto out;
 2356         }
 2357 
 2358         for (; sva < eva; sva = pdnxt) {
 2359                 unsigned pdirindex;
 2360 
 2361                 /*
 2362                  * Calculate index for next page table.
 2363                  */
 2364                 pdnxt = (sva + NBPDR) & ~PDRMASK;
 2365                 if (pmap->pm_stats.resident_count == 0)
 2366                         break;
 2367 
 2368                 pdirindex = sva >> PDRSHIFT;
 2369                 ptpaddr = pmap->pm_pdir[pdirindex];
 2370 
 2371                 /*
 2372                  * Weed out invalid mappings. Note: we assume that the page
 2373                  * directory table is always allocated, and in kernel virtual.
 2374                  */
 2375                 if (ptpaddr == 0)
 2376                         continue;
 2377 
 2378                 /*
 2379                  * Check for large page.
 2380                  */
 2381                 if ((ptpaddr & PG_PS) != 0) {
 2382                         PD_CLEAR_VA(pmap, pdirindex, TRUE);
 2383                         pmap->pm_stats.resident_count -= NBPDR / PAGE_SIZE;
 2384                         anyvalid = 1;
 2385                         continue;
 2386                 }
 2387 
 2388                 /*
 2389                  * Limit our scan to either the end of the va represented
 2390                  * by the current page table page, or to the end of the
 2391                  * range being removed.
 2392                  */
 2393                 if (pdnxt > eva)
 2394                         pdnxt = eva;
 2395 
 2396                 for (pte = pmap_pte_quick(pmap, sva); sva != pdnxt; pte++,
 2397                     sva += PAGE_SIZE) {
 2398                         if ((*pte & PG_V) == 0)
 2399                                 continue;
 2400 
 2401                         /*
 2402                          * The TLB entry for a PG_G mapping is invalidated
 2403                          * by pmap_remove_pte().
 2404                          */
 2405                         if ((*pte & PG_G) == 0)
 2406                                 anyvalid = 1;
 2407                         if (pmap_remove_pte(pmap, pte, sva, &free))
 2408                                 break;
 2409                 }
 2410         }
 2411         PT_UPDATES_FLUSH();
 2412         if (*PMAP1)
 2413                 PT_SET_VA_MA(PMAP1, 0, TRUE);
 2414 out:
 2415         if (anyvalid)
 2416                 pmap_invalidate_all(pmap);
 2417         sched_unpin();
 2418         vm_page_unlock_queues();
 2419         PMAP_UNLOCK(pmap);
 2420         pmap_free_zero_pages(free);
 2421 }
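
/*
 * Sketch of the range walk used by pmap_remove() above (and again in
 * pmap_protect()): the loop advances in page-directory-sized strides,
 * first computing the start of the next 4MB region
 * ((sva + NBPDR) & ~PDRMASK), then clamping to the end of the requested
 * range so the inner per-page loop never crosses a page table page.
 * The constants and the per-page callback are illustrative.
 */
#define SKETCH_PAGE_SIZE        4096UL
#define SKETCH_NBPDR            (1024UL * SKETCH_PAGE_SIZE)     /* 4MB */
#define SKETCH_PDRMASK          (SKETCH_NBPDR - 1)

static void
sketch_walk_range(unsigned long sva, unsigned long eva,
    void (*per_page)(unsigned long va))
{
        unsigned long pdnxt, va;

        for (; sva < eva; sva = pdnxt) {
                pdnxt = (sva + SKETCH_NBPDR) & ~SKETCH_PDRMASK; /* next 4MB boundary */
                if (pdnxt > eva)
                        pdnxt = eva;                            /* clamp to range end */
                for (va = sva; va != pdnxt; va += SKETCH_PAGE_SIZE)
                        per_page(va);
        }
}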
 2422 
 2423 /*
 2424  *      Routine:        pmap_remove_all
 2425  *      Function:
 2426  *              Removes this physical page from
 2427  *              all physical maps in which it resides.
 2428  *              Reflects back modify bits to the pager.
 2429  *
 2430  *      Notes:
 2431  *              Original versions of this routine were very
 2432  *              inefficient because they iteratively called
 2433  *              pmap_remove (slow...)
 2434  */
 2435 
 2436 void
 2437 pmap_remove_all(vm_page_t m)
 2438 {
 2439         pv_entry_t pv;
 2440         pmap_t pmap;
 2441         pt_entry_t *pte, tpte;
 2442         vm_page_t free;
 2443 
 2444 #if defined(PMAP_DIAGNOSTIC)
 2445         /*
 2446          * XXX This makes pmap_remove_all() illegal for non-managed pages!
 2447          */
 2448         if (m->flags & PG_FICTITIOUS) {
 2449                 panic("pmap_remove_all: illegal for unmanaged page, va: 0x%jx",
 2450                     VM_PAGE_TO_PHYS(m) & 0xffffffff);
 2451         }
 2452 #endif
 2453         mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 2454         sched_pin();
 2455         while ((pv = TAILQ_FIRST(&m->md.pv_list)) != NULL) {
 2456                 pmap = PV_PMAP(pv);
 2457                 PMAP_LOCK(pmap);
 2458                 pmap->pm_stats.resident_count--;
 2459                 pte = pmap_pte_quick(pmap, pv->pv_va);
 2460 
 2461                 tpte = *pte;
 2462                 PT_SET_VA_MA(pte, 0, TRUE);
 2463                 if (tpte & PG_W)
 2464                         pmap->pm_stats.wired_count--;
 2465                 if (tpte & PG_A)
 2466                         vm_page_flag_set(m, PG_REFERENCED);
 2467 
 2468                 /*
 2469                  * Update the vm_page_t clean and reference bits.
 2470                  */
 2471                 if (tpte & PG_M) {
 2472                         KASSERT((tpte & PG_RW),
 2473         ("pmap_remove_all: modified page not writable: va: %#x, pte: %#jx",
 2474                             pv->pv_va, (uintmax_t)tpte));
 2475                         vm_page_dirty(m);
 2476                 }
 2477                 free = NULL;
 2478                 pmap_unuse_pt(pmap, pv->pv_va, &free);
 2479                 pmap_invalidate_page(pmap, pv->pv_va);
 2480                 pmap_free_zero_pages(free);
 2481                 TAILQ_REMOVE(&m->md.pv_list, pv, pv_list);
 2482                 free_pv_entry(pmap, pv);
 2483                 PMAP_UNLOCK(pmap);
 2484         }
 2485         vm_page_flag_clear(m, PG_WRITEABLE);
 2486         PT_UPDATES_FLUSH();
 2487         if (*PMAP1)
 2488                 PT_SET_MA(PADDR1, 0);
 2489         sched_unpin();
 2490 }
 2491 
 2492 /*
 2493  *      Set the physical protection on the
 2494  *      specified range of this map as requested.
 2495  */
 2496 void
 2497 pmap_protect(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot)
 2498 {
 2499         vm_offset_t pdnxt;
 2500         pd_entry_t ptpaddr;
 2501         pt_entry_t *pte;
 2502         int anychanged;
 2503 
 2504         CTR4(KTR_PMAP, "pmap_protect: pmap=%p sva=0x%x eva=0x%x prot=0x%x",
 2505             pmap, sva, eva, prot);
 2506         
 2507         if ((prot & VM_PROT_READ) == VM_PROT_NONE) {
 2508                 pmap_remove(pmap, sva, eva);
 2509                 return;
 2510         }
 2511 
 2512 #ifdef PAE
 2513         if ((prot & (VM_PROT_WRITE|VM_PROT_EXECUTE)) ==
 2514             (VM_PROT_WRITE|VM_PROT_EXECUTE))
 2515                 return;
 2516 #else
 2517         if (prot & VM_PROT_WRITE)
 2518                 return;
 2519 #endif
 2520 
 2521         anychanged = 0;
 2522 
 2523         vm_page_lock_queues();
 2524         sched_pin();
 2525         PMAP_LOCK(pmap);
 2526         for (; sva < eva; sva = pdnxt) {
 2527                 pt_entry_t obits, pbits;
 2528                 unsigned pdirindex;
 2529 
 2530                 pdnxt = (sva + NBPDR) & ~PDRMASK;
 2531 
 2532                 pdirindex = sva >> PDRSHIFT;
 2533                 ptpaddr = pmap->pm_pdir[pdirindex];
 2534 
 2535                 /*
 2536                  * Weed out invalid mappings. Note: we assume that the page
 2537                  * directory table is always allocated, and in kernel virtual.
 2538                  */
 2539                 if (ptpaddr == 0)
 2540                         continue;
 2541 
 2542                 /*
 2543                  * Check for large page.
 2544                  */
 2545                 if ((ptpaddr & PG_PS) != 0) {
 2546                         if ((prot & VM_PROT_WRITE) == 0)
 2547                                 pmap->pm_pdir[pdirindex] &= ~(PG_M|PG_RW);
 2548 #ifdef PAE
 2549                         if ((prot & VM_PROT_EXECUTE) == 0)
 2550                                 pmap->pm_pdir[pdirindex] |= pg_nx;
 2551 #endif
 2552                         anychanged = 1;
 2553                         continue;
 2554                 }
 2555 
 2556                 if (pdnxt > eva)
 2557                         pdnxt = eva;
 2558 
 2559                 for (pte = pmap_pte_quick(pmap, sva); sva != pdnxt; pte++,
 2560                     sva += PAGE_SIZE) {
 2561                         vm_page_t m;
 2562 
 2563 retry:
 2564                         /*
 2565                          * Regardless of whether a pte is 32 or 64 bits in
 2566                          * size, PG_RW, PG_A, and PG_M are among the least
 2567                          * significant 32 bits.
 2568                          */
 2569                         obits = pbits = *pte;
 2570                         if ((pbits & PG_V) == 0)
 2571                                 continue;
 2572                         if (pbits & PG_MANAGED) {
 2573                                 m = NULL;
 2574                                 if (pbits & PG_A) {
 2575                                         m = PHYS_TO_VM_PAGE(xpmap_mtop(pbits) & PG_FRAME);
 2576                                         vm_page_flag_set(m, PG_REFERENCED);
 2577                                         pbits &= ~PG_A;
 2578                                 }
 2579                                 if ((pbits & PG_M) != 0) {
 2580                                         if (m == NULL)
 2581                                                 m = PHYS_TO_VM_PAGE(xpmap_mtop(pbits) & PG_FRAME);
 2582                                         vm_page_dirty(m);
 2583                                 }
 2584                         }
 2585 
 2586                         if ((prot & VM_PROT_WRITE) == 0)
 2587                                 pbits &= ~(PG_RW | PG_M);
 2588 #ifdef PAE
 2589                         if ((prot & VM_PROT_EXECUTE) == 0)
 2590                                 pbits |= pg_nx;
 2591 #endif
 2592 
 2593                         if (pbits != obits) {
 2594 #ifdef XEN
 2595                                 obits = *pte;
 2596                                 PT_SET_VA_MA(pte, pbits, TRUE);
 2597                                 if (*pte != pbits)
 2598                                         goto retry;
 2599 #else                           
 2600 #ifdef PAE
 2601                                 if (!atomic_cmpset_64(pte, obits, pbits))
 2602                                         goto retry;
 2603 #else
 2604                                 if (!atomic_cmpset_int((u_int *)pte, obits,
 2605                                     pbits))
 2606                                         goto retry;
 2607 #endif
 2608 #endif
 2609                                 if (obits & PG_G)
 2610                                         pmap_invalidate_page(pmap, sva);
 2611                                 else
 2612                                         anychanged = 1;
 2613                         }
 2614                 }
 2615         }
 2616         PT_UPDATES_FLUSH();
 2617         if (*PMAP1)
 2618                 PT_SET_VA_MA(PMAP1, 0, TRUE);
 2619         if (anychanged)
 2620                 pmap_invalidate_all(pmap);
 2621         sched_unpin();
 2622         vm_page_unlock_queues();
 2623         PMAP_UNLOCK(pmap);
 2624 }
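
/*
 * Sketch of the compare-and-swap retry loop in pmap_protect() above
 * (the non-Xen path): the new value is derived from a snapshot of the
 * entry, and if the hardware has changed the entry in the meantime
 * (e.g. set the accessed/modified bits) the update is recomputed and
 * retried.  Uses C11 atomics and illustrative bit masks rather than the
 * kernel's atomic_cmpset routines.
 */
#include <stdatomic.h>
#include <stdint.h>

#define SKETCH_PG_RW    0x002u
#define SKETCH_PG_M     0x040u

/* Clear the writable and modified bits without losing concurrent updates. */
static uint32_t
sketch_pte_write_protect(_Atomic uint32_t *pte)
{
        uint32_t obits, pbits;

        obits = atomic_load(pte);
        do {
                pbits = obits & ~(SKETCH_PG_RW | SKETCH_PG_M);
        } while (!atomic_compare_exchange_weak(pte, &obits, pbits));
        return (obits);                 /* previous contents, for the caller */
}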
 2625 
 2626 /*
 2627  *      Insert the given physical page (p) at
 2628  *      the specified virtual address (v) in the
 2629  *      target physical map with the protection requested.
 2630  *
 2631  *      If specified, the page will be wired down, meaning
 2632  *      that the related pte can not be reclaimed.
 2633  *
 2634  *      NB:  This is the only routine which MAY NOT lazy-evaluate
 2635  *      or lose information.  That is, this routine must actually
 2636  *      insert this page into the given map NOW.
 2637  */
 2638 void
 2639 pmap_enter(pmap_t pmap, vm_offset_t va, vm_prot_t access, vm_page_t m,
 2640     vm_prot_t prot, boolean_t wired)
 2641 {
 2642         vm_paddr_t pa;
 2643         pd_entry_t *pde;
 2644         pt_entry_t *pte;
 2645         vm_paddr_t opa;
 2646         pt_entry_t origpte, newpte;
 2647         vm_page_t mpte, om;
 2648         boolean_t invlva;
 2649 
 2650         CTR6(KTR_PMAP, "pmap_enter: pmap=%08p va=0x%08x access=0x%x ma=0x%08x prot=0x%x wired=%d",
 2651             pmap, va, access, xpmap_ptom(VM_PAGE_TO_PHYS(m)), prot, wired);
 2652         va = trunc_page(va);
 2653 #ifdef PMAP_DIAGNOSTIC
 2654         if (va > VM_MAX_KERNEL_ADDRESS)
 2655                 panic("pmap_enter: toobig");
 2656         if ((va >= UPT_MIN_ADDRESS) && (va < UPT_MAX_ADDRESS))
 2657                 panic("pmap_enter: invalid to pmap_enter page table pages (va: 0x%x)", va);
 2658 #endif
 2659 
 2660         mpte = NULL;
 2661 
 2662         vm_page_lock_queues();
 2663         PMAP_LOCK(pmap);
 2664         sched_pin();
 2665 
 2666         /*
 2667          * In the case that a page table page is not
 2668          * resident, we are creating it here.
 2669          */
 2670         if (va < VM_MAXUSER_ADDRESS) {
 2671                 mpte = pmap_allocpte(pmap, va, M_WAITOK);
 2672         }
 2673 #if 0 && defined(PMAP_DIAGNOSTIC)
 2674         else {
 2675                 pd_entry_t *pdeaddr = pmap_pde(pmap, va);
 2676                 origpte = *pdeaddr;
 2677                 if ((origpte & PG_V) == 0) { 
 2678                         panic("pmap_enter: invalid kernel page table page, pdir=%p, pde=%p, va=%p\n",
 2679                                 pmap->pm_pdir[PTDPTDI], origpte, va);
 2680                 }
 2681         }
 2682 #endif
 2683 
 2684         pde = pmap_pde(pmap, va);
 2685         if ((*pde & PG_PS) != 0)
 2686                 panic("pmap_enter: attempted pmap_enter on 4MB page");
 2687         pte = pmap_pte_quick(pmap, va);
 2688 
 2689         /*
 2690          * The page directory entry should be valid by now; panic otherwise.
 2691          */
 2692         if (pte == NULL) {
 2693                 panic("pmap_enter: invalid page directory pdir=%#jx, va=%#x\n",
 2694                         (uintmax_t)pmap->pm_pdir[va >> PDRSHIFT], va);
 2695         }
 2696 
 2697         pa = VM_PAGE_TO_PHYS(m);
 2698         om = NULL;
 2699         opa = origpte = 0;
 2700 
 2701 #if 0
 2702         KASSERT((*pte & PG_V) || (*pte == 0), ("address set but not valid pte=%p *pte=0x%016jx",
 2703                 pte, *pte));
 2704 #endif
 2705         origpte = *pte;
 2706         if (origpte)
 2707                 origpte = xpmap_mtop(origpte);
 2708         opa = origpte & PG_FRAME;
 2709 
 2710         /*
 2711          * Mapping has not changed, must be protection or wiring change.
 2712          */
 2713         if (origpte && (opa == pa)) {
 2714                 /*
 2715                  * Wiring change, just update stats. We don't worry about
 2716                  * wiring PT pages as they remain resident as long as there
 2717                  * are valid mappings in them. Hence, if a user page is wired,
 2718                  * the PT page will be also.
 2719                  */
 2720                 if (wired && ((origpte & PG_W) == 0))
 2721                         pmap->pm_stats.wired_count++;
 2722                 else if (!wired && (origpte & PG_W))
 2723                         pmap->pm_stats.wired_count--;
 2724 
 2725                 /*
 2726                  * Remove extra pte reference
 2727                  */
 2728                 if (mpte)
 2729                         mpte->wire_count--;
 2730 
 2731                 /*
 2732                  * We might be turning off write access to the page,
 2733                  * so we go ahead and sense modify status.
 2734                  */
 2735                 if (origpte & PG_MANAGED) {
 2736                         om = m;
 2737                         pa |= PG_MANAGED;
 2738                 }
 2739                 goto validate;
 2740         } 
 2741         /*
 2742          * Mapping has changed, invalidate old range and fall through to
 2743          * handle validating new mapping.
 2744          */
 2745         if (opa) {
 2746                 if (origpte & PG_W)
 2747                         pmap->pm_stats.wired_count--;
 2748                 if (origpte & PG_MANAGED) {
 2749                         om = PHYS_TO_VM_PAGE(opa);
 2750                         pmap_remove_entry(pmap, om, va);
 2751                 } else if (va < VM_MAXUSER_ADDRESS) 
 2752                         printf("va=0x%x is unmanaged :-( \n", va);
 2753                         
 2754                 if (mpte != NULL) {
 2755                         mpte->wire_count--;
 2756                         KASSERT(mpte->wire_count > 0,
 2757                             ("pmap_enter: missing reference to page table page,"
 2758                              " va: 0x%x", va));
 2759                 }
 2760         } else
 2761                 pmap->pm_stats.resident_count++;
 2762 
 2763         /*
 2764          * Enter on the PV list if part of our managed memory.
 2765          */
 2766         if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0) {
 2767                 KASSERT(va < kmi.clean_sva || va >= kmi.clean_eva,
 2768                     ("pmap_enter: managed mapping within the clean submap"));
 2769                 pmap_insert_entry(pmap, va, m);
 2770                 pa |= PG_MANAGED;
 2771         }
 2772 
 2773         /*
 2774          * Increment counters
 2775          */
 2776         if (wired)
 2777                 pmap->pm_stats.wired_count++;
 2778 
 2779 validate:
 2780         /*
 2781          * Now validate mapping with desired protection/wiring.
 2782          */
 2783         newpte = (pt_entry_t)(pa | PG_V);
 2784         if ((prot & VM_PROT_WRITE) != 0) {
 2785                 newpte |= PG_RW;
 2786                 vm_page_flag_set(m, PG_WRITEABLE);
 2787         }
 2788 #ifdef PAE
 2789         if ((prot & VM_PROT_EXECUTE) == 0)
 2790                 newpte |= pg_nx;
 2791 #endif
 2792         if (wired)
 2793                 newpte |= PG_W;
 2794         if (va < VM_MAXUSER_ADDRESS)
 2795                 newpte |= PG_U;
 2796         if (pmap == kernel_pmap)
 2797                 newpte |= pgeflag;
 2798 
 2799         critical_enter();
 2800         /*
 2801          * if the mapping or permission bits are different, we need
 2802          * to update the pte.
 2803          */
 2804         if ((origpte & ~(PG_M|PG_A)) != newpte) {
 2805                 if (origpte) {
 2806                         invlva = FALSE;
 2807                         origpte = *pte;
 2808                         PT_SET_VA(pte, newpte | PG_A, FALSE);
 2809                         if (origpte & PG_A) {
 2810                                 if (origpte & PG_MANAGED)
 2811                                         vm_page_flag_set(om, PG_REFERENCED);
 2812                                 if (opa != VM_PAGE_TO_PHYS(m))
 2813                                         invlva = TRUE;
 2814 #ifdef PAE
 2815                                 if ((origpte & PG_NX) == 0 &&
 2816                                     (newpte & PG_NX) != 0)
 2817                                         invlva = TRUE;
 2818 #endif
 2819                         }
 2820                         if (origpte & PG_M) {
 2821                                 KASSERT((origpte & PG_RW),
 2822         ("pmap_enter: modified page not writable: va: %#x, pte: %#jx",
 2823                                     va, (uintmax_t)origpte));
 2824                                 if ((origpte & PG_MANAGED) != 0)
 2825                                         vm_page_dirty(om);
 2826                                 if ((prot & VM_PROT_WRITE) == 0)
 2827                                         invlva = TRUE;
 2828                         }
 2829                         if (invlva)
 2830                                 pmap_invalidate_page(pmap, va);
 2831                 } else {
 2832                         PT_SET_VA(pte, newpte | PG_A, FALSE);
 2833                 }
 2834                 
 2835         }
 2836         PT_UPDATES_FLUSH();
 2837         critical_exit();
 2838         if (*PMAP1)
 2839                 PT_SET_VA_MA(PMAP1, 0, TRUE);
 2840         sched_unpin();
 2841         vm_page_unlock_queues();
 2842         PMAP_UNLOCK(pmap);
 2843 }
 2844 
 2845 /*
 2846  * Maps a sequence of resident pages belonging to the same object.
 2847  * The sequence begins with the given page m_start.  This page is
 2848  * mapped at the given virtual address start.  Each subsequent page is
 2849  * mapped at a virtual address that is offset from start by the same
 2850  * amount as the page is offset from m_start within the object.  The
 2851  * last page in the sequence is the page with the largest offset from
 2852  * m_start that can be mapped at a virtual address less than the given
 2853  * virtual address end.  Not every virtual page between start and end
 2854  * is mapped; only those for which a resident page exists with the
 2855  * corresponding offset from m_start are mapped.
 2856  */
 2857 void
 2858 pmap_enter_object(pmap_t pmap, vm_offset_t start, vm_offset_t end,
 2859     vm_page_t m_start, vm_prot_t prot)
 2860 {
 2861         vm_page_t m, mpte;
 2862         vm_pindex_t diff, psize;
 2863         multicall_entry_t mcl[16];
 2864         multicall_entry_t *mclp = mcl;
 2865         int error, count = 0;
 2866         
 2867         VM_OBJECT_LOCK_ASSERT(m_start->object, MA_OWNED);
 2868         psize = atop(end - start);
 2869             
 2870         mpte = NULL;
 2871         m = m_start;
 2872         PMAP_LOCK(pmap);
 2873         while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) {
 2874                 mpte = pmap_enter_quick_locked(&mclp, &count, pmap, start + ptoa(diff), m,
 2875                     prot, mpte);
 2876                 m = TAILQ_NEXT(m, listq);
 2877                 if (count == 16) {
 2878                         error = HYPERVISOR_multicall(mcl, count);
 2879                         KASSERT(error == 0, ("bad multicall %d", error));
 2880                         mclp = mcl;
 2881                         count = 0;
 2882                 }
 2883         }
 2884         if (count) {
 2885                 error = HYPERVISOR_multicall(mcl, count);
 2886                 KASSERT(error == 0, ("bad multicall %d", error));
 2887         }
 2888         
 2889         PMAP_UNLOCK(pmap);
 2890 }
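/*
 * Editor's note: a minimal sketch (not part of the original file) of the
 * multicall batching idiom used by pmap_enter_object() above.  PTE updates
 * are queued into a 16-entry array and flushed with a single
 * HYPERVISOR_multicall() whenever the array fills, plus once more for any
 * remainder; mapping 40 pages therefore costs three hypercalls
 * (16 + 16 + 8).  The function and variable names below are hypothetical.
 */
#if 0
static void
example_flush_batched_updates(multicall_entry_t *queued, int n)
{
        multicall_entry_t mcl[16];
        int i, error, count = 0;

        for (i = 0; i < n; i++) {
                mcl[count++] = queued[i];       /* queue one update */
                if (count == 16) {              /* batch is full: flush it */
                        error = HYPERVISOR_multicall(mcl, count);
                        KASSERT(error == 0, ("bad multicall %d", error));
                        count = 0;
                }
        }
        if (count != 0) {                       /* flush the remainder */
                error = HYPERVISOR_multicall(mcl, count);
                KASSERT(error == 0, ("bad multicall %d", error));
        }
}
#endif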
 2891 
 2892 /*
 2893  * This code makes some *MAJOR* assumptions:
 2894  * 1. Current pmap & pmap exists.
 2895  * 2. Not wired.
 2896  * 3. Read access.
 2897  * 4. No page table pages.
 2898  * In exchange, it is *MUCH* faster than pmap_enter...
 2899  */
 2900 
 2901 void
 2902 pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot)
 2903 {
 2904         multicall_entry_t mcl, *mclp;
 2905         int count = 0;
 2906         mclp = &mcl;
 2907         
 2908         CTR4(KTR_PMAP, "pmap_enter_quick: pmap=%p va=0x%x m=%p prot=0x%x",
 2909             pmap, va, m, prot);
 2910         
 2911         PMAP_LOCK(pmap);
 2912         (void) pmap_enter_quick_locked(&mclp, &count, pmap, va, m, prot, NULL);
 2913         if (count)
 2914                 HYPERVISOR_multicall(&mcl, count);
 2915         PMAP_UNLOCK(pmap);
 2916 }
 2917 
 2918 #ifdef notyet
 2919 void
 2920 pmap_enter_quick_range(pmap_t pmap, vm_offset_t *addrs, vm_page_t *pages, vm_prot_t *prots, int count)
 2921 {
 2922         int i, error, index = 0;
 2923         multicall_entry_t mcl[16];
 2924         multicall_entry_t *mclp = mcl;
 2925                 
 2926         PMAP_LOCK(pmap);
 2927         for (i = 0; i < count; i++, addrs++, pages++, prots++) {
 2928                 if (!pmap_is_prefaultable_locked(pmap, *addrs))
 2929                         continue;
 2930 
 2931                 (void) pmap_enter_quick_locked(&mclp, &index, pmap, *addrs, *pages, *prots, NULL);
 2932                 if (index == 16) {
 2933                         error = HYPERVISOR_multicall(mcl, index);
 2934                         mclp = mcl;
 2935                         index = 0;
 2936                         KASSERT(error == 0, ("bad multicall %d", error));
 2937                 }
 2938         }
 2939         if (index) {
 2940                 error = HYPERVISOR_multicall(mcl, index);
 2941                 KASSERT(error == 0, ("bad multicall %d", error));
 2942         }
 2943         
 2944         PMAP_UNLOCK(pmap);
 2945 }
 2946 #endif
 2947 
 2948 static vm_page_t
 2949 pmap_enter_quick_locked(multicall_entry_t **mclpp, int *count, pmap_t pmap, vm_offset_t va, vm_page_t m,
 2950     vm_prot_t prot, vm_page_t mpte)
 2951 {
 2952         pt_entry_t *pte;
 2953         vm_paddr_t pa;
 2954         vm_page_t free;
 2955         multicall_entry_t *mcl = *mclpp;
 2956         
 2957         KASSERT(va < kmi.clean_sva || va >= kmi.clean_eva ||
 2958             (m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0,
 2959             ("pmap_enter_quick_locked: managed mapping within the clean submap"));
 2960         mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 2961         PMAP_LOCK_ASSERT(pmap, MA_OWNED);
 2962 
 2963         /*
 2964          * In the case that a page table page is not
 2965          * resident, we are creating it here.
 2966          */
 2967         if (va < VM_MAXUSER_ADDRESS) {
 2968                 unsigned ptepindex;
 2969                 pd_entry_t ptema;
 2970 
 2971                 /*
 2972                  * Calculate pagetable page index
 2973                  */
 2974                 ptepindex = va >> PDRSHIFT;
 2975                 if (mpte && (mpte->pindex == ptepindex)) {
 2976                         mpte->wire_count++;
 2977                 } else {
 2978                         /*
 2979                          * Get the page directory entry
 2980                          */
 2981                         ptema = pmap->pm_pdir[ptepindex];
 2982 
 2983                         /*
 2984                          * If the page table page is mapped, we just increment
 2985                          * the hold count, and activate it.
 2986                          */
 2987                         if (ptema & PG_V) {
 2988                                 if (ptema & PG_PS)
 2989                                         panic("pmap_enter_quick: unexpected mapping into 4MB page");
 2990                                 mpte = PHYS_TO_VM_PAGE(xpmap_mtop(ptema) & PG_FRAME);
 2991                                 mpte->wire_count++;
 2992                         } else {
 2993                                 mpte = _pmap_allocpte(pmap, ptepindex,
 2994                                     M_NOWAIT);
 2995                                 if (mpte == NULL)
 2996                                         return (mpte);
 2997                         }
 2998                 }
 2999         } else {
 3000                 mpte = NULL;
 3001         }
 3002 
 3003         /*
 3004          * This call to vtopte makes the assumption that we are
 3005          * entering the page into the current pmap.  In order to support
 3006          * quick entry into any pmap, one would likely use pmap_pte_quick.
 3007          * But that isn't as quick as vtopte.
 3008          */
 3009         KASSERT(pmap_is_current(pmap), ("entering pages in non-current pmap"));
 3010         pte = vtopte(va);
 3011         if (*pte & PG_V) {
 3012                 if (mpte != NULL) {
 3013                         mpte->wire_count--;
 3014                         mpte = NULL;
 3015                 }
 3016                 return (mpte);
 3017         }
 3018 
 3019         /*
 3020          * Enter on the PV list if part of our managed memory.
 3021          */
 3022         if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0 &&
 3023             !pmap_try_insert_pv_entry(pmap, va, m)) {
 3024                 if (mpte != NULL) {
 3025                         free = NULL;
 3026                         if (pmap_unwire_pte_hold(pmap, mpte, &free)) {
 3027                                 pmap_invalidate_page(pmap, va);
 3028                                 pmap_free_zero_pages(free);
 3029                         }
 3030                         
 3031                         mpte = NULL;
 3032                 }
 3033                 return (mpte);
 3034         }
 3035 
 3036         /*
 3037          * Increment counters
 3038          */
 3039         pmap->pm_stats.resident_count++;
 3040 
 3041         pa = VM_PAGE_TO_PHYS(m);
 3042 #ifdef PAE
 3043         if ((prot & VM_PROT_EXECUTE) == 0)
 3044                 pa |= pg_nx;
 3045 #endif
 3046 
 3047 #if 0
 3048         /*
 3049          * Now validate mapping with RO protection
 3050          */
 3051         if (m->flags & (PG_FICTITIOUS|PG_UNMANAGED))
 3052                 pte_store(pte, pa | PG_V | PG_U);
 3053         else
 3054                 pte_store(pte, pa | PG_V | PG_U | PG_MANAGED);
 3055 #else
 3056         /*
 3057          * Now validate mapping with RO protection
 3058          */
 3059         if (m->flags & (PG_FICTITIOUS|PG_UNMANAGED))
 3060                 pa = xpmap_ptom(pa | PG_V | PG_U);
 3061         else
 3062                 pa = xpmap_ptom(pa | PG_V | PG_U | PG_MANAGED);
 3063 
 3064         mcl->op = __HYPERVISOR_update_va_mapping;
 3065         mcl->args[0] = va;
 3066         mcl->args[1] = (uint32_t)(pa & 0xffffffff);
 3067         mcl->args[2] = (uint32_t)(pa >> 32);
 3068         mcl->args[3] = 0;
 3069         *mclpp = mcl + 1;
 3070         *count = *count + 1;
 3071 #endif  
 3072         return (mpte);
 3073 }
 3074 
 3075 /*
 3076  * Make a temporary mapping for a physical address.  This is only intended
 3077  * to be used for panic dumps.
 3078  */
 3079 void *
 3080 pmap_kenter_temporary(vm_paddr_t pa, int i)
 3081 {
 3082         vm_offset_t va;
 3083         vm_paddr_t ma = xpmap_ptom(pa);
 3084 
 3085         va = (vm_offset_t)crashdumpmap + (i * PAGE_SIZE);
 3086         PT_SET_MA(va, (ma & ~PAGE_MASK) | PG_V | pgeflag);
 3087         invlpg(va);
 3088         return ((void *)crashdumpmap);
 3089 }
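/*
 * Editor's note: a hypothetical caller sketch (not part of the original
 * file).  A dump-time caller maps a physical page into a slot of
 * crashdumpmap and then reads it through the returned base address.
 */
#if 0
static void
example_copy_phys_page(vm_paddr_t pa, char *buf)
{
        void *base;

        base = pmap_kenter_temporary(pa, 0);    /* slot 0 of crashdumpmap */
        bcopy(base, buf, PAGE_SIZE);            /* read the mapped page */
}
#endif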
 3090 
 3091 /*
 3092  * This code maps large physical mmap regions into the
 3093  * processor address space.  Note that some shortcuts
 3094  * are taken, but the code works.
 3095  */
 3096 void
 3097 pmap_object_init_pt(pmap_t pmap, vm_offset_t addr,
 3098                     vm_object_t object, vm_pindex_t pindex,
 3099                     vm_size_t size)
 3100 {
 3101         vm_page_t p;
 3102 
 3103         VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
 3104         KASSERT(object->type == OBJT_DEVICE || object->type == OBJT_SG,
 3105             ("pmap_object_init_pt: non-device object"));
 3106         if (pseflag && 
 3107             ((addr & (NBPDR - 1)) == 0) && ((size & (NBPDR - 1)) == 0)) {
 3108                 int i;
 3109                 vm_page_t m[1];
 3110                 unsigned int ptepindex;
 3111                 int npdes;
 3112                 pd_entry_t ptepa;
 3113 
 3114                 PMAP_LOCK(pmap);
 3115                 if (pmap->pm_pdir[ptepindex = (addr >> PDRSHIFT)])
 3116                         goto out;
 3117                 PMAP_UNLOCK(pmap);
 3118 retry:
 3119                 p = vm_page_lookup(object, pindex);
 3120                 if (p != NULL) {
 3121                         if (vm_page_sleep_if_busy(p, FALSE, "init4p"))
 3122                                 goto retry;
 3123                 } else {
 3124                         p = vm_page_alloc(object, pindex, VM_ALLOC_NORMAL);
 3125                         if (p == NULL)
 3126                                 return;
 3127                         m[0] = p;
 3128 
 3129                         if (vm_pager_get_pages(object, m, 1, 0) != VM_PAGER_OK) {
 3130                                 vm_page_lock_queues();
 3131                                 vm_page_free(p);
 3132                                 vm_page_unlock_queues();
 3133                                 return;
 3134                         }
 3135 
 3136                         p = vm_page_lookup(object, pindex);
 3137                         vm_page_wakeup(p);
 3138                 }
 3139 
 3140                 ptepa = VM_PAGE_TO_PHYS(p);
 3141                 if (ptepa & (NBPDR - 1))
 3142                         return;
 3143 
 3144                 p->valid = VM_PAGE_BITS_ALL;
 3145 
 3146                 PMAP_LOCK(pmap);
 3147                 pmap->pm_stats.resident_count += size >> PAGE_SHIFT;
 3148                 npdes = size >> PDRSHIFT;
 3149                 critical_enter();
 3150                 for (i = 0; i < npdes; i++) {
 3151                         PD_SET_VA(pmap, ptepindex,
 3152                             ptepa | PG_U | PG_M | PG_RW | PG_V | PG_PS, FALSE);
 3153                         ptepa += NBPDR;
 3154                         ptepindex += 1;
 3155                 }
 3156                 pmap_invalidate_all(pmap);
 3157                 critical_exit();
 3158 out:
 3159                 PMAP_UNLOCK(pmap);
 3160         }
 3161 }
 3162 
 3163 /*
 3164  *      Routine:        pmap_change_wiring
 3165  *      Function:       Change the wiring attribute for a map/virtual-address
 3166  *                      pair.
 3167  *      In/out conditions:
 3168  *                      The mapping must already exist in the pmap.
 3169  */
 3170 void
 3171 pmap_change_wiring(pmap_t pmap, vm_offset_t va, boolean_t wired)
 3172 {
 3173         pt_entry_t *pte;
 3174 
 3175         vm_page_lock_queues();
 3176         PMAP_LOCK(pmap);
 3177         pte = pmap_pte(pmap, va);
 3178 
 3179         if (wired && !pmap_pte_w(pte)) {
 3180                 PT_SET_VA_MA((pte), *(pte) | PG_W, TRUE);
 3181                 pmap->pm_stats.wired_count++;
 3182         } else if (!wired && pmap_pte_w(pte)) {
 3183                 PT_SET_VA_MA((pte), *(pte) & ~PG_W, TRUE);
 3184                 pmap->pm_stats.wired_count--;
 3185         }
 3186         
 3187         /*
 3188          * Wiring is not a hardware characteristic so there is no need to
 3189          * invalidate TLB.
 3190          */
 3191         pmap_pte_release(pte);
 3192         PMAP_UNLOCK(pmap);
 3193         vm_page_unlock_queues();
 3194 }
 3195 
 3196 
 3197 
 3198 /*
 3199  *      Copy the range specified by src_addr/len
 3200  *      from the source map to the range dst_addr/len
 3201  *      in the destination map.
 3202  *
 3203  *      This routine is only advisory and need not do anything.
 3204  */
 3205 
 3206 void
 3207 pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr, vm_size_t len,
 3208           vm_offset_t src_addr)
 3209 {
 3210         vm_page_t   free;
 3211         vm_offset_t addr;
 3212         vm_offset_t end_addr = src_addr + len;
 3213         vm_offset_t pdnxt;
 3214 
 3215         if (dst_addr != src_addr)
 3216                 return;
 3217 
 3218         if (!pmap_is_current(src_pmap)) {
 3219                 CTR2(KTR_PMAP,
 3220                     "pmap_copy, skipping: pdir[PTDPTDI]=0x%jx PTDpde[0]=0x%jx",
 3221                     (src_pmap->pm_pdir[PTDPTDI] & PG_FRAME), (PTDpde[0] & PG_FRAME));
 3222                 
 3223                 return;
 3224         }
 3225         CTR5(KTR_PMAP, "pmap_copy:  dst_pmap=%p src_pmap=%p dst_addr=0x%x len=%d src_addr=0x%x",
 3226             dst_pmap, src_pmap, dst_addr, len, src_addr);
 3227         
 3228 #ifdef HAMFISTED_LOCKING
 3229         mtx_lock(&createdelete_lock);
 3230 #endif
 3231 
 3232         vm_page_lock_queues();
 3233         if (dst_pmap < src_pmap) {
 3234                 PMAP_LOCK(dst_pmap);
 3235                 PMAP_LOCK(src_pmap);
 3236         } else {
 3237                 PMAP_LOCK(src_pmap);
 3238                 PMAP_LOCK(dst_pmap);
 3239         }
 3240         sched_pin();
 3241         for (addr = src_addr; addr < end_addr; addr = pdnxt) {
 3242                 pt_entry_t *src_pte, *dst_pte;
 3243                 vm_page_t dstmpte, srcmpte;
 3244                 pd_entry_t srcptepaddr;
 3245                 unsigned ptepindex;
 3246 
 3247                 if (addr >= UPT_MIN_ADDRESS)
 3248                         panic("pmap_copy: invalid to pmap_copy page tables");
 3249 
 3250                 pdnxt = (addr + NBPDR) & ~PDRMASK;
 3251                 ptepindex = addr >> PDRSHIFT;
 3252 
 3253                 srcptepaddr = PT_GET(&src_pmap->pm_pdir[ptepindex]);
 3254                 if (srcptepaddr == 0)
 3255                         continue;
 3256                         
 3257                 if (srcptepaddr & PG_PS) {
 3258                         if (dst_pmap->pm_pdir[ptepindex] == 0) {
 3259                                 PD_SET_VA(dst_pmap, ptepindex, srcptepaddr & ~PG_W, TRUE);
 3260                                 dst_pmap->pm_stats.resident_count +=
 3261                                     NBPDR / PAGE_SIZE;
 3262                         }
 3263                         continue;
 3264                 }
 3265 
 3266                 srcmpte = PHYS_TO_VM_PAGE(srcptepaddr & PG_FRAME);
 3267                 if (srcmpte->wire_count == 0)
 3268                         panic("pmap_copy: source page table page is unused");
 3269 
 3270                 if (pdnxt > end_addr)
 3271                         pdnxt = end_addr;
 3272 
 3273                 src_pte = vtopte(addr);
 3274                 while (addr < pdnxt) {
 3275                         pt_entry_t ptetemp;
 3276                         ptetemp = *src_pte;
 3277                         /*
 3278                          * we only virtual copy managed pages
 3279                          */
 3280                         if ((ptetemp & PG_MANAGED) != 0) {
 3281                                 dstmpte = pmap_allocpte(dst_pmap, addr,
 3282                                     M_NOWAIT);
 3283                                 if (dstmpte == NULL)
 3284                                         break;
 3285                                 dst_pte = pmap_pte_quick(dst_pmap, addr);
 3286                                 if (*dst_pte == 0 &&
 3287                                     pmap_try_insert_pv_entry(dst_pmap, addr,
 3288                                     PHYS_TO_VM_PAGE(xpmap_mtop(ptetemp) & PG_FRAME))) {
 3289                                         /*
 3290                                          * Clear the wired, modified, and
 3291                                          * accessed (referenced) bits
 3292                                          * during the copy.
 3293                                          */
 3294                                         KASSERT(ptetemp != 0, ("src_pte not set"));
 3295                                         PT_SET_VA_MA(dst_pte, ptetemp & ~(PG_W | PG_M | PG_A), TRUE /* XXX debug */);
 3296                                         KASSERT(*dst_pte == (ptetemp & ~(PG_W | PG_M | PG_A)),
 3297                                             ("no pmap copy expected: 0x%jx saw: 0x%jx",
 3298                                                 ptetemp &  ~(PG_W | PG_M | PG_A), *dst_pte));
 3299                                         dst_pmap->pm_stats.resident_count++;
 3300                                 } else {
 3301                                         free = NULL;
 3302                                         if (pmap_unwire_pte_hold(dst_pmap,
 3303                                             dstmpte, &free)) {
 3304                                                 pmap_invalidate_page(dst_pmap,
 3305                                                     addr);
 3306                                                 pmap_free_zero_pages(free);
 3307                                         }
 3308                                 }
 3309                                 if (dstmpte->wire_count >= srcmpte->wire_count)
 3310                                         break;
 3311                         }
 3312                         addr += PAGE_SIZE;
 3313                         src_pte++;
 3314                 }
 3315         }
 3316         PT_UPDATES_FLUSH();
 3317         sched_unpin();
 3318         vm_page_unlock_queues();
 3319         PMAP_UNLOCK(src_pmap);
 3320         PMAP_UNLOCK(dst_pmap);
 3321 
 3322 #ifdef HAMFISTED_LOCKING
 3323         mtx_unlock(&createdelete_lock);
 3324 #endif
 3325 }       
 3326 
 3327 static __inline void
 3328 pagezero(void *page)
 3329 {
 3330 #if defined(I686_CPU)
 3331         if (cpu_class == CPUCLASS_686) {
 3332 #if defined(CPU_ENABLE_SSE)
 3333                 if (cpu_feature & CPUID_SSE2)
 3334                         sse2_pagezero(page);
 3335                 else
 3336 #endif
 3337                         i686_pagezero(page);
 3338         } else
 3339 #endif
 3340                 bzero(page, PAGE_SIZE);
 3341 }
 3342 
 3343 /*
 3344  *      pmap_zero_page zeros the specified hardware page by mapping 
 3345  *      the page into KVM and using bzero to clear its contents.
 3346  */
 3347 void
 3348 pmap_zero_page(vm_page_t m)
 3349 {
 3350         struct sysmaps *sysmaps;
 3351 
 3352         sysmaps = &sysmaps_pcpu[PCPU_GET(cpuid)];
 3353         mtx_lock(&sysmaps->lock);
 3354         if (*sysmaps->CMAP2)
 3355                 panic("pmap_zero_page: CMAP2 busy");
 3356         sched_pin();
 3357         PT_SET_MA(sysmaps->CADDR2, PG_V | PG_RW | xpmap_ptom(VM_PAGE_TO_PHYS(m)) | PG_A | PG_M);
 3358         pagezero(sysmaps->CADDR2);
 3359         PT_SET_MA(sysmaps->CADDR2, 0);
 3360         sched_unpin();
 3361         mtx_unlock(&sysmaps->lock);
 3362 }
 3363 
 3364 /*
 3365  *      pmap_zero_page_area zeros the specified hardware page by mapping 
 3366  *      the page into KVM and using bzero to clear its contents.
 3367  *
 3368  *      off and size may not cover an area beyond a single hardware page.
 3369  */
 3370 void
 3371 pmap_zero_page_area(vm_page_t m, int off, int size)
 3372 {
 3373         struct sysmaps *sysmaps;
 3374 
 3375         sysmaps = &sysmaps_pcpu[PCPU_GET(cpuid)];
 3376         mtx_lock(&sysmaps->lock);
 3377         if (*sysmaps->CMAP2)
 3378                 panic("pmap_zero_page_area: CMAP2 busy");
 3379         sched_pin();
 3380         PT_SET_MA(sysmaps->CADDR2, PG_V | PG_RW | xpmap_ptom(VM_PAGE_TO_PHYS(m)) | PG_A | PG_M);
 3381 
 3382         if (off == 0 && size == PAGE_SIZE) 
 3383                 pagezero(sysmaps->CADDR2);
 3384         else
 3385                 bzero((char *)sysmaps->CADDR2 + off, size);
 3386         PT_SET_MA(sysmaps->CADDR2, 0);
 3387         sched_unpin();
 3388         mtx_unlock(&sysmaps->lock);
 3389 }
 3390 
 3391 /*
 3392  *      pmap_zero_page_idle zeros the specified hardware page by mapping 
 3393  *      the page into KVM and using bzero to clear its contents.  This
 3394  *      is intended to be called from the vm_pagezero process only and
 3395  *      outside of Giant.
 3396  */
 3397 void
 3398 pmap_zero_page_idle(vm_page_t m)
 3399 {
 3400 
 3401         if (*CMAP3)
 3402                 panic("pmap_zero_page_idle: CMAP3 busy");
 3403         sched_pin();
 3404         PT_SET_MA(CADDR3, PG_V | PG_RW | xpmap_ptom(VM_PAGE_TO_PHYS(m)) | PG_A | PG_M);
 3405         pagezero(CADDR3);
 3406         PT_SET_MA(CADDR3, 0);
 3407         sched_unpin();
 3408 }
 3409 
 3410 /*
 3411  *      pmap_copy_page copies the specified (machine independent)
 3412  *      page by mapping the page into virtual memory and using
 3413  *      bcopy to copy the page, one machine dependent page at a
 3414  *      time.
 3415  */
 3416 void
 3417 pmap_copy_page(vm_page_t src, vm_page_t dst)
 3418 {
 3419         struct sysmaps *sysmaps;
 3420 
 3421         sysmaps = &sysmaps_pcpu[PCPU_GET(cpuid)];
 3422         mtx_lock(&sysmaps->lock);
 3423         if (*sysmaps->CMAP1)
 3424                 panic("pmap_copy_page: CMAP1 busy");
 3425         if (*sysmaps->CMAP2)
 3426                 panic("pmap_copy_page: CMAP2 busy");
 3427         sched_pin();
 3428         PT_SET_MA(sysmaps->CADDR1, PG_V | xpmap_ptom(VM_PAGE_TO_PHYS(src)) | PG_A);
 3429         PT_SET_MA(sysmaps->CADDR2, PG_V | PG_RW | xpmap_ptom(VM_PAGE_TO_PHYS(dst)) | PG_A | PG_M);
 3430         bcopy(sysmaps->CADDR1, sysmaps->CADDR2, PAGE_SIZE);
 3431         PT_SET_MA(sysmaps->CADDR1, 0);
 3432         PT_SET_MA(sysmaps->CADDR2, 0);
 3433         sched_unpin();
 3434         mtx_unlock(&sysmaps->lock);
 3435 }
 3436 
 3437 /*
 3438  * Returns true if the pmap's pv is one of the first
 3439  * 16 pvs linked to from this page.  This count may
 3440  * be changed upwards or downwards in the future; it
 3441  * is only necessary that true be returned for a small
 3442  * subset of pmaps for proper page aging.
 3443  */
 3444 boolean_t
 3445 pmap_page_exists_quick(pmap_t pmap, vm_page_t m)
 3446 {
 3447         pv_entry_t pv;
 3448         int loops = 0;
 3449 
 3450         if (m->flags & PG_FICTITIOUS)
 3451                 return (FALSE);
 3452 
 3453         mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 3454         TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
 3455                 if (PV_PMAP(pv) == pmap) {
 3456                         return (TRUE);
 3457                 }
 3458                 loops++;
 3459                 if (loops >= 16)
 3460                         break;
 3461         }
 3462         return (FALSE);
 3463 }
 3464 
 3465 /*
 3466  *      pmap_page_wired_mappings:
 3467  *
 3468  *      Return the number of managed mappings to the given physical page
 3469  *      that are wired.
 3470  */
 3471 int
 3472 pmap_page_wired_mappings(vm_page_t m)
 3473 {
 3474         pv_entry_t pv;
 3475         pt_entry_t *pte;
 3476         pmap_t pmap;
 3477         int count;
 3478 
 3479         count = 0;
 3480         if ((m->flags & PG_FICTITIOUS) != 0)
 3481                 return (count);
 3482         mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 3483         sched_pin();
 3484         TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
 3485                 pmap = PV_PMAP(pv);
 3486                 PMAP_LOCK(pmap);
 3487                 pte = pmap_pte_quick(pmap, pv->pv_va);
 3488                 if ((*pte & PG_W) != 0)
 3489                         count++;
 3490                 PMAP_UNLOCK(pmap);
 3491         }
 3492         sched_unpin();
 3493         return (count);
 3494 }
 3495 
 3496 /*
 3497  * Returns TRUE if the given page is mapped.  Otherwise, returns FALSE.
 3498  */
 3499 boolean_t
 3500 pmap_page_is_mapped(vm_page_t m)
 3501 {
 3502 
 3503         if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0)
 3504                 return (FALSE);
 3505         mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 3506         return (!TAILQ_EMPTY(&m->md.pv_list));
 3507 }
 3508 
 3509 /*
 3510  * Remove all pages from the specified address space;
 3511  * this aids process exit speeds.  Also, this code is
 3512  * special-cased for the current process only, but it
 3513  * can have the more generic (and slightly slower)
 3514  * mode enabled.  This is much faster than pmap_remove
 3515  * in the case of running down an entire address space.
 3516  */
 3517 void
 3518 pmap_remove_pages(pmap_t pmap)
 3519 {
 3520         pt_entry_t *pte, tpte;
 3521         vm_page_t m, free = NULL;
 3522         pv_entry_t pv;
 3523         struct pv_chunk *pc, *npc;
 3524         int field, idx;
 3525         int32_t bit;
 3526         uint32_t inuse, bitmask;
 3527         int allfree;
 3528 
 3529         CTR1(KTR_PMAP, "pmap_remove_pages: pmap=%p", pmap);
 3530         
 3531         if (pmap != vmspace_pmap(curthread->td_proc->p_vmspace)) {
 3532                 printf("warning: pmap_remove_pages called with non-current pmap\n");
 3533                 return;
 3534         }
 3535         vm_page_lock_queues();
 3536         KASSERT(pmap_is_current(pmap), ("removing pages from non-current pmap"));
 3537         PMAP_LOCK(pmap);
 3538         sched_pin();
 3539         TAILQ_FOREACH_SAFE(pc, &pmap->pm_pvchunk, pc_list, npc) {
 3540                 allfree = 1;
 3541                 for (field = 0; field < _NPCM; field++) {
 3542                         inuse = (~(pc->pc_map[field])) & pc_freemask[field];
 3543                         while (inuse != 0) {
 3544                                 bit = bsfl(inuse);
 3545                                 bitmask = 1UL << bit;
 3546                                 idx = field * 32 + bit;
 3547                                 pv = &pc->pc_pventry[idx];
 3548                                 inuse &= ~bitmask;
 3549 
 3550                                 pte = vtopte(pv->pv_va);
 3551                                 tpte = *pte ? xpmap_mtop(*pte) : 0;
 3552 
 3553                                 if (tpte == 0) {
 3554                                         printf(
 3555                                             "TPTE at %p  IS ZERO @ VA %08x\n",
 3556                                             pte, pv->pv_va);
 3557                                         panic("bad pte");
 3558                                 }
 3559 
 3560 /*
 3561  * We cannot remove wired pages from a process' mapping at this time
 3562  */
 3563                                 if (tpte & PG_W) {
 3564                                         allfree = 0;
 3565                                         continue;
 3566                                 }
 3567 
 3568                                 m = PHYS_TO_VM_PAGE(tpte & PG_FRAME);
 3569                                 KASSERT(m->phys_addr == (tpte & PG_FRAME),
 3570                                     ("vm_page_t %p phys_addr mismatch %016jx %016jx",
 3571                                     m, (uintmax_t)m->phys_addr,
 3572                                     (uintmax_t)tpte));
 3573 
 3574                                 KASSERT(m < &vm_page_array[vm_page_array_size],
 3575                                         ("pmap_remove_pages: bad tpte %#jx",
 3576                                         (uintmax_t)tpte));
 3577 
 3578 
 3579                                 PT_CLEAR_VA(pte, FALSE);
 3580                                 
 3581                                 /*
 3582                                  * Update the vm_page_t clean/reference bits.
 3583                                  */
 3584                                 if (tpte & PG_M)
 3585                                         vm_page_dirty(m);
 3586 
 3587                                 TAILQ_REMOVE(&m->md.pv_list, pv, pv_list);
 3588                                 if (TAILQ_EMPTY(&m->md.pv_list))
 3589                                         vm_page_flag_clear(m, PG_WRITEABLE);
 3590 
 3591                                 pmap_unuse_pt(pmap, pv->pv_va, &free);
 3592 
 3593                                 /* Mark free */
 3594                                 PV_STAT(pv_entry_frees++);
 3595                                 PV_STAT(pv_entry_spare++);
 3596                                 pv_entry_count--;
 3597                                 pc->pc_map[field] |= bitmask;
 3598                                 pmap->pm_stats.resident_count--;                        
 3599                         }
 3600                 }
 3601                 PT_UPDATES_FLUSH();
 3602                 if (allfree) {
 3603                         PV_STAT(pv_entry_spare -= _NPCPV);
 3604                         PV_STAT(pc_chunk_count--);
 3605                         PV_STAT(pc_chunk_frees++);
 3606                         TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
 3607                         m = PHYS_TO_VM_PAGE(pmap_kextract((vm_offset_t)pc));
 3608                         pmap_qremove((vm_offset_t)pc, 1);
 3609                         vm_page_unwire(m, 0);
 3610                         vm_page_free(m);
 3611                         pmap_ptelist_free(&pv_vafree, (vm_offset_t)pc);
 3612                 }
 3613         }
 3614         PT_UPDATES_FLUSH();
 3615         if (*PMAP1)
 3616                 PT_SET_MA(PADDR1, 0);
 3617 
 3618         sched_unpin();
 3619         pmap_invalidate_all(pmap);
 3620         vm_page_unlock_queues();
 3621         PMAP_UNLOCK(pmap);
 3622         pmap_free_zero_pages(free);
 3623 }
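/*
 * Editor's note: a minimal sketch (not part of the original file) of the
 * pv-chunk bitmap walk used by pmap_remove_pages() above.  A clear bit in
 * pc_map[field] marks the pv entry at index (field * 32 + bit) as in use;
 * the inverted mask is walked with bsfl(), which returns the index of the
 * lowest set bit.  The function name below is hypothetical.
 */
#if 0
static void
example_walk_inuse_bits(uint32_t inuse)
{
        int bit;

        /* e.g. inuse == 0x00000412 visits bits 1, 4 and 10, in that order */
        while (inuse != 0) {
                bit = bsfl(inuse);              /* lowest set bit */
                inuse &= ~(1UL << bit);
                /* ... process the pv entry selected by this bit ... */
        }
}
#endif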
 3624 
 3625 /*
 3626  *      pmap_is_modified:
 3627  *
 3628  *      Return whether or not the specified physical page was modified
 3629  *      in any physical maps.
 3630  */
 3631 boolean_t
 3632 pmap_is_modified(vm_page_t m)
 3633 {
 3634         pv_entry_t pv;
 3635         pt_entry_t *pte;
 3636         pmap_t pmap;
 3637         boolean_t rv;
 3638 
 3639         rv = FALSE;
 3640         if (m->flags & PG_FICTITIOUS)
 3641                 return (rv);
 3642 
 3643         sched_pin();
 3644         mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 3645         TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
 3646                 pmap = PV_PMAP(pv);
 3647                 PMAP_LOCK(pmap);
 3648                 pte = pmap_pte_quick(pmap, pv->pv_va);
 3649                 rv = (*pte & PG_M) != 0;
 3650                 PMAP_UNLOCK(pmap);
 3651                 if (rv)
 3652                         break;
 3653         }
 3654         if (*PMAP1)
 3655                 PT_SET_MA(PADDR1, 0);
 3656         sched_unpin();
 3657         return (rv);
 3658 }
 3659 
 3660 /*
 3661  *      pmap_is_prefaultable:
 3662  *
 3663  *      Return whether or not the specified virtual address is eligible
 3664  *      for prefault.
 3665  */
 3666 static boolean_t
 3667 pmap_is_prefaultable_locked(pmap_t pmap, vm_offset_t addr)
 3668 {
 3669         pt_entry_t *pte;
 3670         boolean_t rv = FALSE;
 3671 
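        /*
         * Editor's note (added comment): prefaulting is effectively disabled
         * here; the function returns FALSE unconditionally, so the check
         * below is never reached.
         */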
 3672         return (rv);
 3673         
 3674         if (pmap_is_current(pmap) && *pmap_pde(pmap, addr)) {
 3675                 pte = vtopte(addr);
 3676                 rv = (*pte == 0);
 3677         }
 3678         return (rv);
 3679 }
 3680 
 3681 boolean_t
 3682 pmap_is_prefaultable(pmap_t pmap, vm_offset_t addr)
 3683 {
 3684         boolean_t rv;
 3685         
 3686         PMAP_LOCK(pmap);
 3687         rv = pmap_is_prefaultable_locked(pmap, addr);
 3688         PMAP_UNLOCK(pmap);
 3689         return (rv);
 3690 }
 3691 
 3692 void
 3693 pmap_map_readonly(pmap_t pmap, vm_offset_t va, int len)
 3694 {
 3695         int i, npages = round_page(len) >> PAGE_SHIFT;
 3696         for (i = 0; i < npages; i++) {
 3697                 pt_entry_t *pte;
 3698                 pte = pmap_pte(pmap, (vm_offset_t)(va + i*PAGE_SIZE));
 3699                 vm_page_lock_queues();
 3700                 pte_store(pte, xpmap_mtop(*pte & ~(PG_RW|PG_M)));
 3701                 vm_page_unlock_queues();
 3702                 PMAP_MARK_PRIV(xpmap_mtop(*pte));
 3703                 pmap_pte_release(pte);
 3704         }
 3705 }
 3706 
 3707 void
 3708 pmap_map_readwrite(pmap_t pmap, vm_offset_t va, int len)
 3709 {
 3710         int i, npages = round_page(len) >> PAGE_SHIFT;
 3711         for (i = 0; i < npages; i++) {
 3712                 pt_entry_t *pte;
 3713                 pte = pmap_pte(pmap, (vm_offset_t)(va + i*PAGE_SIZE));
 3714                 PMAP_MARK_UNPRIV(xpmap_mtop(*pte));
 3715                 vm_page_lock_queues();
 3716                 pte_store(pte, xpmap_mtop(*pte) | (PG_RW|PG_M));
 3717                 vm_page_unlock_queues();
 3718                 pmap_pte_release(pte);
 3719         }
 3720 }
 3721 
 3722 /*
 3723  * Clear the write and modified bits in each of the given page's mappings.
 3724  */
 3725 void
 3726 pmap_remove_write(vm_page_t m)
 3727 {
 3728         pv_entry_t pv;
 3729         pmap_t pmap;
 3730         pt_entry_t oldpte, *pte;
 3731 
 3732         mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 3733         if ((m->flags & PG_FICTITIOUS) != 0 ||
 3734             (m->flags & PG_WRITEABLE) == 0)
 3735                 return;
 3736         sched_pin();
 3737         TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
 3738                 pmap = PV_PMAP(pv);
 3739                 PMAP_LOCK(pmap);
 3740                 pte = pmap_pte_quick(pmap, pv->pv_va);
 3741 retry:
 3742                 oldpte = *pte;
 3743                 if ((oldpte & PG_RW) != 0) {
 3744                         vm_paddr_t newpte = oldpte & ~(PG_RW | PG_M);
 3745                         
 3746                         /*
 3747                          * Regardless of whether a pte is 32 or 64 bits
 3748                          * in size, PG_RW and PG_M are among the least
 3749                          * significant 32 bits.
 3750                          */
 3751                         PT_SET_VA_MA(pte, newpte, TRUE);
 3752                         if (*pte != newpte)
 3753                                 goto retry;
 3754                         
 3755                         if ((oldpte & PG_M) != 0)
 3756                                 vm_page_dirty(m);
 3757                         pmap_invalidate_page(pmap, pv->pv_va);
 3758                 }
 3759                 PMAP_UNLOCK(pmap);
 3760         }
 3761         vm_page_flag_clear(m, PG_WRITEABLE);
 3762         PT_UPDATES_FLUSH();
 3763         if (*PMAP1)
 3764                 PT_SET_MA(PADDR1, 0);
 3765         sched_unpin();
 3766 }
 3767 
 3768 /*
 3769  *      pmap_ts_referenced:
 3770  *
 3771  *      Return a count of reference bits for a page, clearing those bits.
 3772  *      It is not necessary for every reference bit to be cleared, but it
 3773  *      is necessary that 0 only be returned when there are truly no
 3774  *      reference bits set.
 3775  *
 3776  *      XXX: The exact number of bits to check and clear is a matter that
 3777  *      should be tested and standardized at some point in the future for
 3778  *      optimal aging of shared pages.
 3779  */
 3780 int
 3781 pmap_ts_referenced(vm_page_t m)
 3782 {
 3783         pv_entry_t pv, pvf, pvn;
 3784         pmap_t pmap;
 3785         pt_entry_t *pte;
 3786         int rtval = 0;
 3787 
 3788         if (m->flags & PG_FICTITIOUS)
 3789                 return (rtval);
 3790         sched_pin();
 3791         mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 3792         if ((pv = TAILQ_FIRST(&m->md.pv_list)) != NULL) {
 3793                 pvf = pv;
 3794                 do {
 3795                         pvn = TAILQ_NEXT(pv, pv_list);
 3796                         TAILQ_REMOVE(&m->md.pv_list, pv, pv_list);
 3797                         TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_list);
 3798                         pmap = PV_PMAP(pv);
 3799                         PMAP_LOCK(pmap);
 3800                         pte = pmap_pte_quick(pmap, pv->pv_va);
 3801                         if ((*pte & PG_A) != 0) {
 3802                                 PT_SET_VA_MA(pte, *pte & ~PG_A, FALSE);
 3803                                 pmap_invalidate_page(pmap, pv->pv_va);
 3804                                 rtval++;
 3805                                 if (rtval > 4)
 3806                                         pvn = NULL;
 3807                         }
 3808                         PMAP_UNLOCK(pmap);
 3809                 } while ((pv = pvn) != NULL && pv != pvf);
 3810         }
 3811         PT_UPDATES_FLUSH();
 3812         if (*PMAP1)
 3813                 PT_SET_MA(PADDR1, 0);
 3814 
 3815         sched_unpin();
 3816         return (rtval);
 3817 }
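/*
 * Editor's note: a hypothetical caller sketch (not part of the original
 * file).  Page-aging code can accumulate the returned reference count to
 * judge how actively a page is used; the function name below is
 * illustrative only.
 */
#if 0
static int
example_is_inactive_candidate(vm_page_t m)
{
        int act_count;

        act_count = pmap_ts_referenced(m);      /* also clears PG_A in PTEs */
        return (act_count == 0 && (m->flags & PG_REFERENCED) == 0);
}
#endif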
 3818 
 3819 /*
 3820  *      Clear the modify bits on the specified physical page.
 3821  */
 3822 void
 3823 pmap_clear_modify(vm_page_t m)
 3824 {
 3825         pv_entry_t pv;
 3826         pmap_t pmap;
 3827         pt_entry_t *pte;
 3828 
 3829         mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 3830         if ((m->flags & PG_FICTITIOUS) != 0)
 3831                 return;
 3832         sched_pin();
 3833         TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
 3834                 pmap = PV_PMAP(pv);
 3835                 PMAP_LOCK(pmap);
 3836                 pte = pmap_pte_quick(pmap, pv->pv_va);
 3837                 if ((*pte & PG_M) != 0) {
 3838                         /*
 3839                          * Regardless of whether a pte is 32 or 64 bits
 3840                          * in size, PG_M is among the least significant
 3841                          * 32 bits. 
 3842                          */
 3843                         PT_SET_VA_MA(pte, *pte & ~PG_M, FALSE);
 3844                         pmap_invalidate_page(pmap, pv->pv_va);
 3845                 }
 3846                 PMAP_UNLOCK(pmap);
 3847         }
 3848         sched_unpin();
 3849 }
 3850 
 3851 /*
 3852  *      pmap_clear_reference:
 3853  *
 3854  *      Clear the reference bit on the specified physical page.
 3855  */
 3856 void
 3857 pmap_clear_reference(vm_page_t m)
 3858 {
 3859         pv_entry_t pv;
 3860         pmap_t pmap;
 3861         pt_entry_t *pte;
 3862 
 3863         mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 3864         if ((m->flags & PG_FICTITIOUS) != 0)
 3865                 return;
 3866         sched_pin();
 3867         TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
 3868                 pmap = PV_PMAP(pv);
 3869                 PMAP_LOCK(pmap);
 3870                 pte = pmap_pte_quick(pmap, pv->pv_va);
 3871                 if ((*pte & PG_A) != 0) {
 3872                         /*
 3873                          * Regardless of whether a pte is 32 or 64 bits
 3874                          * in size, PG_A is among the least significant
 3875                          * 32 bits. 
 3876                          */
 3877                         PT_SET_VA_MA(pte, *pte & ~PG_A, FALSE);
 3878                         pmap_invalidate_page(pmap, pv->pv_va);
 3879                 }
 3880                 PMAP_UNLOCK(pmap);
 3881         }
 3882         sched_unpin();
 3883 }
 3884 
 3885 /*
 3886  * Miscellaneous support routines follow
 3887  */
 3888 
 3889 /*
 3890  * Map a set of physical memory pages into the kernel virtual
 3891  * address space. Return a pointer to where it is mapped. This
 3892  * routine is intended to be used for mapping device memory,
 3893  * NOT real memory.
 3894  */
 3895 void *
 3896 pmap_mapdev_attr(vm_paddr_t pa, vm_size_t size, int mode)
 3897 {
 3898         vm_offset_t va, offset;
 3899         vm_size_t tmpsize;
 3900 
 3901         offset = pa & PAGE_MASK;
 3902         size = roundup(offset + size, PAGE_SIZE);
 3903         pa = pa & PG_FRAME;
 3904 
 3905         if (pa < KERNLOAD && pa + size <= KERNLOAD)
 3906                 va = KERNBASE + pa;
 3907         else
 3908                 va = kmem_alloc_nofault(kernel_map, size);
 3909         if (!va)
 3910                 panic("pmap_mapdev: Couldn't alloc kernel virtual memory");
 3911 
 3912         for (tmpsize = 0; tmpsize < size; tmpsize += PAGE_SIZE)
 3913                 pmap_kenter_attr(va + tmpsize, pa + tmpsize, mode);
 3914         pmap_invalidate_range(kernel_pmap, va, va + tmpsize);
 3915         pmap_invalidate_cache_range(va, va + size);
 3916         return ((void *)(va + offset));
 3917 }
 3918 
 3919 void *
 3920 pmap_mapdev(vm_paddr_t pa, vm_size_t size)
 3921 {
 3922 
 3923         return (pmap_mapdev_attr(pa, size, PAT_UNCACHEABLE));
 3924 }
 3925 
 3926 void *
 3927 pmap_mapbios(vm_paddr_t pa, vm_size_t size)
 3928 {
 3929 
 3930         return (pmap_mapdev_attr(pa, size, PAT_WRITE_BACK));
 3931 }
 3932 
 3933 void
 3934 pmap_unmapdev(vm_offset_t va, vm_size_t size)
 3935 {
 3936         vm_offset_t base, offset, tmpva;
 3937 
 3938         if (va >= KERNBASE && va + size <= KERNBASE + KERNLOAD)
 3939                 return;
 3940         base = trunc_page(va);
 3941         offset = va & PAGE_MASK;
 3942         size = roundup(offset + size, PAGE_SIZE);
 3943         critical_enter();
 3944         for (tmpva = base; tmpva < (base + size); tmpva += PAGE_SIZE)
 3945                 pmap_kremove(tmpva);
 3946         pmap_invalidate_range(kernel_pmap, va, tmpva);
 3947         critical_exit();
 3948         kmem_free(kernel_map, base, size);
 3949 }
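/*
 * Editor's note: a hypothetical driver-style sketch (not part of the
 * original file) showing the intended pairing of pmap_mapdev() and
 * pmap_unmapdev().  The register base and size are made up for
 * illustration.
 */
#if 0
static void
example_poke_device(vm_paddr_t regbase)
{
        volatile uint32_t *regs;

        regs = pmap_mapdev(regbase, PAGE_SIZE); /* uncacheable mapping */
        regs[0] = 1;                            /* touch a device register */
        pmap_unmapdev((vm_offset_t)regs, PAGE_SIZE);
}
#endif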
 3950 
 3951 /*
 3952  * Sets the memory attribute for the specified page.
 3953  */
 3954 void
 3955 pmap_page_set_memattr(vm_page_t m, vm_memattr_t ma)
 3956 {
 3957         struct sysmaps *sysmaps;
 3958         vm_offset_t sva, eva;
 3959 
 3960         m->md.pat_mode = ma;
 3961         if ((m->flags & PG_FICTITIOUS) != 0)
 3962                 return;
 3963 
 3964         /*
 3965          * If "m" is a normal page, flush it from the cache.
 3966          * See pmap_invalidate_cache_range().
 3967          *
 3968          * First, try to find an existing mapping of the page by sf
 3969          * buffer. sf_buf_invalidate_cache() modifies mapping and
 3970          * flushes the cache.
 3971          */    
 3972         if (sf_buf_invalidate_cache(m))
 3973                 return;
 3974 
 3975         /*
 3976          * If page is not mapped by sf buffer, but CPU does not
 3977          * support self snoop, map the page transient and do
 3978          * invalidation. In the worst case, whole cache is flushed by
 3979          * pmap_invalidate_cache_range().
 3980          */
 3981         if ((cpu_feature & (CPUID_SS|CPUID_CLFSH)) == CPUID_CLFSH) {
 3982                 sysmaps = &sysmaps_pcpu[PCPU_GET(cpuid)];
 3983                 mtx_lock(&sysmaps->lock);
 3984                 if (*sysmaps->CMAP2)
 3985                         panic("pmap_page_set_memattr: CMAP2 busy");
 3986                 sched_pin();
 3987                 PT_SET_MA(sysmaps->CADDR2, PG_V | PG_RW |
 3988                     xpmap_ptom(VM_PAGE_TO_PHYS(m)) | PG_A | PG_M |
 3989                     pmap_cache_bits(m->md.pat_mode, 0));
 3990                 invlcaddr(sysmaps->CADDR2);
 3991                 sva = (vm_offset_t)sysmaps->CADDR2;
 3992                 eva = sva + PAGE_SIZE;
 3993         } else
 3994                 sva = eva = 0; /* gcc */
 3995         pmap_invalidate_cache_range(sva, eva);
 3996         if (sva != 0) {
 3997                 PT_SET_MA(sysmaps->CADDR2, 0);
 3998                 sched_unpin();
 3999                 mtx_unlock(&sysmaps->lock);
 4000         }
 4001 }
 4002 
 4003 int
 4004 pmap_change_attr(vm_offset_t va, vm_size_t size, int mode)
 4005 {
 4009         vm_offset_t base, offset, tmpva;
 4010         pt_entry_t *pte;
 4011         u_int opte, npte;
 4012         pd_entry_t *pde;
 4013         boolean_t changed;
 4014 
 4015         base = trunc_page(va);
 4016         offset = va & PAGE_MASK;
 4017         size = roundup(offset + size, PAGE_SIZE);
 4018 
 4019         /* Only supported on kernel virtual addresses. */
 4020         if (base <= VM_MAXUSER_ADDRESS)
 4021                 return (EINVAL);
 4022 
 4023         /* 4MB pages and pages that aren't mapped aren't supported. */
 4024         for (tmpva = base; tmpva < (base + size); tmpva += PAGE_SIZE) {
 4025                 pde = pmap_pde(kernel_pmap, tmpva);
 4026                 if (*pde & PG_PS)
 4027                         return (EINVAL);
 4028                 if ((*pde & PG_V) == 0)
 4029                         return (EINVAL);
 4030                 pte = vtopte(tmpva);
 4031                 if ((*pte & PG_V) == 0)
 4032                         return (EINVAL);
 4033         }
 4034 
 4035         changed = FALSE;
 4036 
 4037         /*
 4038          * Ok, all the pages exist and are 4k, so run through them updating
 4039          * their cache mode.
 4040          */
 4041         for (tmpva = base; size > 0; ) {
 4042                 pte = vtopte(tmpva);
 4043 
 4044                 /*
 4045                  * The cache mode bits are all in the low 32-bits of the
 4046                  * PTE, so we can just spin on updating the low 32-bits.
 4047                  */
 4048                 do {
 4049                         opte = *(u_int *)pte;
 4050                         npte = opte & ~(PG_PTE_PAT | PG_NC_PCD | PG_NC_PWT);
 4051                         npte |= pmap_cache_bits(mode, 0);
 4052                         PT_SET_VA_MA(pte, npte, TRUE);
 4053                 } while (npte != opte && (*pte != npte));
 4054                 if (npte != opte)
 4055                         changed = TRUE;
 4056                 tmpva += PAGE_SIZE;
 4057                 size -= PAGE_SIZE;
 4058         }
 4059 
 4060         /*
 4061          * Flush CPU caches to make sure any data isn't cached that shouldn't
 4062          * be, etc.
 4063          */
 4064         if (changed) {
 4065                 pmap_invalidate_range(kernel_pmap, base, tmpva);
 4066                 pmap_invalidate_cache_range(base, tmpva);
 4067         }
 4068         return (0);
 4069 }
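/*
 * Editor's note: a hypothetical usage sketch (not part of the original
 * file).  A caller such as a graphics driver can switch an existing kernel
 * mapping to another cache mode; PAT_WRITE_COMBINING is assumed to be
 * available alongside the PAT_* modes already used in this file, and the
 * names below are illustrative.
 */
#if 0
static int
example_map_framebuffer_wc(vm_offset_t fb_va, vm_size_t fb_size)
{

        /* Switch an already-mapped framebuffer to write-combining. */
        return (pmap_change_attr(fb_va, fb_size, PAT_WRITE_COMBINING));
}
#endif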
 4070 
 4071 /*
 4072  * perform the pmap work for mincore
 4073  */
 4074 int
 4075 pmap_mincore(pmap_t pmap, vm_offset_t addr)
 4076 {
 4077         pt_entry_t *ptep, pte;
 4078         vm_page_t m;
 4079         int val = 0;
 4080         
 4081         PMAP_LOCK(pmap);
 4082         ptep = pmap_pte(pmap, addr);
 4083         pte = (ptep != NULL) ? PT_GET(ptep) : 0;
 4084         pmap_pte_release(ptep);
 4085         PMAP_UNLOCK(pmap);
 4086 
 4087         if (pte != 0) {
 4088                 vm_paddr_t pa;
 4089 
 4090                 val = MINCORE_INCORE;
 4091                 if ((pte & PG_MANAGED) == 0)
 4092                         return (val);
 4093 
 4094                 pa = pte & PG_FRAME;
 4095 
 4096                 m = PHYS_TO_VM_PAGE(pa);
 4097 
 4098                 /*
 4099                  * Modified by us
 4100                  */
 4101                 if (pte & PG_M)
 4102                         val |= MINCORE_MODIFIED|MINCORE_MODIFIED_OTHER;
 4103                 else {
 4104                         /*
 4105                          * Modified by someone else
 4106                          */
 4107                         vm_page_lock_queues();
 4108                         if (m->dirty || pmap_is_modified(m))
 4109                                 val |= MINCORE_MODIFIED_OTHER;
 4110                         vm_page_unlock_queues();
 4111                 }
 4112                 /*
 4113                  * Referenced by us
 4114                  */
 4115                 if (pte & PG_A)
 4116                         val |= MINCORE_REFERENCED|MINCORE_REFERENCED_OTHER;
 4117                 else {
 4118                         /*
 4119                          * Referenced by someone else
 4120                          */
 4121                         vm_page_lock_queues();
 4122                         if ((m->flags & PG_REFERENCED) ||
 4123                             pmap_ts_referenced(m)) {
 4124                                 val |= MINCORE_REFERENCED_OTHER;
 4125                                 vm_page_flag_set(m, PG_REFERENCED);
 4126                         }
 4127                         vm_page_unlock_queues();
 4128                 }
 4129         } 
 4130         return (val);
 4131 }
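/*
 * Editor's note: a hypothetical caller sketch (not part of the original
 * file).  A caller like the mincore(2) implementation can store the
 * returned MINCORE_* bits one byte per page into the user's vector; the
 * names below are illustrative.
 */
#if 0
static void
example_fill_mincore_vector(pmap_t pmap, vm_offset_t start, vm_offset_t end,
    char *vec)
{
        vm_offset_t addr;

        for (addr = start; addr < end; addr += PAGE_SIZE)
                *vec++ = (char)pmap_mincore(pmap, addr);
}
#endif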
 4132 
 4133 void
 4134 pmap_activate(struct thread *td)
 4135 {
 4136         pmap_t  pmap, oldpmap;
 4137         u_int32_t  cr3;
 4138 
 4139         critical_enter();
 4140         pmap = vmspace_pmap(td->td_proc->p_vmspace);
 4141         oldpmap = PCPU_GET(curpmap);
 4142 #if defined(SMP)
 4143         atomic_clear_int(&oldpmap->pm_active, PCPU_GET(cpumask));
 4144         atomic_set_int(&pmap->pm_active, PCPU_GET(cpumask));
 4145 #else
 4146         oldpmap->pm_active &= ~1;
 4147         pmap->pm_active |= 1;
 4148 #endif
 4149 #ifdef PAE
 4150         cr3 = vtophys(pmap->pm_pdpt);
 4151 #else
 4152         cr3 = vtophys(pmap->pm_pdir);
 4153 #endif
 4154         /*
 4155          * pmap_activate is for the current thread on the current cpu
 4156          */
 4157         td->td_pcb->pcb_cr3 = cr3;
 4158         PT_UPDATES_FLUSH();
 4159         load_cr3(cr3);
 4160         PCPU_SET(curpmap, pmap);
 4161         critical_exit();
 4162 }
 4163 
 4164 void
 4165 pmap_sync_icache(pmap_t pm, vm_offset_t va, vm_size_t sz)
 4166 {
 4167 }
 4168 
 4169 /*
 4170  *      Increase the starting virtual address of the given mapping if a
 4171  *      different alignment might result in more superpage mappings.
 4172  */
 4173 void
 4174 pmap_align_superpage(vm_object_t object, vm_ooffset_t offset,
 4175     vm_offset_t *addr, vm_size_t size)
 4176 {
 4177         vm_offset_t superpage_offset;
 4178 
 4179         if (size < NBPDR)
 4180                 return;
 4181         if (object != NULL && (object->flags & OBJ_COLORED) != 0)
 4182                 offset += ptoa(object->pg_color);
 4183         superpage_offset = offset & PDRMASK;
 4184         if (size - ((NBPDR - superpage_offset) & PDRMASK) < NBPDR ||
 4185             (*addr & PDRMASK) == superpage_offset)
 4186                 return;
 4187         if ((*addr & PDRMASK) < superpage_offset)
 4188                 *addr = (*addr & ~PDRMASK) + superpage_offset;
 4189         else
 4190                 *addr = ((*addr + PDRMASK) & ~PDRMASK) + superpage_offset;
 4191 }
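/*
 * Editor's note: a worked example (not part of the original file), assuming
 * non-PAE 4 MB superpages (NBPDR = 0x400000, PDRMASK = 0x3fffff) and a
 * mapping large enough (several megabytes) that at least one full superpage
 * remains after alignment.
 *
 *   offset           = 0x00123000  ->  superpage_offset = 0x123000
 *   *addr (original) = 0x20000000  ->  (*addr & PDRMASK) = 0 < 0x123000
 *   *addr (adjusted) = 0x20123000
 *
 * After the adjustment the virtual address and the object offset are
 * congruent modulo NBPDR, so aligned runs of the object can later be
 * promoted to superpage mappings if the physical pages allow it.
 */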
 4192 
 4193 #ifdef XEN
 4194 
 4195 void
 4196 pmap_suspend(void)
 4197 {
 4198         pmap_t pmap;
 4199         int i, pdir, offset;
 4200         vm_paddr_t pdirma;
 4201         mmu_update_t mu[4];
 4202 
 4203         /*
 4204          * We need to remove the recursive mapping structure from all
 4205          * our pmaps so that Xen doesn't get confused when it restores
 4206          * the page tables. The recursive map lives at page directory
 4207          * index PTDPTDI. We assume that the suspend code has stopped
 4208          * the other vcpus (if any).
 4209          */
 4210         LIST_FOREACH(pmap, &allpmaps, pm_list) {
 4211                 for (i = 0; i < 4; i++) {
 4212                         /*
 4213                          * Figure out which page directory (L2) page
 4214                          * contains this bit of the recursive map and
 4215                          * the offset within that page of the map
 4216                          * entry
 4217                          */
 4218                         pdir = (PTDPTDI + i) / NPDEPG;
 4219                         offset = (PTDPTDI + i) % NPDEPG;
 4220                         pdirma = pmap->pm_pdpt[pdir] & PG_FRAME;
 4221                         mu[i].ptr = pdirma + offset * sizeof(pd_entry_t);
 4222                         mu[i].val = 0;
 4223                 }
 4224                 HYPERVISOR_mmu_update(mu, 4, NULL, DOMID_SELF);
 4225         }
 4226 }
 4227 
 4228 void
 4229 pmap_resume(void)
 4230 {
 4231         pmap_t pmap;
 4232         int i, pdir, offset;
 4233         vm_paddr_t pdirma;
 4234         mmu_update_t mu[4];
 4235 
 4236         /*
 4237          * Restore the recursive map that we removed on suspend.
 4238          */
 4239         LIST_FOREACH(pmap, &allpmaps, pm_list) {
 4240                 for (i = 0; i < 4; i++) {
 4241                         /*
 4242                          * Figure out which page directory (L2) page
 4243                          * contains this bit of the recursive map and
 4244                          * the offset within that page of the map
 4245                          * entry
 4246                          */
 4247                         pdir = (PTDPTDI + i) / NPDEPG;
 4248                         offset = (PTDPTDI + i) % NPDEPG;
 4249                         pdirma = pmap->pm_pdpt[pdir] & PG_FRAME;
 4250                         mu[i].ptr = pdirma + offset * sizeof(pd_entry_t);
 4251                         mu[i].val = (pmap->pm_pdpt[i] & PG_FRAME) | PG_V;
 4252                 }
 4253                 HYPERVISOR_mmu_update(mu, 4, NULL, DOMID_SELF);
 4254         }
 4255 }
 4256 
 4257 #endif
 4258 
 4259 #if defined(PMAP_DEBUG)
 4260 int pmap_pid_dump(int pid)
 4261 {
 4262         pmap_t pmap;
 4263         struct proc *p;
 4264         int npte = 0;
 4265         int index;
 4266 
 4267         sx_slock(&allproc_lock);
 4268         FOREACH_PROC_IN_SYSTEM(p) {
 4269                 if (p->p_pid != pid)
 4270                         continue;
 4271 
 4272                 if (p->p_vmspace) {
 4273                         int i,j;
 4274                         index = 0;
 4275                         pmap = vmspace_pmap(p->p_vmspace);
 4276                         for (i = 0; i < NPDEPTD; i++) {
 4277                                 pd_entry_t *pde;
 4278                                 pt_entry_t *pte;
 4279                                 vm_offset_t base = i << PDRSHIFT;
 4280                                 
 4281                                 pde = &pmap->pm_pdir[i];
 4282                                 if (pde && pmap_pde_v(pde)) {
 4283                                         for (j = 0; j < NPTEPG; j++) {
 4284                                                 vm_offset_t va = base + (j << PAGE_SHIFT);
 4285                                                 if (va >= (vm_offset_t) VM_MIN_KERNEL_ADDRESS) {
 4286                                                         if (index) {
 4287                                                                 index = 0;
 4288                                                                 printf("\n");
 4289                                                         }
 4290                                                         sx_sunlock(&allproc_lock);
 4291                                                         return npte;
 4292                                                 }
 4293                                                 pte = pmap_pte(pmap, va);
 4294                                                 if (pte && pmap_pte_v(pte)) {
 4295                                                         pt_entry_t pa;
 4296                                                         vm_page_t m;
 4297                                                         pa = PT_GET(pte);
 4298                                                         m = PHYS_TO_VM_PAGE(pa & PG_FRAME);
 4299                                                         printf("va: 0x%x, pt: 0x%x, h: %d, w: %d, f: 0x%x",
 4300                                                                 va, pa, m->hold_count, m->wire_count, m->flags);
 4301                                                         npte++;
 4302                                                         index++;
 4303                                                         if (index >= 2) {
 4304                                                                 index = 0;
 4305                                                                 printf("\n");
 4306                                                         } else {
 4307                                                                 printf(" ");
 4308                                                         }
 4309                                                 }
 4310                                         }
 4311                                 }
 4312                         }
 4313                 }
 4314         }
 4315         sx_sunlock(&allproc_lock);
 4316         return npte;
 4317 }
 4318 #endif
 4319 
 4320 #if defined(DEBUG)
 4321 
 4322 static void     pads(pmap_t pm);
 4323 void            pmap_pvdump(vm_paddr_t pa);
 4324 
 4325 /* print address space of pmap*/
 4326 static void
 4327 pads(pmap_t pm)
 4328 {
 4329         int i, j;
 4330         vm_offset_t va;
 4331         pt_entry_t *ptep;
 4332 
 4333         if (pm == kernel_pmap)
 4334                 return;
 4335         for (i = 0; i < NPDEPTD; i++)
 4336                 if (pm->pm_pdir[i])
 4337                         for (j = 0; j < NPTEPG; j++) {
 4338                                 va = (i << PDRSHIFT) + (j << PAGE_SHIFT);
 4339                                 if (pm == kernel_pmap && va < KERNBASE)
 4340                                         continue;
 4341                                 if (pm != kernel_pmap && va > UPT_MAX_ADDRESS)
 4342                                         continue;
 4343                                 ptep = pmap_pte(pm, va);
 4344                                 if (pmap_pte_v(ptep))
 4345                                         printf("%x:%x ", va, *ptep);
 4346                         }
 4347 
 4348 }
 4349 
 4350 void
 4351 pmap_pvdump(vm_paddr_t pa)
 4352 {
 4353         pv_entry_t pv;
 4354         pmap_t pmap;
 4355         vm_page_t m;
 4356 
 4357         printf("pa %x", pa);
 4358         m = PHYS_TO_VM_PAGE(pa);
 4359         TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
 4360                 pmap = PV_PMAP(pv);
 4361                 printf(" -> pmap %p, va %x", (void *)pmap, pv->pv_va);
 4362                 pads(pmap);
 4363         }
 4364         printf(" ");
 4365 }
 4366 #endif
