FreeBSD/Linux Kernel Cross Reference
sys/i386/i386/pmap.c


    1 /*-
    2  * Copyright (c) 1991 Regents of the University of California.
    3  * All rights reserved.
    4  * Copyright (c) 1994 John S. Dyson
    5  * All rights reserved.
    6  * Copyright (c) 1994 David Greenman
    7  * All rights reserved.
    8  *
    9  * This code is derived from software contributed to Berkeley by
   10  * the Systems Programming Group of the University of Utah Computer
   11  * Science Department and William Jolitz of UUNET Technologies Inc.
   12  *
   13  * Redistribution and use in source and binary forms, with or without
   14  * modification, are permitted provided that the following conditions
   15  * are met:
   16  * 1. Redistributions of source code must retain the above copyright
   17  *    notice, this list of conditions and the following disclaimer.
   18  * 2. Redistributions in binary form must reproduce the above copyright
   19  *    notice, this list of conditions and the following disclaimer in the
   20  *    documentation and/or other materials provided with the distribution.
   21  * 3. All advertising materials mentioning features or use of this software
   22  *    must display the following acknowledgement:
   23  *      This product includes software developed by the University of
   24  *      California, Berkeley and its contributors.
   25  * 4. Neither the name of the University nor the names of its contributors
   26  *    may be used to endorse or promote products derived from this software
   27  *    without specific prior written permission.
   28  *
   29  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
   30  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   31  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   32  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
   33  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   34  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
   35  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   36  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   37  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   38  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   39  * SUCH DAMAGE.
   40  *
   41  *      from:   @(#)pmap.c      7.7 (Berkeley)  5/12/91
   42  */
   43 /*-
   44  * Copyright (c) 2003 Networks Associates Technology, Inc.
   45  * All rights reserved.
   46  *
   47  * This software was developed for the FreeBSD Project by Jake Burkholder,
   48  * Safeport Network Services, and Network Associates Laboratories, the
   49  * Security Research Division of Network Associates, Inc. under
   50  * DARPA/SPAWAR contract N66001-01-C-8035 ("CBOSS"), as part of the DARPA
   51  * CHATS research program.
   52  *
   53  * Redistribution and use in source and binary forms, with or without
   54  * modification, are permitted provided that the following conditions
   55  * are met:
   56  * 1. Redistributions of source code must retain the above copyright
   57  *    notice, this list of conditions and the following disclaimer.
   58  * 2. Redistributions in binary form must reproduce the above copyright
   59  *    notice, this list of conditions and the following disclaimer in the
   60  *    documentation and/or other materials provided with the distribution.
   61  *
   62  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
   63  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   64  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   65  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
   66  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   67  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
   68  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   69  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   70  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   71  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   72  * SUCH DAMAGE.
   73  */
   74 
   75 #include <sys/cdefs.h>
   76 __FBSDID("$FreeBSD: releng/5.4/sys/i386/i386/pmap.c 145335 2005-04-20 19:11:07Z cvs2svn $");
   77 
   78 /*
   79  *      Manages physical address maps.
   80  *
   81  *      In addition to hardware address maps, this
   82  *      module is called upon to provide software-use-only
   83  *      maps which may or may not be stored in the same
   84  *      form as hardware maps.  These pseudo-maps are
   85  *      used to store intermediate results from copy
   86  *      operations to and from address spaces.
   87  *
   88  *      Since the information managed by this module is
   89  *      also stored by the logical address mapping module,
   90  *      this module may throw away valid virtual-to-physical
   91  *      mappings at almost any time.  However, invalidations
   92  *      of virtual-to-physical mappings must be done as
   93  *      requested.
   94  *
   95  *      In order to cope with hardware architectures which
   96  *      make virtual-to-physical map invalidates expensive,
    97  *      this module may delay invalidation or protection-reduction
   98  *      operations until such time as they are actually
   99  *      necessary.  This module is given full information as
  100  *      to which processors are currently using which maps,
  101  *      and to when physical maps must be made correct.
  102  */
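/*
 * Editorial sketch (not part of the original source): most callers of
 * this module follow the pattern "change the mapping, then make the
 * hardware agree".  A temporary kernel mapping of a single physical
 * page, using routines defined later in this file (va and pa are
 * illustrative locals), looks roughly like:
 *
 *      pmap_kenter(va, pa);                    install the PTE for va
 *      pmap_invalidate_page(kernel_pmap, va);  flush any stale TLB entry
 *      ... use the mapping through va ...
 *      pmap_kremove(va);
 *      pmap_invalidate_page(kernel_pmap, va);
 *
 * pmap_kenter()/pmap_kremove() deliberately skip the TLB shootdown
 * (they are "not SMP coherent"), which is why the explicit
 * pmap_invalidate_page() calls are needed.
 */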
  103 
  104 #include "opt_cpu.h"
  105 #include "opt_pmap.h"
  106 #include "opt_msgbuf.h"
  107 #include "opt_kstack_pages.h"
  108 
  109 #include <sys/param.h>
  110 #include <sys/systm.h>
  111 #include <sys/kernel.h>
  112 #include <sys/lock.h>
  113 #include <sys/malloc.h>
  114 #include <sys/mman.h>
  115 #include <sys/msgbuf.h>
  116 #include <sys/mutex.h>
  117 #include <sys/proc.h>
  118 #include <sys/sx.h>
  119 #include <sys/vmmeter.h>
  120 #include <sys/sched.h>
  121 #include <sys/sysctl.h>
  122 #ifdef SMP
  123 #include <sys/smp.h>
  124 #endif
  125 
  126 #include <vm/vm.h>
  127 #include <vm/vm_param.h>
  128 #include <vm/vm_kern.h>
  129 #include <vm/vm_page.h>
  130 #include <vm/vm_map.h>
  131 #include <vm/vm_object.h>
  132 #include <vm/vm_extern.h>
  133 #include <vm/vm_pageout.h>
  134 #include <vm/vm_pager.h>
  135 #include <vm/uma.h>
  136 
  137 #include <machine/cpu.h>
  138 #include <machine/cputypes.h>
  139 #include <machine/md_var.h>
  140 #include <machine/pcb.h>
  141 #include <machine/specialreg.h>
  142 #ifdef SMP
  143 #include <machine/smp.h>
  144 #endif
  145 
  146 #if !defined(CPU_ENABLE_SSE) && defined(I686_CPU)
  147 #define CPU_ENABLE_SSE
  148 #endif
  149 #if defined(CPU_DISABLE_SSE)
  150 #undef CPU_ENABLE_SSE
  151 #endif
  152 
  153 #ifndef PMAP_SHPGPERPROC
  154 #define PMAP_SHPGPERPROC 200
  155 #endif
  156 
  157 #if defined(DIAGNOSTIC)
  158 #define PMAP_DIAGNOSTIC
  159 #endif
  160 
  161 #define MINPV 2048
  162 
  163 #if !defined(PMAP_DIAGNOSTIC)
  164 #define PMAP_INLINE __inline
  165 #else
  166 #define PMAP_INLINE
  167 #endif
  168 
  169 /*
  170  * Get PDEs and PTEs for user/kernel address space
  171  */
  172 #define pmap_pde(m, v)  (&((m)->pm_pdir[(vm_offset_t)(v) >> PDRSHIFT]))
  173 #define pdir_pde(m, v) (m[(vm_offset_t)(v) >> PDRSHIFT])
  174 
  175 #define pmap_pde_v(pte)         ((*(int *)pte & PG_V) != 0)
  176 #define pmap_pte_w(pte)         ((*(int *)pte & PG_W) != 0)
  177 #define pmap_pte_m(pte)         ((*(int *)pte & PG_M) != 0)
  178 #define pmap_pte_u(pte)         ((*(int *)pte & PG_A) != 0)
  179 #define pmap_pte_v(pte)         ((*(int *)pte & PG_V) != 0)
  180 
  181 #define pmap_pte_set_w(pte, v)  ((v) ? atomic_set_int((u_int *)(pte), PG_W) : \
  182     atomic_clear_int((u_int *)(pte), PG_W))
  183 #define pmap_pte_set_prot(pte, v) ((*(int *)pte &= ~PG_PROT), (*(int *)pte |= (v)))
  184 
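/*
 * Editorial worked example for the macros above (non-PAE layout
 * assumed: PDRSHIFT == 22, NPTEPG == 1024, PAGE_SHIFT == 12).  For
 * va == 0xc0402123:
 *
 *      page directory index = va >> PDRSHIFT                    = 0x301
 *      page table index     = (va >> PAGE_SHIFT) & (NPTEPG - 1) = 0x002
 *      offset within page   = va & PAGE_MASK                    = 0x123
 *
 * so pmap_pde(pmap, va) yields &pmap->pm_pdir[0x301], and the PTE for
 * va is entry 2 of the page table that PDE points at.  Under PAE the
 * constants differ (PDRSHIFT == 21, NPTEPG == 512) but the macros are
 * unchanged.
 */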
  185 struct pmap kernel_pmap_store;
  186 LIST_HEAD(pmaplist, pmap);
  187 static struct pmaplist allpmaps;
  188 static struct mtx allpmaps_lock;
  189 
  190 vm_paddr_t avail_end;   /* PA of last available physical page */
  191 vm_offset_t virtual_avail;      /* VA of first avail page (after kernel bss) */
  192 vm_offset_t virtual_end;        /* VA of last avail page (end of kernel AS) */
  193 static boolean_t pmap_initialized = FALSE;      /* Has pmap_init completed? */
  194 int pgeflag = 0;                /* PG_G or-in */
  195 int pseflag = 0;                /* PG_PS or-in */
  196 
  197 static int nkpt;
  198 vm_offset_t kernel_vm_end;
  199 extern u_int32_t KERNend;
  200 
  201 #ifdef PAE
  202 static uma_zone_t pdptzone;
  203 #endif
  204 
  205 /*
  206  * Data for the pv entry allocation mechanism
  207  */
  208 static uma_zone_t pvzone;
  209 static struct vm_object pvzone_obj;
  210 static int pv_entry_count = 0, pv_entry_max = 0, pv_entry_high_water = 0;
  211 int pmap_pagedaemon_waken;
  212 
  213 /*
  214  * All those kernel PT submaps that BSD is so fond of
  215  */
  216 struct sysmaps {
  217         struct  mtx lock;
  218         pt_entry_t *CMAP1;
  219         pt_entry_t *CMAP2;
  220         caddr_t CADDR1;
  221         caddr_t CADDR2;
  222 };
  223 static struct sysmaps sysmaps_pcpu[MAXCPU];
  224 pt_entry_t *CMAP1 = 0;
  225 static pt_entry_t *CMAP3;
  226 caddr_t CADDR1 = 0, ptvmmap = 0;
  227 static caddr_t CADDR3;
  228 struct msgbuf *msgbufp = 0;
  229 
  230 /*
  231  * Crashdump maps.
  232  */
  233 static caddr_t crashdumpmap;
  234 
  235 #ifdef SMP
  236 extern pt_entry_t *SMPpt;
  237 #endif
  238 static pt_entry_t *PMAP1 = 0, *PMAP2;
  239 static pt_entry_t *PADDR1 = 0, *PADDR2;
  240 #ifdef SMP
  241 static int PMAP1cpu;
  242 static int PMAP1changedcpu;
  243 SYSCTL_INT(_debug, OID_AUTO, PMAP1changedcpu, CTLFLAG_RD, 
  244            &PMAP1changedcpu, 0,
  245            "Number of times pmap_pte_quick changed CPU with same PMAP1");
  246 #endif
  247 static int PMAP1changed;
  248 SYSCTL_INT(_debug, OID_AUTO, PMAP1changed, CTLFLAG_RD, 
  249            &PMAP1changed, 0,
  250            "Number of times pmap_pte_quick changed PMAP1");
  251 static int PMAP1unchanged;
  252 SYSCTL_INT(_debug, OID_AUTO, PMAP1unchanged, CTLFLAG_RD, 
  253            &PMAP1unchanged, 0,
  254            "Number of times pmap_pte_quick didn't change PMAP1");
  255 static struct mtx PMAP2mutex;
  256 
  257 static PMAP_INLINE void free_pv_entry(pv_entry_t pv);
  258 static pv_entry_t get_pv_entry(void);
  259 static void     pmap_clear_ptes(vm_page_t m, int bit);
  260 
  261 static int pmap_remove_pte(pmap_t pmap, pt_entry_t *ptq, vm_offset_t sva);
  262 static void pmap_remove_page(struct pmap *pmap, vm_offset_t va);
  263 static int pmap_remove_entry(struct pmap *pmap, vm_page_t m,
  264                                         vm_offset_t va);
  265 static void pmap_insert_entry(pmap_t pmap, vm_offset_t va, vm_page_t m);
  266 
  267 static vm_page_t pmap_allocpte(pmap_t pmap, vm_offset_t va, int flags);
  268 
  269 static vm_page_t _pmap_allocpte(pmap_t pmap, unsigned ptepindex, int flags);
  270 static int _pmap_unwire_pte_hold(pmap_t pmap, vm_page_t m);
  271 static pt_entry_t *pmap_pte_quick(pmap_t pmap, vm_offset_t va);
  272 static void pmap_pte_release(pt_entry_t *pte);
  273 static int pmap_unuse_pt(pmap_t, vm_offset_t);
  274 static vm_offset_t pmap_kmem_choose(vm_offset_t addr);
  275 #ifdef PAE
  276 static void *pmap_pdpt_allocf(uma_zone_t zone, int bytes, u_int8_t *flags, int wait);
  277 #endif
  278 
  279 CTASSERT(1 << PDESHIFT == sizeof(pd_entry_t));
  280 CTASSERT(1 << PTESHIFT == sizeof(pt_entry_t));
  281 
  282 /*
  283  * Move the kernel virtual free pointer to the next
  284  * 4MB.  This is used to help improve performance
  285  * by using a large (4MB) page for much of the kernel
  286  * (.text, .data, .bss)
  287  */
  288 static vm_offset_t
  289 pmap_kmem_choose(vm_offset_t addr)
  290 {
  291         vm_offset_t newaddr = addr;
  292 
  293 #ifndef DISABLE_PSE
  294         if (cpu_feature & CPUID_PSE)
  295                 newaddr = (addr + PDRMASK) & ~PDRMASK;
  296 #endif
  297         return newaddr;
  298 }
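/*
 * Editorial note: on a non-PAE kernel with PSE available, PDRMASK is
 * 0x3fffff, so the computation above is simply "round up to the next
 * 4MB boundary".  For an illustrative addr of 0xc0501000:
 *
 *      newaddr = (0xc0501000 + 0x3fffff) & ~0x3fffff = 0xc0800000
 *
 * On CPUs without PSE (or with DISABLE_PSE) the address is returned
 * unchanged.
 */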
  299 
  300 /*
  301  *      Bootstrap the system enough to run with virtual memory.
  302  *
  303  *      On the i386 this is called after mapping has already been enabled
  304  *      and just syncs the pmap module with what has already been done.
  305  *      [We can't call it easily with mapping off since the kernel is not
  306  *      mapped with PA == VA, hence we would have to relocate every address
  307  *      from the linked base (virtual) address "KERNBASE" to the actual
  308  *      (physical) address starting relative to 0]
  309  */
  310 void
  311 pmap_bootstrap(firstaddr, loadaddr)
  312         vm_paddr_t firstaddr;
  313         vm_paddr_t loadaddr;
  314 {
  315         vm_offset_t va;
  316         pt_entry_t *pte, *unused;
  317         struct sysmaps *sysmaps;
  318         int i;
  319 
  320         /*
  321          * XXX The calculation of virtual_avail is wrong. It's NKPT*PAGE_SIZE too
  322          * large. It should instead be correctly calculated in locore.s and
  323          * not based on 'first' (which is a physical address, not a virtual
  324          * address, for the start of unused physical memory). The kernel
  325          * page tables are NOT double mapped and thus should not be included
  326          * in this calculation.
  327          */
  328         virtual_avail = (vm_offset_t) KERNBASE + firstaddr;
  329         virtual_avail = pmap_kmem_choose(virtual_avail);
  330 
  331         virtual_end = VM_MAX_KERNEL_ADDRESS;
  332 
  333         /*
  334          * Initialize the kernel pmap (which is statically allocated).
  335          */
  336         PMAP_LOCK_INIT(kernel_pmap);
  337         kernel_pmap->pm_pdir = (pd_entry_t *) (KERNBASE + (u_int)IdlePTD);
  338 #ifdef PAE
  339         kernel_pmap->pm_pdpt = (pdpt_entry_t *) (KERNBASE + (u_int)IdlePDPT);
  340 #endif
  341         kernel_pmap->pm_active = -1;    /* don't allow deactivation */
  342         TAILQ_INIT(&kernel_pmap->pm_pvlist);
  343         LIST_INIT(&allpmaps);
  344         mtx_init(&allpmaps_lock, "allpmaps", NULL, MTX_SPIN);
  345         mtx_lock_spin(&allpmaps_lock);
  346         LIST_INSERT_HEAD(&allpmaps, kernel_pmap, pm_list);
  347         mtx_unlock_spin(&allpmaps_lock);
  348         nkpt = NKPT;
  349 
  350         /*
  351          * Reserve some special page table entries/VA space for temporary
  352          * mapping of pages.
  353          */
  354 #define SYSMAP(c, p, v, n)      \
  355         v = (c)va; va += ((n)*PAGE_SIZE); p = pte; pte += (n);
  356 
  357         va = virtual_avail;
  358         pte = vtopte(va);
  359 
  360         /*
  361          * CMAP1/CMAP2 are used for zeroing and copying pages.
  362          * CMAP3 is used for the idle process page zeroing.
  363          */
  364         for (i = 0; i < MAXCPU; i++) {
  365                 sysmaps = &sysmaps_pcpu[i];
  366                 mtx_init(&sysmaps->lock, "SYSMAPS", NULL, MTX_DEF);
  367                 SYSMAP(caddr_t, sysmaps->CMAP1, sysmaps->CADDR1, 1)
  368                 SYSMAP(caddr_t, sysmaps->CMAP2, sysmaps->CADDR2, 1)
  369         }
  370         SYSMAP(caddr_t, CMAP1, CADDR1, 1)
  371         SYSMAP(caddr_t, CMAP3, CADDR3, 1)
  372         *CMAP3 = 0;
  373 
  374         /*
  375          * Crashdump maps.
  376          */
  377         SYSMAP(caddr_t, unused, crashdumpmap, MAXDUMPPGS)
  378 
  379         /*
  380          * ptvmmap is used for reading arbitrary physical pages via /dev/mem.
  381          */
  382         SYSMAP(caddr_t, unused, ptvmmap, 1)
  383 
  384         /*
  385          * msgbufp is used to map the system message buffer.
  386          */
  387         SYSMAP(struct msgbuf *, unused, msgbufp, atop(round_page(MSGBUF_SIZE)))
  388 
  389         /*
  390          * ptemap is used for pmap_pte_quick
  391          */
  392         SYSMAP(pt_entry_t *, PMAP1, PADDR1, 1);
  393         SYSMAP(pt_entry_t *, PMAP2, PADDR2, 1);
  394 
  395         mtx_init(&PMAP2mutex, "PMAP2", NULL, MTX_DEF);
  396 
  397         virtual_avail = va;
  398 
  399         *CMAP1 = 0;
  400         for (i = 0; i < NKPT; i++)
  401                 PTD[i] = 0;
  402 
  403         /* Turn on PG_G on kernel page(s) */
  404         pmap_set_pg();
  405 }
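/*
 * Editorial note: the SYSMAP() macro used above just carves "n" pages
 * out of the bootstrap VA cursor and remembers both the chosen VA and
 * the address of its first PTE.  Expanded by hand, the global
 * CMAP1/CADDR1 reservation performed above is approximately:
 *
 *      CADDR1 = (caddr_t)va;   va  += 1 * PAGE_SIZE;
 *      CMAP1  = pte;           pte += 1;
 *
 * i.e. CADDR1 is a spare kernel VA and CMAP1 points at the PTE that
 * later gets rewritten to aim CADDR1 at whatever physical page needs
 * to be zeroed or copied.
 */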
  406 
  407 /*
  408  * Set PG_G on kernel pages.  Only the BSP calls this when SMP is turned on.
  409  */
  410 void
  411 pmap_set_pg(void)
  412 {
  413         pd_entry_t pdir;
  414         pt_entry_t *pte;
  415         vm_offset_t va, endva;
  416         int i; 
  417 
  418         if (pgeflag == 0)
  419                 return;
  420 
  421         i = KERNLOAD/NBPDR;
  422         endva = KERNBASE + KERNend;
  423 
  424         if (pseflag) {
  425                 va = KERNBASE + KERNLOAD;
  426                 while (va  < endva) {
  427                         pdir = kernel_pmap->pm_pdir[KPTDI+i];
  428                         pdir |= pgeflag;
  429                         kernel_pmap->pm_pdir[KPTDI+i] = PTD[KPTDI+i] = pdir;
  430                         invltlb();      /* Play it safe, invltlb() every time */
  431                         i++;
  432                         va += NBPDR;
  433                 }
  434         } else {
  435                 va = (vm_offset_t)btext;
  436                 while (va < endva) {
  437                         pte = vtopte(va);
  438                         if (*pte)
  439                                 *pte |= pgeflag;
  440                         invltlb();      /* Play it safe, invltlb() every time */
  441                         va += PAGE_SIZE;
  442                 }
  443         }
  444 }
  445 
  446 #ifdef PAE
  447 
  448 static MALLOC_DEFINE(M_PMAPPDPT, "pmap", "pmap pdpt");
  449 
  450 static void *
  451 pmap_pdpt_allocf(uma_zone_t zone, int bytes, u_int8_t *flags, int wait)
  452 {
  453         *flags = UMA_SLAB_PRIV;
  454         return (contigmalloc(PAGE_SIZE, M_PMAPPDPT, 0, 0x0ULL, 0xffffffffULL,
  455             1, 0));
  456 }
  457 #endif
  458 
  459 /*
  460  *      Initialize the pmap module.
  461  *      Called by vm_init, to initialize any structures that the pmap
  462  *      system needs to map virtual memory.
   463  *      pmap_init has been enhanced to support, in a fairly consistent
   464  *      way, discontiguous physical memory.
  465  */
  466 void
  467 pmap_init(void)
  468 {
  469         int i;
  470 
  471         /*
  472          * Allocate memory for random pmap data structures.  Includes the
  473          * pv_head_table.
  474          */
  475 
  476         for(i = 0; i < vm_page_array_size; i++) {
  477                 vm_page_t m;
  478 
  479                 m = &vm_page_array[i];
  480                 TAILQ_INIT(&m->md.pv_list);
  481                 m->md.pv_list_count = 0;
  482         }
  483 
  484         /*
  485          * init the pv free list
  486          */
  487         pvzone = uma_zcreate("PV ENTRY", sizeof (struct pv_entry), NULL, NULL, 
  488             NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_VM | UMA_ZONE_NOFREE);
  489         uma_prealloc(pvzone, MINPV);
  490 
  491 #ifdef PAE
  492         pdptzone = uma_zcreate("PDPT", NPGPTD * sizeof(pdpt_entry_t), NULL,
  493             NULL, NULL, NULL, (NPGPTD * sizeof(pdpt_entry_t)) - 1,
  494             UMA_ZONE_VM | UMA_ZONE_NOFREE);
  495         uma_zone_set_allocf(pdptzone, pmap_pdpt_allocf);
  496 #endif
  497 
  498         /*
  499          * Now it is safe to enable pv_table recording.
  500          */
  501         pmap_initialized = TRUE;
  502 }
  503 
  504 /*
  505  * Initialize the address space (zone) for the pv_entries.  Set a
  506  * high water mark so that the system can recover from excessive
  507  * numbers of pv entries.
  508  */
  509 void
  510 pmap_init2()
  511 {
  512         int shpgperproc = PMAP_SHPGPERPROC;
  513 
  514         TUNABLE_INT_FETCH("vm.pmap.shpgperproc", &shpgperproc);
  515         pv_entry_max = shpgperproc * maxproc + vm_page_array_size;
  516         TUNABLE_INT_FETCH("vm.pmap.pv_entries", &pv_entry_max);
  517         pv_entry_high_water = 9 * (pv_entry_max / 10);
  518         uma_zone_set_obj(pvzone, &pvzone_obj, pv_entry_max);
  519 }
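/*
 * Editorial sizing example (all inputs illustrative): with the default
 * PMAP_SHPGPERPROC of 200, maxproc == 1000 and vm_page_array_size ==
 * 250000, the code above computes
 *
 *      pv_entry_max        = 200 * 1000 + 250000 = 450000
 *      pv_entry_high_water = 9 * (450000 / 10)   = 405000
 *
 * so the pagedaemon is woken (see get_pv_entry() below) once the pv
 * entry count crosses 90% of the maximum.  Both values can be
 * overridden via the vm.pmap.shpgperproc and vm.pmap.pv_entries
 * tunables, as fetched above.
 */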
  520 
  521 
  522 /***************************************************
  523  * Low level helper routines.....
  524  ***************************************************/
  525 
  526 #if defined(PMAP_DIAGNOSTIC)
  527 
  528 /*
  529  * This code checks for non-writeable/modified pages.
  530  * This should be an invalid condition.
  531  */
  532 static int
  533 pmap_nw_modified(pt_entry_t ptea)
  534 {
  535         int pte;
  536 
  537         pte = (int) ptea;
  538 
  539         if ((pte & (PG_M|PG_RW)) == PG_M)
  540                 return 1;
  541         else
  542                 return 0;
  543 }
  544 #endif
  545 
  546 
  547 /*
  548  * this routine defines the region(s) of memory that should
  549  * not be tested for the modified bit.
  550  */
  551 static PMAP_INLINE int
  552 pmap_track_modified(vm_offset_t va)
  553 {
  554         if ((va < kmi.clean_sva) || (va >= kmi.clean_eva)) 
  555                 return 1;
  556         else
  557                 return 0;
  558 }
  559 
  560 #ifdef I386_CPU
  561 /*
  562  * i386 only has "invalidate everything" and no SMP to worry about.
  563  */
  564 PMAP_INLINE void
  565 pmap_invalidate_page(pmap_t pmap, vm_offset_t va)
  566 {
  567 
  568         if (pmap == kernel_pmap || pmap->pm_active)
  569                 invltlb();
  570 }
  571 
  572 PMAP_INLINE void
  573 pmap_invalidate_range(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
  574 {
  575 
  576         if (pmap == kernel_pmap || pmap->pm_active)
  577                 invltlb();
  578 }
  579 
  580 PMAP_INLINE void
  581 pmap_invalidate_all(pmap_t pmap)
  582 {
  583 
  584         if (pmap == kernel_pmap || pmap->pm_active)
  585                 invltlb();
  586 }
  587 #else /* !I386_CPU */
  588 #ifdef SMP
  589 /*
  590  * For SMP, these functions have to use the IPI mechanism for coherence.
  591  */
  592 void
  593 pmap_invalidate_page(pmap_t pmap, vm_offset_t va)
  594 {
  595         u_int cpumask;
  596         u_int other_cpus;
  597 
  598         if (smp_started) {
  599                 if (!(read_eflags() & PSL_I))
  600                         panic("%s: interrupts disabled", __func__);
  601                 mtx_lock_spin(&smp_ipi_mtx);
  602         } else
  603                 critical_enter();
  604         /*
  605          * We need to disable interrupt preemption but MUST NOT have
  606          * interrupts disabled here.
  607          * XXX we may need to hold schedlock to get a coherent pm_active
  608          * XXX critical sections disable interrupts again
  609          */
  610         if (pmap == kernel_pmap || pmap->pm_active == all_cpus) {
  611                 invlpg(va);
  612                 smp_invlpg(va);
  613         } else {
  614                 cpumask = PCPU_GET(cpumask);
  615                 other_cpus = PCPU_GET(other_cpus);
  616                 if (pmap->pm_active & cpumask)
  617                         invlpg(va);
  618                 if (pmap->pm_active & other_cpus)
  619                         smp_masked_invlpg(pmap->pm_active & other_cpus, va);
  620         }
  621         if (smp_started)
  622                 mtx_unlock_spin(&smp_ipi_mtx);
  623         else
  624                 critical_exit();
  625 }
  626 
  627 void
  628 pmap_invalidate_range(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
  629 {
  630         u_int cpumask;
  631         u_int other_cpus;
  632         vm_offset_t addr;
  633 
  634         if (smp_started) {
  635                 if (!(read_eflags() & PSL_I))
  636                         panic("%s: interrupts disabled", __func__);
  637                 mtx_lock_spin(&smp_ipi_mtx);
  638         } else
  639                 critical_enter();
  640         /*
  641          * We need to disable interrupt preemption but MUST NOT have
  642          * interrupts disabled here.
  643          * XXX we may need to hold schedlock to get a coherent pm_active
  644          * XXX critical sections disable interrupts again
  645          */
  646         if (pmap == kernel_pmap || pmap->pm_active == all_cpus) {
  647                 for (addr = sva; addr < eva; addr += PAGE_SIZE)
  648                         invlpg(addr);
  649                 smp_invlpg_range(sva, eva);
  650         } else {
  651                 cpumask = PCPU_GET(cpumask);
  652                 other_cpus = PCPU_GET(other_cpus);
  653                 if (pmap->pm_active & cpumask)
  654                         for (addr = sva; addr < eva; addr += PAGE_SIZE)
  655                                 invlpg(addr);
  656                 if (pmap->pm_active & other_cpus)
  657                         smp_masked_invlpg_range(pmap->pm_active & other_cpus,
  658                             sva, eva);
  659         }
  660         if (smp_started)
  661                 mtx_unlock_spin(&smp_ipi_mtx);
  662         else
  663                 critical_exit();
  664 }
  665 
  666 void
  667 pmap_invalidate_all(pmap_t pmap)
  668 {
  669         u_int cpumask;
  670         u_int other_cpus;
  671 
  672         if (smp_started) {
  673                 if (!(read_eflags() & PSL_I))
  674                         panic("%s: interrupts disabled", __func__);
  675                 mtx_lock_spin(&smp_ipi_mtx);
  676         } else
  677                 critical_enter();
  678         /*
  679          * We need to disable interrupt preemption but MUST NOT have
  680          * interrupts disabled here.
  681          * XXX we may need to hold schedlock to get a coherent pm_active
  682          * XXX critical sections disable interrupts again
  683          */
  684         if (pmap == kernel_pmap || pmap->pm_active == all_cpus) {
  685                 invltlb();
  686                 smp_invltlb();
  687         } else {
  688                 cpumask = PCPU_GET(cpumask);
  689                 other_cpus = PCPU_GET(other_cpus);
  690                 if (pmap->pm_active & cpumask)
  691                         invltlb();
  692                 if (pmap->pm_active & other_cpus)
  693                         smp_masked_invltlb(pmap->pm_active & other_cpus);
  694         }
  695         if (smp_started)
  696                 mtx_unlock_spin(&smp_ipi_mtx);
  697         else
  698                 critical_exit();
  699 }
  700 #else /* !SMP */
  701 /*
  702  * Normal, non-SMP, 486+ invalidation functions.
  703  * We inline these within pmap.c for speed.
  704  */
  705 PMAP_INLINE void
  706 pmap_invalidate_page(pmap_t pmap, vm_offset_t va)
  707 {
  708 
  709         if (pmap == kernel_pmap || pmap->pm_active)
  710                 invlpg(va);
  711 }
  712 
  713 PMAP_INLINE void
  714 pmap_invalidate_range(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
  715 {
  716         vm_offset_t addr;
  717 
  718         if (pmap == kernel_pmap || pmap->pm_active)
  719                 for (addr = sva; addr < eva; addr += PAGE_SIZE)
  720                         invlpg(addr);
  721 }
  722 
  723 PMAP_INLINE void
  724 pmap_invalidate_all(pmap_t pmap)
  725 {
  726 
  727         if (pmap == kernel_pmap || pmap->pm_active)
  728                 invltlb();
  729 }
  730 #endif /* !SMP */
  731 #endif /* !I386_CPU */
  732 
  733 /*
  734  * Are we current address space or kernel?  N.B. We return FALSE when
  735  * a pmap's page table is in use because a kernel thread is borrowing
  736  * it.  The borrowed page table can change spontaneously, making any
  737  * dependence on its continued use subject to a race condition.
  738  */
  739 static __inline int
  740 pmap_is_current(pmap_t pmap)
  741 {
  742 
  743         return (pmap == kernel_pmap ||
  744                 (pmap == vmspace_pmap(curthread->td_proc->p_vmspace) &&
  745             (pmap->pm_pdir[PTDPTDI] & PG_FRAME) == (PTDpde[0] & PG_FRAME)));
  746 }
  747 
  748 /*
  749  * If the given pmap is not the current or kernel pmap, the returned pte must
  750  * be released by passing it to pmap_pte_release().
  751  */
  752 pt_entry_t *
  753 pmap_pte(pmap_t pmap, vm_offset_t va)
  754 {
  755         pd_entry_t newpf;
  756         pd_entry_t *pde;
  757 
  758         pde = pmap_pde(pmap, va);
  759         if (*pde & PG_PS)
  760                 return (pde);
  761         if (*pde != 0) {
  762                 /* are we current address space or kernel? */
  763                 if (pmap_is_current(pmap))
  764                         return (vtopte(va));
  765                 mtx_lock(&PMAP2mutex);
  766                 newpf = *pde & PG_FRAME;
  767                 if ((*PMAP2 & PG_FRAME) != newpf) {
  768                         *PMAP2 = newpf | PG_RW | PG_V | PG_A | PG_M;
  769                         pmap_invalidate_page(kernel_pmap, (vm_offset_t)PADDR2);
  770                 }
  771                 return (PADDR2 + (i386_btop(va) & (NPTEPG - 1)));
  772         }
  773         return (0);
  774 }
  775 
  776 /*
  777  * Releases a pte that was obtained from pmap_pte().  Be prepared for the pte
  778  * being NULL.
  779  */
  780 static __inline void
  781 pmap_pte_release(pt_entry_t *pte)
  782 {
  783 
  784         if ((pt_entry_t *)((vm_offset_t)pte & ~PAGE_MASK) == PADDR2)
  785                 mtx_unlock(&PMAP2mutex);
  786 }
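/*
 * Editorial sketch of the pmap_pte()/pmap_pte_release() contract;
 * pmap_extract() below is the in-tree instance of this pattern:
 *
 *      PMAP_LOCK(pmap);
 *      pte = pmap_pte(pmap, va);       may borrow the PMAP2/PADDR2 window
 *      if (pte != NULL) {
 *              pa = (*pte & PG_FRAME) | (va & PAGE_MASK);
 *              pmap_pte_release(pte);  drops PMAP2mutex if it was taken
 *      }
 *      PMAP_UNLOCK(pmap);
 *
 * Skipping the release leaks PMAP2mutex whenever the target pmap is
 * neither the current nor the kernel pmap.
 */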
  787 
  788 static __inline void
  789 invlcaddr(void *caddr)
  790 {
  791 #ifdef I386_CPU
  792         invltlb();
  793 #else
  794         invlpg((u_int)caddr);
  795 #endif
  796 }
  797 
  798 /*
  799  * Super fast pmap_pte routine best used when scanning
  800  * the pv lists.  This eliminates many coarse-grained
  801  * invltlb calls.  Note that many of the pv list
  802  * scans are across different pmaps.  It is very wasteful
  803  * to do an entire invltlb for checking a single mapping.
  804  *
  805  * If the given pmap is not the current pmap, vm_page_queue_mtx
  806  * must be held and curthread pinned to a CPU.
  807  */
  808 static pt_entry_t *
  809 pmap_pte_quick(pmap_t pmap, vm_offset_t va)
  810 {
  811         pd_entry_t newpf;
  812         pd_entry_t *pde;
  813 
  814         pde = pmap_pde(pmap, va);
  815         if (*pde & PG_PS)
  816                 return (pde);
  817         if (*pde != 0) {
  818                 /* are we current address space or kernel? */
  819                 if (pmap_is_current(pmap))
  820                         return (vtopte(va));
  821                 mtx_assert(&vm_page_queue_mtx, MA_OWNED);
  822                 KASSERT(curthread->td_pinned > 0, ("curthread not pinned"));
  823                 newpf = *pde & PG_FRAME;
  824                 if ((*PMAP1 & PG_FRAME) != newpf) {
  825                         *PMAP1 = newpf | PG_RW | PG_V | PG_A | PG_M;
  826 #ifdef SMP
  827                         PMAP1cpu = PCPU_GET(cpuid);
  828 #endif
  829                         invlcaddr(PADDR1);
  830                         PMAP1changed++;
  831                 } else
  832 #ifdef SMP
  833                 if (PMAP1cpu != PCPU_GET(cpuid)) {
  834                         PMAP1cpu = PCPU_GET(cpuid);
  835                         invlcaddr(PADDR1);
  836                         PMAP1changedcpu++;
  837                 } else
  838 #endif
  839                         PMAP1unchanged++;
  840                 return (PADDR1 + (i386_btop(va) & (NPTEPG - 1)));
  841         }
  842         return (0);
  843 }
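/*
 * Editorial note: as the comment above requires, a caller working on a
 * pmap other than its own must hold vm_page_queue_mtx and pin itself
 * before using pmap_pte_quick(); pmap_extract_and_hold() below shows
 * the full discipline:
 *
 *      vm_page_lock_queues();
 *      PMAP_LOCK(pmap);
 *      sched_pin();
 *      pte = *pmap_pte_quick(pmap, va);
 *      ...
 *      sched_unpin();
 *      PMAP_UNLOCK(pmap);
 *      vm_page_unlock_queues();
 */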
  844 
  845 /*
  846  *      Routine:        pmap_extract
  847  *      Function:
  848  *              Extract the physical page address associated
  849  *              with the given map/virtual_address pair.
  850  */
  851 vm_paddr_t 
  852 pmap_extract(pmap_t pmap, vm_offset_t va)
  853 {
  854         vm_paddr_t rtval;
  855         pt_entry_t *pte;
  856         pd_entry_t pde;
  857 
  858         rtval = 0;
  859         PMAP_LOCK(pmap);
  860         pde = pmap->pm_pdir[va >> PDRSHIFT];
  861         if (pde != 0) {
  862                 if ((pde & PG_PS) != 0) {
  863                         rtval = (pde & ~PDRMASK) | (va & PDRMASK);
  864                         PMAP_UNLOCK(pmap);
  865                         return rtval;
  866                 }
  867                 pte = pmap_pte(pmap, va);
  868                 rtval = (*pte & PG_FRAME) | (va & PAGE_MASK);
  869                 pmap_pte_release(pte);
  870         }
  871         PMAP_UNLOCK(pmap);
  872         return (rtval);
  873 }
  874 
  875 /*
  876  *      Routine:        pmap_extract_and_hold
  877  *      Function:
  878  *              Atomically extract and hold the physical page
  879  *              with the given pmap and virtual address pair
  880  *              if that mapping permits the given protection.
  881  */
  882 vm_page_t
  883 pmap_extract_and_hold(pmap_t pmap, vm_offset_t va, vm_prot_t prot)
  884 {
  885         pd_entry_t pde;
  886         pt_entry_t pte;
  887         vm_page_t m;
  888 
  889         m = NULL;
  890         vm_page_lock_queues();
  891         PMAP_LOCK(pmap);
  892         pde = *pmap_pde(pmap, va);
  893         if (pde != 0) {
  894                 if (pde & PG_PS) {
  895                         if ((pde & PG_RW) || (prot & VM_PROT_WRITE) == 0) {
  896                                 m = PHYS_TO_VM_PAGE((pde & ~PDRMASK) |
  897                                     (va & PDRMASK));
  898                                 vm_page_hold(m);
  899                         }
  900                 } else {
  901                         sched_pin();
  902                         pte = *pmap_pte_quick(pmap, va);
  903                         if (pte != 0 &&
  904                             ((pte & PG_RW) || (prot & VM_PROT_WRITE) == 0)) {
  905                                 m = PHYS_TO_VM_PAGE(pte & PG_FRAME);
  906                                 vm_page_hold(m);
  907                         }
  908                         sched_unpin();
  909                 }
  910         }
  911         vm_page_unlock_queues();
  912         PMAP_UNLOCK(pmap);
  913         return (m);
  914 }
  915 
  916 /***************************************************
  917  * Low level mapping routines.....
  918  ***************************************************/
  919 
  920 /*
  921  * Add a wired page to the kva.
  922  * Note: not SMP coherent.
  923  */
  924 PMAP_INLINE void 
  925 pmap_kenter(vm_offset_t va, vm_paddr_t pa)
  926 {
  927         pt_entry_t *pte;
  928 
  929         pte = vtopte(va);
  930         pte_store(pte, pa | PG_RW | PG_V | pgeflag);
  931 }
  932 
  933 /*
  934  * Remove a page from the kernel pagetables.
  935  * Note: not SMP coherent.
  936  */
  937 PMAP_INLINE void
  938 pmap_kremove(vm_offset_t va)
  939 {
  940         pt_entry_t *pte;
  941 
  942         pte = vtopte(va);
  943         pte_clear(pte);
  944 }
  945 
  946 /*
  947  *      Used to map a range of physical addresses into kernel
  948  *      virtual address space.
  949  *
  950  *      The value passed in '*virt' is a suggested virtual address for
  951  *      the mapping. Architectures which can support a direct-mapped
  952  *      physical to virtual region can return the appropriate address
  953  *      within that region, leaving '*virt' unchanged. Other
  954  *      architectures should map the pages starting at '*virt' and
  955  *      update '*virt' with the first usable address after the mapped
  956  *      region.
  957  */
  958 vm_offset_t
  959 pmap_map(vm_offset_t *virt, vm_paddr_t start, vm_paddr_t end, int prot)
  960 {
  961         vm_offset_t va, sva;
  962 
  963         va = sva = *virt;
  964         while (start < end) {
  965                 pmap_kenter(va, start);
  966                 va += PAGE_SIZE;
  967                 start += PAGE_SIZE;
  968         }
  969         pmap_invalidate_range(kernel_pmap, sva, va);
  970         *virt = va;
  971         return (sva);
  972 }
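/*
 * Editorial usage sketch (names illustrative): an early-boot caller
 * hands pmap_map() the current KVA cursor and a physical range, and
 * gets back the VA at which the range became visible:
 *
 *      vm_offset_t va = virtual_avail;
 *      vm_offset_t mapped;
 *
 *      mapped = pmap_map(&va, pa_start, pa_end,
 *          VM_PROT_READ | VM_PROT_WRITE);
 *      virtual_avail = va;     va was advanced past the new mapping
 *
 * On i386 there is no direct map, so the pages really are entered via
 * pmap_kenter() and '*virt' is always advanced.
 */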
  973 
  974 
  975 /*
   976  * Add a list of wired pages to the kva.  This routine
   977  * is only used for temporary
  978  * kernel mappings that do not need to have
  979  * page modification or references recorded.
  980  * Note that old mappings are simply written
  981  * over.  The page *must* be wired.
  982  * Note: SMP coherent.  Uses a ranged shootdown IPI.
  983  */
  984 void
  985 pmap_qenter(vm_offset_t sva, vm_page_t *m, int count)
  986 {
  987         vm_offset_t va;
  988 
  989         va = sva;
  990         while (count-- > 0) {
  991                 pmap_kenter(va, VM_PAGE_TO_PHYS(*m));
  992                 va += PAGE_SIZE;
  993                 m++;
  994         }
  995         pmap_invalidate_range(kernel_pmap, sva, va);
  996 }
  997 
  998 /*
  999  * This routine tears out page mappings from the
 1000  * kernel -- it is meant only for temporary mappings.
 1001  * Note: SMP coherent.  Uses a ranged shootdown IPI.
 1002  */
 1003 void
 1004 pmap_qremove(vm_offset_t sva, int count)
 1005 {
 1006         vm_offset_t va;
 1007 
 1008         va = sva;
 1009         while (count-- > 0) {
 1010                 pmap_kremove(va);
 1011                 va += PAGE_SIZE;
 1012         }
 1013         pmap_invalidate_range(kernel_pmap, sva, va);
 1014 }
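/*
 * Editorial sketch: pmap_qenter()/pmap_qremove() are the usual way to
 * window an array of vm_page_t through a reserved chunk of KVA, e.g.
 * for buffer or pager I/O (kva, pages and npages illustrative):
 *
 *      pmap_qenter(kva, pages, npages);
 *      ... perform the I/O or copy through kva ...
 *      pmap_qremove(kva, npages);
 *
 * Unlike pmap_kenter()/pmap_kremove(), both routines perform a ranged
 * TLB shootdown themselves, so no separate pmap_invalidate_range()
 * call is required.
 */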
 1015 
 1016 /***************************************************
 1017  * Page table page management routines.....
 1018  ***************************************************/
 1019 
 1020 /*
 1021  * This routine unholds page table pages, and if the hold count
 1022  * drops to zero, then it decrements the wire count.
 1023  */
 1024 static PMAP_INLINE int
 1025 pmap_unwire_pte_hold(pmap_t pmap, vm_page_t m)
 1026 {
 1027 
 1028         --m->wire_count;
 1029         if (m->wire_count == 0)
 1030                 return _pmap_unwire_pte_hold(pmap, m);
 1031         else
 1032                 return 0;
 1033 }
 1034 
 1035 static int 
 1036 _pmap_unwire_pte_hold(pmap_t pmap, vm_page_t m)
 1037 {
 1038         vm_offset_t pteva;
 1039 
 1040         /*
 1041          * unmap the page table page
 1042          */
 1043         pmap->pm_pdir[m->pindex] = 0;
 1044         --pmap->pm_stats.resident_count;
 1045 
 1046         /*
 1047          * Do an invltlb to make the invalidated mapping
 1048          * take effect immediately.
 1049          */
 1050         pteva = VM_MAXUSER_ADDRESS + i386_ptob(m->pindex);
 1051         pmap_invalidate_page(pmap, pteva);
 1052 
 1053         vm_page_free_zero(m);
 1054         atomic_subtract_int(&cnt.v_wire_count, 1);
 1055         return 1;
 1056 }
 1057 
 1058 /*
 1059  * After removing a page table entry, this routine is used to
 1060  * conditionally free the page, and manage the hold/wire counts.
 1061  */
 1062 static int
 1063 pmap_unuse_pt(pmap_t pmap, vm_offset_t va)
 1064 {
 1065         pd_entry_t ptepde;
 1066         vm_page_t mpte;
 1067 
 1068         if (va >= VM_MAXUSER_ADDRESS)
 1069                 return 0;
 1070         ptepde = *pmap_pde(pmap, va);
 1071         mpte = PHYS_TO_VM_PAGE(ptepde & PG_FRAME);
 1072         return pmap_unwire_pte_hold(pmap, mpte);
 1073 }
 1074 
 1075 void
 1076 pmap_pinit0(pmap)
 1077         struct pmap *pmap;
 1078 {
 1079 
 1080         PMAP_LOCK_INIT(pmap);
 1081         pmap->pm_pdir = (pd_entry_t *)(KERNBASE + (vm_offset_t)IdlePTD);
 1082 #ifdef PAE
 1083         pmap->pm_pdpt = (pdpt_entry_t *)(KERNBASE + (vm_offset_t)IdlePDPT);
 1084 #endif
 1085         pmap->pm_active = 0;
 1086         PCPU_SET(curpmap, pmap);
 1087         TAILQ_INIT(&pmap->pm_pvlist);
 1088         bzero(&pmap->pm_stats, sizeof pmap->pm_stats);
 1089         mtx_lock_spin(&allpmaps_lock);
 1090         LIST_INSERT_HEAD(&allpmaps, pmap, pm_list);
 1091         mtx_unlock_spin(&allpmaps_lock);
 1092 }
 1093 
 1094 /*
 1095  * Initialize a preallocated and zeroed pmap structure,
 1096  * such as one in a vmspace structure.
 1097  */
 1098 void
 1099 pmap_pinit(pmap)
 1100         register struct pmap *pmap;
 1101 {
 1102         vm_page_t m, ptdpg[NPGPTD];
 1103         vm_paddr_t pa;
 1104         static int color;
 1105         int i;
 1106 
 1107         PMAP_LOCK_INIT(pmap);
 1108 
 1109         /*
 1110          * No need to allocate page table space yet but we do need a valid
 1111          * page directory table.
 1112          */
 1113         if (pmap->pm_pdir == NULL) {
 1114                 pmap->pm_pdir = (pd_entry_t *)kmem_alloc_nofault(kernel_map,
 1115                     NBPTD);
 1116 #ifdef PAE
 1117                 pmap->pm_pdpt = uma_zalloc(pdptzone, M_WAITOK | M_ZERO);
 1118                 KASSERT(((vm_offset_t)pmap->pm_pdpt &
 1119                     ((NPGPTD * sizeof(pdpt_entry_t)) - 1)) == 0,
 1120                     ("pmap_pinit: pdpt misaligned"));
 1121                 KASSERT(pmap_kextract((vm_offset_t)pmap->pm_pdpt) < (4ULL<<30),
 1122                     ("pmap_pinit: pdpt above 4g"));
 1123 #endif
 1124         }
 1125 
 1126         /*
 1127          * allocate the page directory page(s)
 1128          */
 1129         for (i = 0; i < NPGPTD;) {
 1130                 m = vm_page_alloc(NULL, color++,
 1131                     VM_ALLOC_NORMAL | VM_ALLOC_NOOBJ | VM_ALLOC_WIRED |
 1132                     VM_ALLOC_ZERO);
 1133                 if (m == NULL)
 1134                         VM_WAIT;
 1135                 else {
 1136                         ptdpg[i++] = m;
 1137                 }
 1138         }
 1139 
 1140         pmap_qenter((vm_offset_t)pmap->pm_pdir, ptdpg, NPGPTD);
 1141 
 1142         for (i = 0; i < NPGPTD; i++) {
 1143                 if ((ptdpg[i]->flags & PG_ZERO) == 0)
 1144                         bzero(pmap->pm_pdir + (i * NPDEPG), PAGE_SIZE);
 1145         }
 1146 
 1147         mtx_lock_spin(&allpmaps_lock);
 1148         LIST_INSERT_HEAD(&allpmaps, pmap, pm_list);
 1149         mtx_unlock_spin(&allpmaps_lock);
 1150         /* Wire in kernel global address entries. */
 1151         /* XXX copies current process, does not fill in MPPTDI */
 1152         bcopy(PTD + KPTDI, pmap->pm_pdir + KPTDI, nkpt * sizeof(pd_entry_t));
 1153 #ifdef SMP
 1154         pmap->pm_pdir[MPPTDI] = PTD[MPPTDI];
 1155 #endif
 1156 
 1157         /* install self-referential address mapping entry(s) */
 1158         for (i = 0; i < NPGPTD; i++) {
 1159                 pa = VM_PAGE_TO_PHYS(ptdpg[i]);
 1160                 pmap->pm_pdir[PTDPTDI + i] = pa | PG_V | PG_RW | PG_A | PG_M;
 1161 #ifdef PAE
 1162                 pmap->pm_pdpt[i] = pa | PG_V;
 1163 #endif
 1164         }
 1165 
 1166         pmap->pm_active = 0;
 1167         TAILQ_INIT(&pmap->pm_pvlist);
 1168         bzero(&pmap->pm_stats, sizeof pmap->pm_stats);
 1169 }
 1170 
 1171 /*
 1172  * this routine is called if the page table page is not
 1173  * mapped correctly.
 1174  */
 1175 static vm_page_t
 1176 _pmap_allocpte(pmap_t pmap, unsigned ptepindex, int flags)
 1177 {
 1178         vm_paddr_t ptepa;
 1179         vm_page_t m;
 1180 
 1181         KASSERT((flags & (M_NOWAIT | M_WAITOK)) == M_NOWAIT ||
 1182             (flags & (M_NOWAIT | M_WAITOK)) == M_WAITOK,
 1183             ("_pmap_allocpte: flags is neither M_NOWAIT nor M_WAITOK"));
 1184 
 1185         /*
 1186          * Allocate a page table page.
 1187          */
 1188         if ((m = vm_page_alloc(NULL, ptepindex, VM_ALLOC_NOOBJ |
 1189             VM_ALLOC_WIRED | VM_ALLOC_ZERO)) == NULL) {
 1190                 if (flags & M_WAITOK) {
 1191                         PMAP_UNLOCK(pmap);
 1192                         vm_page_unlock_queues();
 1193                         VM_WAIT;
 1194                         vm_page_lock_queues();
 1195                         PMAP_LOCK(pmap);
 1196                 }
 1197 
 1198                 /*
 1199                  * Indicate the need to retry.  While waiting, the page table
 1200                  * page may have been allocated.
 1201                  */
 1202                 return (NULL);
 1203         }
 1204         if ((m->flags & PG_ZERO) == 0)
 1205                 pmap_zero_page(m);
 1206 
 1207         /*
 1208          * Map the pagetable page into the process address space, if
 1209          * it isn't already there.
 1210          */
 1211 
 1212         pmap->pm_stats.resident_count++;
 1213 
 1214         ptepa = VM_PAGE_TO_PHYS(m);
 1215         pmap->pm_pdir[ptepindex] =
 1216                 (pd_entry_t) (ptepa | PG_U | PG_RW | PG_V | PG_A | PG_M);
 1217 
 1218         return m;
 1219 }
 1220 
 1221 static vm_page_t
 1222 pmap_allocpte(pmap_t pmap, vm_offset_t va, int flags)
 1223 {
 1224         unsigned ptepindex;
 1225         pd_entry_t ptepa;
 1226         vm_page_t m;
 1227 
 1228         KASSERT((flags & (M_NOWAIT | M_WAITOK)) == M_NOWAIT ||
 1229             (flags & (M_NOWAIT | M_WAITOK)) == M_WAITOK,
 1230             ("pmap_allocpte: flags is neither M_NOWAIT nor M_WAITOK"));
 1231 
 1232         /*
 1233          * Calculate pagetable page index
 1234          */
 1235         ptepindex = va >> PDRSHIFT;
 1236 retry:
 1237         /*
 1238          * Get the page directory entry
 1239          */
 1240         ptepa = pmap->pm_pdir[ptepindex];
 1241 
 1242         /*
 1243          * This supports switching from a 4MB page to a
 1244          * normal 4K page.
 1245          */
 1246         if (ptepa & PG_PS) {
 1247                 pmap->pm_pdir[ptepindex] = 0;
 1248                 ptepa = 0;
 1249                 pmap_invalidate_all(kernel_pmap);
 1250         }
 1251 
 1252         /*
 1253          * If the page table page is mapped, we just increment the
 1254          * hold count, and activate it.
 1255          */
 1256         if (ptepa) {
 1257                 m = PHYS_TO_VM_PAGE(ptepa);
 1258                 m->wire_count++;
 1259         } else {
 1260                 /*
 1261                  * Here if the pte page isn't mapped, or if it has
 1262                  * been deallocated. 
 1263                  */
 1264                 m = _pmap_allocpte(pmap, ptepindex, flags);
 1265                 if (m == NULL && (flags & M_WAITOK))
 1266                         goto retry;
 1267         }
 1268         return (m);
 1269 }
 1270 
 1271 
 1272 /***************************************************
  1273  * Pmap allocation/deallocation routines.
 1274  ***************************************************/
 1275 
 1276 #ifdef SMP
 1277 /*
 1278  * Deal with a SMP shootdown of other users of the pmap that we are
 1279  * trying to dispose of.  This can be a bit hairy.
 1280  */
 1281 static u_int *lazymask;
 1282 static u_int lazyptd;
 1283 static volatile u_int lazywait;
 1284 
 1285 void pmap_lazyfix_action(void);
 1286 
 1287 void
 1288 pmap_lazyfix_action(void)
 1289 {
 1290         u_int mymask = PCPU_GET(cpumask);
 1291 
 1292         if (rcr3() == lazyptd)
 1293                 load_cr3(PCPU_GET(curpcb)->pcb_cr3);
 1294         atomic_clear_int(lazymask, mymask);
 1295         atomic_store_rel_int(&lazywait, 1);
 1296 }
 1297 
 1298 static void
 1299 pmap_lazyfix_self(u_int mymask)
 1300 {
 1301 
 1302         if (rcr3() == lazyptd)
 1303                 load_cr3(PCPU_GET(curpcb)->pcb_cr3);
 1304         atomic_clear_int(lazymask, mymask);
 1305 }
 1306 
 1307 
 1308 static void
 1309 pmap_lazyfix(pmap_t pmap)
 1310 {
 1311         u_int mymask = PCPU_GET(cpumask);
 1312         u_int mask;
 1313         register u_int spins;
 1314 
 1315         while ((mask = pmap->pm_active) != 0) {
 1316                 spins = 50000000;
 1317                 mask = mask & -mask;    /* Find least significant set bit */
 1318                 mtx_lock_spin(&smp_ipi_mtx);
 1319 #ifdef PAE
 1320                 lazyptd = vtophys(pmap->pm_pdpt);
 1321 #else
 1322                 lazyptd = vtophys(pmap->pm_pdir);
 1323 #endif
 1324                 if (mask == mymask) {
 1325                         lazymask = &pmap->pm_active;
 1326                         pmap_lazyfix_self(mymask);
 1327                 } else {
 1328                         atomic_store_rel_int((u_int *)&lazymask,
 1329                             (u_int)&pmap->pm_active);
 1330                         atomic_store_rel_int(&lazywait, 0);
 1331                         ipi_selected(mask, IPI_LAZYPMAP);
 1332                         while (lazywait == 0) {
 1333                                 ia32_pause();
 1334                                 if (--spins == 0)
 1335                                         break;
 1336                         }
 1337                 }
 1338                 mtx_unlock_spin(&smp_ipi_mtx);
 1339                 if (spins == 0)
 1340                         printf("pmap_lazyfix: spun for 50000000\n");
 1341         }
 1342 }
 1343 
 1344 #else   /* SMP */
 1345 
 1346 /*
 1347  * Cleaning up on uniprocessor is easy.  For various reasons, we're
 1348  * unlikely to have to even execute this code, including the fact
 1349  * that the cleanup is deferred until the parent does a wait(2), which
 1350  * means that another userland process has run.
 1351  */
 1352 static void
 1353 pmap_lazyfix(pmap_t pmap)
 1354 {
 1355         u_int cr3;
 1356 
 1357         cr3 = vtophys(pmap->pm_pdir);
 1358         if (cr3 == rcr3()) {
 1359                 load_cr3(PCPU_GET(curpcb)->pcb_cr3);
 1360                 pmap->pm_active &= ~(PCPU_GET(cpumask));
 1361         }
 1362 }
 1363 #endif  /* SMP */
 1364 
 1365 /*
 1366  * Release any resources held by the given physical map.
 1367  * Called when a pmap initialized by pmap_pinit is being released.
 1368  * Should only be called if the map contains no valid mappings.
 1369  */
 1370 void
 1371 pmap_release(pmap_t pmap)
 1372 {
 1373         vm_page_t m, ptdpg[NPGPTD];
 1374         int i;
 1375 
 1376         KASSERT(pmap->pm_stats.resident_count == 0,
 1377             ("pmap_release: pmap resident count %ld != 0",
 1378             pmap->pm_stats.resident_count));
 1379 
 1380         pmap_lazyfix(pmap);
 1381         mtx_lock_spin(&allpmaps_lock);
 1382         LIST_REMOVE(pmap, pm_list);
 1383         mtx_unlock_spin(&allpmaps_lock);
 1384 
 1385         for (i = 0; i < NPGPTD; i++)
 1386                 ptdpg[i] = PHYS_TO_VM_PAGE(pmap->pm_pdir[PTDPTDI + i]);
 1387 
 1388         bzero(pmap->pm_pdir + PTDPTDI, (nkpt + NPGPTD) *
 1389             sizeof(*pmap->pm_pdir));
 1390 #ifdef SMP
 1391         pmap->pm_pdir[MPPTDI] = 0;
 1392 #endif
 1393 
 1394         pmap_qremove((vm_offset_t)pmap->pm_pdir, NPGPTD);
 1395 
 1396         vm_page_lock_queues();
 1397         for (i = 0; i < NPGPTD; i++) {
 1398                 m = ptdpg[i];
 1399 #ifdef PAE
 1400                 KASSERT(VM_PAGE_TO_PHYS(m) == (pmap->pm_pdpt[i] & PG_FRAME),
 1401                     ("pmap_release: got wrong ptd page"));
 1402 #endif
 1403                 m->wire_count--;
 1404                 atomic_subtract_int(&cnt.v_wire_count, 1);
 1405                 vm_page_free_zero(m);
 1406         }
 1407         vm_page_unlock_queues();
 1408         PMAP_LOCK_DESTROY(pmap);
 1409 }
 1410 
 1411 static int
 1412 kvm_size(SYSCTL_HANDLER_ARGS)
 1413 {
 1414         unsigned long ksize = VM_MAX_KERNEL_ADDRESS - KERNBASE;
 1415 
 1416         return sysctl_handle_long(oidp, &ksize, 0, req);
 1417 }
 1418 SYSCTL_PROC(_vm, OID_AUTO, kvm_size, CTLTYPE_LONG|CTLFLAG_RD, 
 1419     0, 0, kvm_size, "IU", "Size of KVM");
 1420 
 1421 static int
 1422 kvm_free(SYSCTL_HANDLER_ARGS)
 1423 {
 1424         unsigned long kfree = VM_MAX_KERNEL_ADDRESS - kernel_vm_end;
 1425 
 1426         return sysctl_handle_long(oidp, &kfree, 0, req);
 1427 }
 1428 SYSCTL_PROC(_vm, OID_AUTO, kvm_free, CTLTYPE_LONG|CTLFLAG_RD, 
 1429     0, 0, kvm_free, "IU", "Amount of KVM free");
 1430 
 1431 /*
 1432  * grow the number of kernel page table entries, if needed
 1433  */
 1434 void
 1435 pmap_growkernel(vm_offset_t addr)
 1436 {
 1437         struct pmap *pmap;
 1438         vm_paddr_t ptppaddr;
 1439         vm_page_t nkpg;
 1440         pd_entry_t newpdir;
 1441         pt_entry_t *pde;
 1442 
 1443         mtx_assert(&kernel_map->system_mtx, MA_OWNED);
 1444         if (kernel_vm_end == 0) {
 1445                 kernel_vm_end = KERNBASE;
 1446                 nkpt = 0;
 1447                 while (pdir_pde(PTD, kernel_vm_end)) {
 1448                         kernel_vm_end = (kernel_vm_end + PAGE_SIZE * NPTEPG) & ~(PAGE_SIZE * NPTEPG - 1);
 1449                         nkpt++;
 1450                 }
 1451         }
 1452         addr = roundup2(addr, PAGE_SIZE * NPTEPG);
 1453         while (kernel_vm_end < addr) {
 1454                 if (pdir_pde(PTD, kernel_vm_end)) {
 1455                         kernel_vm_end = (kernel_vm_end + PAGE_SIZE * NPTEPG) & ~(PAGE_SIZE * NPTEPG - 1);
 1456                         continue;
 1457                 }
 1458 
 1459                 /*
 1460                  * This index is bogus, but out of the way
 1461                  */
 1462                 nkpg = vm_page_alloc(NULL, nkpt,
 1463                     VM_ALLOC_NOOBJ | VM_ALLOC_SYSTEM | VM_ALLOC_WIRED);
 1464                 if (!nkpg)
 1465                         panic("pmap_growkernel: no memory to grow kernel");
 1466 
 1467                 nkpt++;
 1468 
 1469                 pmap_zero_page(nkpg);
 1470                 ptppaddr = VM_PAGE_TO_PHYS(nkpg);
 1471                 newpdir = (pd_entry_t) (ptppaddr | PG_V | PG_RW | PG_A | PG_M);
 1472                 pdir_pde(PTD, kernel_vm_end) = newpdir;
 1473 
 1474                 mtx_lock_spin(&allpmaps_lock);
 1475                 LIST_FOREACH(pmap, &allpmaps, pm_list) {
 1476                         pde = pmap_pde(pmap, kernel_vm_end);
 1477                         pde_store(pde, newpdir);
 1478                 }
 1479                 mtx_unlock_spin(&allpmaps_lock);
 1480                 kernel_vm_end = (kernel_vm_end + PAGE_SIZE * NPTEPG) & ~(PAGE_SIZE * NPTEPG - 1);
 1481         }
 1482 }
 1483 
 1484 
 1485 /***************************************************
 1486  * page management routines.
 1487  ***************************************************/
 1488 
 1489 /*
 1490  * free the pv_entry back to the free list
 1491  */
 1492 static PMAP_INLINE void
 1493 free_pv_entry(pv_entry_t pv)
 1494 {
 1495         pv_entry_count--;
 1496         uma_zfree(pvzone, pv);
 1497 }
 1498 
 1499 /*
 1500  * get a new pv_entry, allocating a block from the system
 1501  * when needed.
 1502  * the memory allocation is performed bypassing the malloc code
 1503  * because of the possibility of allocations at interrupt time.
 1504  */
 1505 static pv_entry_t
 1506 get_pv_entry(void)
 1507 {
 1508         pv_entry_count++;
 1509         if (pv_entry_high_water &&
 1510                 (pv_entry_count > pv_entry_high_water) &&
 1511                 (pmap_pagedaemon_waken == 0)) {
 1512                 pmap_pagedaemon_waken = 1;
 1513                 wakeup (&vm_pages_needed);
 1514         }
 1515         return uma_zalloc(pvzone, M_NOWAIT);
 1516 }
 1517 
 1518 
 1519 static int
 1520 pmap_remove_entry(pmap_t pmap, vm_page_t m, vm_offset_t va)
 1521 {
 1522         pv_entry_t pv;
 1523         int rtval;
 1524 
 1525         PMAP_LOCK_ASSERT(pmap, MA_OWNED);
 1526         mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 1527         if (m->md.pv_list_count < pmap->pm_stats.resident_count) {
 1528                 TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
 1529                         if (pmap == pv->pv_pmap && va == pv->pv_va) 
 1530                                 break;
 1531                 }
 1532         } else {
 1533                 TAILQ_FOREACH(pv, &pmap->pm_pvlist, pv_plist) {
 1534                         if (va == pv->pv_va) 
 1535                                 break;
 1536                 }
 1537         }
 1538 
 1539         rtval = 0;
 1540         if (pv) {
 1541                 rtval = pmap_unuse_pt(pmap, va);
 1542                 TAILQ_REMOVE(&m->md.pv_list, pv, pv_list);
 1543                 m->md.pv_list_count--;
 1544                 if (TAILQ_FIRST(&m->md.pv_list) == NULL)
 1545                         vm_page_flag_clear(m, PG_WRITEABLE);
 1546 
 1547                 TAILQ_REMOVE(&pmap->pm_pvlist, pv, pv_plist);
 1548                 free_pv_entry(pv);
 1549         }
 1550                         
 1551         return rtval;
 1552 }
 1553 
 1554 /*
 1555  * Create a pv entry for the page m mapped at
 1556  * (pmap, va).
 1557  */
 1558 static void
 1559 pmap_insert_entry(pmap_t pmap, vm_offset_t va, vm_page_t m)
 1560 {
 1561         pv_entry_t pv;
 1562 
 1563         pv = get_pv_entry();
 1564         pv->pv_va = va;
 1565         pv->pv_pmap = pmap;
 1566 
 1567         PMAP_LOCK_ASSERT(pmap, MA_OWNED);
 1568         mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 1569         TAILQ_INSERT_TAIL(&pmap->pm_pvlist, pv, pv_plist);
 1570         TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_list);
 1571         m->md.pv_list_count++;
 1572 }
 1573 
 1574 /*
 1575  * pmap_remove_pte: unmap a single page from a process address space.
 1576  */
 1577 static int
 1578 pmap_remove_pte(pmap_t pmap, pt_entry_t *ptq, vm_offset_t va)
 1579 {
 1580         pt_entry_t oldpte;
 1581         vm_page_t m;
 1582 
 1583         mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 1584         PMAP_LOCK_ASSERT(pmap, MA_OWNED);
 1585         oldpte = pte_load_clear(ptq);
 1586         if (oldpte & PG_W)
 1587                 pmap->pm_stats.wired_count -= 1;
 1588         /*
 1589          * Machines that don't support invlpg also don't support
 1590          * PG_G.
 1591          */
 1592         if (oldpte & PG_G)
 1593                 pmap_invalidate_page(kernel_pmap, va);
 1594         pmap->pm_stats.resident_count -= 1;
 1595         if (oldpte & PG_MANAGED) {
 1596                 m = PHYS_TO_VM_PAGE(oldpte);
 1597                 if (oldpte & PG_M) {
 1598 #if defined(PMAP_DIAGNOSTIC)
 1599                         if (pmap_nw_modified((pt_entry_t) oldpte)) {
 1600                                 printf(
 1601         "pmap_remove: modified page not writable: va: 0x%x, pte: 0x%x\n",
 1602                                     va, oldpte);
 1603                         }
 1604 #endif
 1605                         if (pmap_track_modified(va))
 1606                                 vm_page_dirty(m);
 1607                 }
 1608                 if (oldpte & PG_A)
 1609                         vm_page_flag_set(m, PG_REFERENCED);
 1610                 return pmap_remove_entry(pmap, m, va);
 1611         } else {
 1612                 return pmap_unuse_pt(pmap, va);
 1613         }
 1614 }
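      /*
       * In both branches above the return value ultimately comes from
       * pmap_unuse_pt(): it is non-zero when removing this mapping dropped
       * the last reference on the page table page and freed it, which lets
       * pmap_remove() stop scanning a page table page that no longer
       * exists.
       */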
 1615 
 1616 /*
 1617  * Remove a single page from a process address space
 1618  */
 1619 static void
 1620 pmap_remove_page(pmap_t pmap, vm_offset_t va)
 1621 {
 1622         pt_entry_t *pte;
 1623 
 1624         mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 1625         KASSERT(curthread->td_pinned > 0, ("curthread not pinned"));
 1626         PMAP_LOCK_ASSERT(pmap, MA_OWNED);
 1627         if ((pte = pmap_pte_quick(pmap, va)) == NULL || *pte == 0)
 1628                 return;
 1629         pmap_remove_pte(pmap, pte, va);
 1630         pmap_invalidate_page(pmap, va);
 1631 }
 1632 
 1633 /*
 1634  *      Remove the given range of addresses from the specified map.
 1635  *
 1636  *      It is assumed that the start and end are properly
 1637  *      rounded to the page size.
 1638  */
 1639 void
 1640 pmap_remove(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
 1641 {
 1642         vm_offset_t pdnxt;
 1643         pd_entry_t ptpaddr;
 1644         pt_entry_t *pte;
 1645         int anyvalid;
 1646 
 1647         /*
 1648          * Perform an unsynchronized read.  This is, however, safe.
 1649          */
 1650         if (pmap->pm_stats.resident_count == 0)
 1651                 return;
 1652 
 1653         anyvalid = 0;
 1654 
 1655         vm_page_lock_queues();
 1656         sched_pin();
 1657         PMAP_LOCK(pmap);
 1658 
 1659         /*
 1660          * Special handling for removing a single page: it is a very
 1661          * common operation, so it is worth short-circuiting the
 1662          * more general code below.
 1663          */
 1664         if ((sva + PAGE_SIZE == eva) && 
 1665             ((pmap->pm_pdir[(sva >> PDRSHIFT)] & PG_PS) == 0)) {
 1666                 pmap_remove_page(pmap, sva);
 1667                 goto out;
 1668         }
 1669 
 1670         for (; sva < eva; sva = pdnxt) {
 1671                 unsigned pdirindex;
 1672 
 1673                 /*
 1674                  * Calculate index for next page table.
 1675                  */
 1676                 pdnxt = (sva + NBPDR) & ~PDRMASK;
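                      /*
                       * Illustrative arithmetic (non-PAE, NBPDR == 4 MB and
                       * PDRMASK == 0x3fffff): pdnxt is the first address of
                       * the next page-directory region, e.g. sva == 0x08148000
                       * gives pdnxt == 0x08400000.
                       */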
 1677                 if (pmap->pm_stats.resident_count == 0)
 1678                         break;
 1679 
 1680                 pdirindex = sva >> PDRSHIFT;
 1681                 ptpaddr = pmap->pm_pdir[pdirindex];
 1682 
 1683                 /*
 1684                  * Weed out invalid mappings. Note: we assume that the page
 1685                  * directory table is always allocated, and in kernel virtual.
 1686                  */
 1687                 if (ptpaddr == 0)
 1688                         continue;
 1689 
 1690                 /*
 1691                  * Check for large page.
 1692                  */
 1693                 if ((ptpaddr & PG_PS) != 0) {
 1694                         pmap->pm_pdir[pdirindex] = 0;
 1695                         pmap->pm_stats.resident_count -= NBPDR / PAGE_SIZE;
 1696                         anyvalid = 1;
 1697                         continue;
 1698                 }
 1699 
 1700                 /*
 1701                  * Limit our scan to either the end of the va represented
 1702                  * by the current page table page, or to the end of the
 1703                  * range being removed.
 1704                  */
 1705                 if (pdnxt > eva)
 1706                         pdnxt = eva;
 1707 
 1708                 for (; sva != pdnxt; sva += PAGE_SIZE) {
 1709                         if ((pte = pmap_pte_quick(pmap, sva)) == NULL ||
 1710                             *pte == 0)
 1711                                 continue;
 1712                         anyvalid = 1;
 1713                         if (pmap_remove_pte(pmap, pte, sva))
 1714                                 break;
 1715                 }
 1716         }
 1717 out:
 1718         sched_unpin();
 1719         vm_page_unlock_queues();
 1720         if (anyvalid)
 1721                 pmap_invalidate_all(pmap);
 1722         PMAP_UNLOCK(pmap);
 1723 }
 1724 
 1725 /*
 1726  *      Routine:        pmap_remove_all
 1727  *      Function:
 1728  *              Removes this physical page from
 1729  *              all physical maps in which it resides.
 1730  *              Reflects back modify bits to the pager.
 1731  *
 1732  *      Notes:
 1733  *              Original versions of this routine were very
 1734  *              inefficient because they iteratively called
 1735  *              pmap_remove (slow...)
 1736  */
 1737 
 1738 void
 1739 pmap_remove_all(vm_page_t m)
 1740 {
 1741         register pv_entry_t pv;
 1742         pt_entry_t *pte, tpte;
 1743 
 1744 #if defined(PMAP_DIAGNOSTIC)
 1745         /*
 1746          * XXX This makes pmap_remove_all() illegal for non-managed pages!
 1747          */
 1748         if (!pmap_initialized || (m->flags & PG_FICTITIOUS)) {
 1749                 panic("pmap_remove_all: illegal for unmanaged page, pa: 0x%x",
 1750                     VM_PAGE_TO_PHYS(m));
 1751         }
 1752 #endif
 1753         mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 1754         sched_pin();
 1755         while ((pv = TAILQ_FIRST(&m->md.pv_list)) != NULL) {
 1756                 PMAP_LOCK(pv->pv_pmap);
 1757                 pv->pv_pmap->pm_stats.resident_count--;
 1758                 pte = pmap_pte_quick(pv->pv_pmap, pv->pv_va);
 1759                 tpte = pte_load_clear(pte);
 1760                 if (tpte & PG_W)
 1761                         pv->pv_pmap->pm_stats.wired_count--;
 1762                 if (tpte & PG_A)
 1763                         vm_page_flag_set(m, PG_REFERENCED);
 1764 
 1765                 /*
 1766                  * Update the vm_page_t clean and reference bits.
 1767                  */
 1768                 if (tpte & PG_M) {
 1769 #if defined(PMAP_DIAGNOSTIC)
 1770                         if (pmap_nw_modified((pt_entry_t) tpte)) {
 1771                                 printf(
 1772         "pmap_remove_all: modified page not writable: va: 0x%x, pte: 0x%x\n",
 1773                                     pv->pv_va, tpte);
 1774                         }
 1775 #endif
 1776                         if (pmap_track_modified(pv->pv_va))
 1777                                 vm_page_dirty(m);
 1778                 }
 1779                 pmap_invalidate_page(pv->pv_pmap, pv->pv_va);
 1780                 TAILQ_REMOVE(&pv->pv_pmap->pm_pvlist, pv, pv_plist);
 1781                 TAILQ_REMOVE(&m->md.pv_list, pv, pv_list);
 1782                 m->md.pv_list_count--;
 1783                 pmap_unuse_pt(pv->pv_pmap, pv->pv_va);
 1784                 PMAP_UNLOCK(pv->pv_pmap);
 1785                 free_pv_entry(pv);
 1786         }
 1787         vm_page_flag_clear(m, PG_WRITEABLE);
 1788         sched_unpin();
 1789 }
 1790 
 1791 /*
 1792  *      Set the physical protection on the
 1793  *      specified range of this map as requested.
 1794  */
 1795 void
 1796 pmap_protect(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot)
 1797 {
 1798         vm_offset_t pdnxt;
 1799         pd_entry_t ptpaddr;
 1800         int anychanged;
 1801 
 1802         if ((prot & VM_PROT_READ) == VM_PROT_NONE) {
 1803                 pmap_remove(pmap, sva, eva);
 1804                 return;
 1805         }
 1806 
 1807         if (prot & VM_PROT_WRITE)
 1808                 return;
 1809 
 1810         anychanged = 0;
 1811 
 1812         vm_page_lock_queues();
 1813         sched_pin();
 1814         PMAP_LOCK(pmap);
 1815         for (; sva < eva; sva = pdnxt) {
 1816                 unsigned obits, pbits, pdirindex;
 1817 
 1818                 pdnxt = (sva + NBPDR) & ~PDRMASK;
 1819 
 1820                 pdirindex = sva >> PDRSHIFT;
 1821                 ptpaddr = pmap->pm_pdir[pdirindex];
 1822 
 1823                 /*
 1824                  * Weed out invalid mappings. Note: we assume that the page
 1825                  * directory table is always allocated, and in kernel virtual.
 1826                  */
 1827                 if (ptpaddr == 0)
 1828                         continue;
 1829 
 1830                 /*
 1831                  * Check for large page.
 1832                  */
 1833                 if ((ptpaddr & PG_PS) != 0) {
 1834                         pmap->pm_pdir[pdirindex] &= ~(PG_M|PG_RW);
 1835                         pmap->pm_stats.resident_count -= NBPDR / PAGE_SIZE;
 1836                         anychanged = 1;
 1837                         continue;
 1838                 }
 1839 
 1840                 if (pdnxt > eva)
 1841                         pdnxt = eva;
 1842 
 1843                 for (; sva != pdnxt; sva += PAGE_SIZE) {
 1844                         pt_entry_t *pte;
 1845                         vm_page_t m;
 1846 
 1847                         if ((pte = pmap_pte_quick(pmap, sva)) == NULL)
 1848                                 continue;
 1849 retry:
 1850                         /*
 1851                          * Regardless of whether a pte is 32 or 64 bits in
 1852                          * size, PG_RW, PG_A, and PG_M are among the least
 1853                          * significant 32 bits.
 1854                          */
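                              /*
                               * Sketch of the update protocol used below: take
                               * a snapshot of the pte, clear PG_RW and PG_M in
                               * a local copy, and install it with an atomic
                               * compare-and-set.  If the MMU set PG_A or PG_M
                               * concurrently, the cmpset fails and the loop
                               * retries from the fresh value.
                               */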
 1855                         obits = pbits = *(u_int *)pte;
 1856                         if (pbits & PG_MANAGED) {
 1857                                 m = NULL;
 1858                                 if (pbits & PG_A) {
 1859                                         m = PHYS_TO_VM_PAGE(pbits);
 1860                                         vm_page_flag_set(m, PG_REFERENCED);
 1861                                         pbits &= ~PG_A;
 1862                                 }
 1863                                 if ((pbits & PG_M) != 0 &&
 1864                                     pmap_track_modified(sva)) {
 1865                                         if (m == NULL)
 1866                                                 m = PHYS_TO_VM_PAGE(pbits);
 1867                                         vm_page_dirty(m);
 1868                                 }
 1869                         }
 1870 
 1871                         pbits &= ~(PG_RW | PG_M);
 1872 
 1873                         if (pbits != obits) {
 1874                                 if (!atomic_cmpset_int((u_int *)pte, obits,
 1875                                     pbits))
 1876                                         goto retry;
 1877                                 anychanged = 1;
 1878                         }
 1879                 }
 1880         }
 1881         sched_unpin();
 1882         vm_page_unlock_queues();
 1883         if (anychanged)
 1884                 pmap_invalidate_all(pmap);
 1885         PMAP_UNLOCK(pmap);
 1886 }
 1887 
 1888 /*
 1889  *      Insert the given physical page (p) at
 1890  *      the specified virtual address (v) in the
 1891  *      target physical map with the protection requested.
 1892  *
 1893  *      If specified, the page will be wired down, meaning
 1894  *      that the related pte cannot be reclaimed.
 1895  *
 1896  *      NB:  This is the only routine which MAY NOT lazy-evaluate
 1897  *      or lose information.  That is, this routine must actually
 1898  *      insert this page into the given map NOW.
 1899  */
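      /*
       * A typical call, with hypothetical values for illustration only:
       *
       *      pmap_enter(pmap, trunc_page(va), m, VM_PROT_READ, FALSE);
       *
       * enters an unwired, read-only mapping of page m at va.
       */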
 1900 void
 1901 pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
 1902            boolean_t wired)
 1903 {
 1904         vm_paddr_t pa;
 1905         register pt_entry_t *pte;
 1906         vm_paddr_t opa;
 1907         pt_entry_t origpte, newpte;
 1908         vm_page_t mpte, om;
 1909 
 1910         va &= PG_FRAME;
 1911 #ifdef PMAP_DIAGNOSTIC
 1912         if (va > VM_MAX_KERNEL_ADDRESS)
 1913                 panic("pmap_enter: toobig");
 1914         if ((va >= UPT_MIN_ADDRESS) && (va < UPT_MAX_ADDRESS))
 1915                 panic("pmap_enter: invalid to pmap_enter page table pages (va: 0x%x)", va);
 1916 #endif
 1917 
 1918         mpte = NULL;
 1919 
 1920         vm_page_lock_queues();
 1921         PMAP_LOCK(pmap);
 1922         sched_pin();
 1923 
 1924         /*
 1925          * In the case that a page table page is not
 1926          * resident, we are creating it here.
 1927          */
 1928         if (va < VM_MAXUSER_ADDRESS) {
 1929                 mpte = pmap_allocpte(pmap, va, M_WAITOK);
 1930         }
 1931 #if 0 && defined(PMAP_DIAGNOSTIC)
 1932         else {
 1933                 pd_entry_t *pdeaddr = pmap_pde(pmap, va);
 1934                 origpte = *pdeaddr;
 1935                 if ((origpte & PG_V) == 0) { 
 1936                         panic("pmap_enter: invalid kernel page table page, pdir=%p, pde=%p, va=%p\n",
 1937                                 pmap->pm_pdir[PTDPTDI], origpte, va);
 1938                 }
 1939         }
 1940 #endif
 1941 
 1942         pte = pmap_pte_quick(pmap, va);
 1943 
 1944         /*
 1945          * Page Directory table entry not valid, we need a new PT page
 1946          */
 1947         if (pte == NULL) {
 1948                 panic("pmap_enter: invalid page directory pdir=%#jx, va=%#x\n",
 1949                         (uintmax_t)pmap->pm_pdir[PTDPTDI], va);
 1950         }
 1951 
 1952         pa = VM_PAGE_TO_PHYS(m);
 1953         om = NULL;
 1954         origpte = *pte;
 1955         opa = origpte & PG_FRAME;
 1956 
 1957         if (origpte & PG_PS) {
 1958                 /*
 1959                  * Yes, I know this will truncate upper address bits for PAE,
 1960                  * but I'm actually more interested in the lower bits
 1961                  */
 1962                 printf("pmap_enter: va %p, pte %p, origpte %p\n",
 1963                     (void *)va, (void *)pte, (void *)(uintptr_t)origpte);
 1964                 panic("pmap_enter: attempted pmap_enter on 4MB page");
 1965         }
 1966 
 1967         /*
 1968          * Mapping has not changed, must be protection or wiring change.
 1969          */
 1970         if (origpte && (opa == pa)) {
 1971                 /*
 1972                  * Wiring change, just update stats. We don't worry about
 1973                  * wiring PT pages as they remain resident as long as there
 1974                  * are valid mappings in them. Hence, if a user page is wired,
 1975                  * the PT page will be also.
 1976                  */
 1977                 if (wired && ((origpte & PG_W) == 0))
 1978                         pmap->pm_stats.wired_count++;
 1979                 else if (!wired && (origpte & PG_W))
 1980                         pmap->pm_stats.wired_count--;
 1981 
 1982 #if defined(PMAP_DIAGNOSTIC)
 1983                 if (pmap_nw_modified((pt_entry_t) origpte)) {
 1984                         printf(
 1985         "pmap_enter: modified page not writable: va: 0x%x, pte: 0x%x\n",
 1986                             va, origpte);
 1987                 }
 1988 #endif
 1989 
 1990                 /*
 1991                  * Remove extra pte reference
 1992                  */
 1993                 if (mpte)
 1994                         mpte->wire_count--;
 1995 
 1996                 /*
 1997                  * We might be turning off write access to the page,
 1998                  * so we go ahead and sense modify status.
 1999                  */
 2000                 if (origpte & PG_MANAGED) {
 2001                         om = m;
 2002                         pa |= PG_MANAGED;
 2003                 }
 2004                 goto validate;
 2005         } 
 2006         /*
 2007          * Mapping has changed, invalidate old range and fall through to
 2008          * handle validating new mapping.
 2009          */
 2010         if (opa) {
 2011                 int err;
 2012                 if (origpte & PG_W)
 2013                         pmap->pm_stats.wired_count--;
 2014                 if (origpte & PG_MANAGED) {
 2015                         om = PHYS_TO_VM_PAGE(opa);
 2016                         err = pmap_remove_entry(pmap, om, va);
 2017                 } else
 2018                         err = pmap_unuse_pt(pmap, va);
 2019                 if (err)
 2020                         panic("pmap_enter: pte vanished, va: 0x%x", va);
 2021         } else
 2022                 pmap->pm_stats.resident_count++;
 2023 
 2024         /*
 2025          * Enter on the PV list if part of our managed memory. Note that we
 2026          * raise IPL while manipulating pv_table since pmap_enter can be
 2027          * called at interrupt time.
 2028          */
 2029         if (pmap_initialized && 
 2030             (m->flags & (PG_FICTITIOUS|PG_UNMANAGED)) == 0) {
 2031                 pmap_insert_entry(pmap, va, m);
 2032                 pa |= PG_MANAGED;
 2033         }
 2034 
 2035         /*
 2036          * Increment counters
 2037          */
 2038         if (wired)
 2039                 pmap->pm_stats.wired_count++;
 2040 
 2041 validate:
 2042         /*
 2043          * Now validate mapping with desired protection/wiring.
 2044          */
 2045         newpte = (pt_entry_t)(pa | PG_V);
 2046         if ((prot & VM_PROT_WRITE) != 0)
 2047                 newpte |= PG_RW;
 2048         if (wired)
 2049                 newpte |= PG_W;
 2050         if (va < VM_MAXUSER_ADDRESS)
 2051                 newpte |= PG_U;
 2052         if (pmap == kernel_pmap)
 2053                 newpte |= pgeflag;
 2054 
 2055         /*
 2056          * if the mapping or permission bits are different, we need
 2057          * to update the pte.
 2058          */
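              /*
               * PG_M and PG_A are masked out of the comparison because the
               * MMU sets them asynchronously; a pte that differs only in
               * those bits does not need to be rewritten.
               */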
 2059         if ((origpte & ~(PG_M|PG_A)) != newpte) {
 2060                 if (origpte & PG_MANAGED) {
 2061                         origpte = pte_load_store(pte, newpte | PG_A);
 2062                         if ((origpte & PG_M) && pmap_track_modified(va))
 2063                                 vm_page_dirty(om);
 2064                         if (origpte & PG_A)
 2065                                 vm_page_flag_set(om, PG_REFERENCED);
 2066                 } else
 2067                         pte_store(pte, newpte | PG_A);
 2068                 if (origpte) {
 2069                         pmap_invalidate_page(pmap, va);
 2070                 }
 2071         }
 2072         sched_unpin();
 2073         vm_page_unlock_queues();
 2074         PMAP_UNLOCK(pmap);
 2075 }
 2076 
 2077 /*
 2078  * This code makes some *MAJOR* assumptions:
 2079  * 1. The pmap is the current pmap and it exists.
 2080  * 2. Not wired.
 2081  * 3. Read access.
 2082  * 4. No page table pages.
 2083  * 5. Tlbflush is deferred to calling procedure.
 2084  * 6. Page IS managed.
 2085  * but is *MUCH* faster than pmap_enter...
 2086  */
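      /*
       * Because only previously-invalid ptes are filled in below (an
       * existing mapping causes an early return), no stale TLB entry can
       * exist for va; that is what makes assumption 5, deferring the TLB
       * flush to the caller, safe.
       */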
 2087 
 2088 vm_page_t
 2089 pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_page_t mpte)
 2090 {
 2091         pt_entry_t *pte;
 2092         vm_paddr_t pa;
 2093 
 2094         vm_page_lock_queues();
 2095         PMAP_LOCK(pmap);
 2096 
 2097         /*
 2098          * In the case that a page table page is not
 2099          * resident, we are creating it here.
 2100          */
 2101         if (va < VM_MAXUSER_ADDRESS) {
 2102                 unsigned ptepindex;
 2103                 pd_entry_t ptepa;
 2104 
 2105                 /*
 2106                  * Calculate pagetable page index
 2107                  */
 2108                 ptepindex = va >> PDRSHIFT;
 2109                 if (mpte && (mpte->pindex == ptepindex)) {
 2110                         mpte->wire_count++;
 2111                 } else {
 2112 retry:
 2113                         /*
 2114                          * Get the page directory entry
 2115                          */
 2116                         ptepa = pmap->pm_pdir[ptepindex];
 2117 
 2118                         /*
 2119                          * If the page table page is mapped, we just increment
 2120                          * the hold count, and activate it.
 2121                          */
 2122                         if (ptepa) {
 2123                                 if (ptepa & PG_PS)
 2124                                         panic("pmap_enter_quick: unexpected mapping into 4MB page");
 2125                                 mpte = PHYS_TO_VM_PAGE(ptepa);
 2126                                 mpte->wire_count++;
 2127                         } else {
 2128                                 mpte = _pmap_allocpte(pmap, ptepindex,
 2129                                     M_WAITOK);
 2130                                 if (mpte == NULL)
 2131                                         goto retry;
 2132                         }
 2133                 }
 2134         } else {
 2135                 mpte = NULL;
 2136         }
 2137 
 2138         /*
 2139          * This call to vtopte makes the assumption that we are
 2140          * entering the page into the current pmap.  In order to support
 2141          * quick entry into any pmap, one would likely use pmap_pte_quick.
 2142          * But that isn't as quick as vtopte.
 2143          */
 2144         pte = vtopte(va);
 2145         if (*pte) {
 2146                 if (mpte != NULL) {
 2147                         pmap_unwire_pte_hold(pmap, mpte);
 2148                         mpte = NULL;
 2149                 }
 2150                 goto out;
 2151         }
 2152 
 2153         /*
 2154          * Enter on the PV list if part of our managed memory. Note that we
 2155          * raise IPL while manipulating pv_table since pmap_enter can be
 2156          * called at interrupt time.
 2157          */
 2158         if ((m->flags & (PG_FICTITIOUS|PG_UNMANAGED)) == 0)
 2159                 pmap_insert_entry(pmap, va, m);
 2160 
 2161         /*
 2162          * Increment counters
 2163          */
 2164         pmap->pm_stats.resident_count++;
 2165 
 2166         pa = VM_PAGE_TO_PHYS(m);
 2167 
 2168         /*
 2169          * Now validate mapping with RO protection
 2170          */
 2171         if (m->flags & (PG_FICTITIOUS|PG_UNMANAGED))
 2172                 pte_store(pte, pa | PG_V | PG_U);
 2173         else
 2174                 pte_store(pte, pa | PG_V | PG_U | PG_MANAGED);
 2175 out:
 2176         vm_page_unlock_queues();
 2177         PMAP_UNLOCK(pmap);
 2178         return mpte;
 2179 }
 2180 
 2181 /*
 2182  * Make a temporary mapping for a physical address.  This is only intended
 2183  * to be used for panic dumps.
 2184  */
 2185 void *
 2186 pmap_kenter_temporary(vm_paddr_t pa, int i)
 2187 {
 2188         vm_offset_t va;
 2189 
 2190         va = (vm_offset_t)crashdumpmap + (i * PAGE_SIZE);
 2191         pmap_kenter(va, pa);
 2192 #ifndef I386_CPU
 2193         invlpg(va);
 2194 #else
 2195         invltlb();
 2196 #endif
 2197         return ((void *)crashdumpmap);
 2198 }
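      /*
       * A minimal usage sketch (illustrative only): the dump code maps one
       * physical page at a time,
       *
       *      va = pmap_kenter_temporary(dump_pa, 0);
       *      (write PAGE_SIZE bytes starting at va to the dump device)
       *
       * and never unmaps it explicitly; the next call simply overwrites the
       * same crashdumpmap slot.
       */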
 2199 
 2200 /*
 2201  * This code maps large physical mmap regions into the
 2202  * processor address space.  Note that some shortcuts
 2203  * are taken, but the code works.
 2204  */
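      /*
       * The shortcut: when the CPU supports PSE and both addr and size are
       * NBPDR-aligned (4 MB, or 2 MB with PAE), the object's backing pages
       * are mapped directly with PG_PS page-directory entries, so no page
       * table pages are consumed for the region.
       */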
 2205 void
 2206 pmap_object_init_pt(pmap_t pmap, vm_offset_t addr,
 2207                     vm_object_t object, vm_pindex_t pindex,
 2208                     vm_size_t size)
 2209 {
 2210         vm_page_t p;
 2211 
 2212         VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
 2213         KASSERT(object->type == OBJT_DEVICE,
 2214             ("pmap_object_init_pt: non-device object"));
 2215         if (pseflag && 
 2216             ((addr & (NBPDR - 1)) == 0) && ((size & (NBPDR - 1)) == 0)) {
 2217                 int i;
 2218                 vm_page_t m[1];
 2219                 unsigned int ptepindex;
 2220                 int npdes;
 2221                 pd_entry_t ptepa;
 2222 
 2223                 PMAP_LOCK(pmap);
 2224                 if (pmap->pm_pdir[ptepindex = (addr >> PDRSHIFT)])
 2225                         goto out;
 2226                 PMAP_UNLOCK(pmap);
 2227 retry:
 2228                 p = vm_page_lookup(object, pindex);
 2229                 if (p != NULL) {
 2230                         vm_page_lock_queues();
 2231                         if (vm_page_sleep_if_busy(p, FALSE, "init4p"))
 2232                                 goto retry;
 2233                 } else {
 2234                         p = vm_page_alloc(object, pindex, VM_ALLOC_NORMAL);
 2235                         if (p == NULL)
 2236                                 return;
 2237                         m[0] = p;
 2238 
 2239                         if (vm_pager_get_pages(object, m, 1, 0) != VM_PAGER_OK) {
 2240                                 vm_page_lock_queues();
 2241                                 vm_page_free(p);
 2242                                 vm_page_unlock_queues();
 2243                                 return;
 2244                         }
 2245 
 2246                         p = vm_page_lookup(object, pindex);
 2247                         vm_page_lock_queues();
 2248                         vm_page_wakeup(p);
 2249                 }
 2250                 vm_page_unlock_queues();
 2251 
 2252                 ptepa = VM_PAGE_TO_PHYS(p);
 2253                 if (ptepa & (NBPDR - 1))
 2254                         return;
 2255 
 2256                 p->valid = VM_PAGE_BITS_ALL;
 2257 
 2258                 PMAP_LOCK(pmap);
 2259                 pmap->pm_stats.resident_count += size >> PAGE_SHIFT;
 2260                 npdes = size >> PDRSHIFT;
 2261                 for(i = 0; i < npdes; i++) {
 2262                         pde_store(&pmap->pm_pdir[ptepindex],
 2263                             ptepa | PG_U | PG_RW | PG_V | PG_PS);
 2264                         ptepa += NBPDR;
 2265                         ptepindex += 1;
 2266                 }
 2267                 pmap_invalidate_all(pmap);
 2268 out:
 2269                 PMAP_UNLOCK(pmap);
 2270         }
 2271 }
 2272 
 2273 /*
 2274  *      Routine:        pmap_change_wiring
 2275  *      Function:       Change the wiring attribute for a map/virtual-address
 2276  *                      pair.
 2277  *      In/out conditions:
 2278  *                      The mapping must already exist in the pmap.
 2279  */
 2280 void
 2281 pmap_change_wiring(pmap, va, wired)
 2282         register pmap_t pmap;
 2283         vm_offset_t va;
 2284         boolean_t wired;
 2285 {
 2286         register pt_entry_t *pte;
 2287 
 2288         PMAP_LOCK(pmap);
 2289         pte = pmap_pte(pmap, va);
 2290 
 2291         if (wired && !pmap_pte_w(pte))
 2292                 pmap->pm_stats.wired_count++;
 2293         else if (!wired && pmap_pte_w(pte))
 2294                 pmap->pm_stats.wired_count--;
 2295 
 2296         /*
 2297          * Wiring is not a hardware characteristic so there is no need to
 2298          * invalidate TLB.
 2299          */
 2300         pmap_pte_set_w(pte, wired);
 2301         pmap_pte_release(pte);
 2302         PMAP_UNLOCK(pmap);
 2303 }
 2304 
 2305 
 2306 
 2307 /*
 2308  *      Copy the range specified by src_addr/len
 2309  *      from the source map to the range dst_addr/len
 2310  *      in the destination map.
 2311  *
 2312  *      This routine is only advisory and need not do anything.
 2313  */
 2314 
 2315 void
 2316 pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr, vm_size_t len,
 2317           vm_offset_t src_addr)
 2318 {
 2319         vm_offset_t addr;
 2320         vm_offset_t end_addr = src_addr + len;
 2321         vm_offset_t pdnxt;
 2322         vm_page_t m;
 2323 
 2324         if (dst_addr != src_addr)
 2325                 return;
 2326 
 2327         if (!pmap_is_current(src_pmap))
 2328                 return;
 2329 
 2330         vm_page_lock_queues();
 2331         if (dst_pmap < src_pmap) {
 2332                 PMAP_LOCK(dst_pmap);
 2333                 PMAP_LOCK(src_pmap);
 2334         } else {
 2335                 PMAP_LOCK(src_pmap);
 2336                 PMAP_LOCK(dst_pmap);
 2337         }
 2338         sched_pin();
 2339         for (addr = src_addr; addr < end_addr; addr = pdnxt) {
 2340                 pt_entry_t *src_pte, *dst_pte;
 2341                 vm_page_t dstmpte, srcmpte;
 2342                 pd_entry_t srcptepaddr;
 2343                 unsigned ptepindex;
 2344 
 2345                 if (addr >= UPT_MIN_ADDRESS)
 2346                         panic("pmap_copy: invalid to pmap_copy page tables");
 2347 
 2348                 /*
 2349                  * Don't let optional prefaulting of pages make us go
 2350                  * way below the low water mark of free pages or way
 2351                  * above high water mark of used pv entries.
 2352                  */
 2353                 if (cnt.v_free_count < cnt.v_free_reserved ||
 2354                     pv_entry_count > pv_entry_high_water)
 2355                         break;
 2356                 
 2357                 pdnxt = (addr + NBPDR) & ~PDRMASK;
 2358                 ptepindex = addr >> PDRSHIFT;
 2359 
 2360                 srcptepaddr = src_pmap->pm_pdir[ptepindex];
 2361                 if (srcptepaddr == 0)
 2362                         continue;
 2363                         
 2364                 if (srcptepaddr & PG_PS) {
 2365                         if (dst_pmap->pm_pdir[ptepindex] == 0) {
 2366                                 dst_pmap->pm_pdir[ptepindex] = srcptepaddr;
 2367                                 dst_pmap->pm_stats.resident_count +=
 2368                                     NBPDR / PAGE_SIZE;
 2369                         }
 2370                         continue;
 2371                 }
 2372 
 2373                 srcmpte = PHYS_TO_VM_PAGE(srcptepaddr);
 2374                 if (srcmpte->wire_count == 0)
 2375                         panic("pmap_copy: source page table page is unused");
 2376 
 2377                 if (pdnxt > end_addr)
 2378                         pdnxt = end_addr;
 2379 
 2380                 src_pte = vtopte(addr);
 2381                 while (addr < pdnxt) {
 2382                         pt_entry_t ptetemp;
 2383                         ptetemp = *src_pte;
 2384                         /*
 2385                          * we only virtual copy managed pages
 2386                          */
 2387                         if ((ptetemp & PG_MANAGED) != 0) {
 2388                                 /*
 2389                                  * We have to check after allocpte for the
 2390                                  * pte still being around...  allocpte can
 2391                                  * block.
 2392                                  */
 2393                                 dstmpte = pmap_allocpte(dst_pmap, addr,
 2394                                     M_NOWAIT);
 2395                                 if (dstmpte == NULL)
 2396                                         break;
 2397                                 dst_pte = pmap_pte_quick(dst_pmap, addr);
 2398                                 if (*dst_pte == 0) {
 2399                                         /*
 2400                                          * Clear the modified and
 2401                                          * accessed (referenced) bits
 2402                                          * during the copy.
 2403                                          */
 2404                                         m = PHYS_TO_VM_PAGE(ptetemp);
 2405                                         *dst_pte = ptetemp & ~(PG_M | PG_A);
 2406                                         dst_pmap->pm_stats.resident_count++;
 2407                                         pmap_insert_entry(dst_pmap, addr, m);
 2408                                 } else
 2409                                         pmap_unwire_pte_hold(dst_pmap, dstmpte);
 2410                                 if (dstmpte->wire_count >= srcmpte->wire_count)
 2411                                         break;
 2412                         }
 2413                         addr += PAGE_SIZE;
 2414                         src_pte++;
 2415                 }
 2416         }
 2417         sched_unpin();
 2418         vm_page_unlock_queues();
 2419         PMAP_UNLOCK(src_pmap);
 2420         PMAP_UNLOCK(dst_pmap);
 2421 }       
 2422 
 2423 static __inline void
 2424 pagezero(void *page)
 2425 {
 2426 #if defined(I686_CPU)
 2427         if (cpu_class == CPUCLASS_686) {
 2428 #if defined(CPU_ENABLE_SSE)
 2429                 if (cpu_feature & CPUID_SSE2)
 2430                         sse2_pagezero(page);
 2431                 else
 2432 #endif
 2433                         i686_pagezero(page);
 2434         } else
 2435 #endif
 2436                 bzero(page, PAGE_SIZE);
 2437 }
 2438 
 2439 /*
 2440  *      pmap_zero_page zeros the specified hardware page by mapping 
 2441  *      the page into KVM and using bzero to clear its contents.
 2442  */
 2443 void
 2444 pmap_zero_page(vm_page_t m)
 2445 {
 2446         struct sysmaps *sysmaps;
 2447 
 2448         sysmaps = &sysmaps_pcpu[PCPU_GET(cpuid)];
 2449         mtx_lock(&sysmaps->lock);
 2450         if (*sysmaps->CMAP2)
 2451                 panic("pmap_zero_page: CMAP2 busy");
 2452         sched_pin();
 2453         *sysmaps->CMAP2 = PG_V | PG_RW | VM_PAGE_TO_PHYS(m) | PG_A | PG_M;
 2454         invlcaddr(sysmaps->CADDR2);
 2455         pagezero(sysmaps->CADDR2);
 2456         *sysmaps->CMAP2 = 0;
 2457         sched_unpin();
 2458         mtx_unlock(&sysmaps->lock);
 2459 }
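      /*
       * Design note: each CPU has a private CMAP2/CADDR2 window in
       * sysmaps_pcpu[], and sched_pin() keeps the thread on that CPU while
       * the window is in use, so the temporary mapping never has to be
       * shot down on other processors; invalidating the local TLB entry
       * with invlcaddr() is sufficient.
       */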
 2460 
 2461 /*
 2462  *      pmap_zero_page_area zeros the specified hardware page by mapping 
 2463  *      the page into KVM and using bzero to clear its contents.
 2464  *
 2465  *      off and size may not cover an area beyond a single hardware page.
 2466  */
 2467 void
 2468 pmap_zero_page_area(vm_page_t m, int off, int size)
 2469 {
 2470         struct sysmaps *sysmaps;
 2471 
 2472         sysmaps = &sysmaps_pcpu[PCPU_GET(cpuid)];
 2473         mtx_lock(&sysmaps->lock);
 2474         if (*sysmaps->CMAP2)
 2475                 panic("pmap_zero_page_area: CMAP2 busy");
 2476         sched_pin();
 2477         *sysmaps->CMAP2 = PG_V | PG_RW | VM_PAGE_TO_PHYS(m) | PG_A | PG_M;
 2478         invlcaddr(sysmaps->CADDR2);
 2479         if (off == 0 && size == PAGE_SIZE) 
 2480                 pagezero(sysmaps->CADDR2);
 2481         else
 2482                 bzero((char *)sysmaps->CADDR2 + off, size);
 2483         *sysmaps->CMAP2 = 0;
 2484         sched_unpin();
 2485         mtx_unlock(&sysmaps->lock);
 2486 }
 2487 
 2488 /*
 2489  *      pmap_zero_page_idle zeros the specified hardware page by mapping 
 2490  *      the page into KVM and using bzero to clear its contents.  This
 2491  *      is intended to be called from the vm_pagezero process only and
 2492  *      outside of Giant.
 2493  */
 2494 void
 2495 pmap_zero_page_idle(vm_page_t m)
 2496 {
 2497 
 2498         if (*CMAP3)
 2499                 panic("pmap_zero_page_idle: CMAP3 busy");
 2500         sched_pin();
 2501         *CMAP3 = PG_V | PG_RW | VM_PAGE_TO_PHYS(m) | PG_A | PG_M;
 2502         invlcaddr(CADDR3);
 2503         pagezero(CADDR3);
 2504         *CMAP3 = 0;
 2505         sched_unpin();
 2506 }
 2507 
 2508 /*
 2509  *      pmap_copy_page copies the specified (machine independent)
 2510  *      page by mapping the page into virtual memory and using
 2511  *      bcopy to copy the page, one machine dependent page at a
 2512  *      time.
 2513  */
 2514 void
 2515 pmap_copy_page(vm_page_t src, vm_page_t dst)
 2516 {
 2517         struct sysmaps *sysmaps;
 2518 
 2519         sysmaps = &sysmaps_pcpu[PCPU_GET(cpuid)];
 2520         mtx_lock(&sysmaps->lock);
 2521         if (*sysmaps->CMAP1)
 2522                 panic("pmap_copy_page: CMAP1 busy");
 2523         if (*sysmaps->CMAP2)
 2524                 panic("pmap_copy_page: CMAP2 busy");
 2525         sched_pin();
 2526 #ifdef I386_CPU
 2527         invltlb();
 2528 #else
 2529         invlpg((u_int)sysmaps->CADDR1);
 2530         invlpg((u_int)sysmaps->CADDR2);
 2531 #endif
 2532         *sysmaps->CMAP1 = PG_V | VM_PAGE_TO_PHYS(src) | PG_A;
 2533         *sysmaps->CMAP2 = PG_V | PG_RW | VM_PAGE_TO_PHYS(dst) | PG_A | PG_M;
 2534         bcopy(sysmaps->CADDR1, sysmaps->CADDR2, PAGE_SIZE);
 2535         *sysmaps->CMAP1 = 0;
 2536         *sysmaps->CMAP2 = 0;
 2537         sched_unpin();
 2538         mtx_unlock(&sysmaps->lock);
 2539 }
 2540 
 2541 /*
 2542  * Returns true if the pmap's pv is one of the first
 2543  * 16 pvs linked to from this page.  This count may
 2544  * be changed upwards or downwards in the future; it
 2545  * is only necessary that true be returned for a small
 2546  * subset of pmaps for proper page aging.
 2547  */
 2548 boolean_t
 2549 pmap_page_exists_quick(pmap, m)
 2550         pmap_t pmap;
 2551         vm_page_t m;
 2552 {
 2553         pv_entry_t pv;
 2554         int loops = 0;
 2555 
 2556         if (!pmap_initialized || (m->flags & PG_FICTITIOUS))
 2557                 return FALSE;
 2558 
 2559         mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 2560         TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
 2561                 if (pv->pv_pmap == pmap) {
 2562                         return TRUE;
 2563                 }
 2564                 loops++;
 2565                 if (loops >= 16)
 2566                         break;
 2567         }
 2568         return (FALSE);
 2569 }
 2570 
 2571 #define PMAP_REMOVE_PAGES_CURPROC_ONLY
 2572 /*
 2573  * Remove all pages from the specified address space;
 2574  * this aids process exit speed.  Also, this code is
 2575  * special-cased for the current process only, but it
 2576  * can have the more generic (and slightly slower)
 2577  * mode enabled.  This is much faster than pmap_remove
 2578  * in the case of running down an entire address space.
 2579  */
 2580 void
 2581 pmap_remove_pages(pmap, sva, eva)
 2582         pmap_t pmap;
 2583         vm_offset_t sva, eva;
 2584 {
 2585         pt_entry_t *pte, tpte;
 2586         vm_page_t m;
 2587         pv_entry_t pv, npv;
 2588 
 2589 #ifdef PMAP_REMOVE_PAGES_CURPROC_ONLY
 2590         if (pmap != vmspace_pmap(curthread->td_proc->p_vmspace)) {
 2591                 printf("warning: pmap_remove_pages called with non-current pmap\n");
 2592                 return;
 2593         }
 2594 #endif
 2595         vm_page_lock_queues();
 2596         PMAP_LOCK(pmap);
 2597         sched_pin();
 2598         for (pv = TAILQ_FIRST(&pmap->pm_pvlist); pv; pv = npv) {
 2599 
 2600                 if (pv->pv_va >= eva || pv->pv_va < sva) {
 2601                         npv = TAILQ_NEXT(pv, pv_plist);
 2602                         continue;
 2603                 }
 2604 
 2605 #ifdef PMAP_REMOVE_PAGES_CURPROC_ONLY
 2606                 pte = vtopte(pv->pv_va);
 2607 #else
 2608                 pte = pmap_pte_quick(pmap, pv->pv_va);
 2609 #endif
 2610                 tpte = *pte;
 2611 
 2612                 if (tpte == 0) {
 2613                         printf("TPTE at %p  IS ZERO @ VA %08x\n",
 2614                                                         pte, pv->pv_va);
 2615                         panic("bad pte");
 2616                 }
 2617 
 2618                 /*
 2619                  * We cannot remove wired pages from a process' mapping at this time.
 2620                  */
 2621                 if (tpte & PG_W) {
 2622                         npv = TAILQ_NEXT(pv, pv_plist);
 2623                         continue;
 2624                 }
 2625 
 2626                 m = PHYS_TO_VM_PAGE(tpte);
 2627                 KASSERT(m->phys_addr == (tpte & PG_FRAME),
 2628                     ("vm_page_t %p phys_addr mismatch %016jx %016jx",
 2629                     m, (uintmax_t)m->phys_addr, (uintmax_t)tpte));
 2630 
 2631                 KASSERT(m < &vm_page_array[vm_page_array_size],
 2632                         ("pmap_remove_pages: bad tpte %#jx", (uintmax_t)tpte));
 2633 
 2634                 pmap->pm_stats.resident_count--;
 2635 
 2636                 pte_clear(pte);
 2637 
 2638                 /*
 2639                  * Update the vm_page_t clean and reference bits.
 2640                  */
 2641                 if (tpte & PG_M) {
 2642                         vm_page_dirty(m);
 2643                 }
 2644 
 2645                 npv = TAILQ_NEXT(pv, pv_plist);
 2646                 TAILQ_REMOVE(&pmap->pm_pvlist, pv, pv_plist);
 2647 
 2648                 m->md.pv_list_count--;
 2649                 TAILQ_REMOVE(&m->md.pv_list, pv, pv_list);
 2650                 if (TAILQ_EMPTY(&m->md.pv_list))
 2651                         vm_page_flag_clear(m, PG_WRITEABLE);
 2652 
 2653                 pmap_unuse_pt(pmap, pv->pv_va);
 2654                 free_pv_entry(pv);
 2655         }
 2656         sched_unpin();
 2657         pmap_invalidate_all(pmap);
 2658         PMAP_UNLOCK(pmap);
 2659         vm_page_unlock_queues();
 2660 }
 2661 
 2662 /*
 2663  *      pmap_is_modified:
 2664  *
 2665  *      Return whether or not the specified physical page was modified
 2666  *      in any physical maps.
 2667  */
 2668 boolean_t
 2669 pmap_is_modified(vm_page_t m)
 2670 {
 2671         pv_entry_t pv;
 2672         pt_entry_t *pte;
 2673         boolean_t rv;
 2674 
 2675         rv = FALSE;
 2676         if (!pmap_initialized || (m->flags & PG_FICTITIOUS))
 2677                 return (rv);
 2678 
 2679         sched_pin();
 2680         mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 2681         TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
 2682                 /*
 2683                  * Mappings at addresses that are not tracked for
 2684                  * modification (e.g. within the clean submap) are
 2685                  * skipped; they never report as modified.
 2686                  */
 2687                 if (!pmap_track_modified(pv->pv_va))
 2688                         continue;
 2689 #if defined(PMAP_DIAGNOSTIC)
 2690                 if (!pv->pv_pmap) {
 2691                         printf("Null pmap (tb) at va: 0x%x\n", pv->pv_va);
 2692                         continue;
 2693                 }
 2694 #endif
 2695                 PMAP_LOCK(pv->pv_pmap);
 2696                 pte = pmap_pte_quick(pv->pv_pmap, pv->pv_va);
 2697                 rv = (*pte & PG_M) != 0;
 2698                 PMAP_UNLOCK(pv->pv_pmap);
 2699                 if (rv)
 2700                         break;
 2701         }
 2702         sched_unpin();
 2703         return (rv);
 2704 }
 2705 
 2706 /*
 2707  *      pmap_is_prefaultable:
 2708  *
 2709  *      Return whether or not the specified virtual address is eligible
 2710  *      for prefault.
 2711  */
 2712 boolean_t
 2713 pmap_is_prefaultable(pmap_t pmap, vm_offset_t addr)
 2714 {
 2715         pt_entry_t *pte;
 2716         boolean_t rv;
 2717 
 2718         rv = FALSE;
 2719         PMAP_LOCK(pmap);
 2720         if (*pmap_pde(pmap, addr)) {
 2721                 pte = vtopte(addr);
 2722                 rv = *pte == 0;
 2723         }
 2724         PMAP_UNLOCK(pmap);
 2725         return (rv);
 2726 }
 2727 
 2728 /*
 2729  *      Clear the given bit in each of the given page's ptes.  The bit is
 2730  *      expressed as a 32-bit mask.  Consequently, if the pte is 64 bits in
 2731  *      size, only a bit within the least significant 32 can be cleared.
 2732  */
 2733 static __inline void
 2734 pmap_clear_ptes(vm_page_t m, int bit)
 2735 {
 2736         register pv_entry_t pv;
 2737         pt_entry_t pbits, *pte;
 2738 
 2739         if (!pmap_initialized || (m->flags & PG_FICTITIOUS) ||
 2740             (bit == PG_RW && (m->flags & PG_WRITEABLE) == 0))
 2741                 return;
 2742 
 2743         sched_pin();
 2744         mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 2745         /*
 2746          * Loop over all current mappings, setting/clearing as appropriate.
 2747          * If setting RO, do we need to clear the VAC?
 2748          */
 2749         TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
 2750                 /*
 2751                  * don't write protect pager mappings
 2752                  */
 2753                 if (bit == PG_RW) {
 2754                         if (!pmap_track_modified(pv->pv_va))
 2755                                 continue;
 2756                 }
 2757 
 2758 #if defined(PMAP_DIAGNOSTIC)
 2759                 if (!pv->pv_pmap) {
 2760                         printf("Null pmap (cb) at va: 0x%x\n", pv->pv_va);
 2761                         continue;
 2762                 }
 2763 #endif
 2764 
 2765                 PMAP_LOCK(pv->pv_pmap);
 2766                 pte = pmap_pte_quick(pv->pv_pmap, pv->pv_va);
 2767 retry:
 2768                 pbits = *pte;
 2769                 if (pbits & bit) {
 2770                         if (bit == PG_RW) {
 2771                                 /*
 2772                                  * Regardless of whether a pte is 32 or 64 bits
 2773                                  * in size, PG_RW and PG_M are among the least
 2774                                  * significant 32 bits.
 2775                                  */
 2776                                 if (!atomic_cmpset_int((u_int *)pte, pbits,
 2777                                     pbits & ~(PG_RW | PG_M)))
 2778                                         goto retry;
 2779                                 if (pbits & PG_M) {
 2780                                         vm_page_dirty(m);
 2781                                 }
 2782                         } else {
 2783                                 atomic_clear_int((u_int *)pte, bit);
 2784                         }
 2785                         pmap_invalidate_page(pv->pv_pmap, pv->pv_va);
 2786                 }
 2787                 PMAP_UNLOCK(pv->pv_pmap);
 2788         }
 2789         if (bit == PG_RW)
 2790                 vm_page_flag_clear(m, PG_WRITEABLE);
 2791         sched_unpin();
 2792 }
 2793 
 2794 /*
 2795  *      pmap_page_protect:
 2796  *
 2797  *      Lower the permission for all mappings to a given page.
 2798  */
 2799 void
 2800 pmap_page_protect(vm_page_t m, vm_prot_t prot)
 2801 {
 2802         if ((prot & VM_PROT_WRITE) == 0) {
 2803                 if (prot & (VM_PROT_READ | VM_PROT_EXECUTE)) {
 2804                         pmap_clear_ptes(m, PG_RW);
 2805                 } else {
 2806                         pmap_remove_all(m);
 2807                 }
 2808         }
 2809 }
 2810 
 2811 /*
 2812  *      pmap_ts_referenced:
 2813  *
 2814  *      Return a count of reference bits for a page, clearing those bits.
 2815  *      It is not necessary for every reference bit to be cleared, but it
 2816  *      is necessary that 0 only be returned when there are truly no
 2817  *      reference bits set.
 2818  *
 2819  *      XXX: The exact number of bits to check and clear is a matter that
 2820  *      should be tested and standardized at some point in the future for
 2821  *      optimal aging of shared pages.
 2822  */
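      /*
       * Implementation note: every pv that is visited is rotated to the
       * tail of the page's pv list, so successive calls start with
       * different mappings, and the scan stops once more than four
       * referenced mappings have been found, keeping the cost bounded for
       * heavily shared pages.
       */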
 2823 int
 2824 pmap_ts_referenced(vm_page_t m)
 2825 {
 2826         register pv_entry_t pv, pvf, pvn;
 2827         pt_entry_t *pte;
 2828         pt_entry_t v;
 2829         int rtval = 0;
 2830 
 2831         if (!pmap_initialized || (m->flags & PG_FICTITIOUS))
 2832                 return (rtval);
 2833 
 2834         sched_pin();
 2835         mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 2836         if ((pv = TAILQ_FIRST(&m->md.pv_list)) != NULL) {
 2837 
 2838                 pvf = pv;
 2839 
 2840                 do {
 2841                         pvn = TAILQ_NEXT(pv, pv_list);
 2842 
 2843                         TAILQ_REMOVE(&m->md.pv_list, pv, pv_list);
 2844 
 2845                         TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_list);
 2846 
 2847                         if (!pmap_track_modified(pv->pv_va))
 2848                                 continue;
 2849 
 2850                         PMAP_LOCK(pv->pv_pmap);
 2851                         pte = pmap_pte_quick(pv->pv_pmap, pv->pv_va);
 2852 
 2853                         if (pte && ((v = pte_load(pte)) & PG_A) != 0) {
 2854                                 atomic_clear_int((u_int *)pte, PG_A);
 2855                                 pmap_invalidate_page(pv->pv_pmap, pv->pv_va);
 2856 
 2857                                 rtval++;
 2858                                 if (rtval > 4) {
 2859                                         PMAP_UNLOCK(pv->pv_pmap);
 2860                                         break;
 2861                                 }
 2862                         }
 2863                         PMAP_UNLOCK(pv->pv_pmap);
 2864                 } while ((pv = pvn) != NULL && pv != pvf);
 2865         }
 2866         sched_unpin();
 2867 
 2868         return (rtval);
 2869 }
 2870 
 2871 /*
 2872  *      Clear the modify bits on the specified physical page.
 2873  */
 2874 void
 2875 pmap_clear_modify(vm_page_t m)
 2876 {
 2877         pmap_clear_ptes(m, PG_M);
 2878 }
 2879 
 2880 /*
 2881  *      pmap_clear_reference:
 2882  *
 2883  *      Clear the reference bit on the specified physical page.
 2884  */
 2885 void
 2886 pmap_clear_reference(vm_page_t m)
 2887 {
 2888         pmap_clear_ptes(m, PG_A);
 2889 }
 2890 
 2891 /*
 2892  * Miscellaneous support routines follow
 2893  */
 2894 
 2895 /*
 2896  * Map a set of physical memory pages into the kernel virtual
 2897  * address space. Return a pointer to where it is mapped. This
 2898  * routine is intended to be used for mapping device memory,
 2899  * NOT real memory.
 2900  */
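      /*
       * A typical (purely illustrative) use from a driver:
       *
       *      regs = pmap_mapdev(0xfe000000, 0x1000);
       *      ... access device registers through regs ...
       *      pmap_unmapdev((vm_offset_t)regs, 0x1000);
       *
       * The physical address and size are made up here; real callers take
       * them from their bus resources.
       */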
 2901 void *
 2902 pmap_mapdev(pa, size)
 2903         vm_paddr_t pa;
 2904         vm_size_t size;
 2905 {
 2906         vm_offset_t va, tmpva, offset;
 2907 
 2908         offset = pa & PAGE_MASK;
 2909         size = roundup(offset + size, PAGE_SIZE);
 2910         pa = pa & PG_FRAME;
 2911 
 2912         if (pa < KERNLOAD && pa + size <= KERNLOAD)
 2913                 va = KERNBASE + pa;
 2914         else
 2915                 va = kmem_alloc_nofault(kernel_map, size);
 2916         if (!va)
 2917                 panic("pmap_mapdev: Couldn't alloc kernel virtual memory");
 2918 
 2919         for (tmpva = va; size > 0; ) {
 2920                 pmap_kenter(tmpva, pa);
 2921                 size -= PAGE_SIZE;
 2922                 tmpva += PAGE_SIZE;
 2923                 pa += PAGE_SIZE;
 2924         }
 2925         pmap_invalidate_range(kernel_pmap, va, tmpva);
 2926         return ((void *)(va + offset));
 2927 }
 2928 
 2929 void
 2930 pmap_unmapdev(va, size)
 2931         vm_offset_t va;
 2932         vm_size_t size;
 2933 {
 2934         vm_offset_t base, offset, tmpva;
 2935 
 2936         if (va >= KERNBASE && va + size <= KERNBASE + KERNLOAD)
 2937                 return;
 2938         base = va & PG_FRAME;
 2939         offset = va & PAGE_MASK;
 2940         size = roundup(offset + size, PAGE_SIZE);
 2941         for (tmpva = base; tmpva < (base + size); tmpva += PAGE_SIZE)
 2942                 pmap_kremove(tmpva);
 2943         pmap_invalidate_range(kernel_pmap, va, tmpva);
 2944         kmem_free(kernel_map, base, size);
 2945 }
 2946 
 2947 /*
 2948  * perform the pmap work for mincore
 2949  */
 2950 int
 2951 pmap_mincore(pmap, addr)
 2952         pmap_t pmap;
 2953         vm_offset_t addr;
 2954 {
 2955         pt_entry_t *ptep, pte;
 2956         vm_page_t m;
 2957         int val = 0;
 2958         
 2959         PMAP_LOCK(pmap);
 2960         ptep = pmap_pte(pmap, addr);
 2961         pte = (ptep != NULL) ? *ptep : 0;
 2962         pmap_pte_release(ptep);
 2963         PMAP_UNLOCK(pmap);
 2964 
 2965         if (pte != 0) {
 2966                 vm_paddr_t pa;
 2967 
 2968                 val = MINCORE_INCORE;
 2969                 if ((pte & PG_MANAGED) == 0)
 2970                         return val;
 2971 
 2972                 pa = pte & PG_FRAME;
 2973 
 2974                 m = PHYS_TO_VM_PAGE(pa);
 2975 
 2976                 /*
 2977                  * Modified by us
 2978                  */
 2979                 if (pte & PG_M)
 2980                         val |= MINCORE_MODIFIED|MINCORE_MODIFIED_OTHER;
 2981                 else {
 2982                         /*
 2983                          * Modified by someone else
 2984                          */
 2985                         vm_page_lock_queues();
 2986                         if (m->dirty || pmap_is_modified(m))
 2987                                 val |= MINCORE_MODIFIED_OTHER;
 2988                         vm_page_unlock_queues();
 2989                 }
 2990                 /*
 2991                  * Referenced by us
 2992                  */
 2993                 if (pte & PG_A)
 2994                         val |= MINCORE_REFERENCED|MINCORE_REFERENCED_OTHER;
 2995                 else {
 2996                         /*
 2997                          * Referenced by someone else
 2998                          */
 2999                         vm_page_lock_queues();
 3000                         if ((m->flags & PG_REFERENCED) ||
 3001                             pmap_ts_referenced(m)) {
 3002                                 val |= MINCORE_REFERENCED_OTHER;
 3003                                 vm_page_flag_set(m, PG_REFERENCED);
 3004                         }
 3005                         vm_page_unlock_queues();
 3006                 }
 3007         } 
 3008         return val;
 3009 }
 3010 
 3011 void
 3012 pmap_activate(struct thread *td)
 3013 {
 3014         struct proc *p = td->td_proc;
 3015         pmap_t  pmap, oldpmap;
 3016         u_int32_t  cr3;
 3017 
 3018         critical_enter();
 3019         pmap = vmspace_pmap(td->td_proc->p_vmspace);
 3020         oldpmap = PCPU_GET(curpmap);
 3021 #if defined(SMP)
 3022         atomic_clear_int(&oldpmap->pm_active, PCPU_GET(cpumask));
 3023         atomic_set_int(&pmap->pm_active, PCPU_GET(cpumask));
 3024 #else
 3025         oldpmap->pm_active &= ~1;
 3026         pmap->pm_active |= 1;
 3027 #endif
 3028 #ifdef PAE
 3029         cr3 = vtophys(pmap->pm_pdpt);
 3030 #else
 3031         cr3 = vtophys(pmap->pm_pdir);
 3032 #endif
 3033         /* XXXKSE this is wrong.
 3034          * pmap_activate is for the current thread on the current cpu
 3035          */
 3036         if (p->p_flag & P_SA) {
 3037                 /* Make sure all other cr3 entries are updated. */
 3038                 /* what if they are running?  XXXKSE (maybe abort them) */
 3039                 FOREACH_THREAD_IN_PROC(p, td) {
 3040                         td->td_pcb->pcb_cr3 = cr3;
 3041                 }
 3042         } else {
 3043                 td->td_pcb->pcb_cr3 = cr3;
 3044         }
 3045         load_cr3(cr3);
 3046         PCPU_SET(curpmap, pmap);
 3047         critical_exit();
 3048 }
 3049 
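/*
 * Editor's note: an annotation, not part of sys/i386/i386/pmap.c.  A
 * standalone sketch of the pm_active bookkeeping performed in
 * pmap_activate() above: each CPU sets its bit in the incoming pmap's
 * active mask and clears it in the outgoing one, which lets TLB
 * invalidations be limited to the CPUs actually using a given pmap.  The
 * structure and mask width here are simplified stand-ins.
 */
#include <stdio.h>

struct sketch_pmap {
        unsigned int pm_active;         /* one bit per CPU using this pmap */
};

static void
sketch_switch_pmap(struct sketch_pmap *oldpm, struct sketch_pmap *newpm,
    unsigned int cpumask)
{
        oldpm->pm_active &= ~cpumask;   /* this CPU stops using oldpm */
        newpm->pm_active |= cpumask;    /* ... and starts using newpm */
}

int
main(void)
{
        struct sketch_pmap a = { 0x1 }, b = { 0x0 };

        /* CPU 0 (mask 0x1) switches from address space a to b. */
        sketch_switch_pmap(&a, &b, 0x1);
        printf("a.pm_active=%#x b.pm_active=%#x\n", a.pm_active, b.pm_active);
        return (0);
}
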
 3050 vm_offset_t
 3051 pmap_addr_hint(vm_object_t obj, vm_offset_t addr, vm_size_t size)
 3052 {
 3053 
 3054         if ((obj == NULL) || (size < NBPDR) || (obj->type != OBJT_DEVICE)) {
 3055                 return addr;
 3056         }
 3057 
 3058         addr = (addr + PDRMASK) & ~PDRMASK;
 3059         return addr;
 3060 }
 3061 
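/*
 * Editor's note: an annotation, not part of sys/i386/i386/pmap.c.  A
 * standalone sketch of the round-up performed by pmap_addr_hint() above,
 * assuming the non-PAE value of PDRMASK (a 4 MB superpage); the sample
 * address is arbitrary.
 */
#include <stdio.h>

#define SKETCH_PDRMASK  ((1u << 22) - 1)        /* 4 MB - 1, non-PAE i386 */

int
main(void)
{
        unsigned int addr = 0x00a12345;
        unsigned int hint = (addr + SKETCH_PDRMASK) & ~SKETCH_PDRMASK;

        /* Prints "0x00a12345 -> 0x00c00000": the next 4 MB boundary. */
        printf("%#010x -> %#010x\n", addr, hint);
        return (0);
}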
 3062 
 3063 #if defined(PMAP_DEBUG)
 3064 int pmap_pid_dump(int pid)
 3065 {
 3066         pmap_t pmap;
 3067         struct proc *p;
 3068         int npte = 0;
 3069         int index;
 3070 
 3071         sx_slock(&allproc_lock);
 3072         LIST_FOREACH(p, &allproc, p_list) {
 3073                 if (p->p_pid != pid)
 3074                         continue;
 3075 
 3076                 if (p->p_vmspace) {
 3077                         int i,j;
 3078                         index = 0;
 3079                         pmap = vmspace_pmap(p->p_vmspace);
 3080                         for (i = 0; i < NPDEPTD; i++) {
 3081                                 pd_entry_t *pde;
 3082                                 pt_entry_t *pte;
 3083                                 vm_offset_t base = i << PDRSHIFT;
 3084                                 
 3085                                 pde = &pmap->pm_pdir[i];
 3086                                 if (pde && pmap_pde_v(pde)) {
 3087                                         for (j = 0; j < NPTEPG; j++) {
 3088                                                 vm_offset_t va = base + (j << PAGE_SHIFT);
 3089                                                 if (va >= (vm_offset_t) VM_MIN_KERNEL_ADDRESS) {
 3090                                                         if (index) {
 3091                                                                 index = 0;
 3092                                                                 printf("\n");
 3093                                                         }
 3094                                                         sx_sunlock(&allproc_lock);
 3095                                                         return npte;
 3096                                                 }
 3097                                                 pte = pmap_pte(pmap, va);
 3098                                                 if (pte && pmap_pte_v(pte)) {
 3099                                                         pt_entry_t pa;
 3100                                                         vm_page_t m;
 3101                                                         pa = *pte;
 3102                                                         m = PHYS_TO_VM_PAGE(pa);
 3103                                                         printf("va: 0x%x, pt: 0x%x, h: %d, w: %d, f: 0x%x",
 3104                                                                 va, pa, m->hold_count, m->wire_count, m->flags);
 3105                                                         npte++;
 3106                                                         index++;
 3107                                                         if (index >= 2) {
 3108                                                                 index = 0;
 3109                                                                 printf("\n");
 3110                                                         } else {
 3111                                                                 printf(" ");
 3112                                                         }
 3113                                                 }
 3114                                         }
 3115                                 }
 3116                         }
 3117                 }
 3118         }
 3119         sx_sunlock(&allproc_lock);
 3120         return npte;
 3121 }
 3122 #endif
 3123 
 3124 #if defined(DEBUG)
 3125 
 3126 static void     pads(pmap_t pm);
 3127 void            pmap_pvdump(vm_paddr_t pa);
 3128 
 3129 /* print address space of pmap */
 3130 static void
 3131 pads(pm)
 3132         pmap_t pm;
 3133 {
 3134         int i, j;
 3135         vm_offset_t va;
 3136         pt_entry_t *ptep;
 3137 
 3138         if (pm == kernel_pmap)
 3139                 return;
 3140         for (i = 0; i < NPDEPTD; i++)
 3141                 if (pm->pm_pdir[i])
 3142                         for (j = 0; j < NPTEPG; j++) {
 3143                                 va = (i << PDRSHIFT) + (j << PAGE_SHIFT);
 3144                                 if (pm == kernel_pmap && va < KERNBASE)
 3145                                         continue;
 3146                                 if (pm != kernel_pmap && va > UPT_MAX_ADDRESS)
 3147                                         continue;
 3148                                 ptep = pmap_pte(pm, va);
 3149                                 if (pmap_pte_v(ptep))
 3150                                         printf("%x:%x ", va, *ptep);
 3151                         }
 3152 
 3153 }
 3154 
 3155 void
 3156 pmap_pvdump(pa)
 3157         vm_paddr_t pa;
 3158 {
 3159         pv_entry_t pv;
 3160         vm_page_t m;
 3161 
 3162         printf("pa %x", pa);
 3163         m = PHYS_TO_VM_PAGE(pa);
 3164         TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
 3165                 printf(" -> pmap %p, va %x", (void *)pv->pv_pmap, pv->pv_va);
 3166                 pads(pv->pv_pmap);
 3167         }
 3168         printf(" ");
 3169 }
 3170 #endif
