FreeBSD/Linux Kernel Cross Reference
sys/arm/arm/pmap-v6.c


    1 /* From: $NetBSD: pmap.c,v 1.148 2004/04/03 04:35:48 bsh Exp $ */
    2 /*-
    3  * Copyright 2011 Semihalf
    4  * Copyright 2004 Olivier Houchard.
    5  * Copyright 2003 Wasabi Systems, Inc.
    6  * All rights reserved.
    7  *
    8  * Written by Steve C. Woodford for Wasabi Systems, Inc.
    9  *
   10  * Redistribution and use in source and binary forms, with or without
   11  * modification, are permitted provided that the following conditions
   12  * are met:
   13  * 1. Redistributions of source code must retain the above copyright
   14  *    notice, this list of conditions and the following disclaimer.
   15  * 2. Redistributions in binary form must reproduce the above copyright
   16  *    notice, this list of conditions and the following disclaimer in the
   17  *    documentation and/or other materials provided with the distribution.
   18  * 3. All advertising materials mentioning features or use of this software
   19  *    must display the following acknowledgement:
   20  *      This product includes software developed for the NetBSD Project by
   21  *      Wasabi Systems, Inc.
   22  * 4. The name of Wasabi Systems, Inc. may not be used to endorse
   23  *    or promote products derived from this software without specific prior
   24  *    written permission.
   25  *
   26  * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
   27  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
   28  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
   29  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
   30  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
   31  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
   32  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
   33  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
   34  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
   35  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
   36  * POSSIBILITY OF SUCH DAMAGE.
   37  *
   38  * From: FreeBSD: src/sys/arm/arm/pmap.c,v 1.113 2009/07/24 13:50:29
   39  */
   40 
   41 /*-
   42  * Copyright (c) 2002-2003 Wasabi Systems, Inc.
   43  * Copyright (c) 2001 Richard Earnshaw
   44  * Copyright (c) 2001-2002 Christopher Gilbert
   45  * All rights reserved.
   46  *
   47  * 1. Redistributions of source code must retain the above copyright
   48  *    notice, this list of conditions and the following disclaimer.
   49  * 2. Redistributions in binary form must reproduce the above copyright
   50  *    notice, this list of conditions and the following disclaimer in the
   51  *    documentation and/or other materials provided with the distribution.
   52  * 3. The name of the company nor the name of the author may be used to
   53  *    endorse or promote products derived from this software without specific
   54  *    prior written permission.
   55  *
   56  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
   57  * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
   58  * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
   59  * IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
   60  * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
   61  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
   62  * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   63  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   64  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   65  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   66  * SUCH DAMAGE.
   67  */
   68 /*-
   69  * Copyright (c) 1999 The NetBSD Foundation, Inc.
   70  * All rights reserved.
   71  *
   72  * This code is derived from software contributed to The NetBSD Foundation
   73  * by Charles M. Hannum.
   74  *
   75  * Redistribution and use in source and binary forms, with or without
   76  * modification, are permitted provided that the following conditions
   77  * are met:
   78  * 1. Redistributions of source code must retain the above copyright
   79  *    notice, this list of conditions and the following disclaimer.
   80  * 2. Redistributions in binary form must reproduce the above copyright
   81  *    notice, this list of conditions and the following disclaimer in the
   82  *    documentation and/or other materials provided with the distribution.
   83  *
   84  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
   85  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
   86  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
   87  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
   88  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
   89  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
   90  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
   91  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
   92  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
   93  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
   94  * POSSIBILITY OF SUCH DAMAGE.
   95  */
   96 
   97 /*-
   98  * Copyright (c) 1994-1998 Mark Brinicombe.
   99  * Copyright (c) 1994 Brini.
  100  * All rights reserved.
  101  *
  102  * This code is derived from software written for Brini by Mark Brinicombe
  103  *
  104  * Redistribution and use in source and binary forms, with or without
  105  * modification, are permitted provided that the following conditions
  106  * are met:
  107  * 1. Redistributions of source code must retain the above copyright
  108  *    notice, this list of conditions and the following disclaimer.
  109  * 2. Redistributions in binary form must reproduce the above copyright
  110  *    notice, this list of conditions and the following disclaimer in the
  111  *    documentation and/or other materials provided with the distribution.
  112  * 3. All advertising materials mentioning features or use of this software
  113  *    must display the following acknowledgement:
  114  *      This product includes software developed by Mark Brinicombe.
  115  * 4. The name of the author may not be used to endorse or promote products
  116  *    derived from this software without specific prior written permission.
  117  *
  118  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
  119  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
  120  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
  121  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
  122  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
  123  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
  124  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
  125  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
   126  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
   127  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  128  * RiscBSD kernel project
  129  *
  130  * pmap.c
  131  *
   132  * Machine dependent vm stuff
  133  *
  134  * Created      : 20/09/94
  135  */
  136 
  137 /*
  138  * Special compilation symbols
  139  * PMAP_DEBUG           - Build in pmap_debug_level code
  140  *
  141  * Note that pmap_mapdev() and pmap_unmapdev() are implemented in arm/devmap.c
   142  */
  143 /* Include header files */
  144 
  145 #include "opt_vm.h"
  146 #include "opt_pmap.h"
  147 
  148 #include <sys/cdefs.h>
  149 __FBSDID("$FreeBSD: releng/10.2/sys/arm/arm/pmap-v6.c 278614 2015-02-12 04:15:55Z ian $");
  150 #include <sys/param.h>
  151 #include <sys/systm.h>
  152 #include <sys/kernel.h>
  153 #include <sys/ktr.h>
  154 #include <sys/lock.h>
  155 #include <sys/proc.h>
  156 #include <sys/malloc.h>
  157 #include <sys/msgbuf.h>
  158 #include <sys/mutex.h>
  159 #include <sys/vmmeter.h>
  160 #include <sys/mman.h>
  161 #include <sys/rwlock.h>
  162 #include <sys/smp.h>
  163 #include <sys/sched.h>
  164 #include <sys/sysctl.h>
  165 
  166 #include <vm/vm.h>
  167 #include <vm/vm_param.h>
  168 #include <vm/uma.h>
  169 #include <vm/pmap.h>
  170 #include <vm/vm_kern.h>
  171 #include <vm/vm_object.h>
  172 #include <vm/vm_map.h>
  173 #include <vm/vm_page.h>
  174 #include <vm/vm_pageout.h>
  175 #include <vm/vm_phys.h>
  176 #include <vm/vm_extern.h>
  177 #include <vm/vm_reserv.h>
  178 
  179 #include <machine/md_var.h>
  180 #include <machine/cpu.h>
  181 #include <machine/cpufunc.h>
  182 #include <machine/pcb.h>
  183 
  184 #ifdef DEBUG
  185 extern int last_fault_code;
  186 #endif
  187 
  188 #ifdef PMAP_DEBUG
  189 #define PDEBUG(_lev_,_stat_) \
  190         if (pmap_debug_level >= (_lev_)) \
  191                 ((_stat_))
  192 #define dprintf printf
  193 
  194 int pmap_debug_level = 0;
  195 #define PMAP_INLINE
  196 #else   /* PMAP_DEBUG */
  197 #define PDEBUG(_lev_,_stat_) /* Nothing */
  198 #define dprintf(x, arg...)
  199 #define PMAP_INLINE __inline
  200 #endif  /* PMAP_DEBUG */
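       /*
        * Editorial note (not in the original source): with PMAP_DEBUG
        * defined, diagnostics are wrapped in PDEBUG() so that they compile
        * away entirely otherwise, e.g.
        *
        *      PDEBUG(1, printf("pmap_pinit0: pmap = %08x\n",
        *          (u_int32_t)pmap));
        *
        * as in pmap_pinit0() below, which prints only when
        * pmap_debug_level >= 1 (see pmap_debug()).
        */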
  201 
  202 #ifdef PV_STATS
  203 #define PV_STAT(x)      do { x ; } while (0)
  204 #else
  205 #define PV_STAT(x)      do { } while (0)
  206 #endif
  207 
  208 #define pa_to_pvh(pa)   (&pv_table[pa_index(pa)])
  209 
  210 #ifdef ARM_L2_PIPT
  211 #define pmap_l2cache_wbinv_range(va, pa, size) cpu_l2cache_wbinv_range((pa), (size))
  212 #define pmap_l2cache_inv_range(va, pa, size) cpu_l2cache_inv_range((pa), (size))
  213 #else
  214 #define pmap_l2cache_wbinv_range(va, pa, size) cpu_l2cache_wbinv_range((va), (size))
  215 #define pmap_l2cache_inv_range(va, pa, size) cpu_l2cache_inv_range((va), (size))
  216 #endif
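       /*
        * Editorial note (not in the original source): with ARM_L2_PIPT the
        * outer (L2) cache is physically indexed, so these macros pass the
        * physical address to the cpu_l2cache_*_range() back-ends and ignore
        * the VA; otherwise the virtual address is passed instead.  The size
        * argument is the same either way.
        */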
  217 
  218 extern struct pv_addr systempage;
  219 
  220 /*
  221  * Internal function prototypes
  222  */
  223 
  224 static PMAP_INLINE
  225 struct pv_entry         *pmap_find_pv(struct md_page *, pmap_t, vm_offset_t);
  226 static void             pmap_free_pv_chunk(struct pv_chunk *pc);
  227 static void             pmap_free_pv_entry(pmap_t pmap, pv_entry_t pv);
  228 static pv_entry_t       pmap_get_pv_entry(pmap_t pmap, boolean_t try);
  229 static vm_page_t        pmap_pv_reclaim(pmap_t locked_pmap);
  230 static boolean_t        pmap_pv_insert_section(pmap_t, vm_offset_t,
  231     vm_paddr_t);
  232 static struct pv_entry  *pmap_remove_pv(struct vm_page *, pmap_t, vm_offset_t);
  233 static int              pmap_pvh_wired_mappings(struct md_page *, int);
  234 
  235 static int              pmap_enter_locked(pmap_t, vm_offset_t, vm_page_t,
  236     vm_prot_t, u_int);
  237 static vm_paddr_t       pmap_extract_locked(pmap_t pmap, vm_offset_t va);
  238 static void             pmap_alloc_l1(pmap_t);
  239 static void             pmap_free_l1(pmap_t);
  240 
  241 static void             pmap_map_section(pmap_t, vm_offset_t, vm_offset_t,
  242     vm_prot_t, boolean_t);
  243 static void             pmap_promote_section(pmap_t, vm_offset_t);
  244 static boolean_t        pmap_demote_section(pmap_t, vm_offset_t);
  245 static boolean_t        pmap_enter_section(pmap_t, vm_offset_t, vm_page_t,
  246     vm_prot_t);
  247 static void             pmap_remove_section(pmap_t, vm_offset_t);
  248 
  249 static int              pmap_clearbit(struct vm_page *, u_int);
  250 
  251 static struct l2_bucket *pmap_get_l2_bucket(pmap_t, vm_offset_t);
  252 static struct l2_bucket *pmap_alloc_l2_bucket(pmap_t, vm_offset_t);
  253 static void             pmap_free_l2_bucket(pmap_t, struct l2_bucket *, u_int);
  254 static vm_offset_t      kernel_pt_lookup(vm_paddr_t);
  255 
  256 static MALLOC_DEFINE(M_VMPMAP, "pmap", "PMAP L1");
  257 
  258 vm_offset_t virtual_avail;      /* VA of first avail page (after kernel bss) */
  259 vm_offset_t virtual_end;        /* VA of last avail page (end of kernel AS) */
  260 vm_offset_t pmap_curmaxkvaddr;
  261 vm_paddr_t kernel_l1pa;
  262 
  263 vm_offset_t kernel_vm_end = 0;
  264 
  265 vm_offset_t vm_max_kernel_address;
  266 
  267 struct pmap kernel_pmap_store;
  268 
  269 /*
  270  * Resources for quickly copying and zeroing pages using virtual address space
  271  * and page table entries that are pre-allocated per-CPU by pmap_init().
  272  */
  273 struct czpages {
  274         struct  mtx     lock;
  275         pt_entry_t      *srcptep;
  276         pt_entry_t      *dstptep;
  277         vm_offset_t     srcva;
  278         vm_offset_t     dstva;
  279 };
  280 static struct czpages cpu_czpages[MAXCPU];
  281 
  282 static void             pmap_init_l1(struct l1_ttable *, pd_entry_t *);
  283 /*
  284  * These routines are called when the CPU type is identified to set up
  285  * the PTE prototypes, cache modes, etc.
  286  *
  287  * The variables are always here, just in case LKMs need to reference
   288  * them (though they shouldn't).
  289  */
  290 static void pmap_set_prot(pt_entry_t *pte, vm_prot_t prot, uint8_t user);
  291 pt_entry_t      pte_l1_s_cache_mode;
  292 pt_entry_t      pte_l1_s_cache_mode_pt;
  293 
  294 pt_entry_t      pte_l2_l_cache_mode;
  295 pt_entry_t      pte_l2_l_cache_mode_pt;
  296 
  297 pt_entry_t      pte_l2_s_cache_mode;
  298 pt_entry_t      pte_l2_s_cache_mode_pt;
  299 
  300 struct msgbuf *msgbufp = 0;
  301 
  302 /*
  303  * Crashdump maps.
  304  */
  305 static caddr_t crashdumpmap;
  306 
  307 extern void bcopy_page(vm_offset_t, vm_offset_t);
  308 extern void bzero_page(vm_offset_t);
  309 
  310 char *_tmppt;
  311 
  312 /*
  313  * Metadata for L1 translation tables.
  314  */
  315 struct l1_ttable {
  316         /* Entry on the L1 Table list */
  317         SLIST_ENTRY(l1_ttable) l1_link;
  318 
  319         /* Entry on the L1 Least Recently Used list */
  320         TAILQ_ENTRY(l1_ttable) l1_lru;
  321 
  322         /* Track how many domains are allocated from this L1 */
  323         volatile u_int l1_domain_use_count;
  324 
  325         /*
  326          * A free-list of domain numbers for this L1.
  327          * We avoid using ffs() and a bitmap to track domains since ffs()
  328          * is slow on ARM.
  329          */
  330         u_int8_t l1_domain_first;
  331         u_int8_t l1_domain_free[PMAP_DOMAINS];
  332 
  333         /* Physical address of this L1 page table */
  334         vm_paddr_t l1_physaddr;
  335 
  336         /* KVA of this L1 page table */
  337         pd_entry_t *l1_kva;
  338 };
  339 
  340 /*
  341  * Convert a virtual address into its L1 table index. That is, the
  342  * index used to locate the L2 descriptor table pointer in an L1 table.
  343  * This is basically used to index l1->l1_kva[].
  344  *
  345  * Each L2 descriptor table represents 1MB of VA space.
  346  */
  347 #define L1_IDX(va)              (((vm_offset_t)(va)) >> L1_S_SHIFT)
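       /*
        * Worked example (editorial, assuming the usual ARM section size,
        * i.e. L1_S_SHIFT == 20): for va = 0xc0123456,
        * L1_IDX(va) == 0xc0123456 >> 20 == 0xc01, so l1->l1_kva[0xc01]
        * holds the descriptor covering 0xc0100000 - 0xc01fffff.
        */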
  348 
  349 /*
  350  * L1 Page Tables are tracked using a Least Recently Used list.
  351  *  - New L1s are allocated from the HEAD.
   352  *  - Freed L1s are added to the TAIL.
  353  *  - Recently accessed L1s (where an 'access' is some change to one of
  354  *    the userland pmaps which owns this L1) are moved to the TAIL.
  355  */
  356 static TAILQ_HEAD(, l1_ttable) l1_lru_list;
  357 /*
  358  * A list of all L1 tables
  359  */
  360 static SLIST_HEAD(, l1_ttable) l1_list;
  361 static struct mtx l1_lru_lock;
  362 
  363 /*
  364  * The l2_dtable tracks L2_BUCKET_SIZE worth of L1 slots.
  365  *
   366  * This is normally 16MB worth of L2 page descriptors for any given pmap.
  367  * Reference counts are maintained for L2 descriptors so they can be
  368  * freed when empty.
  369  */
  370 struct l2_dtable {
  371         /* The number of L2 page descriptors allocated to this l2_dtable */
  372         u_int l2_occupancy;
  373 
  374         /* List of L2 page descriptors */
  375         struct l2_bucket {
  376                 pt_entry_t *l2b_kva;    /* KVA of L2 Descriptor Table */
  377                 vm_paddr_t l2b_phys;    /* Physical address of same */
  378                 u_short l2b_l1idx;      /* This L2 table's L1 index */
  379                 u_short l2b_occupancy;  /* How many active descriptors */
  380         } l2_bucket[L2_BUCKET_SIZE];
  381 };
  382 
  383 /* pmap_kenter_internal flags */
  384 #define KENTER_CACHE    0x1
  385 #define KENTER_DEVICE   0x2
  386 #define KENTER_USER     0x4
  387 
  388 /*
  389  * Given an L1 table index, calculate the corresponding l2_dtable index
  390  * and bucket index within the l2_dtable.
  391  */
  392 #define L2_IDX(l1idx)           (((l1idx) >> L2_BUCKET_LOG2) & \
  393                                  (L2_SIZE - 1))
  394 #define L2_BUCKET(l1idx)        ((l1idx) & (L2_BUCKET_SIZE - 1))
  395 
  396 /*
  397  * Given a virtual address, this macro returns the
  398  * virtual address required to drop into the next L2 bucket.
  399  */
  400 #define L2_NEXT_BUCKET(va)      (((va) & L1_S_FRAME) + L1_S_SIZE)
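       /*
        * Worked example (editorial, assuming L1_S_SHIFT == 20 and
        * L2_BUCKET_LOG2 == 4, hence L2_BUCKET_SIZE == 16, as on
        * FreeBSD/ARM): for va = 0xc0123456,
        *
        *      l1idx = L1_IDX(va)   = 0xc01
        *      L2_IDX(l1idx)        = (0xc01 >> 4) & (L2_SIZE - 1)
        *      L2_BUCKET(l1idx)     = 0xc01 & 0xf = 0x1
        *      L2_NEXT_BUCKET(va)   = 0xc0200000
        *
        * so one l2_dtable spans 16 consecutive 1MB sections, matching the
        * "16MB worth of L2 page descriptors" note above.
        */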
  401 
  402 /*
  403  * We try to map the page tables write-through, if possible.  However, not
  404  * all CPUs have a write-through cache mode, so on those we have to sync
  405  * the cache when we frob page tables.
  406  *
  407  * We try to evaluate this at compile time, if possible.  However, it's
  408  * not always possible to do that, hence this run-time var.
  409  */
  410 int     pmap_needs_pte_sync;
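       /*
        * Editorial note (not in the original source): when
        * pmap_needs_pte_sync is non-zero, PTE_SYNC() is expected to clean
        * the cache line holding a modified descriptor so that the hardware
        * table walker observes the update; with a write-through mapping of
        * the page tables it can be a no-op.
        */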
  411 
  412 /*
  413  * Macro to determine if a mapping might be resident in the
  414  * instruction cache and/or TLB
  415  */
  416 #define PTE_BEEN_EXECD(pte)  (L2_S_EXECUTABLE(pte) && L2_S_REFERENCED(pte))
  417 
  418 /*
  419  * Macro to determine if a mapping might be resident in the
  420  * data cache and/or TLB
  421  */
  422 #define PTE_BEEN_REFD(pte)   (L2_S_REFERENCED(pte))
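       /*
        * Editorial note: these predicates are used below to choose the TLB
        * flush primitive after a PTE change, e.g. in pmap_clearbit():
        * cpu_tlb_flushID_SE() (instruction + data) when PTE_BEEN_EXECD()
        * is true, cpu_tlb_flushD_SE() (data only) when only
        * PTE_BEEN_REFD() holds.
        */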
  423 
  424 #ifndef PMAP_SHPGPERPROC
  425 #define PMAP_SHPGPERPROC 200
  426 #endif
  427 
  428 #define pmap_is_current(pm)     ((pm) == pmap_kernel() || \
  429             curproc->p_vmspace->vm_map.pmap == (pm))
  430 
  431 /*
  432  * Data for the pv entry allocation mechanism
  433  */
  434 static TAILQ_HEAD(pch, pv_chunk) pv_chunks = TAILQ_HEAD_INITIALIZER(pv_chunks);
  435 static int pv_entry_count, pv_entry_max, pv_entry_high_water;
  436 static struct md_page *pv_table;
  437 static int shpgperproc = PMAP_SHPGPERPROC;
  438 
  439 struct pv_chunk *pv_chunkbase;          /* KVA block for pv_chunks */
  440 int pv_maxchunks;                       /* How many chunks we have KVA for */
  441 vm_offset_t pv_vafree;                  /* Freelist stored in the PTE */
  442 
  443 static __inline struct pv_chunk *
  444 pv_to_chunk(pv_entry_t pv)
  445 {
  446 
  447         return ((struct pv_chunk *)((uintptr_t)pv & ~(uintptr_t)PAGE_MASK));
  448 }
  449 
  450 #define PV_PMAP(pv) (pv_to_chunk(pv)->pc_pmap)
  451 
  452 CTASSERT(sizeof(struct pv_chunk) == PAGE_SIZE);
  453 CTASSERT(_NPCM == 8);
  454 CTASSERT(_NPCPV == 252);
  455 
  456 #define PC_FREE0_6      0xfffffffful    /* Free values for index 0 through 6 */
  457 #define PC_FREE7        0x0ffffffful    /* Free values for index 7 */
  458 
  459 static const uint32_t pc_freemask[_NPCM] = {
  460         PC_FREE0_6, PC_FREE0_6, PC_FREE0_6,
  461         PC_FREE0_6, PC_FREE0_6, PC_FREE0_6,
  462         PC_FREE0_6, PC_FREE7
  463 };
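       /*
        * Editorial sketch of the pv_chunk layout (not in the original
        * source): each chunk occupies exactly one page (see the CTASSERT
        * above), so pv_to_chunk() recovers the chunk header by masking a
        * pv_entry pointer down to its page boundary.  A chunk holds
        * _NPCPV == 252 entries tracked by an 8-word free bitmap: words 0-6
        * contribute 7 * 32 == 224 bits and word 7 only 28
        * (PC_FREE7 == 0x0fffffff), giving 224 + 28 == 252.
        */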
  464 
  465 static SYSCTL_NODE(_vm, OID_AUTO, pmap, CTLFLAG_RD, 0, "VM/pmap parameters");
  466 
   467 /* Superpage utilization: enabled = 1, disabled = 0 */
  468 static int sp_enabled = 0;
  469 SYSCTL_INT(_vm_pmap, OID_AUTO, sp_enabled, CTLFLAG_RDTUN, &sp_enabled, 0,
  470     "Are large page mappings enabled?");
  471 
  472 SYSCTL_INT(_vm_pmap, OID_AUTO, pv_entry_count, CTLFLAG_RD, &pv_entry_count, 0,
  473     "Current number of pv entries");
  474 
  475 #ifdef PV_STATS
  476 static int pc_chunk_count, pc_chunk_allocs, pc_chunk_frees, pc_chunk_tryfail;
  477 
  478 SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_count, CTLFLAG_RD, &pc_chunk_count, 0,
  479     "Current number of pv entry chunks");
  480 SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_allocs, CTLFLAG_RD, &pc_chunk_allocs, 0,
  481     "Current number of pv entry chunks allocated");
  482 SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_frees, CTLFLAG_RD, &pc_chunk_frees, 0,
  483     "Current number of pv entry chunks frees");
  484 SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_tryfail, CTLFLAG_RD, &pc_chunk_tryfail, 0,
  485     "Number of times tried to get a chunk page but failed.");
  486 
  487 static long pv_entry_frees, pv_entry_allocs;
  488 static int pv_entry_spare;
  489 
  490 SYSCTL_LONG(_vm_pmap, OID_AUTO, pv_entry_frees, CTLFLAG_RD, &pv_entry_frees, 0,
  491     "Current number of pv entry frees");
  492 SYSCTL_LONG(_vm_pmap, OID_AUTO, pv_entry_allocs, CTLFLAG_RD, &pv_entry_allocs, 0,
  493     "Current number of pv entry allocs");
  494 SYSCTL_INT(_vm_pmap, OID_AUTO, pv_entry_spare, CTLFLAG_RD, &pv_entry_spare, 0,
  495     "Current number of spare pv entries");
  496 #endif
  497 
  498 uma_zone_t l2zone;
  499 static uma_zone_t l2table_zone;
  500 static vm_offset_t pmap_kernel_l2dtable_kva;
  501 static vm_offset_t pmap_kernel_l2ptp_kva;
  502 static vm_paddr_t pmap_kernel_l2ptp_phys;
  503 static struct rwlock pvh_global_lock;
  504 
  505 int l1_mem_types[] = {
  506         ARM_L1S_STRONG_ORD,
  507         ARM_L1S_DEVICE_NOSHARE,
  508         ARM_L1S_DEVICE_SHARE,
  509         ARM_L1S_NRML_NOCACHE,
  510         ARM_L1S_NRML_IWT_OWT,
  511         ARM_L1S_NRML_IWB_OWB,
  512         ARM_L1S_NRML_IWBA_OWBA
  513 };
  514 
  515 int l2l_mem_types[] = {
  516         ARM_L2L_STRONG_ORD,
  517         ARM_L2L_DEVICE_NOSHARE,
  518         ARM_L2L_DEVICE_SHARE,
  519         ARM_L2L_NRML_NOCACHE,
  520         ARM_L2L_NRML_IWT_OWT,
  521         ARM_L2L_NRML_IWB_OWB,
  522         ARM_L2L_NRML_IWBA_OWBA
  523 };
  524 
  525 int l2s_mem_types[] = {
  526         ARM_L2S_STRONG_ORD,
  527         ARM_L2S_DEVICE_NOSHARE,
  528         ARM_L2S_DEVICE_SHARE,
  529         ARM_L2S_NRML_NOCACHE,
  530         ARM_L2S_NRML_IWT_OWT,
  531         ARM_L2S_NRML_IWB_OWB,
  532         ARM_L2S_NRML_IWBA_OWBA
  533 };
  534 
  535 /*
  536  * This list exists for the benefit of pmap_map_chunk().  It keeps track
  537  * of the kernel L2 tables during bootstrap, so that pmap_map_chunk() can
  538  * find them as necessary.
  539  *
  540  * Note that the data on this list MUST remain valid after initarm() returns,
   541  * as pmap_bootstrap() uses it to construct L2 table metadata.
  542  */
  543 SLIST_HEAD(, pv_addr) kernel_pt_list = SLIST_HEAD_INITIALIZER(kernel_pt_list);
  544 
  545 static void
  546 pmap_init_l1(struct l1_ttable *l1, pd_entry_t *l1pt)
  547 {
  548         int i;
  549 
  550         l1->l1_kva = l1pt;
  551         l1->l1_domain_use_count = 0;
  552         l1->l1_domain_first = 0;
  553 
  554         for (i = 0; i < PMAP_DOMAINS; i++)
  555                 l1->l1_domain_free[i] = i + 1;
  556 
  557         /*
  558          * Copy the kernel's L1 entries to each new L1.
  559          */
  560         if (l1pt != pmap_kernel()->pm_l1->l1_kva)
  561                 memcpy(l1pt, pmap_kernel()->pm_l1->l1_kva, L1_TABLE_SIZE);
  562 
  563         if ((l1->l1_physaddr = pmap_extract(pmap_kernel(), (vm_offset_t)l1pt)) == 0)
  564                 panic("pmap_init_l1: can't get PA of L1 at %p", l1pt);
  565         SLIST_INSERT_HEAD(&l1_list, l1, l1_link);
  566         TAILQ_INSERT_TAIL(&l1_lru_list, l1, l1_lru);
  567 }
  568 
  569 static vm_offset_t
  570 kernel_pt_lookup(vm_paddr_t pa)
  571 {
  572         struct pv_addr *pv;
  573 
  574         SLIST_FOREACH(pv, &kernel_pt_list, pv_list) {
  575                 if (pv->pv_pa == pa)
  576                         return (pv->pv_va);
  577         }
  578         return (0);
  579 }
  580 
  581 void
  582 pmap_pte_init_mmu_v6(void)
  583 {
  584 
  585         if (PTE_PAGETABLE >= 3)
  586                 pmap_needs_pte_sync = 1;
  587         pte_l1_s_cache_mode = l1_mem_types[PTE_CACHE];
  588         pte_l2_l_cache_mode = l2l_mem_types[PTE_CACHE];
  589         pte_l2_s_cache_mode = l2s_mem_types[PTE_CACHE];
  590 
  591         pte_l1_s_cache_mode_pt = l1_mem_types[PTE_PAGETABLE];
  592         pte_l2_l_cache_mode_pt = l2l_mem_types[PTE_PAGETABLE];
  593         pte_l2_s_cache_mode_pt = l2s_mem_types[PTE_PAGETABLE];
  594 
  595 }
  596 
  597 /*
  598  * Allocate an L1 translation table for the specified pmap.
  599  * This is called at pmap creation time.
  600  */
  601 static void
  602 pmap_alloc_l1(pmap_t pmap)
  603 {
  604         struct l1_ttable *l1;
  605         u_int8_t domain;
  606 
  607         /*
  608          * Remove the L1 at the head of the LRU list
  609          */
  610         mtx_lock(&l1_lru_lock);
  611         l1 = TAILQ_FIRST(&l1_lru_list);
  612         TAILQ_REMOVE(&l1_lru_list, l1, l1_lru);
  613 
  614         /*
  615          * Pick the first available domain number, and update
  616          * the link to the next number.
  617          */
  618         domain = l1->l1_domain_first;
  619         l1->l1_domain_first = l1->l1_domain_free[domain];
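               /*
                * Editorial note: l1_domain_free[] encodes a singly-linked
                * free list of domain numbers (pmap_init_l1() seeds each
                * slot with i + 1), so the two statements above simply pop
                * the head of that list; pmap_free_l1() pushes the number
                * back in the same way.
                */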
  620 
  621         /*
  622          * If there are still free domain numbers in this L1,
  623          * put it back on the TAIL of the LRU list.
  624          */
  625         if (++l1->l1_domain_use_count < PMAP_DOMAINS)
  626                 TAILQ_INSERT_TAIL(&l1_lru_list, l1, l1_lru);
  627 
  628         mtx_unlock(&l1_lru_lock);
  629 
  630         /*
  631          * Fix up the relevant bits in the pmap structure
  632          */
  633         pmap->pm_l1 = l1;
  634         pmap->pm_domain = domain + 1;
  635 }
  636 
  637 /*
  638  * Free an L1 translation table.
  639  * This is called at pmap destruction time.
  640  */
  641 static void
  642 pmap_free_l1(pmap_t pmap)
  643 {
  644         struct l1_ttable *l1 = pmap->pm_l1;
  645 
  646         mtx_lock(&l1_lru_lock);
  647 
  648         /*
  649          * If this L1 is currently on the LRU list, remove it.
  650          */
  651         if (l1->l1_domain_use_count < PMAP_DOMAINS)
  652                 TAILQ_REMOVE(&l1_lru_list, l1, l1_lru);
  653 
  654         /*
  655          * Free up the domain number which was allocated to the pmap
  656          */
  657         l1->l1_domain_free[pmap->pm_domain - 1] = l1->l1_domain_first;
  658         l1->l1_domain_first = pmap->pm_domain - 1;
  659         l1->l1_domain_use_count--;
  660 
  661         /*
  662          * The L1 now must have at least 1 free domain, so add
  663          * it back to the LRU list. If the use count is zero,
  664          * put it at the head of the list, otherwise it goes
  665          * to the tail.
  666          */
  667         if (l1->l1_domain_use_count == 0) {
  668                 TAILQ_INSERT_HEAD(&l1_lru_list, l1, l1_lru);
  669         }       else
  670                 TAILQ_INSERT_TAIL(&l1_lru_list, l1, l1_lru);
  671 
  672         mtx_unlock(&l1_lru_lock);
  673 }
  674 
  675 /*
  676  * Returns a pointer to the L2 bucket associated with the specified pmap
  677  * and VA, or NULL if no L2 bucket exists for the address.
  678  */
  679 static PMAP_INLINE struct l2_bucket *
  680 pmap_get_l2_bucket(pmap_t pmap, vm_offset_t va)
  681 {
  682         struct l2_dtable *l2;
  683         struct l2_bucket *l2b;
  684         u_short l1idx;
  685 
  686         l1idx = L1_IDX(va);
  687 
  688         if ((l2 = pmap->pm_l2[L2_IDX(l1idx)]) == NULL ||
  689             (l2b = &l2->l2_bucket[L2_BUCKET(l1idx)])->l2b_kva == NULL)
  690                 return (NULL);
  691 
  692         return (l2b);
  693 }
  694 
  695 /*
  696  * Returns a pointer to the L2 bucket associated with the specified pmap
  697  * and VA.
  698  *
  699  * If no L2 bucket exists, perform the necessary allocations to put an L2
  700  * bucket/page table in place.
  701  *
  702  * Note that if a new L2 bucket/page was allocated, the caller *must*
  703  * increment the bucket occupancy counter appropriately *before*
  704  * releasing the pmap's lock to ensure no other thread or cpu deallocates
  705  * the bucket/page in the meantime.
  706  */
  707 static struct l2_bucket *
  708 pmap_alloc_l2_bucket(pmap_t pmap, vm_offset_t va)
  709 {
  710         struct l2_dtable *l2;
  711         struct l2_bucket *l2b;
  712         u_short l1idx;
  713 
  714         l1idx = L1_IDX(va);
  715 
  716         PMAP_ASSERT_LOCKED(pmap);
  717         rw_assert(&pvh_global_lock, RA_WLOCKED);
  718         if ((l2 = pmap->pm_l2[L2_IDX(l1idx)]) == NULL) {
  719                 /*
  720                  * No mapping at this address, as there is
  721                  * no entry in the L1 table.
  722                  * Need to allocate a new l2_dtable.
  723                  */
  724                 PMAP_UNLOCK(pmap);
  725                 rw_wunlock(&pvh_global_lock);
  726                 if ((l2 = uma_zalloc(l2table_zone, M_NOWAIT)) == NULL) {
  727                         rw_wlock(&pvh_global_lock);
  728                         PMAP_LOCK(pmap);
  729                         return (NULL);
  730                 }
  731                 rw_wlock(&pvh_global_lock);
  732                 PMAP_LOCK(pmap);
  733                 if (pmap->pm_l2[L2_IDX(l1idx)] != NULL) {
  734                         /*
  735                          * Someone already allocated the l2_dtable while
  736                          * we were doing the same.
  737                          */
  738                         uma_zfree(l2table_zone, l2);
  739                         l2 = pmap->pm_l2[L2_IDX(l1idx)];
  740                 } else {
  741                         bzero(l2, sizeof(*l2));
  742                         /*
  743                          * Link it into the parent pmap
  744                          */
  745                         pmap->pm_l2[L2_IDX(l1idx)] = l2;
  746                 }
  747         }
  748 
  749         l2b = &l2->l2_bucket[L2_BUCKET(l1idx)];
  750 
  751         /*
  752          * Fetch pointer to the L2 page table associated with the address.
  753          */
  754         if (l2b->l2b_kva == NULL) {
  755                 pt_entry_t *ptep;
  756 
  757                 /*
  758                  * No L2 page table has been allocated. Chances are, this
  759                  * is because we just allocated the l2_dtable, above.
  760                  */
  761                 PMAP_UNLOCK(pmap);
  762                 rw_wunlock(&pvh_global_lock);
  763                 ptep = uma_zalloc(l2zone, M_NOWAIT);
  764                 rw_wlock(&pvh_global_lock);
  765                 PMAP_LOCK(pmap);
  766                 if (l2b->l2b_kva != 0) {
  767                         /* We lost the race. */
  768                         uma_zfree(l2zone, ptep);
  769                         return (l2b);
  770                 }
   771                 if (ptep == NULL) {
   772                         /*
   773                          * Oops, no more L2 page tables available at this
   774                          * time. We may need to deallocate the l2_dtable
   775                          * if we allocated a new one above.
   776                          */
   777                         if (l2->l2_occupancy == 0) {
   778                                 pmap->pm_l2[L2_IDX(l1idx)] = NULL;
   779                                 uma_zfree(l2table_zone, l2);
   780                         }
   781                         return (NULL);
   782                 }
   783 
   784                 l2->l2_occupancy++;
   785                 l2b->l2b_kva = ptep;
   786                 l2b->l2b_phys = vtophys(ptep);
   787                 l2b->l2b_l1idx = l1idx;
  788         }
  789 
  790         return (l2b);
  791 }
  792 
  793 static PMAP_INLINE void
  794 pmap_free_l2_ptp(pt_entry_t *l2)
  795 {
  796         uma_zfree(l2zone, l2);
  797 }
  798 /*
  799  * One or more mappings in the specified L2 descriptor table have just been
  800  * invalidated.
  801  *
  802  * Garbage collect the metadata and descriptor table itself if necessary.
  803  *
  804  * The pmap lock must be acquired when this is called (not necessary
  805  * for the kernel pmap).
  806  */
  807 static void
  808 pmap_free_l2_bucket(pmap_t pmap, struct l2_bucket *l2b, u_int count)
  809 {
  810         struct l2_dtable *l2;
  811         pd_entry_t *pl1pd, l1pd;
  812         pt_entry_t *ptep;
  813         u_short l1idx;
  814 
  815 
  816         /*
  817          * Update the bucket's reference count according to how many
  818          * PTEs the caller has just invalidated.
  819          */
  820         l2b->l2b_occupancy -= count;
  821 
  822         /*
  823          * Note:
  824          *
  825          * Level 2 page tables allocated to the kernel pmap are never freed
  826          * as that would require checking all Level 1 page tables and
  827          * removing any references to the Level 2 page table. See also the
  828          * comment elsewhere about never freeing bootstrap L2 descriptors.
  829          *
  830          * We make do with just invalidating the mapping in the L2 table.
  831          *
  832          * This isn't really a big deal in practice and, in fact, leads
  833          * to a performance win over time as we don't need to continually
  834          * alloc/free.
  835          */
  836         if (l2b->l2b_occupancy > 0 || pmap == pmap_kernel())
  837                 return;
  838 
  839         /*
  840          * There are no more valid mappings in this level 2 page table.
  841          * Go ahead and NULL-out the pointer in the bucket, then
  842          * free the page table.
  843          */
  844         l1idx = l2b->l2b_l1idx;
  845         ptep = l2b->l2b_kva;
  846         l2b->l2b_kva = NULL;
  847 
  848         pl1pd = &pmap->pm_l1->l1_kva[l1idx];
  849 
  850         /*
  851          * If the L1 slot matches the pmap's domain
  852          * number, then invalidate it.
  853          */
  854         l1pd = *pl1pd & (L1_TYPE_MASK | L1_C_DOM_MASK);
  855         if (l1pd == (L1_C_DOM(pmap->pm_domain) | L1_TYPE_C)) {
  856                 *pl1pd = 0;
  857                 PTE_SYNC(pl1pd);
  858                 cpu_tlb_flushD_SE((vm_offset_t)ptep);
  859                 cpu_cpwait();
  860         }
  861 
  862         /*
  863          * Release the L2 descriptor table back to the pool cache.
  864          */
  865         pmap_free_l2_ptp(ptep);
  866 
  867         /*
  868          * Update the reference count in the associated l2_dtable
  869          */
  870         l2 = pmap->pm_l2[L2_IDX(l1idx)];
  871         if (--l2->l2_occupancy > 0)
  872                 return;
  873 
  874         /*
  875          * There are no more valid mappings in any of the Level 1
  876          * slots managed by this l2_dtable. Go ahead and NULL-out
  877          * the pointer in the parent pmap and free the l2_dtable.
  878          */
  879         pmap->pm_l2[L2_IDX(l1idx)] = NULL;
  880         uma_zfree(l2table_zone, l2);
  881 }
  882 
  883 /*
  884  * Pool cache constructors for L2 descriptor tables, metadata and pmap
  885  * structures.
  886  */
  887 static int
  888 pmap_l2ptp_ctor(void *mem, int size, void *arg, int flags)
  889 {
  890         struct l2_bucket *l2b;
  891         pt_entry_t *ptep, pte;
  892         vm_offset_t va = (vm_offset_t)mem & ~PAGE_MASK;
  893 
  894         /*
  895          * The mappings for these page tables were initially made using
  896          * pmap_kenter() by the pool subsystem. Therefore, the cache-
  897          * mode will not be right for page table mappings. To avoid
  898          * polluting the pmap_kenter() code with a special case for
  899          * page tables, we simply fix up the cache-mode here if it's not
  900          * correct.
  901          */
  902         l2b = pmap_get_l2_bucket(pmap_kernel(), va);
  903         ptep = &l2b->l2b_kva[l2pte_index(va)];
  904         pte = *ptep;
  905 
  906         cpu_idcache_wbinv_range(va, PAGE_SIZE);
  907         pmap_l2cache_wbinv_range(va, pte & L2_S_FRAME, PAGE_SIZE);
  908         if ((pte & L2_S_CACHE_MASK) != pte_l2_s_cache_mode_pt) {
  909                 /*
  910                  * Page tables must have the cache-mode set to
  911                  * Write-Thru.
  912                  */
  913                 *ptep = (pte & ~L2_S_CACHE_MASK) | pte_l2_s_cache_mode_pt;
  914                 PTE_SYNC(ptep);
  915                 cpu_tlb_flushD_SE(va);
  916                 cpu_cpwait();
  917         }
  918 
  919         memset(mem, 0, L2_TABLE_SIZE_REAL);
  920         return (0);
  921 }
  922 
  923 /*
  924  * Modify pte bits for all ptes corresponding to the given physical address.
  925  * We use `maskbits' rather than `clearbits' because we're always passing
  926  * constants and the latter would require an extra inversion at run-time.
  927  */
  928 static int
  929 pmap_clearbit(struct vm_page *m, u_int maskbits)
  930 {
  931         struct l2_bucket *l2b;
  932         struct pv_entry *pv, *pve, *next_pv;
  933         struct md_page *pvh;
  934         pd_entry_t *pl1pd;
  935         pt_entry_t *ptep, npte, opte;
  936         pmap_t pmap;
  937         vm_offset_t va;
  938         u_int oflags;
  939         int count = 0;
  940 
  941         rw_wlock(&pvh_global_lock);
  942         if ((m->flags & PG_FICTITIOUS) != 0)
  943                 goto small_mappings;
  944 
  945         pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
  946         TAILQ_FOREACH_SAFE(pv, &pvh->pv_list, pv_list, next_pv) {
  947                 va = pv->pv_va;
  948                 pmap = PV_PMAP(pv);
  949                 PMAP_LOCK(pmap);
  950                 pl1pd = &pmap->pm_l1->l1_kva[L1_IDX(va)];
  951                 KASSERT((*pl1pd & L1_TYPE_MASK) == L1_S_PROTO,
  952                     ("pmap_clearbit: valid section mapping expected"));
  953                 if ((maskbits & PVF_WRITE) && (pv->pv_flags & PVF_WRITE))
  954                         (void)pmap_demote_section(pmap, va);
  955                 else if ((maskbits & PVF_REF) && L1_S_REFERENCED(*pl1pd)) {
  956                         if (pmap_demote_section(pmap, va)) {
  957                                 if ((pv->pv_flags & PVF_WIRED) == 0) {
  958                                         /*
  959                                          * Remove the mapping to a single page
  960                                          * so that a subsequent access may
  961                                          * repromote. Since the underlying
  962                                          * l2_bucket is fully populated, this
  963                                          * removal never frees an entire
  964                                          * l2_bucket.
  965                                          */
  966                                         va += (VM_PAGE_TO_PHYS(m) &
  967                                             L1_S_OFFSET);
  968                                         l2b = pmap_get_l2_bucket(pmap, va);
  969                                         KASSERT(l2b != NULL,
  970                                             ("pmap_clearbit: no l2 bucket for "
   971                                              "va 0x%x, pmap %p", va, pmap));
  972                                         ptep = &l2b->l2b_kva[l2pte_index(va)];
  973                                         *ptep = 0;
  974                                         PTE_SYNC(ptep);
  975                                         pmap_free_l2_bucket(pmap, l2b, 1);
  976                                         pve = pmap_remove_pv(m, pmap, va);
  977                                         KASSERT(pve != NULL, ("pmap_clearbit: "
  978                                             "no PV entry for managed mapping"));
  979                                         pmap_free_pv_entry(pmap, pve);
  980 
  981                                 }
  982                         }
  983                 } else if ((maskbits & PVF_MOD) && L1_S_WRITABLE(*pl1pd)) {
  984                         if (pmap_demote_section(pmap, va)) {
  985                                 if ((pv->pv_flags & PVF_WIRED) == 0) {
  986                                         /*
  987                                          * Write protect the mapping to a
  988                                          * single page so that a subsequent
  989                                          * write access may repromote.
  990                                          */
  991                                         va += (VM_PAGE_TO_PHYS(m) &
  992                                             L1_S_OFFSET);
  993                                         l2b = pmap_get_l2_bucket(pmap, va);
  994                                         KASSERT(l2b != NULL,
  995                                             ("pmap_clearbit: no l2 bucket for "
   996                                              "va 0x%x, pmap %p", va, pmap));
  997                                         ptep = &l2b->l2b_kva[l2pte_index(va)];
  998                                         if ((*ptep & L2_S_PROTO) != 0) {
  999                                                 pve = pmap_find_pv(&m->md,
 1000                                                     pmap, va);
 1001                                                 KASSERT(pve != NULL,
 1002                                                     ("pmap_clearbit: no PV "
 1003                                                     "entry for managed mapping"));
 1004                                                 pve->pv_flags &= ~PVF_WRITE;
 1005                                                 *ptep |= L2_APX;
 1006                                                 PTE_SYNC(ptep);
 1007                                         }
 1008                                 }
 1009                         }
 1010                 }
 1011                 PMAP_UNLOCK(pmap);
 1012         }
 1013 
 1014 small_mappings:
 1015         if (TAILQ_EMPTY(&m->md.pv_list)) {
 1016                 rw_wunlock(&pvh_global_lock);
 1017                 return (0);
 1018         }
 1019 
 1020         /*
  1021          * Loop over all current mappings, setting/clearing as appropriate
 1022          */
 1023         TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
 1024                 va = pv->pv_va;
 1025                 pmap = PV_PMAP(pv);
 1026                 oflags = pv->pv_flags;
 1027                 pv->pv_flags &= ~maskbits;
 1028 
 1029                 PMAP_LOCK(pmap);
 1030 
 1031                 l2b = pmap_get_l2_bucket(pmap, va);
 1032                 KASSERT(l2b != NULL, ("pmap_clearbit: no l2 bucket for "
  1033                     "va 0x%x, pmap %p", va, pmap));
 1034 
 1035                 ptep = &l2b->l2b_kva[l2pte_index(va)];
 1036                 npte = opte = *ptep;
 1037 
 1038                 if (maskbits & (PVF_WRITE | PVF_MOD)) {
 1039                         /* make the pte read only */
 1040                         npte |= L2_APX;
 1041                 }
 1042 
 1043                 if (maskbits & PVF_REF) {
 1044                         /*
 1045                          * Clear referenced flag in PTE so that we
 1046                          * will take a flag fault the next time the mapping
 1047                          * is referenced.
 1048                          */
 1049                         npte &= ~L2_S_REF;
 1050                 }
 1051 
 1052                 CTR4(KTR_PMAP,"clearbit: pmap:%p bits:%x pte:%x->%x",
 1053                     pmap, maskbits, opte, npte);
 1054                 if (npte != opte) {
 1055                         count++;
 1056                         *ptep = npte;
 1057                         PTE_SYNC(ptep);
 1058                         /* Flush the TLB entry if a current pmap. */
 1059                         if (PTE_BEEN_EXECD(opte))
 1060                                 cpu_tlb_flushID_SE(pv->pv_va);
 1061                         else if (PTE_BEEN_REFD(opte))
 1062                                 cpu_tlb_flushD_SE(pv->pv_va);
 1063                         cpu_cpwait();
 1064                 }
 1065 
 1066                 PMAP_UNLOCK(pmap);
 1067 
 1068         }
 1069 
 1070         if (maskbits & PVF_WRITE)
 1071                 vm_page_aflag_clear(m, PGA_WRITEABLE);
 1072         rw_wunlock(&pvh_global_lock);
 1073         return (count);
 1074 }
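       /*
        * Editorial usage note (not in the original source): pmap_clearbit()
        * is typically invoked with a mask such as PVF_WRITE (write-protect
        * every mapping of the page, after which PGA_WRITEABLE is cleared
        * above), or PVF_MOD / PVF_REF (clear the modified or referenced
        * state so that the next access faults and re-establishes it).
        */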
 1075 
 1076 /*
 1077  * main pv_entry manipulation functions:
 1078  *   pmap_enter_pv: enter a mapping onto a vm_page list
  1079  *   pmap_remove_pv: remove a mapping from a vm_page list
 1080  *
 1081  * NOTE: pmap_enter_pv expects to lock the pvh itself
 1082  *       pmap_remove_pv expects the caller to lock the pvh before calling
 1083  */
 1084 
 1085 /*
 1086  * pmap_enter_pv: enter a mapping onto a vm_page's PV list
 1087  *
 1088  * => caller should hold the proper lock on pvh_global_lock
 1089  * => caller should have pmap locked
 1090  * => we will (someday) gain the lock on the vm_page's PV list
 1091  * => caller should adjust ptp's wire_count before calling
 1092  * => caller should not adjust pmap's wire_count
 1093  */
 1094 static void
 1095 pmap_enter_pv(struct vm_page *m, struct pv_entry *pve, pmap_t pmap,
 1096     vm_offset_t va, u_int flags)
 1097 {
 1098 
 1099         rw_assert(&pvh_global_lock, RA_WLOCKED);
 1100 
 1101         PMAP_ASSERT_LOCKED(pmap);
 1102         pve->pv_va = va;
 1103         pve->pv_flags = flags;
 1104 
 1105         TAILQ_INSERT_HEAD(&m->md.pv_list, pve, pv_list);
 1106         if (pve->pv_flags & PVF_WIRED)
 1107                 ++pmap->pm_stats.wired_count;
 1108 }
 1109 
 1110 /*
 1111  *
 1112  * pmap_find_pv: Find a pv entry
 1113  *
 1114  * => caller should hold lock on vm_page
 1115  */
 1116 static PMAP_INLINE struct pv_entry *
 1117 pmap_find_pv(struct md_page *md, pmap_t pmap, vm_offset_t va)
 1118 {
 1119         struct pv_entry *pv;
 1120 
 1121         rw_assert(&pvh_global_lock, RA_WLOCKED);
 1122         TAILQ_FOREACH(pv, &md->pv_list, pv_list)
 1123                 if (pmap == PV_PMAP(pv) && va == pv->pv_va)
 1124                         break;
 1125 
 1126         return (pv);
 1127 }
 1128 
 1129 /*
 1130  * vector_page_setprot:
 1131  *
 1132  *      Manipulate the protection of the vector page.
 1133  */
 1134 void
 1135 vector_page_setprot(int prot)
 1136 {
 1137         struct l2_bucket *l2b;
 1138         pt_entry_t *ptep;
 1139 
 1140         l2b = pmap_get_l2_bucket(pmap_kernel(), vector_page);
 1141 
 1142         ptep = &l2b->l2b_kva[l2pte_index(vector_page)];
 1143         /*
  1144          * Set the referenced flag.
  1145          * The vector page should always be allowed
  1146          * to reside in the TLB.
 1147          */
 1148         *ptep |= L2_S_REF;
 1149 
 1150         pmap_set_prot(ptep, prot|VM_PROT_EXECUTE, 0);
 1151         PTE_SYNC(ptep);
 1152         cpu_tlb_flushID_SE(vector_page);
 1153         cpu_cpwait();
 1154 }
 1155 
 1156 static void
 1157 pmap_set_prot(pt_entry_t *ptep, vm_prot_t prot, uint8_t user)
 1158 {
 1159 
 1160         *ptep &= ~(L2_S_PROT_MASK | L2_XN);
 1161 
 1162         if (!(prot & VM_PROT_EXECUTE))
 1163                 *ptep |= L2_XN;
 1164 
 1165         /* Set defaults first - kernel read access */
 1166         *ptep |= L2_APX;
 1167         *ptep |= L2_S_PROT_R;
 1168         /* Now tune APs as desired */
 1169         if (user)
 1170                 *ptep |= L2_S_PROT_U;
 1171 
 1172         if (prot & VM_PROT_WRITE)
 1173                 *ptep &= ~(L2_APX);
 1174 }
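       /*
        * Editorial summary of the encoding produced above (not in the
        * original source): the PTE always gets L2_S_PROT_R plus L2_APX,
        * i.e. read-only by default; VM_PROT_WRITE clears L2_APX to make
        * the mapping writable, user mappings additionally get L2_S_PROT_U,
        * and the absence of VM_PROT_EXECUTE sets L2_XN (execute never).
        */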
 1175 
 1176 /*
 1177  * pmap_remove_pv: try to remove a mapping from a pv_list
 1178  *
 1179  * => caller should hold proper lock on pmap_main_lock
 1180  * => pmap should be locked
 1181  * => caller should hold lock on vm_page [so that attrs can be adjusted]
 1182  * => caller should adjust ptp's wire_count and free PTP if needed
 1183  * => caller should NOT adjust pmap's wire_count
 1184  * => we return the removed pve
 1185  */
 1186 static struct pv_entry *
 1187 pmap_remove_pv(struct vm_page *m, pmap_t pmap, vm_offset_t va)
 1188 {
 1189         struct pv_entry *pve;
 1190 
 1191         rw_assert(&pvh_global_lock, RA_WLOCKED);
 1192         PMAP_ASSERT_LOCKED(pmap);
 1193 
 1194         pve = pmap_find_pv(&m->md, pmap, va);   /* find corresponding pve */
 1195         if (pve != NULL) {
 1196                 TAILQ_REMOVE(&m->md.pv_list, pve, pv_list);
 1197                 if (pve->pv_flags & PVF_WIRED)
 1198                         --pmap->pm_stats.wired_count;
 1199         }
 1200         if (TAILQ_EMPTY(&m->md.pv_list))
 1201                 vm_page_aflag_clear(m, PGA_WRITEABLE);
 1202 
 1203         return(pve);                            /* return removed pve */
 1204 }
 1205 
 1206 /*
 1207  *
 1208  * pmap_modify_pv: Update pv flags
 1209  *
 1210  * => caller should hold lock on vm_page [so that attrs can be adjusted]
 1211  * => caller should NOT adjust pmap's wire_count
 1212  * => we return the old flags
 1213  *
 1214  * Modify a physical-virtual mapping in the pv table
 1215  */
 1216 static u_int
 1217 pmap_modify_pv(struct vm_page *m, pmap_t pmap, vm_offset_t va,
 1218     u_int clr_mask, u_int set_mask)
 1219 {
 1220         struct pv_entry *npv;
 1221         u_int flags, oflags;
 1222 
 1223         PMAP_ASSERT_LOCKED(pmap);
 1224         rw_assert(&pvh_global_lock, RA_WLOCKED);
 1225         if ((npv = pmap_find_pv(&m->md, pmap, va)) == NULL)
 1226                 return (0);
 1227 
 1228         /*
 1229          * There is at least one VA mapping this page.
 1230          */
 1231         oflags = npv->pv_flags;
 1232         npv->pv_flags = flags = (oflags & ~clr_mask) | set_mask;
 1233 
 1234         if ((flags ^ oflags) & PVF_WIRED) {
 1235                 if (flags & PVF_WIRED)
 1236                         ++pmap->pm_stats.wired_count;
 1237                 else
 1238                         --pmap->pm_stats.wired_count;
 1239         }
 1240 
 1241         return (oflags);
 1242 }
 1243 
 1244 /* Function to set the debug level of the pmap code */
 1245 #ifdef PMAP_DEBUG
 1246 void
 1247 pmap_debug(int level)
 1248 {
 1249         pmap_debug_level = level;
 1250         dprintf("pmap_debug: level=%d\n", pmap_debug_level);
 1251 }
 1252 #endif  /* PMAP_DEBUG */
 1253 
 1254 void
 1255 pmap_pinit0(struct pmap *pmap)
 1256 {
 1257         PDEBUG(1, printf("pmap_pinit0: pmap = %08x\n", (u_int32_t) pmap));
 1258 
 1259         bcopy(kernel_pmap, pmap, sizeof(*pmap));
 1260         bzero(&pmap->pm_mtx, sizeof(pmap->pm_mtx));
 1261         PMAP_LOCK_INIT(pmap);
 1262         TAILQ_INIT(&pmap->pm_pvchunk);
 1263 }
 1264 
 1265 /*
 1266  *      Initialize a vm_page's machine-dependent fields.
 1267  */
 1268 void
 1269 pmap_page_init(vm_page_t m)
 1270 {
 1271 
 1272         TAILQ_INIT(&m->md.pv_list);
 1273         m->md.pv_memattr = VM_MEMATTR_DEFAULT;
 1274 }
 1275 
 1276 static vm_offset_t
 1277 pmap_ptelist_alloc(vm_offset_t *head)
 1278 {
 1279         pt_entry_t *pte;
 1280         vm_offset_t va;
 1281 
 1282         va = *head;
 1283         if (va == 0)
 1284                 return (va);    /* Out of memory */
 1285         pte = vtopte(va);
 1286         *head = *pte;
 1287         if ((*head & L2_TYPE_MASK) != L2_TYPE_INV)
 1288                 panic("%s: va is not L2_TYPE_INV!", __func__);
 1289         *pte = 0;
 1290         return (va);
 1291 }
 1292 
 1293 static void
 1294 pmap_ptelist_free(vm_offset_t *head, vm_offset_t va)
 1295 {
 1296         pt_entry_t *pte;
 1297 
 1298         if ((va & L2_TYPE_MASK) != L2_TYPE_INV)
  1299                 panic("%s: freeing va that is not L2_TYPE_INV!", __func__);
 1300         pte = vtopte(va);
 1301         *pte = *head;           /* virtual! L2_TYPE is L2_TYPE_INV though */
 1302         *head = va;
 1303 }
 1304 
 1305 static void
 1306 pmap_ptelist_init(vm_offset_t *head, void *base, int npages)
 1307 {
 1308         int i;
 1309         vm_offset_t va;
 1310 
 1311         *head = 0;
 1312         for (i = npages - 1; i >= 0; i--) {
 1313                 va = (vm_offset_t)base + i * PAGE_SIZE;
 1314                 pmap_ptelist_free(head, va);
 1315         }
 1316 }
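       /*
        * Editorial note (not in the original source): the pmap_ptelist_*()
        * helpers keep a free list of page-sized KVA slots threaded through
        * the page-table entries themselves ("Freelist stored in the PTE"
        * above): a free slot's PTE holds the VA of the next free slot
        * rather than a physical mapping, and its type bits remain
        * L2_TYPE_INV, which both helpers sanity-check with panics.
        * pmap_ptelist_alloc() pops the head and zeroes the PTE;
        * pmap_ptelist_free() pushes a slot back.
        */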
 1317 
 1318 /*
 1319  *      Initialize the pmap module.
 1320  *      Called by vm_init, to initialize any structures that the pmap
 1321  *      system needs to map virtual memory.
 1322  */
 1323 void
 1324 pmap_init(void)
 1325 {
 1326         vm_size_t s;
 1327         int i, pv_npg;
 1328 
 1329         l2zone = uma_zcreate("L2 Table", L2_TABLE_SIZE_REAL, pmap_l2ptp_ctor,
 1330             NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_VM | UMA_ZONE_NOFREE);
 1331         l2table_zone = uma_zcreate("L2 Table", sizeof(struct l2_dtable), NULL,
 1332             NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_VM | UMA_ZONE_NOFREE);
 1333 
 1334         /*
 1335          * Are large page mappings supported and enabled?
 1336          */
 1337         TUNABLE_INT_FETCH("vm.pmap.sp_enabled", &sp_enabled);
 1338         if (sp_enabled) {
 1339                 KASSERT(MAXPAGESIZES > 1 && pagesizes[1] == 0,
 1340                     ("pmap_init: can't assign to pagesizes[1]"));
 1341                 pagesizes[1] = NBPDR;
 1342         }
 1343 
 1344         /*
 1345          * Calculate the size of the pv head table for superpages.
 1346          * Handle the possibility that "vm_phys_segs[...].end" is zero.
 1347          */
 1348         pv_npg = trunc_1mpage(vm_phys_segs[vm_phys_nsegs - 1].end -
 1349             PAGE_SIZE) / NBPDR + 1;
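               /*
                * Editorial note (assuming NBPDR is the 1MB section size on
                * ARM): pv_npg is the number of 1MB-aligned regions needed
                * to cover physical memory up to the end of the last
                * vm_phys segment; one md_page per region backs the
                * pa_to_pvh() lookups used for section mappings.
                */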
 1350 
 1351         /*
 1352          * Allocate memory for the pv head table for superpages.
 1353          */
 1354         s = (vm_size_t)(pv_npg * sizeof(struct md_page));
 1355         s = round_page(s);
 1356         pv_table = (struct md_page *)kmem_malloc(kernel_arena, s,
 1357             M_WAITOK | M_ZERO);
 1358         for (i = 0; i < pv_npg; i++)
 1359                 TAILQ_INIT(&pv_table[i].pv_list);
 1360 
 1361         /*
 1362          * Initialize the address space for the pv chunks.
 1363          */
 1364 
 1365         TUNABLE_INT_FETCH("vm.pmap.shpgperproc", &shpgperproc);
 1366         pv_entry_max = shpgperproc * maxproc + cnt.v_page_count;
 1367         TUNABLE_INT_FETCH("vm.pmap.pv_entries", &pv_entry_max);
 1368         pv_entry_max = roundup(pv_entry_max, _NPCPV);
 1369         pv_entry_high_water = 9 * (pv_entry_max / 10);
 1370 
 1371         pv_maxchunks = MAX(pv_entry_max / _NPCPV, maxproc);
 1372         pv_chunkbase = (struct pv_chunk *)kva_alloc(PAGE_SIZE * pv_maxchunks);
 1373 
 1374         if (pv_chunkbase == NULL)
 1375                 panic("pmap_init: not enough kvm for pv chunks");
 1376 
 1377         pmap_ptelist_init(&pv_vafree, pv_chunkbase, pv_maxchunks);
 1378 
 1379         /*
 1380          * Now it is safe to enable pv_table recording.
 1381          */
 1382         PDEBUG(1, printf("pmap_init: done!\n"));
 1383 }
 1384 
 1385 SYSCTL_INT(_vm_pmap, OID_AUTO, pv_entry_max, CTLFLAG_RD, &pv_entry_max, 0,
 1386         "Max number of PV entries");
 1387 SYSCTL_INT(_vm_pmap, OID_AUTO, shpgperproc, CTLFLAG_RD, &shpgperproc, 0,
 1388         "Page share factor per proc");
 1389 
 1390 static SYSCTL_NODE(_vm_pmap, OID_AUTO, section, CTLFLAG_RD, 0,
 1391     "1MB page mapping counters");
 1392 
 1393 static u_long pmap_section_demotions;
 1394 SYSCTL_ULONG(_vm_pmap_section, OID_AUTO, demotions, CTLFLAG_RD,
 1395     &pmap_section_demotions, 0, "1MB page demotions");
 1396 
 1397 static u_long pmap_section_mappings;
 1398 SYSCTL_ULONG(_vm_pmap_section, OID_AUTO, mappings, CTLFLAG_RD,
 1399     &pmap_section_mappings, 0, "1MB page mappings");
 1400 
 1401 static u_long pmap_section_p_failures;
 1402 SYSCTL_ULONG(_vm_pmap_section, OID_AUTO, p_failures, CTLFLAG_RD,
 1403     &pmap_section_p_failures, 0, "1MB page promotion failures");
 1404 
 1405 static u_long pmap_section_promotions;
 1406 SYSCTL_ULONG(_vm_pmap_section, OID_AUTO, promotions, CTLFLAG_RD,
 1407     &pmap_section_promotions, 0, "1MB page promotions");
 1408 
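/*
 * Emulate the referenced/modified bits for a faulting mapping and repair
 * any stale L1 entry covering it.  Returns non-zero if the fault was
 * handled here; a zero return means the caller should fall back to
 * vm_fault().
 */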
 1409 int
 1410 pmap_fault_fixup(pmap_t pmap, vm_offset_t va, vm_prot_t ftype, int user)
 1411 {
 1412         struct l2_dtable *l2;
 1413         struct l2_bucket *l2b;
 1414         pd_entry_t *pl1pd, l1pd;
 1415         pt_entry_t *ptep, pte;
 1416         vm_paddr_t pa;
 1417         u_int l1idx;
 1418         int rv = 0;
 1419 
 1420         l1idx = L1_IDX(va);
 1421         rw_wlock(&pvh_global_lock);
 1422         PMAP_LOCK(pmap);
 1423         /*
 1424          * Check for and possibly fix up an L1 section mapping, but only
 1425          * when superpage mappings are enabled, to avoid needless overhead.
 1426          */
 1427         if (sp_enabled) {
 1428                 pl1pd = &pmap->pm_l1->l1_kva[l1idx];
 1429                 l1pd = *pl1pd;
 1430                 if ((l1pd & L1_TYPE_MASK) == L1_S_PROTO) {
 1431                         /* Catch an access to the vectors section */
 1432                         if (l1idx == L1_IDX(vector_page))
 1433                                 goto out;
 1434                         /*
 1435                          * Stay away from the kernel mappings.
 1436                          * None of them should fault from an L1 entry.
 1437                          */
 1438                         if (pmap == pmap_kernel())
 1439                                 goto out;
 1440                         /*
 1441                          * Catch a forbidden userland access
 1442                          */
 1443                         if (user && !(l1pd & L1_S_PROT_U))
 1444                                 goto out;
 1445                         /*
 1446                          * A superpage is always either mapped read-only, or
 1447                          * it is already modified and permitted to be written.
 1448                          * Therefore, handle only reference-flag faults here
 1449                          * and demote the superpage on a write fault.
 1450                          */
 1451                         if ((ftype & VM_PROT_WRITE) && !L1_S_WRITABLE(l1pd) &&
 1452                             L1_S_REFERENCED(l1pd)) {
 1453                                 (void)pmap_demote_section(pmap, va);
 1454                                 goto out;
 1455                         } else if (!L1_S_REFERENCED(l1pd)) {
 1456                                 /* Mark the page "referenced" */
 1457                                 *pl1pd = l1pd | L1_S_REF;
 1458                                 PTE_SYNC(pl1pd);
 1459                                 goto l1_section_out;
 1460                         } else
 1461                                 goto out;
 1462                 }
 1463         }
 1464         /*
 1465          * If there is no l2_dtable for this address, then the process
 1466          * has no business accessing it.
 1467          *
 1468          * Note: This will catch userland processes trying to access
 1469          * kernel addresses.
 1470          */
 1471         l2 = pmap->pm_l2[L2_IDX(l1idx)];
 1472         if (l2 == NULL)
 1473                 goto out;
 1474 
 1475         /*
 1476          * Likewise if there is no L2 descriptor table
 1477          */
 1478         l2b = &l2->l2_bucket[L2_BUCKET(l1idx)];
 1479         if (l2b->l2b_kva == NULL)
 1480                 goto out;
 1481 
 1482         /*
 1483          * Check the PTE itself.
 1484          */
 1485         ptep = &l2b->l2b_kva[l2pte_index(va)];
 1486         pte = *ptep;
 1487         if (pte == 0)
 1488                 goto out;
 1489 
 1490         /*
 1491          * Catch a userland access to the vector page mapped at 0x0
 1492          */
 1493         if (user && !(pte & L2_S_PROT_U))
 1494                 goto out;
 1495         if (va == vector_page)
 1496                 goto out;
 1497 
 1498         pa = l2pte_pa(pte);
 1499         CTR5(KTR_PMAP, "pmap_fault_fix: pmap:%p va:%x pte:0x%x ftype:%x user:%x",
 1500             pmap, va, pte, ftype, user);
 1501         if ((ftype & VM_PROT_WRITE) && !(L2_S_WRITABLE(pte)) &&
 1502             L2_S_REFERENCED(pte)) {
 1503                 /*
 1504                  * This looks like a good candidate for "page modified"
 1505                  * emulation...
 1506                  */
 1507                 struct pv_entry *pv;
 1508                 struct vm_page *m;
 1509 
 1510                 /* Extract the physical address of the page */
 1511                 if ((m = PHYS_TO_VM_PAGE(pa)) == NULL) {
 1512                         goto out;
 1513                 }
 1514                 /* Get the current flags for this page. */
 1515 
 1516                 pv = pmap_find_pv(&m->md, pmap, va);
 1517                 if (pv == NULL) {
 1518                         goto out;
 1519                 }
 1520 
 1521                 /*
 1522                  * Do the flags say this page is writable? If not then it
 1523                  * is a genuine write fault. If yes then the write fault is
 1524                  * our fault as we did not reflect the write access in the
 1525                  * PTE. Now that we know a write has occurred, we can
 1526                  * correct this and also set the modified bit.
 1527                  */
 1528                 if ((pv->pv_flags & PVF_WRITE) == 0) {
 1529                         goto out;
 1530                 }
 1531 
 1532                 vm_page_dirty(m);
 1533 
 1534                 /* Re-enable write permissions for the page */
 1535                 *ptep = (pte & ~L2_APX);
 1536                 PTE_SYNC(ptep);
 1537                 rv = 1;
 1538                 CTR1(KTR_PMAP, "pmap_fault_fix: new pte:0x%x", *ptep);
 1539         } else if (!L2_S_REFERENCED(pte)) {
 1540                 /*
 1541                  * This looks like a good candidate for "page referenced"
 1542                  * emulation.
 1543                  */
 1544                 struct pv_entry *pv;
 1545                 struct vm_page *m;
 1546 
 1547                 /* Extract the physical address of the page */
 1548                 if ((m = PHYS_TO_VM_PAGE(pa)) == NULL)
 1549                         goto out;
 1550                 /* Get the current flags for this page. */
 1551                 pv = pmap_find_pv(&m->md, pmap, va);
 1552                 if (pv == NULL)
 1553                         goto out;
 1554 
 1555                 vm_page_aflag_set(m, PGA_REFERENCED);
 1556 
 1557                 /* Mark the page "referenced" */
 1558                 *ptep = pte | L2_S_REF;
 1559                 PTE_SYNC(ptep);
 1560                 rv = 1;
 1561                 CTR1(KTR_PMAP, "pmap_fault_fix: new pte:0x%x", *ptep);
 1562         }
 1563 
 1564         /*
 1565          * We know there is a valid mapping here, so simply
 1566          * fix up the L1 if necessary.
 1567          */
 1568         pl1pd = &pmap->pm_l1->l1_kva[l1idx];
 1569         l1pd = l2b->l2b_phys | L1_C_DOM(pmap->pm_domain) | L1_C_PROTO;
 1570         if (*pl1pd != l1pd) {
 1571                 *pl1pd = l1pd;
 1572                 PTE_SYNC(pl1pd);
 1573                 rv = 1;
 1574         }
 1575 
 1576 #ifdef DEBUG
 1577         /*
 1578          * If 'rv == 0' at this point, it generally indicates that there is a
 1579          * stale TLB entry for the faulting address. This happens when two or
 1580          * more processes are sharing an L1. Since we don't flush the TLB on
 1581          * a context switch between such processes, we can take domain faults
 1582          * for mappings which exist at the same VA in both processes, even if
 1583          * we have recently fixed up the corresponding L1 entry, for example
 1584          * in pmap_enter().
 1585          *
 1586          * This is extremely likely to happen if pmap_enter() updated the L1
 1587          * entry for a recently entered mapping. In this case, the TLB is
 1588          * flushed for the new mapping, but there may still be TLB entries for
 1589          * other mappings belonging to other processes in the 1MB range
 1590          * covered by the L1 entry.
 1591          *
 1592          * Since 'rv == 0', we know that the L1 already contains the correct
 1593          * value, so the fault must be due to a stale TLB entry.
 1594          *
 1595          * Since we always need to flush the TLB anyway in the case where we
 1596          * fixed up the L1, or frobbed the L2 PTE, we effectively deal with
 1597          * stale TLB entries dynamically.
 1598          *
 1599          * However, the above condition can ONLY happen if the current L1 is
 1600          * being shared. If it happens when the L1 is unshared, it indicates
 1601          * that other parts of the pmap are not doing their job WRT managing
 1602          * the TLB.
 1603          */
 1604         if (rv == 0 && pmap->pm_l1->l1_domain_use_count == 1) {
 1605                 printf("fixup: pmap %p, va 0x%08x, ftype %d - nothing to do!\n",
 1606                     pmap, va, ftype);
 1607                 printf("fixup: l2 %p, l2b %p, ptep %p, pl1pd %p\n",
 1608                     l2, l2b, ptep, pl1pd);
 1609                 printf("fixup: pte 0x%x, l1pd 0x%x, last code 0x%x\n",
 1610                     pte, l1pd, last_fault_code);
 1611 #ifdef DDB
 1612                 Debugger();
 1613 #endif
 1614         }
 1615 #endif
 1616 
 1617 l1_section_out:
 1618         cpu_tlb_flushID_SE(va);
 1619         cpu_cpwait();
 1620 
 1621         rv = 1;
 1622 
 1623 out:
 1624         rw_wunlock(&pvh_global_lock);
 1625         PMAP_UNLOCK(pmap);
 1626         return (rv);
 1627 }
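/*
 * Illustrative sketch only (not part of this file): a data-abort path
 * would normally try pmap_fault_fixup() first and hand the fault to
 * vm_fault() only when the fixup fails.  The helper name and structure
 * below are hypothetical; the real logic lives in the ARM trap handlers.
 */
#if 0
static int
pmap_abort_fixup_sketch(struct thread *td, vm_offset_t far, vm_prot_t ftype,
    int usermode)
{
	pmap_t pmap = vmspace_pmap(td->td_proc->p_vmspace);

	/* Fast path: reference/modify emulation resolved the fault. */
	if (pmap_fault_fixup(pmap, far, ftype, usermode))
		return (0);

	/* Slow path: let the VM system resolve (or reject) the fault. */
	return (vm_fault(&td->td_proc->p_vmspace->vm_map, trunc_page(far),
	    ftype, VM_FAULT_NORMAL));
}
#endif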
 1628 
 1629 void
 1630 pmap_postinit(void)
 1631 {
 1632         struct l2_bucket *l2b;
 1633         struct l1_ttable *l1;
 1634         pd_entry_t *pl1pt;
 1635         pt_entry_t *ptep, pte;
 1636         vm_offset_t va, eva;
 1637         u_int loop, needed;
 1638 
 1639         needed = (maxproc / PMAP_DOMAINS) + ((maxproc % PMAP_DOMAINS) ? 1 : 0);
 1640         needed -= 1;
 1641         l1 = malloc(sizeof(*l1) * needed, M_VMPMAP, M_WAITOK);
 1642 
 1643         for (loop = 0; loop < needed; loop++, l1++) {
 1644                 /* Allocate a L1 page table */
 1645                 va = (vm_offset_t)contigmalloc(L1_TABLE_SIZE, M_VMPMAP, 0, 0x0,
 1646                     0xffffffff, L1_TABLE_SIZE, 0);
 1647 
 1648                 if (va == 0)
 1649                         panic("Cannot allocate L1 KVM");
 1650 
 1651                 eva = va + L1_TABLE_SIZE;
 1652                 pl1pt = (pd_entry_t *)va;
 1653 
 1654                 while (va < eva) {
 1655                         l2b = pmap_get_l2_bucket(pmap_kernel(), va);
 1656                         ptep = &l2b->l2b_kva[l2pte_index(va)];
 1657                         pte = *ptep;
 1658                         pte = (pte & ~L2_S_CACHE_MASK) | pte_l2_s_cache_mode_pt;
 1659                         *ptep = pte;
 1660                         PTE_SYNC(ptep);
 1661                         cpu_tlb_flushID_SE(va);
 1662                         cpu_cpwait();
 1663                         va += PAGE_SIZE;
 1664                 }
 1665                 pmap_init_l1(l1, pl1pt);
 1666         }
 1667 #ifdef DEBUG
 1668         printf("pmap_postinit: Allocated %d static L1 descriptor tables\n",
 1669             needed);
 1670 #endif
 1671 }
 1672 
 1673 /*
 1674  * This is used to stuff certain critical values into the PCB where they
 1675  * can be accessed quickly from cpu_switch() et al.
 1676  */
 1677 void
 1678 pmap_set_pcb_pagedir(pmap_t pmap, struct pcb *pcb)
 1679 {
 1680         struct l2_bucket *l2b;
 1681 
 1682         pcb->pcb_pagedir = pmap->pm_l1->l1_physaddr;
 1683         pcb->pcb_dacr = (DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL * 2)) |
 1684             (DOMAIN_CLIENT << (pmap->pm_domain * 2));
 1685 
 1686         if (vector_page < KERNBASE) {
 1687                 pcb->pcb_pl1vec = &pmap->pm_l1->l1_kva[L1_IDX(vector_page)];
 1688                 l2b = pmap_get_l2_bucket(pmap, vector_page);
 1689                 pcb->pcb_l1vec = l2b->l2b_phys | L1_C_PROTO |
 1690                     L1_C_DOM(pmap->pm_domain) | L1_C_DOM(PMAP_DOMAIN_KERNEL);
 1691         } else
 1692                 pcb->pcb_pl1vec = NULL;
 1693 }
 1694 
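/*
 * Activate the address space of the given thread: record the L1 physical
 * address and DACR in the PCB and, when the thread is curthread and the
 * values actually change, reload the TTB and DACR registers.
 */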
 1695 void
 1696 pmap_activate(struct thread *td)
 1697 {
 1698         pmap_t pmap;
 1699         struct pcb *pcb;
 1700 
 1701         pmap = vmspace_pmap(td->td_proc->p_vmspace);
 1702         pcb = td->td_pcb;
 1703 
 1704         critical_enter();
 1705         pmap_set_pcb_pagedir(pmap, pcb);
 1706 
 1707         if (td == curthread) {
 1708                 u_int cur_dacr, cur_ttb;
 1709 
 1710                 __asm __volatile("mrc p15, 0, %0, c2, c0, 0" : "=r"(cur_ttb));
 1711                 __asm __volatile("mrc p15, 0, %0, c3, c0, 0" : "=r"(cur_dacr));
 1712 
 1713                 cur_ttb &= ~(L1_TABLE_SIZE - 1);
 1714 
 1715                 if (cur_ttb == (u_int)pcb->pcb_pagedir &&
 1716                     cur_dacr == pcb->pcb_dacr) {
 1717                         /*
 1718                          * No need to switch address spaces.
 1719                          */
 1720                         critical_exit();
 1721                         return;
 1722                 }
 1723 
 1724 
 1725                 /*
 1726                  * We MUST, I repeat, MUST fix up the L1 entry corresponding
 1727                  * to 'vector_page' in the incoming L1 table before switching
 1728                  * to it otherwise subsequent interrupts/exceptions (including
 1729                  * domain faults!) will jump into hyperspace.
 1730                  */
 1731                 if (pcb->pcb_pl1vec) {
 1732                         *pcb->pcb_pl1vec = pcb->pcb_l1vec;
 1733                 }
 1734 
 1735                 cpu_domains(pcb->pcb_dacr);
 1736                 cpu_setttb(pcb->pcb_pagedir);
 1737         }
 1738         critical_exit();
 1739 }
 1740 
 1741 static int
 1742 pmap_set_pt_cache_mode(pd_entry_t *kl1, vm_offset_t va)
 1743 {
 1744         pd_entry_t *pdep, pde;
 1745         pt_entry_t *ptep, pte;
 1746         vm_offset_t pa;
 1747         int rv = 0;
 1748 
 1749         /*
 1750          * Make sure the descriptor itself has the correct cache mode
 1751          */
 1752         pdep = &kl1[L1_IDX(va)];
 1753         pde = *pdep;
 1754 
 1755         if (l1pte_section_p(pde)) {
 1756                 if ((pde & L1_S_CACHE_MASK) != pte_l1_s_cache_mode_pt) {
 1757                         *pdep = (pde & ~L1_S_CACHE_MASK) |
 1758                             pte_l1_s_cache_mode_pt;
 1759                         PTE_SYNC(pdep);
 1760                         rv = 1;
 1761                 }
 1762         } else {
 1763                 pa = (vm_paddr_t)(pde & L1_C_ADDR_MASK);
 1764                 ptep = (pt_entry_t *)kernel_pt_lookup(pa);
 1765                 if (ptep == NULL)
 1766                         panic("pmap_bootstrap: No L2 for L2 @ va %p\n", ptep);
 1767 
 1768                 ptep = &ptep[l2pte_index(va)];
 1769                 pte = *ptep;
 1770                 if ((pte & L2_S_CACHE_MASK) != pte_l2_s_cache_mode_pt) {
 1771                         *ptep = (pte & ~L2_S_CACHE_MASK) |
 1772                             pte_l2_s_cache_mode_pt;
 1773                         PTE_SYNC(ptep);
 1774                         rv = 1;
 1775                 }
 1776         }
 1777 
 1778         return (rv);
 1779 }
 1780 
 1781 static void
 1782 pmap_alloc_specials(vm_offset_t *availp, int pages, vm_offset_t *vap,
 1783     pt_entry_t **ptep)
 1784 {
 1785         vm_offset_t va = *availp;
 1786         struct l2_bucket *l2b;
 1787 
 1788         if (ptep) {
 1789                 l2b = pmap_get_l2_bucket(pmap_kernel(), va);
 1790                 if (l2b == NULL)
 1791                         panic("pmap_alloc_specials: no l2b for 0x%x", va);
 1792 
 1793                 *ptep = &l2b->l2b_kva[l2pte_index(va)];
 1794         }
 1795 
 1796         *vap = va;
 1797         *availp = va + (PAGE_SIZE * pages);
 1798 }
 1799 
 1800 /*
 1801  *      Bootstrap the system enough to run with virtual memory.
 1802  *
 1803  *      On the arm this is called after mapping has already been enabled
 1804  *      and just syncs the pmap module with what has already been done.
 1805  *      [We can't call it easily with mapping off since the kernel is not
 1806  *      mapped with PA == VA, hence we would have to relocate every address
 1807  *      from the linked base (virtual) address "KERNBASE" to the actual
 1808  *      (physical) address starting relative to 0]
 1809  */
 1810 #define PMAP_STATIC_L2_SIZE 16
 1811 
 1812 void
 1813 pmap_bootstrap(vm_offset_t firstaddr, struct pv_addr *l1pt)
 1814 {
 1815         static struct l1_ttable static_l1;
 1816         static struct l2_dtable static_l2[PMAP_STATIC_L2_SIZE];
 1817         struct l1_ttable *l1 = &static_l1;
 1818         struct l2_dtable *l2;
 1819         struct l2_bucket *l2b;
 1820         struct czpages *czp;
 1821         pd_entry_t pde;
 1822         pd_entry_t *kernel_l1pt = (pd_entry_t *)l1pt->pv_va;
 1823         pt_entry_t *ptep;
 1824         vm_paddr_t pa;
 1825         vm_offset_t va;
 1826         vm_size_t size;
 1827         int i, l1idx, l2idx, l2next = 0;
 1828 
 1829         PDEBUG(1, printf("firstaddr = %08x, lastaddr = %08x\n",
 1830             firstaddr, vm_max_kernel_address));
 1831 
 1832         virtual_avail = firstaddr;
 1833         kernel_pmap->pm_l1 = l1;
 1834         kernel_l1pa = l1pt->pv_pa;
 1835 
 1836         /*
 1837          * Scan the L1 translation table created by initarm() and create
 1838          * the required metadata for all valid mappings found in it.
 1839          */
 1840         for (l1idx = 0; l1idx < (L1_TABLE_SIZE / sizeof(pd_entry_t)); l1idx++) {
 1841                 pde = kernel_l1pt[l1idx];
 1842 
 1843                 /*
 1844                  * We're only interested in Coarse mappings.
 1845                  * pmap_extract() can deal with section mappings without
 1846                  * recourse to checking L2 metadata.
 1847                  */
 1848                 if ((pde & L1_TYPE_MASK) != L1_TYPE_C)
 1849                         continue;
 1850 
 1851                 /*
 1852                  * Lookup the KVA of this L2 descriptor table
 1853                  */
 1854                 pa = (vm_paddr_t)(pde & L1_C_ADDR_MASK);
 1855                 ptep = (pt_entry_t *)kernel_pt_lookup(pa);
 1856 
 1857                 if (ptep == NULL) {
 1858                         panic("pmap_bootstrap: No L2 for va 0x%x, pa 0x%lx",
 1859                             (u_int)l1idx << L1_S_SHIFT, (long unsigned int)pa);
 1860                 }
 1861 
 1862                 /*
 1863                  * Fetch the associated L2 metadata structure.
 1864                  * Allocate a new one if necessary.
 1865                  */
 1866                 if ((l2 = kernel_pmap->pm_l2[L2_IDX(l1idx)]) == NULL) {
 1867                         if (l2next == PMAP_STATIC_L2_SIZE)
 1868                                 panic("pmap_bootstrap: out of static L2s");
 1869                         kernel_pmap->pm_l2[L2_IDX(l1idx)] = l2 =
 1870                             &static_l2[l2next++];
 1871                 }
 1872 
 1873                 /*
 1874                  * One more L1 slot tracked...
 1875                  */
 1876                 l2->l2_occupancy++;
 1877 
 1878                 /*
 1879                  * Fill in the details of the L2 descriptor in the
 1880                  * appropriate bucket.
 1881                  */
 1882                 l2b = &l2->l2_bucket[L2_BUCKET(l1idx)];
 1883                 l2b->l2b_kva = ptep;
 1884                 l2b->l2b_phys = pa;
 1885                 l2b->l2b_l1idx = l1idx;
 1886 
 1887                 /*
 1888                  * Establish an initial occupancy count for this descriptor
 1889                  */
 1890                 for (l2idx = 0;
 1891                     l2idx < (L2_TABLE_SIZE_REAL / sizeof(pt_entry_t));
 1892                     l2idx++) {
 1893                         if ((ptep[l2idx] & L2_TYPE_MASK) != L2_TYPE_INV) {
 1894                                 l2b->l2b_occupancy++;
 1895                         }
 1896                 }
 1897 
 1898                 /*
 1899                  * Make sure the descriptor itself has the correct cache mode.
 1900                  * If not, fix it, but whine about the problem. Port-meisters
 1901                  * should consider this a clue to fix up their initarm()
 1902                  * function. :)
 1903                  */
 1904                 if (pmap_set_pt_cache_mode(kernel_l1pt, (vm_offset_t)ptep)) {
 1905                         printf("pmap_bootstrap: WARNING! wrong cache mode for "
 1906                             "L2 pte @ %p\n", ptep);
 1907                 }
 1908         }
 1909 
 1910 
 1911         /*
 1912          * Ensure the primary (kernel) L1 has the correct cache mode for
 1913          * a page table. Bitch if it is not correctly set.
 1914          */
 1915         for (va = (vm_offset_t)kernel_l1pt;
 1916             va < ((vm_offset_t)kernel_l1pt + L1_TABLE_SIZE); va += PAGE_SIZE) {
 1917                 if (pmap_set_pt_cache_mode(kernel_l1pt, va))
 1918                         printf("pmap_bootstrap: WARNING! wrong cache mode for "
 1919                             "primary L1 @ 0x%x\n", va);
 1920         }
 1921 
 1922         cpu_dcache_wbinv_all();
 1923         cpu_l2cache_wbinv_all();
 1924         cpu_tlb_flushID();
 1925         cpu_cpwait();
 1926 
 1927         PMAP_LOCK_INIT(kernel_pmap);
 1928         CPU_FILL(&kernel_pmap->pm_active);
 1929         kernel_pmap->pm_domain = PMAP_DOMAIN_KERNEL;
 1930         TAILQ_INIT(&kernel_pmap->pm_pvchunk);
 1931 
 1932         /*
 1933          * Initialize the global pv list lock.
 1934          */
 1935         rw_init(&pvh_global_lock, "pmap pv global");
 1936 
 1937         /*
 1938          * Reserve some special page table entries/VA space for temporary
 1939          * mapping of pages that are being copied or zeroed.
 1940          */
 1941         for (czp = cpu_czpages, i = 0; i < MAXCPU; ++i, ++czp) {
 1942                 mtx_init(&czp->lock, "czpages", NULL, MTX_DEF);
 1943                 pmap_alloc_specials(&virtual_avail, 1, &czp->srcva, &czp->srcptep);
 1944                 pmap_set_pt_cache_mode(kernel_l1pt, (vm_offset_t)czp->srcptep);
 1945                 pmap_alloc_specials(&virtual_avail, 1, &czp->dstva, &czp->dstptep);
 1946                 pmap_set_pt_cache_mode(kernel_l1pt, (vm_offset_t)czp->dstptep);
 1947         }
 1948 
 1949         size = ((vm_max_kernel_address - pmap_curmaxkvaddr) + L1_S_OFFSET) /
 1950             L1_S_SIZE;
 1951         pmap_alloc_specials(&virtual_avail,
 1952             round_page(size * L2_TABLE_SIZE_REAL) / PAGE_SIZE,
 1953             &pmap_kernel_l2ptp_kva, NULL);
 1954 
 1955         size = (size + (L2_BUCKET_SIZE - 1)) / L2_BUCKET_SIZE;
 1956         pmap_alloc_specials(&virtual_avail,
 1957             round_page(size * sizeof(struct l2_dtable)) / PAGE_SIZE,
 1958             &pmap_kernel_l2dtable_kva, NULL);
 1959 
 1960         pmap_alloc_specials(&virtual_avail,
 1961             1, (vm_offset_t*)&_tmppt, NULL);
 1962         pmap_alloc_specials(&virtual_avail,
 1963             MAXDUMPPGS, (vm_offset_t *)&crashdumpmap, NULL);
 1964         SLIST_INIT(&l1_list);
 1965         TAILQ_INIT(&l1_lru_list);
 1966         mtx_init(&l1_lru_lock, "l1 list lock", NULL, MTX_DEF);
 1967         pmap_init_l1(l1, kernel_l1pt);
 1968         cpu_dcache_wbinv_all();
 1969         cpu_l2cache_wbinv_all();
 1970         cpu_tlb_flushID();
 1971         cpu_cpwait();
 1972 
 1973         virtual_avail = round_page(virtual_avail);
 1974         virtual_end = vm_max_kernel_address;
 1975         kernel_vm_end = pmap_curmaxkvaddr;
 1976 
 1977         pmap_set_pcb_pagedir(kernel_pmap, thread0.td_pcb);
 1978 }
 1979 
 1980 /***************************************************
 1981  * Pmap allocation/deallocation routines.
 1982  ***************************************************/
 1983 
 1984 /*
 1985  * Release any resources held by the given physical map.
 1986  * Called when a pmap initialized by pmap_pinit is being released.
 1987  * Should only be called if the map contains no valid mappings.
 1988  */
 1989 void
 1990 pmap_release(pmap_t pmap)
 1991 {
 1992         struct pcb *pcb;
 1993 
 1994         cpu_tlb_flushID();
 1995         cpu_cpwait();
 1996         if (vector_page < KERNBASE) {
 1997                 struct pcb *curpcb = PCPU_GET(curpcb);
 1998                 pcb = thread0.td_pcb;
 1999                 if (pmap_is_current(pmap)) {
 2000                         /*
 2001                          * Frob the L1 entry corresponding to the vector
 2002                          * page so that it contains the kernel pmap's domain
 2003                          * number. This will ensure pmap_remove() does not
 2004                          * pull the current vector page out from under us.
 2005                          */
 2006                         critical_enter();
 2007                         *pcb->pcb_pl1vec = pcb->pcb_l1vec;
 2008                         cpu_domains(pcb->pcb_dacr);
 2009                         cpu_setttb(pcb->pcb_pagedir);
 2010                         critical_exit();
 2011                 }
 2012                 pmap_remove(pmap, vector_page, vector_page + PAGE_SIZE);
 2013                 /*
 2014                  * Make sure cpu_switch(), et al, DTRT. This is safe to do
 2015                  * since this process has no remaining mappings of its own.
 2016                  */
 2017                 curpcb->pcb_pl1vec = pcb->pcb_pl1vec;
 2018                 curpcb->pcb_l1vec = pcb->pcb_l1vec;
 2019                 curpcb->pcb_dacr = pcb->pcb_dacr;
 2020                 curpcb->pcb_pagedir = pcb->pcb_pagedir;
 2021 
 2022         }
 2023         pmap_free_l1(pmap);
 2024 
 2025         dprintf("pmap_release()\n");
 2026 }
 2027 
 2028 
 2029 
 2030 /*
 2031  * Helper function for pmap_grow_l2_bucket()
 2032  */
 2033 static __inline int
 2034 pmap_grow_map(vm_offset_t va, pt_entry_t cache_mode, vm_paddr_t *pap)
 2035 {
 2036         struct l2_bucket *l2b;
 2037         pt_entry_t *ptep;
 2038         vm_paddr_t pa;
 2039         struct vm_page *m;
 2040 
 2041         m = vm_page_alloc(NULL, 0, VM_ALLOC_NOOBJ | VM_ALLOC_WIRED);
 2042         if (m == NULL)
 2043                 return (1);
 2044         pa = VM_PAGE_TO_PHYS(m);
 2045 
 2046         if (pap)
 2047                 *pap = pa;
 2048 
 2049         l2b = pmap_get_l2_bucket(pmap_kernel(), va);
 2050 
 2051         ptep = &l2b->l2b_kva[l2pte_index(va)];
 2052         *ptep = L2_S_PROTO | pa | cache_mode | L2_S_REF;
 2053         pmap_set_prot(ptep, VM_PROT_READ | VM_PROT_WRITE, 0);
 2054         PTE_SYNC(ptep);
 2055         cpu_tlb_flushD_SE(va);
 2056         cpu_cpwait();
 2057 
 2058         return (0);
 2059 }
 2060 
 2061 /*
 2062  * This is the same as pmap_alloc_l2_bucket(), except that it is only
 2063  * used by pmap_growkernel().
 2064  */
 2065 static __inline struct l2_bucket *
 2066 pmap_grow_l2_bucket(pmap_t pmap, vm_offset_t va)
 2067 {
 2068         struct l2_dtable *l2;
 2069         struct l2_bucket *l2b;
 2070         struct l1_ttable *l1;
 2071         pd_entry_t *pl1pd;
 2072         u_short l1idx;
 2073         vm_offset_t nva;
 2074 
 2075         l1idx = L1_IDX(va);
 2076 
 2077         if ((l2 = pmap->pm_l2[L2_IDX(l1idx)]) == NULL) {
 2078                 /*
 2079                  * No mapping at this address, as there is
 2080                  * no entry in the L1 table.
 2081                  * Need to allocate a new l2_dtable.
 2082                  */
 2083                 nva = pmap_kernel_l2dtable_kva;
 2084                 if ((nva & PAGE_MASK) == 0) {
 2085                         /*
 2086                          * Need to allocate a backing page
 2087                          */
 2088                         if (pmap_grow_map(nva, pte_l2_s_cache_mode, NULL))
 2089                                 return (NULL);
 2090                 }
 2091 
 2092                 l2 = (struct l2_dtable *)nva;
 2093                 nva += sizeof(struct l2_dtable);
 2094 
 2095                 if ((nva & PAGE_MASK) < (pmap_kernel_l2dtable_kva &
 2096                     PAGE_MASK)) {
 2097                         /*
 2098                          * The new l2_dtable straddles a page boundary.
 2099                          * Map in another page to cover it.
 2100                          */
 2101                         if (pmap_grow_map(nva, pte_l2_s_cache_mode, NULL))
 2102                                 return (NULL);
 2103                 }
 2104 
 2105                 pmap_kernel_l2dtable_kva = nva;
 2106 
 2107                 /*
 2108                  * Link it into the parent pmap
 2109                  */
 2110                 pmap->pm_l2[L2_IDX(l1idx)] = l2;
 2111                 memset(l2, 0, sizeof(*l2));
 2112         }
 2113 
 2114         l2b = &l2->l2_bucket[L2_BUCKET(l1idx)];
 2115 
 2116         /*
 2117          * Fetch pointer to the L2 page table associated with the address.
 2118          */
 2119         if (l2b->l2b_kva == NULL) {
 2120                 pt_entry_t *ptep;
 2121 
 2122                 /*
 2123                  * No L2 page table has been allocated. Chances are, this
 2124                  * is because we just allocated the l2_dtable, above.
 2125                  */
 2126                 nva = pmap_kernel_l2ptp_kva;
 2127                 ptep = (pt_entry_t *)nva;
 2128                 if ((nva & PAGE_MASK) == 0) {
 2129                         /*
 2130                          * Need to allocate a backing page
 2131                          */
 2132                         if (pmap_grow_map(nva, pte_l2_s_cache_mode_pt,
 2133                             &pmap_kernel_l2ptp_phys))
 2134                                 return (NULL);
 2135                 }
 2136                 memset(ptep, 0, L2_TABLE_SIZE_REAL);
 2137                 l2->l2_occupancy++;
 2138                 l2b->l2b_kva = ptep;
 2139                 l2b->l2b_l1idx = l1idx;
 2140                 l2b->l2b_phys = pmap_kernel_l2ptp_phys;
 2141 
 2142                 pmap_kernel_l2ptp_kva += L2_TABLE_SIZE_REAL;
 2143                 pmap_kernel_l2ptp_phys += L2_TABLE_SIZE_REAL;
 2144         }
 2145 
 2146         /* Distribute new L1 entry to all other L1s */
 2147         SLIST_FOREACH(l1, &l1_list, l1_link) {
 2148                 pl1pd = &l1->l1_kva[L1_IDX(va)];
 2149                 *pl1pd = l2b->l2b_phys | L1_C_DOM(PMAP_DOMAIN_KERNEL) |
 2150                     L1_C_PROTO;
 2151                 PTE_SYNC(pl1pd);
 2152         }
 2153         cpu_tlb_flushID_SE(va);
 2154         cpu_cpwait();
 2155 
 2156         return (l2b);
 2157 }
 2158 
 2159 
 2160 /*
 2161  * grow the number of kernel page table entries, if needed
 2162  */
 2163 void
 2164 pmap_growkernel(vm_offset_t addr)
 2165 {
 2166         pmap_t kpmap = pmap_kernel();
 2167 
 2168         if (addr <= pmap_curmaxkvaddr)
 2169                 return;         /* we are OK */
 2170 
 2171         /*
 2172          * whoops!   we need to add kernel PTPs
 2173          */
 2174 
 2175         /* Map 1MB at a time */
 2176         for (; pmap_curmaxkvaddr < addr; pmap_curmaxkvaddr += L1_S_SIZE)
 2177                 pmap_grow_l2_bucket(kpmap, pmap_curmaxkvaddr);
 2178 
 2179         kernel_vm_end = pmap_curmaxkvaddr;
 2180 }
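/*
 * Illustrative sketch only (hypothetical helper, not compiled): growing
 * the kernel map by even a single byte past pmap_curmaxkvaddr still maps
 * a whole 1MB section, because pmap_grow_l2_bucket() works in L1_S_SIZE
 * steps.
 */
#if 0
static void
pmap_growkernel_sketch(void)
{
	vm_offset_t newend;

	newend = kernel_vm_end + 1;
	pmap_growkernel(newend);	/* rounds up to the next 1MB section */
	KASSERT(kernel_vm_end >= newend, ("kernel map did not grow"));
}
#endif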
 2181 
 2182 /*
 2183  * Returns TRUE if the given page is mapped individually or as part of
 2184  * a 1MB section.  Otherwise, returns FALSE.
 2185  */
 2186 boolean_t
 2187 pmap_page_is_mapped(vm_page_t m)
 2188 {
 2189         boolean_t rv;
 2190 
 2191         if ((m->oflags & VPO_UNMANAGED) != 0)
 2192                 return (FALSE);
 2193         rw_wlock(&pvh_global_lock);
 2194         rv = !TAILQ_EMPTY(&m->md.pv_list) ||
 2195             ((m->flags & PG_FICTITIOUS) == 0 &&
 2196             !TAILQ_EMPTY(&pa_to_pvh(VM_PAGE_TO_PHYS(m))->pv_list));
 2197         rw_wunlock(&pvh_global_lock);
 2198         return (rv);
 2199 }
 2200 
 2201 /*
 2202  * Remove all pages from the specified address space;
 2203  * this aids process exit speeds.  Also, this code
 2204  * is special-cased for the current process only, but
 2205  * can have the more generic (and slightly slower)
 2206  * mode enabled.  This is much faster than pmap_remove
 2207  * in the case of running down an entire address space.
 2208  */
 2209 void
 2210 pmap_remove_pages(pmap_t pmap)
 2211 {
 2212         struct pv_entry *pv;
 2213         struct l2_bucket *l2b = NULL;
 2214         struct pv_chunk *pc, *npc;
 2215         struct md_page *pvh;
 2216         pd_entry_t *pl1pd, l1pd;
 2217         pt_entry_t *ptep;
 2218         vm_page_t m, mt;
 2219         vm_offset_t va;
 2220         uint32_t inuse, bitmask;
 2221         int allfree, bit, field, idx;
 2222  
 2223         rw_wlock(&pvh_global_lock);
 2224         PMAP_LOCK(pmap);
 2225 
 2226         TAILQ_FOREACH_SAFE(pc, &pmap->pm_pvchunk, pc_list, npc) {
 2227                 allfree = 1;
 2228                 for (field = 0; field < _NPCM; field++) {
 2229                         inuse = ~pc->pc_map[field] & pc_freemask[field];
 2230                         while (inuse != 0) {
 2231                                 bit = ffs(inuse) - 1;
 2232                                 bitmask = 1ul << bit;
 2233                                 idx = field * sizeof(inuse) * NBBY + bit;
 2234                                 pv = &pc->pc_pventry[idx];
 2235                                 va = pv->pv_va;
 2236                                 inuse &= ~bitmask;
 2237                                 if (pv->pv_flags & PVF_WIRED) {
 2238                                         /* Cannot remove wired pages now. */
 2239                                         allfree = 0;
 2240                                         continue;
 2241                                 }
 2242                                 pl1pd = &pmap->pm_l1->l1_kva[L1_IDX(va)];
 2243                                 l1pd = *pl1pd;
 2244                                 l2b = pmap_get_l2_bucket(pmap, va);
 2245                                 if ((l1pd & L1_TYPE_MASK) == L1_S_PROTO) {
 2246                                         pvh = pa_to_pvh(l1pd & L1_S_FRAME);
 2247                                         TAILQ_REMOVE(&pvh->pv_list, pv, pv_list);
 2248                                         if (TAILQ_EMPTY(&pvh->pv_list)) {
 2249                                                 m = PHYS_TO_VM_PAGE(l1pd & L1_S_FRAME);
 2250                                                 KASSERT((vm_offset_t)m >= KERNBASE,
 2251                                                     ("Trying to access non-existent page "
 2252                                                      "va %x l1pd %x", trunc_1mpage(va), l1pd));
 2253                                                 for (mt = m; mt < &m[L2_PTE_NUM_TOTAL]; mt++) {
 2254                                                         if (TAILQ_EMPTY(&mt->md.pv_list))
 2255                                                                 vm_page_aflag_clear(mt, PGA_WRITEABLE);
 2256                                                 }
 2257                                         }
 2258                                         if (l2b != NULL) {
 2259                                                 KASSERT(l2b->l2b_occupancy == L2_PTE_NUM_TOTAL,
 2260                                                     ("pmap_remove_pages: l2_bucket occupancy error"));
 2261                                                 pmap_free_l2_bucket(pmap, l2b, L2_PTE_NUM_TOTAL);
 2262                                         }
 2263                                         pmap->pm_stats.resident_count -= L2_PTE_NUM_TOTAL;
 2264                                         *pl1pd = 0;
 2265                                         PTE_SYNC(pl1pd);
 2266                                 } else {
 2267                                         KASSERT(l2b != NULL,
 2268                                             ("No L2 bucket in pmap_remove_pages"));
 2269                                         ptep = &l2b->l2b_kva[l2pte_index(va)];
 2270                                         m = PHYS_TO_VM_PAGE(l2pte_pa(*ptep));
 2271                                         KASSERT((vm_offset_t)m >= KERNBASE,
 2272                                             ("Trying to access non-existent page "
 2273                                              "va %x pte %x", va, *ptep));
 2274                                         TAILQ_REMOVE(&m->md.pv_list, pv, pv_list);
 2275                                         if (TAILQ_EMPTY(&m->md.pv_list) &&
 2276                                             (m->flags & PG_FICTITIOUS) == 0) {
 2277                                                 pvh = pa_to_pvh(l2pte_pa(*ptep));
 2278                                                 if (TAILQ_EMPTY(&pvh->pv_list))
 2279                                                         vm_page_aflag_clear(m, PGA_WRITEABLE);
 2280                                         }
 2281                                         *ptep = 0;
 2282                                         PTE_SYNC(ptep);
 2283                                         pmap_free_l2_bucket(pmap, l2b, 1);
 2284                                         pmap->pm_stats.resident_count--;
 2285                                 }
 2286 
 2287                                 /* Mark free */
 2288                                 PV_STAT(pv_entry_frees++);
 2289                                 PV_STAT(pv_entry_spare++);
 2290                                 pv_entry_count--;
 2291                                 pc->pc_map[field] |= bitmask;
 2292                         }
 2293                 }
 2294                 if (allfree) {
 2295                         TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
 2296                         pmap_free_pv_chunk(pc);
 2297                 }
 2298 
 2299         }
 2300 
 2301         rw_wunlock(&pvh_global_lock);
 2302         cpu_tlb_flushID();
 2303         cpu_cpwait();
 2304         PMAP_UNLOCK(pmap);
 2305 }
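/*
 * Illustrative sketch only (hypothetical helper): the loop above recovers
 * a pv_entry index from its chunk bitmap position with the arithmetic
 * shown here.  With 32-bit pc_map words, field 1 / bit 3 selects
 * pv_entry index 35.
 */
#if 0
static int
pv_chunk_index_sketch(int field, int bit)
{

	return (field * sizeof(uint32_t) * NBBY + bit);
}
#endif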
 2306 
 2307 
 2308 /***************************************************
 2309  * Low level mapping routines.....
 2310  ***************************************************/
 2311 
 2312 #ifdef ARM_HAVE_SUPERSECTIONS
 2313 /* Map a super section into the KVA. */
 2314 
 2315 void
 2316 pmap_kenter_supersection(vm_offset_t va, uint64_t pa, int flags)
 2317 {
 2318         pd_entry_t pd = L1_S_PROTO | L1_S_SUPERSEC | (pa & L1_SUP_FRAME) |
 2319             (((pa >> 32) & 0xf) << 20) | L1_S_PROT(PTE_KERNEL,
 2320             VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE) |
 2321             L1_S_DOM(PMAP_DOMAIN_KERNEL);
 2322         struct l1_ttable *l1;
 2323         vm_offset_t va0, va_end;
 2324 
 2325         KASSERT(((va | pa) & L1_SUP_OFFSET) == 0,
 2326             ("Not a valid super section mapping"));
 2327         if (flags & SECTION_CACHE)
 2328                 pd |= pte_l1_s_cache_mode;
 2329         else if (flags & SECTION_PT)
 2330                 pd |= pte_l1_s_cache_mode_pt;
 2331 
 2332         va0 = va & L1_SUP_FRAME;
 2333         va_end = va + L1_SUP_SIZE;
 2334         SLIST_FOREACH(l1, &l1_list, l1_link) {
 2335                 va = va0;
 2336                 for (; va < va_end; va += L1_S_SIZE) {
 2337                         l1->l1_kva[L1_IDX(va)] = pd;
 2338                         PTE_SYNC(&l1->l1_kva[L1_IDX(va)]);
 2339                 }
 2340         }
 2341 }
 2342 #endif
 2343 
 2344 /* Map a section into the KVA. */
 2345 
 2346 void
 2347 pmap_kenter_section(vm_offset_t va, vm_offset_t pa, int flags)
 2348 {
 2349         pd_entry_t pd = L1_S_PROTO | pa | L1_S_PROT(PTE_KERNEL,
 2350             VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE) | L1_S_REF |
 2351             L1_S_DOM(PMAP_DOMAIN_KERNEL);
 2352         struct l1_ttable *l1;
 2353 
 2354         KASSERT(((va | pa) & L1_S_OFFSET) == 0,
 2355             ("Not a valid section mapping"));
 2356         if (flags & SECTION_CACHE)
 2357                 pd |= pte_l1_s_cache_mode;
 2358         else if (flags & SECTION_PT)
 2359                 pd |= pte_l1_s_cache_mode_pt;
 2360 
 2361         SLIST_FOREACH(l1, &l1_list, l1_link) {
 2362                 l1->l1_kva[L1_IDX(va)] = pd;
 2363                 PTE_SYNC(&l1->l1_kva[L1_IDX(va)]);
 2364         }
 2365         cpu_tlb_flushID_SE(va);
 2366         cpu_cpwait();
 2367 }
 2368 
 2369 /*
 2370  * Make a temporary mapping for a physical address.  This is only intended
 2371  * to be used for panic dumps.
 2372  */
 2373 void *
 2374 pmap_kenter_temporary(vm_paddr_t pa, int i)
 2375 {
 2376         vm_offset_t va;
 2377 
 2378         va = (vm_offset_t)crashdumpmap + (i * PAGE_SIZE);
 2379         pmap_kenter(va, pa);
 2380         return ((void *)crashdumpmap);
 2381 }
 2382 
 2383 /*
 2384  * add a wired page to the kva
 2385  * note that in order for the mapping to take effect, you
 2386  * should flush the TLB after doing the pmap_kenter...
 2387  */
 2388 static PMAP_INLINE void
 2389 pmap_kenter_internal(vm_offset_t va, vm_offset_t pa, int flags)
 2390 {
 2391         struct l2_bucket *l2b;
 2392         pt_entry_t *ptep;
 2393         pt_entry_t opte;
 2394 
 2395         PDEBUG(1, printf("pmap_kenter: va = %08x, pa = %08x\n",
 2396             (uint32_t) va, (uint32_t) pa));
 2397 
 2398 
 2399         l2b = pmap_get_l2_bucket(pmap_kernel(), va);
 2400         if (l2b == NULL)
 2401                 l2b = pmap_grow_l2_bucket(pmap_kernel(), va);
 2402         KASSERT(l2b != NULL, ("No L2 Bucket"));
 2403 
 2404         ptep = &l2b->l2b_kva[l2pte_index(va)];
 2405         opte = *ptep;
 2406 
 2407         if (flags & KENTER_CACHE)
 2408                 *ptep = L2_S_PROTO | l2s_mem_types[PTE_CACHE] | pa | L2_S_REF;
 2409         else if (flags & KENTER_DEVICE)
 2410                 *ptep = L2_S_PROTO | l2s_mem_types[PTE_DEVICE] | pa | L2_S_REF;
 2411         else
 2412                 *ptep = L2_S_PROTO | l2s_mem_types[PTE_NOCACHE] | pa | L2_S_REF;
 2413 
 2414         if (flags & KENTER_CACHE) {
 2415                 pmap_set_prot(ptep, VM_PROT_READ | VM_PROT_WRITE,
 2416                     flags & KENTER_USER);
 2417         } else {
 2418                 pmap_set_prot(ptep, VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE,
 2419                     0);
 2420         }
 2421 
 2422         PTE_SYNC(ptep);
 2423         if (l2pte_valid(opte)) {
 2424                 if (L2_S_EXECUTABLE(opte) || L2_S_EXECUTABLE(*ptep))
 2425                         cpu_tlb_flushID_SE(va);
 2426                 else
 2427                         cpu_tlb_flushD_SE(va);
 2428         } else {
 2429                 if (opte == 0)
 2430                         l2b->l2b_occupancy++;
 2431         }
 2432         cpu_cpwait();
 2433 
 2434         PDEBUG(1, printf("pmap_kenter: pte = %08x, opte = %08x, npte = %08x\n",
 2435             (uint32_t) ptep, opte, *ptep));
 2436 }
 2437 
 2438 void
 2439 pmap_kenter(vm_offset_t va, vm_paddr_t pa)
 2440 {
 2441         pmap_kenter_internal(va, pa, KENTER_CACHE);
 2442 }
 2443 
 2444 void
 2445 pmap_kenter_nocache(vm_offset_t va, vm_paddr_t pa)
 2446 {
 2447 
 2448         pmap_kenter_internal(va, pa, 0);
 2449 }
 2450 
 2451 void
 2452 pmap_kenter_device(vm_offset_t va, vm_paddr_t pa)
 2453 {
 2454 
 2455         pmap_kenter_internal(va, pa, KENTER_DEVICE);
 2456 }
 2457 
 2458 void
 2459 pmap_kenter_user(vm_offset_t va, vm_paddr_t pa)
 2460 {
 2461 
 2462         pmap_kenter_internal(va, pa, KENTER_CACHE|KENTER_USER);
 2463         /*
 2464          * Call pmap_fault_fixup now, to make sure we'll have no exception
 2465          * at the first use of the new address, or bad things will happen,
 2466          * as we use one of these addresses in the exception handlers.
 2467          */
 2468         pmap_fault_fixup(pmap_kernel(), va, VM_PROT_READ|VM_PROT_WRITE, 1);
 2469 }
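/*
 * Illustrative sketch only (hypothetical helper): mapping one page of
 * device registers into KVA with the strongly-ordered "device" memory
 * type via the pmap_kenter_*() wrappers above.  Real drivers normally
 * go through bus_space(9)/pmap_mapdev() instead.
 */
#if 0
static void *
pmap_kenter_device_sketch(vm_paddr_t regs_pa)
{
	vm_offset_t va;

	va = kva_alloc(PAGE_SIZE);
	if (va == 0)
		return (NULL);
	pmap_kenter_device(va, trunc_page(regs_pa));
	return ((void *)(va + (regs_pa & PAGE_MASK)));
}
#endif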
 2470 
 2471 vm_paddr_t
 2472 pmap_kextract(vm_offset_t va)
 2473 {
 2474 
 2475         if (kernel_vm_end == 0)
 2476                 return (0);
 2477         return (pmap_extract_locked(kernel_pmap, va));
 2478 }
 2479 
 2480 /*
 2481  * remove a page from the kernel pagetables
 2482  */
 2483 void
 2484 pmap_kremove(vm_offset_t va)
 2485 {
 2486         struct l2_bucket *l2b;
 2487         pt_entry_t *ptep, opte;
 2488 
 2489         l2b = pmap_get_l2_bucket(pmap_kernel(), va);
 2490         if (!l2b)
 2491                 return;
 2492         KASSERT(l2b != NULL, ("No L2 Bucket"));
 2493         ptep = &l2b->l2b_kva[l2pte_index(va)];
 2494         opte = *ptep;
 2495         if (l2pte_valid(opte)) {
 2496                 va = va & ~PAGE_MASK;
 2497                 *ptep = 0;
 2498                 PTE_SYNC(ptep);
 2499                 if (L2_S_EXECUTABLE(opte))
 2500                         cpu_tlb_flushID_SE(va);
 2501                 else
 2502                         cpu_tlb_flushD_SE(va);
 2503                 cpu_cpwait();
 2504         }
 2505 }
 2506 
 2507 
 2508 /*
 2509  *      Used to map a range of physical addresses into kernel
 2510  *      virtual address space.
 2511  *
 2512  *      The value passed in '*virt' is a suggested virtual address for
 2513  *      the mapping. Architectures which can support a direct-mapped
 2514  *      physical to virtual region can return the appropriate address
 2515  *      within that region, leaving '*virt' unchanged. Other
 2516  *      architectures should map the pages starting at '*virt' and
 2517  *      update '*virt' with the first usable address after the mapped
 2518  *      region.
 2519  */
 2520 vm_offset_t
 2521 pmap_map(vm_offset_t *virt, vm_offset_t start, vm_offset_t end, int prot)
 2522 {
 2523         vm_offset_t sva = *virt;
 2524         vm_offset_t va = sva;
 2525 
 2526         PDEBUG(1, printf("pmap_map: virt = %08x, start = %08x, end = %08x, "
 2527             "prot = %d\n", (uint32_t) *virt, (uint32_t) start, (uint32_t) end,
 2528             prot));
 2529 
 2530         while (start < end) {
 2531                 pmap_kenter(va, start);
 2532                 va += PAGE_SIZE;
 2533                 start += PAGE_SIZE;
 2534         }
 2535         *virt = va;
 2536         return (sva);
 2537 }
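/*
 * Illustrative sketch only (hypothetical helper): an early-boot caller
 * such as vm_page_startup() uses pmap_map() roughly like this to wire a
 * physically contiguous range into the kernel address space.
 */
#if 0
static vm_offset_t
pmap_map_sketch(vm_offset_t *vaddr, vm_offset_t start_pa, vm_size_t len)
{

	return (pmap_map(vaddr, start_pa, start_pa + len,
	    VM_PROT_READ | VM_PROT_WRITE));
}
#endif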
 2538 
 2539 /*
 2540  * Add a list of wired pages to the kva
 2541  * Add a list of wired pages to the kva;
 2542  * kernel mappings that do not need to have
 2543  * page modification or references recorded.
 2544  * Note that old mappings are simply written
 2545  * over.  The page *must* be wired.
 2546  */
 2547 void
 2548 pmap_qenter(vm_offset_t va, vm_page_t *m, int count)
 2549 {
 2550         int i;
 2551 
 2552         for (i = 0; i < count; i++) {
 2553                 pmap_kenter_internal(va, VM_PAGE_TO_PHYS(m[i]),
 2554                     KENTER_CACHE);
 2555                 va += PAGE_SIZE;
 2556         }
 2557 }
 2558 
 2559 
 2560 /*
 2561  * this routine jerks page mappings from the
 2562  * kernel -- it is meant only for temporary mappings.
 2563  */
 2564 void
 2565 pmap_qremove(vm_offset_t va, int count)
 2566 {
 2567         int i;
 2568 
 2569         for (i = 0; i < count; i++) {
 2570                 if (vtophys(va))
 2571                         pmap_kremove(va);
 2572 
 2573                 va += PAGE_SIZE;
 2574         }
 2575 }
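/*
 * Illustrative sketch only (hypothetical helper): pmap_qenter() and
 * pmap_qremove() are used as a pair for short-lived kernel windows onto
 * wired pages; nothing here records modification or reference state.
 */
#if 0
static void
pmap_qenter_qremove_sketch(vm_offset_t window, vm_page_t *pages, int npages)
{

	pmap_qenter(window, pages, npages);
	/* ... access the pages through 'window' ... */
	pmap_qremove(window, npages);
}
#endif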
 2576 
 2577 
 2578 /*
 2579  * pmap_object_init_pt preloads the ptes for a given object
 2580  * into the specified pmap.  This eliminates the blast of soft
 2581  * faults on process startup and immediately after an mmap.
 2582  */
 2583 void
 2584 pmap_object_init_pt(pmap_t pmap, vm_offset_t addr, vm_object_t object,
 2585     vm_pindex_t pindex, vm_size_t size)
 2586 {
 2587 
 2588         VM_OBJECT_ASSERT_WLOCKED(object);
 2589         KASSERT(object->type == OBJT_DEVICE || object->type == OBJT_SG,
 2590             ("pmap_object_init_pt: non-device object"));
 2591 }
 2592 
 2593 
 2594 /*
 2595  *      pmap_is_prefaultable:
 2596  *
 2597  *      Return whether or not the specified virtual address is eligible
 2598  *      for prefault.
 2599  */
 2600 boolean_t
 2601 pmap_is_prefaultable(pmap_t pmap, vm_offset_t addr)
 2602 {
 2603         pd_entry_t *pdep;
 2604         pt_entry_t *ptep;
 2605 
 2606         if (!pmap_get_pde_pte(pmap, addr, &pdep, &ptep))
 2607                 return (FALSE);
 2608         KASSERT((pdep != NULL && (l1pte_section_p(*pdep) || ptep != NULL)),
 2609             ("Valid mapping but no pte ?"));
 2610         if (*pdep != 0 && !l1pte_section_p(*pdep))
 2611                 if (*ptep == 0)
 2612                         return (TRUE);
 2613         return (FALSE);
 2614 }
 2615 
 2616 /*
 2617  * Fetch pointers to the PDE/PTE for the given pmap/VA pair.
 2618  * Returns TRUE if the mapping exists, else FALSE.
 2619  *
 2620  * NOTE: This function is only used by a couple of arm-specific modules.
 2621  * It is not safe to take any pmap locks here, since we could be right
 2622  * in the middle of debugging the pmap anyway...
 2623  *
 2624  * It is possible for this routine to return FALSE even though a valid
 2625  * mapping does exist. This is because we don't lock, so the metadata
 2626  * state may be inconsistent.
 2627  *
 2628  * NOTE: We can return a NULL *ptp in the case where the L1 pde is
 2629  * a "section" mapping.
 2630  */
 2631 boolean_t
 2632 pmap_get_pde_pte(pmap_t pmap, vm_offset_t va, pd_entry_t **pdp,
 2633     pt_entry_t **ptp)
 2634 {
 2635         struct l2_dtable *l2;
 2636         pd_entry_t *pl1pd, l1pd;
 2637         pt_entry_t *ptep;
 2638         u_short l1idx;
 2639 
 2640         if (pmap->pm_l1 == NULL)
 2641                 return (FALSE);
 2642 
 2643         l1idx = L1_IDX(va);
 2644         *pdp = pl1pd = &pmap->pm_l1->l1_kva[l1idx];
 2645         l1pd = *pl1pd;
 2646 
 2647         if (l1pte_section_p(l1pd)) {
 2648                 *ptp = NULL;
 2649                 return (TRUE);
 2650         }
 2651 
 2652         if (pmap->pm_l2 == NULL)
 2653                 return (FALSE);
 2654 
 2655         l2 = pmap->pm_l2[L2_IDX(l1idx)];
 2656 
 2657         if (l2 == NULL ||
 2658             (ptep = l2->l2_bucket[L2_BUCKET(l1idx)].l2b_kva) == NULL) {
 2659                 return (FALSE);
 2660         }
 2661 
 2662         *ptp = &ptep[l2pte_index(va)];
 2663         return (TRUE);
 2664 }
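/*
 * Illustrative sketch only (hypothetical helper): per the NOTE above, a
 * NULL PTE pointer returned by pmap_get_pde_pte() indicates a section
 * mapping, which is how a caller can distinguish the two cases.
 */
#if 0
static boolean_t
pmap_mapping_is_section_sketch(pmap_t pm, vm_offset_t va)
{
	pd_entry_t *pdep;
	pt_entry_t *ptep;

	if (!pmap_get_pde_pte(pm, va, &pdep, &ptep))
		return (FALSE);
	return (ptep == NULL ? TRUE : FALSE);
}
#endif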
 2665 
 2666 /*
 2667  *      Routine:        pmap_remove_all
 2668  *      Function:
 2669  *              Removes this physical page from
 2670  *              all physical maps in which it resides.
 2671  *              Reflects back modify bits to the pager.
 2672  *
 2673  *      Notes:
 2674  *              Original versions of this routine were very
 2675  *              inefficient because they iteratively called
 2676  *              pmap_remove (slow...)
 2677  */
 2678 void
 2679 pmap_remove_all(vm_page_t m)
 2680 {
 2681         struct md_page *pvh;
 2682         pv_entry_t pv;
 2683         pmap_t pmap;
 2684         pt_entry_t *ptep;
 2685         struct l2_bucket *l2b;
 2686         boolean_t flush = FALSE;
 2687         pmap_t curpmap;
 2688         u_int is_exec = 0;
 2689 
 2690         KASSERT((m->oflags & VPO_UNMANAGED) == 0,
 2691             ("pmap_remove_all: page %p is not managed", m));
 2692         rw_wlock(&pvh_global_lock);
 2693         if ((m->flags & PG_FICTITIOUS) != 0)
 2694                 goto small_mappings;
 2695         pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
 2696         while ((pv = TAILQ_FIRST(&pvh->pv_list)) != NULL) {
 2697                 pmap = PV_PMAP(pv);
 2698                 PMAP_LOCK(pmap);
 2699                 pd_entry_t *pl1pd;
 2700                 pl1pd = &pmap->pm_l1->l1_kva[L1_IDX(pv->pv_va)];
 2701                 KASSERT((*pl1pd & L1_TYPE_MASK) == L1_S_PROTO,
 2702                     ("pmap_remove_all: valid section mapping expected"));
 2703                 (void)pmap_demote_section(pmap, pv->pv_va);
 2704                 PMAP_UNLOCK(pmap);
 2705         }
 2706 small_mappings:
 2707         curpmap = vmspace_pmap(curproc->p_vmspace);
 2708         while ((pv = TAILQ_FIRST(&m->md.pv_list)) != NULL) {
 2709                 pmap = PV_PMAP(pv);
 2710                 if (flush == FALSE && (pmap == curpmap ||
 2711                     pmap == pmap_kernel()))
 2712                         flush = TRUE;
 2713 
 2714                 PMAP_LOCK(pmap);
 2715                 l2b = pmap_get_l2_bucket(pmap, pv->pv_va);
 2716                 KASSERT(l2b != NULL, ("No l2 bucket"));
 2717                 ptep = &l2b->l2b_kva[l2pte_index(pv->pv_va)];
 2718                 is_exec |= PTE_BEEN_EXECD(*ptep);
 2719                 *ptep = 0;
 2720                 if (pmap_is_current(pmap))
 2721                         PTE_SYNC(ptep);
 2722                 pmap_free_l2_bucket(pmap, l2b, 1);
 2723                 pmap->pm_stats.resident_count--;
 2724                 TAILQ_REMOVE(&m->md.pv_list, pv, pv_list);
 2725                 if (pv->pv_flags & PVF_WIRED)
 2726                         pmap->pm_stats.wired_count--;
 2727                 pmap_free_pv_entry(pmap, pv);
 2728                 PMAP_UNLOCK(pmap);
 2729         }
 2730 
 2731         if (flush) {
 2732                 if (is_exec)
 2733                         cpu_tlb_flushID();
 2734                 else
 2735                         cpu_tlb_flushD();
 2736                 cpu_cpwait();
 2737         }
 2738         vm_page_aflag_clear(m, PGA_WRITEABLE);
 2739         rw_wunlock(&pvh_global_lock);
 2740 }
 2741 
 2742 int
 2743 pmap_change_attr(vm_offset_t sva, vm_size_t len, int mode)
 2744 {
 2745         vm_offset_t base, offset, tmpva;
 2746         vm_size_t size;
 2747         struct l2_bucket *l2b;
 2748         pt_entry_t *ptep, pte;
 2749         vm_offset_t next_bucket;
 2750 
 2751         PMAP_LOCK(kernel_pmap);
 2752 
 2753         base = trunc_page(sva);
 2754         offset = sva & PAGE_MASK;
 2755         size = roundup(offset + len, PAGE_SIZE);
 2756 
 2757 #ifdef checkit
 2758         /*
 2759          * Only supported on kernel virtual addresses, including the direct
 2760          * map but excluding the recursive map.
 2761          */
 2762         if (base < DMAP_MIN_ADDRESS) {
 2763                 PMAP_UNLOCK(kernel_pmap);
 2764                 return (EINVAL);
 2765         }
 2766 #endif
 2767         for (tmpva = base; tmpva < base + size; ) {
 2768                 next_bucket = L2_NEXT_BUCKET(tmpva);
 2769                 if (next_bucket > base + size)
 2770                         next_bucket = base + size;
 2771 
 2772                 l2b = pmap_get_l2_bucket(kernel_pmap, tmpva);
 2773                 if (l2b == NULL) {
 2774                         tmpva = next_bucket;
 2775                         continue;
 2776                 }
 2777 
 2778                 ptep = &l2b->l2b_kva[l2pte_index(tmpva)];
 2779 
 2780                 if (*ptep == 0) {
 2781                         PMAP_UNLOCK(kernel_pmap);
 2782                         return(EINVAL);
 2783                 }
 2784 
 2785                 pte = *ptep &~ L2_S_CACHE_MASK;
 2786                 cpu_idcache_wbinv_range(tmpva, PAGE_SIZE);
 2787                 pmap_l2cache_wbinv_range(tmpva, pte & L2_S_FRAME, PAGE_SIZE);
 2788                 *ptep = pte;
 2789                 cpu_tlb_flushID_SE(tmpva);
 2790                 cpu_cpwait();
 2791 
 2792                 dprintf("%s: for va:%x ptep:%x pte:%x\n",
 2793                     __func__, tmpva, (uint32_t)ptep, pte);
 2794                 tmpva += PAGE_SIZE;
 2795         }
 2796 
 2797         PMAP_UNLOCK(kernel_pmap);
 2798 
 2799         return (0);
 2800 }
 2801 
 2802 /*
 2803  *      Set the physical protection on the
 2804  *      specified range of this map as requested.
 2805  */
 2806 void
 2807 pmap_protect(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot)
 2808 {
 2809         struct l2_bucket *l2b;
 2810         struct md_page *pvh;
 2811         struct pv_entry *pve;
 2812         pd_entry_t *pl1pd, l1pd;
 2813         pt_entry_t *ptep, pte;
 2814         vm_offset_t next_bucket;
 2815         u_int is_exec, is_refd;
 2816         int flush;
 2817 
 2818         if ((prot & VM_PROT_READ) == 0) {
 2819                 pmap_remove(pmap, sva, eva);
 2820                 return;
 2821         }
 2822 
 2823         if (prot & VM_PROT_WRITE) {
 2824                 /*
 2825                  * If this is a read->write transition, just ignore it and let
 2826                  * vm_fault() take care of it later.
 2827                  */
 2828                 return;
 2829         }
 2830 
 2831         rw_wlock(&pvh_global_lock);
 2832         PMAP_LOCK(pmap);
 2833 
 2834         /*
  2835          * OK, at this point, we know we're doing a write-protect operation.
 2836          * If the pmap is active, write-back the range.
 2837          */
 2838 
 2839         flush = ((eva - sva) >= (PAGE_SIZE * 4)) ? 0 : -1;
 2840         is_exec = is_refd = 0;
 2841 
 2842         while (sva < eva) {
 2843                 next_bucket = L2_NEXT_BUCKET(sva);
 2844                 /*
 2845                  * Check for large page.
 2846                  */
 2847                 pl1pd = &pmap->pm_l1->l1_kva[L1_IDX(sva)];
 2848                 l1pd = *pl1pd;
 2849                 if ((l1pd & L1_TYPE_MASK) == L1_S_PROTO) {
 2850                         KASSERT(pmap != pmap_kernel(),
 2851                             ("pmap_protect: trying to modify "
 2852                             "kernel section protections"));
 2853                         /*
 2854                          * Are we protecting the entire large page? If not,
 2855                          * demote the mapping and fall through.
 2856                          */
 2857                         if (sva + L1_S_SIZE == L2_NEXT_BUCKET(sva) &&
 2858                             eva >= L2_NEXT_BUCKET(sva)) {
 2859                                 l1pd &= ~(L1_S_PROT_MASK | L1_S_XN);
 2860                                 if (!(prot & VM_PROT_EXECUTE))
  2861                                         l1pd |= L1_S_XN;
 2862                                 /*
 2863                                  * At this point we are always setting
  2864                          * the write-protect bit.
 2865                                  */
 2866                                 l1pd |= L1_S_APX;
 2867                                 /* All managed superpages are user pages. */
 2868                                 l1pd |= L1_S_PROT_U;
 2869                                 *pl1pd = l1pd;
 2870                                 PTE_SYNC(pl1pd);
 2871                                 pvh = pa_to_pvh(l1pd & L1_S_FRAME);
 2872                                 pve = pmap_find_pv(pvh, pmap,
 2873                                     trunc_1mpage(sva));
 2874                                 pve->pv_flags &= ~PVF_WRITE;
 2875                                 sva = next_bucket;
 2876                                 continue;
 2877                         } else if (!pmap_demote_section(pmap, sva)) {
 2878                                 /* The large page mapping was destroyed. */
 2879                                 sva = next_bucket;
 2880                                 continue;
 2881                         }
 2882                 }
 2883                 if (next_bucket > eva)
 2884                         next_bucket = eva;
 2885                 l2b = pmap_get_l2_bucket(pmap, sva);
 2886                 if (l2b == NULL) {
 2887                         sva = next_bucket;
 2888                         continue;
 2889                 }
 2890 
 2891                 ptep = &l2b->l2b_kva[l2pte_index(sva)];
 2892 
 2893                 while (sva < next_bucket) {
 2894                         if ((pte = *ptep) != 0 && L2_S_WRITABLE(pte)) {
 2895                                 struct vm_page *m;
 2896 
 2897                                 m = PHYS_TO_VM_PAGE(l2pte_pa(pte));
 2898                                 pmap_set_prot(ptep, prot,
 2899                                     !(pmap == pmap_kernel()));
 2900                                 PTE_SYNC(ptep);
 2901 
 2902                                 pmap_modify_pv(m, pmap, sva, PVF_WRITE, 0);
 2903 
 2904                                 if (flush >= 0) {
 2905                                         flush++;
 2906                                         is_exec |= PTE_BEEN_EXECD(pte);
 2907                                         is_refd |= PTE_BEEN_REFD(pte);
 2908                                 } else {
 2909                                         if (PTE_BEEN_EXECD(pte))
 2910                                                 cpu_tlb_flushID_SE(sva);
 2911                                         else if (PTE_BEEN_REFD(pte))
 2912                                                 cpu_tlb_flushD_SE(sva);
 2913                                 }
 2914                         }
 2915 
 2916                         sva += PAGE_SIZE;
 2917                         ptep++;
 2918                 }
 2919         }
 2920 
 2921 
 2922         if (flush) {
 2923                 if (is_exec)
 2924                         cpu_tlb_flushID();
 2925                 else
 2926                 if (is_refd)
 2927                         cpu_tlb_flushD();
 2928                 cpu_cpwait();
 2929         }
 2930         rw_wunlock(&pvh_global_lock);
 2931 
 2932         PMAP_UNLOCK(pmap);
 2933 }
 2934 
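/*
 * Illustrative sketch: the TLB-flush heuristic used by pmap_protect() above.
 * A value of -1 in "flush" means "flush each modified entry immediately";
 * a value >= 0 counts modified entries and defers to one global flush at the
 * end.  The flush_*_sk callbacks and the 4 KB page size are hypothetical
 * stand-ins for cpu_tlb_flushID_SE()/cpu_tlb_flushID(); this is a standalone
 * model of the decision only.
 */
#include <stdint.h>

#define PAGE_SIZE_SK    4096u

static void flush_one_sk(uint32_t va) { (void)va; /* single-entry flush */ }
static void flush_all_sk(void) { /* whole-TLB flush */ }

static void
protect_range_sk(uint32_t sva, uint32_t eva)
{
        uint32_t va;
        int flush;

        /* Small ranges (< 4 pages) are flushed entry by entry. */
        flush = ((eva - sva) >= (PAGE_SIZE_SK * 4)) ? 0 : -1;
        for (va = sva; va < eva; va += PAGE_SIZE_SK) {
                /* ...assume the PTE at va was write-protected here... */
                if (flush >= 0)
                        flush++;                /* defer to one global flush */
                else
                        flush_one_sk(va);       /* flush this entry now */
        }
        if (flush > 0)
                flush_all_sk();
}
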
 2935 
 2936 /*
 2937  *      Insert the given physical page (p) at
 2938  *      the specified virtual address (v) in the
 2939  *      target physical map with the protection requested.
 2940  *
 2941  *      If specified, the page will be wired down, meaning
 2942  *      that the related pte can not be reclaimed.
 2943  *
 2944  *      NB:  This is the only routine which MAY NOT lazy-evaluate
 2945  *      or lose information.  That is, this routine must actually
 2946  *      insert this page into the given map NOW.
 2947  */
 2948 
 2949 int
 2950 pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
 2951     u_int flags, int8_t psind __unused)
 2952 {
 2953         struct l2_bucket *l2b;
 2954         int rv;
 2955 
 2956         rw_wlock(&pvh_global_lock);
 2957         PMAP_LOCK(pmap);
 2958         rv = pmap_enter_locked(pmap, va, m, prot, flags);
 2959         if (rv == KERN_SUCCESS) {
 2960                 /*
 2961                  * If both the l2b_occupancy and the reservation are fully
 2962                  * populated, then attempt promotion.
 2963                  */
 2964                 l2b = pmap_get_l2_bucket(pmap, va);
 2965                 if (l2b != NULL && l2b->l2b_occupancy == L2_PTE_NUM_TOTAL &&
 2966                     sp_enabled && (m->flags & PG_FICTITIOUS) == 0 &&
 2967                     vm_reserv_level_iffullpop(m) == 0)
 2968                         pmap_promote_section(pmap, va);
 2969         }
 2970         PMAP_UNLOCK(pmap);
 2971         rw_wunlock(&pvh_global_lock);
 2972         return (rv);
 2973 }
 2974 
 2975 /*
 2976  *      The pvh global and pmap locks must be held.
 2977  */
 2978 static int
 2979 pmap_enter_locked(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
 2980     u_int flags)
 2981 {
 2982         struct l2_bucket *l2b = NULL;
 2983         struct vm_page *om;
 2984         struct pv_entry *pve = NULL;
 2985         pd_entry_t *pl1pd, l1pd;
 2986         pt_entry_t *ptep, npte, opte;
 2987         u_int nflags;
 2988         u_int is_exec, is_refd;
 2989         vm_paddr_t pa;
 2990         u_char user;
 2991 
 2992         PMAP_ASSERT_LOCKED(pmap);
 2993         rw_assert(&pvh_global_lock, RA_WLOCKED);
 2994         if (va == vector_page) {
 2995                 pa = systempage.pv_pa;
 2996                 m = NULL;
 2997         } else {
 2998                 if ((m->oflags & VPO_UNMANAGED) == 0 && !vm_page_xbusied(m))
 2999                         VM_OBJECT_ASSERT_LOCKED(m->object);
 3000                 pa = VM_PAGE_TO_PHYS(m);
 3001         }
 3002 
 3003         pl1pd = &pmap->pm_l1->l1_kva[L1_IDX(va)];
 3004         if ((va < VM_MAXUSER_ADDRESS) &&
 3005             (*pl1pd & L1_TYPE_MASK) == L1_S_PROTO) {
 3006                 (void)pmap_demote_section(pmap, va);
 3007         }
 3008 
 3009         user = 0;
 3010         /*
 3011          * Make sure userland mappings get the right permissions
 3012          */
 3013         if (pmap != pmap_kernel() && va != vector_page)
 3014                 user = 1;
 3015 
 3016         nflags = 0;
 3017 
 3018         if (prot & VM_PROT_WRITE)
 3019                 nflags |= PVF_WRITE;
 3020         if ((flags & PMAP_ENTER_WIRED) != 0)
 3021                 nflags |= PVF_WIRED;
 3022 
 3023         PDEBUG(1, printf("pmap_enter: pmap = %08x, va = %08x, m = %08x, "
 3024             "prot = %x, flags = %x\n", (uint32_t) pmap, va, (uint32_t) m,
 3025             prot, flags));
 3026 
 3027         if (pmap == pmap_kernel()) {
 3028                 l2b = pmap_get_l2_bucket(pmap, va);
 3029                 if (l2b == NULL)
 3030                         l2b = pmap_grow_l2_bucket(pmap, va);
 3031         } else {
 3032 do_l2b_alloc:
 3033                 l2b = pmap_alloc_l2_bucket(pmap, va);
 3034                 if (l2b == NULL) {
 3035                         if ((flags & PMAP_ENTER_NOSLEEP) == 0) {
 3036                                 PMAP_UNLOCK(pmap);
 3037                                 rw_wunlock(&pvh_global_lock);
 3038                                 VM_WAIT;
 3039                                 rw_wlock(&pvh_global_lock);
 3040                                 PMAP_LOCK(pmap);
 3041                                 goto do_l2b_alloc;
 3042                         }
 3043                         return (KERN_RESOURCE_SHORTAGE);
 3044                 }
 3045         }
 3046 
 3047         pl1pd = &pmap->pm_l1->l1_kva[L1_IDX(va)];
 3048         if ((*pl1pd & L1_TYPE_MASK) == L1_S_PROTO)
 3049                 panic("pmap_enter: attempt to enter on 1MB page, va: %#x", va);
 3050 
 3051         ptep = &l2b->l2b_kva[l2pte_index(va)];
 3052 
 3053         opte = *ptep;
 3054         npte = pa;
 3055         is_exec = is_refd = 0;
 3056 
 3057         if (opte) {
 3058                 if (l2pte_pa(opte) == pa) {
 3059                         /*
 3060                          * We're changing the attrs of an existing mapping.
 3061                          */
 3062                         if (m != NULL)
 3063                                 pmap_modify_pv(m, pmap, va,
 3064                                     PVF_WRITE | PVF_WIRED, nflags);
 3065                         is_exec |= PTE_BEEN_EXECD(opte);
 3066                         is_refd |= PTE_BEEN_REFD(opte);
 3067                         goto validate;
 3068                 }
 3069                 if ((om = PHYS_TO_VM_PAGE(l2pte_pa(opte)))) {
 3070                         /*
 3071                          * Replacing an existing mapping with a new one.
  3072                          * It is part of our managed memory, so we
  3073                          * must remove it from the PV list.
 3074                          */
 3075                         if ((pve = pmap_remove_pv(om, pmap, va))) {
 3076                                 is_exec |= PTE_BEEN_EXECD(opte);
 3077                                 is_refd |= PTE_BEEN_REFD(opte);
 3078                 
 3079                                 if (m && ((m->oflags & VPO_UNMANAGED)))
 3080                                         pmap_free_pv_entry(pmap, pve);
 3081                         }
 3082                 }
 3083 
 3084         } else {
 3085                 /*
 3086                  * Keep the stats up to date
 3087                  */
 3088                 l2b->l2b_occupancy++;
 3089                 pmap->pm_stats.resident_count++;
 3090         }
 3091 
 3092         /*
 3093          * Enter on the PV list if part of our managed memory.
 3094          */
 3095         if ((m && !(m->oflags & VPO_UNMANAGED))) {
 3096                 if ((!pve) && (pve = pmap_get_pv_entry(pmap, FALSE)) == NULL)
 3097                         panic("pmap_enter: no pv entries");
 3098 
 3099                 KASSERT(va < kmi.clean_sva || va >= kmi.clean_eva,
 3100                 ("pmap_enter: managed mapping within the clean submap"));
 3101                 KASSERT(pve != NULL, ("No pv"));
 3102                 pmap_enter_pv(m, pve, pmap, va, nflags);
 3103         }
 3104 
 3105 validate:
 3106         /* Make the new PTE valid */
 3107         npte |= L2_S_PROTO;
 3108 #ifdef SMP
 3109         npte |= L2_SHARED;
 3110 #endif
 3111         /* Set defaults first - kernel read access */
 3112         npte |= L2_APX;
 3113         npte |= L2_S_PROT_R;
 3114         /* Set "referenced" flag */
 3115         npte |= L2_S_REF;
 3116 
 3117         /* Now tune APs as desired */
 3118         if (user)
 3119                 npte |= L2_S_PROT_U;
 3120         /*
  3121          * If this is not the vector_page then continue setting the
  3122          * mapping parameters
 3123          */
 3124         if (m != NULL) {
 3125                 if ((m->oflags & VPO_UNMANAGED) == 0) {
 3126                         if (prot & (VM_PROT_ALL)) {
 3127                                 vm_page_aflag_set(m, PGA_REFERENCED);
 3128                         } else {
 3129                                 /*
 3130                                  * Need to do page referenced emulation.
 3131                                  */
 3132                                 npte &= ~L2_S_REF;
 3133                         }
 3134                 }
 3135 
 3136                 if (prot & VM_PROT_WRITE) {
 3137                         if ((m->oflags & VPO_UNMANAGED) == 0) {
 3138                                 vm_page_aflag_set(m, PGA_WRITEABLE);
 3139                                 /*
 3140                                  * XXX: Skip modified bit emulation for now.
 3141                                  *      The emulation reveals problems
 3142                                  *      that result in random failures
 3143                                  *      during memory allocation on some
 3144                                  *      platforms.
 3145                                  *      Therefore, the page is marked RW
 3146                                  *      immediately.
 3147                                  */
 3148                                 npte &= ~(L2_APX);
 3149                                 vm_page_dirty(m);
 3150                         } else
 3151                                 npte &= ~(L2_APX);
 3152                 }
 3153                 if (!(prot & VM_PROT_EXECUTE))
 3154                         npte |= L2_XN;
 3155 
 3156                 if (m->md.pv_memattr != VM_MEMATTR_UNCACHEABLE)
 3157                         npte |= pte_l2_s_cache_mode;
 3158         }
 3159 
 3160         CTR5(KTR_PMAP,"enter: pmap:%p va:%x prot:%x pte:%x->%x",
 3161             pmap, va, prot, opte, npte);
 3162         /*
 3163          * If this is just a wiring change, the two PTEs will be
 3164          * identical, so there's no need to update the page table.
 3165          */
 3166         if (npte != opte) {
 3167                 boolean_t is_cached = pmap_is_current(pmap);
 3168 
 3169                 *ptep = npte;
 3170                 PTE_SYNC(ptep);
 3171                 if (is_cached) {
 3172                         /*
 3173                          * We only need to frob the cache/tlb if this pmap
 3174                          * is current
 3175                          */
 3176                         if (L1_IDX(va) != L1_IDX(vector_page) &&
 3177                             l2pte_valid(npte)) {
 3178                                 /*
 3179                                  * This mapping is likely to be accessed as
 3180                                  * soon as we return to userland. Fix up the
 3181                                  * L1 entry to avoid taking another
 3182                                  * page/domain fault.
 3183                                  */
 3184                                 l1pd = l2b->l2b_phys |
 3185                                     L1_C_DOM(pmap->pm_domain) | L1_C_PROTO;
 3186                                 if (*pl1pd != l1pd) {
 3187                                         *pl1pd = l1pd;
 3188                                         PTE_SYNC(pl1pd);
 3189                                 }
 3190                         }
 3191                 }
 3192 
 3193                 if (is_exec)
 3194                         cpu_tlb_flushID_SE(va);
 3195                 else if (is_refd)
 3196                         cpu_tlb_flushD_SE(va);
 3197                 cpu_cpwait();
 3198         }
 3199 
 3200         if ((pmap != pmap_kernel()) && (pmap == &curproc->p_vmspace->vm_pmap))
 3201                 cpu_icache_sync_range(va, PAGE_SIZE);
 3202         return (KERN_SUCCESS);
 3203 }
 3204 
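/*
 * Illustrative sketch: how pmap_enter_locked() above composes the access
 * bits of the new PTE, starting from "kernel read-only, referenced" defaults
 * and then widening them.  The *_SK bit values are hypothetical placeholders,
 * not the real ARMv6 L2 descriptor encodings; this is a standalone model of
 * the flag composition only.
 */
#include <stdbool.h>
#include <stdint.h>

#define L2_APX_SK       0x001u          /* set => read-only */
#define L2_PROT_R_SK    0x002u          /* kernel read access */
#define L2_PROT_U_SK    0x004u          /* user access */
#define L2_REF_SK       0x008u          /* "referenced" flag */
#define L2_XN_SK        0x010u          /* execute never */

static uint32_t
compose_pte_flags_sk(bool user, bool write, bool exec)
{
        uint32_t pte;

        /* Defaults: kernel read access, marked referenced. */
        pte = L2_APX_SK | L2_PROT_R_SK | L2_REF_SK;
        if (user)
                pte |= L2_PROT_U_SK;    /* userland mapping */
        if (write)
                pte &= ~L2_APX_SK;      /* clear read-only bit */
        if (!exec)
                pte |= L2_XN_SK;        /* forbid instruction fetch */
        return (pte);
}
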
 3205 /*
 3206  * Maps a sequence of resident pages belonging to the same object.
 3207  * The sequence begins with the given page m_start.  This page is
 3208  * mapped at the given virtual address start.  Each subsequent page is
 3209  * mapped at a virtual address that is offset from start by the same
 3210  * amount as the page is offset from m_start within the object.  The
 3211  * last page in the sequence is the page with the largest offset from
 3212  * m_start that can be mapped at a virtual address less than the given
 3213  * virtual address end.  Not every virtual page between start and end
 3214  * is mapped; only those for which a resident page exists with the
 3215  * corresponding offset from m_start are mapped.
 3216  */
 3217 void
 3218 pmap_enter_object(pmap_t pmap, vm_offset_t start, vm_offset_t end,
 3219     vm_page_t m_start, vm_prot_t prot)
 3220 {
 3221         vm_offset_t va;
 3222         vm_page_t m;
 3223         vm_pindex_t diff, psize;
 3224 
 3225         VM_OBJECT_ASSERT_LOCKED(m_start->object);
 3226 
 3227         psize = atop(end - start);
 3228         m = m_start;
 3229         prot &= VM_PROT_READ | VM_PROT_EXECUTE;
 3230         rw_wlock(&pvh_global_lock);
 3231         PMAP_LOCK(pmap);
 3232         while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) {
 3233                 va = start + ptoa(diff);
 3234                 if ((va & L1_S_OFFSET) == 0 && L2_NEXT_BUCKET(va) <= end &&
 3235                     m->psind == 1 && sp_enabled &&
 3236                     pmap_enter_section(pmap, va, m, prot))
 3237                         m = &m[L1_S_SIZE / PAGE_SIZE - 1];
 3238                 else
 3239                         pmap_enter_locked(pmap, va, m, prot,
 3240                             PMAP_ENTER_NOSLEEP);
 3241                 m = TAILQ_NEXT(m, listq);
 3242         }
 3243         PMAP_UNLOCK(pmap);
 3244         rw_wunlock(&pvh_global_lock);
 3245 }
 3246 
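/*
 * Illustrative sketch: the address computation in pmap_enter_object() above.
 * Each resident page is mapped at a VA offset equal to its page-index offset
 * from m_start, and pages whose offset reaches atop(end - start) are not
 * mapped.  ptoa_sk/atop_sk are hypothetical stand-ins for ptoa()/atop(); this
 * is a standalone model of the arithmetic only.
 */
#include <stdint.h>

#define PAGE_SHIFT_SK   12

static uint32_t ptoa_sk(uint32_t npages) { return (npages << PAGE_SHIFT_SK); }
static uint32_t atop_sk(uint32_t bytes)  { return (bytes >> PAGE_SHIFT_SK); }

/* Returns the VA for a page, or 0 if the page lies past the mapped window. */
static uint32_t
enter_object_va_sk(uint32_t start, uint32_t end, uint32_t pindex,
    uint32_t start_pindex)
{
        uint32_t diff, psize;

        psize = atop_sk(end - start);
        diff = pindex - start_pindex;
        if (diff >= psize)
                return (0);
        return (start + ptoa_sk(diff)); /* VA mirrors the object offset */
}
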
 3247 /*
 3248  * this code makes some *MAJOR* assumptions:
  3249  * 1. The current pmap and the given pmap exist.
 3250  * 2. Not wired.
 3251  * 3. Read access.
 3252  * 4. No page table pages.
 3253  * but is *MUCH* faster than pmap_enter...
 3254  */
 3255 
 3256 void
 3257 pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot)
 3258 {
 3259 
 3260         prot &= VM_PROT_READ | VM_PROT_EXECUTE;
 3261         rw_wlock(&pvh_global_lock);
 3262         PMAP_LOCK(pmap);
 3263         pmap_enter_locked(pmap, va, m, prot, PMAP_ENTER_NOSLEEP);
 3264         PMAP_UNLOCK(pmap);
 3265         rw_wunlock(&pvh_global_lock);
 3266 }
 3267 
 3268 /*
 3269  *      Clear the wired attribute from the mappings for the specified range of
 3270  *      addresses in the given pmap.  Every valid mapping within that range
 3271  *      must have the wired attribute set.  In contrast, invalid mappings
 3272  *      cannot have the wired attribute set, so they are ignored.
 3273  *
 3274  *      XXX Wired mappings of unmanaged pages cannot be counted by this pmap
 3275  *      implementation.
 3276  */
 3277 void
 3278 pmap_unwire(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
 3279 {
 3280         struct l2_bucket *l2b;
 3281         struct md_page *pvh;
 3282         pd_entry_t l1pd;
 3283         pt_entry_t *ptep, pte;
 3284         pv_entry_t pv;
 3285         vm_offset_t next_bucket;
 3286         vm_paddr_t pa;
 3287         vm_page_t m;
 3288  
 3289         rw_wlock(&pvh_global_lock);
 3290         PMAP_LOCK(pmap);
 3291         while (sva < eva) {
 3292                 next_bucket = L2_NEXT_BUCKET(sva);
 3293                 l1pd = pmap->pm_l1->l1_kva[L1_IDX(sva)];
 3294                 if ((l1pd & L1_TYPE_MASK) == L1_S_PROTO) {
 3295                         pa = l1pd & L1_S_FRAME;
 3296                         m = PHYS_TO_VM_PAGE(pa);
 3297                         KASSERT(m != NULL && (m->oflags & VPO_UNMANAGED) == 0,
 3298                             ("pmap_unwire: unmanaged 1mpage %p", m));
 3299                         pvh = pa_to_pvh(pa);
 3300                         pv = pmap_find_pv(pvh, pmap, trunc_1mpage(sva));
 3301                         if ((pv->pv_flags & PVF_WIRED) == 0)
 3302                                 panic("pmap_unwire: pv %p isn't wired", pv);
 3303 
 3304                         /*
 3305                          * Are we unwiring the entire large page? If not,
 3306                          * demote the mapping and fall through.
 3307                          */
 3308                         if (sva + L1_S_SIZE == next_bucket &&
 3309                             eva >= next_bucket) {
 3310                                 pv->pv_flags &= ~PVF_WIRED;
 3311                                 pmap->pm_stats.wired_count -= L2_PTE_NUM_TOTAL;
 3312                                 sva = next_bucket;
 3313                                 continue;
 3314                         } else if (!pmap_demote_section(pmap, sva))
 3315                                 panic("pmap_unwire: demotion failed");
 3316                 }
 3317                 if (next_bucket > eva)
 3318                         next_bucket = eva;
 3319                 l2b = pmap_get_l2_bucket(pmap, sva);
 3320                 if (l2b == NULL) {
 3321                         sva = next_bucket;
 3322                         continue;
 3323                 }
 3324                 for (ptep = &l2b->l2b_kva[l2pte_index(sva)]; sva < next_bucket;
 3325                     sva += PAGE_SIZE, ptep++) {
 3326                         if ((pte = *ptep) == 0 ||
 3327                             (m = PHYS_TO_VM_PAGE(l2pte_pa(pte))) == NULL ||
 3328                             (m->oflags & VPO_UNMANAGED) != 0)
 3329                                 continue;
 3330                         pv = pmap_find_pv(&m->md, pmap, sva);
 3331                         if ((pv->pv_flags & PVF_WIRED) == 0)
 3332                                 panic("pmap_unwire: pv %p isn't wired", pv);
 3333                         pv->pv_flags &= ~PVF_WIRED;
 3334                         pmap->pm_stats.wired_count--;
 3335                 }
 3336         }
 3337         rw_wunlock(&pvh_global_lock);
 3338         PMAP_UNLOCK(pmap);
 3339 }
 3340 
 3341 
 3342 /*
 3343  *      Copy the range specified by src_addr/len
 3344  *      from the source map to the range dst_addr/len
 3345  *      in the destination map.
 3346  *
 3347  *      This routine is only advisory and need not do anything.
 3348  */
 3349 void
 3350 pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr,
 3351     vm_size_t len, vm_offset_t src_addr)
 3352 {
 3353 }
 3354 
 3355 
 3356 /*
 3357  *      Routine:        pmap_extract
 3358  *      Function:
 3359  *              Extract the physical page address associated
 3360  *              with the given map/virtual_address pair.
 3361  */
 3362 vm_paddr_t
 3363 pmap_extract(pmap_t pmap, vm_offset_t va)
 3364 {
 3365         vm_paddr_t pa;
 3366 
 3367         if (kernel_vm_end != 0)
 3368                 PMAP_LOCK(pmap);
 3369         pa = pmap_extract_locked(pmap, va);
 3370         if (kernel_vm_end != 0)
 3371                 PMAP_UNLOCK(pmap);
 3372         return (pa);
 3373 }
 3374 
 3375 static vm_paddr_t
 3376 pmap_extract_locked(pmap_t pmap, vm_offset_t va)
 3377 {
 3378         struct l2_dtable *l2;
 3379         pd_entry_t l1pd;
 3380         pt_entry_t *ptep, pte;
 3381         vm_paddr_t pa;
 3382         u_int l1idx;
 3383 
 3384         if (kernel_vm_end != 0 && pmap != kernel_pmap)
 3385                 PMAP_ASSERT_LOCKED(pmap);
 3386         l1idx = L1_IDX(va);
 3387         l1pd = pmap->pm_l1->l1_kva[l1idx];
 3388         if (l1pte_section_p(l1pd)) {
 3389                 /* XXX: what to do about the bits > 32 ? */
 3390                 if (l1pd & L1_S_SUPERSEC)
 3391                         pa = (l1pd & L1_SUP_FRAME) | (va & L1_SUP_OFFSET);
 3392                 else
 3393                         pa = (l1pd & L1_S_FRAME) | (va & L1_S_OFFSET);
 3394         } else {
 3395                 /*
 3396                  * Note that we can't rely on the validity of the L1
 3397                  * descriptor as an indication that a mapping exists.
 3398                  * We have to look it up in the L2 dtable.
 3399                  */
 3400                 l2 = pmap->pm_l2[L2_IDX(l1idx)];
 3401                 if (l2 == NULL ||
 3402                     (ptep = l2->l2_bucket[L2_BUCKET(l1idx)].l2b_kva) == NULL)
 3403                         return (0);
 3404                 pte = ptep[l2pte_index(va)];
 3405                 if (pte == 0)
 3406                         return (0);
 3407                 switch (pte & L2_TYPE_MASK) {
 3408                 case L2_TYPE_L:
 3409                         pa = (pte & L2_L_FRAME) | (va & L2_L_OFFSET);
 3410                         break;
 3411                 default:
 3412                         pa = (pte & L2_S_FRAME) | (va & L2_S_OFFSET);
 3413                         break;
 3414                 }
 3415         }
 3416         return (pa);
 3417 }
 3418 
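/*
 * Illustrative sketch: the frame/offset arithmetic that pmap_extract_locked()
 * above uses for the two mapping sizes.  The *_SK masks assume a 1 MB section
 * and 4 KB pages and stand in for L1_S_FRAME/L1_S_OFFSET and
 * L2_S_FRAME/L2_S_OFFSET; this is a standalone model of the address
 * translation only.
 */
#include <stdint.h>

#define SECTION_OFFSET_SK       0x000fffffu     /* low 20 bits: 1 MB section */
#define SECTION_FRAME_SK        (~SECTION_OFFSET_SK)
#define SMALLPG_OFFSET_SK       0x00000fffu     /* low 12 bits: 4 KB page */
#define SMALLPG_FRAME_SK        (~SMALLPG_OFFSET_SK)

/* Section mapping: frame bits from the L1 descriptor, offset bits from va. */
static uint32_t
section_va_to_pa_sk(uint32_t l1pd, uint32_t va)
{
        return ((l1pd & SECTION_FRAME_SK) | (va & SECTION_OFFSET_SK));
}

/* Small-page mapping: frame bits from the L2 PTE, offset bits from va. */
static uint32_t
page_va_to_pa_sk(uint32_t pte, uint32_t va)
{
        return ((pte & SMALLPG_FRAME_SK) | (va & SMALLPG_OFFSET_SK));
}
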
 3419 /*
 3420  * Atomically extract and hold the physical page with the given
 3421  * pmap and virtual address pair if that mapping permits the given
 3422  * protection.
 3423  *
 3424  */
 3425 vm_page_t
 3426 pmap_extract_and_hold(pmap_t pmap, vm_offset_t va, vm_prot_t prot)
 3427 {
 3428         struct l2_dtable *l2;
 3429         pd_entry_t l1pd;
 3430         pt_entry_t *ptep, pte;
 3431         vm_paddr_t pa, paddr;
 3432         vm_page_t m = NULL;
 3433         u_int l1idx;
 3434         l1idx = L1_IDX(va);
 3435         paddr = 0;
 3436 
 3437         PMAP_LOCK(pmap);
 3438 retry:
 3439         l1pd = pmap->pm_l1->l1_kva[l1idx];
 3440         if (l1pte_section_p(l1pd)) {
 3441                 /* XXX: what to do about the bits > 32 ? */
 3442                 if (l1pd & L1_S_SUPERSEC)
 3443                         pa = (l1pd & L1_SUP_FRAME) | (va & L1_SUP_OFFSET);
 3444                 else
 3445                         pa = (l1pd & L1_S_FRAME) | (va & L1_S_OFFSET);
 3446                 if (vm_page_pa_tryrelock(pmap, pa & PG_FRAME, &paddr))
 3447                         goto retry;
 3448                 if (L1_S_WRITABLE(l1pd) || (prot & VM_PROT_WRITE) == 0) {
 3449                         m = PHYS_TO_VM_PAGE(pa);
 3450                         vm_page_hold(m);
 3451                 }
 3452         } else {
 3453                 /*
 3454                  * Note that we can't rely on the validity of the L1
 3455                  * descriptor as an indication that a mapping exists.
 3456                  * We have to look it up in the L2 dtable.
 3457                  */
 3458                 l2 = pmap->pm_l2[L2_IDX(l1idx)];
 3459 
 3460                 if (l2 == NULL ||
 3461                     (ptep = l2->l2_bucket[L2_BUCKET(l1idx)].l2b_kva) == NULL) {
 3462                         PMAP_UNLOCK(pmap);
 3463                         return (NULL);
 3464                 }
 3465 
 3466                 ptep = &ptep[l2pte_index(va)];
 3467                 pte = *ptep;
 3468 
 3469                 if (pte == 0) {
 3470                         PMAP_UNLOCK(pmap);
 3471                         return (NULL);
 3472                 } else if ((prot & VM_PROT_WRITE) && (pte & L2_APX)) {
 3473                         PMAP_UNLOCK(pmap);
 3474                         return (NULL);
 3475                 } else {
 3476                         switch (pte & L2_TYPE_MASK) {
 3477                         case L2_TYPE_L:
 3478                                 panic("extract and hold section mapping");
 3479                                 break;
 3480                         default:
 3481                                 pa = (pte & L2_S_FRAME) | (va & L2_S_OFFSET);
 3482                                 break;
 3483                         }
 3484                         if (vm_page_pa_tryrelock(pmap, pa & PG_FRAME, &paddr))
 3485                                 goto retry;
 3486                         m = PHYS_TO_VM_PAGE(pa);
 3487                         vm_page_hold(m);
 3488                 }
 3489 
 3490         }
 3491 
 3492         PMAP_UNLOCK(pmap);
 3493         PA_UNLOCK_COND(paddr);
 3494         return (m);
 3495 }
 3496 
 3497 /*
 3498  * Initialize a preallocated and zeroed pmap structure,
 3499  * such as one in a vmspace structure.
 3500  */
 3501 
 3502 int
 3503 pmap_pinit(pmap_t pmap)
 3504 {
 3505         PDEBUG(1, printf("pmap_pinit: pmap = %08x\n", (uint32_t) pmap));
 3506 
 3507         pmap_alloc_l1(pmap);
 3508         bzero(pmap->pm_l2, sizeof(pmap->pm_l2));
 3509 
 3510         CPU_ZERO(&pmap->pm_active);
 3511 
 3512         TAILQ_INIT(&pmap->pm_pvchunk);
 3513         bzero(&pmap->pm_stats, sizeof pmap->pm_stats);
 3514         pmap->pm_stats.resident_count = 1;
 3515         if (vector_page < KERNBASE) {
 3516                 pmap_enter(pmap, vector_page,
 3517                     PHYS_TO_VM_PAGE(systempage.pv_pa), VM_PROT_READ,
 3518                     PMAP_ENTER_WIRED, 0);
 3519         }
 3520         return (1);
 3521 }
 3522 
 3523 
 3524 /***************************************************
 3525  * Superpage management routines.
 3526  ***************************************************/
 3527 
 3528 static PMAP_INLINE struct pv_entry *
 3529 pmap_pvh_remove(struct md_page *pvh, pmap_t pmap, vm_offset_t va)
 3530 {
 3531         pv_entry_t pv;
 3532 
 3533         rw_assert(&pvh_global_lock, RA_WLOCKED);
 3534 
 3535         pv = pmap_find_pv(pvh, pmap, va);
 3536         if (pv != NULL)
 3537                 TAILQ_REMOVE(&pvh->pv_list, pv, pv_list);
 3538 
 3539         return (pv);
 3540 }
 3541 
 3542 static void
 3543 pmap_pvh_free(struct md_page *pvh, pmap_t pmap, vm_offset_t va)
 3544 {
 3545         pv_entry_t pv;
 3546 
 3547         pv = pmap_pvh_remove(pvh, pmap, va);
 3548         KASSERT(pv != NULL, ("pmap_pvh_free: pv not found"));
 3549         pmap_free_pv_entry(pmap, pv);
 3550 }
 3551 
 3552 static boolean_t
 3553 pmap_pv_insert_section(pmap_t pmap, vm_offset_t va, vm_paddr_t pa)
 3554 {
 3555         struct md_page *pvh;
 3556         pv_entry_t pv;
 3557 
 3558         rw_assert(&pvh_global_lock, RA_WLOCKED);
 3559         if (pv_entry_count < pv_entry_high_water && 
 3560             (pv = pmap_get_pv_entry(pmap, TRUE)) != NULL) {
 3561                 pv->pv_va = va;
 3562                 pvh = pa_to_pvh(pa);
 3563                 TAILQ_INSERT_TAIL(&pvh->pv_list, pv, pv_list);
 3564                 return (TRUE);
 3565         } else
 3566                 return (FALSE);
 3567 }
 3568 
 3569 /*
 3570  * Create the pv entries for each of the pages within a superpage.
 3571  */
 3572 static void
 3573 pmap_pv_demote_section(pmap_t pmap, vm_offset_t va, vm_paddr_t pa)
 3574 {
 3575         struct md_page *pvh;
 3576         pv_entry_t pve, pv;
 3577         vm_offset_t va_last;
 3578         vm_page_t m;
 3579 
 3580         rw_assert(&pvh_global_lock, RA_WLOCKED);
 3581         KASSERT((pa & L1_S_OFFSET) == 0,
 3582             ("pmap_pv_demote_section: pa is not 1mpage aligned"));
 3583 
 3584         /*
 3585          * Transfer the 1mpage's pv entry for this mapping to the first
 3586          * page's pv list.
 3587          */
 3588         pvh = pa_to_pvh(pa);
 3589         va = trunc_1mpage(va);
 3590         pv = pmap_pvh_remove(pvh, pmap, va);
 3591         KASSERT(pv != NULL, ("pmap_pv_demote_section: pv not found"));
 3592         m = PHYS_TO_VM_PAGE(pa);
 3593         TAILQ_INSERT_HEAD(&m->md.pv_list, pv, pv_list);
 3594         /* Instantiate the remaining pv entries. */
 3595         va_last = L2_NEXT_BUCKET(va) - PAGE_SIZE;
 3596         do {
 3597                 m++;
 3598                 KASSERT((m->oflags & VPO_UNMANAGED) == 0,
 3599                     ("pmap_pv_demote_section: page %p is not managed", m));
 3600                 va += PAGE_SIZE;
 3601                 pve = pmap_get_pv_entry(pmap, FALSE);
 3602                 pmap_enter_pv(m, pve, pmap, va, pv->pv_flags);
 3603         } while (va < va_last);
 3604 }
 3605 
 3606 static void
 3607 pmap_pv_promote_section(pmap_t pmap, vm_offset_t va, vm_paddr_t pa)
 3608 {
 3609         struct md_page *pvh;
 3610         pv_entry_t pv;
 3611         vm_offset_t va_last;
 3612         vm_page_t m;
 3613 
 3614         rw_assert(&pvh_global_lock, RA_WLOCKED);
 3615         KASSERT((pa & L1_S_OFFSET) == 0,
 3616             ("pmap_pv_promote_section: pa is not 1mpage aligned"));
 3617 
 3618         /*
 3619          * Transfer the first page's pv entry for this mapping to the
 3620          * 1mpage's pv list.  Aside from avoiding the cost of a call
 3621          * to get_pv_entry(), a transfer avoids the possibility that
 3622          * get_pv_entry() calls pmap_pv_reclaim() and that pmap_pv_reclaim()
 3623          * removes one of the mappings that is being promoted.
 3624          */
 3625         m = PHYS_TO_VM_PAGE(pa);
 3626         va = trunc_1mpage(va);
 3627         pv = pmap_pvh_remove(&m->md, pmap, va);
 3628         KASSERT(pv != NULL, ("pmap_pv_promote_section: pv not found"));
 3629         pvh = pa_to_pvh(pa);
 3630         TAILQ_INSERT_TAIL(&pvh->pv_list, pv, pv_list);
 3631         /* Free the remaining pv entries in the newly mapped section pages */
 3632         va_last = L2_NEXT_BUCKET(va) - PAGE_SIZE;
 3633         do {
 3634                 m++;
 3635                 va += PAGE_SIZE;
 3636                 /*
  3637                  * We don't care about the flags; the first pv contains sufficient
  3638                  * information for all of the pages, so nothing is really lost.
 3639                  */
 3640                 pmap_pvh_free(&m->md, pmap, va);
 3641         } while (va < va_last);
 3642 }
 3643 
 3644 /*
 3645  * Tries to create a 1MB page mapping.  Returns TRUE if successful and
  3646  * FALSE otherwise.  Fails if (1) the page is unmanaged, this is the kernel pmap
  3647  * or the vectors page, (2) a mapping already exists at the specified virtual
  3648  * address, or (3) a pv entry cannot be allocated without reclaiming another.
 3649  */
 3650 static boolean_t
 3651 pmap_enter_section(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot)
 3652 {
 3653         pd_entry_t *pl1pd;
 3654         vm_offset_t pa;
 3655         struct l2_bucket *l2b;
 3656 
 3657         rw_assert(&pvh_global_lock, RA_WLOCKED);
 3658         PMAP_ASSERT_LOCKED(pmap);
 3659 
 3660         /* Skip kernel, vectors page and unmanaged mappings */
 3661         if ((pmap == pmap_kernel()) || (L1_IDX(va) == L1_IDX(vector_page)) ||
 3662             ((m->oflags & VPO_UNMANAGED) != 0)) {
 3663                 CTR2(KTR_PMAP, "pmap_enter_section: failure for va %#lx"
 3664                     " in pmap %p", va, pmap);
 3665                 return (FALSE);
 3666         }
 3667         /*
  3668          * Fail if a valid section mapping already exists or there is an
  3669          * l2_bucket associated with this L1 page directory entry.
 3670          */
 3671         va = trunc_1mpage(va);
 3672         pl1pd = &pmap->pm_l1->l1_kva[L1_IDX(va)];
 3673         l2b = pmap_get_l2_bucket(pmap, va);
 3674         if ((*pl1pd & L1_S_PROTO) || (l2b != NULL)) {
 3675                 CTR2(KTR_PMAP, "pmap_enter_section: failure for va %#lx"
 3676                     " in pmap %p", va, pmap);
 3677                 return (FALSE);
 3678         }
 3679         pa = VM_PAGE_TO_PHYS(m); 
 3680         /*
 3681          * Abort this mapping if its PV entry could not be created.
 3682          */
 3683         if (!pmap_pv_insert_section(pmap, va, VM_PAGE_TO_PHYS(m))) {
 3684                 CTR2(KTR_PMAP, "pmap_enter_section: failure for va %#lx"
 3685                     " in pmap %p", va, pmap);
 3686                 return (FALSE);
 3687         }
 3688         /*
 3689          * Increment counters.
 3690          */
 3691         pmap->pm_stats.resident_count += L2_PTE_NUM_TOTAL;
 3692         /*
  3693          * Regardless of the requested permissions, map the superpage read-only.
 3694          */
 3695         prot &= ~VM_PROT_WRITE;
 3696         /*
 3697          * Map the superpage.
 3698          */
 3699         pmap_map_section(pmap, va, pa, prot, FALSE);
 3700 
 3701         pmap_section_mappings++;
 3702         CTR2(KTR_PMAP, "pmap_enter_section: success for va %#lx"
 3703             " in pmap %p", va, pmap);
 3704         return (TRUE);
 3705 }
 3706 
 3707 /*
 3708  * pmap_remove_section: do the things to unmap a superpage in a process
 3709  */
 3710 static void
 3711 pmap_remove_section(pmap_t pmap, vm_offset_t sva)
 3712 {
 3713         struct md_page *pvh;
 3714         struct l2_bucket *l2b;
 3715         pd_entry_t *pl1pd, l1pd;
 3716         vm_offset_t eva, va;
 3717         vm_page_t m;
 3718 
 3719         PMAP_ASSERT_LOCKED(pmap);
 3720         if ((pmap == pmap_kernel()) || (L1_IDX(sva) == L1_IDX(vector_page)))
 3721                 return;
 3722 
 3723         KASSERT((sva & L1_S_OFFSET) == 0,
 3724             ("pmap_remove_section: sva is not 1mpage aligned"));
 3725 
 3726         pl1pd = &pmap->pm_l1->l1_kva[L1_IDX(sva)];
 3727         l1pd = *pl1pd;
 3728 
 3729         m = PHYS_TO_VM_PAGE(l1pd & L1_S_FRAME);
 3730         KASSERT((m != NULL && ((m->oflags & VPO_UNMANAGED) == 0)),
 3731             ("pmap_remove_section: no corresponding vm_page or "
 3732             "page unmanaged"));
 3733 
 3734         pmap->pm_stats.resident_count -= L2_PTE_NUM_TOTAL;
 3735         pvh = pa_to_pvh(l1pd & L1_S_FRAME);
 3736         pmap_pvh_free(pvh, pmap, sva);
 3737         eva = L2_NEXT_BUCKET(sva);
 3738         for (va = sva, m = PHYS_TO_VM_PAGE(l1pd & L1_S_FRAME);
 3739             va < eva; va += PAGE_SIZE, m++) {
 3740                 /*
  3741                  * Mark the base pages referenced but skip marking them dirty:
  3742                  * if the superpage is writeable, all base pages were already
  3743                  * marked dirty in pmap_fault_fixup() before promotion.  The
  3744                  * reference bit, however, might not have been set for each base
  3745                  * page if the superpage was created at once rather than as a
  3746                  * result of promotion.
 3747                  */
 3748                 if (L1_S_REFERENCED(l1pd))
 3749                         vm_page_aflag_set(m, PGA_REFERENCED);
 3750                 if (TAILQ_EMPTY(&m->md.pv_list) &&
 3751                     TAILQ_EMPTY(&pvh->pv_list))
 3752                         vm_page_aflag_clear(m, PGA_WRITEABLE);
 3753         }
 3754         
 3755         l2b = pmap_get_l2_bucket(pmap, sva);
 3756         if (l2b != NULL) {
 3757                 KASSERT(l2b->l2b_occupancy == L2_PTE_NUM_TOTAL,
 3758                     ("pmap_remove_section: l2_bucket occupancy error"));
 3759                 pmap_free_l2_bucket(pmap, l2b, L2_PTE_NUM_TOTAL);
 3760         }
 3761         /* Now invalidate L1 slot */
 3762         *pl1pd = 0;
 3763         PTE_SYNC(pl1pd);
 3764         if (L1_S_EXECUTABLE(l1pd))
 3765                 cpu_tlb_flushID_SE(sva);
 3766         else
 3767                 cpu_tlb_flushD_SE(sva);
 3768         cpu_cpwait();
 3769 }
 3770 
 3771 /*
  3772  * Tries to promote the 256 contiguous 4KB page mappings that are
 3773  * within a single l2_bucket to a single 1MB section mapping.
 3774  * For promotion to occur, two conditions must be met: (1) the 4KB page
 3775  * mappings must map aligned, contiguous physical memory and (2) the 4KB page
 3776  * mappings must have identical characteristics.
 3777  */
 3778 static void
 3779 pmap_promote_section(pmap_t pmap, vm_offset_t va)
 3780 {
 3781         pt_entry_t *firstptep, firstpte, oldpte, pa, *pte;
 3782         vm_page_t m, oldm;
 3783         vm_offset_t first_va, old_va;
 3784         struct l2_bucket *l2b = NULL;
 3785         vm_prot_t prot;
 3786         struct pv_entry *pve, *first_pve;
 3787 
 3788         PMAP_ASSERT_LOCKED(pmap);
 3789 
 3790         prot = VM_PROT_ALL;
 3791         /*
  3792          * Skip promoting kernel pages.  This is justified by the following:
  3793          * 1. The kernel is already mapped using section mappings in each pmap
  3794          * 2. Managed mappings within the kernel are not to be promoted anyway
 3795          */
 3796         if (pmap == pmap_kernel()) {
 3797                 pmap_section_p_failures++;
 3798                 CTR2(KTR_PMAP, "pmap_promote_section: failure for va %#x"
 3799                     " in pmap %p", va, pmap);
 3800                 return;
 3801         }
  3802         /* Do not attempt to promote the vectors page */
 3803         if (L1_IDX(va) == L1_IDX(vector_page)) {
 3804                 pmap_section_p_failures++;
 3805                 CTR2(KTR_PMAP, "pmap_promote_section: failure for va %#x"
 3806                     " in pmap %p", va, pmap);
 3807                 return;
 3808         }
 3809         /*
 3810          * Examine the first PTE in the specified l2_bucket. Abort if this PTE
 3811          * is either invalid, unused, or does not map the first 4KB physical
  3812          * page within the 1MB page.
 3813          */
 3814         first_va = trunc_1mpage(va);
 3815         l2b = pmap_get_l2_bucket(pmap, first_va);
 3816         KASSERT(l2b != NULL, ("pmap_promote_section: trying to promote "
  3817             "a non-existent l2 bucket"));
 3818         firstptep = &l2b->l2b_kva[0];
 3819 
 3820         firstpte = *firstptep;
 3821         if ((l2pte_pa(firstpte) & L1_S_OFFSET) != 0) {
 3822                 pmap_section_p_failures++;
 3823                 CTR2(KTR_PMAP, "pmap_promote_section: failure for va %#x"
 3824                     " in pmap %p", va, pmap);
 3825                 return;
 3826         }
 3827 
 3828         if ((firstpte & (L2_S_PROTO | L2_S_REF)) != (L2_S_PROTO | L2_S_REF)) {
 3829                 pmap_section_p_failures++;
 3830                 CTR2(KTR_PMAP, "pmap_promote_section: failure for va %#x"
 3831                     " in pmap %p", va, pmap);
 3832                 return;
 3833         }
 3834         /*
  3835          * ARM uses the pv_entry to mark a particular mapping WIRED, so don't
  3836          * promote unmanaged pages: without a corresponding pv_entry it is
  3837          * impossible to determine whether the page is wired or not.
 3838          */
 3839         m = PHYS_TO_VM_PAGE(l2pte_pa(firstpte));
 3840         if (m && ((m->oflags & VPO_UNMANAGED) != 0)) {
 3841                 pmap_section_p_failures++;
 3842                 CTR2(KTR_PMAP, "pmap_promote_section: failure for va %#x"
 3843                     " in pmap %p", va, pmap);
 3844                 return;
 3845         }
 3846         first_pve = pmap_find_pv(&m->md, pmap, first_va);
 3847         /*
  3848          * Due to modified bit emulation, the PTE is made writable only
  3849          * on an actual write.  If the entry is referenced and writable
  3850          * then it has been modified and we leave write access enabled.
  3851          * Otherwise, writing is already disabled in the PTE and we just
  3852          * configure the protections for the section mapping that is
  3853          * about to be created.
 3854          */
 3855         if ((first_pve->pv_flags & PVF_WRITE) != 0) {
 3856                 if (!L2_S_WRITABLE(firstpte)) {
 3857                         first_pve->pv_flags &= ~PVF_WRITE;
 3858                         prot &= ~VM_PROT_WRITE;
 3859                 }
 3860         } else
 3861                 prot &= ~VM_PROT_WRITE;
 3862 
 3863         if (!L2_S_EXECUTABLE(firstpte))
 3864                 prot &= ~VM_PROT_EXECUTE;
 3865 
 3866         /* 
 3867          * Examine each of the other PTEs in the specified l2_bucket. 
 3868          * Abort if this PTE maps an unexpected 4KB physical page or
 3869          * does not have identical characteristics to the first PTE.
 3870          */
 3871         pa = l2pte_pa(firstpte) + ((L2_PTE_NUM_TOTAL - 1) * PAGE_SIZE);
 3872         old_va = L2_NEXT_BUCKET(first_va) - PAGE_SIZE;
 3873 
 3874         for (pte = (firstptep + L2_PTE_NUM_TOTAL - 1); pte > firstptep; pte--) {
 3875                 oldpte = *pte;
 3876                 if (l2pte_pa(oldpte) != pa) {
 3877                         pmap_section_p_failures++;
 3878                         CTR2(KTR_PMAP, "pmap_promote_section: failure for "
 3879                             "va %#x in pmap %p", va, pmap);
 3880                         return;
 3881                 }
 3882                 if ((oldpte & L2_S_PROMOTE) != (firstpte & L2_S_PROMOTE)) {
 3883                         pmap_section_p_failures++;
 3884                         CTR2(KTR_PMAP, "pmap_promote_section: failure for "
 3885                             "va %#x in pmap %p", va, pmap);
 3886                         return;
 3887                 }
 3888                 oldm = PHYS_TO_VM_PAGE(l2pte_pa(oldpte));
 3889                 if (oldm && ((oldm->oflags & VPO_UNMANAGED) != 0)) {
 3890                         pmap_section_p_failures++;
 3891                         CTR2(KTR_PMAP, "pmap_promote_section: failure for "
 3892                             "va %#x in pmap %p", va, pmap);
 3893                         return;
 3894                 }
 3895 
 3896                 pve = pmap_find_pv(&oldm->md, pmap, old_va);
 3897                 if (pve == NULL) {
 3898                         pmap_section_p_failures++;
 3899                         CTR2(KTR_PMAP, "pmap_promote_section: failure for "
 3900                             "va %#x old_va  %x - no pve", va, old_va);
 3901                         return;
 3902                 }
 3903 
 3904                 if (!L2_S_WRITABLE(oldpte) && (pve->pv_flags & PVF_WRITE))
 3905                         pve->pv_flags &= ~PVF_WRITE;
 3906                 if (pve->pv_flags != first_pve->pv_flags) {
 3907                         pmap_section_p_failures++;
 3908                         CTR2(KTR_PMAP, "pmap_promote_section: failure for "
 3909                             "va %#x in pmap %p", va, pmap);
 3910                         return;
 3911                 }
 3912 
 3913                 old_va -= PAGE_SIZE;
 3914                 pa -= PAGE_SIZE;
 3915         }
 3916         /*
 3917          * Promote the pv entries.
 3918          */
 3919         pmap_pv_promote_section(pmap, first_va, l2pte_pa(firstpte));
 3920         /*
 3921          * Map the superpage.
 3922          */
 3923         pmap_map_section(pmap, first_va, l2pte_pa(firstpte), prot, TRUE);
 3924         /*
  3925          * Invalidate all possible TLB mappings for the small
  3926          * pages within the newly created superpage.
  3927          * Rely on the first PTE's attributes since they have
  3928          * to be consistent across all of the base pages within
  3929          * the superpage.  If the page is not executable it is
  3930          * at least referenced.
  3931          * The fastest way to do this is to invalidate the whole
  3932          * TLB at once instead of executing 256 single-entry CP15
  3933          * TLB invalidations.  TLBs usually hold only a few dozen
  3934          * entries, so losing some unrelated entries is still
  3935          * cheaper than 256 individual invalidations.
 3936          */
 3937         if (L2_S_EXECUTABLE(firstpte))
 3938                 cpu_tlb_flushID();
 3939         else
 3940                 cpu_tlb_flushD();
 3941         cpu_cpwait();
 3942 
 3943         pmap_section_promotions++;
 3944         CTR2(KTR_PMAP, "pmap_promote_section: success for va %#x"
 3945             " in pmap %p", first_va, pmap);
 3946 }
 3947 
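/*
 * Illustrative sketch: the contiguity and attribute check performed by
 * pmap_promote_section() above.  All 256 PTEs of the l2_bucket must map
 * 1 MB-aligned, physically contiguous memory and agree on the attribute bits
 * relevant for promotion.  NPTES_SK, FRAME_MASK_SK and ATTR_MASK_SK are
 * hypothetical stand-ins for L2_PTE_NUM_TOTAL, L2_S_FRAME and L2_S_PROMOTE;
 * this is a standalone model of the check only.
 */
#include <stdbool.h>
#include <stdint.h>

#define NPTES_SK        256u
#define PAGE_SIZE_SK    4096u
#define FRAME_MASK_SK   0xfffff000u
#define ATTR_MASK_SK    0x00000ffcu

static bool
can_promote_sk(const uint32_t pte[NPTES_SK])
{
        uint32_t first_pa, pa, attrs, i;

        first_pa = pte[0] & FRAME_MASK_SK;
        if ((first_pa & (NPTES_SK * PAGE_SIZE_SK - 1)) != 0)
                return (false);         /* not 1 MB aligned */
        attrs = pte[0] & ATTR_MASK_SK;
        pa = first_pa + (NPTES_SK - 1) * PAGE_SIZE_SK;
        /* Walk from the last PTE back toward the first, as the kernel does. */
        for (i = NPTES_SK - 1; i > 0; i--, pa -= PAGE_SIZE_SK) {
                if ((pte[i] & FRAME_MASK_SK) != pa)
                        return (false); /* not physically contiguous */
                if ((pte[i] & ATTR_MASK_SK) != attrs)
                        return (false); /* attributes differ */
        }
        return (true);
}
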
 3948 /*
 3949  * Fills a l2_bucket with mappings to consecutive physical pages.
 3950  */
 3951 static void
 3952 pmap_fill_l2b(struct l2_bucket *l2b, pt_entry_t newpte)
 3953 {
 3954         pt_entry_t *ptep;
 3955         int i;
 3956 
 3957         for (i = 0; i < L2_PTE_NUM_TOTAL; i++) {
 3958                 ptep = &l2b->l2b_kva[i];
 3959                 *ptep = newpte;
 3960                 PTE_SYNC(ptep);
 3961 
 3962                 newpte += PAGE_SIZE;
 3963         }
 3964 
 3965         l2b->l2b_occupancy = L2_PTE_NUM_TOTAL;
 3966 }
 3967 
 3968 /*
 3969  * Tries to demote a 1MB section mapping. If demotion fails, the
 3970  * 1MB section mapping is invalidated.
 3971  */
 3972 static boolean_t
 3973 pmap_demote_section(pmap_t pmap, vm_offset_t va)
 3974 {
 3975         struct l2_bucket *l2b;
 3976         struct pv_entry *l1pdpve;
 3977         struct md_page *pvh;
 3978         pd_entry_t *pl1pd, l1pd, newl1pd;
 3979         pt_entry_t *firstptep, newpte;
 3980         vm_offset_t pa;
 3981         vm_page_t m;
 3982 
 3983         PMAP_ASSERT_LOCKED(pmap);
 3984         /*
  3985          * According to the assumptions described in pmap_promote_section,
  3986          * the kernel is and always should be mapped using 1MB section
  3987          * mappings.  What is more, managed kernel pages are never promoted.
 3988          */
 3989         KASSERT(pmap != pmap_kernel() && L1_IDX(va) != L1_IDX(vector_page),
 3990             ("pmap_demote_section: forbidden section mapping"));
 3991 
 3992         va = trunc_1mpage(va);
 3993         pl1pd = &pmap->pm_l1->l1_kva[L1_IDX(va)];
 3994         l1pd = *pl1pd;
 3995         KASSERT((l1pd & L1_TYPE_MASK) == L1_S_PROTO,
 3996             ("pmap_demote_section: not section or invalid section"));
 3997         
 3998         pa = l1pd & L1_S_FRAME;
 3999         m = PHYS_TO_VM_PAGE(pa);
 4000         KASSERT((m != NULL && (m->oflags & VPO_UNMANAGED) == 0),
 4001             ("pmap_demote_section: no vm_page for selected superpage or"
 4002              "unmanaged"));
 4003 
 4004         pvh = pa_to_pvh(pa);
 4005         l1pdpve = pmap_find_pv(pvh, pmap, va);
 4006         KASSERT(l1pdpve != NULL, ("pmap_demote_section: no pv entry for "
 4007             "managed page"));
 4008 
 4009         l2b = pmap_get_l2_bucket(pmap, va);
 4010         if (l2b == NULL) {
 4011                 KASSERT((l1pdpve->pv_flags & PVF_WIRED) == 0,
 4012                     ("pmap_demote_section: No l2_bucket for wired mapping"));
 4013                 /*
 4014                  * Invalidate the 1MB section mapping and return
 4015                  * "failure" if the mapping was never accessed or the
 4016                  * allocation of the new l2_bucket fails.
 4017                  */
 4018                 if (!L1_S_REFERENCED(l1pd) ||
 4019                     (l2b = pmap_alloc_l2_bucket(pmap, va)) == NULL) {
 4020                         /* Unmap and invalidate superpage. */
 4021                         pmap_remove_section(pmap, trunc_1mpage(va));
 4022                         CTR2(KTR_PMAP, "pmap_demote_section: failure for "
 4023                             "va %#x in pmap %p", va, pmap);
 4024                         return (FALSE);
 4025                 }
 4026         }
 4027 
 4028         /*
  4029          * Now we should have the corresponding l2_bucket available.
  4030          * Process it to recreate the 256 PTEs, one for each base page
  4031          * within the superpage.
 4032          */
 4033         newpte = pa | L1_S_DEMOTE(l1pd);
 4034         if (m->md.pv_memattr != VM_MEMATTR_UNCACHEABLE)
 4035                 newpte |= pte_l2_s_cache_mode;
 4036 
 4037         /*
 4038          * If the l2_bucket is new, initialize it.
 4039          */
 4040         if (l2b->l2b_occupancy == 0)
 4041                 pmap_fill_l2b(l2b, newpte);
 4042         else {
 4043                 firstptep = &l2b->l2b_kva[0];
 4044                 KASSERT(l2pte_pa(*firstptep) == (pa),
 4045                     ("pmap_demote_section: firstpte and newpte map different "
 4046                      "physical addresses"));
 4047                 /*
 4048                  * If the mapping has changed attributes, update the page table
 4049                  * entries.
 4050                  */ 
 4051                 if ((*firstptep & L2_S_PROMOTE) != (L1_S_DEMOTE(l1pd)))
 4052                         pmap_fill_l2b(l2b, newpte);
 4053         }
 4054         /* Demote PV entry */
 4055         pmap_pv_demote_section(pmap, va, pa);
 4056 
 4057         /* Now fix-up L1 */
 4058         newl1pd = l2b->l2b_phys | L1_C_DOM(pmap->pm_domain) | L1_C_PROTO;
 4059         *pl1pd = newl1pd;
 4060         PTE_SYNC(pl1pd);
 4061         /* Invalidate old TLB mapping */
 4062         if (L1_S_EXECUTABLE(l1pd))
 4063                 cpu_tlb_flushID_SE(va);
 4064         else if (L1_S_REFERENCED(l1pd))
 4065                 cpu_tlb_flushD_SE(va);
 4066         cpu_cpwait();
 4067 
 4068         pmap_section_demotions++;
 4069         CTR2(KTR_PMAP, "pmap_demote_section: success for va %#x"
 4070             " in pmap %p", va, pmap);
 4071         return (TRUE);
 4072 }
 4073 
 4074 /***************************************************
 4075  * page management routines.
 4076  ***************************************************/
 4077 
 4078 /*
 4079  * We are in a serious low memory condition.  Resort to
 4080  * drastic measures to free some pages so we can allocate
 4081  * another pv entry chunk.
 4082  */
 4083 static vm_page_t
 4084 pmap_pv_reclaim(pmap_t locked_pmap)
 4085 {
 4086         struct pch newtail;
 4087         struct pv_chunk *pc;
 4088         struct l2_bucket *l2b = NULL;
 4089         pmap_t pmap;
 4090         pd_entry_t *pl1pd;
 4091         pt_entry_t *ptep;
 4092         pv_entry_t pv;
 4093         vm_offset_t va;
 4094         vm_page_t free, m, m_pc;
 4095         uint32_t inuse;
 4096         int bit, field, freed, idx;
 4097 
 4098         PMAP_ASSERT_LOCKED(locked_pmap);
 4099         pmap = NULL;
 4100         free = m_pc = NULL;
 4101         TAILQ_INIT(&newtail);
 4102         while ((pc = TAILQ_FIRST(&pv_chunks)) != NULL && (pv_vafree == 0 ||
 4103             free == NULL)) {
 4104                 TAILQ_REMOVE(&pv_chunks, pc, pc_lru);
 4105                 if (pmap != pc->pc_pmap) {
 4106                         if (pmap != NULL) {
 4107                                 cpu_tlb_flushID();
 4108                                 cpu_cpwait();
 4109                                 if (pmap != locked_pmap)
 4110                                         PMAP_UNLOCK(pmap);
 4111                         }
 4112                         pmap = pc->pc_pmap;
 4113                         /* Avoid deadlock and lock recursion. */
 4114                         if (pmap > locked_pmap)
 4115                                 PMAP_LOCK(pmap);
 4116                         else if (pmap != locked_pmap && !PMAP_TRYLOCK(pmap)) {
 4117                                 pmap = NULL;
 4118                                 TAILQ_INSERT_TAIL(&newtail, pc, pc_lru);
 4119                                 continue;
 4120                         }
 4121                 }
 4122 
 4123                 /*
 4124                  * Destroy every non-wired, 4 KB page mapping in the chunk.
 4125                  */
 4126                 freed = 0;
 4127                 for (field = 0; field < _NPCM; field++) {
 4128                         for (inuse = ~pc->pc_map[field] & pc_freemask[field];
 4129                             inuse != 0; inuse &= ~(1UL << bit)) {
 4130                                 bit = ffs(inuse) - 1;
 4131                                 idx = field * sizeof(inuse) * NBBY + bit;
 4132                                 pv = &pc->pc_pventry[idx];
 4133                                 va = pv->pv_va;
 4134 
 4135                                 pl1pd = &pmap->pm_l1->l1_kva[L1_IDX(va)];
 4136                                 if ((*pl1pd & L1_TYPE_MASK) == L1_S_PROTO)
 4137                                         continue;
 4138                                 if (pv->pv_flags & PVF_WIRED)
 4139                                         continue;
 4140 
 4141                                 l2b = pmap_get_l2_bucket(pmap, va);
 4142                                 KASSERT(l2b != NULL, ("No l2 bucket"));
 4143                                 ptep = &l2b->l2b_kva[l2pte_index(va)];
 4144                                 m = PHYS_TO_VM_PAGE(l2pte_pa(*ptep));
 4145                                 KASSERT((vm_offset_t)m >= KERNBASE,
 4146                                     ("Trying to access non-existent page "
 4147                                      "va %x pte %x", va, *ptep));
 4148                                 *ptep = 0;
 4149                                 PTE_SYNC(ptep);
 4150                                 TAILQ_REMOVE(&m->md.pv_list, pv, pv_list);
 4151                                 if (TAILQ_EMPTY(&m->md.pv_list))
 4152                                         vm_page_aflag_clear(m, PGA_WRITEABLE);
 4153                                 pc->pc_map[field] |= 1UL << bit;
 4154                                 freed++;
 4155                         }
 4156                 }
 4157 
 4158                 if (freed == 0) {
 4159                         TAILQ_INSERT_TAIL(&newtail, pc, pc_lru);
 4160                         continue;
 4161                 }
 4162                 /* Every freed mapping is for a 4 KB page. */
 4163                 pmap->pm_stats.resident_count -= freed;
 4164                 PV_STAT(pv_entry_frees += freed);
 4165                 PV_STAT(pv_entry_spare += freed);
 4166                 pv_entry_count -= freed;
 4167                 TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
 4168                 for (field = 0; field < _NPCM; field++)
 4169                         if (pc->pc_map[field] != pc_freemask[field]) {
 4170                                 TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc,
 4171                                     pc_list);
 4172                                 TAILQ_INSERT_TAIL(&newtail, pc, pc_lru);
 4173 
 4174                                 /*
 4175                                  * One freed pv entry in locked_pmap is
 4176                                  * sufficient.
 4177                                  */
 4178                                 if (pmap == locked_pmap)
 4179                                         goto out;
 4180                                 break;
 4181                         }
 4182                 if (field == _NPCM) {
 4183                         PV_STAT(pv_entry_spare -= _NPCPV);
 4184                         PV_STAT(pc_chunk_count--);
 4185                         PV_STAT(pc_chunk_frees++);
 4186                         /* Entire chunk is free; return it. */
 4187                         m_pc = PHYS_TO_VM_PAGE(pmap_kextract((vm_offset_t)pc));
 4188                         pmap_qremove((vm_offset_t)pc, 1);
 4189                         pmap_ptelist_free(&pv_vafree, (vm_offset_t)pc);
 4190                         break;
 4191                 }
 4192         }
 4193 out:
 4194         TAILQ_CONCAT(&pv_chunks, &newtail, pc_lru);
 4195         if (pmap != NULL) {
 4196                 cpu_tlb_flushID();
 4197                 cpu_cpwait();
 4198                 if (pmap != locked_pmap)
 4199                         PMAP_UNLOCK(pmap);
 4200         }
 4201         return (m_pc);
 4202 }
 4203 
 4204 /*
 4205  * free the pv_entry back to the free list
 4206  */
 4207 static void
 4208 pmap_free_pv_entry(pmap_t pmap, pv_entry_t pv)
 4209 {
 4210         struct pv_chunk *pc;
 4211         int bit, field, idx;
 4212 
 4213         rw_assert(&pvh_global_lock, RA_WLOCKED);
 4214         PMAP_ASSERT_LOCKED(pmap);
 4215         PV_STAT(pv_entry_frees++);
 4216         PV_STAT(pv_entry_spare++);
 4217         pv_entry_count--;
 4218         pc = pv_to_chunk(pv);
 4219         idx = pv - &pc->pc_pventry[0];
 4220         field = idx / (sizeof(u_long) * NBBY);
 4221         bit = idx % (sizeof(u_long) * NBBY);
 4222         pc->pc_map[field] |= 1ul << bit;
 4223         for (idx = 0; idx < _NPCM; idx++)
 4224                 if (pc->pc_map[idx] != pc_freemask[idx]) {
 4225                         /*
 4226                          * 98% of the time, pc is already at the head of the
 4227                          * list.  If it isn't already, move it to the head.
 4228                          */
 4229                         if (__predict_false(TAILQ_FIRST(&pmap->pm_pvchunk) !=
 4230                             pc)) {
 4231                                 TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
 4232                                 TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc,
 4233                                     pc_list);
 4234                         }
 4235                         return;
 4236                 }
 4237         TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
 4238         pmap_free_pv_chunk(pc);
 4239 }
 4240 
 4241 static void
 4242 pmap_free_pv_chunk(struct pv_chunk *pc)
 4243 {
 4244         vm_page_t m;
 4245 
 4246         TAILQ_REMOVE(&pv_chunks, pc, pc_lru);
 4247         PV_STAT(pv_entry_spare -= _NPCPV);
 4248         PV_STAT(pc_chunk_count--);
 4249         PV_STAT(pc_chunk_frees++);
 4250         /* entire chunk is free, return it */
 4251         m = PHYS_TO_VM_PAGE(pmap_kextract((vm_offset_t)pc));
 4252         pmap_qremove((vm_offset_t)pc, 1);
 4253         vm_page_unwire(m, 0);
 4254         vm_page_free(m);
 4255         pmap_ptelist_free(&pv_vafree, (vm_offset_t)pc);
 4256 
 4257 }
 4258 
 4259 static pv_entry_t
 4260 pmap_get_pv_entry(pmap_t pmap, boolean_t try)
 4261 {
 4262         static const struct timeval printinterval = { 60, 0 };
 4263         static struct timeval lastprint;
 4264         struct pv_chunk *pc;
 4265         pv_entry_t pv;
 4266         vm_page_t m;
 4267         int bit, field, idx;
 4268 
 4269         rw_assert(&pvh_global_lock, RA_WLOCKED);
 4270         PMAP_ASSERT_LOCKED(pmap);
 4271         PV_STAT(pv_entry_allocs++);
 4272         pv_entry_count++;
 4273 
 4274         if (pv_entry_count > pv_entry_high_water)
 4275                 if (ratecheck(&lastprint, &printinterval))
 4276                         printf("%s: Approaching the limit on PV entries.\n",
 4277                             __func__);
 4278 retry:
 4279         pc = TAILQ_FIRST(&pmap->pm_pvchunk);
 4280         if (pc != NULL) {
 4281                 for (field = 0; field < _NPCM; field++) {
 4282                         if (pc->pc_map[field]) {
 4283                                 bit = ffs(pc->pc_map[field]) - 1;
 4284                                 break;
 4285                         }
 4286                 }
 4287                 if (field < _NPCM) {
 4288                         idx = field * sizeof(pc->pc_map[field]) * NBBY + bit;
 4289                         pv = &pc->pc_pventry[idx];
 4290                         pc->pc_map[field] &= ~(1ul << bit);
 4291                         /* If this was the last item, move it to tail */
 4292                         for (field = 0; field < _NPCM; field++)
 4293                                 if (pc->pc_map[field] != 0) {
 4294                                         PV_STAT(pv_entry_spare--);
 4295                                         return (pv);    /* not full, return */
 4296                                 }
 4297                         TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
 4298                         TAILQ_INSERT_TAIL(&pmap->pm_pvchunk, pc, pc_list);
 4299                         PV_STAT(pv_entry_spare--);
 4300                         return (pv);
 4301                 }
 4302         }
 4303         /*
 4304          * Access to the ptelist "pv_vafree" is synchronized by the pvh
 4305          * global lock.  If "pv_vafree" is currently non-empty, it will
 4306          * remain non-empty until pmap_ptelist_alloc() completes.
 4307          */
 4308         if (pv_vafree == 0 || (m = vm_page_alloc(NULL, 0, VM_ALLOC_NORMAL |
 4309             VM_ALLOC_NOOBJ | VM_ALLOC_WIRED)) == NULL) {
 4310                 if (try) {
 4311                         pv_entry_count--;
 4312                         PV_STAT(pc_chunk_tryfail++);
 4313                         return (NULL);
 4314                 }
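                /*
                 * Reclaim pv entries from other chunks.  Even when no whole
                 * chunk page is recovered (m == NULL), entries may have been
                 * freed in this pmap, so retry the fast path above.
                 */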
 4315                 m = pmap_pv_reclaim(pmap);
 4316                 if (m == NULL)
 4317                         goto retry;
 4318         }
 4319         PV_STAT(pc_chunk_count++);
 4320         PV_STAT(pc_chunk_allocs++);
 4321         pc = (struct pv_chunk *)pmap_ptelist_alloc(&pv_vafree);
 4322         pmap_qenter((vm_offset_t)pc, &m, 1);
 4323         pc->pc_pmap = pmap;
 4324         pc->pc_map[0] = pc_freemask[0] & ~1ul;  /* preallocated bit 0 */
 4325         for (field = 1; field < _NPCM; field++)
 4326                 pc->pc_map[field] = pc_freemask[field];
 4327         TAILQ_INSERT_TAIL(&pv_chunks, pc, pc_lru);
 4328         pv = &pc->pc_pventry[0];
 4329         TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc, pc_list);
 4330         PV_STAT(pv_entry_spare += _NPCPV - 1);
 4331         return (pv);
 4332 }
 4333 
 4334 /*
 4335  *      Remove the given range of addresses from the specified map.
 4336  *
 4337  *      It is assumed that the start and end are properly
 4338  *      rounded to the page size.
 4339  */
 4340 #define PMAP_REMOVE_CLEAN_LIST_SIZE     3
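/*
 * pmap_remove() flushes at most PMAP_REMOVE_CLEAN_LIST_SIZE - 1 individual
 * TLB entries; once that many 4 KB mappings have been torn down it is
 * cheaper to do a single full TLB flush at the end instead.
 */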
 4341 void
 4342 pmap_remove(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
 4343 {
 4344         struct l2_bucket *l2b;
 4345         vm_offset_t next_bucket;
 4346         pd_entry_t *pl1pd, l1pd;
 4347         pt_entry_t *ptep;
 4348         u_int total;
 4349         u_int mappings, is_exec, is_refd;
 4350         int flushall = 0;
 4351 
 4352 
 4353         /*
 4354          * we lock in the pmap => pv_head direction
 4355          */
 4356 
 4357         rw_wlock(&pvh_global_lock);
 4358         PMAP_LOCK(pmap);
 4359         total = 0;
 4360         while (sva < eva) {
 4361                 /*
 4362                  * Check for large page.
 4363                  */
 4364                 pl1pd = &pmap->pm_l1->l1_kva[L1_IDX(sva)];
 4365                 l1pd = *pl1pd;
 4366                 if ((l1pd & L1_TYPE_MASK) == L1_S_PROTO) {
 4367                         KASSERT((l1pd & L1_S_DOM_MASK) !=
 4368                             L1_S_DOM(PMAP_DOMAIN_KERNEL), ("pmap_remove: "
 4369                             "Trying to remove kernel section mapping"));
 4370                         /*
 4371                          * Are we removing the entire large page?  If not,
 4372                          * demote the mapping and fall through.
 4373                          */
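                        /*
                         * L2_NEXT_BUCKET(sva) is the end of the 1 MB region
                         * backing this section.  The whole section is removed
                         * only when sva is section-aligned and eva reaches at
                         * least that boundary; otherwise demote to 4 KB pages
                         * and let the per-page loop below handle the range.
                         */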
 4374                         if (sva + L1_S_SIZE == L2_NEXT_BUCKET(sva) &&
 4375                             eva >= L2_NEXT_BUCKET(sva)) {
 4376                                 pmap_remove_section(pmap, sva);
 4377                                 sva = L2_NEXT_BUCKET(sva);
 4378                                 continue;
 4379                         } else if (!pmap_demote_section(pmap, sva)) {
 4380                                 /* The large page mapping was destroyed. */
 4381                                 sva = L2_NEXT_BUCKET(sva);
 4382                                 continue;
 4383                         }
 4384                 }
 4385                 /*
 4386                  * Do one L2 bucket's worth at a time.
 4387                  */
 4388                 next_bucket = L2_NEXT_BUCKET(sva);
 4389                 if (next_bucket > eva)
 4390                         next_bucket = eva;
 4391 
 4392                 l2b = pmap_get_l2_bucket(pmap, sva);
 4393                 if (l2b == NULL) {
 4394                         sva = next_bucket;
 4395                         continue;
 4396                 }
 4397 
 4398                 ptep = &l2b->l2b_kva[l2pte_index(sva)];
 4399                 mappings = 0;
 4400 
 4401                 while (sva < next_bucket) {
 4402                         struct vm_page *m;
 4403                         pt_entry_t pte;
 4404                         vm_paddr_t pa;
 4405 
 4406                         pte = *ptep;
 4407 
 4408                         if (pte == 0) {
 4409                                 /*
 4410                                  * Nothing here, move along
 4411                                  */
 4412                                 sva += PAGE_SIZE;
 4413                                 ptep++;
 4414                                 continue;
 4415                         }
 4416 
 4417                         pmap->pm_stats.resident_count--;
 4418                         pa = l2pte_pa(pte);
 4419                         is_exec = 0;
 4420                         is_refd = 1;
 4421 
 4422                         /*
 4423                          * Update flags. In a number of circumstances,
 4424                          * we could cluster a lot of these and do a
 4425                          * number of sequential pages in one go.
 4426                          */
 4427                         if ((m = PHYS_TO_VM_PAGE(pa)) != NULL) {
 4428                                 struct pv_entry *pve;
 4429 
 4430                                 pve = pmap_remove_pv(m, pmap, sva);
 4431                                 if (pve) {
 4432                                         is_exec = PTE_BEEN_EXECD(pte);
 4433                                         is_refd = PTE_BEEN_REFD(pte);
 4434                                         pmap_free_pv_entry(pmap, pve);
 4435                                 }
 4436                         }
 4437 
 4438                         *ptep = 0;
 4439                         PTE_SYNC(ptep);
 4440                         if (pmap_is_current(pmap)) {
 4441                                 total++;
 4442                                 if (total < PMAP_REMOVE_CLEAN_LIST_SIZE) {
 4443                                         if (is_exec)
 4444                                                 cpu_tlb_flushID_SE(sva);
 4445                                         else if (is_refd)
 4446                                                 cpu_tlb_flushD_SE(sva);
 4447                                 } else if (total == PMAP_REMOVE_CLEAN_LIST_SIZE)
 4448                                         flushall = 1;
 4449                         }
 4450 
 4451                         sva += PAGE_SIZE;
 4452                         ptep++;
 4453                         mappings++;
 4454                 }
 4455 
 4456                 pmap_free_l2_bucket(pmap, l2b, mappings);
 4457         }
 4458 
 4459         rw_wunlock(&pvh_global_lock);
 4460         if (flushall)
 4461                 cpu_tlb_flushID();
 4462         cpu_cpwait();
 4463 
 4464         PMAP_UNLOCK(pmap);
 4465 }
 4466 
 4467 /*
 4468  * pmap_zero_page()
 4469  *
 4470  * Zero a given physical page by mapping it at a page hook point.
 4471  * In doing the zero page op, the page being zeroed is mapped cacheable
 4472  * because, on StrongARM, accesses to non-cached pages are non-burst,
 4473  * making the writing of _any_ bulk data very slow.
 4474  */
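/*
 * Each CPU owns a czpages slot: a pair of reserved kernel VAs (srcva/dstva)
 * with dedicated PTEs used as temporary mapping points for copy and zero
 * operations.  sched_pin() plus the per-CPU mutex keep that mapping private
 * to this CPU for the duration of the operation.
 */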
 4475 static void
 4476 pmap_zero_page_gen(vm_page_t m, int off, int size)
 4477 {
 4478         struct czpages *czp;
 4479 
 4480         KASSERT(TAILQ_EMPTY(&m->md.pv_list), 
 4481             ("pmap_zero_page_gen: page has mappings"));
 4482 
 4483         vm_paddr_t phys = VM_PAGE_TO_PHYS(m);
 4484 
 4485         sched_pin();
 4486         czp = &cpu_czpages[PCPU_GET(cpuid)];
 4487         mtx_lock(&czp->lock);
 4488         
 4489         /*
 4490          * Hook in the page, zero it.
 4491          */
 4492         *czp->dstptep = L2_S_PROTO | phys | pte_l2_s_cache_mode | L2_S_REF;
 4493         pmap_set_prot(czp->dstptep, VM_PROT_WRITE, 0);
 4494         PTE_SYNC(czp->dstptep);
 4495         cpu_tlb_flushD_SE(czp->dstva);
 4496         cpu_cpwait();
 4497 
 4498         if (off || size != PAGE_SIZE)
 4499                 bzero((void *)(czp->dstva + off), size);
 4500         else
 4501                 bzero_page(czp->dstva);
 4502 
 4503         /*
 4504          * Although aliasing is not possible, if we use temporary mappings with
 4505          * memory that will be mapped later as non-cached or with write-through
 4506          * caches, we might end up overwriting it when calling wbinv_all.  So
 4507          * make sure caches are clean after the operation.
 4508          */
 4509         cpu_idcache_wbinv_range(czp->dstva, size);
 4510         pmap_l2cache_wbinv_range(czp->dstva, phys, size);
 4511 
 4512         mtx_unlock(&czp->lock);
 4513         sched_unpin();
 4514 }
 4515 
 4516 /*
 4517  *      pmap_zero_page zeros the specified hardware page by mapping
 4518  *      the page into KVM and using bzero to clear its contents.
 4519  */
 4520 void
 4521 pmap_zero_page(vm_page_t m)
 4522 {
 4523         pmap_zero_page_gen(m, 0, PAGE_SIZE);
 4524 }
 4525 
 4526 
 4527 /*
 4528  *      pmap_zero_page_area zeros the specified hardware page by mapping
 4529  *      the page into KVM and using bzero to clear its contents.
 4530  *
 4531  *      off and size may not cover an area beyond a single hardware page.
 4532  */
 4533 void
 4534 pmap_zero_page_area(vm_page_t m, int off, int size)
 4535 {
 4536 
 4537         pmap_zero_page_gen(m, off, size);
 4538 }
 4539 
 4540 
 4541 /*
 4542  *      pmap_zero_page_idle zeros the specified hardware page by mapping
 4543  *      the page into KVM and using bzero to clear its contents.  This
 4544  *      is intended to be called from the vm_pagezero process only and
 4545  *      outside of Giant.
 4546  */
 4547 void
 4548 pmap_zero_page_idle(vm_page_t m)
 4549 {
 4550 
 4551         pmap_zero_page(m);
 4552 }
 4553 
 4554 /*
 4555  *      pmap_copy_page copies the specified (machine independent)
 4556  *      page by mapping the page into virtual memory and using
 4557  *      bcopy to copy the page, one machine dependent page at a
 4558  *      time.
 4559  */
 4560 
 4561 /*
 4562  * pmap_copy_page()
 4563  *
 4564  * Copy one physical page into another, by mapping the pages into
 4565  * hook points. The same comment regarding cachability as in
 4566  * pmap_zero_page also applies here.
 4567  */
 4568 void
 4569 pmap_copy_page_generic(vm_paddr_t src, vm_paddr_t dst)
 4570 {
 4571         struct czpages *czp;
 4572 
 4573         sched_pin();
 4574         czp = &cpu_czpages[PCPU_GET(cpuid)];
 4575         mtx_lock(&czp->lock);
 4576         
 4577         /*
 4578          * Map the pages into the page hook points, copy them, and purge the
 4579          * cache for the appropriate page.
 4580          */
 4581         *czp->srcptep = L2_S_PROTO | src | pte_l2_s_cache_mode | L2_S_REF;
 4582         pmap_set_prot(czp->srcptep, VM_PROT_READ, 0);
 4583         PTE_SYNC(czp->srcptep);
 4584         cpu_tlb_flushD_SE(czp->srcva);
 4585         *czp->dstptep = L2_S_PROTO | dst | pte_l2_s_cache_mode | L2_S_REF;
 4586         pmap_set_prot(czp->dstptep, VM_PROT_READ | VM_PROT_WRITE, 0);
 4587         PTE_SYNC(czp->dstptep);
 4588         cpu_tlb_flushD_SE(czp->dstva);
 4589         cpu_cpwait();
 4590 
 4591         bcopy_page(czp->srcva, czp->dstva);
 4592 
 4593         /*
 4594          * Although aliasing is not possible, if we use temporary mappings with
 4595          * memory that will be mapped later as non-cached or with write-through
 4596          * caches, we might end up overwriting it when calling wbinv_all.  So
 4597          * make sure caches are clean after the operation.
 4598          */
 4599         cpu_idcache_wbinv_range(czp->dstva, PAGE_SIZE);
 4600         pmap_l2cache_wbinv_range(czp->dstva, dst, PAGE_SIZE);
 4601 
 4602         mtx_unlock(&czp->lock);
 4603         sched_unpin();
 4604 }
 4605 
 4606 int unmapped_buf_allowed = 1;
 4607 
 4608 void
 4609 pmap_copy_pages(vm_page_t ma[], vm_offset_t a_offset, vm_page_t mb[],
 4610     vm_offset_t b_offset, int xfersize)
 4611 {
 4612         vm_page_t a_pg, b_pg;
 4613         vm_offset_t a_pg_offset, b_pg_offset;
 4614         int cnt;
 4615         struct czpages *czp;
 4616 
 4617         sched_pin();
 4618         czp = &cpu_czpages[PCPU_GET(cpuid)];
 4619         mtx_lock(&czp->lock);
 4620 
 4621         while (xfersize > 0) {
 4622                 a_pg = ma[a_offset >> PAGE_SHIFT];
 4623                 a_pg_offset = a_offset & PAGE_MASK;
 4624                 cnt = min(xfersize, PAGE_SIZE - a_pg_offset);
 4625                 b_pg = mb[b_offset >> PAGE_SHIFT];
 4626                 b_pg_offset = b_offset & PAGE_MASK;
 4627                 cnt = min(cnt, PAGE_SIZE - b_pg_offset);
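                /*
                 * cnt is clamped by whichever source or destination page runs
                 * out first: e.g. a_pg_offset = 0xe00 and b_pg_offset = 0x300
                 * leave 0x200 and 0xd00 bytes respectively, so at most 0x200
                 * bytes are copied in this pass.
                 */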
 4628                 *czp->srcptep = L2_S_PROTO | VM_PAGE_TO_PHYS(a_pg) |
 4629                     pte_l2_s_cache_mode | L2_S_REF;
 4630                 pmap_set_prot(czp->srcptep, VM_PROT_READ, 0);
 4631                 PTE_SYNC(czp->srcptep);
 4632                 cpu_tlb_flushD_SE(czp->srcva);
 4633                 *czp->dstptep = L2_S_PROTO | VM_PAGE_TO_PHYS(b_pg) |
 4634                     pte_l2_s_cache_mode | L2_S_REF;
 4635                 pmap_set_prot(czp->dstptep, VM_PROT_READ | VM_PROT_WRITE, 0);
 4636                 PTE_SYNC(czp->dstptep);
 4637                 cpu_tlb_flushD_SE(czp->dstva);
 4638                 cpu_cpwait();
 4639                 bcopy((char *)czp->srcva + a_pg_offset, (char *)czp->dstva + b_pg_offset,
 4640                     cnt);
 4641                 cpu_idcache_wbinv_range(czp->dstva + b_pg_offset, cnt);
 4642                 pmap_l2cache_wbinv_range(czp->dstva + b_pg_offset,
 4643                     VM_PAGE_TO_PHYS(b_pg) + b_pg_offset, cnt);
 4644                 xfersize -= cnt;
 4645                 a_offset += cnt;
 4646                 b_offset += cnt;
 4647         }
 4648 
 4649         mtx_unlock(&czp->lock);
 4650         sched_unpin();
 4651 }
 4652 
 4653 void
 4654 pmap_copy_page(vm_page_t src, vm_page_t dst)
 4655 {
 4656 
 4657         if (_arm_memcpy && PAGE_SIZE >= _min_memcpy_size &&
 4658             _arm_memcpy((void *)VM_PAGE_TO_PHYS(dst),
 4659             (void *)VM_PAGE_TO_PHYS(src), PAGE_SIZE, IS_PHYSICAL) == 0)
 4660                 return;
 4661 
 4662         pmap_copy_page_generic(VM_PAGE_TO_PHYS(src), VM_PAGE_TO_PHYS(dst));
 4663 }
 4664 
 4665 /*
 4666  * this routine returns true if a physical page resides
 4667  * in the given pmap.
 4668  */
 4669 boolean_t
 4670 pmap_page_exists_quick(pmap_t pmap, vm_page_t m)
 4671 {
 4672         struct md_page *pvh;
 4673         pv_entry_t pv;
 4674         int loops = 0;
 4675         boolean_t rv;
 4676 
 4677         KASSERT((m->oflags & VPO_UNMANAGED) == 0,
 4678             ("pmap_page_exists_quick: page %p is not managed", m));
 4679         rv = FALSE;
 4680         rw_wlock(&pvh_global_lock);
 4681         TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
 4682                 if (PV_PMAP(pv) == pmap) {
 4683                         rv = TRUE;
 4684                         break;
 4685                 }
 4686                 loops++;
 4687                 if (loops >= 16)
 4688                         break;
 4689         }
 4690         if (!rv && loops < 16 && (m->flags & PG_FICTITIOUS) == 0) {
 4691                 pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
 4692                 TAILQ_FOREACH(pv, &pvh->pv_list, pv_list) {
 4693                         if (PV_PMAP(pv) == pmap) {
 4694                                 rv = TRUE;
 4695                                 break;
 4696                         }
 4697                         loops++;
 4698                         if (loops >= 16)
 4699                                 break;
 4700                 }
 4701         }
 4702         rw_wunlock(&pvh_global_lock);
 4703         return (rv);
 4704 }
 4705 
 4706 /*
 4707  *      pmap_page_wired_mappings:
 4708  *
 4709  *      Return the number of managed mappings to the given physical page
 4710  *      that are wired.
 4711  */
 4712 int
 4713 pmap_page_wired_mappings(vm_page_t m)
 4714 {
 4715         int count;
 4716 
 4717         count = 0;
 4718         if ((m->oflags & VPO_UNMANAGED) != 0)
 4719                 return (count);
 4720         rw_wlock(&pvh_global_lock);
 4721         count = pmap_pvh_wired_mappings(&m->md, count);
 4722         if ((m->flags & PG_FICTITIOUS) == 0) {
 4723             count = pmap_pvh_wired_mappings(pa_to_pvh(VM_PAGE_TO_PHYS(m)),
 4724                 count);
 4725         }
 4726         rw_wunlock(&pvh_global_lock);
 4727         return (count);
 4728 }
 4729 
 4730 /*
 4731  *      pmap_pvh_wired_mappings:
 4732  *
 4733  *      Return the updated number "count" of managed mappings that are wired.
 4734  */
 4735 static int
 4736 pmap_pvh_wired_mappings(struct md_page *pvh, int count)
 4737 {
 4738         pv_entry_t pv;
 4739 
 4740         rw_assert(&pvh_global_lock, RA_WLOCKED);
 4741         TAILQ_FOREACH(pv, &pvh->pv_list, pv_list) {
 4742                 if ((pv->pv_flags & PVF_WIRED) != 0)
 4743                         count++;
 4744         }
 4745         return (count);
 4746 }
 4747 
 4748 /*
 4749  * Returns TRUE if any of the given mappings were referenced and FALSE
 4750  * otherwise.  Both page and section mappings are supported.
 4751  */
 4752 static boolean_t
 4753 pmap_is_referenced_pvh(struct md_page *pvh)
 4754 {
 4755         struct l2_bucket *l2b;
 4756         pv_entry_t pv;
 4757         pd_entry_t *pl1pd;
 4758         pt_entry_t *ptep;
 4759         pmap_t pmap;
 4760         boolean_t rv;
 4761 
 4762         rw_assert(&pvh_global_lock, RA_WLOCKED);
 4763         rv = FALSE;
 4764         TAILQ_FOREACH(pv, &pvh->pv_list, pv_list) {
 4765                 pmap = PV_PMAP(pv);
 4766                 PMAP_LOCK(pmap);
 4767                 pl1pd = &pmap->pm_l1->l1_kva[L1_IDX(pv->pv_va)];
 4768                 if ((*pl1pd & L1_TYPE_MASK) == L1_S_PROTO)
 4769                         rv = L1_S_REFERENCED(*pl1pd);
 4770                 else {
 4771                         l2b = pmap_get_l2_bucket(pmap, pv->pv_va);
 4772                         ptep = &l2b->l2b_kva[l2pte_index(pv->pv_va)];
 4773                         rv = L2_S_REFERENCED(*ptep);
 4774                 }
 4775                 PMAP_UNLOCK(pmap);
 4776                 if (rv)
 4777                         break;
 4778         }
 4779         return (rv);
 4780 }
 4781 
 4782 /*
 4783  *      pmap_is_referenced:
 4784  *
 4785  *      Return whether or not the specified physical page was referenced
 4786  *      in any physical maps.
 4787  */
 4788 boolean_t
 4789 pmap_is_referenced(vm_page_t m)
 4790 {
 4791         boolean_t rv;
 4792 
 4793         KASSERT((m->oflags & VPO_UNMANAGED) == 0,
 4794             ("pmap_is_referenced: page %p is not managed", m));
 4795         rw_wlock(&pvh_global_lock);
 4796         rv = pmap_is_referenced_pvh(&m->md) ||
 4797             ((m->flags & PG_FICTITIOUS) == 0 &&
 4798             pmap_is_referenced_pvh(pa_to_pvh(VM_PAGE_TO_PHYS(m))));
 4799         rw_wunlock(&pvh_global_lock);
 4800         return (rv);
 4801 }
 4802 
 4803 /*
 4804  *      pmap_ts_referenced:
 4805  *
 4806  *      Return the count of reference bits for a page, clearing all of them.
 4807  */
 4808 int
 4809 pmap_ts_referenced(vm_page_t m)
 4810 {
 4811 
 4812         KASSERT((m->oflags & VPO_UNMANAGED) == 0,
 4813             ("pmap_ts_referenced: page %p is not managed", m));
 4814         return (pmap_clearbit(m, PVF_REF));
 4815 }
 4816 
 4817 /*
 4818  * Returns TRUE if any of the given mappings were used to modify
 4819  * physical memory. Otherwise, returns FALSE. Both page and 1MB section
 4820  * mappings are supported.
 4821  */
 4822 static boolean_t
 4823 pmap_is_modified_pvh(struct md_page *pvh)
 4824 {
 4825         pd_entry_t *pl1pd;
 4826         struct l2_bucket *l2b;
 4827         pv_entry_t pv;
 4828         pt_entry_t *ptep;
 4829         pmap_t pmap;
 4830         boolean_t rv;
 4831 
 4832         rw_assert(&pvh_global_lock, RA_WLOCKED);
 4833         rv = FALSE;
 4834 
 4835         TAILQ_FOREACH(pv, &pvh->pv_list, pv_list) {
 4836                 pmap = PV_PMAP(pv);
 4837                 PMAP_LOCK(pmap);
 4838                 pl1pd = &pmap->pm_l1->l1_kva[L1_IDX(pv->pv_va)];
 4839                 if ((*pl1pd & L1_TYPE_MASK) == L1_S_PROTO)
 4840                         rv = L1_S_WRITABLE(*pl1pd);
 4841                 else {
 4842                         l2b = pmap_get_l2_bucket(pmap, pv->pv_va);
 4843                         ptep = &l2b->l2b_kva[l2pte_index(pv->pv_va)];
 4844                         rv = L2_S_WRITABLE(*ptep);
 4845                 }
 4846                 PMAP_UNLOCK(pmap);
 4847                 if (rv)
 4848                         break;
 4849         }
 4850 
 4851         return (rv);
 4852 }
 4853 
 4854 boolean_t
 4855 pmap_is_modified(vm_page_t m)
 4856 {
 4857         boolean_t rv;
 4858 
 4859         KASSERT((m->oflags & VPO_UNMANAGED) == 0,
 4860             ("pmap_is_modified: page %p is not managed", m));
 4861         /*
 4862          * If the page is not exclusive busied, then PGA_WRITEABLE cannot be
 4863          * concurrently set while the object is locked.  Thus, if PGA_WRITEABLE
 4864          * is clear, no PTEs can have APX cleared.
 4865          */
 4866         VM_OBJECT_ASSERT_WLOCKED(m->object);
 4867         if (!vm_page_xbusied(m) && (m->aflags & PGA_WRITEABLE) == 0)
 4868                 return (FALSE);
 4869         rw_wlock(&pvh_global_lock);
 4870         rv = pmap_is_modified_pvh(&m->md) ||
 4871             ((m->flags & PG_FICTITIOUS) == 0 &&
 4872             pmap_is_modified_pvh(pa_to_pvh(VM_PAGE_TO_PHYS(m))));
 4873         rw_wunlock(&pvh_global_lock);
 4874         return (rv);
 4875 }
 4876 
 4877 /*
 4878  *      Apply the given advice to the specified range of addresses within the
 4879  *      given pmap.  Depending on the advice, clear the referenced and/or
 4880  *      modified flags in each mapping.
 4881  */
 4882 void
 4883 pmap_advise(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, int advice)
 4884 {
 4885         struct l2_bucket *l2b;
 4886         struct pv_entry *pve;
 4887         pd_entry_t *pl1pd, l1pd;
 4888         pt_entry_t *ptep, opte, pte;
 4889         vm_offset_t next_bucket;
 4890         vm_page_t m;
 4891 
 4892         if (advice != MADV_DONTNEED && advice != MADV_FREE)
 4893                 return;
 4894         rw_wlock(&pvh_global_lock);
 4895         PMAP_LOCK(pmap);
 4896         for (; sva < eva; sva = next_bucket) {
 4897                 next_bucket = L2_NEXT_BUCKET(sva);
 4898                 if (next_bucket < sva)
 4899                         next_bucket = eva;
 4900                 pl1pd = &pmap->pm_l1->l1_kva[L1_IDX(sva)];
 4901                 l1pd = *pl1pd;
 4902                 if ((l1pd & L1_TYPE_MASK) == L1_S_PROTO) {
 4903                         if (pmap == pmap_kernel())
 4904                                 continue;
 4905                         if (!pmap_demote_section(pmap, sva)) {
 4906                                 /*
 4907                                  * The large page mapping was destroyed.
 4908                                  */
 4909                                 continue;
 4910                         }
 4911                         /*
 4912                          * Unless the page mappings are wired, remove the
 4913                          * mapping to a single page so that a subsequent
 4914                          * access may repromote. Since the underlying
 4915                          * l2_bucket is fully populated, this removal
 4916                          * never frees an entire l2_bucket.
 4917                          */
 4918                         l2b = pmap_get_l2_bucket(pmap, sva);
 4919                         KASSERT(l2b != NULL,
 4920                             ("pmap_advise: no l2 bucket for "
 4921                              "va %#x, pmap %p", sva, pmap));
 4922                         ptep = &l2b->l2b_kva[l2pte_index(sva)];
 4923                         opte = *ptep;
 4924                         m = PHYS_TO_VM_PAGE(l2pte_pa(*ptep));
 4925                         KASSERT(m != NULL,
 4926                             ("pmap_advise: no vm_page for demoted superpage"));
 4927                         pve = pmap_find_pv(&m->md, pmap, sva);
 4928                         KASSERT(pve != NULL,
 4929                             ("pmap_advise: no PV entry for managed mapping"));
 4930                         if ((pve->pv_flags & PVF_WIRED) == 0) {
 4931                                 pmap_free_l2_bucket(pmap, l2b, 1);
 4932                                 pve = pmap_remove_pv(m, pmap, sva);
 4933                                 pmap_free_pv_entry(pmap, pve);
 4934                                 *ptep = 0;
 4935                                 PTE_SYNC(ptep);
 4936                                 if (pmap_is_current(pmap)) {
 4937                                         if (PTE_BEEN_EXECD(opte))
 4938                                                 cpu_tlb_flushID_SE(sva);
 4939                                         else if (PTE_BEEN_REFD(opte))
 4940                                                 cpu_tlb_flushD_SE(sva);
 4941                                 }
 4942                         }
 4943                 }
 4944                 if (next_bucket > eva)
 4945                         next_bucket = eva;
 4946                 l2b = pmap_get_l2_bucket(pmap, sva);
 4947                 if (l2b == NULL)
 4948                         continue;
 4949                 for (ptep = &l2b->l2b_kva[l2pte_index(sva)];
 4950                     sva != next_bucket; ptep++, sva += PAGE_SIZE) {
 4951                         opte = pte = *ptep;
 4952                         if ((opte & L2_S_PROTO) == 0)
 4953                                 continue;
 4954                         m = PHYS_TO_VM_PAGE(l2pte_pa(opte));
 4955                         if (m == NULL || (m->oflags & VPO_UNMANAGED) != 0)
 4956                                 continue;
 4957                         else if (L2_S_WRITABLE(opte)) {
 4958                                 if (advice == MADV_DONTNEED) {
 4959                                         /*
 4960                                          * Don't need to mark the page
 4961                                          * dirty as it was already marked as
 4962                                          * such in pmap_fault_fixup() or
 4963                                          * pmap_enter_locked().
 4964                                          * Just clear the state.
 4965                                          */
 4966                                 } else
 4967                                         pte |= L2_APX;
 4968 
 4969                                 pte &= ~L2_S_REF;
 4970                                 *ptep = pte;
 4971                                 PTE_SYNC(ptep);
 4972                         } else if (L2_S_REFERENCED(opte)) {
 4973                                 pte &= ~L2_S_REF;
 4974                                 *ptep = pte;
 4975                                 PTE_SYNC(ptep);
 4976                         } else
 4977                                 continue;
 4978                         if (pmap_is_current(pmap)) {
 4979                                 if (PTE_BEEN_EXECD(opte))
 4980                                         cpu_tlb_flushID_SE(sva);
 4981                                 else if (PTE_BEEN_REFD(opte))
 4982                                         cpu_tlb_flushD_SE(sva);
 4983                         }
 4984                 }
 4985         }
 4986         cpu_cpwait();
 4987         rw_wunlock(&pvh_global_lock);
 4988         PMAP_UNLOCK(pmap);
 4989 }
 4990 
 4991 /*
 4992  *      Clear the modify bits on the specified physical page.
 4993  */
 4994 void
 4995 pmap_clear_modify(vm_page_t m)
 4996 {
 4997 
 4998         KASSERT((m->oflags & VPO_UNMANAGED) == 0,
 4999             ("pmap_clear_modify: page %p is not managed", m));
 5000         VM_OBJECT_ASSERT_WLOCKED(m->object);
 5001         KASSERT(!vm_page_xbusied(m),
 5002             ("pmap_clear_modify: page %p is exclusive busied", m));
 5003 
 5004         /*
 5005          * If the page is not PGA_WRITEABLE, then no mappings can be modified.
 5006          * If the object containing the page is locked and the page is not
 5007          * exclusive busied, then PGA_WRITEABLE cannot be concurrently set.
 5008          */
 5009         if ((m->aflags & PGA_WRITEABLE) == 0)
 5010                 return;
 5011         if (pmap_is_modified(m))
 5012                 pmap_clearbit(m, PVF_MOD);
 5013 }
 5014 
 5015 
 5016 /*
 5017  * Clear the write and modified bits in each of the given page's mappings.
 5018  */
 5019 void
 5020 pmap_remove_write(vm_page_t m)
 5021 {
 5022         KASSERT((m->oflags & VPO_UNMANAGED) == 0,
 5023             ("pmap_remove_write: page %p is not managed", m));
 5024 
 5025         /*
 5026          * If the page is not exclusive busied, then PGA_WRITEABLE cannot be
 5027          * set by another thread while the object is locked.  Thus,
 5028          * if PGA_WRITEABLE is clear, no page table entries need updating.
 5029          */
 5030         VM_OBJECT_ASSERT_WLOCKED(m->object);
 5031         if (vm_page_xbusied(m) || (m->aflags & PGA_WRITEABLE) != 0)
 5032                 pmap_clearbit(m, PVF_WRITE);
 5033 }
 5034 
 5035 
 5036 /*
 5037  * perform the pmap work for mincore
 5038  */
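/*
 * A valid 1 MB section mapping additionally reports MINCORE_SUPER.  The
 * referenced/modified state is taken straight from the L1 or L2 descriptor,
 * and *locked_pa is used to re-lock and retry if the backing page changes.
 */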
 5039 int
 5040 pmap_mincore(pmap_t pmap, vm_offset_t addr, vm_paddr_t *locked_pa)
 5041 {
 5042         struct l2_bucket *l2b;
 5043         pd_entry_t *pl1pd, l1pd;
 5044         pt_entry_t *ptep, pte;
 5045         vm_paddr_t pa;
 5046         vm_page_t m;
 5047         int val;
 5048         boolean_t managed;
 5049 
 5050         PMAP_LOCK(pmap);
 5051 retry:
 5052         pl1pd = &pmap->pm_l1->l1_kva[L1_IDX(addr)];
 5053         l1pd = *pl1pd;
 5054         if ((l1pd & L1_TYPE_MASK) == L1_S_PROTO) {
 5055                 pa = (l1pd & L1_S_FRAME);
 5056                 val = MINCORE_SUPER | MINCORE_INCORE;
 5057                 if (L1_S_WRITABLE(l1pd))
 5058                         val |= MINCORE_MODIFIED | MINCORE_MODIFIED_OTHER;
 5059                 managed = FALSE;
 5060                 m = PHYS_TO_VM_PAGE(pa);
 5061                 if (m != NULL && (m->oflags & VPO_UNMANAGED) == 0)
 5062                         managed = TRUE;
 5063                 if (managed) {
 5064                         if (L1_S_REFERENCED(l1pd))
 5065                                 val |= MINCORE_REFERENCED |
 5066                                     MINCORE_REFERENCED_OTHER;
 5067                 }
 5068         } else {
 5069                 l2b = pmap_get_l2_bucket(pmap, addr);
 5070                 if (l2b == NULL) {
 5071                         val = 0;
 5072                         goto out;
 5073                 }
 5074                 ptep = &l2b->l2b_kva[l2pte_index(addr)];
 5075                 pte = *ptep;
 5076                 if (!l2pte_valid(pte)) {
 5077                         val = 0;
 5078                         goto out;
 5079                 }
 5080                 val = MINCORE_INCORE;
 5081                 if (L2_S_WRITABLE(pte))
 5082                         val |= MINCORE_MODIFIED | MINCORE_MODIFIED_OTHER;
 5083                 managed = FALSE;
 5084                 pa = l2pte_pa(pte);
 5085                 m = PHYS_TO_VM_PAGE(pa);
 5086                 if (m != NULL && (m->oflags & VPO_UNMANAGED) == 0)
 5087                         managed = TRUE;
 5088                 if (managed) {
 5089                         if (L2_S_REFERENCED(pte))
 5090                                 val |= MINCORE_REFERENCED |
 5091                                     MINCORE_REFERENCED_OTHER;
 5092                 }
 5093         }
 5094         if ((val & (MINCORE_MODIFIED_OTHER | MINCORE_REFERENCED_OTHER)) !=
 5095             (MINCORE_MODIFIED_OTHER | MINCORE_REFERENCED_OTHER) && managed) {
 5096                 /* Ensure that "PHYS_TO_VM_PAGE(pa)->object" doesn't change. */
 5097                 if (vm_page_pa_tryrelock(pmap, pa, locked_pa))
 5098                         goto retry;
 5099         } else
 5100 out:
 5101                 PA_UNLOCK_COND(*locked_pa);
 5102         PMAP_UNLOCK(pmap);
 5103         return (val);
 5104 }
 5105 
 5106 void
 5107 pmap_sync_icache(pmap_t pmap, vm_offset_t va, vm_size_t sz)
 5108 {
 5109 }
 5110 
 5111 /*
 5112  *      Increase the starting virtual address of the given mapping if a
 5113  *      different alignment might result in more superpage mappings.
 5114  */
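/*
 * Example, assuming 1 MB superpages (PDRMASK == 0xfffff) and a sufficiently
 * large mapping: an object offset of 0x245000 gives a superpage offset of
 * 0x45000, so a hint address of 0x40123000 is advanced to 0x40145000.  That
 * way superpage-aligned object offsets land on superpage-aligned virtual
 * addresses and can later be promoted.
 */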
 5115 void
 5116 pmap_align_superpage(vm_object_t object, vm_ooffset_t offset,
 5117     vm_offset_t *addr, vm_size_t size)
 5118 {
 5119         vm_offset_t superpage_offset;
 5120 
 5121         if (size < NBPDR)
 5122                 return;
 5123         if (object != NULL && (object->flags & OBJ_COLORED) != 0)
 5124                 offset += ptoa(object->pg_color);
 5125         superpage_offset = offset & PDRMASK;
 5126         if (size - ((NBPDR - superpage_offset) & PDRMASK) < NBPDR ||
 5127             (*addr & PDRMASK) == superpage_offset)
 5128                 return;
 5129         if ((*addr & PDRMASK) < superpage_offset)
 5130                 *addr = (*addr & ~PDRMASK) + superpage_offset;
 5131         else
 5132                 *addr = ((*addr + PDRMASK) & ~PDRMASK) + superpage_offset;
 5133 }
 5134 
 5135 /*
 5136  * pmap_map_section:
 5137  *
 5138  *      Create a single section mapping.
 5139  */
 5140 void
 5141 pmap_map_section(pmap_t pmap, vm_offset_t va, vm_offset_t pa, vm_prot_t prot,
 5142     boolean_t ref)
 5143 {
 5144         pd_entry_t *pl1pd, l1pd;
 5145         pd_entry_t fl;
 5146 
 5147         KASSERT(((va | pa) & L1_S_OFFSET) == 0,
 5148             ("Not a valid section mapping"));
 5149 
 5150         fl = pte_l1_s_cache_mode;
 5151 
 5152         pl1pd = &pmap->pm_l1->l1_kva[L1_IDX(va)];
 5153         l1pd = L1_S_PROTO | pa | L1_S_PROT(PTE_USER, prot) | fl |
 5154             L1_S_DOM(pmap->pm_domain);
 5155 
 5156         /* Mark page referenced if this section is a result of a promotion. */
 5157         if (ref == TRUE)
 5158                 l1pd |= L1_S_REF;
 5159 #ifdef SMP
 5160         l1pd |= L1_SHARED;
 5161 #endif
 5162         *pl1pd = l1pd;
 5163         PTE_SYNC(pl1pd);
 5164 }
 5165 
 5166 /*
 5167  * pmap_link_l2pt:
 5168  *
 5169  *      Link the L2 page table specified by l2pv.pv_pa into the L1
 5170  *      page table at the slot for "va".
 5171  */
 5172 void
 5173 pmap_link_l2pt(vm_offset_t l1pt, vm_offset_t va, struct pv_addr *l2pv)
 5174 {
 5175         pd_entry_t *pde = (pd_entry_t *) l1pt, proto;
 5176         u_int slot = va >> L1_S_SHIFT;
 5177 
 5178         proto = L1_S_DOM(PMAP_DOMAIN_KERNEL) | L1_C_PROTO;
 5179 
 5180 #ifdef VERBOSE_INIT_ARM
 5181         printf("pmap_link_l2pt: pa=0x%x va=0x%x\n", l2pv->pv_pa, l2pv->pv_va);
 5182 #endif
 5183 
 5184         pde[slot + 0] = proto | (l2pv->pv_pa + 0x000);
 5185         PTE_SYNC(&pde[slot]);
 5186 
 5187         SLIST_INSERT_HEAD(&kernel_pt_list, l2pv, pv_list);
 5188 
 5189 }
 5190 
 5191 /*
 5192  * pmap_map_entry
 5193  *
 5194  *      Create a single page mapping.
 5195  */
 5196 void
 5197 pmap_map_entry(vm_offset_t l1pt, vm_offset_t va, vm_offset_t pa, int prot,
 5198     int cache)
 5199 {
 5200         pd_entry_t *pde = (pd_entry_t *) l1pt;
 5201         pt_entry_t fl;
 5202         pt_entry_t *ptep;
 5203 
 5204         KASSERT(((va | pa) & PAGE_MASK) == 0, ("pmap_map_entry: unaligned va/pa"));
 5205 
 5206         fl = l2s_mem_types[cache];
 5207 
 5208         if ((pde[va >> L1_S_SHIFT] & L1_TYPE_MASK) != L1_TYPE_C)
 5209                 panic("pmap_map_entry: no L2 table for VA 0x%08x", va);
 5210 
 5211         ptep = (pt_entry_t *)kernel_pt_lookup(pde[L1_IDX(va)] & L1_C_ADDR_MASK);
 5212 
 5213         if (ptep == NULL)
 5214                 panic("pmap_map_entry: can't find L2 table for VA 0x%08x", va);
 5215 
 5216         ptep[l2pte_index(va)] = L2_S_PROTO | pa | fl | L2_S_REF;
 5217         pmap_set_prot(&ptep[l2pte_index(va)], prot, 0);
 5218         PTE_SYNC(&ptep[l2pte_index(va)]);
 5219 }
 5220 
 5221 /*
 5222  * pmap_map_chunk:
 5223  *
 5224  *      Map a chunk of memory using the most efficient mappings
 5225  *      possible (section, large page, small page) into the
 5226  *      provided L1 and L2 tables at the specified virtual address.
 5227  */
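/*
 * On ARM this means trying, in order: a 1 MB section (a single L1 entry),
 * a 64 KB large page (written as 16 identical L2 entries), and finally
 * 4 KB small pages, depending on the alignment of va/pa and the size left.
 */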
 5228 vm_size_t
 5229 pmap_map_chunk(vm_offset_t l1pt, vm_offset_t va, vm_offset_t pa,
 5230     vm_size_t size, int prot, int type)
 5231 {
 5232         pd_entry_t *pde = (pd_entry_t *) l1pt;
 5233         pt_entry_t *ptep, f1, f2s, f2l;
 5234         vm_size_t resid;
 5235         int i;
 5236 
 5237         resid = (size + (PAGE_SIZE - 1)) & ~(PAGE_SIZE - 1);
 5238 
 5239         if (l1pt == 0)
 5240                 panic("pmap_map_chunk: no L1 table provided");
 5241 
 5242 #ifdef VERBOSE_INIT_ARM
 5243         printf("pmap_map_chunk: pa=0x%x va=0x%x size=0x%x resid=0x%x "
 5244             "prot=0x%x type=%d\n", pa, va, size, resid, prot, type);
 5245 #endif
 5246 
 5247         f1 = l1_mem_types[type];
 5248         f2l = l2l_mem_types[type];
 5249         f2s = l2s_mem_types[type];
 5250 
 5251         size = resid;
 5252 
 5253         while (resid > 0) {
 5254                 /* See if we can use a section mapping. */
 5255                 if (L1_S_MAPPABLE_P(va, pa, resid)) {
 5256 #ifdef VERBOSE_INIT_ARM
 5257                         printf("S");
 5258 #endif
 5259                         pde[va >> L1_S_SHIFT] = L1_S_PROTO | pa |
 5260                             L1_S_PROT(PTE_KERNEL, prot | VM_PROT_EXECUTE) |
 5261                             f1 | L1_S_DOM(PMAP_DOMAIN_KERNEL) | L1_S_REF;
 5262                         PTE_SYNC(&pde[va >> L1_S_SHIFT]);
 5263                         va += L1_S_SIZE;
 5264                         pa += L1_S_SIZE;
 5265                         resid -= L1_S_SIZE;
 5266                         continue;
 5267                 }
 5268 
 5269                 /*
 5270                  * Ok, we're going to use an L2 table.  Make sure
 5271                  * one is actually in the corresponding L1 slot
 5272                  * for the current VA.
 5273                  */
 5274                 if ((pde[va >> L1_S_SHIFT] & L1_TYPE_MASK) != L1_TYPE_C)
 5275                         panic("pmap_map_chunk: no L2 table for VA 0x%08x", va);
 5276 
 5277                 ptep = (pt_entry_t *) kernel_pt_lookup(
 5278                     pde[L1_IDX(va)] & L1_C_ADDR_MASK);
 5279                 if (ptep == NULL)
 5280                         panic("pmap_map_chunk: can't find L2 table for VA "
 5281                             "0x%08x", va);
 5282                 /* See if we can use a L2 large page mapping. */
 5283                 if (L2_L_MAPPABLE_P(va, pa, resid)) {
 5284 #ifdef VERBOSE_INIT_ARM
 5285                         printf("L");
 5286 #endif
 5287                         for (i = 0; i < 16; i++) {
 5288                                 ptep[l2pte_index(va) + i] =
 5289                                     L2_L_PROTO | pa |
 5290                                     L2_L_PROT(PTE_KERNEL, prot) | f2l;
 5291                                 PTE_SYNC(&ptep[l2pte_index(va) + i]);
 5292                         }
 5293                         va += L2_L_SIZE;
 5294                         pa += L2_L_SIZE;
 5295                         resid -= L2_L_SIZE;
 5296                         continue;
 5297                 }
 5298 
 5299                 /* Use a small page mapping. */
 5300 #ifdef VERBOSE_INIT_ARM
 5301                 printf("P");
 5302 #endif
 5303                 ptep[l2pte_index(va)] = L2_S_PROTO | pa | f2s | L2_S_REF;
 5304                 pmap_set_prot(&ptep[l2pte_index(va)], prot, 0);
 5305                 PTE_SYNC(&ptep[l2pte_index(va)]);
 5306                 va += PAGE_SIZE;
 5307                 pa += PAGE_SIZE;
 5308                 resid -= PAGE_SIZE;
 5309         }
 5310 #ifdef VERBOSE_INIT_ARM
 5311         printf("\n");
 5312 #endif
 5313         return (size);
 5314 
 5315 }
 5316 
 5317 int
 5318 pmap_dmap_iscurrent(pmap_t pmap)
 5319 {
 5320         return(pmap_is_current(pmap));
 5321 }
 5322 
 5323 void
 5324 pmap_page_set_memattr(vm_page_t m, vm_memattr_t ma)
 5325 {
 5326         /* 
 5327          * Remember the memattr in a field that gets used to set the appropriate
 5328          * bits in the PTEs as mappings are established.
 5329          */
 5330         m->md.pv_memattr = ma;
 5331 
 5332         /*
 5333          * It appears that this function can only be called before any mappings
 5334          * for the page are established on ARM.  If this ever changes, this code
 5335          * will need to walk the pv_list and make each of the existing mappings
 5336          * uncacheable, being careful to sync caches and PTEs (and maybe
 5337          * invalidate TLB?) for any current mapping it modifies.
 5338          */
 5339         if (TAILQ_FIRST(&m->md.pv_list) != NULL)
 5340                 panic("Can't change memattr on page with existing mappings");
 5341 }
