The Design and Implementation of the FreeBSD Operating System, Second Edition
Now available: The Design and Implementation of the FreeBSD Operating System (Second Edition)


[ source navigation ] [ diff markup ] [ identifier search ] [ freetext search ] [ file search ] [ list types ] [ track identifier ]

FreeBSD/Linux Kernel Cross Reference
sys/arm/arm/pmap.c

Version: -  FREEBSD  -  FREEBSD-13-STABLE  -  FREEBSD-13-0  -  FREEBSD-12-STABLE  -  FREEBSD-12-0  -  FREEBSD-11-STABLE  -  FREEBSD-11-0  -  FREEBSD-10-STABLE  -  FREEBSD-10-0  -  FREEBSD-9-STABLE  -  FREEBSD-9-0  -  FREEBSD-8-STABLE  -  FREEBSD-8-0  -  FREEBSD-7-STABLE  -  FREEBSD-7-0  -  FREEBSD-6-STABLE  -  FREEBSD-6-0  -  FREEBSD-5-STABLE  -  FREEBSD-5-0  -  FREEBSD-4-STABLE  -  FREEBSD-3-STABLE  -  FREEBSD22  -  l41  -  OPENBSD  -  linux-2.6  -  MK84  -  PLAN9  -  xnu-8792 
SearchContext: -  none  -  3  -  10 

    1 /* From: $NetBSD: pmap.c,v 1.148 2004/04/03 04:35:48 bsh Exp $ */
    2 /*-
    3  * Copyright 2004 Olivier Houchard.
    4  * Copyright 2003 Wasabi Systems, Inc.
    5  * All rights reserved.
    6  *
    7  * Written by Steve C. Woodford for Wasabi Systems, Inc.
    8  *
    9  * Redistribution and use in source and binary forms, with or without
   10  * modification, are permitted provided that the following conditions
   11  * are met:
   12  * 1. Redistributions of source code must retain the above copyright
   13  *    notice, this list of conditions and the following disclaimer.
   14  * 2. Redistributions in binary form must reproduce the above copyright
   15  *    notice, this list of conditions and the following disclaimer in the
   16  *    documentation and/or other materials provided with the distribution.
   17  * 3. All advertising materials mentioning features or use of this software
   18  *    must display the following acknowledgement:
   19  *      This product includes software developed for the NetBSD Project by
   20  *      Wasabi Systems, Inc.
   21  * 4. The name of Wasabi Systems, Inc. may not be used to endorse
   22  *    or promote products derived from this software without specific prior
   23  *    written permission.
   24  *
   25  * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
   26  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
   27  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
   28  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
   29  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
   30  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
   31  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
   32  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
   33  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
   34  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
   35  * POSSIBILITY OF SUCH DAMAGE.
   36  */
   37 
   38 /*-
   39  * Copyright (c) 2002-2003 Wasabi Systems, Inc.
   40  * Copyright (c) 2001 Richard Earnshaw
   41  * Copyright (c) 2001-2002 Christopher Gilbert
   42  * All rights reserved.
   43  *
   44  * 1. Redistributions of source code must retain the above copyright
   45  *    notice, this list of conditions and the following disclaimer.
   46  * 2. Redistributions in binary form must reproduce the above copyright
   47  *    notice, this list of conditions and the following disclaimer in the
   48  *    documentation and/or other materials provided with the distribution.
   49  * 3. The name of the company nor the name of the author may be used to
   50  *    endorse or promote products derived from this software without specific
   51  *    prior written permission.
   52  *
   53  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
   54  * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
   55  * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
   56  * IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
   57  * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
   58  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
   59  * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   60  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   61  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   62  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   63  * SUCH DAMAGE.
   64  */
   65 /*-
   66  * Copyright (c) 1999 The NetBSD Foundation, Inc.
   67  * All rights reserved.
   68  *
   69  * This code is derived from software contributed to The NetBSD Foundation
   70  * by Charles M. Hannum.
   71  *
   72  * Redistribution and use in source and binary forms, with or without
   73  * modification, are permitted provided that the following conditions
   74  * are met:
   75  * 1. Redistributions of source code must retain the above copyright
   76  *    notice, this list of conditions and the following disclaimer.
   77  * 2. Redistributions in binary form must reproduce the above copyright
   78  *    notice, this list of conditions and the following disclaimer in the
   79  *    documentation and/or other materials provided with the distribution.
   80  * 3. All advertising materials mentioning features or use of this software
   81  *    must display the following acknowledgement:
   82  *        This product includes software developed by the NetBSD
   83  *        Foundation, Inc. and its contributors.
   84  * 4. Neither the name of The NetBSD Foundation nor the names of its
   85  *    contributors may be used to endorse or promote products derived
   86  *    from this software without specific prior written permission.
   87  *
   88  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
   89  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
   90  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
   91  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
   92  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
   93  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
   94  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
   95  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
   96  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
   97  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
   98  * POSSIBILITY OF SUCH DAMAGE.
   99  */
  100 
  101 /*-
  102  * Copyright (c) 1994-1998 Mark Brinicombe.
  103  * Copyright (c) 1994 Brini.
  104  * All rights reserved.
  105  *
  106  * This code is derived from software written for Brini by Mark Brinicombe
  107  *
  108  * Redistribution and use in source and binary forms, with or without
  109  * modification, are permitted provided that the following conditions
  110  * are met:
  111  * 1. Redistributions of source code must retain the above copyright
  112  *    notice, this list of conditions and the following disclaimer.
  113  * 2. Redistributions in binary form must reproduce the above copyright
  114  *    notice, this list of conditions and the following disclaimer in the
  115  *    documentation and/or other materials provided with the distribution.
  116  * 3. All advertising materials mentioning features or use of this software
  117  *    must display the following acknowledgement:
  118  *      This product includes software developed by Mark Brinicombe.
  119  * 4. The name of the author may not be used to endorse or promote products
  120  *    derived from this software without specific prior written permission.
  121  *
  122  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
  123  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
  124  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
  125  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
  126  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
  127  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
  128  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
  129  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  130  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
  131  *
  132  * RiscBSD kernel project
  133  *
  134  * pmap.c
  135  *
  136  * Machine dependant vm stuff
  137  *
  138  * Created      : 20/09/94
  139  */
  140 
  141 /*
  142  * Special compilation symbols
  143  * PMAP_DEBUG           - Build in pmap_debug_level code
  144  */
  145 /* Include header files */
  146 
  147 #include "opt_vm.h"
  148 
  149 #include <sys/cdefs.h>
  150 __FBSDID("$FreeBSD$");
  151 #include <sys/param.h>
  152 #include <sys/systm.h>
  153 #include <sys/kernel.h>
  154 #include <sys/ktr.h>
  155 #include <sys/proc.h>
  156 #include <sys/malloc.h>
  157 #include <sys/msgbuf.h>
  158 #include <sys/vmmeter.h>
  159 #include <sys/mman.h>
  160 #include <sys/smp.h>
  161 #include <sys/sched.h>
  162 
  163 #include <vm/vm.h>
  164 #include <vm/uma.h>
  165 #include <vm/pmap.h>
  166 #include <vm/vm_kern.h>
  167 #include <vm/vm_object.h>
  168 #include <vm/vm_map.h>
  169 #include <vm/vm_page.h>
  170 #include <vm/vm_pageout.h>
  171 #include <vm/vm_extern.h>
  172 #include <sys/lock.h>
  173 #include <sys/mutex.h>
  174 #include <machine/md_var.h>
  175 #include <machine/vmparam.h>
  176 #include <machine/cpu.h>
  177 #include <machine/cpufunc.h>
  178 #include <machine/pcb.h>
  179 
  180 #ifdef PMAP_DEBUG
  181 #define PDEBUG(_lev_,_stat_) \
  182         if (pmap_debug_level >= (_lev_)) \
  183                 ((_stat_))
  184 #define dprintf printf
  185 
  186 int pmap_debug_level = 0;
  187 #define PMAP_INLINE 
  188 #else   /* PMAP_DEBUG */
  189 #define PDEBUG(_lev_,_stat_) /* Nothing */
  190 #define dprintf(x, arg...)
  191 #define PMAP_INLINE __inline
  192 #endif  /* PMAP_DEBUG */
  193 
  194 extern struct pv_addr systempage;
  195 /*
  196  * Internal function prototypes
  197  */
  198 static void pmap_free_pv_entry (pv_entry_t);
  199 static pv_entry_t pmap_get_pv_entry(void);
  200 
  201 static void             pmap_enter_locked(pmap_t, vm_offset_t, vm_page_t,
  202     vm_prot_t, boolean_t, int);
  203 static void             pmap_fix_cache(struct vm_page *, pmap_t, vm_offset_t);
  204 static void             pmap_alloc_l1(pmap_t);
  205 static void             pmap_free_l1(pmap_t);
  206 static void             pmap_use_l1(pmap_t);
  207 
  208 static int              pmap_clearbit(struct vm_page *, u_int);
  209 
  210 static struct l2_bucket *pmap_get_l2_bucket(pmap_t, vm_offset_t);
  211 static struct l2_bucket *pmap_alloc_l2_bucket(pmap_t, vm_offset_t);
  212 static void             pmap_free_l2_bucket(pmap_t, struct l2_bucket *, u_int);
  213 static vm_offset_t      kernel_pt_lookup(vm_paddr_t);
  214 
  215 static MALLOC_DEFINE(M_VMPMAP, "pmap", "PMAP L1");
  216 
  217 vm_offset_t virtual_avail;      /* VA of first avail page (after kernel bss) */
  218 vm_offset_t virtual_end;        /* VA of last avail page (end of kernel AS) */
  219 vm_offset_t pmap_curmaxkvaddr;
  220 vm_paddr_t kernel_l1pa;
  221 
  222 extern void *end;
  223 vm_offset_t kernel_vm_end = 0;
  224 
  225 struct pmap kernel_pmap_store;
  226 
  227 static pt_entry_t *csrc_pte, *cdst_pte;
  228 static vm_offset_t csrcp, cdstp;
  229 static struct mtx cmtx;
  230 
  231 static void             pmap_init_l1(struct l1_ttable *, pd_entry_t *);
  232 /*
  233  * These routines are called when the CPU type is identified to set up
  234  * the PTE prototypes, cache modes, etc.
  235  *
  236  * The variables are always here, just in case LKMs need to reference
  237  * them (though, they shouldn't).
  238  */
  239 
  240 pt_entry_t      pte_l1_s_cache_mode;
  241 pt_entry_t      pte_l1_s_cache_mode_pt;
  242 pt_entry_t      pte_l1_s_cache_mask;
  243 
  244 pt_entry_t      pte_l2_l_cache_mode;
  245 pt_entry_t      pte_l2_l_cache_mode_pt;
  246 pt_entry_t      pte_l2_l_cache_mask;
  247 
  248 pt_entry_t      pte_l2_s_cache_mode;
  249 pt_entry_t      pte_l2_s_cache_mode_pt;
  250 pt_entry_t      pte_l2_s_cache_mask;
  251 
  252 pt_entry_t      pte_l2_s_prot_u;
  253 pt_entry_t      pte_l2_s_prot_w;
  254 pt_entry_t      pte_l2_s_prot_mask;
  255 
  256 pt_entry_t      pte_l1_s_proto;
  257 pt_entry_t      pte_l1_c_proto;
  258 pt_entry_t      pte_l2_s_proto;
  259 
  260 void            (*pmap_copy_page_func)(vm_paddr_t, vm_paddr_t);
  261 void            (*pmap_zero_page_func)(vm_paddr_t, int, int);
  262 /*
  263  * Which pmap is currently 'live' in the cache
  264  *
  265  * XXXSCW: Fix for SMP ...
  266  */
  267 union pmap_cache_state *pmap_cache_state;
  268 
  269 struct msgbuf *msgbufp = 0;
  270 
  271 /*
  272  * Crashdump maps.
  273  */
  274 static caddr_t crashdumpmap;
  275 
  276 extern void bcopy_page(vm_offset_t, vm_offset_t);
  277 extern void bzero_page(vm_offset_t);
  278 
  279 extern vm_offset_t alloc_firstaddr;
  280 
  281 char *_tmppt;
  282 
  283 /*
  284  * Metadata for L1 translation tables.
  285  */
  286 struct l1_ttable {
  287         /* Entry on the L1 Table list */
  288         SLIST_ENTRY(l1_ttable) l1_link;
  289 
  290         /* Entry on the L1 Least Recently Used list */
  291         TAILQ_ENTRY(l1_ttable) l1_lru;
  292 
  293         /* Track how many domains are allocated from this L1 */
  294         volatile u_int l1_domain_use_count;
  295 
  296         /*
  297          * A free-list of domain numbers for this L1.
  298          * We avoid using ffs() and a bitmap to track domains since ffs()
  299          * is slow on ARM.
  300          */
  301         u_int8_t l1_domain_first;
  302         u_int8_t l1_domain_free[PMAP_DOMAINS];
  303 
  304         /* Physical address of this L1 page table */
  305         vm_paddr_t l1_physaddr;
  306 
  307         /* KVA of this L1 page table */
  308         pd_entry_t *l1_kva;
  309 };
  310 
  311 /*
  312  * Convert a virtual address into its L1 table index. That is, the
  313  * index used to locate the L2 descriptor table pointer in an L1 table.
  314  * This is basically used to index l1->l1_kva[].
  315  *
  316  * Each L2 descriptor table represents 1MB of VA space.
  317  */
  318 #define L1_IDX(va)              (((vm_offset_t)(va)) >> L1_S_SHIFT)
  319 
  320 /*
  321  * L1 Page Tables are tracked using a Least Recently Used list.
  322  *  - New L1s are allocated from the HEAD.
  323  *  - Freed L1s are added to the TAIl.
  324  *  - Recently accessed L1s (where an 'access' is some change to one of
  325  *    the userland pmaps which owns this L1) are moved to the TAIL.
  326  */
  327 static TAILQ_HEAD(, l1_ttable) l1_lru_list;
  328 /*
  329  * A list of all L1 tables
  330  */
  331 static SLIST_HEAD(, l1_ttable) l1_list;
  332 static struct mtx l1_lru_lock;
  333 
  334 /*
  335  * The l2_dtable tracks L2_BUCKET_SIZE worth of L1 slots.
  336  *
  337  * This is normally 16MB worth L2 page descriptors for any given pmap.
  338  * Reference counts are maintained for L2 descriptors so they can be
  339  * freed when empty.
  340  */
  341 struct l2_dtable {
  342         /* The number of L2 page descriptors allocated to this l2_dtable */
  343         u_int l2_occupancy;
  344 
  345         /* List of L2 page descriptors */
  346         struct l2_bucket {
  347                 pt_entry_t *l2b_kva;    /* KVA of L2 Descriptor Table */
  348                 vm_paddr_t l2b_phys;    /* Physical address of same */
  349                 u_short l2b_l1idx;      /* This L2 table's L1 index */
  350                 u_short l2b_occupancy;  /* How many active descriptors */
  351         } l2_bucket[L2_BUCKET_SIZE];
  352 };
  353 
  354 /* pmap_kenter_internal flags */
  355 #define KENTER_CACHE    0x1
  356 #define KENTER_USER     0x2
  357 
  358 /*
  359  * Given an L1 table index, calculate the corresponding l2_dtable index
  360  * and bucket index within the l2_dtable.
  361  */
  362 #define L2_IDX(l1idx)           (((l1idx) >> L2_BUCKET_LOG2) & \
  363                                  (L2_SIZE - 1))
  364 #define L2_BUCKET(l1idx)        ((l1idx) & (L2_BUCKET_SIZE - 1))
  365 
  366 /*
  367  * Given a virtual address, this macro returns the
  368  * virtual address required to drop into the next L2 bucket.
  369  */
  370 #define L2_NEXT_BUCKET(va)      (((va) & L1_S_FRAME) + L1_S_SIZE)
  371 
  372 /*
  373  * L2 allocation.
  374  */
  375 #define pmap_alloc_l2_dtable()          \
  376                 (void*)uma_zalloc(l2table_zone, M_NOWAIT|M_USE_RESERVE)
  377 #define pmap_free_l2_dtable(l2)         \
  378                 uma_zfree(l2table_zone, l2)
  379 
  380 /*
  381  * We try to map the page tables write-through, if possible.  However, not
  382  * all CPUs have a write-through cache mode, so on those we have to sync
  383  * the cache when we frob page tables.
  384  *
  385  * We try to evaluate this at compile time, if possible.  However, it's
  386  * not always possible to do that, hence this run-time var.
  387  */
  388 int     pmap_needs_pte_sync;
  389 
  390 /*
  391  * Macro to determine if a mapping might be resident in the
  392  * instruction cache and/or TLB
  393  */
  394 #define PV_BEEN_EXECD(f)  (((f) & (PVF_REF | PVF_EXEC)) == (PVF_REF | PVF_EXEC))
  395 
  396 /*
  397  * Macro to determine if a mapping might be resident in the
  398  * data cache and/or TLB
  399  */
  400 #define PV_BEEN_REFD(f)   (((f) & PVF_REF) != 0)
  401 
  402 #ifndef PMAP_SHPGPERPROC
  403 #define PMAP_SHPGPERPROC 200
  404 #endif
  405 
  406 #define pmap_is_current(pm)     ((pm) == pmap_kernel() || \
  407             curproc->p_vmspace->vm_map.pmap == (pm))
  408 static uma_zone_t pvzone = NULL;
  409 uma_zone_t l2zone;
  410 static uma_zone_t l2table_zone;
  411 static vm_offset_t pmap_kernel_l2dtable_kva;
  412 static vm_offset_t pmap_kernel_l2ptp_kva;
  413 static vm_paddr_t pmap_kernel_l2ptp_phys;
  414 static struct vm_object pvzone_obj;
  415 static int pv_entry_count=0, pv_entry_max=0, pv_entry_high_water=0;
  416 
  417 /*
  418  * This list exists for the benefit of pmap_map_chunk().  It keeps track
  419  * of the kernel L2 tables during bootstrap, so that pmap_map_chunk() can
  420  * find them as necessary.
  421  *
  422  * Note that the data on this list MUST remain valid after initarm() returns,
  423  * as pmap_bootstrap() uses it to contruct L2 table metadata.
  424  */
  425 SLIST_HEAD(, pv_addr) kernel_pt_list = SLIST_HEAD_INITIALIZER(kernel_pt_list);
  426 
  427 static void
  428 pmap_init_l1(struct l1_ttable *l1, pd_entry_t *l1pt)
  429 {
  430         int i;
  431 
  432         l1->l1_kva = l1pt;
  433         l1->l1_domain_use_count = 0;
  434         l1->l1_domain_first = 0;
  435 
  436         for (i = 0; i < PMAP_DOMAINS; i++)
  437                 l1->l1_domain_free[i] = i + 1;
  438 
  439         /*
  440          * Copy the kernel's L1 entries to each new L1.
  441          */
  442         if (l1pt != pmap_kernel()->pm_l1->l1_kva)
  443                 memcpy(l1pt, pmap_kernel()->pm_l1->l1_kva, L1_TABLE_SIZE);
  444 
  445         if ((l1->l1_physaddr = pmap_extract(pmap_kernel(), (vm_offset_t)l1pt)) == 0)
  446                 panic("pmap_init_l1: can't get PA of L1 at %p", l1pt);
  447         SLIST_INSERT_HEAD(&l1_list, l1, l1_link);
  448         TAILQ_INSERT_TAIL(&l1_lru_list, l1, l1_lru);
  449 }
  450 
  451 static vm_offset_t
  452 kernel_pt_lookup(vm_paddr_t pa)
  453 {
  454         struct pv_addr *pv;
  455 
  456         SLIST_FOREACH(pv, &kernel_pt_list, pv_list) {
  457                 if (pv->pv_pa == pa)
  458                         return (pv->pv_va);
  459         }
  460         return (0);
  461 }
  462 
  463 #if (ARM_MMU_GENERIC + ARM_MMU_SA1) != 0
  464 void
  465 pmap_pte_init_generic(void)
  466 {
  467 
  468         pte_l1_s_cache_mode = L1_S_B|L1_S_C;
  469         pte_l1_s_cache_mask = L1_S_CACHE_MASK_generic;
  470 
  471         pte_l2_l_cache_mode = L2_B|L2_C;
  472         pte_l2_l_cache_mask = L2_L_CACHE_MASK_generic;
  473 
  474         pte_l2_s_cache_mode = L2_B|L2_C;
  475         pte_l2_s_cache_mask = L2_S_CACHE_MASK_generic;
  476 
  477         /*
  478          * If we have a write-through cache, set B and C.  If
  479          * we have a write-back cache, then we assume setting
  480          * only C will make those pages write-through.
  481          */
  482         if (cpufuncs.cf_dcache_wb_range == (void *) cpufunc_nullop) {
  483                 pte_l1_s_cache_mode_pt = L1_S_B|L1_S_C;
  484                 pte_l2_l_cache_mode_pt = L2_B|L2_C;
  485                 pte_l2_s_cache_mode_pt = L2_B|L2_C;
  486         } else {
  487                 pte_l1_s_cache_mode_pt = L1_S_C;
  488                 pte_l2_l_cache_mode_pt = L2_C;
  489                 pte_l2_s_cache_mode_pt = L2_C;
  490         }
  491 
  492         pte_l2_s_prot_u = L2_S_PROT_U_generic;
  493         pte_l2_s_prot_w = L2_S_PROT_W_generic;
  494         pte_l2_s_prot_mask = L2_S_PROT_MASK_generic;
  495 
  496         pte_l1_s_proto = L1_S_PROTO_generic;
  497         pte_l1_c_proto = L1_C_PROTO_generic;
  498         pte_l2_s_proto = L2_S_PROTO_generic;
  499 
  500         pmap_copy_page_func = pmap_copy_page_generic;
  501         pmap_zero_page_func = pmap_zero_page_generic;
  502 }
  503 
  504 #if defined(CPU_ARM8)
  505 void
  506 pmap_pte_init_arm8(void)
  507 {
  508 
  509         /*
  510          * ARM8 is compatible with generic, but we need to use
  511          * the page tables uncached.
  512          */
  513         pmap_pte_init_generic();
  514 
  515         pte_l1_s_cache_mode_pt = 0;
  516         pte_l2_l_cache_mode_pt = 0;
  517         pte_l2_s_cache_mode_pt = 0;
  518 }
  519 #endif /* CPU_ARM8 */
  520 
  521 #if defined(CPU_ARM9) && defined(ARM9_CACHE_WRITE_THROUGH)
  522 void
  523 pmap_pte_init_arm9(void)
  524 {
  525 
  526         /*
  527          * ARM9 is compatible with generic, but we want to use
  528          * write-through caching for now.
  529          */
  530         pmap_pte_init_generic();
  531 
  532         pte_l1_s_cache_mode = L1_S_C;
  533         pte_l2_l_cache_mode = L2_C;
  534         pte_l2_s_cache_mode = L2_C;
  535 
  536         pte_l1_s_cache_mode_pt = L1_S_C;
  537         pte_l2_l_cache_mode_pt = L2_C;
  538         pte_l2_s_cache_mode_pt = L2_C;
  539 }
  540 #endif /* CPU_ARM9 */
  541 #endif /* (ARM_MMU_GENERIC + ARM_MMU_SA1) != 0 */
  542 
  543 #if defined(CPU_ARM10)
  544 void
  545 pmap_pte_init_arm10(void)
  546 {
  547 
  548         /*
  549          * ARM10 is compatible with generic, but we want to use
  550          * write-through caching for now.
  551          */
  552         pmap_pte_init_generic();
  553 
  554         pte_l1_s_cache_mode = L1_S_B | L1_S_C;
  555         pte_l2_l_cache_mode = L2_B | L2_C;
  556         pte_l2_s_cache_mode = L2_B | L2_C;
  557 
  558         pte_l1_s_cache_mode_pt = L1_S_C;
  559         pte_l2_l_cache_mode_pt = L2_C;
  560         pte_l2_s_cache_mode_pt = L2_C;
  561 
  562 }
  563 #endif /* CPU_ARM10 */
  564 
  565 #if  ARM_MMU_SA1 == 1
  566 void
  567 pmap_pte_init_sa1(void)
  568 {
  569 
  570         /*
  571          * The StrongARM SA-1 cache does not have a write-through
  572          * mode.  So, do the generic initialization, then reset
  573          * the page table cache mode to B=1,C=1, and note that
  574          * the PTEs need to be sync'd.
  575          */
  576         pmap_pte_init_generic();
  577 
  578         pte_l1_s_cache_mode_pt = L1_S_B|L1_S_C;
  579         pte_l2_l_cache_mode_pt = L2_B|L2_C;
  580         pte_l2_s_cache_mode_pt = L2_B|L2_C;
  581 
  582         pmap_needs_pte_sync = 1;
  583 }
  584 #endif /* ARM_MMU_SA1 == 1*/
  585 
  586 #if ARM_MMU_XSCALE == 1
  587 #if (ARM_NMMUS > 1) || defined (CPU_XSCALE_CORE3)
  588 static u_int xscale_use_minidata;
  589 #endif
  590 
  591 void
  592 pmap_pte_init_xscale(void)
  593 {
  594         uint32_t auxctl;
  595         int write_through = 0;
  596 
  597         pte_l1_s_cache_mode = L1_S_B|L1_S_C|L1_S_XSCALE_P;
  598         pte_l1_s_cache_mask = L1_S_CACHE_MASK_xscale;
  599 
  600         pte_l2_l_cache_mode = L2_B|L2_C;
  601         pte_l2_l_cache_mask = L2_L_CACHE_MASK_xscale;
  602 
  603         pte_l2_s_cache_mode = L2_B|L2_C;
  604         pte_l2_s_cache_mask = L2_S_CACHE_MASK_xscale;
  605 
  606         pte_l1_s_cache_mode_pt = L1_S_C;
  607         pte_l2_l_cache_mode_pt = L2_C;
  608         pte_l2_s_cache_mode_pt = L2_C;
  609 #ifdef XSCALE_CACHE_READ_WRITE_ALLOCATE
  610         /*
  611          * The XScale core has an enhanced mode where writes that
  612          * miss the cache cause a cache line to be allocated.  This
  613          * is significantly faster than the traditional, write-through
  614          * behavior of this case.
  615          */
  616         pte_l1_s_cache_mode |= L1_S_XSCALE_TEX(TEX_XSCALE_X);
  617         pte_l2_l_cache_mode |= L2_XSCALE_L_TEX(TEX_XSCALE_X);
  618         pte_l2_s_cache_mode |= L2_XSCALE_T_TEX(TEX_XSCALE_X);
  619 #endif /* XSCALE_CACHE_READ_WRITE_ALLOCATE */
  620 #ifdef XSCALE_CACHE_WRITE_THROUGH
  621         /*
  622          * Some versions of the XScale core have various bugs in
  623          * their cache units, the work-around for which is to run
  624          * the cache in write-through mode.  Unfortunately, this
  625          * has a major (negative) impact on performance.  So, we
  626          * go ahead and run fast-and-loose, in the hopes that we
  627          * don't line up the planets in a way that will trip the
  628          * bugs.
  629          *
  630          * However, we give you the option to be slow-but-correct.
  631          */
  632         write_through = 1;
  633 #elif defined(XSCALE_CACHE_WRITE_BACK)
  634         /* force write back cache mode */
  635         write_through = 0;
  636 #elif defined(CPU_XSCALE_PXA2X0)
  637         /*
  638          * Intel PXA2[15]0 processors are known to have a bug in
  639          * write-back cache on revision 4 and earlier (stepping
  640          * A[01] and B[012]).  Fixed for C0 and later.
  641          */
  642         {
  643                 uint32_t id, type;
  644 
  645                 id = cpufunc_id();
  646                 type = id & ~(CPU_ID_XSCALE_COREREV_MASK|CPU_ID_REVISION_MASK);
  647 
  648                 if (type == CPU_ID_PXA250 || type == CPU_ID_PXA210) {
  649                         if ((id & CPU_ID_REVISION_MASK) < 5) {
  650                                 /* write through for stepping A0-1 and B0-2 */
  651                                 write_through = 1;
  652                         }
  653                 }
  654         }
  655 #endif /* XSCALE_CACHE_WRITE_THROUGH */
  656 
  657         if (write_through) {
  658                 pte_l1_s_cache_mode = L1_S_C;
  659                 pte_l2_l_cache_mode = L2_C;
  660                 pte_l2_s_cache_mode = L2_C;
  661         }
  662 
  663 #if (ARM_NMMUS > 1)
  664         xscale_use_minidata = 1;
  665 #endif
  666 
  667         pte_l2_s_prot_u = L2_S_PROT_U_xscale;
  668         pte_l2_s_prot_w = L2_S_PROT_W_xscale;
  669         pte_l2_s_prot_mask = L2_S_PROT_MASK_xscale;
  670 
  671         pte_l1_s_proto = L1_S_PROTO_xscale;
  672         pte_l1_c_proto = L1_C_PROTO_xscale;
  673         pte_l2_s_proto = L2_S_PROTO_xscale;
  674 
  675 #ifdef CPU_XSCALE_CORE3
  676         pmap_copy_page_func = pmap_copy_page_generic;
  677         pmap_zero_page_func = pmap_zero_page_generic;
  678         xscale_use_minidata = 0;
  679         /* Make sure it is L2-cachable */
  680         pte_l1_s_cache_mode |= L1_S_XSCALE_TEX(TEX_XSCALE_T);
  681         pte_l1_s_cache_mode_pt = pte_l1_s_cache_mode &~ L1_S_XSCALE_P;
  682         pte_l2_l_cache_mode |= L2_XSCALE_L_TEX(TEX_XSCALE_T) ;
  683         pte_l2_l_cache_mode_pt = pte_l1_s_cache_mode;
  684         pte_l2_s_cache_mode |= L2_XSCALE_T_TEX(TEX_XSCALE_T);
  685         pte_l2_s_cache_mode_pt = pte_l2_s_cache_mode;
  686 
  687 #else
  688         pmap_copy_page_func = pmap_copy_page_xscale;
  689         pmap_zero_page_func = pmap_zero_page_xscale;
  690 #endif
  691 
  692         /*
  693          * Disable ECC protection of page table access, for now.
  694          */
  695         __asm __volatile("mrc p15, 0, %0, c1, c0, 1" : "=r" (auxctl));
  696         auxctl &= ~XSCALE_AUXCTL_P;
  697         __asm __volatile("mcr p15, 0, %0, c1, c0, 1" : : "r" (auxctl));
  698 }
  699 
  700 /*
  701  * xscale_setup_minidata:
  702  *
  703  *      Set up the mini-data cache clean area.  We require the
  704  *      caller to allocate the right amount of physically and
  705  *      virtually contiguous space.
  706  */
  707 extern vm_offset_t xscale_minidata_clean_addr;
  708 extern vm_size_t xscale_minidata_clean_size; /* already initialized */
  709 void
  710 xscale_setup_minidata(vm_offset_t l1pt, vm_offset_t va, vm_paddr_t pa)
  711 {
  712         pd_entry_t *pde = (pd_entry_t *) l1pt;
  713         pt_entry_t *pte;
  714         vm_size_t size;
  715         uint32_t auxctl;
  716 
  717         xscale_minidata_clean_addr = va;
  718 
  719         /* Round it to page size. */
  720         size = (xscale_minidata_clean_size + L2_S_OFFSET) & L2_S_FRAME;
  721 
  722         for (; size != 0;
  723              va += L2_S_SIZE, pa += L2_S_SIZE, size -= L2_S_SIZE) {
  724                 pte = (pt_entry_t *) kernel_pt_lookup(
  725                     pde[L1_IDX(va)] & L1_C_ADDR_MASK);
  726                 if (pte == NULL)
  727                         panic("xscale_setup_minidata: can't find L2 table for "
  728                             "VA 0x%08x", (u_int32_t) va);
  729                 pte[l2pte_index(va)] =
  730                     L2_S_PROTO | pa | L2_S_PROT(PTE_KERNEL, VM_PROT_READ) |
  731                     L2_C | L2_XSCALE_T_TEX(TEX_XSCALE_X);
  732         }
  733 
  734         /*
  735          * Configure the mini-data cache for write-back with
  736          * read/write-allocate.
  737          *
  738          * NOTE: In order to reconfigure the mini-data cache, we must
  739          * make sure it contains no valid data!  In order to do that,
  740          * we must issue a global data cache invalidate command!
  741          *
  742          * WE ASSUME WE ARE RUNNING UN-CACHED WHEN THIS ROUTINE IS CALLED!
  743          * THIS IS VERY IMPORTANT!
  744          */
  745 
  746         /* Invalidate data and mini-data. */
  747         __asm __volatile("mcr p15, 0, %0, c7, c6, 0" : : "r" (0));
  748         __asm __volatile("mrc p15, 0, %0, c1, c0, 1" : "=r" (auxctl));
  749         auxctl = (auxctl & ~XSCALE_AUXCTL_MD_MASK) | XSCALE_AUXCTL_MD_WB_RWA;
  750         __asm __volatile("mcr p15, 0, %0, c1, c0, 1" : : "r" (auxctl));
  751 }
  752 #endif
  753 
  754 /*
  755  * Allocate an L1 translation table for the specified pmap.
  756  * This is called at pmap creation time.
  757  */
  758 static void
  759 pmap_alloc_l1(pmap_t pm)
  760 {
  761         struct l1_ttable *l1;
  762         u_int8_t domain;
  763 
  764         /*
  765          * Remove the L1 at the head of the LRU list
  766          */
  767         mtx_lock(&l1_lru_lock);
  768         l1 = TAILQ_FIRST(&l1_lru_list);
  769         TAILQ_REMOVE(&l1_lru_list, l1, l1_lru);
  770 
  771         /*
  772          * Pick the first available domain number, and update
  773          * the link to the next number.
  774          */
  775         domain = l1->l1_domain_first;
  776         l1->l1_domain_first = l1->l1_domain_free[domain];
  777 
  778         /*
  779          * If there are still free domain numbers in this L1,
  780          * put it back on the TAIL of the LRU list.
  781          */
  782         if (++l1->l1_domain_use_count < PMAP_DOMAINS)
  783                 TAILQ_INSERT_TAIL(&l1_lru_list, l1, l1_lru);
  784 
  785         mtx_unlock(&l1_lru_lock);
  786 
  787         /*
  788          * Fix up the relevant bits in the pmap structure
  789          */
  790         pm->pm_l1 = l1;
  791         pm->pm_domain = domain + 1;
  792 }
  793 
  794 /*
  795  * Free an L1 translation table.
  796  * This is called at pmap destruction time.
  797  */
  798 static void
  799 pmap_free_l1(pmap_t pm)
  800 {
  801         struct l1_ttable *l1 = pm->pm_l1;
  802 
  803         mtx_lock(&l1_lru_lock);
  804 
  805         /*
  806          * If this L1 is currently on the LRU list, remove it.
  807          */
  808         if (l1->l1_domain_use_count < PMAP_DOMAINS)
  809                 TAILQ_REMOVE(&l1_lru_list, l1, l1_lru);
  810 
  811         /*
  812          * Free up the domain number which was allocated to the pmap
  813          */
  814         l1->l1_domain_free[pm->pm_domain - 1] = l1->l1_domain_first;
  815         l1->l1_domain_first = pm->pm_domain - 1;
  816         l1->l1_domain_use_count--;
  817 
  818         /*
  819          * The L1 now must have at least 1 free domain, so add
  820          * it back to the LRU list. If the use count is zero,
  821          * put it at the head of the list, otherwise it goes
  822          * to the tail.
  823          */
  824         if (l1->l1_domain_use_count == 0) {
  825                 TAILQ_INSERT_HEAD(&l1_lru_list, l1, l1_lru);
  826         }       else
  827                 TAILQ_INSERT_TAIL(&l1_lru_list, l1, l1_lru);
  828 
  829         mtx_unlock(&l1_lru_lock);
  830 }
  831 
  832 static PMAP_INLINE void
  833 pmap_use_l1(pmap_t pm)
  834 {
  835         struct l1_ttable *l1;
  836 
  837         /*
  838          * Do nothing if we're in interrupt context.
  839          * Access to an L1 by the kernel pmap must not affect
  840          * the LRU list.
  841          */
  842         if (pm == pmap_kernel())
  843                 return;
  844 
  845         l1 = pm->pm_l1;
  846 
  847         /*
  848          * If the L1 is not currently on the LRU list, just return
  849          */
  850         if (l1->l1_domain_use_count == PMAP_DOMAINS)
  851                 return;
  852 
  853         mtx_lock(&l1_lru_lock);
  854 
  855         /*
  856          * Check the use count again, now that we've acquired the lock
  857          */
  858         if (l1->l1_domain_use_count == PMAP_DOMAINS) {
  859                 mtx_unlock(&l1_lru_lock);
  860                 return;
  861         }
  862 
  863         /*
  864          * Move the L1 to the back of the LRU list
  865          */
  866         TAILQ_REMOVE(&l1_lru_list, l1, l1_lru);
  867         TAILQ_INSERT_TAIL(&l1_lru_list, l1, l1_lru);
  868 
  869         mtx_unlock(&l1_lru_lock);
  870 }
  871 
  872 
  873 /*
  874  * Returns a pointer to the L2 bucket associated with the specified pmap
  875  * and VA, or NULL if no L2 bucket exists for the address.
  876  */
  877 static PMAP_INLINE struct l2_bucket *
  878 pmap_get_l2_bucket(pmap_t pm, vm_offset_t va)
  879 {
  880         struct l2_dtable *l2;
  881         struct l2_bucket *l2b;
  882         u_short l1idx;
  883 
  884         l1idx = L1_IDX(va);
  885 
  886         if ((l2 = pm->pm_l2[L2_IDX(l1idx)]) == NULL ||
  887             (l2b = &l2->l2_bucket[L2_BUCKET(l1idx)])->l2b_kva == NULL)
  888                 return (NULL);
  889 
  890         return (l2b);
  891 }
  892 
  893 /*
  894  * Returns a pointer to the L2 bucket associated with the specified pmap
  895  * and VA.
  896  *
  897  * If no L2 bucket exists, perform the necessary allocations to put an L2
  898  * bucket/page table in place.
  899  *
  900  * Note that if a new L2 bucket/page was allocated, the caller *must*
  901  * increment the bucket occupancy counter appropriately *before* 
  902  * releasing the pmap's lock to ensure no other thread or cpu deallocates
  903  * the bucket/page in the meantime.
  904  */
  905 static struct l2_bucket *
  906 pmap_alloc_l2_bucket(pmap_t pm, vm_offset_t va)
  907 {
  908         struct l2_dtable *l2;
  909         struct l2_bucket *l2b;
  910         u_short l1idx;
  911 
  912         l1idx = L1_IDX(va);
  913 
  914         PMAP_ASSERT_LOCKED(pm);
  915         mtx_assert(&vm_page_queue_mtx, MA_OWNED);
  916         if ((l2 = pm->pm_l2[L2_IDX(l1idx)]) == NULL) {
  917                 /*
  918                  * No mapping at this address, as there is
  919                  * no entry in the L1 table.
  920                  * Need to allocate a new l2_dtable.
  921                  */
  922 again_l2table:
  923                 PMAP_UNLOCK(pm);
  924                 vm_page_unlock_queues();
  925                 if ((l2 = pmap_alloc_l2_dtable()) == NULL) {
  926                         vm_page_lock_queues();
  927                         PMAP_LOCK(pm);
  928                         return (NULL);
  929                 }
  930                 vm_page_lock_queues();
  931                 PMAP_LOCK(pm);
  932                 if (pm->pm_l2[L2_IDX(l1idx)] != NULL) {
  933                         PMAP_UNLOCK(pm);
  934                         vm_page_unlock_queues();
  935                         uma_zfree(l2table_zone, l2);
  936                         vm_page_lock_queues();
  937                         PMAP_LOCK(pm);
  938                         l2 = pm->pm_l2[L2_IDX(l1idx)];
  939                         if (l2 == NULL)
  940                                 goto again_l2table;
  941                         /*
  942                          * Someone already allocated the l2_dtable while
  943                          * we were doing the same.
  944                          */
  945                 } else {
  946                         bzero(l2, sizeof(*l2));
  947                         /*
  948                          * Link it into the parent pmap
  949                          */
  950                         pm->pm_l2[L2_IDX(l1idx)] = l2;
  951                 }
  952         } 
  953 
  954         l2b = &l2->l2_bucket[L2_BUCKET(l1idx)];
  955 
  956         /*
  957          * Fetch pointer to the L2 page table associated with the address.
  958          */
  959         if (l2b->l2b_kva == NULL) {
  960                 pt_entry_t *ptep;
  961 
  962                 /*
  963                  * No L2 page table has been allocated. Chances are, this
  964                  * is because we just allocated the l2_dtable, above.
  965                  */
  966 again_ptep:
  967                 PMAP_UNLOCK(pm);
  968                 vm_page_unlock_queues();
  969                 ptep = (void*)uma_zalloc(l2zone, M_NOWAIT|M_USE_RESERVE);
  970                 vm_page_lock_queues();
  971                 PMAP_LOCK(pm);
  972                 if (l2b->l2b_kva != 0) {
  973                         /* We lost the race. */
  974                         PMAP_UNLOCK(pm);
  975                         vm_page_unlock_queues();
  976                         uma_zfree(l2zone, ptep);
  977                         vm_page_lock_queues();
  978                         PMAP_LOCK(pm);
  979                         if (l2b->l2b_kva == 0)
  980                                 goto again_ptep;
  981                         return (l2b);
  982                 }
  983                 l2b->l2b_phys = vtophys(ptep);
  984                 if (ptep == NULL) {
  985                         /*
  986                          * Oops, no more L2 page tables available at this
  987                          * time. We may need to deallocate the l2_dtable
  988                          * if we allocated a new one above.
  989                          */
  990                         if (l2->l2_occupancy == 0) {
  991                                 pm->pm_l2[L2_IDX(l1idx)] = NULL;
  992                                 pmap_free_l2_dtable(l2);
  993                         }
  994                         return (NULL);
  995                 }
  996 
  997                 l2->l2_occupancy++;
  998                 l2b->l2b_kva = ptep;
  999                 l2b->l2b_l1idx = l1idx;
 1000         }
 1001 
 1002         return (l2b);
 1003 }
 1004 
 1005 static PMAP_INLINE void
 1006 #ifndef PMAP_INCLUDE_PTE_SYNC
 1007 pmap_free_l2_ptp(pt_entry_t *l2)
 1008 #else
 1009 pmap_free_l2_ptp(boolean_t need_sync, pt_entry_t *l2)
 1010 #endif
 1011 {
 1012 #ifdef PMAP_INCLUDE_PTE_SYNC
 1013         /*
 1014          * Note: With a write-back cache, we may need to sync this
 1015          * L2 table before re-using it.
 1016          * This is because it may have belonged to a non-current
 1017          * pmap, in which case the cache syncs would have been
 1018          * skipped when the pages were being unmapped. If the
 1019          * L2 table were then to be immediately re-allocated to
 1020          * the *current* pmap, it may well contain stale mappings
 1021          * which have not yet been cleared by a cache write-back
 1022          * and so would still be visible to the mmu.
 1023          */
 1024         if (need_sync)
 1025                 PTE_SYNC_RANGE(l2, L2_TABLE_SIZE_REAL / sizeof(pt_entry_t));
 1026 #endif
 1027         uma_zfree(l2zone, l2);
 1028 }
 1029 /*
 1030  * One or more mappings in the specified L2 descriptor table have just been
 1031  * invalidated.
 1032  *
 1033  * Garbage collect the metadata and descriptor table itself if necessary.
 1034  *
 1035  * The pmap lock must be acquired when this is called (not necessary
 1036  * for the kernel pmap).
 1037  */
 1038 static void
 1039 pmap_free_l2_bucket(pmap_t pm, struct l2_bucket *l2b, u_int count)
 1040 {
 1041         struct l2_dtable *l2;
 1042         pd_entry_t *pl1pd, l1pd;
 1043         pt_entry_t *ptep;
 1044         u_short l1idx;
 1045 
 1046 
 1047         /*
 1048          * Update the bucket's reference count according to how many
 1049          * PTEs the caller has just invalidated.
 1050          */
 1051         l2b->l2b_occupancy -= count;
 1052 
 1053         /*
 1054          * Note:
 1055          *
 1056          * Level 2 page tables allocated to the kernel pmap are never freed
 1057          * as that would require checking all Level 1 page tables and
 1058          * removing any references to the Level 2 page table. See also the
 1059          * comment elsewhere about never freeing bootstrap L2 descriptors.
 1060          *
 1061          * We make do with just invalidating the mapping in the L2 table.
 1062          *
 1063          * This isn't really a big deal in practice and, in fact, leads
 1064          * to a performance win over time as we don't need to continually
 1065          * alloc/free.
 1066          */
 1067         if (l2b->l2b_occupancy > 0 || pm == pmap_kernel())
 1068                 return;
 1069 
 1070         /*
 1071          * There are no more valid mappings in this level 2 page table.
 1072          * Go ahead and NULL-out the pointer in the bucket, then
 1073          * free the page table.
 1074          */
 1075         l1idx = l2b->l2b_l1idx;
 1076         ptep = l2b->l2b_kva;
 1077         l2b->l2b_kva = NULL;
 1078 
 1079         pl1pd = &pm->pm_l1->l1_kva[l1idx];
 1080 
 1081         /*
 1082          * If the L1 slot matches the pmap's domain
 1083          * number, then invalidate it.
 1084          */
 1085         l1pd = *pl1pd & (L1_TYPE_MASK | L1_C_DOM_MASK);
 1086         if (l1pd == (L1_C_DOM(pm->pm_domain) | L1_TYPE_C)) {
 1087                 *pl1pd = 0;
 1088                 PTE_SYNC(pl1pd);
 1089         }
 1090 
 1091         /*
 1092          * Release the L2 descriptor table back to the pool cache.
 1093          */
 1094 #ifndef PMAP_INCLUDE_PTE_SYNC
 1095         pmap_free_l2_ptp(ptep);
 1096 #else
 1097         pmap_free_l2_ptp(!pmap_is_current(pm), ptep);
 1098 #endif
 1099 
 1100         /*
 1101          * Update the reference count in the associated l2_dtable
 1102          */
 1103         l2 = pm->pm_l2[L2_IDX(l1idx)];
 1104         if (--l2->l2_occupancy > 0)
 1105                 return;
 1106 
 1107         /*
 1108          * There are no more valid mappings in any of the Level 1
 1109          * slots managed by this l2_dtable. Go ahead and NULL-out
 1110          * the pointer in the parent pmap and free the l2_dtable.
 1111          */
 1112         pm->pm_l2[L2_IDX(l1idx)] = NULL;
 1113         pmap_free_l2_dtable(l2);
 1114 }
 1115 
 1116 /*
 1117  * Pool cache constructors for L2 descriptor tables, metadata and pmap
 1118  * structures.
 1119  */
 1120 static int
 1121 pmap_l2ptp_ctor(void *mem, int size, void *arg, int flags)
 1122 {
 1123 #ifndef PMAP_INCLUDE_PTE_SYNC
 1124         struct l2_bucket *l2b;
 1125         pt_entry_t *ptep, pte;
 1126 #ifdef ARM_USE_SMALL_ALLOC
 1127         pd_entry_t *pde;
 1128 #endif
 1129         vm_offset_t va = (vm_offset_t)mem & ~PAGE_MASK;
 1130 
 1131         /*
 1132          * The mappings for these page tables were initially made using
 1133          * pmap_kenter() by the pool subsystem. Therefore, the cache-
 1134          * mode will not be right for page table mappings. To avoid
 1135          * polluting the pmap_kenter() code with a special case for
 1136          * page tables, we simply fix up the cache-mode here if it's not
 1137          * correct.
 1138          */
 1139 #ifdef ARM_USE_SMALL_ALLOC
 1140         pde = &kernel_pmap->pm_l1->l1_kva[L1_IDX(va)];
 1141         if (!l1pte_section_p(*pde)) {
 1142 #endif
 1143                 l2b = pmap_get_l2_bucket(pmap_kernel(), va);
 1144                 ptep = &l2b->l2b_kva[l2pte_index(va)];
 1145                 pte = *ptep;
 1146                 
 1147                 if ((pte & L2_S_CACHE_MASK) != pte_l2_s_cache_mode_pt) {
 1148                         /*
 1149                          * Page tables must have the cache-mode set to 
 1150                          * Write-Thru.
 1151                          */
 1152                         *ptep = (pte & ~L2_S_CACHE_MASK) | pte_l2_s_cache_mode_pt;
 1153                         PTE_SYNC(ptep);
 1154                         cpu_tlb_flushD_SE(va);
 1155                         cpu_cpwait();
 1156                 }
 1157 #ifdef ARM_USE_SMALL_ALLOC
 1158         }
 1159 #endif
 1160 #endif
 1161         memset(mem, 0, L2_TABLE_SIZE_REAL);
 1162         PTE_SYNC_RANGE(mem, L2_TABLE_SIZE_REAL / sizeof(pt_entry_t));
 1163         return (0);
 1164 }
 1165 
 1166 /*
 1167  * A bunch of routines to conditionally flush the caches/TLB depending
 1168  * on whether the specified pmap actually needs to be flushed at any
 1169  * given time.
 1170  */
 1171 static PMAP_INLINE void
 1172 pmap_tlb_flushID_SE(pmap_t pm, vm_offset_t va)
 1173 {
 1174 
 1175         if (pmap_is_current(pm))
 1176                 cpu_tlb_flushID_SE(va);
 1177 }
 1178 
 1179 static PMAP_INLINE void
 1180 pmap_tlb_flushD_SE(pmap_t pm, vm_offset_t va)
 1181 {
 1182 
 1183         if (pmap_is_current(pm))
 1184                 cpu_tlb_flushD_SE(va);
 1185 }
 1186 
 1187 static PMAP_INLINE void
 1188 pmap_tlb_flushID(pmap_t pm)
 1189 {
 1190 
 1191         if (pmap_is_current(pm))
 1192                 cpu_tlb_flushID();
 1193 }
 1194 static PMAP_INLINE void
 1195 pmap_tlb_flushD(pmap_t pm)
 1196 {
 1197 
 1198         if (pmap_is_current(pm))
 1199                 cpu_tlb_flushD();
 1200 }
 1201 
 1202 static int
 1203 pmap_has_valid_mapping(pmap_t pm, vm_offset_t va)
 1204 {
 1205         pd_entry_t *pde;
 1206         pt_entry_t *ptep;
 1207 
 1208         if (pmap_get_pde_pte(pm, va, &pde, &ptep) &&
 1209             ptep && ((*ptep & L2_TYPE_MASK) != L2_TYPE_INV))
 1210                 return (1);
 1211 
 1212         return (0);
 1213 }
 1214 
 1215 static PMAP_INLINE void
 1216 pmap_idcache_wbinv_range(pmap_t pm, vm_offset_t va, vm_size_t len)
 1217 {
 1218         vm_size_t rest;
 1219 
 1220         CTR4(KTR_PMAP, "pmap_dcache_wbinv_range: pmap %p is_kernel %d va 0x%08x"
 1221             " len 0x%x ", pm, pm == pmap_kernel(), va, len);
 1222 
 1223         if (pmap_is_current(pm) || pm == pmap_kernel()) {
 1224                 rest = MIN(PAGE_SIZE - (va & PAGE_MASK), len);
 1225                 while (len > 0) {
 1226                         if (pmap_has_valid_mapping(pm, va)) {
 1227                                 cpu_idcache_wbinv_range(va, rest);
 1228                                 cpu_l2cache_wbinv_range(va, rest);
 1229                         }
 1230                         len -= rest;
 1231                         va += rest;
 1232                         rest = MIN(PAGE_SIZE, len);
 1233                 }
 1234         }
 1235 }
 1236 
 1237 static PMAP_INLINE void
 1238 pmap_dcache_wb_range(pmap_t pm, vm_offset_t va, vm_size_t len, boolean_t do_inv,
 1239     boolean_t rd_only)
 1240 {
 1241         vm_size_t rest;
 1242 
 1243         CTR4(KTR_PMAP, "pmap_dcache_wb_range: pmap %p is_kernel %d va 0x%08x "
 1244             "len 0x%x ", pm, pm == pmap_kernel(), va, len);
 1245         CTR2(KTR_PMAP, " do_inv %d rd_only %d", do_inv, rd_only);
 1246 
 1247         if (pmap_is_current(pm)) {
 1248                 rest = MIN(PAGE_SIZE - (va & PAGE_MASK), len);
 1249                 while (len > 0) {
 1250                         if (pmap_has_valid_mapping(pm, va)) {
 1251                                 if (do_inv && rd_only) {
 1252                                         cpu_dcache_inv_range(va, rest);
 1253                                         cpu_l2cache_inv_range(va, rest);
 1254                                 } else if (do_inv) {
 1255                                         cpu_dcache_wbinv_range(va, rest);
 1256                                         cpu_l2cache_wbinv_range(va, rest);
 1257                                 } else if (!rd_only) {
 1258                                         cpu_dcache_wb_range(va, rest);
 1259                                         cpu_l2cache_wb_range(va, rest);
 1260                                 }
 1261                         }
 1262                         len -= rest;
 1263                         va += rest;
 1264 
 1265                         rest = MIN(PAGE_SIZE, len);
 1266                 }
 1267         }
 1268 }
 1269 
 1270 static PMAP_INLINE void
 1271 pmap_idcache_wbinv_all(pmap_t pm)
 1272 {
 1273 
 1274         if (pmap_is_current(pm)) {
 1275                 cpu_idcache_wbinv_all();
 1276                 cpu_l2cache_wbinv_all();
 1277         }
 1278 }
 1279 
 1280 static PMAP_INLINE void
 1281 pmap_dcache_wbinv_all(pmap_t pm)
 1282 {
 1283 
 1284         if (pmap_is_current(pm)) {
 1285                 cpu_dcache_wbinv_all();
 1286                 cpu_l2cache_wbinv_all();
 1287         }
 1288 }
 1289 
 1290 /*
 1291  * PTE_SYNC_CURRENT:
 1292  *
 1293  *     Make sure the pte is written out to RAM.
 1294  *     We need to do this for one of two cases:
 1295  *       - We're dealing with the kernel pmap
 1296  *       - There is no pmap active in the cache/tlb.
 1297  *       - The specified pmap is 'active' in the cache/tlb.
 1298  */
 1299 #ifdef PMAP_INCLUDE_PTE_SYNC
 1300 #define PTE_SYNC_CURRENT(pm, ptep)      \
 1301 do {                                    \
 1302         if (PMAP_NEEDS_PTE_SYNC &&      \
 1303             pmap_is_current(pm))        \
 1304                 PTE_SYNC(ptep);         \
 1305 } while (/*CONSTCOND*/0)
 1306 #else
 1307 #define PTE_SYNC_CURRENT(pm, ptep)      /* nothing */
 1308 #endif
 1309 
 1310 /*
 1311  * cacheable == -1 means we must make the entry uncacheable, 1 means
 1312  * cacheable;
 1313  */
 1314 static __inline void
 1315 pmap_set_cache_entry(pv_entry_t pv, pmap_t pm, vm_offset_t va, int cacheable)
 1316 {
 1317         struct l2_bucket *l2b;
 1318         pt_entry_t *ptep, pte;
 1319 
 1320         l2b = pmap_get_l2_bucket(pv->pv_pmap, pv->pv_va);
 1321         ptep = &l2b->l2b_kva[l2pte_index(pv->pv_va)];
 1322 
 1323         if (cacheable == 1) {
 1324                 pte = (*ptep & ~L2_S_CACHE_MASK) | pte_l2_s_cache_mode;
 1325                 if (l2pte_valid(pte)) {
 1326                         if (PV_BEEN_EXECD(pv->pv_flags)) {
 1327                                 pmap_tlb_flushID_SE(pv->pv_pmap, pv->pv_va);
 1328                         } else if (PV_BEEN_REFD(pv->pv_flags)) {
 1329                                 pmap_tlb_flushD_SE(pv->pv_pmap, pv->pv_va);
 1330                         }
 1331                 }
 1332         } else {
 1333                 pte = *ptep &~ L2_S_CACHE_MASK;
 1334                 if ((va != pv->pv_va || pm != pv->pv_pmap) &&
 1335                             l2pte_valid(pte)) {
 1336                         if (PV_BEEN_EXECD(pv->pv_flags)) {
 1337                                 pmap_idcache_wbinv_range(pv->pv_pmap,
 1338                                             pv->pv_va, PAGE_SIZE);
 1339                                 pmap_tlb_flushID_SE(pv->pv_pmap, pv->pv_va);
 1340                         } else if (PV_BEEN_REFD(pv->pv_flags)) {
 1341                                 pmap_dcache_wb_range(pv->pv_pmap,
 1342                                             pv->pv_va, PAGE_SIZE, TRUE,
 1343                                             (pv->pv_flags & PVF_WRITE) == 0);
 1344                                 pmap_tlb_flushD_SE(pv->pv_pmap,
 1345                                             pv->pv_va);
 1346                         }
 1347                 }
 1348         }
 1349         *ptep = pte;
 1350         PTE_SYNC_CURRENT(pv->pv_pmap, ptep);
 1351 }
 1352 
 1353 static void
 1354 pmap_fix_cache(struct vm_page *pg, pmap_t pm, vm_offset_t va)
 1355 {
 1356         int pmwc = 0;
 1357         int writable = 0, kwritable = 0, uwritable = 0;
 1358         int entries = 0, kentries = 0, uentries = 0;
 1359         struct pv_entry *pv;
 1360 
 1361         mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 1362 
 1363         /* the cache gets written back/invalidated on context switch.
 1364          * therefore, if a user page shares an entry in the same page or
 1365          * with the kernel map and at least one is writable, then the
 1366          * cache entry must be set write-through.
 1367          */
 1368 
 1369         TAILQ_FOREACH(pv, &pg->md.pv_list, pv_list) {
 1370                         /* generate a count of the pv_entry uses */
 1371                 if (pv->pv_flags & PVF_WRITE) {
 1372                         if (pv->pv_pmap == pmap_kernel())
 1373                                 kwritable++;
 1374                         else if (pv->pv_pmap == pm)
 1375                                 uwritable++;
 1376                         writable++;
 1377                 }
 1378                 if (pv->pv_pmap == pmap_kernel())
 1379                         kentries++;
 1380                 else {
 1381                         if (pv->pv_pmap == pm)
 1382                                 uentries++;
 1383                         entries++;
 1384                 }
 1385         }
 1386                 /*
 1387                  * check if the user duplicate mapping has
 1388                  * been removed.
 1389                  */
 1390         if ((pm != pmap_kernel()) && (((uentries > 1) && uwritable) ||
 1391             (uwritable > 1)))
 1392                         pmwc = 1;
 1393 
 1394         TAILQ_FOREACH(pv, &pg->md.pv_list, pv_list) {
 1395                 /* check for user uncachable conditions - order is important */
 1396                 if (pm != pmap_kernel() &&
 1397                     (pv->pv_pmap == pm || pv->pv_pmap == pmap_kernel())) {
 1398 
 1399                         if ((uentries > 1 && uwritable) || uwritable > 1) {
 1400 
 1401                                 /* user duplicate mapping */
 1402                                 if (pv->pv_pmap != pmap_kernel())
 1403                                         pv->pv_flags |= PVF_MWC;
 1404 
 1405                                 if (!(pv->pv_flags & PVF_NC)) {
 1406                                         pv->pv_flags |= PVF_NC;
 1407                                         pmap_set_cache_entry(pv, pm, va, -1);
 1408                                 }
 1409                                 continue;
 1410                         } else  /* no longer a duplicate user */
 1411                                 pv->pv_flags &= ~PVF_MWC;
 1412                 }
 1413 
 1414                 /*
 1415                  * check for kernel uncachable conditions
 1416                  * kernel writable or kernel readable with writable user entry
 1417                  */
 1418                 if ((kwritable && entries) ||
 1419                     (kwritable > 1) ||
 1420                     ((kwritable != writable) && kentries &&
 1421                      (pv->pv_pmap == pmap_kernel() ||
 1422                       (pv->pv_flags & PVF_WRITE) ||
 1423                       (pv->pv_flags & PVF_MWC)))) {
 1424 
 1425                         if (!(pv->pv_flags & PVF_NC)) {
 1426                                 pv->pv_flags |= PVF_NC;
 1427                                 pmap_set_cache_entry(pv, pm, va, -1);
 1428                         }
 1429                         continue;
 1430                 }
 1431 
 1432                         /* kernel and user are cachable */
 1433                 if ((pm == pmap_kernel()) && !(pv->pv_flags & PVF_MWC) &&
 1434                     (pv->pv_flags & PVF_NC)) {
 1435 
 1436                         pv->pv_flags &= ~PVF_NC;
 1437                         pmap_set_cache_entry(pv, pm, va, 1);
 1438                         continue;
 1439                 }
 1440                         /* user is no longer sharable and writable */
 1441                 if (pm != pmap_kernel() &&
 1442                     (pv->pv_pmap == pm || pv->pv_pmap == pmap_kernel()) &&
 1443                     !pmwc && (pv->pv_flags & PVF_NC)) {
 1444 
 1445                         pv->pv_flags &= ~(PVF_NC | PVF_MWC);
 1446                         pmap_set_cache_entry(pv, pm, va, 1);
 1447                 }
 1448         }
 1449 
 1450         if ((kwritable == 0) && (writable == 0)) {
 1451                 pg->md.pvh_attrs &= ~PVF_MOD;
 1452                 vm_page_flag_clear(pg, PG_WRITEABLE);
 1453                 return;
 1454         }
 1455 }
 1456 
 1457 /*
 1458  * Modify pte bits for all ptes corresponding to the given physical address.
 1459  * We use `maskbits' rather than `clearbits' because we're always passing
 1460  * constants and the latter would require an extra inversion at run-time.
 1461  */
 1462 static int 
 1463 pmap_clearbit(struct vm_page *pg, u_int maskbits)
 1464 {
 1465         struct l2_bucket *l2b;
 1466         struct pv_entry *pv;
 1467         pt_entry_t *ptep, npte, opte;
 1468         pmap_t pm;
 1469         vm_offset_t va;
 1470         u_int oflags;
 1471         int count = 0;
 1472 
 1473         mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 1474 
 1475         if (maskbits & PVF_WRITE)
 1476                 maskbits |= PVF_MOD;
 1477         /*
 1478          * Clear saved attributes (modify, reference)
 1479          */
 1480         pg->md.pvh_attrs &= ~(maskbits & (PVF_MOD | PVF_REF));
 1481 
 1482         if (TAILQ_EMPTY(&pg->md.pv_list)) {
 1483                 return (0);
 1484         }
 1485 
 1486         /*
 1487          * Loop over all current mappings setting/clearing as appropos
 1488          */
 1489         TAILQ_FOREACH(pv, &pg->md.pv_list, pv_list) {
 1490                 va = pv->pv_va;
 1491                 pm = pv->pv_pmap;
 1492                 oflags = pv->pv_flags;
 1493 
 1494                 if (!(oflags & maskbits)) {
 1495                         if ((maskbits & PVF_WRITE) && (pv->pv_flags & PVF_NC)) {
 1496                                 /* It is safe to re-enable cacheing here. */
 1497                                 PMAP_LOCK(pm);
 1498                                 l2b = pmap_get_l2_bucket(pm, va);
 1499                                 ptep = &l2b->l2b_kva[l2pte_index(va)];
 1500                                 *ptep |= pte_l2_s_cache_mode;
 1501                                 PTE_SYNC(ptep);
 1502                                 PMAP_UNLOCK(pm);
 1503                                 pv->pv_flags &= ~(PVF_NC | PVF_MWC);
 1504                                 
 1505                         }
 1506                         continue;
 1507                 }
 1508                 pv->pv_flags &= ~maskbits;
 1509 
 1510                 PMAP_LOCK(pm);
 1511 
 1512                 l2b = pmap_get_l2_bucket(pm, va);
 1513 
 1514                 ptep = &l2b->l2b_kva[l2pte_index(va)];
 1515                 npte = opte = *ptep;
 1516 
 1517                 if (maskbits & (PVF_WRITE|PVF_MOD)) {
 1518                         if ((pv->pv_flags & PVF_NC)) {
 1519                                 /* 
 1520                                  * Entry is not cacheable:
 1521                                  *
 1522                                  * Don't turn caching on again if this is a 
 1523                                  * modified emulation. This would be
 1524                                  * inconsitent with the settings created by
 1525                                  * pmap_fix_cache(). Otherwise, it's safe
 1526                                  * to re-enable cacheing.
 1527                                  *
 1528                                  * There's no need to call pmap_fix_cache()
 1529                                  * here: all pages are losing their write
 1530                                  * permission.
 1531                                  */
 1532                                 if (maskbits & PVF_WRITE) {
 1533                                         npte |= pte_l2_s_cache_mode;
 1534                                         pv->pv_flags &= ~(PVF_NC | PVF_MWC);
 1535                                 }
 1536                         } else
 1537                         if (opte & L2_S_PROT_W) {
 1538                                 vm_page_dirty(pg);
 1539                                 /* 
 1540                                  * Entry is writable/cacheable: check if pmap
 1541                                  * is current if it is flush it, otherwise it
 1542                                  * won't be in the cache
 1543                                  */
 1544                                 if (PV_BEEN_EXECD(oflags))
 1545                                         pmap_idcache_wbinv_range(pm, pv->pv_va,
 1546                                             PAGE_SIZE);
 1547                                 else
 1548                                 if (PV_BEEN_REFD(oflags))
 1549                                         pmap_dcache_wb_range(pm, pv->pv_va,
 1550                                             PAGE_SIZE,
 1551                                             (maskbits & PVF_REF) ? TRUE : FALSE,
 1552                                             FALSE);
 1553                         }
 1554 
 1555                         /* make the pte read only */
 1556                         npte &= ~L2_S_PROT_W;
 1557                 }
 1558 
 1559                 if (maskbits & PVF_REF) {
 1560                         if ((pv->pv_flags & PVF_NC) == 0 &&
 1561                             (maskbits & (PVF_WRITE|PVF_MOD)) == 0) {
 1562                                 /*
 1563                                  * Check npte here; we may have already
 1564                                  * done the wbinv above, and the validity
 1565                                  * of the PTE is the same for opte and
 1566                                  * npte.
 1567                                  */
 1568                                 if (npte & L2_S_PROT_W) {
 1569                                         if (PV_BEEN_EXECD(oflags))
 1570                                                 pmap_idcache_wbinv_range(pm,
 1571                                                     pv->pv_va, PAGE_SIZE);
 1572                                         else
 1573                                         if (PV_BEEN_REFD(oflags))
 1574                                                 pmap_dcache_wb_range(pm,
 1575                                                     pv->pv_va, PAGE_SIZE,
 1576                                                     TRUE, FALSE);
 1577                                 } else
 1578                                 if ((npte & L2_TYPE_MASK) != L2_TYPE_INV) {
 1579                                         /* XXXJRT need idcache_inv_range */
 1580                                         if (PV_BEEN_EXECD(oflags))
 1581                                                 pmap_idcache_wbinv_range(pm,
 1582                                                     pv->pv_va, PAGE_SIZE);
 1583                                         else
 1584                                         if (PV_BEEN_REFD(oflags))
 1585                                                 pmap_dcache_wb_range(pm,
 1586                                                     pv->pv_va, PAGE_SIZE,
 1587                                                     TRUE, TRUE);
 1588                                 }
 1589                         }
 1590 
 1591                         /*
 1592                          * Make the PTE invalid so that we will take a
 1593                          * page fault the next time the mapping is
 1594                          * referenced.
 1595                          */
 1596                         npte &= ~L2_TYPE_MASK;
 1597                         npte |= L2_TYPE_INV;
 1598                 }
 1599 
 1600                 if (npte != opte) {
 1601                         count++;
 1602                         *ptep = npte;
 1603                         PTE_SYNC(ptep);
 1604                         /* Flush the TLB entry if a current pmap. */
 1605                         if (PV_BEEN_EXECD(oflags))
 1606                                 pmap_tlb_flushID_SE(pm, pv->pv_va);
 1607                         else
 1608                         if (PV_BEEN_REFD(oflags))
 1609                                 pmap_tlb_flushD_SE(pm, pv->pv_va);
 1610                 }
 1611 
 1612                 PMAP_UNLOCK(pm);
 1613 
 1614         }
 1615 
 1616         if (maskbits & PVF_WRITE)
 1617                 vm_page_flag_clear(pg, PG_WRITEABLE);
 1618         return (count);
 1619 }
 1620 
 1621 /*
 1622  * main pv_entry manipulation functions:
 1623  *   pmap_enter_pv: enter a mapping onto a vm_page list
 1624  *   pmap_remove_pv: remove a mappiing from a vm_page list
 1625  *
 1626  * NOTE: pmap_enter_pv expects to lock the pvh itself
 1627  *       pmap_remove_pv expects te caller to lock the pvh before calling
 1628  */
 1629 
 1630 /*
 1631  * pmap_enter_pv: enter a mapping onto a vm_page lst
 1632  *
 1633  * => caller should hold the proper lock on pmap_main_lock
 1634  * => caller should have pmap locked
 1635  * => we will gain the lock on the vm_page and allocate the new pv_entry
 1636  * => caller should adjust ptp's wire_count before calling
 1637  * => caller should not adjust pmap's wire_count
 1638  */
 1639 static void
 1640 pmap_enter_pv(struct vm_page *pg, struct pv_entry *pve, pmap_t pm,
 1641     vm_offset_t va, u_int flags)
 1642 {
 1643 
 1644         int km;
 1645 
 1646         mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 1647 
 1648         if (pg->md.pv_kva) {
 1649                 /* PMAP_ASSERT_LOCKED(pmap_kernel()); */
 1650                 pve->pv_pmap = pmap_kernel();
 1651                 pve->pv_va = pg->md.pv_kva;
 1652                 pve->pv_flags = PVF_WRITE | PVF_UNMAN;
 1653                 pg->md.pv_kva = 0;
 1654 
 1655                 TAILQ_INSERT_HEAD(&pg->md.pv_list, pve, pv_list);
 1656                 TAILQ_INSERT_HEAD(&pm->pm_pvlist, pve, pv_plist);
 1657                 if ((km = PMAP_OWNED(pmap_kernel())))
 1658                         PMAP_UNLOCK(pmap_kernel());
 1659                 vm_page_unlock_queues();
 1660                 if ((pve = pmap_get_pv_entry()) == NULL)
 1661                         panic("pmap_kenter_internal: no pv entries");
 1662                 vm_page_lock_queues();
 1663                 if (km)
 1664                         PMAP_LOCK(pmap_kernel());
 1665         }
 1666 
 1667         PMAP_ASSERT_LOCKED(pm);
 1668         pve->pv_pmap = pm;
 1669         pve->pv_va = va;
 1670         pve->pv_flags = flags;
 1671 
 1672         TAILQ_INSERT_HEAD(&pg->md.pv_list, pve, pv_list);
 1673         TAILQ_INSERT_HEAD(&pm->pm_pvlist, pve, pv_plist);
 1674         pg->md.pvh_attrs |= flags & (PVF_REF | PVF_MOD);
 1675         if (pve->pv_flags & PVF_WIRED)
 1676                 ++pm->pm_stats.wired_count;
 1677         vm_page_flag_set(pg, PG_REFERENCED);
 1678 }
 1679 
 1680 /*
 1681  *
 1682  * pmap_find_pv: Find a pv entry
 1683  *
 1684  * => caller should hold lock on vm_page
 1685  */
 1686 static PMAP_INLINE struct pv_entry *
 1687 pmap_find_pv(struct vm_page *pg, pmap_t pm, vm_offset_t va)
 1688 {
 1689         struct pv_entry *pv;
 1690 
 1691         mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 1692         TAILQ_FOREACH(pv, &pg->md.pv_list, pv_list)
 1693             if (pm == pv->pv_pmap && va == pv->pv_va)
 1694                     break;
 1695         return (pv);
 1696 }
 1697 
 1698 /*
 1699  * vector_page_setprot:
 1700  *
 1701  *      Manipulate the protection of the vector page.
 1702  */
 1703 void
 1704 vector_page_setprot(int prot)
 1705 {
 1706         struct l2_bucket *l2b;
 1707         pt_entry_t *ptep;
 1708 
 1709         l2b = pmap_get_l2_bucket(pmap_kernel(), vector_page);
 1710 
 1711         ptep = &l2b->l2b_kva[l2pte_index(vector_page)];
 1712 
 1713         *ptep = (*ptep & ~L1_S_PROT_MASK) | L2_S_PROT(PTE_KERNEL, prot);
 1714         PTE_SYNC(ptep);
 1715         cpu_tlb_flushD_SE(vector_page);
 1716         cpu_cpwait();
 1717 }
 1718 
 1719 /*
 1720  * pmap_remove_pv: try to remove a mapping from a pv_list
 1721  *
 1722  * => caller should hold proper lock on pmap_main_lock
 1723  * => pmap should be locked
 1724  * => caller should hold lock on vm_page [so that attrs can be adjusted]
 1725  * => caller should adjust ptp's wire_count and free PTP if needed
 1726  * => caller should NOT adjust pmap's wire_count
 1727  * => we return the removed pve
 1728  */
 1729 
 1730 static void
 1731 pmap_nuke_pv(struct vm_page *pg, pmap_t pm, struct pv_entry *pve)
 1732 {
 1733 
 1734         struct pv_entry *pv;
 1735         mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 1736         PMAP_ASSERT_LOCKED(pm);
 1737         TAILQ_REMOVE(&pg->md.pv_list, pve, pv_list);
 1738         TAILQ_REMOVE(&pm->pm_pvlist, pve, pv_plist);
 1739         if (pve->pv_flags & PVF_WIRED)
 1740                 --pm->pm_stats.wired_count;
 1741         if (pg->md.pvh_attrs & PVF_MOD)
 1742                 vm_page_dirty(pg);
 1743         if (TAILQ_FIRST(&pg->md.pv_list) == NULL)
 1744                 pg->md.pvh_attrs &= ~PVF_REF;
 1745         else
 1746                 vm_page_flag_set(pg, PG_REFERENCED);
 1747         if ((pve->pv_flags & PVF_NC) && ((pm == pmap_kernel()) ||
 1748              (pve->pv_flags & PVF_WRITE) || !(pve->pv_flags & PVF_MWC)))
 1749                 pmap_fix_cache(pg, pm, 0);
 1750         else if (pve->pv_flags & PVF_WRITE) {
 1751                 TAILQ_FOREACH(pve, &pg->md.pv_list, pv_list)
 1752                     if (pve->pv_flags & PVF_WRITE)
 1753                             break;
 1754                 if (!pve) {
 1755                         pg->md.pvh_attrs &= ~PVF_MOD;
 1756                         vm_page_flag_clear(pg, PG_WRITEABLE);
 1757                 }
 1758         }
 1759         pv = TAILQ_FIRST(&pg->md.pv_list);
 1760         if (pv != NULL && (pv->pv_flags & PVF_UNMAN) &&
 1761             TAILQ_NEXT(pv, pv_list) == NULL) {
 1762                 pg->md.pv_kva = pv->pv_va;
 1763                         /* a recursive pmap_nuke_pv */
 1764                 TAILQ_REMOVE(&pg->md.pv_list, pv, pv_list);
 1765                 TAILQ_REMOVE(&pm->pm_pvlist, pv, pv_plist);
 1766                 if (pv->pv_flags & PVF_WIRED)
 1767                         --pm->pm_stats.wired_count;
 1768                 pg->md.pvh_attrs &= ~PVF_REF;
 1769                 pg->md.pvh_attrs &= ~PVF_MOD;
 1770                 vm_page_flag_clear(pg, PG_WRITEABLE);
 1771                 pmap_free_pv_entry(pv);
 1772         }
 1773 }
 1774 
 1775 static struct pv_entry *
 1776 pmap_remove_pv(struct vm_page *pg, pmap_t pm, vm_offset_t va)
 1777 {
 1778         struct pv_entry *pve;
 1779 
 1780         mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 1781         pve = TAILQ_FIRST(&pg->md.pv_list);
 1782 
 1783         while (pve) {
 1784                 if (pve->pv_pmap == pm && pve->pv_va == va) {   /* match? */
 1785                         pmap_nuke_pv(pg, pm, pve);
 1786                         break;
 1787                 }
 1788                 pve = TAILQ_NEXT(pve, pv_list);
 1789         }
 1790 
 1791         if (pve == NULL && pg->md.pv_kva == va)
 1792                 pg->md.pv_kva = 0;
 1793 
 1794         return(pve);                            /* return removed pve */
 1795 }
 1796 /*
 1797  *
 1798  * pmap_modify_pv: Update pv flags
 1799  *
 1800  * => caller should hold lock on vm_page [so that attrs can be adjusted]
 1801  * => caller should NOT adjust pmap's wire_count
 1802  * => we return the old flags
 1803  * 
 1804  * Modify a physical-virtual mapping in the pv table
 1805  */
 1806 static u_int
 1807 pmap_modify_pv(struct vm_page *pg, pmap_t pm, vm_offset_t va,
 1808     u_int clr_mask, u_int set_mask)
 1809 {
 1810         struct pv_entry *npv;
 1811         u_int flags, oflags;
 1812 
 1813         PMAP_ASSERT_LOCKED(pm);
 1814         mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 1815         if ((npv = pmap_find_pv(pg, pm, va)) == NULL)
 1816                 return (0);
 1817 
 1818         /*
 1819          * There is at least one VA mapping this page.
 1820          */
 1821 
 1822         if (clr_mask & (PVF_REF | PVF_MOD))
 1823                 pg->md.pvh_attrs |= set_mask & (PVF_REF | PVF_MOD);
 1824 
 1825         oflags = npv->pv_flags;
 1826         npv->pv_flags = flags = (oflags & ~clr_mask) | set_mask;
 1827 
 1828         if ((flags ^ oflags) & PVF_WIRED) {
 1829                 if (flags & PVF_WIRED)
 1830                         ++pm->pm_stats.wired_count;
 1831                 else
 1832                         --pm->pm_stats.wired_count;
 1833         }
 1834 
 1835         if ((flags ^ oflags) & PVF_WRITE)
 1836                 pmap_fix_cache(pg, pm, 0);
 1837 
 1838         return (oflags);
 1839 }
 1840 
 1841 /* Function to set the debug level of the pmap code */
 1842 #ifdef PMAP_DEBUG
 1843 void
 1844 pmap_debug(int level)
 1845 {
 1846         pmap_debug_level = level;
 1847         dprintf("pmap_debug: level=%d\n", pmap_debug_level);
 1848 }
 1849 #endif  /* PMAP_DEBUG */
 1850 
 1851 void
 1852 pmap_pinit0(struct pmap *pmap)
 1853 {
 1854         PDEBUG(1, printf("pmap_pinit0: pmap = %08x\n", (u_int32_t) pmap));
 1855 
 1856         dprintf("pmap_pinit0: pmap = %08x, pm_pdir = %08x\n",
 1857                 (u_int32_t) pmap, (u_int32_t) pmap->pm_pdir);
 1858         bcopy(kernel_pmap, pmap, sizeof(*pmap));
 1859         bzero(&pmap->pm_mtx, sizeof(pmap->pm_mtx));
 1860         PMAP_LOCK_INIT(pmap);
 1861 }
 1862 
 1863 /*
 1864  *      Initialize a vm_page's machine-dependent fields.
 1865  */
 1866 void
 1867 pmap_page_init(vm_page_t m)
 1868 {
 1869 
 1870         TAILQ_INIT(&m->md.pv_list);
 1871 }
 1872 
 1873 /*
 1874  *      Initialize the pmap module.
 1875  *      Called by vm_init, to initialize any structures that the pmap
 1876  *      system needs to map virtual memory.
 1877  */
 1878 void
 1879 pmap_init(void)
 1880 {
 1881         int shpgperproc = PMAP_SHPGPERPROC;
 1882 
 1883         PDEBUG(1, printf("pmap_init: phys_start = %08x\n"));
 1884 
 1885         /*
 1886          * init the pv free list
 1887          */
 1888         pvzone = uma_zcreate("PV ENTRY", sizeof (struct pv_entry), NULL, NULL, 
 1889             NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_VM | UMA_ZONE_NOFREE);
 1890         /*
 1891          * Now it is safe to enable pv_table recording.
 1892          */
 1893         PDEBUG(1, printf("pmap_init: done!\n"));
 1894 
 1895         TUNABLE_INT_FETCH("vm.pmap.shpgperproc", &shpgperproc);
 1896         
 1897         pv_entry_max = shpgperproc * maxproc + cnt.v_page_count;
 1898         pv_entry_high_water = 9 * (pv_entry_max / 10);
 1899         l2zone = uma_zcreate("L2 Table", L2_TABLE_SIZE_REAL, pmap_l2ptp_ctor,
 1900             NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_VM | UMA_ZONE_NOFREE);
 1901         l2table_zone = uma_zcreate("L2 Table", sizeof(struct l2_dtable),
 1902             NULL, NULL, NULL, NULL, UMA_ALIGN_PTR,
 1903             UMA_ZONE_VM | UMA_ZONE_NOFREE);
 1904 
 1905         uma_zone_set_obj(pvzone, &pvzone_obj, pv_entry_max);
 1906 
 1907 }
 1908 
 1909 int
 1910 pmap_fault_fixup(pmap_t pm, vm_offset_t va, vm_prot_t ftype, int user)
 1911 {
 1912         struct l2_dtable *l2;
 1913         struct l2_bucket *l2b;
 1914         pd_entry_t *pl1pd, l1pd;
 1915         pt_entry_t *ptep, pte;
 1916         vm_paddr_t pa;
 1917         u_int l1idx;
 1918         int rv = 0;
 1919 
 1920         l1idx = L1_IDX(va);
 1921         vm_page_lock_queues();
 1922         PMAP_LOCK(pm);
 1923 
 1924         /*
 1925          * If there is no l2_dtable for this address, then the process
 1926          * has no business accessing it.
 1927          *
 1928          * Note: This will catch userland processes trying to access
 1929          * kernel addresses.
 1930          */
 1931         l2 = pm->pm_l2[L2_IDX(l1idx)];
 1932         if (l2 == NULL)
 1933                 goto out;
 1934 
 1935         /*
 1936          * Likewise if there is no L2 descriptor table
 1937          */
 1938         l2b = &l2->l2_bucket[L2_BUCKET(l1idx)];
 1939         if (l2b->l2b_kva == NULL)
 1940                 goto out;
 1941 
 1942         /*
 1943          * Check the PTE itself.
 1944          */
 1945         ptep = &l2b->l2b_kva[l2pte_index(va)];
 1946         pte = *ptep;
 1947         if (pte == 0)
 1948                 goto out;
 1949 
 1950         /*
 1951          * Catch a userland access to the vector page mapped at 0x0
 1952          */
 1953         if (user && (pte & L2_S_PROT_U) == 0)
 1954                 goto out;
 1955         if (va == vector_page)
 1956                 goto out;
 1957 
 1958         pa = l2pte_pa(pte);
 1959 
 1960         if ((ftype & VM_PROT_WRITE) && (pte & L2_S_PROT_W) == 0) {
 1961                 /*
 1962                  * This looks like a good candidate for "page modified"
 1963                  * emulation...
 1964                  */
 1965                 struct pv_entry *pv;
 1966                 struct vm_page *pg;
 1967 
 1968                 /* Extract the physical address of the page */
 1969                 if ((pg = PHYS_TO_VM_PAGE(pa)) == NULL) {
 1970                         goto out;
 1971                 }
 1972                 /* Get the current flags for this page. */
 1973 
 1974                 pv = pmap_find_pv(pg, pm, va);
 1975                 if (pv == NULL) {
 1976                         goto out;
 1977                 }
 1978 
 1979                 /*
 1980                  * Do the flags say this page is writable? If not then it
 1981                  * is a genuine write fault. If yes then the write fault is
 1982                  * our fault as we did not reflect the write access in the
 1983                  * PTE. Now we know a write has occurred we can correct this
 1984                  * and also set the modified bit
 1985                  */
 1986                 if ((pv->pv_flags & PVF_WRITE) == 0) {
 1987                         goto out;
 1988                 }
 1989 
 1990                 pg->md.pvh_attrs |= PVF_REF | PVF_MOD;
 1991                 vm_page_dirty(pg);
 1992                 pv->pv_flags |= PVF_REF | PVF_MOD;
 1993 
 1994                 /* 
 1995                  * Re-enable write permissions for the page.  No need to call
 1996                  * pmap_fix_cache(), since this is just a
 1997                  * modified-emulation fault, and the PVF_WRITE bit isn't
 1998                  * changing. We've already set the cacheable bits based on
 1999                  * the assumption that we can write to this page.
 2000                  */
 2001                 *ptep = (pte & ~L2_TYPE_MASK) | L2_S_PROTO | L2_S_PROT_W;
 2002                 PTE_SYNC(ptep);
 2003                 rv = 1;
 2004         } else
 2005         if ((pte & L2_TYPE_MASK) == L2_TYPE_INV) {
 2006                 /*
 2007                  * This looks like a good candidate for "page referenced"
 2008                  * emulation.
 2009                  */
 2010                 struct pv_entry *pv;
 2011                 struct vm_page *pg;
 2012 
 2013                 /* Extract the physical address of the page */
 2014                 if ((pg = PHYS_TO_VM_PAGE(pa)) == NULL)
 2015                         goto out;
 2016                 /* Get the current flags for this page. */
 2017 
 2018                 pv = pmap_find_pv(pg, pm, va);
 2019                 if (pv == NULL)
 2020                         goto out;
 2021 
 2022                 pg->md.pvh_attrs |= PVF_REF;
 2023                 pv->pv_flags |= PVF_REF;
 2024 
 2025 
 2026                 *ptep = (pte & ~L2_TYPE_MASK) | L2_S_PROTO;
 2027                 PTE_SYNC(ptep);
 2028                 rv = 1;
 2029         }
 2030 
 2031         /*
 2032          * We know there is a valid mapping here, so simply
 2033          * fix up the L1 if necessary.
 2034          */
 2035         pl1pd = &pm->pm_l1->l1_kva[l1idx];
 2036         l1pd = l2b->l2b_phys | L1_C_DOM(pm->pm_domain) | L1_C_PROTO;
 2037         if (*pl1pd != l1pd) {
 2038                 *pl1pd = l1pd;
 2039                 PTE_SYNC(pl1pd);
 2040                 rv = 1;
 2041         }
 2042 
 2043 #ifdef CPU_SA110
 2044         /*
 2045          * There are bugs in the rev K SA110.  This is a check for one
 2046          * of them.
 2047          */
 2048         if (rv == 0 && curcpu()->ci_arm_cputype == CPU_ID_SA110 &&
 2049             curcpu()->ci_arm_cpurev < 3) {
 2050                 /* Always current pmap */
 2051                 if (l2pte_valid(pte)) {
 2052                         extern int kernel_debug;
 2053                         if (kernel_debug & 1) {
 2054                                 struct proc *p = curlwp->l_proc;
 2055                                 printf("prefetch_abort: page is already "
 2056                                     "mapped - pte=%p *pte=%08x\n", ptep, pte);
 2057                                 printf("prefetch_abort: pc=%08lx proc=%p "
 2058                                     "process=%s\n", va, p, p->p_comm);
 2059                                 printf("prefetch_abort: far=%08x fs=%x\n",
 2060                                     cpu_faultaddress(), cpu_faultstatus());
 2061                         }
 2062 #ifdef DDB
 2063                         if (kernel_debug & 2)
 2064                                 Debugger();
 2065 #endif
 2066                         rv = 1;
 2067                 }
 2068         }
 2069 #endif /* CPU_SA110 */
 2070 
 2071 #ifdef DEBUG
 2072         /*
 2073          * If 'rv == 0' at this point, it generally indicates that there is a
 2074          * stale TLB entry for the faulting address. This happens when two or
 2075          * more processes are sharing an L1. Since we don't flush the TLB on
 2076          * a context switch between such processes, we can take domain faults
 2077          * for mappings which exist at the same VA in both processes. EVEN IF
 2078          * WE'VE RECENTLY FIXED UP THE CORRESPONDING L1 in pmap_enter(), for
 2079          * example.
 2080          *
 2081          * This is extremely likely to happen if pmap_enter() updated the L1
 2082          * entry for a recently entered mapping. In this case, the TLB is
 2083          * flushed for the new mapping, but there may still be TLB entries for
 2084          * other mappings belonging to other processes in the 1MB range
 2085          * covered by the L1 entry.
 2086          *
 2087          * Since 'rv == 0', we know that the L1 already contains the correct
 2088          * value, so the fault must be due to a stale TLB entry.
 2089          *
 2090          * Since we always need to flush the TLB anyway in the case where we
 2091          * fixed up the L1, or frobbed the L2 PTE, we effectively deal with
 2092          * stale TLB entries dynamically.
 2093          *
 2094          * However, the above condition can ONLY happen if the current L1 is
 2095          * being shared. If it happens when the L1 is unshared, it indicates
 2096          * that other parts of the pmap are not doing their job WRT managing
 2097          * the TLB.
 2098          */
 2099         if (rv == 0 && pm->pm_l1->l1_domain_use_count == 1) {
 2100                 extern int last_fault_code;
 2101                 printf("fixup: pm %p, va 0x%lx, ftype %d - nothing to do!\n",
 2102                     pm, va, ftype);
 2103                 printf("fixup: l2 %p, l2b %p, ptep %p, pl1pd %p\n",
 2104                     l2, l2b, ptep, pl1pd);
 2105                 printf("fixup: pte 0x%x, l1pd 0x%x, last code 0x%x\n",
 2106                     pte, l1pd, last_fault_code);
 2107 #ifdef DDB
 2108                 Debugger();
 2109 #endif
 2110         }
 2111 #endif
 2112 
 2113         cpu_tlb_flushID_SE(va);
 2114         cpu_cpwait();
 2115 
 2116         rv = 1;
 2117 
 2118 out:
 2119         vm_page_unlock_queues();
 2120         PMAP_UNLOCK(pm);
 2121         return (rv);
 2122 }
 2123 
 2124 void
 2125 pmap_postinit(void)
 2126 {
 2127         struct l2_bucket *l2b;
 2128         struct l1_ttable *l1;
 2129         pd_entry_t *pl1pt;
 2130         pt_entry_t *ptep, pte;
 2131         vm_offset_t va, eva;
 2132         u_int loop, needed;
 2133         
 2134         needed = (maxproc / PMAP_DOMAINS) + ((maxproc % PMAP_DOMAINS) ? 1 : 0);
 2135         needed -= 1;
 2136         l1 = malloc(sizeof(*l1) * needed, M_VMPMAP, M_WAITOK);
 2137 
 2138         for (loop = 0; loop < needed; loop++, l1++) {
 2139                 /* Allocate a L1 page table */
 2140                 va = (vm_offset_t)contigmalloc(L1_TABLE_SIZE, M_VMPMAP, 0, 0x0,
 2141                     0xffffffff, L1_TABLE_SIZE, 0);
 2142 
 2143                 if (va == 0)
 2144                         panic("Cannot allocate L1 KVM");
 2145 
 2146                 eva = va + L1_TABLE_SIZE;
 2147                 pl1pt = (pd_entry_t *)va;
 2148                 
 2149                 while (va < eva) {
 2150                                 l2b = pmap_get_l2_bucket(pmap_kernel(), va);
 2151                                 ptep = &l2b->l2b_kva[l2pte_index(va)];
 2152                                 pte = *ptep;
 2153                                 pte = (pte & ~L2_S_CACHE_MASK) | pte_l2_s_cache_mode_pt;
 2154                                 *ptep = pte;
 2155                                 PTE_SYNC(ptep);
 2156                                 cpu_tlb_flushD_SE(va);
 2157                                 
 2158                                 va += PAGE_SIZE;
 2159                 }
 2160                 pmap_init_l1(l1, pl1pt);
 2161         }
 2162 
 2163 
 2164 #ifdef DEBUG
 2165         printf("pmap_postinit: Allocated %d static L1 descriptor tables\n",
 2166             needed);
 2167 #endif
 2168 }
 2169 
 2170 /*
 2171  * This is used to stuff certain critical values into the PCB where they
 2172  * can be accessed quickly from cpu_switch() et al.
 2173  */
 2174 void
 2175 pmap_set_pcb_pagedir(pmap_t pm, struct pcb *pcb)
 2176 {
 2177         struct l2_bucket *l2b;
 2178 
 2179         pcb->pcb_pagedir = pm->pm_l1->l1_physaddr;
 2180         pcb->pcb_dacr = (DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL * 2)) |
 2181             (DOMAIN_CLIENT << (pm->pm_domain * 2));
 2182 
 2183         if (vector_page < KERNBASE) {
 2184                 pcb->pcb_pl1vec = &pm->pm_l1->l1_kva[L1_IDX(vector_page)];
 2185                 l2b = pmap_get_l2_bucket(pm, vector_page);
 2186                 pcb->pcb_l1vec = l2b->l2b_phys | L1_C_PROTO |
 2187                     L1_C_DOM(pm->pm_domain) | L1_C_DOM(PMAP_DOMAIN_KERNEL);
 2188         } else
 2189                 pcb->pcb_pl1vec = NULL;
 2190 }
 2191 
 2192 void
 2193 pmap_activate(struct thread *td)
 2194 {
 2195         pmap_t pm;
 2196         struct pcb *pcb;
 2197 
 2198         pm = vmspace_pmap(td->td_proc->p_vmspace);
 2199         pcb = td->td_pcb;
 2200 
 2201         critical_enter();
 2202         pmap_set_pcb_pagedir(pm, pcb);
 2203 
 2204         if (td == curthread) {
 2205                 u_int cur_dacr, cur_ttb;
 2206 
 2207                 __asm __volatile("mrc p15, 0, %0, c2, c0, 0" : "=r"(cur_ttb));
 2208                 __asm __volatile("mrc p15, 0, %0, c3, c0, 0" : "=r"(cur_dacr));
 2209 
 2210                 cur_ttb &= ~(L1_TABLE_SIZE - 1);
 2211 
 2212                 if (cur_ttb == (u_int)pcb->pcb_pagedir &&
 2213                     cur_dacr == pcb->pcb_dacr) {
 2214                         /*
 2215                          * No need to switch address spaces.
 2216                          */
 2217                         critical_exit();
 2218                         return;
 2219                 }
 2220 
 2221 
 2222                 /*
 2223                  * We MUST, I repeat, MUST fix up the L1 entry corresponding
 2224                  * to 'vector_page' in the incoming L1 table before switching
 2225                  * to it otherwise subsequent interrupts/exceptions (including
 2226                  * domain faults!) will jump into hyperspace.
 2227                  */
 2228                 if (pcb->pcb_pl1vec) {
 2229 
 2230                         *pcb->pcb_pl1vec = pcb->pcb_l1vec;
 2231                         /*
 2232                          * Don't need to PTE_SYNC() at this point since
 2233                          * cpu_setttb() is about to flush both the cache
 2234                          * and the TLB.
 2235                          */
 2236                 }
 2237 
 2238                 cpu_domains(pcb->pcb_dacr);
 2239                 cpu_setttb(pcb->pcb_pagedir);
 2240         }
 2241         critical_exit();
 2242 }
 2243 
 2244 static int
 2245 pmap_set_pt_cache_mode(pd_entry_t *kl1, vm_offset_t va)
 2246 {
 2247         pd_entry_t *pdep, pde;
 2248         pt_entry_t *ptep, pte;
 2249         vm_offset_t pa;
 2250         int rv = 0;
 2251 
 2252         /*
 2253          * Make sure the descriptor itself has the correct cache mode
 2254          */
 2255         pdep = &kl1[L1_IDX(va)];
 2256         pde = *pdep;
 2257 
 2258         if (l1pte_section_p(pde)) {
 2259                 if ((pde & L1_S_CACHE_MASK) != pte_l1_s_cache_mode_pt) {
 2260                         *pdep = (pde & ~L1_S_CACHE_MASK) |
 2261                             pte_l1_s_cache_mode_pt;
 2262                         PTE_SYNC(pdep);
 2263                         cpu_dcache_wbinv_range((vm_offset_t)pdep,
 2264                             sizeof(*pdep));
 2265                         cpu_l2cache_wbinv_range((vm_offset_t)pdep,
 2266                             sizeof(*pdep));
 2267                         rv = 1;
 2268                 }
 2269         } else {
 2270                 pa = (vm_paddr_t)(pde & L1_C_ADDR_MASK);
 2271                 ptep = (pt_entry_t *)kernel_pt_lookup(pa);
 2272                 if (ptep == NULL)
 2273                         panic("pmap_bootstrap: No L2 for L2 @ va %p\n", ptep);
 2274 
 2275                 ptep = &ptep[l2pte_index(va)];
 2276                 pte = *ptep;
 2277                 if ((pte & L2_S_CACHE_MASK) != pte_l2_s_cache_mode_pt) {
 2278                         *ptep = (pte & ~L2_S_CACHE_MASK) |
 2279                             pte_l2_s_cache_mode_pt;
 2280                         PTE_SYNC(ptep);
 2281                         cpu_dcache_wbinv_range((vm_offset_t)ptep,
 2282                             sizeof(*ptep));
 2283                         cpu_l2cache_wbinv_range((vm_offset_t)ptep,
 2284                             sizeof(*ptep));
 2285                         rv = 1;
 2286                 }
 2287         }
 2288 
 2289         return (rv);
 2290 }
 2291 
 2292 static void
 2293 pmap_alloc_specials(vm_offset_t *availp, int pages, vm_offset_t *vap, 
 2294     pt_entry_t **ptep)
 2295 {
 2296         vm_offset_t va = *availp;
 2297         struct l2_bucket *l2b;
 2298 
 2299         if (ptep) {
 2300                 l2b = pmap_get_l2_bucket(pmap_kernel(), va);
 2301                 if (l2b == NULL)
 2302                         panic("pmap_alloc_specials: no l2b for 0x%x", va);
 2303 
 2304                 *ptep = &l2b->l2b_kva[l2pte_index(va)];
 2305         }
 2306 
 2307         *vap = va;
 2308         *availp = va + (PAGE_SIZE * pages);
 2309 }
 2310 
 2311 /*
 2312  *      Bootstrap the system enough to run with virtual memory.
 2313  *
 2314  *      On the arm this is called after mapping has already been enabled
 2315  *      and just syncs the pmap module with what has already been done.
 2316  *      [We can't call it easily with mapping off since the kernel is not
 2317  *      mapped with PA == VA, hence we would have to relocate every address
 2318  *      from the linked base (virtual) address "KERNBASE" to the actual
 2319  *      (physical) address starting relative to 0]
 2320  */
 2321 #define PMAP_STATIC_L2_SIZE 16
 2322 #ifdef ARM_USE_SMALL_ALLOC
 2323 extern struct mtx smallalloc_mtx;
 2324 #endif
 2325 
 2326 void
 2327 pmap_bootstrap(vm_offset_t firstaddr, vm_offset_t lastaddr, struct pv_addr *l1pt)
 2328 {
 2329         static struct l1_ttable static_l1;
 2330         static struct l2_dtable static_l2[PMAP_STATIC_L2_SIZE];
 2331         struct l1_ttable *l1 = &static_l1;
 2332         struct l2_dtable *l2;
 2333         struct l2_bucket *l2b;
 2334         pd_entry_t pde;
 2335         pd_entry_t *kernel_l1pt = (pd_entry_t *)l1pt->pv_va;
 2336         pt_entry_t *ptep;
 2337         vm_paddr_t pa;
 2338         vm_offset_t va;
 2339         vm_size_t size;
 2340         int l1idx, l2idx, l2next = 0;
 2341 
 2342         PDEBUG(1, printf("firstaddr = %08x, loadaddr = %08x\n",
 2343             firstaddr, loadaddr));
 2344         
 2345         virtual_avail = firstaddr;
 2346         kernel_pmap->pm_l1 = l1;
 2347         kernel_l1pa = l1pt->pv_pa;
 2348         
 2349         /*
 2350          * Scan the L1 translation table created by initarm() and create
 2351          * the required metadata for all valid mappings found in it.
 2352          */
 2353         for (l1idx = 0; l1idx < (L1_TABLE_SIZE / sizeof(pd_entry_t)); l1idx++) {
 2354                 pde = kernel_l1pt[l1idx];
 2355 
 2356                 /*
 2357                  * We're only interested in Coarse mappings.
 2358                  * pmap_extract() can deal with section mappings without
 2359                  * recourse to checking L2 metadata.
 2360                  */
 2361                 if ((pde & L1_TYPE_MASK) != L1_TYPE_C)
 2362                         continue;
 2363 
 2364                 /*
 2365                  * Lookup the KVA of this L2 descriptor table
 2366                  */
 2367                 pa = (vm_paddr_t)(pde & L1_C_ADDR_MASK);
 2368                 ptep = (pt_entry_t *)kernel_pt_lookup(pa);
 2369                 
 2370                 if (ptep == NULL) {
 2371                         panic("pmap_bootstrap: No L2 for va 0x%x, pa 0x%lx",
 2372                             (u_int)l1idx << L1_S_SHIFT, (long unsigned int)pa);
 2373                 }
 2374 
 2375                 /*
 2376                  * Fetch the associated L2 metadata structure.
 2377                  * Allocate a new one if necessary.
 2378                  */
 2379                 if ((l2 = kernel_pmap->pm_l2[L2_IDX(l1idx)]) == NULL) {
 2380                         if (l2next == PMAP_STATIC_L2_SIZE)
 2381                                 panic("pmap_bootstrap: out of static L2s");
 2382                         kernel_pmap->pm_l2[L2_IDX(l1idx)] = l2 = 
 2383                             &static_l2[l2next++];
 2384                 }
 2385 
 2386                 /*
 2387                  * One more L1 slot tracked...
 2388                  */
 2389                 l2->l2_occupancy++;
 2390 
 2391                 /*
 2392                  * Fill in the details of the L2 descriptor in the
 2393                  * appropriate bucket.
 2394                  */
 2395                 l2b = &l2->l2_bucket[L2_BUCKET(l1idx)];
 2396                 l2b->l2b_kva = ptep;
 2397                 l2b->l2b_phys = pa;
 2398                 l2b->l2b_l1idx = l1idx;
 2399 
 2400                 /*
 2401                  * Establish an initial occupancy count for this descriptor
 2402                  */
 2403                 for (l2idx = 0;
 2404                     l2idx < (L2_TABLE_SIZE_REAL / sizeof(pt_entry_t));
 2405                     l2idx++) {
 2406                         if ((ptep[l2idx] & L2_TYPE_MASK) != L2_TYPE_INV) {
 2407                                 l2b->l2b_occupancy++;
 2408                         }
 2409                 }
 2410 
 2411                 /*
 2412                  * Make sure the descriptor itself has the correct cache mode.
 2413                  * If not, fix it, but whine about the problem. Port-meisters
 2414                  * should consider this a clue to fix up their initarm()
 2415                  * function. :)
 2416                  */
 2417                 if (pmap_set_pt_cache_mode(kernel_l1pt, (vm_offset_t)ptep)) {
 2418                         printf("pmap_bootstrap: WARNING! wrong cache mode for "
 2419                             "L2 pte @ %p\n", ptep);
 2420                 }
 2421         }
 2422 
 2423         
 2424         /*
 2425          * Ensure the primary (kernel) L1 has the correct cache mode for
 2426          * a page table. Bitch if it is not correctly set.
 2427          */
 2428         for (va = (vm_offset_t)kernel_l1pt;
 2429             va < ((vm_offset_t)kernel_l1pt + L1_TABLE_SIZE); va += PAGE_SIZE) {
 2430                 if (pmap_set_pt_cache_mode(kernel_l1pt, va))
 2431                         printf("pmap_bootstrap: WARNING! wrong cache mode for "
 2432                             "primary L1 @ 0x%x\n", va);
 2433         }
 2434 
 2435         cpu_dcache_wbinv_all();
 2436         cpu_l2cache_wbinv_all();
 2437         cpu_tlb_flushID();
 2438         cpu_cpwait();
 2439 
 2440         PMAP_LOCK_INIT(kernel_pmap);
 2441         kernel_pmap->pm_active = -1;
 2442         kernel_pmap->pm_domain = PMAP_DOMAIN_KERNEL;
 2443         TAILQ_INIT(&kernel_pmap->pm_pvlist);
 2444         
 2445         /*
 2446          * Reserve some special page table entries/VA space for temporary
 2447          * mapping of pages.
 2448          */
 2449 #define SYSMAP(c, p, v, n)                                              \
 2450     v = (c)va; va += ((n)*PAGE_SIZE); p = pte; pte += (n);
 2451     
 2452         pmap_alloc_specials(&virtual_avail, 1, &csrcp, &csrc_pte);
 2453         pmap_set_pt_cache_mode(kernel_l1pt, (vm_offset_t)csrc_pte);
 2454         pmap_alloc_specials(&virtual_avail, 1, &cdstp, &cdst_pte);
 2455         pmap_set_pt_cache_mode(kernel_l1pt, (vm_offset_t)cdst_pte);
 2456         size = ((lastaddr - pmap_curmaxkvaddr) + L1_S_OFFSET) / L1_S_SIZE;
 2457         pmap_alloc_specials(&virtual_avail,
 2458             round_page(size * L2_TABLE_SIZE_REAL) / PAGE_SIZE,
 2459             &pmap_kernel_l2ptp_kva, NULL);
 2460         
 2461         size = (size + (L2_BUCKET_SIZE - 1)) / L2_BUCKET_SIZE;
 2462         pmap_alloc_specials(&virtual_avail,
 2463             round_page(size * sizeof(struct l2_dtable)) / PAGE_SIZE,
 2464             &pmap_kernel_l2dtable_kva, NULL);
 2465 
 2466         pmap_alloc_specials(&virtual_avail,
 2467             1, (vm_offset_t*)&_tmppt, NULL);
 2468         pmap_alloc_specials(&virtual_avail,
 2469             MAXDUMPPGS, (vm_offset_t *)&crashdumpmap, NULL);
 2470         SLIST_INIT(&l1_list);
 2471         TAILQ_INIT(&l1_lru_list);
 2472         mtx_init(&l1_lru_lock, "l1 list lock", NULL, MTX_DEF);
 2473         pmap_init_l1(l1, kernel_l1pt);
 2474         cpu_dcache_wbinv_all();
 2475         cpu_l2cache_wbinv_all();
 2476 
 2477         virtual_avail = round_page(virtual_avail);
 2478         virtual_end = lastaddr;
 2479         kernel_vm_end = pmap_curmaxkvaddr;
 2480         arm_nocache_startaddr = lastaddr;
 2481         mtx_init(&cmtx, "TMP mappings mtx", NULL, MTX_DEF);
 2482 
 2483 #ifdef ARM_USE_SMALL_ALLOC
 2484         mtx_init(&smallalloc_mtx, "Small alloc page list", NULL, MTX_DEF);
 2485         arm_init_smallalloc();
 2486 #endif
 2487         pmap_set_pcb_pagedir(kernel_pmap, thread0.td_pcb);
 2488 }
 2489 
 2490 /***************************************************
 2491  * Pmap allocation/deallocation routines.
 2492  ***************************************************/
 2493 
 2494 /*
 2495  * Release any resources held by the given physical map.
 2496  * Called when a pmap initialized by pmap_pinit is being released.
 2497  * Should only be called if the map contains no valid mappings.
 2498  */
 2499 void
 2500 pmap_release(pmap_t pmap)
 2501 {
 2502         struct pcb *pcb;
 2503         
 2504         pmap_idcache_wbinv_all(pmap);
 2505         cpu_l2cache_wbinv_all();
 2506         pmap_tlb_flushID(pmap);
 2507         cpu_cpwait();
 2508         if (vector_page < KERNBASE) {
 2509                 struct pcb *curpcb = PCPU_GET(curpcb);
 2510                 pcb = thread0.td_pcb;
 2511                 if (pmap_is_current(pmap)) {
 2512                         /*
 2513                          * Frob the L1 entry corresponding to the vector
 2514                          * page so that it contains the kernel pmap's domain
 2515                          * number. This will ensure pmap_remove() does not
 2516                          * pull the current vector page out from under us.
 2517                          */
 2518                         critical_enter();
 2519                         *pcb->pcb_pl1vec = pcb->pcb_l1vec;
 2520                         cpu_domains(pcb->pcb_dacr);
 2521                         cpu_setttb(pcb->pcb_pagedir);
 2522                         critical_exit();
 2523                 }
 2524                 pmap_remove(pmap, vector_page, vector_page + PAGE_SIZE);
 2525                 /*
 2526                  * Make sure cpu_switch(), et al, DTRT. This is safe to do
 2527                  * since this process has no remaining mappings of its own.
 2528                  */
 2529                 curpcb->pcb_pl1vec = pcb->pcb_pl1vec;
 2530                 curpcb->pcb_l1vec = pcb->pcb_l1vec;
 2531                 curpcb->pcb_dacr = pcb->pcb_dacr;
 2532                 curpcb->pcb_pagedir = pcb->pcb_pagedir;
 2533 
 2534         }
 2535         pmap_free_l1(pmap);
 2536         PMAP_LOCK_DESTROY(pmap);
 2537         
 2538         dprintf("pmap_release()\n");
 2539 }
 2540 
 2541 
 2542 
 2543 /*
 2544  * Helper function for pmap_grow_l2_bucket()
 2545  */
 2546 static __inline int
 2547 pmap_grow_map(vm_offset_t va, pt_entry_t cache_mode, vm_paddr_t *pap)
 2548 {
 2549         struct l2_bucket *l2b;
 2550         pt_entry_t *ptep;
 2551         vm_paddr_t pa;
 2552         struct vm_page *pg;
 2553         
 2554         pg = vm_page_alloc(NULL, 0, VM_ALLOC_NOOBJ | VM_ALLOC_WIRED);
 2555         if (pg == NULL)
 2556                 return (1);
 2557         pa = VM_PAGE_TO_PHYS(pg);
 2558 
 2559         if (pap)
 2560                 *pap = pa;
 2561 
 2562         l2b = pmap_get_l2_bucket(pmap_kernel(), va);
 2563 
 2564         ptep = &l2b->l2b_kva[l2pte_index(va)];
 2565         *ptep = L2_S_PROTO | pa | cache_mode |
 2566             L2_S_PROT(PTE_KERNEL, VM_PROT_READ | VM_PROT_WRITE);
 2567         PTE_SYNC(ptep);
 2568         return (0);
 2569 }
 2570 
 2571 /*
 2572  * This is the same as pmap_alloc_l2_bucket(), except that it is only
 2573  * used by pmap_growkernel().
 2574  */
 2575 static __inline struct l2_bucket *
 2576 pmap_grow_l2_bucket(pmap_t pm, vm_offset_t va)
 2577 {
 2578         struct l2_dtable *l2;
 2579         struct l2_bucket *l2b;
 2580         struct l1_ttable *l1;
 2581         pd_entry_t *pl1pd;
 2582         u_short l1idx;
 2583         vm_offset_t nva;
 2584 
 2585         l1idx = L1_IDX(va);
 2586 
 2587         if ((l2 = pm->pm_l2[L2_IDX(l1idx)]) == NULL) {
 2588                 /*
 2589                  * No mapping at this address, as there is
 2590                  * no entry in the L1 table.
 2591                  * Need to allocate a new l2_dtable.
 2592                  */
 2593                 nva = pmap_kernel_l2dtable_kva;
 2594                 if ((nva & PAGE_MASK) == 0) {
 2595                         /*
 2596                          * Need to allocate a backing page
 2597                          */
 2598                         if (pmap_grow_map(nva, pte_l2_s_cache_mode, NULL))
 2599                                 return (NULL);
 2600                 }
 2601 
 2602                 l2 = (struct l2_dtable *)nva;
 2603                 nva += sizeof(struct l2_dtable);
 2604 
 2605                 if ((nva & PAGE_MASK) < (pmap_kernel_l2dtable_kva & 
 2606                     PAGE_MASK)) {
 2607                         /*
 2608                          * The new l2_dtable straddles a page boundary.
 2609                          * Map in another page to cover it.
 2610                          */
 2611                         if (pmap_grow_map(nva, pte_l2_s_cache_mode, NULL))
 2612                                 return (NULL);
 2613                 }
 2614 
 2615                 pmap_kernel_l2dtable_kva = nva;
 2616 
 2617                 /*
 2618                  * Link it into the parent pmap
 2619                  */
 2620                 pm->pm_l2[L2_IDX(l1idx)] = l2;
 2621                 memset(l2, 0, sizeof(*l2));
 2622         }
 2623 
 2624         l2b = &l2->l2_bucket[L2_BUCKET(l1idx)];
 2625 
 2626         /*
 2627          * Fetch pointer to the L2 page table associated with the address.
 2628          */
 2629         if (l2b->l2b_kva == NULL) {
 2630                 pt_entry_t *ptep;
 2631 
 2632                 /*
 2633                  * No L2 page table has been allocated. Chances are, this
 2634                  * is because we just allocated the l2_dtable, above.
 2635                  */
 2636                 nva = pmap_kernel_l2ptp_kva;
 2637                 ptep = (pt_entry_t *)nva;
 2638                 if ((nva & PAGE_MASK) == 0) {
 2639                         /*
 2640                          * Need to allocate a backing page
 2641                          */
 2642                         if (pmap_grow_map(nva, pte_l2_s_cache_mode_pt,
 2643                             &pmap_kernel_l2ptp_phys))
 2644                                 return (NULL);
 2645                         PTE_SYNC_RANGE(ptep, PAGE_SIZE / sizeof(pt_entry_t));
 2646                 }
 2647                 memset(ptep, 0, L2_TABLE_SIZE_REAL);
 2648                 l2->l2_occupancy++;
 2649                 l2b->l2b_kva = ptep;
 2650                 l2b->l2b_l1idx = l1idx;
 2651                 l2b->l2b_phys = pmap_kernel_l2ptp_phys;
 2652 
 2653                 pmap_kernel_l2ptp_kva += L2_TABLE_SIZE_REAL;
 2654                 pmap_kernel_l2ptp_phys += L2_TABLE_SIZE_REAL;
 2655         }
 2656 
 2657         /* Distribute new L1 entry to all other L1s */
 2658         SLIST_FOREACH(l1, &l1_list, l1_link) {
 2659                         pl1pd = &l1->l1_kva[L1_IDX(va)];
 2660                         *pl1pd = l2b->l2b_phys | L1_C_DOM(PMAP_DOMAIN_KERNEL) |
 2661                             L1_C_PROTO;
 2662                         PTE_SYNC(pl1pd);
 2663         }
 2664 
 2665         return (l2b);
 2666 }
 2667 
 2668 
 2669 /*
 2670  * grow the number of kernel page table entries, if needed
 2671  */
 2672 void
 2673 pmap_growkernel(vm_offset_t addr)
 2674 {
 2675         pmap_t kpm = pmap_kernel();
 2676 
 2677         if (addr <= pmap_curmaxkvaddr)
 2678                 return;         /* we are OK */
 2679 
 2680         /*
 2681          * whoops!   we need to add kernel PTPs
 2682          */
 2683 
 2684         /* Map 1MB at a time */
 2685         for (; pmap_curmaxkvaddr < addr; pmap_curmaxkvaddr += L1_S_SIZE)
 2686                 pmap_grow_l2_bucket(kpm, pmap_curmaxkvaddr);
 2687 
 2688         /*
 2689          * flush out the cache, expensive but growkernel will happen so
 2690          * rarely
 2691          */
 2692         cpu_dcache_wbinv_all();
 2693         cpu_l2cache_wbinv_all();
 2694         cpu_tlb_flushD();
 2695         cpu_cpwait();
 2696         kernel_vm_end = pmap_curmaxkvaddr;
 2697 }
 2698 
 2699 
 2700 /*
 2701  * Remove all pages from specified address space
 2702  * this aids process exit speeds.  Also, this code
 2703  * is special cased for current process only, but
 2704  * can have the more generic (and slightly slower)
 2705  * mode enabled.  This is much faster than pmap_remove
 2706  * in the case of running down an entire address space.
 2707  */
 2708 void
 2709 pmap_remove_pages(pmap_t pmap)
 2710 {
 2711         struct pv_entry *pv, *npv;
 2712         struct l2_bucket *l2b = NULL;
 2713         vm_page_t m;
 2714         pt_entry_t *pt;
 2715         
 2716         vm_page_lock_queues();
 2717         PMAP_LOCK(pmap);
 2718         cpu_idcache_wbinv_all();
 2719         cpu_l2cache_wbinv_all();
 2720         for (pv = TAILQ_FIRST(&pmap->pm_pvlist); pv; pv = npv) {
 2721                 if (pv->pv_flags & PVF_WIRED || pv->pv_flags & PVF_UNMAN) {
 2722                         /* Cannot remove wired or unmanaged pages now. */
 2723                         npv = TAILQ_NEXT(pv, pv_plist);
 2724                         continue;
 2725                 }
 2726                 pmap->pm_stats.resident_count--;
 2727                 l2b = pmap_get_l2_bucket(pmap, pv->pv_va);
 2728                 KASSERT(l2b != NULL, ("No L2 bucket in pmap_remove_pages"));
 2729                 pt = &l2b->l2b_kva[l2pte_index(pv->pv_va)];
 2730                 m = PHYS_TO_VM_PAGE(*pt & L2_ADDR_MASK);
 2731 #ifdef ARM_USE_SMALL_ALLOC
 2732                 KASSERT((vm_offset_t)m >= alloc_firstaddr, ("Trying to access non-existent page va %x pte %x", pv->pv_va, *pt));
 2733 #else
 2734                 KASSERT((vm_offset_t)m >= KERNBASE, ("Trying to access non-existent page va %x pte %x", pv->pv_va, *pt));
 2735 #endif
 2736                 *pt = 0;
 2737                 PTE_SYNC(pt);
 2738                 npv = TAILQ_NEXT(pv, pv_plist);
 2739                 pmap_nuke_pv(m, pmap, pv);
 2740                 if (TAILQ_EMPTY(&m->md.pv_list))
 2741                         vm_page_flag_clear(m, PG_WRITEABLE);
 2742                 pmap_free_pv_entry(pv);
 2743                 pmap_free_l2_bucket(pmap, l2b, 1);
 2744         }
 2745         vm_page_unlock_queues();
 2746         cpu_tlb_flushID();
 2747         cpu_cpwait();
 2748         PMAP_UNLOCK(pmap);
 2749 }
 2750 
 2751 
 2752 /***************************************************
 2753  * Low level mapping routines.....
 2754  ***************************************************/
 2755 
 2756 #ifdef ARM_HAVE_SUPERSECTIONS
 2757 /* Map a super section into the KVA. */
 2758 
 2759 void
 2760 pmap_kenter_supersection(vm_offset_t va, uint64_t pa, int flags)
 2761 {
 2762         pd_entry_t pd = L1_S_PROTO | L1_S_SUPERSEC | (pa & L1_SUP_FRAME) |
 2763             (((pa >> 32) & 0xf) << 20) | L1_S_PROT(PTE_KERNEL,
 2764             VM_PROT_READ|VM_PROT_WRITE) | L1_S_DOM(PMAP_DOMAIN_KERNEL);
 2765         struct l1_ttable *l1;   
 2766         vm_offset_t va0, va_end;
 2767 
 2768         KASSERT(((va | pa) & L1_SUP_OFFSET) == 0,
 2769             ("Not a valid super section mapping"));
 2770         if (flags & SECTION_CACHE)
 2771                 pd |= pte_l1_s_cache_mode;
 2772         else if (flags & SECTION_PT)
 2773                 pd |= pte_l1_s_cache_mode_pt;
 2774         va0 = va & L1_SUP_FRAME;
 2775         va_end = va + L1_SUP_SIZE;
 2776         SLIST_FOREACH(l1, &l1_list, l1_link) {
 2777                 va = va0;
 2778                 for (; va < va_end; va += L1_S_SIZE) {
 2779                         l1->l1_kva[L1_IDX(va)] = pd;
 2780                         PTE_SYNC(&l1->l1_kva[L1_IDX(va)]);
 2781                 }
 2782         }
 2783 }
 2784 #endif
 2785 
 2786 /* Map a section into the KVA. */
 2787 
 2788 void
 2789 pmap_kenter_section(vm_offset_t va, vm_offset_t pa, int flags)
 2790 {
 2791         pd_entry_t pd = L1_S_PROTO | pa | L1_S_PROT(PTE_KERNEL,
 2792             VM_PROT_READ|VM_PROT_WRITE) | L1_S_DOM(PMAP_DOMAIN_KERNEL);
 2793         struct l1_ttable *l1;
 2794 
 2795         KASSERT(((va | pa) & L1_S_OFFSET) == 0,
 2796             ("Not a valid section mapping"));
 2797         if (flags & SECTION_CACHE)
 2798                 pd |= pte_l1_s_cache_mode;
 2799         else if (flags & SECTION_PT)
 2800                 pd |= pte_l1_s_cache_mode_pt;
 2801         SLIST_FOREACH(l1, &l1_list, l1_link) {
 2802                 l1->l1_kva[L1_IDX(va)] = pd;
 2803                 PTE_SYNC(&l1->l1_kva[L1_IDX(va)]);
 2804         }
 2805 }
 2806 
 2807 /*
 2808  * Make a temporary mapping for a physical address.  This is only intended
 2809  * to be used for panic dumps.
 2810  */
 2811 void *
 2812 pmap_kenter_temp(vm_paddr_t pa, int i)
 2813 {
 2814         vm_offset_t va;
 2815 
 2816         va = (vm_offset_t)crashdumpmap + (i * PAGE_SIZE);
 2817         pmap_kenter(va, pa);
 2818         return ((void *)crashdumpmap);
 2819 }
 2820 
 2821 /*
 2822  * add a wired page to the kva
 2823  * note that in order for the mapping to take effect -- you
 2824  * should do a invltlb after doing the pmap_kenter...
 2825  */
 2826 static PMAP_INLINE void
 2827 pmap_kenter_internal(vm_offset_t va, vm_offset_t pa, int flags)
 2828 {
 2829         struct l2_bucket *l2b;
 2830         pt_entry_t *pte;
 2831         pt_entry_t opte;
 2832         struct pv_entry *pve;
 2833         vm_page_t m;
 2834 
 2835         PDEBUG(1, printf("pmap_kenter: va = %08x, pa = %08x\n",
 2836             (uint32_t) va, (uint32_t) pa));
 2837 
 2838 
 2839         l2b = pmap_get_l2_bucket(pmap_kernel(), va);
 2840         if (l2b == NULL)
 2841                 l2b = pmap_grow_l2_bucket(pmap_kernel(), va);
 2842         KASSERT(l2b != NULL, ("No L2 Bucket"));
 2843         pte = &l2b->l2b_kva[l2pte_index(va)];
 2844         opte = *pte;
 2845         PDEBUG(1, printf("pmap_kenter: pte = %08x, opte = %08x, npte = %08x\n",
 2846             (uint32_t) pte, opte, *pte));
 2847         if (l2pte_valid(opte)) {
 2848                 pmap_kremove(va);
 2849         } else {
 2850                 if (opte == 0)
 2851                         l2b->l2b_occupancy++;
 2852         }
 2853         *pte = L2_S_PROTO | pa | L2_S_PROT(PTE_KERNEL, 
 2854             VM_PROT_READ | VM_PROT_WRITE);
 2855         if (flags & KENTER_CACHE)
 2856                 *pte |= pte_l2_s_cache_mode;
 2857         if (flags & KENTER_USER)
 2858                 *pte |= L2_S_PROT_U;
 2859         PTE_SYNC(pte);
 2860 
 2861                 /* kernel direct mappings can be shared, so use a pv_entry
 2862                  * to ensure proper caching.
 2863                  *
 2864                  * The pvzone is used to delay the recording of kernel
 2865                  * mappings until the VM is running.
 2866                  * 
 2867                  * This expects the physical memory to have vm_page_array entry.
 2868                  */
 2869         if (pvzone != NULL && (m = vm_phys_paddr_to_vm_page(pa))) {
 2870                 vm_page_lock_queues();
 2871                 if (!TAILQ_EMPTY(&m->md.pv_list) || m->md.pv_kva) {
 2872                         /* release vm_page lock for pv_entry UMA */
 2873                         vm_page_unlock_queues();
 2874                         if ((pve = pmap_get_pv_entry()) == NULL)
 2875                                 panic("pmap_kenter_internal: no pv entries");   
 2876                         vm_page_lock_queues();
 2877                         PMAP_LOCK(pmap_kernel());
 2878                         pmap_enter_pv(m, pve, pmap_kernel(), va,
 2879                             PVF_WRITE | PVF_UNMAN);
 2880                         pmap_fix_cache(m, pmap_kernel(), va);
 2881                         PMAP_UNLOCK(pmap_kernel());
 2882                 } else {
 2883                         m->md.pv_kva = va;
 2884                 }
 2885                 vm_page_unlock_queues();
 2886         }
 2887 }
 2888 
 2889 void
 2890 pmap_kenter(vm_offset_t va, vm_paddr_t pa)
 2891 {
 2892         pmap_kenter_internal(va, pa, KENTER_CACHE);
 2893 }
 2894 
 2895 void
 2896 pmap_kenter_nocache(vm_offset_t va, vm_paddr_t pa)
 2897 {
 2898 
 2899         pmap_kenter_internal(va, pa, 0);
 2900 }
 2901 
 2902 void
 2903 pmap_kenter_user(vm_offset_t va, vm_paddr_t pa)
 2904 {
 2905 
 2906         pmap_kenter_internal(va, pa, KENTER_CACHE|KENTER_USER);
 2907         /*
 2908          * Call pmap_fault_fixup now, to make sure we'll have no exception
 2909          * at the first use of the new address, or bad things will happen,
 2910          * as we use one of these addresses in the exception handlers.
 2911          */
 2912         pmap_fault_fixup(pmap_kernel(), va, VM_PROT_READ|VM_PROT_WRITE, 1);
 2913 }
 2914 
 2915 /*
 2916  * remove a page from the kernel pagetables
 2917  */
 2918 void
 2919 pmap_kremove(vm_offset_t va)
 2920 {
 2921         struct l2_bucket *l2b;
 2922         pt_entry_t *pte, opte;
 2923         struct pv_entry *pve;
 2924         vm_page_t m;
 2925         vm_offset_t pa;
 2926                 
 2927         l2b = pmap_get_l2_bucket(pmap_kernel(), va);
 2928         if (!l2b)
 2929                 return;
 2930         KASSERT(l2b != NULL, ("No L2 Bucket"));
 2931         pte = &l2b->l2b_kva[l2pte_index(va)];
 2932         opte = *pte;
 2933         if (l2pte_valid(opte)) {
 2934                         /* pa = vtophs(va) taken from pmap_extract() */
 2935                 switch (opte & L2_TYPE_MASK) {
 2936                 case L2_TYPE_L:
 2937                         pa = (opte & L2_L_FRAME) | (va & L2_L_OFFSET);
 2938                         break;
 2939                 default:
 2940                         pa = (opte & L2_S_FRAME) | (va & L2_S_OFFSET);
 2941                         break;
 2942                 }
 2943                         /* note: should never have to remove an allocation
 2944                          * before the pvzone is initialized.
 2945                          */
 2946                 vm_page_lock_queues();
 2947                 PMAP_LOCK(pmap_kernel());
 2948                 if (pvzone != NULL && (m = vm_phys_paddr_to_vm_page(pa)) &&
 2949                     (pve = pmap_remove_pv(m, pmap_kernel(), va)))
 2950                         pmap_free_pv_entry(pve); 
 2951                 PMAP_UNLOCK(pmap_kernel());
 2952                 vm_page_unlock_queues();
 2953                 va = va & ~PAGE_MASK;
 2954                 cpu_dcache_wbinv_range(va, PAGE_SIZE);
 2955                 cpu_l2cache_wbinv_range(va, PAGE_SIZE);
 2956                 cpu_tlb_flushD_SE(va);
 2957                 cpu_cpwait();
 2958                 *pte = 0;
 2959         }
 2960 }
 2961 
 2962 
 2963 /*
 2964  *      Used to map a range of physical addresses into kernel
 2965  *      virtual address space.
 2966  *
 2967  *      The value passed in '*virt' is a suggested virtual address for
 2968  *      the mapping. Architectures which can support a direct-mapped
 2969  *      physical to virtual region can return the appropriate address
 2970  *      within that region, leaving '*virt' unchanged. Other
 2971  *      architectures should map the pages starting at '*virt' and
 2972  *      update '*virt' with the first usable address after the mapped
 2973  *      region.
 2974  */
 2975 vm_offset_t
 2976 pmap_map(vm_offset_t *virt, vm_offset_t start, vm_offset_t end, int prot)
 2977 {
 2978 #ifdef ARM_USE_SMALL_ALLOC
 2979         return (arm_ptovirt(start));
 2980 #else
 2981         vm_offset_t sva = *virt;
 2982         vm_offset_t va = sva;
 2983 
 2984         PDEBUG(1, printf("pmap_map: virt = %08x, start = %08x, end = %08x, "
 2985             "prot = %d\n", (uint32_t) *virt, (uint32_t) start, (uint32_t) end,
 2986             prot));
 2987             
 2988         while (start < end) {
 2989                 pmap_kenter(va, start);
 2990                 va += PAGE_SIZE;
 2991                 start += PAGE_SIZE;
 2992         }
 2993         *virt = va;
 2994         return (sva);
 2995 #endif
 2996 }
 2997 
 2998 static void
 2999 pmap_wb_page(vm_page_t m)
 3000 {
 3001         struct pv_entry *pv;
 3002 
 3003         TAILQ_FOREACH(pv, &m->md.pv_list, pv_list)
 3004             pmap_dcache_wb_range(pv->pv_pmap, pv->pv_va, PAGE_SIZE, FALSE,
 3005                 (pv->pv_flags & PVF_WRITE) == 0);
 3006 }
 3007 
 3008 static void
 3009 pmap_inv_page(vm_page_t m)
 3010 {
 3011         struct pv_entry *pv;
 3012 
 3013         TAILQ_FOREACH(pv, &m->md.pv_list, pv_list)
 3014             pmap_dcache_wb_range(pv->pv_pmap, pv->pv_va, PAGE_SIZE, TRUE, TRUE);
 3015 }
 3016 /*
 3017  * Add a list of wired pages to the kva
 3018  * this routine is only used for temporary
 3019  * kernel mappings that do not need to have
 3020  * page modification or references recorded.
 3021  * Note that old mappings are simply written
 3022  * over.  The page *must* be wired.
 3023  */
 3024 void
 3025 pmap_qenter(vm_offset_t va, vm_page_t *m, int count)
 3026 {
 3027         int i;
 3028 
 3029         for (i = 0; i < count; i++) {
 3030                 pmap_wb_page(m[i]);
 3031                 pmap_kenter_internal(va, VM_PAGE_TO_PHYS(m[i]), 
 3032                     KENTER_CACHE);
 3033                 va += PAGE_SIZE;
 3034         }
 3035 }
 3036 
 3037 
 3038 /*
 3039  * this routine jerks page mappings from the
 3040  * kernel -- it is meant only for temporary mappings.
 3041  */
 3042 void
 3043 pmap_qremove(vm_offset_t va, int count)
 3044 {
 3045         vm_paddr_t pa;
 3046         int i;
 3047 
 3048         for (i = 0; i < count; i++) {
 3049                 pa = vtophys(va);
 3050                 if (pa) {
 3051                         pmap_inv_page(PHYS_TO_VM_PAGE(pa));
 3052                         pmap_kremove(va);
 3053                 }
 3054                 va += PAGE_SIZE;
 3055         }
 3056 }
 3057 
 3058 
 3059 /*
 3060  * pmap_object_init_pt preloads the ptes for a given object
 3061  * into the specified pmap.  This eliminates the blast of soft
 3062  * faults on process startup and immediately after an mmap.
 3063  */
 3064 void
 3065 pmap_object_init_pt(pmap_t pmap, vm_offset_t addr, vm_object_t object,
 3066     vm_pindex_t pindex, vm_size_t size)
 3067 {
 3068 
 3069         VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
 3070         KASSERT(object->type == OBJT_DEVICE || object->type == OBJT_SG,
 3071             ("pmap_object_init_pt: non-device object"));
 3072 }
 3073 
 3074 
 3075 /*
 3076  *      pmap_is_prefaultable:
 3077  *
 3078  *      Return whether or not the specified virtual address is elgible
 3079  *      for prefault.
 3080  */
 3081 boolean_t
 3082 pmap_is_prefaultable(pmap_t pmap, vm_offset_t addr)
 3083 {
 3084         pd_entry_t *pde;
 3085         pt_entry_t *pte;
 3086 
 3087         if (!pmap_get_pde_pte(pmap, addr, &pde, &pte))
 3088                 return (FALSE);
 3089         KASSERT(pte != NULL, ("Valid mapping but no pte ?"));
 3090         if (*pte == 0)
 3091                 return (TRUE);
 3092         return (FALSE);
 3093 }
 3094 
 3095 /*
 3096  * Fetch pointers to the PDE/PTE for the given pmap/VA pair.
 3097  * Returns TRUE if the mapping exists, else FALSE.
 3098  *
 3099  * NOTE: This function is only used by a couple of arm-specific modules.
 3100  * It is not safe to take any pmap locks here, since we could be right
 3101  * in the middle of debugging the pmap anyway...
 3102  *
 3103  * It is possible for this routine to return FALSE even though a valid
 3104  * mapping does exist. This is because we don't lock, so the metadata
 3105  * state may be inconsistent.
 3106  *
 3107  * NOTE: We can return a NULL *ptp in the case where the L1 pde is
 3108  * a "section" mapping.
 3109  */
 3110 boolean_t
 3111 pmap_get_pde_pte(pmap_t pm, vm_offset_t va, pd_entry_t **pdp, pt_entry_t **ptp)
 3112 {
 3113         struct l2_dtable *l2;
 3114         pd_entry_t *pl1pd, l1pd;
 3115         pt_entry_t *ptep;
 3116         u_short l1idx;
 3117 
 3118         if (pm->pm_l1 == NULL)
 3119                 return (FALSE);
 3120 
 3121         l1idx = L1_IDX(va);
 3122         *pdp = pl1pd = &pm->pm_l1->l1_kva[l1idx];
 3123         l1pd = *pl1pd;
 3124 
 3125         if (l1pte_section_p(l1pd)) {
 3126                 *ptp = NULL;
 3127                 return (TRUE);
 3128         }
 3129 
 3130         if (pm->pm_l2 == NULL)
 3131                 return (FALSE);
 3132 
 3133         l2 = pm->pm_l2[L2_IDX(l1idx)];
 3134 
 3135         if (l2 == NULL ||
 3136             (ptep = l2->l2_bucket[L2_BUCKET(l1idx)].l2b_kva) == NULL) {
 3137                 return (FALSE);
 3138         }
 3139 
 3140         *ptp = &ptep[l2pte_index(va)];
 3141         return (TRUE);
 3142 }
 3143 
 3144 /*
 3145  *      Routine:        pmap_remove_all
 3146  *      Function:
 3147  *              Removes this physical page from
 3148  *              all physical maps in which it resides.
 3149  *              Reflects back modify bits to the pager.
 3150  *
 3151  *      Notes:
 3152  *              Original versions of this routine were very
 3153  *              inefficient because they iteratively called
 3154  *              pmap_remove (slow...)
 3155  */
 3156 void
 3157 pmap_remove_all(vm_page_t m)
 3158 {
 3159         pv_entry_t pv;
 3160         pt_entry_t *ptep;
 3161         struct l2_bucket *l2b;
 3162         boolean_t flush = FALSE;
 3163         pmap_t curpm;
 3164         int flags = 0;
 3165 
 3166 #if defined(PMAP_DEBUG)
 3167         /*
 3168          * XXX This makes pmap_remove_all() illegal for non-managed pages!
 3169          */
 3170         if (m->flags & PG_FICTITIOUS) {
 3171                 panic("pmap_remove_all: illegal for unmanaged page, va: 0x%x", VM_PAGE_TO_PHYS(m));
 3172         }
 3173 #endif
 3174 
 3175         if (TAILQ_EMPTY(&m->md.pv_list))
 3176                 return;
 3177         mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 3178         pmap_remove_write(m);
 3179         curpm = vmspace_pmap(curproc->p_vmspace);
 3180         while ((pv = TAILQ_FIRST(&m->md.pv_list)) != NULL) {
 3181                 if (flush == FALSE && (pv->pv_pmap == curpm ||
 3182                     pv->pv_pmap == pmap_kernel()))
 3183                         flush = TRUE;
 3184 
 3185                 PMAP_LOCK(pv->pv_pmap);
 3186                 /*
 3187                  * Cached contents were written-back in pmap_remove_write(),
 3188                  * but we still have to invalidate the cache entry to make
 3189                  * sure stale data are not retrieved when another page will be
 3190                  * mapped under this virtual address.
 3191                  */
 3192                 if (pmap_is_current(pv->pv_pmap)) {
 3193                         cpu_dcache_inv_range(pv->pv_va, PAGE_SIZE);
 3194                         if (pmap_has_valid_mapping(pv->pv_pmap, pv->pv_va))
 3195                                 cpu_l2cache_inv_range(pv->pv_va, PAGE_SIZE);
 3196                 }
 3197 
 3198                 if (pv->pv_flags & PVF_UNMAN) {
 3199                         /* remove the pv entry, but do not remove the mapping
 3200                          * and remember this is a kernel mapped page
 3201                          */
 3202                         m->md.pv_kva = pv->pv_va;
 3203                 } else {
 3204                         /* remove the mapping and pv entry */
 3205                         l2b = pmap_get_l2_bucket(pv->pv_pmap, pv->pv_va);
 3206                         KASSERT(l2b != NULL, ("No l2 bucket"));
 3207                         ptep = &l2b->l2b_kva[l2pte_index(pv->pv_va)];
 3208                         *ptep = 0;
 3209                         PTE_SYNC_CURRENT(pv->pv_pmap, ptep);
 3210                         pmap_free_l2_bucket(pv->pv_pmap, l2b, 1);
 3211                         if (pv->pv_flags & PVF_WIRED)
 3212                                 pv->pv_pmap->pm_stats.wired_count--;
 3213                         pv->pv_pmap->pm_stats.resident_count--;
 3214                         flags |= pv->pv_flags;
 3215                 }
 3216                 pmap_nuke_pv(m, pv->pv_pmap, pv);
 3217                 PMAP_UNLOCK(pv->pv_pmap);
 3218                 pmap_free_pv_entry(pv);
 3219         }
 3220 
 3221         if (flush) {
 3222                 if (PV_BEEN_EXECD(flags))
 3223                         pmap_tlb_flushID(curpm);
 3224                 else
 3225                         pmap_tlb_flushD(curpm);
 3226         }
 3227         vm_page_flag_clear(m, PG_WRITEABLE);
 3228 }
 3229 
 3230 
 3231 /*
 3232  *      Set the physical protection on the
 3233  *      specified range of this map as requested.
 3234  */
 3235 void
 3236 pmap_protect(pmap_t pm, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot)
 3237 {
 3238         struct l2_bucket *l2b;
 3239         pt_entry_t *ptep, pte;
 3240         vm_offset_t next_bucket;
 3241         u_int flags;
 3242         int flush;
 3243 
 3244         CTR4(KTR_PMAP, "pmap_protect: pmap %p sva 0x%08x eva 0x%08x prot %x",
 3245             pm, sva, eva, prot);
 3246 
 3247         if ((prot & VM_PROT_READ) == 0) {
 3248                 pmap_remove(pm, sva, eva);
 3249                 return;
 3250         }
 3251 
 3252         if (prot & VM_PROT_WRITE) {
 3253                 /*
 3254                  * If this is a read->write transition, just ignore it and let
 3255                  * vm_fault() take care of it later.
 3256                  */
 3257                 return;
 3258         }
 3259 
 3260         vm_page_lock_queues();
 3261         PMAP_LOCK(pm);
 3262 
 3263         /*
 3264          * OK, at this point, we know we're doing write-protect operation.
 3265          * If the pmap is active, write-back the range.
 3266          */
 3267         pmap_dcache_wb_range(pm, sva, eva - sva, FALSE, FALSE);
 3268 
 3269         flush = ((eva - sva) >= (PAGE_SIZE * 4)) ? 0 : -1;
 3270         flags = 0;
 3271 
 3272         while (sva < eva) {
 3273                 next_bucket = L2_NEXT_BUCKET(sva);
 3274                 if (next_bucket > eva)
 3275                         next_bucket = eva;
 3276 
 3277                 l2b = pmap_get_l2_bucket(pm, sva);
 3278                 if (l2b == NULL) {
 3279                         sva = next_bucket;
 3280                         continue;
 3281                 }
 3282 
 3283                 ptep = &l2b->l2b_kva[l2pte_index(sva)];
 3284 
 3285                 while (sva < next_bucket) {
 3286                         if ((pte = *ptep) != 0 && (pte & L2_S_PROT_W) != 0) {
 3287                                 struct vm_page *pg;
 3288                                 u_int f;
 3289 
 3290                                 pg = PHYS_TO_VM_PAGE(l2pte_pa(pte));
 3291                                 pte &= ~L2_S_PROT_W;
 3292                                 *ptep = pte;
 3293                                 PTE_SYNC(ptep);
 3294 
 3295                                 if (pg != NULL) {
 3296                                         f = pmap_modify_pv(pg, pm, sva,
 3297                                             PVF_WRITE, 0);
 3298                                         vm_page_dirty(pg);
 3299                                 } else
 3300                                         f = PVF_REF | PVF_EXEC;
 3301 
 3302                                 if (flush >= 0) {
 3303                                         flush++;
 3304                                         flags |= f;
 3305                                 } else
 3306                                 if (PV_BEEN_EXECD(f))
 3307                                         pmap_tlb_flushID_SE(pm, sva);
 3308                                 else
 3309                                 if (PV_BEEN_REFD(f))
 3310                                         pmap_tlb_flushD_SE(pm, sva);
 3311                         }
 3312 
 3313                         sva += PAGE_SIZE;
 3314                         ptep++;
 3315                 }
 3316         }
 3317 
 3318 
 3319         if (flush) {
 3320                 if (PV_BEEN_EXECD(flags))
 3321                         pmap_tlb_flushID(pm);
 3322                 else
 3323                 if (PV_BEEN_REFD(flags))
 3324                         pmap_tlb_flushD(pm);
 3325         }
 3326         vm_page_unlock_queues();
 3327 
 3328         PMAP_UNLOCK(pm);
 3329 }
 3330 
 3331 
 3332 /*
 3333  *      Insert the given physical page (p) at
 3334  *      the specified virtual address (v) in the
 3335  *      target physical map with the protection requested.
 3336  *
 3337  *      If specified, the page will be wired down, meaning
 3338  *      that the related pte can not be reclaimed.
 3339  *
 3340  *      NB:  This is the only routine which MAY NOT lazy-evaluate
 3341  *      or lose information.  That is, this routine must actually
 3342  *      insert this page into the given map NOW.
 3343  */
 3344 
 3345 void
 3346 pmap_enter(pmap_t pmap, vm_offset_t va, vm_prot_t access, vm_page_t m,
 3347     vm_prot_t prot, boolean_t wired)
 3348 {
 3349 
 3350         vm_page_lock_queues();
 3351         PMAP_LOCK(pmap);
 3352         pmap_enter_locked(pmap, va, m, prot, wired, M_WAITOK);
 3353         vm_page_unlock_queues();
 3354         PMAP_UNLOCK(pmap);
 3355 }
 3356 
 3357 /*
 3358  *      The page queues and pmap must be locked.
 3359  */
 3360 static void
 3361 pmap_enter_locked(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
 3362     boolean_t wired, int flags)
 3363 {
 3364         struct l2_bucket *l2b = NULL;
 3365         struct vm_page *opg;
 3366         struct pv_entry *pve = NULL;
 3367         pt_entry_t *ptep, npte, opte;
 3368         u_int nflags;
 3369         u_int oflags;
 3370         vm_paddr_t pa;
 3371 
 3372         PMAP_ASSERT_LOCKED(pmap);
 3373         mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 3374         if (va == vector_page) {
 3375                 pa = systempage.pv_pa;
 3376                 m = NULL;
 3377         } else
 3378                 pa = VM_PAGE_TO_PHYS(m);
 3379         nflags = 0;
 3380         if (prot & VM_PROT_WRITE)
 3381                 nflags |= PVF_WRITE;
 3382         if (prot & VM_PROT_EXECUTE)
 3383                 nflags |= PVF_EXEC;
 3384         if (wired)
 3385                 nflags |= PVF_WIRED;
 3386         PDEBUG(1, printf("pmap_enter: pmap = %08x, va = %08x, m = %08x, prot = %x, "
 3387             "wired = %x\n", (uint32_t) pmap, va, (uint32_t) m, prot, wired));
 3388             
 3389         if (pmap == pmap_kernel()) {
 3390                 l2b = pmap_get_l2_bucket(pmap, va);
 3391                 if (l2b == NULL)
 3392                         l2b = pmap_grow_l2_bucket(pmap, va);
 3393         } else {
 3394 do_l2b_alloc:
 3395                 l2b = pmap_alloc_l2_bucket(pmap, va);
 3396                 if (l2b == NULL) {
 3397                         if (flags & M_WAITOK) {
 3398                                 PMAP_UNLOCK(pmap);
 3399                                 vm_page_unlock_queues();
 3400                                 VM_WAIT;
 3401                                 vm_page_lock_queues();
 3402                                 PMAP_LOCK(pmap);
 3403                                 goto do_l2b_alloc;
 3404                         }
 3405                         return;
 3406                 }
 3407         }
 3408 
 3409         ptep = &l2b->l2b_kva[l2pte_index(va)];
 3410                     
 3411         opte = *ptep;
 3412         npte = pa;
 3413         oflags = 0;
 3414         if (opte) {
 3415                 /*
 3416                  * There is already a mapping at this address.
 3417                  * If the physical address is different, lookup the
 3418                  * vm_page.
 3419                  */
 3420                 if (l2pte_pa(opte) != pa)
 3421                         opg = PHYS_TO_VM_PAGE(l2pte_pa(opte));
 3422                 else
 3423                         opg = m;
 3424         } else
 3425                 opg = NULL;
 3426 
 3427         if ((prot & (VM_PROT_ALL)) ||
 3428             (!m || m->md.pvh_attrs & PVF_REF)) {
 3429                 /*
 3430                  * - The access type indicates that we don't need
 3431                  *   to do referenced emulation.
 3432                  * OR
 3433                  * - The physical page has already been referenced
 3434                  *   so no need to re-do referenced emulation here.
 3435                  */
 3436                 npte |= L2_S_PROTO;
 3437                 
 3438                 nflags |= PVF_REF;
 3439                 
 3440                 if (m && ((prot & VM_PROT_WRITE) != 0 ||
 3441                     (m->md.pvh_attrs & PVF_MOD))) {
 3442                         /*
 3443                          * This is a writable mapping, and the
 3444                          * page's mod state indicates it has
 3445                          * already been modified. Make it
 3446                          * writable from the outset.
 3447                          */
 3448                         nflags |= PVF_MOD;
 3449                         if (!(m->md.pvh_attrs & PVF_MOD))
 3450                                 vm_page_dirty(m);
 3451                 }
 3452                 if (m && opte)
 3453                         vm_page_flag_set(m, PG_REFERENCED);
 3454         } else {
 3455                 /*
 3456                  * Need to do page referenced emulation.
 3457                  */
 3458                 npte |= L2_TYPE_INV;
 3459         }
 3460         
 3461         if (prot & VM_PROT_WRITE) {
 3462                 npte |= L2_S_PROT_W;
 3463                 if (m != NULL)
 3464                         vm_page_flag_set(m, PG_WRITEABLE);
 3465         }
 3466         npte |= pte_l2_s_cache_mode;
 3467         if (m && m == opg) {
 3468                 /*
 3469                  * We're changing the attrs of an existing mapping.
 3470                  */
 3471                 oflags = pmap_modify_pv(m, pmap, va,
 3472                     PVF_WRITE | PVF_EXEC | PVF_WIRED |
 3473                     PVF_MOD | PVF_REF, nflags);
 3474                 
 3475                 /*
 3476                  * We may need to flush the cache if we're
 3477                  * doing rw-ro...
 3478                  */
 3479                 if (pmap_is_current(pmap) &&
 3480                     (oflags & PVF_NC) == 0 &&
 3481                     (opte & L2_S_PROT_W) != 0 &&
 3482                     (prot & VM_PROT_WRITE) == 0 &&
 3483                     (opte & L2_TYPE_MASK) != L2_TYPE_INV) {
 3484                         cpu_dcache_wb_range(va, PAGE_SIZE);
 3485                         cpu_l2cache_wb_range(va, PAGE_SIZE);
 3486                 }
 3487         } else {
 3488                 /*
 3489                  * New mapping, or changing the backing page
 3490                  * of an existing mapping.
 3491                  */
 3492                 if (opg) {
 3493                         /*
 3494                          * Replacing an existing mapping with a new one.
 3495                          * It is part of our managed memory so we
 3496                          * must remove it from the PV list
 3497                          */
 3498                         if ((pve = pmap_remove_pv(opg, pmap, va))) {
 3499 
 3500                         /* note for patch: the oflags/invalidation was moved
 3501                          * because PG_FICTITIOUS pages could free the pve
 3502                          */
 3503                             oflags = pve->pv_flags;
 3504                         /*
 3505                          * If the old mapping was valid (ref/mod
 3506                          * emulation creates 'invalid' mappings
 3507                          * initially) then make sure to frob
 3508                          * the cache.
 3509                          */
 3510                             if ((oflags & PVF_NC) == 0 && l2pte_valid(opte)) {
 3511                                 if (PV_BEEN_EXECD(oflags)) {
 3512                                         pmap_idcache_wbinv_range(pmap, va,
 3513                                             PAGE_SIZE);
 3514                                 } else
 3515                                         if (PV_BEEN_REFD(oflags)) {
 3516                                                 pmap_dcache_wb_range(pmap, va,
 3517                                                     PAGE_SIZE, TRUE,
 3518                                                     (oflags & PVF_WRITE) == 0);
 3519                                         }
 3520                             }
 3521 
 3522                         /* free/allocate a pv_entry for UNMANAGED pages if
 3523                          * this physical page is not/is already mapped.
 3524                          */
 3525 
 3526                             if (m && ((m->flags & PG_FICTITIOUS) ||
 3527                                 ((m->flags & PG_UNMANAGED) &&
 3528                                   !m->md.pv_kva &&
 3529                                  TAILQ_EMPTY(&m->md.pv_list)))) {
 3530                                 pmap_free_pv_entry(pve);
 3531                                 pve = NULL;
 3532                             }
 3533                         } else if (m && !(m->flags & PG_FICTITIOUS) &&
 3534                                  (!(m->flags & PG_UNMANAGED) || m->md.pv_kva ||
 3535                                   !TAILQ_EMPTY(&m->md.pv_list)))
 3536                                 pve = pmap_get_pv_entry();
 3537                 } else if (m && !(m->flags & PG_FICTITIOUS) &&
 3538                            (!(m->flags & PG_UNMANAGED) || m->md.pv_kva ||
 3539                            !TAILQ_EMPTY(&m->md.pv_list)))
 3540                         pve = pmap_get_pv_entry();
 3541 
 3542                 if (m && !(m->flags & PG_FICTITIOUS)) {
 3543                         KASSERT(va < kmi.clean_sva || va >= kmi.clean_eva,
 3544                         ("pmap_enter: managed mapping within the clean submap"));
 3545                         if (m->flags & PG_UNMANAGED) {
 3546                                 if (!TAILQ_EMPTY(&m->md.pv_list) ||
 3547                                      m->md.pv_kva) {
 3548                                         KASSERT(pve != NULL, ("No pv"));
 3549                                         nflags |= PVF_UNMAN;
 3550                                         pmap_enter_pv(m, pve, pmap, va, nflags);
 3551                                 } else
 3552                                         m->md.pv_kva = va;
 3553                         } else {
 3554                                 KASSERT(pve != NULL, ("No pv"));
 3555                                 pmap_enter_pv(m, pve, pmap, va, nflags);
 3556                         }
 3557                 }
 3558         }
 3559         /*
 3560          * Make sure userland mappings get the right permissions
 3561          */
 3562         if (pmap != pmap_kernel() && va != vector_page) {
 3563                 npte |= L2_S_PROT_U;
 3564         }
 3565 
 3566         /*
 3567          * Keep the stats up to date
 3568          */
 3569         if (opte == 0) {
 3570                 l2b->l2b_occupancy++;
 3571                 pmap->pm_stats.resident_count++;
 3572         } 
 3573 
 3574 
 3575         /*
 3576          * If this is just a wiring change, the two PTEs will be
 3577          * identical, so there's no need to update the page table.
 3578          */
 3579         if (npte != opte) {
 3580                 boolean_t is_cached = pmap_is_current(pmap);
 3581 
 3582                 *ptep = npte;
 3583                 if (is_cached) {
 3584                         /*
 3585                          * We only need to frob the cache/tlb if this pmap
 3586                          * is current
 3587                          */
 3588                         PTE_SYNC(ptep);
 3589                         if (L1_IDX(va) != L1_IDX(vector_page) && 
 3590                             l2pte_valid(npte)) {
 3591                                 /*
 3592                                  * This mapping is likely to be accessed as
 3593                                  * soon as we return to userland. Fix up the
 3594                                  * L1 entry to avoid taking another
 3595                                  * page/domain fault.
 3596                                  */
 3597                                 pd_entry_t *pl1pd, l1pd;
 3598 
 3599                                 pl1pd = &pmap->pm_l1->l1_kva[L1_IDX(va)];
 3600                                 l1pd = l2b->l2b_phys | L1_C_DOM(pmap->pm_domain) |
 3601                                     L1_C_PROTO;
 3602                                 if (*pl1pd != l1pd) {
 3603                                         *pl1pd = l1pd;
 3604                                         PTE_SYNC(pl1pd);
 3605                                 }
 3606                         }
 3607                 }
 3608 
 3609                 if (PV_BEEN_EXECD(oflags))
 3610                         pmap_tlb_flushID_SE(pmap, va);
 3611                 else if (PV_BEEN_REFD(oflags))
 3612                         pmap_tlb_flushD_SE(pmap, va);
 3613 
 3614 
 3615                 if (m)
 3616                         pmap_fix_cache(m, pmap, va);
 3617         }
 3618 }
 3619 
 3620 /*
 3621  * Maps a sequence of resident pages belonging to the same object.
 3622  * The sequence begins with the given page m_start.  This page is
 3623  * mapped at the given virtual address start.  Each subsequent page is
 3624  * mapped at a virtual address that is offset from start by the same
 3625  * amount as the page is offset from m_start within the object.  The
 3626  * last page in the sequence is the page with the largest offset from
 3627  * m_start that can be mapped at a virtual address less than the given
 3628  * virtual address end.  Not every virtual page between start and end
 3629  * is mapped; only those for which a resident page exists with the
 3630  * corresponding offset from m_start are mapped.
 3631  */
 3632 void
 3633 pmap_enter_object(pmap_t pmap, vm_offset_t start, vm_offset_t end,
 3634     vm_page_t m_start, vm_prot_t prot)
 3635 {
 3636         vm_page_t m;
 3637         vm_pindex_t diff, psize;
 3638 
 3639         psize = atop(end - start);
 3640         m = m_start;
 3641         PMAP_LOCK(pmap);
 3642         while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) {
 3643                 pmap_enter_locked(pmap, start + ptoa(diff), m, prot &
 3644                     (VM_PROT_READ | VM_PROT_EXECUTE), FALSE, M_NOWAIT);
 3645                 m = TAILQ_NEXT(m, listq);
 3646         }
 3647         PMAP_UNLOCK(pmap);
 3648 }
 3649 
 3650 /*
 3651  * this code makes some *MAJOR* assumptions:
 3652  * 1. Current pmap & pmap exists.
 3653  * 2. Not wired.
 3654  * 3. Read access.
 3655  * 4. No page table pages.
 3656  * but is *MUCH* faster than pmap_enter...
 3657  */
 3658 
 3659 void
 3660 pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot)
 3661 {
 3662 
 3663         PMAP_LOCK(pmap);
 3664         pmap_enter_locked(pmap, va, m, prot & (VM_PROT_READ | VM_PROT_EXECUTE),
 3665             FALSE, M_NOWAIT);
 3666         PMAP_UNLOCK(pmap);
 3667 }
 3668 
 3669 /*
 3670  *      Routine:        pmap_change_wiring
 3671  *      Function:       Change the wiring attribute for a map/virtual-address
 3672  *                      pair.
 3673  *      In/out conditions:
 3674  *                      The mapping must already exist in the pmap.
 3675  */
 3676 void
 3677 pmap_change_wiring(pmap_t pmap, vm_offset_t va, boolean_t wired)
 3678 {
 3679         struct l2_bucket *l2b;
 3680         pt_entry_t *ptep, pte;
 3681         vm_page_t pg;
 3682 
 3683         vm_page_lock_queues();
 3684         PMAP_LOCK(pmap);
 3685         l2b = pmap_get_l2_bucket(pmap, va);
 3686         KASSERT(l2b, ("No l2b bucket in pmap_change_wiring"));
 3687         ptep = &l2b->l2b_kva[l2pte_index(va)];
 3688         pte = *ptep;
 3689         pg = PHYS_TO_VM_PAGE(l2pte_pa(pte));
 3690         if (pg) 
 3691                 pmap_modify_pv(pg, pmap, va, PVF_WIRED, wired);
 3692         vm_page_unlock_queues();
 3693         PMAP_UNLOCK(pmap);
 3694 }
 3695 
 3696 
 3697 /*
 3698  *      Copy the range specified by src_addr/len
 3699  *      from the source map to the range dst_addr/len
 3700  *      in the destination map.
 3701  *
 3702  *      This routine is only advisory and need not do anything.
 3703  */
 3704 void
 3705 pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr,
 3706     vm_size_t len, vm_offset_t src_addr)
 3707 {
 3708 }
 3709 
 3710 
 3711 /*
 3712  *      Routine:        pmap_extract
 3713  *      Function:
 3714  *              Extract the physical page address associated
 3715  *              with the given map/virtual_address pair.
 3716  */
 3717 vm_paddr_t
 3718 pmap_extract(pmap_t pm, vm_offset_t va)
 3719 {
 3720         struct l2_dtable *l2;
 3721         pd_entry_t l1pd;
 3722         pt_entry_t *ptep, pte;
 3723         vm_paddr_t pa;
 3724         u_int l1idx;
 3725         l1idx = L1_IDX(va);
 3726 
 3727         PMAP_LOCK(pm);
 3728         l1pd = pm->pm_l1->l1_kva[l1idx];
 3729         if (l1pte_section_p(l1pd)) {
 3730                 /*
 3731                  * These should only happen for pmap_kernel()
 3732                  */
 3733                 KASSERT(pm == pmap_kernel(), ("huh"));
 3734                 /* XXX: what to do about the bits > 32 ? */
 3735                 if (l1pd & L1_S_SUPERSEC) 
 3736                         pa = (l1pd & L1_SUP_FRAME) | (va & L1_SUP_OFFSET);
 3737                 else
 3738                         pa = (l1pd & L1_S_FRAME) | (va & L1_S_OFFSET);
 3739         } else {
 3740                 /*
 3741                  * Note that we can't rely on the validity of the L1
 3742                  * descriptor as an indication that a mapping exists.
 3743                  * We have to look it up in the L2 dtable.
 3744                  */
 3745                 l2 = pm->pm_l2[L2_IDX(l1idx)];
 3746 
 3747                 if (l2 == NULL ||
 3748                     (ptep = l2->l2_bucket[L2_BUCKET(l1idx)].l2b_kva) == NULL) {
 3749                         PMAP_UNLOCK(pm);
 3750                         return (0);
 3751                 }
 3752 
 3753                 ptep = &ptep[l2pte_index(va)];
 3754                 pte = *ptep;
 3755 
 3756                 if (pte == 0) {
 3757                         PMAP_UNLOCK(pm);
 3758                         return (0);
 3759                 }
 3760 
 3761                 switch (pte & L2_TYPE_MASK) {
 3762                 case L2_TYPE_L:
 3763                         pa = (pte & L2_L_FRAME) | (va & L2_L_OFFSET);
 3764                         break;
 3765 
 3766                 default:
 3767                         pa = (pte & L2_S_FRAME) | (va & L2_S_OFFSET);
 3768                         break;
 3769                 }
 3770         }
 3771 
 3772         PMAP_UNLOCK(pm);
 3773         return (pa);
 3774 }
 3775 
 3776 /*
 3777  * Atomically extract and hold the physical page with the given
 3778  * pmap and virtual address pair if that mapping permits the given
 3779  * protection.
 3780  *
 3781  */
 3782 vm_page_t
 3783 pmap_extract_and_hold(pmap_t pmap, vm_offset_t va, vm_prot_t prot)
 3784 {
 3785         struct l2_dtable *l2;
 3786         pd_entry_t l1pd;
 3787         pt_entry_t *ptep, pte;
 3788         vm_paddr_t pa;
 3789         vm_page_t m = NULL;
 3790         u_int l1idx;
 3791         l1idx = L1_IDX(va);
 3792 
 3793         vm_page_lock_queues();
 3794         PMAP_LOCK(pmap);
 3795         l1pd = pmap->pm_l1->l1_kva[l1idx];
 3796         if (l1pte_section_p(l1pd)) {
 3797                 /*
 3798                  * These should only happen for pmap_kernel()
 3799                  */
 3800                 KASSERT(pmap == pmap_kernel(), ("huh"));
 3801                 /* XXX: what to do about the bits > 32 ? */
 3802                 if (l1pd & L1_S_SUPERSEC) 
 3803                         pa = (l1pd & L1_SUP_FRAME) | (va & L1_SUP_OFFSET);
 3804                 else
 3805                         pa = (l1pd & L1_S_FRAME) | (va & L1_S_OFFSET);
 3806                 if (l1pd & L1_S_PROT_W || (prot & VM_PROT_WRITE) == 0) {
 3807                         m = PHYS_TO_VM_PAGE(pa);
 3808                         vm_page_hold(m);
 3809                 }
 3810                         
 3811         } else {
 3812                 /*
 3813                  * Note that we can't rely on the validity of the L1
 3814                  * descriptor as an indication that a mapping exists.
 3815                  * We have to look it up in the L2 dtable.
 3816                  */
 3817                 l2 = pmap->pm_l2[L2_IDX(l1idx)];
 3818 
 3819                 if (l2 == NULL ||
 3820                     (ptep = l2->l2_bucket[L2_BUCKET(l1idx)].l2b_kva) == NULL) {
 3821                         PMAP_UNLOCK(pmap);
 3822                         vm_page_unlock_queues();
 3823                         return (NULL);
 3824                 }
 3825 
 3826                 ptep = &ptep[l2pte_index(va)];
 3827                 pte = *ptep;
 3828 
 3829                 if (pte == 0) {
 3830                         PMAP_UNLOCK(pmap);
 3831                         vm_page_unlock_queues();
 3832                         return (NULL);
 3833                 }
 3834                 if (pte & L2_S_PROT_W || (prot & VM_PROT_WRITE) == 0) {
 3835                         switch (pte & L2_TYPE_MASK) {
 3836                         case L2_TYPE_L:
 3837                                 pa = (pte & L2_L_FRAME) | (va & L2_L_OFFSET);
 3838                                 break;
 3839                                 
 3840                         default:
 3841                                 pa = (pte & L2_S_FRAME) | (va & L2_S_OFFSET);
 3842                                 break;
 3843                         }
 3844                         m = PHYS_TO_VM_PAGE(pa);
 3845                         vm_page_hold(m);
 3846                 }
 3847         }
 3848 
 3849         PMAP_UNLOCK(pmap);
 3850         vm_page_unlock_queues();
 3851         return (m);
 3852 }
 3853 
 3854 /*
 3855  * Initialize a preallocated and zeroed pmap structure,
 3856  * such as one in a vmspace structure.
 3857  */
 3858 
 3859 int
 3860 pmap_pinit(pmap_t pmap)
 3861 {
 3862         PDEBUG(1, printf("pmap_pinit: pmap = %08x\n", (uint32_t) pmap));
 3863         
 3864         PMAP_LOCK_INIT(pmap);
 3865         pmap_alloc_l1(pmap);
 3866         bzero(pmap->pm_l2, sizeof(pmap->pm_l2));
 3867 
 3868         pmap->pm_active = 0;
 3869                 
 3870         TAILQ_INIT(&pmap->pm_pvlist);
 3871         bzero(&pmap->pm_stats, sizeof pmap->pm_stats);
 3872         pmap->pm_stats.resident_count = 1;
 3873         if (vector_page < KERNBASE) {
 3874                 pmap_enter(pmap, vector_page,
 3875                     VM_PROT_READ, PHYS_TO_VM_PAGE(systempage.pv_pa),
 3876                     VM_PROT_READ, 1);
 3877         } 
 3878         return (1);
 3879 }
 3880 
 3881 
 3882 /***************************************************
 3883  * page management routines.
 3884  ***************************************************/
 3885 
 3886 
 3887 static void
 3888 pmap_free_pv_entry(pv_entry_t pv)
 3889 {
 3890         pv_entry_count--;
 3891         uma_zfree(pvzone, pv);
 3892 }
 3893 
 3894 
 3895 /*
 3896  * get a new pv_entry, allocating a block from the system
 3897  * when needed.
 3898  * the memory allocation is performed bypassing the malloc code
 3899  * because of the possibility of allocations at interrupt time.
 3900  */
 3901 static pv_entry_t
 3902 pmap_get_pv_entry(void)
 3903 {
 3904         pv_entry_t ret_value;
 3905         
 3906         pv_entry_count++;
 3907         if (pv_entry_count > pv_entry_high_water)
 3908                 pagedaemon_wakeup();
 3909         ret_value = uma_zalloc(pvzone, M_NOWAIT);
 3910         return ret_value;
 3911 }
 3912 
 3913 /*
 3914  *      Remove the given range of addresses from the specified map.
 3915  *
 3916  *      It is assumed that the start and end are properly
 3917  *      rounded to the page size.
 3918  */
 3919 #define PMAP_REMOVE_CLEAN_LIST_SIZE     3
 3920 void
 3921 pmap_remove(pmap_t pm, vm_offset_t sva, vm_offset_t eva)
 3922 {
 3923         struct l2_bucket *l2b;
 3924         vm_offset_t next_bucket;
 3925         pt_entry_t *ptep;
 3926         u_int total;
 3927         u_int mappings, is_exec, is_refd;
 3928         int flushall = 0;
 3929 
 3930 
 3931         /*
 3932          * we lock in the pmap => pv_head direction
 3933          */
 3934 
 3935         vm_page_lock_queues();
 3936         PMAP_LOCK(pm);
 3937         total = 0;
 3938         while (sva < eva) {
 3939                 /*
 3940                  * Do one L2 bucket's worth at a time.
 3941                  */
 3942                 next_bucket = L2_NEXT_BUCKET(sva);
 3943                 if (next_bucket > eva)
 3944                         next_bucket = eva;
 3945 
 3946                 l2b = pmap_get_l2_bucket(pm, sva);
 3947                 if (l2b == NULL) {
 3948                         sva = next_bucket;
 3949                         continue;
 3950                 }
 3951 
 3952                 ptep = &l2b->l2b_kva[l2pte_index(sva)];
 3953                 mappings = 0;
 3954 
 3955                 while (sva < next_bucket) {
 3956                         struct vm_page *pg;
 3957                         pt_entry_t pte;
 3958                         vm_paddr_t pa;
 3959 
 3960                         pte = *ptep;
 3961 
 3962                         if (pte == 0) {
 3963                                 /*
 3964                                  * Nothing here, move along
 3965                                  */
 3966                                 sva += PAGE_SIZE;
 3967                                 ptep++;
 3968                                 continue;
 3969                         }
 3970 
 3971                         pm->pm_stats.resident_count--;
 3972                         pa = l2pte_pa(pte);
 3973                         is_exec = 0;
 3974                         is_refd = 1;
 3975 
 3976                         /*
 3977                          * Update flags. In a number of circumstances,
 3978                          * we could cluster a lot of these and do a
 3979                          * number of sequential pages in one go.
 3980                          */
 3981                         if ((pg = PHYS_TO_VM_PAGE(pa)) != NULL) {
 3982                                 struct pv_entry *pve;
 3983 
 3984                                 pve = pmap_remove_pv(pg, pm, sva);
 3985                                 if (pve) {
 3986                                         is_exec = PV_BEEN_EXECD(pve->pv_flags);
 3987                                         is_refd = PV_BEEN_REFD(pve->pv_flags);
 3988                                         pmap_free_pv_entry(pve);
 3989                                 }
 3990                         }
 3991 
 3992                         if (l2pte_valid(pte) && pmap_is_current(pm)) {
 3993                                 if (total < PMAP_REMOVE_CLEAN_LIST_SIZE) {
 3994                                         total++;
 3995                                         if (is_exec) {
 3996                                                 cpu_idcache_wbinv_range(sva,
 3997                                                     PAGE_SIZE);
 3998                                                 cpu_l2cache_wbinv_range(sva,
 3999                                                     PAGE_SIZE);
 4000                                                 cpu_tlb_flushID_SE(sva);
 4001                                         } else if (is_refd) {
 4002                                                 cpu_dcache_wbinv_range(sva,
 4003                                                     PAGE_SIZE);
 4004                                                 cpu_l2cache_wbinv_range(sva,
 4005                                                     PAGE_SIZE);
 4006                                                 cpu_tlb_flushD_SE(sva);
 4007                                         }
 4008                                 } else if (total == PMAP_REMOVE_CLEAN_LIST_SIZE) {
 4009                                         /* flushall will also only get set for
 4010                                          * for a current pmap
 4011                                          */
 4012                                         cpu_idcache_wbinv_all();
 4013                                         cpu_l2cache_wbinv_all();
 4014                                         flushall = 1;
 4015                                         total++;
 4016                                 }
 4017                         }
 4018                         *ptep = 0;
 4019                         PTE_SYNC(ptep);
 4020 
 4021                         sva += PAGE_SIZE;
 4022                         ptep++;
 4023                         mappings++;
 4024                 }
 4025 
 4026                 pmap_free_l2_bucket(pm, l2b, mappings);
 4027         }
 4028 
 4029         vm_page_unlock_queues();
 4030         if (flushall)
 4031                 cpu_tlb_flushID();
 4032         PMAP_UNLOCK(pm);
 4033 }
 4034 
 4035 /*
 4036  * pmap_zero_page()
 4037  * 
 4038  * Zero a given physical page by mapping it at a page hook point.
 4039  * In doing the zero page op, the page we zero is mapped cachable, as with
 4040  * StrongARM accesses to non-cached pages are non-burst making writing
 4041  * _any_ bulk data very slow.
 4042  */
 4043 #if (ARM_MMU_GENERIC + ARM_MMU_SA1) != 0 || defined(CPU_XSCALE_CORE3)
 4044 void
 4045 pmap_zero_page_generic(vm_paddr_t phys, int off, int size)
 4046 {
 4047 #ifdef ARM_USE_SMALL_ALLOC
 4048         char *dstpg;
 4049 #endif
 4050 
 4051 #ifdef DEBUG
 4052         struct vm_page *pg = PHYS_TO_VM_PAGE(phys);
 4053 
 4054         if (pg->md.pvh_list != NULL)
 4055                 panic("pmap_zero_page: page has mappings");
 4056 #endif
 4057 
 4058         if (_arm_bzero && size >= _min_bzero_size &&
 4059             _arm_bzero((void *)(phys + off), size, IS_PHYSICAL) == 0)
 4060                 return;
 4061 
 4062 #ifdef ARM_USE_SMALL_ALLOC
 4063         dstpg = (char *)arm_ptovirt(phys);
 4064         if (off || size != PAGE_SIZE) {
 4065                 bzero(dstpg + off, size);
 4066                 cpu_dcache_wbinv_range((vm_offset_t)(dstpg + off), size);
 4067                 cpu_l2cache_wbinv_range((vm_offset_t)(dstpg + off), size);
 4068         } else {
 4069                 bzero_page((vm_offset_t)dstpg);
 4070                 cpu_dcache_wbinv_range((vm_offset_t)dstpg, PAGE_SIZE);
 4071                 cpu_l2cache_wbinv_range((vm_offset_t)dstpg, PAGE_SIZE);
 4072         }
 4073 #else
 4074 
 4075         mtx_lock(&cmtx);
 4076         /*
 4077          * Hook in the page, zero it, invalidate the TLB as needed.
 4078          *
 4079          * Note the temporary zero-page mapping must be a non-cached page in
 4080          * order to work without corruption when write-allocate is enabled.
 4081          */
 4082         *cdst_pte = L2_S_PROTO | phys | L2_S_PROT(PTE_KERNEL, VM_PROT_WRITE);
 4083         cpu_tlb_flushD_SE(cdstp);
 4084         cpu_cpwait();
 4085         if (off || size != PAGE_SIZE)
 4086                 bzero((void *)(cdstp + off), size);
 4087         else
 4088                 bzero_page(cdstp);
 4089 
 4090         mtx_unlock(&cmtx);
 4091 #endif
 4092 }
 4093 #endif /* (ARM_MMU_GENERIC + ARM_MMU_SA1) != 0 */
 4094 
 4095 #if ARM_MMU_XSCALE == 1
 4096 void
 4097 pmap_zero_page_xscale(vm_paddr_t phys, int off, int size)
 4098 {
 4099 #ifdef ARM_USE_SMALL_ALLOC
 4100         char *dstpg;
 4101 #endif
 4102 
 4103         if (_arm_bzero && size >= _min_bzero_size &&
 4104             _arm_bzero((void *)(phys + off), size, IS_PHYSICAL) == 0)
 4105                 return;
 4106 #ifdef ARM_USE_SMALL_ALLOC
 4107         dstpg = (char *)arm_ptovirt(phys);
 4108         if (off || size != PAGE_SIZE) {
 4109                 bzero(dstpg + off, size);
 4110                 cpu_dcache_wbinv_range((vm_offset_t)(dstpg + off), size);
 4111         } else {
 4112                 bzero_page((vm_offset_t)dstpg);
 4113                 cpu_dcache_wbinv_range((vm_offset_t)dstpg, PAGE_SIZE);
 4114         }
 4115 #else
 4116         mtx_lock(&cmtx);
 4117         /*
 4118          * Hook in the page, zero it, and purge the cache for that
 4119          * zeroed page. Invalidate the TLB as needed.
 4120          */
 4121         *cdst_pte = L2_S_PROTO | phys |
 4122             L2_S_PROT(PTE_KERNEL, VM_PROT_WRITE) |
 4123             L2_C | L2_XSCALE_T_TEX(TEX_XSCALE_X);       /* mini-data */
 4124         PTE_SYNC(cdst_pte);
 4125         cpu_tlb_flushD_SE(cdstp);
 4126         cpu_cpwait();
 4127         if (off || size != PAGE_SIZE)
 4128                 bzero((void *)(cdstp + off), size);
 4129         else
 4130                 bzero_page(cdstp);
 4131         mtx_unlock(&cmtx);
 4132         xscale_cache_clean_minidata();
 4133 #endif
 4134 }
 4135 
 4136 /*
 4137  * Change the PTEs for the specified kernel mappings such that they
 4138  * will use the mini data cache instead of the main data cache.
 4139  */
 4140 void
 4141 pmap_use_minicache(vm_offset_t va, vm_size_t size)
 4142 {
 4143         struct l2_bucket *l2b;
 4144         pt_entry_t *ptep, *sptep, pte;
 4145         vm_offset_t next_bucket, eva;
 4146 
 4147 #if (ARM_NMMUS > 1) || defined(CPU_XSCALE_CORE3)
 4148         if (xscale_use_minidata == 0)
 4149                 return;
 4150 #endif
 4151 
 4152         eva = va + size;
 4153 
 4154         while (va < eva) {
 4155                 next_bucket = L2_NEXT_BUCKET(va);
 4156                 if (next_bucket > eva)
 4157                         next_bucket = eva;
 4158 
 4159                 l2b = pmap_get_l2_bucket(pmap_kernel(), va);
 4160 
 4161                 sptep = ptep = &l2b->l2b_kva[l2pte_index(va)];
 4162 
 4163                 while (va < next_bucket) {
 4164                         pte = *ptep;
 4165                         if (!l2pte_minidata(pte)) {
 4166                                 cpu_dcache_wbinv_range(va, PAGE_SIZE);
 4167                                 cpu_tlb_flushD_SE(va);
 4168                                 *ptep = pte & ~L2_B;
 4169                         }
 4170                         ptep++;
 4171                         va += PAGE_SIZE;
 4172                 }
 4173                 PTE_SYNC_RANGE(sptep, (u_int)(ptep - sptep));
 4174         }
 4175         cpu_cpwait();
 4176 }
 4177 #endif /* ARM_MMU_XSCALE == 1 */
 4178 
 4179 /*
 4180  *      pmap_zero_page zeros the specified hardware page by mapping 
 4181  *      the page into KVM and using bzero to clear its contents.
 4182  */
 4183 void
 4184 pmap_zero_page(vm_page_t m)
 4185 {
 4186         pmap_zero_page_func(VM_PAGE_TO_PHYS(m), 0, PAGE_SIZE);
 4187 }
 4188 
 4189 
 4190 /*
 4191  *      pmap_zero_page_area zeros the specified hardware page by mapping 
 4192  *      the page into KVM and using bzero to clear its contents.
 4193  *
 4194  *      off and size may not cover an area beyond a single hardware page.
 4195  */
 4196 void
 4197 pmap_zero_page_area(vm_page_t m, int off, int size)
 4198 {
 4199 
 4200         pmap_zero_page_func(VM_PAGE_TO_PHYS(m), off, size);
 4201 }
 4202 
 4203 
 4204 /*
 4205  *      pmap_zero_page_idle zeros the specified hardware page by mapping 
 4206  *      the page into KVM and using bzero to clear its contents.  This
 4207  *      is intended to be called from the vm_pagezero process only and
 4208  *      outside of Giant.
 4209  */
 4210 void
 4211 pmap_zero_page_idle(vm_page_t m)
 4212 {
 4213 
 4214         pmap_zero_page(m);
 4215 }
 4216 
 4217 #if 0
 4218 /*
 4219  * pmap_clean_page()
 4220  *
 4221  * This is a local function used to work out the best strategy to clean
 4222  * a single page referenced by its entry in the PV table. It's used by
 4223  * pmap_copy_page, pmap_zero page and maybe some others later on.
 4224  *
 4225  * Its policy is effectively:
 4226  *  o If there are no mappings, we don't bother doing anything with the cache.
 4227  *  o If there is one mapping, we clean just that page.
 4228  *  o If there are multiple mappings, we clean the entire cache.
 4229  *
 4230  * So that some functions can be further optimised, it returns 0 if it didn't
 4231  * clean the entire cache, or 1 if it did.
 4232  *
 4233  * XXX One bug in this routine is that if the pv_entry has a single page
 4234  * mapped at 0x00000000 a whole cache clean will be performed rather than
 4235  * just the 1 page. Since this should not occur in everyday use and if it does
 4236  * it will just result in not the most efficient clean for the page.
 4237  */
 4238 static int
 4239 pmap_clean_page(struct pv_entry *pv, boolean_t is_src)
 4240 {
 4241         pmap_t pm, pm_to_clean = NULL;
 4242         struct pv_entry *npv;
 4243         u_int cache_needs_cleaning = 0;
 4244         u_int flags = 0;
 4245         vm_offset_t page_to_clean = 0;
 4246 
 4247         if (pv == NULL) {
 4248                 /* nothing mapped in so nothing to flush */
 4249                 return (0);
 4250         }
 4251 
 4252         /*
 4253          * Since we flush the cache each time we change to a different
 4254          * user vmspace, we only need to flush the page if it is in the
 4255          * current pmap.
 4256          */
 4257         if (curthread)
 4258                 pm = vmspace_pmap(curproc->p_vmspace);
 4259         else
 4260                 pm = pmap_kernel();
 4261 
 4262         for (npv = pv; npv; npv = TAILQ_NEXT(npv, pv_list)) {
 4263                 if (npv->pv_pmap == pmap_kernel() || npv->pv_pmap == pm) {
 4264                         flags |= npv->pv_flags;
 4265                         /*
 4266                          * The page is mapped non-cacheable in 
 4267                          * this map.  No need to flush the cache.
 4268                          */
 4269                         if (npv->pv_flags & PVF_NC) {
 4270 #ifdef DIAGNOSTIC
 4271                                 if (cache_needs_cleaning)
 4272                                         panic("pmap_clean_page: "
 4273                                             "cache inconsistency");
 4274 #endif
 4275                                 break;
 4276                         } else if (is_src && (npv->pv_flags & PVF_WRITE) == 0)
 4277                                 continue;
 4278                         if (cache_needs_cleaning) {
 4279                                 page_to_clean = 0;
 4280                                 break;
 4281                         } else {
 4282                                 page_to_clean = npv->pv_va;
 4283                                 pm_to_clean = npv->pv_pmap;
 4284                         }
 4285                         cache_needs_cleaning = 1;
 4286                 }
 4287         }
 4288         if (page_to_clean) {
 4289                 if (PV_BEEN_EXECD(flags))
 4290                         pmap_idcache_wbinv_range(pm_to_clean, page_to_clean,
 4291                             PAGE_SIZE);
 4292                 else
 4293                         pmap_dcache_wb_range(pm_to_clean, page_to_clean,
 4294                             PAGE_SIZE, !is_src, (flags & PVF_WRITE) == 0);
 4295         } else if (cache_needs_cleaning) {
 4296                 if (PV_BEEN_EXECD(flags))
 4297                         pmap_idcache_wbinv_all(pm);
 4298                 else
 4299                         pmap_dcache_wbinv_all(pm);
 4300                 return (1);
 4301         }
 4302         return (0);
 4303 }
 4304 #endif
 4305 
 4306 /*
 4307  *      pmap_copy_page copies the specified (machine independent)
 4308  *      page by mapping the page into virtual memory and using
 4309  *      bcopy to copy the page, one machine dependent page at a
 4310  *      time.
 4311  */
 4312 
 4313 /*
 4314  * pmap_copy_page()
 4315  *
 4316  * Copy one physical page into another, by mapping the pages into
 4317  * hook points. The same comment regarding cachability as in
 4318  * pmap_zero_page also applies here.
 4319  */
 4320 #if  (ARM_MMU_GENERIC + ARM_MMU_SA1) != 0 || defined (CPU_XSCALE_CORE3)
 4321 void
 4322 pmap_copy_page_generic(vm_paddr_t src, vm_paddr_t dst)
 4323 {
 4324 #if 0
 4325         struct vm_page *src_pg = PHYS_TO_VM_PAGE(src);
 4326 #endif
 4327 #ifdef DEBUG
 4328         struct vm_page *dst_pg = PHYS_TO_VM_PAGE(dst);
 4329 
 4330         if (dst_pg->md.pvh_list != NULL)
 4331                 panic("pmap_copy_page: dst page has mappings");
 4332 #endif
 4333 
 4334 
 4335         /*
 4336          * Clean the source page.  Hold the source page's lock for
 4337          * the duration of the copy so that no other mappings can
 4338          * be created while we have a potentially aliased mapping.
 4339          */
 4340 #if 0
 4341         /*
 4342          * XXX: Not needed while we call cpu_dcache_wbinv_all() in
 4343          * pmap_copy_page().
 4344          */
 4345         (void) pmap_clean_page(TAILQ_FIRST(&src_pg->md.pv_list), TRUE);
 4346 #endif
 4347         /*
 4348          * Map the pages into the page hook points, copy them, and purge
 4349          * the cache for the appropriate page. Invalidate the TLB
 4350          * as required.
 4351          */
 4352         mtx_lock(&cmtx);
 4353         *csrc_pte = L2_S_PROTO | src |
 4354             L2_S_PROT(PTE_KERNEL, VM_PROT_READ) | pte_l2_s_cache_mode;
 4355         PTE_SYNC(csrc_pte);
 4356         *cdst_pte = L2_S_PROTO | dst |
 4357             L2_S_PROT(PTE_KERNEL, VM_PROT_WRITE) | pte_l2_s_cache_mode;
 4358         PTE_SYNC(cdst_pte);
 4359         cpu_tlb_flushD_SE(csrcp);
 4360         cpu_tlb_flushD_SE(cdstp);
 4361         cpu_cpwait();
 4362         bcopy_page(csrcp, cdstp);
 4363         mtx_unlock(&cmtx);
 4364         cpu_dcache_inv_range(csrcp, PAGE_SIZE);
 4365         cpu_dcache_wbinv_range(cdstp, PAGE_SIZE);
 4366         cpu_l2cache_inv_range(csrcp, PAGE_SIZE);
 4367         cpu_l2cache_wbinv_range(cdstp, PAGE_SIZE);
 4368 }
 4369 #endif /* (ARM_MMU_GENERIC + ARM_MMU_SA1) != 0 */
 4370 
 4371 #if ARM_MMU_XSCALE == 1
 4372 void
 4373 pmap_copy_page_xscale(vm_paddr_t src, vm_paddr_t dst)
 4374 {
 4375 #if 0
 4376         /* XXX: Only needed for pmap_clean_page(), which is commented out. */
 4377         struct vm_page *src_pg = PHYS_TO_VM_PAGE(src);
 4378 #endif
 4379 #ifdef DEBUG
 4380         struct vm_page *dst_pg = PHYS_TO_VM_PAGE(dst);
 4381 
 4382         if (dst_pg->md.pvh_list != NULL)
 4383                 panic("pmap_copy_page: dst page has mappings");
 4384 #endif
 4385 
 4386 
 4387         /*
 4388          * Clean the source page.  Hold the source page's lock for
 4389          * the duration of the copy so that no other mappings can
 4390          * be created while we have a potentially aliased mapping.
 4391          */
 4392 #if 0
 4393         /*
 4394          * XXX: Not needed while we call cpu_dcache_wbinv_all() in
 4395          * pmap_copy_page().
 4396          */
 4397         (void) pmap_clean_page(TAILQ_FIRST(&src_pg->md.pv_list), TRUE);
 4398 #endif
 4399         /*
 4400          * Map the pages into the page hook points, copy them, and purge
 4401          * the cache for the appropriate page. Invalidate the TLB
 4402          * as required.
 4403          */
 4404         mtx_lock(&cmtx);
 4405         *csrc_pte = L2_S_PROTO | src |
 4406             L2_S_PROT(PTE_KERNEL, VM_PROT_READ) |
 4407             L2_C | L2_XSCALE_T_TEX(TEX_XSCALE_X);       /* mini-data */
 4408         PTE_SYNC(csrc_pte);
 4409         *cdst_pte = L2_S_PROTO | dst |
 4410             L2_S_PROT(PTE_KERNEL, VM_PROT_WRITE) |
 4411             L2_C | L2_XSCALE_T_TEX(TEX_XSCALE_X);       /* mini-data */
 4412         PTE_SYNC(cdst_pte);
 4413         cpu_tlb_flushD_SE(csrcp);
 4414         cpu_tlb_flushD_SE(cdstp);
 4415         cpu_cpwait();
 4416         bcopy_page(csrcp, cdstp);
 4417         mtx_unlock(&cmtx);
 4418         xscale_cache_clean_minidata();
 4419 }
 4420 #endif /* ARM_MMU_XSCALE == 1 */
 4421 
 4422 void
 4423 pmap_copy_page(vm_page_t src, vm_page_t dst)
 4424 {
 4425 #ifdef ARM_USE_SMALL_ALLOC
 4426         vm_offset_t srcpg, dstpg;
 4427 #endif
 4428 
 4429         cpu_dcache_wbinv_all();
 4430         cpu_l2cache_wbinv_all();
 4431         if (_arm_memcpy && PAGE_SIZE >= _min_memcpy_size &&
 4432             _arm_memcpy((void *)VM_PAGE_TO_PHYS(dst), 
 4433             (void *)VM_PAGE_TO_PHYS(src), PAGE_SIZE, IS_PHYSICAL) == 0)
 4434                 return;
 4435 #ifdef ARM_USE_SMALL_ALLOC
 4436         srcpg = arm_ptovirt(VM_PAGE_TO_PHYS(src));
 4437         dstpg = arm_ptovirt(VM_PAGE_TO_PHYS(dst));
 4438         bcopy_page(srcpg, dstpg);
 4439         cpu_dcache_wbinv_range(dstpg, PAGE_SIZE);
 4440         cpu_l2cache_wbinv_range(dstpg, PAGE_SIZE);
 4441 #else
 4442         pmap_copy_page_func(VM_PAGE_TO_PHYS(src), VM_PAGE_TO_PHYS(dst));
 4443 #endif
 4444 }
 4445 
 4446 
 4447 
 4448 
 4449 /*
 4450  * this routine returns true if a physical page resides
 4451  * in the given pmap.
 4452  */
 4453 boolean_t
 4454 pmap_page_exists_quick(pmap_t pmap, vm_page_t m)
 4455 {
 4456         pv_entry_t pv;
 4457         int loops = 0;
 4458         
 4459         if (m->flags & PG_FICTITIOUS)
 4460                 return (FALSE);
 4461                 
 4462         /*
 4463          * Not found, check current mappings returning immediately
 4464          */
 4465         for (pv = TAILQ_FIRST(&m->md.pv_list);
 4466             pv;
 4467             pv = TAILQ_NEXT(pv, pv_list)) {
 4468                 if (pv->pv_pmap == pmap) {
 4469                         return (TRUE);
 4470                 }
 4471                 loops++;
 4472                 if (loops >= 16)
 4473                         break;
 4474         }
 4475         return (FALSE);
 4476 }
 4477 
 4478 /*
 4479  *      pmap_page_wired_mappings:
 4480  *
 4481  *      Return the number of managed mappings to the given physical page
 4482  *      that are wired.
 4483  */
 4484 int
 4485 pmap_page_wired_mappings(vm_page_t m)
 4486 {
 4487         pv_entry_t pv;
 4488         int count;
 4489 
 4490         count = 0;
 4491         if ((m->flags & PG_FICTITIOUS) != 0)
 4492                 return (count);
 4493         mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 4494         TAILQ_FOREACH(pv, &m->md.pv_list, pv_list)
 4495                 if ((pv->pv_flags & PVF_WIRED) != 0)
 4496                         count++;
 4497         return (count);
 4498 }
 4499 
 4500 /*
 4501  *      pmap_ts_referenced:
 4502  *
 4503  *      Return the count of reference bits for a page, clearing all of them.
 4504  */
 4505 int
 4506 pmap_ts_referenced(vm_page_t m)
 4507 {
 4508 
 4509         if (m->flags & PG_FICTITIOUS)
 4510                 return (0);
 4511         return (pmap_clearbit(m, PVF_REF));
 4512 }
 4513 
 4514 
 4515 boolean_t
 4516 pmap_is_modified(vm_page_t m)
 4517 {
 4518 
 4519         if (m->md.pvh_attrs & PVF_MOD)
 4520                 return (TRUE);
 4521         
 4522         return(FALSE);
 4523 }
 4524 
 4525 
 4526 /*
 4527  *      Clear the modify bits on the specified physical page.
 4528  */
 4529 void
 4530 pmap_clear_modify(vm_page_t m)
 4531 {
 4532 
 4533         if (m->md.pvh_attrs & PVF_MOD)
 4534                 pmap_clearbit(m, PVF_MOD);
 4535 }
 4536 
 4537 
 4538 /*
 4539  *      pmap_clear_reference:
 4540  *
 4541  *      Clear the reference bit on the specified physical page.
 4542  */
 4543 void
 4544 pmap_clear_reference(vm_page_t m)
 4545 {
 4546 
 4547         if (m->md.pvh_attrs & PVF_REF) 
 4548                 pmap_clearbit(m, PVF_REF);
 4549 }
 4550 
 4551 
 4552 /*
 4553  * Clear the write and modified bits in each of the given page's mappings.
 4554  */
 4555 void
 4556 pmap_remove_write(vm_page_t m)
 4557 {
 4558 
 4559         if (m->flags & PG_WRITEABLE)
 4560                 pmap_clearbit(m, PVF_WRITE);
 4561 }
 4562 
 4563 
 4564 /*
 4565  * perform the pmap work for mincore
 4566  */
 4567 int
 4568 pmap_mincore(pmap_t pmap, vm_offset_t addr)
 4569 {
 4570         printf("pmap_mincore()\n");
 4571         
 4572         return (0);
 4573 }
 4574 
 4575 
 4576 void
 4577 pmap_sync_icache(pmap_t pm, vm_offset_t va, vm_size_t sz)
 4578 {
 4579 }
 4580 
 4581 
 4582 /*
 4583  *      Increase the starting virtual address of the given mapping if a
 4584  *      different alignment might result in more superpage mappings.
 4585  */
 4586 void
 4587 pmap_align_superpage(vm_object_t object, vm_ooffset_t offset,
 4588     vm_offset_t *addr, vm_size_t size)
 4589 {
 4590 }
 4591 
 4592 
 4593 /*
 4594  * Map a set of physical memory pages into the kernel virtual
 4595  * address space. Return a pointer to where it is mapped. This
 4596  * routine is intended to be used for mapping device memory,
 4597  * NOT real memory.
 4598  */
 4599 void *
 4600 pmap_mapdev(vm_offset_t pa, vm_size_t size)
 4601 {
 4602         vm_offset_t va, tmpva, offset;
 4603         
 4604         offset = pa & PAGE_MASK;
 4605         size = roundup(size, PAGE_SIZE);
 4606         
 4607         GIANT_REQUIRED;
 4608         
 4609         va = kmem_alloc_nofault(kernel_map, size);
 4610         if (!va)
 4611                 panic("pmap_mapdev: Couldn't alloc kernel virtual memory");
 4612         for (tmpva = va; size > 0;) {
 4613                 pmap_kenter_internal(tmpva, pa, 0);
 4614                 size -= PAGE_SIZE;
 4615                 tmpva += PAGE_SIZE;
 4616                 pa += PAGE_SIZE;
 4617         }
 4618         
 4619         return ((void *)(va + offset));
 4620 }
 4621 
 4622 #define BOOTSTRAP_DEBUG
 4623 
 4624 /*
 4625  * pmap_map_section:
 4626  *
 4627  *      Create a single section mapping.
 4628  */
 4629 void
 4630 pmap_map_section(vm_offset_t l1pt, vm_offset_t va, vm_offset_t pa,
 4631     int prot, int cache)
 4632 {
 4633         pd_entry_t *pde = (pd_entry_t *) l1pt;
 4634         pd_entry_t fl;
 4635 
 4636         KASSERT(((va | pa) & L1_S_OFFSET) == 0, ("ouin2"));
 4637 
 4638         switch (cache) {
 4639         case PTE_NOCACHE:
 4640         default:
 4641                 fl = 0;
 4642                 break;
 4643 
 4644         case PTE_CACHE:
 4645                 fl = pte_l1_s_cache_mode;
 4646                 break;
 4647 
 4648         case PTE_PAGETABLE:
 4649                 fl = pte_l1_s_cache_mode_pt;
 4650                 break;
 4651         }
 4652 
 4653         pde[va >> L1_S_SHIFT] = L1_S_PROTO | pa |
 4654             L1_S_PROT(PTE_KERNEL, prot) | fl | L1_S_DOM(PMAP_DOMAIN_KERNEL);
 4655         PTE_SYNC(&pde[va >> L1_S_SHIFT]);
 4656 
 4657 }
 4658 
 4659 /*
 4660  * pmap_link_l2pt:
 4661  *
 4662  *      Link the L2 page table specified by l2pv.pv_pa into the L1
 4663  *      page table at the slot for "va".
 4664  */
 4665 void
 4666 pmap_link_l2pt(vm_offset_t l1pt, vm_offset_t va, struct pv_addr *l2pv)
 4667 {
 4668         pd_entry_t *pde = (pd_entry_t *) l1pt, proto;
 4669         u_int slot = va >> L1_S_SHIFT;
 4670 
 4671         proto = L1_S_DOM(PMAP_DOMAIN_KERNEL) | L1_C_PROTO;
 4672 
 4673 #ifdef VERBOSE_INIT_ARM     
 4674         printf("pmap_link_l2pt: pa=0x%x va=0x%x\n", l2pv->pv_pa, l2pv->pv_va);
 4675 #endif
 4676 
 4677         pde[slot + 0] = proto | (l2pv->pv_pa + 0x000);
 4678 
 4679         PTE_SYNC(&pde[slot]);
 4680 
 4681         SLIST_INSERT_HEAD(&kernel_pt_list, l2pv, pv_list);
 4682 
 4683         
 4684 }
 4685 
 4686 /*
 4687  * pmap_map_entry
 4688  *
 4689  *      Create a single page mapping.
 4690  */
 4691 void
 4692 pmap_map_entry(vm_offset_t l1pt, vm_offset_t va, vm_offset_t pa, int prot,
 4693     int cache)
 4694 {
 4695         pd_entry_t *pde = (pd_entry_t *) l1pt;
 4696         pt_entry_t fl;
 4697         pt_entry_t *pte;
 4698 
 4699         KASSERT(((va | pa) & PAGE_MASK) == 0, ("ouin"));
 4700 
 4701         switch (cache) {
 4702         case PTE_NOCACHE:
 4703         default:
 4704                 fl = 0;
 4705                 break;
 4706 
 4707         case PTE_CACHE:
 4708                 fl = pte_l2_s_cache_mode;
 4709                 break;
 4710 
 4711         case PTE_PAGETABLE:
 4712                 fl = pte_l2_s_cache_mode_pt;
 4713                 break;
 4714         }
 4715 
 4716         if ((pde[va >> L1_S_SHIFT] & L1_TYPE_MASK) != L1_TYPE_C)
 4717                 panic("pmap_map_entry: no L2 table for VA 0x%08x", va);
 4718 
 4719         pte = (pt_entry_t *) kernel_pt_lookup(pde[L1_IDX(va)] & L1_C_ADDR_MASK);
 4720 
 4721         if (pte == NULL)
 4722                 panic("pmap_map_entry: can't find L2 table for VA 0x%08x", va);
 4723 
 4724         pte[l2pte_index(va)] =
 4725             L2_S_PROTO | pa | L2_S_PROT(PTE_KERNEL, prot) | fl;
 4726         PTE_SYNC(&pte[l2pte_index(va)]);
 4727 }
 4728 
 4729 /*
 4730  * pmap_map_chunk:
 4731  *
 4732  *      Map a chunk of memory using the most efficient mappings
 4733  *      possible (section. large page, small page) into the
 4734  *      provided L1 and L2 tables at the specified virtual address.
 4735  */
 4736 vm_size_t
 4737 pmap_map_chunk(vm_offset_t l1pt, vm_offset_t va, vm_offset_t pa,
 4738     vm_size_t size, int prot, int cache)
 4739 {
 4740         pd_entry_t *pde = (pd_entry_t *) l1pt;
 4741         pt_entry_t *pte, f1, f2s, f2l;
 4742         vm_size_t resid;  
 4743         int i;
 4744 
 4745         resid = (size + (PAGE_SIZE - 1)) & ~(PAGE_SIZE - 1);
 4746 
 4747         if (l1pt == 0)
 4748                 panic("pmap_map_chunk: no L1 table provided");
 4749 
 4750 #ifdef VERBOSE_INIT_ARM     
 4751         printf("pmap_map_chunk: pa=0x%x va=0x%x size=0x%x resid=0x%x "
 4752             "prot=0x%x cache=%d\n", pa, va, size, resid, prot, cache);
 4753 #endif
 4754 
 4755         switch (cache) {
 4756         case PTE_NOCACHE:
 4757         default:
 4758                 f1 = 0;
 4759                 f2l = 0;
 4760                 f2s = 0;
 4761                 break;
 4762 
 4763         case PTE_CACHE:
 4764                 f1 = pte_l1_s_cache_mode;
 4765                 f2l = pte_l2_l_cache_mode;
 4766                 f2s = pte_l2_s_cache_mode;
 4767                 break;
 4768 
 4769         case PTE_PAGETABLE:
 4770                 f1 = pte_l1_s_cache_mode_pt;
 4771                 f2l = pte_l2_l_cache_mode_pt;
 4772                 f2s = pte_l2_s_cache_mode_pt;
 4773                 break;
 4774         }
 4775 
 4776         size = resid;
 4777 
 4778         while (resid > 0) {
 4779                 /* See if we can use a section mapping. */
 4780                 if (L1_S_MAPPABLE_P(va, pa, resid)) {
 4781 #ifdef VERBOSE_INIT_ARM
 4782                         printf("S");
 4783 #endif
 4784                         pde[va >> L1_S_SHIFT] = L1_S_PROTO | pa |
 4785                             L1_S_PROT(PTE_KERNEL, prot) | f1 |
 4786                             L1_S_DOM(PMAP_DOMAIN_KERNEL);
 4787                         PTE_SYNC(&pde[va >> L1_S_SHIFT]);
 4788                         va += L1_S_SIZE;
 4789                         pa += L1_S_SIZE;
 4790                         resid -= L1_S_SIZE;
 4791                         continue;
 4792                 }
 4793 
 4794                 /*
 4795                  * Ok, we're going to use an L2 table.  Make sure
 4796                  * one is actually in the corresponding L1 slot
 4797                  * for the current VA.
 4798                  */
 4799                 if ((pde[va >> L1_S_SHIFT] & L1_TYPE_MASK) != L1_TYPE_C)
 4800                         panic("pmap_map_chunk: no L2 table for VA 0x%08x", va);
 4801 
 4802                 pte = (pt_entry_t *) kernel_pt_lookup(
 4803                     pde[L1_IDX(va)] & L1_C_ADDR_MASK);
 4804                 if (pte == NULL)
 4805                         panic("pmap_map_chunk: can't find L2 table for VA"
 4806                             "0x%08x", va);
 4807                 /* See if we can use a L2 large page mapping. */
 4808                 if (L2_L_MAPPABLE_P(va, pa, resid)) {
 4809 #ifdef VERBOSE_INIT_ARM
 4810                         printf("L");
 4811 #endif
 4812                         for (i = 0; i < 16; i++) {
 4813                                 pte[l2pte_index(va) + i] =
 4814                                     L2_L_PROTO | pa |
 4815                                     L2_L_PROT(PTE_KERNEL, prot) | f2l;
 4816                                 PTE_SYNC(&pte[l2pte_index(va) + i]);
 4817                         }
 4818                         va += L2_L_SIZE;
 4819                         pa += L2_L_SIZE;
 4820                         resid -= L2_L_SIZE;
 4821                         continue;
 4822                 }
 4823 
 4824                 /* Use a small page mapping. */
 4825 #ifdef VERBOSE_INIT_ARM
 4826                 printf("P");
 4827 #endif
 4828                 pte[l2pte_index(va)] =
 4829                     L2_S_PROTO | pa | L2_S_PROT(PTE_KERNEL, prot) | f2s;
 4830                 PTE_SYNC(&pte[l2pte_index(va)]);
 4831                 va += PAGE_SIZE;
 4832                 pa += PAGE_SIZE;
 4833                 resid -= PAGE_SIZE;
 4834         }
 4835 #ifdef VERBOSE_INIT_ARM
 4836         printf("\n");
 4837 #endif
 4838         return (size);
 4839 
 4840 }
 4841 
 4842 /********************** Static device map routines ***************************/
 4843 
 4844 static const struct pmap_devmap *pmap_devmap_table;
 4845 
 4846 /*
 4847  * Register the devmap table.  This is provided in case early console
 4848  * initialization needs to register mappings created by bootstrap code
 4849  * before pmap_devmap_bootstrap() is called.
 4850  */
 4851 void
 4852 pmap_devmap_register(const struct pmap_devmap *table)
 4853 {
 4854 
 4855         pmap_devmap_table = table;
 4856 }
 4857 
 4858 /*
 4859  * Map all of the static regions in the devmap table, and remember
 4860  * the devmap table so other parts of the kernel can look up entries
 4861  * later.
 4862  */
 4863 void
 4864 pmap_devmap_bootstrap(vm_offset_t l1pt, const struct pmap_devmap *table)
 4865 {
 4866         int i;
 4867 
 4868         pmap_devmap_table = table;
 4869 
 4870         for (i = 0; pmap_devmap_table[i].pd_size != 0; i++) {
 4871 #ifdef VERBOSE_INIT_ARM
 4872                 printf("devmap: %08x -> %08x @ %08x\n",
 4873                     pmap_devmap_table[i].pd_pa,
 4874                     pmap_devmap_table[i].pd_pa +
 4875                         pmap_devmap_table[i].pd_size - 1,
 4876                     pmap_devmap_table[i].pd_va);
 4877 #endif
 4878                 pmap_map_chunk(l1pt, pmap_devmap_table[i].pd_va,
 4879                     pmap_devmap_table[i].pd_pa,
 4880                     pmap_devmap_table[i].pd_size,
 4881                     pmap_devmap_table[i].pd_prot,
 4882                     pmap_devmap_table[i].pd_cache);
 4883         }
 4884 }
 4885 
 4886 const struct pmap_devmap *
 4887 pmap_devmap_find_pa(vm_paddr_t pa, vm_size_t size)
 4888 {
 4889         int i;
 4890 
 4891         if (pmap_devmap_table == NULL)
 4892                 return (NULL);
 4893 
 4894         for (i = 0; pmap_devmap_table[i].pd_size != 0; i++) {
 4895                 if (pa >= pmap_devmap_table[i].pd_pa &&
 4896                     pa + size <= pmap_devmap_table[i].pd_pa +
 4897                                  pmap_devmap_table[i].pd_size)
 4898                         return (&pmap_devmap_table[i]);
 4899         }
 4900 
 4901         return (NULL);
 4902 }
 4903 
 4904 const struct pmap_devmap *
 4905 pmap_devmap_find_va(vm_offset_t va, vm_size_t size)
 4906 {
 4907         int i;
 4908 
 4909         if (pmap_devmap_table == NULL)
 4910                 return (NULL);
 4911 
 4912         for (i = 0; pmap_devmap_table[i].pd_size != 0; i++) {
 4913                 if (va >= pmap_devmap_table[i].pd_va &&
 4914                     va + size <= pmap_devmap_table[i].pd_va +
 4915                                  pmap_devmap_table[i].pd_size)
 4916                         return (&pmap_devmap_table[i]);
 4917         }
 4918 
 4919         return (NULL);
 4920 }
 4921 

Cache object: 0b05d009b62d7fb4fe603f1cd3b5870e


[ source navigation ] [ diff markup ] [ identifier search ] [ freetext search ] [ file search ] [ list types ] [ track identifier ]


This page is part of the FreeBSD/Linux Linux Kernel Cross-Reference, and was automatically generated using a modified version of the LXR engine.