FreeBSD/Linux Kernel Cross Reference
sys/powerpc/aim/mmu_oea.c

    1 /*-
    2  * SPDX-License-Identifier: BSD-2-Clause-FreeBSD AND BSD-4-Clause
    3  *
    4  * Copyright (c) 2001 The NetBSD Foundation, Inc.
    5  * All rights reserved.
    6  *
    7  * This code is derived from software contributed to The NetBSD Foundation
    8  * by Matt Thomas <matt@3am-software.com> of Allegro Networks, Inc.
    9  *
   10  * Redistribution and use in source and binary forms, with or without
   11  * modification, are permitted provided that the following conditions
   12  * are met:
   13  * 1. Redistributions of source code must retain the above copyright
   14  *    notice, this list of conditions and the following disclaimer.
   15  * 2. Redistributions in binary form must reproduce the above copyright
   16  *    notice, this list of conditions and the following disclaimer in the
   17  *    documentation and/or other materials provided with the distribution.
   18  *
   19  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
   20  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
   21  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
   22  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
   23  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
   24  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
   25  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
   26  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
   27  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
   28  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
   29  * POSSIBILITY OF SUCH DAMAGE.
   30  */
   31 /*-
   32  * Copyright (C) 1995, 1996 Wolfgang Solfrank.
   33  * Copyright (C) 1995, 1996 TooLs GmbH.
   34  * All rights reserved.
   35  *
   36  * Redistribution and use in source and binary forms, with or without
   37  * modification, are permitted provided that the following conditions
   38  * are met:
   39  * 1. Redistributions of source code must retain the above copyright
   40  *    notice, this list of conditions and the following disclaimer.
   41  * 2. Redistributions in binary form must reproduce the above copyright
   42  *    notice, this list of conditions and the following disclaimer in the
   43  *    documentation and/or other materials provided with the distribution.
   44  * 3. All advertising materials mentioning features or use of this software
   45  *    must display the following acknowledgement:
   46  *      This product includes software developed by TooLs GmbH.
   47  * 4. The name of TooLs GmbH may not be used to endorse or promote products
   48  *    derived from this software without specific prior written permission.
   49  *
   50  * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
   51  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
   52  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
   53  * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
   54  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
   55  * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
   56  * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
   57  * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
   58  * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
   59  * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
   60  *
   61  * $NetBSD: pmap.c,v 1.28 2000/03/26 20:42:36 kleink Exp $
   62  */
   63 /*-
   64  * Copyright (C) 2001 Benno Rice.
   65  * All rights reserved.
   66  *
   67  * Redistribution and use in source and binary forms, with or without
   68  * modification, are permitted provided that the following conditions
   69  * are met:
   70  * 1. Redistributions of source code must retain the above copyright
   71  *    notice, this list of conditions and the following disclaimer.
   72  * 2. Redistributions in binary form must reproduce the above copyright
   73  *    notice, this list of conditions and the following disclaimer in the
   74  *    documentation and/or other materials provided with the distribution.
   75  *
   76  * THIS SOFTWARE IS PROVIDED BY Benno Rice ``AS IS'' AND ANY EXPRESS OR
   77  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
   78  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
   79  * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
   80  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
   81  * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
   82  * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
   83  * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
   84  * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
   85  * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
   86  */
   87 
   88 #include <sys/cdefs.h>
   89 __FBSDID("$FreeBSD$");
   90 
   91 /*
   92  * Manages physical address maps.
   93  *
   94  * Since the information managed by this module is also stored by the
   95  * logical address mapping module, this module may throw away valid virtual
   96  * to physical mappings at almost any time.  However, invalidations of
   97  * mappings must be done as requested.
   98  *
   99  * In order to cope with hardware architectures which make virtual to
  100  * physical map invalidates expensive, this module may delay invalidate
   101  * or reduced protection operations until such time as they are actually
  102  * necessary.  This module is given full information as to which processors
  103  * are currently using which maps, and to when physical maps must be made
  104  * correct.
  105  */
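       /*
        * Editorial illustration (not part of the original file): the contract
        * above means the machine-independent VM layer can always reconstruct a
        * discarded mapping from its own data structures, so a typical (hedged,
        * simplified) call sequence into this module looks like:
        *
        *   pmap_enter(pmap, va, m, VM_PROT_READ | VM_PROT_WRITE, 0, 0);
        *   ...
        *   pmap_protect(pmap, sva, eva, VM_PROT_READ);  // reduction; may be deferred
        *   pmap_remove(pmap, sva, eva);                 // invalidation; done as requested
        */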
  106 
  107 #include "opt_kstack_pages.h"
  108 
  109 #include <sys/param.h>
  110 #include <sys/kernel.h>
  111 #include <sys/conf.h>
  112 #include <sys/queue.h>
  113 #include <sys/cpuset.h>
  114 #include <sys/kerneldump.h>
  115 #include <sys/ktr.h>
  116 #include <sys/lock.h>
  117 #include <sys/mman.h>
  118 #include <sys/msgbuf.h>
  119 #include <sys/mutex.h>
  120 #include <sys/proc.h>
  121 #include <sys/rwlock.h>
  122 #include <sys/sched.h>
  123 #include <sys/sysctl.h>
  124 #include <sys/systm.h>
  125 #include <sys/vmmeter.h>
  126 
  127 #include <dev/ofw/openfirm.h>
  128 
  129 #include <vm/vm.h>
  130 #include <vm/pmap.h>
  131 #include <vm/vm_param.h>
  132 #include <vm/vm_kern.h>
  133 #include <vm/vm_page.h>
  134 #include <vm/vm_map.h>
  135 #include <vm/vm_object.h>
  136 #include <vm/vm_extern.h>
  137 #include <vm/vm_page.h>
  138 #include <vm/vm_phys.h>
  139 #include <vm/vm_pageout.h>
  140 #include <vm/uma.h>
  141 
  142 #include <machine/cpu.h>
  143 #include <machine/platform.h>
  144 #include <machine/bat.h>
  145 #include <machine/frame.h>
  146 #include <machine/md_var.h>
  147 #include <machine/psl.h>
  148 #include <machine/pte.h>
  149 #include <machine/smp.h>
  150 #include <machine/sr.h>
  151 #include <machine/mmuvar.h>
  152 #include <machine/trap.h>
  153 
  154 #define MOEA_DEBUG
  155 
  156 #define TODO    panic("%s: not implemented", __func__);
  157 
  158 #define VSID_MAKE(sr, hash)     ((sr) | (((hash) & 0xfffff) << 4))
  159 #define VSID_TO_SR(vsid)        ((vsid) & 0xf)
  160 #define VSID_TO_HASH(vsid)      (((vsid) >> 4) & 0xfffff)
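       /*
        * Editorial sketch (not in the original source): VSID_MAKE() packs the
        * 4-bit segment register number into the low nibble and up to 20 bits
        * of hash above it; the two accessors invert that packing, e.g.
        *
        *   VSID_MAKE(0x3, 0x12345) == 0x123453
        *   VSID_TO_SR(0x123453)    == 0x3
        *   VSID_TO_HASH(0x123453)  == 0x12345
        */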
  161 
  162 /* Get physical address from PVO. */
  163 #define PVO_PADDR(pvo)          ((pvo)->pvo_pte.pte.pte_lo & PTE_RPGN)
  164 
  165 struct ofw_map {
  166         vm_offset_t     om_va;
  167         vm_size_t       om_len;
  168         vm_offset_t     om_pa;
  169         u_int           om_mode;
  170 };
  171 
  172 extern unsigned char _etext[];
  173 extern unsigned char _end[];
  174 
  175 /*
  176  * Map of physical memory regions.
  177  */
  178 static struct   mem_region *regions;
  179 static struct   mem_region *pregions;
  180 static u_int    phys_avail_count;
  181 static int      regions_sz, pregions_sz;
  182 static struct   ofw_map *translations;
  183 
  184 /*
  185  * Lock for the pteg and pvo tables.
  186  */
  187 struct mtx      moea_table_mutex;
  188 struct mtx      moea_vsid_mutex;
  189 
  190 /* tlbie instruction synchronization */
  191 static struct mtx tlbie_mtx;
  192 
  193 /*
  194  * PTEG data.
  195  */
  196 static struct   pteg *moea_pteg_table;
  197 u_int           moea_pteg_count;
  198 u_int           moea_pteg_mask;
  199 
  200 /*
  201  * PVO data.
  202  */
  203 struct  pvo_head *moea_pvo_table;               /* pvo entries by pteg index */
  204 struct  pvo_head moea_pvo_kunmanaged =
  205     LIST_HEAD_INITIALIZER(moea_pvo_kunmanaged); /* list of unmanaged pages */
  206 
  207 static struct rwlock_padalign pvh_global_lock;
  208 
  209 uma_zone_t      moea_upvo_zone; /* zone for pvo entries for unmanaged pages */
  210 uma_zone_t      moea_mpvo_zone; /* zone for pvo entries for managed pages */
  211 
  212 #define BPVO_POOL_SIZE  32768
  213 static struct   pvo_entry *moea_bpvo_pool;
  214 static int      moea_bpvo_pool_index = 0;
  215 
  216 #define VSID_NBPW       (sizeof(u_int32_t) * 8)
  217 static u_int    moea_vsid_bitmap[NPMAPS / VSID_NBPW];
  218 
  219 static boolean_t moea_initialized = FALSE;
  220 
  221 /*
  222  * Statistics.
  223  */
  224 u_int   moea_pte_valid = 0;
  225 u_int   moea_pte_overflow = 0;
  226 u_int   moea_pte_replacements = 0;
  227 u_int   moea_pvo_entries = 0;
  228 u_int   moea_pvo_enter_calls = 0;
  229 u_int   moea_pvo_remove_calls = 0;
  230 u_int   moea_pte_spills = 0;
  231 SYSCTL_INT(_machdep, OID_AUTO, moea_pte_valid, CTLFLAG_RD, &moea_pte_valid,
  232     0, "");
  233 SYSCTL_INT(_machdep, OID_AUTO, moea_pte_overflow, CTLFLAG_RD,
  234     &moea_pte_overflow, 0, "");
  235 SYSCTL_INT(_machdep, OID_AUTO, moea_pte_replacements, CTLFLAG_RD,
  236     &moea_pte_replacements, 0, "");
  237 SYSCTL_INT(_machdep, OID_AUTO, moea_pvo_entries, CTLFLAG_RD, &moea_pvo_entries,
  238     0, "");
  239 SYSCTL_INT(_machdep, OID_AUTO, moea_pvo_enter_calls, CTLFLAG_RD,
  240     &moea_pvo_enter_calls, 0, "");
  241 SYSCTL_INT(_machdep, OID_AUTO, moea_pvo_remove_calls, CTLFLAG_RD,
  242     &moea_pvo_remove_calls, 0, "");
  243 SYSCTL_INT(_machdep, OID_AUTO, moea_pte_spills, CTLFLAG_RD,
  244     &moea_pte_spills, 0, "");
  245 
  246 /*
  247  * Allocate physical memory for use in moea_bootstrap.
  248  */
  249 static vm_offset_t      moea_bootstrap_alloc(vm_size_t, u_int);
  250 
  251 /*
  252  * PTE calls.
  253  */
  254 static int              moea_pte_insert(u_int, struct pte *);
  255 
  256 /*
  257  * PVO calls.
  258  */
  259 static int      moea_pvo_enter(pmap_t, uma_zone_t, struct pvo_head *,
  260                     vm_offset_t, vm_paddr_t, u_int, int);
  261 static void     moea_pvo_remove(struct pvo_entry *, int);
  262 static struct   pvo_entry *moea_pvo_find_va(pmap_t, vm_offset_t, int *);
  263 static struct   pte *moea_pvo_to_pte(const struct pvo_entry *, int);
  264 
  265 /*
  266  * Utility routines.
  267  */
  268 static int              moea_enter_locked(pmap_t, vm_offset_t, vm_page_t,
  269                             vm_prot_t, u_int, int8_t);
  270 static void             moea_syncicache(vm_paddr_t, vm_size_t);
  271 static boolean_t        moea_query_bit(vm_page_t, int);
  272 static u_int            moea_clear_bit(vm_page_t, int);
  273 static void             moea_kremove(vm_offset_t);
  274 int             moea_pte_spill(vm_offset_t);
  275 
  276 /*
  277  * Kernel MMU interface
  278  */
  279 void moea_clear_modify(vm_page_t);
  280 void moea_copy_page(vm_page_t, vm_page_t);
  281 void moea_copy_pages(vm_page_t *ma, vm_offset_t a_offset,
  282     vm_page_t *mb, vm_offset_t b_offset, int xfersize);
  283 int moea_enter(pmap_t, vm_offset_t, vm_page_t, vm_prot_t, u_int,
  284     int8_t);
  285 void moea_enter_object(pmap_t, vm_offset_t, vm_offset_t, vm_page_t,
  286     vm_prot_t);
  287 void moea_enter_quick(pmap_t, vm_offset_t, vm_page_t, vm_prot_t);
  288 vm_paddr_t moea_extract(pmap_t, vm_offset_t);
  289 vm_page_t moea_extract_and_hold(pmap_t, vm_offset_t, vm_prot_t);
  290 void moea_init(void);
  291 boolean_t moea_is_modified(vm_page_t);
  292 boolean_t moea_is_prefaultable(pmap_t, vm_offset_t);
  293 boolean_t moea_is_referenced(vm_page_t);
  294 int moea_ts_referenced(vm_page_t);
  295 vm_offset_t moea_map(vm_offset_t *, vm_paddr_t, vm_paddr_t, int);
  296 static int moea_mincore(pmap_t, vm_offset_t, vm_paddr_t *);
  297 boolean_t moea_page_exists_quick(pmap_t, vm_page_t);
  298 void moea_page_init(vm_page_t);
  299 int moea_page_wired_mappings(vm_page_t);
  300 int moea_pinit(pmap_t);
  301 void moea_pinit0(pmap_t);
  302 void moea_protect(pmap_t, vm_offset_t, vm_offset_t, vm_prot_t);
  303 void moea_qenter(vm_offset_t, vm_page_t *, int);
  304 void moea_qremove(vm_offset_t, int);
  305 void moea_release(pmap_t);
  306 void moea_remove(pmap_t, vm_offset_t, vm_offset_t);
  307 void moea_remove_all(vm_page_t);
  308 void moea_remove_write(vm_page_t);
  309 void moea_unwire(pmap_t, vm_offset_t, vm_offset_t);
  310 void moea_zero_page(vm_page_t);
  311 void moea_zero_page_area(vm_page_t, int, int);
  312 void moea_activate(struct thread *);
  313 void moea_deactivate(struct thread *);
  314 void moea_cpu_bootstrap(int);
  315 void moea_bootstrap(vm_offset_t, vm_offset_t);
  316 void *moea_mapdev(vm_paddr_t, vm_size_t);
  317 void *moea_mapdev_attr(vm_paddr_t, vm_size_t, vm_memattr_t);
  318 void moea_unmapdev(void *, vm_size_t);
  319 vm_paddr_t moea_kextract(vm_offset_t);
  320 void moea_kenter_attr(vm_offset_t, vm_paddr_t, vm_memattr_t);
  321 void moea_kenter(vm_offset_t, vm_paddr_t);
  322 void moea_page_set_memattr(vm_page_t m, vm_memattr_t ma);
  323 boolean_t moea_dev_direct_mapped(vm_paddr_t, vm_size_t);
  324 static void moea_sync_icache(pmap_t, vm_offset_t, vm_size_t);
  325 void moea_dumpsys_map(vm_paddr_t pa, size_t sz, void **va);
  326 void moea_scan_init(void);
  327 vm_offset_t moea_quick_enter_page(vm_page_t m);
  328 void moea_quick_remove_page(vm_offset_t addr);
  329 boolean_t moea_page_is_mapped(vm_page_t m);
  330 bool moea_ps_enabled(pmap_t pmap);
  331 static int moea_map_user_ptr(pmap_t pm,
  332     volatile const void *uaddr, void **kaddr, size_t ulen, size_t *klen);
  333 static int moea_decode_kernel_ptr(vm_offset_t addr,
  334     int *is_user, vm_offset_t *decoded_addr);
  335 
  336 static struct pmap_funcs moea_methods = {
  337         .clear_modify = moea_clear_modify,
  338         .copy_page = moea_copy_page,
  339         .copy_pages = moea_copy_pages,
  340         .enter = moea_enter,
  341         .enter_object = moea_enter_object,
  342         .enter_quick = moea_enter_quick,
  343         .extract = moea_extract,
  344         .extract_and_hold = moea_extract_and_hold,
  345         .init = moea_init,
  346         .is_modified = moea_is_modified,
  347         .is_prefaultable = moea_is_prefaultable,
  348         .is_referenced = moea_is_referenced,
  349         .ts_referenced = moea_ts_referenced,
  350         .map =                  moea_map,
  351         .page_exists_quick = moea_page_exists_quick,
  352         .page_init = moea_page_init,
  353         .page_wired_mappings = moea_page_wired_mappings,
  354         .pinit = moea_pinit,
  355         .pinit0 = moea_pinit0,
  356         .protect = moea_protect,
  357         .qenter = moea_qenter,
  358         .qremove = moea_qremove,
  359         .release = moea_release,
  360         .remove = moea_remove,
  361         .remove_all = moea_remove_all,
  362         .mincore = moea_mincore,
  363         .remove_write = moea_remove_write,
  364         .sync_icache = moea_sync_icache,
  365         .unwire = moea_unwire,
  366         .zero_page =            moea_zero_page,
  367         .zero_page_area = moea_zero_page_area,
  368         .activate = moea_activate,
  369         .deactivate =           moea_deactivate,
  370         .page_set_memattr = moea_page_set_memattr,
  371         .quick_enter_page =  moea_quick_enter_page,
  372         .quick_remove_page =  moea_quick_remove_page,
  373         .page_is_mapped = moea_page_is_mapped,
  374         .ps_enabled = moea_ps_enabled,
  375 
  376         /* Internal interfaces */
  377         .bootstrap =            moea_bootstrap,
  378         .cpu_bootstrap =        moea_cpu_bootstrap,
  379         .mapdev_attr = moea_mapdev_attr,
  380         .mapdev = moea_mapdev,
  381         .unmapdev = moea_unmapdev,
  382         .kextract = moea_kextract,
  383         .kenter = moea_kenter,
  384         .kenter_attr = moea_kenter_attr,
  385         .dev_direct_mapped = moea_dev_direct_mapped,
  386         .dumpsys_pa_init = moea_scan_init,
  387         .dumpsys_map_chunk = moea_dumpsys_map,
  388         .map_user_ptr = moea_map_user_ptr,
  389         .decode_kernel_ptr =  moea_decode_kernel_ptr,
  390 };
  391 
  392 MMU_DEF(oea_mmu, MMU_TYPE_OEA, moea_methods);
  393 
  394 static __inline uint32_t
  395 moea_calc_wimg(vm_paddr_t pa, vm_memattr_t ma)
  396 {
  397         uint32_t pte_lo;
  398         int i;
  399 
  400         if (ma != VM_MEMATTR_DEFAULT) {
  401                 switch (ma) {
  402                 case VM_MEMATTR_UNCACHEABLE:
  403                         return (PTE_I | PTE_G);
  404                 case VM_MEMATTR_CACHEABLE:
  405                         return (PTE_M);
  406                 case VM_MEMATTR_WRITE_COMBINING:
  407                 case VM_MEMATTR_WRITE_BACK:
  408                 case VM_MEMATTR_PREFETCHABLE:
  409                         return (PTE_I);
  410                 case VM_MEMATTR_WRITE_THROUGH:
  411                         return (PTE_W | PTE_M);
  412                 }
  413         }
  414 
  415         /*
  416          * Assume the page is cache inhibited and access is guarded unless
  417          * it's in our available memory array.
  418          */
  419         pte_lo = PTE_I | PTE_G;
  420         for (i = 0; i < pregions_sz; i++) {
  421                 if ((pa >= pregions[i].mr_start) &&
  422                     (pa < (pregions[i].mr_start + pregions[i].mr_size))) {
  423                         pte_lo = PTE_M;
  424                         break;
  425                 }
  426         }
  427 
  428         return pte_lo;
  429 }
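       /*
        * Editorial usage note (hedged sketch, mirroring moea_enter_locked()
        * and moea_kenter_attr() later in this file): callers OR the WIMG bits
        * returned above together with the page-protection bits before the PTE
        * is installed, roughly:
        *
        *   pte_lo  = moea_calc_wimg(VM_PAGE_TO_PHYS(m), pmap_page_get_memattr(m));
        *   pte_lo |= (prot & VM_PROT_WRITE) ? PTE_BW : PTE_BR;
        */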
  430 
  431 /*
  432  * Translate OFW translations into VM attributes.
  433  */
  434 static __inline vm_memattr_t
  435 moea_bootstrap_convert_wimg(uint32_t mode)
  436 {
  437 
  438         switch (mode) {
  439         case (PTE_I | PTE_G):
  440                 /* PCI device memory */
  441                 return VM_MEMATTR_UNCACHEABLE;
  442         case (PTE_M):
  443                 /* Explicitly coherent */
  444                 return VM_MEMATTR_CACHEABLE;
  445         case 0: /* Default claim */
  446         case 2: /* Alternate PP bits set by OF for the original payload */
  447                 /* "Normal" memory. */
  448                 return VM_MEMATTR_DEFAULT;
  449 
  450         default:
  451                 /* Err on the side of caution for unknowns */
  452                 /* XXX should we panic instead? */
  453                 return VM_MEMATTR_UNCACHEABLE;
  454         }
  455 }
  456 
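       /*
        * Editorial note (hedged): tlbie() below follows the architected
        * invalidation sequence: ptesync to order prior PTE stores, tlbie for
        * the target effective address, then eieio/tlbsync/ptesync to wait for
        * the invalidation to complete on all processors.  The spin lock
        * serializes callers because only one tlbie/tlbsync sequence may be in
        * flight system-wide at a time.
        */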
  457 static void
  458 tlbie(vm_offset_t va)
  459 {
  460 
  461         mtx_lock_spin(&tlbie_mtx);
  462         __asm __volatile("ptesync");
  463         __asm __volatile("tlbie %0" :: "r"(va));
  464         __asm __volatile("eieio; tlbsync; ptesync");
  465         mtx_unlock_spin(&tlbie_mtx);
  466 }
  467 
  468 static void
  469 tlbia(void)
  470 {
  471         vm_offset_t va;
  472 
  473         for (va = 0; va < 0x00040000; va += 0x00001000) {
  474                 __asm __volatile("tlbie %0" :: "r"(va));
  475                 powerpc_sync();
  476         }
  477         __asm __volatile("tlbsync");
  478         powerpc_sync();
  479 }
  480 
  481 static __inline int
  482 va_to_sr(u_int *sr, vm_offset_t va)
  483 {
  484         return (sr[(uintptr_t)va >> ADDR_SR_SHFT]);
  485 }
  486 
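       /*
        * Editorial note (hedged): va_to_pteg() below computes the OEA
        * "primary hash": the segment's VSID XORed with the virtual page index
        * of the address, masked down to the number of PTE groups.  The
        * secondary hash used by the insert/spill paths is derived from this
        * index (XORed with moea_pteg_mask and flagged with PTE_HID).
        */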
  487 static __inline u_int
  488 va_to_pteg(u_int sr, vm_offset_t addr)
  489 {
  490         u_int hash;
  491 
  492         hash = (sr & SR_VSID_MASK) ^ (((u_int)addr & ADDR_PIDX) >>
  493             ADDR_PIDX_SHFT);
  494         return (hash & moea_pteg_mask);
  495 }
  496 
  497 static __inline struct pvo_head *
  498 vm_page_to_pvoh(vm_page_t m)
  499 {
  500 
  501         return (&m->md.mdpg_pvoh);
  502 }
  503 
  504 static __inline void
  505 moea_attr_clear(vm_page_t m, int ptebit)
  506 {
  507 
  508         rw_assert(&pvh_global_lock, RA_WLOCKED);
  509         m->md.mdpg_attrs &= ~ptebit;
  510 }
  511 
  512 static __inline int
  513 moea_attr_fetch(vm_page_t m)
  514 {
  515 
  516         return (m->md.mdpg_attrs);
  517 }
  518 
  519 static __inline void
  520 moea_attr_save(vm_page_t m, int ptebit)
  521 {
  522 
  523         rw_assert(&pvh_global_lock, RA_WLOCKED);
  524         m->md.mdpg_attrs |= ptebit;
  525 }
  526 
  527 static __inline int
  528 moea_pte_compare(const struct pte *pt, const struct pte *pvo_pt)
  529 {
  530         if (pt->pte_hi == pvo_pt->pte_hi)
  531                 return (1);
  532 
  533         return (0);
  534 }
  535 
  536 static __inline int
  537 moea_pte_match(struct pte *pt, u_int sr, vm_offset_t va, int which)
  538 {
  539         return (pt->pte_hi & ~PTE_VALID) ==
  540             (((sr & SR_VSID_MASK) << PTE_VSID_SHFT) |
  541             ((va >> ADDR_API_SHFT) & PTE_API) | which);
  542 }
  543 
  544 static __inline void
  545 moea_pte_create(struct pte *pt, u_int sr, vm_offset_t va, u_int pte_lo)
  546 {
  547 
  548         mtx_assert(&moea_table_mutex, MA_OWNED);
  549 
  550         /*
  551          * Construct a PTE.  Default to IMB initially.  Valid bit only gets
  552          * set when the real pte is set in memory.
  553          *
  554          * Note: Don't set the valid bit for correct operation of tlb update.
  555          */
  556         pt->pte_hi = ((sr & SR_VSID_MASK) << PTE_VSID_SHFT) |
  557             (((va & ADDR_PIDX) >> ADDR_API_SHFT) & PTE_API);
  558         pt->pte_lo = pte_lo;
  559 }
  560 
  561 static __inline void
  562 moea_pte_synch(struct pte *pt, struct pte *pvo_pt)
  563 {
  564 
  565         mtx_assert(&moea_table_mutex, MA_OWNED);
  566         pvo_pt->pte_lo |= pt->pte_lo & (PTE_REF | PTE_CHG);
  567 }
  568 
  569 static __inline void
  570 moea_pte_clear(struct pte *pt, vm_offset_t va, int ptebit)
  571 {
  572 
  573         mtx_assert(&moea_table_mutex, MA_OWNED);
  574 
  575         /*
  576          * As shown in Section 7.6.3.2.3
  577          */
  578         pt->pte_lo &= ~ptebit;
  579         tlbie(va);
  580 }
  581 
  582 static __inline void
  583 moea_pte_set(struct pte *pt, struct pte *pvo_pt)
  584 {
  585 
  586         mtx_assert(&moea_table_mutex, MA_OWNED);
  587         pvo_pt->pte_hi |= PTE_VALID;
  588 
  589         /*
  590          * Update the PTE as defined in section 7.6.3.1.
  591          * Note that the REF/CHG bits are from pvo_pt and thus should have
  592          * been saved so this routine can restore them (if desired).
  593          */
  594         pt->pte_lo = pvo_pt->pte_lo;
  595         powerpc_sync();
  596         pt->pte_hi = pvo_pt->pte_hi;
  597         powerpc_sync();
  598         moea_pte_valid++;
  599 }
  600 
  601 static __inline void
  602 moea_pte_unset(struct pte *pt, struct pte *pvo_pt, vm_offset_t va)
  603 {
  604 
  605         mtx_assert(&moea_table_mutex, MA_OWNED);
  606         pvo_pt->pte_hi &= ~PTE_VALID;
  607 
  608         /*
   609  * Force the ref & chg bits back into the PTEs.
  610          */
  611         powerpc_sync();
  612 
  613         /*
  614          * Invalidate the pte.
  615          */
  616         pt->pte_hi &= ~PTE_VALID;
  617 
  618         tlbie(va);
  619 
  620         /*
   621  * Save the ref & chg bits.
  622          */
  623         moea_pte_synch(pt, pvo_pt);
  624         moea_pte_valid--;
  625 }
  626 
  627 static __inline void
  628 moea_pte_change(struct pte *pt, struct pte *pvo_pt, vm_offset_t va)
  629 {
  630 
  631         /*
  632          * Invalidate the PTE
  633          */
  634         moea_pte_unset(pt, pvo_pt, va);
  635         moea_pte_set(pt, pvo_pt);
  636 }
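       /*
        * Editorial note (hedged): a valid PTE may not be modified in place,
        * so moea_pte_change() first invalidates the entry and its TLB copy
        * via moea_pte_unset() and only then rewrites it with moea_pte_set(),
        * in keeping with the Section 7.6.3 references in the comments above.
        */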
  637 
  638 /*
  639  * Quick sort callout for comparing memory regions.
  640  */
  641 static int      om_cmp(const void *a, const void *b);
  642 
  643 static int
  644 om_cmp(const void *a, const void *b)
  645 {
  646         const struct    ofw_map *mapa;
  647         const struct    ofw_map *mapb;
  648 
  649         mapa = a;
  650         mapb = b;
  651         if (mapa->om_pa < mapb->om_pa)
  652                 return (-1);
  653         else if (mapa->om_pa > mapb->om_pa)
  654                 return (1);
  655         else
  656                 return (0);
  657 }
  658 
  659 void
  660 moea_cpu_bootstrap(int ap)
  661 {
  662         u_int sdr;
  663         int i;
  664 
  665         if (ap) {
  666                 powerpc_sync();
  667                 __asm __volatile("mtdbatu 0,%0" :: "r"(battable[0].batu));
  668                 __asm __volatile("mtdbatl 0,%0" :: "r"(battable[0].batl));
  669                 isync();
  670                 __asm __volatile("mtibatu 0,%0" :: "r"(battable[0].batu));
  671                 __asm __volatile("mtibatl 0,%0" :: "r"(battable[0].batl));
  672                 isync();
  673         }
  674 
  675         __asm __volatile("mtdbatu 1,%0" :: "r"(battable[8].batu));
  676         __asm __volatile("mtdbatl 1,%0" :: "r"(battable[8].batl));
  677         isync();
  678 
  679         __asm __volatile("mtibatu 1,%0" :: "r"(0));
  680         __asm __volatile("mtdbatu 2,%0" :: "r"(0));
  681         __asm __volatile("mtibatu 2,%0" :: "r"(0));
  682         __asm __volatile("mtdbatu 3,%0" :: "r"(0));
  683         __asm __volatile("mtibatu 3,%0" :: "r"(0));
  684         isync();
  685 
  686         for (i = 0; i < 16; i++)
  687                 mtsrin(i << ADDR_SR_SHFT, kernel_pmap->pm_sr[i]);
  688         powerpc_sync();
  689 
  690         sdr = (u_int)moea_pteg_table | (moea_pteg_mask >> 10);
  691         __asm __volatile("mtsdr1 %0" :: "r"(sdr));
  692         isync();
  693 
  694         tlbia();
  695 }
  696 
  697 void
  698 moea_bootstrap(vm_offset_t kernelstart, vm_offset_t kernelend)
  699 {
  700         ihandle_t       mmui;
  701         phandle_t       chosen, mmu;
  702         int             sz;
  703         int             i, j;
  704         vm_size_t       size, physsz, hwphyssz;
  705         vm_offset_t     pa, va, off;
  706         void            *dpcpu;
  707 
  708         /*
  709          * Map PCI memory space.
  710          */
  711         battable[0x8].batl = BATL(0x80000000, BAT_I|BAT_G, BAT_PP_RW);
  712         battable[0x8].batu = BATU(0x80000000, BAT_BL_256M, BAT_Vs);
  713 
  714         battable[0x9].batl = BATL(0x90000000, BAT_I|BAT_G, BAT_PP_RW);
  715         battable[0x9].batu = BATU(0x90000000, BAT_BL_256M, BAT_Vs);
  716 
  717         battable[0xa].batl = BATL(0xa0000000, BAT_I|BAT_G, BAT_PP_RW);
  718         battable[0xa].batu = BATU(0xa0000000, BAT_BL_256M, BAT_Vs);
  719 
  720         battable[0xb].batl = BATL(0xb0000000, BAT_I|BAT_G, BAT_PP_RW);
  721         battable[0xb].batu = BATU(0xb0000000, BAT_BL_256M, BAT_Vs);
  722 
  723         powerpc_sync();
  724 
  725         /* map pci space */
  726         __asm __volatile("mtdbatu 1,%0" :: "r"(battable[8].batu));
  727         __asm __volatile("mtdbatl 1,%0" :: "r"(battable[8].batl));
  728         isync();
  729 
  730         /* set global direct map flag */
  731         hw_direct_map = 1;
  732 
  733         mem_regions(&pregions, &pregions_sz, &regions, &regions_sz);
  734         CTR0(KTR_PMAP, "moea_bootstrap: physical memory");
  735 
  736         for (i = 0; i < pregions_sz; i++) {
  737                 vm_offset_t pa;
  738                 vm_offset_t end;
  739 
  740                 CTR3(KTR_PMAP, "physregion: %#x - %#x (%#x)",
  741                         pregions[i].mr_start,
  742                         pregions[i].mr_start + pregions[i].mr_size,
  743                         pregions[i].mr_size);
  744                 /*
  745                  * Install entries into the BAT table to allow all
   746  * of physmem to be covered by on-demand BAT entries.
  747                  * The loop will sometimes set the same battable element
  748                  * twice, but that's fine since they won't be used for
  749                  * a while yet.
  750                  */
  751                 pa = pregions[i].mr_start & 0xf0000000;
  752                 end = pregions[i].mr_start + pregions[i].mr_size;
  753                 do {
  754                         u_int n = pa >> ADDR_SR_SHFT;
  755 
  756                         battable[n].batl = BATL(pa, BAT_M, BAT_PP_RW);
  757                         battable[n].batu = BATU(pa, BAT_BL_256M, BAT_Vs);
  758                         pa += SEGMENT_LENGTH;
  759                 } while (pa < end);
  760         }
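                /*
                 * Editorial example (hedged; assumes ADDR_SR_SHFT == 28 and
                 * SEGMENT_LENGTH == 256MB): a region starting at 0x18000000
                 * with size 0x20000000 starts the walk at pa = 0x10000000 and
                 * ends it at 0x38000000, so the loop above fills battable[1],
                 * battable[2] and battable[3] with 256MB BAT entries.
                 */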
  761 
  762         if (PHYS_AVAIL_ENTRIES < regions_sz)
  763                 panic("moea_bootstrap: phys_avail too small");
  764 
  765         phys_avail_count = 0;
  766         physsz = 0;
  767         hwphyssz = 0;
  768         TUNABLE_ULONG_FETCH("hw.physmem", (u_long *) &hwphyssz);
  769         for (i = 0, j = 0; i < regions_sz; i++, j += 2) {
  770                 CTR3(KTR_PMAP, "region: %#x - %#x (%#x)", regions[i].mr_start,
  771                     regions[i].mr_start + regions[i].mr_size,
  772                     regions[i].mr_size);
  773                 if (hwphyssz != 0 &&
  774                     (physsz + regions[i].mr_size) >= hwphyssz) {
  775                         if (physsz < hwphyssz) {
  776                                 phys_avail[j] = regions[i].mr_start;
  777                                 phys_avail[j + 1] = regions[i].mr_start +
  778                                     hwphyssz - physsz;
  779                                 physsz = hwphyssz;
  780                                 phys_avail_count++;
  781                         }
  782                         break;
  783                 }
  784                 phys_avail[j] = regions[i].mr_start;
  785                 phys_avail[j + 1] = regions[i].mr_start + regions[i].mr_size;
  786                 phys_avail_count++;
  787                 physsz += regions[i].mr_size;
  788         }
  789 
  790         /* Check for overlap with the kernel and exception vectors */
  791         for (j = 0; j < 2*phys_avail_count; j+=2) {
  792                 if (phys_avail[j] < EXC_LAST)
  793                         phys_avail[j] += EXC_LAST;
  794 
  795                 if (kernelstart >= phys_avail[j] &&
  796                     kernelstart < phys_avail[j+1]) {
  797                         if (kernelend < phys_avail[j+1]) {
  798                                 phys_avail[2*phys_avail_count] =
  799                                     (kernelend & ~PAGE_MASK) + PAGE_SIZE;
  800                                 phys_avail[2*phys_avail_count + 1] =
  801                                     phys_avail[j+1];
  802                                 phys_avail_count++;
  803                         }
  804 
  805                         phys_avail[j+1] = kernelstart & ~PAGE_MASK;
  806                 }
  807 
  808                 if (kernelend >= phys_avail[j] &&
  809                     kernelend < phys_avail[j+1]) {
  810                         if (kernelstart > phys_avail[j]) {
  811                                 phys_avail[2*phys_avail_count] = phys_avail[j];
  812                                 phys_avail[2*phys_avail_count + 1] =
  813                                     kernelstart & ~PAGE_MASK;
  814                                 phys_avail_count++;
  815                         }
  816 
  817                         phys_avail[j] = (kernelend & ~PAGE_MASK) + PAGE_SIZE;
  818                 }
  819         }
  820 
  821         physmem = btoc(physsz);
  822 
  823         /*
  824          * Allocate PTEG table.
  825          */
  826 #ifdef PTEGCOUNT
  827         moea_pteg_count = PTEGCOUNT;
  828 #else
  829         moea_pteg_count = 0x1000;
  830 
  831         while (moea_pteg_count < physmem)
  832                 moea_pteg_count <<= 1;
  833 
  834         moea_pteg_count >>= 1;
  835 #endif /* PTEGCOUNT */
  836 
  837         size = moea_pteg_count * sizeof(struct pteg);
  838         CTR2(KTR_PMAP, "moea_bootstrap: %d PTEGs, %d bytes", moea_pteg_count,
  839             size);
  840         moea_pteg_table = (struct pteg *)moea_bootstrap_alloc(size, size);
  841         CTR1(KTR_PMAP, "moea_bootstrap: PTEG table at %p", moea_pteg_table);
  842         bzero((void *)moea_pteg_table, moea_pteg_count * sizeof(struct pteg));
  843         moea_pteg_mask = moea_pteg_count - 1;
  844 
  845         /*
  846          * Allocate pv/overflow lists.
  847          */
  848         size = sizeof(struct pvo_head) * moea_pteg_count;
  849         moea_pvo_table = (struct pvo_head *)moea_bootstrap_alloc(size,
  850             PAGE_SIZE);
  851         CTR1(KTR_PMAP, "moea_bootstrap: PVO table at %p", moea_pvo_table);
  852         for (i = 0; i < moea_pteg_count; i++)
  853                 LIST_INIT(&moea_pvo_table[i]);
  854 
  855         /*
  856          * Initialize the lock that synchronizes access to the pteg and pvo
  857          * tables.
  858          */
  859         mtx_init(&moea_table_mutex, "pmap table", NULL, MTX_DEF |
  860             MTX_RECURSE);
  861         mtx_init(&moea_vsid_mutex, "VSID table", NULL, MTX_DEF);
  862 
  863         mtx_init(&tlbie_mtx, "tlbie", NULL, MTX_SPIN);
  864 
  865         /*
  866          * Initialise the unmanaged pvo pool.
  867          */
  868         moea_bpvo_pool = (struct pvo_entry *)moea_bootstrap_alloc(
  869                 BPVO_POOL_SIZE*sizeof(struct pvo_entry), 0);
  870         moea_bpvo_pool_index = 0;
  871 
  872         /*
  873          * Make sure kernel vsid is allocated as well as VSID 0.
  874          */
  875         moea_vsid_bitmap[(KERNEL_VSIDBITS & (NPMAPS - 1)) / VSID_NBPW]
  876                 |= 1 << (KERNEL_VSIDBITS % VSID_NBPW);
  877         moea_vsid_bitmap[0] |= 1;
  878 
  879         /*
  880          * Initialize the kernel pmap (which is statically allocated).
  881          */
  882         PMAP_LOCK_INIT(kernel_pmap);
  883         for (i = 0; i < 16; i++)
  884                 kernel_pmap->pm_sr[i] = EMPTY_SEGMENT + i;
  885         CPU_FILL(&kernel_pmap->pm_active);
  886         RB_INIT(&kernel_pmap->pmap_pvo);
  887 
  888         /*
  889          * Initialize the global pv list lock.
  890          */
  891         rw_init(&pvh_global_lock, "pmap pv global");
  892 
  893         /*
  894          * Set up the Open Firmware mappings
  895          */
  896         chosen = OF_finddevice("/chosen");
  897         if (chosen != -1 && OF_getprop(chosen, "mmu", &mmui, 4) != -1 &&
  898             (mmu = OF_instance_to_package(mmui)) != -1 &&
  899             (sz = OF_getproplen(mmu, "translations")) != -1) {
  900                 translations = NULL;
  901                 for (i = 0; phys_avail[i] != 0; i += 2) {
  902                         if (phys_avail[i + 1] >= sz) {
  903                                 translations = (struct ofw_map *)phys_avail[i];
  904                                 break;
  905                         }
  906                 }
  907                 if (translations == NULL)
  908                         panic("moea_bootstrap: no space to copy translations");
  909                 bzero(translations, sz);
  910                 if (OF_getprop(mmu, "translations", translations, sz) == -1)
  911                         panic("moea_bootstrap: can't get ofw translations");
  912                 CTR0(KTR_PMAP, "moea_bootstrap: translations");
  913                 sz /= sizeof(*translations);
  914                 qsort(translations, sz, sizeof (*translations), om_cmp);
  915                 for (i = 0; i < sz; i++) {
  916                         CTR3(KTR_PMAP, "translation: pa=%#x va=%#x len=%#x",
  917                             translations[i].om_pa, translations[i].om_va,
  918                             translations[i].om_len);
  919 
  920                         /*
  921                          * If the mapping is 1:1, let the RAM and device
  922                          * on-demand BAT tables take care of the translation.
  923                          *
  924                          * However, always enter mappings for segment 16,
  925                          * which is mixed-protection and therefore not
  926                          * compatible with a BAT entry.
  927                          */
  928                         if ((translations[i].om_va >> ADDR_SR_SHFT) != 0xf &&
  929                                 translations[i].om_va == translations[i].om_pa)
  930                                         continue;
  931 
  932                         /* Enter the pages */
  933                         for (off = 0; off < translations[i].om_len;
  934                             off += PAGE_SIZE)
  935                                 moea_kenter_attr(translations[i].om_va + off,
  936                                     translations[i].om_pa + off,
  937                                     moea_bootstrap_convert_wimg(translations[i].om_mode));
  938                 }
  939         }
  940 
  941         /*
  942          * Calculate the last available physical address.
  943          */
  944         for (i = 0; phys_avail[i + 2] != 0; i += 2)
  945                 ;
  946         Maxmem = powerpc_btop(phys_avail[i + 1]);
  947 
  948         moea_cpu_bootstrap(0);
  949         mtmsr(mfmsr() | PSL_DR | PSL_IR);
  950         pmap_bootstrapped++;
  951 
  952         /*
  953          * Set the start and end of kva.
  954          */
  955         virtual_avail = VM_MIN_KERNEL_ADDRESS;
  956         virtual_end = VM_MAX_SAFE_KERNEL_ADDRESS;
  957 
  958         /*
  959          * Allocate a kernel stack with a guard page for thread0 and map it
  960          * into the kernel page map.
  961          */
  962         pa = moea_bootstrap_alloc(kstack_pages * PAGE_SIZE, PAGE_SIZE);
  963         va = virtual_avail + KSTACK_GUARD_PAGES * PAGE_SIZE;
  964         virtual_avail = va + kstack_pages * PAGE_SIZE;
  965         CTR2(KTR_PMAP, "moea_bootstrap: kstack0 at %#x (%#x)", pa, va);
  966         thread0.td_kstack = va;
  967         thread0.td_kstack_pages = kstack_pages;
  968         for (i = 0; i < kstack_pages; i++) {
  969                 moea_kenter(va, pa);
  970                 pa += PAGE_SIZE;
  971                 va += PAGE_SIZE;
  972         }
  973 
  974         /*
  975          * Allocate virtual address space for the message buffer.
  976          */
  977         pa = msgbuf_phys = moea_bootstrap_alloc(msgbufsize, PAGE_SIZE);
  978         msgbufp = (struct msgbuf *)virtual_avail;
  979         va = virtual_avail;
  980         virtual_avail += round_page(msgbufsize);
  981         while (va < virtual_avail) {
  982                 moea_kenter(va, pa);
  983                 pa += PAGE_SIZE;
  984                 va += PAGE_SIZE;
  985         }
  986 
  987         /*
  988          * Allocate virtual address space for the dynamic percpu area.
  989          */
  990         pa = moea_bootstrap_alloc(DPCPU_SIZE, PAGE_SIZE);
  991         dpcpu = (void *)virtual_avail;
  992         va = virtual_avail;
  993         virtual_avail += DPCPU_SIZE;
  994         while (va < virtual_avail) {
  995                 moea_kenter(va, pa);
  996                 pa += PAGE_SIZE;
  997                 va += PAGE_SIZE;
  998         }
  999         dpcpu_init(dpcpu, 0);
 1000 }
 1001 
 1002 /*
  1003  * Activate a user pmap.  The pmap must be activated before its address
 1004  * space can be accessed in any way.
 1005  */
 1006 void
 1007 moea_activate(struct thread *td)
 1008 {
 1009         pmap_t  pm, pmr;
 1010 
 1011         /*
 1012          * Load all the data we need up front to encourage the compiler to
 1013          * not issue any loads while we have interrupts disabled below.
 1014          */
 1015         pm = &td->td_proc->p_vmspace->vm_pmap;
 1016         pmr = pm->pmap_phys;
 1017 
 1018         CPU_SET(PCPU_GET(cpuid), &pm->pm_active);
 1019         PCPU_SET(curpmap, pmr);
 1020 
 1021         mtsrin(USER_SR << ADDR_SR_SHFT, td->td_pcb->pcb_cpu.aim.usr_vsid);
 1022 }
 1023 
 1024 void
 1025 moea_deactivate(struct thread *td)
 1026 {
 1027         pmap_t  pm;
 1028 
 1029         pm = &td->td_proc->p_vmspace->vm_pmap;
 1030         CPU_CLR(PCPU_GET(cpuid), &pm->pm_active);
 1031         PCPU_SET(curpmap, NULL);
 1032 }
 1033 
 1034 void
 1035 moea_unwire(pmap_t pm, vm_offset_t sva, vm_offset_t eva)
 1036 {
 1037         struct  pvo_entry key, *pvo;
 1038 
 1039         PMAP_LOCK(pm);
 1040         key.pvo_vaddr = sva;
 1041         for (pvo = RB_NFIND(pvo_tree, &pm->pmap_pvo, &key);
 1042             pvo != NULL && PVO_VADDR(pvo) < eva;
 1043             pvo = RB_NEXT(pvo_tree, &pm->pmap_pvo, pvo)) {
 1044                 if ((pvo->pvo_vaddr & PVO_WIRED) == 0)
 1045                         panic("moea_unwire: pvo %p is missing PVO_WIRED", pvo);
 1046                 pvo->pvo_vaddr &= ~PVO_WIRED;
 1047                 pm->pm_stats.wired_count--;
 1048         }
 1049         PMAP_UNLOCK(pm);
 1050 }
 1051 
 1052 void
 1053 moea_copy_page(vm_page_t msrc, vm_page_t mdst)
 1054 {
 1055         vm_offset_t     dst;
 1056         vm_offset_t     src;
 1057 
 1058         dst = VM_PAGE_TO_PHYS(mdst);
 1059         src = VM_PAGE_TO_PHYS(msrc);
 1060 
 1061         bcopy((void *)src, (void *)dst, PAGE_SIZE);
 1062 }
 1063 
 1064 void
 1065 moea_copy_pages(vm_page_t *ma, vm_offset_t a_offset,
 1066     vm_page_t *mb, vm_offset_t b_offset, int xfersize)
 1067 {
 1068         void *a_cp, *b_cp;
 1069         vm_offset_t a_pg_offset, b_pg_offset;
 1070         int cnt;
 1071 
 1072         while (xfersize > 0) {
 1073                 a_pg_offset = a_offset & PAGE_MASK;
 1074                 cnt = min(xfersize, PAGE_SIZE - a_pg_offset);
 1075                 a_cp = (char *)VM_PAGE_TO_PHYS(ma[a_offset >> PAGE_SHIFT]) +
 1076                     a_pg_offset;
 1077                 b_pg_offset = b_offset & PAGE_MASK;
 1078                 cnt = min(cnt, PAGE_SIZE - b_pg_offset);
 1079                 b_cp = (char *)VM_PAGE_TO_PHYS(mb[b_offset >> PAGE_SHIFT]) +
 1080                     b_pg_offset;
 1081                 bcopy(a_cp, b_cp, cnt);
 1082                 a_offset += cnt;
 1083                 b_offset += cnt;
 1084                 xfersize -= cnt;
 1085         }
 1086 }
 1087 
 1088 /*
  1089  * Zero a page of physical memory, one cache block at a time, using dcbz.
 1090  */
 1091 void
 1092 moea_zero_page(vm_page_t m)
 1093 {
 1094         vm_offset_t off, pa = VM_PAGE_TO_PHYS(m);
 1095 
 1096         for (off = 0; off < PAGE_SIZE; off += cacheline_size)
 1097                 __asm __volatile("dcbz 0,%0" :: "r"(pa + off));
 1098 }
 1099 
 1100 void
 1101 moea_zero_page_area(vm_page_t m, int off, int size)
 1102 {
 1103         vm_offset_t pa = VM_PAGE_TO_PHYS(m);
 1104         void *va = (void *)(pa + off);
 1105 
 1106         bzero(va, size);
 1107 }
 1108 
 1109 vm_offset_t
 1110 moea_quick_enter_page(vm_page_t m)
 1111 {
 1112 
 1113         return (VM_PAGE_TO_PHYS(m));
 1114 }
 1115 
 1116 void
 1117 moea_quick_remove_page(vm_offset_t addr)
 1118 {
 1119 }
 1120 
 1121 boolean_t
 1122 moea_page_is_mapped(vm_page_t m)
 1123 {
 1124         return (!LIST_EMPTY(&(m)->md.mdpg_pvoh));
 1125 }
 1126 
 1127 bool
 1128 moea_ps_enabled(pmap_t pmap __unused)
 1129 {
 1130         return (false);
 1131 }
 1132 
 1133 /*
 1134  * Map the given physical page at the specified virtual address in the
 1135  * target pmap with the protection requested.  If specified the page
 1136  * will be wired down.
 1137  */
 1138 int
 1139 moea_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
 1140     u_int flags, int8_t psind)
 1141 {
 1142         int error;
 1143 
 1144         for (;;) {
 1145                 rw_wlock(&pvh_global_lock);
 1146                 PMAP_LOCK(pmap);
 1147                 error = moea_enter_locked(pmap, va, m, prot, flags, psind);
 1148                 rw_wunlock(&pvh_global_lock);
 1149                 PMAP_UNLOCK(pmap);
 1150                 if (error != ENOMEM)
 1151                         return (KERN_SUCCESS);
 1152                 if ((flags & PMAP_ENTER_NOSLEEP) != 0)
 1153                         return (KERN_RESOURCE_SHORTAGE);
 1154                 VM_OBJECT_ASSERT_UNLOCKED(m->object);
 1155                 vm_wait(NULL);
 1156         }
 1157 }
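       /*
        * Editorial note (hedged): the only error surfaced to the caller is
        * KERN_RESOURCE_SHORTAGE, and only when PMAP_ENTER_NOSLEEP is set;
        * otherwise the loop above calls vm_wait() and retries until
        * moea_enter_locked() stops failing with ENOMEM.
        */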
 1158 
 1159 /*
 1160  * Map the given physical page at the specified virtual address in the
 1161  * target pmap with the protection requested.  If specified the page
 1162  * will be wired down.
 1163  *
 1164  * The global pvh and pmap must be locked.
 1165  */
 1166 static int
 1167 moea_enter_locked(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
 1168     u_int flags, int8_t psind __unused)
 1169 {
 1170         struct          pvo_head *pvo_head;
 1171         uma_zone_t      zone;
 1172         u_int           pte_lo, pvo_flags;
 1173         int             error;
 1174 
 1175         if (pmap_bootstrapped)
 1176                 rw_assert(&pvh_global_lock, RA_WLOCKED);
 1177         PMAP_LOCK_ASSERT(pmap, MA_OWNED);
 1178         if ((m->oflags & VPO_UNMANAGED) == 0) {
 1179                 if ((flags & PMAP_ENTER_QUICK_LOCKED) == 0)
 1180                         VM_PAGE_OBJECT_BUSY_ASSERT(m);
 1181                 else
 1182                         VM_OBJECT_ASSERT_LOCKED(m->object);
 1183         }
 1184 
 1185         if ((m->oflags & VPO_UNMANAGED) != 0 || !moea_initialized) {
 1186                 pvo_head = &moea_pvo_kunmanaged;
 1187                 zone = moea_upvo_zone;
 1188                 pvo_flags = 0;
 1189         } else {
 1190                 pvo_head = vm_page_to_pvoh(m);
 1191                 zone = moea_mpvo_zone;
 1192                 pvo_flags = PVO_MANAGED;
 1193         }
 1194 
 1195         pte_lo = moea_calc_wimg(VM_PAGE_TO_PHYS(m), pmap_page_get_memattr(m));
 1196 
 1197         if (prot & VM_PROT_WRITE) {
 1198                 pte_lo |= PTE_BW;
 1199                 if (pmap_bootstrapped &&
 1200                     (m->oflags & VPO_UNMANAGED) == 0)
 1201                         vm_page_aflag_set(m, PGA_WRITEABLE);
 1202         } else
 1203                 pte_lo |= PTE_BR;
 1204 
 1205         if ((flags & PMAP_ENTER_WIRED) != 0)
 1206                 pvo_flags |= PVO_WIRED;
 1207 
 1208         error = moea_pvo_enter(pmap, zone, pvo_head, va, VM_PAGE_TO_PHYS(m),
 1209             pte_lo, pvo_flags);
 1210 
 1211         /*
  1212  * Flush the real page from the instruction cache. This has to be done
 1213          * for all user mappings to prevent information leakage via the
 1214          * instruction cache. moea_pvo_enter() returns ENOENT for the first
 1215          * mapping for a page.
 1216          */
 1217         if (pmap != kernel_pmap && error == ENOENT &&
 1218             (pte_lo & (PTE_I | PTE_G)) == 0)
 1219                 moea_syncicache(VM_PAGE_TO_PHYS(m), PAGE_SIZE);
 1220 
 1221         return (error);
 1222 }
 1223 
 1224 /*
 1225  * Maps a sequence of resident pages belonging to the same object.
 1226  * The sequence begins with the given page m_start.  This page is
 1227  * mapped at the given virtual address start.  Each subsequent page is
 1228  * mapped at a virtual address that is offset from start by the same
 1229  * amount as the page is offset from m_start within the object.  The
 1230  * last page in the sequence is the page with the largest offset from
 1231  * m_start that can be mapped at a virtual address less than the given
 1232  * virtual address end.  Not every virtual page between start and end
 1233  * is mapped; only those for which a resident page exists with the
 1234  * corresponding offset from m_start are mapped.
 1235  */
 1236 void
 1237 moea_enter_object(pmap_t pm, vm_offset_t start, vm_offset_t end,
 1238     vm_page_t m_start, vm_prot_t prot)
 1239 {
 1240         vm_page_t m;
 1241         vm_pindex_t diff, psize;
 1242 
 1243         VM_OBJECT_ASSERT_LOCKED(m_start->object);
 1244 
 1245         psize = atop(end - start);
 1246         m = m_start;
 1247         rw_wlock(&pvh_global_lock);
 1248         PMAP_LOCK(pm);
 1249         while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) {
 1250                 moea_enter_locked(pm, start + ptoa(diff), m, prot &
 1251                     (VM_PROT_READ | VM_PROT_EXECUTE), PMAP_ENTER_QUICK_LOCKED,
 1252                     0);
 1253                 m = TAILQ_NEXT(m, listq);
 1254         }
 1255         rw_wunlock(&pvh_global_lock);
 1256         PMAP_UNLOCK(pm);
 1257 }
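       /*
        * Editorial example (not part of the original file): with diff =
        * m->pindex - m_start->pindex, a resident page whose pindex is 3
        * greater than m_start's is entered at start + ptoa(3), and the loop
        * above stops once that offset would reach 'end' (diff >= psize).
        */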
 1258 
 1259 void
 1260 moea_enter_quick(pmap_t pm, vm_offset_t va, vm_page_t m,
 1261     vm_prot_t prot)
 1262 {
 1263 
 1264         rw_wlock(&pvh_global_lock);
 1265         PMAP_LOCK(pm);
 1266         moea_enter_locked(pm, va, m, prot & (VM_PROT_READ | VM_PROT_EXECUTE),
 1267             PMAP_ENTER_QUICK_LOCKED, 0);
 1268         rw_wunlock(&pvh_global_lock);
 1269         PMAP_UNLOCK(pm);
 1270 }
 1271 
 1272 vm_paddr_t
 1273 moea_extract(pmap_t pm, vm_offset_t va)
 1274 {
 1275         struct  pvo_entry *pvo;
 1276         vm_paddr_t pa;
 1277 
 1278         PMAP_LOCK(pm);
 1279         pvo = moea_pvo_find_va(pm, va & ~ADDR_POFF, NULL);
 1280         if (pvo == NULL)
 1281                 pa = 0;
 1282         else
 1283                 pa = PVO_PADDR(pvo) | (va & ADDR_POFF);
 1284         PMAP_UNLOCK(pm);
 1285         return (pa);
 1286 }
 1287 
 1288 /*
 1289  * Atomically extract and hold the physical page with the given
 1290  * pmap and virtual address pair if that mapping permits the given
 1291  * protection.
 1292  */
 1293 vm_page_t
 1294 moea_extract_and_hold(pmap_t pmap, vm_offset_t va, vm_prot_t prot)
 1295 {
 1296         struct  pvo_entry *pvo;
 1297         vm_page_t m;
 1298 
 1299         m = NULL;
 1300         PMAP_LOCK(pmap);
 1301         pvo = moea_pvo_find_va(pmap, va & ~ADDR_POFF, NULL);
 1302         if (pvo != NULL && (pvo->pvo_pte.pte.pte_hi & PTE_VALID) &&
 1303             ((pvo->pvo_pte.pte.pte_lo & PTE_PP) == PTE_RW ||
 1304              (prot & VM_PROT_WRITE) == 0)) {
 1305                 m = PHYS_TO_VM_PAGE(PVO_PADDR(pvo));
 1306                 if (!vm_page_wire_mapped(m))
 1307                         m = NULL;
 1308         }
 1309         PMAP_UNLOCK(pmap);
 1310         return (m);
 1311 }
 1312 
 1313 void
 1314 moea_init()
 1315 {
 1316 
 1317         moea_upvo_zone = uma_zcreate("UPVO entry", sizeof (struct pvo_entry),
 1318             NULL, NULL, NULL, NULL, UMA_ALIGN_PTR,
 1319             UMA_ZONE_VM | UMA_ZONE_NOFREE);
 1320         moea_mpvo_zone = uma_zcreate("MPVO entry", sizeof(struct pvo_entry),
 1321             NULL, NULL, NULL, NULL, UMA_ALIGN_PTR,
 1322             UMA_ZONE_VM | UMA_ZONE_NOFREE);
 1323         moea_initialized = TRUE;
 1324 }
 1325 
 1326 boolean_t
 1327 moea_is_referenced(vm_page_t m)
 1328 {
 1329         boolean_t rv;
 1330 
 1331         KASSERT((m->oflags & VPO_UNMANAGED) == 0,
 1332             ("moea_is_referenced: page %p is not managed", m));
 1333         rw_wlock(&pvh_global_lock);
 1334         rv = moea_query_bit(m, PTE_REF);
 1335         rw_wunlock(&pvh_global_lock);
 1336         return (rv);
 1337 }
 1338 
 1339 boolean_t
 1340 moea_is_modified(vm_page_t m)
 1341 {
 1342         boolean_t rv;
 1343 
 1344         KASSERT((m->oflags & VPO_UNMANAGED) == 0,
 1345             ("moea_is_modified: page %p is not managed", m));
 1346 
 1347         /*
 1348          * If the page is not busied then this check is racy.
 1349          */
 1350         if (!pmap_page_is_write_mapped(m))
 1351                 return (FALSE);
 1352 
 1353         rw_wlock(&pvh_global_lock);
 1354         rv = moea_query_bit(m, PTE_CHG);
 1355         rw_wunlock(&pvh_global_lock);
 1356         return (rv);
 1357 }
 1358 
 1359 boolean_t
 1360 moea_is_prefaultable(pmap_t pmap, vm_offset_t va)
 1361 {
 1362         struct pvo_entry *pvo;
 1363         boolean_t rv;
 1364 
 1365         PMAP_LOCK(pmap);
 1366         pvo = moea_pvo_find_va(pmap, va & ~ADDR_POFF, NULL);
 1367         rv = pvo == NULL || (pvo->pvo_pte.pte.pte_hi & PTE_VALID) == 0;
 1368         PMAP_UNLOCK(pmap);
 1369         return (rv);
 1370 }
 1371 
 1372 void
 1373 moea_clear_modify(vm_page_t m)
 1374 {
 1375 
 1376         KASSERT((m->oflags & VPO_UNMANAGED) == 0,
 1377             ("moea_clear_modify: page %p is not managed", m));
 1378         vm_page_assert_busied(m);
 1379 
 1380         if (!pmap_page_is_write_mapped(m))
 1381                 return;
 1382         rw_wlock(&pvh_global_lock);
 1383         moea_clear_bit(m, PTE_CHG);
 1384         rw_wunlock(&pvh_global_lock);
 1385 }
 1386 
 1387 /*
 1388  * Clear the write and modified bits in each of the given page's mappings.
 1389  */
 1390 void
 1391 moea_remove_write(vm_page_t m)
 1392 {
 1393         struct  pvo_entry *pvo;
 1394         struct  pte *pt;
 1395         pmap_t  pmap;
 1396         u_int   lo;
 1397 
 1398         KASSERT((m->oflags & VPO_UNMANAGED) == 0,
 1399             ("moea_remove_write: page %p is not managed", m));
 1400         vm_page_assert_busied(m);
 1401 
 1402         if (!pmap_page_is_write_mapped(m))
 1403                 return;
 1404         rw_wlock(&pvh_global_lock);
 1405         lo = moea_attr_fetch(m);
 1406         powerpc_sync();
 1407         LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
 1408                 pmap = pvo->pvo_pmap;
 1409                 PMAP_LOCK(pmap);
 1410                 if ((pvo->pvo_pte.pte.pte_lo & PTE_PP) != PTE_BR) {
 1411                         pt = moea_pvo_to_pte(pvo, -1);
 1412                         pvo->pvo_pte.pte.pte_lo &= ~PTE_PP;
 1413                         pvo->pvo_pte.pte.pte_lo |= PTE_BR;
 1414                         if (pt != NULL) {
 1415                                 moea_pte_synch(pt, &pvo->pvo_pte.pte);
 1416                                 lo |= pvo->pvo_pte.pte.pte_lo;
 1417                                 pvo->pvo_pte.pte.pte_lo &= ~PTE_CHG;
 1418                                 moea_pte_change(pt, &pvo->pvo_pte.pte,
 1419                                     pvo->pvo_vaddr);
 1420                                 mtx_unlock(&moea_table_mutex);
 1421                         }
 1422                 }
 1423                 PMAP_UNLOCK(pmap);
 1424         }
 1425         if ((lo & PTE_CHG) != 0) {
 1426                 moea_attr_clear(m, PTE_CHG);
 1427                 vm_page_dirty(m);
 1428         }
 1429         vm_page_aflag_clear(m, PGA_WRITEABLE);
 1430         rw_wunlock(&pvh_global_lock);
 1431 }
 1432 
 1433 /*
 1434  *      moea_ts_referenced:
 1435  *
 1436  *      Return a count of reference bits for a page, clearing those bits.
 1437  *      It is not necessary for every reference bit to be cleared, but it
 1438  *      is necessary that 0 only be returned when there are truly no
 1439  *      reference bits set.
 1440  *
 1441  *      XXX: The exact number of bits to check and clear is a matter that
 1442  *      should be tested and standardized at some point in the future for
 1443  *      optimal aging of shared pages.
 1444  */
 1445 int
 1446 moea_ts_referenced(vm_page_t m)
 1447 {
 1448         int count;
 1449 
 1450         KASSERT((m->oflags & VPO_UNMANAGED) == 0,
 1451             ("moea_ts_referenced: page %p is not managed", m));
 1452         rw_wlock(&pvh_global_lock);
 1453         count = moea_clear_bit(m, PTE_REF);
 1454         rw_wunlock(&pvh_global_lock);
 1455         return (count);
 1456 }
 1457 
 1458 /*
 1459  * Modify the WIMG settings of all mappings for a page.
 1460  */
 1461 void
 1462 moea_page_set_memattr(vm_page_t m, vm_memattr_t ma)
 1463 {
 1464         struct  pvo_entry *pvo;
 1465         struct  pvo_head *pvo_head;
 1466         struct  pte *pt;
 1467         pmap_t  pmap;
 1468         u_int   lo;
 1469 
 1470         if ((m->oflags & VPO_UNMANAGED) != 0) {
 1471                 m->md.mdpg_cache_attrs = ma;
 1472                 return;
 1473         }
 1474 
 1475         rw_wlock(&pvh_global_lock);
 1476         pvo_head = vm_page_to_pvoh(m);
 1477         lo = moea_calc_wimg(VM_PAGE_TO_PHYS(m), ma);
 1478 
 1479         LIST_FOREACH(pvo, pvo_head, pvo_vlink) {
 1480                 pmap = pvo->pvo_pmap;
 1481                 PMAP_LOCK(pmap);
 1482                 pt = moea_pvo_to_pte(pvo, -1);
 1483                 pvo->pvo_pte.pte.pte_lo &= ~PTE_WIMG;
 1484                 pvo->pvo_pte.pte.pte_lo |= lo;
 1485                 if (pt != NULL) {
 1486                         moea_pte_change(pt, &pvo->pvo_pte.pte,
 1487                             pvo->pvo_vaddr);
 1488                         if (pvo->pvo_pmap == kernel_pmap)
 1489                                 isync();
 1490                         mtx_unlock(&moea_table_mutex);
 1491                 }
 1492                 PMAP_UNLOCK(pmap);
 1493         }
 1494         m->md.mdpg_cache_attrs = ma;
 1495         rw_wunlock(&pvh_global_lock);
 1496 }
 1497 
 1498 /*
 1499  * Map a wired page into kernel virtual address space.
 1500  */
 1501 void
 1502 moea_kenter(vm_offset_t va, vm_paddr_t pa)
 1503 {
 1504 
 1505         moea_kenter_attr(va, pa, VM_MEMATTR_DEFAULT);
 1506 }
 1507 
 1508 void
 1509 moea_kenter_attr(vm_offset_t va, vm_paddr_t pa, vm_memattr_t ma)
 1510 {
 1511         u_int           pte_lo;
 1512         int             error;
 1513 
 1514 #if 0
 1515         if (va < VM_MIN_KERNEL_ADDRESS)
 1516                 panic("moea_kenter: attempt to enter non-kernel address %#x",
 1517                     va);
 1518 #endif
 1519 
 1520         pte_lo = moea_calc_wimg(pa, ma);
 1521 
 1522         PMAP_LOCK(kernel_pmap);
 1523         error = moea_pvo_enter(kernel_pmap, moea_upvo_zone,
 1524             &moea_pvo_kunmanaged, va, pa, pte_lo, PVO_WIRED);
 1525 
 1526         if (error != 0 && error != ENOENT)
 1527                 panic("moea_kenter: failed to enter va %#x pa %#x: %d", va,
 1528                     pa, error);
 1529 
 1530         PMAP_UNLOCK(kernel_pmap);
 1531 }
 1532 
 1533 /*
 1534  * Extract the physical page address associated with the given kernel virtual
 1535  * address.
 1536  */
 1537 vm_paddr_t
 1538 moea_kextract(vm_offset_t va)
 1539 {
 1540         struct          pvo_entry *pvo;
 1541         vm_paddr_t pa;
 1542 
 1543         /*
 1544          * Allow direct mappings on 32-bit OEA
 1545          */
 1546         if (va < VM_MIN_KERNEL_ADDRESS) {
 1547                 return (va);
 1548         }
 1549 
 1550         PMAP_LOCK(kernel_pmap);
 1551         pvo = moea_pvo_find_va(kernel_pmap, va & ~ADDR_POFF, NULL);
 1552         KASSERT(pvo != NULL, ("moea_kextract: no addr found"));
 1553         pa = PVO_PADDR(pvo) | (va & ADDR_POFF);
 1554         PMAP_UNLOCK(kernel_pmap);
 1555         return (pa);
 1556 }
 1557 
 1558 /*
 1559  * Remove a wired page from kernel virtual address space.
 1560  */
 1561 void
 1562 moea_kremove(vm_offset_t va)
 1563 {
 1564 
 1565         moea_remove(kernel_pmap, va, va + PAGE_SIZE);
 1566 }
 1567 
 1568 /*
 1569  * Provide a kernel pointer corresponding to a given userland pointer.
 1570  * The returned pointer is valid until the next time this function is
 1571  * called in this thread. This is used internally in copyin/copyout.
 1572  */
 1573 int
 1574 moea_map_user_ptr(pmap_t pm, volatile const void *uaddr,
 1575     void **kaddr, size_t ulen, size_t *klen)
 1576 {
 1577         size_t l;
 1578         register_t vsid;
 1579 
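              /*
               * Map the user address into the USER_ADDR window and clamp
               * the usable length to the end of its 256MB segment.
               */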
 1580         *kaddr = (char *)USER_ADDR + ((uintptr_t)uaddr & ~SEGMENT_MASK);
 1581         l = ((char *)USER_ADDR + SEGMENT_LENGTH) - (char *)(*kaddr);
 1582         if (l > ulen)
 1583                 l = ulen;
 1584         if (klen)
 1585                 *klen = l;
 1586         else if (l != ulen)
 1587                 return (EFAULT);
 1588 
 1589         vsid = va_to_vsid(pm, (vm_offset_t)uaddr);
 1590 
 1591         /* Mark segment no-execute */
 1592         vsid |= SR_N;
 1593 
 1594         /* If we have already set this VSID, we can just return */
 1595         if (curthread->td_pcb->pcb_cpu.aim.usr_vsid == vsid)
 1596                 return (0);
 1597 
 1598         __asm __volatile("isync");
 1599         curthread->td_pcb->pcb_cpu.aim.usr_segm =
 1600             (uintptr_t)uaddr >> ADDR_SR_SHFT;
 1601         curthread->td_pcb->pcb_cpu.aim.usr_vsid = vsid;
 1602         __asm __volatile("mtsr %0,%1; isync" :: "n"(USER_SR), "r"(vsid));
 1603 
 1604         return (0);
 1605 }
 1606 
 1607 /*
 1608  * Figure out where a given kernel pointer (usually in a fault) points
 1609  * to from the VM's perspective, potentially remapping into userland's
 1610  * address space.
 1611  */
 1612 static int
 1613 moea_decode_kernel_ptr(vm_offset_t addr, int *is_user,
 1614     vm_offset_t *decoded_addr)
 1615 {
 1616         vm_offset_t user_sr;
 1617 
 1618         if ((addr >> ADDR_SR_SHFT) == (USER_ADDR >> ADDR_SR_SHFT)) {
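                      /*
                       * The address falls in the USER_ADDR window; rebuild
                       * the original user address from the segment saved by
                       * moea_map_user_ptr().
                       */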
 1619                 user_sr = curthread->td_pcb->pcb_cpu.aim.usr_segm;
 1620                 addr &= ADDR_PIDX | ADDR_POFF;
 1621                 addr |= user_sr << ADDR_SR_SHFT;
 1622                 *decoded_addr = addr;
 1623                 *is_user = 1;
 1624         } else {
 1625                 *decoded_addr = addr;
 1626                 *is_user = 0;
 1627         }
 1628 
 1629         return (0);
 1630 }
 1631 
 1632 /*
 1633  * Map a range of physical addresses into kernel virtual address space.
 1634  *
 1635  * The value passed in *virt is a suggested virtual address for the mapping.
 1636  * Architectures which can support a direct-mapped physical to virtual region
 1637  * can return the appropriate address within that region, leaving '*virt'
 1638  * unchanged.  We cannot and therefore do not; *virt is updated with the
 1639  * first usable address after the mapped region.
 1640  */
 1641 vm_offset_t
 1642 moea_map(vm_offset_t *virt, vm_paddr_t pa_start,
 1643     vm_paddr_t pa_end, int prot)
 1644 {
 1645         vm_offset_t     sva, va;
 1646 
 1647         sva = *virt;
 1648         va = sva;
 1649         for (; pa_start < pa_end; pa_start += PAGE_SIZE, va += PAGE_SIZE)
 1650                 moea_kenter(va, pa_start);
 1651         *virt = va;
 1652         return (sva);
 1653 }
 1654 
 1655 /*
 1656  * Returns true if the pmap's pv is one of the first
 1657  * 16 pvs linked to from this page.  This count may
 1658  * be changed upwards or downwards in the future; it
 1659  * is only necessary that true be returned for a small
 1660  * subset of pmaps for proper page aging.
 1661  */
 1662 boolean_t
 1663 moea_page_exists_quick(pmap_t pmap, vm_page_t m)
 1664 {
 1665         int loops;
 1666         struct pvo_entry *pvo;
 1667         boolean_t rv;
 1668 
 1669         KASSERT((m->oflags & VPO_UNMANAGED) == 0,
 1670             ("moea_page_exists_quick: page %p is not managed", m));
 1671         loops = 0;
 1672         rv = FALSE;
 1673         rw_wlock(&pvh_global_lock);
 1674         LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
 1675                 if (pvo->pvo_pmap == pmap) {
 1676                         rv = TRUE;
 1677                         break;
 1678                 }
 1679                 if (++loops >= 16)
 1680                         break;
 1681         }
 1682         rw_wunlock(&pvh_global_lock);
 1683         return (rv);
 1684 }
 1685 
 1686 void
 1687 moea_page_init(vm_page_t m)
 1688 {
 1689 
 1690         m->md.mdpg_attrs = 0;
 1691         m->md.mdpg_cache_attrs = VM_MEMATTR_DEFAULT;
 1692         LIST_INIT(&m->md.mdpg_pvoh);
 1693 }
 1694 
 1695 /*
 1696  * Return the number of managed mappings to the given physical page
 1697  * that are wired.
 1698  */
 1699 int
 1700 moea_page_wired_mappings(vm_page_t m)
 1701 {
 1702         struct pvo_entry *pvo;
 1703         int count;
 1704 
 1705         count = 0;
 1706         if ((m->oflags & VPO_UNMANAGED) != 0)
 1707                 return (count);
 1708         rw_wlock(&pvh_global_lock);
 1709         LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink)
 1710                 if ((pvo->pvo_vaddr & PVO_WIRED) != 0)
 1711                         count++;
 1712         rw_wunlock(&pvh_global_lock);
 1713         return (count);
 1714 }
 1715 
 1716 static u_int    moea_vsidcontext;
 1717 
 1718 int
 1719 moea_pinit(pmap_t pmap)
 1720 {
 1721         int     i, mask;
 1722         u_int   entropy;
 1723 
 1724         RB_INIT(&pmap->pmap_pvo);
 1725 
 1726         entropy = 0;
 1727         __asm __volatile("mftb %0" : "=r"(entropy));
 1728 
 1729         if ((pmap->pmap_phys = (pmap_t)moea_kextract((vm_offset_t)pmap))
 1730             == NULL) {
 1731                 pmap->pmap_phys = pmap;
 1732         }
 1733 
 1734         mtx_lock(&moea_vsid_mutex);
 1735         /*
 1736          * Allocate some segment registers for this pmap.
 1737          */
 1738         for (i = 0; i < NPMAPS; i += VSID_NBPW) {
 1739                 u_int   hash, n;
 1740 
 1741                 /*
 1742                  * Create a new value by multiplying by a prime and adding in
 1743                  * entropy from the timebase register.  This is to make the
 1744                  * VSID more random so that the PT hash function collides
 1745          * less often.  (Note that the prime causes gcc to do shifts
 1746                  * instead of a multiply.)
 1747                  */
 1748                 moea_vsidcontext = (moea_vsidcontext * 0x1105) + entropy;
 1749                 hash = moea_vsidcontext & (NPMAPS - 1);
 1750                 if (hash == 0)          /* 0 is special, avoid it */
 1751                         continue;
 1752                 n = hash >> 5;
 1753                 mask = 1 << (hash & (VSID_NBPW - 1));
 1754                 hash = (moea_vsidcontext & 0xfffff);
 1755                 if (moea_vsid_bitmap[n] & mask) {       /* collision? */
 1756                         /* anything free in this bucket? */
 1757                         if (moea_vsid_bitmap[n] == 0xffffffff) {
 1758                                 entropy = (moea_vsidcontext >> 20);
 1759                                 continue;
 1760                         }
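                              /*
                               * Pick the first free VSID group in this
                               * bitmap word and fold its index into the
                               * low bits of the hash.
                               */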
 1761                         i = ffs(~moea_vsid_bitmap[n]) - 1;
 1762                         mask = 1 << i;
 1763                         hash &= rounddown2(0xfffff, VSID_NBPW);
 1764                         hash |= i;
 1765                 }
 1766                 KASSERT(!(moea_vsid_bitmap[n] & mask),
 1767                     ("Allocating in-use VSID group %#x\n", hash));
 1768                 moea_vsid_bitmap[n] |= mask;
 1769                 for (i = 0; i < 16; i++)
 1770                         pmap->pm_sr[i] = VSID_MAKE(i, hash);
 1771                 mtx_unlock(&moea_vsid_mutex);
 1772                 return (1);
 1773         }
 1774 
 1775         mtx_unlock(&moea_vsid_mutex);
 1776         panic("moea_pinit: out of segments");
 1777 }
 1778 
 1779 /*
 1780  * Initialize the pmap associated with process 0.
 1781  */
 1782 void
 1783 moea_pinit0(pmap_t pm)
 1784 {
 1785 
 1786         PMAP_LOCK_INIT(pm);
 1787         moea_pinit(pm);
 1788         bzero(&pm->pm_stats, sizeof(pm->pm_stats));
 1789 }
 1790 
 1791 /*
 1792  * Set the physical protection on the specified range of this map as requested.
 1793  */
 1794 void
 1795 moea_protect(pmap_t pm, vm_offset_t sva, vm_offset_t eva,
 1796     vm_prot_t prot)
 1797 {
 1798         struct  pvo_entry *pvo, *tpvo, key;
 1799         struct  pte *pt;
 1800 
 1801         KASSERT(pm == &curproc->p_vmspace->vm_pmap || pm == kernel_pmap,
 1802             ("moea_protect: non current pmap"));
 1803 
 1804         if ((prot & VM_PROT_READ) == VM_PROT_NONE) {
 1805                 moea_remove(pm, sva, eva);
 1806                 return;
 1807         }
 1808 
 1809         rw_wlock(&pvh_global_lock);
 1810         PMAP_LOCK(pm);
 1811         key.pvo_vaddr = sva;
 1812         for (pvo = RB_NFIND(pvo_tree, &pm->pmap_pvo, &key);
 1813             pvo != NULL && PVO_VADDR(pvo) < eva; pvo = tpvo) {
 1814                 tpvo = RB_NEXT(pvo_tree, &pm->pmap_pvo, pvo);
 1815 
 1816                 /*
 1817                  * Grab the PTE pointer before we diddle with the cached PTE
 1818                  * copy.
 1819                  */
 1820                 pt = moea_pvo_to_pte(pvo, -1);
 1821                 /*
 1822                  * Change the protection of the page.
 1823                  */
 1824                 pvo->pvo_pte.pte.pte_lo &= ~PTE_PP;
 1825                 pvo->pvo_pte.pte.pte_lo |= PTE_BR;
 1826 
 1827                 /*
 1828                  * If the PVO is in the page table, update that pte as well.
 1829                  */
 1830                 if (pt != NULL) {
 1831                         moea_pte_change(pt, &pvo->pvo_pte.pte, pvo->pvo_vaddr);
 1832                         mtx_unlock(&moea_table_mutex);
 1833                 }
 1834         }
 1835         rw_wunlock(&pvh_global_lock);
 1836         PMAP_UNLOCK(pm);
 1837 }
 1838 
 1839 /*
 1840  * Map a list of wired pages into kernel virtual address space.  This is
 1841  * intended for temporary mappings which do not need page modification or
 1842  * references recorded.  Existing mappings in the region are overwritten.
 1843  */
 1844 void
 1845 moea_qenter(vm_offset_t sva, vm_page_t *m, int count)
 1846 {
 1847         vm_offset_t va;
 1848 
 1849         va = sva;
 1850         while (count-- > 0) {
 1851                 moea_kenter(va, VM_PAGE_TO_PHYS(*m));
 1852                 va += PAGE_SIZE;
 1853                 m++;
 1854         }
 1855 }
 1856 
 1857 /*
 1858  * Remove page mappings from kernel virtual address space.  Intended for
 1859  * temporary mappings entered by moea_qenter.
 1860  */
 1861 void
 1862 moea_qremove(vm_offset_t sva, int count)
 1863 {
 1864         vm_offset_t va;
 1865 
 1866         va = sva;
 1867         while (count-- > 0) {
 1868                 moea_kremove(va);
 1869                 va += PAGE_SIZE;
 1870         }
 1871 }
 1872 
 1873 void
 1874 moea_release(pmap_t pmap)
 1875 {
 1876         int idx, mask;
 1877 
 1878         /*
 1879          * Free the VSID group allocated to this pmap's segment registers
 1880          */
 1881         if (pmap->pm_sr[0] == 0)
 1882                 panic("moea_release");
 1883 
 1884         mtx_lock(&moea_vsid_mutex);
 1885         idx = VSID_TO_HASH(pmap->pm_sr[0]) & (NPMAPS-1);
 1886         mask = 1 << (idx % VSID_NBPW);
 1887         idx /= VSID_NBPW;
 1888         moea_vsid_bitmap[idx] &= ~mask;
 1889         mtx_unlock(&moea_vsid_mutex);
 1890 }
 1891 
 1892 /*
 1893  * Remove the given range of addresses from the specified map.
 1894  */
 1895 void
 1896 moea_remove(pmap_t pm, vm_offset_t sva, vm_offset_t eva)
 1897 {
 1898         struct  pvo_entry *pvo, *tpvo, key;
 1899 
 1900         rw_wlock(&pvh_global_lock);
 1901         PMAP_LOCK(pm);
 1902         key.pvo_vaddr = sva;
 1903         for (pvo = RB_NFIND(pvo_tree, &pm->pmap_pvo, &key);
 1904             pvo != NULL && PVO_VADDR(pvo) < eva; pvo = tpvo) {
 1905                 tpvo = RB_NEXT(pvo_tree, &pm->pmap_pvo, pvo);
 1906                 moea_pvo_remove(pvo, -1);
 1907         }
 1908         PMAP_UNLOCK(pm);
 1909         rw_wunlock(&pvh_global_lock);
 1910 }
 1911 
 1912 /*
 1913  * Remove physical page from all pmaps in which it resides. moea_pvo_remove()
 1914  * will reflect changes in pte's back to the vm_page.
 1915  */
 1916 void
 1917 moea_remove_all(vm_page_t m)
 1918 {
 1919         struct  pvo_head *pvo_head;
 1920         struct  pvo_entry *pvo, *next_pvo;
 1921         pmap_t  pmap;
 1922 
 1923         rw_wlock(&pvh_global_lock);
 1924         pvo_head = vm_page_to_pvoh(m);
 1925         for (pvo = LIST_FIRST(pvo_head); pvo != NULL; pvo = next_pvo) {
 1926                 next_pvo = LIST_NEXT(pvo, pvo_vlink);
 1927 
 1928                 pmap = pvo->pvo_pmap;
 1929                 PMAP_LOCK(pmap);
 1930                 moea_pvo_remove(pvo, -1);
 1931                 PMAP_UNLOCK(pmap);
 1932         }
 1933         if ((m->a.flags & PGA_WRITEABLE) && moea_query_bit(m, PTE_CHG)) {
 1934                 moea_attr_clear(m, PTE_CHG);
 1935                 vm_page_dirty(m);
 1936         }
 1937         vm_page_aflag_clear(m, PGA_WRITEABLE);
 1938         rw_wunlock(&pvh_global_lock);
 1939 }
 1940 
 1941 static int
 1942 moea_mincore(pmap_t pm, vm_offset_t va, vm_paddr_t *pap)
 1943 {
 1944         struct pvo_entry *pvo;
 1945         vm_paddr_t pa;
 1946         vm_page_t m;
 1947         int val;
 1948         bool managed;
 1949 
 1950         PMAP_LOCK(pm);
 1951 
 1952         pvo = moea_pvo_find_va(pm, va & ~ADDR_POFF, NULL);
 1953         if (pvo != NULL) {
 1954                 pa = PVO_PADDR(pvo);
 1955                 m = PHYS_TO_VM_PAGE(pa);
 1956                 managed = (pvo->pvo_vaddr & PVO_MANAGED) == PVO_MANAGED;
 1957                 val = MINCORE_INCORE;
 1958         } else {
 1959                 PMAP_UNLOCK(pm);
 1960                 return (0);
 1961         }
 1962 
 1963         PMAP_UNLOCK(pm);
 1964 
 1965         if (m == NULL)
 1966                 return (0);
 1967 
 1968         if (managed) {
 1969                 if (moea_is_modified(m))
 1970                         val |= MINCORE_MODIFIED | MINCORE_MODIFIED_OTHER;
 1971 
 1972                 if (moea_is_referenced(m))
 1973                         val |= MINCORE_REFERENCED | MINCORE_REFERENCED_OTHER;
 1974         }
 1975 
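              /*
               * For managed pages whose referenced/modified state may still
               * be incomplete, hand the physical address back so the caller
               * can examine the page's other mappings.
               */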
 1976         if ((val & (MINCORE_MODIFIED_OTHER | MINCORE_REFERENCED_OTHER)) !=
 1977             (MINCORE_MODIFIED_OTHER | MINCORE_REFERENCED_OTHER) &&
 1978             managed) {
 1979                 *pap = pa;
 1980         }
 1981 
 1982         return (val);
 1983 }
 1984 
 1985 /*
 1986  * Allocate a physical page of memory directly from the phys_avail map.
 1987  * Can only be called from moea_bootstrap before avail start and end are
 1988  * calculated.
 1989  */
 1990 static vm_offset_t
 1991 moea_bootstrap_alloc(vm_size_t size, u_int align)
 1992 {
 1993         vm_offset_t     s, e;
 1994         int             i, j;
 1995 
 1996         size = round_page(size);
 1997         for (i = 0; phys_avail[i + 1] != 0; i += 2) {
 1998                 if (align != 0)
 1999                         s = roundup2(phys_avail[i], align);
 2000                 else
 2001                         s = phys_avail[i];
 2002                 e = s + size;
 2003 
 2004                 if (s < phys_avail[i] || e > phys_avail[i + 1])
 2005                         continue;
 2006 
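                      /*
                       * Carve the allocation out of this region: trim it
                       * from the front, trim it from the back, or split
                       * the region in two.
                       */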
 2007                 if (s == phys_avail[i]) {
 2008                         phys_avail[i] += size;
 2009                 } else if (e == phys_avail[i + 1]) {
 2010                         phys_avail[i + 1] -= size;
 2011                 } else {
 2012                         for (j = phys_avail_count * 2; j > i; j -= 2) {
 2013                                 phys_avail[j] = phys_avail[j - 2];
 2014                                 phys_avail[j + 1] = phys_avail[j - 1];
 2015                         }
 2016 
 2017                         phys_avail[i + 3] = phys_avail[i + 1];
 2018                         phys_avail[i + 1] = s;
 2019                         phys_avail[i + 2] = e;
 2020                         phys_avail_count++;
 2021                 }
 2022 
 2023                 return (s);
 2024         }
 2025         panic("moea_bootstrap_alloc: could not allocate memory");
 2026 }
 2027 
 2028 static void
 2029 moea_syncicache(vm_paddr_t pa, vm_size_t len)
 2030 {
 2031         __syncicache((void *)pa, len);
 2032 }
 2033 
 2034 static int
 2035 moea_pvo_enter(pmap_t pm, uma_zone_t zone, struct pvo_head *pvo_head,
 2036     vm_offset_t va, vm_paddr_t pa, u_int pte_lo, int flags)
 2037 {
 2038         struct  pvo_entry *pvo;
 2039         u_int   sr;
 2040         int     first;
 2041         u_int   ptegidx;
 2042         int     i;
 2043         int     bootstrap;
 2044 
 2045         moea_pvo_enter_calls++;
 2046         first = 0;
 2047         bootstrap = 0;
 2048 
 2049         /*
 2050          * Compute the PTE Group index.
 2051          */
 2052         va &= ~ADDR_POFF;
 2053         sr = va_to_sr(pm->pm_sr, va);
 2054         ptegidx = va_to_pteg(sr, va);
 2055 
 2056         /*
 2057          * Remove any existing mapping for this page.  Reuse the pvo entry if
 2058          * there is a mapping.
 2059          */
 2060         mtx_lock(&moea_table_mutex);
 2061         LIST_FOREACH(pvo, &moea_pvo_table[ptegidx], pvo_olink) {
 2062                 if (pvo->pvo_pmap == pm && PVO_VADDR(pvo) == va) {
 2063                         if (PVO_PADDR(pvo) == pa &&
 2064                             (pvo->pvo_pte.pte.pte_lo & PTE_PP) ==
 2065                             (pte_lo & PTE_PP)) {
 2066                                 /*
 2067                                  * The PTE is not changing.  Instead, this may
 2068                                  * be a request to change the mapping's wired
 2069                                  * attribute.
 2070                                  */
 2071                                 mtx_unlock(&moea_table_mutex);
 2072                                 if ((flags & PVO_WIRED) != 0 &&
 2073                                     (pvo->pvo_vaddr & PVO_WIRED) == 0) {
 2074                                         pvo->pvo_vaddr |= PVO_WIRED;
 2075                                         pm->pm_stats.wired_count++;
 2076                                 } else if ((flags & PVO_WIRED) == 0 &&
 2077                                     (pvo->pvo_vaddr & PVO_WIRED) != 0) {
 2078                                         pvo->pvo_vaddr &= ~PVO_WIRED;
 2079                                         pm->pm_stats.wired_count--;
 2080                                 }
 2081                                 return (0);
 2082                         }
 2083                         moea_pvo_remove(pvo, -1);
 2084                         break;
 2085                 }
 2086         }
 2087 
 2088         /*
 2089          * If we aren't overwriting a mapping, try to allocate.
 2090          */
 2091         if (moea_initialized) {
 2092                 pvo = uma_zalloc(zone, M_NOWAIT);
 2093         } else {
 2094                 if (moea_bpvo_pool_index >= BPVO_POOL_SIZE) {
 2095                         panic("moea_pvo_enter: bpvo pool exhausted, %d, %d, %d",
 2096                               moea_bpvo_pool_index, BPVO_POOL_SIZE,
 2097                               BPVO_POOL_SIZE * sizeof(struct pvo_entry));
 2098                 }
 2099                 pvo = &moea_bpvo_pool[moea_bpvo_pool_index];
 2100                 moea_bpvo_pool_index++;
 2101                 bootstrap = 1;
 2102         }
 2103 
 2104         if (pvo == NULL) {
 2105                 mtx_unlock(&moea_table_mutex);
 2106                 return (ENOMEM);
 2107         }
 2108 
 2109         moea_pvo_entries++;
 2110         pvo->pvo_vaddr = va;
 2111         pvo->pvo_pmap = pm;
 2112         LIST_INSERT_HEAD(&moea_pvo_table[ptegidx], pvo, pvo_olink);
 2113         pvo->pvo_vaddr &= ~ADDR_POFF;
 2114         if (flags & PVO_WIRED)
 2115                 pvo->pvo_vaddr |= PVO_WIRED;
 2116         if (pvo_head != &moea_pvo_kunmanaged)
 2117                 pvo->pvo_vaddr |= PVO_MANAGED;
 2118         if (bootstrap)
 2119                 pvo->pvo_vaddr |= PVO_BOOTSTRAP;
 2120 
 2121         moea_pte_create(&pvo->pvo_pte.pte, sr, va, pa | pte_lo);
 2122 
 2123         /*
 2124          * Add to pmap list
 2125          */
 2126         RB_INSERT(pvo_tree, &pm->pmap_pvo, pvo);
 2127 
 2128         /*
 2129          * Remember if the list was empty; if so, this PVO will be the
 2130          * first item on it.
 2131          */
 2132         if (LIST_FIRST(pvo_head) == NULL)
 2133                 first = 1;
 2134         LIST_INSERT_HEAD(pvo_head, pvo, pvo_vlink);
 2135 
 2136         if (pvo->pvo_vaddr & PVO_WIRED)
 2137                 pm->pm_stats.wired_count++;
 2138         pm->pm_stats.resident_count++;
 2139 
 2140         i = moea_pte_insert(ptegidx, &pvo->pvo_pte.pte);
 2141         KASSERT(i < 8, ("Invalid PTE index"));
 2142         if (i >= 0) {
 2143                 PVO_PTEGIDX_SET(pvo, i);
 2144         } else {
 2145                 panic("moea_pvo_enter: overflow");
 2146                 moea_pte_overflow++;
 2147         }
 2148         mtx_unlock(&moea_table_mutex);
 2149 
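              /*
               * ENOENT signals that this was the first mapping on pvo_head;
               * callers such as moea_kenter() treat it as success.
               */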
 2150         return (first ? ENOENT : 0);
 2151 }
 2152 
 2153 static void
 2154 moea_pvo_remove(struct pvo_entry *pvo, int pteidx)
 2155 {
 2156         struct  pte *pt;
 2157 
 2158         /*
 2159          * If there is an active pte entry, we need to deactivate it (and
 2160          * save the ref & chg bits).
 2161          */
 2162         pt = moea_pvo_to_pte(pvo, pteidx);
 2163         if (pt != NULL) {
 2164                 moea_pte_unset(pt, &pvo->pvo_pte.pte, pvo->pvo_vaddr);
 2165                 mtx_unlock(&moea_table_mutex);
 2166                 PVO_PTEGIDX_CLR(pvo);
 2167         } else {
 2168                 moea_pte_overflow--;
 2169         }
 2170 
 2171         /*
 2172          * Update our statistics.
 2173          */
 2174         pvo->pvo_pmap->pm_stats.resident_count--;
 2175         if (pvo->pvo_vaddr & PVO_WIRED)
 2176                 pvo->pvo_pmap->pm_stats.wired_count--;
 2177 
 2178         /*
 2179          * Remove this PVO from the PV and pmap lists.
 2180          */
 2181         LIST_REMOVE(pvo, pvo_vlink);
 2182         RB_REMOVE(pvo_tree, &pvo->pvo_pmap->pmap_pvo, pvo);
 2183 
 2184         /*
 2185          * Save the REF/CHG bits into their cache if the page is managed.
 2186          * Clear PGA_WRITEABLE if all mappings of the page have been removed.
 2187          */
 2188         if ((pvo->pvo_vaddr & PVO_MANAGED) == PVO_MANAGED) {
 2189                 struct vm_page *pg;
 2190 
 2191                 pg = PHYS_TO_VM_PAGE(PVO_PADDR(pvo));
 2192                 if (pg != NULL) {
 2193                         moea_attr_save(pg, pvo->pvo_pte.pte.pte_lo &
 2194                             (PTE_REF | PTE_CHG));
 2195                         if (LIST_EMPTY(&pg->md.mdpg_pvoh))
 2196                                 vm_page_aflag_clear(pg, PGA_WRITEABLE);
 2197                 }
 2198         }
 2199 
 2200         /*
 2201          * Remove this from the overflow list and return it to the pool
 2202          * if we aren't going to reuse it.
 2203          */
 2204         LIST_REMOVE(pvo, pvo_olink);
 2205         if (!(pvo->pvo_vaddr & PVO_BOOTSTRAP))
 2206                 uma_zfree(pvo->pvo_vaddr & PVO_MANAGED ? moea_mpvo_zone :
 2207                     moea_upvo_zone, pvo);
 2208         moea_pvo_entries--;
 2209         moea_pvo_remove_calls++;
 2210 }
 2211 
 2212 static __inline int
 2213 moea_pvo_pte_index(const struct pvo_entry *pvo, int ptegidx)
 2214 {
 2215         int     pteidx;
 2216 
 2217         /*
 2218          * We can find the actual pte entry without searching by grabbing
 2219          * the PTEG index from 3 unused bits in pte_lo[11:9] and by
 2220          * noticing the HID bit.
 2221          */
 2222         pteidx = ptegidx * 8 + PVO_PTEGIDX_GET(pvo);
 2223         if (pvo->pvo_pte.pte.pte_hi & PTE_HID)
 2224                 pteidx ^= moea_pteg_mask * 8;
 2225 
 2226         return (pteidx);
 2227 }
 2228 
 2229 static struct pvo_entry *
 2230 moea_pvo_find_va(pmap_t pm, vm_offset_t va, int *pteidx_p)
 2231 {
 2232         struct  pvo_entry *pvo;
 2233         int     ptegidx;
 2234         u_int   sr;
 2235 
 2236         va &= ~ADDR_POFF;
 2237         sr = va_to_sr(pm->pm_sr, va);
 2238         ptegidx = va_to_pteg(sr, va);
 2239 
 2240         mtx_lock(&moea_table_mutex);
 2241         LIST_FOREACH(pvo, &moea_pvo_table[ptegidx], pvo_olink) {
 2242                 if (pvo->pvo_pmap == pm && PVO_VADDR(pvo) == va) {
 2243                         if (pteidx_p)
 2244                                 *pteidx_p = moea_pvo_pte_index(pvo, ptegidx);
 2245                         break;
 2246                 }
 2247         }
 2248         mtx_unlock(&moea_table_mutex);
 2249 
 2250         return (pvo);
 2251 }
 2252 
 2253 static struct pte *
 2254 moea_pvo_to_pte(const struct pvo_entry *pvo, int pteidx)
 2255 {
 2256         struct  pte *pt;
 2257 
 2258         /*
 2259          * If we haven't been supplied the ptegidx, calculate it.
 2260          */
 2261         if (pteidx == -1) {
 2262                 int     ptegidx;
 2263                 u_int   sr;
 2264 
 2265                 sr = va_to_sr(pvo->pvo_pmap->pm_sr, pvo->pvo_vaddr);
 2266                 ptegidx = va_to_pteg(sr, pvo->pvo_vaddr);
 2267                 pteidx = moea_pvo_pte_index(pvo, ptegidx);
 2268         }
 2269 
 2270         pt = &moea_pteg_table[pteidx >> 3].pt[pteidx & 7];
 2271         mtx_lock(&moea_table_mutex);
 2272 
 2273         if ((pvo->pvo_pte.pte.pte_hi & PTE_VALID) && !PVO_PTEGIDX_ISSET(pvo)) {
 2274                 panic("moea_pvo_to_pte: pvo %p has valid pte in pvo but no "
 2275                     "valid pte index", pvo);
 2276         }
 2277 
 2278         if ((pvo->pvo_pte.pte.pte_hi & PTE_VALID) == 0 && PVO_PTEGIDX_ISSET(pvo)) {
 2279                 panic("moea_pvo_to_pte: pvo %p has valid pte index in pvo "
 2280                     "but no valid pte", pvo);
 2281         }
 2282 
 2283         if ((pt->pte_hi ^ (pvo->pvo_pte.pte.pte_hi & ~PTE_VALID)) == PTE_VALID) {
 2284                 if ((pvo->pvo_pte.pte.pte_hi & PTE_VALID) == 0) {
 2285                         panic("moea_pvo_to_pte: pvo %p has valid pte in "
 2286                             "moea_pteg_table %p but invalid in pvo", pvo, pt);
 2287                 }
 2288 
 2289                 if (((pt->pte_lo ^ pvo->pvo_pte.pte.pte_lo) & ~(PTE_CHG|PTE_REF))
 2290                     != 0) {
 2291                         panic("moea_pvo_to_pte: pvo %p pte does not match "
 2292                             "pte %p in moea_pteg_table", pvo, pt);
 2293                 }
 2294 
 2295                 mtx_assert(&moea_table_mutex, MA_OWNED);
 2296                 return (pt);
 2297         }
 2298 
 2299         if (pvo->pvo_pte.pte.pte_hi & PTE_VALID) {
 2300                 panic("moea_pvo_to_pte: pvo %p has invalid pte %p in "
 2301                     "moea_pteg_table but valid in pvo: %8x, %8x", pvo, pt, pvo->pvo_pte.pte.pte_hi, pt->pte_hi);
 2302         }
 2303 
 2304         mtx_unlock(&moea_table_mutex);
 2305         return (NULL);
 2306 }
 2307 
 2308 /*
 2309  * XXX: THIS STUFF SHOULD BE IN pte.c?
 2310  */
 2311 int
 2312 moea_pte_spill(vm_offset_t addr)
 2313 {
 2314         struct  pvo_entry *source_pvo, *victim_pvo;
 2315         struct  pvo_entry *pvo;
 2316         int     ptegidx, i, j;
 2317         u_int   sr;
 2318         struct  pteg *pteg;
 2319         struct  pte *pt;
 2320 
 2321         moea_pte_spills++;
 2322 
 2323         sr = mfsrin(addr);
 2324         ptegidx = va_to_pteg(sr, addr);
 2325 
 2326         /*
 2327          * Have to substitute some entry.  Use the primary hash for this.
 2328          * Use low bits of timebase as random generator.
 2329          */
 2330         pteg = &moea_pteg_table[ptegidx];
 2331         mtx_lock(&moea_table_mutex);
 2332         __asm __volatile("mftb %0" : "=r"(i));
 2333         i &= 7;
 2334         pt = &pteg->pt[i];
 2335 
 2336         source_pvo = NULL;
 2337         victim_pvo = NULL;
 2338         LIST_FOREACH(pvo, &moea_pvo_table[ptegidx], pvo_olink) {
 2339                 /*
 2340                  * We need to find a pvo entry for this address.
 2341                  */
 2342                 if (source_pvo == NULL &&
 2343                     moea_pte_match(&pvo->pvo_pte.pte, sr, addr,
 2344                     pvo->pvo_pte.pte.pte_hi & PTE_HID)) {
 2345                         /*
 2346                          * Now found an entry to be spilled into the pteg.
 2347                          * The PTE is now valid, so we know it's active.
 2348                          */
 2349                         j = moea_pte_insert(ptegidx, &pvo->pvo_pte.pte);
 2350 
 2351                         if (j >= 0) {
 2352                                 PVO_PTEGIDX_SET(pvo, j);
 2353                                 moea_pte_overflow--;
 2354                                 mtx_unlock(&moea_table_mutex);
 2355                                 return (1);
 2356                         }
 2357 
 2358                         source_pvo = pvo;
 2359 
 2360                         if (victim_pvo != NULL)
 2361                                 break;
 2362                 }
 2363 
 2364                 /*
 2365                  * We also need the pvo entry of the victim we are replacing
 2366                  * so save the R & C bits of the PTE.
 2367                  */
 2368                 if ((pt->pte_hi & PTE_HID) == 0 && victim_pvo == NULL &&
 2369                     moea_pte_compare(pt, &pvo->pvo_pte.pte)) {
 2370                         victim_pvo = pvo;
 2371                         if (source_pvo != NULL)
 2372                                 break;
 2373                 }
 2374         }
 2375 
 2376         if (source_pvo == NULL) {
 2377                 mtx_unlock(&moea_table_mutex);
 2378                 return (0);
 2379         }
 2380 
 2381         if (victim_pvo == NULL) {
 2382                 if ((pt->pte_hi & PTE_HID) == 0)
 2383                         panic("moea_pte_spill: victim p-pte (%p) has no pvo "
 2384                             "entry", pt);
 2385 
 2386                 /*
 2387                  * If this is a secondary PTE, we need to search its primary
 2388                  * pvo bucket for the matching PVO.
 2389                  */
 2390                 LIST_FOREACH(pvo, &moea_pvo_table[ptegidx ^ moea_pteg_mask],
 2391                     pvo_olink) {
 2392                         /*
 2393                          * We also need the pvo entry of the victim we are
 2394                          * replacing so save the R & C bits of the PTE.
 2395                          */
 2396                         if (moea_pte_compare(pt, &pvo->pvo_pte.pte)) {
 2397                                 victim_pvo = pvo;
 2398                                 break;
 2399                         }
 2400                 }
 2401 
 2402                 if (victim_pvo == NULL)
 2403                         panic("moea_pte_spill: victim s-pte (%p) has no pvo "
 2404                             "entry", pt);
 2405         }
 2406 
 2407         /*
 2408          * We are invalidating the TLB entry for the EA we are replacing even
 2409          * though it's valid.  If we don't, we lose any ref/chg bit changes
 2410          * contained in the TLB entry.
 2411          */
 2412         source_pvo->pvo_pte.pte.pte_hi &= ~PTE_HID;
 2413 
 2414         moea_pte_unset(pt, &victim_pvo->pvo_pte.pte, victim_pvo->pvo_vaddr);
 2415         moea_pte_set(pt, &source_pvo->pvo_pte.pte);
 2416 
 2417         PVO_PTEGIDX_CLR(victim_pvo);
 2418         PVO_PTEGIDX_SET(source_pvo, i);
 2419         moea_pte_replacements++;
 2420 
 2421         mtx_unlock(&moea_table_mutex);
 2422         return (1);
 2423 }
 2424 
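      /*
       * Pick a PVO in the given PTEG that may be evicted: skip wired and
       * invalid entries, and prefer one whose PTE has not been referenced.
       */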
 2425 static __inline struct pvo_entry *
 2426 moea_pte_spillable_ident(u_int ptegidx)
 2427 {
 2428         struct  pte *pt;
 2429         struct  pvo_entry *pvo_walk, *pvo = NULL;
 2430 
 2431         LIST_FOREACH(pvo_walk, &moea_pvo_table[ptegidx], pvo_olink) {
 2432                 if (pvo_walk->pvo_vaddr & PVO_WIRED)
 2433                         continue;
 2434 
 2435                 if (!(pvo_walk->pvo_pte.pte.pte_hi & PTE_VALID))
 2436                         continue;
 2437 
 2438                 pt = moea_pvo_to_pte(pvo_walk, -1);
 2439 
 2440                 if (pt == NULL)
 2441                         continue;
 2442 
 2443                 pvo = pvo_walk;
 2444 
 2445                 mtx_unlock(&moea_table_mutex);
 2446                 if (!(pt->pte_lo & PTE_REF))
 2447                         return (pvo_walk);
 2448         }
 2449 
 2450         return (pvo);
 2451 }
 2452 
 2453 static int
 2454 moea_pte_insert(u_int ptegidx, struct pte *pvo_pt)
 2455 {
 2456         struct  pte *pt;
 2457         struct  pvo_entry *victim_pvo;
 2458         int     i;
 2459         int     victim_idx;
 2460         u_int   pteg_bkpidx = ptegidx;
 2461 
 2462         mtx_assert(&moea_table_mutex, MA_OWNED);
 2463 
 2464         /*
 2465          * First try primary hash.
 2466          */
 2467         for (pt = moea_pteg_table[ptegidx].pt, i = 0; i < 8; i++, pt++) {
 2468                 if ((pt->pte_hi & PTE_VALID) == 0) {
 2469                         pvo_pt->pte_hi &= ~PTE_HID;
 2470                         moea_pte_set(pt, pvo_pt);
 2471                         return (i);
 2472                 }
 2473         }
 2474 
 2475         /*
 2476          * Now try secondary hash.
 2477          */
 2478         ptegidx ^= moea_pteg_mask;
 2479 
 2480         for (pt = moea_pteg_table[ptegidx].pt, i = 0; i < 8; i++, pt++) {
 2481                 if ((pt->pte_hi & PTE_VALID) == 0) {
 2482                         pvo_pt->pte_hi |= PTE_HID;
 2483                         moea_pte_set(pt, pvo_pt);
 2484                         return (i);
 2485                 }
 2486         }
 2487 
 2488         /* Try again, but this time try to force a PTE out. */
 2489         ptegidx = pteg_bkpidx;
 2490 
 2491         victim_pvo = moea_pte_spillable_ident(ptegidx);
 2492         if (victim_pvo == NULL) {
 2493                 ptegidx ^= moea_pteg_mask;
 2494                 victim_pvo = moea_pte_spillable_ident(ptegidx);
 2495         }
 2496 
 2497         if (victim_pvo == NULL) {
 2498                 panic("moea_pte_insert: overflow");
 2499                 return (-1);
 2500         }
 2501 
 2502         victim_idx = moea_pvo_pte_index(victim_pvo, ptegidx);
 2503 
 2504         if (pteg_bkpidx == ptegidx)
 2505                 pvo_pt->pte_hi &= ~PTE_HID;
 2506         else
 2507                 pvo_pt->pte_hi |= PTE_HID;
 2508 
 2509         /*
 2510          * Synchronize the sacrifice PTE with its PVO, then mark both
 2511          * invalid. The PVO will be reused when/if the VM system comes
 2512          * here after a fault.
 2513          */
 2514         pt = &moea_pteg_table[victim_idx >> 3].pt[victim_idx & 7];
 2515 
 2516         if (pt->pte_hi != victim_pvo->pvo_pte.pte.pte_hi)
 2517             panic("Victim PVO doesn't match PTE! PVO: %8x, PTE: %8x", victim_pvo->pvo_pte.pte.pte_hi, pt->pte_hi);
 2518 
 2519         /*
 2520          * Set the new PTE.
 2521          */
 2522         moea_pte_unset(pt, &victim_pvo->pvo_pte.pte, victim_pvo->pvo_vaddr);
 2523         PVO_PTEGIDX_CLR(victim_pvo);
 2524         moea_pte_overflow++;
 2525         moea_pte_set(pt, pvo_pt);
 2526 
 2527         return (victim_idx & 7);
 2528 }
 2529 
 2530 static boolean_t
 2531 moea_query_bit(vm_page_t m, int ptebit)
 2532 {
 2533         struct  pvo_entry *pvo;
 2534         struct  pte *pt;
 2535 
 2536         rw_assert(&pvh_global_lock, RA_WLOCKED);
 2537         if (moea_attr_fetch(m) & ptebit)
 2538                 return (TRUE);
 2539 
 2540         LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
 2541                 /*
 2542                  * See if we saved the bit off.  If so, cache it and return
 2543                  * success.
 2544                  */
 2545                 if (pvo->pvo_pte.pte.pte_lo & ptebit) {
 2546                         moea_attr_save(m, ptebit);
 2547                         return (TRUE);
 2548                 }
 2549         }
 2550 
 2551         /*
 2552          * No luck, now go through the hard part of looking at the PTEs
 2553          * themselves.  Sync so that any pending REF/CHG bits are flushed to
 2554          * the PTEs.
 2555          */
 2556         powerpc_sync();
 2557         LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
 2558                 /*
 2559                  * See if this pvo has a valid PTE.  if so, fetch the
 2560                  * REF/CHG bits from the valid PTE.  If the appropriate
 2561                  * ptebit is set, cache it and return success.
 2562                  */
 2563                 pt = moea_pvo_to_pte(pvo, -1);
 2564                 if (pt != NULL) {
 2565                         moea_pte_synch(pt, &pvo->pvo_pte.pte);
 2566                         mtx_unlock(&moea_table_mutex);
 2567                         if (pvo->pvo_pte.pte.pte_lo & ptebit) {
 2568                                 moea_attr_save(m, ptebit);
 2569                                 return (TRUE);
 2570                         }
 2571                 }
 2572         }
 2573 
 2574         return (FALSE);
 2575 }
 2576 
 2577 static u_int
 2578 moea_clear_bit(vm_page_t m, int ptebit)
 2579 {
 2580         u_int   count;
 2581         struct  pvo_entry *pvo;
 2582         struct  pte *pt;
 2583 
 2584         rw_assert(&pvh_global_lock, RA_WLOCKED);
 2585 
 2586         /*
 2587          * Clear the cached value.
 2588          */
 2589         moea_attr_clear(m, ptebit);
 2590 
 2591         /*
 2592          * Sync so that any pending REF/CHG bits are flushed to the PTEs (so
 2593          * we can reset the right ones).  Note that since the pvo entries and
 2594          * list heads are accessed via BAT0 and are never placed in the page
 2595          * table, we don't have to worry about further accesses setting the
 2596          * REF/CHG bits.
 2597          */
 2598         powerpc_sync();
 2599 
 2600         /*
 2601          * For each pvo entry, clear the pvo's ptebit.  If this pvo has a
 2602          * valid pte clear the ptebit from the valid pte.
 2603          */
 2604         count = 0;
 2605         LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
 2606                 pt = moea_pvo_to_pte(pvo, -1);
 2607                 if (pt != NULL) {
 2608                         moea_pte_synch(pt, &pvo->pvo_pte.pte);
 2609                         if (pvo->pvo_pte.pte.pte_lo & ptebit) {
 2610                                 count++;
 2611                                 moea_pte_clear(pt, PVO_VADDR(pvo), ptebit);
 2612                         }
 2613                         mtx_unlock(&moea_table_mutex);
 2614                 }
 2615                 pvo->pvo_pte.pte.pte_lo &= ~ptebit;
 2616         }
 2617 
 2618         return (count);
 2619 }
 2620 
 2621 /*
 2622  * Return true if the physical range is encompassed by the battable[idx]
 2623  */
 2624 static int
 2625 moea_bat_mapped(int idx, vm_paddr_t pa, vm_size_t size)
 2626 {
 2627         u_int prot;
 2628         u_int32_t start;
 2629         u_int32_t end;
 2630         u_int32_t bat_ble;
 2631 
 2632         /*
 2633          * Return immediately if not a valid mapping
 2634          */
 2635         if (!(battable[idx].batu & BAT_Vs))
 2636                 return (EINVAL);
 2637 
 2638         /*
 2639          * The BAT entry must be cache-inhibited, guarded, and r/w
 2640          * so it can function as an i/o page
 2641          */
 2642         prot = battable[idx].batl & (BAT_I|BAT_G|BAT_PP_RW);
 2643         if (prot != (BAT_I|BAT_G|BAT_PP_RW))
 2644                 return (EPERM);
 2645 
 2646         /*
 2647          * The address should be within the BAT range. Assume that the
 2648          * start address in the BAT has the correct alignment (thus
 2649          * not requiring masking)
 2650          */
 2651         start = battable[idx].batl & BAT_PBS;
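              /*
               * The block-length (BL) field in the upper BAT word gives the
               * size of the mapping; derive the inclusive end address from
               * it (the minimum BAT block is 128KB).
               */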
 2652         bat_ble = (battable[idx].batu & ~(BAT_EBS)) | 0x03;
 2653         end = start | (bat_ble << 15) | 0x7fff;
 2654 
 2655         if ((pa < start) || ((pa + size) > end))
 2656                 return (ERANGE);
 2657 
 2658         return (0);
 2659 }
 2660 
 2661 boolean_t
 2662 moea_dev_direct_mapped(vm_paddr_t pa, vm_size_t size)
 2663 {
 2664         int i;
 2665 
 2666         /*
 2667          * This currently does not work for entries that
 2668          * overlap 256M BAT segments.
 2669          */
 2670 
 2671         for (i = 0; i < 16; i++)
 2672                 if (moea_bat_mapped(i, pa, size) == 0)
 2673                         return (0);
 2674 
 2675         return (EFAULT);
 2676 }
 2677 
 2678 /*
 2679  * Map a set of physical memory pages into the kernel virtual
 2680  * address space. Return a pointer to where it is mapped. This
 2681  * routine is intended to be used for mapping device memory,
 2682  * NOT real memory.
 2683  */
 2684 void *
 2685 moea_mapdev(vm_paddr_t pa, vm_size_t size)
 2686 {
 2687 
 2688         return (moea_mapdev_attr(pa, size, VM_MEMATTR_DEFAULT));
 2689 }
 2690 
 2691 void *
 2692 moea_mapdev_attr(vm_paddr_t pa, vm_size_t size, vm_memattr_t ma)
 2693 {
 2694         vm_offset_t va, tmpva, ppa, offset;
 2695         int i;
 2696 
 2697         ppa = trunc_page(pa);
 2698         offset = pa & PAGE_MASK;
 2699         size = roundup(offset + size, PAGE_SIZE);
 2700 
 2701         /*
 2702          * If the physical address lies within a valid BAT table entry,
 2703          * return the 1:1 mapping. This currently doesn't work
 2704          * for regions that overlap 256M BAT segments.
 2705          */
 2706         for (i = 0; i < 16; i++) {
 2707                 if (moea_bat_mapped(i, pa, size) == 0)
 2708                         return ((void *) pa);
 2709         }
 2710 
 2711         va = kva_alloc(size);
 2712         if (!va)
 2713                 panic("moea_mapdev: Couldn't alloc kernel virtual memory");
 2714 
 2715         for (tmpva = va; size > 0;) {
 2716                 moea_kenter_attr(tmpva, ppa, ma);
 2717                 tlbie(tmpva);
 2718                 size -= PAGE_SIZE;
 2719                 tmpva += PAGE_SIZE;
 2720                 ppa += PAGE_SIZE;
 2721         }
 2722 
 2723         return ((void *)(va + offset));
 2724 }
 2725 
 2726 void
 2727 moea_unmapdev(void *p, vm_size_t size)
 2728 {
 2729         vm_offset_t base, offset, va;
 2730 
 2731         /*
 2732          * If this is outside kernel virtual space, then it's a
 2733          * battable entry and doesn't require unmapping
 2734          */
 2735         va = (vm_offset_t)p;
 2736         if ((va >= VM_MIN_KERNEL_ADDRESS) && (va <= virtual_end)) {
 2737                 base = trunc_page(va);
 2738                 offset = va & PAGE_MASK;
 2739                 size = roundup(offset + size, PAGE_SIZE);
 2740                 moea_qremove(base, atop(size));
 2741                 kva_free(base, size);
 2742         }
 2743 }
 2744 
 2745 static void
 2746 moea_sync_icache(pmap_t pm, vm_offset_t va, vm_size_t sz)
 2747 {
 2748         struct pvo_entry *pvo;
 2749         vm_offset_t lim;
 2750         vm_paddr_t pa;
 2751         vm_size_t len;
 2752 
 2753         PMAP_LOCK(pm);
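              /* Flush the instruction cache one page (or partial page) at a time. */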
 2754         while (sz > 0) {
 2755                 lim = round_page(va + 1);
 2756                 len = MIN(lim - va, sz);
 2757                 pvo = moea_pvo_find_va(pm, va & ~ADDR_POFF, NULL);
 2758                 if (pvo != NULL) {
 2759                         pa = PVO_PADDR(pvo) | (va & ADDR_POFF);
 2760                         moea_syncicache(pa, len);
 2761                 }
 2762                 va += len;
 2763                 sz -= len;
 2764         }
 2765         PMAP_UNLOCK(pm);
 2766 }
 2767 
 2768 void
 2769 moea_dumpsys_map(vm_paddr_t pa, size_t sz, void **va)
 2770 {
 2771 
 2772         *va = (void *)pa;
 2773 }
 2774 
 2775 extern struct dump_pa dump_map[PHYS_AVAIL_SZ + 1];
 2776 
 2777 void
 2778 moea_scan_init()
 2779 {
 2780         struct pvo_entry *pvo;
 2781         vm_offset_t va;
 2782         int i;
 2783 
 2784         if (!do_minidump) {
 2785                 /* Initialize phys. segments for dumpsys(). */
 2786                 memset(&dump_map, 0, sizeof(dump_map));
 2787                 mem_regions(&pregions, &pregions_sz, &regions, &regions_sz);
 2788                 for (i = 0; i < pregions_sz; i++) {
 2789                         dump_map[i].pa_start = pregions[i].mr_start;
 2790                         dump_map[i].pa_size = pregions[i].mr_size;
 2791                 }
 2792                 return;
 2793         }
 2794 
 2795         /* Virtual segments for minidumps: */
 2796         memset(&dump_map, 0, sizeof(dump_map));
 2797 
 2798         /* 1st: kernel .data and .bss. */
 2799         dump_map[0].pa_start = trunc_page((uintptr_t)_etext);
 2800         dump_map[0].pa_size =
 2801             round_page((uintptr_t)_end) - dump_map[0].pa_start;
 2802 
 2803         /* 2nd: msgbuf and tables (see pmap_bootstrap()). */
 2804         dump_map[1].pa_start = (vm_paddr_t)msgbufp->msg_ptr;
 2805         dump_map[1].pa_size = round_page(msgbufp->msg_size);
 2806 
 2807         /* 3rd: kernel VM. */
 2808         va = dump_map[1].pa_start + dump_map[1].pa_size;
 2809         /* Find start of next chunk (from va). */
 2810         while (va < virtual_end) {
 2811                 /* Don't dump the buffer cache. */
 2812                 if (va >= kmi.buffer_sva && va < kmi.buffer_eva) {
 2813                         va = kmi.buffer_eva;
 2814                         continue;
 2815                 }
 2816                 pvo = moea_pvo_find_va(kernel_pmap, va & ~ADDR_POFF, NULL);
 2817                 if (pvo != NULL && (pvo->pvo_pte.pte.pte_hi & PTE_VALID))
 2818                         break;
 2819                 va += PAGE_SIZE;
 2820         }
 2821         if (va < virtual_end) {
 2822                 dump_map[2].pa_start = va;
 2823                 va += PAGE_SIZE;
 2824                 /* Find last page in chunk. */
 2825                 while (va < virtual_end) {
 2826                         /* Don't run into the buffer cache. */
 2827                         if (va == kmi.buffer_sva)
 2828                                 break;
 2829                         pvo = moea_pvo_find_va(kernel_pmap, va & ~ADDR_POFF,
 2830                             NULL);
 2831                         if (pvo == NULL ||
 2832                             !(pvo->pvo_pte.pte.pte_hi & PTE_VALID))
 2833                                 break;
 2834                         va += PAGE_SIZE;
 2835                 }
 2836                 dump_map[2].pa_size = va - dump_map[2].pa_start;
 2837         }
 2838 }
