FreeBSD/Linux Kernel Cross Reference
sys/sparc64/sparc64/pmap.c


    1 /*
    2  * Copyright (c) 1991 Regents of the University of California.
    3  * All rights reserved.
    4  * Copyright (c) 1994 John S. Dyson
    5  * All rights reserved.
    6  * Copyright (c) 1994 David Greenman
    7  * All rights reserved.
    8  *
    9  * This code is derived from software contributed to Berkeley by
   10  * the Systems Programming Group of the University of Utah Computer
   11  * Science Department and William Jolitz of UUNET Technologies Inc.
   12  *
   13  * Redistribution and use in source and binary forms, with or without
   14  * modification, are permitted provided that the following conditions
   15  * are met:
   16  * 1. Redistributions of source code must retain the above copyright
   17  *    notice, this list of conditions and the following disclaimer.
   18  * 2. Redistributions in binary form must reproduce the above copyright
   19  *    notice, this list of conditions and the following disclaimer in the
   20  *    documentation and/or other materials provided with the distribution.
   21  * 3. All advertising materials mentioning features or use of this software
   22  *    must display the following acknowledgement:
   23  *      This product includes software developed by the University of
   24  *      California, Berkeley and its contributors.
   25  * 4. Neither the name of the University nor the names of its contributors
   26  *    may be used to endorse or promote products derived from this software
   27  *    without specific prior written permission.
   28  *
   29  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
   30  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   31  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   32  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
   33  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   34  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
   35  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   36  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   37  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   38  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   39  * SUCH DAMAGE.
   40  *
   41  *      from:   @(#)pmap.c      7.7 (Berkeley)  5/12/91
   42  * $FreeBSD: releng/5.2/sys/sparc64/sparc64/pmap.c 120722 2003-10-03 22:46:53Z alc $
   43  */
   44 
   45 /*
   46  * Manages physical address maps.
   47  *
   48  * In addition to hardware address maps, this module is called upon to
   49  * provide software-use-only maps which may or may not be stored in the
   50  * same form as hardware maps.  These pseudo-maps are used to store
   51  * intermediate results from copy operations to and from address spaces.
   52  *
   53  * Since the information managed by this module is also stored by the
   54  * logical address mapping module, this module may throw away valid virtual
   55  * to physical mappings at almost any time.  However, invalidations of
   56  * mappings must be done as requested.
   57  *
   58  * In order to cope with hardware architectures which make virtual to
   59  * physical map invalidates expensive, this module may delay invalidate
   60  * or reduced protection operations until such time as they are actually
   61  * necessary.  This module is given full information as to which processors
   62  * are currently using which maps, and to when physical maps must be made
   63  * correct.
   64  */
   65 
   66 #include "opt_kstack_pages.h"
   67 #include "opt_msgbuf.h"
   68 #include "opt_pmap.h"
   69 
   70 #include <sys/param.h>
   71 #include <sys/kernel.h>
   72 #include <sys/ktr.h>
   73 #include <sys/lock.h>
   74 #include <sys/msgbuf.h>
   75 #include <sys/mutex.h>
   76 #include <sys/proc.h>
   77 #include <sys/smp.h>
   78 #include <sys/sysctl.h>
   79 #include <sys/systm.h>
   80 #include <sys/vmmeter.h>
   81 
   82 #include <dev/ofw/openfirm.h>
   83 
   84 #include <vm/vm.h> 
   85 #include <vm/vm_param.h>
   86 #include <vm/vm_kern.h>
   87 #include <vm/vm_page.h>
   88 #include <vm/vm_map.h>
   89 #include <vm/vm_object.h>
   90 #include <vm/vm_extern.h>
   91 #include <vm/vm_pageout.h>
   92 #include <vm/vm_pager.h>
   93 
   94 #include <machine/cache.h>
   95 #include <machine/frame.h>
   96 #include <machine/instr.h>
   97 #include <machine/md_var.h>
   98 #include <machine/metadata.h>
   99 #include <machine/ofw_mem.h>
  100 #include <machine/smp.h>
  101 #include <machine/tlb.h>
  102 #include <machine/tte.h>
  103 #include <machine/tsb.h>
  104 
  105 #define PMAP_DEBUG
  106 
  107 #ifndef PMAP_SHPGPERPROC
  108 #define PMAP_SHPGPERPROC        200
  109 #endif
  110 
  111 /*
  112  * Virtual and physical address of message buffer.
  113  */
  114 struct msgbuf *msgbufp;
  115 vm_paddr_t msgbuf_phys;
  116 
  117 /*
  118  * Physical addresses of first and last available physical page.
  119  */
  120 vm_paddr_t avail_start;
  121 vm_paddr_t avail_end;
  122 
  123 int pmap_pagedaemon_waken;
  124 
  125 /*
  126  * Map of physical memory reagions.
  127  */
  128 vm_paddr_t phys_avail[128];
  129 static struct ofw_mem_region mra[128];
  130 struct ofw_mem_region sparc64_memreg[128];
  131 int sparc64_nmemreg;
  132 static struct ofw_map translations[128];
  133 static int translations_size;
  134 
  135 static vm_offset_t pmap_idle_map;
  136 static vm_offset_t pmap_temp_map_1;
  137 static vm_offset_t pmap_temp_map_2;
  138 
  139 /*
  140  * First and last available kernel virtual addresses.
  141  */
  142 vm_offset_t virtual_avail;
  143 vm_offset_t virtual_end;
  144 vm_offset_t kernel_vm_end;
  145 
  146 vm_offset_t vm_max_kernel_address;
  147 
  148 /*
  149  * Kernel pmap.
  150  */
  151 struct pmap kernel_pmap_store;
  152 
  153 /*
  154  * Allocate physical memory for use in pmap_bootstrap.
  155  */
  156 static vm_paddr_t pmap_bootstrap_alloc(vm_size_t size);
  157 
  158 extern int tl1_immu_miss_patch_1[];
  159 extern int tl1_immu_miss_patch_2[];
  160 extern int tl1_dmmu_miss_patch_1[];
  161 extern int tl1_dmmu_miss_patch_2[];
  162 extern int tl1_dmmu_prot_patch_1[];
  163 extern int tl1_dmmu_prot_patch_2[];
  164 
  165 /*
  166  * If a user pmap is processed with pmap_remove and the resident
  167  * count drops to 0, there are no more pages to remove, so we
  168  * need not continue.
  169  */
  170 #define PMAP_REMOVE_DONE(pm) \
  171         ((pm) != kernel_pmap && (pm)->pm_stats.resident_count == 0)
  172 
  173 /*
  174  * The threshold (in bytes) above which tsb_foreach() is used in pmap_remove()
  175  * and pmap_protect() instead of trying each virtual address.
  176  */
  177 #define PMAP_TSB_THRESH ((TSB_SIZE / 2) * PAGE_SIZE)
  178 
  179 SYSCTL_NODE(_debug, OID_AUTO, pmap_stats, CTLFLAG_RD, 0, "");
  180 
  181 PMAP_STATS_VAR(pmap_nenter);
  182 PMAP_STATS_VAR(pmap_nenter_update);
  183 PMAP_STATS_VAR(pmap_nenter_replace);
  184 PMAP_STATS_VAR(pmap_nenter_new);
  185 PMAP_STATS_VAR(pmap_nkenter);
  186 PMAP_STATS_VAR(pmap_nkenter_oc);
  187 PMAP_STATS_VAR(pmap_nkenter_stupid);
  188 PMAP_STATS_VAR(pmap_nkremove);
  189 PMAP_STATS_VAR(pmap_nqenter);
  190 PMAP_STATS_VAR(pmap_nqremove);
  191 PMAP_STATS_VAR(pmap_ncache_enter);
  192 PMAP_STATS_VAR(pmap_ncache_enter_c);
  193 PMAP_STATS_VAR(pmap_ncache_enter_oc);
  194 PMAP_STATS_VAR(pmap_ncache_enter_cc);
  195 PMAP_STATS_VAR(pmap_ncache_enter_coc);
  196 PMAP_STATS_VAR(pmap_ncache_enter_nc);
  197 PMAP_STATS_VAR(pmap_ncache_enter_cnc);
  198 PMAP_STATS_VAR(pmap_ncache_remove);
  199 PMAP_STATS_VAR(pmap_ncache_remove_c);
  200 PMAP_STATS_VAR(pmap_ncache_remove_oc);
  201 PMAP_STATS_VAR(pmap_ncache_remove_cc);
  202 PMAP_STATS_VAR(pmap_ncache_remove_coc);
  203 PMAP_STATS_VAR(pmap_ncache_remove_nc);
  204 PMAP_STATS_VAR(pmap_nzero_page);
  205 PMAP_STATS_VAR(pmap_nzero_page_c);
  206 PMAP_STATS_VAR(pmap_nzero_page_oc);
  207 PMAP_STATS_VAR(pmap_nzero_page_nc);
  208 PMAP_STATS_VAR(pmap_nzero_page_area);
  209 PMAP_STATS_VAR(pmap_nzero_page_area_c);
  210 PMAP_STATS_VAR(pmap_nzero_page_area_oc);
  211 PMAP_STATS_VAR(pmap_nzero_page_area_nc);
  212 PMAP_STATS_VAR(pmap_nzero_page_idle);
  213 PMAP_STATS_VAR(pmap_nzero_page_idle_c);
  214 PMAP_STATS_VAR(pmap_nzero_page_idle_oc);
  215 PMAP_STATS_VAR(pmap_nzero_page_idle_nc);
  216 PMAP_STATS_VAR(pmap_ncopy_page);
  217 PMAP_STATS_VAR(pmap_ncopy_page_c);
  218 PMAP_STATS_VAR(pmap_ncopy_page_oc);
  219 PMAP_STATS_VAR(pmap_ncopy_page_nc);
  220 PMAP_STATS_VAR(pmap_ncopy_page_dc);
  221 PMAP_STATS_VAR(pmap_ncopy_page_doc);
  222 PMAP_STATS_VAR(pmap_ncopy_page_sc);
  223 PMAP_STATS_VAR(pmap_ncopy_page_soc);
  224 
  225 PMAP_STATS_VAR(pmap_nnew_thread);
  226 PMAP_STATS_VAR(pmap_nnew_thread_oc);
  227 
  228 /*
  229  * Quick sort callout for comparing memory regions.
  230  */
  231 static int mr_cmp(const void *a, const void *b);
  232 static int om_cmp(const void *a, const void *b);
  233 static int
  234 mr_cmp(const void *a, const void *b)
  235 {
  236         const struct ofw_mem_region *mra;
  237         const struct ofw_mem_region *mrb;
  238 
  239         mra = a;
  240         mrb = b;
  241         if (mra->mr_start < mrb->mr_start)
  242                 return (-1);
  243         else if (mra->mr_start > mrb->mr_start)
  244                 return (1);
  245         else
  246                 return (0);
  247 }
  248 static int
  249 om_cmp(const void *a, const void *b)
  250 {
  251         const struct ofw_map *oma;
  252         const struct ofw_map *omb;
  253 
  254         oma = a;
  255         omb = b;
  256         if (oma->om_start < omb->om_start)
  257                 return (-1);
  258         else if (oma->om_start > omb->om_start)
  259                 return (1);
  260         else
  261                 return (0);
  262 }
  263 
  264 /*
  265  * Bootstrap the system enough to run with virtual memory.
  266  */
  267 void
  268 pmap_bootstrap(vm_offset_t ekva)
  269 {
  270         struct pmap *pm;
  271         struct tte *tp;
  272         vm_offset_t off;
  273         vm_offset_t va;
  274         vm_paddr_t pa;
  275         vm_size_t physsz;
  276         vm_size_t virtsz;
  277         ihandle_t pmem;
  278         ihandle_t vmem;
  279         int sz;
  280         int i;
  281         int j;
  282 
  283         /*
  284          * Find out what physical memory is available from the prom and
  285          * initialize the phys_avail array.  This must be done before
  286          * pmap_bootstrap_alloc is called.
  287          */
  288         if ((pmem = OF_finddevice("/memory")) == -1)
  289                 panic("pmap_bootstrap: finddevice /memory");
  290         if ((sz = OF_getproplen(pmem, "available")) == -1)
  291                 panic("pmap_bootstrap: getproplen /memory/available");
  292         if (sizeof(phys_avail) < sz)
  293                 panic("pmap_bootstrap: phys_avail too small");
  294         if (sizeof(mra) < sz)
  295                 panic("pmap_bootstrap: mra too small");
  296         bzero(mra, sz);
  297         if (OF_getprop(pmem, "available", mra, sz) == -1)
  298                 panic("pmap_bootstrap: getprop /memory/available");
  299         sz /= sizeof(*mra);
  300         CTR0(KTR_PMAP, "pmap_bootstrap: physical memory");
  301         qsort(mra, sz, sizeof (*mra), mr_cmp);
  302         physsz = 0;
  303         getenv_quad("hw.physmem", &physmem);
  304         for (i = 0, j = 0; i < sz; i++, j += 2) {
  305                 CTR2(KTR_PMAP, "start=%#lx size=%#lx", mra[i].mr_start,
  306                     mra[i].mr_size);
  307                 if (physmem != 0 && btoc(physsz + mra[i].mr_size) >= physmem) {
  308                         if (btoc(physsz) < physmem) {
  309                                 phys_avail[j] = mra[i].mr_start;
  310                                 phys_avail[j + 1] = mra[i].mr_start +
  311                                     (ctob(physmem) - physsz);
  312                                 physsz = ctob(physmem);
  313                         }
  314                         break;
  315                 }
  316                 phys_avail[j] = mra[i].mr_start;
  317                 phys_avail[j + 1] = mra[i].mr_start + mra[i].mr_size;
  318                 physsz += mra[i].mr_size;
  319         }
  320         physmem = btoc(physsz);
  321 
  322         /*
  323          * Calculate the size of kernel virtual memory, and the size and mask
  324          * for the kernel tsb.
  325          */
  326         virtsz = roundup(physsz, PAGE_SIZE_4M << (PAGE_SHIFT - TTE_SHIFT));
  327         vm_max_kernel_address = VM_MIN_KERNEL_ADDRESS + virtsz;
  328         tsb_kernel_size = virtsz >> (PAGE_SHIFT - TTE_SHIFT);
  329         tsb_kernel_mask = (tsb_kernel_size >> TTE_SHIFT) - 1;
  330 
  331         /*
  332          * Allocate the kernel tsb and lock it in the tlb.
  333          */
  334         pa = pmap_bootstrap_alloc(tsb_kernel_size);
  335         if (pa & PAGE_MASK_4M)
  336                 panic("pmap_bootstrap: tsb unaligned\n");
  337         tsb_kernel_phys = pa;
  338         tsb_kernel = (struct tte *)(VM_MIN_KERNEL_ADDRESS - tsb_kernel_size);
  339         pmap_map_tsb();
  340         bzero(tsb_kernel, tsb_kernel_size);
  341 
  342         /*
  343          * Allocate and map the message buffer.
  344          */
  345         msgbuf_phys = pmap_bootstrap_alloc(MSGBUF_SIZE);
  346         msgbufp = (struct msgbuf *)TLB_PHYS_TO_DIRECT(msgbuf_phys);
  347 
  348         /*
  349          * Patch the virtual address and the tsb mask into the trap table.
  350          */
  351 
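      /*
       * SETHI() and OR_R_I_R() encode SPARC "sethi" and "or reg,imm,reg"
       * instructions.  PATCH() checks that the three instructions at the
       * given trap table location are still the expected sethi/or/sethi
       * sequence before or'ing the tsb mask and the tsb base address into
       * their immediate fields and flushing the patched instructions.
       */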
  352 #define SETHI(rd, imm22) \
  353         (EIF_OP(IOP_FORM2) | EIF_F2_RD(rd) | EIF_F2_OP2(INS0_SETHI) | \
  354             EIF_IMM((imm22) >> 10, 22))
  355 #define OR_R_I_R(rd, imm13, rs1) \
  356         (EIF_OP(IOP_MISC) | EIF_F3_RD(rd) | EIF_F3_OP3(INS2_OR) | \
  357             EIF_F3_RS1(rs1) | EIF_F3_I(1) | EIF_IMM(imm13, 13))
  358 
  359 #define PATCH(addr) do { \
  360         if (addr[0] != SETHI(IF_F2_RD(addr[0]), 0x0) || \
  361             addr[1] != OR_R_I_R(IF_F3_RD(addr[1]), 0x0, IF_F3_RS1(addr[1])) || \
  362             addr[2] != SETHI(IF_F2_RD(addr[2]), 0x0)) \
  363                 panic("pmap_bootstrap: patched instructions have changed"); \
  364         addr[0] |= EIF_IMM((tsb_kernel_mask) >> 10, 22); \
  365         addr[1] |= EIF_IMM(tsb_kernel_mask, 10); \
  366         addr[2] |= EIF_IMM(((vm_offset_t)tsb_kernel) >> 10, 22); \
  367         flush(addr); \
  368         flush(addr + 1); \
  369         flush(addr + 2); \
  370 } while (0)
  371 
  372         PATCH(tl1_immu_miss_patch_1);
  373         PATCH(tl1_immu_miss_patch_2);
  374         PATCH(tl1_dmmu_miss_patch_1);
  375         PATCH(tl1_dmmu_miss_patch_2);
  376         PATCH(tl1_dmmu_prot_patch_1);
  377         PATCH(tl1_dmmu_prot_patch_2);
  378         
  379         /*
  380          * Enter fake 8k pages for the 4MB kernel pages, so that
  381          * pmap_kextract() will work for them.
  382          */
  383         for (i = 0; i < kernel_tlb_slots; i++) {
  384                 pa = kernel_tlbs[i].te_pa;
  385                 va = kernel_tlbs[i].te_va;
  386                 for (off = 0; off < PAGE_SIZE_4M; off += PAGE_SIZE) {
  387                         tp = tsb_kvtotte(va + off);
  388                         tp->tte_vpn = TV_VPN(va + off, TS_8K);
  389                         tp->tte_data = TD_V | TD_8K | TD_PA(pa + off) |
  390                             TD_REF | TD_SW | TD_CP | TD_CV | TD_P | TD_W;
  391                 }
  392         }
  393 
  394         /*
  395          * Set the start and end of kva.  The kernel is loaded at the first
  396          * available 4 meg super page, so round up to the end of the page.
  397          */
  398         virtual_avail = roundup2(ekva, PAGE_SIZE_4M);
  399         virtual_end = vm_max_kernel_address;
  400         kernel_vm_end = vm_max_kernel_address;
  401 
  402         /*
  403          * Allocate kva space for temporary mappings.
  404          */
  405         pmap_idle_map = virtual_avail;
  406         virtual_avail += PAGE_SIZE * DCACHE_COLORS;
  407         pmap_temp_map_1 = virtual_avail;
  408         virtual_avail += PAGE_SIZE * DCACHE_COLORS;
  409         pmap_temp_map_2 = virtual_avail;
  410         virtual_avail += PAGE_SIZE * DCACHE_COLORS;
  411 
  412         /*
  413          * Allocate a kernel stack with guard page for thread0 and map it into
  414          * the kernel tsb.  We must ensure that the virtual address is coloured
  415          * properly, since we're allocating from phys_avail so the memory won't
  416          * have an associated vm_page_t.
  417          */
  418         pa = pmap_bootstrap_alloc(roundup(KSTACK_PAGES, DCACHE_COLORS) *
  419             PAGE_SIZE);
  420         kstack0_phys = pa;
  421         virtual_avail += roundup(KSTACK_GUARD_PAGES, DCACHE_COLORS) *
  422             PAGE_SIZE;
  423         kstack0 = virtual_avail;
  424         virtual_avail += roundup(KSTACK_PAGES, DCACHE_COLORS) * PAGE_SIZE;
  425         KASSERT(DCACHE_COLOR(kstack0) == DCACHE_COLOR(kstack0_phys),
  426             ("pmap_bootstrap: kstack0 miscoloured"));
  427         for (i = 0; i < KSTACK_PAGES; i++) {
  428                 pa = kstack0_phys + i * PAGE_SIZE;
  429                 va = kstack0 + i * PAGE_SIZE;
  430                 tp = tsb_kvtotte(va);
  431                 tp->tte_vpn = TV_VPN(va, TS_8K);
  432                 tp->tte_data = TD_V | TD_8K | TD_PA(pa) | TD_REF | TD_SW |
  433                     TD_CP | TD_CV | TD_P | TD_W;
  434         }
  435 
  436         /*
  437          * Calculate the first and last available physical addresses.
  438          */
  439         avail_start = phys_avail[0];
  440         for (i = 0; phys_avail[i + 2] != 0; i += 2)
  441                 ;
  442         avail_end = phys_avail[i + 1];
  443         Maxmem = sparc64_btop(avail_end);
  444 
  445         /*
  446          * Add the prom mappings to the kernel tsb.
  447          */
  448         if ((vmem = OF_finddevice("/virtual-memory")) == -1)
  449                 panic("pmap_bootstrap: finddevice /virtual-memory");
  450         if ((sz = OF_getproplen(vmem, "translations")) == -1)
  451                 panic("pmap_bootstrap: getproplen translations");
  452         if (sizeof(translations) < sz)
  453                 panic("pmap_bootstrap: translations too small");
  454         bzero(translations, sz);
  455         if (OF_getprop(vmem, "translations", translations, sz) == -1)
  456                 panic("pmap_bootstrap: getprop /virtual-memory/translations");
  457         sz /= sizeof(*translations);
  458         translations_size = sz;
  459         CTR0(KTR_PMAP, "pmap_bootstrap: translations");
  460         qsort(translations, sz, sizeof (*translations), om_cmp);
  461         for (i = 0; i < sz; i++) {
  462                 CTR3(KTR_PMAP,
  463                     "translation: start=%#lx size=%#lx tte=%#lx",
  464                     translations[i].om_start, translations[i].om_size,
  465                     translations[i].om_tte);
  466                 if (translations[i].om_start < VM_MIN_PROM_ADDRESS ||
  467                     translations[i].om_start > VM_MAX_PROM_ADDRESS)
  468                         continue;
  469                 for (off = 0; off < translations[i].om_size;
  470                     off += PAGE_SIZE) {
  471                         va = translations[i].om_start + off;
  472                         tp = tsb_kvtotte(va);
  473                         tp->tte_vpn = TV_VPN(va, TS_8K);
  474                         tp->tte_data =
  475                             ((translations[i].om_tte &
  476                               ~(TD_SOFT_MASK << TD_SOFT_SHIFT)) | TD_EXEC) +
  477                             off;
  478                 }
  479         }
  480 
  481         /*
  482          * Get the available physical memory ranges from /memory/reg. These
  483          * are only used for kernel dumps, but it may not be wise to do prom
  484          * calls in that situation.
  485          */
  486         if ((sz = OF_getproplen(pmem, "reg")) == -1)
  487                 panic("pmap_bootstrap: getproplen /memory/reg");
  488         if (sizeof(sparc64_memreg) < sz)
  489                 panic("pmap_bootstrap: sparc64_memreg too small");
  490         if (OF_getprop(pmem, "reg", sparc64_memreg, sz) == -1)
  491                 panic("pmap_bootstrap: getprop /memory/reg");
  492         sparc64_nmemreg = sz / sizeof(*sparc64_memreg);
  493 
  494         /*
  495          * Initialize the kernel pmap (which is statically allocated).
  496          */
  497         pm = kernel_pmap;
  498         for (i = 0; i < MAXCPU; i++)
  499                 pm->pm_context[i] = TLB_CTX_KERNEL;
  500         pm->pm_active = ~0;
  501 
  502         /* XXX flush all non-locked tlb entries */
  503 }
  504 
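      /*
       * Lock the kernel tsb pages into the tlb and make the kernel context
       * the secondary context.
       */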
  505 void
  506 pmap_map_tsb(void)
  507 {
  508         vm_offset_t va;
  509         vm_paddr_t pa;
  510         u_long data;
  511         u_long s;
  512         int i;
  513 
  514         s = intr_disable();
  515 
  516         /*
  517          * Map the 4mb tsb pages.
  518          */
  519         for (i = 0; i < tsb_kernel_size; i += PAGE_SIZE_4M) {
  520                 va = (vm_offset_t)tsb_kernel + i;
  521                 pa = tsb_kernel_phys + i;
  522                 data = TD_V | TD_4M | TD_PA(pa) | TD_L | TD_CP | TD_CV |
  523                     TD_P | TD_W;
  524                 /* XXX - cheetah */
  525                 stxa(AA_DMMU_TAR, ASI_DMMU, TLB_TAR_VA(va) |
  526                     TLB_TAR_CTX(TLB_CTX_KERNEL));
  527                 stxa_sync(0, ASI_DTLB_DATA_IN_REG, data);
  528         }
  529 
  530         /*
  531          * Set the secondary context to be the kernel context (needed for
  532          * fp block operations in the kernel and the cache code).
  533          */
  534         stxa(AA_DMMU_SCXR, ASI_DMMU, TLB_CTX_KERNEL);
  535         membar(Sync);
  536 
  537         intr_restore(s);
  538 }
  539 
  540 /*
  541  * Allocate a block of physical memory directly from the phys_avail map.
  542  * Can only be called from pmap_bootstrap before avail start and end are
  543  * calculated.
  544  */
  545 static vm_paddr_t
  546 pmap_bootstrap_alloc(vm_size_t size)
  547 {
  548         vm_paddr_t pa;
  549         int i;
  550 
  551         size = round_page(size);
  552         for (i = 0; phys_avail[i + 1] != 0; i += 2) {
  553                 if (phys_avail[i + 1] - phys_avail[i] < size)
  554                         continue;
  555                 pa = phys_avail[i];
  556                 phys_avail[i] += size;
  557                 return (pa);
  558         }
  559         panic("pmap_bootstrap_alloc");
  560 }
  561 
  562 /*
  563  * Initialize the pmap module.
  564  */
  565 void
  566 pmap_init(vm_paddr_t phys_start, vm_paddr_t phys_end)
  567 {
  568         vm_offset_t addr;
  569         vm_size_t size;
  570         int result;
  571         int i;
  572 
  573         for (i = 0; i < vm_page_array_size; i++) {
  574                 vm_page_t m;
  575 
  576                 m = &vm_page_array[i];
  577                 TAILQ_INIT(&m->md.tte_list);
  578                 m->md.color = DCACHE_COLOR(VM_PAGE_TO_PHYS(m));
  579                 m->md.flags = 0;
  580                 m->md.pmap = NULL;
  581         }
  582 
  583         for (i = 0; i < translations_size; i++) {
  584                 addr = translations[i].om_start;
  585                 size = translations[i].om_size;
  586                 if (addr < VM_MIN_PROM_ADDRESS || addr > VM_MAX_PROM_ADDRESS)
  587                         continue;
  588                 result = vm_map_find(kernel_map, NULL, 0, &addr, size, FALSE,
  589                     VM_PROT_ALL, VM_PROT_ALL, 0);
  590                 if (result != KERN_SUCCESS || addr != translations[i].om_start)
  591                         panic("pmap_init: vm_map_find");
  592         }
  593 }
  594 
  595 /*
  596  * Initialize the address space (zone) for the pv_entries.  Set a
  597  * high water mark so that the system can recover from excessive
  598  * numbers of pv entries.
  599  */
  600 void
  601 pmap_init2(void)
  602 {
  603 }
  604 
  605 /*
  606  * Extract the physical page address associated with the given
  607  * map/virtual_address pair.
  608  */
  609 vm_paddr_t
  610 pmap_extract(pmap_t pm, vm_offset_t va)
  611 {
  612         struct tte *tp;
  613 
  614         if (pm == kernel_pmap)
  615                 return (pmap_kextract(va));
  616         tp = tsb_tte_lookup(pm, va);
  617         if (tp == NULL)
  618                 return (0);
  619         else
  620                 return (TTE_GET_PA(tp) | (va & TTE_GET_PAGE_MASK(tp)));
  621 }
  622 
  623 /*
  624  * Atomically extract and hold the physical page with the given
  625  * pmap and virtual address pair if that mapping permits the given
  626  * protection.
  627  */
  628 vm_page_t
  629 pmap_extract_and_hold(pmap_t pmap, vm_offset_t va, vm_prot_t prot)
  630 {
  631         vm_paddr_t pa;
  632         vm_page_t m;
  633 
  634         m = NULL;
  635         mtx_lock(&Giant);
  636         if ((pa = pmap_extract(pmap, va)) != 0) {
  637                 m = PHYS_TO_VM_PAGE(pa);
  638                 vm_page_lock_queues();
  639                 vm_page_hold(m);
  640                 vm_page_unlock_queues();
  641         }
  642         mtx_unlock(&Giant);
  643         return (m);
  644 }
  645 
  646 /*
  647  * Extract the physical page address associated with the given kernel virtual
  648  * address.
  649  */
  650 vm_paddr_t
  651 pmap_kextract(vm_offset_t va)
  652 {
  653         struct tte *tp;
  654 
  655         if (va >= VM_MIN_DIRECT_ADDRESS)
  656                 return (TLB_DIRECT_TO_PHYS(va));
  657         tp = tsb_kvtotte(va);
  658         if ((tp->tte_data & TD_V) == 0)
  659                 return (0);
  660         return (TTE_GET_PA(tp) | (va & TTE_GET_PAGE_MASK(tp)));
  661 }
  662 
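      /*
       * Account for a new mapping of the page at the given virtual address
       * and decide whether it may be entered cacheable in the virtually
       * indexed data cache.  Returns non-zero if the mapping is cacheable.
       * If mappings of a conflicting color exist, all mappings of the page
       * are instead downgraded to non-cacheable and zero is returned.
       */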
  663 int
  664 pmap_cache_enter(vm_page_t m, vm_offset_t va)
  665 {
  666         struct tte *tp;
  667         int color;
  668 
  669         mtx_assert(&vm_page_queue_mtx, MA_OWNED);
  670         KASSERT((m->flags & PG_FICTITIOUS) == 0,
  671             ("pmap_cache_enter: fake page"));
  672         PMAP_STATS_INC(pmap_ncache_enter);
  673 
  674         /*
  675          * Find the color for this virtual address and note the added mapping.
  676          */
  677         color = DCACHE_COLOR(va);
  678         m->md.colors[color]++;
  679 
  680         /*
  681          * If all existing mappings have the same color, the mapping is
  682          * cacheable.
  683          */
  684         if (m->md.color == color) {
  685                 KASSERT(m->md.colors[DCACHE_OTHER_COLOR(color)] == 0,
  686                     ("pmap_cache_enter: cacheable, mappings of other color"));
  687                 if (m->md.color == DCACHE_COLOR(VM_PAGE_TO_PHYS(m)))
  688                         PMAP_STATS_INC(pmap_ncache_enter_c);
  689                 else
  690                         PMAP_STATS_INC(pmap_ncache_enter_oc);
  691                 return (1);
  692         }
  693 
  694         /*
  695          * If there are no mappings of the other color, and the page still has
  696          * the wrong color, this must be a new mapping.  Change the color to
  697          * match the new mapping, which is cacheable.  We must flush the page
  698          * from the cache now.
  699          */
  700         if (m->md.colors[DCACHE_OTHER_COLOR(color)] == 0) {
  701                 KASSERT(m->md.colors[color] == 1,
  702                     ("pmap_cache_enter: changing color, not new mapping"));
  703                 dcache_page_inval(VM_PAGE_TO_PHYS(m));
  704                 m->md.color = color;
  705                 if (m->md.color == DCACHE_COLOR(VM_PAGE_TO_PHYS(m)))
  706                         PMAP_STATS_INC(pmap_ncache_enter_cc);
  707                 else
  708                         PMAP_STATS_INC(pmap_ncache_enter_coc);
  709                 return (1);
  710         }
  711 
  712         /*
  713          * If the mapping is already non-cacheable, just return.
  714          */     
  715         if (m->md.color == -1) {
  716                 PMAP_STATS_INC(pmap_ncache_enter_nc);
  717                 return (0);
  718         }
  719 
  720         PMAP_STATS_INC(pmap_ncache_enter_cnc);
  721 
  722         /*
  723          * Mark all mappings as uncacheable, flush any lines with the other
  724          * color out of the dcache, and set the color to none (-1).
  725          */
  726         TAILQ_FOREACH(tp, &m->md.tte_list, tte_link) {
  727                 atomic_clear_long(&tp->tte_data, TD_CV);
  728                 tlb_page_demap(TTE_GET_PMAP(tp), TTE_GET_VA(tp));
  729         }
  730         dcache_page_inval(VM_PAGE_TO_PHYS(m));
  731         m->md.color = -1;
  732         return (0);
  733 }
  734 
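      /*
       * Account for the removal of a mapping of the page at the given
       * virtual address.  If the page was non-cacheable and the last
       * mapping of this color is removed, the remaining mappings of the
       * other color are made cacheable again and the page is recolored.
       */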
  735 void
  736 pmap_cache_remove(vm_page_t m, vm_offset_t va)
  737 {
  738         struct tte *tp;
  739         int color;
  740 
  741         mtx_assert(&vm_page_queue_mtx, MA_OWNED);
  742         CTR3(KTR_PMAP, "pmap_cache_remove: m=%p va=%#lx c=%d", m, va,
  743             m->md.colors[DCACHE_COLOR(va)]);
  744         KASSERT((m->flags & PG_FICTITIOUS) == 0,
  745             ("pmap_cache_remove: fake page"));
  746         KASSERT(m->md.colors[DCACHE_COLOR(va)] > 0,
  747             ("pmap_cache_remove: no mappings %d <= 0",
  748             m->md.colors[DCACHE_COLOR(va)]));
  749         PMAP_STATS_INC(pmap_ncache_remove);
  750 
  751         /*
  752          * Find the color for this virtual address and note the removal of
  753          * the mapping.
  754          */
  755         color = DCACHE_COLOR(va);
  756         m->md.colors[color]--;
  757 
  758         /*
  759          * If the page is cacheable, just return and keep the same color, even
  760          * if there are no longer any mappings.
  761          */
  762         if (m->md.color != -1) {
  763                 if (m->md.color == DCACHE_COLOR(VM_PAGE_TO_PHYS(m)))
  764                         PMAP_STATS_INC(pmap_ncache_remove_c);
  765                 else
  766                         PMAP_STATS_INC(pmap_ncache_remove_oc);
  767                 return;
  768         }
  769 
  770         KASSERT(m->md.colors[DCACHE_OTHER_COLOR(color)] != 0,
  771             ("pmap_cache_remove: uncacheable, no mappings of other color"));
  772 
  773         /*
  774          * If the page is not cacheable (color is -1), and the number of
  775          * mappings for this color is not zero, just return.  There are
  776          * mappings of the other color still, so remain non-cacheable.
  777          */
  778         if (m->md.colors[color] != 0) {
  779                 PMAP_STATS_INC(pmap_ncache_remove_nc);
  780                 return;
  781         }
  782 
  783         /*
  784          * The number of mappings for this color is now zero.  Recache the
  785          * other colored mappings, and change the page color to the other
  786          * color.  There should be no lines in the data cache for this page,
  787          * so flushing should not be needed.
  788          */
  789         TAILQ_FOREACH(tp, &m->md.tte_list, tte_link) {
  790                 atomic_set_long(&tp->tte_data, TD_CV);
  791                 tlb_page_demap(TTE_GET_PMAP(tp), TTE_GET_VA(tp));
  792         }
  793         m->md.color = DCACHE_OTHER_COLOR(color);
  794 
  795         if (m->md.color == DCACHE_COLOR(VM_PAGE_TO_PHYS(m)))
  796                 PMAP_STATS_INC(pmap_ncache_remove_cc);
  797         else
  798                 PMAP_STATS_INC(pmap_ncache_remove_coc);
  799 }
  800 
  801 /*
  802  * Map a wired page into kernel virtual address space.
  803  */
  804 void
  805 pmap_kenter(vm_offset_t va, vm_page_t m)
  806 {
  807         vm_offset_t ova;
  808         struct tte *tp;
  809         vm_page_t om;
  810         u_long data;
  811 
  812         mtx_assert(&vm_page_queue_mtx, MA_OWNED);
  813         PMAP_STATS_INC(pmap_nkenter);
  814         tp = tsb_kvtotte(va);
  815         CTR4(KTR_PMAP, "pmap_kenter: va=%#lx pa=%#lx tp=%p data=%#lx",
  816             va, VM_PAGE_TO_PHYS(m), tp, tp->tte_data);
  817         if (m->pc != DCACHE_COLOR(va)) {
  818                 CTR6(KTR_CT2,
  819         "pmap_kenter: off colour va=%#lx pa=%#lx o=%p oc=%#lx ot=%d pi=%#lx",
  820                     va, VM_PAGE_TO_PHYS(m), m->object,
  821                     m->object ? m->object->pg_color : -1,
  822                     m->object ? m->object->type : -1,
  823                     m->pindex);
  824                 PMAP_STATS_INC(pmap_nkenter_oc);
  825         }
  826         if ((tp->tte_data & TD_V) != 0) {
  827                 om = PHYS_TO_VM_PAGE(TTE_GET_PA(tp));
  828                 ova = TTE_GET_VA(tp);
  829                 if (m == om && va == ova) {
  830                         PMAP_STATS_INC(pmap_nkenter_stupid);
  831                         return;
  832                 }
  833                 TAILQ_REMOVE(&om->md.tte_list, tp, tte_link);
  834                 pmap_cache_remove(om, ova);
  835                 if (va != ova)
  836                         tlb_page_demap(kernel_pmap, ova);
  837         }
  838         data = TD_V | TD_8K | VM_PAGE_TO_PHYS(m) | TD_REF | TD_SW | TD_CP |
  839             TD_P | TD_W;
  840         if (pmap_cache_enter(m, va) != 0)
  841                 data |= TD_CV;
  842         tp->tte_vpn = TV_VPN(va, TS_8K);
  843         tp->tte_data = data;
  844         TAILQ_INSERT_TAIL(&m->md.tte_list, tp, tte_link);
  845 }
  846 
  847 /*
  848  * Map a wired page into kernel virtual address space. This additionally
  849  * takes a flag argument which is or'ed to the TTE data. This is used by
  850  * bus_space_map().
  851  * NOTE: if the mapping is non-cacheable, it's the caller's responsibility
  852  * to flush entries that might still be in the cache, if applicable.
  853  */
  854 void
  855 pmap_kenter_flags(vm_offset_t va, vm_paddr_t pa, u_long flags)
  856 {
  857         struct tte *tp;
  858 
  859         tp = tsb_kvtotte(va);
  860         CTR4(KTR_PMAP, "pmap_kenter_flags: va=%#lx pa=%#lx tp=%p data=%#lx",
  861             va, pa, tp, tp->tte_data);
  862         tp->tte_vpn = TV_VPN(va, TS_8K);
  863         tp->tte_data = TD_V | TD_8K | TD_PA(pa) | TD_REF | TD_P | flags;
  864 }
  865 
  866 /*
  867  * Remove a wired page from kernel virtual address space.
  868  */
  869 void
  870 pmap_kremove(vm_offset_t va)
  871 {
  872         struct tte *tp;
  873         vm_page_t m;
  874 
  875         mtx_assert(&vm_page_queue_mtx, MA_OWNED);
  876         PMAP_STATS_INC(pmap_nkremove);
  877         tp = tsb_kvtotte(va);
  878         CTR3(KTR_PMAP, "pmap_kremove: va=%#lx tp=%p data=%#lx", va, tp,
  879             tp->tte_data);
  880         if ((tp->tte_data & TD_V) == 0)
  881                 return;
  882         m = PHYS_TO_VM_PAGE(TTE_GET_PA(tp));
  883         TAILQ_REMOVE(&m->md.tte_list, tp, tte_link);
  884         pmap_cache_remove(m, va);
  885         TTE_ZERO(tp);
  886 }
  887 
  888 /*
  889  * Inverse of pmap_kenter_flags, used by bus_space_unmap().
  890  */
  891 void
  892 pmap_kremove_flags(vm_offset_t va)
  893 {
  894         struct tte *tp;
  895 
  896         tp = tsb_kvtotte(va);
  897         CTR3(KTR_PMAP, "pmap_kremove: va=%#lx tp=%p data=%#lx", va, tp,
  898             tp->tte_data);
  899         TTE_ZERO(tp);
  900 }
  901 
  902 /*
  903  * Map a range of physical addresses into kernel virtual address space.
  904  *
  905  * The value passed in *virt is a suggested virtual address for the mapping.
  906  * Architectures which can support a direct-mapped physical to virtual region
  907  * can return the appropriate address within that region, leaving '*virt'
  908  * unchanged.
  909  */
  910 vm_offset_t
  911 pmap_map(vm_offset_t *virt, vm_paddr_t start, vm_paddr_t end, int prot)
  912 {
  913 
  914         return (TLB_PHYS_TO_DIRECT(start));
  915 }
  916 
  917 /*
  918  * Map a list of wired pages into kernel virtual address space.  This is
  919  * intended for temporary mappings which do not need page modification or
  920  * references recorded.  Existing mappings in the region are overwritten.
  921  */
  922 void
  923 pmap_qenter(vm_offset_t sva, vm_page_t *m, int count)
  924 {
  925         vm_offset_t va;
  926         int locked;
  927 
  928         PMAP_STATS_INC(pmap_nqenter);
  929         va = sva;
  930         if (!(locked = mtx_owned(&vm_page_queue_mtx)))
  931                 vm_page_lock_queues();
  932         while (count-- > 0) {
  933                 pmap_kenter(va, *m);
  934                 va += PAGE_SIZE;
  935                 m++;
  936         }
  937         if (!locked)
  938                 vm_page_unlock_queues();
  939         tlb_range_demap(kernel_pmap, sva, va);
  940 }
  941 
  942 /*
  943  * Remove page mappings from kernel virtual address space.  Intended for
  944  * temporary mappings entered by pmap_qenter.
  945  */
  946 void
  947 pmap_qremove(vm_offset_t sva, int count)
  948 {
  949         vm_offset_t va;
  950         int locked;
  951 
  952         PMAP_STATS_INC(pmap_nqremove);
  953         va = sva;
  954         if (!(locked = mtx_owned(&vm_page_queue_mtx)))
  955                 vm_page_lock_queues();
  956         while (count-- > 0) {
  957                 pmap_kremove(va);
  958                 va += PAGE_SIZE;
  959         }
  960         if (!locked)
  961                 vm_page_unlock_queues();
  962         tlb_range_demap(kernel_pmap, sva, va);
  963 }
  964 
  965 /*
  966  * Initialize the pmap associated with process 0.
  967  */
  968 void
  969 pmap_pinit0(pmap_t pm)
  970 {
  971         int i;
  972 
  973         for (i = 0; i < MAXCPU; i++)
  974                 pm->pm_context[i] = 0;
  975         pm->pm_active = 0;
  976         pm->pm_tsb = NULL;
  977         pm->pm_tsb_obj = NULL;
  978         bzero(&pm->pm_stats, sizeof(pm->pm_stats));
  979 }
  980 
  981 /*
  982  * Initialize a preallocated and zeroed pmap structure, such as one in a
  983  * vmspace structure.
  984  */
  985 void
  986 pmap_pinit(pmap_t pm)
  987 {
  988         vm_page_t ma[TSB_PAGES];
  989         vm_page_t m;
  990         int i;
  991 
  992         /*
  993          * Allocate kva space for the tsb.
  994          */
  995         if (pm->pm_tsb == NULL) {
  996                 pm->pm_tsb = (struct tte *)kmem_alloc_pageable(kernel_map,
  997                     TSB_BSIZE);
  998         }
  999 
 1000         /*
 1001          * Allocate an object for it.
 1002          */
 1003         if (pm->pm_tsb_obj == NULL)
 1004                 pm->pm_tsb_obj = vm_object_allocate(OBJT_DEFAULT, TSB_PAGES);
 1005 
 1006         VM_OBJECT_LOCK(pm->pm_tsb_obj);
 1007         for (i = 0; i < TSB_PAGES; i++) {
 1008                 m = vm_page_grab(pm->pm_tsb_obj, i,
 1009                     VM_ALLOC_RETRY | VM_ALLOC_WIRED | VM_ALLOC_ZERO);
 1010                 if ((m->flags & PG_ZERO) == 0)
 1011                         pmap_zero_page(m);
 1012 
 1013                 vm_page_lock_queues();
 1014                 vm_page_flag_clear(m, PG_BUSY);
 1015                 m->valid = VM_PAGE_BITS_ALL;
 1016                 m->md.pmap = pm;
 1017                 vm_page_unlock_queues();
 1018 
 1019                 ma[i] = m;
 1020         }
 1021         VM_OBJECT_UNLOCK(pm->pm_tsb_obj);
 1022         pmap_qenter((vm_offset_t)pm->pm_tsb, ma, TSB_PAGES);
 1023 
 1024         for (i = 0; i < MAXCPU; i++)
 1025                 pm->pm_context[i] = -1;
 1026         pm->pm_active = 0;
 1027         bzero(&pm->pm_stats, sizeof(pm->pm_stats));
 1028 }
 1029 
 1030 void
 1031 pmap_pinit2(pmap_t pmap)
 1032 {
 1033         /* XXX: Remove this stub when no longer called */
 1034 }
 1035 
 1036 /*
 1037  * Release any resources held by the given physical map.
 1038  * Called when a pmap initialized by pmap_pinit is being released.
 1039  * Should only be called if the map contains no valid mappings.
 1040  */
 1041 void
 1042 pmap_release(pmap_t pm)
 1043 {
 1044         vm_object_t obj;
 1045         vm_page_t m;
 1046 
 1047         CTR2(KTR_PMAP, "pmap_release: ctx=%#x tsb=%p",
 1048             pm->pm_context[PCPU_GET(cpuid)], pm->pm_tsb);
 1049         KASSERT(pmap_resident_count(pm) == 0,
 1050             ("pmap_release: resident pages %ld != 0",
 1051             pmap_resident_count(pm)));
 1052         obj = pm->pm_tsb_obj;
 1053         VM_OBJECT_LOCK(obj);
 1054         KASSERT(obj->ref_count == 1, ("pmap_release: tsbobj ref count != 1"));
 1055         while (!TAILQ_EMPTY(&obj->memq)) {
 1056                 m = TAILQ_FIRST(&obj->memq);
 1057                 vm_page_lock_queues();
 1058                 if (vm_page_sleep_if_busy(m, FALSE, "pmaprl"))
 1059                         continue;
 1060                 vm_page_busy(m);
 1061                 KASSERT(m->hold_count == 0,
 1062                     ("pmap_release: freeing held tsb page"));
 1063                 m->md.pmap = NULL;
 1064                 m->wire_count--;
 1065                 atomic_subtract_int(&cnt.v_wire_count, 1);
 1066                 vm_page_free_zero(m);
 1067                 vm_page_unlock_queues();
 1068         }
 1069         VM_OBJECT_UNLOCK(obj);
 1070         pmap_qremove((vm_offset_t)pm->pm_tsb, TSB_PAGES);
 1071 }
 1072 
 1073 /*
 1074  * Grow the number of kernel page table entries.  Unneeded.
 1075  */
 1076 void
 1077 pmap_growkernel(vm_offset_t addr)
 1078 {
 1079 
 1080         panic("pmap_growkernel: can't grow kernel");
 1081 }
 1082 
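      /*
       * Tear down a single tte: the mapping is unlinked from its page, the
       * wired and resident counts are updated and the reference and modify
       * bits are transferred to the vm_page.  Returns 0 once a user pmap
       * has no resident pages left, which lets tsb_foreach() callers stop
       * early.
       */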
 1083 int
 1084 pmap_remove_tte(struct pmap *pm, struct pmap *pm2, struct tte *tp,
 1085                 vm_offset_t va)
 1086 {
 1087         vm_page_t m;
 1088         u_long data;
 1089 
 1090         mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 1091         data = atomic_readandclear_long(&tp->tte_data);
 1092         if ((data & TD_FAKE) == 0) {
 1093                 m = PHYS_TO_VM_PAGE(TD_PA(data));
 1094                 TAILQ_REMOVE(&m->md.tte_list, tp, tte_link);
 1095                 if ((data & TD_WIRED) != 0)
 1096                         pm->pm_stats.wired_count--;
 1097                 if ((data & TD_PV) != 0) {
 1098                         if ((data & TD_W) != 0 && pmap_track_modified(pm, va))
 1099                                 vm_page_dirty(m);
 1100                         if ((data & TD_REF) != 0)
 1101                                 vm_page_flag_set(m, PG_REFERENCED);
 1102                         if (TAILQ_EMPTY(&m->md.tte_list))
 1103                                 vm_page_flag_clear(m, PG_WRITEABLE);
 1104                         pm->pm_stats.resident_count--;
 1105                 }
 1106                 pmap_cache_remove(m, va);
 1107         }
 1108         TTE_ZERO(tp);
 1109         if (PMAP_REMOVE_DONE(pm))
 1110                 return (0);
 1111         return (1);
 1112 }
 1113 
 1114 /*
 1115  * Remove the given range of addresses from the specified map.
 1116  */
 1117 void
 1118 pmap_remove(pmap_t pm, vm_offset_t start, vm_offset_t end)
 1119 {
 1120         struct tte *tp;
 1121         vm_offset_t va;
 1122 
 1123         mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 1124         CTR3(KTR_PMAP, "pmap_remove: ctx=%#lx start=%#lx end=%#lx",
 1125             pm->pm_context[PCPU_GET(cpuid)], start, end);
 1126         if (PMAP_REMOVE_DONE(pm))
 1127                 return;
 1128         if (end - start > PMAP_TSB_THRESH) {
 1129                 tsb_foreach(pm, NULL, start, end, pmap_remove_tte);
 1130                 tlb_context_demap(pm);
 1131         } else {
 1132                 for (va = start; va < end; va += PAGE_SIZE) {
 1133                         if ((tp = tsb_tte_lookup(pm, va)) != NULL) {
 1134                                 if (!pmap_remove_tte(pm, NULL, tp, va))
 1135                                         break;
 1136                         }
 1137                 }
 1138                 tlb_range_demap(pm, start, end - 1);
 1139         }
 1140 }
 1141 
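      /*
       * Remove all managed mappings of the given page, transferring the
       * reference and modify bits to the vm_page and clearing PG_WRITEABLE
       * when done.
       */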
 1142 void
 1143 pmap_remove_all(vm_page_t m)
 1144 {
 1145         struct pmap *pm;
 1146         struct tte *tpn;
 1147         struct tte *tp;
 1148         vm_offset_t va;
 1149 
 1150         mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 1151         for (tp = TAILQ_FIRST(&m->md.tte_list); tp != NULL; tp = tpn) {
 1152                 tpn = TAILQ_NEXT(tp, tte_link);
 1153                 if ((tp->tte_data & TD_PV) == 0)
 1154                         continue;
 1155                 pm = TTE_GET_PMAP(tp);
 1156                 va = TTE_GET_VA(tp);
 1157                 if ((tp->tte_data & TD_WIRED) != 0)
 1158                         pm->pm_stats.wired_count--;
 1159                 if ((tp->tte_data & TD_REF) != 0)
 1160                         vm_page_flag_set(m, PG_REFERENCED);
 1161                 if ((tp->tte_data & TD_W) != 0 &&
 1162                     pmap_track_modified(pm, va))
 1163                         vm_page_dirty(m);
 1164                 tp->tte_data &= ~TD_V;
 1165                 tlb_page_demap(pm, va);
 1166                 TAILQ_REMOVE(&m->md.tte_list, tp, tte_link);
 1167                 pm->pm_stats.resident_count--;
 1168                 pmap_cache_remove(m, va);
 1169                 TTE_ZERO(tp);
 1170         }
 1171         vm_page_flag_clear(m, PG_WRITEABLE);
 1172 }
 1173 
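      /*
       * Write-protect a single tte by clearing its reference and writable
       * bits, saving the reference and modify state in the vm_page first.
       * Used by pmap_protect(), both directly and via tsb_foreach().
       */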
 1174 int
 1175 pmap_protect_tte(struct pmap *pm, struct pmap *pm2, struct tte *tp,
 1176                  vm_offset_t va)
 1177 {
 1178         u_long data;
 1179         vm_page_t m;
 1180 
 1181         data = atomic_clear_long(&tp->tte_data, TD_REF | TD_SW | TD_W);
 1182         if ((data & TD_PV) != 0) {
 1183                 m = PHYS_TO_VM_PAGE(TD_PA(data));
 1184                 if ((data & TD_REF) != 0)
 1185                         vm_page_flag_set(m, PG_REFERENCED);
 1186                 if ((data & TD_W) != 0 && pmap_track_modified(pm, va))
 1187                         vm_page_dirty(m);
 1188         }
 1189         return (1);
 1190 }
 1191 
 1192 /*
 1193  * Set the physical protection on the specified range of this map as requested.
 1194  */
 1195 void
 1196 pmap_protect(pmap_t pm, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot)
 1197 {
 1198         vm_offset_t va;
 1199         struct tte *tp;
 1200 
 1201         CTR4(KTR_PMAP, "pmap_protect: ctx=%#lx sva=%#lx eva=%#lx prot=%#lx",
 1202             pm->pm_context[PCPU_GET(cpuid)], sva, eva, prot);
 1203 
 1204         if ((prot & VM_PROT_READ) == VM_PROT_NONE) {
 1205                 pmap_remove(pm, sva, eva);
 1206                 return;
 1207         }
 1208 
 1209         if (prot & VM_PROT_WRITE)
 1210                 return;
 1211 
 1212         if (eva - sva > PMAP_TSB_THRESH) {
 1213                 tsb_foreach(pm, NULL, sva, eva, pmap_protect_tte);
 1214                 tlb_context_demap(pm);
 1215         } else {
 1216                 for (va = sva; va < eva; va += PAGE_SIZE) {
 1217                         if ((tp = tsb_tte_lookup(pm, va)) != NULL)
 1218                                 pmap_protect_tte(pm, NULL, tp, va);
 1219                 }
 1220                 tlb_range_demap(pm, sva, eva - 1);
 1221         }
 1222 }
 1223 
 1224 /*
 1225  * Map the given physical page at the specified virtual address in the
 1226  * target pmap with the protection requested.  If specified the page
 1227  * will be wired down.
 1228  */
 1229 void
 1230 pmap_enter(pmap_t pm, vm_offset_t va, vm_page_t m, vm_prot_t prot,
 1231            boolean_t wired)
 1232 {
 1233         struct tte *tp;
 1234         vm_paddr_t pa;
 1235         u_long data;
 1236         int i;
 1237 
 1238         PMAP_STATS_INC(pmap_nenter);
 1239         pa = VM_PAGE_TO_PHYS(m);
 1240 
 1241         /*
 1242          * If this is a fake page from the device_pager, but it covers actual
 1243          * physical memory, convert to the real backing page.
 1244          */
 1245         if ((m->flags & PG_FICTITIOUS) != 0) {
 1246                 for (i = 0; phys_avail[i + 1] != 0; i += 2) {
 1247                         if (pa >= phys_avail[i] && pa <= phys_avail[i + 1]) {
 1248                                 m = PHYS_TO_VM_PAGE(pa);
 1249                                 break;
 1250                         }
 1251                 }
 1252         }
 1253 
 1254         CTR6(KTR_PMAP,
 1255             "pmap_enter: ctx=%p m=%p va=%#lx pa=%#lx prot=%#x wired=%d",
 1256             pm->pm_context[PCPU_GET(cpuid)], m, va, pa, prot, wired);
 1257 
 1258         /*
 1259          * If there is an existing mapping, and the physical address has not
 1260          * changed, it must be a protection or wiring change.
 1261          */
 1262         if ((tp = tsb_tte_lookup(pm, va)) != NULL && TTE_GET_PA(tp) == pa) {
 1263                 CTR0(KTR_PMAP, "pmap_enter: update");
 1264                 PMAP_STATS_INC(pmap_nenter_update);
 1265 
 1266                 /*
 1267                  * Wiring change, just update stats.
 1268                  */
 1269                 if (wired) {
 1270                         if ((tp->tte_data & TD_WIRED) == 0) {
 1271                                 tp->tte_data |= TD_WIRED;
 1272                                 pm->pm_stats.wired_count++;
 1273                         }
 1274                 } else {
 1275                         if ((tp->tte_data & TD_WIRED) != 0) {
 1276                                 tp->tte_data &= ~TD_WIRED;
 1277                                 pm->pm_stats.wired_count--;
 1278                         }
 1279                 }
 1280 
 1281                 /*
 1282                  * Save the old bits and clear the ones we're interested in.
 1283                  */
 1284                 data = tp->tte_data;
 1285                 tp->tte_data &= ~(TD_EXEC | TD_SW | TD_W);
 1286 
 1287                 /*
 1288                  * If we're turning off write permissions, sense modify status.
 1289                  */
 1290                 if ((prot & VM_PROT_WRITE) != 0) {
 1291                         tp->tte_data |= TD_SW;
 1292                         if (wired) {
 1293                                 tp->tte_data |= TD_W;
 1294                         }
 1295                 } else if ((data & TD_W) != 0 &&
 1296                     pmap_track_modified(pm, va)) {
 1297                         vm_page_dirty(m);
 1298                 }
 1299 
 1300                 /*
 1301                  * If we're turning on execute permissions, flush the icache.
 1302                  */
 1303                 if ((prot & VM_PROT_EXECUTE) != 0) {
 1304                         if ((data & TD_EXEC) == 0) {
 1305                                 icache_page_inval(pa);
 1306                         }
 1307                         tp->tte_data |= TD_EXEC;
 1308                 }
 1309 
 1310                 /*
 1311                  * Delete the old mapping.
 1312                  */
 1313                 tlb_page_demap(pm, TTE_GET_VA(tp));
 1314         } else {
 1315                 /*
 1316                  * If there is an existing mapping, but it's for a different
 1317                  * physical address, delete the old mapping.
 1318                  */
 1319                 if (tp != NULL) {
 1320                         CTR0(KTR_PMAP, "pmap_enter: replace");
 1321                         PMAP_STATS_INC(pmap_nenter_replace);
 1322                         vm_page_lock_queues();
 1323                         pmap_remove_tte(pm, NULL, tp, va);
 1324                         vm_page_unlock_queues();
 1325                         tlb_page_demap(pm, va);
 1326                 } else {
 1327                         CTR0(KTR_PMAP, "pmap_enter: new");
 1328                         PMAP_STATS_INC(pmap_nenter_new);
 1329                 }
 1330 
 1331                 /*
 1332                  * Now set up the data and install the new mapping.
 1333                  */
 1334                 data = TD_V | TD_8K | TD_PA(pa);
 1335                 if (pm == kernel_pmap)
 1336                         data |= TD_P;
 1337                 if (prot & VM_PROT_WRITE)
 1338                         data |= TD_SW;
 1339                 if (prot & VM_PROT_EXECUTE) {
 1340                         data |= TD_EXEC;
 1341                         icache_page_inval(pa);
 1342                 }
 1343 
 1344                 /*
 1345                  * If it's wired, update stats.  We also don't need reference or
 1346                  * modify tracking for wired mappings, so set the bits now.
 1347                  */
 1348                 if (wired) {
 1349                         pm->pm_stats.wired_count++;
 1350                         data |= TD_REF | TD_WIRED;
 1351                         if ((prot & VM_PROT_WRITE) != 0)
 1352                                 data |= TD_W;
 1353                 }
 1354 
 1355                 tsb_tte_enter(pm, m, va, TS_8K, data);
 1356         }
 1357 }
 1358 
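      /*
       * Enter a read-only, executable and unwired mapping for the given
       * page; the mpte argument and the return value are unused here.
       */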
 1359 vm_page_t
 1360 pmap_enter_quick(pmap_t pm, vm_offset_t va, vm_page_t m, vm_page_t mpte)
 1361 {
 1362 
 1363         pmap_enter(pm, va, m, VM_PROT_READ | VM_PROT_EXECUTE, FALSE);
 1364         return (NULL);
 1365 }
 1366 
 1367 void
 1368 pmap_object_init_pt(pmap_t pm, vm_offset_t addr, vm_object_t object,
 1369                     vm_pindex_t pindex, vm_size_t size)
 1370 {
 1371 
 1372         VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
 1373         KASSERT(object->type == OBJT_DEVICE,
 1374             ("pmap_object_init_pt: non-device object"));
 1375 }
 1376 
 1377 /*
 1378  * Change the wiring attribute for a map/virtual-address pair.
 1379  * The mapping must already exist in the pmap.
 1380  */
 1381 void
 1382 pmap_change_wiring(pmap_t pm, vm_offset_t va, boolean_t wired)
 1383 {
 1384         struct tte *tp;
 1385         u_long data;
 1386 
 1387         if ((tp = tsb_tte_lookup(pm, va)) != NULL) {
 1388                 if (wired) {
 1389                         data = atomic_set_long(&tp->tte_data, TD_WIRED);
 1390                         if ((data & TD_WIRED) == 0)
 1391                                 pm->pm_stats.wired_count++;
 1392                 } else {
 1393                         data = atomic_clear_long(&tp->tte_data, TD_WIRED);
 1394                         if ((data & TD_WIRED) != 0)
 1395                                 pm->pm_stats.wired_count--;
 1396                 }
 1397         }
 1398 }
 1399 
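      /*
       * Enter a clean (unreferenced, read-only) copy of the source tte into
       * the destination pmap at the same virtual address, unless a mapping
       * already exists there.  Fake (device) mappings are not copied.
       */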
 1400 static int
 1401 pmap_copy_tte(pmap_t src_pmap, pmap_t dst_pmap, struct tte *tp, vm_offset_t va)
 1402 {
 1403         vm_page_t m;
 1404         u_long data;
 1405 
 1406         if ((tp->tte_data & TD_FAKE) != 0)
 1407                 return (1);
 1408         if (tsb_tte_lookup(dst_pmap, va) == NULL) {
 1409                 data = tp->tte_data &
 1410                     ~(TD_PV | TD_REF | TD_SW | TD_CV | TD_W);
 1411                 m = PHYS_TO_VM_PAGE(TTE_GET_PA(tp));
 1412                 tsb_tte_enter(dst_pmap, m, va, TS_8K, data);
 1413         }
 1414         return (1);
 1415 }
 1416 
 1417 void
 1418 pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr,
 1419           vm_size_t len, vm_offset_t src_addr)
 1420 {
 1421         struct tte *tp;
 1422         vm_offset_t va;
 1423 
 1424         if (dst_addr != src_addr)
 1425                 return;
 1426         if (len > PMAP_TSB_THRESH) {
 1427                 tsb_foreach(src_pmap, dst_pmap, src_addr, src_addr + len,
 1428                     pmap_copy_tte);
 1429                 tlb_context_demap(dst_pmap);
 1430         } else {
 1431                 for (va = src_addr; va < src_addr + len; va += PAGE_SIZE) {
 1432                         if ((tp = tsb_tte_lookup(src_pmap, va)) != NULL)
 1433                                 pmap_copy_tte(src_pmap, dst_pmap, tp, va);
 1434                 }
 1435                 tlb_range_demap(dst_pmap, src_addr, src_addr + len - 1);
 1436         }
 1437 }
 1438 
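      /*
       * Zero a page of physical memory.  Three cases are distinguished by
       * the page's D-cache color: pages with no valid color (color == -1)
       * are zeroed through the physical-address ASI, pages whose color
       * matches that of their direct-mapped address are zeroed through the
       * direct map, and all other pages are zeroed through a color-aligned
       * temporary kernel mapping that is demapped afterwards.  The _area
       * and _idle variants below follow the same scheme for sub-page ranges
       * and for zeroing done from the idle loop, respectively.
       */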
 1439 void
 1440 pmap_zero_page(vm_page_t m)
 1441 {
 1442         struct tte *tp;
 1443         vm_offset_t va;
 1444         vm_paddr_t pa;
 1445 
 1446         KASSERT((m->flags & PG_FICTITIOUS) == 0,
 1447             ("pmap_zero_page: fake page"));
 1448         PMAP_STATS_INC(pmap_nzero_page);
 1449         pa = VM_PAGE_TO_PHYS(m);
 1450         if (m->md.color == -1) {
 1451                 PMAP_STATS_INC(pmap_nzero_page_nc);
 1452                 aszero(ASI_PHYS_USE_EC, pa, PAGE_SIZE);
 1453         } else if (m->md.color == DCACHE_COLOR(pa)) {
 1454                 PMAP_STATS_INC(pmap_nzero_page_c);
 1455                 va = TLB_PHYS_TO_DIRECT(pa);
 1456                 cpu_block_zero((void *)va, PAGE_SIZE);
 1457         } else {
 1458                 PMAP_STATS_INC(pmap_nzero_page_oc);
 1459                 va = pmap_temp_map_1 + (m->md.color * PAGE_SIZE);
 1460                 tp = tsb_kvtotte(va);
 1461                 tp->tte_data = TD_V | TD_8K | TD_PA(pa) | TD_CP | TD_CV | TD_W;
 1462                 tp->tte_vpn = TV_VPN(va, TS_8K);
 1463                 cpu_block_zero((void *)va, PAGE_SIZE);
 1464                 tlb_page_demap(kernel_pmap, va);
 1465         }
 1466 }
 1467 
 1468 void
 1469 pmap_zero_page_area(vm_page_t m, int off, int size)
 1470 {
 1471         struct tte *tp;
 1472         vm_offset_t va;
 1473         vm_paddr_t pa;
 1474 
 1475         KASSERT((m->flags & PG_FICTITIOUS) == 0,
 1476             ("pmap_zero_page_area: fake page"));
 1477         KASSERT(off + size <= PAGE_SIZE, ("pmap_zero_page_area: bad off/size"));
 1478         PMAP_STATS_INC(pmap_nzero_page_area);
 1479         pa = VM_PAGE_TO_PHYS(m);
 1480         if (m->md.color == -1) {
 1481                 PMAP_STATS_INC(pmap_nzero_page_area_nc);
 1482                 aszero(ASI_PHYS_USE_EC, pa + off, size);
 1483         } else if (m->md.color == DCACHE_COLOR(pa)) {
 1484                 PMAP_STATS_INC(pmap_nzero_page_area_c);
 1485                 va = TLB_PHYS_TO_DIRECT(pa);
 1486                 bzero((void *)(va + off), size);
 1487         } else {
 1488                 PMAP_STATS_INC(pmap_nzero_page_area_oc);
 1489                 va = pmap_temp_map_1 + (m->md.color * PAGE_SIZE);
 1490                 tp = tsb_kvtotte(va);
 1491                 tp->tte_data = TD_V | TD_8K | TD_PA(pa) | TD_CP | TD_CV | TD_W;
 1492                 tp->tte_vpn = TV_VPN(va, TS_8K);
 1493                 bzero((void *)(va + off), size);
 1494                 tlb_page_demap(kernel_pmap, va);
 1495         }
 1496 }
 1497 
 1498 void
 1499 pmap_zero_page_idle(vm_page_t m)
 1500 {
 1501         struct tte *tp;
 1502         vm_offset_t va;
 1503         vm_paddr_t pa;
 1504 
 1505         KASSERT((m->flags & PG_FICTITIOUS) == 0,
 1506             ("pmap_zero_page_idle: fake page"));
 1507         PMAP_STATS_INC(pmap_nzero_page_idle);
 1508         pa = VM_PAGE_TO_PHYS(m);
 1509         if (m->md.color == -1) {
 1510                 PMAP_STATS_INC(pmap_nzero_page_idle_nc);
 1511                 aszero(ASI_PHYS_USE_EC, pa, PAGE_SIZE);
 1512         } else if (m->md.color == DCACHE_COLOR(pa)) {
 1513                 PMAP_STATS_INC(pmap_nzero_page_idle_c);
 1514                 va = TLB_PHYS_TO_DIRECT(pa);
 1515                 cpu_block_zero((void *)va, PAGE_SIZE);
 1516         } else {
 1517                 PMAP_STATS_INC(pmap_nzero_page_idle_oc);
 1518                 va = pmap_idle_map + (m->md.color * PAGE_SIZE);
 1519                 tp = tsb_kvtotte(va);
 1520                 tp->tte_data = TD_V | TD_8K | TD_PA(pa) | TD_CP | TD_CV | TD_W;
 1521                 tp->tte_vpn = TV_VPN(va, TS_8K);
 1522                 cpu_block_zero((void *)va, PAGE_SIZE);
 1523                 tlb_page_demap(kernel_pmap, va);
 1524         }
 1525 }
 1526 
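      /*
       * Copy one page of physical memory to another.  As in pmap_zero_page(),
       * each side is accessed through the physical-address ASI, through the
       * direct map when its cache color matches, or through a color-aligned
       * temporary kernel mapping that is demapped afterwards; every
       * combination of source and destination cases is handled.
       */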
 1527 void
 1528 pmap_copy_page(vm_page_t msrc, vm_page_t mdst)
 1529 {
 1530         vm_offset_t vdst;
 1531         vm_offset_t vsrc;
 1532         vm_paddr_t pdst;
 1533         vm_paddr_t psrc;
 1534         struct tte *tp;
 1535 
 1536         KASSERT((mdst->flags & PG_FICTITIOUS) == 0,
 1537             ("pmap_copy_page: fake dst page"));
 1538         KASSERT((msrc->flags & PG_FICTITIOUS) == 0,
 1539             ("pmap_copy_page: fake src page"));
 1540         PMAP_STATS_INC(pmap_ncopy_page);
 1541         pdst = VM_PAGE_TO_PHYS(mdst);
 1542         psrc = VM_PAGE_TO_PHYS(msrc);
 1543         if (msrc->md.color == -1 && mdst->md.color == -1) {
 1544                 PMAP_STATS_INC(pmap_ncopy_page_nc);
 1545                 ascopy(ASI_PHYS_USE_EC, psrc, pdst, PAGE_SIZE);
 1546         } else if (msrc->md.color == DCACHE_COLOR(psrc) &&
 1547             mdst->md.color == DCACHE_COLOR(pdst)) {
 1548                 PMAP_STATS_INC(pmap_ncopy_page_c);
 1549                 vdst = TLB_PHYS_TO_DIRECT(pdst);
 1550                 vsrc = TLB_PHYS_TO_DIRECT(psrc);
 1551                 cpu_block_copy((void *)vsrc, (void *)vdst, PAGE_SIZE);
 1552         } else if (msrc->md.color == -1) {
 1553                 if (mdst->md.color == DCACHE_COLOR(pdst)) {
 1554                         PMAP_STATS_INC(pmap_ncopy_page_dc);
 1555                         vdst = TLB_PHYS_TO_DIRECT(pdst);
 1556                         ascopyfrom(ASI_PHYS_USE_EC, psrc, (void *)vdst,
 1557                             PAGE_SIZE);
 1558                 } else {
 1559                         PMAP_STATS_INC(pmap_ncopy_page_doc);
 1560                         vdst = pmap_temp_map_1 + (mdst->md.color * PAGE_SIZE);
 1561                         tp = tsb_kvtotte(vdst);
 1562                         tp->tte_data =
 1563                             TD_V | TD_8K | TD_PA(pdst) | TD_CP | TD_CV | TD_W;
 1564                         tp->tte_vpn = TV_VPN(vdst, TS_8K);
 1565                         ascopyfrom(ASI_PHYS_USE_EC, psrc, (void *)vdst,
 1566                             PAGE_SIZE);
 1567                         tlb_page_demap(kernel_pmap, vdst);
 1568                 }
 1569         } else if (mdst->md.color == -1) {
 1570                 if (msrc->md.color == DCACHE_COLOR(psrc)) {
 1571                         PMAP_STATS_INC(pmap_ncopy_page_sc);
 1572                         vsrc = TLB_PHYS_TO_DIRECT(psrc);
 1573                         ascopyto((void *)vsrc, ASI_PHYS_USE_EC, pdst,
 1574                             PAGE_SIZE);
 1575                 } else {
 1576                         PMAP_STATS_INC(pmap_ncopy_page_soc);
 1577                         vsrc = pmap_temp_map_1 + (msrc->md.color * PAGE_SIZE);
 1578                         tp = tsb_kvtotte(vsrc);
 1579                         tp->tte_data =
 1580                             TD_V | TD_8K | TD_PA(psrc) | TD_CP | TD_CV | TD_W;
 1581                         tp->tte_vpn = TV_VPN(vsrc, TS_8K);
 1582                         ascopyto((void *)vsrc, ASI_PHYS_USE_EC, pdst,
 1583                             PAGE_SIZE);
 1584                         tlb_page_demap(kernel_pmap, vsrc);
 1585                 }
 1586         } else {
 1587                 PMAP_STATS_INC(pmap_ncopy_page_oc);
 1588                 vdst = pmap_temp_map_1 + (mdst->md.color * PAGE_SIZE);
 1589                 tp = tsb_kvtotte(vdst);
 1590                 tp->tte_data =
 1591                     TD_V | TD_8K | TD_PA(pdst) | TD_CP | TD_CV | TD_W;
 1592                 tp->tte_vpn = TV_VPN(vdst, TS_8K);
 1593                 vsrc = pmap_temp_map_2 + (msrc->md.color * PAGE_SIZE);
 1594                 tp = tsb_kvtotte(vsrc);
 1595                 tp->tte_data =
 1596                     TD_V | TD_8K | TD_PA(psrc) | TD_CP | TD_CV | TD_W;
 1597                 tp->tte_vpn = TV_VPN(vsrc, TS_8K);
 1598                 cpu_block_copy((void *)vsrc, (void *)vdst, PAGE_SIZE);
 1599                 tlb_page_demap(kernel_pmap, vdst);
 1600                 tlb_page_demap(kernel_pmap, vsrc);
 1601         }
 1602 }
 1603 
 1604 /*
 1605  * Returns true if the pmap's pv is one of the first
 1606  * 16 pvs linked to from this page.  This count may
 1607  * be changed upwards or downwards in the future; it
 1608  * is only necessary that true be returned for a small
 1609  * subset of pmaps for proper page aging.
 1610  */
 1611 boolean_t
 1612 pmap_page_exists_quick(pmap_t pm, vm_page_t m)
 1613 {
 1614         struct tte *tp;
 1615         int loops;
 1616 
 1617         mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 1618         if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0)
 1619                 return (FALSE);
 1620         loops = 0;
 1621         TAILQ_FOREACH(tp, &m->md.tte_list, tte_link) {
 1622                 if ((tp->tte_data & TD_PV) == 0)
 1623                         continue;
 1624                 if (TTE_GET_PMAP(tp) == pm)
 1625                         return (TRUE);
 1626                 if (++loops >= 16)
 1627                         break;
 1628         }
 1629         return (FALSE);
 1630 }
 1631 
 1632 /*
 1633  * Remove all pages from the specified address space; this aids process exit
 1634  * speed.  This is much faster than pmap_remove in the case of running down
 1635  * an entire address space.  Only works for the current pmap.
 1636  */
 1637 void
 1638 pmap_remove_pages(pmap_t pm, vm_offset_t sva, vm_offset_t eva)
 1639 {
 1640 }
 1641 
 1642 /*
 1643  * Lower the permission for all mappings to a given page.
 1644  */
 1645 void
 1646 pmap_page_protect(vm_page_t m, vm_prot_t prot)
 1647 {
 1648 
 1649         KASSERT((m->flags & PG_FICTITIOUS) == 0,
 1650             ("pmap_page_protect: fake page"));
 1651         if ((prot & VM_PROT_WRITE) == 0) {
 1652                 if (prot & (VM_PROT_READ | VM_PROT_EXECUTE))
 1653                         pmap_clear_write(m);
 1654                 else
 1655                         pmap_remove_all(m);
 1656         }
 1657 }
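
      #if 0
      /*
       * Illustrative sketch only, not part of the original file: a
       * hypothetical helper (example_clean_page()) showing how the VM
       * system is expected to use pmap_page_protect(), write-protecting a
       * page before cleaning it and removing all mappings before freeing
       * it.
       */
      static void
      example_clean_page(vm_page_t m)
      {

              /* Downgrade every mapping to read-only before the pageout. */
              pmap_page_protect(m, VM_PROT_READ);
              /* ... the page would be written to backing store here ... */
              /* Remove all mappings before the page is freed or reused. */
              pmap_page_protect(m, VM_PROT_NONE);
      }
      #endif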
 1658 
 1659 /*
 1660  *      pmap_ts_referenced:
 1661  *
 1662  *      Return a count of reference bits for a page, clearing those bits.
 1663  *      It is not necessary for every reference bit to be cleared, but it
 1664  *      is necessary that 0 only be returned when there are truly no
 1665  *      reference bits set.
 1666  *
 1667  *      XXX: The exact number of bits to check and clear is a matter that
 1668  *      should be tested and standardized at some point in the future for
 1669  *      optimal aging of shared pages.
 1670  */
 1671 
 1672 int
 1673 pmap_ts_referenced(vm_page_t m)
 1674 {
 1675         struct tte *tpf;
 1676         struct tte *tpn;
 1677         struct tte *tp;
 1678         u_long data;
 1679         int count;
 1680 
 1681         mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 1682         if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0)
 1683                 return (0);
 1684         count = 0;
 1685         if ((tp = TAILQ_FIRST(&m->md.tte_list)) != NULL) {
 1686                 tpf = tp;
 1687                 do {
 1688                         tpn = TAILQ_NEXT(tp, tte_link);
 1689                         TAILQ_REMOVE(&m->md.tte_list, tp, tte_link);
 1690                         TAILQ_INSERT_TAIL(&m->md.tte_list, tp, tte_link);
 1691                         if ((tp->tte_data & TD_PV) == 0 ||
 1692                             !pmap_track_modified(TTE_GET_PMAP(tp),
 1693                              TTE_GET_VA(tp)))
 1694                                 continue;
 1695                         data = atomic_clear_long(&tp->tte_data, TD_REF);
 1696                         if ((data & TD_REF) != 0 && ++count > 4)
 1697                                 break;
 1698                 } while ((tp = tpn) != NULL && tp != tpf);
 1699         }
 1700         return (count);
 1701 }
 1702 
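      /*
       * Return whether any managed mapping of the page, in a pmap for which
       * modifications are tracked, has the modified (TD_W) bit set.
       */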
 1703 boolean_t
 1704 pmap_is_modified(vm_page_t m)
 1705 {
 1706         struct tte *tp;
 1707 
 1708         mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 1709         if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0)
 1710                 return (FALSE);
 1711         TAILQ_FOREACH(tp, &m->md.tte_list, tte_link) {
 1712                 if ((tp->tte_data & TD_PV) == 0 ||
 1713                     !pmap_track_modified(TTE_GET_PMAP(tp), TTE_GET_VA(tp)))
 1714                         continue;
 1715                 if ((tp->tte_data & TD_W) != 0)
 1716                         return (TRUE);
 1717         }
 1718         return (FALSE);
 1719 }
 1720 
 1721 /*
 1722  *      pmap_is_prefaultable:
 1723  *
 1724  *      Return whether or not the specified virtual address is eligible
 1725  *      for prefault.
 1726  */
 1727 boolean_t
 1728 pmap_is_prefaultable(pmap_t pmap, vm_offset_t addr)
 1729 {
 1730 
 1731         return (FALSE);
 1732 }
 1733 
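      /*
       * Clear the modified (TD_W) bit in every managed mapping of the page,
       * demapping the TLB entry of each mapping that was in fact dirty.
       */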
 1734 void
 1735 pmap_clear_modify(vm_page_t m)
 1736 {
 1737         struct tte *tp;
 1738         u_long data;
 1739 
 1740         mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 1741         if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0)
 1742                 return;
 1743         TAILQ_FOREACH(tp, &m->md.tte_list, tte_link) {
 1744                 if ((tp->tte_data & TD_PV) == 0)
 1745                         continue;
 1746                 data = atomic_clear_long(&tp->tte_data, TD_W);
 1747                 if ((data & TD_W) != 0)
 1748                         tlb_page_demap(TTE_GET_PMAP(tp), TTE_GET_VA(tp));
 1749         }
 1750 }
 1751 
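      /*
       * Clear the referenced (TD_REF) bit in every managed mapping of the
       * page, demapping the TLB entry of each mapping that was referenced.
       */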
 1752 void
 1753 pmap_clear_reference(vm_page_t m)
 1754 {
 1755         struct tte *tp;
 1756         u_long data;
 1757 
 1758         mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 1759         if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0)
 1760                 return;
 1761         TAILQ_FOREACH(tp, &m->md.tte_list, tte_link) {
 1762                 if ((tp->tte_data & TD_PV) == 0)
 1763                         continue;
 1764                 data = atomic_clear_long(&tp->tte_data, TD_REF);
 1765                 if ((data & TD_REF) != 0)
 1766                         tlb_page_demap(TTE_GET_PMAP(tp), TTE_GET_VA(tp));
 1767         }
 1768 }
 1769 
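      /*
       * Revoke write access from every managed mapping of the page: the
       * soft-write and modified bits are cleared, any tracked modification
       * is transferred to the page with vm_page_dirty(), the stale TLB
       * entries are demapped, and PG_WRITEABLE is finally cleared.
       */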
 1770 void
 1771 pmap_clear_write(vm_page_t m)
 1772 {
 1773         struct tte *tp;
 1774         u_long data;
 1775 
 1776         mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 1777         if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0 ||
 1778             (m->flags & PG_WRITEABLE) == 0)
 1779                 return;
 1780         TAILQ_FOREACH(tp, &m->md.tte_list, tte_link) {
 1781                 if ((tp->tte_data & TD_PV) == 0)
 1782                         continue;
 1783                 data = atomic_clear_long(&tp->tte_data, TD_SW | TD_W);
 1784                 if ((data & TD_W) != 0) {
 1785                         if (pmap_track_modified(TTE_GET_PMAP(tp),
 1786                             TTE_GET_VA(tp)))
 1787                                 vm_page_dirty(m);
 1788                         tlb_page_demap(TTE_GET_PMAP(tp), TTE_GET_VA(tp));
 1789                 }
 1790         }
 1791         vm_page_flag_clear(m, PG_WRITEABLE);
 1792 }
 1793 
 1794 int
 1795 pmap_mincore(pmap_t pm, vm_offset_t addr)
 1796 {
 1797         /* TODO */
 1798         return (0);
 1799 }
 1800 
 1801 /*
 1802  * Activate a user pmap.  The pmap must be activated before its address space
 1803  * can be accessed in any way.
 1804  */
 1805 void
 1806 pmap_activate(struct thread *td)
 1807 {
 1808         struct vmspace *vm;
 1809         struct pmap *pm;
 1810         int context;
 1811 
 1812         vm = td->td_proc->p_vmspace;
 1813         pm = vmspace_pmap(vm);
 1814 
 1815         mtx_lock_spin(&sched_lock);
 1816 
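              /*
               * Allocate a fresh TLB context number for this CPU.  Numbers
               * are handed out sequentially from tlb_ctx_min up to
               * tlb_ctx_max; when the pool is exhausted, the user TLB
               * entries are flushed and allocation restarts at tlb_ctx_min.
               */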
 1817         context = PCPU_GET(tlb_ctx);
 1818         if (context == PCPU_GET(tlb_ctx_max)) {
 1819                 tlb_flush_user();
 1820                 context = PCPU_GET(tlb_ctx_min);
 1821         }
 1822         PCPU_SET(tlb_ctx, context + 1);
 1823 
 1824         pm->pm_context[PCPU_GET(cpuid)] = context;
 1825         pm->pm_active |= PCPU_GET(cpumask);
 1826         PCPU_SET(vmspace, vm);
 1827 
 1828         stxa(AA_DMMU_TSB, ASI_DMMU, pm->pm_tsb);
 1829         stxa(AA_IMMU_TSB, ASI_IMMU, pm->pm_tsb);
 1830         stxa(AA_DMMU_PCXR, ASI_DMMU, context);
 1831         membar(Sync);
 1832 
 1833         mtx_unlock_spin(&sched_lock);
 1834 }
 1835 
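      /*
       * Return a hint for the placement of a new mapping; no machine-
       * dependent adjustment is applied, so the requested address is
       * returned unchanged.
       */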
 1836 vm_offset_t
 1837 pmap_addr_hint(vm_object_t object, vm_offset_t va, vm_size_t size)
 1838 {
 1839 
 1840         return (va);
 1841 }
