The Design and Implementation of the FreeBSD Operating System, Second Edition
Now available: The Design and Implementation of the FreeBSD Operating System (Second Edition)


[ source navigation ] [ diff markup ] [ identifier search ] [ freetext search ] [ file search ] [ list types ] [ track identifier ]

FreeBSD/Linux Kernel Cross Reference
sys/sparc64/sparc64/pmap.c

Version: -  FREEBSD  -  FREEBSD-13-STABLE  -  FREEBSD-13-0  -  FREEBSD-12-STABLE  -  FREEBSD-12-0  -  FREEBSD-11-STABLE  -  FREEBSD-11-0  -  FREEBSD-10-STABLE  -  FREEBSD-10-0  -  FREEBSD-9-STABLE  -  FREEBSD-9-0  -  FREEBSD-8-STABLE  -  FREEBSD-8-0  -  FREEBSD-7-STABLE  -  FREEBSD-7-0  -  FREEBSD-6-STABLE  -  FREEBSD-6-0  -  FREEBSD-5-STABLE  -  FREEBSD-5-0  -  FREEBSD-4-STABLE  -  FREEBSD-3-STABLE  -  FREEBSD22  -  l41  -  OPENBSD  -  linux-2.6  -  MK84  -  PLAN9  -  xnu-8792 
SearchContext: -  none  -  3  -  10 

    1 /*-
    2  * Copyright (c) 1991 Regents of the University of California.
    3  * All rights reserved.
    4  * Copyright (c) 1994 John S. Dyson
    5  * All rights reserved.
    6  * Copyright (c) 1994 David Greenman
    7  * All rights reserved.
    8  *
    9  * This code is derived from software contributed to Berkeley by
   10  * the Systems Programming Group of the University of Utah Computer
   11  * Science Department and William Jolitz of UUNET Technologies Inc.
   12  *
   13  * Redistribution and use in source and binary forms, with or without
   14  * modification, are permitted provided that the following conditions
   15  * are met:
   16  * 1. Redistributions of source code must retain the above copyright
   17  *    notice, this list of conditions and the following disclaimer.
   18  * 2. Redistributions in binary form must reproduce the above copyright
   19  *    notice, this list of conditions and the following disclaimer in the
   20  *    documentation and/or other materials provided with the distribution.
   21  * 3. All advertising materials mentioning features or use of this software
   22  *    must display the following acknowledgement:
   23  *      This product includes software developed by the University of
   24  *      California, Berkeley and its contributors.
   25  * 4. Neither the name of the University nor the names of its contributors
   26  *    may be used to endorse or promote products derived from this software
   27  *    without specific prior written permission.
   28  *
   29  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
   30  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   31  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   32  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
   33  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   34  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
   35  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   36  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   37  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   38  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   39  * SUCH DAMAGE.
   40  *
   41  *      from:   @(#)pmap.c      7.7 (Berkeley)  5/12/91
   42  * $FreeBSD$
   43  */
   44 
   45 /*
   46  * Manages physical address maps.
   47  *
   48  * In addition to hardware address maps, this module is called upon to
   49  * provide software-use-only maps which may or may not be stored in the
   50  * same form as hardware maps.  These pseudo-maps are used to store
   51  * intermediate results from copy operations to and from address spaces.
   52  *
   53  * Since the information managed by this module is also stored by the
   54  * logical address mapping module, this module may throw away valid virtual
   55  * to physical mappings at almost any time.  However, invalidations of
   56  * mappings must be done as requested.
   57  *
   58  * In order to cope with hardware architectures which make virtual to
   59  * physical map invalidates expensive, this module may delay invalidate
   60  * reduced protection operations until such time as they are actually
   61  * necessary.  This module is given full information as to which processors
   62  * are currently using which maps, and to when physical maps must be made
   63  * correct.
   64  */
   65 
   66 #include "opt_kstack_pages.h"
   67 #include "opt_msgbuf.h"
   68 #include "opt_pmap.h"
   69 
   70 #include <sys/param.h>
   71 #include <sys/kernel.h>
   72 #include <sys/ktr.h>
   73 #include <sys/lock.h>
   74 #include <sys/msgbuf.h>
   75 #include <sys/mutex.h>
   76 #include <sys/proc.h>
   77 #include <sys/smp.h>
   78 #include <sys/sysctl.h>
   79 #include <sys/systm.h>
   80 #include <sys/vmmeter.h>
   81 
   82 #include <dev/ofw/openfirm.h>
   83 
   84 #include <vm/vm.h> 
   85 #include <vm/vm_param.h>
   86 #include <vm/vm_kern.h>
   87 #include <vm/vm_page.h>
   88 #include <vm/vm_map.h>
   89 #include <vm/vm_object.h>
   90 #include <vm/vm_extern.h>
   91 #include <vm/vm_pageout.h>
   92 #include <vm/vm_pager.h>
   93 
   94 #include <machine/cache.h>
   95 #include <machine/frame.h>
   96 #include <machine/instr.h>
   97 #include <machine/md_var.h>
   98 #include <machine/metadata.h>
   99 #include <machine/ofw_mem.h>
  100 #include <machine/smp.h>
  101 #include <machine/tlb.h>
  102 #include <machine/tte.h>
  103 #include <machine/tsb.h>
  104 
  105 #define PMAP_DEBUG
  106 
  107 #ifndef PMAP_SHPGPERPROC
  108 #define PMAP_SHPGPERPROC        200
  109 #endif
  110 
  111 /* XXX */
  112 #include "opt_sched.h"
  113 #ifndef SCHED_4BSD
  114 #error "sparc64 only works with SCHED_4BSD which uses a global scheduler lock."
  115 #endif
  116 extern struct mtx sched_lock;
  117 
  118 /*
  119  * Virtual and physical address of message buffer.
  120  */
  121 struct msgbuf *msgbufp;
  122 vm_paddr_t msgbuf_phys;
  123 
  124 /*
 * Map of physical memory regions.
  126  */
  127 vm_paddr_t phys_avail[128];
  128 static struct ofw_mem_region mra[128];
  129 struct ofw_mem_region sparc64_memreg[128];
  130 int sparc64_nmemreg;
  131 static struct ofw_map translations[128];
  132 static int translations_size;
  133 
  134 static vm_offset_t pmap_idle_map;
  135 static vm_offset_t pmap_temp_map_1;
  136 static vm_offset_t pmap_temp_map_2;
  137 
  138 /*
  139  * First and last available kernel virtual addresses.
  140  */
  141 vm_offset_t virtual_avail;
  142 vm_offset_t virtual_end;
  143 vm_offset_t kernel_vm_end;
  144 
  145 vm_offset_t vm_max_kernel_address;
  146 
  147 /*
  148  * Kernel pmap.
  149  */
  150 struct pmap kernel_pmap_store;
  151 
  152 /*
  153  * Allocate physical memory for use in pmap_bootstrap.
  154  */
  155 static vm_paddr_t pmap_bootstrap_alloc(vm_size_t size);
  156 
  157 /*
  158  * Map the given physical page at the specified virtual address in the
  159  * target pmap with the protection requested.  If specified the page
  160  * will be wired down.
  161  *
  162  * The page queues and pmap must be locked.
  163  */
  164 static void pmap_enter_locked(pmap_t pm, vm_offset_t va, vm_page_t m,
  165     vm_prot_t prot, boolean_t wired);
  166 
  167 extern int tl1_immu_miss_patch_1[];
  168 extern int tl1_immu_miss_patch_2[];
  169 extern int tl1_dmmu_miss_patch_1[];
  170 extern int tl1_dmmu_miss_patch_2[];
  171 extern int tl1_dmmu_prot_patch_1[];
  172 extern int tl1_dmmu_prot_patch_2[];
  173 
/*
 * If a user pmap is processed with pmap_remove and the resident count
 * drops to 0, there are no more pages to remove, so we need not
 * continue.
 */
  179 #define PMAP_REMOVE_DONE(pm) \
  180         ((pm) != kernel_pmap && (pm)->pm_stats.resident_count == 0)
  181 
  182 /*
  183  * The threshold (in bytes) above which tsb_foreach() is used in pmap_remove()
  184  * and pmap_protect() instead of trying each virtual address.
  185  */
  186 #define PMAP_TSB_THRESH ((TSB_SIZE / 2) * PAGE_SIZE)
  187 
  188 SYSCTL_NODE(_debug, OID_AUTO, pmap_stats, CTLFLAG_RD, 0, "");
  189 
  190 PMAP_STATS_VAR(pmap_nenter);
  191 PMAP_STATS_VAR(pmap_nenter_update);
  192 PMAP_STATS_VAR(pmap_nenter_replace);
  193 PMAP_STATS_VAR(pmap_nenter_new);
  194 PMAP_STATS_VAR(pmap_nkenter);
  195 PMAP_STATS_VAR(pmap_nkenter_oc);
  196 PMAP_STATS_VAR(pmap_nkenter_stupid);
  197 PMAP_STATS_VAR(pmap_nkremove);
  198 PMAP_STATS_VAR(pmap_nqenter);
  199 PMAP_STATS_VAR(pmap_nqremove);
  200 PMAP_STATS_VAR(pmap_ncache_enter);
  201 PMAP_STATS_VAR(pmap_ncache_enter_c);
  202 PMAP_STATS_VAR(pmap_ncache_enter_oc);
  203 PMAP_STATS_VAR(pmap_ncache_enter_cc);
  204 PMAP_STATS_VAR(pmap_ncache_enter_coc);
  205 PMAP_STATS_VAR(pmap_ncache_enter_nc);
  206 PMAP_STATS_VAR(pmap_ncache_enter_cnc);
  207 PMAP_STATS_VAR(pmap_ncache_remove);
  208 PMAP_STATS_VAR(pmap_ncache_remove_c);
  209 PMAP_STATS_VAR(pmap_ncache_remove_oc);
  210 PMAP_STATS_VAR(pmap_ncache_remove_cc);
  211 PMAP_STATS_VAR(pmap_ncache_remove_coc);
  212 PMAP_STATS_VAR(pmap_ncache_remove_nc);
  213 PMAP_STATS_VAR(pmap_nzero_page);
  214 PMAP_STATS_VAR(pmap_nzero_page_c);
  215 PMAP_STATS_VAR(pmap_nzero_page_oc);
  216 PMAP_STATS_VAR(pmap_nzero_page_nc);
  217 PMAP_STATS_VAR(pmap_nzero_page_area);
  218 PMAP_STATS_VAR(pmap_nzero_page_area_c);
  219 PMAP_STATS_VAR(pmap_nzero_page_area_oc);
  220 PMAP_STATS_VAR(pmap_nzero_page_area_nc);
  221 PMAP_STATS_VAR(pmap_nzero_page_idle);
  222 PMAP_STATS_VAR(pmap_nzero_page_idle_c);
  223 PMAP_STATS_VAR(pmap_nzero_page_idle_oc);
  224 PMAP_STATS_VAR(pmap_nzero_page_idle_nc);
  225 PMAP_STATS_VAR(pmap_ncopy_page);
  226 PMAP_STATS_VAR(pmap_ncopy_page_c);
  227 PMAP_STATS_VAR(pmap_ncopy_page_oc);
  228 PMAP_STATS_VAR(pmap_ncopy_page_nc);
  229 PMAP_STATS_VAR(pmap_ncopy_page_dc);
  230 PMAP_STATS_VAR(pmap_ncopy_page_doc);
  231 PMAP_STATS_VAR(pmap_ncopy_page_sc);
  232 PMAP_STATS_VAR(pmap_ncopy_page_soc);
  233 
  234 PMAP_STATS_VAR(pmap_nnew_thread);
  235 PMAP_STATS_VAR(pmap_nnew_thread_oc);
  236 
  237 /*
  238  * Quick sort callout for comparing memory regions.
  239  */
  240 static int mr_cmp(const void *a, const void *b);
  241 static int om_cmp(const void *a, const void *b);
  242 static int
  243 mr_cmp(const void *a, const void *b)
  244 {
  245         const struct ofw_mem_region *mra;
  246         const struct ofw_mem_region *mrb;
  247 
  248         mra = a;
  249         mrb = b;
  250         if (mra->mr_start < mrb->mr_start)
  251                 return (-1);
  252         else if (mra->mr_start > mrb->mr_start)
  253                 return (1);
  254         else
  255                 return (0);
  256 }
  257 static int
  258 om_cmp(const void *a, const void *b)
  259 {
  260         const struct ofw_map *oma;
  261         const struct ofw_map *omb;
  262 
  263         oma = a;
  264         omb = b;
  265         if (oma->om_start < omb->om_start)
  266                 return (-1);
  267         else if (oma->om_start > omb->om_start)
  268                 return (1);
  269         else
  270                 return (0);
  271 }
  272 
  273 /*
  274  * Bootstrap the system enough to run with virtual memory.
  275  */
void
pmap_bootstrap(vm_offset_t ekva)
{
	struct pmap *pm;
	struct tte *tp;
	vm_offset_t off;
	vm_offset_t va;
	vm_paddr_t pa;
	vm_size_t physsz;
	vm_size_t virtsz;
	ihandle_t pmem;		/* OFW handle for the /memory node */
	ihandle_t vmem;		/* OFW handle for the /virtual-memory node */
	int sz;
	int i;
	int j;

	/*
	 * Find out what physical memory is available from the prom and
	 * initialize the phys_avail array.  This must be done before
	 * pmap_bootstrap_alloc is called.
	 */
	if ((pmem = OF_finddevice("/memory")) == -1)
		panic("pmap_bootstrap: finddevice /memory");
	if ((sz = OF_getproplen(pmem, "available")) == -1)
		panic("pmap_bootstrap: getproplen /memory/available");
	if (sizeof(phys_avail) < sz)
		panic("pmap_bootstrap: phys_avail too small");
	if (sizeof(mra) < sz)
		panic("pmap_bootstrap: mra too small");
	bzero(mra, sz);
	if (OF_getprop(pmem, "available", mra, sz) == -1)
		panic("pmap_bootstrap: getprop /memory/available");
	sz /= sizeof(*mra);
	CTR0(KTR_PMAP, "pmap_bootstrap: physical memory");
	/* Sort the regions so phys_avail ends up in ascending order. */
	qsort(mra, sz, sizeof (*mra), mr_cmp);
	physsz = 0;
	/* The hw.physmem tunable may cap usable memory (0 = no cap). */
	getenv_quad("hw.physmem", &physmem);
	physmem = btoc(physmem);
	for (i = 0, j = 0; i < sz; i++, j += 2) {
		CTR2(KTR_PMAP, "start=%#lx size=%#lx", mra[i].mr_start,
		    mra[i].mr_size);
		/*
		 * If this region would exceed the hw.physmem limit, clip
		 * it (or drop it entirely) and stop scanning.
		 */
		if (physmem != 0 && btoc(physsz + mra[i].mr_size) >= physmem) {
			if (btoc(physsz) < physmem) {
				phys_avail[j] = mra[i].mr_start;
				phys_avail[j + 1] = mra[i].mr_start +
				    (ctob(physmem) - physsz);
				physsz = ctob(physmem);
			}
			break;
		}
		phys_avail[j] = mra[i].mr_start;
		phys_avail[j + 1] = mra[i].mr_start + mra[i].mr_size;
		physsz += mra[i].mr_size;
	}
	physmem = btoc(physsz);

	/*
	 * Calculate the size of kernel virtual memory, and the size and mask
	 * for the kernel tsb.
	 */
	virtsz = roundup(physsz, PAGE_SIZE_4M << (PAGE_SHIFT - TTE_SHIFT));
	vm_max_kernel_address = VM_MIN_KERNEL_ADDRESS + virtsz;
	tsb_kernel_size = virtsz >> (PAGE_SHIFT - TTE_SHIFT);
	tsb_kernel_mask = (tsb_kernel_size >> TTE_SHIFT) - 1;

	/*
	 * Allocate the kernel tsb and lock it in the tlb.  The TSB must be
	 * 4MB aligned so it can be covered by locked 4MB TLB entries.
	 */
	pa = pmap_bootstrap_alloc(tsb_kernel_size);
	if (pa & PAGE_MASK_4M)
		panic("pmap_bootstrap: tsb unaligned\n");
	tsb_kernel_phys = pa;
	/* The TSB virtual area sits immediately below the kernel's KVA. */
	tsb_kernel = (struct tte *)(VM_MIN_KERNEL_ADDRESS - tsb_kernel_size);
	pmap_map_tsb();
	bzero(tsb_kernel, tsb_kernel_size);

	/*
	 * Allocate and map the message buffer.
	 */
	msgbuf_phys = pmap_bootstrap_alloc(MSGBUF_SIZE);
	msgbufp = (struct msgbuf *)TLB_PHYS_TO_DIRECT(msgbuf_phys);

	/*
	 * Patch the virtual address and the tsb mask into the trap table.
	 */

	/* Encode a SETHI instruction (upper 22 bits of an immediate). */
#define SETHI(rd, imm22) \
	(EIF_OP(IOP_FORM2) | EIF_F2_RD(rd) | EIF_F2_OP2(INS0_SETHI) | \
	    EIF_IMM((imm22) >> 10, 22))
	/* Encode an OR-with-immediate instruction (low 13 bits). */
#define OR_R_I_R(rd, imm13, rs1) \
	(EIF_OP(IOP_MISC) | EIF_F3_RD(rd) | EIF_F3_OP3(INS2_OR) | \
	    EIF_F3_RS1(rs1) | EIF_F3_I(1) | EIF_IMM(imm13, 13))

	/*
	 * Verify that each patch site still holds the expected instruction
	 * templates (sethi/or/sethi with zero immediates), OR the runtime
	 * values into the immediates, and flush the patched instructions
	 * from the i-cache.
	 * NOTE(review): the panic message spells "pmap_boostrap" (sic);
	 * the typo is in the runtime string, left untouched here.
	 */
#define PATCH(addr) do { \
	if (addr[0] != SETHI(IF_F2_RD(addr[0]), 0x0) || \
	    addr[1] != OR_R_I_R(IF_F3_RD(addr[1]), 0x0, IF_F3_RS1(addr[1])) || \
	    addr[2] != SETHI(IF_F2_RD(addr[2]), 0x0)) \
		panic("pmap_boostrap: patched instructions have changed"); \
	addr[0] |= EIF_IMM((tsb_kernel_mask) >> 10, 22); \
	addr[1] |= EIF_IMM(tsb_kernel_mask, 10); \
	addr[2] |= EIF_IMM(((vm_offset_t)tsb_kernel) >> 10, 22); \
	flush(addr); \
	flush(addr + 1); \
	flush(addr + 2); \
} while (0)

	PATCH(tl1_immu_miss_patch_1);
	PATCH(tl1_immu_miss_patch_2);
	PATCH(tl1_dmmu_miss_patch_1);
	PATCH(tl1_dmmu_miss_patch_2);
	PATCH(tl1_dmmu_prot_patch_1);
	PATCH(tl1_dmmu_prot_patch_2);

	/*
	 * Enter fake 8k pages for the 4MB kernel pages, so that
	 * pmap_kextract() will work for them.
	 */
	for (i = 0; i < kernel_tlb_slots; i++) {
		pa = kernel_tlbs[i].te_pa;
		va = kernel_tlbs[i].te_va;
		for (off = 0; off < PAGE_SIZE_4M; off += PAGE_SIZE) {
			tp = tsb_kvtotte(va + off);
			tp->tte_vpn = TV_VPN(va + off, TS_8K);
			tp->tte_data = TD_V | TD_8K | TD_PA(pa + off) |
			    TD_REF | TD_SW | TD_CP | TD_CV | TD_P | TD_W;
		}
	}

	/*
	 * Set the start and end of kva.  The kernel is loaded at the first
	 * available 4 meg super page, so round up to the end of the page.
	 */
	virtual_avail = roundup2(ekva, PAGE_SIZE_4M);
	virtual_end = vm_max_kernel_address;
	kernel_vm_end = vm_max_kernel_address;

	/*
	 * Allocate kva space for temporary mappings.  Each map gets one
	 * page per dcache color so a matching-color va is always available.
	 */
	pmap_idle_map = virtual_avail;
	virtual_avail += PAGE_SIZE * DCACHE_COLORS;
	pmap_temp_map_1 = virtual_avail;
	virtual_avail += PAGE_SIZE * DCACHE_COLORS;
	pmap_temp_map_2 = virtual_avail;
	virtual_avail += PAGE_SIZE * DCACHE_COLORS;

	/*
	 * Allocate a kernel stack with guard page for thread0 and map it into
	 * the kernel tsb.  We must ensure that the virtual address is coloured
	 * properly, since we're allocating from phys_avail so the memory won't
	 * have an associated vm_page_t.
	 */
	pa = pmap_bootstrap_alloc(roundup(KSTACK_PAGES, DCACHE_COLORS) *
	    PAGE_SIZE);
	kstack0_phys = pa;
	virtual_avail += roundup(KSTACK_GUARD_PAGES, DCACHE_COLORS) *
	    PAGE_SIZE;
	kstack0 = virtual_avail;
	virtual_avail += roundup(KSTACK_PAGES, DCACHE_COLORS) * PAGE_SIZE;
	KASSERT(DCACHE_COLOR(kstack0) == DCACHE_COLOR(kstack0_phys),
	    ("pmap_bootstrap: kstack0 miscoloured"));
	for (i = 0; i < KSTACK_PAGES; i++) {
		pa = kstack0_phys + i * PAGE_SIZE;
		va = kstack0 + i * PAGE_SIZE;
		tp = tsb_kvtotte(va);
		tp->tte_vpn = TV_VPN(va, TS_8K);
		tp->tte_data = TD_V | TD_8K | TD_PA(pa) | TD_REF | TD_SW |
		    TD_CP | TD_CV | TD_P | TD_W;
	}

	/*
	 * Calculate the last available physical address.  Scan to the last
	 * populated phys_avail pair; its end is the highest usable address.
	 */
	for (i = 0; phys_avail[i + 2] != 0; i += 2)
		;
	Maxmem = sparc64_btop(phys_avail[i + 1]);

	/*
	 * Add the prom mappings to the kernel tsb.
	 */
	if ((vmem = OF_finddevice("/virtual-memory")) == -1)
		panic("pmap_bootstrap: finddevice /virtual-memory");
	if ((sz = OF_getproplen(vmem, "translations")) == -1)
		panic("pmap_bootstrap: getproplen translations");
	if (sizeof(translations) < sz)
		panic("pmap_bootstrap: translations too small");
	bzero(translations, sz);
	if (OF_getprop(vmem, "translations", translations, sz) == -1)
		panic("pmap_bootstrap: getprop /virtual-memory/translations");
	sz /= sizeof(*translations);
	translations_size = sz;
	CTR0(KTR_PMAP, "pmap_bootstrap: translations");
	qsort(translations, sz, sizeof (*translations), om_cmp);
	for (i = 0; i < sz; i++) {
		CTR3(KTR_PMAP,
		    "translation: start=%#lx size=%#lx tte=%#lx",
		    translations[i].om_start, translations[i].om_size,
		    translations[i].om_tte);
		/* Only keep mappings that lie in the prom's address range. */
		if (translations[i].om_start < VM_MIN_PROM_ADDRESS ||
		    translations[i].om_start > VM_MAX_PROM_ADDRESS)
			continue;
		for (off = 0; off < translations[i].om_size;
		    off += PAGE_SIZE) {
			va = translations[i].om_start + off;
			tp = tsb_kvtotte(va);
			tp->tte_vpn = TV_VPN(va, TS_8K);
			/*
			 * Reuse the prom's tte data, clearing the software
			 * bits and adding execute permission; the physical
			 * address advances with off.
			 */
			tp->tte_data =
			    ((translations[i].om_tte &
			      ~(TD_SOFT_MASK << TD_SOFT_SHIFT)) | TD_EXEC) +
			    off;
		}
	}

	/*
	 * Get the available physical memory ranges from /memory/reg. These
	 * are only used for kernel dumps, but it may not be wise to do prom
	 * calls in that situation.
	 */
	if ((sz = OF_getproplen(pmem, "reg")) == -1)
		panic("pmap_bootstrap: getproplen /memory/reg");
	if (sizeof(sparc64_memreg) < sz)
		panic("pmap_bootstrap: sparc64_memreg too small");
	if (OF_getprop(pmem, "reg", sparc64_memreg, sz) == -1)
		panic("pmap_bootstrap: getprop /memory/reg");
	sparc64_nmemreg = sz / sizeof(*sparc64_memreg);

	/*
	 * Initialize the kernel pmap (which is statically allocated).
	 * NOTE: PMAP_LOCK_INIT() is needed as part of the initialization
	 * but sparc64 start up is not ready to initialize mutexes yet.
	 * It is called in machdep.c.
	 */
	pm = kernel_pmap;
	for (i = 0; i < MAXCPU; i++)
		pm->pm_context[i] = TLB_CTX_KERNEL;
	pm->pm_active = ~0;

	/* XXX flush all non-locked tlb entries */
}
  515 
/*
 * Map the kernel TSB with locked 4MB TLB entries and set the secondary
 * MMU context to the kernel context.  Called from pmap_bootstrap before
 * the TSB is first written.
 */
void
pmap_map_tsb(void)
{
	vm_offset_t va;
	vm_paddr_t pa;
	u_long data;
	u_long s;
	int i;

	/* Block interrupts while writing MMU registers directly. */
	s = intr_disable();

	/*
	 * Map the 4mb tsb pages.  TD_L locks the entries into the TLB so
	 * they can never be evicted.
	 */
	for (i = 0; i < tsb_kernel_size; i += PAGE_SIZE_4M) {
		va = (vm_offset_t)tsb_kernel + i;
		pa = tsb_kernel_phys + i;
		data = TD_V | TD_4M | TD_PA(pa) | TD_L | TD_CP | TD_CV |
		    TD_P | TD_W;
		/* XXX - cheetah */
		stxa(AA_DMMU_TAR, ASI_DMMU, TLB_TAR_VA(va) |
		    TLB_TAR_CTX(TLB_CTX_KERNEL));
		stxa_sync(0, ASI_DTLB_DATA_IN_REG, data);
	}

	/*
	 * Set the secondary context to be the kernel context (needed for
	 * fp block operations in the kernel and the cache code).
	 */
	stxa(AA_DMMU_SCXR, ASI_DMMU, TLB_CTX_KERNEL);
	membar(Sync);

	intr_restore(s);
}
  550 
  551 /*
  552  * Allocate a physical page of memory directly from the phys_avail map.
  553  * Can only be called from pmap_bootstrap before avail start and end are
  554  * calculated.
  555  */
  556 static vm_paddr_t
  557 pmap_bootstrap_alloc(vm_size_t size)
  558 {
  559         vm_paddr_t pa;
  560         int i;
  561 
  562         size = round_page(size);
  563         for (i = 0; phys_avail[i + 1] != 0; i += 2) {
  564                 if (phys_avail[i + 1] - phys_avail[i] < size)
  565                         continue;
  566                 pa = phys_avail[i];
  567                 phys_avail[i] += size;
  568                 return (pa);
  569         }
  570         panic("pmap_bootstrap_alloc");
  571 }
  572 
  573 /*
  574  * Initialize a vm_page's machine-dependent fields.
  575  */
  576 void
  577 pmap_page_init(vm_page_t m)
  578 {
  579 
  580         TAILQ_INIT(&m->md.tte_list);
  581         m->md.color = DCACHE_COLOR(VM_PAGE_TO_PHYS(m));
  582         m->md.flags = 0;
  583         m->md.pmap = NULL;
  584 }
  585 
  586 /*
  587  * Initialize the pmap module.
  588  */
  589 void
  590 pmap_init(void)
  591 {
  592         vm_offset_t addr;
  593         vm_size_t size;
  594         int result;
  595         int i;
  596 
  597         for (i = 0; i < translations_size; i++) {
  598                 addr = translations[i].om_start;
  599                 size = translations[i].om_size;
  600                 if (addr < VM_MIN_PROM_ADDRESS || addr > VM_MAX_PROM_ADDRESS)
  601                         continue;
  602                 result = vm_map_find(kernel_map, NULL, 0, &addr, size, FALSE,
  603                     VM_PROT_ALL, VM_PROT_ALL, 0);
  604                 if (result != KERN_SUCCESS || addr != translations[i].om_start)
  605                         panic("pmap_init: vm_map_find");
  606         }
  607 }
  608 
  609 /*
  610  * Extract the physical page address associated with the given
  611  * map/virtual_address pair.
  612  */
  613 vm_paddr_t
  614 pmap_extract(pmap_t pm, vm_offset_t va)
  615 {
  616         struct tte *tp;
  617         vm_paddr_t pa;
  618 
  619         if (pm == kernel_pmap)
  620                 return (pmap_kextract(va));
  621         PMAP_LOCK(pm);
  622         tp = tsb_tte_lookup(pm, va);
  623         if (tp == NULL)
  624                 pa = 0;
  625         else
  626                 pa = TTE_GET_PA(tp) | (va & TTE_GET_PAGE_MASK(tp));
  627         PMAP_UNLOCK(pm);
  628         return (pa);
  629 }
  630 
  631 /*
  632  * Atomically extract and hold the physical page with the given
  633  * pmap and virtual address pair if that mapping permits the given
  634  * protection.
  635  */
vm_page_t
pmap_extract_and_hold(pmap_t pm, vm_offset_t va, vm_prot_t prot)
{
	struct tte *tp;
	vm_page_t m;

	m = NULL;
	vm_page_lock_queues();
	if (pm == kernel_pmap) {
		if (va >= VM_MIN_DIRECT_ADDRESS) {
			/*
			 * Direct-mapped address: the physical address
			 * follows from va itself, so hold the page here
			 * and leave tp NULL to skip the check below.
			 */
			tp = NULL;
			m = PHYS_TO_VM_PAGE(TLB_DIRECT_TO_PHYS(va));
			vm_page_hold(m);
		} else {
			tp = tsb_kvtotte(va);
			/* Treat an invalid tte like a missing mapping. */
			if ((tp->tte_data & TD_V) == 0)
				tp = NULL;
		}
	} else {
		/* User pmaps are locked for the tsb lookup below. */
		PMAP_LOCK(pm);
		tp = tsb_tte_lookup(pm, va);
	}
	/*
	 * Hold the page only when the mapping exists and is writable
	 * (TD_SW) whenever write access was requested.
	 */
	if (tp != NULL && ((tp->tte_data & TD_SW) ||
	    (prot & VM_PROT_WRITE) == 0)) {
		m = PHYS_TO_VM_PAGE(TTE_GET_PA(tp));
		vm_page_hold(m);
	}
	vm_page_unlock_queues();
	if (pm != kernel_pmap)
		PMAP_UNLOCK(pm);
	return (m);
}
  668 
  669 /*
  670  * Extract the physical page address associated with the given kernel virtual
  671  * address.
  672  */
  673 vm_paddr_t
  674 pmap_kextract(vm_offset_t va)
  675 {
  676         struct tte *tp;
  677 
  678         if (va >= VM_MIN_DIRECT_ADDRESS)
  679                 return (TLB_DIRECT_TO_PHYS(va));
  680         tp = tsb_kvtotte(va);
  681         if ((tp->tte_data & TD_V) == 0)
  682                 return (0);
  683         return (TTE_GET_PA(tp) | (va & TTE_GET_PAGE_MASK(tp)));
  684 }
  685 
/*
 * Account for a new mapping of page m at va in the dcache color
 * bookkeeping.  Returns 1 if the mapping may be entered cacheable
 * (TD_CV set), 0 if it must be non-cacheable.  May demote all existing
 * mappings of the page to non-cacheable when colors conflict.
 */
int
pmap_cache_enter(vm_page_t m, vm_offset_t va)
{
	struct tte *tp;
	int color;

	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
	KASSERT((m->flags & PG_FICTITIOUS) == 0,
	    ("pmap_cache_enter: fake page"));
	PMAP_STATS_INC(pmap_ncache_enter);

	/*
	 * Find the color for this virtual address and note the added mapping.
	 */
	color = DCACHE_COLOR(va);
	m->md.colors[color]++;

	/*
	 * If all existing mappings have the same color, the mapping is
	 * cacheable.
	 */
	if (m->md.color == color) {
		KASSERT(m->md.colors[DCACHE_OTHER_COLOR(color)] == 0,
		    ("pmap_cache_enter: cacheable, mappings of other color"));
		if (m->md.color == DCACHE_COLOR(VM_PAGE_TO_PHYS(m)))
			PMAP_STATS_INC(pmap_ncache_enter_c);
		else
			PMAP_STATS_INC(pmap_ncache_enter_oc);
		return (1);
	}

	/*
	 * If there are no mappings of the other color, and the page still has
	 * the wrong color, this must be a new mapping.  Change the color to
	 * match the new mapping, which is cacheable.  We must flush the page
	 * from the cache now.
	 */
	if (m->md.colors[DCACHE_OTHER_COLOR(color)] == 0) {
		KASSERT(m->md.colors[color] == 1,
		    ("pmap_cache_enter: changing color, not new mapping"));
		dcache_page_inval(VM_PAGE_TO_PHYS(m));
		m->md.color = color;
		if (m->md.color == DCACHE_COLOR(VM_PAGE_TO_PHYS(m)))
			PMAP_STATS_INC(pmap_ncache_enter_cc);
		else
			PMAP_STATS_INC(pmap_ncache_enter_coc);
		return (1);
	}

	/*
	 * If the mapping is already non-cacheable, just return.
	 * (A color of -1 marks the page as uncacheable.)
	 */
	if (m->md.color == -1) {
		PMAP_STATS_INC(pmap_ncache_enter_nc);
		return (0);
	}

	PMAP_STATS_INC(pmap_ncache_enter_cnc);

	/*
	 * Mark all mappings as uncacheable, flush any lines with the other
	 * color out of the dcache, and set the color to none (-1).
	 * Each demoted mapping's TLB entry must be demapped so the
	 * cleared TD_CV bit takes effect.
	 */
	TAILQ_FOREACH(tp, &m->md.tte_list, tte_link) {
		atomic_clear_long(&tp->tte_data, TD_CV);
		tlb_page_demap(TTE_GET_PMAP(tp), TTE_GET_VA(tp));
	}
	dcache_page_inval(VM_PAGE_TO_PHYS(m));
	m->md.color = -1;
	return (0);
}
  757 
/*
 * Account for the removal of a mapping of page m at va in the dcache
 * color bookkeeping.  When the last conflicting mapping goes away, the
 * remaining mappings of the other color are promoted back to cacheable.
 */
void
pmap_cache_remove(vm_page_t m, vm_offset_t va)
{
	struct tte *tp;
	int color;

	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
	CTR3(KTR_PMAP, "pmap_cache_remove: m=%p va=%#lx c=%d", m, va,
	    m->md.colors[DCACHE_COLOR(va)]);
	KASSERT((m->flags & PG_FICTITIOUS) == 0,
	    ("pmap_cache_remove: fake page"));
	KASSERT(m->md.colors[DCACHE_COLOR(va)] > 0,
	    ("pmap_cache_remove: no mappings %d <= 0",
	    m->md.colors[DCACHE_COLOR(va)]));
	PMAP_STATS_INC(pmap_ncache_remove);

	/*
	 * Find the color for this virtual address and note the removal of
	 * the mapping.
	 */
	color = DCACHE_COLOR(va);
	m->md.colors[color]--;

	/*
	 * If the page is cacheable, just return and keep the same color, even
	 * if there are no longer any mappings.  (-1 marks non-cacheable.)
	 */
	if (m->md.color != -1) {
		if (m->md.color == DCACHE_COLOR(VM_PAGE_TO_PHYS(m)))
			PMAP_STATS_INC(pmap_ncache_remove_c);
		else
			PMAP_STATS_INC(pmap_ncache_remove_oc);
		return;
	}

	KASSERT(m->md.colors[DCACHE_OTHER_COLOR(color)] != 0,
	    ("pmap_cache_remove: uncacheable, no mappings of other color"));

	/*
	 * If the page is not cacheable (color is -1), and the number of
	 * mappings for this color is not zero, just return.  There are
	 * mappings of the other color still, so remain non-cacheable.
	 */
	if (m->md.colors[color] != 0) {
		PMAP_STATS_INC(pmap_ncache_remove_nc);
		return;
	}

	/*
	 * The number of mappings for this color is now zero.  Recache the
	 * other colored mappings, and change the page color to the other
	 * color.  There should be no lines in the data cache for this page,
	 * so flushing should not be needed.  Each promoted mapping's TLB
	 * entry is demapped so the restored TD_CV bit takes effect.
	 */
	TAILQ_FOREACH(tp, &m->md.tte_list, tte_link) {
		atomic_set_long(&tp->tte_data, TD_CV);
		tlb_page_demap(TTE_GET_PMAP(tp), TTE_GET_VA(tp));
	}
	m->md.color = DCACHE_OTHER_COLOR(color);

	if (m->md.color == DCACHE_COLOR(VM_PAGE_TO_PHYS(m)))
		PMAP_STATS_INC(pmap_ncache_remove_cc);
	else
		PMAP_STATS_INC(pmap_ncache_remove_coc);
}
  823 
  824 /*
  825  * Map a wired page into kernel virtual address space.
  826  */
void
pmap_kenter(vm_offset_t va, vm_page_t m)
{
	vm_offset_t ova;
	struct tte *tp;
	vm_page_t om;
	u_long data;

	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
	PMAP_STATS_INC(pmap_nkenter);
	tp = tsb_kvtotte(va);
	CTR4(KTR_PMAP, "pmap_kenter: va=%#lx pa=%#lx tp=%p data=%#lx",
	    va, VM_PAGE_TO_PHYS(m), tp, tp->tte_data);
	/* Trace off-colour mappings; they may cause illegal dcache aliases. */
	if (DCACHE_COLOR(VM_PAGE_TO_PHYS(m)) != DCACHE_COLOR(va)) {
		CTR6(KTR_CT2,
	"pmap_kenter: off colour va=%#lx pa=%#lx o=%p oc=%#lx ot=%d pi=%#lx",
		    va, VM_PAGE_TO_PHYS(m), m->object,
		    m->object ? m->object->pg_color : -1,
		    m->object ? m->object->type : -1,
		    m->pindex);
		PMAP_STATS_INC(pmap_nkenter_oc);
	}
	/*
	 * If the TSB slot already holds a valid mapping, tear down its
	 * bookkeeping first (unless it's the identical mapping, in which
	 * case there is nothing to do).
	 */
	if ((tp->tte_data & TD_V) != 0) {
		om = PHYS_TO_VM_PAGE(TTE_GET_PA(tp));
		ova = TTE_GET_VA(tp);
		if (m == om && va == ova) {
			PMAP_STATS_INC(pmap_nkenter_stupid);
			return;
		}
		TAILQ_REMOVE(&om->md.tte_list, tp, tte_link);
		pmap_cache_remove(om, ova);
		if (va != ova)
			tlb_page_demap(kernel_pmap, ova);
	}
	/*
	 * Wired kernel mappings need no reference/modify emulation, so the
	 * TD_REF and TD_W bits are preset here.
	 */
	data = TD_V | TD_8K | VM_PAGE_TO_PHYS(m) | TD_REF | TD_SW | TD_CP |
	    TD_P | TD_W;
	/* pmap_cache_enter() decides whether this mapping may be cached. */
	if (pmap_cache_enter(m, va) != 0)
		data |= TD_CV;
	tp->tte_vpn = TV_VPN(va, TS_8K);
	tp->tte_data = data;
	TAILQ_INSERT_TAIL(&m->md.tte_list, tp, tte_link);
}
  869 
  870 /*
  871  * Map a wired page into kernel virtual address space. This additionally
 * takes a flag argument which is or'ed to the TTE data. This is used by
  873  * bus_space_map().
  874  * NOTE: if the mapping is non-cacheable, it's the caller's responsibility
  875  * to flush entries that might still be in the cache, if applicable.
  876  */
void
pmap_kenter_flags(vm_offset_t va, vm_paddr_t pa, u_long flags)
{
	struct tte *tp;

	tp = tsb_kvtotte(va);
	CTR4(KTR_PMAP, "pmap_kenter_flags: va=%#lx pa=%#lx tp=%p data=%#lx",
	    va, pa, tp, tp->tte_data);
	/*
	 * Unlike pmap_kenter(), no tte_list or cache-colour bookkeeping is
	 * done here; these mappings typically cover device space with no
	 * backing vm_page.  The VPN is written before the data word, which
	 * carries the valid bit (TD_V).
	 */
	tp->tte_vpn = TV_VPN(va, TS_8K);
	tp->tte_data = TD_V | TD_8K | TD_PA(pa) | TD_REF | TD_P | flags;
}
  888 
  889 /*
  890  * Remove a wired page from kernel virtual address space.
  891  */
void
pmap_kremove(vm_offset_t va)
{
	struct tte *tp;
	vm_page_t m;

	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
	PMAP_STATS_INC(pmap_nkremove);
	tp = tsb_kvtotte(va);
	CTR3(KTR_PMAP, "pmap_kremove: va=%#lx tp=%p data=%#lx", va, tp,
	    tp->tte_data);
	/* Nothing to do if there is no valid mapping at this address. */
	if ((tp->tte_data & TD_V) == 0)
		return;
	/* Undo the tte_list and cache-colour bookkeeping of pmap_kenter(). */
	m = PHYS_TO_VM_PAGE(TTE_GET_PA(tp));
	TAILQ_REMOVE(&m->md.tte_list, tp, tte_link);
	pmap_cache_remove(m, va);
	TTE_ZERO(tp);
}
  910 
  911 /*
  912  * Inverse of pmap_kenter_flags, used by bus_space_unmap().
  913  */
  914 void
  915 pmap_kremove_flags(vm_offset_t va)
  916 {
  917         struct tte *tp;
  918 
  919         tp = tsb_kvtotte(va);
  920         CTR3(KTR_PMAP, "pmap_kremove: va=%#lx tp=%p data=%#lx", va, tp,
  921             tp->tte_data);
  922         TTE_ZERO(tp);
  923 }
  924 
  925 /*
  926  * Map a range of physical addresses into kernel virtual address space.
  927  *
  928  * The value passed in *virt is a suggested virtual address for the mapping.
  929  * Architectures which can support a direct-mapped physical to virtual region
  930  * can return the appropriate address within that region, leaving '*virt'
  931  * unchanged.
  932  */
vm_offset_t
pmap_map(vm_offset_t *virt, vm_paddr_t start, vm_paddr_t end, int prot)
{

	/*
	 * sparc64 provides a direct-mapped region covering physical memory,
	 * so just return the direct-map address of 'start'.  *virt is left
	 * unchanged; 'end' and 'prot' are unused.
	 */
	return (TLB_PHYS_TO_DIRECT(start));
}
  939 
  940 /*
  941  * Map a list of wired pages into kernel virtual address space.  This is
  942  * intended for temporary mappings which do not need page modification or
  943  * references recorded.  Existing mappings in the region are overwritten.
  944  */
  945 void
  946 pmap_qenter(vm_offset_t sva, vm_page_t *m, int count)
  947 {
  948         vm_offset_t va;
  949         int locked;
  950 
  951         PMAP_STATS_INC(pmap_nqenter);
  952         va = sva;
  953         if (!(locked = mtx_owned(&vm_page_queue_mtx)))
  954                 vm_page_lock_queues();
  955         while (count-- > 0) {
  956                 pmap_kenter(va, *m);
  957                 va += PAGE_SIZE;
  958                 m++;
  959         }
  960         if (!locked)
  961                 vm_page_unlock_queues();
  962         tlb_range_demap(kernel_pmap, sva, va);
  963 }
  964 
  965 /*
  966  * Remove page mappings from kernel virtual address space.  Intended for
  967  * temporary mappings entered by pmap_qenter.
  968  */
  969 void
  970 pmap_qremove(vm_offset_t sva, int count)
  971 {
  972         vm_offset_t va;
  973         int locked;
  974 
  975         PMAP_STATS_INC(pmap_nqremove);
  976         va = sva;
  977         if (!(locked = mtx_owned(&vm_page_queue_mtx)))
  978                 vm_page_lock_queues();
  979         while (count-- > 0) {
  980                 pmap_kremove(va);
  981                 va += PAGE_SIZE;
  982         }
  983         if (!locked)
  984                 vm_page_unlock_queues();
  985         tlb_range_demap(kernel_pmap, sva, va);
  986 }
  987 
  988 /*
  989  * Initialize the pmap associated with process 0.
  990  */
  991 void
  992 pmap_pinit0(pmap_t pm)
  993 {
  994         int i;
  995 
  996         PMAP_LOCK_INIT(pm);
  997         for (i = 0; i < MAXCPU; i++)
  998                 pm->pm_context[i] = 0;
  999         pm->pm_active = 0;
 1000         pm->pm_tsb = NULL;
 1001         pm->pm_tsb_obj = NULL;
 1002         bzero(&pm->pm_stats, sizeof(pm->pm_stats));
 1003 }
 1004 
 1005 /*
 1006  * Initialize a preallocated and zeroed pmap structure, such as one in a
 1007  * vmspace structure.
 1008  */
/*
 * Returns 1 on success and 0 if KVA for the TSB could not be allocated.
 */
int
pmap_pinit(pmap_t pm)
{
	vm_page_t ma[TSB_PAGES];
	vm_page_t m;
	int i;

	PMAP_LOCK_INIT(pm);

	/*
	 * Allocate kva space for the tsb.
	 */
	if (pm->pm_tsb == NULL) {
		pm->pm_tsb = (struct tte *)kmem_alloc_nofault(kernel_map,
		    TSB_BSIZE);
		if (pm->pm_tsb == NULL) {
			PMAP_LOCK_DESTROY(pm);
			return (0);
		}
	}

	/*
	 * Allocate an object for it.
	 */
	if (pm->pm_tsb_obj == NULL)
		pm->pm_tsb_obj = vm_object_allocate(OBJT_DEFAULT, TSB_PAGES);

	/*
	 * Grab wired, zeroed pages to back the TSB and map them into the
	 * KVA reserved above.
	 */
	VM_OBJECT_LOCK(pm->pm_tsb_obj);
	for (i = 0; i < TSB_PAGES; i++) {
		m = vm_page_grab(pm->pm_tsb_obj, i, VM_ALLOC_NOBUSY |
		    VM_ALLOC_RETRY | VM_ALLOC_WIRED | VM_ALLOC_ZERO);
		m->valid = VM_PAGE_BITS_ALL;
		m->md.pmap = pm;
		ma[i] = m;
	}
	VM_OBJECT_UNLOCK(pm->pm_tsb_obj);
	pmap_qenter((vm_offset_t)pm->pm_tsb, ma, TSB_PAGES);

	/* -1 means no hardware context has been allocated on that CPU yet. */
	for (i = 0; i < MAXCPU; i++)
		pm->pm_context[i] = -1;
	pm->pm_active = 0;
	bzero(&pm->pm_stats, sizeof(pm->pm_stats));
	return (1);
}
 1053 
 1054 /*
 1055  * Release any resources held by the given physical map.
 1056  * Called when a pmap initialized by pmap_pinit is being released.
 1057  * Should only be called if the map contains no valid mappings.
 1058  */
void
pmap_release(pmap_t pm)
{
	vm_object_t obj;
	vm_page_t m;
	struct pcpu *pc;

	CTR2(KTR_PMAP, "pmap_release: ctx=%#x tsb=%p",
	    pm->pm_context[PCPU_GET(cpuid)], pm->pm_tsb);
	KASSERT(pmap_resident_count(pm) == 0,
	    ("pmap_release: resident pages %ld != 0",
	    pmap_resident_count(pm)));

	/*
	 * After the pmap was freed, it might be reallocated to a new process.
	 * When switching, this might lead us to wrongly assume that we need
	 * not switch contexts because old and new pmap pointer are equal.
	 * Therefore, make sure that this pmap is not referenced by any PCPU
	 * pointer any more. This could happen in two cases:
	 * - A process that referenced the pmap is currently exiting on a CPU.
	 *   However, it is guaranteed to not switch in any more after setting
	 *   its state to PRS_ZOMBIE.
	 * - A process that referenced this pmap ran on a CPU, but we switched
	 *   to a kernel thread, leaving the pmap pointer unchanged.
	 */
	mtx_lock_spin(&sched_lock);
	SLIST_FOREACH(pc, &cpuhead, pc_allcpu) {
		if (pc->pc_pmap == pm)
			pc->pc_pmap = NULL;
	}
	mtx_unlock_spin(&sched_lock);

	/*
	 * Free the wired pages backing the TSB.  vm_page_sleep_if_busy()
	 * drops the page queues lock when it sleeps, hence the continue to
	 * retry from the top with the lock reacquired.
	 */
	obj = pm->pm_tsb_obj;
	VM_OBJECT_LOCK(obj);
	KASSERT(obj->ref_count == 1, ("pmap_release: tsbobj ref count != 1"));
	while (!TAILQ_EMPTY(&obj->memq)) {
		m = TAILQ_FIRST(&obj->memq);
		vm_page_lock_queues();
		if (vm_page_sleep_if_busy(m, FALSE, "pmaprl"))
			continue;
		KASSERT(m->hold_count == 0,
		    ("pmap_release: freeing held tsb page"));
		m->md.pmap = NULL;
		/* Unwire manually; the pages were wired by vm_page_grab(). */
		m->wire_count--;
		atomic_subtract_int(&cnt.v_wire_count, 1);
		vm_page_free_zero(m);
		vm_page_unlock_queues();
	}
	VM_OBJECT_UNLOCK(obj);
	pmap_qremove((vm_offset_t)pm->pm_tsb, TSB_PAGES);
	PMAP_LOCK_DESTROY(pm);
}
 1111 
 1112 /*
 1113  * Grow the number of kernel page table entries.  Unneeded.
 1114  */
void
pmap_growkernel(vm_offset_t addr)
{

	/* The kernel address space does not grow on sparc64. */
	panic("pmap_growkernel: can't grow kernel");
}
 1121 
/*
 * Tear down the mapping described by tp at address va in pmap pm, updating
 * page and pmap statistics.  Used both directly and as a tsb_foreach()
 * callback; pm2 is unused and exists only to match the callback signature.
 * Returns 0 to stop a tsb_foreach() scan once the pmap has no resident
 * pages left, 1 to continue.  Must be called with the page queues mutex
 * held.
 */
int
pmap_remove_tte(struct pmap *pm, struct pmap *pm2, struct tte *tp,
		vm_offset_t va)
{
	vm_page_t m;
	u_long data;

	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
	/* Atomically invalidate the TTE and capture its final contents. */
	data = atomic_readandclear_long(&tp->tte_data);
	/* Fake (device) mappings have no backing vm_page to update. */
	if ((data & TD_FAKE) == 0) {
		m = PHYS_TO_VM_PAGE(TD_PA(data));
		TAILQ_REMOVE(&m->md.tte_list, tp, tte_link);
		if ((data & TD_WIRED) != 0)
			pm->pm_stats.wired_count--;
		if ((data & TD_PV) != 0) {
			/* Propagate modify/reference state to the page. */
			if ((data & TD_W) != 0)
				vm_page_dirty(m);
			if ((data & TD_REF) != 0)
				vm_page_flag_set(m, PG_REFERENCED);
			if (TAILQ_EMPTY(&m->md.tte_list))
				vm_page_flag_clear(m, PG_WRITEABLE);
			pm->pm_stats.resident_count--;
		}
		pmap_cache_remove(m, va);
	}
	TTE_ZERO(tp);
	if (PMAP_REMOVE_DONE(pm))
		return (0);
	return (1);
}
 1152 
 1153 /*
 1154  * Remove the given range of addresses from the specified map.
 1155  */
void
pmap_remove(pmap_t pm, vm_offset_t start, vm_offset_t end)
{
	struct tte *tp;
	vm_offset_t va;

	CTR3(KTR_PMAP, "pmap_remove: ctx=%#lx start=%#lx end=%#lx",
	    pm->pm_context[PCPU_GET(cpuid)], start, end);
	/* Nothing to do if the pmap has no resident pages. */
	if (PMAP_REMOVE_DONE(pm))
		return;
	vm_page_lock_queues();
	PMAP_LOCK(pm);
	/*
	 * For large ranges it is cheaper to walk the TSB than to probe it
	 * page by page; in that case the whole context is demapped instead
	 * of individual pages.
	 */
	if (end - start > PMAP_TSB_THRESH) {
		tsb_foreach(pm, NULL, start, end, pmap_remove_tte);
		tlb_context_demap(pm);
	} else {
		for (va = start; va < end; va += PAGE_SIZE) {
			if ((tp = tsb_tte_lookup(pm, va)) != NULL) {
				/* Stop early once the pmap is empty. */
				if (!pmap_remove_tte(pm, NULL, tp, va))
					break;
			}
		}
		tlb_range_demap(pm, start, end - 1);
	}
	PMAP_UNLOCK(pm);
	vm_page_unlock_queues();
}
 1183 
/*
 * Remove all managed (TD_PV) mappings of page m from every pmap.  Must be
 * called with the page queues mutex held.
 */
void
pmap_remove_all(vm_page_t m)
{
	struct pmap *pm;
	struct tte *tpn;
	struct tte *tp;
	vm_offset_t va;

	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
	/* tpn is saved up front because tp is unlinked inside the loop. */
	for (tp = TAILQ_FIRST(&m->md.tte_list); tp != NULL; tp = tpn) {
		tpn = TAILQ_NEXT(tp, tte_link);
		/* Skip unmanaged mappings (e.g. kernel wired ones). */
		if ((tp->tte_data & TD_PV) == 0)
			continue;
		pm = TTE_GET_PMAP(tp);
		va = TTE_GET_VA(tp);
		PMAP_LOCK(pm);
		if ((tp->tte_data & TD_WIRED) != 0)
			pm->pm_stats.wired_count--;
		/* Propagate reference/modify state to the page. */
		if ((tp->tte_data & TD_REF) != 0)
			vm_page_flag_set(m, PG_REFERENCED);
		if ((tp->tte_data & TD_W) != 0)
			vm_page_dirty(m);
		/* Invalidate before demapping so the TLB cannot reload it. */
		tp->tte_data &= ~TD_V;
		tlb_page_demap(pm, va);
		TAILQ_REMOVE(&m->md.tte_list, tp, tte_link);
		pm->pm_stats.resident_count--;
		pmap_cache_remove(m, va);
		TTE_ZERO(tp);
		PMAP_UNLOCK(pm);
	}
	vm_page_flag_clear(m, PG_WRITEABLE);
}
 1216 
/*
 * Write-protect the mapping described by tp, transferring its reference
 * and modify state to the page.  tsb_foreach() callback; pm and pm2 are
 * unused, and the return value of 1 means "continue the scan".
 */
int
pmap_protect_tte(struct pmap *pm, struct pmap *pm2, struct tte *tp,
		 vm_offset_t va)
{
	u_long data;
	vm_page_t m;

	/*
	 * data receives the pre-clear TTE contents (sparc64's atomic ops
	 * return the previous value), so the TD_REF/TD_W tests below see
	 * the state the mapping had before it was write-protected.
	 */
	data = atomic_clear_long(&tp->tte_data, TD_REF | TD_SW | TD_W);
	if ((data & TD_PV) != 0) {
		m = PHYS_TO_VM_PAGE(TD_PA(data));
		if ((data & TD_REF) != 0)
			vm_page_flag_set(m, PG_REFERENCED);
		if ((data & TD_W) != 0)
			vm_page_dirty(m);
	}
	return (1);
}
 1234 
 1235 /*
 1236  * Set the physical protection on the specified range of this map as requested.
 1237  */
void
pmap_protect(pmap_t pm, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot)
{
	vm_offset_t va;
	struct tte *tp;

	CTR4(KTR_PMAP, "pmap_protect: ctx=%#lx sva=%#lx eva=%#lx prot=%#lx",
	    pm->pm_context[PCPU_GET(cpuid)], sva, eva, prot);

	/* Removing read permission means removing the mappings entirely. */
	if ((prot & VM_PROT_READ) == VM_PROT_NONE) {
		pmap_remove(pm, sva, eva);
		return;
	}

	/* Write permission is retained, so there is nothing to revoke. */
	if (prot & VM_PROT_WRITE)
		return;

	vm_page_lock_queues();
	PMAP_LOCK(pm);
	/*
	 * As in pmap_remove(), walk the TSB for large ranges and probe
	 * per-page for small ones.
	 */
	if (eva - sva > PMAP_TSB_THRESH) {
		tsb_foreach(pm, NULL, sva, eva, pmap_protect_tte);
		tlb_context_demap(pm);
	} else {
		for (va = sva; va < eva; va += PAGE_SIZE) {
			if ((tp = tsb_tte_lookup(pm, va)) != NULL)
				pmap_protect_tte(pm, NULL, tp, va);
		}
		tlb_range_demap(pm, sva, eva - 1);
	}
	PMAP_UNLOCK(pm);
	vm_page_unlock_queues();
}
 1270 
 1271 /*
 1272  * Map the given physical page at the specified virtual address in the
 1273  * target pmap with the protection requested.  If specified the page
 1274  * will be wired down.
 1275  */
void
pmap_enter(pmap_t pm, vm_offset_t va, vm_page_t m, vm_prot_t prot,
	   boolean_t wired)
{

	/* Acquire the locks pmap_enter_locked() requires and delegate. */
	vm_page_lock_queues();
	PMAP_LOCK(pm);
	pmap_enter_locked(pm, va, m, prot, wired);
	vm_page_unlock_queues();
	PMAP_UNLOCK(pm);
}
 1287 
 1288 /*
 1289  * Map the given physical page at the specified virtual address in the
 1290  * target pmap with the protection requested.  If specified the page
 1291  * will be wired down.
 1292  *
 1293  * The page queues and pmap must be locked.
 1294  */
static void
pmap_enter_locked(pmap_t pm, vm_offset_t va, vm_page_t m, vm_prot_t prot,
    boolean_t wired)
{
	struct tte *tp;
	vm_paddr_t pa;
	u_long data;
	int i;

	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
	PMAP_LOCK_ASSERT(pm, MA_OWNED);
	PMAP_STATS_INC(pmap_nenter);
	pa = VM_PAGE_TO_PHYS(m);

	/*
	 * If this is a fake page from the device_pager, but it covers actual
	 * physical memory, convert to the real backing page.
	 */
	if ((m->flags & PG_FICTITIOUS) != 0) {
		for (i = 0; phys_avail[i + 1] != 0; i += 2) {
			if (pa >= phys_avail[i] && pa <= phys_avail[i + 1]) {
				m = PHYS_TO_VM_PAGE(pa);
				break;
			}
		}
	}

	CTR6(KTR_PMAP,
	    "pmap_enter: ctx=%p m=%p va=%#lx pa=%#lx prot=%#x wired=%d",
	    pm->pm_context[PCPU_GET(cpuid)], m, va, pa, prot, wired);

	/*
	 * If there is an existing mapping, and the physical address has not
	 * changed, it must be a protection or wiring change.
	 */
	if ((tp = tsb_tte_lookup(pm, va)) != NULL && TTE_GET_PA(tp) == pa) {
		CTR0(KTR_PMAP, "pmap_enter: update");
		PMAP_STATS_INC(pmap_nenter_update);

		/*
		 * Wiring change, just update stats.
		 */
		if (wired) {
			if ((tp->tte_data & TD_WIRED) == 0) {
				tp->tte_data |= TD_WIRED;
				pm->pm_stats.wired_count++;
			}
		} else {
			if ((tp->tte_data & TD_WIRED) != 0) {
				tp->tte_data &= ~TD_WIRED;
				pm->pm_stats.wired_count--;
			}
		}

		/*
		 * Save the old bits and clear the ones we're interested in.
		 */
		data = tp->tte_data;
		tp->tte_data &= ~(TD_EXEC | TD_SW | TD_W);

		/*
		 * If we're turning off write permissions, sense modify status
		 * from the saved bits.
		 */
		if ((prot & VM_PROT_WRITE) != 0) {
			tp->tte_data |= TD_SW;
			/*
			 * Wired mappings need no modify emulation, so set
			 * the writable bit immediately.
			 */
			if (wired) {
				tp->tte_data |= TD_W;
			}
			vm_page_flag_set(m, PG_WRITEABLE);
		} else if ((data & TD_W) != 0) {
			vm_page_dirty(m);
		}

		/*
		 * If we're turning on execute permissions, flush the icache.
		 */
		if ((prot & VM_PROT_EXECUTE) != 0) {
			if ((data & TD_EXEC) == 0) {
				icache_page_inval(pa);
			}
			tp->tte_data |= TD_EXEC;
		}

		/*
		 * Delete the old mapping.
		 */
		tlb_page_demap(pm, TTE_GET_VA(tp));
	} else {
		/*
		 * If there is an existing mapping, but it's for a different
		 * physical address, delete the old mapping.
		 */
		if (tp != NULL) {
			CTR0(KTR_PMAP, "pmap_enter: replace");
			PMAP_STATS_INC(pmap_nenter_replace);
			pmap_remove_tte(pm, NULL, tp, va);
			tlb_page_demap(pm, va);
		} else {
			CTR0(KTR_PMAP, "pmap_enter: new");
			PMAP_STATS_INC(pmap_nenter_new);
		}

		/*
		 * Now set up the data and install the new mapping.
		 */
		data = TD_V | TD_8K | TD_PA(pa);
		if (pm == kernel_pmap)
			data |= TD_P;
		if ((prot & VM_PROT_WRITE) != 0) {
			data |= TD_SW;
			vm_page_flag_set(m, PG_WRITEABLE);
		}
		if (prot & VM_PROT_EXECUTE) {
			data |= TD_EXEC;
			icache_page_inval(pa);
		}

		/*
		 * If it's wired, update stats.  We also don't need reference
		 * or modify tracking for wired mappings, so set the bits now.
		 */
		if (wired) {
			pm->pm_stats.wired_count++;
			data |= TD_REF | TD_WIRED;
			if ((prot & VM_PROT_WRITE) != 0)
				data |= TD_W;
		}

		tsb_tte_enter(pm, m, va, TS_8K, data);
	}
}
 1426 
 1427 /*
 1428  * Maps a sequence of resident pages belonging to the same object.
 1429  * The sequence begins with the given page m_start.  This page is
 1430  * mapped at the given virtual address start.  Each subsequent page is
 1431  * mapped at a virtual address that is offset from start by the same
 1432  * amount as the page is offset from m_start within the object.  The
 1433  * last page in the sequence is the page with the largest offset from
 1434  * m_start that can be mapped at a virtual address less than the given
 1435  * virtual address end.  Not every virtual page between start and end
 1436  * is mapped; only those for which a resident page exists with the
 1437  * corresponding offset from m_start are mapped.
 1438  */
void
pmap_enter_object(pmap_t pm, vm_offset_t start, vm_offset_t end,
    vm_page_t m_start, vm_prot_t prot)
{
	vm_page_t m;
	vm_pindex_t diff, psize;

	psize = atop(end - start);
	m = m_start;
	PMAP_LOCK(pm);
	/*
	 * Walk the object's resident page list from m_start, mapping each
	 * page at the VA matching its offset from m_start; read-only and
	 * unwired (write/wiring bits are stripped here).
	 */
	while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) {
		pmap_enter_locked(pm, start + ptoa(diff), m, prot &
		    (VM_PROT_READ | VM_PROT_EXECUTE), FALSE);
		m = TAILQ_NEXT(m, listq);
	}
	PMAP_UNLOCK(pm);
}
 1456 
/*
 * Enter a single page read-only and unwired; convenience wrapper around
 * pmap_enter_locked() used for prefaulting.
 */
void
pmap_enter_quick(pmap_t pm, vm_offset_t va, vm_page_t m, vm_prot_t prot)
{

	PMAP_LOCK(pm);
	pmap_enter_locked(pm, va, m, prot & (VM_PROT_READ | VM_PROT_EXECUTE),
	    FALSE);
	PMAP_UNLOCK(pm);
}
 1466 
/*
 * No-op on sparc64: device-object mappings are not pre-populated, only the
 * caller's invariants are asserted.
 */
void
pmap_object_init_pt(pmap_t pm, vm_offset_t addr, vm_object_t object,
		    vm_pindex_t pindex, vm_size_t size)
{

	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
	KASSERT(object->type == OBJT_DEVICE,
	    ("pmap_object_init_pt: non-device object"));
}
 1476 
 1477 /*
 1478  * Change the wiring attribute for a map/virtual-address pair.
 1479  * The mapping must already exist in the pmap.
 1480  */
 1481 void
 1482 pmap_change_wiring(pmap_t pm, vm_offset_t va, boolean_t wired)
 1483 {
 1484         struct tte *tp;
 1485         u_long data;
 1486 
 1487         PMAP_LOCK(pm);
 1488         if ((tp = tsb_tte_lookup(pm, va)) != NULL) {
 1489                 if (wired) {
 1490                         data = atomic_set_long(&tp->tte_data, TD_WIRED);
 1491                         if ((data & TD_WIRED) == 0)
 1492                                 pm->pm_stats.wired_count++;
 1493                 } else {
 1494                         data = atomic_clear_long(&tp->tte_data, TD_WIRED);
 1495                         if ((data & TD_WIRED) != 0)
 1496                                 pm->pm_stats.wired_count--;
 1497                 }
 1498         }
 1499         PMAP_UNLOCK(pm);
 1500 }
 1501 
/*
 * Copy the mapping described by tp into dst_pmap at the same VA, unless one
 * already exists there.  The copy is entered clean: PV, reference, software-
 * write, cacheable-virtual and modified bits are stripped so the destination
 * starts with fresh fault tracking.  tsb_foreach() callback; always returns
 * 1 to continue the scan.
 */
static int
pmap_copy_tte(pmap_t src_pmap, pmap_t dst_pmap, struct tte *tp, vm_offset_t va)
{
	vm_page_t m;
	u_long data;

	/* Fake (device) mappings are not copied. */
	if ((tp->tte_data & TD_FAKE) != 0)
		return (1);
	if (tsb_tte_lookup(dst_pmap, va) == NULL) {
		data = tp->tte_data &
		    ~(TD_PV | TD_REF | TD_SW | TD_CV | TD_W);
		m = PHYS_TO_VM_PAGE(TTE_GET_PA(tp));
		tsb_tte_enter(dst_pmap, m, va, TS_8K, data);
	}
	return (1);
}
 1518 
void
pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr,
	  vm_size_t len, vm_offset_t src_addr)
{
	struct tte *tp;
	vm_offset_t va;

	/* Only same-address copies (e.g. fork) are supported. */
	if (dst_addr != src_addr)
		return;
	vm_page_lock_queues();
	/*
	 * Lock the two pmaps in address order to avoid deadlock against a
	 * concurrent pmap_copy() locking them the other way around.
	 */
	if (dst_pmap < src_pmap) {
		PMAP_LOCK(dst_pmap);
		PMAP_LOCK(src_pmap);
	} else {
		PMAP_LOCK(src_pmap);
		PMAP_LOCK(dst_pmap);
	}
	/* Walk the TSB for large ranges, probe per-page for small ones. */
	if (len > PMAP_TSB_THRESH) {
		tsb_foreach(src_pmap, dst_pmap, src_addr, src_addr + len,
		    pmap_copy_tte);
		tlb_context_demap(dst_pmap);
	} else {
		for (va = src_addr; va < src_addr + len; va += PAGE_SIZE) {
			if ((tp = tsb_tte_lookup(src_pmap, va)) != NULL)
				pmap_copy_tte(src_pmap, dst_pmap, tp, va);
		}
		tlb_range_demap(dst_pmap, src_addr, src_addr + len - 1);
	}
	vm_page_unlock_queues();
	PMAP_UNLOCK(src_pmap);
	PMAP_UNLOCK(dst_pmap);
}
 1551 
/*
 * Zero page m, choosing the cheapest method compatible with its dcache
 * colour state:
 *  - non-cacheable page: zero through the physical ASI, bypassing the
 *    dcache entirely;
 *  - colour matches the direct map: block-zero via the direct mapping;
 *  - otherwise: set up a temporary kernel mapping of the right colour,
 *    zero through it, and demap it.
 */
void
pmap_zero_page(vm_page_t m)
{
	struct tte *tp;
	vm_offset_t va;
	vm_paddr_t pa;

	KASSERT((m->flags & PG_FICTITIOUS) == 0,
	    ("pmap_zero_page: fake page"));
	PMAP_STATS_INC(pmap_nzero_page);
	pa = VM_PAGE_TO_PHYS(m);
	if (m->md.color == -1) {
		PMAP_STATS_INC(pmap_nzero_page_nc);
		aszero(ASI_PHYS_USE_EC, pa, PAGE_SIZE);
	} else if (m->md.color == DCACHE_COLOR(pa)) {
		PMAP_STATS_INC(pmap_nzero_page_c);
		va = TLB_PHYS_TO_DIRECT(pa);
		cpu_block_zero((void *)va, PAGE_SIZE);
	} else {
		PMAP_STATS_INC(pmap_nzero_page_oc);
		/* The kernel pmap lock serializes use of pmap_temp_map_1. */
		PMAP_LOCK(kernel_pmap);
		va = pmap_temp_map_1 + (m->md.color * PAGE_SIZE);
		tp = tsb_kvtotte(va);
		tp->tte_data = TD_V | TD_8K | TD_PA(pa) | TD_CP | TD_CV | TD_W;
		tp->tte_vpn = TV_VPN(va, TS_8K);
		cpu_block_zero((void *)va, PAGE_SIZE);
		tlb_page_demap(kernel_pmap, va);
		PMAP_UNLOCK(kernel_pmap);
	}
}
 1582 
/*
 * Zero a sub-range [off, off + size) of page m.  Uses the same
 * colour-dependent strategy as pmap_zero_page(), with bzero() instead of
 * cpu_block_zero() since the range need not be block-aligned.
 */
void
pmap_zero_page_area(vm_page_t m, int off, int size)
{
	struct tte *tp;
	vm_offset_t va;
	vm_paddr_t pa;

	KASSERT((m->flags & PG_FICTITIOUS) == 0,
	    ("pmap_zero_page_area: fake page"));
	KASSERT(off + size <= PAGE_SIZE, ("pmap_zero_page_area: bad off/size"));
	PMAP_STATS_INC(pmap_nzero_page_area);
	pa = VM_PAGE_TO_PHYS(m);
	if (m->md.color == -1) {
		PMAP_STATS_INC(pmap_nzero_page_area_nc);
		aszero(ASI_PHYS_USE_EC, pa + off, size);
	} else if (m->md.color == DCACHE_COLOR(pa)) {
		PMAP_STATS_INC(pmap_nzero_page_area_c);
		va = TLB_PHYS_TO_DIRECT(pa);
		bzero((void *)(va + off), size);
	} else {
		PMAP_STATS_INC(pmap_nzero_page_area_oc);
		/* The kernel pmap lock serializes use of pmap_temp_map_1. */
		PMAP_LOCK(kernel_pmap);
		va = pmap_temp_map_1 + (m->md.color * PAGE_SIZE);
		tp = tsb_kvtotte(va);
		tp->tte_data = TD_V | TD_8K | TD_PA(pa) | TD_CP | TD_CV | TD_W;
		tp->tte_vpn = TV_VPN(va, TS_8K);
		bzero((void *)(va + off), size);
		tlb_page_demap(kernel_pmap, va);
		PMAP_UNLOCK(kernel_pmap);
	}
}
 1614 
/*
 * Zero a page like pmap_zero_page(), but use the dedicated pmap_idle_map
 * window for the off-color case instead of pmap_temp_map_1.
 *
 * NOTE(review): unlike pmap_zero_page(), no pmap lock is taken around the
 * temporary mapping here -- presumably the calling context guarantees
 * exclusive use of pmap_idle_map; confirm against callers.
 */
void
pmap_zero_page_idle(vm_page_t m)
{
        struct tte *tp;
        vm_offset_t va;
        vm_paddr_t pa;

        KASSERT((m->flags & PG_FICTITIOUS) == 0,
            ("pmap_zero_page_idle: fake page"));
        PMAP_STATS_INC(pmap_nzero_page_idle);
        pa = VM_PAGE_TO_PHYS(m);
        if (m->md.color == -1) {
                /* No virtual color recorded; zero through the physical ASI. */
                PMAP_STATS_INC(pmap_nzero_page_idle_nc);
                aszero(ASI_PHYS_USE_EC, pa, PAGE_SIZE);
        } else if (m->md.color == DCACHE_COLOR(pa)) {
                /* Color matches the direct map; zero through it. */
                PMAP_STATS_INC(pmap_nzero_page_idle_c);
                va = TLB_PHYS_TO_DIRECT(pa);
                cpu_block_zero((void *)va, PAGE_SIZE);
        } else {
                /* Colors differ: zero through a colored idle-map window. */
                PMAP_STATS_INC(pmap_nzero_page_idle_oc);
                va = pmap_idle_map + (m->md.color * PAGE_SIZE);
                tp = tsb_kvtotte(va);
                tp->tte_data = TD_V | TD_8K | TD_PA(pa) | TD_CP | TD_CV | TD_W;
                tp->tte_vpn = TV_VPN(va, TS_8K);
                cpu_block_zero((void *)va, PAGE_SIZE);
                tlb_page_demap(kernel_pmap, va);
        }
}
 1643 
/*
 * Copy one page to another, selecting for each side the access method
 * (physical ASI, direct map, or a colored temporary mapping) that is
 * consistent with that page's recorded virtual cache color.
 */
void
pmap_copy_page(vm_page_t msrc, vm_page_t mdst)
{
        vm_offset_t vdst;
        vm_offset_t vsrc;
        vm_paddr_t pdst;
        vm_paddr_t psrc;
        struct tte *tp;

        KASSERT((mdst->flags & PG_FICTITIOUS) == 0,
            ("pmap_copy_page: fake dst page"));
        KASSERT((msrc->flags & PG_FICTITIOUS) == 0,
            ("pmap_copy_page: fake src page"));
        PMAP_STATS_INC(pmap_ncopy_page);
        pdst = VM_PAGE_TO_PHYS(mdst);
        psrc = VM_PAGE_TO_PHYS(msrc);
        if (msrc->md.color == -1 && mdst->md.color == -1) {
                /* Neither page has a virtual color; copy physically. */
                PMAP_STATS_INC(pmap_ncopy_page_nc);
                ascopy(ASI_PHYS_USE_EC, psrc, pdst, PAGE_SIZE);
        } else if (msrc->md.color == DCACHE_COLOR(psrc) &&
            mdst->md.color == DCACHE_COLOR(pdst)) {
                /* Both colors match the direct map; copy through it. */
                PMAP_STATS_INC(pmap_ncopy_page_c);
                vdst = TLB_PHYS_TO_DIRECT(pdst);
                vsrc = TLB_PHYS_TO_DIRECT(psrc);
                cpu_block_copy((void *)vsrc, (void *)vdst, PAGE_SIZE);
        } else if (msrc->md.color == -1) {
                /* Uncolored source: read physically, write virtually. */
                if (mdst->md.color == DCACHE_COLOR(pdst)) {
                        PMAP_STATS_INC(pmap_ncopy_page_dc);
                        vdst = TLB_PHYS_TO_DIRECT(pdst);
                        ascopyfrom(ASI_PHYS_USE_EC, psrc, (void *)vdst,
                            PAGE_SIZE);
                } else {
                        /* Destination needs a colored temporary mapping. */
                        PMAP_STATS_INC(pmap_ncopy_page_doc);
                        PMAP_LOCK(kernel_pmap);
                        vdst = pmap_temp_map_1 + (mdst->md.color * PAGE_SIZE);
                        tp = tsb_kvtotte(vdst);
                        tp->tte_data =
                            TD_V | TD_8K | TD_PA(pdst) | TD_CP | TD_CV | TD_W;
                        tp->tte_vpn = TV_VPN(vdst, TS_8K);
                        ascopyfrom(ASI_PHYS_USE_EC, psrc, (void *)vdst,
                            PAGE_SIZE);
                        tlb_page_demap(kernel_pmap, vdst);
                        PMAP_UNLOCK(kernel_pmap);
                }
        } else if (mdst->md.color == -1) {
                /* Uncolored destination: read virtually, write physically. */
                if (msrc->md.color == DCACHE_COLOR(psrc)) {
                        PMAP_STATS_INC(pmap_ncopy_page_sc);
                        vsrc = TLB_PHYS_TO_DIRECT(psrc);
                        ascopyto((void *)vsrc, ASI_PHYS_USE_EC, pdst,
                            PAGE_SIZE);
                } else {
                        /* Source needs a colored temporary mapping. */
                        PMAP_STATS_INC(pmap_ncopy_page_soc);
                        PMAP_LOCK(kernel_pmap);
                        vsrc = pmap_temp_map_1 + (msrc->md.color * PAGE_SIZE);
                        tp = tsb_kvtotte(vsrc);
                        tp->tte_data =
                            TD_V | TD_8K | TD_PA(psrc) | TD_CP | TD_CV | TD_W;
                        tp->tte_vpn = TV_VPN(vsrc, TS_8K);
                        ascopyto((void *)vsrc, ASI_PHYS_USE_EC, pdst,
                            PAGE_SIZE);
                        tlb_page_demap(kernel_pmap, vsrc);
                        PMAP_UNLOCK(kernel_pmap);
                }
        } else {
                /*
                 * Both pages are colored but at least one color differs
                 * from the direct map: map both through separate colored
                 * temporary windows and block-copy between them.
                 */
                PMAP_STATS_INC(pmap_ncopy_page_oc);
                PMAP_LOCK(kernel_pmap);
                vdst = pmap_temp_map_1 + (mdst->md.color * PAGE_SIZE);
                tp = tsb_kvtotte(vdst);
                tp->tte_data =
                    TD_V | TD_8K | TD_PA(pdst) | TD_CP | TD_CV | TD_W;
                tp->tte_vpn = TV_VPN(vdst, TS_8K);
                vsrc = pmap_temp_map_2 + (msrc->md.color * PAGE_SIZE);
                tp = tsb_kvtotte(vsrc);
                tp->tte_data =
                    TD_V | TD_8K | TD_PA(psrc) | TD_CP | TD_CV | TD_W;
                tp->tte_vpn = TV_VPN(vsrc, TS_8K);
                cpu_block_copy((void *)vsrc, (void *)vdst, PAGE_SIZE);
                tlb_page_demap(kernel_pmap, vdst);
                tlb_page_demap(kernel_pmap, vsrc);
                PMAP_UNLOCK(kernel_pmap);
        }
}
 1726 
 1727 /*
 1728  * Returns true if the pmap's pv is one of the first
 1729  * 16 pvs linked to from this page.  This count may
 1730  * be changed upwards or downwards in the future; it
 1731  * is only necessary that true be returned for a small
 1732  * subset of pmaps for proper page aging.
 1733  */
 1734 boolean_t
 1735 pmap_page_exists_quick(pmap_t pm, vm_page_t m)
 1736 {
 1737         struct tte *tp;
 1738         int loops;
 1739 
 1740         mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 1741         if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0)
 1742                 return (FALSE);
 1743         loops = 0;
 1744         TAILQ_FOREACH(tp, &m->md.tte_list, tte_link) {
 1745                 if ((tp->tte_data & TD_PV) == 0)
 1746                         continue;
 1747                 if (TTE_GET_PMAP(tp) == pm)
 1748                         return (TRUE);
 1749                 if (++loops >= 16)
 1750                         break;
 1751         }
 1752         return (FALSE);
 1753 }
 1754 
 1755 /*
 1756  * Remove all pages from specified address space, this aids process exit
 * speeds.  This is much faster than pmap_remove in the case of running down
 1758  * an entire address space.  Only works for the current pmap.
 1759  */
void
pmap_remove_pages(pmap_t pm)
{
        /* Intentionally empty on this architecture. */
}
 1764 
 1765 /*
 1766  * Returns TRUE if the given page has a managed mapping.
 1767  */
 1768 boolean_t
 1769 pmap_page_is_mapped(vm_page_t m)
 1770 {
 1771         struct tte *tp;
 1772 
 1773         if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0)
 1774                 return (FALSE);
 1775         mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 1776         TAILQ_FOREACH(tp, &m->md.tte_list, tte_link) {
 1777                 if ((tp->tte_data & TD_PV) != 0)
 1778                         return (TRUE);
 1779         }
 1780         return (FALSE);
 1781 }
 1782 
 1783 /*
 1784  *      pmap_ts_referenced:
 1785  *
 1786  *      Return a count of reference bits for a page, clearing those bits.
 1787  *      It is not necessary for every reference bit to be cleared, but it
 1788  *      is necessary that 0 only be returned when there are truly no
 1789  *      reference bits set.
 1790  *
 1791  *      XXX: The exact number of bits to check and clear is a matter that
 1792  *      should be tested and standardized at some point in the future for
 1793  *      optimal aging of shared pages.
 1794  */
 1795 
int
pmap_ts_referenced(vm_page_t m)
{
        struct tte *tpf;
        struct tte *tpn;
        struct tte *tp;
        u_long data;
        int count;

        mtx_assert(&vm_page_queue_mtx, MA_OWNED);
        /* Unmanaged and fictitious pages carry no reference information. */
        if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0)
                return (0);
        count = 0;
        if ((tp = TAILQ_FIRST(&m->md.tte_list)) != NULL) {
                /* Remember the starting TTE so one full pass terminates. */
                tpf = tp;
                do {
                        /* Save the successor before rotating tp to the tail. */
                        tpn = TAILQ_NEXT(tp, tte_link);
                        /*
                         * Move each examined TTE to the tail of the list so
                         * that a later call starts with mappings that were
                         * not examined this time.
                         */
                        TAILQ_REMOVE(&m->md.tte_list, tp, tte_link);
                        TAILQ_INSERT_TAIL(&m->md.tte_list, tp, tte_link);
                        if ((tp->tte_data & TD_PV) == 0)
                                continue;
                        /* Atomically test-and-clear the reference bit. */
                        data = atomic_clear_long(&tp->tte_data, TD_REF);
                        /* Stop early once more than 4 referenced TTEs seen. */
                        if ((data & TD_REF) != 0 && ++count > 4)
                                break;
                } while ((tp = tpn) != NULL && tp != tpf);
        }
        return (count);
}
 1824 
 1825 boolean_t
 1826 pmap_is_modified(vm_page_t m)
 1827 {
 1828         struct tte *tp;
 1829 
 1830         mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 1831         if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0)
 1832                 return (FALSE);
 1833         TAILQ_FOREACH(tp, &m->md.tte_list, tte_link) {
 1834                 if ((tp->tte_data & TD_PV) == 0)
 1835                         continue;
 1836                 if ((tp->tte_data & TD_W) != 0)
 1837                         return (TRUE);
 1838         }
 1839         return (FALSE);
 1840 }
 1841 
 1842 /*
 1843  *      pmap_is_prefaultable:
 1844  *
 *      Return whether or not the specified virtual address is eligible
 1846  *      for prefault.
 1847  */
boolean_t
pmap_is_prefaultable(pmap_t pmap, vm_offset_t addr)
{

        /* Prefaulting is not implemented here; no address is eligible. */
        return (FALSE);
}
 1854 
 1855 void
 1856 pmap_clear_modify(vm_page_t m)
 1857 {
 1858         struct tte *tp;
 1859         u_long data;
 1860 
 1861         mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 1862         if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0)
 1863                 return;
 1864         TAILQ_FOREACH(tp, &m->md.tte_list, tte_link) {
 1865                 if ((tp->tte_data & TD_PV) == 0)
 1866                         continue;
 1867                 data = atomic_clear_long(&tp->tte_data, TD_W);
 1868                 if ((data & TD_W) != 0)
 1869                         tlb_page_demap(TTE_GET_PMAP(tp), TTE_GET_VA(tp));
 1870         }
 1871 }
 1872 
 1873 void
 1874 pmap_clear_reference(vm_page_t m)
 1875 {
 1876         struct tte *tp;
 1877         u_long data;
 1878 
 1879         mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 1880         if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0)
 1881                 return;
 1882         TAILQ_FOREACH(tp, &m->md.tte_list, tte_link) {
 1883                 if ((tp->tte_data & TD_PV) == 0)
 1884                         continue;
 1885                 data = atomic_clear_long(&tp->tte_data, TD_REF);
 1886                 if ((data & TD_REF) != 0)
 1887                         tlb_page_demap(TTE_GET_PMAP(tp), TTE_GET_VA(tp));
 1888         }
 1889 }
 1890 
 1891 void
 1892 pmap_remove_write(vm_page_t m)
 1893 {
 1894         struct tte *tp;
 1895         u_long data;
 1896 
 1897         mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 1898         if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0 ||
 1899             (m->flags & PG_WRITEABLE) == 0)
 1900                 return;
 1901         TAILQ_FOREACH(tp, &m->md.tte_list, tte_link) {
 1902                 if ((tp->tte_data & TD_PV) == 0)
 1903                         continue;
 1904                 data = atomic_clear_long(&tp->tte_data, TD_SW | TD_W);
 1905                 if ((data & TD_W) != 0) {
 1906                         vm_page_dirty(m);
 1907                         tlb_page_demap(TTE_GET_PMAP(tp), TTE_GET_VA(tp));
 1908                 }
 1909         }
 1910         vm_page_flag_clear(m, PG_WRITEABLE);
 1911 }
 1912 
/*
 * pmap_mincore() is unimplemented here; always report 0 (no status
 * information) for the given address.
 */
int
pmap_mincore(pmap_t pm, vm_offset_t addr)
{
        /* TODO; */
        return (0);
}
 1919 
 1920 /*
 1921  * Activate a user pmap.  The pmap must be activated before its address space
 1922  * can be accessed in any way.
 1923  */
void
pmap_activate(struct thread *td)
{
        struct vmspace *vm;
        struct pmap *pm;
        int context;

        vm = td->td_proc->p_vmspace;
        pm = vmspace_pmap(vm);

        mtx_lock_spin(&sched_lock);

        /*
         * Hand out the next TLB context number for this CPU.  When the
         * counter reaches tlb_ctx_max, flush all user TLB entries and
         * restart allocation from tlb_ctx_min.
         */
        context = PCPU_GET(tlb_ctx);
        if (context == PCPU_GET(tlb_ctx_max)) {
                tlb_flush_user();
                context = PCPU_GET(tlb_ctx_min);
        }
        PCPU_SET(tlb_ctx, context + 1);

        /* Record the context and mark this CPU active in the pmap. */
        pm->pm_context[PCPU_GET(cpuid)] = context;
        pm->pm_active |= PCPU_GET(cpumask);
        PCPU_SET(pmap, pm);

        /*
         * Point both MMUs at the pmap's TSB and load the new context
         * into the DMMU primary context register; membar orders the
         * register writes before any subsequent access.
         */
        stxa(AA_DMMU_TSB, ASI_DMMU, pm->pm_tsb);
        stxa(AA_IMMU_TSB, ASI_IMMU, pm->pm_tsb);
        stxa(AA_DMMU_PCXR, ASI_DMMU, context);
        membar(Sync);

        mtx_unlock_spin(&sched_lock);
}
 1954 
/*
 * No machine-dependent mapping placement preference: return the
 * requested virtual address unchanged.
 */
vm_offset_t
pmap_addr_hint(vm_object_t object, vm_offset_t va, vm_size_t size)
{

        return (va);
}

Cache object: 312fe4010c1b21d85257ff7cf5583b1c


[ source navigation ] [ diff markup ] [ identifier search ] [ freetext search ] [ file search ] [ list types ] [ track identifier ]


This page is part of the FreeBSD/Linux Linux Kernel Cross-Reference, and was automatically generated using a modified version of the LXR engine.