FreeBSD/Linux Kernel Cross Reference
sys/powerpc/powerpc/pmap_dispatch.c

/*-
 * Copyright (c) 2005 Peter Grehan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: releng/10.3/sys/powerpc/powerpc/pmap_dispatch.c 270920 2014-09-01 07:58:15Z kib $");

/*
 * Dispatch MI pmap calls to the appropriate MMU implementation
 * through a previously registered kernel object.
 *
 * Before pmap_bootstrap() can be called, a CPU module must have
 * called pmap_mmu_install(). This may be called multiple times:
 * the highest priority call will be installed as the default
 * MMU handler when pmap_bootstrap() is called.
 *
 * It is required that mutex_init() be called before pmap_bootstrap(),
 * as the PMAP layer makes extensive use of mutexes.
 */
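/*
 * Illustrative sketch (hypothetical values, not part of this file):
 * a CPU module registers its implementation with the MMU_DEF() macro
 * from <machine/mmuvar.h>, which places an mmu_def_t in the mmu_set
 * linker set, and platform code then selects it by name before
 * bootstrap, e.g.:
 *
 *	pmap_mmu_install("mmu_oea", BUS_PROBE_GENERIC);
 *
 * The name string and priority shown here are placeholders; the real
 * values come from the CPU module and platform code in use.
 */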

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/ktr.h>
#include <sys/mutex.h>
#include <sys/systm.h>

#include <vm/vm.h>
#include <vm/vm_page.h>

#include <machine/mmuvar.h>
#include <machine/smp.h>

#include "mmu_if.h"

static mmu_def_t        *mmu_def_impl;
static mmu_t            mmu_obj;
static struct mmu_kobj  mmu_kernel_obj;
static struct kobj_ops  mmu_kernel_kops;

/*
 * pmap globals
 */
struct pmap kernel_pmap_store;

struct msgbuf *msgbufp;
vm_offset_t    msgbuf_phys;

vm_offset_t kernel_vm_end;
vm_offset_t phys_avail[PHYS_AVAIL_SZ];
vm_offset_t virtual_avail;
vm_offset_t virtual_end;

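/*
 * Descriptive note: nonzero once the selected MMU implementation has
 * completed its bootstrap, so early code can tell whether the pmap
 * layer is usable yet.
 */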
int pmap_bootstrapped;

#ifdef AIM
int
pvo_vaddr_compare(struct pvo_entry *a, struct pvo_entry *b)
{
        if (PVO_VADDR(a) < PVO_VADDR(b))
                return (-1);
        else if (PVO_VADDR(a) > PVO_VADDR(b))
                return (1);
        return (0);
}
RB_GENERATE(pvo_tree, pvo_entry, pvo_plink, pvo_vaddr_compare);
#endif
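/*
 * Descriptive note: RB_GENERATE() above comes from <sys/tree.h>; it
 * emits the red-black tree routines for the pvo_tree type, keyed on
 * virtual address by pvo_vaddr_compare().
 */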

void
pmap_advise(pmap_t pmap, vm_offset_t start, vm_offset_t end, int advice)
{

        CTR5(KTR_PMAP, "%s(%p, %#x, %#x, %d)", __func__, pmap, start, end,
            advice);
        MMU_ADVISE(mmu_obj, pmap, start, end, advice);
}

void
pmap_clear_modify(vm_page_t m)
{

        CTR2(KTR_PMAP, "%s(%p)", __func__, m);
        MMU_CLEAR_MODIFY(mmu_obj, m);
}

void
pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr,
    vm_size_t len, vm_offset_t src_addr)
{

        CTR6(KTR_PMAP, "%s(%p, %p, %#x, %#x, %#x)", __func__, dst_pmap,
            src_pmap, dst_addr, len, src_addr);
        MMU_COPY(mmu_obj, dst_pmap, src_pmap, dst_addr, len, src_addr);
}

void
pmap_copy_page(vm_page_t src, vm_page_t dst)
{

        CTR3(KTR_PMAP, "%s(%p, %p)", __func__, src, dst);
        MMU_COPY_PAGE(mmu_obj, src, dst);
}

void
pmap_copy_pages(vm_page_t ma[], vm_offset_t a_offset, vm_page_t mb[],
    vm_offset_t b_offset, int xfersize)
{

        CTR6(KTR_PMAP, "%s(%p, %#x, %p, %#x, %#x)", __func__, ma,
            a_offset, mb, b_offset, xfersize);
        MMU_COPY_PAGES(mmu_obj, ma, a_offset, mb, b_offset, xfersize);
}

int
pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t p, vm_prot_t prot,
    u_int flags, int8_t psind)
{

        CTR6(KTR_PMAP, "pmap_enter(%p, %#x, %p, %#x, %x, %d)", pmap, va,
            p, prot, flags, psind);
        return (MMU_ENTER(mmu_obj, pmap, va, p, prot, flags, psind));
}

void
pmap_enter_object(pmap_t pmap, vm_offset_t start, vm_offset_t end,
    vm_page_t m_start, vm_prot_t prot)
{

        CTR6(KTR_PMAP, "%s(%p, %#x, %#x, %p, %#x)", __func__, pmap, start,
            end, m_start, prot);
        MMU_ENTER_OBJECT(mmu_obj, pmap, start, end, m_start, prot);
}

void
pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot)
{

        CTR5(KTR_PMAP, "%s(%p, %#x, %p, %#x)", __func__, pmap, va, m, prot);
        MMU_ENTER_QUICK(mmu_obj, pmap, va, m, prot);
}

vm_paddr_t
pmap_extract(pmap_t pmap, vm_offset_t va)
{

        CTR3(KTR_PMAP, "%s(%p, %#x)", __func__, pmap, va);
        return (MMU_EXTRACT(mmu_obj, pmap, va));
}

vm_page_t
pmap_extract_and_hold(pmap_t pmap, vm_offset_t va, vm_prot_t prot)
{

        CTR4(KTR_PMAP, "%s(%p, %#x, %#x)", __func__, pmap, va, prot);
        return (MMU_EXTRACT_AND_HOLD(mmu_obj, pmap, va, prot));
}

void
pmap_growkernel(vm_offset_t va)
{

        CTR2(KTR_PMAP, "%s(%#x)", __func__, va);
        MMU_GROWKERNEL(mmu_obj, va);
}

void
pmap_init(void)
{

        CTR1(KTR_PMAP, "%s()", __func__);
        MMU_INIT(mmu_obj);
}

boolean_t
pmap_is_modified(vm_page_t m)
{

        CTR2(KTR_PMAP, "%s(%p)", __func__, m);
        return (MMU_IS_MODIFIED(mmu_obj, m));
}

boolean_t
pmap_is_prefaultable(pmap_t pmap, vm_offset_t va)
{

        CTR3(KTR_PMAP, "%s(%p, %#x)", __func__, pmap, va);
        return (MMU_IS_PREFAULTABLE(mmu_obj, pmap, va));
}

boolean_t
pmap_is_referenced(vm_page_t m)
{

        CTR2(KTR_PMAP, "%s(%p)", __func__, m);
        return (MMU_IS_REFERENCED(mmu_obj, m));
}

int
pmap_ts_referenced(vm_page_t m)
{

        CTR2(KTR_PMAP, "%s(%p)", __func__, m);
        return (MMU_TS_REFERENCED(mmu_obj, m));
}

vm_offset_t
pmap_map(vm_offset_t *virt, vm_paddr_t start, vm_paddr_t end, int prot)
{

        CTR5(KTR_PMAP, "%s(%p, %#x, %#x, %#x)", __func__, virt, start, end,
            prot);
        return (MMU_MAP(mmu_obj, virt, start, end, prot));
}

void
pmap_object_init_pt(pmap_t pmap, vm_offset_t addr, vm_object_t object,
    vm_pindex_t pindex, vm_size_t size)
{

        CTR6(KTR_PMAP, "%s(%p, %#x, %p, %u, %#x)", __func__, pmap, addr,
            object, pindex, size);
        MMU_OBJECT_INIT_PT(mmu_obj, pmap, addr, object, pindex, size);
}

boolean_t
pmap_page_exists_quick(pmap_t pmap, vm_page_t m)
{

        CTR3(KTR_PMAP, "%s(%p, %p)", __func__, pmap, m);
        return (MMU_PAGE_EXISTS_QUICK(mmu_obj, pmap, m));
}

void
pmap_page_init(vm_page_t m)
{

        CTR2(KTR_PMAP, "%s(%p)", __func__, m);
        MMU_PAGE_INIT(mmu_obj, m);
}

int
pmap_page_wired_mappings(vm_page_t m)
{

        CTR2(KTR_PMAP, "%s(%p)", __func__, m);
        return (MMU_PAGE_WIRED_MAPPINGS(mmu_obj, m));
}

int
pmap_pinit(pmap_t pmap)
{

        CTR2(KTR_PMAP, "%s(%p)", __func__, pmap);
        MMU_PINIT(mmu_obj, pmap);
        return (1);
}

void
pmap_pinit0(pmap_t pmap)
{

        CTR2(KTR_PMAP, "%s(%p)", __func__, pmap);
        MMU_PINIT0(mmu_obj, pmap);
}

void
pmap_protect(pmap_t pmap, vm_offset_t start, vm_offset_t end, vm_prot_t prot)
{

        CTR5(KTR_PMAP, "%s(%p, %#x, %#x, %#x)", __func__, pmap, start, end,
            prot);
        MMU_PROTECT(mmu_obj, pmap, start, end, prot);
}

void
pmap_qenter(vm_offset_t start, vm_page_t *m, int count)
{

        CTR4(KTR_PMAP, "%s(%#x, %p, %d)", __func__, start, m, count);
        MMU_QENTER(mmu_obj, start, m, count);
}

void
pmap_qremove(vm_offset_t start, int count)
{

        CTR3(KTR_PMAP, "%s(%#x, %d)", __func__, start, count);
        MMU_QREMOVE(mmu_obj, start, count);
}

void
pmap_release(pmap_t pmap)
{

        CTR2(KTR_PMAP, "%s(%p)", __func__, pmap);
        MMU_RELEASE(mmu_obj, pmap);
}

void
pmap_remove(pmap_t pmap, vm_offset_t start, vm_offset_t end)
{

        CTR4(KTR_PMAP, "%s(%p, %#x, %#x)", __func__, pmap, start, end);
        MMU_REMOVE(mmu_obj, pmap, start, end);
}

void
pmap_remove_all(vm_page_t m)
{

        CTR2(KTR_PMAP, "%s(%p)", __func__, m);
        MMU_REMOVE_ALL(mmu_obj, m);
}

void
pmap_remove_pages(pmap_t pmap)
{

        CTR2(KTR_PMAP, "%s(%p)", __func__, pmap);
        MMU_REMOVE_PAGES(mmu_obj, pmap);
}

void
pmap_remove_write(vm_page_t m)
{

        CTR2(KTR_PMAP, "%s(%p)", __func__, m);
        MMU_REMOVE_WRITE(mmu_obj, m);
}

void
pmap_unwire(pmap_t pmap, vm_offset_t start, vm_offset_t end)
{

        CTR4(KTR_PMAP, "%s(%p, %#x, %#x)", __func__, pmap, start, end);
        MMU_UNWIRE(mmu_obj, pmap, start, end);
}

void
pmap_zero_page(vm_page_t m)
{

        CTR2(KTR_PMAP, "%s(%p)", __func__, m);
        MMU_ZERO_PAGE(mmu_obj, m);
}

void
pmap_zero_page_area(vm_page_t m, int off, int size)
{

        CTR4(KTR_PMAP, "%s(%p, %d, %d)", __func__, m, off, size);
        MMU_ZERO_PAGE_AREA(mmu_obj, m, off, size);
}

void
pmap_zero_page_idle(vm_page_t m)
{

        CTR2(KTR_PMAP, "%s(%p)", __func__, m);
        MMU_ZERO_PAGE_IDLE(mmu_obj, m);
}

int
pmap_mincore(pmap_t pmap, vm_offset_t addr, vm_paddr_t *locked_pa)
{

        CTR3(KTR_PMAP, "%s(%p, %#x)", __func__, pmap, addr);
        return (MMU_MINCORE(mmu_obj, pmap, addr, locked_pa));
}

void
pmap_activate(struct thread *td)
{

        CTR2(KTR_PMAP, "%s(%p)", __func__, td);
        MMU_ACTIVATE(mmu_obj, td);
}

void
pmap_deactivate(struct thread *td)
{

        CTR2(KTR_PMAP, "%s(%p)", __func__, td);
        MMU_DEACTIVATE(mmu_obj, td);
}

/*
 *      Increase the starting virtual address of the given mapping if a
 *      different alignment might result in more superpage mappings.
 */
void
pmap_align_superpage(vm_object_t object, vm_ooffset_t offset,
    vm_offset_t *addr, vm_size_t size)
{

        CTR5(KTR_PMAP, "%s(%p, %#x, %p, %#x)", __func__, object, offset, addr,
            size);
        MMU_ALIGN_SUPERPAGE(mmu_obj, object, offset, addr, size);
}

/*
 * Routines used in machine-dependent code
 */
void
pmap_bootstrap(vm_offset_t start, vm_offset_t end)
{
        mmu_obj = &mmu_kernel_obj;

        /*
         * Take care of compiling the selected class, and
         * then statically initialise the MMU object
         */
        kobj_class_compile_static(mmu_def_impl, &mmu_kernel_kops);
        kobj_init_static((kobj_t)mmu_obj, mmu_def_impl);

        MMU_BOOTSTRAP(mmu_obj, start, end);
}
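/*
 * Descriptive note: once the static kobj initialisation above has run,
 * each MMU_*() macro (generated into mmu_if.h from mmu_if.m at build
 * time) performs a kobj method lookup on mmu_obj, so every pmap_*()
 * wrapper in this file lands in the handler of the implementation
 * selected by pmap_mmu_install().
 */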

void
pmap_cpu_bootstrap(int ap)
{
        /*
         * No KTR here because our console probably doesn't work yet
         */

        return (MMU_CPU_BOOTSTRAP(mmu_obj, ap));
}

void *
pmap_mapdev(vm_paddr_t pa, vm_size_t size)
{

        CTR3(KTR_PMAP, "%s(%#x, %#x)", __func__, pa, size);
        return (MMU_MAPDEV(mmu_obj, pa, size));
}

void *
pmap_mapdev_attr(vm_offset_t pa, vm_size_t size, vm_memattr_t attr)
{

        CTR4(KTR_PMAP, "%s(%#x, %#x, %#x)", __func__, pa, size, attr);
        return (MMU_MAPDEV_ATTR(mmu_obj, pa, size, attr));
}

void
pmap_page_set_memattr(vm_page_t m, vm_memattr_t ma)
{

        CTR3(KTR_PMAP, "%s(%p, %#x)", __func__, m, ma);
        return (MMU_PAGE_SET_MEMATTR(mmu_obj, m, ma));
}

void
pmap_unmapdev(vm_offset_t va, vm_size_t size)
{

        CTR3(KTR_PMAP, "%s(%#x, %#x)", __func__, va, size);
        MMU_UNMAPDEV(mmu_obj, va, size);
}

vm_paddr_t
pmap_kextract(vm_offset_t va)
{

        CTR2(KTR_PMAP, "%s(%#x)", __func__, va);
        return (MMU_KEXTRACT(mmu_obj, va));
}

void
pmap_kenter(vm_offset_t va, vm_paddr_t pa)
{

        CTR3(KTR_PMAP, "%s(%#x, %#x)", __func__, va, pa);
        MMU_KENTER(mmu_obj, va, pa);
}

void
pmap_kenter_attr(vm_offset_t va, vm_offset_t pa, vm_memattr_t ma)
{

        CTR4(KTR_PMAP, "%s(%#x, %#x, %#x)", __func__, va, pa, ma);
        MMU_KENTER_ATTR(mmu_obj, va, pa, ma);
}

boolean_t
pmap_dev_direct_mapped(vm_paddr_t pa, vm_size_t size)
{

        CTR3(KTR_PMAP, "%s(%#x, %#x)", __func__, pa, size);
        return (MMU_DEV_DIRECT_MAPPED(mmu_obj, pa, size));
}

void
pmap_sync_icache(pmap_t pm, vm_offset_t va, vm_size_t sz)
{

        CTR4(KTR_PMAP, "%s(%p, %#x, %#x)", __func__, pm, va, sz);
        return (MMU_SYNC_ICACHE(mmu_obj, pm, va, sz));
}

vm_offset_t
pmap_dumpsys_map(struct pmap_md *md, vm_size_t ofs, vm_size_t *sz)
{

        CTR4(KTR_PMAP, "%s(%p, %#x, %#x)", __func__, md, ofs, *sz);
        return (MMU_DUMPSYS_MAP(mmu_obj, md, ofs, sz));
}

void
pmap_dumpsys_unmap(struct pmap_md *md, vm_size_t ofs, vm_offset_t va)
{

        CTR4(KTR_PMAP, "%s(%p, %#x, %#x)", __func__, md, ofs, va);
        return (MMU_DUMPSYS_UNMAP(mmu_obj, md, ofs, va));
}

struct pmap_md *
pmap_scan_md(struct pmap_md *prev)
{

        CTR2(KTR_PMAP, "%s(%p)", __func__, prev);
        return (MMU_SCAN_MD(mmu_obj, prev));
}

/*
 * MMU install routines. Highest priority wins; an install at equal
 * priority also overrides, so the implementation registered last
 * wins ties.
 */
SET_DECLARE(mmu_set, mmu_def_t);
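/*
 * Descriptive note: SET_DECLARE() names the linker set into which
 * MMU_DEF() (see <machine/mmuvar.h>) places each registered mmu_def_t;
 * SET_FOREACH() below walks those entries at run time.
 */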

boolean_t
pmap_mmu_install(char *name, int prio)
{
        mmu_def_t       **mmupp, *mmup;
        static int      curr_prio = 0;

        /*
         * Try to locate the MMU kobj corresponding to the name.
         */
        SET_FOREACH(mmupp, mmu_set) {
                mmup = *mmupp;

                if (mmup->name &&
                    !strcmp(mmup->name, name) &&
                    (prio >= curr_prio || mmu_def_impl == NULL)) {
                        curr_prio = prio;
                        mmu_def_impl = mmup;
                        return (TRUE);
                }
        }

        return (FALSE);
}

int unmapped_buf_allowed;
