FreeBSD/Linux Kernel Cross Reference
sys/powerpc/powerpc/pmap_dispatch.c


/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2005 Peter Grehan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: releng/12.0/sys/powerpc/powerpc/pmap_dispatch.c 337051 2018-08-01 18:45:51Z kib $");
/*
 * Dispatch MI pmap calls to the appropriate MMU implementation
 * through a previously registered kernel object.
 *
 * Before pmap_bootstrap() can be called, a CPU module must have
 * called pmap_mmu_install(). This may be called multiple times:
 * the highest priority call will be installed as the default
 * MMU handler when pmap_bootstrap() is called.
 *
 * It is required that mutex_init() be called before pmap_bootstrap(),
 * as the PMAP layer makes extensive use of mutexes.
 */
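
/*
 * Illustrative registration sketch (not part of this file): a CPU
 * module declares its implementation with the MMU_DEF() macro from
 * <machine/mmuvar.h>, which places an mmu_def_t into the mmu_set
 * linker set walked by pmap_mmu_install() below; early
 * machine-dependent code then selects it by name.  The method-table
 * name and priority shown here are illustrative, not verbatim:
 *
 *      MMU_DEF(oea, MMU_TYPE_OEA, moea_methods, 0);
 *      ...
 *      pmap_mmu_install(MMU_TYPE_OEA, 0);
 */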
   45 
   46 #include <sys/param.h>
   47 #include <sys/kernel.h>
   48 #include <sys/conf.h>
   49 #include <sys/lock.h>
   50 #include <sys/kerneldump.h>
   51 #include <sys/ktr.h>
   52 #include <sys/mutex.h>
   53 #include <sys/systm.h>
   54 
   55 #include <vm/vm.h>
   56 #include <vm/vm_page.h>
   57 
   58 #include <machine/dump.h>
   59 #include <machine/md_var.h>
   60 #include <machine/mmuvar.h>
   61 #include <machine/smp.h>
   62 
   63 #include "mmu_if.h"
   64 
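/*
 * The kobj class chosen by pmap_mmu_install() and the statically
 * allocated kobj instance through which every MMU_*() call below is
 * dispatched.
 */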
static mmu_def_t        *mmu_def_impl;
static mmu_t            mmu_obj;
static struct mmu_kobj  mmu_kernel_obj;
static struct kobj_ops  mmu_kernel_kops;

/*
 * pmap globals
 */
struct pmap kernel_pmap_store;

vm_offset_t    msgbuf_phys;

vm_offset_t kernel_vm_end;
vm_paddr_t phys_avail[PHYS_AVAIL_SZ];
vm_offset_t virtual_avail;
vm_offset_t virtual_end;

int pmap_bootstrapped;

#ifdef AIM
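/*
 * Order pvo_entry structures by virtual address; this comparator is
 * used by the AIM pmap's red-black tree of mappings (RB_GENERATE
 * below).
 */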
int
pvo_vaddr_compare(struct pvo_entry *a, struct pvo_entry *b)
{
        if (PVO_VADDR(a) < PVO_VADDR(b))
                return (-1);
        else if (PVO_VADDR(a) > PVO_VADDR(b))
                return (1);
        return (0);
}
RB_GENERATE(pvo_tree, pvo_entry, pvo_plink, pvo_vaddr_compare);
#endif

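/*
 * Each pmap_*() routine below logs a KTR trace record and forwards
 * the call to the installed MMU implementation through the kobj
 * interface generated from mmu_if.m (see mmu_if.h above).
 */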
void
pmap_advise(pmap_t pmap, vm_offset_t start, vm_offset_t end, int advice)
{

        CTR5(KTR_PMAP, "%s(%p, %#x, %#x, %d)", __func__, pmap, start, end,
            advice);
        MMU_ADVISE(mmu_obj, pmap, start, end, advice);
}

void
pmap_clear_modify(vm_page_t m)
{

        CTR2(KTR_PMAP, "%s(%p)", __func__, m);
        MMU_CLEAR_MODIFY(mmu_obj, m);
}

void
pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr,
    vm_size_t len, vm_offset_t src_addr)
{

        CTR6(KTR_PMAP, "%s(%p, %p, %#x, %#x, %#x)", __func__, dst_pmap,
            src_pmap, dst_addr, len, src_addr);
        MMU_COPY(mmu_obj, dst_pmap, src_pmap, dst_addr, len, src_addr);
}

void
pmap_copy_page(vm_page_t src, vm_page_t dst)
{

        CTR3(KTR_PMAP, "%s(%p, %p)", __func__, src, dst);
        MMU_COPY_PAGE(mmu_obj, src, dst);
}

void
pmap_copy_pages(vm_page_t ma[], vm_offset_t a_offset, vm_page_t mb[],
    vm_offset_t b_offset, int xfersize)
{

        CTR6(KTR_PMAP, "%s(%p, %#x, %p, %#x, %#x)", __func__, ma,
            a_offset, mb, b_offset, xfersize);
        MMU_COPY_PAGES(mmu_obj, ma, a_offset, mb, b_offset, xfersize);
}

int
pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t p, vm_prot_t prot,
    u_int flags, int8_t psind)
{

        CTR6(KTR_PMAP, "pmap_enter(%p, %#x, %p, %#x, %x, %d)", pmap, va,
            p, prot, flags, psind);
        return (MMU_ENTER(mmu_obj, pmap, va, p, prot, flags, psind));
}

void
pmap_enter_object(pmap_t pmap, vm_offset_t start, vm_offset_t end,
    vm_page_t m_start, vm_prot_t prot)
{

        CTR6(KTR_PMAP, "%s(%p, %#x, %#x, %p, %#x)", __func__, pmap, start,
            end, m_start, prot);
        MMU_ENTER_OBJECT(mmu_obj, pmap, start, end, m_start, prot);
}

void
pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot)
{

        CTR5(KTR_PMAP, "%s(%p, %#x, %p, %#x)", __func__, pmap, va, m, prot);
        MMU_ENTER_QUICK(mmu_obj, pmap, va, m, prot);
}

vm_paddr_t
pmap_extract(pmap_t pmap, vm_offset_t va)
{

        CTR3(KTR_PMAP, "%s(%p, %#x)", __func__, pmap, va);
        return (MMU_EXTRACT(mmu_obj, pmap, va));
}

vm_page_t
pmap_extract_and_hold(pmap_t pmap, vm_offset_t va, vm_prot_t prot)
{

        CTR4(KTR_PMAP, "%s(%p, %#x, %#x)", __func__, pmap, va, prot);
        return (MMU_EXTRACT_AND_HOLD(mmu_obj, pmap, va, prot));
}

void
pmap_growkernel(vm_offset_t va)
{

        CTR2(KTR_PMAP, "%s(%#x)", __func__, va);
        MMU_GROWKERNEL(mmu_obj, va);
}

void
pmap_init(void)
{

        CTR1(KTR_PMAP, "%s()", __func__);
        MMU_INIT(mmu_obj);
}

boolean_t
pmap_is_modified(vm_page_t m)
{

        CTR2(KTR_PMAP, "%s(%p)", __func__, m);
        return (MMU_IS_MODIFIED(mmu_obj, m));
}

boolean_t
pmap_is_prefaultable(pmap_t pmap, vm_offset_t va)
{

        CTR3(KTR_PMAP, "%s(%p, %#x)", __func__, pmap, va);
        return (MMU_IS_PREFAULTABLE(mmu_obj, pmap, va));
}

boolean_t
pmap_is_referenced(vm_page_t m)
{

        CTR2(KTR_PMAP, "%s(%p)", __func__, m);
        return (MMU_IS_REFERENCED(mmu_obj, m));
}

int
pmap_ts_referenced(vm_page_t m)
{

        CTR2(KTR_PMAP, "%s(%p)", __func__, m);
        return (MMU_TS_REFERENCED(mmu_obj, m));
}

vm_offset_t
pmap_map(vm_offset_t *virt, vm_paddr_t start, vm_paddr_t end, int prot)
{

        CTR5(KTR_PMAP, "%s(%p, %#x, %#x, %#x)", __func__, virt, start, end,
            prot);
        return (MMU_MAP(mmu_obj, virt, start, end, prot));
}

void
pmap_object_init_pt(pmap_t pmap, vm_offset_t addr, vm_object_t object,
    vm_pindex_t pindex, vm_size_t size)
{

        CTR6(KTR_PMAP, "%s(%p, %#x, %p, %u, %#x)", __func__, pmap, addr,
            object, pindex, size);
        MMU_OBJECT_INIT_PT(mmu_obj, pmap, addr, object, pindex, size);
}

boolean_t
pmap_page_exists_quick(pmap_t pmap, vm_page_t m)
{

        CTR3(KTR_PMAP, "%s(%p, %p)", __func__, pmap, m);
        return (MMU_PAGE_EXISTS_QUICK(mmu_obj, pmap, m));
}

void
pmap_page_init(vm_page_t m)
{

        CTR2(KTR_PMAP, "%s(%p)", __func__, m);
        MMU_PAGE_INIT(mmu_obj, m);
}

int
pmap_page_wired_mappings(vm_page_t m)
{

        CTR2(KTR_PMAP, "%s(%p)", __func__, m);
        return (MMU_PAGE_WIRED_MAPPINGS(mmu_obj, m));
}

int
pmap_pinit(pmap_t pmap)
{

        CTR2(KTR_PMAP, "%s(%p)", __func__, pmap);
        MMU_PINIT(mmu_obj, pmap);
        return (1);
}

void
pmap_pinit0(pmap_t pmap)
{

        CTR2(KTR_PMAP, "%s(%p)", __func__, pmap);
        MMU_PINIT0(mmu_obj, pmap);
}

void
pmap_protect(pmap_t pmap, vm_offset_t start, vm_offset_t end, vm_prot_t prot)
{

        CTR5(KTR_PMAP, "%s(%p, %#x, %#x, %#x)", __func__, pmap, start, end,
            prot);
        MMU_PROTECT(mmu_obj, pmap, start, end, prot);
}

void
pmap_qenter(vm_offset_t start, vm_page_t *m, int count)
{

        CTR4(KTR_PMAP, "%s(%#x, %p, %d)", __func__, start, m, count);
        MMU_QENTER(mmu_obj, start, m, count);
}

void
pmap_qremove(vm_offset_t start, int count)
{

        CTR3(KTR_PMAP, "%s(%#x, %d)", __func__, start, count);
        MMU_QREMOVE(mmu_obj, start, count);
}

void
pmap_release(pmap_t pmap)
{

        CTR2(KTR_PMAP, "%s(%p)", __func__, pmap);
        MMU_RELEASE(mmu_obj, pmap);
}

void
pmap_remove(pmap_t pmap, vm_offset_t start, vm_offset_t end)
{

        CTR4(KTR_PMAP, "%s(%p, %#x, %#x)", __func__, pmap, start, end);
        MMU_REMOVE(mmu_obj, pmap, start, end);
}

void
pmap_remove_all(vm_page_t m)
{

        CTR2(KTR_PMAP, "%s(%p)", __func__, m);
        MMU_REMOVE_ALL(mmu_obj, m);
}

void
pmap_remove_pages(pmap_t pmap)
{

        CTR2(KTR_PMAP, "%s(%p)", __func__, pmap);
        MMU_REMOVE_PAGES(mmu_obj, pmap);
}

void
pmap_remove_write(vm_page_t m)
{

        CTR2(KTR_PMAP, "%s(%p)", __func__, m);
        MMU_REMOVE_WRITE(mmu_obj, m);
}

void
pmap_unwire(pmap_t pmap, vm_offset_t start, vm_offset_t end)
{

        CTR4(KTR_PMAP, "%s(%p, %#x, %#x)", __func__, pmap, start, end);
        MMU_UNWIRE(mmu_obj, pmap, start, end);
}

void
pmap_zero_page(vm_page_t m)
{

        CTR2(KTR_PMAP, "%s(%p)", __func__, m);
        MMU_ZERO_PAGE(mmu_obj, m);
}

void
pmap_zero_page_area(vm_page_t m, int off, int size)
{

        CTR4(KTR_PMAP, "%s(%p, %d, %d)", __func__, m, off, size);
        MMU_ZERO_PAGE_AREA(mmu_obj, m, off, size);
}

int
pmap_mincore(pmap_t pmap, vm_offset_t addr, vm_paddr_t *locked_pa)
{

        CTR3(KTR_PMAP, "%s(%p, %#x)", __func__, pmap, addr);
        return (MMU_MINCORE(mmu_obj, pmap, addr, locked_pa));
}

void
pmap_activate(struct thread *td)
{

        CTR2(KTR_PMAP, "%s(%p)", __func__, td);
        MMU_ACTIVATE(mmu_obj, td);
}

void
pmap_deactivate(struct thread *td)
{

        CTR2(KTR_PMAP, "%s(%p)", __func__, td);
        MMU_DEACTIVATE(mmu_obj, td);
}

/*
 *      Increase the starting virtual address of the given mapping if a
 *      different alignment might result in more superpage mappings.
 */
void
pmap_align_superpage(vm_object_t object, vm_ooffset_t offset,
    vm_offset_t *addr, vm_size_t size)
{

        CTR5(KTR_PMAP, "%s(%p, %#x, %p, %#x)", __func__, object, offset, addr,
            size);
        MMU_ALIGN_SUPERPAGE(mmu_obj, object, offset, addr, size);
}

/*
 * Routines used in machine-dependent code
 */
void
pmap_bootstrap(vm_offset_t start, vm_offset_t end)
{
        mmu_obj = &mmu_kernel_obj;

        /*
         * Take care of compiling the selected class, and
         * then statically initialise the MMU object
         */
        kobj_class_compile_static(mmu_def_impl, &mmu_kernel_kops);
        kobj_init_static((kobj_t)mmu_obj, mmu_def_impl);

        MMU_BOOTSTRAP(mmu_obj, start, end);
}
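
/*
 * Illustrative boot-time ordering, inferred from the comments above
 * (the argument values are illustrative, not a verbatim call chain):
 *
 *      pmap_mmu_install("mmu_oea", 0);    called by the CPU module
 *      pmap_bootstrap(start, end);        installs the winning class
 *      pmap_init();                       later, from the VM system
 */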

void
pmap_cpu_bootstrap(int ap)
{
        /*
         * No KTR here because our console probably doesn't work yet
         */

        return (MMU_CPU_BOOTSTRAP(mmu_obj, ap));
}

void *
pmap_mapdev(vm_paddr_t pa, vm_size_t size)
{

        CTR3(KTR_PMAP, "%s(%#x, %#x)", __func__, pa, size);
        return (MMU_MAPDEV(mmu_obj, pa, size));
}

void *
pmap_mapdev_attr(vm_paddr_t pa, vm_size_t size, vm_memattr_t attr)
{

        CTR4(KTR_PMAP, "%s(%#x, %#x, %#x)", __func__, pa, size, attr);
        return (MMU_MAPDEV_ATTR(mmu_obj, pa, size, attr));
}

void
pmap_page_set_memattr(vm_page_t m, vm_memattr_t ma)
{

        CTR3(KTR_PMAP, "%s(%p, %#x)", __func__, m, ma);
        return (MMU_PAGE_SET_MEMATTR(mmu_obj, m, ma));
}

void
pmap_unmapdev(vm_offset_t va, vm_size_t size)
{

        CTR3(KTR_PMAP, "%s(%#x, %#x)", __func__, va, size);
        MMU_UNMAPDEV(mmu_obj, va, size);
}

vm_paddr_t
pmap_kextract(vm_offset_t va)
{

        CTR2(KTR_PMAP, "%s(%#x)", __func__, va);
        return (MMU_KEXTRACT(mmu_obj, va));
}

void
pmap_kenter(vm_offset_t va, vm_paddr_t pa)
{

        CTR3(KTR_PMAP, "%s(%#x, %#x)", __func__, va, pa);
        MMU_KENTER(mmu_obj, va, pa);
}

void
pmap_kenter_attr(vm_offset_t va, vm_paddr_t pa, vm_memattr_t ma)
{

        CTR4(KTR_PMAP, "%s(%#x, %#x, %#x)", __func__, va, pa, ma);
        MMU_KENTER_ATTR(mmu_obj, va, pa, ma);
}

void
pmap_kremove(vm_offset_t va)
{

        CTR2(KTR_PMAP, "%s(%#x)", __func__, va);
        return (MMU_KREMOVE(mmu_obj, va));
}

int
pmap_map_user_ptr(pmap_t pm, volatile const void *uaddr, void **kaddr,
    size_t ulen, size_t *klen)
{

        CTR2(KTR_PMAP, "%s(%p)", __func__, uaddr);
        return (MMU_MAP_USER_PTR(mmu_obj, pm, uaddr, kaddr, ulen, klen));
}

int
pmap_decode_kernel_ptr(vm_offset_t addr, int *is_user, vm_offset_t *decoded)
{

        CTR2(KTR_PMAP, "%s(%#jx)", __func__, (uintmax_t)addr);
        return (MMU_DECODE_KERNEL_PTR(mmu_obj, addr, is_user, decoded));
}

boolean_t
pmap_dev_direct_mapped(vm_paddr_t pa, vm_size_t size)
{

        CTR3(KTR_PMAP, "%s(%#x, %#x)", __func__, pa, size);
        return (MMU_DEV_DIRECT_MAPPED(mmu_obj, pa, size));
}

void
pmap_sync_icache(pmap_t pm, vm_offset_t va, vm_size_t sz)
{

        CTR4(KTR_PMAP, "%s(%p, %#x, %#x)", __func__, pm, va, sz);
        return (MMU_SYNC_ICACHE(mmu_obj, pm, va, sz));
}

void
dumpsys_map_chunk(vm_paddr_t pa, size_t sz, void **va)
{

        CTR4(KTR_PMAP, "%s(%#jx, %#zx, %p)", __func__, (uintmax_t)pa, sz, va);
        return (MMU_DUMPSYS_MAP(mmu_obj, pa, sz, va));
}

void
dumpsys_unmap_chunk(vm_paddr_t pa, size_t sz, void *va)
{

        CTR4(KTR_PMAP, "%s(%#jx, %#zx, %p)", __func__, (uintmax_t)pa, sz, va);
        return (MMU_DUMPSYS_UNMAP(mmu_obj, pa, sz, va));
}

void
dumpsys_pa_init(void)
{

        CTR1(KTR_PMAP, "%s()", __func__);
        return (MMU_SCAN_INIT(mmu_obj));
}

vm_offset_t
pmap_quick_enter_page(vm_page_t m)
{
        CTR2(KTR_PMAP, "%s(%p)", __func__, m);
        return (MMU_QUICK_ENTER_PAGE(mmu_obj, m));
}

void
pmap_quick_remove_page(vm_offset_t addr)
{
        CTR2(KTR_PMAP, "%s(%#x)", __func__, addr);
        MMU_QUICK_REMOVE_PAGE(mmu_obj, addr);
}

int
pmap_change_attr(vm_offset_t addr, vm_size_t size, vm_memattr_t mode)
{
        CTR4(KTR_PMAP, "%s(%#x, %#zx, %d)", __func__, addr, size, mode);
        return (MMU_CHANGE_ATTR(mmu_obj, addr, size, mode));
}

/*
 * MMU install routines. Highest priority wins, equal priority also
 * overrides allowing last-set to win.
 */
SET_DECLARE(mmu_set, mmu_def_t);

int
pmap_mmu_install(char *name, int prio)
{
        mmu_def_t       **mmupp, *mmup;
        static int      curr_prio = 0;

        /*
         * Try and locate the MMU kobj corresponding to the name
         */
        SET_FOREACH(mmupp, mmu_set) {
                mmup = *mmupp;

                if (mmup->name &&
                    !strcmp(mmup->name, name) &&
                    (prio >= curr_prio || mmu_def_impl == NULL)) {
                        curr_prio = prio;
                        mmu_def_impl = mmup;
                        return (TRUE);
                }
        }

        return (FALSE);
}
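
/*
 * Example (hypothetical priorities): if platform code calls
 *
 *      pmap_mmu_install(MMU_TYPE_OEA, 0);
 *      pmap_mmu_install(MMU_TYPE_OEA, 10);
 *
 * the second call wins because of its higher priority; an equal
 * priority also overrides, so the most recent caller at the winning
 * priority determines what pmap_bootstrap() installs.
 */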

int unmapped_buf_allowed;

boolean_t
pmap_is_valid_memattr(pmap_t pmap __unused, vm_memattr_t mode)
{

        switch (mode) {
        case VM_MEMATTR_DEFAULT:
        case VM_MEMATTR_UNCACHEABLE:
        case VM_MEMATTR_CACHEABLE:
        case VM_MEMATTR_WRITE_COMBINING:
        case VM_MEMATTR_WRITE_BACK:
        case VM_MEMATTR_WRITE_THROUGH:
        case VM_MEMATTR_PREFETCHABLE:
                return (TRUE);
        default:
                return (FALSE);
        }
}
