The Design and Implementation of the FreeBSD Operating System, Second Edition
Now available: The Design and Implementation of the FreeBSD Operating System (Second Edition)


[ source navigation ] [ diff markup ] [ identifier search ] [ freetext search ] [ file search ] [ list types ] [ track identifier ]

FreeBSD/Linux Kernel Cross Reference
sys/powerpc/powerpc/pmap_dispatch.c

Version: -  FREEBSD  -  FREEBSD-13-STABLE  -  FREEBSD-13-0  -  FREEBSD-12-STABLE  -  FREEBSD-12-0  -  FREEBSD-11-STABLE  -  FREEBSD-11-0  -  FREEBSD-10-STABLE  -  FREEBSD-10-0  -  FREEBSD-9-STABLE  -  FREEBSD-9-0  -  FREEBSD-8-STABLE  -  FREEBSD-8-0  -  FREEBSD-7-STABLE  -  FREEBSD-7-0  -  FREEBSD-6-STABLE  -  FREEBSD-6-0  -  FREEBSD-5-STABLE  -  FREEBSD-5-0  -  FREEBSD-4-STABLE  -  FREEBSD-3-STABLE  -  FREEBSD22  -  l41  -  OPENBSD  -  linux-2.6  -  MK84  -  PLAN9  -  xnu-8792 
SearchContext: -  none  -  3  -  10 

    1 /*-
    2  * Copyright (c) 2005 Peter Grehan
    3  * All rights reserved.
    4  *
    5  * Redistribution and use in source and binary forms, with or without
    6  * modification, are permitted provided that the following conditions
    7  * are met:
    8  * 1. Redistributions of source code must retain the above copyright
    9  *    notice, this list of conditions and the following disclaimer.
   10  * 2. Redistributions in binary form must reproduce the above copyright
   11  *    notice, this list of conditions and the following disclaimer in the
   12  *    documentation and/or other materials provided with the distribution.
   13  *
   14  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
   15  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   16  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   17  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
   18  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   19  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
   20  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   21  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   22  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   23  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   24  * SUCH DAMAGE.
   25  *
   26  */
   27 
   28 #include <sys/cdefs.h>
   29 __FBSDID("$FreeBSD: releng/11.1/sys/powerpc/powerpc/pmap_dispatch.c 310442 2016-12-23 03:19:48Z jhibbits $");
   30 
   31 /*
   32  * Dispatch MI pmap calls to the appropriate MMU implementation
   33  * through a previously registered kernel object.
   34  *
   35  * Before pmap_bootstrap() can be called, a CPU module must have
   36  * called pmap_mmu_install(). This may be called multiple times:
   37  * the highest priority call will be installed as the default
   38  * MMU handler when pmap_bootstrap() is called.
   39  *
   40  * It is required that mutex_init() be called before pmap_bootstrap(), 
   41  * as the PMAP layer makes extensive use of mutexes.
   42  */
   43 
   44 #include <sys/param.h>
   45 #include <sys/kernel.h>
   46 #include <sys/conf.h>
   47 #include <sys/lock.h>
   48 #include <sys/kerneldump.h>
   49 #include <sys/ktr.h>
   50 #include <sys/mutex.h>
   51 #include <sys/systm.h>
   52 
   53 #include <vm/vm.h>
   54 #include <vm/vm_page.h>
   55 
   56 #include <machine/dump.h>
   57 #include <machine/md_var.h>
   58 #include <machine/mmuvar.h>
   59 #include <machine/smp.h>
   60 
   61 #include "mmu_if.h"
   62 
static mmu_def_t        *mmu_def_impl;	/* winning class from pmap_mmu_install() */
static mmu_t            mmu_obj;	/* kobj instance all dispatches go through */
static struct mmu_kobj  mmu_kernel_obj;	/* static storage backing mmu_obj */
static struct kobj_ops  mmu_kernel_kops;	/* compiled method table for mmu_obj */

/*
 * pmap globals
 */
struct pmap kernel_pmap_store;	/* storage for the kernel pmap */

struct msgbuf *msgbufp;		/* kernel message buffer */
vm_offset_t    msgbuf_phys;	/* presumably its physical address — set by MMU code */

vm_offset_t kernel_vm_end;	/* current end of the kernel VA space */
vm_paddr_t phys_avail[PHYS_AVAIL_SZ];	/* usable physical memory ranges */
vm_offset_t virtual_avail;	/* first available kernel VA after bootstrap */
vm_offset_t virtual_end;	/* last available kernel VA */

int pmap_bootstrapped;		/* nonzero once bootstrap done; set by MMU code, not here */
   83 #ifdef AIM
   84 int
   85 pvo_vaddr_compare(struct pvo_entry *a, struct pvo_entry *b)
   86 {
   87         if (PVO_VADDR(a) < PVO_VADDR(b))
   88                 return (-1);
   89         else if (PVO_VADDR(a) > PVO_VADDR(b))
   90                 return (1);
   91         return (0);
   92 }
   93 RB_GENERATE(pvo_tree, pvo_entry, pvo_plink, pvo_vaddr_compare);
   94 #endif
   95         
   96 
/* Apply paging advice to a VA range; dispatched to the installed MMU. */
void
pmap_advise(pmap_t pmap, vm_offset_t start, vm_offset_t end, int advice)
{

	CTR5(KTR_PMAP, "%s(%p, %#x, %#x, %d)", __func__, pmap, start, end,
	    advice);
	MMU_ADVISE(mmu_obj, pmap, start, end, advice);
}
  105 
/* Clear the modified (dirty) state of a page. */
void
pmap_clear_modify(vm_page_t m)
{

	CTR2(KTR_PMAP, "%s(%p)", __func__, m);
	MMU_CLEAR_MODIFY(mmu_obj, m);
}
  113 
/* Copy a range of mappings from src_pmap to dst_pmap. */
void
pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr,
    vm_size_t len, vm_offset_t src_addr)
{

	CTR6(KTR_PMAP, "%s(%p, %p, %#x, %#x, %#x)", __func__, dst_pmap,
	    src_pmap, dst_addr, len, src_addr);
	MMU_COPY(mmu_obj, dst_pmap, src_pmap, dst_addr, len, src_addr);
}
  123 
/* Copy the contents of physical page src to page dst. */
void
pmap_copy_page(vm_page_t src, vm_page_t dst)
{

	CTR3(KTR_PMAP, "%s(%p, %p)", __func__, src, dst);
	MMU_COPY_PAGE(mmu_obj, src, dst);
}
  131 
/* Copy xfersize bytes between (possibly unaligned) page-array offsets. */
void
pmap_copy_pages(vm_page_t ma[], vm_offset_t a_offset, vm_page_t mb[],
    vm_offset_t b_offset, int xfersize)
{

	CTR6(KTR_PMAP, "%s(%p, %#x, %p, %#x, %#x)", __func__, ma,
	    a_offset, mb, b_offset, xfersize);
	MMU_COPY_PAGES(mmu_obj, ma, a_offset, mb, b_offset, xfersize);
}
  141 
/*
 * Enter a mapping for page p at va.  The trace spells out the function
 * name instead of "%s"/__func__ because CTR6() is already at its
 * six-argument maximum.
 */
int
pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t p, vm_prot_t prot,
    u_int flags, int8_t psind)
{

	CTR6(KTR_PMAP, "pmap_enter(%p, %#x, %p, %#x, %x, %d)", pmap, va,
	    p, prot, flags, psind);
	return (MMU_ENTER(mmu_obj, pmap, va, p, prot, flags, psind));
}
  151 
/* Enter mappings for a run of pages starting at m_start over [start, end). */
void
pmap_enter_object(pmap_t pmap, vm_offset_t start, vm_offset_t end,
    vm_page_t m_start, vm_prot_t prot)
{

	CTR6(KTR_PMAP, "%s(%p, %#x, %#x, %p, %#x)", __func__, pmap, start,
	    end, m_start, prot);
	MMU_ENTER_OBJECT(mmu_obj, pmap, start, end, m_start, prot);
}
  161 
/* Best-effort (may-fail) mapping entry for m at va. */
void
pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot)
{

	CTR5(KTR_PMAP, "%s(%p, %#x, %p, %#x)", __func__, pmap, va, m, prot);
	MMU_ENTER_QUICK(mmu_obj, pmap, va, m, prot);
}
  169 
/* Return the physical address mapped at va in pmap. */
vm_paddr_t
pmap_extract(pmap_t pmap, vm_offset_t va)
{

	CTR3(KTR_PMAP, "%s(%p, %#x)", __func__, pmap, va);
	return (MMU_EXTRACT(mmu_obj, pmap, va));
}
  177 
/* Look up the page mapped at va and hold it if it permits prot. */
vm_page_t
pmap_extract_and_hold(pmap_t pmap, vm_offset_t va, vm_prot_t prot)
{

	CTR4(KTR_PMAP, "%s(%p, %#x, %#x)", __func__, pmap, va, prot);
	return (MMU_EXTRACT_AND_HOLD(mmu_obj, pmap, va, prot));
}
  185 
/* Grow the kernel address space to cover va. */
void
pmap_growkernel(vm_offset_t va)
{

	CTR2(KTR_PMAP, "%s(%#x)", __func__, va);
	MMU_GROWKERNEL(mmu_obj, va);
}
  193 
/* Late (post-VM) initialization of the pmap module. */
void
pmap_init(void)
{

	CTR1(KTR_PMAP, "%s()", __func__);
	MMU_INIT(mmu_obj);
}
  201 
/* Has page m been modified via any mapping? */
boolean_t
pmap_is_modified(vm_page_t m)
{

	CTR2(KTR_PMAP, "%s(%p)", __func__, m);
	return (MMU_IS_MODIFIED(mmu_obj, m));
}
  209 
/* May va be prefaulted into pmap without a real fault? */
boolean_t
pmap_is_prefaultable(pmap_t pmap, vm_offset_t va)
{

	CTR3(KTR_PMAP, "%s(%p, %#x)", __func__, pmap, va);
	return (MMU_IS_PREFAULTABLE(mmu_obj, pmap, va));
}
  217 
/* Has page m been referenced via any mapping? */
boolean_t
pmap_is_referenced(vm_page_t m)
{

	CTR2(KTR_PMAP, "%s(%p)", __func__, m);
	return (MMU_IS_REFERENCED(mmu_obj, m));
}
  225 
  226 boolean_t
  227 pmap_ts_referenced(vm_page_t m)
  228 {
  229 
  230         CTR2(KTR_PMAP, "%s(%p)", __func__, m);
  231         return (MMU_TS_REFERENCED(mmu_obj, m));
  232 }
  233 
/* Map the physical range [start, end) into KVA; advances *virt. */
vm_offset_t
pmap_map(vm_offset_t *virt, vm_paddr_t start, vm_paddr_t end, int prot)
{

	CTR5(KTR_PMAP, "%s(%p, %#x, %#x, %#x)", __func__, virt, start, end,
	    prot);
	return (MMU_MAP(mmu_obj, virt, start, end, prot));
}
  242 
/* Pre-populate page tables for a region backed by a VM object. */
void
pmap_object_init_pt(pmap_t pmap, vm_offset_t addr, vm_object_t object,
    vm_pindex_t pindex, vm_size_t size)
{

	CTR6(KTR_PMAP, "%s(%p, %#x, %p, %u, %#x)", __func__, pmap, addr,
	    object, pindex, size);
	MMU_OBJECT_INIT_PT(mmu_obj, pmap, addr, object, pindex, size);
}
  252 
/* Quick (possibly approximate) check: is m mapped in pmap? */
boolean_t
pmap_page_exists_quick(pmap_t pmap, vm_page_t m)
{

	CTR3(KTR_PMAP, "%s(%p, %p)", __func__, pmap, m);
	return (MMU_PAGE_EXISTS_QUICK(mmu_obj, pmap, m));
}
  260 
/* Initialize machine-dependent fields of a vm_page. */
void
pmap_page_init(vm_page_t m)
{

	CTR2(KTR_PMAP, "%s(%p)", __func__, m);
	MMU_PAGE_INIT(mmu_obj, m);
}
  268 
/* Count the wired mappings of page m. */
int
pmap_page_wired_mappings(vm_page_t m)
{

	CTR2(KTR_PMAP, "%s(%p)", __func__, m);
	return (MMU_PAGE_WIRED_MAPPINGS(mmu_obj, m));
}
  276 
/* Initialize a pmap; always reports success (returns 1). */
int
pmap_pinit(pmap_t pmap)
{

	CTR2(KTR_PMAP, "%s(%p)", __func__, pmap);
	MMU_PINIT(mmu_obj, pmap);
	return (1);
}
  285 
/* Initialize the pmap of process 0. */
void
pmap_pinit0(pmap_t pmap)
{

	CTR2(KTR_PMAP, "%s(%p)", __func__, pmap);
	MMU_PINIT0(mmu_obj, pmap);
}
  293 
/* Change the protection of the mappings in [start, end). */
void
pmap_protect(pmap_t pmap, vm_offset_t start, vm_offset_t end, vm_prot_t prot)
{

	CTR5(KTR_PMAP, "%s(%p, %#x, %#x, %#x)", __func__, pmap, start, end,
	    prot);
	MMU_PROTECT(mmu_obj, pmap, start, end, prot);
}
  302 
/* Enter count wired kernel mappings for the pages in m at start. */
void
pmap_qenter(vm_offset_t start, vm_page_t *m, int count)
{

	CTR4(KTR_PMAP, "%s(%#x, %p, %d)", __func__, start, m, count);
	MMU_QENTER(mmu_obj, start, m, count);
}
  310 
/* Remove count kernel mappings previously entered by pmap_qenter(). */
void
pmap_qremove(vm_offset_t start, int count)
{

	CTR3(KTR_PMAP, "%s(%#x, %d)", __func__, start, count);
	MMU_QREMOVE(mmu_obj, start, count);
}
  318 
/* Release resources held by a pmap; counterpart of pmap_pinit(). */
void
pmap_release(pmap_t pmap)
{

	CTR2(KTR_PMAP, "%s(%p)", __func__, pmap);
	MMU_RELEASE(mmu_obj, pmap);
}
  326 
/* Remove all mappings in [start, end) from pmap. */
void
pmap_remove(pmap_t pmap, vm_offset_t start, vm_offset_t end)
{

	CTR4(KTR_PMAP, "%s(%p, %#x, %#x)", __func__, pmap, start, end);
	MMU_REMOVE(mmu_obj, pmap, start, end);
}
  334 
/* Remove every mapping of page m from all pmaps. */
void
pmap_remove_all(vm_page_t m)
{

	CTR2(KTR_PMAP, "%s(%p)", __func__, m);
	MMU_REMOVE_ALL(mmu_obj, m);
}
  342 
/* Bulk-remove the (unwired) user mappings of pmap, e.g. at exit. */
void
pmap_remove_pages(pmap_t pmap)
{

	CTR2(KTR_PMAP, "%s(%p)", __func__, pmap);
	MMU_REMOVE_PAGES(mmu_obj, pmap);
}
  350 
/* Revoke write access to page m in all of its mappings. */
void
pmap_remove_write(vm_page_t m)
{

	CTR2(KTR_PMAP, "%s(%p)", __func__, m);
	MMU_REMOVE_WRITE(mmu_obj, m);
}
  358 
/* Unwire the mappings in [start, end) of pmap. */
void
pmap_unwire(pmap_t pmap, vm_offset_t start, vm_offset_t end)
{

	CTR4(KTR_PMAP, "%s(%p, %#x, %#x)", __func__, pmap, start, end);
	MMU_UNWIRE(mmu_obj, pmap, start, end);
}
  366 
/* Zero the contents of physical page m. */
void
pmap_zero_page(vm_page_t m)
{

	CTR2(KTR_PMAP, "%s(%p)", __func__, m);
	MMU_ZERO_PAGE(mmu_obj, m);
}
  374 
/* Zero size bytes of page m starting at offset off. */
void
pmap_zero_page_area(vm_page_t m, int off, int size)
{

	CTR4(KTR_PMAP, "%s(%p, %d, %d)", __func__, m, off, size);
	MMU_ZERO_PAGE_AREA(mmu_obj, m, off, size);
}
  382 
/* Zero page m from the idle loop (no locks expected to be held). */
void
pmap_zero_page_idle(vm_page_t m)
{

	CTR2(KTR_PMAP, "%s(%p)", __func__, m);
	MMU_ZERO_PAGE_IDLE(mmu_obj, m);
}
  390 
/* mincore(2) backend: report residency status of addr (locked_pa not traced). */
int
pmap_mincore(pmap_t pmap, vm_offset_t addr, vm_paddr_t *locked_pa)
{

	CTR3(KTR_PMAP, "%s(%p, %#x)", __func__, pmap, addr);
	return (MMU_MINCORE(mmu_obj, pmap, addr, locked_pa));
}
  398 
/* Make td's pmap the active address space on this CPU. */
void
pmap_activate(struct thread *td)
{

	CTR2(KTR_PMAP, "%s(%p)", __func__, td);
	MMU_ACTIVATE(mmu_obj, td);
}
  406 
/* Deactivate td's pmap on this CPU. */
void
pmap_deactivate(struct thread *td)
{

	CTR2(KTR_PMAP, "%s(%p)", __func__, td);
	MMU_DEACTIVATE(mmu_obj, td);
}
  414 
  415 /*
  416  *      Increase the starting virtual address of the given mapping if a
  417  *      different alignment might result in more superpage mappings.
  418  */
/* See the block comment above: adjust *addr for superpage alignment. */
void
pmap_align_superpage(vm_object_t object, vm_ooffset_t offset,
    vm_offset_t *addr, vm_size_t size)
{

	CTR5(KTR_PMAP, "%s(%p, %#x, %p, %#x)", __func__, object, offset, addr,
	    size);
	MMU_ALIGN_SUPERPAGE(mmu_obj, object, offset, addr, size);
}
  428 
  429 /*
  430  * Routines used in machine-dependent code
  431  */
/*
 * Instantiate the highest-priority MMU class registered via
 * pmap_mmu_install() and hand bootstrap off to it.  No KTR tracing:
 * this runs before the trace facility is usable.
 */
void
pmap_bootstrap(vm_offset_t start, vm_offset_t end)
{
	mmu_obj = &mmu_kernel_obj;

	/*
	 * Take care of compiling the selected class, and
	 * then statically initialise the MMU object
	 */
	kobj_class_compile_static(mmu_def_impl, &mmu_kernel_kops);
	kobj_init_static((kobj_t)mmu_obj, mmu_def_impl);

	MMU_BOOTSTRAP(mmu_obj, start, end);
}
  446 
  447 void
  448 pmap_cpu_bootstrap(int ap)
  449 {
  450         /*
  451          * No KTR here because our console probably doesn't work yet
  452          */
  453 
  454         return (MMU_CPU_BOOTSTRAP(mmu_obj, ap));
  455 }
  456 
/* Map a device's physical range into KVA; returns the mapped VA. */
void *
pmap_mapdev(vm_paddr_t pa, vm_size_t size)
{

	CTR3(KTR_PMAP, "%s(%#x, %#x)", __func__, pa, size);
	return (MMU_MAPDEV(mmu_obj, pa, size));
}
  464 
/* As pmap_mapdev(), but with an explicit memory attribute. */
void *
pmap_mapdev_attr(vm_paddr_t pa, vm_size_t size, vm_memattr_t attr)
{

	CTR4(KTR_PMAP, "%s(%#x, %#x, %#x)", __func__, pa, size, attr);
	return (MMU_MAPDEV_ATTR(mmu_obj, pa, size, attr));
}
  472 
  473 void
  474 pmap_page_set_memattr(vm_page_t m, vm_memattr_t ma)
  475 {
  476 
  477         CTR3(KTR_PMAP, "%s(%p, %#x)", __func__, m, ma);
  478         return (MMU_PAGE_SET_MEMATTR(mmu_obj, m, ma));
  479 }
  480 
/* Tear down a device mapping created by pmap_mapdev(). */
void
pmap_unmapdev(vm_offset_t va, vm_size_t size)
{

	CTR3(KTR_PMAP, "%s(%#x, %#x)", __func__, va, size);
	MMU_UNMAPDEV(mmu_obj, va, size);
}
  488 
/* Return the physical address backing kernel VA va. */
vm_paddr_t
pmap_kextract(vm_offset_t va)
{

	CTR2(KTR_PMAP, "%s(%#x)", __func__, va);
	return (MMU_KEXTRACT(mmu_obj, va));
}
  496 
/* Enter a wired kernel mapping of pa at va. */
void
pmap_kenter(vm_offset_t va, vm_paddr_t pa)
{

	CTR3(KTR_PMAP, "%s(%#x, %#x)", __func__, va, pa);
	MMU_KENTER(mmu_obj, va, pa);
}
  504 
/*
 * As pmap_kenter(), with an explicit memory attribute.
 * NOTE(review): pa is declared vm_offset_t here, while the sibling
 * functions use vm_paddr_t for physical addresses — verify against the
 * MD prototype before changing.
 */
void
pmap_kenter_attr(vm_offset_t va, vm_offset_t pa, vm_memattr_t ma)
{

	CTR4(KTR_PMAP, "%s(%#x, %#x, %#x)", __func__, va, pa, ma);
	MMU_KENTER_ATTR(mmu_obj, va, pa, ma);
}
  512 
  513 void
  514 pmap_kremove(vm_offset_t va)
  515 {
  516 
  517         CTR2(KTR_PMAP, "%s(%#x)", __func__, va);
  518         return (MMU_KREMOVE(mmu_obj, va));
  519 }
  520 
/* Is the physical range [pa, pa+size) accessible without a mapping? */
boolean_t
pmap_dev_direct_mapped(vm_paddr_t pa, vm_size_t size)
{

	CTR3(KTR_PMAP, "%s(%#x, %#x)", __func__, pa, size);
	return (MMU_DEV_DIRECT_MAPPED(mmu_obj, pa, size));
}
  528 
  529 void
  530 pmap_sync_icache(pmap_t pm, vm_offset_t va, vm_size_t sz)
  531 {
  532  
  533         CTR4(KTR_PMAP, "%s(%p, %#x, %#x)", __func__, pm, va, sz);
  534         return (MMU_SYNC_ICACHE(mmu_obj, pm, va, sz));
  535 }
  536 
  537 void
  538 dumpsys_map_chunk(vm_paddr_t pa, size_t sz, void **va)
  539 {
  540 
  541         CTR4(KTR_PMAP, "%s(%#jx, %#zx, %p)", __func__, (uintmax_t)pa, sz, va);
  542         return (MMU_DUMPSYS_MAP(mmu_obj, pa, sz, va));
  543 }
  544 
  545 void
  546 dumpsys_unmap_chunk(vm_paddr_t pa, size_t sz, void *va)
  547 {
  548 
  549         CTR4(KTR_PMAP, "%s(%#jx, %#zx, %p)", __func__, (uintmax_t)pa, sz, va);
  550         return (MMU_DUMPSYS_UNMAP(mmu_obj, pa, sz, va));
  551 }
  552 
  553 void
  554 dumpsys_pa_init(void)
  555 {
  556 
  557         CTR1(KTR_PMAP, "%s()", __func__);
  558         return (MMU_SCAN_INIT(mmu_obj));
  559 }
  560 
/* Temporarily map page m for short-term CPU access; returns the VA. */
vm_offset_t
pmap_quick_enter_page(vm_page_t m)
{
	CTR2(KTR_PMAP, "%s(%p)", __func__, m);
	return (MMU_QUICK_ENTER_PAGE(mmu_obj, m));
}
  567 
/* Undo a pmap_quick_enter_page() mapping at addr. */
void
pmap_quick_remove_page(vm_offset_t addr)
{
	CTR2(KTR_PMAP, "%s(%#x)", __func__, addr);
	MMU_QUICK_REMOVE_PAGE(mmu_obj, addr);
}
  574 
/* Change the memory attribute of the KVA range [addr, addr+size). */
int
pmap_change_attr(vm_offset_t addr, vm_size_t size, vm_memattr_t mode)
{
	CTR4(KTR_PMAP, "%s(%#x, %#zx, %d)", __func__, addr, size, mode);
	return (MMU_CHANGE_ATTR(mmu_obj, addr, size, mode));
}
  581 
  582 /*
  583  * MMU install routines. Highest priority wins, equal priority also
  584  * overrides allowing last-set to win.
  585  */
  586 SET_DECLARE(mmu_set, mmu_def_t);
  587 
  588 boolean_t
  589 pmap_mmu_install(char *name, int prio)
  590 {
  591         mmu_def_t       **mmupp, *mmup;
  592         static int      curr_prio = 0;
  593 
  594         /*
  595          * Try and locate the MMU kobj corresponding to the name
  596          */
  597         SET_FOREACH(mmupp, mmu_set) {
  598                 mmup = *mmupp;
  599 
  600                 if (mmup->name &&
  601                     !strcmp(mmup->name, name) &&
  602                     (prio >= curr_prio || mmu_def_impl == NULL)) {
  603                         curr_prio = prio;
  604                         mmu_def_impl = mmup;
  605                         return (TRUE);
  606                 }
  607         }
  608 
  609         return (FALSE);
  610 }
  611 
  612 int unmapped_buf_allowed;

Cache object: 5b6174c0f534b92c9138d1d7ad03d0d4


[ source navigation ] [ diff markup ] [ identifier search ] [ freetext search ] [ file search ] [ list types ] [ track identifier ]


This page is part of the FreeBSD/Linux Kernel Cross-Reference, and was automatically generated using a modified version of the LXR engine.