FreeBSD/Linux Kernel Cross Reference
sys/powerpc/powerpc/mmu_if.m

#-
# Copyright (c) 2005 Peter Grehan
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
#    notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
#    notice, this list of conditions and the following disclaimer in the
#    documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
#
# $FreeBSD: releng/12.0/sys/powerpc/powerpc/mmu_if.m 328530 2018-01-29 04:33:41Z nwhitehorn $
#

#include <sys/param.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/systm.h>

#include <vm/vm.h>
#include <vm/vm_page.h>

#include <machine/mmuvar.h>

/**
 * @defgroup MMU mmu - KObj methods for PowerPC MMU implementations
 * @brief A set of methods required by all MMU implementations. These
 * are essentially direct call-throughs from the machine-dependent
 * pmap code.
 * Thanks to Bruce M Simpson's pmap man pages for routine descriptions.
 *@{
 */

INTERFACE mmu;
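
#
# Illustrative sketch (an assumption, not part of this interface file):
# for each METHOD below, kobj generates an MMU_<NAME>() dispatch macro,
# and the machine-dependent pmap entry points are thin wrappers that call
# through a global handle to whichever MMU implementation was selected at
# boot. The handle name "mmu_obj" is hypothetical here.
#
#       void
#       pmap_copy_page(vm_page_t src, vm_page_t dst)
#       {
#
#               MMU_COPY_PAGE(mmu_obj, src, dst);
#       }
#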
   49 
#
# Default implementations of some methods
#
CODE {
        static void mmu_null_copy(mmu_t mmu, pmap_t dst_pmap, pmap_t src_pmap,
            vm_offset_t dst_addr, vm_size_t len, vm_offset_t src_addr)
        {
                return;
        }

        static void mmu_null_growkernel(mmu_t mmu, vm_offset_t addr)
        {
                return;
        }

        static void mmu_null_init(mmu_t mmu)
        {
                return;
        }

        static boolean_t mmu_null_is_prefaultable(mmu_t mmu, pmap_t pmap,
            vm_offset_t va)
        {
                return (FALSE);
        }

        static void mmu_null_object_init_pt(mmu_t mmu, pmap_t pmap,
            vm_offset_t addr, vm_object_t object, vm_pindex_t index,
            vm_size_t size)
        {
                return;
        }

        static void mmu_null_page_init(mmu_t mmu, vm_page_t m)
        {
                return;
        }

        static void mmu_null_remove_pages(mmu_t mmu, pmap_t pmap)
        {
                return;
        }

        static int mmu_null_mincore(mmu_t mmu, pmap_t pmap, vm_offset_t addr,
            vm_paddr_t *locked_pa)
        {
                return (0);
        }

        static void mmu_null_deactivate(struct thread *td)
        {
                return;
        }

        static void mmu_null_align_superpage(mmu_t mmu, vm_object_t object,
            vm_ooffset_t offset, vm_offset_t *addr, vm_size_t size)
        {
                return;
        }

        /* Fall back to the attribute-less mapdev, ignoring the attribute. */
        static void *mmu_null_mapdev_attr(mmu_t mmu, vm_paddr_t pa,
            vm_size_t size, vm_memattr_t ma)
        {
                return (MMU_MAPDEV(mmu, pa, size));
        }

        /* Fall back to the attribute-less kenter, ignoring the attribute. */
        static void mmu_null_kenter_attr(mmu_t mmu, vm_offset_t va,
            vm_paddr_t pa, vm_memattr_t ma)
        {
                MMU_KENTER(mmu, va, pa);
        }

        static void mmu_null_page_set_memattr(mmu_t mmu, vm_page_t m,
            vm_memattr_t ma)
        {
                return;
        }

        static int mmu_null_change_attr(mmu_t mmu, vm_offset_t va,
            vm_size_t sz, vm_memattr_t mode)
        {
                return (0);
        }
};
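
#
# Illustrative sketch (an assumption, approximated from machine/mmuvar.h):
# an MMU implementation supplies a kobj method table pairing each mmu_*
# method with its own function and registers it with MMU_DEF(). The names
# below are hypothetical and the macro's exact argument list may differ.
#
#       static mmu_method_t example_methods[] = {
#               MMUMETHOD(mmu_copy_page,        example_copy_page),
#               MMUMETHOD(mmu_enter,            example_enter),
#               { 0, 0 }
#       };
#
#       MMU_DEF(example_mmu, "example", example_methods, 0);
#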
  134 
  135 
/**
 * @brief Apply the given advice to the specified range of addresses within
 * the given pmap.  Depending on the advice, clear the referenced and/or
 * modified flags in each mapping and set the mapped page's dirty field.
 *
 * @param _pmap         physical map
 * @param _start        virtual range start
 * @param _end          virtual range end
 * @param _advice       advice to apply
 */
METHOD void advise {
        mmu_t           _mmu;
        pmap_t          _pmap;
        vm_offset_t     _start;
        vm_offset_t     _end;
        int             _advice;
};


/**
 * @brief Clear the 'modified' bit on the given physical page
 *
 * @param _pg           physical page
 */
METHOD void clear_modify {
        mmu_t           _mmu;
        vm_page_t       _pg;
};


/**
 * @brief Clear the write and modified bits in each of the given
 * physical page's mappings
 *
 * @param _pg           physical page
 */
METHOD void remove_write {
        mmu_t           _mmu;
        vm_page_t       _pg;
};


/**
 * @brief Copy the address range given by the source physical map, virtual
 * address and length to the destination physical map and virtual address.
 * This routine is optional; it defaults to a null implementation.
 *
 * @param _dst_pmap     destination physical map
 * @param _src_pmap     source physical map
 * @param _dst_addr     destination virtual address
 * @param _len          size of range
 * @param _src_addr     source virtual address
 */
METHOD void copy {
        mmu_t           _mmu;
        pmap_t          _dst_pmap;
        pmap_t          _src_pmap;
        vm_offset_t     _dst_addr;
        vm_size_t       _len;
        vm_offset_t     _src_addr;
} DEFAULT mmu_null_copy;


/**
 * @brief Copy the source physical page to the destination physical page
 *
 * @param _src          source physical page
 * @param _dst          destination physical page
 */
METHOD void copy_page {
        mmu_t           _mmu;
        vm_page_t       _src;
        vm_page_t       _dst;
};
  210 
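/**
 * @brief Copy _xfersize bytes from the page array _ma, starting at byte
 * offset _a_offset, into the page array _mb at byte offset _b_offset.
 * (This description is inferred from the argument names; the original
 * file left this method undocumented.)
 *
 * @param _ma           source physical page array
 * @param _a_offset     byte offset into the source array
 * @param _mb           destination physical page array
 * @param _b_offset     byte offset into the destination array
 * @param _xfersize     number of bytes to copy
 */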
METHOD void copy_pages {
        mmu_t           _mmu;
        vm_page_t       *_ma;
        vm_offset_t     _a_offset;
        vm_page_t       *_mb;
        vm_offset_t     _b_offset;
        int             _xfersize;
};

/**
 * @brief Create a mapping between a virtual/physical address pair in the
 * passed physical map with the specified protection and wiring
 *
 * @param _pmap         physical map
 * @param _va           mapping virtual address
 * @param _p            mapping physical page
 * @param _prot         mapping page protection
 * @param _flags        pmap_enter flags
 * @param _psind        superpage size index
 *
 * @retval KERN_SUCCESS the mapping was created
 * @retval KERN_RESOURCE_SHORTAGE needed resources were unavailable and
 *                      PMAP_ENTER_NOSLEEP was specified
 */
METHOD int enter {
        mmu_t           _mmu;
        pmap_t          _pmap;
        vm_offset_t     _va;
        vm_page_t       _p;
        vm_prot_t       _prot;
        u_int           _flags;
        int8_t          _psind;
};


/**
 * @brief Maps a sequence of resident pages belonging to the same object.
 *
 * @param _pmap         physical map
 * @param _start        virtual range start
 * @param _end          virtual range end
 * @param _m_start      physical page mapped at start
 * @param _prot         mapping page protection
 */
METHOD void enter_object {
        mmu_t           _mmu;
        pmap_t          _pmap;
        vm_offset_t     _start;
        vm_offset_t     _end;
        vm_page_t       _m_start;
        vm_prot_t       _prot;
};


/**
 * @brief A faster entry point for page mapping where it is possible
 * to short-circuit some of the tests in pmap_enter.
 *
 * @param _pmap         physical map (and also currently active pmap)
 * @param _va           mapping virtual address
 * @param _pg           mapping physical page
 * @param _prot         new page protection - used to determine whether
 *                      the page is executable
 */
METHOD void enter_quick {
        mmu_t           _mmu;
        pmap_t          _pmap;
        vm_offset_t     _va;
        vm_page_t       _pg;
        vm_prot_t       _prot;
};


/**
 * @brief Reverse map the given virtual address, returning the physical
 * page associated with the address if a mapping exists.
 *
 * @param _pmap         physical map
 * @param _va           mapping virtual address
 *
 * @retval 0            No mapping found
 * @retval addr         The mapping physical address
 */
METHOD vm_paddr_t extract {
        mmu_t           _mmu;
        pmap_t          _pmap;
        vm_offset_t     _va;
};


/**
 * @brief Reverse map the given virtual address, returning the
 * physical page if found. The page must be held (by calling
 * vm_page_hold) if the page protection matches the given protection.
 *
 * @param _pmap         physical map
 * @param _va           mapping virtual address
 * @param _prot         protection used to determine if physical page
 *                      should be locked
 *
 * @retval NULL         No mapping found
 * @retval page         Pointer to physical page. Held if protections match
 */
METHOD vm_page_t extract_and_hold {
        mmu_t           _mmu;
        pmap_t          _pmap;
        vm_offset_t     _va;
        vm_prot_t       _prot;
};


/**
 * @brief Increase kernel virtual address space to the given virtual address.
 * Not generally required on PowerPC, so this routine is optional unless the
 * MMU implementation has a use for it.
 *
 * @param _va           new upper limit for kernel virtual address space
 */
METHOD void growkernel {
        mmu_t           _mmu;
        vm_offset_t     _va;
} DEFAULT mmu_null_growkernel;


/**
 * @brief Called from vm_mem_init. Zone allocation is available at
 * this stage, so it is a convenient time to create zones. This routine is
 * for MMU-implementation convenience and is optional.
 */
METHOD void init {
        mmu_t           _mmu;
} DEFAULT mmu_null_init;


/**
 * @brief Return whether the page has been marked as modified by the
 * MMU hardware
 *
 * @param _pg           physical page to test
 *
 * @retval boolean      TRUE if page has been modified
 */
METHOD boolean_t is_modified {
        mmu_t           _mmu;
        vm_page_t       _pg;
};


/**
 * @brief Return whether the specified virtual address is a candidate to be
 * prefaulted in. This routine is optional.
 *
 * @param _pmap         physical map
 * @param _va           virtual address to test
 *
 * @retval boolean      TRUE if the address is a candidate.
 */
METHOD boolean_t is_prefaultable {
        mmu_t           _mmu;
        pmap_t          _pmap;
        vm_offset_t     _va;
} DEFAULT mmu_null_is_prefaultable;


/**
 * @brief Return whether or not the specified physical page was referenced
 * in any physical maps.
 *
 * @param _pg           physical page
 *
 * @retval boolean      TRUE if page has been referenced
 */
METHOD boolean_t is_referenced {
        mmu_t           _mmu;
        vm_page_t       _pg;
};


/**
 * @brief Return a count of referenced bits for a page, clearing those bits.
 * Not all referenced bits need to be cleared, but it is necessary that 0
 * only be returned when there are none set.
 *
 * @param _pg           physical page
 *
 * @retval int          count of referenced bits
 */
METHOD int ts_referenced {
        mmu_t           _mmu;
        vm_page_t       _pg;
};


/**
 * @brief Map the requested physical address range into kernel virtual
 * address space. The value in _virt is taken as a hint. The virtual
 * address of the range is returned, or 0 if the mapping could not
 * be created. The range can be direct-mapped if that is supported.
 *
 * @param *_virt        Hint for start virtual address, and also return
 *                      value
 * @param _start        physical address range start
 * @param _end          physical address range end
 * @param _prot         protection of range (currently ignored)
 *
 * @retval 0            could not map the area
 * @retval addr, *_virt mapping start virtual address
 */
METHOD vm_offset_t map {
        mmu_t           _mmu;
        vm_offset_t     *_virt;
        vm_paddr_t      _start;
        vm_paddr_t      _end;
        int             _prot;
};


/**
 * @brief Used to create a contiguous set of read-only mappings for a
 * given object, to try to eliminate a cascade of on-demand faults as
 * the object is accessed sequentially. This routine is optional.
 *
 * @param _pmap         physical map
 * @param _addr         mapping start virtual address
 * @param _object       device-backed VM object to be mapped
 * @param _pindex       page-index within object of mapping start
 * @param _size         size in bytes of mapping
 */
METHOD void object_init_pt {
        mmu_t           _mmu;
        pmap_t          _pmap;
        vm_offset_t     _addr;
        vm_object_t     _object;
        vm_pindex_t     _pindex;
        vm_size_t       _size;
} DEFAULT mmu_null_object_init_pt;
  442 
  443 
/**
 * @brief Used to determine if the specified page has a mapping for the
 * given physical map, by scanning the list of reverse-mappings from the
 * page. The list is scanned to a maximum of 16 entries.
 *
 * @param _pmap         physical map
 * @param _pg           physical page
 *
 * @retval bool         TRUE if the physical map was found in the first 16
 *                      reverse-map list entries off the physical page.
 */
METHOD boolean_t page_exists_quick {
        mmu_t           _mmu;
        pmap_t          _pmap;
        vm_page_t       _pg;
};


/**
 * @brief Initialise the machine-dependent section of the physical page
 * data structure. This routine is optional.
 *
 * @param _pg           physical page
 */
METHOD void page_init {
        mmu_t           _mmu;
        vm_page_t       _pg;
} DEFAULT mmu_null_page_init;


/**
 * @brief Count the number of managed mappings to the given physical
 * page that are wired.
 *
 * @param _pg           physical page
 *
 * @retval int          the number of wired, managed mappings to the
 *                      given physical page
 */
METHOD int page_wired_mappings {
        mmu_t           _mmu;
        vm_page_t       _pg;
};


/**
 * @brief Initialise a physical map data structure
 *
 * @param _pmap         physical map
 */
METHOD void pinit {
        mmu_t           _mmu;
        pmap_t          _pmap;
};


/**
 * @brief Initialise the physical map for process 0, the initial process
 * in the system.
 * XXX default to pinit ?
 *
 * @param _pmap         physical map
 */
METHOD void pinit0 {
        mmu_t           _mmu;
        pmap_t          _pmap;
};


/**
 * @brief Set the protection for physical pages in the given virtual address
 * range to the given value.
 *
 * @param _pmap         physical map
 * @param _start        virtual range start
 * @param _end          virtual range end
 * @param _prot         new page protection
 */
METHOD void protect {
        mmu_t           _mmu;
        pmap_t          _pmap;
        vm_offset_t     _start;
        vm_offset_t     _end;
        vm_prot_t       _prot;
};


/**
 * @brief Create a mapping in kernel virtual address space for the given array
 * of wired physical pages.
 *
 * @param _start        mapping virtual address start
 * @param *_pg          array of physical page pointers
 * @param _count        number of array elements
 */
METHOD void qenter {
        mmu_t           _mmu;
        vm_offset_t     _start;
        vm_page_t       *_pg;
        int             _count;
};


/**
 * @brief Remove the temporary mappings created by qenter.
 *
 * @param _start        mapping virtual address start
 * @param _count        number of pages in mapping
 */
METHOD void qremove {
        mmu_t           _mmu;
        vm_offset_t     _start;
        int             _count;
};


/**
 * @brief Release per-pmap resources, e.g. mutexes, allocated memory, etc.
 * There should be no existing mappings for the physical map at this point.
 *
 * @param _pmap         physical map
 */
METHOD void release {
        mmu_t           _mmu;
        pmap_t          _pmap;
};


/**
 * @brief Remove all mappings in the given physical map for the start/end
 * virtual address range. The range will be page-aligned.
 *
 * @param _pmap         physical map
 * @param _start        mapping virtual address start
 * @param _end          mapping virtual address end
 */
METHOD void remove {
        mmu_t           _mmu;
        pmap_t          _pmap;
        vm_offset_t     _start;
        vm_offset_t     _end;
};


/**
 * @brief Traverse the reverse-map list off the given physical page and
 * remove all mappings. Clear the PGA_WRITEABLE attribute from the page.
 *
 * @param _pg           physical page
 */
METHOD void remove_all {
        mmu_t           _mmu;
        vm_page_t       _pg;
};


/**
 * @brief Remove all mappings in the given physical map. Similar to the
 * remove method, but used when tearing down an entire address space. This
 * method is optional, since pmap_remove will be called for each valid
 * vm_map in the address space later.
 *
 * @param _pmap         physical map
 */
METHOD void remove_pages {
        mmu_t           _mmu;
        pmap_t          _pmap;
} DEFAULT mmu_null_remove_pages;
  615 
  616 
/**
 * @brief Clear the wired attribute from the mappings for the specified range
 * of addresses in the given pmap.
 *
 * @param _pmap         physical map
 * @param _start        virtual range start
 * @param _end          virtual range end
 */
METHOD void unwire {
        mmu_t           _mmu;
        pmap_t          _pmap;
        vm_offset_t     _start;
        vm_offset_t     _end;
};


/**
 * @brief Zero a physical page. It is not assumed that the page is mapped,
 * so a temporary (or direct) mapping may need to be used.
 *
 * @param _pg           physical page
 */
METHOD void zero_page {
        mmu_t           _mmu;
        vm_page_t       _pg;
};


/**
 * @brief Zero a portion of a physical page, starting at a given offset and
 * for a given size (multiples of 512 bytes for 4k pages).
 *
 * @param _pg           physical page
 * @param _off          byte offset from start of page
 * @param _size         size of area to zero
 */
METHOD void zero_page_area {
        mmu_t           _mmu;
        vm_page_t       _pg;
        int             _off;
        int             _size;
};


/**
 * @brief Extract mincore(2) information from a mapping.
 *
 * @param _pmap         physical map
 * @param _addr         page virtual address
 * @param _locked_pa    page physical address
 *
 * @retval 0            no result
 * @retval non-zero     mincore(2) flag values
 */
METHOD int mincore {
        mmu_t           _mmu;
        pmap_t          _pmap;
        vm_offset_t     _addr;
        vm_paddr_t      *_locked_pa;
} DEFAULT mmu_null_mincore;


/**
 * @brief Perform any operations required to allow a physical map to be used
 * before its address space is accessed.
 *
 * @param _td           thread associated with physical map
 */
METHOD void activate {
        mmu_t           _mmu;
        struct thread   *_td;
};

/**
 * @brief Perform any operations required to deactivate a physical map,
 * for instance as it is context-switched out.
 *
 * @param _td           thread associated with physical map
 */
METHOD void deactivate {
        mmu_t           _mmu;
        struct thread   *_td;
} DEFAULT mmu_null_deactivate;

/**
 * @brief Return a hint for the best virtual address to map a tentative
 * virtual address range in a given VM object. The default is to just
 * return the given tentative start address.
 *
 * @param _obj          VM backing object
 * @param _offset       starting offset within the VM object
 * @param _addr         initial guess at virtual address
 * @param _size         size of virtual address range
 */
METHOD void align_superpage {
        mmu_t           _mmu;
        vm_object_t     _obj;
        vm_ooffset_t    _offset;
        vm_offset_t     *_addr;
        vm_size_t       _size;
} DEFAULT mmu_null_align_superpage;




/**
 * INTERNAL INTERFACES
 */

/**
 * @brief Bootstrap the VM system. At the completion of this routine, the
 * kernel will be running in its own address space with full control over
 * paging.
 *
 * @param _start        start of reserved memory (obsolete ???)
 * @param _end          end of reserved memory (obsolete ???)
 *                      XXX I think the intent of these was to allow
 *                      the memory used by kernel text+data+bss and
 *                      loader variables/load-time kld's to be carved out
 *                      of available physical mem.
 */
METHOD void bootstrap {
        mmu_t           _mmu;
        vm_offset_t     _start;
        vm_offset_t     _end;
};
  744 
  745 /**
  746  * @brief Set up the MMU on the current CPU. Only called by the PMAP layer
  747  * for alternate CPUs on SMP systems.
  748  *
  749  * @param _ap           Set to 1 if the CPU being set up is an AP
  750  *
  751  */
  752 METHOD void cpu_bootstrap {
  753         mmu_t           _mmu;
  754         int             _ap;
  755 };
  756 
  757 
  758 /**
  759  * @brief Create a kernel mapping for a given physical address range.
  760  * Called by bus code on behalf of device drivers. The mapping does not
  761  * have to be a virtual address: it can be a direct-mapped physical address
  762  * if that is supported by the MMU.
  763  *
  764  * @param _pa           start physical address
  765  * @param _size         size in bytes of mapping
  766  *
  767  * @retval addr         address of mapping.
  768  */
  769 METHOD void * mapdev {
  770         mmu_t           _mmu;
  771         vm_paddr_t      _pa;
  772         vm_size_t       _size;
  773 };
  774 
  775 /**
  776  * @brief Create a kernel mapping for a given physical address range.
  777  * Called by bus code on behalf of device drivers. The mapping does not
  778  * have to be a virtual address: it can be a direct-mapped physical address
  779  * if that is supported by the MMU.
  780  *
  781  * @param _pa           start physical address
  782  * @param _size         size in bytes of mapping
  783  * @param _attr         cache attributes
  784  *
  785  * @retval addr         address of mapping.
  786  */
  787 METHOD void * mapdev_attr {
  788         mmu_t           _mmu;
  789         vm_paddr_t      _pa;
  790         vm_size_t       _size;
  791         vm_memattr_t    _attr;
  792 } DEFAULT mmu_null_mapdev_attr;
  793 
  794 /**
  795  * @brief Change cache control attributes for a page. Should modify all
  796  * mappings for that page.
  797  *
  798  * @param _m            page to modify
  799  * @param _ma           new cache control attributes
  800  */
  801 METHOD void page_set_memattr {
  802         mmu_t           _mmu;
  803         vm_page_t       _pg;
  804         vm_memattr_t    _ma;
  805 } DEFAULT mmu_null_page_set_memattr;
  806 
  807 /**
  808  * @brief Remove the mapping created by mapdev. Called when a driver
  809  * is unloaded.
  810  *
  811  * @param _va           Mapping address returned from mapdev
  812  * @param _size         size in bytes of mapping
  813  */
  814 METHOD void unmapdev {
  815         mmu_t           _mmu;
  816         vm_offset_t     _va;
  817         vm_size_t       _size;
  818 };
  819 
  820 /**
  821  * @brief Provide a kernel-space pointer that can be used to access the
  822  * given userland address. The kernel accessible length returned in klen
  823  * may be less than the requested length of the userland buffer (ulen). If
  824  * so, retry with a higher address to get access to the later parts of the
  825  * buffer. Returns EFAULT if no mapping can be made, else zero.
  826  *
  827  * @param _pm           PMAP for the user pointer.
  828  * @param _uaddr        Userland address to map.
  829  * @param _kaddr        Corresponding kernel address.
  830  * @param _ulen         Length of user buffer.
  831  * @param _klen         Available subset of ulen with _kaddr.
  832  */
  833 METHOD int map_user_ptr {
  834         mmu_t           _mmu;
  835         pmap_t          _pm;
  836         volatile const void *_uaddr;
  837         void            **_kaddr;
  838         size_t          _ulen;
  839         size_t          *_klen;
  840 };
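
#
# Illustrative sketch (an assumption, not part of this interface): a
# copyin()-style consumer would loop, remapping at successively higher
# addresses whenever the accessible window (klen) is shorter than what
# remains of the user buffer. As before, "mmu_obj" is a hypothetical
# handle and the local variable names are for illustration only.
#
#       while (len > 0) {
#               if (MMU_MAP_USER_PTR(mmu_obj, pm, uaddr, &kaddr, len, &klen))
#                       return (EFAULT);
#               memcpy(kdst, kaddr, klen);      /* copy the visible chunk */
#               uaddr = (const char *)uaddr + klen;
#               kdst = (char *)kdst + klen;
#               len -= klen;
#       }
#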
  841 
/**
 * @brief Decode a kernel pointer, as visible to the current thread,
 * determining whether it corresponds to a user or kernel address and
 * the address within the respective memory map to which it corresponds.
 * This is essentially the inverse of MMU_MAP_USER_PTR() above and is
 * used in kernel-space fault handling. Returns 0 on success or EFAULT
 * if the address could not be mapped.
 *
 * @param addr          kernel-visible address to decode
 * @param is_user       set to 1 if the address is a user address, else 0
 * @param decoded_addr  the corresponding address in the resolved map
 */
METHOD int decode_kernel_ptr {
        mmu_t           _mmu;
        vm_offset_t     addr;
        int             *is_user;
        vm_offset_t     *decoded_addr;
};

/**
 * @brief Reverse-map a kernel virtual address
 *
 * @param _va           kernel virtual address to reverse-map
 *
 * @retval pa           physical address corresponding to mapping
 */
METHOD vm_paddr_t kextract {
        mmu_t           _mmu;
        vm_offset_t     _va;
};


/**
 * @brief Map a wired page into kernel virtual address space
 *
 * @param _va           mapping virtual address
 * @param _pa           mapping physical address
 */
METHOD void kenter {
        mmu_t           _mmu;
        vm_offset_t     _va;
        vm_paddr_t      _pa;
};

/**
 * @brief Map a wired page into kernel virtual address space, applying
 * the given cache control attributes
 *
 * @param _va           mapping virtual address
 * @param _pa           mapping physical address
 * @param _ma           mapping cache control attributes
 */
METHOD void kenter_attr {
        mmu_t           _mmu;
        vm_offset_t     _va;
        vm_paddr_t      _pa;
        vm_memattr_t    _ma;
} DEFAULT mmu_null_kenter_attr;

/**
 * @brief Unmap a wired page from kernel virtual address space
 *
 * @param _va           mapped virtual address
 */
METHOD void kremove {
        mmu_t           _mmu;
        vm_offset_t     _va;
};

/**
 * @brief Determine if the given physical address range has been direct-mapped.
 *
 * @param _pa           physical address start
 * @param _size         physical address range size
 *
 * @retval bool         TRUE if the range is direct-mapped.
 */
METHOD boolean_t dev_direct_mapped {
        mmu_t           _mmu;
        vm_paddr_t      _pa;
        vm_size_t       _size;
};


/**
 * @brief Enforce instruction cache coherency. Typically called after a
 * region of memory has been modified and before execution of or within
 * that region is attempted. Setting breakpoints in a process through
 * ptrace(2) is one example of when the instruction cache needs to be
 * made coherent.
 *
 * @param _pm           the physical map of the virtual address
 * @param _va           the virtual address of the modified region
 * @param _sz           the size of the modified region
 */
METHOD void sync_icache {
        mmu_t           _mmu;
        pmap_t          _pm;
        vm_offset_t     _va;
        vm_size_t       _sz;
};
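
#
# Illustrative sketch (an assumption, not part of this interface): a
# debugger path that has just patched instructions in another process
# would flush the icache for the patched range through the MD wrapper:
#
#       pmap_sync_icache(vmspace_pmap(p->p_vmspace), va, sz);
#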
  938 
  939 
/**
 * @brief Create a temporary memory mapping for use by dumpsys().
 *
 * @param _pa           The physical page to map.
 * @param _sz           The requested size of the mapping.
 * @param _va           Returned virtual address of the mapping.
 */
METHOD void dumpsys_map {
        mmu_t           _mmu;
        vm_paddr_t      _pa;
        size_t          _sz;
        void            **_va;
};


/**
 * @brief Remove a temporary dumpsys() mapping.
 *
 * @param _pa           The physical page that was mapped.
 * @param _sz           The requested size of the mapping.
 * @param _va           The virtual address of the mapping.
 */
METHOD void dumpsys_unmap {
        mmu_t           _mmu;
        vm_paddr_t      _pa;
        size_t          _sz;
        void            *_va;
};


/**
 * @brief Initialize memory chunks for dumpsys.
 */
METHOD void scan_init {
        mmu_t           _mmu;
};

/**
 * @brief Create a temporary thread-local KVA mapping of a single page.
 *
 * @param _pg           The physical page to map
 *
 * @retval addr         The temporary KVA
 */
METHOD vm_offset_t quick_enter_page {
        mmu_t           _mmu;
        vm_page_t       _pg;
};

/**
 * @brief Undo a mapping created by quick_enter_page.
 *
 * @param _va           The mapped KVA
 */
METHOD void quick_remove_page {
        mmu_t           _mmu;
        vm_offset_t     _va;
};
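
#
# Illustrative sketch (an assumption, not part of this interface): a
# typical use of the quick-mapping pair is a short, non-sleeping access
# to an arbitrary physical page, e.g. zeroing a few bytes. "mmu_obj" is
# the same hypothetical handle as in the earlier sketches.
#
#       vm_offset_t va;
#
#       va = MMU_QUICK_ENTER_PAGE(mmu_obj, m);
#       bzero((void *)(va + off), len);
#       MMU_QUICK_REMOVE_PAGE(mmu_obj, va);
#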
  998 
/**
 * @brief Change the specified virtual address range's memory type.
 *
 * @param _va           The virtual base address to change
 * @param _sz           Size of the region to change
 * @param _mode         New mode to set on the VA range
 *
 * @retval error        0 on success, EINVAL or ENOMEM on error.
 */
METHOD int change_attr {
        mmu_t           _mmu;
        vm_offset_t     _va;
        vm_size_t       _sz;
        vm_memattr_t    _mode;
} DEFAULT mmu_null_change_attr;
 1016 
