#-
# Copyright (c) 2005 Peter Grehan
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
#    notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
#    notice, this list of conditions and the following disclaimer in the
#    documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
#
# $FreeBSD: releng/9.0/sys/powerpc/powerpc/mmu_if.m 225418 2011-09-06 10:30:11Z kib $
#

#include <sys/param.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/systm.h>

#include <vm/vm.h>
#include <vm/vm_page.h>

#include <machine/mmuvar.h>

/**
 * @defgroup MMU mmu - KObj methods for PowerPC MMU implementations
 * @brief A set of methods required by all MMU implementations. These
 * are essentially direct call-throughs from the machine-dependent
 * pmap code.
 * Thanks to Bruce M Simpson's pmap man pages for routine descriptions.
 *@{
 */

INTERFACE mmu;
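
#
# The METHOD declarations below are compiled by makeobjops.awk into
# mmu_if.h/mmu_if.c, producing one typed dispatch wrapper per method.
# As a rough sketch of the generated glue (abbreviated; the real code
# also caches method lookups), "METHOD vm_paddr_t extract" expands to
# approximately:
#
#	static __inline vm_paddr_t MMU_EXTRACT(mmu_t _mmu, pmap_t _pmap,
#	    vm_offset_t _va)
#	{
#		kobjop_t _m;
#
#		KOBJOPLOOKUP(((kobj_t)_mmu)->ops, mmu_extract);
#		return (((mmu_extract_t *)_m)(_mmu, _pmap, _va));
#	}
#
# The machine-dependent pmap code calls these MMU_*() wrappers rather
# than any particular MMU implementation directly.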

#
# Default implementations of some methods
#
CODE {
        static void mmu_null_copy(mmu_t mmu, pmap_t dst_pmap, pmap_t src_pmap,
            vm_offset_t dst_addr, vm_size_t len, vm_offset_t src_addr)
        {
                return;
        }

        static void mmu_null_growkernel(mmu_t mmu, vm_offset_t addr)
        {
                return;
        }

        static void mmu_null_init(mmu_t mmu)
        {
                return;
        }

        static boolean_t mmu_null_is_prefaultable(mmu_t mmu, pmap_t pmap,
            vm_offset_t va)
        {
                return (FALSE);
        }

        static void mmu_null_object_init_pt(mmu_t mmu, pmap_t pmap,
            vm_offset_t addr, vm_object_t object, vm_pindex_t index,
            vm_size_t size)
        {
                return;
        }

        static void mmu_null_page_init(mmu_t mmu, vm_page_t m)
        {
                return;
        }

        static void mmu_null_remove_pages(mmu_t mmu, pmap_t pmap)
        {
                return;
        }

        static int mmu_null_mincore(mmu_t mmu, pmap_t pmap, vm_offset_t addr,
            vm_paddr_t *locked_pa)
        {
                return (0);
        }

        static void mmu_null_deactivate(struct thread *td)
        {
                return;
        }

        static void mmu_null_align_superpage(mmu_t mmu, vm_object_t object,
            vm_ooffset_t offset, vm_offset_t *addr, vm_size_t size)
        {
                return;
        }

        static struct pmap_md *mmu_null_scan_md(mmu_t mmu, struct pmap_md *p)
        {
                return (NULL);
        }

        static void *mmu_null_mapdev_attr(mmu_t mmu, vm_offset_t pa,
            vm_size_t size, vm_memattr_t ma)
        {
                return (MMU_MAPDEV(mmu, pa, size));
        }

        static void mmu_null_kenter_attr(mmu_t mmu, vm_offset_t va,
            vm_offset_t pa, vm_memattr_t ma)
        {
                MMU_KENTER(mmu, va, pa);
        }

        static void mmu_null_page_set_memattr(mmu_t mmu, vm_page_t m,
            vm_memattr_t ma)
        {
                return;
        }
};
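
#
# A concrete MMU implementation provides functions for these methods,
# lists them in a method table, and registers the table with the
# MMU_DEF() macro from <machine/mmuvar.h>. A minimal sketch, where the
# "example" names are hypothetical:
#
#	static mmu_method_t mmu_example_methods[] = {
#		MMUMETHOD(mmu_enter,	mmu_example_enter),
#		MMUMETHOD(mmu_extract,	mmu_example_extract),
#		/* ... one MMUMETHOD() entry per implemented method ... */
#		{ 0, 0 }
#	};
#
#	MMU_DEF(example_mmu, "mmu_example", mmu_example_methods, 0);
#
# Methods omitted from the table fall back to the DEFAULT
# implementation named in the METHOD definition below, where one is
# given.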


/**
 * @brief Change the wiring attribute for the page in the given physical
 * map and virtual address.
 *
 * @param _pmap         physical map of page
 * @param _va           page virtual address
 * @param _wired        TRUE to increment wired count, FALSE to decrement
 */
METHOD void change_wiring {
        mmu_t           _mmu;
        pmap_t          _pmap;
        vm_offset_t     _va;
        boolean_t       _wired;
};


/**
 * @brief Clear the 'modified' bit on the given physical page
 *
 * @param _pg           physical page
 */
METHOD void clear_modify {
        mmu_t           _mmu;
        vm_page_t       _pg;
};


/**
 * @brief Clear the 'referenced' bit on the given physical page
 *
 * @param _pg           physical page
 */
METHOD void clear_reference {
        mmu_t           _mmu;
        vm_page_t       _pg;
};


/**
 * @brief Clear the write and modified bits in each of the given
 * physical page's mappings
 *
 * @param _pg           physical page
 */
METHOD void remove_write {
        mmu_t           _mmu;
        vm_page_t       _pg;
};


/**
 * @brief Copy the address range given by the source physical map, virtual
 * address and length to the destination physical map and virtual address.
 * This routine is optional; a null default implementation is provided.
 *
 * @param _dst_pmap     destination physical map
 * @param _src_pmap     source physical map
 * @param _dst_addr     destination virtual address
 * @param _len          size of range
 * @param _src_addr     source virtual address
 */
METHOD void copy {
        mmu_t           _mmu;
        pmap_t          _dst_pmap;
        pmap_t          _src_pmap;
        vm_offset_t     _dst_addr;
        vm_size_t       _len;
        vm_offset_t     _src_addr;
} DEFAULT mmu_null_copy;


/**
 * @brief Copy the source physical page to the destination physical page
 *
 * @param _src          source physical page
 * @param _dst          destination physical page
 */
METHOD void copy_page {
        mmu_t           _mmu;
        vm_page_t       _src;
        vm_page_t       _dst;
};


/**
 * @brief Create a mapping between a virtual/physical address pair in the
 * passed physical map with the specified protection and wiring
 *
 * @param _pmap         physical map
 * @param _va           mapping virtual address
 * @param _p            mapping physical page
 * @param _prot         mapping page protection
 * @param _wired        TRUE if page will be wired
 */
METHOD void enter {
        mmu_t           _mmu;
        pmap_t          _pmap;
        vm_offset_t     _va;
        vm_page_t       _p;
        vm_prot_t       _prot;
        boolean_t       _wired;
};


/**
 * @brief Maps a sequence of resident pages belonging to the same object.
 *
 * @param _pmap         physical map
 * @param _start        virtual range start
 * @param _end          virtual range end
 * @param _m_start      physical page mapped at start
 * @param _prot         mapping page protection
 */
METHOD void enter_object {
        mmu_t           _mmu;
        pmap_t          _pmap;
        vm_offset_t     _start;
        vm_offset_t     _end;
        vm_page_t       _m_start;
        vm_prot_t       _prot;
};


/**
 * @brief A faster entry point for page mapping where it is possible
 * to short-circuit some of the tests in pmap_enter.
 *
 * @param _pmap         physical map (and also currently active pmap)
 * @param _va           mapping virtual address
 * @param _pg           mapping physical page
 * @param _prot         new page protection - used to see if page is exec.
 */
METHOD void enter_quick {
        mmu_t           _mmu;
        pmap_t          _pmap;
        vm_offset_t     _va;
        vm_page_t       _pg;
        vm_prot_t       _prot;
};


/**
 * @brief Reverse map the given virtual address, returning the physical
 * page associated with the address if a mapping exists.
 *
 * @param _pmap         physical map
 * @param _va           mapping virtual address
 *
 * @retval 0            No mapping found
 * @retval addr         The mapping physical address
 */
METHOD vm_paddr_t extract {
        mmu_t           _mmu;
        pmap_t          _pmap;
        vm_offset_t     _va;
};


/**
 * @brief Reverse map the given virtual address, returning the
 * physical page if found. The page must be held (by calling
 * vm_page_hold) if the page protection matches the given protection
 *
 * @param _pmap         physical map
 * @param _va           mapping virtual address
 * @param _prot         protection used to determine if physical page
 *                      should be locked
 *
 * @retval NULL         No mapping found
 * @retval page         Pointer to physical page. Held if protections match
 */
METHOD vm_page_t extract_and_hold {
        mmu_t           _mmu;
        pmap_t          _pmap;
        vm_offset_t     _va;
        vm_prot_t       _prot;
};
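
#
# For illustration: the machine-dependent pmap layer forwards each
# pmap(9) entry point to the corresponding method on the active MMU
# object. A sketch of the dispatch for extract (mmu_obj being the kobj
# instance selected at boot):
#
#	vm_paddr_t
#	pmap_extract(pmap_t pmap, vm_offset_t va)
#	{
#
#		return (MMU_EXTRACT(mmu_obj, pmap, va));
#	}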


/**
 * @brief Increase kernel virtual address space to the given virtual address.
 * Not really required for PowerPC, so optional unless the MMU implementation
 * can use it.
 *
 * @param _va           new upper limit for kernel virtual address space
 */
METHOD void growkernel {
        mmu_t           _mmu;
        vm_offset_t     _va;
} DEFAULT mmu_null_growkernel;


/**
 * @brief Called from vm_mem_init. Zone allocation is available at
 * this stage, so this is a convenient time to create zones. This routine
 * is for MMU-implementation convenience and is optional.
 */
METHOD void init {
        mmu_t           _mmu;
} DEFAULT mmu_null_init;


/**
 * @brief Return whether the page has been marked by MMU hardware as
 * having been modified
 *
 * @param _pg           physical page to test
 *
 * @retval boolean      TRUE if page has been modified
 */
METHOD boolean_t is_modified {
        mmu_t           _mmu;
        vm_page_t       _pg;
};


/**
 * @brief Return whether the specified virtual address is a candidate to be
 * prefaulted in. This routine is optional.
 *
 * @param _pmap         physical map
 * @param _va           virtual address to test
 *
 * @retval boolean      TRUE if the address is a candidate.
 */
METHOD boolean_t is_prefaultable {
        mmu_t           _mmu;
        pmap_t          _pmap;
        vm_offset_t     _va;
} DEFAULT mmu_null_is_prefaultable;


/**
 * @brief Return whether or not the specified physical page was referenced
 * in any physical maps.
 *
 * @param _pg           physical page
 *
 * @retval boolean      TRUE if page has been referenced
 */
METHOD boolean_t is_referenced {
        mmu_t           _mmu;
        vm_page_t       _pg;
};


/**
 * @brief Return a count of referenced bits for a page, clearing those bits.
 * Not all referenced bits need to be cleared, but it is necessary that 0
 * only be returned when there are none set.
 *
 * @param _pg           physical page
 *
 * @retval int          count of referenced bits
 */
METHOD int ts_referenced {
        mmu_t           _mmu;
        vm_page_t       _pg;
};


/**
 * @brief Map the requested physical address range into kernel virtual
 * address space. The value in _virt is taken as a hint. The virtual
 * address of the range is returned, or NULL if the mapping could not
 * be created. The range can be direct-mapped if that is supported.
 *
 * @param *_virt        Hint for start virtual address, and also return
 *                      value
 * @param _start        physical address range start
 * @param _end          physical address range end
 * @param _prot         protection of range (currently ignored)
 *
 * @retval NULL         could not map the area
 * @retval addr, *_virt mapping start virtual address
 */
METHOD vm_offset_t map {
        mmu_t           _mmu;
        vm_offset_t     *_virt;
        vm_paddr_t      _start;
        vm_paddr_t      _end;
        int             _prot;
};


/**
 * @brief Used to create a contiguous set of read-only mappings for a
 * given object to try and eliminate a cascade of on-demand faults as
 * the object is accessed sequentially. This routine is optional.
 *
 * @param _pmap         physical map
 * @param _addr         mapping start virtual address
 * @param _object       device-backed V.M. object to be mapped
 * @param _pindex       page-index within object of mapping start
 * @param _size         size in bytes of mapping
 */
METHOD void object_init_pt {
        mmu_t           _mmu;
        pmap_t          _pmap;
        vm_offset_t     _addr;
        vm_object_t     _object;
        vm_pindex_t     _pindex;
        vm_size_t       _size;
} DEFAULT mmu_null_object_init_pt;


/**
 * @brief Used to determine if the specified page has a mapping for the
 * given physical map, by scanning the list of reverse-mappings from the
 * page. The list is scanned to a maximum of 16 entries.
 *
 * @param _pmap         physical map
 * @param _pg           physical page
 *
 * @retval bool         TRUE if the physical map was found in the first 16
 *                      reverse-map list entries off the physical page.
 */
METHOD boolean_t page_exists_quick {
        mmu_t           _mmu;
        pmap_t          _pmap;
        vm_page_t       _pg;
};


/**
 * @brief Initialise the machine-dependent section of the physical page
 * data structure. This routine is optional.
 *
 * @param _pg           physical page
 */
METHOD void page_init {
        mmu_t           _mmu;
        vm_page_t       _pg;
} DEFAULT mmu_null_page_init;


/**
 * @brief Count the number of managed mappings to the given physical
 * page that are wired.
 *
 * @param _pg           physical page
 *
 * @retval int          the number of wired, managed mappings to the
 *                      given physical page
 */
METHOD int page_wired_mappings {
        mmu_t           _mmu;
        vm_page_t       _pg;
};


/**
 * @brief Initialise a physical map data structure
 *
 * @param _pmap         physical map
 */
METHOD void pinit {
        mmu_t           _mmu;
        pmap_t          _pmap;
};


/**
 * @brief Initialise the physical map for process 0, the initial process
 * in the system.
 * XXX default to pinit ?
 *
 * @param _pmap         physical map
 */
METHOD void pinit0 {
        mmu_t           _mmu;
        pmap_t          _pmap;
};


/**
 * @brief Set the protection for physical pages in the given virtual address
 * range to the given value.
 *
 * @param _pmap         physical map
 * @param _start        virtual range start
 * @param _end          virtual range end
 * @param _prot         new page protection
 */
METHOD void protect {
        mmu_t           _mmu;
        pmap_t          _pmap;
        vm_offset_t     _start;
        vm_offset_t     _end;
        vm_prot_t       _prot;
};


/**
 * @brief Create a mapping in kernel virtual address space for the given array
 * of wired physical pages.
 *
 * @param _start        mapping virtual address start
 * @param *_pg          array of physical page pointers
 * @param _count        number of pages in the array
 */
METHOD void qenter {
        mmu_t           _mmu;
        vm_offset_t     _start;
        vm_page_t       *_pg;
        int             _count;
};


/**
 * @brief Remove the temporary mappings created by qenter.
 *
 * @param _start        mapping virtual address start
 * @param _count        number of pages in mapping
 */
METHOD void qremove {
        mmu_t           _mmu;
        vm_offset_t     _start;
        int             _count;
};
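
#
# qenter and qremove back the machine-independent pmap_qenter(9) and
# pmap_qremove(9) KPI. A sketched use, mapping an array of wired pages
# into a preallocated KVA window (kva and NPAGES are illustrative):
#
#	vm_page_t m[NPAGES];
#	vm_offset_t kva;
#
#	pmap_qenter(kva, m, NPAGES);	/* dispatches to qenter */
#	/* ... access page contents through kva ... */
#	pmap_qremove(kva, NPAGES);	/* dispatches to qremove */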


/**
 * @brief Release per-pmap resources, e.g. mutexes, allocated memory etc.
 * There should be no existing mappings for the physical map at this point.
 *
 * @param _pmap         physical map
 */
METHOD void release {
        mmu_t           _mmu;
        pmap_t          _pmap;
};


/**
 * @brief Remove all mappings in the given physical map for the start/end
 * virtual address range. The range will be page-aligned.
 *
 * @param _pmap         physical map
 * @param _start        mapping virtual address start
 * @param _end          mapping virtual address end
 */
METHOD void remove {
        mmu_t           _mmu;
        pmap_t          _pmap;
        vm_offset_t     _start;
        vm_offset_t     _end;
};


/**
 * @brief Traverse the reverse-map list off the given physical page and
 * remove all mappings. Clear the PGA_WRITEABLE attribute from the page.
 *
 * @param _pg           physical page
 */
METHOD void remove_all {
        mmu_t           _mmu;
        vm_page_t       _pg;
};


/**
 * @brief Remove all mappings in the given physical map. Similar to the
 * remove method, but used when tearing down all mappings in an address
 * space. This method is optional, since pmap_remove will be called for
 * each valid vm_map in the address space later.
 *
 * @param _pmap         physical map
 */
METHOD void remove_pages {
        mmu_t           _mmu;
        pmap_t          _pmap;
} DEFAULT mmu_null_remove_pages;


/**
 * @brief Zero a physical page. It is not assumed that the page is mapped,
 * so a temporary (or direct) mapping may need to be used.
 *
 * @param _pg           physical page
 */
METHOD void zero_page {
        mmu_t           _mmu;
        vm_page_t       _pg;
};


/**
 * @brief Zero a portion of a physical page, starting at a given offset and
 * for a given size (multiples of 512 bytes for 4k pages).
 *
 * @param _pg           physical page
 * @param _off          byte offset from start of page
 * @param _size         size of area to zero
 */
METHOD void zero_page_area {
        mmu_t           _mmu;
        vm_page_t       _pg;
        int             _off;
        int             _size;
};


/**
 * @brief Called from the idle loop to zero pages. XXX I think locking
 * constraints might be different here compared to zero_page.
 *
 * @param _pg           physical page
 */
METHOD void zero_page_idle {
        mmu_t           _mmu;
        vm_page_t       _pg;
};


/**
 * @brief Extract mincore(2) information from a mapping.
 *
 * @param _pmap         physical map
 * @param _addr         page virtual address
 * @param _locked_pa    page physical address
 *
 * @retval 0            no result
 * @retval non-zero     mincore(2) flag values
 */
METHOD int mincore {
        mmu_t           _mmu;
        pmap_t          _pmap;
        vm_offset_t     _addr;
        vm_paddr_t      *_locked_pa;
} DEFAULT mmu_null_mincore;


/**
 * @brief Perform any operations required to allow a physical map to be used
 * before its address space is accessed.
 *
 * @param _td           thread associated with physical map
 */
METHOD void activate {
        mmu_t           _mmu;
        struct thread   *_td;
};

/**
 * @brief Perform any operations required to deactivate a physical map,
 * for instance as it is context-switched out.
 *
 * @param _td           thread associated with physical map
 */
METHOD void deactivate {
        mmu_t           _mmu;
        struct thread   *_td;
} DEFAULT mmu_null_deactivate;

/**
 * @brief Return a hint for the best virtual address to map a tentative
 * virtual address range in a given VM object. The default is to just
 * return the given tentative start address.
 *
 * @param _obj          VM backing object
 * @param _offset       starting offset within the VM object
 * @param _addr         initial guess at virtual address
 * @param _size         size of virtual address range
 */
METHOD void align_superpage {
        mmu_t           _mmu;
        vm_object_t     _obj;
        vm_ooffset_t    _offset;
        vm_offset_t     *_addr;
        vm_size_t       _size;
} DEFAULT mmu_null_align_superpage;
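
#
# A non-null align_superpage implementation typically adjusts *addr so
# that the virtual address and the object offset share the same offset
# within a superpage, which keeps later superpage promotion possible.
# A sketch, with SP_SIZE/SP_MASK standing in for the implementation's
# superpage size (hypothetical names):
#
#	vm_offset_t sp_offset = offset & SP_MASK;
#
#	if (size < SP_SIZE || (*addr & SP_MASK) == sp_offset)
#		return;
#	if ((*addr & SP_MASK) < sp_offset)
#		*addr = (*addr & ~SP_MASK) + sp_offset;
#	else
#		*addr = ((*addr + SP_MASK) & ~SP_MASK) + sp_offset;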




/**
 * INTERNAL INTERFACES
 */

/**
 * @brief Bootstrap the VM system. At the completion of this routine, the
 * kernel will be running in its own address space with full control over
 * paging.
 *
 * @param _start        start of reserved memory (obsolete ???)
 * @param _end          end of reserved memory (obsolete ???)
 *                      XXX I think the intent of these was to allow
 *                      the memory used by kernel text+data+bss and
 *                      loader variables/load-time kld's to be carved out
 *                      of available physical mem.
 *
 */
METHOD void bootstrap {
        mmu_t           _mmu;
        vm_offset_t     _start;
        vm_offset_t     _end;
};

/**
 * @brief Set up the MMU on the current CPU. Only called by the PMAP layer
 * for alternate CPUs on SMP systems.
 *
 * @param _ap           Set to 1 if the CPU being set up is an AP
 *
 */
METHOD void cpu_bootstrap {
        mmu_t           _mmu;
        int             _ap;
};


/**
 * @brief Create a kernel mapping for a given physical address range.
 * Called by bus code on behalf of device drivers. The mapping does not
 * have to be a virtual address: it can be a direct-mapped physical address
 * if that is supported by the MMU.
 *
 * @param _pa           start physical address
 * @param _size         size in bytes of mapping
 *
 * @retval addr         address of mapping.
 */
METHOD void * mapdev {
        mmu_t           _mmu;
        vm_offset_t     _pa;
        vm_size_t       _size;
};

/**
 * @brief Create a kernel mapping for a given physical address range.
 * Called by bus code on behalf of device drivers. The mapping does not
 * have to be a virtual address: it can be a direct-mapped physical address
 * if that is supported by the MMU.
 *
 * @param _pa           start physical address
 * @param _size         size in bytes of mapping
 * @param _attr         cache attributes
 *
 * @retval addr         address of mapping.
 */
METHOD void * mapdev_attr {
        mmu_t           _mmu;
        vm_offset_t     _pa;
        vm_size_t       _size;
        vm_memattr_t    _attr;
} DEFAULT mmu_null_mapdev_attr;

/**
 * @brief Change cache control attributes for a page. Should modify all
 * mappings for that page.
 *
 * @param _pg           page to modify
 * @param _ma           new cache control attributes
 */
METHOD void page_set_memattr {
        mmu_t           _mmu;
        vm_page_t       _pg;
        vm_memattr_t    _ma;
} DEFAULT mmu_null_page_set_memattr;

/**
 * @brief Remove the mapping created by mapdev. Called when a driver
 * is unloaded.
 *
 * @param _va           Mapping address returned from mapdev
 * @param _size         size in bytes of mapping
 */
METHOD void unmapdev {
        mmu_t           _mmu;
        vm_offset_t     _va;
        vm_size_t       _size;
};
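
#
# mapdev and unmapdev implement pmap_mapdev() and pmap_unmapdev(), which
# bus and driver code use to reach device memory. A sketched use for a
# device register window (REG_PA and REG_SIZE are hypothetical):
#
#	void *regs;
#
#	regs = pmap_mapdev(REG_PA, REG_SIZE);
#	/* ... read/write device registers through regs ... */
#	pmap_unmapdev((vm_offset_t)regs, REG_SIZE);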


/**
 * @brief Reverse-map a kernel virtual address
 *
 * @param _va           kernel virtual address to reverse-map
 *
 * @retval pa           physical address corresponding to mapping
 */
METHOD vm_offset_t kextract {
        mmu_t           _mmu;
        vm_offset_t     _va;
};


/**
 * @brief Map a wired page into kernel virtual address space
 *
 * @param _va           mapping virtual address
 * @param _pa           mapping physical address
 */
METHOD void kenter {
        mmu_t           _mmu;
        vm_offset_t     _va;
        vm_offset_t     _pa;
};

/**
 * @brief Map a wired page into kernel virtual address space
 *
 * @param _va           mapping virtual address
 * @param _pa           mapping physical address
 * @param _ma           mapping cache control attributes
 */
METHOD void kenter_attr {
        mmu_t           _mmu;
        vm_offset_t     _va;
        vm_offset_t     _pa;
        vm_memattr_t    _ma;
} DEFAULT mmu_null_kenter_attr;

/**
 * @brief Determine if the given physical address range has been direct-mapped.
 *
 * @param _pa           physical address start
 * @param _size         physical address range size
 *
 * @retval bool         TRUE if the range is direct-mapped.
 */
METHOD boolean_t dev_direct_mapped {
        mmu_t           _mmu;
        vm_offset_t     _pa;
        vm_size_t       _size;
};


/**
 * @brief Enforce instruction cache coherency. Typically called after a
 * region of memory has been modified and before execution of or within
 * that region is attempted. Setting breakpoints in a process through
 * ptrace(2) is one example of when the instruction cache needs to be
 * made coherent.
 *
 * @param _pm           the physical map of the virtual address
 * @param _va           the virtual address of the modified region
 * @param _sz           the size of the modified region
 */
METHOD void sync_icache {
        mmu_t           _mmu;
        pmap_t          _pm;
        vm_offset_t     _va;
        vm_size_t       _sz;
};
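
#
# As a sketch of the ptrace(2) case above: after debugger support code
# patches a breakpoint instruction into a traced process p, it must push
# the change through to instruction fetch before the process runs:
#
#	/* va: address of the patched instruction in p's address space */
#	pmap_sync_icache(vmspace_pmap(p->p_vmspace), va, sizeof(int));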


/**
 * @brief Create temporary memory mapping for use by dumpsys().
 *
 * @param _md           The memory chunk in which the mapping lies.
 * @param _ofs          The offset within the chunk of the mapping.
 * @param _sz           The requested size of the mapping.
 *
 * @retval vm_offset_t  The virtual address of the mapping.
 *
 * The sz argument is modified to reflect the actual size of the
 * mapping.
 */
METHOD vm_offset_t dumpsys_map {
        mmu_t           _mmu;
        struct pmap_md  *_md;
        vm_size_t       _ofs;
        vm_size_t       *_sz;
};


/**
 * @brief Remove temporary dumpsys() mapping.
 *
 * @param _md           The memory chunk in which the mapping lies.
 * @param _ofs          The offset within the chunk of the mapping.
 * @param _va           The virtual address of the mapping.
 */
METHOD void dumpsys_unmap {
        mmu_t           _mmu;
        struct pmap_md  *_md;
        vm_size_t       _ofs;
        vm_offset_t     _va;
};


/**
 * @brief Scan/iterate memory chunks.
 *
 * @param _prev         The previously returned chunk or NULL.
 *
 * @retval              The next (or first when _prev is NULL) chunk.
 */
METHOD struct pmap_md * scan_md {
        mmu_t           _mmu;
        struct pmap_md  *_prev;
} DEFAULT mmu_null_scan_md;
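
#
# Together, scan_md, dumpsys_map and dumpsys_unmap let the kernel dump
# code walk physical memory chunks without assuming a direct map. A
# sketched dump loop (error handling omitted; the md_size field name is
# assumed from the MMU implementation's struct pmap_md):
#
#	struct pmap_md *md;
#	vm_offset_t va;
#	vm_size_t ofs, sz;
#
#	for (md = pmap_scan_md(NULL); md != NULL; md = pmap_scan_md(md)) {
#		for (ofs = 0; ofs < md->md_size; ofs += sz) {
#			sz = md->md_size - ofs;
#			va = pmap_dumpsys_map(md, ofs, &sz);
#			/* write [va, va + sz) to the dump device */
#			pmap_dumpsys_unmap(md, ofs, va);
#		}
#	}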
