FreeBSD/Linux Kernel Cross Reference
sys/powerpc/powerpc/mmu_if.m


#-
# Copyright (c) 2005 Peter Grehan
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
#    notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
#    notice, this list of conditions and the following disclaimer in the
#    documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
#
# $FreeBSD: releng/8.2/sys/powerpc/powerpc/mmu_if.m 213979 2010-10-17 15:22:59Z nwhitehorn $
#

#include <sys/param.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/systm.h>

#include <vm/vm.h>
#include <vm/vm_page.h>

#include <machine/mmuvar.h>

/**
 * @defgroup MMU mmu - KObj methods for PowerPC MMU implementations
 * @brief A set of methods required by all MMU implementations. These
 * are essentially direct call-throughs from the machine-dependent pmap
 * code.
 * Thanks to Bruce M Simpson's pmap man pages for the routine descriptions.
 *@{
 */

INTERFACE mmu;
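
/**
 * A minimal sketch of how this interface is consumed (assuming the
 * mmu_obj handle and wrapper functions set up in pmap_dispatch.c):
 * each machine-dependent pmap entry point reduces to a KObj call on
 * the active MMU object, for example:
 *
 *        vm_paddr_t
 *        pmap_extract(pmap_t pmap, vm_offset_t va)
 *        {
 *
 *                return (MMU_EXTRACT(mmu_obj, pmap, va));
 *        }
 */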

#
# Default implementations of some methods
#
CODE {
        static void mmu_null_copy(mmu_t mmu, pmap_t dst_pmap, pmap_t src_pmap,
            vm_offset_t dst_addr, vm_size_t len, vm_offset_t src_addr)
        {
                return;
        }

        static void mmu_null_growkernel(mmu_t mmu, vm_offset_t addr)
        {
                return;
        }

        static void mmu_null_init(mmu_t mmu)
        {
                return;
        }

        static boolean_t mmu_null_is_prefaultable(mmu_t mmu, pmap_t pmap,
            vm_offset_t va)
        {
                return (FALSE);
        }

        static void mmu_null_object_init_pt(mmu_t mmu, pmap_t pmap,
            vm_offset_t addr, vm_object_t object, vm_pindex_t index,
            vm_size_t size)
        {
                return;
        }

        static void mmu_null_page_init(mmu_t mmu, vm_page_t m)
        {
                return;
        }

        static void mmu_null_remove_pages(mmu_t mmu, pmap_t pmap)
        {
                return;
        }

        static int mmu_null_mincore(mmu_t mmu, pmap_t pmap, vm_offset_t addr)
        {
                return (0);
        }

        static void mmu_null_deactivate(mmu_t mmu, struct thread *td)
        {
                return;
        }

        static void mmu_null_align_superpage(mmu_t mmu, vm_object_t object,
            vm_ooffset_t offset, vm_offset_t *addr, vm_size_t size)
        {
                return;
        }

        static struct pmap_md *mmu_null_scan_md(mmu_t mmu, struct pmap_md *p)
        {
                return (NULL);
        }

        static void *mmu_null_mapdev_attr(mmu_t mmu, vm_offset_t pa,
            vm_size_t size, vm_memattr_t ma)
        {
                return (MMU_MAPDEV(mmu, pa, size));
        }

        static void mmu_null_kenter_attr(mmu_t mmu, vm_offset_t va,
            vm_offset_t pa, vm_memattr_t ma)
        {
                MMU_KENTER(mmu, va, pa);
        }

        static void mmu_null_page_set_memattr(mmu_t mmu, vm_page_t m,
            vm_memattr_t ma)
        {
                return;
        }
};
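
/**
 * A minimal sketch of how an MMU implementation binds these methods
 * (modeled loosely on the AIM MMU in mmu_oea.c; the method table below
 * is abbreviated for illustration): each routine is attached with
 * MMUMETHOD() and the table is registered with MMU_DEF(), both from
 * <machine/mmuvar.h>.
 *
 *        static mmu_method_t moea_methods[] = {
 *                MMUMETHOD(mmu_enter,    moea_enter),
 *                MMUMETHOD(mmu_extract,  moea_extract),
 *                MMUMETHOD(mmu_remove,   moea_remove),
 *                { 0, 0 }
 *        };
 *
 *        MMU_DEF(oea_mmu, MMU_TYPE_OEA, moea_methods, 0);
 */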


/**
 * @brief Change the wiring attribute for the page in the given physical
 * map and virtual address.
 *
 * @param _pmap         physical map of page
 * @param _va           page virtual address
 * @param _wired        TRUE to increment wired count, FALSE to decrement
 */
METHOD void change_wiring {
        mmu_t           _mmu;
        pmap_t          _pmap;
        vm_offset_t     _va;
        boolean_t       _wired;
};


/**
 * @brief Clear the 'modified' bit on the given physical page
 *
 * @param _pg           physical page
 */
METHOD void clear_modify {
        mmu_t           _mmu;
        vm_page_t       _pg;
};


/**
 * @brief Clear the 'referenced' bit on the given physical page
 *
 * @param _pg           physical page
 */
METHOD void clear_reference {
        mmu_t           _mmu;
        vm_page_t       _pg;
};


/**
 * @brief Clear the write and modified bits in each of the given
 * physical page's mappings
 *
 * @param _pg           physical page
 */
METHOD void remove_write {
        mmu_t           _mmu;
        vm_page_t       _pg;
};


/**
 * @brief Copy the address range given by the source physical map, virtual
 * address and length to the destination physical map and virtual address.
 * This routine is optional; the default implementation does nothing.
 *
 * @param _dst_pmap     destination physical map
 * @param _src_pmap     source physical map
 * @param _dst_addr     destination virtual address
 * @param _len          size of range
 * @param _src_addr     source virtual address
 */
METHOD void copy {
        mmu_t           _mmu;
        pmap_t          _dst_pmap;
        pmap_t          _src_pmap;
        vm_offset_t     _dst_addr;
        vm_size_t       _len;
        vm_offset_t     _src_addr;
} DEFAULT mmu_null_copy;


/**
 * @brief Copy the source physical page to the destination physical page
 *
 * @param _src          source physical page
 * @param _dst          destination physical page
 */
METHOD void copy_page {
        mmu_t           _mmu;
        vm_page_t       _src;
        vm_page_t       _dst;
};


/**
 * @brief Create a mapping between a virtual/physical address pair in the
 * passed physical map with the specified protection and wiring
 *
 * @param _pmap         physical map
 * @param _va           mapping virtual address
 * @param _p            mapping physical page
 * @param _prot         mapping page protection
 * @param _wired        TRUE if page will be wired
 */
METHOD void enter {
        mmu_t           _mmu;
        pmap_t          _pmap;
        vm_offset_t     _va;
        vm_page_t       _p;
        vm_prot_t       _prot;
        boolean_t       _wired;
};


/**
 * @brief Maps a sequence of resident pages belonging to the same object.
 *
 * @param _pmap         physical map
 * @param _start        virtual range start
 * @param _end          virtual range end
 * @param _m_start      physical page mapped at start
 * @param _prot         mapping page protection
 */
METHOD void enter_object {
        mmu_t           _mmu;
        pmap_t          _pmap;
        vm_offset_t     _start;
        vm_offset_t     _end;
        vm_page_t       _m_start;
        vm_prot_t       _prot;
};


/**
 * @brief A faster entry point for page mapping where it is possible
 * to short-circuit some of the tests in pmap_enter.
 *
 * @param _pmap         physical map (and also currently active pmap)
 * @param _va           mapping virtual address
 * @param _pg           mapping physical page
 * @param _prot         new page protection - used to determine if the
 *                      page is executable
 */
METHOD void enter_quick {
        mmu_t           _mmu;
        pmap_t          _pmap;
        vm_offset_t     _va;
        vm_page_t       _pg;
        vm_prot_t       _prot;
};


/**
 * @brief Reverse map the given virtual address, returning the physical
 * page associated with the address if a mapping exists.
 *
 * @param _pmap         physical map
 * @param _va           mapping virtual address
 *
 * @retval 0            No mapping found
 * @retval addr         The mapping physical address
 */
METHOD vm_paddr_t extract {
        mmu_t           _mmu;
        pmap_t          _pmap;
        vm_offset_t     _va;
};


/**
 * @brief Reverse map the given virtual address, returning the
 * physical page if found. The page must be held (by calling
 * vm_page_hold) if the page protection matches the given protection.
 *
 * @param _pmap         physical map
 * @param _va           mapping virtual address
 * @param _prot         protection used to determine if physical page
 *                      should be locked
 *
 * @retval NULL         No mapping found
 * @retval page         Pointer to physical page. Held if protections match
 */
METHOD vm_page_t extract_and_hold {
        mmu_t           _mmu;
        pmap_t          _pmap;
        vm_offset_t     _va;
        vm_prot_t       _prot;
};


/**
 * @brief Increase kernel virtual address space to the given virtual address.
 * Not strictly required on PowerPC, so this routine is optional; an MMU
 * implementation may still make use of it.
 *
 * @param _va           new upper limit for kernel virtual address space
 */
METHOD void growkernel {
        mmu_t           _mmu;
        vm_offset_t     _va;
} DEFAULT mmu_null_growkernel;


/**
 * @brief Called from vm_mem_init. Zone allocation is available at this
 * stage, so it is a convenient time to create zones. This routine is for
 * MMU-implementation convenience and is optional.
 */
METHOD void init {
        mmu_t           _mmu;
} DEFAULT mmu_null_init;


/**
 * @brief Return whether the page has been marked as modified by the MMU
 * hardware
 *
 * @param _pg           physical page to test
 *
 * @retval boolean      TRUE if page has been modified
 */
METHOD boolean_t is_modified {
        mmu_t           _mmu;
        vm_page_t       _pg;
};


/**
 * @brief Return whether the specified virtual address is a candidate to be
 * prefaulted in. This routine is optional.
 *
 * @param _pmap         physical map
 * @param _va           virtual address to test
 *
 * @retval boolean      TRUE if the address is a candidate.
 */
METHOD boolean_t is_prefaultable {
        mmu_t           _mmu;
        pmap_t          _pmap;
        vm_offset_t     _va;
} DEFAULT mmu_null_is_prefaultable;


/**
 * @brief Return a count of referenced bits for a page, clearing those bits.
 * Not all referenced bits need to be cleared, but it is necessary that 0
 * only be returned when there are none set.
 *
 * @param _pg           physical page
 *
 * @retval int          count of referenced bits
 */
METHOD int ts_referenced {
        mmu_t           _mmu;
        vm_page_t       _pg;
};


/**
 * @brief Map the requested physical address range into kernel virtual
 * address space. The value in _virt is taken as a hint. The virtual
 * address of the range is returned, or NULL if the mapping could not
 * be created. The range can be direct-mapped if that is supported.
 *
 * @param *_virt        Hint for start virtual address, and also return
 *                      value
 * @param _start        physical address range start
 * @param _end          physical address range end
 * @param _prot         protection of range (currently ignored)
 *
 * @retval NULL         could not map the area
 * @retval addr, *_virt mapping start virtual address
 */
METHOD vm_offset_t map {
        mmu_t           _mmu;
        vm_offset_t     *_virt;
        vm_paddr_t      _start;
        vm_paddr_t      _end;
        int             _prot;
};
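
/**
 * A hedged sketch of a typical consumer: early machine-independent
 * startup code maps physical ranges it has carved out (for example the
 * vm_page array) through the pmap_map() wrapper for this method. The
 * variable names below are assumptions for illustration only:
 *
 *        vm_offset_t vaddr, mapped;
 *
 *        mapped = pmap_map(&vaddr, new_end, end,
 *            VM_PROT_READ | VM_PROT_WRITE);
 */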


/**
 * @brief Used to create a contiguous set of read-only mappings for a
 * given object, to try to eliminate a cascade of on-demand faults as
 * the object is accessed sequentially. This routine is optional.
 *
 * @param _pmap         physical map
 * @param _addr         mapping start virtual address
 * @param _object       device-backed V.M. object to be mapped
 * @param _pindex       page-index within object of mapping start
 * @param _size         size in bytes of mapping
 */
METHOD void object_init_pt {
        mmu_t           _mmu;
        pmap_t          _pmap;
        vm_offset_t     _addr;
        vm_object_t     _object;
        vm_pindex_t     _pindex;
        vm_size_t       _size;
} DEFAULT mmu_null_object_init_pt;


/**
 * @brief Used to determine if the specified page has a mapping for the
 * given physical map, by scanning the list of reverse-mappings from the
 * page. The list is scanned to a maximum of 16 entries.
 *
 * @param _pmap         physical map
 * @param _pg           physical page
 *
 * @retval bool         TRUE if the physical map was found in the first 16
 *                      reverse-map list entries off the physical page.
 */
METHOD boolean_t page_exists_quick {
        mmu_t           _mmu;
        pmap_t          _pmap;
        vm_page_t       _pg;
};


/**
 * @brief Initialise the machine-dependent section of the physical page
 * data structure. This routine is optional.
 *
 * @param _pg           physical page
 */
METHOD void page_init {
        mmu_t           _mmu;
        vm_page_t       _pg;
} DEFAULT mmu_null_page_init;


/**
 * @brief Count the number of managed mappings to the given physical
 * page that are wired.
 *
 * @param _pg           physical page
 *
 * @retval int          the number of wired, managed mappings to the
 *                      given physical page
 */
METHOD int page_wired_mappings {
        mmu_t           _mmu;
        vm_page_t       _pg;
};


/**
 * @brief Initialise a physical map data structure
 *
 * @param _pmap         physical map
 */
METHOD void pinit {
        mmu_t           _mmu;
        pmap_t          _pmap;
};


/**
 * @brief Initialise the physical map for process 0, the initial process
 * in the system.
 * XXX default to pinit ?
 *
 * @param _pmap         physical map
 */
METHOD void pinit0 {
        mmu_t           _mmu;
        pmap_t          _pmap;
};


/**
 * @brief Set the protection for physical pages in the given virtual address
 * range to the given value.
 *
 * @param _pmap         physical map
 * @param _start        virtual range start
 * @param _end          virtual range end
 * @param _prot         new page protection
 */
METHOD void protect {
        mmu_t           _mmu;
        pmap_t          _pmap;
        vm_offset_t     _start;
        vm_offset_t     _end;
        vm_prot_t       _prot;
};


/**
 * @brief Create a mapping in kernel virtual address space for the given array
 * of wired physical pages.
 *
 * @param _start        mapping virtual address start
 * @param *_pg          array of physical page pointers
 * @param _count        number of array elements
 */
METHOD void qenter {
        mmu_t           _mmu;
        vm_offset_t     _start;
        vm_page_t       *_pg;
        int             _count;
};


/**
 * @brief Remove the temporary mappings created by qenter.
 *
 * @param _start        mapping virtual address start
 * @param _count        number of pages in mapping
 */
METHOD void qremove {
        mmu_t           _mmu;
        vm_offset_t     _start;
        int             _count;
};
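
/**
 * A minimal usage sketch for the two methods above, via their
 * pmap_qenter()/pmap_qremove() wrappers. Here va is a previously
 * reserved KVA range and pages an array of wired pages; both names
 * are assumptions for illustration: map the pages, access them
 * through va, then tear the temporary mapping down.
 *
 *        pmap_qenter(va, pages, npages);
 *        bcopy(src, (void *)va, len);
 *        pmap_qremove(va, npages);
 */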


/**
 * @brief Release per-pmap resources, e.g. mutexes and allocated memory.
 * There should be no existing mappings for the physical map at this point.
 *
 * @param _pmap         physical map
 */
METHOD void release {
        mmu_t           _mmu;
        pmap_t          _pmap;
};


/**
 * @brief Remove all mappings in the given physical map for the start/end
 * virtual address range. The range will be page-aligned.
 *
 * @param _pmap         physical map
 * @param _start        mapping virtual address start
 * @param _end          mapping virtual address end
 */
METHOD void remove {
        mmu_t           _mmu;
        pmap_t          _pmap;
        vm_offset_t     _start;
        vm_offset_t     _end;
};


/**
 * @brief Traverse the reverse-map list off the given physical page and
 * remove all mappings. Clear the PG_WRITEABLE attribute from the page.
 *
 * @param _pg           physical page
 */
METHOD void remove_all {
        mmu_t           _mmu;
        vm_page_t       _pg;
};


/**
 * @brief Remove all user mappings in the given physical map. Similar to
 * the remove method, but it is used when tearing down all mappings in an
 * address space. This method is optional, since pmap_remove will be
 * called for each valid vm_map in the address space later.
 *
 * @param _pmap         physical map
 */
METHOD void remove_pages {
        mmu_t           _mmu;
        pmap_t          _pmap;
} DEFAULT mmu_null_remove_pages;


/**
 * @brief Zero a physical page. It is not assumed that the page is mapped,
 * so a temporary (or direct) mapping may need to be used.
 *
 * @param _pg           physical page
 */
METHOD void zero_page {
        mmu_t           _mmu;
        vm_page_t       _pg;
};


/**
 * @brief Zero a portion of a physical page, starting at a given offset and
 * for a given size (multiples of 512 bytes for 4k pages).
 *
 * @param _pg           physical page
 * @param _off          byte offset from start of page
 * @param _size         size of area to zero
 */
METHOD void zero_page_area {
        mmu_t           _mmu;
        vm_page_t       _pg;
        int             _off;
        int             _size;
};


/**
 * @brief Called from the idle loop to zero pages. XXX I think locking
 * constraints might be different here compared to zero_page.
 *
 * @param _pg           physical page
 */
METHOD void zero_page_idle {
        mmu_t           _mmu;
        vm_page_t       _pg;
};


/**
 * @brief Extract mincore(2) information from a mapping. This routine is
 * optional and is an optimisation: the mincore code will call is_modified
 * and ts_referenced if no result is returned.
 *
 * @param _pmap         physical map
 * @param _addr         page virtual address
 *
 * @retval 0            no result
 * @retval non-zero     mincore(2) flag values
 */
METHOD int mincore {
        mmu_t           _mmu;
        pmap_t          _pmap;
        vm_offset_t     _addr;
} DEFAULT mmu_null_mincore;


/**
 * @brief Perform any operations required to allow a physical map to be used
 * before its address space is accessed.
 *
 * @param _td           thread associated with physical map
 */
METHOD void activate {
        mmu_t           _mmu;
        struct thread   *_td;
};

/**
 * @brief Perform any operations required to deactivate a physical map,
 * for instance as it is context-switched out.
 *
 * @param _td           thread associated with physical map
 */
METHOD void deactivate {
        mmu_t           _mmu;
        struct thread   *_td;
} DEFAULT mmu_null_deactivate;

/**
 * @brief Return a hint for the best virtual address to map a tentative
 * virtual address range in a given VM object. The default is to just
 * return the given tentative start address.
 *
 * @param _obj          VM backing object
 * @param _offset       starting offset within the VM object
 * @param _addr         initial guess at virtual address
 * @param _size         size of virtual address range
 */
METHOD void align_superpage {
        mmu_t           _mmu;
        vm_object_t     _obj;
        vm_ooffset_t    _offset;
        vm_offset_t     *_addr;
        vm_size_t       _size;
} DEFAULT mmu_null_align_superpage;




/**
 * INTERNAL INTERFACES
 */

/**
 * @brief Bootstrap the VM system. At the completion of this routine, the
 * kernel will be running in its own address space with full control over
 * paging.
 *
 * @param _start        start of reserved memory (obsolete ???)
 * @param _end          end of reserved memory (obsolete ???)
 *                      XXX I think the intent of these was to allow
 *                      the memory used by kernel text+data+bss and
 *                      loader variables/load-time kld's to be carved out
 *                      of available physical mem.
 *
 */
METHOD void bootstrap {
        mmu_t           _mmu;
        vm_offset_t     _start;
        vm_offset_t     _end;
};

/**
 * @brief Set up the MMU on the current CPU. Only called by the PMAP layer
 * for alternate CPUs on SMP systems.
 *
 * @param _ap           Set to 1 if the CPU being set up is an AP
 *                      (application processor)
 *
 */
METHOD void cpu_bootstrap {
        mmu_t           _mmu;
        int             _ap;
};


/**
 * @brief Create a kernel mapping for a given physical address range.
 * Called by bus code on behalf of device drivers. The returned address
 * does not have to be a kernel virtual address: it can be a direct-mapped
 * physical address if that is supported by the MMU.
 *
 * @param _pa           start physical address
 * @param _size         size in bytes of mapping
 *
 * @retval addr         address of mapping.
 */
METHOD void * mapdev {
        mmu_t           _mmu;
        vm_offset_t     _pa;
        vm_size_t       _size;
};

/**
 * @brief Create a kernel mapping for a given physical address range.
 * Called by bus code on behalf of device drivers. The returned address
 * does not have to be a kernel virtual address: it can be a direct-mapped
 * physical address if that is supported by the MMU.
 *
 * @param _pa           start physical address
 * @param _size         size in bytes of mapping
 * @param _attr         cache attributes
 *
 * @retval addr         address of mapping.
 */
METHOD void * mapdev_attr {
        mmu_t           _mmu;
        vm_offset_t     _pa;
        vm_size_t       _size;
        vm_memattr_t    _attr;
} DEFAULT mmu_null_mapdev_attr;

/**
 * @brief Change cache control attributes for a page. Should modify all
 * mappings for that page.
 *
 * @param _pg           page to modify
 * @param _ma           new cache control attributes
 */
METHOD void page_set_memattr {
        mmu_t           _mmu;
        vm_page_t       _pg;
        vm_memattr_t    _ma;
} DEFAULT mmu_null_page_set_memattr;

/**
 * @brief Remove the mapping created by mapdev. Called when a driver
 * is unloaded.
 *
 * @param _va           Mapping address returned from mapdev
 * @param _size         size in bytes of mapping
 */
METHOD void unmapdev {
        mmu_t           _mmu;
        vm_offset_t     _va;
        vm_size_t       _size;
};


/**
 * @brief Reverse-map a kernel virtual address
 *
 * @param _va           kernel virtual address to reverse-map
 *
 * @retval pa           physical address corresponding to mapping
 */
METHOD vm_offset_t kextract {
        mmu_t           _mmu;
        vm_offset_t     _va;
};


/**
 * @brief Map a wired page into kernel virtual address space
 *
 * @param _va           mapping virtual address
 * @param _pa           mapping physical address
 */
METHOD void kenter {
        mmu_t           _mmu;
        vm_offset_t     _va;
        vm_offset_t     _pa;
};

/**
 * @brief Map a wired page into kernel virtual address space
 *
 * @param _va           mapping virtual address
 * @param _pa           mapping physical address
 * @param _ma           mapping cache control attributes
 */
METHOD void kenter_attr {
        mmu_t           _mmu;
        vm_offset_t     _va;
        vm_offset_t     _pa;
        vm_memattr_t    _ma;
} DEFAULT mmu_null_kenter_attr;

/**
 * @brief Determine if the given physical address range has been direct-mapped.
 *
 * @param _pa           physical address start
 * @param _size         physical address range size
 *
 * @retval bool         TRUE if the range is direct-mapped.
 */
METHOD boolean_t dev_direct_mapped {
        mmu_t           _mmu;
        vm_offset_t     _pa;
        vm_size_t       _size;
};


/**
 * @brief Enforce instruction cache coherency. Typically called after a
 * region of memory has been modified and before execution of or within
 * that region is attempted. Setting breakpoints in a process through
 * ptrace(2) is one example of when the instruction cache needs to be
 * made coherent.
 *
 * @param _pm           the physical map of the virtual address
 * @param _va           the virtual address of the modified region
 * @param _sz           the size of the modified region
 */
METHOD void sync_icache {
        mmu_t           _mmu;
        pmap_t          _pm;
        vm_offset_t     _va;
        vm_size_t       _sz;
};
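
/**
 * A hedged example of the ptrace(2) case mentioned above (the process
 * pointer p and address va are assumptions for illustration): after
 * poking a breakpoint instruction into another process's text, the
 * instruction cache is made coherent for that range through the
 * pmap_sync_icache() wrapper for this method.
 *
 *        pmap_sync_icache(vmspace_pmap(p->p_vmspace), va, sizeof(uint32_t));
 */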


/**
 * @brief Create temporary memory mapping for use by dumpsys().
 *
 * @param _md           The memory chunk in which the mapping lies.
 * @param _ofs          The offset within the chunk of the mapping.
 * @param _sz           The requested size of the mapping.
 *
 * @retval vm_offset_t  The virtual address of the mapping.
 *
 * The sz argument is modified to reflect the actual size of the
 * mapping.
 */
METHOD vm_offset_t dumpsys_map {
        mmu_t           _mmu;
        struct pmap_md  *_md;
        vm_size_t       _ofs;
        vm_size_t       *_sz;
};


/**
 * @brief Remove temporary dumpsys() mapping.
 *
 * @param _md           The memory chunk in which the mapping lies.
 * @param _ofs          The offset within the chunk of the mapping.
 * @param _va           The virtual address of the mapping.
 */
METHOD void dumpsys_unmap {
        mmu_t           _mmu;
        struct pmap_md  *_md;
        vm_size_t       _ofs;
        vm_offset_t     _va;
};


/**
 * @brief Scan/iterate memory chunks.
 *
 * @param _prev         The previously returned chunk or NULL.
 *
 * @retval              The next (or first when _prev is NULL) chunk.
 */
METHOD struct pmap_md * scan_md {
        mmu_t           _mmu;
        struct pmap_md  *_prev;
} DEFAULT mmu_null_scan_md;
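
/**
 * A sketch of how the three dump-related methods above fit together in
 * a machine-dependent dumpsys() loop (the md_size field, the dump_write()
 * arguments di and dumplo, and the pmap_* wrapper names are assumptions
 * for illustration): scan_md enumerates the memory chunks, while
 * dumpsys_map/dumpsys_unmap open and close a temporary window onto each
 * piece of a chunk as it is written out.
 *
 *        struct pmap_md *md = NULL;
 *        vm_size_t ofs, sz;
 *        vm_offset_t va;
 *
 *        while ((md = pmap_scan_md(md)) != NULL) {
 *                for (ofs = 0; ofs < md->md_size; ofs += sz) {
 *                        sz = md->md_size - ofs;
 *                        va = pmap_dumpsys_map(md, ofs, &sz);
 *                        (void)dump_write(di, (void *)va, 0, dumplo, sz);
 *                        dumplo += sz;
 *                        pmap_dumpsys_unmap(md, ofs, va);
 *                }
 *        }
 */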
