#-
# Copyright (c) 2005 Peter Grehan
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
#    notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
#    notice, this list of conditions and the following disclaimer in the
#    documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
#
# $FreeBSD: releng/8.1/sys/powerpc/powerpc/mmu_if.m 205956 2010-03-31 02:43:58Z marcel $
#

#include <sys/param.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/systm.h>

#include <vm/vm.h>
#include <vm/vm_page.h>

#include <machine/mmuvar.h>

/**
 * @defgroup MMU mmu - KObj methods for PowerPC MMU implementations
 * @brief A set of methods required by all MMU implementations. These
 * are basically direct call-throughs from the pmap machine-dependent
 * code.
 * Thanks to Bruce M Simpson's pmap man pages for the routine descriptions.
 *@{
 */

INTERFACE mmu;

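#
# Each METHOD below is turned into a KObj dispatch stub in mmu_if.h by
# makeobjops.awk, and the machine-dependent pmap front-end forwards its
# calls through those stubs. As a rough sketch of that forwarding
# (assuming the single global mmu_obj instance used by pmap_dispatch.c):
#
#	static mmu_t	mmu_obj;
#
#	void
#	pmap_clear_modify(vm_page_t m)
#	{
#		MMU_CLEAR_MODIFY(mmu_obj, m);
#	}
#
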
#
# Default implementations of some methods
#
CODE {
	static void mmu_null_copy(mmu_t mmu, pmap_t dst_pmap, pmap_t src_pmap,
	    vm_offset_t dst_addr, vm_size_t len, vm_offset_t src_addr)
	{
		return;
	}

	static void mmu_null_growkernel(mmu_t mmu, vm_offset_t addr)
	{
		return;
	}

	static void mmu_null_init(mmu_t mmu)
	{
		return;
	}

	static boolean_t mmu_null_is_prefaultable(mmu_t mmu, pmap_t pmap,
	    vm_offset_t va)
	{
		return (FALSE);
	}

	static void mmu_null_object_init_pt(mmu_t mmu, pmap_t pmap,
	    vm_offset_t addr, vm_object_t object, vm_pindex_t index,
	    vm_size_t size)
	{
		return;
	}

	static void mmu_null_page_init(mmu_t mmu, vm_page_t m)
	{
		return;
	}

	static void mmu_null_remove_pages(mmu_t mmu, pmap_t pmap)
	{
		return;
	}

	static int mmu_null_mincore(mmu_t mmu, pmap_t pmap, vm_offset_t addr)
	{
		return (0);
	}

	static void mmu_null_deactivate(struct thread *td)
	{
		return;
	}

	static void mmu_null_align_superpage(mmu_t mmu, vm_object_t object,
	    vm_ooffset_t offset, vm_offset_t *addr, vm_size_t size)
	{
		return;
	}

	static struct pmap_md *mmu_null_scan_md(mmu_t mmu, struct pmap_md *p)
	{
		return (NULL);
	}
};

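#
# An MMU implementation supplies its own routine for each method in a
# method table and registers the table with the MMU_DEF() macro from
# <machine/mmuvar.h>. A hypothetical implementation ("hypo" is an
# invented name here) might register itself roughly as follows:
#
#	static mmu_method_t hypo_methods[] = {
#		MMUMETHOD(mmu_enter,	hypo_enter),
#		MMUMETHOD(mmu_extract,	hypo_extract),
#		/* ... one entry per implemented method ... */
#		{ 0, 0 }
#	};
#
#	MMU_DEF(hypo_mmu, "mmu_hypo", hypo_methods, 0);
#
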
/**
 * @brief Change the wiring attribute for the page in the given physical
 * map and virtual address.
 *
 * @param _pmap         physical map of page
 * @param _va           page virtual address
 * @param _wired        TRUE to increment wired count, FALSE to decrement
 */
METHOD void change_wiring {
        mmu_t           _mmu;
        pmap_t          _pmap;
        vm_offset_t     _va;
        boolean_t       _wired;
};

/**
 * @brief Clear the 'modified' bit on the given physical page
 *
 * @param _pg           physical page
 */
METHOD void clear_modify {
        mmu_t           _mmu;
        vm_page_t       _pg;
};


/**
 * @brief Clear the 'referenced' bit on the given physical page
 *
 * @param _pg           physical page
 */
METHOD void clear_reference {
        mmu_t           _mmu;
        vm_page_t       _pg;
};

/**
 * @brief Clear the write and modified bits in each of the given
 * physical page's mappings
 *
 * @param _pg           physical page
 */
METHOD void remove_write {
        mmu_t           _mmu;
        vm_page_t       _pg;
};

/**
 * @brief Copy the address range given by the source physical map, virtual
 * address and length to the destination physical map and virtual address.
 * This routine is optional; the default is a null implementation.
 *
 * @param _dst_pmap     destination physical map
 * @param _src_pmap     source physical map
 * @param _dst_addr     destination virtual address
 * @param _len          size of range
 * @param _src_addr     source virtual address
 */
METHOD void copy {
        mmu_t           _mmu;
        pmap_t          _dst_pmap;
        pmap_t          _src_pmap;
        vm_offset_t     _dst_addr;
        vm_size_t       _len;
        vm_offset_t     _src_addr;
} DEFAULT mmu_null_copy;

/**
 * @brief Copy the source physical page to the destination physical page
 *
 * @param _src          source physical page
 * @param _dst          destination physical page
 */
METHOD void copy_page {
        mmu_t           _mmu;
        vm_page_t       _src;
        vm_page_t       _dst;
};

/**
 * @brief Create a mapping between a virtual/physical address pair in the
 * passed physical map with the specified protection and wiring
 *
 * @param _pmap         physical map
 * @param _va           mapping virtual address
 * @param _p            mapping physical page
 * @param _prot         mapping page protection
 * @param _wired        TRUE if page will be wired
 */
METHOD void enter {
        mmu_t           _mmu;
        pmap_t          _pmap;
        vm_offset_t     _va;
        vm_page_t       _p;
        vm_prot_t       _prot;
        boolean_t       _wired;
};

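#
# For illustration, the dispatch stub that makeobjops.awk generates for
# this method in mmu_if.h looks roughly like:
#
#	static __inline void MMU_ENTER(mmu_t _mmu, pmap_t _pmap,
#	    vm_offset_t _va, vm_page_t _p, vm_prot_t _prot,
#	    boolean_t _wired)
#	{
#		kobjop_t _m;
#
#		KOBJOPLOOKUP(((kobj_t)_mmu)->ops, mmu_enter);
#		((mmu_enter_t *)_m)(_mmu, _pmap, _va, _p, _prot, _wired);
#	}
#
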
/**
 * @brief Map a sequence of resident pages belonging to the same object.
 *
 * @param _pmap         physical map
 * @param _start        virtual range start
 * @param _end          virtual range end
 * @param _m_start      physical page mapped at start
 * @param _prot         mapping page protection
 */
METHOD void enter_object {
        mmu_t           _mmu;
        pmap_t          _pmap;
        vm_offset_t     _start;
        vm_offset_t     _end;
        vm_page_t       _m_start;
        vm_prot_t       _prot;
};

/**
 * @brief A faster entry point for page mapping where it is possible
 * to short-circuit some of the tests in pmap_enter.
 *
 * @param _pmap         physical map (and also currently active pmap)
 * @param _va           mapping virtual address
 * @param _pg           mapping physical page
 * @param _prot         new page protection - used to determine if the
 *                      page is executable
 */
METHOD void enter_quick {
        mmu_t           _mmu;
        pmap_t          _pmap;
        vm_offset_t     _va;
        vm_page_t       _pg;
        vm_prot_t       _prot;
};

/**
 * @brief Reverse map the given virtual address, returning the physical
 * page associated with the address if a mapping exists.
 *
 * @param _pmap         physical map
 * @param _va           mapping virtual address
 *
 * @retval 0            No mapping found
 * @retval addr         The mapping physical address
 */
METHOD vm_paddr_t extract {
        mmu_t           _mmu;
        pmap_t          _pmap;
        vm_offset_t     _va;
};

/**
 * @brief Reverse map the given virtual address, returning the
 * physical page if found. The page must be held (by calling
 * vm_page_hold) if the page protection matches the given protection.
 *
 * @param _pmap         physical map
 * @param _va           mapping virtual address
 * @param _prot         protection used to determine if physical page
 *                      should be locked
 *
 * @retval NULL         No mapping found
 * @retval page         Pointer to physical page. Held if protections match
 */
METHOD vm_page_t extract_and_hold {
        mmu_t           _mmu;
        pmap_t          _pmap;
        vm_offset_t     _va;
        vm_prot_t       _prot;
};

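#
# These two methods back pmap_extract() and pmap_extract_and_hold(). A
# caller that needs the page to stay around, e.g. across I/O, would use
# the holding variant along these lines (illustrative sketch only; the
# hold must be dropped under the appropriate page locking):
#
#	vm_page_t m;
#
#	m = pmap_extract_and_hold(pmap, va, VM_PROT_READ);
#	if (m != NULL) {
#		/* ... the page cannot be freed while held ... */
#		vm_page_unhold(m);	/* drop the hold when done */
#	}
#
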
/**
 * @brief Increase kernel virtual address space to the given virtual address.
 * Not strictly required on PowerPC, so this routine is optional unless the
 * MMU implementation can make use of it.
 *
 * @param _va           new upper limit for kernel virtual address space
 */
METHOD void growkernel {
        mmu_t           _mmu;
        vm_offset_t     _va;
} DEFAULT mmu_null_growkernel;

/**
 * @brief Called from vm_mem_init. Zone allocation is available at
 * this stage, so it is a convenient time to create zones. This routine is
 * for MMU-implementation convenience and is optional.
 */
METHOD void init {
        mmu_t           _mmu;
} DEFAULT mmu_null_init;

/**
 * @brief Return whether the page has been marked as modified by the MMU
 * hardware
 *
 * @param _pg           physical page to test
 *
 * @retval boolean      TRUE if page has been modified
 */
METHOD boolean_t is_modified {
        mmu_t           _mmu;
        vm_page_t       _pg;
};

/**
 * @brief Return whether the specified virtual address is a candidate to be
 * prefaulted in. This routine is optional.
 *
 * @param _pmap         physical map
 * @param _va           virtual address to test
 *
 * @retval boolean      TRUE if the address is a candidate.
 */
METHOD boolean_t is_prefaultable {
        mmu_t           _mmu;
        pmap_t          _pmap;
        vm_offset_t     _va;
} DEFAULT mmu_null_is_prefaultable;

/**
 * @brief Return a count of referenced bits for a page, clearing those bits.
 * Not all referenced bits need to be cleared, but it is necessary that 0
 * only be returned when there are none set.
 *
 * @param _pg           physical page
 *
 * @retval int          count of referenced bits
 */
METHOD int ts_referenced {
        mmu_t           _mmu;
        vm_page_t       _pg;
};

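#
# The page daemon is the main consumer of this method: it uses the
# returned count as an activity estimate for the page. A sketch of the
# usage pattern (not exact vm_pageout code):
#
#	actcount = pmap_ts_referenced(m);
#	if (actcount > 0) {
#		/* the page was referenced since the last scan */
#	}
#
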
/**
 * @brief Map the requested physical address range into kernel virtual
 * address space. The value in _virt is taken as a hint. The virtual
 * address of the range is returned, or NULL if the mapping could not
 * be created. The range can be direct-mapped if that is supported.
 *
 * @param *_virt        Hint for start virtual address, and also return
 *                      value
 * @param _start        physical address range start
 * @param _end          physical address range end
 * @param _prot         protection of range (currently ignored)
 *
 * @retval NULL         could not map the area
 * @retval addr, *_virt mapping start virtual address
 */
METHOD vm_offset_t map {
        mmu_t           _mmu;
        vm_offset_t     *_virt;
        vm_paddr_t      _start;
        vm_paddr_t      _end;
        int             _prot;
};

/**
 * @brief Used to create a contiguous set of read-only mappings for a
 * given object to try to eliminate a cascade of on-demand faults as
 * the object is accessed sequentially. This routine is optional.
 *
 * @param _pmap         physical map
 * @param _addr         mapping start virtual address
 * @param _object       device-backed V.M. object to be mapped
 * @param _pindex       page-index within object of mapping start
 * @param _size         size in bytes of mapping
 */
METHOD void object_init_pt {
        mmu_t           _mmu;
        pmap_t          _pmap;
        vm_offset_t     _addr;
        vm_object_t     _object;
        vm_pindex_t     _pindex;
        vm_size_t       _size;
} DEFAULT mmu_null_object_init_pt;

/**
 * @brief Used to determine if the specified page has a mapping for the
 * given physical map, by scanning the list of reverse-mappings from the
 * page. The list is scanned to a maximum of 16 entries.
 *
 * @param _pmap         physical map
 * @param _pg           physical page
 *
 * @retval bool         TRUE if the physical map was found in the first 16
 *                      reverse-map list entries off the physical page.
 */
METHOD boolean_t page_exists_quick {
        mmu_t           _mmu;
        pmap_t          _pmap;
        vm_page_t       _pg;
};

/**
 * @brief Initialise the machine-dependent section of the physical page
 * data structure. This routine is optional.
 *
 * @param _pg           physical page
 */
METHOD void page_init {
        mmu_t           _mmu;
        vm_page_t       _pg;
} DEFAULT mmu_null_page_init;

/**
 * @brief Count the number of managed mappings to the given physical
 * page that are wired.
 *
 * @param _pg           physical page
 *
 * @retval int          the number of wired, managed mappings to the
 *                      given physical page
 */
METHOD int page_wired_mappings {
        mmu_t           _mmu;
        vm_page_t       _pg;
};

/**
 * @brief Initialise a physical map data structure
 *
 * @param _pmap         physical map
 */
METHOD void pinit {
        mmu_t           _mmu;
        pmap_t          _pmap;
};

/**
 * @brief Initialise the physical map for process 0, the initial process
 * in the system.
 * XXX default to pinit ?
 *
 * @param _pmap         physical map
 */
METHOD void pinit0 {
        mmu_t           _mmu;
        pmap_t          _pmap;
};

/**
 * @brief Set the protection for physical pages in the given virtual address
 * range to the given value.
 *
 * @param _pmap         physical map
 * @param _start        virtual range start
 * @param _end          virtual range end
 * @param _prot         new page protection
 */
METHOD void protect {
        mmu_t           _mmu;
        pmap_t          _pmap;
        vm_offset_t     _start;
        vm_offset_t     _end;
        vm_prot_t       _prot;
};

/**
 * @brief Create a mapping in kernel virtual address space for the given array
 * of wired physical pages.
 *
 * @param _start        mapping virtual address start
 * @param *_pg          array of physical page pointers
 * @param _count        number of pages in the array
 */
METHOD void qenter {
        mmu_t           _mmu;
        vm_offset_t     _start;
        vm_page_t       *_pg;
        int             _count;
};

/**
 * @brief Remove the temporary mappings created by qenter.
 *
 * @param _start        mapping virtual address start
 * @param _count        number of pages in mapping
 */
METHOD void qremove {
        mmu_t           _mmu;
        vm_offset_t     _start;
        int             _count;
};

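#
# qenter and qremove back pmap_qenter()/pmap_qremove(), which the
# kernel uses for short-lived bulk mappings such as pager I/O. Usage is
# a simple map/use/unmap pairing (illustrative sketch):
#
#	pmap_qenter(sva, ma, npages);	/* map the wired pages at sva */
#	/* ... access the pages through the sva window ... */
#	pmap_qremove(sva, npages);	/* tear the mappings back down */
#
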
/**
 * @brief Release per-pmap resources, e.g. mutexes, allocated memory, etc.
 * There should be no existing mappings for the physical map at this point.
 *
 * @param _pmap         physical map
 */
METHOD void release {
        mmu_t           _mmu;
        pmap_t          _pmap;
};

/**
 * @brief Remove all mappings in the given physical map for the start/end
 * virtual address range. The range will be page-aligned.
 *
 * @param _pmap         physical map
 * @param _start        mapping virtual address start
 * @param _end          mapping virtual address end
 */
METHOD void remove {
        mmu_t           _mmu;
        pmap_t          _pmap;
        vm_offset_t     _start;
        vm_offset_t     _end;
};

/**
 * @brief Traverse the reverse-map list off the given physical page and
 * remove all mappings. Clear the PG_WRITEABLE attribute from the page.
 *
 * @param _pg           physical page
 */
METHOD void remove_all {
        mmu_t           _mmu;
        vm_page_t       _pg;
};

/**
 * @brief Remove all mappings in the given physical map. Similar to the
 * remove method, but used when tearing down all mappings in an address
 * space. This method is optional, since pmap_remove will be called for
 * each valid vm_map in the address space later.
 *
 * @param _pmap         physical map
 */
METHOD void remove_pages {
        mmu_t           _mmu;
        pmap_t          _pmap;
} DEFAULT mmu_null_remove_pages;

/**
 * @brief Zero a physical page. It is not assumed that the page is mapped,
 * so a temporary (or direct) mapping may need to be used.
 *
 * @param _pg           physical page
 */
METHOD void zero_page {
        mmu_t           _mmu;
        vm_page_t       _pg;
};

/**
 * @brief Zero a portion of a physical page, starting at a given offset and
 * for a given size (multiples of 512 bytes for 4k pages).
 *
 * @param _pg           physical page
 * @param _off          byte offset from start of page
 * @param _size         size of area to zero
 */
METHOD void zero_page_area {
        mmu_t           _mmu;
        vm_page_t       _pg;
        int             _off;
        int             _size;
};

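#
# zero_page_area backs pmap_zero_page_area(), used when only part of a
# page needs clearing, e.g. zeroing the invalid tail of a partially
# valid page (illustrative sketch):
#
#	/* clear the last 512 bytes of the page */
#	pmap_zero_page_area(m, PAGE_SIZE - 512, 512);
#
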
/**
 * @brief Called from the idle loop to zero pages. XXX I think locking
 * constraints might be different here compared to zero_page.
 *
 * @param _pg           physical page
 */
METHOD void zero_page_idle {
        mmu_t           _mmu;
        vm_page_t       _pg;
};

/**
 * @brief Extract mincore(2) information from a mapping. This routine is
 * optional and is an optimisation: the mincore code will call is_modified
 * and ts_referenced if no result is returned.
 *
 * @param _pmap         physical map
 * @param _addr         page virtual address
 *
 * @retval 0            no result
 * @retval non-zero     mincore(2) flag values
 */
METHOD int mincore {
        mmu_t           _mmu;
        pmap_t          _pmap;
        vm_offset_t     _addr;
} DEFAULT mmu_null_mincore;

/**
 * @brief Perform any operations required to allow a physical map to be used
 * before its address space is accessed.
 *
 * @param _td           thread associated with physical map
 */
METHOD void activate {
        mmu_t           _mmu;
        struct thread   *_td;
};

/**
 * @brief Perform any operations required to deactivate a physical map,
 * for instance as it is context-switched out.
 *
 * @param _td           thread associated with physical map
 */
METHOD void deactivate {
        mmu_t           _mmu;
        struct thread   *_td;
} DEFAULT mmu_null_deactivate;

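#
# activate and deactivate back pmap_activate() and pmap_deactivate(),
# which the machine-independent context-switch and exec paths invoke,
# roughly:
#
#	pmap_activate(curthread);	/* install curthread's pmap */
#
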
/**
 * @brief Return a hint for the best virtual address to map a tentative
 * virtual address range in a given VM object. The default is to just
 * return the given tentative start address.
 *
 * @param _obj          VM backing object
 * @param _offset       starting offset within the VM object
 * @param _addr         initial guess at virtual address
 * @param _size         size of virtual address range
 */
METHOD void align_superpage {
        mmu_t           _mmu;
        vm_object_t     _obj;
        vm_ooffset_t    _offset;
        vm_offset_t     *_addr;
        vm_size_t       _size;
} DEFAULT mmu_null_align_superpage;


/**
 * INTERNAL INTERFACES
 */

/**
 * @brief Bootstrap the VM system. At the completion of this routine, the
 * kernel will be running in its own address space with full control over
 * paging.
 *
 * @param _start        start of reserved memory (obsolete ???)
 * @param _end          end of reserved memory (obsolete ???)
 *                      XXX I think the intent of these was to allow
 *                      the memory used by kernel text+data+bss and
 *                      loader variables/load-time kld's to be carved out
 *                      of available physical mem.
 *
 */
METHOD void bootstrap {
        mmu_t           _mmu;
        vm_offset_t     _start;
        vm_offset_t     _end;
};

/**
 * @brief Set up the MMU on the current CPU. Only called by the PMAP layer
 * for alternate CPUs on SMP systems.
 *
 * @param _ap           Set to 1 if the CPU being set up is an AP
 *
 */
METHOD void cpu_bootstrap {
        mmu_t           _mmu;
        int             _ap;
};

/**
 * @brief Create a kernel mapping for a given physical address range.
 * Called by bus code on behalf of device drivers. The mapping does not
 * have to be a virtual address: it can be a direct-mapped physical address
 * if that is supported by the MMU.
 *
 * @param _pa           start physical address
 * @param _size         size in bytes of mapping
 *
 * @retval addr         address of mapping.
 */
METHOD void * mapdev {
        mmu_t           _mmu;
        vm_offset_t     _pa;
        vm_size_t       _size;
};

/**
 * @brief Remove the mapping created by mapdev. Called when a driver
 * is unloaded.
 *
 * @param _va           Mapping address returned from mapdev
 * @param _size         size in bytes of mapping
 */
METHOD void unmapdev {
        mmu_t           _mmu;
        vm_offset_t     _va;
        vm_size_t       _size;
};

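#
# mapdev and unmapdev back pmap_mapdev() and pmap_unmapdev(), typically
# reached through bus code when a driver maps its device registers.
# Paired usage looks roughly like this (illustrative sketch):
#
#	void *regs;
#
#	regs = pmap_mapdev(pa, size);	/* map the device range */
#	/* ... register accesses through regs ... */
#	pmap_unmapdev((vm_offset_t)regs, size);
#
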
/**
 * @brief Reverse-map a kernel virtual address
 *
 * @param _va           kernel virtual address to reverse-map
 *
 * @retval pa           physical address corresponding to mapping
 */
METHOD vm_offset_t kextract {
        mmu_t           _mmu;
        vm_offset_t     _va;
};

/**
 * @brief Map a wired page into kernel virtual address space
 *
 * @param _va           mapping virtual address
 * @param _pa           mapping physical address
 */
METHOD void kenter {
        mmu_t           _mmu;
        vm_offset_t     _va;
        vm_offset_t     _pa;
};

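#
# kenter and kextract back the unlocked kernel-only primitives
# pmap_kenter() and pmap_kextract(); a round trip should be the
# identity (illustrative sketch):
#
#	pmap_kenter(va, pa);	/* wire pa into the kernel at va */
#	KASSERT(pmap_kextract(va) == pa, ("mapping lost"));
#
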
/**
 * @brief Determine if the given physical address range has been direct-mapped.
 *
 * @param _pa           physical address start
 * @param _size         physical address range size
 *
 * @retval bool         TRUE if the range is direct-mapped.
 */
METHOD boolean_t dev_direct_mapped {
        mmu_t           _mmu;
        vm_offset_t     _pa;
        vm_size_t       _size;
};

/**
 * @brief Enforce instruction cache coherency. Typically called after a
 * region of memory has been modified and before execution of or within
 * that region is attempted. Setting breakpoints in a process through
 * ptrace(2) is one example of when the instruction cache needs to be
 * made coherent.
 *
 * @param _pm           the physical map of the virtual address
 * @param _va           the virtual address of the modified region
 * @param _sz           the size of the modified region
 */
METHOD void sync_icache {
        mmu_t           _mmu;
        pmap_t          _pm;
        vm_offset_t     _va;
        vm_size_t       _sz;
};

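#
# sync_icache backs pmap_sync_icache(). For example, after ptrace(2)
# writes a breakpoint instruction into another process, the debugger
# path would sync the modified range roughly as:
#
#	pmap_sync_icache(vmspace_pmap(td->td_proc->p_vmspace), va, sz);
#
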
/**
 * @brief Create a temporary memory mapping for use by dumpsys().
 *
 * @param _md           The memory chunk in which the mapping lies.
 * @param _ofs          The offset within the chunk of the mapping.
 * @param _sz           The requested size of the mapping.
 *
 * @retval vm_offset_t  The virtual address of the mapping.
 *
 * The sz argument is modified to reflect the actual size of the
 * mapping.
 */
METHOD vm_offset_t dumpsys_map {
        mmu_t           _mmu;
        struct pmap_md  *_md;
        vm_size_t       _ofs;
        vm_size_t       *_sz;
};

/**
 * @brief Remove a temporary dumpsys() mapping.
 *
 * @param _md           The memory chunk in which the mapping lies.
 * @param _ofs          The offset within the chunk of the mapping.
 * @param _va           The virtual address of the mapping.
 */
METHOD void dumpsys_unmap {
        mmu_t           _mmu;
        struct pmap_md  *_md;
        vm_size_t       _ofs;
        vm_offset_t     _va;
};

/**
 * @brief Scan/iterate memory chunks.
 *
 * @param _prev         The previously returned chunk or NULL.
 *
 * @retval              The next (or first when _prev is NULL) chunk.
 */
METHOD struct pmap_md * scan_md {
        mmu_t           _mmu;
        struct pmap_md  *_prev;
} DEFAULT mmu_null_scan_md;
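
#
# Taken together, scan_md, dumpsys_map and dumpsys_unmap let the kernel
# dump code walk the physical memory chunks and map each one in turn.
# Roughly (a sketch of the dump-loop pattern, assuming a md_size field
# in struct pmap_md):
#
#	struct pmap_md *md;
#	vm_offset_t va;
#	vm_size_t sz;
#
#	for (md = pmap_scan_md(NULL); md != NULL; md = pmap_scan_md(md)) {
#		sz = md->md_size;
#		va = pmap_dumpsys_map(md, 0, &sz);
#		/* ... write [va, va + sz) to the dump device ... */
#		pmap_dumpsys_unmap(md, 0, va);
#	}
#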
