FreeBSD/Linux Kernel Cross Reference
sys/arm/include/pmap_var.h

/*-
 * Copyright 2014 Svatopluk Kraus <onwahe@gmail.com>
 * Copyright 2014 Michal Meloun <meloun@miracle.cz>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#ifndef _MACHINE_PMAP_VAR_H_
#define _MACHINE_PMAP_VAR_H_

#include <machine/cpu-v6.h>
#include <machine/pte-v6.h>
/*
 *  Various PMAP defines, exports, and inline function
 *  definitions that are also usable in other MD code.
 */

/*  The number of pages in an L1 page table. */
#define NPG_IN_PT1      (NB_IN_PT1 / PAGE_SIZE)

/*  The number of L2 page tables in a page. */
#define NPT2_IN_PG      (PAGE_SIZE / NB_IN_PT2)

/*  The number of L2 page table entries in a page. */
#define NPTE2_IN_PG     (NPT2_IN_PG * NPTE2_IN_PT2)

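/*
 *  For example, with the usual ARMv6/v7 values -- 4 KB pages, a 16 KB L1
 *  page table and 1 KB L2 page tables holding 256 entries each (these
 *  numbers come from pte-v6.h, not from this file) -- the defines above
 *  work out to NPG_IN_PT1 = 4, NPT2_IN_PG = 4 and NPTE2_IN_PG = 1024.
 */
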
#ifdef _KERNEL

/*
 *  An L2 page table page contains NPT2_IN_PG L2 page tables.  Masking
 *  pte1_idx with PT2PG_MASK gives the index of the associated L2 page
 *  table within its page.  The PT2PG_SHIFT definition depends strictly
 *  on NPT2_IN_PG, i.e., (1 << PT2PG_SHIFT) == NPT2_IN_PG must hold.
 */
#define PT2PG_SHIFT     2
#define PT2PG_MASK      ((1 << PT2PG_SHIFT) - 1)

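/*
 *  A sketch of how the L1 index of a virtual address splits under this
 *  scheme.  The helpers below are hypothetical and for illustration only;
 *  they are not part of the pmap implementation.
 */
static __inline u_int
pt2pg_example_page_idx(u_int pte1_idx)
{

        /* Which L2 page table page (i.e., which PT2TAB slot, see below). */
        return (pte1_idx >> PT2PG_SHIFT);
}

static __inline u_int
pt2pg_example_slot_idx(u_int pte1_idx)
{

        /* Which of the NPT2_IN_PG L2 page tables within that page. */
        return (pte1_idx & PT2PG_MASK);
}
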
/*
 *  A PT2TAB holds all allocated L2 page table pages in a pmap.
 *  Right shifting a virtual address by PT2TAB_SHIFT gives the index of
 *  the L2 page table page in PT2TAB which holds the address mapping.
 */
#define PT2TAB_ENTRIES  (NPTE1_IN_PT1 / NPT2_IN_PG)
#define PT2TAB_SHIFT    (PTE1_SHIFT + PT2PG_SHIFT)

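/*
 *  With the usual ARMv6/v7 values (PTE1_SHIFT = 20, 4096 L1 entries and
 *  NPT2_IN_PG = 4, again taken from pte-v6.h rather than this file) this
 *  gives PT2TAB_ENTRIES = 1024 and PT2TAB_SHIFT = 22: each PT2TAB entry
 *  covers 4 MB of virtual address space, and the whole PT2TAB of 4-byte
 *  entries fits into a single 4 KB page.
 */
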
/*
 *  All allocated L2 page table pages in a pmap are mapped into PT2MAP space.
 *  Right shifting a virtual address by PT2MAP_SHIFT gives the index of the
 *  PTE2 which maps the address.
 */
#define PT2MAP_SIZE     (NPTE1_IN_PT1 * NB_IN_PT2)
#define PT2MAP_SHIFT    PTE2_SHIFT

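/*
 *  Under the same assumptions as above, PT2MAP_SIZE works out to
 *  4096 * 1 KB = 4 MB of PTE2s, which is exactly enough to describe the
 *  full 4 GB virtual address space one 4 KB page at a time, and
 *  PT2MAP_SHIFT = 12 simply selects the page number of an address.
 */
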
extern pt1_entry_t *kern_pt1;
extern pt2_entry_t *kern_pt2tab;
extern pt2_entry_t *PT2MAP;

/*
 *  Virtual interface for L1 page table management.
 */

static __inline u_int
pte1_index(vm_offset_t va)
{

        return (va >> PTE1_SHIFT);
}

static __inline pt1_entry_t *
pte1_ptr(pt1_entry_t *pt1, vm_offset_t va)
{

        return (pt1 + pte1_index(va));
}

static __inline vm_offset_t
pte1_trunc(vm_offset_t va)
{

        return (va & PTE1_FRAME);
}

static __inline vm_offset_t
pte1_roundup(vm_offset_t va)
{

        return ((va + PTE1_OFFSET) & PTE1_FRAME);
}

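/*
 *  A small usage sketch (hypothetical helper, illustration only): the
 *  truncation and round-up helpers above are what a caller would use to
 *  work out how many L1 slots a virtual address range touches.  Assumes
 *  sva < eva and that rounding eva up does not wrap past zero.
 */
static __inline u_int
pte1_range_slots_example(vm_offset_t sva, vm_offset_t eva)
{

        /* Count L1 slots between the aligned start and end of the range. */
        return (pte1_index(pte1_roundup(eva)) - pte1_index(pte1_trunc(sva)));
}
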
/*
 *  Virtual interface for L1 page table entry management.
 *
 *  XXX: Some of the following functions include a synchronization barrier
 *  and are called in loops, so it could be useful to have two versions of
 *  each: one with the barrier and one without it.  In that case, a pure
 *  barrier pte1_sync() should be implemented as well.
 */
static __inline void
pte1_sync(pt1_entry_t *pte1p)
{

        dsb();
#ifndef PMAP_PTE_NOCACHE
        if (!cpuinfo.coherent_walk)
                dcache_wb_pou((vm_offset_t)pte1p, sizeof(*pte1p));
#endif
}

static __inline void
pte1_sync_range(pt1_entry_t *pte1p, vm_size_t size)
{

        dsb();
#ifndef PMAP_PTE_NOCACHE
        if (!cpuinfo.coherent_walk)
                dcache_wb_pou((vm_offset_t)pte1p, size);
#endif
}

static __inline void
pte1_store(pt1_entry_t *pte1p, pt1_entry_t pte1)
{

        dmb();
        *pte1p = pte1;
        pte1_sync(pte1p);
}

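/*
 *  The split suggested by the XXX note above might look roughly like the
 *  following (pte1_store_nosync() is hypothetical and does not exist in
 *  this pmap): store a batch of entries without per-entry barriers and
 *  finish with a single pte1_sync_range() over the whole batch.
 *
 *      for (i = 0; i < n; i++)
 *              pte1_store_nosync(pte1p + i, pte1s[i]);
 *      pte1_sync_range(pte1p, n * sizeof(*pte1p));
 */
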
static __inline void
pte1_clear(pt1_entry_t *pte1p)
{

        pte1_store(pte1p, 0);
}

static __inline void
pte1_clear_bit(pt1_entry_t *pte1p, uint32_t bit)
{

        *pte1p &= ~bit;
        pte1_sync(pte1p);
}

static __inline boolean_t
pte1_is_link(pt1_entry_t pte1)
{

        return ((pte1 & L1_TYPE_MASK) == L1_TYPE_C);
}

static __inline int
pte1_is_section(pt1_entry_t pte1)
{

        return ((pte1 & L1_TYPE_MASK) == L1_TYPE_S);
}

static __inline boolean_t
pte1_is_dirty(pt1_entry_t pte1)
{

        return ((pte1 & (PTE1_NM | PTE1_RO)) == 0);
}

static __inline boolean_t
pte1_is_global(pt1_entry_t pte1)
{

        return ((pte1 & PTE1_NG) == 0);
}

static __inline boolean_t
pte1_is_valid(pt1_entry_t pte1)
{
        int l1_type;

        l1_type = pte1 & L1_TYPE_MASK;
        return ((l1_type == L1_TYPE_C) || (l1_type == L1_TYPE_S));
}

static __inline boolean_t
pte1_is_wired(pt1_entry_t pte1)
{

        return (pte1 & PTE1_W);
}

static __inline pt1_entry_t
pte1_load(pt1_entry_t *pte1p)
{
        pt1_entry_t pte1;

        pte1 = *pte1p;
        return (pte1);
}

static __inline pt1_entry_t
pte1_load_clear(pt1_entry_t *pte1p)
{
        pt1_entry_t opte1;

        opte1 = *pte1p;
        *pte1p = 0;
        pte1_sync(pte1p);
        return (opte1);
}

static __inline void
pte1_set_bit(pt1_entry_t *pte1p, uint32_t bit)
{

        *pte1p |= bit;
        pte1_sync(pte1p);
}

static __inline vm_paddr_t
pte1_pa(pt1_entry_t pte1)
{

        return ((vm_paddr_t)(pte1 & PTE1_FRAME));
}

static __inline vm_paddr_t
pte1_link_pa(pt1_entry_t pte1)
{

        return ((vm_paddr_t)(pte1 & L1_C_ADDR_MASK));
}

/*
 *  Virtual interface for L2 page table entry management.
 *
 *  XXX: Some of the following functions include a synchronization barrier
 *  and are called in loops, so it could be useful to have two versions of
 *  each: one with the barrier and one without it.
 */

static __inline void
pte2_sync(pt2_entry_t *pte2p)
{

        dsb();
#ifndef PMAP_PTE_NOCACHE
        if (!cpuinfo.coherent_walk)
                dcache_wb_pou((vm_offset_t)pte2p, sizeof(*pte2p));
#endif
}

static __inline void
pte2_sync_range(pt2_entry_t *pte2p, vm_size_t size)
{

        dsb();
#ifndef PMAP_PTE_NOCACHE
        if (!cpuinfo.coherent_walk)
                dcache_wb_pou((vm_offset_t)pte2p, size);
#endif
}

static __inline void
pte2_store(pt2_entry_t *pte2p, pt2_entry_t pte2)
{

        dmb();
        *pte2p = pte2;
        pte2_sync(pte2p);
}

static __inline void
pte2_clear(pt2_entry_t *pte2p)
{

        pte2_store(pte2p, 0);
}

static __inline void
pte2_clear_bit(pt2_entry_t *pte2p, uint32_t bit)
{

        *pte2p &= ~bit;
        pte2_sync(pte2p);
}

static __inline boolean_t
pte2_is_dirty(pt2_entry_t pte2)
{

        return ((pte2 & (PTE2_NM | PTE2_RO)) == 0);
}

static __inline boolean_t
pte2_is_global(pt2_entry_t pte2)
{

        return ((pte2 & PTE2_NG) == 0);
}

static __inline boolean_t
pte2_is_valid(pt2_entry_t pte2)
{

        return (pte2 & PTE2_V);
}

static __inline boolean_t
pte2_is_wired(pt2_entry_t pte2)
{

        return (pte2 & PTE2_W);
}

static __inline pt2_entry_t
pte2_load(pt2_entry_t *pte2p)
{
        pt2_entry_t pte2;

        pte2 = *pte2p;
        return (pte2);
}

static __inline pt2_entry_t
pte2_load_clear(pt2_entry_t *pte2p)
{
        pt2_entry_t opte2;

        opte2 = *pte2p;
        *pte2p = 0;
        pte2_sync(pte2p);
        return (opte2);
}

static __inline void
pte2_set_bit(pt2_entry_t *pte2p, uint32_t bit)
{

        *pte2p |= bit;
        pte2_sync(pte2p);
}

static __inline void
pte2_set_wired(pt2_entry_t *pte2p, boolean_t wired)
{

        /*
         * The wired bit is transparent to the page table walk,
         * so pte2_sync() is not needed.
         */
        if (wired)
                *pte2p |= PTE2_W;
        else
                *pte2p &= ~PTE2_W;
}

static __inline vm_paddr_t
pte2_pa(pt2_entry_t pte2)
{

        return ((vm_paddr_t)(pte2 & PTE2_FRAME));
}

static __inline u_int
pte2_attr(pt2_entry_t pte2)
{

        return ((u_int)(pte2 & PTE2_ATTR_MASK));
}

/*
 *  Virtual interface for L2 page table mapping management.
 */

static __inline u_int
pt2tab_index(vm_offset_t va)
{

        return (va >> PT2TAB_SHIFT);
}

static __inline pt2_entry_t *
pt2tab_entry(pt2_entry_t *pt2tab, vm_offset_t va)
{

        return (pt2tab + pt2tab_index(va));
}

static __inline void
pt2tab_store(pt2_entry_t *pte2p, pt2_entry_t pte2)
{

        pte2_store(pte2p, pte2);
}

static __inline pt2_entry_t
pt2tab_load(pt2_entry_t *pte2p)
{

        return (pte2_load(pte2p));
}

static __inline pt2_entry_t
pt2tab_load_clear(pt2_entry_t *pte2p)
{

        return (pte2_load_clear(pte2p));
}

static __inline u_int
pt2map_index(vm_offset_t va)
{

        return (va >> PT2MAP_SHIFT);
}

static __inline pt2_entry_t *
pt2map_entry(vm_offset_t va)
{

        return (PT2MAP + pt2map_index(va));
}

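/*
 *  A usage sketch (hypothetical helper, illustration only): check whether
 *  the current address space has a valid PTE2 for a virtual address.  It
 *  assumes the L2 page table covering va is allocated, i.e., that the
 *  corresponding PT2MAP page may be dereferenced safely.
 */
static __inline boolean_t
pte2_example_is_mapped(vm_offset_t va)
{

        /* Look up the PTE2 through PT2MAP and test its valid bit. */
        return (pte2_is_valid(pte2_load(pt2map_entry(va))));
}
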
/*
 *  Virtual interface for pmap structure & kernel shortcuts.
 */

static __inline pt1_entry_t *
pmap_pte1(pmap_t pmap, vm_offset_t va)
{

        return (pte1_ptr(pmap->pm_pt1, va));
}

static __inline pt1_entry_t *
kern_pte1(vm_offset_t va)
{

        return (pte1_ptr(kern_pt1, va));
}

static __inline pt2_entry_t *
pmap_pt2tab_entry(pmap_t pmap, vm_offset_t va)
{

        return (pt2tab_entry(pmap->pm_pt2tab, va));
}

static __inline pt2_entry_t *
kern_pt2tab_entry(vm_offset_t va)
{

        return (pt2tab_entry(kern_pt2tab, va));
}

static __inline vm_page_t
pmap_pt2_page(pmap_t pmap, vm_offset_t va)
{
        pt2_entry_t pte2;

        pte2 = pte2_load(pmap_pt2tab_entry(pmap, va));
        return (PHYS_TO_VM_PAGE(pte2 & PTE2_FRAME));
}

static __inline vm_page_t
kern_pt2_page(vm_offset_t va)
{
        pt2_entry_t pte2;

        pte2 = pte2_load(kern_pt2tab_entry(va));
        return (PHYS_TO_VM_PAGE(pte2 & PTE2_FRAME));
}

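/*
 *  Putting the pieces together -- an illustrative sketch only, not part of
 *  the pmap implementation: translate a kernel virtual address to a
 *  physical address with the helpers above.  It assumes the kernel's L2
 *  page tables are reachable through PT2MAP and returns (vm_paddr_t)-1
 *  for an unmapped address.
 */
static __inline vm_paddr_t
kern_vtophys_example(vm_offset_t va)
{
        pt1_entry_t pte1;
        pt2_entry_t pte2;

        pte1 = pte1_load(kern_pte1(va));
        if (pte1_is_section(pte1))
                /* 1 MB section mapping: frame plus offset within it. */
                return (pte1_pa(pte1) | (va & PTE1_OFFSET));
        if (!pte1_is_link(pte1))
                return ((vm_paddr_t)-1);

        /* Linked L2 page table: look the PTE2 up through PT2MAP. */
        pte2 = pte2_load(pt2map_entry(va));
        if (!pte2_is_valid(pte2))
                return ((vm_paddr_t)-1);
        return (pte2_pa(pte2) | (va & PAGE_MASK));
}
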
#endif  /* _KERNEL */
#endif  /* !_MACHINE_PMAP_VAR_H_ */

This page is part of the FreeBSD/Linux Kernel Cross-Reference and was automatically generated using a modified version of the LXR engine.