FreeBSD/Linux Kernel Cross Reference
sys/powerpc/include/pmap.h


/*-
 * SPDX-License-Identifier: BSD-3-Clause AND BSD-4-Clause
 *
 * Copyright (C) 2006 Semihalf, Marian Balakowicz <m8@semihalf.com>
 * All rights reserved.
 *
 * Adapted for Freescale's e500 core CPUs.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN
 * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
 * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD$
 */
/*-
 * Copyright (C) 1995, 1996 Wolfgang Solfrank.
 * Copyright (C) 1995, 1996 TooLs GmbH.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by TooLs GmbH.
 * 4. The name of TooLs GmbH may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *      from: $NetBSD: pmap.h,v 1.17 2000/03/30 16:18:24 jdolecek Exp $
 */

#ifndef _MACHINE_PMAP_H_
#define _MACHINE_PMAP_H_

#include <sys/queue.h>
#include <sys/tree.h>
#include <sys/_cpuset.h>
#include <sys/_lock.h>
#include <sys/_mutex.h>
#include <machine/sr.h>
#include <machine/pte.h>
#include <machine/slb.h>
#include <machine/tlb.h>
#include <machine/vmparam.h>
#ifdef __powerpc64__
#include <vm/vm_radix.h>
#endif

/*
 * The radix page table structure is described by levels 1-4.
 * See Fig. 33 on p. 1002 of the Power ISA v3.0B.
 *
 * Page directories and tables must be size aligned.
 */

/* Root page directory - 64k   -- each entry covers 512GB */
typedef uint64_t pml1_entry_t;
/* l2 page directory - 4k      -- each entry covers 1GB */
typedef uint64_t pml2_entry_t;
/* l3 page directory - 4k      -- each entry covers 2MB */
typedef uint64_t pml3_entry_t;
/* l4 page directory - 256B/4k -- each entry covers 64k/4k */
typedef uint64_t pml4_entry_t;

typedef uint64_t pt_entry_t;
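
/*
 * Editorial sketch (not part of the original header): assuming the 4 KB
 * page configuration, a 52-bit virtual address is expected to decompose
 * into the four radix levels roughly as follows.  The index widths are
 * inferred from the directory sizes noted above (64 KB root = 8192
 * entries, 4 KB directories = 512 entries each).
 *
 *      l1_index = (va >> 39) & 0x1fff;    13 bits, each entry maps 512GB
 *      l2_index = (va >> 30) & 0x1ff;      9 bits, each entry maps 1GB
 *      l3_index = (va >> 21) & 0x1ff;      9 bits, each entry maps 2MB
 *      l4_index = (va >> 12) & 0x1ff;      9 bits, each entry maps 4KB
 */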

struct pmap;
typedef struct pmap *pmap_t;

#define PMAP_ENTER_QUICK_LOCKED 0x10000000

#if !defined(NPMAPS)
#define NPMAPS          32768
#endif /* !defined(NPMAPS) */

struct  slbtnode;

struct pvo_entry {
        LIST_ENTRY(pvo_entry) pvo_vlink;        /* Link to common virt page */
#ifndef __powerpc64__
        LIST_ENTRY(pvo_entry) pvo_olink;        /* Link to overflow entry */
#endif
        union {
                RB_ENTRY(pvo_entry) pvo_plink;  /* Link to pmap entries */
                SLIST_ENTRY(pvo_entry) pvo_dlink; /* Link to delete entry */
        };
        struct {
#ifndef __powerpc64__
                /* 32-bit fields */
                pte_t       pte;
#endif
                /* 64-bit fields */
                uintptr_t   slot;
                vm_paddr_t  pa;
                vm_prot_t   prot;
        } pvo_pte;
        pmap_t          pvo_pmap;               /* Owning pmap */
        vm_offset_t     pvo_vaddr;              /* VA of entry */
        uint64_t        pvo_vpn;                /* Virtual page number */
};
LIST_HEAD(pvo_head, pvo_entry);
SLIST_HEAD(pvo_dlist, pvo_entry);
RB_HEAD(pvo_tree, pvo_entry);
int pvo_vaddr_compare(struct pvo_entry *, struct pvo_entry *);
RB_PROTOTYPE(pvo_tree, pvo_entry, pvo_plink, pvo_vaddr_compare);

/* Used by 32-bit PMAP */
#define PVO_PTEGIDX_MASK        0x007UL         /* which PTEG slot */
#define PVO_PTEGIDX_VALID       0x008UL         /* slot is valid */
/* Used by 64-bit PMAP */
#define PVO_HID                 0x008UL         /* PVO entry in alternate hash */
/* Used by both */
#define PVO_WIRED               0x010UL         /* PVO entry is wired */
#define PVO_MANAGED             0x020UL         /* PVO entry is managed */
#define PVO_BOOTSTRAP           0x080UL         /* PVO entry allocated during
                                                   bootstrap */
#define PVO_DEAD                0x100UL         /* waiting to be deleted */
#define PVO_LARGE               0x200UL         /* large page */
#define PVO_VADDR(pvo)          ((pvo)->pvo_vaddr & ~ADDR_POFF)
#define PVO_PTEGIDX_GET(pvo)    ((pvo)->pvo_vaddr & PVO_PTEGIDX_MASK)
#define PVO_PTEGIDX_ISSET(pvo)  ((pvo)->pvo_vaddr & PVO_PTEGIDX_VALID)
#define PVO_PTEGIDX_CLR(pvo)    \
        ((void)((pvo)->pvo_vaddr &= ~(PVO_PTEGIDX_VALID|PVO_PTEGIDX_MASK)))
#define PVO_PTEGIDX_SET(pvo, i) \
        ((void)((pvo)->pvo_vaddr |= (i)|PVO_PTEGIDX_VALID))
#define PVO_VSID(pvo)           ((pvo)->pvo_vpn >> 16)

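/*
 * Editorial note (not part of the original header): the anonymous union in
 * struct pmap below overlays the machine-dependent state of the three
 * PowerPC pmap flavors that share this header: the segment/SLB-based hash
 * pmap (pm_sr or pm_slb plus the pmap_pvo tree), the ISA 3.0 radix pmap
 * (pm_pml1/pm_radix, 64-bit only), and the Book-E/e500 TLB-managed pmap
 * (pm_tid plus pm_pdir or pm_root).  Only one arm is used on any given
 * machine.
 */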
struct  pmap {
        struct          pmap_statistics pm_stats;
        struct  mtx     pm_mtx;
        cpuset_t        pm_active;
        union {
                struct {
                    #ifdef __powerpc64__
                        struct slbtnode *pm_slb_tree_root;
                        struct slb      **pm_slb;
                        int             pm_slb_len;
                    #else
                        register_t      pm_sr[16];
                    #endif

                        struct pmap     *pmap_phys;
                        struct pvo_tree pmap_pvo;
                };
#ifdef __powerpc64__
                /* Radix support */
                struct {
                        pml1_entry_t    *pm_pml1;       /* KVA of root page directory */
                        struct vm_radix  pm_radix;      /* spare page table pages */
                        TAILQ_HEAD(,pv_chunk)   pm_pvchunk;     /* list of mappings in pmap */
                        uint64_t        pm_pid; /* PIDR value */
                        int pm_flags;
                };
#endif
                struct {
                        /* TID to identify this pmap's entries in the TLB */
                        tlbtid_t        pm_tid[MAXCPU];

#ifdef __powerpc64__
                        /*
                         * Page table directory,
                         * array of pointers to page directories.
                         */
                        pte_t ****pm_root;
#else
                        /*
                         * Page table directory,
                         * array of pointers to page tables.
                         */
                        pte_t           **pm_pdir;

                        /* List of allocated ptbl bufs (ptbl kva regions). */
                        TAILQ_HEAD(, ptbl_buf)  pm_ptbl_list;
#endif
                };
        } __aligned(CACHE_LINE_SIZE);
};

/*
 * pv_entries are allocated in chunks per-process.  This avoids the
 * need to track per-pmap assignments.
 */
#define _NPCPV  126
#define _NPCM   howmany(_NPCPV, 64)

#define PV_CHUNK_HEADER                                                 \
        pmap_t                  pc_pmap;                                \
        TAILQ_ENTRY(pv_chunk)   pc_list;                                \
        uint64_t                pc_map[_NPCM];  /* bitmap; 1 = free */  \
        TAILQ_ENTRY(pv_chunk)   pc_lru;

struct pv_entry {
        pmap_t pv_pmap;
        vm_offset_t pv_va;
        TAILQ_ENTRY(pv_entry) pv_link;
};
typedef struct pv_entry *pv_entry_t;

struct pv_chunk_header {
        PV_CHUNK_HEADER
};
struct pv_chunk {
        PV_CHUNK_HEADER
        uint64_t        reserved;
        struct pv_entry         pc_pventry[_NPCPV];
};
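
/*
 * Sizing note (editorial, assuming LP64 member sizes): the chunk header is
 * 8 (pc_pmap) + 16 (pc_list) + 2 * 8 (pc_map) + 16 (pc_lru) = 56 bytes and
 * the reserved word pads it to 64; each pv_entry is 8 + 8 + 16 = 32 bytes,
 * so a chunk occupies 64 + 126 * 32 = 4096 bytes, exactly one 4 KB page.
 * That is why _NPCPV is 126 and _NPCM evaluates to 2.
 */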

struct  md_page {
        union {
                struct {
                        volatile int32_t mdpg_attrs;
                        vm_memattr_t     mdpg_cache_attrs;
                        struct  pvo_head mdpg_pvoh;
                        int             pv_gen;   /* (p) */
                };
                struct {
                        int                     pv_tracked;
                };
        };
        TAILQ_HEAD(, pv_entry)  pv_list;  /* (p) */
};

#ifdef AIM
#define pmap_page_get_memattr(m)        ((m)->md.mdpg_cache_attrs)
#else
#define pmap_page_get_memattr(m)        VM_MEMATTR_DEFAULT
#endif /* AIM */

/*
 * Return the VSID corresponding to a given virtual address.
 * If no VSID is currently defined, it will allocate one, and add
 * it to a free slot if available.
 *
 * NB: The PMAP MUST be locked already.
 */
uint64_t va_to_vsid(pmap_t pm, vm_offset_t va);
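
/*
 * Usage sketch (editorial): per the note above, the pmap lock must be held
 * across the lookup, e.g.
 *
 *      PMAP_LOCK(pm);
 *      vsid = va_to_vsid(pm, va);
 *      PMAP_UNLOCK(pm);
 */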

/* Lock-free, non-allocating lookup routines */
uint64_t kernel_va_to_slbv(vm_offset_t va);
struct slb *user_va_to_slb_entry(pmap_t pm, vm_offset_t va);

uint64_t allocate_user_vsid(pmap_t pm, uint64_t esid, int large);
void    free_vsid(pmap_t pm, uint64_t esid, int large);
void    slb_insert_user(pmap_t pm, struct slb *slb);
void    slb_insert_kernel(uint64_t slbe, uint64_t slbv);

struct slbtnode *slb_alloc_tree(void);
void     slb_free_tree(pmap_t pm);
struct slb **slb_alloc_user_cache(void);
void    slb_free_user_cache(struct slb **);

extern  struct pmap kernel_pmap_store;
#define kernel_pmap     (&kernel_pmap_store)

#ifdef _KERNEL

#define PMAP_LOCK(pmap)         mtx_lock(&(pmap)->pm_mtx)
#define PMAP_LOCK_ASSERT(pmap, type) \
                                mtx_assert(&(pmap)->pm_mtx, (type))
#define PMAP_LOCK_DESTROY(pmap) mtx_destroy(&(pmap)->pm_mtx)
#define PMAP_LOCK_INIT(pmap)    mtx_init(&(pmap)->pm_mtx, \
                                    (pmap == kernel_pmap) ? "kernelpmap" : \
                                    "pmap", NULL, MTX_DEF | MTX_DUPOK)
#define PMAP_LOCKED(pmap)       mtx_owned(&(pmap)->pm_mtx)
#define PMAP_MTX(pmap)          (&(pmap)->pm_mtx)
#define PMAP_TRYLOCK(pmap)      mtx_trylock(&(pmap)->pm_mtx)
#define PMAP_UNLOCK(pmap)       mtx_unlock(&(pmap)->pm_mtx)

#define pmap_page_is_write_mapped(m)    (((m)->a.flags & PGA_WRITEABLE) != 0)

#define pmap_vm_page_alloc_check(m)

void            pmap_bootstrap(vm_offset_t, vm_offset_t);
void            pmap_kenter(vm_offset_t va, vm_paddr_t pa);
void            pmap_kenter_attr(vm_offset_t va, vm_paddr_t pa, vm_memattr_t);
void            pmap_kremove(vm_offset_t);
void            *pmap_mapdev(vm_paddr_t, vm_size_t);
void            *pmap_mapdev_attr(vm_paddr_t, vm_size_t, vm_memattr_t);
void            pmap_unmapdev(void *, vm_size_t);
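
/*
 * Usage sketch (editorial): device registers are normally mapped and later
 * unmapped in matched pairs, e.g.
 *
 *      va = pmap_mapdev(pa, size);
 *      ... access the device through va ...
 *      pmap_unmapdev(va, size);
 */
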
void            pmap_page_set_memattr(vm_page_t, vm_memattr_t);
int             pmap_change_attr(vm_offset_t, vm_size_t, vm_memattr_t);
int             pmap_map_user_ptr(pmap_t pm, volatile const void *uaddr,
                    void **kaddr, size_t ulen, size_t *klen);
int             pmap_decode_kernel_ptr(vm_offset_t addr, int *is_user,
                    vm_offset_t *decoded_addr);
void            pmap_deactivate(struct thread *);
vm_paddr_t      pmap_kextract(vm_offset_t);
int             pmap_dev_direct_mapped(vm_paddr_t, vm_size_t);
boolean_t       pmap_mmu_install(char *name, int prio);
void            pmap_mmu_init(void);
const char      *pmap_mmu_name(void);
bool            pmap_ps_enabled(pmap_t pmap);
int             pmap_nofault(pmap_t pmap, vm_offset_t va, vm_prot_t flags);
boolean_t       pmap_page_is_mapped(vm_page_t m);

void            pmap_page_array_startup(long count);

#define vtophys(va)     pmap_kextract((vm_offset_t)(va))

extern  vm_offset_t virtual_avail;
extern  vm_offset_t virtual_end;
extern  caddr_t crashdumpmap;

extern  vm_offset_t msgbuf_phys;

extern  int pmap_bootstrapped;
extern  int radix_mmu;
extern  int superpages_enabled;

#ifdef AIM
void pmap_early_io_map_init(void);
#endif
vm_offset_t pmap_early_io_map(vm_paddr_t pa, vm_size_t size);
void pmap_early_io_unmap(vm_offset_t va, vm_size_t size);
void pmap_track_page(pmap_t pmap, vm_offset_t va);
void pmap_page_print_mappings(vm_page_t m);
void pmap_tlbie_all(void);

static inline int
pmap_vmspace_copy(pmap_t dst_pmap __unused, pmap_t src_pmap __unused)
{

        return (0);
}

#endif

#endif /* !_MACHINE_PMAP_H_ */
