
FreeBSD/Linux Kernel Cross Reference
sys/mm/slab.h


    1 #ifndef MM_SLAB_H
    2 #define MM_SLAB_H
    3 /*
    4  * Internal slab definitions
    5  */
    6 
    7 /*
    8  * State of the slab allocator.
    9  *
   10  * This is used to describe the states of the allocator during bootup.
   11  * Allocators use this to gradually bootstrap themselves. Most allocators
   12  * have the problem that the structures used for managing slab caches are
   13  * allocated from slab caches themselves.
   14  */
   15 enum slab_state {
   16         DOWN,                   /* No slab functionality yet */
   17         PARTIAL,                /* SLUB: kmem_cache_node available */
   18         PARTIAL_ARRAYCACHE,     /* SLAB: kmalloc size for arraycache available */
   19         PARTIAL_L3,             /* SLAB: kmalloc size for l3 struct available */
   20         UP,                     /* Slab caches usable but not all extras yet */
   21         FULL                    /* Everything is working */
   22 };
   23 
   24 extern enum slab_state slab_state;
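
The slab_state variable is how early boot code can tell whether the allocator has come far enough to serve requests. As a hedged illustration (the helper below is hypothetical; mainline exposes an equivalent check as slab_is_available() in mm/slab_common.c), a boot-time caller might gate on it like this:

    /*
     * Illustrative sketch, not part of mm/slab.h: report whether ordinary
     * kmalloc()/kmem_cache_alloc() calls can be expected to work yet.
     */
    static inline bool example_slab_usable(void)
    {
            return slab_state >= UP;        /* kmalloc caches exist from UP on */
    }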
   25 
   26 /* The slab cache mutex protects the management structures during changes */
   27 extern struct mutex slab_mutex;
   28 
   29 /* The list of all slab caches on the system */
   30 extern struct list_head slab_caches;
   31 
   32 /* The slab cache that manages slab cache information */
   33 extern struct kmem_cache *kmem_cache;
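
Taken together, slab_mutex and slab_caches give common code a way to iterate over every cache in the system. A minimal sketch of such a walk, assuming each struct kmem_cache is linked into slab_caches through its list member (as both SLAB and SLUB do):

    /* Illustrative only: print the name of every registered cache. */
    static void example_dump_caches(void)
    {
            struct kmem_cache *s;

            mutex_lock(&slab_mutex);        /* the list may change under us otherwise */
            list_for_each_entry(s, &slab_caches, list)
                    pr_info("slab cache: %s\n", s->name);
            mutex_unlock(&slab_mutex);
    }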
   34 
   35 unsigned long calculate_alignment(unsigned long flags,
   36                 unsigned long align, unsigned long size);
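
calculate_alignment() folds the caller's requested alignment together with the cache flags and object size; the essential idea is that SLAB_HWCACHE_ALIGN rounds the alignment up toward the hardware cache line, but not beyond what a small object can usefully occupy. A rough sketch of that logic (illustrative; the real helper lives in mm/slab_common.c and also honours ARCH_SLAB_MINALIGN):

    /* Sketch of the alignment calculation, for illustration only. */
    static unsigned long example_alignment(unsigned long flags,
                                           unsigned long align, unsigned long size)
    {
            if (flags & SLAB_HWCACHE_ALIGN) {
                    unsigned long ralign = cache_line_size();

                    /* Halve the target until small objects stop wasting space. */
                    while (size <= ralign / 2)
                            ralign /= 2;
                    align = max(align, ralign);
            }
            return ALIGN(align, sizeof(void *));
    }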
   37 
   38 /* Functions provided by the slab allocators */
   39 extern int __kmem_cache_create(struct kmem_cache *, unsigned long flags);
   40 
   41 extern struct kmem_cache *create_kmalloc_cache(const char *name, size_t size,
   42                         unsigned long flags);
   43 extern void create_boot_cache(struct kmem_cache *, const char *name,
   44                         size_t size, unsigned long flags);
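
create_boot_cache() exists precisely because of the chicken-and-egg problem described at the top of the file: the very first kmem_cache structures cannot be allocated from a slab cache, so they are initialised in statically allocated memory first. A hedged sketch of an allocator's init path (the static boot cache and the ordering shown here are illustrative, not the exact mainline sequence):

    static struct kmem_cache example_boot_kmem_cache;   /* static storage, no allocator needed */

    void __init example_kmem_cache_init(void)
    {
            /* Bring up the "cache of caches" from static memory ... */
            create_boot_cache(&example_boot_kmem_cache, "kmem_cache",
                              sizeof(struct kmem_cache), SLAB_HWCACHE_ALIGN);
            kmem_cache = &example_boot_kmem_cache;

            /* ... after which ordinary kmalloc caches can be created from it. */
            create_kmalloc_cache("kmalloc-64", 64, 0);
            slab_state = UP;
    }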
   45 
   46 struct mem_cgroup;
   47 #ifdef CONFIG_SLUB
   48 struct kmem_cache *
   49 __kmem_cache_alias(struct mem_cgroup *memcg, const char *name, size_t size,
   50                    size_t align, unsigned long flags, void (*ctor)(void *));
   51 #else
   52 static inline struct kmem_cache *
   53 __kmem_cache_alias(struct mem_cgroup *memcg, const char *name, size_t size,
   54                    size_t align, unsigned long flags, void (*ctor)(void *))
   55 { return NULL; }
   56 #endif
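
__kmem_cache_alias() is how SLUB avoids creating a new cache when an existing one is already compatible (same size, alignment, flags and constructor); SLAB opts out by always returning NULL. A hedged sketch of how a creation path might try the alias first (the wrapper function and its fall-through are illustrative):

    /* Illustrative fragment from a hypothetical creation path. */
    static struct kmem_cache *example_create(struct mem_cgroup *memcg,
                                             const char *name, size_t size,
                                             size_t align, unsigned long flags,
                                             void (*ctor)(void *))
    {
            struct kmem_cache *s;

            s = __kmem_cache_alias(memcg, name, size, align, flags, ctor);
            if (s)
                    return s;       /* a compatible cache already exists; reuse it */

            /* ... otherwise allocate and set up a new cache (elided) ... */
            return NULL;
    }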
   57 
   58 
   59 /* Legal flag mask for kmem_cache_create(), for various configurations */
   60 #define SLAB_CORE_FLAGS (SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA | SLAB_PANIC | \
   61                          SLAB_DESTROY_BY_RCU | SLAB_DEBUG_OBJECTS )
   62 
   63 #if defined(CONFIG_DEBUG_SLAB)
   64 #define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER)
   65 #elif defined(CONFIG_SLUB_DEBUG)
   66 #define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
   67                           SLAB_TRACE | SLAB_DEBUG_FREE)
   68 #else
   69 #define SLAB_DEBUG_FLAGS (0)
   70 #endif
   71 
   72 #if defined(CONFIG_SLAB)
   73 #define SLAB_CACHE_FLAGS (SLAB_MEM_SPREAD | SLAB_NOLEAKTRACE | \
   74                           SLAB_RECLAIM_ACCOUNT | SLAB_TEMPORARY | SLAB_NOTRACK)
   75 #elif defined(CONFIG_SLUB)
   76 #define SLAB_CACHE_FLAGS (SLAB_NOLEAKTRACE | SLAB_RECLAIM_ACCOUNT | \
   77                           SLAB_TEMPORARY | SLAB_NOTRACK)
   78 #else
   79 #define SLAB_CACHE_FLAGS (0)
   80 #endif
   81 
   82 #define CACHE_CREATE_MASK (SLAB_CORE_FLAGS | SLAB_DEBUG_FLAGS | SLAB_CACHE_FLAGS)
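
CACHE_CREATE_MASK therefore describes every flag the compiled-in allocator is willing to accept from kmem_cache_create(). A hedged sketch of the kind of sanity check a creation path might apply (the exact reaction to bad flags is illustrative):

    /* Illustrative validation of caller-supplied flags. */
    if (flags & ~CACHE_CREATE_MASK) {
            pr_err("kmem_cache_create: rejecting unknown flags 0x%lx\n",
                   flags & ~CACHE_CREATE_MASK);
            return NULL;    /* or mask the offending bits off and continue */
    }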
   83 
   84 int __kmem_cache_shutdown(struct kmem_cache *);
   85 
   86 struct seq_file;
   87 struct file;
   88 
   89 struct slabinfo {
   90         unsigned long active_objs;
   91         unsigned long num_objs;
   92         unsigned long active_slabs;
   93         unsigned long num_slabs;
   94         unsigned long shared_avail;
   95         unsigned int limit;
   96         unsigned int batchcount;
   97         unsigned int shared;
   98         unsigned int objects_per_slab;
   99         unsigned int cache_order;
  100 };
  101 
  102 void get_slabinfo(struct kmem_cache *s, struct slabinfo *sinfo);
  103 void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *s);
  104 ssize_t slabinfo_write(struct file *file, const char __user *buffer,
  105                        size_t count, loff_t *ppos);
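
struct slabinfo is the snapshot behind /proc/slabinfo-style reporting: get_slabinfo() fills it from a cache's current state, and a seq_file consumer formats it. A minimal sketch of such a consumer (the chosen columns and field widths are illustrative):

    /* Illustrative: emit one report row for a cache. */
    static void example_show_cache(struct seq_file *m, struct kmem_cache *s)
    {
            struct slabinfo sinfo;

            memset(&sinfo, 0, sizeof(sinfo));
            get_slabinfo(s, &sinfo);

            seq_printf(m, "%-20s %6lu %6lu %6lu %4u %4u\n",
                       s->name, sinfo.active_objs, sinfo.num_objs,
                       sinfo.num_slabs, sinfo.objects_per_slab, sinfo.cache_order);
    }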
  106 
  107 #ifdef CONFIG_MEMCG_KMEM
  108 static inline bool is_root_cache(struct kmem_cache *s)
  109 {
  110         return !s->memcg_params || s->memcg_params->is_root_cache;
  111 }
  112 
  113 static inline bool cache_match_memcg(struct kmem_cache *cachep,
  114                                      struct mem_cgroup *memcg)
  115 {
  116         return (is_root_cache(cachep) && !memcg) ||
  117                                 (cachep->memcg_params->memcg == memcg);
  118 }
  119 
  120 static inline void memcg_bind_pages(struct kmem_cache *s, int order)
  121 {
  122         if (!is_root_cache(s))
  123                 atomic_add(1 << order, &s->memcg_params->nr_pages);
  124 }
  125 
  126 static inline void memcg_release_pages(struct kmem_cache *s, int order)
  127 {
  128         if (is_root_cache(s))
  129                 return;
  130 
  131         if (atomic_sub_and_test((1 << order), &s->memcg_params->nr_pages))
  132                 mem_cgroup_destroy_cache(s);
  133 }
  134 
  135 static inline bool slab_equal_or_root(struct kmem_cache *s,
  136                                         struct kmem_cache *p)
  137 {
  138         return (p == s) ||
  139                 (s->memcg_params && (p == s->memcg_params->root_cache));
  140 }
  141 
  142 /*
  143  * We use suffixes to the name in memcg because we can't have caches
  144  * created in the system with the same name. But when we print them
   145  * locally, it is better to refer to them by the base name.
  146  */
  147 static inline const char *cache_name(struct kmem_cache *s)
  148 {
  149         if (!is_root_cache(s))
  150                 return s->memcg_params->root_cache->name;
  151         return s->name;
  152 }
  153 
  154 static inline struct kmem_cache *cache_from_memcg(struct kmem_cache *s, int idx)
  155 {
  156         return s->memcg_params->memcg_caches[idx];
  157 }
  158 
  159 static inline struct kmem_cache *memcg_root_cache(struct kmem_cache *s)
  160 {
  161         if (is_root_cache(s))
  162                 return s;
  163         return s->memcg_params->root_cache;
  164 }
  165 #else
  166 static inline bool is_root_cache(struct kmem_cache *s)
  167 {
  168         return true;
  169 }
  170 
  171 static inline bool cache_match_memcg(struct kmem_cache *cachep,
  172                                      struct mem_cgroup *memcg)
  173 {
  174         return true;
  175 }
  176 
  177 static inline void memcg_bind_pages(struct kmem_cache *s, int order)
  178 {
  179 }
  180 
  181 static inline void memcg_release_pages(struct kmem_cache *s, int order)
  182 {
  183 }
  184 
  185 static inline bool slab_equal_or_root(struct kmem_cache *s,
  186                                       struct kmem_cache *p)
  187 {
  188         return true;
  189 }
  190 
  191 static inline const char *cache_name(struct kmem_cache *s)
  192 {
  193         return s->name;
  194 }
  195 
  196 static inline struct kmem_cache *cache_from_memcg(struct kmem_cache *s, int idx)
  197 {
  198         return NULL;
  199 }
  200 
  201 static inline struct kmem_cache *memcg_root_cache(struct kmem_cache *s)
  202 {
  203         return s;
  204 }
  205 #endif
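
The two variants above mean callers never need their own #ifdef: with CONFIG_MEMCG_KMEM the helpers account slab pages to the owning memcg child cache (and may destroy an empty child cache once its last page is released), without it they compile down to nothing. A hedged sketch of how an allocator's slab page paths might pair them (the function names are hypothetical):

    /* Illustrative pairing of the page-accounting hooks. */
    static struct page *example_alloc_slab_pages(struct kmem_cache *s,
                                                 gfp_t gfp, int order)
    {
            struct page *page = alloc_pages(gfp, order);

            if (page)
                    memcg_bind_pages(s, order);     /* charge pages to a memcg child cache */
            return page;
    }

    static void example_free_slab_pages(struct kmem_cache *s,
                                        struct page *page, int order)
    {
            memcg_release_pages(s, order);          /* may tear down an empty child cache */
            __free_pages(page, order);
    }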
  206 
  207 static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x)
  208 {
  209         struct kmem_cache *cachep;
  210         struct page *page;
  211 
  212         /*
  213          * When kmemcg is not being used, both assignments should return the
   214          * same value, but we don't want to pay the assignment price in that
   215          * case. If it is not compiled in, the compiler should be smart enough
   216          * not to do the assignment at all. In that case, slab_equal_or_root
  217          * will also be a constant.
  218          */
  219         if (!memcg_kmem_enabled() && !unlikely(s->flags & SLAB_DEBUG_FREE))
  220                 return s;
  221 
  222         page = virt_to_head_page(x);
  223         cachep = page->slab_cache;
  224         if (slab_equal_or_root(cachep, s))
  225                 return cachep;
  226 
  227         pr_err("%s: Wrong slab cache. %s but object is from %s\n",
  228                 __FUNCTION__, cachep->name, s->name);
  229         WARN_ON_ONCE(1);
  230         return s;
  231 }
  232 #endif
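
cache_from_obj() is the guard used on the free path: given the cache a caller thinks an object belongs to, it recovers the cache that actually owns the object's page, which may be a memcg child cache or, under SLAB_DEBUG_FREE, evidence of a caller bug. A hedged sketch of its use in a free routine (the internal helper named here is hypothetical):

    /* Illustrative free path: map the object back to its real cache first. */
    void example_cache_free(struct kmem_cache *s, void *obj)
    {
            struct kmem_cache *cachep = cache_from_obj(s, obj);

            /* With kmemcg, 'cachep' may be the memcg child cache that actually
             * owns 'obj' rather than the root cache the caller passed in. */
            do_slab_free(cachep, obj);      /* hypothetical allocator-internal free */
    }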



This page is part of the FreeBSD/Linux Kernel Cross-Reference, and was automatically generated using a modified version of the LXR engine.