/*
 * Copyright (C) 2007-2010 Lawrence Livermore National Security, LLC.
 * Copyright (C) 2007 The Regents of the University of California.
 * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 * Written by Brian Behlendorf <behlendorf1@llnl.gov>.
 * UCRL-CODE-235197
 *
 * This file is part of the SPL, Solaris Porting Layer.
 *
 * The SPL is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * The SPL is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with the SPL. If not, see <http://www.gnu.org/licenses/>.
 */

#ifndef _SPL_KMEM_CACHE_H
#define	_SPL_KMEM_CACHE_H

#include <sys/taskq.h>

/*
 * Slab allocation interfaces. The SPL slab differs from the standard
 * Linux SLAB or SLUB primarily in that each cache may be backed by slabs
 * allocated from the physical or virtual memory address space. The virtual
 * slabs allow for good behavior when allocating large objects of identical
 * size. This slab implementation also supports both constructors and
 * destructors, which the Linux slab does not.
 */
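
/*
 * Example (illustrative sketch only): creating a cache whose objects are
 * set up by a constructor and torn down by a destructor, using the
 * kmem_cache_create() macro defined later in this header. The object
 * type my_obj_t and its mo_lock field are hypothetical.
 *
 *	static int
 *	my_obj_ctor(void *buf, void *priv, int flags)
 *	{
 *		my_obj_t *obj = buf;
 *		mutex_init(&obj->mo_lock, NULL, MUTEX_DEFAULT, NULL);
 *		return (0);
 *	}
 *
 *	static void
 *	my_obj_dtor(void *buf, void *priv)
 *	{
 *		my_obj_t *obj = buf;
 *		mutex_destroy(&obj->mo_lock);
 *	}
 *
 *	cache = kmem_cache_create("my_obj_cache", sizeof (my_obj_t), 0,
 *	    my_obj_ctor, my_obj_dtor, NULL, NULL, NULL, 0);
 */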
typedef enum kmc_bit {
	KMC_BIT_NODEBUG = 1,		/* Default behavior */
	KMC_BIT_KVMEM = 7,		/* Use kvmalloc linux allocator */
	KMC_BIT_SLAB = 8,		/* Use Linux slab cache */
	KMC_BIT_DEADLOCKED = 14,	/* Deadlock detected */
	KMC_BIT_GROWING = 15,		/* Growing in progress */
	KMC_BIT_REAPING = 16,		/* Reaping in progress */
	KMC_BIT_DESTROY = 17,		/* Destroy in progress */
	KMC_BIT_TOTAL = 18,		/* Proc handler helper bit */
	KMC_BIT_ALLOC = 19,		/* Proc handler helper bit */
	KMC_BIT_MAX = 20,		/* Proc handler helper bit */
} kmc_bit_t;

/* kmem move callback return values */
typedef enum kmem_cbrc {
	KMEM_CBRC_YES = 0,		/* Object moved */
	KMEM_CBRC_NO = 1,		/* Object not moved */
	KMEM_CBRC_LATER = 2,		/* Object not moved, try again later */
	KMEM_CBRC_DONT_NEED = 3,	/* Neither object is needed */
	KMEM_CBRC_DONT_KNOW = 4,	/* Object unknown */
} kmem_cbrc_t;
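
/*
 * A hedged sketch of a move callback with the signature accepted by
 * spl_kmem_cache_set_move() below. The helper my_obj_is_idle() is
 * hypothetical; the callback reports whether the object at "old" was
 * relocated to "new".
 *
 *	static kmem_cbrc_t
 *	my_obj_move(void *old, void *new, size_t size, void *priv)
 *	{
 *		if (!my_obj_is_idle(old))
 *			return (KMEM_CBRC_LATER);
 *
 *		memcpy(new, old, size);
 *		return (KMEM_CBRC_YES);
 *	}
 */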

#define	KMC_NODEBUG	(1 << KMC_BIT_NODEBUG)
#define	KMC_KVMEM	(1 << KMC_BIT_KVMEM)
#define	KMC_SLAB	(1 << KMC_BIT_SLAB)
#define	KMC_DEADLOCKED	(1 << KMC_BIT_DEADLOCKED)
#define	KMC_GROWING	(1 << KMC_BIT_GROWING)
#define	KMC_REAPING	(1 << KMC_BIT_REAPING)
#define	KMC_DESTROY	(1 << KMC_BIT_DESTROY)
#define	KMC_TOTAL	(1 << KMC_BIT_TOTAL)
#define	KMC_ALLOC	(1 << KMC_BIT_ALLOC)
#define	KMC_MAX		(1 << KMC_BIT_MAX)
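
/*
 * Each KMC_* flag above is simply its KMC_BIT_* counterpart shifted into
 * position, so runtime state kept in the unsigned long skc_flags field
 * (see spl_kmem_cache_t below) can be tested with the standard Linux
 * bitops. A minimal sketch:
 *
 *	int growing = test_bit(KMC_BIT_GROWING, &skc->skc_flags);
 */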

#define	KMC_REAP_CHUNK		INT_MAX
#define	KMC_DEFAULT_SEEKS	1

#define	KMC_RECLAIM_ONCE	0x1	/* Force a single shrinker pass */

extern struct list_head spl_kmem_cache_list;
extern struct rw_semaphore spl_kmem_cache_sem;

#define	SKM_MAGIC	0x2e2e2e2e
#define	SKO_MAGIC	0x20202020
#define	SKS_MAGIC	0x22222222
#define	SKC_MAGIC	0x2c2c2c2c

#define	SPL_KMEM_CACHE_OBJ_PER_SLAB	8	/* Target objects per slab */
#define	SPL_KMEM_CACHE_ALIGN		8	/* Default object alignment */
#ifdef _LP64
#define	SPL_KMEM_CACHE_MAX_SIZE		32	/* Max slab size in MB */
#else
#define	SPL_KMEM_CACHE_MAX_SIZE		4	/* Max slab size in MB */
#endif

#define	SPL_MAX_ORDER			(MAX_ORDER - 3)
#define	SPL_MAX_ORDER_NR_PAGES		(1 << (SPL_MAX_ORDER - 1))

#ifdef CONFIG_SLUB
#define	SPL_MAX_KMEM_CACHE_ORDER	PAGE_ALLOC_COSTLY_ORDER
#define	SPL_MAX_KMEM_ORDER_NR_PAGES	(1 << (SPL_MAX_KMEM_CACHE_ORDER - 1))
#else
#define	SPL_MAX_KMEM_ORDER_NR_PAGES	(KMALLOC_MAX_SIZE >> PAGE_SHIFT)
#endif

typedef int (*spl_kmem_ctor_t)(void *, void *, int);
typedef void (*spl_kmem_dtor_t)(void *, void *);

typedef struct spl_kmem_magazine {
	uint32_t		skm_magic;	/* Sanity magic */
	uint32_t		skm_avail;	/* Available objects */
	uint32_t		skm_size;	/* Magazine size */
	uint32_t		skm_refill;	/* Batch refill size */
	struct spl_kmem_cache	*skm_cache;	/* Owned by cache */
	unsigned int		skm_cpu;	/* Owned by cpu */
	void			*skm_objs[0];	/* Object pointers */
} spl_kmem_magazine_t;
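
/*
 * Illustrative note (a simplified sketch, not the implementation): each
 * magazine behaves as a per-CPU LIFO stack of recently freed objects, so
 * the common allocation and free paths touch only CPU-local state:
 *
 *	alloc fast path:	obj = skm->skm_objs[--skm->skm_avail];
 *	free fast path:		skm->skm_objs[skm->skm_avail++] = obj;
 *
 * Only when a magazine is empty (alloc) or full (free) must the caller
 * fall back to the slab lists under skc_lock.
 */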

typedef struct spl_kmem_obj {
	uint32_t		sko_magic;	/* Sanity magic */
	void			*sko_addr;	/* Buffer address */
	struct spl_kmem_slab	*sko_slab;	/* Owned by slab */
	struct list_head	sko_list;	/* Free object list linkage */
} spl_kmem_obj_t;

typedef struct spl_kmem_slab {
	uint32_t		sks_magic;	/* Sanity magic */
	uint32_t		sks_objs;	/* Objects per slab */
	struct spl_kmem_cache	*sks_cache;	/* Owned by cache */
	struct list_head	sks_list;	/* Slab list linkage */
	struct list_head	sks_free_list;	/* Free object list */
	unsigned long		sks_age;	/* Last modify jiffy */
	uint32_t		sks_ref;	/* Ref count of used objects */
} spl_kmem_slab_t;

typedef struct spl_kmem_alloc {
	struct spl_kmem_cache	*ska_cache;	/* Owned by cache */
	int			ska_flags;	/* Allocation flags */
	taskq_ent_t		ska_tqe;	/* Task queue entry */
} spl_kmem_alloc_t;

typedef struct spl_kmem_emergency {
	struct rb_node		ske_node;	/* Emergency tree linkage */
	unsigned long		ske_obj;	/* Buffer address */
} spl_kmem_emergency_t;

typedef struct spl_kmem_cache {
	uint32_t		skc_magic;	/* Sanity magic */
	uint32_t		skc_name_size;	/* Name length */
	char			*skc_name;	/* Name string */
	spl_kmem_magazine_t	**skc_mag;	/* Per-CPU warm cache */
	uint32_t		skc_mag_size;	/* Magazine size */
	uint32_t		skc_mag_refill;	/* Magazine refill count */
	spl_kmem_ctor_t		skc_ctor;	/* Constructor */
	spl_kmem_dtor_t		skc_dtor;	/* Destructor */
	void			*skc_private;	/* Private data */
	void			*skc_vmp;	/* Unused */
	struct kmem_cache	*skc_linux_cache;	/* Linux slab cache if used */
	unsigned long		skc_flags;	/* Flags */
	uint32_t		skc_obj_size;	/* Object size */
	uint32_t		skc_obj_align;	/* Object alignment */
	uint32_t		skc_slab_objs;	/* Objects per slab */
	uint32_t		skc_slab_size;	/* Slab size */
	atomic_t		skc_ref;	/* Ref count callers */
	taskqid_t		skc_taskqid;	/* Slab reclaim task */
	struct list_head	skc_list;	/* List of caches linkage */
	struct list_head	skc_complete_list;	/* Completely alloc'ed */
	struct list_head	skc_partial_list;	/* Partially alloc'ed */
	struct rb_root		skc_emergency_tree;	/* Min sized objects */
	spinlock_t		skc_lock;	/* Cache lock */
	spl_wait_queue_head_t	skc_waitq;	/* Allocation waiters */
	uint64_t		skc_slab_fail;	/* Slab alloc failures */
	uint64_t		skc_slab_create;	/* Slab creates */
	uint64_t		skc_slab_destroy;	/* Slab destroys */
	uint64_t		skc_slab_total;	/* Slab total current */
	uint64_t		skc_slab_alloc;	/* Slab alloc current */
	uint64_t		skc_slab_max;	/* Slab max historic */
	uint64_t		skc_obj_total;	/* Obj total current */
	uint64_t		skc_obj_alloc;	/* Obj alloc current */
	struct percpu_counter	skc_linux_alloc;	/* Linux-backed Obj alloc */
	uint64_t		skc_obj_max;	/* Obj max historic */
	uint64_t		skc_obj_deadlock;	/* Obj emergency deadlocks */
	uint64_t		skc_obj_emergency;	/* Obj emergency current */
	uint64_t		skc_obj_emergency_max;	/* Obj emergency max */
} spl_kmem_cache_t;
#define	kmem_cache_t	spl_kmem_cache_t

extern spl_kmem_cache_t *spl_kmem_cache_create(const char *name, size_t size,
    size_t align, spl_kmem_ctor_t ctor, spl_kmem_dtor_t dtor,
    void *reclaim, void *priv, void *vmp, int flags);
extern void spl_kmem_cache_set_move(spl_kmem_cache_t *,
    kmem_cbrc_t (*)(void *, void *, size_t, void *));
extern void spl_kmem_cache_destroy(spl_kmem_cache_t *skc);
extern void *spl_kmem_cache_alloc(spl_kmem_cache_t *skc, int flags);
extern void spl_kmem_cache_free(spl_kmem_cache_t *skc, void *obj);
extern void spl_kmem_cache_set_allocflags(spl_kmem_cache_t *skc, gfp_t flags);
extern void spl_kmem_cache_reap_now(spl_kmem_cache_t *skc);
extern void spl_kmem_reap(void);
extern uint64_t spl_kmem_cache_inuse(kmem_cache_t *cache);
extern uint64_t spl_kmem_cache_entry_size(kmem_cache_t *cache);
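
/*
 * A hedged example: the two helpers above can be combined to estimate the
 * live memory footprint of a cache. The variable names are illustrative.
 *
 *	uint64_t bytes = spl_kmem_cache_inuse(cache) *
 *	    spl_kmem_cache_entry_size(cache);
 */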

#define	kmem_cache_create(name, size, align, ctor, dtor, rclm, priv, vmp, fl) \
    spl_kmem_cache_create(name, size, align, ctor, dtor, rclm, priv, vmp, fl)
#define	kmem_cache_set_move(skc, move)	spl_kmem_cache_set_move(skc, move)
#define	kmem_cache_destroy(skc)		spl_kmem_cache_destroy(skc)
#define	kmem_cache_alloc(skc, flags)	spl_kmem_cache_alloc(skc, flags)
#define	kmem_cache_free(skc, obj)	spl_kmem_cache_free(skc, obj)
#define	kmem_cache_reap_now(skc)	spl_kmem_cache_reap_now(skc)
#define	kmem_reap()			spl_kmem_reap()
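
/*
 * Typical object lifecycle using the macros above (sketch only; the
 * KM_SLEEP allocation flag comes from sys/kmem.h and is assumed here):
 *
 *	my_obj_t *obj = kmem_cache_alloc(cache, KM_SLEEP);
 *	...
 *	kmem_cache_free(cache, obj);
 *	kmem_cache_destroy(cache);
 *
 * kmem_cache_destroy() should only be called once every outstanding
 * object has been returned to the cache.
 */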

/*
 * The following functions are only available for internal use.
 */
extern int spl_kmem_cache_init(void);
extern void spl_kmem_cache_fini(void);

#endif	/* _SPL_KMEM_CACHE_H */