FreeBSD/Linux Kernel Cross Reference
sys/sys/pool.h
1 /* $NetBSD: pool.h,v 1.45.8.1 2006/03/10 13:19:42 tron Exp $ */
2
3 /*-
4 * Copyright (c) 1997, 1998, 1999, 2000 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Paul Kranenburg; by Jason R. Thorpe of the Numerical Aerospace
9 * Simulation Facility, NASA Ames Research Center.
10 *
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
13 * are met:
14 * 1. Redistributions of source code must retain the above copyright
15 * notice, this list of conditions and the following disclaimer.
16 * 2. Redistributions in binary form must reproduce the above copyright
17 * notice, this list of conditions and the following disclaimer in the
18 * documentation and/or other materials provided with the distribution.
19 * 3. All advertising materials mentioning features or use of this software
20 * must display the following acknowledgement:
21 * This product includes software developed by the NetBSD
22 * Foundation, Inc. and its contributors.
23 * 4. Neither the name of The NetBSD Foundation nor the names of its
24 * contributors may be used to endorse or promote products derived
25 * from this software without specific prior written permission.
26 *
27 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
28 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
29 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
30 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
31 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
32 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
33 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
34 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
35 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
36 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
37 * POSSIBILITY OF SUCH DAMAGE.
38 */
39
40 #ifndef _SYS_POOL_H_
41 #define _SYS_POOL_H_
42
43 #ifdef _KERNEL
44 #define __POOL_EXPOSE
45 #endif
46
47 #if defined(_KERNEL_OPT)
48 #include "opt_pool.h"
49 #endif
50
51 #ifdef __POOL_EXPOSE
52 #include <sys/lock.h>
53 #include <sys/queue.h>
54 #include <sys/time.h>
55 #include <sys/tree.h>
56 #endif
57
58 #define PCG_NOBJECTS 16 /* max objects held by one pool_cache_group (size of pcg_objects[]) */
59 
60 #define POOL_PADDR_INVALID ((paddr_t) -1) /* sentinel: no/unknown physical address */
61
62 #ifdef __POOL_EXPOSE
63 /* The pool cache group. */
64 struct pool_cache_group {
65 TAILQ_ENTRY(pool_cache_group)
66 pcg_list; /* link in the pool cache's group list */
67 u_int pcg_avail; /* # available objects in pcg_objects[] */
68 /* pointers to the cached objects (virtual + optional physical address) */
69 struct {
70 void *pcgo_va; /* cache object virtual address */
71 paddr_t pcgo_pa;/* cache object physical address (POOL_PADDR_INVALID if unused) */
72 } pcg_objects[PCG_NOBJECTS];
73 };
74
75 struct pool_cache {
76 TAILQ_ENTRY(pool_cache)
77 pc_poollist; /* link on the parent pool's cache list (pr_cachelist) */
78 TAILQ_HEAD(, pool_cache_group)
79 pc_grouplist; /* Cache group list */
80 struct pool_cache_group
81 *pc_allocfrom; /* group to allocate from */
82 struct pool_cache_group
83 *pc_freeto; /* group to free to */
84 struct pool *pc_pool; /* parent pool */
85 struct simplelock pc_slock; /* mutex */
86 
87 int (*pc_ctor)(void *, void *, int); /* object constructor: (pc_arg, object, flags) */
88 void (*pc_dtor)(void *, void *); /* object destructor: (pc_arg, object) */
89 void *pc_arg; /* opaque argument passed to ctor/dtor */
90 
91 /* Statistics. */
92 unsigned long pc_hits; /* cache hits */
93 unsigned long pc_misses; /* cache misses */
94 
95 unsigned long pc_ngroups; /* # cache groups */
96 
97 unsigned long pc_nitems; /* # objects currently in cache */
98 };
99
100 struct pool_allocator {
101 void *(*pa_alloc)(struct pool *, int); /* back-end page allocation hook: (pool, flags) */
102 void (*pa_free)(struct pool *, void *); /* back-end page release hook: (pool, page) */
103 unsigned int pa_pagesz; /* size of the pages this allocator hands out */
104 
105 /* The following fields are for internal use only. */
106 struct simplelock pa_slock;
107 TAILQ_HEAD(, pool) pa_list; /* list of pools using this allocator */
108 int pa_flags;
109 #define PA_INITIALIZED 0x01
110 #define PA_WANT 0x02 /* wakeup any sleeping pools on free */
111 int pa_pagemask; /* NOTE(review): presumably derived from pa_pagesz at init -- confirm in pool.c */
112 int pa_pageshift; /* NOTE(review): presumably log2(pa_pagesz), set at init -- confirm in pool.c */
113 };
114
115 LIST_HEAD(pool_pagelist,pool_item_header); /* chain of pool pages; pool_item_header is not defined in this header */
116
117 struct pool {
118 TAILQ_ENTRY(pool)
119 pr_poollist; /* link on a list of pools -- presumably the global pool list; confirm in pool.c */
120 struct pool_pagelist
121 pr_emptypages; /* Empty pages */
122 struct pool_pagelist
123 pr_fullpages; /* Full pages */
124 struct pool_pagelist
125 pr_partpages; /* Partially-allocated pages */
126 struct pool_item_header *pr_curpage; /* page currently allocated from -- TODO confirm against pool.c */
127 struct pool *pr_phpool; /* Pool item header pool */
128 TAILQ_HEAD(,pool_cache)
129 pr_cachelist; /* Caches for this pool */
130 unsigned int pr_size; /* Size of item */
131 unsigned int pr_align; /* Requested alignment, must be 2^n */
132 unsigned int pr_itemoffset; /* Align this offset in item */
133 unsigned int pr_minitems; /* minimum # of items to keep */
134 unsigned int pr_minpages; /* same in page units */
135 unsigned int pr_maxpages; /* maximum # of pages to keep */
136 unsigned int pr_npages; /* # of pages allocated */
137 unsigned int pr_itemsperpage;/* # items that fit in a page */
138 unsigned int pr_slack; /* unused space in a page */
139 unsigned int pr_nitems; /* number of available items in pool */
140 unsigned int pr_nout; /* # items currently allocated */
141 unsigned int pr_hardlimit; /* hard limit to number of allocated
142 items */
143 struct pool_allocator *pr_alloc;/* back-end allocator */
144 TAILQ_ENTRY(pool) pr_alloc_list;/* link on allocator's pool list */
145 
146 /* Drain hook: (pr_drain_hook_arg, flags), installed via pool_set_drain_hook(). */
147 void (*pr_drain_hook)(void *, int);
148 void *pr_drain_hook_arg;
149 
150 const char *pr_wchan; /* tsleep(9) identifier */
151 unsigned int pr_flags; /* r/w flags */
152 unsigned int pr_roflags; /* r/o flags */
153 #define PR_NOWAIT 0x00 /* for symmetry */
154 #define PR_WAITOK 0x02 /* may sleep waiting for resources */
155 #define PR_WANTED 0x04 /* a sleeper is waiting on pr_wchan -- confirm */
156 #define PR_PHINPAGE 0x40 /* page header stored inside the page (see pr_phoffset) -- confirm */
157 #define PR_LOGGING 0x80 /* record operations in pr_log (POOL_DIAGNOSTIC) -- confirm */
158 #define PR_LIMITFAIL 0x100 /* even if waiting, fail if we hit limit */
159 #define PR_RECURSIVE 0x200 /* pool contains pools, for vmstat(8) */
160 #define PR_NOTOUCH 0x400 /* don't use free items to keep internal state*/
161 
162 /*
163 * `pr_slock' protects the pool's data structures when removing
164 * items from or returning items to the pool, or when reading
165 * or updating read/write fields in the pool descriptor.
166 *
167 * We assume back-end page allocators provide their own locking
168 * scheme. They will be called with the pool descriptor _unlocked_,
169 * since the page allocators may block.
170 */
171 struct simplelock pr_slock;
172 
173 SPLAY_HEAD(phtree, pool_item_header) pr_phtree; /* page headers keyed for fast lookup */
174 
175 int pr_maxcolor; /* Cache colouring */
176 int pr_curcolor; /* current colour offset -- presumably cycles up to pr_maxcolor */
177 int pr_phoffset; /* Offset in page of page header */
178 
179 /*
180 * Warning message to be issued, and a per-time-delta rate cap,
181 * if the hard limit is reached.
182 */
183 const char *pr_hardlimit_warning;
184 struct timeval pr_hardlimit_ratecap;
185 struct timeval pr_hardlimit_warning_last;
186 
187 /*
188 * Instrumentation
189 */
190 unsigned long pr_nget; /* # of successful requests */
191 unsigned long pr_nfail; /* # of unsuccessful requests */
192 unsigned long pr_nput; /* # of releases */
193 unsigned long pr_npagealloc; /* # of pages allocated */
194 unsigned long pr_npagefree; /* # of pages released */
195 unsigned int pr_hiwat; /* max # of pages in pool */
196 unsigned long pr_nidle; /* # of idle pages */
197 
198 /*
199 * Diagnostic aides (POOL_DIAGNOSTIC / PR_LOGGING).
200 */
201 struct pool_log *pr_log; /* operation log buffer */
202 int pr_curlogentry; /* next slot in pr_log */
203 int pr_logsize; /* # entries in pr_log */
204 
205 const char *pr_entered_file; /* reentrancy check */
206 long pr_entered_line;
207 };
208 #endif /* __POOL_EXPOSE */
209
210 #ifdef _KERNEL
211 /*
212 * pool_allocator_kmem is the default that all pools get unless
213 * otherwise specified. pool_allocator_nointr is provided for
214 * pools that know they will never be accessed in interrupt
215 * context.
216 */
217 extern struct pool_allocator pool_allocator_kmem;
218 extern struct pool_allocator pool_allocator_nointr;
219 #ifdef POOL_SUBPAGE
220 /* The above are subpage allocators in this case. */
221 extern struct pool_allocator pool_allocator_kmem_fullpage;
222 extern struct pool_allocator pool_allocator_nointr_fullpage;
223 #endif
224
225 struct link_pool_init { /* same as args to pool_init() */
226 struct pool *pp; /* pool to be initialized */
227 size_t size; /* item size */
228 u_int align; /* item alignment, power of 2 */
229 u_int align_offset; /* offset within item to align */
230 int flags;
231 const char *wchan; /* tsleep(9) identifier */
232 struct pool_allocator *palloc; /* back-end allocator; pool_allocator_kmem is the default (see above) */
233 };
/*
 * POOL_INIT: statically declare a pool plus a read-only record of its
 * pool_init() arguments in the "pools" link set.  link_pool_init()
 * presumably walks that link set and calls pool_init() on each entry --
 * confirm against the implementation.
 */
234 #define POOL_INIT(pp, size, align, align_offset, flags, wchan, palloc) \
235 struct pool pp; \
236 static const struct link_pool_init _link_ ## pp[1] = { \
237 { &pp, size, align, align_offset, flags, wchan, palloc } \
238 }; \
239 __link_set_add_rodata(pools, _link_ ## pp)
240 
241 void link_pool_init(void); /* initialize all POOL_INIT()-declared pools */
242
/* Pool lifecycle: args to pool_init() mirror struct link_pool_init above. */
243 void pool_init(struct pool *, size_t, u_int, u_int,
244 int, const char *, struct pool_allocator *);
245 void pool_destroy(struct pool *);
246 
247 void pool_set_drain_hook(struct pool *,
248 void (*)(void *, int), void *);
249 
/* Item allocation/release; flags are the PR_* r/w flags (e.g. PR_WAITOK/PR_NOWAIT). */
250 void *pool_get(struct pool *, int);
251 void pool_put(struct pool *, void *);
252 int pool_reclaim(struct pool *); /* release idle pages back to the allocator */
253 
254 #ifdef POOL_DIAGNOSTIC
255 /*
256 * These versions do reentrancy checking: callers are recorded via
257 * __FILE__/__LINE__ (see pr_entered_file/pr_entered_line).
258 */
259 void *_pool_get(struct pool *, int, const char *, long);
260 void _pool_put(struct pool *, void *, const char *, long);
261 int _pool_reclaim(struct pool *, const char *, long);
262 #define pool_get(h, f) _pool_get((h), (f), __FILE__, __LINE__)
263 #define pool_put(h, v) _pool_put((h), (v), __FILE__, __LINE__)
264 #define pool_reclaim(h) _pool_reclaim((h), __FILE__, __LINE__)
265 #endif /* POOL_DIAGNOSTIC */
266 
/* Watermark / limit tuning (see pr_minpages, pr_hiwat, pr_hardlimit). */
267 int pool_prime(struct pool *, int);
268 void pool_setlowat(struct pool *, int);
269 void pool_sethiwat(struct pool *, int);
270 void pool_sethardlimit(struct pool *, int, const char *, int);
271 void pool_drain(void *);
272 
273 /*
274 * Debugging and diagnostic aides.
275 */
276 void pool_print(struct pool *, const char *);
277 void pool_printit(struct pool *, const char *,
278 void (*)(const char *, ...));
279 int pool_chk(struct pool *, const char *);
280 
281 /*
282 * Pool cache routines.  The _paddr variants also report the object's
283 * physical address; the plain macros discard/ignore it.
284 */
285 void pool_cache_init(struct pool_cache *, struct pool *,
286 int (*)(void *, void *, int),
287 void (*)(void *, void *),
288 void *);
289 void pool_cache_destroy(struct pool_cache *);
290 void *pool_cache_get_paddr(struct pool_cache *, int, paddr_t *);
291 #define pool_cache_get(pc, f) pool_cache_get_paddr((pc), (f), NULL)
292 void pool_cache_put_paddr(struct pool_cache *, void *, paddr_t);
293 #define pool_cache_put(pc, o) pool_cache_put_paddr((pc), (o), \
294 POOL_PADDR_INVALID)
295 void pool_cache_destruct_object(struct pool_cache *, void *);
296 void pool_cache_invalidate(struct pool_cache *);
297 #endif /* _KERNEL */
296
297 #endif /* _SYS_POOL_H_ */
Cache object: 03655e4f87a9d64609f7b8e67ce28568
|