FreeBSD/Linux Kernel Cross Reference
sys/vm/vm_page.h
1 /*
2 * Mach Operating System
3 * Copyright (c) 1993-1988 Carnegie Mellon University
4 * All Rights Reserved.
5 *
6 * Permission to use, copy, modify and distribute this software and its
7 * documentation is hereby granted, provided that both the copyright
8 * notice and this permission notice appear in all copies of the
9 * software, derivative works or modified versions, and any portions
10 * thereof, and that both notices appear in supporting documentation.
11 *
12 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
13 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
14 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
15 *
16 * Carnegie Mellon requests users of this software to return to
17 *
18 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
19 * School of Computer Science
20 * Carnegie Mellon University
21 * Pittsburgh PA 15213-3890
22 *
23 * any improvements or extensions that they make and grant Carnegie Mellon
24 * the rights to redistribute these changes.
25 */
26 /*
27 * HISTORY
28 * $Log: vm_page.h,v $
29 * Revision 2.13 93/01/14 18:02:02 danner
30 * Added ANSI prototypes.
31 * [92/12/30 dbg]
32 * 64bit cleanup.
33 * [92/12/01 af]
34 *
35 * Revision 2.12 92/01/14 16:48:28 rpd
36 * Added vm_page_info declaration.
37 * [92/01/14 rpd]
38 *
39 * Revision 2.11 91/07/01 08:28:05 jsb
40 * Change unused was_absent field to precious field.
41 *
42 * Remove extraneous include of kern/thread.h.
43 * [91/06/21 dlb]
44 *
45 * Revision 2.10 91/05/18 14:41:37 rpd
46 * Removed vm_page_fictitious_zone.
47 * [91/05/16 rpd]
48 *
49 * Added vm_page_fictitious_addr.
50 * Removed VM_PAGE_TO_PHYS.
51 * [91/04/10 rpd]
52 *
53 * Changed vm_page_free_wanted to unsigned int.
54 * [91/04/05 rpd]
55 * Added vm_page_grab_fictitious, etc.
56 * [91/03/29 rpd]
57 * Removed VM_PAGE_DEBUG and associated variables,
58 * leaving a dummy VM_PAGE_CHECK definition.
59 * Added vm_page_attach.
60 * [91/03/22 rpd]
61 *
62 * Revision 2.9 91/05/14 17:50:46 mrt
63 * Correcting copyright
64 *
65 * Revision 2.8 91/03/16 15:06:38 rpd
66 * Added continuation argument to VM_PAGE_WAIT, vm_page_wait.
67 * [91/02/05 rpd]
68 *
69 * Revision 2.7 91/02/05 17:59:48 mrt
70 * Changed to new Mach copyright
71 * [91/02/01 16:34:06 mrt]
72 *
73 * Revision 2.6 91/01/08 16:45:48 rpd
74 * Changed to singly-linked VP bucket chains.
75 * [91/01/03 rpd]
76 * Changed vm_wait/VM_WAIT to vm_page_wait/VM_PAGE_WAIT.
77 * [90/11/13 rpd]
78 *
79 * Revision 2.5 90/10/25 14:50:43 rwd
80 * Removed the max_mapping field of pages.
81 * [90/10/22 rpd]
82 *
83 * Revision 2.4 90/10/12 13:06:41 rpd
84 * Added software reference bit to the page structure.
85 * [90/10/08 rpd]
86 *
87 * Revision 2.3 90/02/22 20:06:43 dbg
88 * Rename PAGE_WAKEUP to PAGE_WAKEUP_DONE to reflect the fact that
89 * it clears the busy flag. Add new PAGE_WAKEUP that only does the
90 * wakeup, and leaves the busy flag alone.
91 * [89/12/13 dlb]
92 *
93 * Revision 2.2 90/01/11 11:48:21 dbg
94 * Add changes from mainline:
95 *
96 * Add vm_page_module_init() declaration.
97 * Change vm_page_zero_fill() to return void.
98 * [89/08/07 mwyoung]
99 * Added was_absent, overwriting fields.
100 * [89/05/15 mwyoung]
101 * Separate "private" from "fictitious" page attributes.
102 * [89/04/22 mwyoung]
103 * Move dirty bit under the object lock (from the page
104 * queues lock).
105 * [89/04/22 mwyoung]
106 * Changes for MACH_KERNEL:
107 * . Removed non-XP declarations.
108 * [89/04/28 dbg]
109 *
110 * Revision 2.1 89/08/03 16:45:49 rwd
111 * Created.
112 *
113 * Revision 2.10 89/04/18 21:26:59 mwyoung
114 * Recent history:
115 * Add vm_page_fictitious_zone; remove clean, owner fields.
116 * History condensation: no relevant history.
117 * [89/04/18 mwyoung]
118 *
119 * Revision 2.4 88/12/19 03:00:55 mwyoung
120 */
121 /*
122 * File: vm/vm_page.h
123 * Author: Avadis Tevanian, Jr., Michael Wayne Young
124 * Date: 1985
125 *
126 * Resident memory system definitions.
127 */
128
129 #ifndef _VM_VM_PAGE_H_
130 #define _VM_VM_PAGE_H_
131
132 #include <mach_vm_debug.h>
133
134 #include <mach/boolean.h>
135 #include <mach/vm_prot.h>
136 #include <mach/vm_param.h>
137 #include <vm/vm_object.h>
138 #include <kern/queue.h>
139 #include <kern/lock.h>
140 #include <kern/zalloc.h>
141
142 #include <kern/macro_help.h>
143 #include <kern/sched_prim.h> /* definitions of wait/wakeup */
144
145 #if MACH_VM_DEBUG
146 #include <mach_debug/hash_info.h>
147 #endif
148
149 /*
150 * Management of resident (logical) pages.
151 *
152 * A small structure is kept for each resident
153 * page, indexed by page number. Each structure
154 * is an element of several lists:
155 *
156 * A hash table bucket used to quickly
157 * perform object/offset lookups
158 *
159 * A list of all pages for a given object,
160 * so they can be quickly deactivated at
161 * time of deallocation.
162 *
163 * An ordered list of pages due for pageout.
164 *
165 * In addition, the structure contains the object
166 * and offset to which this page belongs (for pageout),
167 * and sundry status bits.
168 *
169 * Fields in this structure are locked either by the lock on the
170 * object that the page belongs to (O) or by the lock on the page
171 * queues (P). [Some fields require that both locks be held to
172 * change that field; holding either lock is sufficient to read.]
173 */
174
175 struct vm_page {
176 queue_chain_t pageq; /* queue info for FIFO
177 * queue or free list (P) */
178 queue_chain_t listq; /* all pages in same object (O) */
179 struct vm_page *next; /* VP bucket link (O) */
180
181 vm_object_t object; /* which object am I in (O,P) */
182 vm_offset_t offset; /* offset into that object (O,P) */
183
 /*
  * First bit-field word: per the (P) tags below, these fields are
  * protected by the page-queues lock (wire_count needs both locks).
  */
184 unsigned int wire_count:16, /* how many wired down maps use me?
185 (O&P) */
186 /* boolean_t */ inactive:1, /* page is in inactive list (P) */
187 active:1, /* page is in active list (P) */
188 laundry:1, /* page is being cleaned now (P)*/
189 free:1, /* page is on free list (P) */
190 reference:1, /* page has been used (P) */
191 :0; /* (force to 'long' boundary) */
192 #ifdef ns32000
193 int pad; /* extra space for ns32000 bit ops */
194 #endif /* ns32000 */
195
 /*
  * Second bit-field word: per the (O) tags below, these fields are
  * protected by the lock of the object the page belongs to.
  */
196 unsigned int
197 /* boolean_t */ busy:1, /* page is in transit (O) */
198 wanted:1, /* someone is waiting for page (O) */
199 tabled:1, /* page is in VP table (O) */
200 fictitious:1, /* Physical page doesn't exist (O) */
201 private:1, /* Page should not be returned to
202 * the free list (O) */
203 absent:1, /* Data has been requested, but is
204 * not yet available (O) */
205 error:1, /* Data manager was unable to provide
206 * data due to error (O) */
207 dirty:1, /* Page must be cleaned (O) */
208 precious:1, /* Page is precious; data must be
209 * returned even if clean (O) */
210 overwriting:1, /* Request to unlock has been made
211 * without having data. (O)
212 * [See vm_object_overwrite] */
213 :0;
214
215 vm_offset_t phys_addr; /* Physical address of page, passed
216 * to pmap_enter (read-only) */
217 vm_prot_t page_lock; /* Uses prohibited by data manager (O) */
218 vm_prot_t unlock_request; /* Outstanding unlock request (O) */
219 };
220
221 typedef struct vm_page *vm_page_t;
222
223 #define VM_PAGE_NULL ((vm_page_t) 0)
224
225 /*
226 * For debugging, this macro can be defined to perform
227 * some useful check on a page structure.
228 * It currently expands to nothing (checking disabled).
229 */
230 #define VM_PAGE_CHECK(mem)
231
232 /*
233 * Each pageable resident page falls into one of three lists:
234 *
235 * free
236 * Available for allocation now.
237 * inactive
238 * Not referenced in any map, but still has an
239 * object/offset-page mapping, and may be dirty.
240 * This is the list of pages that should be
241 * paged out next.
242 * active
243 * A list of pages which have been placed in
244 * at least one physical map. This list is
245 * ordered, in LRU-like fashion.
246 */
247
/*
 * Exported globals: the page queues, accounting counters, and the
 * locks that protect them.  See struct vm_page for the (O)/(P)
 * locking annotations that govern per-page fields.
 */
248 extern
249 vm_page_t vm_page_queue_free; /* memory free queue */
250 extern
251 vm_page_t vm_page_queue_fictitious; /* fictitious free queue */
252 extern
253 queue_head_t vm_page_queue_active; /* active memory queue */
254 extern
255 queue_head_t vm_page_queue_inactive; /* inactive memory queue */
256
257 extern
258 vm_offset_t first_phys_addr; /* physical address for first_page */
259 extern
260 vm_offset_t last_phys_addr; /* physical address for last_page */
261
262 extern
263 int vm_page_free_count; /* How many pages are free? */
264 extern
265 int vm_page_fictitious_count;/* How many fictitious pages are free? */
266 extern
267 int vm_page_active_count; /* How many pages are active? */
268 extern
269 int vm_page_inactive_count; /* How many pages are inactive? */
270 extern
271 int vm_page_wire_count; /* How many pages are wired? */
272 extern
273 int vm_page_free_target; /* How many do we want free? */
274 extern
275 int vm_page_free_min; /* When to wakeup pageout */
276 extern
277 int vm_page_inactive_target;/* How many do we want inactive? */
278 extern
279 int vm_page_free_reserved; /* How many pages reserved to do pageout */
280 extern
281 int vm_page_laundry_count; /* How many pages being laundered? */
282
283 decl_simple_lock_data(extern,vm_page_queue_lock)/* lock on active and inactive
284 page queues */
285 decl_simple_lock_data(extern,vm_page_queue_free_lock)
286 /* lock on free page queue */
287
288 extern unsigned int vm_page_free_wanted;
289 /* how many threads are waiting for memory */
290
291 extern vm_offset_t vm_page_fictitious_addr;
292 /* (fake) phys_addr of fictitious pages */
293
/*
 * Prototypes for the exported resident-page operations
 * (implementations are elsewhere in the VM system).
 */
294 extern void vm_page_bootstrap(
295 vm_offset_t *startp,
296 vm_offset_t *endp);
297 extern void vm_page_module_init(void);
298
299 extern void vm_page_create(
300 vm_offset_t start,
301 vm_offset_t end);
302 extern vm_page_t vm_page_lookup(
303 vm_object_t object,
304 vm_offset_t offset);
305 extern vm_page_t vm_page_grab_fictitious(void);
306 extern void vm_page_release_fictitious(vm_page_t);
307 extern boolean_t vm_page_convert(vm_page_t);
308 extern void vm_page_more_fictitious(void);
309 extern vm_page_t vm_page_grab(void);
310 extern void vm_page_release(vm_page_t);
311 extern void vm_page_wait(void (*)(void));
312 extern vm_page_t vm_page_alloc(
313 vm_object_t object,
314 vm_offset_t offset);
315 extern void vm_page_init(
316 vm_page_t mem,
317 vm_offset_t phys_addr);
318 extern void vm_page_free(vm_page_t);
319 extern void vm_page_activate(vm_page_t);
320 extern void vm_page_deactivate(vm_page_t);
321 extern void vm_page_rename(
322 vm_page_t mem,
323 vm_object_t new_object,
324 vm_offset_t new_offset);
325 extern void vm_page_insert(
326 vm_page_t mem,
327 vm_object_t object,
328 vm_offset_t offset);
329 extern void vm_page_remove(
330 vm_page_t mem);
331
332 extern void vm_page_zero_fill(vm_page_t);
333 extern void vm_page_copy(vm_page_t src_m, vm_page_t dest_m);
334
335 extern void vm_page_wire(vm_page_t);
336 extern void vm_page_unwire(vm_page_t);
337
338 extern void vm_set_page_size(void);
339
340 #if MACH_VM_DEBUG
341 extern unsigned int vm_page_info(
342 hash_info_bucket_t *info,
343 unsigned int count);
344 #endif
345
346 /*
347 * Functions implemented as macros
348 */
349
/*
 * PAGE_ASSERT_WAIT(m, interruptible): note that someone wants the
 * page and register a wait on it via assert_wait().  This only
 * queues the wait; the caller is still responsible for blocking.
 * "wanted" is (O)-protected, so the page's object should be locked.
 */
350 #define PAGE_ASSERT_WAIT(m, interruptible) \
351 MACRO_BEGIN \
352 (m)->wanted = TRUE; \
353 assert_wait((event_t) (m), (interruptible)); \
354 MACRO_END
355
/*
 * PAGE_WAKEUP_DONE(m): clear the page's busy flag and, if any thread
 * was waiting for the page, clear "wanted" and wake the waiters.
 * Use PAGE_WAKEUP instead to wake waiters while keeping "busy" set.
 */
356 #define PAGE_WAKEUP_DONE(m) \
357 MACRO_BEGIN \
358 (m)->busy = FALSE; \
359 if ((m)->wanted) { \
360 (m)->wanted = FALSE; \
361 thread_wakeup(((event_t) m)); \
362 } \
363 MACRO_END
364
/*
 * PAGE_WAKEUP(m): wake up any threads waiting for the page.
 * Unlike PAGE_WAKEUP_DONE, this leaves the busy flag untouched.
 */
365 #define PAGE_WAKEUP(m) \
366 MACRO_BEGIN \
367 if ((m)->wanted) { \
368 (m)->wanted = FALSE; \
369 thread_wakeup((event_t) (m)); \
370 } \
371 MACRO_END
372
/*
 * VM_PAGE_FREE(p): convenience wrapper that takes the page-queues
 * lock around vm_page_free().  For callers that already hold the
 * queues lock, call vm_page_free() directly.
 */
373 #define VM_PAGE_FREE(p) \
374 MACRO_BEGIN \
375 vm_page_lock_queues(); \
376 vm_page_free(p); \
377 vm_page_unlock_queues(); \
378 MACRO_END
379
380 /*
381 * Macro to be used in place of pmap_enter(): enters the page's
 * physical address into the pmap, first stripping from "protection"
 * any access modes the data manager has prohibited for this page
 * (the (O)-protected page_lock field of struct vm_page).
382 */
383
384 #define PMAP_ENTER(pmap, virtual_address, page, protection, wired) \
385 MACRO_BEGIN \
386 pmap_enter( \
387 (pmap), \
388 (virtual_address), \
389 (page)->phys_addr, \
390 (protection) & ~(page)->page_lock, \
391 (wired) \
392 ); \
393 MACRO_END
394
/*
 * VM_PAGE_WAIT(continuation): wait until free pages are available;
 * simply forwards to vm_page_wait() with the given continuation.
 */
395 #define VM_PAGE_WAIT(continuation) vm_page_wait(continuation)

/* Acquire/release the lock on the active and inactive page queues. */
397 #define vm_page_lock_queues() simple_lock(&vm_page_queue_lock)
398 #define vm_page_unlock_queues() simple_unlock(&vm_page_queue_lock)
399
/*
 * VM_PAGE_QUEUES_REMOVE(mem): take "mem" off the active and/or
 * inactive page queue (whichever it is on) and decrement the
 * corresponding counter, clearing the page's queue-membership bit.
 * The affected fields are (P)-protected, so the caller is expected
 * to hold the page-queues lock (vm_page_lock_queues()).
 *
 * NOTE(review): the macro argument is now parenthesized at every
 * use (CERT PRE01-C) so the macro stays correct when invoked with
 * a non-trivial expression such as *mp or a cast; other macros in
 * this file already follow that convention.
 */
400 #define VM_PAGE_QUEUES_REMOVE(mem) \
401 MACRO_BEGIN \
402 if ((mem)->active) { \
403 queue_remove(&vm_page_queue_active, \
404 (mem), vm_page_t, pageq); \
405 (mem)->active = FALSE; \
406 vm_page_active_count--; \
407 } \
408 \
409 if ((mem)->inactive) { \
410 queue_remove(&vm_page_queue_inactive, \
411 (mem), vm_page_t, pageq); \
412 (mem)->inactive = FALSE; \
413 vm_page_inactive_count--; \
414 } \
415 MACRO_END
416
417 #endif /* _VM_VM_PAGE_H_ */
Cache object: b5c450082bd4b588fa39b5c3372374c9
|