FreeBSD/Linux Kernel Cross Reference
sys/vm/vm_page.h
/*
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_page.h	8.2 (Berkeley) 12/13/93
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 *
 * $FreeBSD: src/sys/vm/vm_page.h,v 1.33.2.2 1999/09/05 08:24:35 peter Exp $
 */

/*
 * Resident memory system definitions.
 */

#ifndef _VM_PAGE_
#define _VM_PAGE_

#include <vm/pmap.h>

/*
 * Management of resident (logical) pages.
 *
 * A small structure is kept for each resident
 * page, indexed by page number.  Each structure
 * is an element of several lists:
 *
 *	A hash table bucket used to quickly
 *	perform object/offset lookups.
 *
 *	A list of all pages for a given object,
 *	so they can be quickly deactivated at
 *	time of deallocation.
 *
 *	An ordered list of pages due for pageout.
 *
 * In addition, the structure contains the object
 * and offset to which this page belongs (for pageout),
 * and sundry status bits.
 *
 * Fields in this structure are locked either by the lock on the
 * object that the page belongs to (O) or by the lock on the page
 * queues (P).
 */

TAILQ_HEAD(pglist, vm_page);

struct vm_page {
	TAILQ_ENTRY(vm_page) pageq;	/* queue info for FIFO queue or free list (P) */
	TAILQ_ENTRY(vm_page) hashq;	/* hash table links (O) */
	TAILQ_ENTRY(vm_page) listq;	/* pages in same object (O) */

	vm_object_t object;		/* which object am I in (O,P) */
	vm_pindex_t pindex;		/* offset into object (O,P) */
	vm_offset_t phys_addr;		/* physical address of page */
	u_short queue;			/* page queue index */
	u_short flags,			/* see below */
		pc;			/* page color */
	u_short wire_count;		/* wired down maps refs (P) */
	short hold_count;		/* page hold count */
	u_char act_count;		/* page usage count */
	u_char busy;			/* page busy count */
	/* NOTE that these must support one bit per DEV_BSIZE in a page!!! */
	/* so, on normal X86 kernels, they must be at least 8 bits wide */
	u_char valid;			/* map of valid DEV_BSIZE chunks */
	u_char dirty;			/* map of dirty DEV_BSIZE chunks */
};

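/*
 * Illustrative sketch (not part of this header): an object/offset lookup
 * walks a single hash bucket through the hashq links above.  The bucket
 * array and hash function live in vm_page.c; the names vm_page_buckets
 * and vm_page_hash() below are assumptions for illustration only.
 */
#if 0 /* example only */
vm_page_t
example_page_lookup(vm_object_t object, vm_pindex_t pindex)
{
	struct pglist *bucket;
	vm_page_t m;

	bucket = &vm_page_buckets[vm_page_hash(object, pindex)];
	for (m = bucket->tqh_first; m != NULL; m = m->hashq.tqe_next) {
		if (m->object == object && m->pindex == pindex)
			return (m);	/* page is resident */
	}
	return (NULL);			/* page is not resident */
}
#endif
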
/*
 * Page coloring parameters.
 */
/* Each of PQ_FREE, PQ_ZERO and PQ_CACHE has PQ_L2_SIZE entries */

#if 0	/* Page coloring is broken in 2.2.x */
/* Define one of the following */
#if defined(PQ_LARGECACHE)
#define PQ_PRIME1 31	/* Prime number somewhat less than PQ_L2_SIZE */
#define PQ_PRIME2 23	/* Prime number somewhat less than PQ_L2_SIZE */
#define PQ_PRIME3 17	/* Prime number somewhat less than PQ_L2_SIZE */
#define PQ_L2_SIZE 128	/* A number of colors, optimal for a 512K cache */
#define PQ_L1_SIZE 2	/* Two page L1 cache */
#endif

/*
 * Use 'options PQ_NOOPT' to disable page coloring.
 */
#if defined(PQ_NOOPT)
#define PQ_PRIME1 1
#define PQ_PRIME2 1
#define PQ_PRIME3 1
#define PQ_L2_SIZE 1
#define PQ_L1_SIZE 1
#endif

#if defined(PQ_NORMALCACHE)
#define PQ_PRIME1 5	/* Prime number somewhat less than PQ_L2_SIZE */
#define PQ_PRIME2 3	/* Prime number somewhat less than PQ_L2_SIZE */
#define PQ_PRIME3 11	/* Prime number somewhat less than PQ_L2_SIZE */
#define PQ_L2_SIZE 16	/* A reasonable number of colors, optimal for a 64K cache */
#define PQ_L1_SIZE 2	/* Two page L1 cache */
#endif

#if defined(PQ_MEDIUMCACHE) || !defined(PQ_L2_SIZE)
#define PQ_PRIME1 13	/* Prime number somewhat less than PQ_L2_SIZE */
#define PQ_PRIME2 7	/* Prime number somewhat less than PQ_L2_SIZE */
#define PQ_PRIME3 5	/* Prime number somewhat less than PQ_L2_SIZE */
#define PQ_L2_SIZE 64	/* A number of colors, optimal for a 256K cache */
#define PQ_L1_SIZE 2	/* Two page L1 cache */
#endif
#endif	/* 0 */

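/* Page coloring is disabled above; collapse all colored queues to one color. */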
#define PQ_PRIME1 1
#define PQ_PRIME2 1
#define PQ_PRIME3 1
#define PQ_L2_SIZE 1
#define PQ_L1_SIZE 1

#define PQ_L2_MASK	(PQ_L2_SIZE - 1)

#define PQ_NONE		0
#define PQ_FREE		1
#define PQ_ZERO		(1 + PQ_L2_SIZE)
#define PQ_INACTIVE	(1 + 2*PQ_L2_SIZE)
#define PQ_ACTIVE	(2 + 2*PQ_L2_SIZE)
#define PQ_CACHE	(3 + 2*PQ_L2_SIZE)
#define PQ_COUNT	(3 + 3*PQ_L2_SIZE)

extern struct vpgqueues {
	struct pglist *pl;
	int	*cnt;
	int	*lcnt;
} vm_page_queues[PQ_COUNT];

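/*
 * Illustrative sketch (not in the original header): a page's queue field
 * is the base index of its logical queue plus, for the colored queues
 * (PQ_FREE, PQ_ZERO, PQ_CACHE), the page's color.  With PQ_L2_SIZE
 * collapsed to 1 above, each colored queue degenerates to a single list.
 */
#if 0 /* example only */
static __inline struct vpgqueues *
example_free_queue(vm_page_t m)
{
	/* The vm_page_queues[] slot for a free page of color m->pc: */
	return (&vm_page_queues[PQ_FREE + (m->pc & PQ_L2_MASK)]);
}

static __inline int
example_on_cache_queue(vm_page_t m)
{
	/* Subtracting the color recovers the logical queue. */
	return ((m->queue - m->pc) == PQ_CACHE);
}
#endif
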
/*
 * These are the flags defined for vm_page.
 */
#define	PG_BUSY		0x01	/* page is in transit (O) */
#define	PG_WANTED	0x02	/* someone is waiting for page (O) */
#define	PG_TABLED	0x04	/* page is in VP table (O) */
#define	PG_FICTITIOUS	0x08	/* physical page doesn't exist (O) */
#define	PG_WRITEABLE	0x10	/* page is mapped writeable */
#define	PG_MAPPED	0x20	/* page is mapped */
#define	PG_ZERO		0x40	/* page is zeroed */
#define	PG_REFERENCED	0x80	/* page has been referenced */
#define	PG_CLEANCHK	0x100	/* page has been checked for cleaning */

/*
 * Misc constants.
 */
#define ACT_DECLINE		1
#define ACT_ADVANCE		3
#define ACT_INIT		5
#define ACT_MAX			64
#define PFCLUSTER_BEHIND	3
#define PFCLUSTER_AHEAD		3

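/*
 * Illustrative sketch (not in the original header) of the aging policy the
 * ACT_* constants support: act_count is nudged up by ACT_ADVANCE when a
 * reference is seen and down by ACT_DECLINE otherwise, clamped to ACT_MAX.
 * The actual pageout scan in vm_pageout.c differs in detail.
 */
#if 0 /* example only */
static __inline void
example_age_page(vm_page_t m, int referenced)
{
	if (referenced) {
		m->act_count += ACT_ADVANCE;
		if (m->act_count > ACT_MAX)
			m->act_count = ACT_MAX;
	} else if (m->act_count > ACT_DECLINE)
		m->act_count -= ACT_DECLINE;
	else
		m->act_count = 0;	/* now a candidate for deactivation */
}
#endif
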
#ifdef KERNEL
/*
 * Each pageable resident page falls into one of five lists:
 *
 *	free
 *		Available for allocation now.
 *
 *	zero
 *		Pages that are really free and have been pre-zeroed.
 *
 *	The following are all LRU sorted:
 *
 *	cache
 *		Almost available for allocation.  Still in an
 *		object, but clean and immediately freeable at
 *		non-interrupt times.
 *
 *	inactive
 *		Low activity, candidates for reclamation.
 *		This is the list of pages that should be
 *		paged out next.
 *
 *	active
 *		Pages that are "active", i.e. they have been
 *		recently referenced.
 */

extern struct pglist vm_page_queue_free[PQ_L2_SIZE];	/* memory free queue */
extern struct pglist vm_page_queue_zero[PQ_L2_SIZE];	/* zeroed memory free queue */
extern struct pglist vm_page_queue_active;		/* active memory queue */
extern struct pglist vm_page_queue_inactive;		/* inactive memory queue */
extern struct pglist vm_page_queue_cache[PQ_L2_SIZE];	/* cache memory queue */

extern int vm_page_zero_count;

extern vm_page_t vm_page_array;		/* First resident page in table */
extern long first_page;			/* first physical page number */
					/* ... represented in vm_page_array */
extern long last_page;			/* last physical page number */
					/* ... represented in vm_page_array */
					/* [INCLUSIVE] */
extern vm_offset_t first_phys_addr;	/* physical address for first_page */
extern vm_offset_t last_phys_addr;	/* physical address for last_page */

#define VM_PAGE_TO_PHYS(entry)	((entry)->phys_addr)

#define IS_VM_PHYSADDR(pa) \
	((pa) >= first_phys_addr && (pa) <= last_phys_addr)

#define PHYS_TO_VM_PAGE(pa) \
	(&vm_page_array[atop(pa) - first_page])

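/*
 * Worked example (editorial, not part of the original header):
 * vm_page_array holds one struct vm_page per managed physical page, so the
 * translation is pure arithmetic.  With 4K pages, suppose first_page ==
 * 0x100 (managed memory starting at 1MB) and pa == 0x123000; then atop(pa)
 * == 0x123, PHYS_TO_VM_PAGE(pa) == &vm_page_array[0x23], and
 * VM_PAGE_TO_PHYS() of that entry returns 0x123000 again.
 */
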
/*
 * Functions implemented as macros.
 */

#define PAGE_ASSERT_WAIT(m, interruptible)	{ \
	(m)->flags |= PG_WANTED; \
	assert_wait((int) (m), (interruptible)); \
}

#define PAGE_WAKEUP(m)	{ \
	(m)->flags &= ~PG_BUSY; \
	if ((m)->flags & PG_WANTED) { \
		(m)->flags &= ~PG_WANTED; \
		wakeup((caddr_t) (m)); \
	} \
}

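/*
 * Illustrative sketch (not in the original header): the usual way to wait
 * for a busy page.  A waiter marks the page PG_WANTED and blocks; whoever
 * owns the page calls PAGE_WAKEUP() when done, which clears PG_BUSY and
 * wakes any waiters.  The tsleep() call and its arguments below are
 * assumptions for illustration; see actual callers for the blocking
 * primitive they use.
 */
#if 0 /* example only */
static __inline void
example_busy_acquire(vm_page_t m)
{
	while (m->flags & PG_BUSY) {
		m->flags |= PG_WANTED;
		tsleep((caddr_t) m, PVM, "pgbusy", 0);
	}
	m->flags |= PG_BUSY;		/* we now own the page */
}

/* ... operate on the page, then release it with PAGE_WAKEUP(m) ... */
#endif
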
#if PAGE_SIZE == 4096
#define VM_PAGE_BITS_ALL 0xff
#endif

#if PAGE_SIZE == 8192
#define VM_PAGE_BITS_ALL 0xffff
#endif

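/*
 * Worked example (editorial): with PAGE_SIZE == 4096 and DEV_BSIZE == 512
 * there are 8 chunks per page, one bit each, so a fully valid page has
 * valid == 0xff == VM_PAGE_BITS_ALL.  The mask for a byte range is roughly
 * what vm_page_bits() (declared below) computes; e.g. bytes [512, 1536)
 * cover chunks 1..2 and yield the mask 0x06.
 */
#if 0 /* example only */
static __inline int
example_range_bits(int base, int size)
{
	int first = base / DEV_BSIZE;			/* first chunk touched */
	int last = (base + size - 1) / DEV_BSIZE;	/* last chunk touched */

	return (((1 << (last + 1)) - 1) & ~((1 << first) - 1));
}
#endif
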
#define VM_ALLOC_NORMAL		0
#define VM_ALLOC_INTERRUPT	1
#define VM_ALLOC_SYSTEM		2
#define VM_ALLOC_ZERO		3

void vm_page_activate __P((vm_page_t));
vm_page_t vm_page_alloc __P((vm_object_t, vm_pindex_t, int));
void vm_page_cache __P((register vm_page_t));
static __inline void vm_page_copy __P((vm_page_t, vm_page_t));
void vm_page_deactivate __P((vm_page_t));
void vm_page_free __P((vm_page_t));
void vm_page_free_zero __P((vm_page_t));
void vm_page_insert __P((vm_page_t, vm_object_t, vm_pindex_t));
vm_page_t vm_page_lookup __P((vm_object_t, vm_pindex_t));
void vm_page_remove __P((vm_page_t));
void vm_page_rename __P((vm_page_t, vm_object_t, vm_pindex_t));
vm_offset_t vm_page_startup __P((vm_offset_t, vm_offset_t, vm_offset_t));
void vm_page_unwire __P((vm_page_t));
void vm_page_wire __P((vm_page_t));
void vm_page_unqueue __P((vm_page_t));
void vm_page_unqueue_nowakeup __P((vm_page_t));
void vm_page_set_validclean __P((vm_page_t, int, int));
void vm_page_set_invalid __P((vm_page_t, int, int));
static __inline boolean_t vm_page_zero_fill __P((vm_page_t));
int vm_page_is_valid __P((vm_page_t, int, int));
void vm_page_test_dirty __P((vm_page_t));
int vm_page_bits __P((int, int));
vm_page_t vm_page_list_find __P((int, int));
int vm_page_queue_index __P((vm_offset_t, int));
vm_page_t vm_page_select __P((vm_object_t, vm_pindex_t, int));

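/*
 * Illustrative sketch (not in the original header): allocating a page at
 * (object, pindex).  vm_page_alloc() returns NULL when memory is short;
 * callers normally wait for the pageout daemon and retry.  The VM_WAIT
 * macro used below is assumed from vm_pageout.h.
 */
#if 0 /* example only */
static __inline vm_page_t
example_alloc_zeroed(vm_object_t object, vm_pindex_t pindex)
{
	vm_page_t m;

	while ((m = vm_page_alloc(object, pindex, VM_ALLOC_ZERO)) == NULL)
		VM_WAIT;		/* let the pageout daemon free memory */
	if ((m->flags & PG_ZERO) == 0)
		vm_page_zero_fill(m);	/* no pre-zeroed page was available */
	return (m);
}
#endif
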
/*
 * Keep the page from being freed by the page daemon.  This has much the
 * same effect as wiring, but with far lower overhead; it should be used
 * only for *very* temporary holds ("wiring").
 */
static __inline void
vm_page_hold(vm_page_t mem)
{
	mem->hold_count++;
}

#ifdef DIAGNOSTIC
#include <sys/systm.h>		/* make GCC shut up */
#endif

static __inline void
vm_page_unhold(vm_page_t mem)
{
#ifdef DIAGNOSTIC
	if (--mem->hold_count < 0)
		panic("vm_page_unhold: hold count < 0!!!");
#else
	--mem->hold_count;
#endif
}

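/*
 * Illustrative sketch (not in the original header): hold_count pins a page
 * across a short operation without the bookkeeping cost of vm_page_wire().
 */
#if 0 /* example only */
	vm_page_hold(m);
	/* ... short window during which the page must not be freed ... */
	vm_page_unhold(m);
#endif
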
static __inline void
vm_page_protect(vm_page_t mem, int prot)
{
	if (prot == VM_PROT_NONE) {
		if (mem->flags & (PG_WRITEABLE|PG_MAPPED)) {
			pmap_page_protect(VM_PAGE_TO_PHYS(mem), prot);
			mem->flags &= ~(PG_WRITEABLE|PG_MAPPED);
		}
	} else if ((prot == VM_PROT_READ) && (mem->flags & PG_WRITEABLE)) {
		pmap_page_protect(VM_PAGE_TO_PHYS(mem), prot);
		mem->flags &= ~PG_WRITEABLE;
	}
}

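/*
 * Usage note (editorial observation, not from the original header):
 * pageout code typically calls vm_page_protect(m, VM_PROT_READ) before
 * cleaning a page, so that any later store faults and visibly re-dirties
 * the page, and vm_page_protect(m, VM_PROT_NONE) before freeing it.
 */
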
/*
 * vm_page_zero_fill:
 *
 *	Zero-fill the specified page.
 *	Written as a standard pagein routine, to
 *	be used by the zero-fill object.
 */
static __inline boolean_t
vm_page_zero_fill(m)
	vm_page_t m;
{
	pmap_zero_page(VM_PAGE_TO_PHYS(m));
	return (TRUE);
}

/*
 * vm_page_copy:
 *
 *	Copy one page to another.
 */
static __inline void
vm_page_copy(src_m, dest_m)
	vm_page_t src_m;
	vm_page_t dest_m;
{
	pmap_copy_page(VM_PAGE_TO_PHYS(src_m), VM_PAGE_TO_PHYS(dest_m));
	dest_m->valid = VM_PAGE_BITS_ALL;
}

#endif /* KERNEL */
#endif /* !_VM_PAGE_ */