sys/vm/vm_page2.h
/*-
 * Copyright (c) 1982, 1986, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vmmeter.h	8.2 (Berkeley) 7/10/94
 * $FreeBSD: src/sys/sys/vmmeter.h,v 1.21.2.2 2002/10/10 19:28:21 dillon Exp $
 */

#ifndef _VM_VM_PAGE2_H_
#define _VM_VM_PAGE2_H_

#ifndef _SYS_VMMETER_H_
#include <sys/vmmeter.h>
#endif
#ifndef _SYS_QUEUE_H_
#include <sys/queue.h>
#endif
#ifndef _VM_VM_PAGE_H_
#include <vm/vm_page.h>
#endif
#ifndef _SYS_SPINLOCK_H_
#include <sys/spinlock.h>
#endif
#ifndef _SYS_SPINLOCK2_H_
#include <sys/spinlock2.h>
#endif

#ifdef _KERNEL

/*
 * Return TRUE if we are under our severe low-free-pages threshold.
 *
 * This causes user processes to stall to avoid exhausting memory that
 * the kernel might need.
 *
 * reserved < severe < minimum < target < paging_target
 */
static __inline
int
vm_page_count_severe(void)
{
	return (vmstats.v_free_severe >
		vmstats.v_free_count + vmstats.v_cache_count ||
		vmstats.v_free_reserved > vmstats.v_free_count);
}
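
/*
 * Illustrative sketch (not part of the original header): an allocation
 * path might stall while the severe threshold is breached and retry
 * afterwards.  The vm_wait_nominal()-style sleep is an assumption for
 * illustration; the real throttle lives in the page allocation code.
 *
 *	while (vm_page_count_severe())
 *		vm_wait_nominal();	// hypothetical: sleep until pageout catches up
 */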

/*
 * Return TRUE if we are under our minimum low-free-pages threshold.
 * This activates the pageout daemon.  The pageout daemon tries to
 * reach the target but may stop once it satisfies the minimum.
 *
 * reserved < severe < minimum < target < paging_target
 */
static __inline
int
vm_page_count_min(int donotcount)
{
	return (vmstats.v_free_min + donotcount >
		(vmstats.v_free_count + vmstats.v_cache_count) ||
		vmstats.v_free_reserved > vmstats.v_free_count);
}

/*
 * Return TRUE if we are under our free page target.  The pageout daemon
 * tries to reach the target but may stop once it gets past the min.
 *
 * User threads doing normal allocations might wait based on this
 * function but MUST NOT wait in a loop based on this function as the
 * VM load may prevent the target from being reached.
 */
static __inline
int
vm_page_count_target(void)
{
	return (vmstats.v_free_target >
		(vmstats.v_free_count + vmstats.v_cache_count) ||
		vmstats.v_free_reserved > vmstats.v_free_count);
}

/*
 * Return the number of pages the pageout daemon needs to move into the
 * cache or free lists.  A negative number means we have sufficient free
 * pages.
 *
 * The target free+cache is greater than vm_page_count_target().  The
 * frontend uses vm_page_count_target() while the backend continues freeing
 * based on vm_paging_target().
 *
 * This function DOES NOT return TRUE or FALSE.
 */
static __inline
int
vm_paging_target(void)
{
	return (
		(vmstats.v_free_target + vmstats.v_cache_min) -
		(vmstats.v_free_count + vmstats.v_cache_count)
	);
}
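
/*
 * Worked example (hypothetical numbers): with v_free_target = 4096,
 * v_cache_min = 1024, v_free_count = 3000 and v_cache_count = 1500,
 * vm_paging_target() returns (4096 + 1024) - (3000 + 1500) = 620,
 * i.e. the pageout daemon still has 620 pages to free or cache.  With
 * v_free_count = 5000 it would instead return -1380: no deficit.
 */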

/*
 * Return TRUE if hysteresis dictates we should nominally wakeup the
 * pageout daemon to start working on freeing up some memory.  This
 * routine should NOT be used to determine when to block on the VM system.
 * We want to wakeup the pageout daemon before we might otherwise block.
 *
 * Paging begins when cache+free drops below cache_min + free_min.
 */
static __inline
int
vm_paging_needed(void)
{
	if (vmstats.v_free_min + vmstats.v_cache_min >
	    vmstats.v_free_count + vmstats.v_cache_count) {
		return 1;
	}
	if (vmstats.v_free_min > vmstats.v_free_count)
		return 1;
	return 0;
}
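
/*
 * Illustrative sketch (not from the original header): a typical
 * allocation-side check that nudges the pageout daemon early, before
 * the caller would otherwise block.  The pagedaemon_wakeup() call is
 * an assumption for illustration.
 *
 *	if (vm_paging_needed())
 *		pagedaemon_wakeup();	// hypothetical wakeup hook
 */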

static __inline
void
vm_page_event(vm_page_t m, vm_page_event_t event)
{
	if (m->flags & PG_ACTIONLIST)
		vm_page_event_internal(m, event);
}

static __inline
void
vm_page_init_action(vm_page_t m, vm_page_action_t action,
		    void (*func)(vm_page_t, vm_page_action_t), void *data)
{
	action->m = m;
	action->func = func;
	action->data = data;
}

/*
 * Clear dirty bits in the VM page but truncate the
 * end to a DEV_BSIZE'd boundary.
 *
 * Used when reading data in, typically via getpages.
 * The partial device block at the end of the truncation
 * range should not lose its dirty bit.
 *
 * NOTE: This function does not clear the pmap modified bit.
 */
static __inline
void
vm_page_clear_dirty_end_nonincl(vm_page_t m, int base, int size)
{
	size = (base + size) & ~DEV_BMASK;
	if (base < size)
		vm_page_clear_dirty(m, base, size - base);
}
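
/*
 * Worked example (hypothetical numbers, DEV_BSIZE = 512 so
 * DEV_BMASK = 511): base = 100, size = 1000 gives an end offset of
 * 1100, which rounds down to 1024.  Only [100, 1024) is cleared; the
 * partial device block covering [1024, 1100) keeps its dirty bit.
 */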

/*
 * Clear dirty bits in the VM page but truncate the
 * beginning to a DEV_BSIZE'd boundary.
 *
 * Used when truncating a buffer.  The partial device
 * block at the beginning of the truncation range
 * should not lose its dirty bit.
 *
 * NOTE: This function does not clear the pmap modified bit.
 */
static __inline
void
vm_page_clear_dirty_beg_nonincl(vm_page_t m, int base, int size)
{
	size += base;
	base = (base + DEV_BMASK) & ~DEV_BMASK;
	if (base < size)
		vm_page_clear_dirty(m, base, size - base);
}
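
/*
 * Worked example (hypothetical numbers, DEV_BSIZE = 512): base = 100,
 * size = 1000 covers [100, 1100).  The start rounds up to 512, so only
 * [512, 1100) is cleared and the partial device block [100, 512)
 * keeps its dirty bit.
 */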

static __inline
void
vm_page_spin_lock(vm_page_t m)
{
	spin_pool_lock(m);
}

static __inline
void
vm_page_spin_unlock(vm_page_t m)
{
	spin_pool_unlock(m);
}

/*
 * Wire a vm_page that is already wired.  Does not require a busied
 * page.
 */
static __inline
void
vm_page_wire_quick(vm_page_t m)
{
	if (atomic_fetchadd_int(&m->wire_count, 1) == 0)
		panic("vm_page_wire_quick: wire_count was 0");
}

/*
 * Unwire a vm_page quickly, does not require a busied page.
 *
 * This routine refuses to drop the wire_count to 0 and will return
 * TRUE if it would have had to (instead of decrementing it to 0).
 * The caller can then busy the page and deal with it.
 */
static __inline
int
vm_page_unwire_quick(vm_page_t m)
{
	KKASSERT(m->wire_count > 0);
	for (;;) {
		u_int wire_count = m->wire_count;

		cpu_ccfence();
		if (wire_count == 1)
			return TRUE;
		if (atomic_cmpset_int(&m->wire_count, wire_count,
				      wire_count - 1))
			return FALSE;
	}
}
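
/*
 * Illustrative caller pattern (a sketch, not from the original header):
 * when vm_page_unwire_quick() refuses to drop the last wiring it
 * returns TRUE, and the caller busies the page and runs the full
 * unwire path.  The busy/unwire/wakeup calls are assumptions about
 * the surrounding API, shown for illustration only.
 *
 *	if (vm_page_unwire_quick(m)) {
 *		vm_page_busy_wait(m, FALSE, "unwire");	// hypothetical busy
 *		vm_page_unwire(m, 1);			// full path, may requeue
 *		vm_page_wakeup(m);
 *	}
 */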

/*
 * Inline functions formerly implemented as macros
 */

static __inline void
vm_page_flag_set(vm_page_t m, unsigned int bits)
{
	atomic_set_int(&(m)->flags, bits);
}

static __inline void
vm_page_flag_clear(vm_page_t m, unsigned int bits)
{
	atomic_clear_int(&(m)->flags, bits);
}

/*
 * Wakeup anyone waiting for the page after potentially unbusying
 * (hard or soft) or doing other work on a page that might make a
 * waiter ready.  The setting of PG_WANTED is integrated into the
 * related flags and it can't be set once the flags are already
 * clear, so there should be no races here.
 */

static __inline void
vm_page_flash(vm_page_t m)
{
	if (m->flags & PG_WANTED) {
		vm_page_flag_clear(m, PG_WANTED);
		wakeup(m);
	}
}

/*
 * Reduce the protection of a page.  This routine never raises the
 * protection and therefore can be safely called if the page is already
 * at VM_PROT_NONE (it will effectively be a NOP).
 *
 * VM_PROT_NONE will remove all user mappings of a page.  This is often
 * necessary when a page changes state (for example, turns into a copy-on-write
 * page or needs to be frozen for write I/O) in order to force a fault, or
 * to force a page's dirty bits to be synchronized and avoid hardware
 * (modified/accessed) bit update races with pmap changes.
 *
 * Since 'prot' is usually a constant, this inline usually winds up optimizing
 * out the primary conditional.
 *
 * WARNING: VM_PROT_NONE can block, but will loop until all mappings have
 * been cleared.  Callers should be aware that other page-related elements
 * might have changed, however.
 */
static __inline void
vm_page_protect(vm_page_t m, int prot)
{
	KKASSERT(m->flags & PG_BUSY);
	if (prot == VM_PROT_NONE) {
		if (m->flags & (PG_WRITEABLE|PG_MAPPED)) {
			pmap_page_protect(m, VM_PROT_NONE);
			/* PG_WRITEABLE & PG_MAPPED cleared by call */
		}
	} else if ((prot == VM_PROT_READ) && (m->flags & PG_WRITEABLE)) {
		pmap_page_protect(m, VM_PROT_READ);
		/* PG_WRITEABLE cleared by call */
	}
}
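
/*
 * Illustrative sketch (not from the original header): downgrading a
 * busied page to read-only before marking it copy-on-write, so the
 * next write faults and the fault handler can copy the page.  The COW
 * bookkeeping is elided; only the vm_page_protect() call is the point.
 *
 *	// page must already be busied (PG_BUSY), per the KKASSERT above
 *	vm_page_protect(m, VM_PROT_READ);
 *	// ... mark the object/page copy-on-write ...
 */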

/*
 * Zero-fill the specified page.  The entire contents of the page will be
 * zero'd out.
 */
static __inline boolean_t
vm_page_zero_fill(vm_page_t m)
{
	pmap_zero_page(VM_PAGE_TO_PHYS(m));
	return (TRUE);
}

/*
 * Copy the contents of src_m to dest_m.  The pages must be stable but spl
 * and other protections depend on context.
 */
static __inline void
vm_page_copy(vm_page_t src_m, vm_page_t dest_m)
{
	pmap_copy_page(VM_PAGE_TO_PHYS(src_m), VM_PAGE_TO_PHYS(dest_m));
	dest_m->valid = VM_PAGE_BITS_ALL;
	dest_m->dirty = VM_PAGE_BITS_ALL;
}

/*
 * Free a page.  The page must be marked BUSY.
 *
 * Always clear PG_ZERO when freeing a page, which ensures the flag is not
 * set unless we are absolutely certain the page is zero'd.  This is
 * particularly important when the vm_page_alloc*() code moves pages from
 * PQ_CACHE to PQ_FREE.
 */
static __inline void
vm_page_free(vm_page_t m)
{
	vm_page_flag_clear(m, PG_ZERO);
	vm_page_free_toq(m);
}

/*
 * Free a page to the zero'd-pages queue.  The caller must ensure that the
 * page has been zero'd.
 */
static __inline void
vm_page_free_zero(vm_page_t m)
{
#ifdef PMAP_DEBUG
#ifdef PHYS_TO_DMAP
	char *p = (char *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m));
	int i;

	for (i = 0; i < PAGE_SIZE; i++) {
		if (p[i] != 0) {
			panic("non-zero page in vm_page_free_zero()");
		}
	}
#endif
#endif
	vm_page_flag_set(m, PG_ZERO);
	vm_page_free_toq(m);
}

/*
 * Set page to not be dirty.  Note: does not clear pmap modify bits.
 */
static __inline void
vm_page_undirty(vm_page_t m)
{
	m->dirty = 0;
}

#endif	/* _KERNEL */
#endif	/* _VM_VM_PAGE2_H_ */