FreeBSD/Linux Kernel Cross Reference
sys/vm/vm_page.h
/*-
 * SPDX-License-Identifier: (BSD-3-Clause AND MIT-CMU)
 *
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_page.h	8.2 (Berkeley) 12/13/93
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 *
 * $FreeBSD: releng/12.0/sys/vm/vm_page.h 338499 2018-09-06 16:17:45Z markj $
 */

/*
 *	Resident memory system definitions.
 */

#ifndef	_VM_PAGE_
#define	_VM_PAGE_

#include <vm/pmap.h>

/*
 *	Management of resident (logical) pages.
 *
 *	A small structure is kept for each resident
 *	page, indexed by page number.  Each structure
 *	is an element of several collections:
 *
 *		A radix tree used to quickly
 *		perform object/offset lookups
 *
 *		A list of all pages for a given object,
 *		so they can be quickly deactivated at
 *		time of deallocation.
 *
 *		An ordered list of pages due for pageout.
 *
 *	In addition, the structure contains the object
 *	and offset to which this page belongs (for pageout),
 *	and sundry status bits.
 *
 *	In general, operations on this structure's mutable fields are
 *	synchronized using either one of or a combination of the lock on the
 *	object that the page belongs to (O), the page lock (P),
 *	the per-domain lock for the free queues (F), or the page's queue
 *	lock (Q).  The physical address of a page is used to select its page
 *	lock from a pool.  The queue lock for a page depends on the value of
 *	its queue field and is described in detail below.  If a field is
 *	annotated below with two of these locks, then holding either lock is
 *	sufficient for read access, but both locks are required for write
 *	access.  An annotation of (C) indicates that the field is immutable.
 *
 *	In contrast, the synchronization of accesses to the page's
 *	dirty field is machine dependent (M).  In the
 *	machine-independent layer, the lock on the object that the
 *	page belongs to must be held in order to operate on the field.
 *	However, the pmap layer is permitted to set all bits within
 *	the field without holding that lock.  If the underlying
 *	architecture does not support atomic read-modify-write
 *	operations on the field's type, then the machine-independent
 *	layer uses a 32-bit atomic on the aligned 32-bit word that
 *	contains the dirty field.  In the machine-independent layer,
 *	the implementation of read-modify-write operations on the
 *	field is encapsulated in vm_page_clear_dirty_mask().
 *
 *	The page structure contains two counters which prevent page reuse.
 *	Both counters are protected by the page lock (P).  The hold
 *	counter counts transient references obtained via a pmap lookup, and
 *	is also used to prevent page reclamation in situations where it is
 *	undesirable to block other accesses to the page.  The wire counter
 *	is used to implement mlock(2) and is non-zero for pages containing
 *	kernel memory.  Pages that are wired or held will not be reclaimed
 *	or laundered by the page daemon, but are treated differently during
 *	a page queue scan: held pages remain at their position in the queue,
 *	while wired pages are removed from the queue and must later be
 *	re-enqueued appropriately by the unwiring thread.  It is legal to
 *	call vm_page_free() on a held page; doing so causes it to be removed
 *	from its object and page queue, and the page is released to the
 *	allocator once the last hold reference is dropped.  In contrast,
 *	wired pages may not be freed.
 *
 *	In some pmap implementations, the wire count of a page table page is
 *	used to track the number of populated entries.
 *
 *	The busy lock is an embedded reader-writer lock which protects the
 *	page's contents and identity (i.e., its <object, pindex> tuple) and
 *	interlocks with the object lock (O).  In particular, a page may be
 *	busied or unbusied only with the object write lock held.  To avoid
 *	bloating the page structure, the busy lock lacks some of the
 *	features available to the kernel's general-purpose synchronization
 *	primitives.  As a result, busy lock ordering rules are not verified,
 *	lock recursion is not detected, and an attempt to xbusy a busy page
 *	or sbusy an xbusy page triggers a panic rather than
 *	causing the thread to block.  vm_page_sleep_if_busy() can be used to
 *	sleep until the page's busy state changes, after which the caller
 *	must re-lookup the page and re-evaluate its state.
 *
 *	The queue field is the index of the page queue containing the
 *	page, or PQ_NONE if the page is not enqueued.  The queue lock of a
 *	page is the page queue lock corresponding to the page queue index,
 *	or the page lock (P) for the page if it is not enqueued.  To modify
 *	the queue field, the queue lock for the old value of the field must
 *	be held.  It is invalid for a page's queue field to transition
 *	between two distinct page queue indices.  That is, when updating
 *	the queue field, either the new value or the old value must be
 *	PQ_NONE.
 *
 *	To avoid contention on page queue locks, page queue operations
 *	(enqueue, dequeue, requeue) are batched using per-CPU queues.
 *	A deferred operation is requested by inserting an entry into a
 *	batch queue; the entry is simply a pointer to the page, and the
 *	request type is encoded in the page's aflags field using the values
 *	in PGA_QUEUE_STATE_MASK.  The type-stability of struct vm_page is
 *	crucial to this scheme since the processing of entries in a given
 *	batch queue may be deferred indefinitely.  In particular, a page
 *	may be freed before its pending batch queue entries have been
 *	processed.  The page lock (P) must be held to schedule a batched
 *	queue operation, and the page queue lock must be held in order to
 *	process batch queue entries for the page queue.
 */
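
/*
 * Illustrative sketch, not part of this file's interface: a typical
 * consumer that waits for a busy page.  "object", "pindex" and the wmesg
 * are hypothetical; VM_OBJECT_WLOCK()/WUNLOCK() come from <vm/vm_object.h>.
 * As noted above, vm_page_sleep_if_busy() releases the object lock when it
 * sleeps, so the caller must relock, re-lookup and re-evaluate.
 */
#if 0
	vm_page_t m;

	VM_OBJECT_WLOCK(object);
	for (;;) {
		m = vm_page_lookup(object, pindex);
		if (m == NULL || !vm_page_sleep_if_busy(m, "pgwait"))
			break;
		/* The object lock was dropped while sleeping; retry. */
		VM_OBJECT_WLOCK(object);
	}
	/* ... operate on m (or handle m == NULL) with the lock held ... */
	VM_OBJECT_WUNLOCK(object);
#endif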

#if PAGE_SIZE == 4096
#define	VM_PAGE_BITS_ALL	0xffu
typedef uint8_t vm_page_bits_t;
#elif PAGE_SIZE == 8192
#define	VM_PAGE_BITS_ALL	0xffffu
typedef uint16_t vm_page_bits_t;
#elif PAGE_SIZE == 16384
#define	VM_PAGE_BITS_ALL	0xffffffffu
typedef uint32_t vm_page_bits_t;
#elif PAGE_SIZE == 32768
#define	VM_PAGE_BITS_ALL	0xfffffffffffffffflu
typedef uint64_t vm_page_bits_t;
#endif

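/*
 * Illustrative sketch, not part of this file's interface: one plausible
 * shape for vm_page_bits(), declared later in this file, showing how a
 * byte range [base, base + size) within a page maps onto per-DEV_BSIZE
 * chunk bits.  DEV_BSHIFT is log2(DEV_BSIZE), from <sys/param.h>; the
 * authoritative implementation lives in vm_page.c.
 */
#if 0
static inline vm_page_bits_t
vm_page_bits_sketch(int base, int size)
{
	int first_bit, last_bit;

	first_bit = base >> DEV_BSHIFT;
	last_bit = (base + size - 1) >> DEV_BSHIFT;

	/* Set bits first_bit..last_bit, inclusive. */
	return (((vm_page_bits_t)2 << last_bit) -
	    ((vm_page_bits_t)1 << first_bit));
}
#endif
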
struct vm_page {
	union {
		TAILQ_ENTRY(vm_page) q;	/* page queue or free list (Q) */
		struct {
			SLIST_ENTRY(vm_page) ss; /* private slists */
			void *pv;
		} s;
		struct {
			u_long p;
			u_long v;
		} memguard;
	} plinks;
	TAILQ_ENTRY(vm_page) listq;	/* pages in same object (O) */
	vm_object_t object;		/* which object am I in (O,P) */
	vm_pindex_t pindex;		/* offset into object (O,P) */
	vm_paddr_t phys_addr;		/* physical address of page (C) */
	struct md_page md;		/* machine dependent stuff */
	u_int wire_count;		/* wired down maps refs (P) */
	volatile u_int busy_lock;	/* busy owners lock */
	uint16_t hold_count;		/* page hold count (P) */
	uint16_t flags;			/* page PG_* flags (P) */
	uint8_t aflags;			/* access is atomic */
	uint8_t oflags;			/* page VPO_* flags (O) */
	uint8_t queue;			/* page queue index (Q) */
	int8_t psind;			/* pagesizes[] index (O) */
	int8_t segind;			/* vm_phys segment index (C) */
	uint8_t order;			/* index of the buddy queue (F) */
	uint8_t pool;			/* vm_phys freepool index (F) */
	u_char act_count;		/* page usage count (P) */
	/* NOTE that these must support one bit per DEV_BSIZE in a page */
	/* so, on normal X86 kernels, they must be at least 8 bits wide */
	vm_page_bits_t valid;		/* map of valid DEV_BSIZE chunks (O) */
	vm_page_bits_t dirty;		/* map of dirty DEV_BSIZE chunks (M) */
};

/*
 * Page flags stored in oflags:
 *
 * Access to these page flags is synchronized by the lock on the object
 * containing the page (O).
 *
 * Note: VPO_UNMANAGED (used by OBJT_DEVICE, OBJT_PHYS and OBJT_SG)
 *	 indicates that the page is not under PV management but
 *	 otherwise should be treated as a normal page.  Pages not
 *	 under PV management cannot be paged out via the
 *	 object/vm_page_t because there is no knowledge of their pte
 *	 mappings, and such pages are also not on any PQ queue.
 *
 */
#define	VPO_KMEM_EXEC	0x01		/* kmem mapping allows execution */
#define	VPO_SWAPSLEEP	0x02		/* waiting for swap to finish */
#define	VPO_UNMANAGED	0x04		/* no PV management for page */
#define	VPO_SWAPINPROG	0x08		/* swap I/O in progress on page */
#define	VPO_NOSYNC	0x10		/* do not collect for syncer */

/*
 * Busy page implementation details.
 * The algorithm is taken mostly from the rwlock(9) and sx(9) lock
 * implementations, although support for owner identity is removed because
 * of size constraints.  Checks on lock recursion are therefore not
 * possible, and the effectiveness of the lock assertions is somewhat
 * reduced.
 */
#define	VPB_BIT_SHARED		0x01
#define	VPB_BIT_EXCLUSIVE	0x02
#define	VPB_BIT_WAITERS		0x04
#define	VPB_BIT_FLAGMASK						\
	(VPB_BIT_SHARED | VPB_BIT_EXCLUSIVE | VPB_BIT_WAITERS)

#define	VPB_SHARERS_SHIFT	3
#define	VPB_SHARERS(x)							\
	(((x) & ~VPB_BIT_FLAGMASK) >> VPB_SHARERS_SHIFT)
#define	VPB_SHARERS_WORD(x)	((x) << VPB_SHARERS_SHIFT | VPB_BIT_SHARED)
#define	VPB_ONE_SHARER		(1 << VPB_SHARERS_SHIFT)

#define	VPB_SINGLE_EXCLUSIVER	VPB_BIT_EXCLUSIVE

#define	VPB_UNBUSIED		VPB_SHARERS_WORD(0)

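/*
 * Illustrative sketch, not part of this file's interface: how the
 * encodings above compose.  A shared ("sbusy") acquisition can succeed
 * only while VPB_BIT_SHARED is set, and bumps the sharer count stored
 * above the flag bits by VPB_ONE_SHARER.  This mirrors what
 * vm_page_trysbusy() (declared below) must do; the real implementation
 * is in vm_page.c.
 */
#if 0
static inline int
vm_page_trysbusy_sketch(vm_page_t m)
{
	u_int x;

	x = m->busy_lock;
	for (;;) {
		if ((x & VPB_BIT_SHARED) == 0)
			return (0);	/* xbusied: fail instead of blocking */
		if (atomic_fcmpset_acq_int(&m->busy_lock, &x,
		    x + VPB_ONE_SHARER))
			return (1);
		/* x was reloaded by the failed fcmpset; retry. */
	}
}
#endif
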
#define	PQ_NONE		255
#define	PQ_INACTIVE	0
#define	PQ_ACTIVE	1
#define	PQ_LAUNDRY	2
#define	PQ_UNSWAPPABLE	3
#define	PQ_COUNT	4

#ifndef VM_PAGE_HAVE_PGLIST
TAILQ_HEAD(pglist, vm_page);
#define	VM_PAGE_HAVE_PGLIST
#endif
SLIST_HEAD(spglist, vm_page);

#ifdef _KERNEL
extern vm_page_t bogus_page;
#endif	/* _KERNEL */

extern struct mtx_padalign pa_lock[];

#if defined(__arm__)
#define	PDRSHIFT	PDR_SHIFT
#elif !defined(PDRSHIFT)
#define	PDRSHIFT	21
#endif

#define	pa_index(pa)	((pa) >> PDRSHIFT)
#define	PA_LOCKPTR(pa)	((struct mtx *)(&pa_lock[pa_index(pa) % PA_LOCK_COUNT]))
#define	PA_LOCKOBJPTR(pa)	((struct lock_object *)PA_LOCKPTR((pa)))
#define	PA_LOCK(pa)	mtx_lock(PA_LOCKPTR(pa))
#define	PA_TRYLOCK(pa)	mtx_trylock(PA_LOCKPTR(pa))
#define	PA_UNLOCK(pa)	mtx_unlock(PA_LOCKPTR(pa))
#define	PA_UNLOCK_COND(pa)			\
	do {					\
		if ((pa) != 0) {		\
			PA_UNLOCK((pa));	\
			(pa) = 0;		\
		}				\
	} while (0)

#define	PA_LOCK_ASSERT(pa, a)	mtx_assert(PA_LOCKPTR(pa), (a))

#if defined(KLD_MODULE) && !defined(KLD_TIED)
#define	vm_page_lock(m)		vm_page_lock_KBI((m), LOCK_FILE, LOCK_LINE)
#define	vm_page_unlock(m)	vm_page_unlock_KBI((m), LOCK_FILE, LOCK_LINE)
#define	vm_page_trylock(m)	vm_page_trylock_KBI((m), LOCK_FILE, LOCK_LINE)
#else	/* !KLD_MODULE */
#define	vm_page_lockptr(m)	(PA_LOCKPTR(VM_PAGE_TO_PHYS((m))))
#define	vm_page_lock(m)		mtx_lock(vm_page_lockptr((m)))
#define	vm_page_unlock(m)	mtx_unlock(vm_page_lockptr((m)))
#define	vm_page_trylock(m)	mtx_trylock(vm_page_lockptr((m)))
#endif
#if defined(INVARIANTS)
#define	vm_page_assert_locked(m)		\
    vm_page_assert_locked_KBI((m), __FILE__, __LINE__)
#define	vm_page_lock_assert(m, a)		\
    vm_page_lock_assert_KBI((m), (a), __FILE__, __LINE__)
#else
#define	vm_page_assert_locked(m)
#define	vm_page_lock_assert(m, a)
#endif
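
/*
 * Illustrative sketch, not part of this file's interface: taking a
 * transient "hold" reference on a hypothetical page "m".  The hold count
 * is protected by the page lock (P), and a held page will not be
 * reclaimed while the reference is outstanding (see vm_page_held() below).
 */
#if 0
	vm_page_lock(m);
	vm_page_hold(m);
	vm_page_unlock(m);

	/* ... safely inspect or use the page ... */

	vm_page_lock(m);
	vm_page_unhold(m);
	vm_page_unlock(m);
#endif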

/*
 * The vm_page's aflags are updated using atomic operations.  To set or clear
 * these flags, the functions vm_page_aflag_set() and vm_page_aflag_clear()
 * must be used.  Neither these flags nor these functions are part of the KBI.
 *
 * PGA_REFERENCED may be cleared only if the page is locked.  It is set by
 * both the MI and MD VM layers.  However, kernel loadable modules should not
 * directly set this flag.  They should call vm_page_reference() instead.
 *
 * PGA_WRITEABLE is set exclusively on managed pages by pmap_enter().
 * When it does so, the object must be locked, or the page must be
 * exclusive busied.  The MI VM layer must never access this flag
 * directly.  Instead, it should call pmap_page_is_write_mapped().
 *
 * PGA_EXECUTABLE may be set by pmap routines, and indicates that a page has
 * at least one executable mapping.  It is not consumed by the MI VM layer.
 *
 * PGA_ENQUEUED is set and cleared when a page is inserted into or removed
 * from a page queue, respectively.  It determines whether the plinks.q field
 * of the page is valid.  To set or clear this flag, the queue lock for the
 * page must be held: the page queue lock corresponding to the page's "queue"
 * field if its value is not PQ_NONE, and the page lock otherwise.
 *
 * PGA_DEQUEUE is set when the page is scheduled to be dequeued from a page
 * queue, and cleared when the dequeue request is processed.  A page may
 * have PGA_DEQUEUE set and PGA_ENQUEUED cleared, for instance if a dequeue
 * is requested after the page is scheduled to be enqueued but before it is
 * actually inserted into the page queue.  The page lock must be held to set
 * this flag, and the queue lock for the page must be held to clear it.
 *
 * PGA_REQUEUE is set when the page is scheduled to be enqueued or requeued
 * in its page queue.  The page lock must be held to set this flag, and the
 * queue lock for the page must be held to clear it.
 *
 * PGA_REQUEUE_HEAD is a special flag for enqueuing pages near the head of
 * the inactive queue, thus bypassing LRU.  The page lock must be held to
 * set this flag, and the queue lock for the page must be held to clear it.
 */
#define	PGA_WRITEABLE	0x01		/* page may be mapped writeable */
#define	PGA_REFERENCED	0x02		/* page has been referenced */
#define	PGA_EXECUTABLE	0x04		/* page may be mapped executable */
#define	PGA_ENQUEUED	0x08		/* page is enqueued in a page queue */
#define	PGA_DEQUEUE	0x10		/* page is due to be dequeued */
#define	PGA_REQUEUE	0x20		/* page is due to be requeued */
#define	PGA_REQUEUE_HEAD 0x40		/* page requeue should bypass LRU */

#define	PGA_QUEUE_STATE_MASK	(PGA_ENQUEUED | PGA_DEQUEUE | PGA_REQUEUE | \
				PGA_REQUEUE_HEAD)

/*
 * Page flags.  If changed at any other time than page allocation or
 * freeing, the modification must be protected by the vm_page lock.
 */
#define	PG_FICTITIOUS	0x0004		/* physical page doesn't exist */
#define	PG_ZERO		0x0008		/* page is zeroed */
#define	PG_MARKER	0x0010		/* special queue marker page */
#define	PG_NODUMP	0x0080		/* don't include this page in a dump */
#define	PG_UNHOLDFREE	0x0100		/* delayed free of a held page */

/*
 * Misc constants.
 */
#define	ACT_DECLINE		1
#define	ACT_ADVANCE		3
#define	ACT_INIT		5
#define	ACT_MAX			64

#ifdef _KERNEL

#include <sys/systm.h>

#include <machine/atomic.h>

/*
 * Each pageable resident page falls into one of five lists:
 *
 *	free
 *		Available for allocation now.
 *
 *	inactive
 *		Low activity, candidates for reclamation.
 *		This list is approximately LRU ordered.
 *
 *	laundry
 *		This is the list of pages that should be
 *		paged out next.
 *
 *	unswappable
 *		Dirty anonymous pages that cannot be paged
 *		out because no swap device is configured.
 *
 *	active
 *		Pages that are "active", i.e., they have been
 *		recently referenced.
 *
 */

extern vm_page_t vm_page_array;		/* First resident page in table */
extern long vm_page_array_size;		/* number of vm_page_t's */
extern long first_page;			/* first physical page number */

#define	VM_PAGE_TO_PHYS(entry)	((entry)->phys_addr)

/*
 * PHYS_TO_VM_PAGE() returns the vm_page_t object that represents a memory
 * page to which the given physical address belongs.  The correct vm_page_t
 * object is returned for addresses that are not page-aligned.
 */
vm_page_t PHYS_TO_VM_PAGE(vm_paddr_t pa);

/*
 * Page allocation parameters for vm_page for the functions
 * vm_page_alloc(), vm_page_grab(), vm_page_alloc_contig() and
 * vm_page_alloc_freelist().  Some functions support only a subset
 * of the flags and ignore others; see the flags legend.
 *
 * The meaning of VM_ALLOC_ZERO differs slightly between the vm_page_alloc*()
 * and the vm_page_grab*() functions.  See these functions for details.
 *
 * Bits 0 - 1 define class.
 * Bits 2 - 15 dedicated for flags.
 * Legend:
 * (a) - vm_page_alloc() supports the flag.
 * (c) - vm_page_alloc_contig() supports the flag.
 * (f) - vm_page_alloc_freelist() supports the flag.
 * (g) - vm_page_grab() supports the flag.
 * (p) - vm_page_grab_pages() supports the flag.
 * Bits above 15 define the count of additional pages that the caller
 * intends to allocate.
 */
#define	VM_ALLOC_NORMAL		0
#define	VM_ALLOC_INTERRUPT	1
#define	VM_ALLOC_SYSTEM		2
#define	VM_ALLOC_CLASS_MASK	3
#define	VM_ALLOC_WAITOK		0x0008	/* (acf) Sleep and retry */
#define	VM_ALLOC_WAITFAIL	0x0010	/* (acf) Sleep and return error */
#define	VM_ALLOC_WIRED		0x0020	/* (acfgp) Allocate a wired page */
#define	VM_ALLOC_ZERO		0x0040	/* (acfgp) Allocate a prezeroed page */
#define	VM_ALLOC_NOOBJ		0x0100	/* (acg) No associated object */
#define	VM_ALLOC_NOBUSY		0x0200	/* (acgp) Do not excl busy the page */
#define	VM_ALLOC_IGN_SBUSY	0x1000	/* (gp) Ignore shared busy flag */
#define	VM_ALLOC_NODUMP		0x2000	/* (ag) don't include in dump */
#define	VM_ALLOC_SBUSY		0x4000	/* (acgp) Shared busy the page */
#define	VM_ALLOC_NOWAIT		0x8000	/* (acfgp) Do not sleep */
#define	VM_ALLOC_COUNT_SHIFT	16
#define	VM_ALLOC_COUNT(count)	((count) << VM_ALLOC_COUNT_SHIFT)
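
/*
 * Illustrative sketch, not part of this file's interface: allocating a
 * wired page for a hypothetical (object, pindex), preferring a prezeroed
 * page.  For vm_page_alloc(), VM_ALLOC_ZERO is a preference rather than a
 * guarantee, so the caller checks PG_ZERO and zeroes manually when needed;
 * pmap_zero_page() is declared in <vm/pmap.h>.
 */
#if 0
	vm_page_t m;

	m = vm_page_alloc(object, pindex,
	    VM_ALLOC_NORMAL | VM_ALLOC_WIRED | VM_ALLOC_ZERO |
	    VM_ALLOC_NOWAIT);
	if (m == NULL)
		return (ENOMEM);	/* hypothetical error path */
	if ((m->flags & PG_ZERO) == 0)
		pmap_zero_page(m);
#endif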

#ifdef M_NOWAIT
static inline int
malloc2vm_flags(int malloc_flags)
{
	int pflags;

	KASSERT((malloc_flags & M_USE_RESERVE) == 0 ||
	    (malloc_flags & M_NOWAIT) != 0,
	    ("M_USE_RESERVE requires M_NOWAIT"));
	pflags = (malloc_flags & M_USE_RESERVE) != 0 ? VM_ALLOC_INTERRUPT :
	    VM_ALLOC_SYSTEM;
	if ((malloc_flags & M_ZERO) != 0)
		pflags |= VM_ALLOC_ZERO;
	if ((malloc_flags & M_NODUMP) != 0)
		pflags |= VM_ALLOC_NODUMP;
	if ((malloc_flags & M_NOWAIT) != 0)
		pflags |= VM_ALLOC_NOWAIT;
	if ((malloc_flags & M_WAITOK) != 0)
		pflags |= VM_ALLOC_WAITOK;
	return (pflags);
}
#endif
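
/*
 * Illustrative sketch, not part of this file's interface: how a
 * kmem-style allocator might use malloc2vm_flags() to translate malloc(9)
 * flags before a page allocation.  M_USE_RESERVE selects the more
 * aggressive VM_ALLOC_INTERRUPT class and, as asserted above, is only
 * legal together with M_NOWAIT.  "object" and "pindex" are hypothetical.
 */
#if 0
	int pflags;
	vm_page_t m;

	pflags = malloc2vm_flags(M_NOWAIT | M_ZERO) | VM_ALLOC_WIRED;
	m = vm_page_alloc(object, pindex, pflags);
#endif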

/*
 * Predicates supported by vm_page_ps_test():
 *
 *	PS_ALL_DIRTY is true only if the entire (super)page is dirty.
 *	However, it can be spuriously false when the (super)page has become
 *	dirty in the pmap but that information has not been propagated to the
 *	machine-independent layer.
 */
#define	PS_ALL_DIRTY	0x1
#define	PS_ALL_VALID	0x2
#define	PS_NONE_BUSY	0x4
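
/*
 * Illustrative sketch, not part of this file's interface: a caller
 * deciding whether a run of base pages starting at a hypothetical "m"
 * may be promoted to a superpage mapping might require every page to be
 * valid and none to be busy.  The object must be locked.
 */
#if 0
	if (vm_page_ps_test(m, PS_ALL_VALID | PS_NONE_BUSY, NULL)) {
		/* ... safe to create a single large mapping ... */
	}
#endif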

void vm_page_busy_downgrade(vm_page_t m);
void vm_page_busy_sleep(vm_page_t m, const char *msg, bool nonshared);
void vm_page_flash(vm_page_t m);
void vm_page_hold(vm_page_t mem);
void vm_page_unhold(vm_page_t mem);
void vm_page_free(vm_page_t m);
void vm_page_free_zero(vm_page_t m);

void vm_page_activate (vm_page_t);
void vm_page_advise(vm_page_t m, int advice);
vm_page_t vm_page_alloc(vm_object_t, vm_pindex_t, int);
vm_page_t vm_page_alloc_domain(vm_object_t, vm_pindex_t, int, int);
vm_page_t vm_page_alloc_after(vm_object_t, vm_pindex_t, int, vm_page_t);
vm_page_t vm_page_alloc_domain_after(vm_object_t, vm_pindex_t, int, int,
    vm_page_t);
vm_page_t vm_page_alloc_contig(vm_object_t object, vm_pindex_t pindex, int req,
    u_long npages, vm_paddr_t low, vm_paddr_t high, u_long alignment,
    vm_paddr_t boundary, vm_memattr_t memattr);
vm_page_t vm_page_alloc_contig_domain(vm_object_t object,
    vm_pindex_t pindex, int domain, int req, u_long npages, vm_paddr_t low,
    vm_paddr_t high, u_long alignment, vm_paddr_t boundary,
    vm_memattr_t memattr);
vm_page_t vm_page_alloc_freelist(int, int);
vm_page_t vm_page_alloc_freelist_domain(int, int, int);
bool vm_page_blacklist_add(vm_paddr_t pa, bool verbose);
void vm_page_change_lock(vm_page_t m, struct mtx **mtx);
vm_page_t vm_page_grab (vm_object_t, vm_pindex_t, int);
int vm_page_grab_pages(vm_object_t object, vm_pindex_t pindex, int allocflags,
    vm_page_t *ma, int count);
void vm_page_deactivate(vm_page_t);
void vm_page_deactivate_noreuse(vm_page_t);
void vm_page_dequeue(vm_page_t m);
void vm_page_dequeue_deferred(vm_page_t m);
void vm_page_drain_pqbatch(void);
vm_page_t vm_page_find_least(vm_object_t, vm_pindex_t);
bool vm_page_free_prep(vm_page_t m);
vm_page_t vm_page_getfake(vm_paddr_t paddr, vm_memattr_t memattr);
void vm_page_initfake(vm_page_t m, vm_paddr_t paddr, vm_memattr_t memattr);
int vm_page_insert (vm_page_t, vm_object_t, vm_pindex_t);
void vm_page_launder(vm_page_t m);
vm_page_t vm_page_lookup (vm_object_t, vm_pindex_t);
vm_page_t vm_page_next(vm_page_t m);
int vm_page_pa_tryrelock(pmap_t, vm_paddr_t, vm_paddr_t *);
struct vm_pagequeue *vm_page_pagequeue(vm_page_t m);
vm_page_t vm_page_prev(vm_page_t m);
bool vm_page_ps_test(vm_page_t m, int flags, vm_page_t skip_m);
void vm_page_putfake(vm_page_t m);
void vm_page_readahead_finish(vm_page_t m);
bool vm_page_reclaim_contig(int req, u_long npages, vm_paddr_t low,
    vm_paddr_t high, u_long alignment, vm_paddr_t boundary);
bool vm_page_reclaim_contig_domain(int domain, int req, u_long npages,
    vm_paddr_t low, vm_paddr_t high, u_long alignment, vm_paddr_t boundary);
void vm_page_reference(vm_page_t m);
void vm_page_remove (vm_page_t);
int vm_page_rename (vm_page_t, vm_object_t, vm_pindex_t);
vm_page_t vm_page_replace(vm_page_t mnew, vm_object_t object,
    vm_pindex_t pindex);
void vm_page_requeue(vm_page_t m);
int vm_page_sbusied(vm_page_t m);
vm_page_t vm_page_scan_contig(u_long npages, vm_page_t m_start,
    vm_page_t m_end, u_long alignment, vm_paddr_t boundary, int options);
void vm_page_set_valid_range(vm_page_t m, int base, int size);
int vm_page_sleep_if_busy(vm_page_t m, const char *msg);
vm_offset_t vm_page_startup(vm_offset_t vaddr);
void vm_page_sunbusy(vm_page_t m);
bool vm_page_try_to_free(vm_page_t m);
int vm_page_trysbusy(vm_page_t m);
void vm_page_unhold_pages(vm_page_t *ma, int count);
void vm_page_unswappable(vm_page_t m);
bool vm_page_unwire(vm_page_t m, uint8_t queue);
bool vm_page_unwire_noq(vm_page_t m);
void vm_page_updatefake(vm_page_t m, vm_paddr_t paddr, vm_memattr_t memattr);
void vm_page_wire (vm_page_t);
void vm_page_xunbusy_hard(vm_page_t m);
void vm_page_xunbusy_maybelocked(vm_page_t m);
void vm_page_set_validclean (vm_page_t, int, int);
void vm_page_clear_dirty (vm_page_t, int, int);
void vm_page_set_invalid (vm_page_t, int, int);
int vm_page_is_valid (vm_page_t, int, int);
void vm_page_test_dirty (vm_page_t);
vm_page_bits_t vm_page_bits(int base, int size);
void vm_page_zero_invalid(vm_page_t m, boolean_t setvalid);
void vm_page_free_toq(vm_page_t m);
void vm_page_free_pages_toq(struct spglist *free, bool update_wire_count);

void vm_page_dirty_KBI(vm_page_t m);
void vm_page_lock_KBI(vm_page_t m, const char *file, int line);
void vm_page_unlock_KBI(vm_page_t m, const char *file, int line);
int vm_page_trylock_KBI(vm_page_t m, const char *file, int line);
#if defined(INVARIANTS) || defined(INVARIANT_SUPPORT)
void vm_page_assert_locked_KBI(vm_page_t m, const char *file, int line);
void vm_page_lock_assert_KBI(vm_page_t m, int a, const char *file, int line);
#endif

#define	vm_page_assert_sbusied(m)					\
	KASSERT(vm_page_sbusied(m),					\
	    ("vm_page_assert_sbusied: page %p not shared busy @ %s:%d", \
	    (m), __FILE__, __LINE__))

#define	vm_page_assert_unbusied(m)					\
	KASSERT(!vm_page_busied(m),					\
	    ("vm_page_assert_unbusied: page %p busy @ %s:%d",		\
	    (m), __FILE__, __LINE__))

#define	vm_page_assert_xbusied(m)					\
	KASSERT(vm_page_xbusied(m),					\
	    ("vm_page_assert_xbusied: page %p not exclusive busy @ %s:%d", \
	    (m), __FILE__, __LINE__))

#define	vm_page_busied(m)						\
	((m)->busy_lock != VPB_UNBUSIED)

#define	vm_page_sbusy(m) do {						\
	if (!vm_page_trysbusy(m))					\
		panic("%s: page %p failed shared busying", __func__,	\
		    (m));						\
} while (0)

#define	vm_page_tryxbusy(m)						\
	(atomic_cmpset_acq_int(&(m)->busy_lock, VPB_UNBUSIED,		\
	    VPB_SINGLE_EXCLUSIVER))

#define	vm_page_xbusied(m)						\
	(((m)->busy_lock & VPB_SINGLE_EXCLUSIVER) != 0)

#define	vm_page_xbusy(m) do {						\
	if (!vm_page_tryxbusy(m))					\
		panic("%s: page %p failed exclusive busying", __func__, \
		    (m));						\
} while (0)

/* Note: page m's lock must not be owned by the caller. */
#define	vm_page_xunbusy(m) do {						\
	if (!atomic_cmpset_rel_int(&(m)->busy_lock,			\
	    VPB_SINGLE_EXCLUSIVER, VPB_UNBUSIED))			\
		vm_page_xunbusy_hard(m);				\
} while (0)
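
/*
 * Illustrative sketch, not part of this file's interface:
 * exclusive-busying a page to protect its identity and contents across an
 * object-lock drop, e.g. around I/O.  Per the comment at the top of this
 * file, busying and unbusying are done with the object write lock held,
 * but the busy state itself persists while the lock is dropped.  Names
 * are hypothetical; VM_OBJECT_WLOCK()/WUNLOCK() come from <vm/vm_object.h>.
 */
#if 0
	VM_OBJECT_WLOCK(object);
	vm_page_xbusy(m);	/* panics if the page is already busy */
	VM_OBJECT_WUNLOCK(object);

	/* ... start and wait for I/O on the page ... */

	VM_OBJECT_WLOCK(object);
	vm_page_xunbusy(m);	/* page lock must not be held */
	VM_OBJECT_WUNLOCK(object);
#endif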

#ifdef INVARIANTS
void vm_page_object_lock_assert(vm_page_t m);
#define	VM_PAGE_OBJECT_LOCK_ASSERT(m)	vm_page_object_lock_assert(m)
void vm_page_assert_pga_writeable(vm_page_t m, uint8_t bits);
#define	VM_PAGE_ASSERT_PGA_WRITEABLE(m, bits)				\
	vm_page_assert_pga_writeable(m, bits)
#else
#define	VM_PAGE_OBJECT_LOCK_ASSERT(m)	(void)0
#define	VM_PAGE_ASSERT_PGA_WRITEABLE(m, bits)	(void)0
#endif

/*
 * We want to use atomic updates for the aflags field, which is 8 bits wide.
 * However, not all architectures support atomic operations on 8-bit
 * destinations.  In order that we can easily use a 32-bit operation, we
 * require that the aflags field be 32-bit aligned.
 */
CTASSERT(offsetof(struct vm_page, aflags) % sizeof(uint32_t) == 0);

/*
 *	Clear the given bits in the specified page.
 */
static inline void
vm_page_aflag_clear(vm_page_t m, uint8_t bits)
{
	uint32_t *addr, val;

	/*
	 * The PGA_REFERENCED flag can only be cleared if the page is locked.
	 */
	if ((bits & PGA_REFERENCED) != 0)
		vm_page_assert_locked(m);

	/*
	 * Access the whole 32-bit word containing the aflags field with an
	 * atomic update.  Parallel non-atomic updates to the other fields
	 * within this word are handled properly by the atomic update.
	 */
	addr = (void *)&m->aflags;
	KASSERT(((uintptr_t)addr & (sizeof(uint32_t) - 1)) == 0,
	    ("vm_page_aflag_clear: aflags is misaligned"));
	val = bits;
#if BYTE_ORDER == BIG_ENDIAN
	val <<= 24;
#endif
	atomic_clear_32(addr, val);
}

/*
 *	Set the given bits in the specified page.
 */
static inline void
vm_page_aflag_set(vm_page_t m, uint8_t bits)
{
	uint32_t *addr, val;

	VM_PAGE_ASSERT_PGA_WRITEABLE(m, bits);

	/*
	 * Access the whole 32-bit word containing the aflags field with an
	 * atomic update.  Parallel non-atomic updates to the other fields
	 * within this word are handled properly by the atomic update.
	 */
	addr = (void *)&m->aflags;
	KASSERT(((uintptr_t)addr & (sizeof(uint32_t) - 1)) == 0,
	    ("vm_page_aflag_set: aflags is misaligned"));
	val = bits;
#if BYTE_ORDER == BIG_ENDIAN
	val <<= 24;
#endif
	atomic_set_32(addr, val);
}
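
/*
 * Illustrative sketch, not part of this file's interface: marking a
 * hypothetical page "m" referenced.  Kernel modules should call
 * vm_page_reference() rather than touching aflags directly; note that
 * the PGA_* comment above constrains only the *clearing* of
 * PGA_REFERENCED to the page lock.
 */
#if 0
	vm_page_aflag_set(m, PGA_REFERENCED);
#endif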

/*
 *	vm_page_dirty:
 *
 *	Set all bits in the page's dirty field.
 *
 *	The object containing the specified page must be locked if the
 *	call is made from the machine-independent layer.
 *
 *	See vm_page_clear_dirty_mask().
 */
static __inline void
vm_page_dirty(vm_page_t m)
{

	/* Use vm_page_dirty_KBI() under INVARIANTS to save memory. */
#if (defined(KLD_MODULE) && !defined(KLD_TIED)) || defined(INVARIANTS)
	vm_page_dirty_KBI(m);
#else
	m->dirty = VM_PAGE_BITS_ALL;
#endif
}

/*
 *	vm_page_undirty:
 *
 *	Set page to not be dirty.  Note: does not clear pmap modify bits
 */
static __inline void
vm_page_undirty(vm_page_t m)
{

	VM_PAGE_OBJECT_LOCK_ASSERT(m);
	m->dirty = 0;
}

static inline void
vm_page_replace_checked(vm_page_t mnew, vm_object_t object, vm_pindex_t pindex,
    vm_page_t mold)
{
	vm_page_t mret;

	mret = vm_page_replace(mnew, object, pindex);
	KASSERT(mret == mold,
	    ("invalid page replacement, mold=%p, mret=%p", mold, mret));

	/* Unused if !INVARIANTS. */
	(void)mold;
	(void)mret;
}

/*
 *	vm_page_queue:
 *
 *	Return the index of the queue containing m.  This index is guaranteed
 *	not to change while the page lock is held.
 */
static inline uint8_t
vm_page_queue(vm_page_t m)
{

	vm_page_assert_locked(m);

	if ((m->aflags & PGA_DEQUEUE) != 0)
		return (PQ_NONE);
	atomic_thread_fence_acq();
	return (m->queue);
}

static inline bool
vm_page_active(vm_page_t m)
{

	return (vm_page_queue(m) == PQ_ACTIVE);
}

static inline bool
vm_page_inactive(vm_page_t m)
{

	return (vm_page_queue(m) == PQ_INACTIVE);
}

static inline bool
vm_page_in_laundry(vm_page_t m)
{
	uint8_t queue;

	queue = vm_page_queue(m);
	return (queue == PQ_LAUNDRY || queue == PQ_UNSWAPPABLE);
}

/*
 *	vm_page_held:
 *
 *	Return true if a reference prevents the page from being reclaimable.
 */
static inline bool
vm_page_held(vm_page_t m)
{

	return (m->hold_count > 0 || m->wire_count > 0);
}
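
/*
 * Illustrative sketch, not part of this file's interface: a scan deciding
 * whether a hypothetical page "m" is a reclamation candidate.  Both
 * counters that vm_page_held() inspects are protected by the page lock (P).
 */
#if 0
	vm_page_lock(m);
	if (!vm_page_held(m)) {
		/* ... the page may be reclaimed, subject to busy state ... */
	}
	vm_page_unlock(m);
#endif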

#endif				/* _KERNEL */
#endif				/* !_VM_PAGE_ */