FreeBSD/Linux Kernel Cross Reference
sys/vm/vm_object.h
/*-
 * SPDX-License-Identifier: (BSD-3-Clause AND MIT-CMU)
 *
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_object.h	8.3 (Berkeley) 1/12/94
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 *
 * $FreeBSD: releng/12.0/sys/vm/vm_object.h 328954 2018-02-06 22:10:07Z jeff $
 */

/*
 *	Virtual memory object module definitions.
 */

#ifndef	_VM_OBJECT_
#define	_VM_OBJECT_

#include <sys/queue.h>
#include <sys/_lock.h>
#include <sys/_mutex.h>
#include <sys/_pctrie.h>
#include <sys/_rwlock.h>
#include <sys/_domainset.h>

#include <vm/_vm_radix.h>

/*
 *	Types defined:
 *
 *	vm_object_t		Virtual memory object.
 *
 *	List of locks:
 *	(c)	const until freed
 *	(o)	per-object lock
 *	(f)	free pages queue mutex
 *
 */

#ifndef VM_PAGE_HAVE_PGLIST
TAILQ_HEAD(pglist, vm_page);
#define VM_PAGE_HAVE_PGLIST
#endif

struct vm_object {
	struct rwlock lock;
	TAILQ_ENTRY(vm_object) object_list;	/* list of all objects */
	LIST_HEAD(, vm_object) shadow_head;	/* objects that this is a shadow for */
	LIST_ENTRY(vm_object) shadow_list;	/* chain of shadow objects */
	struct pglist memq;			/* list of resident pages */
	struct vm_radix rtree;			/* root of the resident page radix trie */
	vm_pindex_t size;			/* object size */
	struct domainset_ref domain;		/* NUMA policy */
	int generation;				/* generation ID */
	int ref_count;				/* number of references */
	int shadow_count;			/* count of objects that this is a shadow for */
	vm_memattr_t memattr;			/* default memory attribute for pages */
	objtype_t type;				/* type of pager */
	u_short flags;				/* see below */
	u_short pg_color;			/* (c) color of first page in obj */
	u_int paging_in_progress;		/* paging (in or out), so don't collapse or destroy */
	int resident_page_count;		/* number of resident pages */
	struct vm_object *backing_object;	/* object that I'm a shadow of */
	vm_ooffset_t backing_object_offset;	/* offset in backing object */
	TAILQ_ENTRY(vm_object) pager_object_list; /* list of all objects of this pager type */
	LIST_HEAD(, vm_reserv) rvq;		/* list of reservations */
	void *handle;
	union {
		/*
		 * VNode pager
		 *
		 *	vnp_size - current size of file
		 */
		struct {
			off_t vnp_size;
			vm_ooffset_t writemappings;
		} vnp;

		/*
		 * Device pager
		 *
		 *	devp_pglist - list of allocated pages
		 */
		struct {
			TAILQ_HEAD(, vm_page) devp_pglist;
			struct cdev_pager_ops *ops;
			struct cdev *dev;
		} devp;

		/*
		 * SG pager
		 *
		 *	sgp_pglist - list of allocated pages
		 */
		struct {
			TAILQ_HEAD(, vm_page) sgp_pglist;
		} sgp;

		/*
		 * Swap pager
		 *
		 *	swp_tmpfs - back-pointer to the tmpfs vnode,
		 *		     if any, which uses the vm object
		 *		     as backing store.  The handle
		 *		     cannot be reused for linking,
		 *		     because the vnode can be
		 *		     reclaimed and recreated, which
		 *		     changes the handle and invalidates
		 *		     the hash chain.
		 *
		 *	swp_blks - pc-trie of the allocated swap blocks.
		 *
		 */
		struct {
			void *swp_tmpfs;
			struct pctrie swp_blks;
		} swp;
	} un_pager;
	struct ucred *cred;
	vm_ooffset_t charge;
	void *umtx_data;
};
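
/*
 * Illustrative sketch (an addition to this cross-reference, not part of
 * the original header): resident pages hang off both the memq list and
 * the rtree radix trie.  A simple walk of memq, asserting the object
 * lock via the macros defined below, might look like this.  It assumes
 * vm_page_t and its listq linkage from vm/vm_page.h; the function name
 * is hypothetical.
 */
#if 0	/* example only */
static __inline int
vm_object_example_count_resident(vm_object_t object)
{
	vm_page_t m;
	int count;

	VM_OBJECT_ASSERT_LOCKED(object);
	count = 0;
	TAILQ_FOREACH(m, &object->memq, listq)
		count++;
	/* On return, count should match object->resident_page_count. */
	return (count);
}
#endif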

/*
 * Flags
 */
#define	OBJ_FICTITIOUS	0x0001		/* (c) contains fictitious pages */
#define	OBJ_UNMANAGED	0x0002		/* (c) contains unmanaged pages */
#define	OBJ_POPULATE	0x0004		/* pager implements populate() */
#define	OBJ_DEAD	0x0008		/* dead objects (during rundown) */
#define	OBJ_NOSPLIT	0x0010		/* don't split this object */
#define	OBJ_UMTXDEAD	0x0020		/* umtx pshared was terminated */
#define	OBJ_PIPWNT	0x0040		/* paging in progress wanted */
#define	OBJ_PG_DTOR	0x0080		/* don't reset object, leave that for dtor */
#define	OBJ_MIGHTBEDIRTY 0x0100		/* object might be dirty, only for vnode */
#define	OBJ_TMPFS_NODE	0x0200		/* object belongs to tmpfs VREG node */
#define	OBJ_TMPFS_DIRTY	0x0400		/* dirty tmpfs obj */
#define	OBJ_COLORED	0x1000		/* pg_color is defined */
#define	OBJ_ONEMAPPING	0x2000		/* one USE (a single, non-forked) mapping */
#define	OBJ_DISCONNECTWNT 0x4000	/* disconnect from vnode wanted */
#define	OBJ_TMPFS	0x8000		/* has tmpfs vnode allocated */
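
/*
 * Illustrative note (an addition to this cross-reference, not part of
 * the original header): flag tests are plain bitwise checks against
 * object->flags, e.g. bailing out of an operation on a dying object:
 *
 *	if ((object->flags & OBJ_DEAD) != 0)
 *		return;
 */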

/*
 * Helpers to perform conversion between vm_object page indexes and offsets.
 * IDX_TO_OFF() converts an index into an offset.
 * OFF_TO_IDX() converts an offset into an index.  Since offsets are signed
 *   by default, the sign propagation in OFF_TO_IDX(), when applied to
 *   negative offsets, is intentional and returns a vm_object page index
 *   that cannot be created by a userspace mapping.
 * UOFF_TO_IDX() treats the offset as an unsigned value and converts it
 *   into an index accordingly.  Use it only when the full range of offset
 *   values is allowed.  Currently, this only applies to device mappings.
 * OBJ_MAX_SIZE specifies the maximum page index corresponding to the
 *   maximum unsigned offset.
 */
#define	IDX_TO_OFF(idx)	(((vm_ooffset_t)(idx)) << PAGE_SHIFT)
#define	OFF_TO_IDX(off)	((vm_pindex_t)(((vm_ooffset_t)(off)) >> PAGE_SHIFT))
#define	UOFF_TO_IDX(off) (((vm_pindex_t)(off)) >> PAGE_SHIFT)
#define	OBJ_MAX_SIZE	(UOFF_TO_IDX(UINT64_MAX) + 1)
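
/*
 * Worked example (an addition to this cross-reference, not part of the
 * original header): assuming the common PAGE_SHIFT of 12 (4 KB pages),
 * OFF_TO_IDX(0x2345) yields page index 2, because the offset falls in
 * the third page, and IDX_TO_OFF(2) yields 0x2000, the offset of that
 * page's start.  IDX_TO_OFF(OFF_TO_IDX(off)) therefore rounds an offset
 * down to a page boundary.
 */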

#ifdef	_KERNEL

#define	OBJPC_SYNC	0x1			/* sync I/O */
#define	OBJPC_INVAL	0x2			/* invalidate */
#define	OBJPC_NOSYNC	0x4			/* skip if VPO_NOSYNC */

/*
 * The following options are supported by vm_object_page_remove().
 */
#define	OBJPR_CLEANONLY	0x1		/* Don't remove dirty pages. */
#define	OBJPR_NOTMAPPED	0x2		/* Don't unmap pages. */

TAILQ_HEAD(object_q, vm_object);

extern struct object_q vm_object_list;	/* list of allocated objects */
extern struct mtx vm_object_list_mtx;	/* lock for object list and count */

extern struct vm_object kernel_object_store;

/* kernel and kmem are aliased for backwards KPI compat. */
#define	kernel_object	(&kernel_object_store)
#define	kmem_object	(&kernel_object_store)

#define	VM_OBJECT_ASSERT_LOCKED(object)					\
	rw_assert(&(object)->lock, RA_LOCKED)
#define	VM_OBJECT_ASSERT_RLOCKED(object)				\
	rw_assert(&(object)->lock, RA_RLOCKED)
#define	VM_OBJECT_ASSERT_WLOCKED(object)				\
	rw_assert(&(object)->lock, RA_WLOCKED)
#define	VM_OBJECT_ASSERT_UNLOCKED(object)				\
	rw_assert(&(object)->lock, RA_UNLOCKED)
#define	VM_OBJECT_LOCK_DOWNGRADE(object)				\
	rw_downgrade(&(object)->lock)
#define	VM_OBJECT_RLOCK(object)						\
	rw_rlock(&(object)->lock)
#define	VM_OBJECT_RUNLOCK(object)					\
	rw_runlock(&(object)->lock)
#define	VM_OBJECT_SLEEP(object, wchan, pri, wmesg, timo)		\
	rw_sleep((wchan), &(object)->lock, (pri), (wmesg), (timo))
#define	VM_OBJECT_TRYRLOCK(object)					\
	rw_try_rlock(&(object)->lock)
#define	VM_OBJECT_TRYWLOCK(object)					\
	rw_try_wlock(&(object)->lock)
#define	VM_OBJECT_TRYUPGRADE(object)					\
	rw_try_upgrade(&(object)->lock)
#define	VM_OBJECT_WLOCK(object)						\
	rw_wlock(&(object)->lock)
#define	VM_OBJECT_WOWNED(object)					\
	rw_wowned(&(object)->lock)
#define	VM_OBJECT_WUNLOCK(object)					\
	rw_wunlock(&(object)->lock)
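
/*
 * Usage sketch (an addition to this cross-reference, not part of the
 * original header): the macros above wrap the object's rwlock, so a
 * read-side lookup of a resident page typically looks as follows.  It
 * assumes vm_page_lookup() from vm/vm_page.h; the function name is
 * hypothetical.
 */
#if 0	/* example only */
static __inline vm_page_t
vm_object_example_lookup(vm_object_t object, vm_pindex_t pindex)
{
	vm_page_t m;

	VM_OBJECT_RLOCK(object);
	m = vm_page_lookup(object, pindex);
	VM_OBJECT_RUNLOCK(object);
	/* Real callers keep the lock, or busy the page, before using m. */
	return (m);
}
#endif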

/*
 *	The object must be locked or thread private.
 */
static __inline void
vm_object_set_flag(vm_object_t object, u_short bits)
{

	object->flags |= bits;
}
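
/*
 * Illustrative sketch (an addition to this cross-reference, not part of
 * the original header): a typical caller takes the write lock before
 * mutating flags, e.g.:
 *
 *	VM_OBJECT_WLOCK(object);
 *	vm_object_set_flag(object, OBJ_NOSPLIT);
 *	VM_OBJECT_WUNLOCK(object);
 */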

/*
 *	Conditionally set the object's color, which (1) enables the allocation
 *	of physical memory reservations for anonymous objects and larger-than-
 *	superpage-sized named objects and (2) determines the first page offset
 *	within the object at which a reservation may be allocated.  In other
 *	words, the color determines the alignment of the object with respect
 *	to the largest superpage boundary.  When mapping named objects, like
 *	files or POSIX shared memory objects, the color should be set to zero
 *	before a virtual address is selected for the mapping.  In contrast,
 *	for anonymous objects, the color may be set after the virtual address
 *	is selected.
 *
 *	The object must be locked.
 */
static __inline void
vm_object_color(vm_object_t object, u_short color)
{

	if ((object->flags & OBJ_COLORED) == 0) {
		object->pg_color = color;
		object->flags |= OBJ_COLORED;
	}
}

static __inline bool
vm_object_reserv(vm_object_t object)
{

	if (object != NULL &&
	    (object->flags & (OBJ_COLORED | OBJ_FICTITIOUS)) == OBJ_COLORED) {
		return (true);
	}
	return (false);
}

void vm_object_clear_flag(vm_object_t object, u_short bits);
void vm_object_pip_add(vm_object_t object, short i);
void vm_object_pip_subtract(vm_object_t object, short i);
void vm_object_pip_wakeup(vm_object_t object);
void vm_object_pip_wakeupn(vm_object_t object, short i);
void vm_object_pip_wait(vm_object_t object, char *waitid);

void umtx_shm_object_init(vm_object_t object);
void umtx_shm_object_terminated(vm_object_t object);
extern int umtx_shm_vnobj_persistent;

vm_object_t vm_object_allocate(objtype_t, vm_pindex_t);
boolean_t vm_object_coalesce(vm_object_t, vm_ooffset_t, vm_size_t, vm_size_t,
    boolean_t);
void vm_object_collapse(vm_object_t);
void vm_object_deallocate(vm_object_t);
void vm_object_destroy(vm_object_t);
void vm_object_terminate(vm_object_t);
void vm_object_set_writeable_dirty(vm_object_t);
void vm_object_init(void);
void vm_object_madvise(vm_object_t, vm_pindex_t, vm_pindex_t, int);
boolean_t vm_object_page_clean(vm_object_t object, vm_ooffset_t start,
    vm_ooffset_t end, int flags);
void vm_object_page_noreuse(vm_object_t object, vm_pindex_t start,
    vm_pindex_t end);
void vm_object_page_remove(vm_object_t object, vm_pindex_t start,
    vm_pindex_t end, int options);
boolean_t vm_object_populate(vm_object_t, vm_pindex_t, vm_pindex_t);
void vm_object_print(long addr, boolean_t have_addr, long count, char *modif);
void vm_object_reference(vm_object_t);
void vm_object_reference_locked(vm_object_t);
int vm_object_set_memattr(vm_object_t object, vm_memattr_t memattr);
void vm_object_shadow(vm_object_t *, vm_ooffset_t *, vm_size_t);
void vm_object_split(vm_map_entry_t);
boolean_t vm_object_sync(vm_object_t, vm_ooffset_t, vm_size_t, boolean_t,
    boolean_t);
void vm_object_unwire(vm_object_t object, vm_ooffset_t offset,
    vm_size_t length, uint8_t queue);
struct vnode *vm_object_vnode(vm_object_t object);
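
/*
 * Lifecycle sketch (an addition to this cross-reference, not part of
 * the original header): anonymous memory is commonly backed by an
 * OBJT_DEFAULT object whose lifetime is governed by ref_count.  The
 * object type, size, and function name below are illustrative.
 */
#if 0	/* example only */
static __inline void
vm_object_example_lifecycle(void)
{
	vm_object_t obj;

	/* One megabyte of anonymous backing store. */
	obj = vm_object_allocate(OBJT_DEFAULT, OFF_TO_IDX(1024 * 1024));
	vm_object_reference(obj);	/* take an extra reference */
	vm_object_deallocate(obj);	/* drop it */
	vm_object_deallocate(obj);	/* last reference terminates the object */
}
#endif
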
#endif /* _KERNEL */

#endif /* _VM_OBJECT_ */