sys/uvm/uvm_map.h
/*	$NetBSD: uvm_map.h,v 1.39 2004/02/10 01:30:49 matt Exp $	*/

/*
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * Copyright (c) 1991, 1993, The Regents of the University of California.
 *
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Charles D. Cranor,
 *      Washington University, the University of California, Berkeley and
 *      its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vm_map.h	8.3 (Berkeley) 3/15/94
 * from: Id: uvm_map.h,v 1.1.2.3 1998/02/07 01:16:55 chs Exp
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

#ifndef _UVM_UVM_MAP_H_
#define _UVM_UVM_MAP_H_

/*
 * uvm_map.h
 */

#ifdef _KERNEL

/*
 * macros
 */

/*
 * UVM_MAP_CLIP_START: ensure that the entry begins at or after
 * the starting address; if it doesn't, we split the entry.
 *
 * => map must be locked by caller
 */

#define UVM_MAP_CLIP_START(MAP,ENTRY,VA) { \
	if ((VA) > (ENTRY)->start) uvm_map_clip_start(MAP,ENTRY,VA); }

/*
 * UVM_MAP_CLIP_END: ensure that the entry ends at or before
 * the ending address; if it doesn't, we split the entry.
 *
 * => map must be locked by caller
 */

#define UVM_MAP_CLIP_END(MAP,ENTRY,VA) { \
	if ((VA) < (ENTRY)->end) uvm_map_clip_end(MAP,ENTRY,VA); }

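/*
 * Example (an illustrative sketch, not part of the original header): a
 * caller changing attributes over [start, end) first clips the containing
 * entry so that only the exact range is affected.  The variable names
 * here are hypothetical.
 *
 *	vm_map_lock(map);
 *	if (uvm_map_lookup_entry(map, start, &entry)) {
 *		UVM_MAP_CLIP_START(map, entry, start);
 *		UVM_MAP_CLIP_END(map, entry, end);
 *		entry->protection = new_prot;
 *	}
 *	vm_map_unlock(map);
 */
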
/*
 * extract flags
 */
#define UVM_EXTRACT_REMOVE	0x1	/* remove mapping from old map */
#define UVM_EXTRACT_CONTIG	0x2	/* try to keep it contig */
#define UVM_EXTRACT_QREF	0x4	/* use quick refs */
#define UVM_EXTRACT_FIXPROT	0x8	/* set prot to maxprot as we go */

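/*
 * Sketch (illustrative, not from the original header): these flags are
 * OR'd into the last argument of uvm_map_extract(), declared below.
 * E.g. to move "len" bytes at "srcva" from "srcmap" into "dstmap",
 * removing the old mapping (all names other than the flags and the
 * function are hypothetical):
 *
 *	vaddr_t dstva;
 *	int error;
 *
 *	error = uvm_map_extract(srcmap, srcva, len, dstmap, &dstva,
 *	    UVM_EXTRACT_REMOVE | UVM_EXTRACT_CONTIG);
 */
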
#endif /* _KERNEL */

#include <sys/tree.h>

#include <uvm/uvm_anon.h>

/*
 * Address map entries consist of start and end addresses,
 * a VM object (or sharing map) and offset into that object,
 * and user-exported inheritance and protection information.
 * Also included is control information for virtual copy operations.
 */
struct vm_map_entry {
	RB_ENTRY(vm_map_entry)	rb_entry;	/* tree information */
	vaddr_t			ownspace;	/* free space after */
	vaddr_t			space;		/* space in subtree */
	struct vm_map_entry	*prev;		/* previous entry */
	struct vm_map_entry	*next;		/* next entry */
	vaddr_t			start;		/* start address */
	vaddr_t			end;		/* end address */
	union {
		struct uvm_object *uvm_obj;	/* uvm object */
		struct vm_map	*sub_map;	/* belongs to another map */
	} object;				/* object I point to */
	voff_t			offset;		/* offset into object */
	int			etype;		/* entry type */
	vm_prot_t		protection;	/* protection code */
	vm_prot_t		max_protection;	/* maximum protection */
	vm_inherit_t		inheritance;	/* inheritance */
	int			wired_count;	/* can be paged if == 0 */
	struct vm_aref		aref;		/* anonymous overlay */
	int			advice;		/* madvise advice */
#define uvm_map_entry_stop_copy flags
	u_int8_t		flags;		/* flags */

#define UVM_MAP_STATIC		0x01		/* static map entry */
#define UVM_MAP_KMEM		0x02		/* from kmem entry pool */
#define UVM_MAP_NOMERGE		0x10		/* this entry is not mergeable */

};

#define VM_MAPENT_ISWIRED(entry)	((entry)->wired_count != 0)

/*
 * Maps are doubly-linked lists of map entries, kept sorted
 * by address.  A single hint is provided to start
 * searches again from the last successful search,
 * insertion, or removal.
 *
 * LOCKING PROTOCOL NOTES:
 * -----------------------
 *
 * VM map locking is a little complicated.  There are both shared
 * and exclusive locks on maps.  However, it is sometimes required
 * to downgrade an exclusive lock to a shared lock, and upgrade to
 * an exclusive lock again (to perform error recovery).  While we
 * hold the shared lock, another thread *must not* queue itself to
 * receive an exclusive lock before we upgrade back to exclusive;
 * otherwise the error recovery becomes extremely difficult, if not
 * impossible.
 *
 * In order to prevent this scenario, we introduce the notion of
 * a `busy' map.  A `busy' map is read-locked, but other threads
 * attempting to write-lock wait for this flag to clear before
 * entering the lock manager.  A map may only be marked busy
 * when the map is write-locked (and then the map must be downgraded
 * to read-locked), and may only be marked unbusy by the thread
 * which marked it busy (holding *either* a read-lock or a
 * write-lock, the latter being gained by an upgrade).
 *
 * Access to the map `flags' member is controlled by the `flags_lock'
 * simple lock.  Note that some flags are static (set once at map
 * creation time, and never changed), and thus require no locking
 * to check.  All flags which are r/w must be set or
 * cleared while the `flags_lock' is asserted.  Additional locking
 * requirements are:
 *
 *	VM_MAP_PAGEABLE		r/o static flag; no locking required
 *
 *	VM_MAP_INTRSAFE		r/o static flag; no locking required
 *
 *	VM_MAP_WIREFUTURE	r/w; may only be set or cleared when
 *				map is write-locked.  may be tested
 *				without asserting `flags_lock'.
 *
 *	VM_MAP_BUSY		r/w; may only be set when map is
 *				write-locked, may only be cleared by
 *				thread which set it, map read-locked
 *				or write-locked.  must be tested
 *				while `flags_lock' is asserted.
 *
 *	VM_MAP_WANTLOCK		r/w; may only be set when the map
 *				is busy, and thread is attempting
 *				to write-lock.  must be tested
 *				while `flags_lock' is asserted.
 *
 *	VM_MAP_DYING		r/o; set when a vmspace is being
 *				destroyed to indicate that updates
 *				to the pmap can be skipped.
 *
 *	VM_MAP_TOPDOWN		r/o; set when the vmspace is
 *				created if the unspecified map
 *				allocations are to be arranged in
 *				a "top down" manner.
 */
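
/*
 * Sketch of the busy-map protocol described above (illustrative only,
 * not part of the original header; the recoverable work is left
 * abstract).  It uses the locking operations defined later in this file:
 *
 *	vm_map_lock(map);		write-lock the map
 *	vm_map_busy(map);		mark busy: new writers now wait
 *	vm_map_downgrade(map);		drop to a read-lock, stay busy
 *	...				do work that may need recovery
 *	vm_map_upgrade(map);		regain the write-lock; no other
 *					writer can have slipped in
 *	vm_map_unbusy(map);		clear busy, wake any waiters
 *	vm_map_unlock(map);
 */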
struct vm_map {
	struct pmap *		pmap;		/* Physical map */
	struct lock		lock;		/* Lock for map data */
	RB_HEAD(uvm_tree, vm_map_entry) rbhead;	/* Tree for entries */
	struct vm_map_entry	header;		/* List of entries */
	int			nentries;	/* Number of entries */
	vsize_t			size;		/* virtual size */
	int			ref_count;	/* Reference count */
	struct simplelock	ref_lock;	/* Lock for ref_count field */
	struct vm_map_entry *	hint;		/* hint for quick lookups */
	struct simplelock	hint_lock;	/* lock for hint storage */
	struct vm_map_entry *	first_free;	/* First free space hint */
	int			flags;		/* flags */
	struct simplelock	flags_lock;	/* Lock for flags field */
	unsigned int		timestamp;	/* Version number */
#define min_offset		header.end
#define max_offset		header.start
};
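
/*
 * Note (a hedged editorial aside, not part of the original header):
 * `header' is the sentinel of the circular entry list, so its start/end
 * fields are free to bound the map's address range; min_offset and
 * max_offset above are aliases into it.  For example, vm_map_min(map)
 * (defined at the bottom of this file) expands to (map)->header.end.
 */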

/* vm_map flags */
#define VM_MAP_PAGEABLE		0x01		/* ro: entries are pageable */
#define VM_MAP_INTRSAFE		0x02		/* ro: interrupt safe map */
#define VM_MAP_WIREFUTURE	0x04		/* rw: wire future mappings */
#define VM_MAP_BUSY		0x08		/* rw: map is busy */
#define VM_MAP_WANTLOCK		0x10		/* rw: want to write-lock */
#define VM_MAP_DYING		0x20		/* rw: map is being destroyed */
#define VM_MAP_TOPDOWN		0x40		/* ro: arrange map top-down */

/* XXX: number of kernel maps and entries to statically allocate */

#if !defined(MAX_KMAPENT)
#if (50 + (2 * NPROC) > 1000)
#define MAX_KMAPENT (50 + (2 * NPROC))
#else
#define MAX_KMAPENT	1000	/* XXXCDC: no crash */
#endif
#endif	/* !defined MAX_KMAPENT */

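/*
 * Worked example (not from the original header): with NPROC defined as
 * 512, 50 + (2 * 512) = 1074 > 1000, so MAX_KMAPENT becomes 1074; with
 * NPROC = 100, 50 + 200 = 250 <= 1000, so the floor of 1000 is used.
 */
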
#ifdef _KERNEL
#define vm_map_modflags(map, set, clear)				\
do {									\
	simple_lock(&(map)->flags_lock);				\
	(map)->flags = ((map)->flags | (set)) & ~(clear);		\
	simple_unlock(&(map)->flags_lock);				\
} while (/*CONSTCOND*/ 0)
#endif /* _KERNEL */
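
/*
 * Usage sketch (illustrative, not from the original header): set or
 * clear an r/w flag under `flags_lock' in a single step, e.g.:
 *
 *	vm_map_modflags(map, VM_MAP_WIREFUTURE, 0);	set the flag
 *	vm_map_modflags(map, 0, VM_MAP_WIREFUTURE);	clear it again
 */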

/*
 * handle inline options
 */

#ifdef UVM_MAP_INLINE
#define MAP_INLINE static __inline
#else
#define MAP_INLINE /* nothing */
#endif /* UVM_MAP_INLINE */

/*
 * globals:
 */

#ifdef _KERNEL

#ifdef PMAP_GROWKERNEL
extern vaddr_t uvm_maxkaddr;
#endif

/*
 * protos: the following prototypes define the interface to vm_map
 */

MAP_INLINE
void		uvm_map_deallocate(struct vm_map *);

int		uvm_map_clean(struct vm_map *, vaddr_t, vaddr_t, int);
void		uvm_map_clip_start(struct vm_map *, struct vm_map_entry *,
		    vaddr_t);
void		uvm_map_clip_end(struct vm_map *, struct vm_map_entry *,
		    vaddr_t);
MAP_INLINE
struct vm_map	*uvm_map_create(pmap_t, vaddr_t, vaddr_t, int);
int		uvm_map_extract(struct vm_map *, vaddr_t, vsize_t,
		    struct vm_map *, vaddr_t *, int);
struct vm_map_entry *
		uvm_map_findspace(struct vm_map *, vaddr_t, vsize_t,
		    vaddr_t *, struct uvm_object *, voff_t, vsize_t, int);
int		uvm_map_inherit(struct vm_map *, vaddr_t, vaddr_t,
		    vm_inherit_t);
int		uvm_map_advice(struct vm_map *, vaddr_t, vaddr_t, int);
void		uvm_map_init(void);
boolean_t	uvm_map_lookup_entry(struct vm_map *, vaddr_t,
		    struct vm_map_entry **);
MAP_INLINE
void		uvm_map_reference(struct vm_map *);
int		uvm_map_replace(struct vm_map *, vaddr_t, vaddr_t,
		    struct vm_map_entry *, int);
int		uvm_map_reserve(struct vm_map *, vsize_t, vaddr_t, vsize_t,
		    vaddr_t *);
void		uvm_map_setup(struct vm_map *, vaddr_t, vaddr_t, int);
int		uvm_map_submap(struct vm_map *, vaddr_t, vaddr_t,
		    struct vm_map *);
MAP_INLINE
void		uvm_unmap(struct vm_map *, vaddr_t, vaddr_t);
void		uvm_unmap_detach(struct vm_map_entry *, int);
void		uvm_unmap_remove(struct vm_map *, vaddr_t, vaddr_t,
		    struct vm_map_entry **);

#endif /* _KERNEL */

/*
 * VM map locking operations:
 *
 *	These operations perform locking on the data portion of the
 *	map.
 *
 *	vm_map_lock_try: try to lock a map, failing if it is already locked.
 *
 *	vm_map_lock: acquire an exclusive (write) lock on a map.
 *
 *	vm_map_lock_read: acquire a shared (read) lock on a map.
 *
 *	vm_map_unlock: release an exclusive lock on a map.
 *
 *	vm_map_unlock_read: release a shared lock on a map.
 *
 *	vm_map_downgrade: downgrade an exclusive lock to a shared lock.
 *
 *	vm_map_upgrade: upgrade a shared lock to an exclusive lock.
 *
 *	vm_map_busy: mark a map as busy.
 *
 *	vm_map_unbusy: clear busy status on a map.
 *
 * Note that "intrsafe" maps use only exclusive spin locks.  We simply
 * use the sleep lock's interlock for this.
 */

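/*
 * Read-side sketch (illustrative, not part of the original header):
 * lookups that do not modify the map take only the shared lock:
 *
 *	struct vm_map_entry *entry;
 *
 *	vm_map_lock_read(map);
 *	if (uvm_map_lookup_entry(map, va, &entry))
 *		... inspect entry ...
 *	vm_map_unlock_read(map);
 */
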
#ifdef _KERNEL
/* XXX: clean up later */
#include <sys/time.h>
#include <sys/proc.h>	/* for tsleep(), wakeup() */
#include <sys/systm.h>	/* for panic() */

static __inline boolean_t	vm_map_lock_try(struct vm_map *);
static __inline void		vm_map_lock(struct vm_map *);
extern const char vmmapbsy[];

static __inline boolean_t
vm_map_lock_try(struct vm_map *map)
{
	boolean_t rv;

	if (map->flags & VM_MAP_INTRSAFE)
		rv = simple_lock_try(&map->lock.lk_interlock);
	else {
		simple_lock(&map->flags_lock);
		if (map->flags & VM_MAP_BUSY) {
			simple_unlock(&map->flags_lock);
			return (FALSE);
		}
		rv = (lockmgr(&map->lock, LK_EXCLUSIVE|LK_NOWAIT|LK_INTERLOCK,
		    &map->flags_lock) == 0);
	}

	if (rv)
		map->timestamp++;

	return (rv);
}
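
/*
 * Sketch (illustrative, not from the original header): callers that must
 * not sleep can poll with vm_map_lock_try() and back off on failure:
 *
 *	if (vm_map_lock_try(map) == FALSE)
 *		return;		back off; the map is busy or locked
 *	... modify the map ...
 *	vm_map_unlock(map);
 */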

static __inline void
vm_map_lock(struct vm_map *map)
{
	int error;

	if (map->flags & VM_MAP_INTRSAFE) {
		simple_lock(&map->lock.lk_interlock);
		return;
	}

 try_again:
	simple_lock(&map->flags_lock);
	while (map->flags & VM_MAP_BUSY) {
		map->flags |= VM_MAP_WANTLOCK;
		ltsleep(&map->flags, PVM, vmmapbsy, 0, &map->flags_lock);
	}

	error = lockmgr(&map->lock, LK_EXCLUSIVE|LK_SLEEPFAIL|LK_INTERLOCK,
	    &map->flags_lock);

	if (error) {
		KASSERT(error == ENOLCK);
		goto try_again;
	}

	map->timestamp++;
}

#ifdef DIAGNOSTIC
#define vm_map_lock_read(map)						\
do {									\
	if ((map)->flags & VM_MAP_INTRSAFE)				\
		panic("vm_map_lock_read: intrsafe Map");		\
	(void) lockmgr(&(map)->lock, LK_SHARED, NULL);			\
} while (/*CONSTCOND*/ 0)
#else
#define vm_map_lock_read(map)						\
	(void) lockmgr(&(map)->lock, LK_SHARED, NULL)
#endif

#define vm_map_unlock(map)						\
do {									\
	if ((map)->flags & VM_MAP_INTRSAFE)				\
		simple_unlock(&(map)->lock.lk_interlock);		\
	else								\
		(void) lockmgr(&(map)->lock, LK_RELEASE, NULL);		\
} while (/*CONSTCOND*/ 0)

#define vm_map_unlock_read(map)						\
	(void) lockmgr(&(map)->lock, LK_RELEASE, NULL)

#define vm_map_downgrade(map)						\
	(void) lockmgr(&(map)->lock, LK_DOWNGRADE, NULL)

#ifdef DIAGNOSTIC
#define vm_map_upgrade(map)						\
do {									\
	if (lockmgr(&(map)->lock, LK_UPGRADE, NULL) != 0)		\
		panic("vm_map_upgrade: failed to upgrade lock");	\
} while (/*CONSTCOND*/ 0)
#else
#define vm_map_upgrade(map)						\
	(void) lockmgr(&(map)->lock, LK_UPGRADE, NULL)
#endif

#define vm_map_busy(map)						\
do {									\
	simple_lock(&(map)->flags_lock);				\
	(map)->flags |= VM_MAP_BUSY;					\
	simple_unlock(&(map)->flags_lock);				\
} while (/*CONSTCOND*/ 0)

#define vm_map_unbusy(map)						\
do {									\
	int oflags;							\
									\
	simple_lock(&(map)->flags_lock);				\
	oflags = (map)->flags;						\
	(map)->flags &= ~(VM_MAP_BUSY|VM_MAP_WANTLOCK);			\
	simple_unlock(&(map)->flags_lock);				\
	if (oflags & VM_MAP_WANTLOCK)					\
		wakeup(&(map)->flags);					\
} while (/*CONSTCOND*/ 0)
#endif /* _KERNEL */

/*
 * Functions implemented as macros
 */
#define		vm_map_min(map)		((map)->min_offset)
#define		vm_map_max(map)		((map)->max_offset)
#define		vm_map_pmap(map)	((map)->pmap)

#endif /* _UVM_UVM_MAP_H_ */