/* FreeBSD/Linux Kernel Cross Reference: sys/vm/vm_debug.c */
1 /*
2 * Mach Operating System
3 * Copyright (c) 1993,1992,1991,1990 Carnegie Mellon University
4 * All Rights Reserved.
5 *
6 * Permission to use, copy, modify and distribute this software and its
7 * documentation is hereby granted, provided that both the copyright
8 * notice and this permission notice appear in all copies of the
9 * software, derivative works or modified versions, and any portions
10 * thereof, and that both notices appear in supporting documentation.
11 *
12 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
13 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
14 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
15 *
16 * Carnegie Mellon requests users of this software to return to
17 *
18 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
19 * School of Computer Science
20 * Carnegie Mellon University
21 * Pittsburgh PA 15213-3890
22 *
23 * any improvements or extensions that they make and grant Carnegie Mellon
24 * the rights to redistribute these changes.
25 */
26 /*
27 * HISTORY
28 * $Log: vm_debug.c,v $
29 * Revision 2.14 93/11/17 18:53:14 dbg
30 * Added ANSI function prototypes.
31 * [93/10/27 dbg]
32 *
33 * Revision 2.13 93/01/14 18:00:49 danner
34 * 64bit cleanup.
35 * [92/12/01 af]
36 *
37 * Revision 2.12 92/08/03 18:00:06 jfriedl
38 * removed silly prototypes
39 * [92/08/02 jfriedl]
40 *
41 * Revision 2.11 92/05/21 17:25:28 jfriedl
42 * Added stuff to quiet gcc warnings.
43 * Also removed unused var 'kr' in mach_vm_region_info().
44 * [92/05/16 jfriedl]
45 *
46 * Revision 2.10 92/02/25 14:17:42 elf
47 * Added line to mach_vm_region_info to reflect sharing.
48 * Note: vm_region_info needs to be updated.
49 * [92/02/24 elf]
50 *
51 * Revision 2.9 92/02/23 19:51:55 elf
52 * Eliminate keep_wired argument from vm_map_copyin().
53 * [92/02/21 10:14:21 dlb]
54 *
55 * No more sharing maps.
56 * [92/01/07 11:02:46 dlb]
57 *
58 * Revision 2.7.8.1 92/02/18 19:20:54 jeffreyh
59 * Include thread.h when compiling with VM_OBJECT_DEBUG
60 * [91/09/09 bernadat]
61 *
62 * Revision 2.8 92/01/14 16:47:39 rpd
63 * Changed host_virtual_physical_table_info and
64 * mach_vm_object_pages for CountInOut.
65 * [92/01/14 rpd]
66 *
67 * Removed <mach_debug/page_info.h>.
68 * [92/01/08 rpd]
69 * Fixed mach_vm_region_info for submaps and share maps.
70 * [92/01/02 rpd]
71 *
72 * Replaced the old mach_vm_region_info with new mach_vm_region_info,
73 * mach_vm_object_info, mach_vm_object_pages calls.
74 * Removed vm_mapped_pages_info.
75 * [91/12/30 rpd]
76 *
77 * Revision 2.7 91/08/28 11:17:57 jsb
78 * single_use --> use_old_pageout in vm_object.
79 * [91/08/05 17:44:32 dlb]
80 *
81 * Revision 2.6 91/05/14 17:48:16 mrt
82 * Correcting copyright
83 *
84 * Revision 2.5 91/02/05 17:57:39 mrt
85 * Changed to new Mach copyright
86 * [91/02/01 16:31:12 mrt]
87 *
88 * Revision 2.4 91/01/08 16:44:26 rpd
89 * Added host_virtual_physical_table_info.
90 * [91/01/02 rpd]
91 *
92 * Revision 2.3 90/10/12 13:05:13 rpd
93 * Removed copy_on_write field.
94 * [90/10/08 rpd]
95 *
96 * Revision 2.2 90/06/02 15:10:26 rpd
97 * Moved vm_mapped_pages_info here from vm/vm_map.c.
98 * [90/05/31 rpd]
99 *
100 * Created.
101 * [90/04/20 rpd]
102 *
103 */
104 /*
105 * File: vm/vm_debug.c.
106 * Author: Rich Draves
107 * Date: March, 1990
108 *
109 * Exported kernel calls. See mach_debug/mach_debug.defs.
110 */
111
112 #include <mach/kern_return.h>
113 #include <mach/machine/vm_types.h>
114 #include <mach/memory_object.h>
115 #include <mach/vm_prot.h>
116 #include <mach/vm_inherit.h>
117 #include <mach/vm_param.h>
118 #include <mach_debug/vm_info.h>
119 #include <mach_debug/hash_info.h>
120 #include <kern/host.h>
121 #include <kern/memory.h>
122 #include <kern/task.h>
123 #include <kern/thread.h>
124 #include <vm/vm_map.h>
125 #include <vm/vm_kern.h>
126 #include <vm/vm_object.h>
127 #include <ipc/ipc_port.h>
128
129
130
131 /*
132 * Routine: vm_object_real_name
133 * Purpose:
134 * Convert a VM object to a name port.
135 * Conditions:
136 * Takes object and port locks.
137 * Returns:
138 * A naked send right for the object's name port,
139 * or IP_NULL if the object or its name port is null.
140 */
141
142 ipc_port_t
143 vm_object_real_name(
144 vm_object_t object)
145 {
146 ipc_port_t port = IP_NULL;
147
148 if (object != VM_OBJECT_NULL) {
149 vm_object_lock(object);
150 if (object->pager_name != IP_NULL)
151 port = ipc_port_make_send(object->pager_name);
152 vm_object_unlock(object);
153 }
154
155 return port;
156 }
157
158 /*
159 * Routine: mach_vm_region_info [kernel call]
160 * Purpose:
161 * Retrieve information about a VM region,
162 * including info about the object chain.
163 * Conditions:
164 * Nothing locked.
165 * Returns:
166 * KERN_SUCCESS Retrieve region/object info.
167 * KERN_INVALID_TASK The map is null.
168 * KERN_NO_SPACE There is no entry at/after the address.
169 */
170
/*
 *	Walk the map (descending through submaps) to find the entry
 *	containing, or the first entry following, `address', then copy
 *	its attributes into *regionp and return a naked send right for
 *	the backing object's name port in *portp (IP_NULL if none).
 */
kern_return_t
mach_vm_region_info(
	vm_map_t	map,		/* task's address map */
	vm_offset_t	address,	/* search starts at/after this address */
	vm_region_info_t *regionp,	/* out: region/entry attributes */
	ipc_port_t	*portp)		/* out: object name port, or IP_NULL */
{
	vm_map_t cmap;		/* current map in traversal */
	vm_map_t nmap;		/* next map to look at */
	vm_map_entry_t entry;	/* entry in current map */
	vm_object_t object;

	if (map == VM_MAP_NULL)
		return KERN_INVALID_TASK;

	/* find the entry containing (or following) the address */

	vm_map_lock_read(map);
	for (cmap = map;;) {
		/* cmap is read-locked */

		if (!vm_map_lookup_entry(cmap, address, &entry)) {
			/*
			 * No entry contains the address; vm_map_lookup_entry
			 * left `entry' at the preceding entry, so advance to
			 * the first entry past the address.
			 */
			entry = entry->vme_next;
			if (entry == vm_map_to_entry(cmap)) {
				if (map == cmap) {
					/* ran off the end of the top map */
					vm_map_unlock_read(cmap);
					return KERN_NO_SPACE;
				}

				/* back out to top-level & skip this submap */

				address = vm_map_max(cmap);
				vm_map_unlock_read(cmap);
				vm_map_lock_read(map);
				cmap = map;
				continue;
			}
		}

		if (entry->is_sub_map) {
			/* move down to the sub map */

			/* lock the child before dropping the parent lock */
			nmap = entry->object.sub_map;
			vm_map_lock_read(nmap);
			vm_map_unlock_read(cmap);
			cmap = nmap;
			continue;
		} else {
			break;
		}
		/*NOTREACHED*/
	}

	/* cmap is still read-locked; entry is a real (non-submap) entry */

	assert(entry->vme_start < entry->vme_end);

	regionp->vri_start = entry->vme_start;
	regionp->vri_end = entry->vme_end;

	/* attributes from the real entry */

	regionp->vri_protection = entry->protection;
	regionp->vri_max_protection = entry->max_protection;
	regionp->vri_inheritance = entry->inheritance;
	regionp->vri_wired_count = entry->wired_count;
	regionp->vri_user_wired_count = entry->user_wired_count;

	object = entry->object.vm_object;
	*portp = vm_object_real_name(object);	/* IP_NULL if object/name null */
	regionp->vri_object = (vm_offset_t) object;	/* raw kernel address, for debugging */
	regionp->vri_offset = entry->offset;
	regionp->vri_needs_copy = entry->needs_copy;

	regionp->vri_sharing = entry->is_shared;

	vm_map_unlock_read(cmap);
	return KERN_SUCCESS;
}
249
250 /*
251 * Routine: mach_vm_object_info [kernel call]
252 * Purpose:
253 * Retrieve information about a VM object.
254 * Conditions:
255 * Nothing locked.
256 * Returns:
257 * KERN_SUCCESS Retrieved object info.
258 * KERN_INVALID_ARGUMENT The object is null.
259 */
260
/*
 *	Snapshot a VM object's fields into *infop and hand back naked
 *	send rights for the name ports of its shadow and copy objects
 *	(IP_NULL where absent).
 */
kern_return_t
mach_vm_object_info(
	vm_object_t	object,		/* object to examine */
	vm_object_info_t *infop,	/* out: snapshot of object state */
	ipc_port_t	*shadowp,	/* out: shadow's name port, or IP_NULL */
	ipc_port_t	*copyp)		/* out: copy's name port, or IP_NULL */
{
	vm_object_info_t info;
	vm_object_info_state_t state;
	ipc_port_t shadow, copy;

	if (object == VM_OBJECT_NULL)
		return KERN_INVALID_ARGUMENT;

	/*
	 * Because of lock-ordering/deadlock considerations,
	 * we can't use vm_object_real_name for the copy object.
	 */

    retry:
	vm_object_lock(object);
	copy = IP_NULL;
	if (object->copy != VM_OBJECT_NULL) {
		/*
		 * Try-lock the copy object; if that fails, drop the
		 * object lock, pause, and start over rather than risk
		 * deadlock by blocking while holding `object' locked.
		 */
		if (!vm_object_lock_try(object->copy)) {
			vm_object_unlock(object);
			simple_lock_pause(); /* wait a bit */
			goto retry;
		}

		if (object->copy->pager_name != IP_NULL)
			copy = ipc_port_make_send(object->copy->pager_name);
		vm_object_unlock(object->copy);
	}
	/* vm_object_real_name locks the shadow object itself */
	shadow = vm_object_real_name(object->shadow);

	/* snapshot all fields while `object' is still locked */

	info.voi_object = (vm_offset_t) object;	/* raw kernel address, for debugging */
	info.voi_pagesize = PAGE_SIZE;
	info.voi_size = object->size;
	info.voi_ref_count = object->ref_count;
	info.voi_resident_page_count = object->resident_page_count;
	info.voi_absent_count = object->absent_count;
	info.voi_copy = (vm_offset_t) object->copy;
	info.voi_shadow = (vm_offset_t) object->shadow;
	info.voi_shadow_offset = object->shadow_offset;
	info.voi_paging_offset = object->paging_offset;
	info.voi_copy_strategy = object->copy_strategy;
	info.voi_last_alloc = object->last_alloc;
	info.voi_paging_in_progress = object->paging_in_progress;

	/* fold the object's boolean flags into one state bitmask */

	state = 0;
	if (object->pager_created)
		state |= VOI_STATE_PAGER_CREATED;
	if (object->pager_initialized)
		state |= VOI_STATE_PAGER_INITIALIZED;
	if (object->pager_ready)
		state |= VOI_STATE_PAGER_READY;
	if (object->can_persist)
		state |= VOI_STATE_CAN_PERSIST;
	if (object->internal)
		state |= VOI_STATE_INTERNAL;
	if (object->temporary)
		state |= VOI_STATE_TEMPORARY;
	if (object->alive)
		state |= VOI_STATE_ALIVE;
	if (object->lock_in_progress)
		state |= VOI_STATE_LOCK_IN_PROGRESS;
	if (object->lock_restart)
		state |= VOI_STATE_LOCK_RESTART;
	if (object->use_old_pageout)
		state |= VOI_STATE_USE_OLD_PAGEOUT;
	info.voi_state = state;
	vm_object_unlock(object);

	/* copy out results only after all locks are dropped */

	*infop = info;
	*shadowp = shadow;
	*copyp = copy;
	return KERN_SUCCESS;
}
339
340 #define VPI_STATE_NODATA (VPI_STATE_BUSY|VPI_STATE_FICTITIOUS| \
341 VPI_STATE_PRIVATE|VPI_STATE_ABSENT)
342
343 /*
344 * Routine: mach_vm_object_pages [kernel call]
345 * Purpose:
346 * Retrieve information about the pages in a VM object.
347 * Conditions:
348 * Nothing locked. Obeys CountInOut protocol.
349 * Returns:
350 * KERN_SUCCESS Retrieved object info.
351 * KERN_INVALID_ARGUMENT The object is null.
352 * KERN_RESOURCE_SHORTAGE Couldn't allocate memory.
353 */
354
355 kern_return_t
356 mach_vm_object_pages(
357 vm_object_t object,
358 vm_page_info_array_t *pagesp,
359 natural_t *countp)
360 {
361 vm_size_t size;
362 vm_offset_t addr;
363 vm_page_info_t *pages;
364 unsigned int potential, actual, count;
365 vm_page_t p;
366 kern_return_t kr;
367
368 if (object == VM_OBJECT_NULL)
369 return KERN_INVALID_ARGUMENT;
370
371 /* start with in-line memory */
372
373 pages = *pagesp;
374 potential = *countp;
375
376 for (size = 0;;) {
377 vm_object_lock(object);
378 actual = object->resident_page_count;
379 if (actual <= potential)
380 break;
381 vm_object_unlock(object);
382
383 if (pages != *pagesp)
384 kmem_free(ipc_kernel_map, addr, size);
385
386 size = round_page(actual * sizeof *pages);
387 kr = kmem_alloc(ipc_kernel_map, &addr, size);
388 if (kr != KERN_SUCCESS)
389 return kr;
390
391 pages = (vm_page_info_t *) addr;
392 potential = size/sizeof *pages;
393 }
394 /* object is locked, we have enough wired memory */
395
396 count = 0;
397 queue_iterate(&object->memq, p, vm_page_t, listq) {
398 vm_page_info_t *info = &pages[count++];
399 vm_page_info_state_t state = 0;
400
401 info->vpi_offset = p->offset;
402 info->vpi_phys_addr = p->phys_addr;
403 info->vpi_wire_count = p->wire_count;
404 info->vpi_page_lock = p->page_lock;
405 info->vpi_unlock_request = p->unlock_request;
406
407 if (p->busy)
408 state |= VPI_STATE_BUSY;
409 if (p->wanted)
410 state |= VPI_STATE_WANTED;
411 if (p->tabled)
412 state |= VPI_STATE_TABLED;
413 if (p->fictitious)
414 state |= VPI_STATE_FICTITIOUS;
415 if (p->private)
416 state |= VPI_STATE_PRIVATE;
417 if (p->absent)
418 state |= VPI_STATE_ABSENT;
419 if (p->error)
420 state |= VPI_STATE_ERROR;
421 if (p->dirty)
422 state |= VPI_STATE_DIRTY;
423 if (p->precious)
424 state |= VPI_STATE_PRECIOUS;
425 if (p->overwriting)
426 state |= VPI_STATE_OVERWRITING;
427
428 if (((state & (VPI_STATE_NODATA|VPI_STATE_DIRTY)) == 0) &&
429 pmap_is_modified(p->phys_addr)) {
430 state |= VPI_STATE_DIRTY;
431 p->dirty = TRUE;
432 }
433
434 vm_page_lock_queues();
435 if (p->inactive)
436 state |= VPI_STATE_INACTIVE;
437 if (p->active)
438 state |= VPI_STATE_ACTIVE;
439 if (p->laundry)
440 state |= VPI_STATE_LAUNDRY;
441 if (p->free)
442 state |= VPI_STATE_FREE;
443 if (p->reference)
444 state |= VPI_STATE_REFERENCE;
445
446 if (((state & (VPI_STATE_NODATA|VPI_STATE_REFERENCE)) == 0) &&
447 pmap_is_referenced(p->phys_addr)) {
448 state |= VPI_STATE_REFERENCE;
449 p->reference = TRUE;
450 }
451 vm_page_unlock_queues();
452
453 info->vpi_state = state;
454 }
455
456 if (object->resident_page_count != count)
457 panic("mach_vm_object_pages");
458 vm_object_unlock(object);
459
460 if (pages == *pagesp) {
461 /* data fit in-line; nothing to deallocate */
462
463 *countp = actual;
464 } else if (actual == 0) {
465 kmem_free(ipc_kernel_map, addr, size);
466
467 *countp = 0;
468 } else {
469 vm_size_t size_used, rsize_used;
470 vm_map_copy_t copy;
471
472 /* kmem_alloc doesn't zero memory */
473
474 size_used = actual * sizeof *pages;
475 rsize_used = round_page(size_used);
476
477 if (rsize_used != size)
478 kmem_free(ipc_kernel_map,
479 addr + rsize_used, size - rsize_used);
480
481 if (size_used != rsize_used)
482 bzero((char *) (addr + size_used),
483 rsize_used - size_used);
484
485 kr = vm_map_copyin(ipc_kernel_map, addr, rsize_used,
486 TRUE, ©);
487 assert(kr == KERN_SUCCESS);
488
489 *pagesp = (vm_page_info_t *) copy;
490 *countp = actual;
491 }
492
493 return KERN_SUCCESS;
494 }
495
496 /*
497 * Routine: host_virtual_physical_table_info
498 * Purpose:
499 * Return information about the VP table.
500 * Conditions:
501 * Nothing locked. Obeys CountInOut protocol.
502 * Returns:
503 * KERN_SUCCESS Returned information.
504 * KERN_INVALID_HOST The host is null.
505 * KERN_RESOURCE_SHORTAGE Couldn't allocate memory.
506 */
507
508 kern_return_t
509 host_virtual_physical_table_info(
510 host_t host,
511 hash_info_bucket_array_t *infop,
512 natural_t *countp)
513 {
514 vm_offset_t addr;
515 vm_size_t size = 0;/* '=0' to quiet gcc warnings */
516 hash_info_bucket_t *info;
517 unsigned int potential, actual;
518 kern_return_t kr;
519
520 if (host == HOST_NULL)
521 return KERN_INVALID_HOST;
522
523 /* start with in-line data */
524
525 info = *infop;
526 potential = *countp;
527
528 for (;;) {
529 actual = vm_page_info(info, potential);
530 if (actual <= potential)
531 break;
532
533 /* allocate more memory */
534
535 if (info != *infop)
536 kmem_free(ipc_kernel_map, addr, size);
537
538 size = round_page(actual * sizeof *info);
539 kr = kmem_alloc_pageable(ipc_kernel_map, &addr, size);
540 if (kr != KERN_SUCCESS)
541 return KERN_RESOURCE_SHORTAGE;
542
543 info = (hash_info_bucket_t *) addr;
544 potential = size/sizeof *info;
545 }
546
547 if (info == *infop) {
548 /* data fit in-line; nothing to deallocate */
549
550 *countp = actual;
551 } else if (actual == 0) {
552 kmem_free(ipc_kernel_map, addr, size);
553
554 *countp = 0;
555 } else {
556 vm_map_copy_t copy;
557 vm_size_t used;
558
559 used = round_page(actual * sizeof *info);
560
561 if (used != size)
562 kmem_free(ipc_kernel_map, addr + used, size - used);
563
564 kr = vm_map_copyin(ipc_kernel_map, addr, used,
565 TRUE, ©);
566 assert(kr == KERN_SUCCESS);
567
568 *infop = (hash_info_bucket_t *) copy;
569 *countp = actual;
570 }
571
572 return KERN_SUCCESS;
573 }
/* Cache object: 477f1d5ba78bdd6994f10fc031ce1e20 */