FreeBSD/Linux Kernel Cross Reference
sys/vm/vm_debug.c
1 /*
2 * Mach Operating System
3 * Copyright (c) 1991,1990 Carnegie Mellon University
4 * All Rights Reserved.
5 *
6 * Permission to use, copy, modify and distribute this software and its
7 * documentation is hereby granted, provided that both the copyright
8 * notice and this permission notice appear in all copies of the
9 * software, derivative works or modified versions, and any portions
10 * thereof, and that both notices appear in supporting documentation.
11 *
12 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
13 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
14 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
15 *
16 * Carnegie Mellon requests users of this software to return to
17 *
18 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
19 * School of Computer Science
20 * Carnegie Mellon University
21 * Pittsburgh PA 15213-3890
22 *
23 * any improvements or extensions that they make and grant Carnegie Mellon
24 * the rights to redistribute these changes.
25 */
26 /*
27 * HISTORY
28 * $Log: vm_debug.c,v $
29 * Revision 2.13 93/01/14 18:00:49 danner
30 * 64bit cleanup.
31 * [92/12/01 af]
32 *
33 * Revision 2.12 92/08/03 18:00:06 jfriedl
34 * removed silly prototypes
35 * [92/08/02 jfriedl]
36 *
37 * Revision 2.11 92/05/21 17:25:28 jfriedl
38 * Added stuff to quiet gcc warnings.
39 * Also removed unused var 'kr' in mach_vm_region_info().
40 * [92/05/16 jfriedl]
41 *
42 * Revision 2.10 92/02/25 14:17:42 elf
43 * Added line to mach_vm_region_info to reflect sharing.
44 * Note: vm_region_info needs to be updated.
45 * [92/02/24 elf]
46 *
47 * Revision 2.9 92/02/23 19:51:55 elf
48 * Eliminate keep_wired argument from vm_map_copyin().
49 * [92/02/21 10:14:21 dlb]
50 *
51 * No more sharing maps.
52 * [92/01/07 11:02:46 dlb]
53 *
54 * Revision 2.7.8.1 92/02/18 19:20:54 jeffreyh
55 * Include thread.h when compiling with VM_OBJECT_DEBUG
56 * [91/09/09 bernadat]
57 *
58 * Revision 2.8 92/01/14 16:47:39 rpd
59 * Changed host_virtual_physical_table_info and
60 * mach_vm_object_pages for CountInOut.
61 * [92/01/14 rpd]
62 *
63 * Removed <mach_debug/page_info.h>.
64 * [92/01/08 rpd]
65 * Fixed mach_vm_region_info for submaps and share maps.
66 * [92/01/02 rpd]
67 *
68 * Replaced the old mach_vm_region_info with new mach_vm_region_info,
69 * mach_vm_object_info, mach_vm_object_pages calls.
70 * Removed vm_mapped_pages_info.
71 * [91/12/30 rpd]
72 *
73 * Revision 2.7 91/08/28 11:17:57 jsb
74 * single_use --> use_old_pageout in vm_object.
75 * [91/08/05 17:44:32 dlb]
76 *
77 * Revision 2.6 91/05/14 17:48:16 mrt
78 * Correcting copyright
79 *
80 * Revision 2.5 91/02/05 17:57:39 mrt
81 * Changed to new Mach copyright
82 * [91/02/01 16:31:12 mrt]
83 *
84 * Revision 2.4 91/01/08 16:44:26 rpd
85 * Added host_virtual_physical_table_info.
86 * [91/01/02 rpd]
87 *
88 * Revision 2.3 90/10/12 13:05:13 rpd
89 * Removed copy_on_write field.
90 * [90/10/08 rpd]
91 *
92 * Revision 2.2 90/06/02 15:10:26 rpd
93 * Moved vm_mapped_pages_info here from vm/vm_map.c.
94 * [90/05/31 rpd]
95 *
96 * Created.
97 * [90/04/20 rpd]
98 *
99 */
100 /*
101 * File: vm/vm_debug.c.
102 * Author: Rich Draves
103 * Date: March, 1990
104 *
105 * Exported kernel calls. See mach_debug/mach_debug.defs.
106 */
107
108 #include <kern/thread.h>
109 #include <mach/kern_return.h>
110 #include <mach/machine/vm_types.h>
111 #include <mach/memory_object.h>
112 #include <mach/vm_prot.h>
113 #include <mach/vm_inherit.h>
114 #include <mach/vm_param.h>
115 #include <mach_debug/vm_info.h>
116 #include <mach_debug/hash_info.h>
117 #include <vm/vm_map.h>
118 #include <vm/vm_kern.h>
119 #include <vm/vm_object.h>
120 #include <kern/task.h>
121 #include <kern/host.h>
122 #include <ipc/ipc_port.h>
123
124
125
126 /*
127 * Routine: vm_object_real_name
128 * Purpose:
129 * Convert a VM object to a name port.
130 * Conditions:
131 * Takes object and port locks.
132 * Returns:
133 * A naked send right for the object's name port,
134 * or IP_NULL if the object or its name port is null.
135 */
136
137 ipc_port_t
138 vm_object_real_name(object)
139 vm_object_t object;
140 {
141 ipc_port_t port = IP_NULL;
142
143 if (object != VM_OBJECT_NULL) {
144 vm_object_lock(object);
145 if (object->pager_name != IP_NULL)
146 port = ipc_port_make_send(object->pager_name);
147 vm_object_unlock(object);
148 }
149
150 return port;
151 }
152
153 /*
154 * Routine: mach_vm_region_info [kernel call]
155 * Purpose:
156 * Retrieve information about a VM region,
157 * including info about the object chain.
158 * Conditions:
159 * Nothing locked.
160 * Returns:
161 * KERN_SUCCESS Retrieve region/object info.
162 * KERN_INVALID_TASK The map is null.
163 * KERN_NO_SPACE There is no entry at/after the address.
164 */
165
kern_return_t
mach_vm_region_info(map, address, regionp, portp)
	vm_map_t map;
	vm_offset_t address;
	vm_region_info_t *regionp;
	ipc_port_t *portp;
{
	vm_map_t cmap;		/* current map in traversal */
	vm_map_t nmap;		/* next map to look at */
	vm_map_entry_t entry;	/* entry in current map */
	vm_object_t object;

	if (map == VM_MAP_NULL)
		return KERN_INVALID_TASK;

	/* find the entry containing (or following) the address */

	vm_map_lock_read(map);
	for (cmap = map;;) {
		/* cmap is read-locked */

		if (!vm_map_lookup_entry(cmap, address, &entry)) {
			/* no entry contains address; consider the next one */
			entry = entry->vme_next;
			if (entry == vm_map_to_entry(cmap)) {
				/* ran off the end of cmap */
				if (map == cmap) {
					/* end of the top-level map: nothing
					   at or after address anywhere */
					vm_map_unlock_read(cmap);
					return KERN_NO_SPACE;
				}

				/* back out to top-level & skip this submap */

				address = vm_map_max(cmap);
				vm_map_unlock_read(cmap);
				vm_map_lock_read(map);
				cmap = map;
				continue;
			}
		}

		if (entry->is_sub_map) {
			/* move down to the sub map */

			/* hand-over-hand: lock the submap before
			   dropping the parent's read lock */
			nmap = entry->object.sub_map;
			vm_map_lock_read(nmap);
			vm_map_unlock_read(cmap);
			cmap = nmap;
			continue;
		} else {
			/* found a real (non-submap) entry */
			break;
		}
		/*NOTREACHED*/
	}

	/* cmap is still read-locked; entry belongs to cmap */

	assert(entry->vme_start < entry->vme_end);

	regionp->vri_start = entry->vme_start;
	regionp->vri_end = entry->vme_end;

	/* attributes from the real entry */

	regionp->vri_protection = entry->protection;
	regionp->vri_max_protection = entry->max_protection;
	regionp->vri_inheritance = entry->inheritance;
	regionp->vri_wired_count = entry->wired_count;
	regionp->vri_user_wired_count = entry->user_wired_count;

	/* *portp gets a naked send right (or IP_NULL) for the
	   object's name port; the caller consumes it */
	object = entry->object.vm_object;
	*portp = vm_object_real_name(object);
	regionp->vri_object = (vm_offset_t) object;
	regionp->vri_offset = entry->offset;
	regionp->vri_needs_copy = entry->needs_copy;

	regionp->vri_sharing = entry->is_shared;

	vm_map_unlock_read(cmap);
	return KERN_SUCCESS;
}
244
245 /*
246 * Routine: mach_vm_object_info [kernel call]
247 * Purpose:
248 * Retrieve information about a VM object.
249 * Conditions:
250 * Nothing locked.
251 * Returns:
252 * KERN_SUCCESS Retrieved object info.
253 * KERN_INVALID_ARGUMENT The object is null.
254 */
255
kern_return_t
mach_vm_object_info(object, infop, shadowp, copyp)
	vm_object_t object;
	vm_object_info_t *infop;
	ipc_port_t *shadowp;
	ipc_port_t *copyp;
{
	vm_object_info_t info;
	vm_object_info_state_t state;
	ipc_port_t shadow, copy;

	if (object == VM_OBJECT_NULL)
		return KERN_INVALID_ARGUMENT;

	/*
	 *	Because of lock-ordering/deadlock considerations,
	 *	we can't use vm_object_real_name for the copy object.
	 *	Instead, try-lock the copy while holding the object
	 *	lock, and back off completely (dropping both locks)
	 *	if the try fails.
	 */

    retry:
	vm_object_lock(object);
	copy = IP_NULL;
	if (object->copy != VM_OBJECT_NULL) {
		if (!vm_object_lock_try(object->copy)) {
			vm_object_unlock(object);
			simple_lock_pause();	/* wait a bit */
			goto retry;
		}

		if (object->copy->pager_name != IP_NULL)
			copy = ipc_port_make_send(object->copy->pager_name);
		vm_object_unlock(object->copy);
	}
	/* the shadow object takes the normal path
	   (see lock-ordering note above for why copy cannot) */
	shadow = vm_object_real_name(object->shadow);

	/* snapshot the object's fields while it is locked */
	info.voi_object = (vm_offset_t) object;
	info.voi_pagesize = PAGE_SIZE;
	info.voi_size = object->size;
	info.voi_ref_count = object->ref_count;
	info.voi_resident_page_count = object->resident_page_count;
	info.voi_absent_count = object->absent_count;
	info.voi_copy = (vm_offset_t) object->copy;
	info.voi_shadow = (vm_offset_t) object->shadow;
	info.voi_shadow_offset = object->shadow_offset;
	info.voi_paging_offset = object->paging_offset;
	info.voi_copy_strategy = object->copy_strategy;
	info.voi_last_alloc = object->last_alloc;
	info.voi_paging_in_progress = object->paging_in_progress;

	/* condense the object's boolean flags into a state bitmask */
	state = 0;
	if (object->pager_created)
		state |= VOI_STATE_PAGER_CREATED;
	if (object->pager_initialized)
		state |= VOI_STATE_PAGER_INITIALIZED;
	if (object->pager_ready)
		state |= VOI_STATE_PAGER_READY;
	if (object->can_persist)
		state |= VOI_STATE_CAN_PERSIST;
	if (object->internal)
		state |= VOI_STATE_INTERNAL;
	if (object->temporary)
		state |= VOI_STATE_TEMPORARY;
	if (object->alive)
		state |= VOI_STATE_ALIVE;
	if (object->lock_in_progress)
		state |= VOI_STATE_LOCK_IN_PROGRESS;
	if (object->lock_restart)
		state |= VOI_STATE_LOCK_RESTART;
	if (object->use_old_pageout)
		state |= VOI_STATE_USE_OLD_PAGEOUT;
	info.voi_state = state;
	vm_object_unlock(object);

	/* publish results only after all locks are dropped */
	*infop = info;
	*shadowp = shadow;
	*copyp = copy;
	return KERN_SUCCESS;
}
334
335 #define VPI_STATE_NODATA (VPI_STATE_BUSY|VPI_STATE_FICTITIOUS| \
336 VPI_STATE_PRIVATE|VPI_STATE_ABSENT)
337
338 /*
339 * Routine: mach_vm_object_pages [kernel call]
340 * Purpose:
341 * Retrieve information about the pages in a VM object.
342 * Conditions:
343 * Nothing locked. Obeys CountInOut protocol.
344 * Returns:
345 * KERN_SUCCESS Retrieved object info.
346 * KERN_INVALID_ARGUMENT The object is null.
347 * KERN_RESOURCE_SHORTAGE Couldn't allocate memory.
348 */
349
350 kern_return_t
351 mach_vm_object_pages(object, pagesp, countp)
352 vm_object_t object;
353 vm_page_info_array_t *pagesp;
354 natural_t *countp;
355 {
356 vm_size_t size;
357 vm_offset_t addr;
358 vm_page_info_t *pages;
359 unsigned int potential, actual, count;
360 vm_page_t p;
361 kern_return_t kr;
362
363 if (object == VM_OBJECT_NULL)
364 return KERN_INVALID_ARGUMENT;
365
366 /* start with in-line memory */
367
368 pages = *pagesp;
369 potential = *countp;
370
371 for (size = 0;;) {
372 vm_object_lock(object);
373 actual = object->resident_page_count;
374 if (actual <= potential)
375 break;
376 vm_object_unlock(object);
377
378 if (pages != *pagesp)
379 kmem_free(ipc_kernel_map, addr, size);
380
381 size = round_page(actual * sizeof *pages);
382 kr = kmem_alloc(ipc_kernel_map, &addr, size);
383 if (kr != KERN_SUCCESS)
384 return kr;
385
386 pages = (vm_page_info_t *) addr;
387 potential = size/sizeof *pages;
388 }
389 /* object is locked, we have enough wired memory */
390
391 count = 0;
392 queue_iterate(&object->memq, p, vm_page_t, listq) {
393 vm_page_info_t *info = &pages[count++];
394 vm_page_info_state_t state = 0;
395
396 info->vpi_offset = p->offset;
397 info->vpi_phys_addr = p->phys_addr;
398 info->vpi_wire_count = p->wire_count;
399 info->vpi_page_lock = p->page_lock;
400 info->vpi_unlock_request = p->unlock_request;
401
402 if (p->busy)
403 state |= VPI_STATE_BUSY;
404 if (p->wanted)
405 state |= VPI_STATE_WANTED;
406 if (p->tabled)
407 state |= VPI_STATE_TABLED;
408 if (p->fictitious)
409 state |= VPI_STATE_FICTITIOUS;
410 if (p->private)
411 state |= VPI_STATE_PRIVATE;
412 if (p->absent)
413 state |= VPI_STATE_ABSENT;
414 if (p->error)
415 state |= VPI_STATE_ERROR;
416 if (p->dirty)
417 state |= VPI_STATE_DIRTY;
418 if (p->precious)
419 state |= VPI_STATE_PRECIOUS;
420 if (p->overwriting)
421 state |= VPI_STATE_OVERWRITING;
422
423 if (((state & (VPI_STATE_NODATA|VPI_STATE_DIRTY)) == 0) &&
424 pmap_is_modified(p->phys_addr)) {
425 state |= VPI_STATE_DIRTY;
426 p->dirty = TRUE;
427 }
428
429 vm_page_lock_queues();
430 if (p->inactive)
431 state |= VPI_STATE_INACTIVE;
432 if (p->active)
433 state |= VPI_STATE_ACTIVE;
434 if (p->laundry)
435 state |= VPI_STATE_LAUNDRY;
436 if (p->free)
437 state |= VPI_STATE_FREE;
438 if (p->reference)
439 state |= VPI_STATE_REFERENCE;
440
441 if (((state & (VPI_STATE_NODATA|VPI_STATE_REFERENCE)) == 0) &&
442 pmap_is_referenced(p->phys_addr)) {
443 state |= VPI_STATE_REFERENCE;
444 p->reference = TRUE;
445 }
446 vm_page_unlock_queues();
447
448 info->vpi_state = state;
449 }
450
451 if (object->resident_page_count != count)
452 panic("mach_vm_object_pages");
453 vm_object_unlock(object);
454
455 if (pages == *pagesp) {
456 /* data fit in-line; nothing to deallocate */
457
458 *countp = actual;
459 } else if (actual == 0) {
460 kmem_free(ipc_kernel_map, addr, size);
461
462 *countp = 0;
463 } else {
464 vm_size_t size_used, rsize_used;
465 vm_map_copy_t copy;
466
467 /* kmem_alloc doesn't zero memory */
468
469 size_used = actual * sizeof *pages;
470 rsize_used = round_page(size_used);
471
472 if (rsize_used != size)
473 kmem_free(ipc_kernel_map,
474 addr + rsize_used, size - rsize_used);
475
476 if (size_used != rsize_used)
477 bzero((char *) (addr + size_used),
478 rsize_used - size_used);
479
480 kr = vm_map_copyin(ipc_kernel_map, addr, rsize_used,
481 TRUE, ©);
482 assert(kr == KERN_SUCCESS);
483
484 *pagesp = (vm_page_info_t *) copy;
485 *countp = actual;
486 }
487
488 return KERN_SUCCESS;
489 }
490
491 /*
492 * Routine: host_virtual_physical_table_info
493 * Purpose:
494 * Return information about the VP table.
495 * Conditions:
496 * Nothing locked. Obeys CountInOut protocol.
497 * Returns:
498 * KERN_SUCCESS Returned information.
499 * KERN_INVALID_HOST The host is null.
500 * KERN_RESOURCE_SHORTAGE Couldn't allocate memory.
501 */
502
503 kern_return_t
504 host_virtual_physical_table_info(host, infop, countp)
505 host_t host;
506 hash_info_bucket_array_t *infop;
507 natural_t *countp;
508 {
509 vm_offset_t addr;
510 vm_size_t size = 0;/* '=0' to quiet gcc warnings */
511 hash_info_bucket_t *info;
512 unsigned int potential, actual;
513 kern_return_t kr;
514
515 if (host == HOST_NULL)
516 return KERN_INVALID_HOST;
517
518 /* start with in-line data */
519
520 info = *infop;
521 potential = *countp;
522
523 for (;;) {
524 actual = vm_page_info(info, potential);
525 if (actual <= potential)
526 break;
527
528 /* allocate more memory */
529
530 if (info != *infop)
531 kmem_free(ipc_kernel_map, addr, size);
532
533 size = round_page(actual * sizeof *info);
534 kr = kmem_alloc_pageable(ipc_kernel_map, &addr, size);
535 if (kr != KERN_SUCCESS)
536 return KERN_RESOURCE_SHORTAGE;
537
538 info = (hash_info_bucket_t *) addr;
539 potential = size/sizeof *info;
540 }
541
542 if (info == *infop) {
543 /* data fit in-line; nothing to deallocate */
544
545 *countp = actual;
546 } else if (actual == 0) {
547 kmem_free(ipc_kernel_map, addr, size);
548
549 *countp = 0;
550 } else {
551 vm_map_copy_t copy;
552 vm_size_t used;
553
554 used = round_page(actual * sizeof *info);
555
556 if (used != size)
557 kmem_free(ipc_kernel_map, addr + used, size - used);
558
559 kr = vm_map_copyin(ipc_kernel_map, addr, used,
560 TRUE, ©);
561 assert(kr == KERN_SUCCESS);
562
563 *infop = (hash_info_bucket_t *) copy;
564 *countp = actual;
565 }
566
567 return KERN_SUCCESS;
568 }
Cache object: 31e4f3ac986b91a64bb4b9ce57ef1ab3
|