FreeBSD/Linux Kernel Cross Reference
sys/vm/vm_object.c
1 /*-
2 * Copyright (c) 1991, 1993
3 * The Regents of the University of California. All rights reserved.
4 *
5 * This code is derived from software contributed to Berkeley by
6 * The Mach Operating System project at Carnegie-Mellon University.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 4. Neither the name of the University nor the names of its contributors
17 * may be used to endorse or promote products derived from this software
18 * without specific prior written permission.
19 *
20 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
24 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30 * SUCH DAMAGE.
31 *
32 * from: @(#)vm_object.c 8.5 (Berkeley) 3/22/94
33 *
34 *
35 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
36 * All rights reserved.
37 *
38 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
39 *
40 * Permission to use, copy, modify and distribute this software and
41 * its documentation is hereby granted, provided that both the copyright
42 * notice and this permission notice appear in all copies of the
43 * software, derivative works or modified versions, and any portions
44 * thereof, and that both notices appear in supporting documentation.
45 *
46 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
47 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
48 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
49 *
50 * Carnegie Mellon requests users of this software to return to
51 *
52 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
53 * School of Computer Science
54 * Carnegie Mellon University
55 * Pittsburgh PA 15213-3890
56 *
57 * any improvements or extensions that they make and grant Carnegie the
58 * rights to redistribute these changes.
59 */
60
61 /*
62 * Virtual memory object module.
63 */
64
65 #include <sys/cdefs.h>
66 __FBSDID("$FreeBSD: releng/6.2/sys/vm/vm_object.c 162760 2006-09-29 04:37:22Z alc $");
67
68 #include <sys/param.h>
69 #include <sys/systm.h>
70 #include <sys/lock.h>
71 #include <sys/mman.h>
72 #include <sys/mount.h>
73 #include <sys/kernel.h>
74 #include <sys/sysctl.h>
75 #include <sys/mutex.h>
76 #include <sys/proc.h> /* for curproc, pageproc */
77 #include <sys/socket.h>
78 #include <sys/vnode.h>
79 #include <sys/vmmeter.h>
80 #include <sys/sx.h>
81
82 #include <vm/vm.h>
83 #include <vm/vm_param.h>
84 #include <vm/pmap.h>
85 #include <vm/vm_map.h>
86 #include <vm/vm_object.h>
87 #include <vm/vm_page.h>
88 #include <vm/vm_pageout.h>
89 #include <vm/vm_pager.h>
90 #include <vm/swap_pager.h>
91 #include <vm/vm_kern.h>
92 #include <vm/vm_extern.h>
93 #include <vm/uma.h>
94
95 #define EASY_SCAN_FACTOR 8
96
97 #define MSYNC_FLUSH_HARDSEQ 0x01
98 #define MSYNC_FLUSH_SOFTSEQ 0x02
99
100 /*
101 * msync / VM object flushing optimizations
102 */
103 static int msync_flush_flags = MSYNC_FLUSH_HARDSEQ | MSYNC_FLUSH_SOFTSEQ;
104 SYSCTL_INT(_vm, OID_AUTO, msync_flush_flags,
105 CTLFLAG_RW, &msync_flush_flags, 0, "");
106
107 static int old_msync;
108 SYSCTL_INT(_vm, OID_AUTO, old_msync, CTLFLAG_RW, &old_msync, 0,
109 "Use old (insecure) msync behavior");
110
111 static void vm_object_qcollapse(vm_object_t object);
112 static int vm_object_page_collect_flush(vm_object_t object, vm_page_t p, int curgeneration, int pagerflags);
113
114 /*
115 * Virtual memory objects maintain the actual data
116 * associated with allocated virtual memory. A given
117 * page of memory exists within exactly one object.
118 *
119 * An object is only deallocated when all "references"
120 * are given up. Only one "reference" to a given
121 * region of an object should be writeable.
122 *
123 * Associated with each object is a list of all resident
124 * memory pages belonging to that object; this list is
125 * maintained by the "vm_page" module, and locked by the object's
126 * lock.
127 *
128 * Each object also records a "pager" routine which is
129 * used to retrieve (and store) pages to the proper backing
130 * storage. In addition, objects may be backed by other
131 * objects from which they were virtual-copied.
132 *
133 * The only items within the object structure which are
134 * modified after time of creation are:
135 * reference count locked by object's lock
136 * pager routine locked by object's lock
137 *
138 */
139
140 struct object_q vm_object_list;
141 struct mtx vm_object_list_mtx; /* lock for object list and count */
142
143 struct vm_object kernel_object_store;
144 struct vm_object kmem_object_store;
145
146 static long object_collapses;
147 static long object_bypasses;
148
149 /*
150 * next_index determines the page color that is assigned to the next
151 * allocated object. Accesses to next_index are not synchronized
152 * because the effects of two or more object allocations using
153 * next_index simultaneously are inconsequential. At any given time,
154 * numerous objects have the same page color.
155 */
156 static int next_index;
157
158 static uma_zone_t obj_zone;
159 #define VM_OBJECTS_INIT 256
160
161 static int vm_object_zinit(void *mem, int size, int flags);
162
163 #ifdef INVARIANTS
164 static void vm_object_zdtor(void *mem, int size, void *arg);
165
166 static void
167 vm_object_zdtor(void *mem, int size, void *arg)
168 {
169 vm_object_t object;
170
171 object = (vm_object_t)mem;
172 KASSERT(TAILQ_EMPTY(&object->memq),
173 ("object %p has resident pages",
174 object));
175 KASSERT(object->paging_in_progress == 0,
176 ("object %p paging_in_progress = %d",
177 object, object->paging_in_progress));
178 KASSERT(object->resident_page_count == 0,
179 ("object %p resident_page_count = %d",
180 object, object->resident_page_count));
181 KASSERT(object->shadow_count == 0,
182 ("object %p shadow_count = %d",
183 object, object->shadow_count));
184 }
185 #endif
186
187 static int
188 vm_object_zinit(void *mem, int size, int flags)
189 {
190 vm_object_t object;
191
192 object = (vm_object_t)mem;
193 bzero(&object->mtx, sizeof(object->mtx));
194 VM_OBJECT_LOCK_INIT(object, "standard object");
195
196 /* These are true for any object that has been freed */
197 object->paging_in_progress = 0;
198 object->resident_page_count = 0;
199 object->shadow_count = 0;
200 return (0);
201 }
202
203 void
204 _vm_object_allocate(objtype_t type, vm_pindex_t size, vm_object_t object)
205 {
206 int incr;
207
208 TAILQ_INIT(&object->memq);
209 LIST_INIT(&object->shadow_head);
210
211 object->root = NULL;
212 object->type = type;
213 object->size = size;
214 object->generation = 1;
215 object->ref_count = 1;
216 object->flags = 0;
217 if ((object->type == OBJT_DEFAULT) || (object->type == OBJT_SWAP))
218 object->flags = OBJ_ONEMAPPING;
219 if (size > (PQ_L2_SIZE / 3 + PQ_PRIME1))
220 incr = PQ_L2_SIZE / 3 + PQ_PRIME1;
221 else
222 incr = size;
223 object->pg_color = next_index;
224 next_index = (object->pg_color + incr) & PQ_L2_MASK;
225 object->handle = NULL;
226 object->backing_object = NULL;
227 object->backing_object_offset = (vm_ooffset_t) 0;
228
229 mtx_lock(&vm_object_list_mtx);
230 TAILQ_INSERT_TAIL(&vm_object_list, object, object_list);
231 mtx_unlock(&vm_object_list_mtx);
232 }
233
234 /*
235 * vm_object_init:
236 *
237 * Initialize the VM objects module.
238 */
239 void
240 vm_object_init(void)
241 {
242 TAILQ_INIT(&vm_object_list);
243 mtx_init(&vm_object_list_mtx, "vm object_list", NULL, MTX_DEF);
244
245 VM_OBJECT_LOCK_INIT(&kernel_object_store, "kernel object");
246 _vm_object_allocate(OBJT_DEFAULT, OFF_TO_IDX(VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS),
247 kernel_object);
248
249 VM_OBJECT_LOCK_INIT(&kmem_object_store, "kmem object");
250 _vm_object_allocate(OBJT_DEFAULT, OFF_TO_IDX(VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS),
251 kmem_object);
252
253 /*
254 * The lock portion of struct vm_object must be type stable due
255 * to vm_pageout_fallback_object_lock locking a vm object
256 * without holding any references to it.
257 */
258 obj_zone = uma_zcreate("VM OBJECT", sizeof (struct vm_object), NULL,
259 #ifdef INVARIANTS
260 vm_object_zdtor,
261 #else
262 NULL,
263 #endif
264 vm_object_zinit, NULL, UMA_ALIGN_PTR, UMA_ZONE_VM|UMA_ZONE_NOFREE);
265 uma_prealloc(obj_zone, VM_OBJECTS_INIT);
266 }
267
268 void
269 vm_object_clear_flag(vm_object_t object, u_short bits)
270 {
271
272 VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
273 object->flags &= ~bits;
274 }
275
276 void
277 vm_object_pip_add(vm_object_t object, short i)
278 {
279
280 VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
281 object->paging_in_progress += i;
282 }
283
284 void
285 vm_object_pip_subtract(vm_object_t object, short i)
286 {
287
288 VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
289 object->paging_in_progress -= i;
290 }
291
292 void
293 vm_object_pip_wakeup(vm_object_t object)
294 {
295
296 VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
297 object->paging_in_progress--;
298 if ((object->flags & OBJ_PIPWNT) && object->paging_in_progress == 0) {
299 vm_object_clear_flag(object, OBJ_PIPWNT);
300 wakeup(object);
301 }
302 }
303
304 void
305 vm_object_pip_wakeupn(vm_object_t object, short i)
306 {
307
308 VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
309 if (i)
310 object->paging_in_progress -= i;
311 if ((object->flags & OBJ_PIPWNT) && object->paging_in_progress == 0) {
312 vm_object_clear_flag(object, OBJ_PIPWNT);
313 wakeup(object);
314 }
315 }
316
317 void
318 vm_object_pip_wait(vm_object_t object, char *waitid)
319 {
320
321 VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
322 while (object->paging_in_progress) {
323 object->flags |= OBJ_PIPWNT;
324 msleep(object, VM_OBJECT_MTX(object), PVM, waitid, 0);
325 }
326 }
327
328 /*
329 * vm_object_allocate:
330 *
331 * Returns a new object with the given size.
332 */
333 vm_object_t
334 vm_object_allocate(objtype_t type, vm_pindex_t size)
335 {
336 vm_object_t object;
337
338 object = (vm_object_t)uma_zalloc(obj_zone, M_WAITOK);
339 _vm_object_allocate(type, size, object);
340 return (object);
341 }
342
343
344 /*
345 * vm_object_reference:
346 *
347 * Gets another reference to the given object. Note: OBJ_DEAD
348 * objects can be referenced during final cleaning.
349 */
350 void
351 vm_object_reference(vm_object_t object)
352 {
353 struct vnode *vp;
354
355 if (object == NULL)
356 return;
357 VM_OBJECT_LOCK(object);
358 object->ref_count++;
359 if (object->type == OBJT_VNODE) {
360 int vfslocked;
361
362 vp = object->handle;
363 VM_OBJECT_UNLOCK(object);
364 vfslocked = VFS_LOCK_GIANT(vp->v_mount);
365 vget(vp, LK_RETRY, curthread);
366 VFS_UNLOCK_GIANT(vfslocked);
367 } else
368 VM_OBJECT_UNLOCK(object);
369 }
370
371 /*
372 * vm_object_reference_locked:
373 *
374 * Gets another reference to the given object.
375 *
376 * The object must be locked.
377 */
378 void
379 vm_object_reference_locked(vm_object_t object)
380 {
381 struct vnode *vp;
382
383 VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
384 KASSERT((object->flags & OBJ_DEAD) == 0,
385 ("vm_object_reference_locked: dead object referenced"));
386 object->ref_count++;
387 if (object->type == OBJT_VNODE) {
388 vp = object->handle;
389 vref(vp);
390 }
391 }
392
393 /*
394 * Handle deallocating an object of type OBJT_VNODE.
395 */
396 void
397 vm_object_vndeallocate(vm_object_t object)
398 {
399 struct vnode *vp = (struct vnode *) object->handle;
400
401 VFS_ASSERT_GIANT(vp->v_mount);
402 VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
403 KASSERT(object->type == OBJT_VNODE,
404 ("vm_object_vndeallocate: not a vnode object"));
405 KASSERT(vp != NULL, ("vm_object_vndeallocate: missing vp"));
406 #ifdef INVARIANTS
407 if (object->ref_count == 0) {
408 vprint("vm_object_vndeallocate", vp);
409 panic("vm_object_vndeallocate: bad object reference count");
410 }
411 #endif
412
413 object->ref_count--;
414 if (object->ref_count == 0) {
415 mp_fixme("Unlocked vflag access.");
416 vp->v_vflag &= ~VV_TEXT;
417 }
418 VM_OBJECT_UNLOCK(object);
419 /*
420 * vrele may need a vop lock
421 */
422 vrele(vp);
423 }
424
425 /*
426 * vm_object_deallocate:
427 *
428 * Release a reference to the specified object,
429 * gained either through a vm_object_allocate
430 * or a vm_object_reference call. When all references
431 * are gone, storage associated with this object
432 * may be relinquished.
433 *
434 * No object may be locked.
435 */
436 void
437 vm_object_deallocate(vm_object_t object)
438 {
439 vm_object_t temp;
440
441 while (object != NULL) {
442 int vfslocked;
443
444 vfslocked = 0;
445 restart:
446 VM_OBJECT_LOCK(object);
447 if (object->type == OBJT_VNODE) {
448 struct vnode *vp = (struct vnode *) object->handle;
449
450 /*
451 * Conditionally acquire Giant for a vnode-backed
452 * object. We have to be careful since the type of
453 * a vnode object can change while the object is
454 * unlocked.
455 */
456 if (VFS_NEEDSGIANT(vp->v_mount) && !vfslocked) {
457 vfslocked = 1;
458 if (!mtx_trylock(&Giant)) {
459 VM_OBJECT_UNLOCK(object);
460 mtx_lock(&Giant);
461 goto restart;
462 }
463 }
464 vm_object_vndeallocate(object);
465 VFS_UNLOCK_GIANT(vfslocked);
466 return;
467 } else
468 /*
469 * This is to handle the case that the object
470 * changed type while we dropped its lock to
471 * obtain Giant.
472 */
473 VFS_UNLOCK_GIANT(vfslocked);
474
475 KASSERT(object->ref_count != 0,
476 ("vm_object_deallocate: object deallocated too many times: %d", object->type));
477
478 /*
479 * If the reference count goes to 0 we start calling
480 * vm_object_terminate() on the object chain.
481 * A ref count of 1 may be a special case depending on the
482 * shadow count being 0 or 1.
483 */
484 object->ref_count--;
485 if (object->ref_count > 1) {
486 VM_OBJECT_UNLOCK(object);
487 return;
488 } else if (object->ref_count == 1) {
489 if (object->shadow_count == 0) {
490 vm_object_set_flag(object, OBJ_ONEMAPPING);
491 } else if ((object->shadow_count == 1) &&
492 (object->handle == NULL) &&
493 (object->type == OBJT_DEFAULT ||
494 object->type == OBJT_SWAP)) {
495 vm_object_t robject;
496
497 robject = LIST_FIRST(&object->shadow_head);
498 KASSERT(robject != NULL,
499 ("vm_object_deallocate: ref_count: %d, shadow_count: %d",
500 object->ref_count,
501 object->shadow_count));
502 if (!VM_OBJECT_TRYLOCK(robject)) {
503 /*
504 * Avoid a potential deadlock.
505 */
506 object->ref_count++;
507 VM_OBJECT_UNLOCK(object);
508 /*
509 * More likely than not the thread
510 * holding robject's lock has lower
511 * priority than the current thread.
512 * Let the lower priority thread run.
513 */
514 tsleep(&proc0, PVM, "vmo_de", 1);
515 continue;
516 }
517 /*
518 * Collapse object into its shadow unless its
519 * shadow is dead. In that case, object will
520 * be deallocated by the thread that is
521 * deallocating its shadow.
522 */
523 if ((robject->flags & OBJ_DEAD) == 0 &&
524 (robject->handle == NULL) &&
525 (robject->type == OBJT_DEFAULT ||
526 robject->type == OBJT_SWAP)) {
527
528 robject->ref_count++;
529 retry:
530 if (robject->paging_in_progress) {
531 VM_OBJECT_UNLOCK(object);
532 vm_object_pip_wait(robject,
533 "objde1");
534 temp = robject->backing_object;
535 if (object == temp) {
536 VM_OBJECT_LOCK(object);
537 goto retry;
538 }
539 } else if (object->paging_in_progress) {
540 VM_OBJECT_UNLOCK(robject);
541 object->flags |= OBJ_PIPWNT;
542 msleep(object,
543 VM_OBJECT_MTX(object),
544 PDROP | PVM, "objde2", 0);
545 VM_OBJECT_LOCK(robject);
546 temp = robject->backing_object;
547 if (object == temp) {
548 VM_OBJECT_LOCK(object);
549 goto retry;
550 }
551 } else
552 VM_OBJECT_UNLOCK(object);
553
554 if (robject->ref_count == 1) {
555 robject->ref_count--;
556 object = robject;
557 goto doterm;
558 }
559 object = robject;
560 vm_object_collapse(object);
561 VM_OBJECT_UNLOCK(object);
562 continue;
563 }
564 VM_OBJECT_UNLOCK(robject);
565 }
566 VM_OBJECT_UNLOCK(object);
567 return;
568 }
569 doterm:
570 temp = object->backing_object;
571 if (temp != NULL) {
572 VM_OBJECT_LOCK(temp);
573 LIST_REMOVE(object, shadow_list);
574 temp->shadow_count--;
575 temp->generation++;
576 VM_OBJECT_UNLOCK(temp);
577 object->backing_object = NULL;
578 }
579 /*
580 * Don't double-terminate, we could be in a termination
581 * recursion due to the terminate having to sync data
582 * to disk.
583 */
584 if ((object->flags & OBJ_DEAD) == 0)
585 vm_object_terminate(object);
586 else
587 VM_OBJECT_UNLOCK(object);
588 object = temp;
589 }
590 }
591
592 /*
593 * vm_object_terminate actually destroys the specified object, freeing
594 * up all previously used resources.
595 *
596 * The object must be locked.
597 * This routine may block.
598 */
599 void
600 vm_object_terminate(vm_object_t object)
601 {
602 vm_page_t p;
603
604 VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
605
606 /*
607 * Make sure no one uses us.
608 */
609 vm_object_set_flag(object, OBJ_DEAD);
610
611 /*
612 * wait for the pageout daemon to be done with the object
613 */
614 vm_object_pip_wait(object, "objtrm");
615
616 KASSERT(!object->paging_in_progress,
617 ("vm_object_terminate: pageout in progress"));
618
619 /*
620 * Clean and free the pages, as appropriate. All references to the
621 * object are gone, so we don't need to lock it.
622 */
623 if (object->type == OBJT_VNODE) {
624 struct vnode *vp = (struct vnode *)object->handle;
625
626 /*
627 * Clean pages and flush buffers.
628 */
629 vm_object_page_clean(object, 0, 0, OBJPC_SYNC);
630 VM_OBJECT_UNLOCK(object);
631
632 vinvalbuf(vp, V_SAVE, NULL, 0, 0);
633
634 VM_OBJECT_LOCK(object);
635 }
636
637 KASSERT(object->ref_count == 0,
638 ("vm_object_terminate: object with references, ref_count=%d",
639 object->ref_count));
640
641 /*
642 * Now free any remaining pages. For internal objects, this also
643 * removes them from paging queues. Don't free wired pages, just
644 * remove them from the object.
645 */
646 vm_page_lock_queues();
647 while ((p = TAILQ_FIRST(&object->memq)) != NULL) {
648 KASSERT(!p->busy && (p->flags & PG_BUSY) == 0,
649 ("vm_object_terminate: freeing busy page %p "
650 "p->busy = %d, p->flags %x\n", p, p->busy, p->flags));
651 if (p->wire_count == 0) {
652 vm_page_free(p);
653 cnt.v_pfree++;
654 } else {
655 vm_page_remove(p);
656 }
657 }
658 vm_page_unlock_queues();
659
660 /*
661 * Let the pager know object is dead.
662 */
663 vm_pager_deallocate(object);
664 VM_OBJECT_UNLOCK(object);
665
666 /*
667 * Remove the object from the global object list.
668 */
669 mtx_lock(&vm_object_list_mtx);
670 TAILQ_REMOVE(&vm_object_list, object, object_list);
671 mtx_unlock(&vm_object_list_mtx);
672
673 /*
674 * Free the space for the object.
675 */
676 uma_zfree(obj_zone, object);
677 }
678
679 /*
680 * vm_object_page_clean
681 *
682 * Clean all dirty pages in the specified range of object. Leaves page
683 * on whatever queue it is currently on. If NOSYNC is set then do not
684 * write out pages with PG_NOSYNC set (originally comes from MAP_NOSYNC),
685 * leaving the object dirty.
686 *
687 * When stuffing pages asynchronously, allow clustering. XXX we need a
688 * synchronous clustering mode implementation.
689 *
690 * Odd semantics: if start == end, we clean everything.
691 *
692 * The object must be locked.
693 */
694 void
695 vm_object_page_clean(vm_object_t object, vm_pindex_t start, vm_pindex_t end, int flags)
696 {
697 vm_page_t p, np;
698 vm_pindex_t tstart, tend;
699 vm_pindex_t pi;
700 int clearobjflags;
701 int pagerflags;
702 int curgeneration;
703
704 VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
705 if (object->type != OBJT_VNODE ||
706 (object->flags & OBJ_MIGHTBEDIRTY) == 0)
707 return;
708
709 pagerflags = (flags & (OBJPC_SYNC | OBJPC_INVAL)) ? VM_PAGER_PUT_SYNC : VM_PAGER_CLUSTER_OK;
710 pagerflags |= (flags & OBJPC_INVAL) ? VM_PAGER_PUT_INVAL : 0;
711
712 vm_object_set_flag(object, OBJ_CLEANING);
713
714 tstart = start;
715 if (end == 0) {
716 tend = object->size;
717 } else {
718 tend = end;
719 }
720
721 vm_page_lock_queues();
722 /*
723 * If the caller is smart and only msync()s a range he knows is
724 * dirty, we may be able to avoid an object scan. This results in
725 * a phenominal improvement in performance. We cannot do this
726 * as a matter of course because the object may be huge - e.g.
727 * the size might be in the gigabytes or terrabytes.
728 */
729 if (msync_flush_flags & MSYNC_FLUSH_HARDSEQ) {
730 vm_pindex_t tscan;
731 int scanlimit;
732 int scanreset;
733
734 scanreset = object->resident_page_count / EASY_SCAN_FACTOR;
735 if (scanreset < 16)
736 scanreset = 16;
737 pagerflags |= VM_PAGER_IGNORE_CLEANCHK;
738
739 scanlimit = scanreset;
740 tscan = tstart;
741 while (tscan < tend) {
742 curgeneration = object->generation;
743 p = vm_page_lookup(object, tscan);
744 if (p == NULL || p->valid == 0 ||
745 (p->queue - p->pc) == PQ_CACHE) {
746 if (--scanlimit == 0)
747 break;
748 ++tscan;
749 continue;
750 }
751 vm_page_test_dirty(p);
752 if ((p->dirty & p->valid) == 0) {
753 if (--scanlimit == 0)
754 break;
755 ++tscan;
756 continue;
757 }
758 /*
759 * If we have been asked to skip nosync pages and
760 * this is a nosync page, we can't continue.
761 */
762 if ((flags & OBJPC_NOSYNC) && (p->flags & PG_NOSYNC)) {
763 if (--scanlimit == 0)
764 break;
765 ++tscan;
766 continue;
767 }
768 scanlimit = scanreset;
769
770 /*
771 * This returns 0 if it was unable to busy the first
772 * page (i.e. had to sleep).
773 */
774 tscan += vm_object_page_collect_flush(object, p, curgeneration, pagerflags);
775 }
776
777 /*
778 * If everything was dirty and we flushed it successfully,
779 * and the requested range is not the entire object, we
780 * don't have to mess with CLEANCHK or MIGHTBEDIRTY and can
781 * return immediately.
782 */
783 if (tscan >= tend && (tstart || tend < object->size)) {
784 vm_page_unlock_queues();
785 vm_object_clear_flag(object, OBJ_CLEANING);
786 return;
787 }
788 pagerflags &= ~VM_PAGER_IGNORE_CLEANCHK;
789 }
790
791 /*
792 * Generally set CLEANCHK interlock and make the page read-only so
793 * we can then clear the object flags.
794 *
795 * However, if this is a nosync mmap then the object is likely to
796 * stay dirty so do not mess with the page and do not clear the
797 * object flags.
798 */
799 clearobjflags = 1;
800 TAILQ_FOREACH(p, &object->memq, listq) {
801 vm_page_flag_set(p, PG_CLEANCHK);
802 if ((flags & OBJPC_NOSYNC) && (p->flags & PG_NOSYNC))
803 clearobjflags = 0;
804 else
805 pmap_page_protect(p, VM_PROT_READ);
806 }
807
808 if (clearobjflags && (tstart == 0) && (tend == object->size)) {
809 struct vnode *vp;
810
811 vm_object_clear_flag(object, OBJ_WRITEABLE|OBJ_MIGHTBEDIRTY);
812 if (object->type == OBJT_VNODE &&
813 (vp = (struct vnode *)object->handle) != NULL) {
814 VI_LOCK(vp);
815 if (vp->v_iflag & VI_OBJDIRTY)
816 vp->v_iflag &= ~VI_OBJDIRTY;
817 VI_UNLOCK(vp);
818 }
819 }
820
821 rescan:
822 curgeneration = object->generation;
823
824 for (p = TAILQ_FIRST(&object->memq); p; p = np) {
825 int n;
826
827 np = TAILQ_NEXT(p, listq);
828
829 again:
830 pi = p->pindex;
831 if (((p->flags & PG_CLEANCHK) == 0) ||
832 (pi < tstart) || (pi >= tend) ||
833 (p->valid == 0) ||
834 ((p->queue - p->pc) == PQ_CACHE)) {
835 vm_page_flag_clear(p, PG_CLEANCHK);
836 continue;
837 }
838
839 vm_page_test_dirty(p);
840 if ((p->dirty & p->valid) == 0) {
841 vm_page_flag_clear(p, PG_CLEANCHK);
842 continue;
843 }
844
845 /*
846 * If we have been asked to skip nosync pages and this is a
847 * nosync page, skip it. Note that the object flags were
848 * not cleared in this case so we do not have to set them.
849 */
850 if ((flags & OBJPC_NOSYNC) && (p->flags & PG_NOSYNC)) {
851 vm_page_flag_clear(p, PG_CLEANCHK);
852 continue;
853 }
854
855 n = vm_object_page_collect_flush(object, p,
856 curgeneration, pagerflags);
857 if (n == 0)
858 goto rescan;
859
860 if (object->generation != curgeneration)
861 goto rescan;
862
863 /*
864 * Try to optimize the next page. If we can't we pick up
865 * our (random) scan where we left off.
866 */
867 if (msync_flush_flags & MSYNC_FLUSH_SOFTSEQ) {
868 if ((p = vm_page_lookup(object, pi + n)) != NULL)
869 goto again;
870 }
871 }
872 vm_page_unlock_queues();
873 #if 0
874 VOP_FSYNC(vp, (pagerflags & VM_PAGER_PUT_SYNC)?MNT_WAIT:0, curproc);
875 #endif
876
877 vm_object_clear_flag(object, OBJ_CLEANING);
878 return;
879 }
880
881 static int
882 vm_object_page_collect_flush(vm_object_t object, vm_page_t p, int curgeneration, int pagerflags)
883 {
884 int runlen;
885 int maxf;
886 int chkb;
887 int maxb;
888 int i;
889 vm_pindex_t pi;
890 vm_page_t maf[vm_pageout_page_count];
891 vm_page_t mab[vm_pageout_page_count];
892 vm_page_t ma[vm_pageout_page_count];
893
894 mtx_assert(&vm_page_queue_mtx, MA_OWNED);
895 pi = p->pindex;
896 while (vm_page_sleep_if_busy(p, TRUE, "vpcwai")) {
897 vm_page_lock_queues();
898 if (object->generation != curgeneration) {
899 return(0);
900 }
901 }
902 maxf = 0;
903 for(i = 1; i < vm_pageout_page_count; i++) {
904 vm_page_t tp;
905
906 if ((tp = vm_page_lookup(object, pi + i)) != NULL) {
907 if ((tp->flags & PG_BUSY) ||
908 ((pagerflags & VM_PAGER_IGNORE_CLEANCHK) == 0 &&
909 (tp->flags & PG_CLEANCHK) == 0) ||
910 (tp->busy != 0))
911 break;
912 if((tp->queue - tp->pc) == PQ_CACHE) {
913 vm_page_flag_clear(tp, PG_CLEANCHK);
914 break;
915 }
916 vm_page_test_dirty(tp);
917 if ((tp->dirty & tp->valid) == 0) {
918 vm_page_flag_clear(tp, PG_CLEANCHK);
919 break;
920 }
921 maf[ i - 1 ] = tp;
922 maxf++;
923 continue;
924 }
925 break;
926 }
927
928 maxb = 0;
929 chkb = vm_pageout_page_count - maxf;
930 if (chkb) {
931 for(i = 1; i < chkb;i++) {
932 vm_page_t tp;
933
934 if ((tp = vm_page_lookup(object, pi - i)) != NULL) {
935 if ((tp->flags & PG_BUSY) ||
936 ((pagerflags & VM_PAGER_IGNORE_CLEANCHK) == 0 &&
937 (tp->flags & PG_CLEANCHK) == 0) ||
938 (tp->busy != 0))
939 break;
940 if ((tp->queue - tp->pc) == PQ_CACHE) {
941 vm_page_flag_clear(tp, PG_CLEANCHK);
942 break;
943 }
944 vm_page_test_dirty(tp);
945 if ((tp->dirty & tp->valid) == 0) {
946 vm_page_flag_clear(tp, PG_CLEANCHK);
947 break;
948 }
949 mab[ i - 1 ] = tp;
950 maxb++;
951 continue;
952 }
953 break;
954 }
955 }
956
957 for(i = 0; i < maxb; i++) {
958 int index = (maxb - i) - 1;
959 ma[index] = mab[i];
960 vm_page_flag_clear(ma[index], PG_CLEANCHK);
961 }
962 vm_page_flag_clear(p, PG_CLEANCHK);
963 ma[maxb] = p;
964 for(i = 0; i < maxf; i++) {
965 int index = (maxb + i) + 1;
966 ma[index] = maf[i];
967 vm_page_flag_clear(ma[index], PG_CLEANCHK);
968 }
969 runlen = maxb + maxf + 1;
970
971 vm_pageout_flush(ma, runlen, pagerflags);
972 for (i = 0; i < runlen; i++) {
973 if (ma[i]->valid & ma[i]->dirty) {
974 pmap_page_protect(ma[i], VM_PROT_READ);
975 vm_page_flag_set(ma[i], PG_CLEANCHK);
976
977 /*
978 * maxf will end up being the actual number of pages
979 * we wrote out contiguously, non-inclusive of the
980 * first page. We do not count look-behind pages.
981 */
982 if (i >= maxb + 1 && (maxf > i - maxb - 1))
983 maxf = i - maxb - 1;
984 }
985 }
986 return(maxf + 1);
987 }
988
989 /*
990 * Note that there is absolutely no sense in writing out
991 * anonymous objects, so we track down the vnode object
992 * to write out.
993 * We invalidate (remove) all pages from the address space
994 * for semantic correctness.
995 *
996 * Note: certain anonymous maps, such as MAP_NOSYNC maps,
997 * may start out with a NULL object.
998 */
999 void
1000 vm_object_sync(vm_object_t object, vm_ooffset_t offset, vm_size_t size,
1001 boolean_t syncio, boolean_t invalidate)
1002 {
1003 vm_object_t backing_object;
1004 struct vnode *vp;
1005 struct mount *mp;
1006 int flags;
1007
1008 if (object == NULL)
1009 return;
1010 VM_OBJECT_LOCK(object);
1011 while ((backing_object = object->backing_object) != NULL) {
1012 VM_OBJECT_LOCK(backing_object);
1013 offset += object->backing_object_offset;
1014 VM_OBJECT_UNLOCK(object);
1015 object = backing_object;
1016 if (object->size < OFF_TO_IDX(offset + size))
1017 size = IDX_TO_OFF(object->size) - offset;
1018 }
1019 /*
1020 * Flush pages if writing is allowed, invalidate them
1021 * if invalidation requested. Pages undergoing I/O
1022 * will be ignored by vm_object_page_remove().
1023 *
1024 * We cannot lock the vnode and then wait for paging
1025 * to complete without deadlocking against vm_fault.
1026 * Instead we simply call vm_object_page_remove() and
1027 * allow it to block internally on a page-by-page
1028 * basis when it encounters pages undergoing async
1029 * I/O.
1030 */
1031 if (object->type == OBJT_VNODE &&
1032 (object->flags & OBJ_MIGHTBEDIRTY) != 0) {
1033 int vfslocked;
1034 vp = object->handle;
1035 VM_OBJECT_UNLOCK(object);
1036 (void) vn_start_write(vp, &mp, V_WAIT);
1037 vfslocked = VFS_LOCK_GIANT(vp->v_mount);
1038 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, curthread);
1039 flags = (syncio || invalidate) ? OBJPC_SYNC : 0;
1040 flags |= invalidate ? OBJPC_INVAL : 0;
1041 VM_OBJECT_LOCK(object);
1042 vm_object_page_clean(object,
1043 OFF_TO_IDX(offset),
1044 OFF_TO_IDX(offset + size + PAGE_MASK),
1045 flags);
1046 VM_OBJECT_UNLOCK(object);
1047 VOP_UNLOCK(vp, 0, curthread);
1048 VFS_UNLOCK_GIANT(vfslocked);
1049 vn_finished_write(mp);
1050 VM_OBJECT_LOCK(object);
1051 }
1052 if ((object->type == OBJT_VNODE ||
1053 object->type == OBJT_DEVICE) && invalidate) {
1054 boolean_t purge;
1055 purge = old_msync || (object->type == OBJT_DEVICE);
1056 vm_object_page_remove(object,
1057 OFF_TO_IDX(offset),
1058 OFF_TO_IDX(offset + size + PAGE_MASK),
1059 purge ? FALSE : TRUE);
1060 }
1061 VM_OBJECT_UNLOCK(object);
1062 }
1063
1064 /*
1065 * vm_object_madvise:
1066 *
1067 * Implements the madvise function at the object/page level.
1068 *
1069 * MADV_WILLNEED (any object)
1070 *
1071 * Activate the specified pages if they are resident.
1072 *
1073 * MADV_DONTNEED (any object)
1074 *
1075 * Deactivate the specified pages if they are resident.
1076 *
1077 * MADV_FREE (OBJT_DEFAULT/OBJT_SWAP objects,
1078 * OBJ_ONEMAPPING only)
1079 *
1080 * Deactivate and clean the specified pages if they are
1081 * resident. This permits the process to reuse the pages
1082 * without faulting or the kernel to reclaim the pages
1083 * without I/O.
1084 */
1085 void
1086 vm_object_madvise(vm_object_t object, vm_pindex_t pindex, int count, int advise)
1087 {
1088 vm_pindex_t end, tpindex;
1089 vm_object_t backing_object, tobject;
1090 vm_page_t m;
1091
1092 if (object == NULL)
1093 return;
1094 VM_OBJECT_LOCK(object);
1095 end = pindex + count;
1096 /*
1097 * Locate and adjust resident pages
1098 */
1099 for (; pindex < end; pindex += 1) {
1100 relookup:
1101 tobject = object;
1102 tpindex = pindex;
1103 shadowlookup:
1104 /*
1105 * MADV_FREE only operates on OBJT_DEFAULT or OBJT_SWAP pages
1106 * and those pages must be OBJ_ONEMAPPING.
1107 */
1108 if (advise == MADV_FREE) {
1109 if ((tobject->type != OBJT_DEFAULT &&
1110 tobject->type != OBJT_SWAP) ||
1111 (tobject->flags & OBJ_ONEMAPPING) == 0) {
1112 goto unlock_tobject;
1113 }
1114 }
1115 m = vm_page_lookup(tobject, tpindex);
1116 if (m == NULL) {
1117 /*
1118 * There may be swap even if there is no backing page
1119 */
1120 if (advise == MADV_FREE && tobject->type == OBJT_SWAP)
1121 swap_pager_freespace(tobject, tpindex, 1);
1122 /*
1123 * next object
1124 */
1125 backing_object = tobject->backing_object;
1126 if (backing_object == NULL)
1127 goto unlock_tobject;
1128 VM_OBJECT_LOCK(backing_object);
1129 tpindex += OFF_TO_IDX(tobject->backing_object_offset);
1130 if (tobject != object)
1131 VM_OBJECT_UNLOCK(tobject);
1132 tobject = backing_object;
1133 goto shadowlookup;
1134 }
1135 /*
1136 * If the page is busy or not in a normal active state,
1137 * we skip it. If the page is not managed there are no
1138 * page queues to mess with. Things can break if we mess
1139 * with pages in any of the below states.
1140 */
1141 vm_page_lock_queues();
1142 if (m->hold_count ||
1143 m->wire_count ||
1144 (m->flags & PG_UNMANAGED) ||
1145 m->valid != VM_PAGE_BITS_ALL) {
1146 vm_page_unlock_queues();
1147 goto unlock_tobject;
1148 }
1149 if ((m->flags & PG_BUSY) || m->busy) {
1150 vm_page_flag_set(m, PG_WANTED | PG_REFERENCED);
1151 if (object != tobject)
1152 VM_OBJECT_UNLOCK(object);
1153 VM_OBJECT_UNLOCK(tobject);
1154 msleep(m, &vm_page_queue_mtx, PDROP | PVM, "madvpo", 0);
1155 VM_OBJECT_LOCK(object);
1156 goto relookup;
1157 }
1158 if (advise == MADV_WILLNEED) {
1159 vm_page_activate(m);
1160 } else if (advise == MADV_DONTNEED) {
1161 vm_page_dontneed(m);
1162 } else if (advise == MADV_FREE) {
1163 /*
1164 * Mark the page clean. This will allow the page
1165 * to be freed up by the system. However, such pages
1166 * are often reused quickly by malloc()/free()
1167 * so we do not do anything that would cause
1168 * a page fault if we can help it.
1169 *
1170 * Specifically, we do not try to actually free
1171 * the page now nor do we try to put it in the
1172 * cache (which would cause a page fault on reuse).
1173 *
1174 * But we do make the page is freeable as we
1175 * can without actually taking the step of unmapping
1176 * it.
1177 */
1178 pmap_clear_modify(m);
1179 m->dirty = 0;
1180 m->act_count = 0;
1181 vm_page_dontneed(m);
1182 }
1183 vm_page_unlock_queues();
1184 if (advise == MADV_FREE && tobject->type == OBJT_SWAP)
1185 swap_pager_freespace(tobject, tpindex, 1);
1186 unlock_tobject:
1187 if (tobject != object)
1188 VM_OBJECT_UNLOCK(tobject);
1189 }
1190 VM_OBJECT_UNLOCK(object);
1191 }
1192
1193 /*
1194 * vm_object_shadow:
1195 *
1196 * Create a new object which is backed by the
1197 * specified existing object range. The source
1198 * object reference is deallocated.
1199 *
1200 * The new object and offset into that object
1201 * are returned in the source parameters.
1202 */
1203 void
1204 vm_object_shadow(
1205 vm_object_t *object, /* IN/OUT */
1206 vm_ooffset_t *offset, /* IN/OUT */
1207 vm_size_t length)
1208 {
1209 vm_object_t source;
1210 vm_object_t result;
1211
1212 source = *object;
1213
1214 /*
1215 * Don't create the new object if the old object isn't shared.
1216 */
1217 if (source != NULL) {
1218 VM_OBJECT_LOCK(source);
1219 if (source->ref_count == 1 &&
1220 source->handle == NULL &&
1221 (source->type == OBJT_DEFAULT ||
1222 source->type == OBJT_SWAP)) {
1223 VM_OBJECT_UNLOCK(source);
1224 return;
1225 }
1226 VM_OBJECT_UNLOCK(source);
1227 }
1228
1229 /*
1230 * Allocate a new object with the given length.
1231 */
1232 result = vm_object_allocate(OBJT_DEFAULT, length);
1233
1234 /*
1235 * The new object shadows the source object, adding a reference to it.
1236 * Our caller changes his reference to point to the new object,
1237 * removing a reference to the source object. Net result: no change
1238 * of reference count.
1239 *
1240 * Try to optimize the result object's page color when shadowing
1241 * in order to maintain page coloring consistency in the combined
1242 * shadowed object.
1243 */
1244 result->backing_object = source;
1245 /*
1246 * Store the offset into the source object, and fix up the offset into
1247 * the new object.
1248 */
1249 result->backing_object_offset = *offset;
1250 if (source != NULL) {
1251 VM_OBJECT_LOCK(source);
1252 LIST_INSERT_HEAD(&source->shadow_head, result, shadow_list);
1253 source->shadow_count++;
1254 source->generation++;
1255 if (length < source->size)
1256 length = source->size;
1257 if (length > PQ_L2_SIZE / 3 + PQ_PRIME1 ||
1258 source->generation > 1)
1259 length = PQ_L2_SIZE / 3 + PQ_PRIME1;
1260 result->pg_color = (source->pg_color +
1261 length * source->generation) & PQ_L2_MASK;
1262 result->flags |= source->flags & OBJ_NEEDGIANT;
1263 VM_OBJECT_UNLOCK(source);
1264 next_index = (result->pg_color + PQ_L2_SIZE / 3 + PQ_PRIME1) &
1265 PQ_L2_MASK;
1266 }
1267
1268
1269 /*
1270 * Return the new things
1271 */
1272 *offset = 0;
1273 *object = result;
1274 }
1275
1276 /*
1277 * vm_object_split:
1278 *
1279 * Split the pages in a map entry into a new object. This affords
1280 * easier removal of unused pages, and keeps object inheritance from
1281 * being a negative impact on memory usage.
1282 */
1283 void
1284 vm_object_split(vm_map_entry_t entry)
1285 {
1286 vm_page_t m;
1287 vm_object_t orig_object, new_object, source;
1288 vm_pindex_t offidxstart, offidxend;
1289 vm_size_t idx, size;
1290
1291 orig_object = entry->object.vm_object;
1292 if (orig_object->type != OBJT_DEFAULT && orig_object->type != OBJT_SWAP)
1293 return;
1294 if (orig_object->ref_count <= 1)
1295 return;
1296 VM_OBJECT_UNLOCK(orig_object);
1297
1298 offidxstart = OFF_TO_IDX(entry->offset);
1299 offidxend = offidxstart + OFF_TO_IDX(entry->end - entry->start);
1300 size = offidxend - offidxstart;
1301
1302 /*
1303 * If swap_pager_copy() is later called, it will convert new_object
1304 * into a swap object.
1305 */
1306 new_object = vm_object_allocate(OBJT_DEFAULT, size);
1307
1308 VM_OBJECT_LOCK(new_object);
1309 VM_OBJECT_LOCK(orig_object);
1310 source = orig_object->backing_object;
1311 if (source != NULL) {
1312 VM_OBJECT_LOCK(source);
1313 LIST_INSERT_HEAD(&source->shadow_head,
1314 new_object, shadow_list);
1315 source->shadow_count++;
1316 source->generation++;
1317 vm_object_reference_locked(source); /* for new_object */
1318 vm_object_clear_flag(source, OBJ_ONEMAPPING);
1319 VM_OBJECT_UNLOCK(source);
1320 new_object->backing_object_offset =
1321 orig_object->backing_object_offset + entry->offset;
1322 new_object->backing_object = source;
1323 }
1324 new_object->flags |= orig_object->flags & OBJ_NEEDGIANT;
1325 vm_page_lock_queues();
1326 for (idx = 0; idx < size; idx++) {
1327 retry:
1328 m = vm_page_lookup(orig_object, offidxstart + idx);
1329 if (m == NULL)
1330 continue;
1331
1332 /*
1333 * We must wait for pending I/O to complete before we can
1334 * rename the page.
1335 *
1336 * We do not have to VM_PROT_NONE the page as mappings should
1337 * not be changed by this operation.
1338 */
1339 if ((m->flags & PG_BUSY) || m->busy) {
1340 vm_page_flag_set(m, PG_WANTED | PG_REFERENCED);
1341 VM_OBJECT_UNLOCK(orig_object);
1342 VM_OBJECT_UNLOCK(new_object);
1343 msleep(m, &vm_page_queue_mtx, PDROP | PVM, "spltwt", 0);
1344 VM_OBJECT_LOCK(new_object);
1345 VM_OBJECT_LOCK(orig_object);
1346 vm_page_lock_queues();
1347 goto retry;
1348 }
1349 vm_page_rename(m, new_object, idx);
1350 /* page automatically made dirty by rename and cache handled */
1351 vm_page_busy(m);
1352 }
1353 vm_page_unlock_queues();
1354 if (orig_object->type == OBJT_SWAP) {
1355 /*
1356 * swap_pager_copy() can sleep, in which case the orig_object's
1357 * and new_object's locks are released and reacquired.
1358 */
1359 swap_pager_copy(orig_object, new_object, offidxstart, 0);
1360 }
1361 VM_OBJECT_UNLOCK(orig_object);
1362 vm_page_lock_queues();
1363 TAILQ_FOREACH(m, &new_object->memq, listq)
1364 vm_page_wakeup(m);
1365 vm_page_unlock_queues();
1366 VM_OBJECT_UNLOCK(new_object);
1367 entry->object.vm_object = new_object;
1368 entry->offset = 0LL;
1369 vm_object_deallocate(orig_object);
1370 VM_OBJECT_LOCK(new_object);
1371 }
1372
1373 #define OBSC_TEST_ALL_SHADOWED 0x0001
1374 #define OBSC_COLLAPSE_NOWAIT 0x0002
1375 #define OBSC_COLLAPSE_WAIT 0x0004
1376
1377 static int
1378 vm_object_backing_scan(vm_object_t object, int op)
1379 {
1380 int r = 1;
1381 vm_page_t p;
1382 vm_object_t backing_object;
1383 vm_pindex_t backing_offset_index;
1384
1385 VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
1386 VM_OBJECT_LOCK_ASSERT(object->backing_object, MA_OWNED);
1387
1388 backing_object = object->backing_object;
1389 backing_offset_index = OFF_TO_IDX(object->backing_object_offset);
1390
1391 /*
1392 * Initial conditions
1393 */
1394 if (op & OBSC_TEST_ALL_SHADOWED) {
1395 /*
1396 * We do not want to have to test for the existence of
1397 * swap pages in the backing object. XXX but with the
1398 * new swapper this would be pretty easy to do.
1399 *
1400 * XXX what about anonymous MAP_SHARED memory that hasn't
1401 * been ZFOD faulted yet? If we do not test for this, the
1402 * shadow test may succeed! XXX
1403 */
1404 if (backing_object->type != OBJT_DEFAULT) {
1405 return (0);
1406 }
1407 }
1408 if (op & OBSC_COLLAPSE_WAIT) {
1409 vm_object_set_flag(backing_object, OBJ_DEAD);
1410 }
1411
1412 /*
1413 * Our scan
1414 */
1415 p = TAILQ_FIRST(&backing_object->memq);
1416 while (p) {
1417 vm_page_t next = TAILQ_NEXT(p, listq);
1418 vm_pindex_t new_pindex = p->pindex - backing_offset_index;
1419
1420 if (op & OBSC_TEST_ALL_SHADOWED) {
1421 vm_page_t pp;
1422
1423 /*
1424 * Ignore pages outside the parent object's range
1425 * and outside the parent object's mapping of the
1426 * backing object.
1427 *
1428 * note that we do not busy the backing object's
1429 * page.
1430 */
1431 if (
1432 p->pindex < backing_offset_index ||
1433 new_pindex >= object->size
1434 ) {
1435 p = next;
1436 continue;
1437 }
1438
1439 /*
1440 * See if the parent has the page or if the parent's
1441 * object pager has the page. If the parent has the
1442 * page but the page is not valid, the parent's
1443 * object pager must have the page.
1444 *
1445 * If this fails, the parent does not completely shadow
1446 * the object and we might as well give up now.
1447 */
1448
1449 pp = vm_page_lookup(object, new_pindex);
1450 if (
1451 (pp == NULL || pp->valid == 0) &&
1452 !vm_pager_has_page(object, new_pindex, NULL, NULL)
1453 ) {
1454 r = 0;
1455 break;
1456 }
1457 }
1458
1459 /*
1460 * Check for busy page
1461 */
1462 if (op & (OBSC_COLLAPSE_WAIT | OBSC_COLLAPSE_NOWAIT)) {
1463 vm_page_t pp;
1464
1465 if (op & OBSC_COLLAPSE_NOWAIT) {
1466 if ((p->flags & PG_BUSY) ||
1467 !p->valid ||
1468 p->busy) {
1469 p = next;
1470 continue;
1471 }
1472 } else if (op & OBSC_COLLAPSE_WAIT) {
1473 if ((p->flags & PG_BUSY) || p->busy) {
1474 vm_page_lock_queues();
1475 vm_page_flag_set(p,
1476 PG_WANTED | PG_REFERENCED);
1477 VM_OBJECT_UNLOCK(backing_object);
1478 VM_OBJECT_UNLOCK(object);
1479 msleep(p, &vm_page_queue_mtx,
1480 PDROP | PVM, "vmocol", 0);
1481 VM_OBJECT_LOCK(object);
1482 VM_OBJECT_LOCK(backing_object);
1483 /*
1484 * If we slept, anything could have
1485 * happened. Since the object is
1486 * marked dead, the backing offset
1487 * should not have changed so we
1488 * just restart our scan.
1489 */
1490 p = TAILQ_FIRST(&backing_object->memq);
1491 continue;
1492 }
1493 }
1494
1495 KASSERT(
1496 p->object == backing_object,
1497 ("vm_object_backing_scan: object mismatch")
1498 );
1499
1500 /*
1501 * Destroy any associated swap
1502 */
1503 if (backing_object->type == OBJT_SWAP) {
1504 swap_pager_freespace(
1505 backing_object,
1506 p->pindex,
1507 1
1508 );
1509 }
1510
1511 if (
1512 p->pindex < backing_offset_index ||
1513 new_pindex >= object->size
1514 ) {
1515 /*
1516 * Page is out of the parent object's range, we
1517 * can simply destroy it.
1518 */
1519 vm_page_lock_queues();
1520 KASSERT(!pmap_page_is_mapped(p),
1521 ("freeing mapped page %p", p));
1522 if (p->wire_count == 0)
1523 vm_page_free(p);
1524 else
1525 vm_page_remove(p);
1526 vm_page_unlock_queues();
1527 p = next;
1528 continue;
1529 }
1530
1531 pp = vm_page_lookup(object, new_pindex);
1532 if (
1533 pp != NULL ||
1534 vm_pager_has_page(object, new_pindex, NULL, NULL)
1535 ) {
1536 /*
1537 * page already exists in parent OR swap exists
1538 * for this location in the parent. Destroy
1539 * the original page from the backing object.
1540 *
1541 * Leave the parent's page alone
1542 */
1543 vm_page_lock_queues();
1544 KASSERT(!pmap_page_is_mapped(p),
1545 ("freeing mapped page %p", p));
1546 if (p->wire_count == 0)
1547 vm_page_free(p);
1548 else
1549 vm_page_remove(p);
1550 vm_page_unlock_queues();
1551 p = next;
1552 continue;
1553 }
1554
1555 /*
1556 * Page does not exist in parent, rename the
1557 * page from the backing object to the main object.
1558 *
1559 * If the page was mapped to a process, it can remain
1560 * mapped through the rename.
1561 */
1562 vm_page_lock_queues();
1563 vm_page_rename(p, object, new_pindex);
1564 vm_page_unlock_queues();
1565 /* page automatically made dirty by rename */
1566 }
1567 p = next;
1568 }
1569 return (r);
1570 }
1571
1572
1573 /*
1574 * this version of collapse allows the operation to occur earlier and
1575 * when paging_in_progress is true for an object... This is not a complete
1576 * operation, but should plug 99.9% of the rest of the leaks.
1577 */
1578 static void
1579 vm_object_qcollapse(vm_object_t object)
1580 {
1581 vm_object_t backing_object = object->backing_object;
1582
1583 VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
1584 VM_OBJECT_LOCK_ASSERT(backing_object, MA_OWNED);
1585
1586 if (backing_object->ref_count != 1)
1587 return;
1588
1589 vm_object_backing_scan(object, OBSC_COLLAPSE_NOWAIT);
1590 }
1591
1592 /*
1593 * vm_object_collapse:
1594 *
1595 * Collapse an object with the object backing it.
1596 * Pages in the backing object are moved into the
1597 * parent, and the backing object is deallocated.
1598 */
1599 void
1600 vm_object_collapse(vm_object_t object)
1601 {
1602 VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
1603
1604 while (TRUE) {
1605 vm_object_t backing_object;
1606
1607 /*
1608 * Verify that the conditions are right for collapse:
1609 *
1610 * The object exists and the backing object exists.
1611 */
1612 if ((backing_object = object->backing_object) == NULL)
1613 break;
1614
1615 /*
1616 * we check the backing object first, because it is most likely
1617 * not collapsable.
1618 */
1619 VM_OBJECT_LOCK(backing_object);
1620 if (backing_object->handle != NULL ||
1621 (backing_object->type != OBJT_DEFAULT &&
1622 backing_object->type != OBJT_SWAP) ||
1623 (backing_object->flags & OBJ_DEAD) ||
1624 object->handle != NULL ||
1625 (object->type != OBJT_DEFAULT &&
1626 object->type != OBJT_SWAP) ||
1627 (object->flags & OBJ_DEAD)) {
1628 VM_OBJECT_UNLOCK(backing_object);
1629 break;
1630 }
1631
1632 if (
1633 object->paging_in_progress != 0 ||
1634 backing_object->paging_in_progress != 0
1635 ) {
1636 vm_object_qcollapse(object);
1637 VM_OBJECT_UNLOCK(backing_object);
1638 break;
1639 }
1640 /*
1641 * We know that we can either collapse the backing object (if
1642 * the parent is the only reference to it) or (perhaps) have
1643 * the parent bypass the object if the parent happens to shadow
1644 * all the resident pages in the entire backing object.
1645 *
1646 * This is ignoring pager-backed pages such as swap pages.
1647 * vm_object_backing_scan fails the shadowing test in this
1648 * case.
1649 */
1650 if (backing_object->ref_count == 1) {
1651 /*
1652 * If there is exactly one reference to the backing
1653 * object, we can collapse it into the parent.
1654 */
1655 vm_object_backing_scan(object, OBSC_COLLAPSE_WAIT);
1656
1657 /*
1658 * Move the pager from backing_object to object.
1659 */
1660 if (backing_object->type == OBJT_SWAP) {
1661 /*
1662 * swap_pager_copy() can sleep, in which case
1663 * the backing_object's and object's locks are
1664 * released and reacquired.
1665 */
1666 swap_pager_copy(
1667 backing_object,
1668 object,
1669 OFF_TO_IDX(object->backing_object_offset), TRUE);
1670 }
1671 /*
1672 * Object now shadows whatever backing_object did.
1673 * Note that the reference to
1674 * backing_object->backing_object moves from within
1675 * backing_object to within object.
1676 */
1677 LIST_REMOVE(object, shadow_list);
1678 backing_object->shadow_count--;
1679 backing_object->generation++;
1680 if (backing_object->backing_object) {
1681 VM_OBJECT_LOCK(backing_object->backing_object);
1682 LIST_REMOVE(backing_object, shadow_list);
1683 LIST_INSERT_HEAD(
1684 &backing_object->backing_object->shadow_head,
1685 object, shadow_list);
1686 /*
1687 * The shadow_count has not changed.
1688 */
1689 backing_object->backing_object->generation++;
1690 VM_OBJECT_UNLOCK(backing_object->backing_object);
1691 }
1692 object->backing_object = backing_object->backing_object;
1693 object->backing_object_offset +=
1694 backing_object->backing_object_offset;
1695
1696 /*
1697 * Discard backing_object.
1698 *
1699 * Since the backing object has no pages, no pager left,
1700 * and no object references within it, all that is
1701 * necessary is to dispose of it.
1702 */
1703 KASSERT(backing_object->ref_count == 1, ("backing_object %p was somehow re-referenced during collapse!", backing_object));
1704 VM_OBJECT_UNLOCK(backing_object);
1705
1706 mtx_lock(&vm_object_list_mtx);
1707 TAILQ_REMOVE(
1708 &vm_object_list,
1709 backing_object,
1710 object_list
1711 );
1712 mtx_unlock(&vm_object_list_mtx);
1713
1714 uma_zfree(obj_zone, backing_object);
1715
1716 object_collapses++;
1717 } else {
1718 vm_object_t new_backing_object;
1719
1720 /*
1721 * If we do not entirely shadow the backing object,
1722 * there is nothing we can do so we give up.
1723 */
1724 if (object->resident_page_count != object->size &&
1725 vm_object_backing_scan(object,
1726 OBSC_TEST_ALL_SHADOWED) == 0) {
1727 VM_OBJECT_UNLOCK(backing_object);
1728 break;
1729 }
1730
1731 /*
1732 * Make the parent shadow the next object in the
1733 * chain. Deallocating backing_object will not remove
1734 * it, since its reference count is at least 2.
1735 */
1736 LIST_REMOVE(object, shadow_list);
1737 backing_object->shadow_count--;
1738 backing_object->generation++;
1739
1740 new_backing_object = backing_object->backing_object;
1741 if ((object->backing_object = new_backing_object) != NULL) {
1742 VM_OBJECT_LOCK(new_backing_object);
1743 LIST_INSERT_HEAD(
1744 &new_backing_object->shadow_head,
1745 object,
1746 shadow_list
1747 );
1748 new_backing_object->shadow_count++;
1749 new_backing_object->generation++;
1750 vm_object_reference_locked(new_backing_object);
1751 VM_OBJECT_UNLOCK(new_backing_object);
1752 object->backing_object_offset +=
1753 backing_object->backing_object_offset;
1754 }
1755
1756 /*
1757 * Drop the reference count on backing_object. Since
1758 * its ref_count was at least 2, it will not vanish.
1759 */
1760 backing_object->ref_count--;
1761 VM_OBJECT_UNLOCK(backing_object);
1762 object_bypasses++;
1763 }
1764
1765 /*
1766 * Try again with this object's new backing object.
1767 */
1768 }
1769 }
1770
1771 /*
1772 * vm_object_page_remove:
1773 *
1774 * Removes all physical pages in the given range from the
1775 * object's list of pages. If the range's end is zero, all
1776 * physical pages from the range's start to the end of the object
1777 * are deleted.
1778 *
1779 * The object must be locked.
1780 */
1781 void
1782 vm_object_page_remove(vm_object_t object, vm_pindex_t start, vm_pindex_t end,
1783 boolean_t clean_only)
1784 {
1785 vm_page_t p, next;
1786
1787 VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
1788 if (object->resident_page_count == 0)
1789 return;
1790
1791 /*
1792 * Since physically-backed objects do not use managed pages, we can't
1793 * remove pages from the object (we must instead remove the page
1794 * references, and then destroy the object).
1795 */
1796 KASSERT(object->type != OBJT_PHYS,
1797 ("attempt to remove pages from a physical object"));
1798
1799 vm_object_pip_add(object, 1);
1800 again:
1801 vm_page_lock_queues();
1802 if ((p = TAILQ_FIRST(&object->memq)) != NULL) {
1803 if (p->pindex < start) {
1804 p = vm_page_splay(start, object->root);
1805 if ((object->root = p)->pindex < start)
1806 p = TAILQ_NEXT(p, listq);
1807 }
1808 }
1809 /*
1810 * Assert: the variable p is either (1) the page with the
1811 * least pindex greater than or equal to the parameter pindex
1812 * or (2) NULL.
1813 */
1814 for (;
1815 p != NULL && (p->pindex < end || end == 0);
1816 p = next) {
1817 next = TAILQ_NEXT(p, listq);
1818
1819 if (p->wire_count != 0) {
1820 pmap_remove_all(p);
1821 if (!clean_only)
1822 p->valid = 0;
1823 continue;
1824 }
1825 if (vm_page_sleep_if_busy(p, TRUE, "vmopar"))
1826 goto again;
1827 if (clean_only && p->valid) {
1828 pmap_page_protect(p, VM_PROT_READ | VM_PROT_EXECUTE);
1829 if (p->valid & p->dirty)
1830 continue;
1831 }
1832 pmap_remove_all(p);
1833 vm_page_free(p);
1834 }
1835 vm_page_unlock_queues();
1836 vm_object_pip_wakeup(object);
1837 }
1838
1839 /*
1840 * Routine: vm_object_coalesce
1841 * Function: Coalesces two objects backing up adjoining
1842 * regions of memory into a single object.
1843 *
1844 * returns TRUE if objects were combined.
1845 *
1846 * NOTE: Only works at the moment if the second object is NULL -
1847 * if it's not, which object do we lock first?
1848 *
1849 * Parameters:
1850 * prev_object First object to coalesce
1851 * prev_offset Offset into prev_object
1852 * prev_size Size of reference to prev_object
1853 * next_size Size of reference to the second object
1854 *
1855 * Conditions:
1856 * The object must *not* be locked.
1857 */
1858 boolean_t
1859 vm_object_coalesce(vm_object_t prev_object, vm_ooffset_t prev_offset,
1860 vm_size_t prev_size, vm_size_t next_size)
1861 {
1862 vm_pindex_t next_pindex;
1863
1864 if (prev_object == NULL)
1865 return (TRUE);
1866 VM_OBJECT_LOCK(prev_object);
1867 if (prev_object->type != OBJT_DEFAULT &&
1868 prev_object->type != OBJT_SWAP) {
1869 VM_OBJECT_UNLOCK(prev_object);
1870 return (FALSE);
1871 }
1872
1873 /*
1874 * Try to collapse the object first
1875 */
1876 vm_object_collapse(prev_object);
1877
1878 /*
1879 * Can't coalesce if: . more than one reference . paged out . shadows
1880 * another object . has a copy elsewhere (any of which mean that the
1881 * pages not mapped to prev_entry may be in use anyway)
1882 */
1883 if (prev_object->backing_object != NULL) {
1884 VM_OBJECT_UNLOCK(prev_object);
1885 return (FALSE);
1886 }
1887
1888 prev_size >>= PAGE_SHIFT;
1889 next_size >>= PAGE_SHIFT;
1890 next_pindex = OFF_TO_IDX(prev_offset) + prev_size;
1891
1892 if ((prev_object->ref_count > 1) &&
1893 (prev_object->size != next_pindex)) {
1894 VM_OBJECT_UNLOCK(prev_object);
1895 return (FALSE);
1896 }
1897
1898 /*
1899 * Remove any pages that may still be in the object from a previous
1900 * deallocation.
1901 */
1902 if (next_pindex < prev_object->size) {
1903 vm_object_page_remove(prev_object,
1904 next_pindex,
1905 next_pindex + next_size, FALSE);
1906 if (prev_object->type == OBJT_SWAP)
1907 swap_pager_freespace(prev_object,
1908 next_pindex, next_size);
1909 }
1910
1911 /*
1912 * Extend the object if necessary.
1913 */
1914 if (next_pindex + next_size > prev_object->size)
1915 prev_object->size = next_pindex + next_size;
1916
1917 VM_OBJECT_UNLOCK(prev_object);
1918 return (TRUE);
1919 }
1920
1921 void
1922 vm_object_set_writeable_dirty(vm_object_t object)
1923 {
1924 struct vnode *vp;
1925
1926 VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
1927 if ((object->flags & (OBJ_MIGHTBEDIRTY|OBJ_WRITEABLE)) ==
1928 (OBJ_MIGHTBEDIRTY|OBJ_WRITEABLE))
1929 return;
1930 vm_object_set_flag(object, OBJ_WRITEABLE|OBJ_MIGHTBEDIRTY);
1931 if (object->type == OBJT_VNODE &&
1932 (vp = (struct vnode *)object->handle) != NULL) {
1933 VI_LOCK(vp);
1934 vp->v_iflag |= VI_OBJDIRTY;
1935 VI_UNLOCK(vp);
1936 }
1937 }
1938
1939 #include "opt_ddb.h"
1940 #ifdef DDB
1941 #include <sys/kernel.h>
1942
1943 #include <sys/cons.h>
1944
1945 #include <ddb/ddb.h>
1946
1947 static int
1948 _vm_object_in_map(vm_map_t map, vm_object_t object, vm_map_entry_t entry)
1949 {
1950 vm_map_t tmpm;
1951 vm_map_entry_t tmpe;
1952 vm_object_t obj;
1953 int entcount;
1954
1955 if (map == 0)
1956 return 0;
1957
1958 if (entry == 0) {
1959 tmpe = map->header.next;
1960 entcount = map->nentries;
1961 while (entcount-- && (tmpe != &map->header)) {
1962 if (_vm_object_in_map(map, object, tmpe)) {
1963 return 1;
1964 }
1965 tmpe = tmpe->next;
1966 }
1967 } else if (entry->eflags & MAP_ENTRY_IS_SUB_MAP) {
1968 tmpm = entry->object.sub_map;
1969 tmpe = tmpm->header.next;
1970 entcount = tmpm->nentries;
1971 while (entcount-- && tmpe != &tmpm->header) {
1972 if (_vm_object_in_map(tmpm, object, tmpe)) {
1973 return 1;
1974 }
1975 tmpe = tmpe->next;
1976 }
1977 } else if ((obj = entry->object.vm_object) != NULL) {
1978 for (; obj; obj = obj->backing_object)
1979 if (obj == object) {
1980 return 1;
1981 }
1982 }
1983 return 0;
1984 }
1985
1986 static int
1987 vm_object_in_map(vm_object_t object)
1988 {
1989 struct proc *p;
1990
1991 /* sx_slock(&allproc_lock); */
1992 LIST_FOREACH(p, &allproc, p_list) {
1993 if (!p->p_vmspace /* || (p->p_flag & (P_SYSTEM|P_WEXIT)) */)
1994 continue;
1995 if (_vm_object_in_map(&p->p_vmspace->vm_map, object, 0)) {
1996 /* sx_sunlock(&allproc_lock); */
1997 return 1;
1998 }
1999 }
2000 /* sx_sunlock(&allproc_lock); */
2001 if (_vm_object_in_map(kernel_map, object, 0))
2002 return 1;
2003 if (_vm_object_in_map(kmem_map, object, 0))
2004 return 1;
2005 if (_vm_object_in_map(pager_map, object, 0))
2006 return 1;
2007 if (_vm_object_in_map(buffer_map, object, 0))
2008 return 1;
2009 return 0;
2010 }
2011
2012 DB_SHOW_COMMAND(vmochk, vm_object_check)
2013 {
2014 vm_object_t object;
2015
2016 /*
2017 * make sure that internal objs are in a map somewhere
2018 * and none have zero ref counts.
2019 */
2020 TAILQ_FOREACH(object, &vm_object_list, object_list) {
2021 if (object->handle == NULL &&
2022 (object->type == OBJT_DEFAULT || object->type == OBJT_SWAP)) {
2023 if (object->ref_count == 0) {
2024 db_printf("vmochk: internal obj has zero ref count: %ld\n",
2025 (long)object->size);
2026 }
2027 if (!vm_object_in_map(object)) {
2028 db_printf(
2029 "vmochk: internal obj is not in a map: "
2030 "ref: %d, size: %lu: 0x%lx, backing_object: %p\n",
2031 object->ref_count, (u_long)object->size,
2032 (u_long)object->size,
2033 (void *)object->backing_object);
2034 }
2035 }
2036 }
2037 }
2038
2039 /*
2040 * vm_object_print: [ debug ]
2041 */
2042 DB_SHOW_COMMAND(object, vm_object_print_static)
2043 {
2044 /* XXX convert args. */
2045 vm_object_t object = (vm_object_t)addr;
2046 boolean_t full = have_addr;
2047
2048 vm_page_t p;
2049
2050 /* XXX count is an (unused) arg. Avoid shadowing it. */
2051 #define count was_count
2052
2053 int count;
2054
2055 if (object == NULL)
2056 return;
2057
2058 db_iprintf(
2059 "Object %p: type=%d, size=0x%jx, res=%d, ref=%d, flags=0x%x\n",
2060 object, (int)object->type, (uintmax_t)object->size,
2061 object->resident_page_count, object->ref_count, object->flags);
2062 db_iprintf(" sref=%d, backing_object(%d)=(%p)+0x%jx\n",
2063 object->shadow_count,
2064 object->backing_object ? object->backing_object->ref_count : 0,
2065 object->backing_object, (uintmax_t)object->backing_object_offset);
2066
2067 if (!full)
2068 return;
2069
2070 db_indent += 2;
2071 count = 0;
2072 TAILQ_FOREACH(p, &object->memq, listq) {
2073 if (count == 0)
2074 db_iprintf("memory:=");
2075 else if (count == 6) {
2076 db_printf("\n");
2077 db_iprintf(" ...");
2078 count = 0;
2079 } else
2080 db_printf(",");
2081 count++;
2082
2083 db_printf("(off=0x%jx,page=0x%jx)",
2084 (uintmax_t)p->pindex, (uintmax_t)VM_PAGE_TO_PHYS(p));
2085 }
2086 if (count != 0)
2087 db_printf("\n");
2088 db_indent -= 2;
2089 }
2090
2091 /* XXX. */
2092 #undef count
2093
2094 /* XXX need this non-static entry for calling from vm_map_print. */
2095 void
2096 vm_object_print(
2097 /* db_expr_t */ long addr,
2098 boolean_t have_addr,
2099 /* db_expr_t */ long count,
2100 char *modif)
2101 {
2102 vm_object_print_static(addr, have_addr, count, modif);
2103 }
2104
2105 DB_SHOW_COMMAND(vmopag, vm_object_print_pages)
2106 {
2107 vm_object_t object;
2108 int nl = 0;
2109 int c;
2110
2111 TAILQ_FOREACH(object, &vm_object_list, object_list) {
2112 vm_pindex_t idx, fidx;
2113 vm_pindex_t osize;
2114 vm_paddr_t pa = -1, padiff;
2115 int rcount;
2116 vm_page_t m;
2117
2118 db_printf("new object: %p\n", (void *)object);
2119 if (nl > 18) {
2120 c = cngetc();
2121 if (c != ' ')
2122 return;
2123 nl = 0;
2124 }
2125 nl++;
2126 rcount = 0;
2127 fidx = 0;
2128 osize = object->size;
2129 if (osize > 128)
2130 osize = 128;
2131 for (idx = 0; idx < osize; idx++) {
2132 m = vm_page_lookup(object, idx);
2133 if (m == NULL) {
2134 if (rcount) {
2135 db_printf(" index(%ld)run(%d)pa(0x%lx)\n",
2136 (long)fidx, rcount, (long)pa);
2137 if (nl > 18) {
2138 c = cngetc();
2139 if (c != ' ')
2140 return;
2141 nl = 0;
2142 }
2143 nl++;
2144 rcount = 0;
2145 }
2146 continue;
2147 }
2148
2149
2150 if (rcount &&
2151 (VM_PAGE_TO_PHYS(m) == pa + rcount * PAGE_SIZE)) {
2152 ++rcount;
2153 continue;
2154 }
2155 if (rcount) {
2156 padiff = pa + rcount * PAGE_SIZE - VM_PAGE_TO_PHYS(m);
2157 padiff >>= PAGE_SHIFT;
2158 padiff &= PQ_L2_MASK;
2159 if (padiff == 0) {
2160 pa = VM_PAGE_TO_PHYS(m) - rcount * PAGE_SIZE;
2161 ++rcount;
2162 continue;
2163 }
2164 db_printf(" index(%ld)run(%d)pa(0x%lx)",
2165 (long)fidx, rcount, (long)pa);
2166 db_printf("pd(%ld)\n", (long)padiff);
2167 if (nl > 18) {
2168 c = cngetc();
2169 if (c != ' ')
2170 return;
2171 nl = 0;
2172 }
2173 nl++;
2174 }
2175 fidx = idx;
2176 pa = VM_PAGE_TO_PHYS(m);
2177 rcount = 1;
2178 }
2179 if (rcount) {
2180 db_printf(" index(%ld)run(%d)pa(0x%lx)\n",
2181 (long)fidx, rcount, (long)pa);
2182 if (nl > 18) {
2183 c = cngetc();
2184 if (c != ' ')
2185 return;
2186 nl = 0;
2187 }
2188 nl++;
2189 }
2190 }
2191 }
2192 #endif /* DDB */
Cache object: eeeacbf7740a93989d93d98305b9f0b0
|