FreeBSD/Linux Kernel Cross Reference
sys/vm/vm_pageout.c
1 /*-
2 * Copyright (c) 1991 Regents of the University of California.
3 * All rights reserved.
4 * Copyright (c) 1994 John S. Dyson
5 * All rights reserved.
6 * Copyright (c) 1994 David Greenman
7 * All rights reserved.
8 * Copyright (c) 2005 Yahoo! Technologies Norway AS
9 * All rights reserved.
10 *
11 * This code is derived from software contributed to Berkeley by
12 * The Mach Operating System project at Carnegie-Mellon University.
13 *
14 * Redistribution and use in source and binary forms, with or without
15 * modification, are permitted provided that the following conditions
16 * are met:
17 * 1. Redistributions of source code must retain the above copyright
18 * notice, this list of conditions and the following disclaimer.
19 * 2. Redistributions in binary form must reproduce the above copyright
20 * notice, this list of conditions and the following disclaimer in the
21 * documentation and/or other materials provided with the distribution.
22 * 3. All advertising materials mentioning features or use of this software
23 * must display the following acknowledgement:
24 * This product includes software developed by the University of
25 * California, Berkeley and its contributors.
26 * 4. Neither the name of the University nor the names of its contributors
27 * may be used to endorse or promote products derived from this software
28 * without specific prior written permission.
29 *
30 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
31 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
32 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
33 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
34 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
35 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
36 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
37 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
38 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
39 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
40 * SUCH DAMAGE.
41 *
42 * from: @(#)vm_pageout.c 7.4 (Berkeley) 5/7/91
43 *
44 *
45 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
46 * All rights reserved.
47 *
48 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
49 *
50 * Permission to use, copy, modify and distribute this software and
51 * its documentation is hereby granted, provided that both the copyright
52 * notice and this permission notice appear in all copies of the
53 * software, derivative works or modified versions, and any portions
54 * thereof, and that both notices appear in supporting documentation.
55 *
56 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
57 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
58 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
59 *
60 * Carnegie Mellon requests users of this software to return to
61 *
62 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
63 * School of Computer Science
64 * Carnegie Mellon University
65 * Pittsburgh PA 15213-3890
66 *
67 * any improvements or extensions that they make and grant Carnegie the
68 * rights to redistribute these changes.
69 */
70
71 /*
72 * The proverbial page-out daemon.
73 */
74
75 #include <sys/cdefs.h>
76 __FBSDID("$FreeBSD: releng/9.0/sys/vm/vm_pageout.c 225418 2011-09-06 10:30:11Z kib $");
77
78 #include "opt_vm.h"
79 #include <sys/param.h>
80 #include <sys/systm.h>
81 #include <sys/kernel.h>
82 #include <sys/eventhandler.h>
83 #include <sys/lock.h>
84 #include <sys/mutex.h>
85 #include <sys/proc.h>
86 #include <sys/kthread.h>
87 #include <sys/ktr.h>
88 #include <sys/mount.h>
89 #include <sys/racct.h>
90 #include <sys/resourcevar.h>
91 #include <sys/sched.h>
92 #include <sys/signalvar.h>
93 #include <sys/vnode.h>
94 #include <sys/vmmeter.h>
95 #include <sys/sx.h>
96 #include <sys/sysctl.h>
97
98 #include <vm/vm.h>
99 #include <vm/vm_param.h>
100 #include <vm/vm_object.h>
101 #include <vm/vm_page.h>
102 #include <vm/vm_map.h>
103 #include <vm/vm_pageout.h>
104 #include <vm/vm_pager.h>
105 #include <vm/swap_pager.h>
106 #include <vm/vm_extern.h>
107 #include <vm/uma.h>
108
109 /*
110 * System initialization
111 */
112
113 /* the kernel process "vm_pageout"*/
114 static void vm_pageout(void);
115 static int vm_pageout_clean(vm_page_t);
116 static void vm_pageout_scan(int pass);
117
118 struct proc *pageproc;
119
120 static struct kproc_desc page_kp = {
121 "pagedaemon",
122 vm_pageout,
123 &pageproc
124 };
125 SYSINIT(pagedaemon, SI_SUB_KTHREAD_PAGE, SI_ORDER_FIRST, kproc_start,
126 &page_kp);
127
128 #if !defined(NO_SWAPPING)
129 /* the kernel process "vm_daemon"*/
130 static void vm_daemon(void);
131 static struct proc *vmproc;
132
133 static struct kproc_desc vm_kp = {
134 "vmdaemon",
135 vm_daemon,
136 &vmproc
137 };
138 SYSINIT(vmdaemon, SI_SUB_KTHREAD_VM, SI_ORDER_FIRST, kproc_start, &vm_kp);
139 #endif
140
141
142 int vm_pages_needed; /* Event on which pageout daemon sleeps */
143 int vm_pageout_deficit; /* Estimated number of pages deficit */
144 int vm_pageout_pages_needed; /* flag saying that the pageout daemon needs pages */
145
146 #if !defined(NO_SWAPPING)
147 static int vm_pageout_req_swapout; /* XXX */
148 static int vm_daemon_needed;
149 static struct mtx vm_daemon_mtx;
150 /* Allow for use by vm_pageout before vm_daemon is initialized. */
151 MTX_SYSINIT(vm_daemon, &vm_daemon_mtx, "vm daemon", MTX_DEF);
152 #endif
153 static int vm_max_launder = 32;
154 static int vm_pageout_stats_max=0, vm_pageout_stats_interval = 0;
155 static int vm_pageout_full_stats_interval = 0;
156 static int vm_pageout_algorithm=0;
157 static int defer_swap_pageouts=0;
158 static int disable_swap_pageouts=0;
159
160 #if defined(NO_SWAPPING)
161 static int vm_swap_enabled=0;
162 static int vm_swap_idle_enabled=0;
163 #else
164 static int vm_swap_enabled=1;
165 static int vm_swap_idle_enabled=0;
166 #endif
167
168 SYSCTL_INT(_vm, VM_PAGEOUT_ALGORITHM, pageout_algorithm,
169 CTLFLAG_RW, &vm_pageout_algorithm, 0, "LRU page mgmt");
170
171 SYSCTL_INT(_vm, OID_AUTO, max_launder,
172 CTLFLAG_RW, &vm_max_launder, 0, "Limit dirty flushes in pageout");
173
174 SYSCTL_INT(_vm, OID_AUTO, pageout_stats_max,
175 CTLFLAG_RW, &vm_pageout_stats_max, 0, "Max pageout stats scan length");
176
177 SYSCTL_INT(_vm, OID_AUTO, pageout_full_stats_interval,
178 CTLFLAG_RW, &vm_pageout_full_stats_interval, 0, "Interval for full stats scan");
179
180 SYSCTL_INT(_vm, OID_AUTO, pageout_stats_interval,
181 CTLFLAG_RW, &vm_pageout_stats_interval, 0, "Interval for partial stats scan");
182
183 #if defined(NO_SWAPPING)
184 SYSCTL_INT(_vm, VM_SWAPPING_ENABLED, swap_enabled,
185 CTLFLAG_RD, &vm_swap_enabled, 0, "Enable entire process swapout");
186 SYSCTL_INT(_vm, OID_AUTO, swap_idle_enabled,
187 CTLFLAG_RD, &vm_swap_idle_enabled, 0, "Allow swapout on idle criteria");
188 #else
189 SYSCTL_INT(_vm, VM_SWAPPING_ENABLED, swap_enabled,
190 CTLFLAG_RW, &vm_swap_enabled, 0, "Enable entire process swapout");
191 SYSCTL_INT(_vm, OID_AUTO, swap_idle_enabled,
192 CTLFLAG_RW, &vm_swap_idle_enabled, 0, "Allow swapout on idle criteria");
193 #endif
194
195 SYSCTL_INT(_vm, OID_AUTO, defer_swapspace_pageouts,
196 CTLFLAG_RW, &defer_swap_pageouts, 0, "Give preference to dirty pages in mem");
197
198 SYSCTL_INT(_vm, OID_AUTO, disable_swapspace_pageouts,
199 CTLFLAG_RW, &disable_swap_pageouts, 0, "Disallow swapout of dirty pages");
200
201 static int pageout_lock_miss;
202 SYSCTL_INT(_vm, OID_AUTO, pageout_lock_miss,
203 CTLFLAG_RD, &pageout_lock_miss, 0, "vget() lock misses during pageout");
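/*
 * Illustrative note (not part of the original source): the SYSCTL_INT()
 * declarations above export these knobs under the "vm" sysctl tree, so a
 * CTLFLAG_RW entry such as vm.max_launder can be inspected and tuned from
 * userland, e.g.:
 *
 *	sysctl vm.max_launder		# read the current per-scan limit
 *	sysctl vm.max_launder=64	# allow more dirty flushes per scan
 *
 * CTLFLAG_RD entries, such as vm.pageout_lock_miss, are read-only counters.
 */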
204
205 #define VM_PAGEOUT_PAGE_COUNT 16
206 int vm_pageout_page_count = VM_PAGEOUT_PAGE_COUNT;
207
208 int vm_page_max_wired; /* XXX max # of wired pages system-wide */
209 SYSCTL_INT(_vm, OID_AUTO, max_wired,
210 CTLFLAG_RW, &vm_page_max_wired, 0, "System-wide limit to wired page count");
211
212 #if !defined(NO_SWAPPING)
213 static void vm_pageout_map_deactivate_pages(vm_map_t, long);
214 static void vm_pageout_object_deactivate_pages(pmap_t, vm_object_t, long);
215 static void vm_req_vmdaemon(int req);
216 #endif
217 static void vm_pageout_page_stats(void);
218
219 /*
220 * Initialize a dummy page for marking the caller's place in the specified
221 * paging queue. In principle, this function only needs to set the flag
222 * PG_MARKER. Nonetheless, it sets the flag VPO_BUSY and initializes the hold
223 * count to one as safety precautions.
224 */
225 static void
226 vm_pageout_init_marker(vm_page_t marker, u_short queue)
227 {
228
229 bzero(marker, sizeof(*marker));
230 marker->flags = PG_MARKER;
231 marker->oflags = VPO_BUSY;
232 marker->queue = queue;
233 marker->hold_count = 1;
234 }
235
236 /*
237 * vm_pageout_fallback_object_lock:
238 *
239 * Lock vm object currently associated with `m'. VM_OBJECT_TRYLOCK is
240 * known to have failed and page queue must be either PQ_ACTIVE or
241 * PQ_INACTIVE. To avoid lock order violation, unlock the page queues
242 * while locking the vm object. Use marker page to detect page queue
243 * changes and maintain notion of next page on page queue. Return
244 * TRUE if no changes were detected, FALSE otherwise. vm object is
245 * locked on return.
246 *
247 * This function depends on both the lock portion of struct vm_object
248 * and normal struct vm_page being type stable.
249 */
250 boolean_t
251 vm_pageout_fallback_object_lock(vm_page_t m, vm_page_t *next)
252 {
253 struct vm_page marker;
254 boolean_t unchanged;
255 u_short queue;
256 vm_object_t object;
257
258 queue = m->queue;
259 vm_pageout_init_marker(&marker, queue);
260 object = m->object;
261
262 TAILQ_INSERT_AFTER(&vm_page_queues[queue].pl,
263 m, &marker, pageq);
264 vm_page_unlock_queues();
265 vm_page_unlock(m);
266 VM_OBJECT_LOCK(object);
267 vm_page_lock(m);
268 vm_page_lock_queues();
269
270 /* Page queue might have changed. */
271 *next = TAILQ_NEXT(&marker, pageq);
272 unchanged = (m->queue == queue &&
273 m->object == object &&
274 &marker == TAILQ_NEXT(m, pageq));
275 TAILQ_REMOVE(&vm_page_queues[queue].pl,
276 &marker, pageq);
277 return (unchanged);
278 }
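/*
 * Illustrative sketch (not part of the original source): the queue scans
 * later in this file use vm_pageout_fallback_object_lock() as a fallback
 * when the cheap non-blocking object lock attempt fails, roughly:
 *
 *	object = m->object;
 *	if (!VM_OBJECT_TRYLOCK(object) &&
 *	    !vm_pageout_fallback_object_lock(m, &next)) {
 *		VM_OBJECT_UNLOCK(object);
 *		vm_page_unlock(m);
 *		continue;	(queue changed under us; "next" is still valid)
 *	}
 */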
279
280 /*
281 * Lock the page while holding the page queue lock. Use marker page
282 * to detect page queue changes and maintain notion of next page on
283 * page queue. Return TRUE if no changes were detected, FALSE
284 * otherwise. The page is locked on return. The page queue lock might
285 * be dropped and reacquired.
286 *
287 * This function depends on normal struct vm_page being type stable.
288 */
289 boolean_t
290 vm_pageout_page_lock(vm_page_t m, vm_page_t *next)
291 {
292 struct vm_page marker;
293 boolean_t unchanged;
294 u_short queue;
295
296 vm_page_lock_assert(m, MA_NOTOWNED);
297 mtx_assert(&vm_page_queue_mtx, MA_OWNED);
298
299 if (vm_page_trylock(m))
300 return (TRUE);
301
302 queue = m->queue;
303 vm_pageout_init_marker(&marker, queue);
304
305 TAILQ_INSERT_AFTER(&vm_page_queues[queue].pl, m, &marker, pageq);
306 vm_page_unlock_queues();
307 vm_page_lock(m);
308 vm_page_lock_queues();
309
310 /* Page queue might have changed. */
311 *next = TAILQ_NEXT(&marker, pageq);
312 unchanged = (m->queue == queue && &marker == TAILQ_NEXT(m, pageq));
313 TAILQ_REMOVE(&vm_page_queues[queue].pl, &marker, pageq);
314 return (unchanged);
315 }
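/*
 * Illustrative sketch (not part of the original source): callers walking a
 * page queue under vm_page_queue_mtx use vm_pageout_page_lock() the way the
 * scan loops below do; when it returns FALSE the queue changed while the
 * page lock was being acquired, so the caller drops the page and moves on
 * to "next", which the marker kept valid:
 *
 *	if (!vm_pageout_page_lock(m, &next)) {
 *		vm_page_unlock(m);
 *		m = next;
 *		continue;
 *	}
 */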
316
317 /*
318 * vm_pageout_clean:
319 *
320 * Clean the page and remove it from the laundry.
321 *
322 * We set the busy bit to cause potential page faults on this page to
323 * block. Note the careful timing, however, the busy bit isn't set till
324 * late and we cannot do anything that will mess with the page.
325 */
326 static int
327 vm_pageout_clean(vm_page_t m)
328 {
329 vm_object_t object;
330 vm_page_t mc[2*vm_pageout_page_count], pb, ps;
331 int pageout_count;
332 int ib, is, page_base;
333 vm_pindex_t pindex = m->pindex;
334
335 vm_page_lock_assert(m, MA_OWNED);
336 object = m->object;
337 VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
338
339 /*
340 * It doesn't cost us anything to pageout OBJT_DEFAULT or OBJT_SWAP
341 * with the new swapper, but we could have serious problems paging
342 * out other object types if there is insufficient memory.
343 *
344 * Unfortunately, checking free memory here is far too late, so the
345 * check has been moved up a procedural level.
346 */
347
348 /*
349 * Can't clean the page if it's busy or held.
350 */
351 KASSERT(m->busy == 0 && (m->oflags & VPO_BUSY) == 0,
352 ("vm_pageout_clean: page %p is busy", m));
353 KASSERT(m->hold_count == 0, ("vm_pageout_clean: page %p is held", m));
354 vm_page_unlock(m);
355
356 mc[vm_pageout_page_count] = pb = ps = m;
357 pageout_count = 1;
358 page_base = vm_pageout_page_count;
359 ib = 1;
360 is = 1;
361
362 /*
363 * Scan object for clusterable pages.
364 *
365 * We can cluster ONLY if: ->> the page is NOT
366 * clean, wired, busy, held, or mapped into a
367 * buffer, and one of the following:
368 * 1) The page is inactive, or a seldom used
369 * active page.
370 * -or-
371 * 2) we force the issue.
372 *
373 * During heavy mmap/modification loads the pageout
374 * daemon can really fragment the underlying file
375 * due to flushing pages out of order and not trying
376 * align the clusters (which leave sporatic out-of-order
377 * holes). To solve this problem we do the reverse scan
378 * first and attempt to align our cluster, then do a
379 * forward scan if room remains.
380 */
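	/*
	 * Worked example (illustrative, with the default
	 * vm_pageout_page_count of 16): for a page at pindex 37, the
	 * reverse scan below may collect pages 36, 35, 34, 33 and 32,
	 * stopping at 32 because (37 - (ib - 1)) % 16 == 0 there, which
	 * leaves the cluster aligned on a 16-page boundary; the forward
	 * scan then extends the cluster from pindex 38 upward, assuming
	 * the neighboring pages are dirty, inactive and unheld.
	 */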
381 more:
382 while (ib && pageout_count < vm_pageout_page_count) {
383 vm_page_t p;
384
385 if (ib > pindex) {
386 ib = 0;
387 break;
388 }
389
390 if ((p = vm_page_prev(pb)) == NULL ||
391 (p->oflags & VPO_BUSY) != 0 || p->busy != 0) {
392 ib = 0;
393 break;
394 }
395 vm_page_lock(p);
396 vm_page_test_dirty(p);
397 if (p->dirty == 0 ||
398 p->queue != PQ_INACTIVE ||
399 p->hold_count != 0) { /* may be undergoing I/O */
400 vm_page_unlock(p);
401 ib = 0;
402 break;
403 }
404 vm_page_unlock(p);
405 mc[--page_base] = pb = p;
406 ++pageout_count;
407 ++ib;
408 /*
409 * alignment boundary, stop here and switch directions. Do
410 * not clear ib.
411 */
412 if ((pindex - (ib - 1)) % vm_pageout_page_count == 0)
413 break;
414 }
415
416 while (pageout_count < vm_pageout_page_count &&
417 pindex + is < object->size) {
418 vm_page_t p;
419
420 if ((p = vm_page_next(ps)) == NULL ||
421 (p->oflags & VPO_BUSY) != 0 || p->busy != 0)
422 break;
423 vm_page_lock(p);
424 vm_page_test_dirty(p);
425 if (p->dirty == 0 ||
426 p->queue != PQ_INACTIVE ||
427 p->hold_count != 0) { /* may be undergoing I/O */
428 vm_page_unlock(p);
429 break;
430 }
431 vm_page_unlock(p);
432 mc[page_base + pageout_count] = ps = p;
433 ++pageout_count;
434 ++is;
435 }
436
437 /*
438 * If we exhausted our forward scan, continue with the reverse scan
439 * when possible, even past a page boundary. This catches boundary
440 * conditions.
441 */
442 if (ib && pageout_count < vm_pageout_page_count)
443 goto more;
444
445 /*
446 * we allow reads during pageouts...
447 */
448 return (vm_pageout_flush(&mc[page_base], pageout_count, 0, 0, NULL));
449 }
450
451 /*
452 * vm_pageout_flush() - launder the given pages
453 *
454 * The given pages are laundered. Note that we setup for the start of
455 * I/O ( i.e. busy the page ), mark it read-only, and bump the object
456 * reference count all in here rather than in the parent. If we want
457 * the parent to do more sophisticated things we may have to change
458 * the ordering.
459 *
460 * Returned runlen is the count of pages between mreq and first
461 * page after mreq with status VM_PAGER_AGAIN.
462 */
463 int
464 vm_pageout_flush(vm_page_t *mc, int count, int flags, int mreq, int *prunlen)
465 {
466 vm_object_t object = mc[0]->object;
467 int pageout_status[count];
468 int numpagedout = 0;
469 int i, runlen;
470
471 VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
472 mtx_assert(&vm_page_queue_mtx, MA_NOTOWNED);
473
474 /*
475 * Initiate I/O. Bump the vm_page_t->busy counter and
476 * mark the pages read-only.
477 *
478 * We do not have to fixup the clean/dirty bits here... we can
479 * allow the pager to do it after the I/O completes.
480 *
481 * NOTE! mc[i]->dirty may be partial or fragmented due to an
482 * edge case with file fragments.
483 */
484 for (i = 0; i < count; i++) {
485 KASSERT(mc[i]->valid == VM_PAGE_BITS_ALL,
486 ("vm_pageout_flush: partially invalid page %p index %d/%d",
487 mc[i], i, count));
488 vm_page_io_start(mc[i]);
489 pmap_remove_write(mc[i]);
490 }
491 vm_object_pip_add(object, count);
492
493 vm_pager_put_pages(object, mc, count, flags, pageout_status);
494
495 runlen = count - mreq;
496 for (i = 0; i < count; i++) {
497 vm_page_t mt = mc[i];
498
499 KASSERT(pageout_status[i] == VM_PAGER_PEND ||
500 (mt->aflags & PGA_WRITEABLE) == 0,
501 ("vm_pageout_flush: page %p is not write protected", mt));
502 switch (pageout_status[i]) {
503 case VM_PAGER_OK:
504 case VM_PAGER_PEND:
505 numpagedout++;
506 break;
507 case VM_PAGER_BAD:
508 /*
509 * Page outside of range of object. Right now we
510 * essentially lose the changes by pretending it
511 * worked.
512 */
513 vm_page_undirty(mt);
514 break;
515 case VM_PAGER_ERROR:
516 case VM_PAGER_FAIL:
517 /*
518 * If page couldn't be paged out, then reactivate the
519 * page so it doesn't clog the inactive list. (We
520 * will try paging it out again later).
521 */
522 vm_page_lock(mt);
523 vm_page_activate(mt);
524 vm_page_unlock(mt);
525 break;
526 case VM_PAGER_AGAIN:
527 if (i >= mreq && i - mreq < runlen)
528 runlen = i - mreq;
529 break;
530 }
531
532 /*
533 * If the operation is still going, leave the page busy to
534 * block all other accesses. Also, leave the paging in
535 * progress indicator set so that we don't attempt an object
536 * collapse.
537 */
538 if (pageout_status[i] != VM_PAGER_PEND) {
539 vm_object_pip_wakeup(object);
540 vm_page_io_finish(mt);
541 if (vm_page_count_severe()) {
542 vm_page_lock(mt);
543 vm_page_try_to_cache(mt);
544 vm_page_unlock(mt);
545 }
546 }
547 }
548 if (prunlen != NULL)
549 *prunlen = runlen;
550 return (numpagedout);
551 }
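/*
 * Illustrative example (not part of the original source) of the runlen
 * contract documented above: with count = 8, mreq = 2 and pageout_status
 * of { OK, OK, PEND, PEND, AGAIN, OK, OK, OK }, the first VM_PAGER_AGAIN at
 * or after mreq is index 4, so *prunlen is set to 4 - 2 = 2: the number of
 * pages starting at mreq that were actually pushed to the pager before the
 * first "try again" result.
 */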
552
553 #if !defined(NO_SWAPPING)
554 /*
555 * vm_pageout_object_deactivate_pages
556 *
557 * Deactivate enough pages to satisfy the inactive target
558 * requirements.
559 *
560 * The object and map must be locked.
561 */
562 static void
563 vm_pageout_object_deactivate_pages(pmap_t pmap, vm_object_t first_object,
564 long desired)
565 {
566 vm_object_t backing_object, object;
567 vm_page_t p;
568 int actcount, remove_mode;
569
570 VM_OBJECT_LOCK_ASSERT(first_object, MA_OWNED);
571 if (first_object->type == OBJT_DEVICE ||
572 first_object->type == OBJT_SG)
573 return;
574 for (object = first_object;; object = backing_object) {
575 if (pmap_resident_count(pmap) <= desired)
576 goto unlock_return;
577 VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
578 if (object->type == OBJT_PHYS || object->paging_in_progress)
579 goto unlock_return;
580
581 remove_mode = 0;
582 if (object->shadow_count > 1)
583 remove_mode = 1;
584 /*
585 * Scan the object's entire memory queue.
586 */
587 TAILQ_FOREACH(p, &object->memq, listq) {
588 if (pmap_resident_count(pmap) <= desired)
589 goto unlock_return;
590 if ((p->oflags & VPO_BUSY) != 0 || p->busy != 0)
591 continue;
592 PCPU_INC(cnt.v_pdpages);
593 vm_page_lock(p);
594 if (p->wire_count != 0 || p->hold_count != 0 ||
595 !pmap_page_exists_quick(pmap, p)) {
596 vm_page_unlock(p);
597 continue;
598 }
599 actcount = pmap_ts_referenced(p);
600 if ((p->aflags & PGA_REFERENCED) != 0) {
601 if (actcount == 0)
602 actcount = 1;
603 vm_page_aflag_clear(p, PGA_REFERENCED);
604 }
605 if (p->queue != PQ_ACTIVE && actcount != 0) {
606 vm_page_activate(p);
607 p->act_count += actcount;
608 } else if (p->queue == PQ_ACTIVE) {
609 if (actcount == 0) {
610 p->act_count -= min(p->act_count,
611 ACT_DECLINE);
612 if (!remove_mode &&
613 (vm_pageout_algorithm ||
614 p->act_count == 0)) {
615 pmap_remove_all(p);
616 vm_page_deactivate(p);
617 } else {
618 vm_page_lock_queues();
619 vm_page_requeue(p);
620 vm_page_unlock_queues();
621 }
622 } else {
623 vm_page_activate(p);
624 if (p->act_count < ACT_MAX -
625 ACT_ADVANCE)
626 p->act_count += ACT_ADVANCE;
627 vm_page_lock_queues();
628 vm_page_requeue(p);
629 vm_page_unlock_queues();
630 }
631 } else if (p->queue == PQ_INACTIVE)
632 pmap_remove_all(p);
633 vm_page_unlock(p);
634 }
635 if ((backing_object = object->backing_object) == NULL)
636 goto unlock_return;
637 VM_OBJECT_LOCK(backing_object);
638 if (object != first_object)
639 VM_OBJECT_UNLOCK(object);
640 }
641 unlock_return:
642 if (object != first_object)
643 VM_OBJECT_UNLOCK(object);
644 }
645
646 /*
647 * Deactivate some number of pages in a map; try to do it fairly, but
648 * that is really hard to do.
649 */
650 static void
651 vm_pageout_map_deactivate_pages(map, desired)
652 vm_map_t map;
653 long desired;
654 {
655 vm_map_entry_t tmpe;
656 vm_object_t obj, bigobj;
657 int nothingwired;
658
659 if (!vm_map_trylock(map))
660 return;
661
662 bigobj = NULL;
663 nothingwired = TRUE;
664
665 /*
666 * first, search out the biggest object, and try to free pages from
667 * that.
668 */
669 tmpe = map->header.next;
670 while (tmpe != &map->header) {
671 if ((tmpe->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) {
672 obj = tmpe->object.vm_object;
673 if (obj != NULL && VM_OBJECT_TRYLOCK(obj)) {
674 if (obj->shadow_count <= 1 &&
675 (bigobj == NULL ||
676 bigobj->resident_page_count < obj->resident_page_count)) {
677 if (bigobj != NULL)
678 VM_OBJECT_UNLOCK(bigobj);
679 bigobj = obj;
680 } else
681 VM_OBJECT_UNLOCK(obj);
682 }
683 }
684 if (tmpe->wired_count > 0)
685 nothingwired = FALSE;
686 tmpe = tmpe->next;
687 }
688
689 if (bigobj != NULL) {
690 vm_pageout_object_deactivate_pages(map->pmap, bigobj, desired);
691 VM_OBJECT_UNLOCK(bigobj);
692 }
693 /*
694 * Next, hunt around for other pages to deactivate. We actually
695 * do this search sort of wrong -- .text first is not the best idea.
696 */
697 tmpe = map->header.next;
698 while (tmpe != &map->header) {
699 if (pmap_resident_count(vm_map_pmap(map)) <= desired)
700 break;
701 if ((tmpe->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) {
702 obj = tmpe->object.vm_object;
703 if (obj != NULL) {
704 VM_OBJECT_LOCK(obj);
705 vm_pageout_object_deactivate_pages(map->pmap, obj, desired);
706 VM_OBJECT_UNLOCK(obj);
707 }
708 }
709 tmpe = tmpe->next;
710 }
711
712 /*
713 * Remove all mappings if a process is swapped out, this will free page
714 * table pages.
715 */
716 if (desired == 0 && nothingwired) {
717 tmpe = map->header.next;
718 while (tmpe != &map->header) {
719 pmap_remove(vm_map_pmap(map), tmpe->start, tmpe->end);
720 tmpe = tmpe->next;
721 }
722 }
723 vm_map_unlock(map);
724 }
725 #endif /* !defined(NO_SWAPPING) */
726
727 /*
728 * vm_pageout_scan does the dirty work for the pageout daemon.
729 */
730 static void
731 vm_pageout_scan(int pass)
732 {
733 vm_page_t m, next;
734 struct vm_page marker;
735 int page_shortage, maxscan, pcount;
736 int addl_page_shortage, addl_page_shortage_init;
737 vm_object_t object;
738 int actcount;
739 int vnodes_skipped = 0;
740 int maxlaunder;
741
742 /*
743 * Decrease registered cache sizes.
744 */
745 EVENTHANDLER_INVOKE(vm_lowmem, 0);
746 /*
747 * We do this explicitly after the caches have been drained above.
748 */
749 uma_reclaim();
750
751 addl_page_shortage_init = atomic_readandclear_int(&vm_pageout_deficit);
752
753 /*
754 * Calculate the number of pages we want to either free or move
755 * to the cache.
756 */
757 page_shortage = vm_paging_target() + addl_page_shortage_init;
758
759 vm_pageout_init_marker(&marker, PQ_INACTIVE);
760
761 /*
762 * Start scanning the inactive queue for pages we can move to the
763 * cache or free. The scan will stop when the target is reached or
764 * we have scanned the entire inactive queue. Note that m->act_count
765 * is not used to form decisions for the inactive queue, only for the
766 * active queue.
767 *
768 * maxlaunder limits the number of dirty pages we flush per scan.
769 * For most systems a smaller value (16 or 32) is more robust under
770 * extreme memory and disk pressure because any unnecessary writes
771 * to disk can result in extreme performance degradation. However,
772 * systems with excessive dirty pages (especially when MAP_NOSYNC is
773 * used) will die horribly with limited laundering. If the pageout
774 * daemon cannot clean enough pages in the first pass, we let it go
775 * all out in succeeding passes.
776 */
777 if ((maxlaunder = vm_max_launder) <= 1)
778 maxlaunder = 1;
779 if (pass)
780 maxlaunder = 10000;
781 vm_page_lock_queues();
782 rescan0:
783 addl_page_shortage = addl_page_shortage_init;
784 maxscan = cnt.v_inactive_count;
785
786 for (m = TAILQ_FIRST(&vm_page_queues[PQ_INACTIVE].pl);
787 m != NULL && maxscan-- > 0 && page_shortage > 0;
788 m = next) {
789
790 cnt.v_pdpages++;
791
792 if (m->queue != PQ_INACTIVE)
793 goto rescan0;
794
795 next = TAILQ_NEXT(m, pageq);
796
797 /*
798 * skip marker pages
799 */
800 if (m->flags & PG_MARKER)
801 continue;
802
803 /*
804 * Lock the page.
805 */
806 if (!vm_pageout_page_lock(m, &next)) {
807 vm_page_unlock(m);
808 addl_page_shortage++;
809 continue;
810 }
811
812 /*
813 * A held page may be undergoing I/O, so skip it.
814 */
815 if (m->hold_count) {
816 vm_page_unlock(m);
817 vm_page_requeue(m);
818 addl_page_shortage++;
819 continue;
820 }
821
822 /*
823 * Don't mess with busy pages; keep them at the front of the
824 * queue, since they are most likely being paged out.
825 */
826 object = m->object;
827 if (!VM_OBJECT_TRYLOCK(object) &&
828 (!vm_pageout_fallback_object_lock(m, &next) ||
829 m->hold_count != 0)) {
830 VM_OBJECT_UNLOCK(object);
831 vm_page_unlock(m);
832 addl_page_shortage++;
833 continue;
834 }
835 if (m->busy || (m->oflags & VPO_BUSY)) {
836 vm_page_unlock(m);
837 VM_OBJECT_UNLOCK(object);
838 addl_page_shortage++;
839 continue;
840 }
841
842 /*
843 * If the object is not being used, we ignore previous
844 * references.
845 */
846 if (object->ref_count == 0) {
847 vm_page_aflag_clear(m, PGA_REFERENCED);
848 KASSERT(!pmap_page_is_mapped(m),
849 ("vm_pageout_scan: page %p is mapped", m));
850
851 /*
852 * Otherwise, if the page has been referenced while in the
853 * inactive queue, we bump the "activation count" upwards,
854 * making it less likely that the page will be added back to
855 * the inactive queue prematurely again. Here we check the
856 * page tables (or emulated bits, if any), given the upper
857 * level VM system not knowing anything about existing
858 * references.
859 */
860 } else if (((m->aflags & PGA_REFERENCED) == 0) &&
861 (actcount = pmap_ts_referenced(m))) {
862 vm_page_activate(m);
863 vm_page_unlock(m);
864 m->act_count += actcount + ACT_ADVANCE;
865 VM_OBJECT_UNLOCK(object);
866 continue;
867 }
868
869 /*
870 * If the upper level VM system knows about any page
871 * references, we activate the page. We also set the
872 * "activation count" higher than normal so that we are less
873 * likely to place pages back onto the inactive queue again.
874 */
875 if ((m->aflags & PGA_REFERENCED) != 0) {
876 vm_page_aflag_clear(m, PGA_REFERENCED);
877 actcount = pmap_ts_referenced(m);
878 vm_page_activate(m);
879 vm_page_unlock(m);
880 m->act_count += actcount + ACT_ADVANCE + 1;
881 VM_OBJECT_UNLOCK(object);
882 continue;
883 }
884
885 /*
886 * If the upper level VM system does not believe that the page
887 * is fully dirty, but it is mapped for write access, then we
888 * consult the pmap to see if the page's dirty status should
889 * be updated.
890 */
891 if (m->dirty != VM_PAGE_BITS_ALL &&
892 (m->aflags & PGA_WRITEABLE) != 0) {
893 /*
894 * Avoid a race condition: Unless write access is
895 * removed from the page, another processor could
896 * modify it before all access is removed by the call
897 * to vm_page_cache() below. If vm_page_cache() finds
898 * that the page has been modified when it removes all
899 * access, it panics because it cannot cache dirty
900 * pages. In principle, we could eliminate just write
901 * access here rather than all access. In the expected
902 * case, when there are no last instant modifications
903 * to the page, removing all access will be cheaper
904 * overall.
905 */
906 if (pmap_is_modified(m))
907 vm_page_dirty(m);
908 else if (m->dirty == 0)
909 pmap_remove_all(m);
910 }
911
912 if (m->valid == 0) {
913 /*
914 * Invalid pages can be easily freed
915 */
916 vm_page_free(m);
917 cnt.v_dfree++;
918 --page_shortage;
919 } else if (m->dirty == 0) {
920 /*
921 * Clean pages can be placed onto the cache queue.
922 * This effectively frees them.
923 */
924 vm_page_cache(m);
925 --page_shortage;
926 } else if ((m->flags & PG_WINATCFLS) == 0 && pass == 0) {
927 /*
928 * Dirty pages need to be paged out, but flushing
929 * a page is extremely expensive versus freeing
930 * a clean page. Rather than artificially limiting
931 * the number of pages we can flush, we instead give
932 * dirty pages extra priority on the inactive queue
933 * by forcing them to be cycled through the queue
934 * twice before being flushed, after which the
935 * (now clean) page will cycle through once more
936 * before being freed. This significantly extends
937 * the thrash point for a heavily loaded machine.
938 */
939 m->flags |= PG_WINATCFLS;
940 vm_page_requeue(m);
941 } else if (maxlaunder > 0) {
942 /*
943 * We always want to try to flush some dirty pages if
944 * we encounter them, to keep the system stable.
945 * Normally this number is small, but under extreme
946 * pressure where there are insufficient clean pages
947 * on the inactive queue, we may have to go all out.
948 */
949 int swap_pageouts_ok, vfslocked = 0;
950 struct vnode *vp = NULL;
951 struct mount *mp = NULL;
952
953 if ((object->type != OBJT_SWAP) && (object->type != OBJT_DEFAULT)) {
954 swap_pageouts_ok = 1;
955 } else {
956 swap_pageouts_ok = !(defer_swap_pageouts || disable_swap_pageouts);
957 swap_pageouts_ok |= (!disable_swap_pageouts && defer_swap_pageouts &&
958 vm_page_count_min());
959
960 }
961
962 /*
963 * We don't bother paging objects that are "dead".
964 * Those objects are in a "rundown" state.
965 */
966 if (!swap_pageouts_ok || (object->flags & OBJ_DEAD)) {
967 vm_page_unlock(m);
968 VM_OBJECT_UNLOCK(object);
969 vm_page_requeue(m);
970 continue;
971 }
972
973 /*
974 * Following operations may unlock
975 * vm_page_queue_mtx, invalidating the 'next'
976 * pointer. To prevent an inordinate number
977 * of restarts we use our marker to remember
978 * our place.
979 *
980 */
981 TAILQ_INSERT_AFTER(&vm_page_queues[PQ_INACTIVE].pl,
982 m, &marker, pageq);
983 /*
984 * The object is already known NOT to be dead. It
985 * is possible for the vget() to block the whole
986 * pageout daemon, but the new low-memory handling
987 * code should prevent it.
988 *
989 * The previous code skipped locked vnodes and, worse,
990 * reordered pages in the queue. This results in
991 * completely non-deterministic operation and, on a
992 * busy system, can lead to extremely non-optimal
993 * pageouts. For example, it can cause clean pages
994 * to be freed and dirty pages to be moved to the end
995 * of the queue. Since dirty pages are also moved to
996 * the end of the queue once-cleaned, this gives
997 * way too large a weighting to deferring the freeing
998 * of dirty pages.
999 *
1000 * We can't wait forever for the vnode lock, we might
1001 * deadlock due to a vn_read() getting stuck in
1002 * vm_wait while holding this vnode. We skip the
1003 * vnode if we can't get it in a reasonable amount
1004 * of time.
1005 */
1006 if (object->type == OBJT_VNODE) {
1007 vm_page_unlock_queues();
1008 vm_page_unlock(m);
1009 vp = object->handle;
1010 if (vp->v_type == VREG &&
1011 vn_start_write(vp, &mp, V_NOWAIT) != 0) {
1012 mp = NULL;
1013 ++pageout_lock_miss;
1014 if (object->flags & OBJ_MIGHTBEDIRTY)
1015 vnodes_skipped++;
1016 vm_page_lock_queues();
1017 goto unlock_and_continue;
1018 }
1019 KASSERT(mp != NULL,
1020 ("vp %p with NULL v_mount", vp));
1021 vm_object_reference_locked(object);
1022 VM_OBJECT_UNLOCK(object);
1023 vfslocked = VFS_LOCK_GIANT(vp->v_mount);
1024 if (vget(vp, LK_EXCLUSIVE | LK_TIMELOCK,
1025 curthread)) {
1026 VM_OBJECT_LOCK(object);
1027 vm_page_lock_queues();
1028 ++pageout_lock_miss;
1029 if (object->flags & OBJ_MIGHTBEDIRTY)
1030 vnodes_skipped++;
1031 vp = NULL;
1032 goto unlock_and_continue;
1033 }
1034 VM_OBJECT_LOCK(object);
1035 vm_page_lock(m);
1036 vm_page_lock_queues();
1037 /*
1038 * The page might have been moved to another
1039 * queue during potential blocking in vget()
1040 * above. The page might have been freed and
1041 * reused for another vnode.
1042 */
1043 if (m->queue != PQ_INACTIVE ||
1044 m->object != object ||
1045 TAILQ_NEXT(m, pageq) != &marker) {
1046 vm_page_unlock(m);
1047 if (object->flags & OBJ_MIGHTBEDIRTY)
1048 vnodes_skipped++;
1049 goto unlock_and_continue;
1050 }
1051
1052 /*
1053 * The page may have been busied during the
1054 * blocking in vget(). We don't move the
1055 * page back onto the end of the queue; the
1056 * statistics are more accurate this way.
1057 */
1058 if (m->busy || (m->oflags & VPO_BUSY)) {
1059 vm_page_unlock(m);
1060 goto unlock_and_continue;
1061 }
1062
1063 /*
1064 * If the page has become held, it might
1065 * be undergoing I/O, so skip it.
1066 */
1067 if (m->hold_count) {
1068 vm_page_unlock(m);
1069 vm_page_requeue(m);
1070 if (object->flags & OBJ_MIGHTBEDIRTY)
1071 vnodes_skipped++;
1072 goto unlock_and_continue;
1073 }
1074 }
1075
1076 /*
1077 * If a page is dirty, then it is either being washed
1078 * (but not yet cleaned) or it is still in the
1079 * laundry. If it is still in the laundry, then we
1080 * start the cleaning operation.
1081 *
1082 * decrement page_shortage on success to account for
1083 * the (future) cleaned page. Otherwise we could wind
1084 * up laundering or cleaning too many pages.
1085 */
1086 vm_page_unlock_queues();
1087 if (vm_pageout_clean(m) != 0) {
1088 --page_shortage;
1089 --maxlaunder;
1090 }
1091 vm_page_lock_queues();
1092 unlock_and_continue:
1093 vm_page_lock_assert(m, MA_NOTOWNED);
1094 VM_OBJECT_UNLOCK(object);
1095 if (mp != NULL) {
1096 vm_page_unlock_queues();
1097 if (vp != NULL)
1098 vput(vp);
1099 VFS_UNLOCK_GIANT(vfslocked);
1100 vm_object_deallocate(object);
1101 vn_finished_write(mp);
1102 vm_page_lock_queues();
1103 }
1104 next = TAILQ_NEXT(&marker, pageq);
1105 TAILQ_REMOVE(&vm_page_queues[PQ_INACTIVE].pl,
1106 &marker, pageq);
1107 vm_page_lock_assert(m, MA_NOTOWNED);
1108 continue;
1109 }
1110 vm_page_unlock(m);
1111 VM_OBJECT_UNLOCK(object);
1112 }
1113
1114 /*
1115 * Compute the number of pages we want to try to move from the
1116 * active queue to the inactive queue.
1117 */
1118 page_shortage = vm_paging_target() +
1119 cnt.v_inactive_target - cnt.v_inactive_count;
1120 page_shortage += addl_page_shortage;
1121
1122 /*
1123 * Scan the active queue for things we can deactivate. We nominally
1124 * track the per-page activity counter and use it to locate
1125 * deactivation candidates.
1126 */
1127 pcount = cnt.v_active_count;
1128 m = TAILQ_FIRST(&vm_page_queues[PQ_ACTIVE].pl);
1129 mtx_assert(&vm_page_queue_mtx, MA_OWNED);
1130
1131 while ((m != NULL) && (pcount-- > 0) && (page_shortage > 0)) {
1132
1133 KASSERT(m->queue == PQ_ACTIVE,
1134 ("vm_pageout_scan: page %p isn't active", m));
1135
1136 next = TAILQ_NEXT(m, pageq);
1137 if ((m->flags & PG_MARKER) != 0) {
1138 m = next;
1139 continue;
1140 }
1141 if (!vm_pageout_page_lock(m, &next)) {
1142 vm_page_unlock(m);
1143 m = next;
1144 continue;
1145 }
1146 object = m->object;
1147 if (!VM_OBJECT_TRYLOCK(object) &&
1148 !vm_pageout_fallback_object_lock(m, &next)) {
1149 VM_OBJECT_UNLOCK(object);
1150 vm_page_unlock(m);
1151 m = next;
1152 continue;
1153 }
1154
1155 /*
1156 * Don't deactivate pages that are busy.
1157 */
1158 if ((m->busy != 0) ||
1159 (m->oflags & VPO_BUSY) ||
1160 (m->hold_count != 0)) {
1161 vm_page_unlock(m);
1162 VM_OBJECT_UNLOCK(object);
1163 vm_page_requeue(m);
1164 m = next;
1165 continue;
1166 }
1167
1168 /*
1169 * The count for pagedaemon pages is done after checking the
1170 * page for eligibility...
1171 */
1172 cnt.v_pdpages++;
1173
1174 /*
1175 * Check to see "how much" the page has been used.
1176 */
1177 actcount = 0;
1178 if (object->ref_count != 0) {
1179 if (m->aflags & PGA_REFERENCED) {
1180 actcount += 1;
1181 }
1182 actcount += pmap_ts_referenced(m);
1183 if (actcount) {
1184 m->act_count += ACT_ADVANCE + actcount;
1185 if (m->act_count > ACT_MAX)
1186 m->act_count = ACT_MAX;
1187 }
1188 }
1189
1190 /*
1191 * Since we have "tested" this bit, we need to clear it now.
1192 */
1193 vm_page_aflag_clear(m, PGA_REFERENCED);
1194
1195 /*
1196 * Only if an object is currently being used, do we use the
1197 * page activation count stats.
1198 */
1199 if (actcount && (object->ref_count != 0)) {
1200 vm_page_requeue(m);
1201 } else {
1202 m->act_count -= min(m->act_count, ACT_DECLINE);
1203 if (vm_pageout_algorithm ||
1204 object->ref_count == 0 ||
1205 m->act_count == 0) {
1206 page_shortage--;
1207 if (object->ref_count == 0) {
1208 KASSERT(!pmap_page_is_mapped(m),
1209 ("vm_pageout_scan: page %p is mapped", m));
1210 if (m->dirty == 0)
1211 vm_page_cache(m);
1212 else
1213 vm_page_deactivate(m);
1214 } else {
1215 vm_page_deactivate(m);
1216 }
1217 } else {
1218 vm_page_requeue(m);
1219 }
1220 }
1221 vm_page_unlock(m);
1222 VM_OBJECT_UNLOCK(object);
1223 m = next;
1224 }
1225 vm_page_unlock_queues();
1226 #if !defined(NO_SWAPPING)
1227 /*
1228 * Idle process swapout -- run once per second.
1229 */
1230 if (vm_swap_idle_enabled) {
1231 static long lsec;
1232 if (time_second != lsec) {
1233 vm_req_vmdaemon(VM_SWAP_IDLE);
1234 lsec = time_second;
1235 }
1236 }
1237 #endif
1238
1239 /*
1240 * If we didn't get enough free pages, and we have skipped a vnode
1241 * in a writeable object, wakeup the sync daemon. And kick swapout
1242 * if we did not get enough free pages.
1243 */
1244 if (vm_paging_target() > 0) {
1245 if (vnodes_skipped && vm_page_count_min())
1246 (void) speedup_syncer();
1247 #if !defined(NO_SWAPPING)
1248 if (vm_swap_enabled && vm_page_count_target())
1249 vm_req_vmdaemon(VM_SWAP_NORMAL);
1250 #endif
1251 }
1252
1253 /*
1254 * If we are critically low on one of RAM or swap and low on
1255 * the other, kill the largest process. However, we avoid
1256 * doing this on the first pass in order to give ourselves a
1257 * chance to flush out dirty vnode-backed pages and to allow
1258 * active pages to be moved to the inactive queue and reclaimed.
1259 */
1260 if (pass != 0 &&
1261 ((swap_pager_avail < 64 && vm_page_count_min()) ||
1262 (swap_pager_full && vm_paging_target() > 0)))
1263 vm_pageout_oom(VM_OOM_MEM);
1264 }
1265
1266
1267 void
1268 vm_pageout_oom(int shortage)
1269 {
1270 struct proc *p, *bigproc;
1271 vm_offset_t size, bigsize;
1272 struct thread *td;
1273 struct vmspace *vm;
1274
1275 /*
1276 * We keep the process bigproc locked once we find it to keep anyone
1277 * from messing with it; however, there is a possibility of
1278 * deadlock if process B is bigproc and one of its child processes
1279 * attempts to propagate a signal to B while we are waiting for A's
1280 * lock while walking this list. To avoid this, we don't block on
1281 * the process lock but just skip a process if it is already locked.
1282 */
1283 bigproc = NULL;
1284 bigsize = 0;
1285 sx_slock(&allproc_lock);
1286 FOREACH_PROC_IN_SYSTEM(p) {
1287 int breakout;
1288
1289 if (PROC_TRYLOCK(p) == 0)
1290 continue;
1291 /*
1292 * If this is a system, protected or killed process, skip it.
1293 */
1294 if (p->p_state != PRS_NORMAL ||
1295 (p->p_flag & (P_INEXEC | P_PROTECTED | P_SYSTEM)) ||
1296 (p->p_pid == 1) || P_KILLED(p) ||
1297 ((p->p_pid < 48) && (swap_pager_avail != 0))) {
1298 PROC_UNLOCK(p);
1299 continue;
1300 }
1301 /*
1302 * If the process is in a non-running type state,
1303 * don't touch it. Check all the threads individually.
1304 */
1305 breakout = 0;
1306 FOREACH_THREAD_IN_PROC(p, td) {
1307 thread_lock(td);
1308 if (!TD_ON_RUNQ(td) &&
1309 !TD_IS_RUNNING(td) &&
1310 !TD_IS_SLEEPING(td) &&
1311 !TD_IS_SUSPENDED(td)) {
1312 thread_unlock(td);
1313 breakout = 1;
1314 break;
1315 }
1316 thread_unlock(td);
1317 }
1318 if (breakout) {
1319 PROC_UNLOCK(p);
1320 continue;
1321 }
1322 /*
1323 * get the process size
1324 */
1325 vm = vmspace_acquire_ref(p);
1326 if (vm == NULL) {
1327 PROC_UNLOCK(p);
1328 continue;
1329 }
1330 if (!vm_map_trylock_read(&vm->vm_map)) {
1331 vmspace_free(vm);
1332 PROC_UNLOCK(p);
1333 continue;
1334 }
1335 size = vmspace_swap_count(vm);
1336 vm_map_unlock_read(&vm->vm_map);
1337 if (shortage == VM_OOM_MEM)
1338 size += vmspace_resident_count(vm);
1339 vmspace_free(vm);
1340 /*
1341 * if this process is bigger than the biggest one so far,
1342 * remember it.
1343 */
1344 if (size > bigsize) {
1345 if (bigproc != NULL)
1346 PROC_UNLOCK(bigproc);
1347 bigproc = p;
1348 bigsize = size;
1349 } else
1350 PROC_UNLOCK(p);
1351 }
1352 sx_sunlock(&allproc_lock);
1353 if (bigproc != NULL) {
1354 killproc(bigproc, "out of swap space");
1355 sched_nice(bigproc, PRIO_MIN);
1356 PROC_UNLOCK(bigproc);
1357 wakeup(&cnt.v_free_count);
1358 }
1359 }
1360
1361 /*
1362 * This routine tries to maintain the pseudo LRU active queue,
1363 * so that during long periods of time when there is no paging,
1364 * some statistic accumulation still occurs. This code
1365 * helps the situation where paging just starts to occur.
1366 */
1367 static void
1368 vm_pageout_page_stats()
1369 {
1370 vm_object_t object;
1371 vm_page_t m,next;
1372 int pcount,tpcount; /* Number of pages to check */
1373 static int fullintervalcount = 0;
1374 int page_shortage;
1375
1376 page_shortage =
1377 (cnt.v_inactive_target + cnt.v_cache_max + cnt.v_free_min) -
1378 (cnt.v_free_count + cnt.v_inactive_count + cnt.v_cache_count);
1379
1380 if (page_shortage <= 0)
1381 return;
1382
1383 vm_page_lock_queues();
1384 pcount = cnt.v_active_count;
1385 fullintervalcount += vm_pageout_stats_interval;
1386 if (fullintervalcount < vm_pageout_full_stats_interval) {
1387 tpcount = (int64_t)vm_pageout_stats_max * cnt.v_active_count /
1388 cnt.v_page_count;
1389 if (pcount > tpcount)
1390 pcount = tpcount;
1391 } else {
1392 fullintervalcount = 0;
1393 }
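	/*
	 * Illustrative example (not part of the original source): with
	 * vm_pageout_stats_max at, say, 7168 pages and half of all pages
	 * sitting on the active queue, the partial scan above is capped at
	 * roughly 3584 pages; only when fullintervalcount reaches
	 * vm_pageout_full_stats_interval is the entire active queue walked.
	 */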
1394
1395 m = TAILQ_FIRST(&vm_page_queues[PQ_ACTIVE].pl);
1396 while ((m != NULL) && (pcount-- > 0)) {
1397 int actcount;
1398
1399 KASSERT(m->queue == PQ_ACTIVE,
1400 ("vm_pageout_page_stats: page %p isn't active", m));
1401
1402 next = TAILQ_NEXT(m, pageq);
1403 if ((m->flags & PG_MARKER) != 0) {
1404 m = next;
1405 continue;
1406 }
1407 vm_page_lock_assert(m, MA_NOTOWNED);
1408 if (!vm_pageout_page_lock(m, &next)) {
1409 vm_page_unlock(m);
1410 m = next;
1411 continue;
1412 }
1413 object = m->object;
1414 if (!VM_OBJECT_TRYLOCK(object) &&
1415 !vm_pageout_fallback_object_lock(m, &next)) {
1416 VM_OBJECT_UNLOCK(object);
1417 vm_page_unlock(m);
1418 m = next;
1419 continue;
1420 }
1421
1422 /*
1423 * Don't deactivate pages that are busy.
1424 */
1425 if ((m->busy != 0) ||
1426 (m->oflags & VPO_BUSY) ||
1427 (m->hold_count != 0)) {
1428 vm_page_unlock(m);
1429 VM_OBJECT_UNLOCK(object);
1430 vm_page_requeue(m);
1431 m = next;
1432 continue;
1433 }
1434
1435 actcount = 0;
1436 if (m->aflags & PGA_REFERENCED) {
1437 vm_page_aflag_clear(m, PGA_REFERENCED);
1438 actcount += 1;
1439 }
1440
1441 actcount += pmap_ts_referenced(m);
1442 if (actcount) {
1443 m->act_count += ACT_ADVANCE + actcount;
1444 if (m->act_count > ACT_MAX)
1445 m->act_count = ACT_MAX;
1446 vm_page_requeue(m);
1447 } else {
1448 if (m->act_count == 0) {
1449 /*
1450 * We turn off page access, so that we have
1451 * more accurate RSS stats. We don't do this
1452 * in the normal page deactivation when the
1453 * system is loaded VM wise, because the
1454 * cost of the large number of page protect
1455 * operations would be higher than the value
1456 * of doing the operation.
1457 */
1458 pmap_remove_all(m);
1459 vm_page_deactivate(m);
1460 } else {
1461 m->act_count -= min(m->act_count, ACT_DECLINE);
1462 vm_page_requeue(m);
1463 }
1464 }
1465 vm_page_unlock(m);
1466 VM_OBJECT_UNLOCK(object);
1467 m = next;
1468 }
1469 vm_page_unlock_queues();
1470 }
1471
1472 /*
1473 * vm_pageout is the high level pageout daemon.
1474 */
1475 static void
1476 vm_pageout()
1477 {
1478 int error, pass;
1479
1480 /*
1481 * Initialize some paging parameters.
1482 */
1483 cnt.v_interrupt_free_min = 2;
1484 if (cnt.v_page_count < 2000)
1485 vm_pageout_page_count = 8;
1486
1487 /*
1488 * v_free_reserved needs to include enough for the largest
1489 * swap pager structures plus enough for any pv_entry structs
1490 * when paging.
1491 */
1492 if (cnt.v_page_count > 1024)
1493 cnt.v_free_min = 4 + (cnt.v_page_count - 1024) / 200;
1494 else
1495 cnt.v_free_min = 4;
1496 cnt.v_pageout_free_min = (2*MAXBSIZE)/PAGE_SIZE +
1497 cnt.v_interrupt_free_min;
1498 cnt.v_free_reserved = vm_pageout_page_count +
1499 cnt.v_pageout_free_min + (cnt.v_page_count / 768);
1500 cnt.v_free_severe = cnt.v_free_min / 2;
1501 cnt.v_free_min += cnt.v_free_reserved;
1502 cnt.v_free_severe += cnt.v_free_reserved;
1503
1504 /*
1505 * v_free_target and v_cache_min control pageout hysteresis. Note
1506 * that these are more a measure of the VM cache queue hysteresis
1507 * than the VM free queue. Specifically, v_free_target is the
1508 * high water mark (free+cache pages).
1509 *
1510 * v_free_reserved + v_cache_min (mostly means v_cache_min) is the
1511 * low water mark, while v_free_min is the stop. v_cache_min must
1512 * be big enough to handle memory needs while the pageout daemon
1513 * is signalled and run to free more pages.
1514 */
1515 if (cnt.v_free_count > 6144)
1516 cnt.v_free_target = 4 * cnt.v_free_min + cnt.v_free_reserved;
1517 else
1518 cnt.v_free_target = 2 * cnt.v_free_min + cnt.v_free_reserved;
1519
1520 if (cnt.v_free_count > 2048) {
1521 cnt.v_cache_min = cnt.v_free_target;
1522 cnt.v_cache_max = 2 * cnt.v_cache_min;
1523 cnt.v_inactive_target = (3 * cnt.v_free_target) / 2;
1524 } else {
1525 cnt.v_cache_min = 0;
1526 cnt.v_cache_max = 0;
1527 cnt.v_inactive_target = cnt.v_free_count / 4;
1528 }
1529 if (cnt.v_inactive_target > cnt.v_free_count / 3)
1530 cnt.v_inactive_target = cnt.v_free_count / 3;
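	/*
	 * Worked example (illustrative, assuming 4 KB pages, MAXBSIZE of
	 * 64 KB, roughly 1 GB of RAM, i.e. cnt.v_page_count ~= 262144, and
	 * most pages free at boot so the > 6144 branch is taken):
	 *
	 *	v_free_min         = 4 + (262144 - 1024) / 200   ~= 1309
	 *	v_pageout_free_min = (2 * 65536) / 4096 + 2       =  34
	 *	v_free_reserved    = 16 + 34 + 262144 / 768       ~= 391
	 *	v_free_min (final) = 1309 + 391                   = 1700
	 *	v_free_target      = 4 * 1700 + 391               = 7191
	 *
	 * so the pagedaemon starts working well before free memory is
	 * actually exhausted. The exact figures depend on the tunables and
	 * the platform page size.
	 */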
1531
1532 /* XXX does not really belong here */
1533 if (vm_page_max_wired == 0)
1534 vm_page_max_wired = cnt.v_free_count / 3;
1535
1536 if (vm_pageout_stats_max == 0)
1537 vm_pageout_stats_max = cnt.v_free_target;
1538
1539 /*
1540 * Set interval in seconds for stats scan.
1541 */
1542 if (vm_pageout_stats_interval == 0)
1543 vm_pageout_stats_interval = 5;
1544 if (vm_pageout_full_stats_interval == 0)
1545 vm_pageout_full_stats_interval = vm_pageout_stats_interval * 4;
1546
1547 swap_pager_swap_init();
1548 pass = 0;
1549 /*
1550 * The pageout daemon is never done, so loop forever.
1551 */
1552 while (TRUE) {
1553 /*
1554 * If we have enough free memory, wakeup waiters. Do
1555 * not clear vm_pages_needed until we reach our target,
1556 * otherwise we may be woken up over and over again and
1557 * waste a lot of cpu.
1558 */
1559 mtx_lock(&vm_page_queue_free_mtx);
1560 if (vm_pages_needed && !vm_page_count_min()) {
1561 if (!vm_paging_needed())
1562 vm_pages_needed = 0;
1563 wakeup(&cnt.v_free_count);
1564 }
1565 if (vm_pages_needed) {
1566 /*
1567 * Still not done, take a second pass without waiting
1568 * (unlimited dirty cleaning), otherwise sleep a bit
1569 * and try again.
1570 */
1571 ++pass;
1572 if (pass > 1)
1573 msleep(&vm_pages_needed,
1574 &vm_page_queue_free_mtx, PVM, "psleep",
1575 hz / 2);
1576 } else {
1577 /*
1578 * Good enough, sleep & handle stats. Prime the pass
1579 * for the next run.
1580 */
1581 if (pass > 1)
1582 pass = 1;
1583 else
1584 pass = 0;
1585 error = msleep(&vm_pages_needed,
1586 &vm_page_queue_free_mtx, PVM, "psleep",
1587 vm_pageout_stats_interval * hz);
1588 if (error && !vm_pages_needed) {
1589 mtx_unlock(&vm_page_queue_free_mtx);
1590 pass = 0;
1591 vm_pageout_page_stats();
1592 continue;
1593 }
1594 }
1595 if (vm_pages_needed)
1596 cnt.v_pdwakeups++;
1597 mtx_unlock(&vm_page_queue_free_mtx);
1598 vm_pageout_scan(pass);
1599 }
1600 }
1601
1602 /*
1603 * Unless the free page queue lock is held by the caller, this function
1604 * should be regarded as advisory. Specifically, the caller should
1605 * not msleep() on &cnt.v_free_count following this function unless
1606 * the free page queue lock is held until the msleep() is performed.
1607 */
1608 void
1609 pagedaemon_wakeup()
1610 {
1611
1612 if (!vm_pages_needed && curthread->td_proc != pageproc) {
1613 vm_pages_needed = 1;
1614 wakeup(&vm_pages_needed);
1615 }
1616 }
1617
1618 #if !defined(NO_SWAPPING)
1619 static void
1620 vm_req_vmdaemon(int req)
1621 {
1622 static int lastrun = 0;
1623
1624 mtx_lock(&vm_daemon_mtx);
1625 vm_pageout_req_swapout |= req;
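	/*
	 * Note (added for clarity): wake the vm_daemon at most about once
	 * per second; the second comparison handles the ticks counter
	 * wrapping around.
	 */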
1626 if ((ticks > (lastrun + hz)) || (ticks < lastrun)) {
1627 wakeup(&vm_daemon_needed);
1628 lastrun = ticks;
1629 }
1630 mtx_unlock(&vm_daemon_mtx);
1631 }
1632
1633 static void
1634 vm_daemon()
1635 {
1636 struct rlimit rsslim;
1637 struct proc *p;
1638 struct thread *td;
1639 struct vmspace *vm;
1640 int breakout, swapout_flags, tryagain, attempts;
1641 #ifdef RACCT
1642 uint64_t rsize, ravailable;
1643 #endif
1644
1645 while (TRUE) {
1646 mtx_lock(&vm_daemon_mtx);
1647 #ifdef RACCT
1648 msleep(&vm_daemon_needed, &vm_daemon_mtx, PPAUSE, "psleep", hz);
1649 #else
1650 msleep(&vm_daemon_needed, &vm_daemon_mtx, PPAUSE, "psleep", 0);
1651 #endif
1652 swapout_flags = vm_pageout_req_swapout;
1653 vm_pageout_req_swapout = 0;
1654 mtx_unlock(&vm_daemon_mtx);
1655 if (swapout_flags)
1656 swapout_procs(swapout_flags);
1657
1658 /*
1659 * scan the processes for those exceeding their rlimits or
1660 * that are swapped out -- deactivate pages
1661 */
1662 tryagain = 0;
1663 attempts = 0;
1664 again:
1665 attempts++;
1666 sx_slock(&allproc_lock);
1667 FOREACH_PROC_IN_SYSTEM(p) {
1668 vm_pindex_t limit, size;
1669
1670 /*
1671 * if this is a system process or if we have already
1672 * looked at this process, skip it.
1673 */
1674 PROC_LOCK(p);
1675 if (p->p_state != PRS_NORMAL ||
1676 p->p_flag & (P_INEXEC | P_SYSTEM | P_WEXIT)) {
1677 PROC_UNLOCK(p);
1678 continue;
1679 }
1680 /*
1681 * if the process is in a non-running type state,
1682 * don't touch it.
1683 */
1684 breakout = 0;
1685 FOREACH_THREAD_IN_PROC(p, td) {
1686 thread_lock(td);
1687 if (!TD_ON_RUNQ(td) &&
1688 !TD_IS_RUNNING(td) &&
1689 !TD_IS_SLEEPING(td) &&
1690 !TD_IS_SUSPENDED(td)) {
1691 thread_unlock(td);
1692 breakout = 1;
1693 break;
1694 }
1695 thread_unlock(td);
1696 }
1697 if (breakout) {
1698 PROC_UNLOCK(p);
1699 continue;
1700 }
1701 /*
1702 * get a limit
1703 */
1704 lim_rlimit(p, RLIMIT_RSS, &rsslim);
1705 limit = OFF_TO_IDX(
1706 qmin(rsslim.rlim_cur, rsslim.rlim_max));
1707
1708 /*
1709 * let processes that are swapped out really be
1710 * swapped out; set the limit to nothing (this will
1711 * force a swap-out).
1712 */
1713 if ((p->p_flag & P_INMEM) == 0)
1714 limit = 0; /* XXX */
1715 vm = vmspace_acquire_ref(p);
1716 PROC_UNLOCK(p);
1717 if (vm == NULL)
1718 continue;
1719
1720 size = vmspace_resident_count(vm);
1721 if (limit >= 0 && size >= limit) {
1722 vm_pageout_map_deactivate_pages(
1723 &vm->vm_map, limit);
1724 }
1725 #ifdef RACCT
1726 rsize = IDX_TO_OFF(size);
1727 PROC_LOCK(p);
1728 racct_set(p, RACCT_RSS, rsize);
1729 ravailable = racct_get_available(p, RACCT_RSS);
1730 PROC_UNLOCK(p);
1731 if (rsize > ravailable) {
1732 /*
1733 * Don't be overly aggressive; this might be
1734 * an innocent process, and the limit could've
1735 * been exceeded by some memory hog. Don't
1736 * try to deactivate more than 1/4th of process'
1737 * resident set size.
1738 */
1739 if (attempts <= 8) {
1740 if (ravailable < rsize - (rsize / 4))
1741 ravailable = rsize - (rsize / 4);
1742 }
1743 vm_pageout_map_deactivate_pages(
1744 &vm->vm_map, OFF_TO_IDX(ravailable));
1745 /* Update RSS usage after paging out. */
1746 size = vmspace_resident_count(vm);
1747 rsize = IDX_TO_OFF(size);
1748 PROC_LOCK(p);
1749 racct_set(p, RACCT_RSS, rsize);
1750 PROC_UNLOCK(p);
1751 if (rsize > ravailable)
1752 tryagain = 1;
1753 }
1754 #endif
1755 vmspace_free(vm);
1756 }
1757 sx_sunlock(&allproc_lock);
1758 if (tryagain != 0 && attempts <= 10)
1759 goto again;
1760 }
1761 }
1762 #endif /* !defined(NO_SWAPPING) */