FreeBSD/Linux Kernel Cross Reference
sys/uvm/uvm_pdaemon.c
/*	$NetBSD: uvm_pdaemon.c,v 1.80 2006/11/01 10:18:27 yamt Exp $	*/

/*
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * Copyright (c) 1991, 1993, The Regents of the University of California.
 *
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Charles D. Cranor,
 *      Washington University, the University of California, Berkeley and
 *      its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *      @(#)vm_pageout.c        8.5 (Berkeley) 2/14/94
 * from: Id: uvm_pdaemon.c,v 1.1.2.32 1998/02/06 05:26:30 chs Exp
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 * uvm_pdaemon.c: the page daemon
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_pdaemon.c,v 1.80 2006/11/01 10:18:27 yamt Exp $");

#include "opt_uvmhist.h"
#include "opt_readahead.h"

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/pool.h>
#include <sys/buf.h>

#include <uvm/uvm.h>
#include <uvm/uvm_pdpolicy.h>
/*
 * UVMPD_NUMDIRTYREACTS is how many dirty pages the pagedaemon will reactivate
 * in a pass thru the inactive list when swap is full.  the value should be
 * "small"... if it's too large we'll cycle the active pages thru the inactive
 * queue too quickly for them to be referenced and avoid being freed.
 */

#define UVMPD_NUMDIRTYREACTS 16


/*
 * local prototypes
 */

static void uvmpd_scan(void);
static void uvmpd_scan_queue(void);
static void uvmpd_tune(void);

/*
 * XXX hack to avoid hangs when large processes fork.
 */
int uvm_extrapages;

/*
 * uvm_wait: wait (sleep) for the page daemon to free some pages
 *
 * => should be called with all locks released
 * => should _not_ be called by the page daemon (to avoid deadlock)
 */

void
uvm_wait(const char *wmsg)
{
        int timo = 0;
        int s = splbio();

        /*
         * check for page daemon going to sleep (waiting for itself)
         */

        if (curproc == uvm.pagedaemon_proc && uvmexp.paging == 0) {
                /*
                 * now we have a problem: the pagedaemon wants to go to
                 * sleep until it frees more memory.  but how can it
                 * free more memory if it is asleep?  that is a deadlock.
                 * we have two options:
                 *  [1] panic now
                 *  [2] put a timeout on the sleep, thus causing the
                 *      pagedaemon to only pause (rather than sleep forever)
                 *
                 * note that option [2] will only help us if we get lucky
                 * and some other process on the system breaks the deadlock
                 * by exiting or freeing memory (thus allowing the pagedaemon
                 * to continue).  for now we panic if DEBUG is defined,
                 * otherwise we hope for the best with option [2] (better
                 * yet, this should never happen in the first place!).
                 */

                printf("pagedaemon: deadlock detected!\n");
                timo = hz >> 3;         /* set timeout */
#if defined(DEBUG)
                /* DEBUG: panic so we can debug it */
                panic("pagedaemon deadlock");
#endif
        }

        simple_lock(&uvm.pagedaemon_lock);
        wakeup(&uvm.pagedaemon);        /* wake the daemon! */
        UVM_UNLOCK_AND_WAIT(&uvmexp.free, &uvm.pagedaemon_lock, FALSE, wmsg,
            timo);

        splx(s);
}
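
/*
 * Illustrative sketch (not from this file): a typical caller retries
 * its allocation around uvm_wait().  uvm_pagealloc() and
 * uvm_reclaimable() are real UVM interfaces; the surrounding loop is
 * a hypothetical example of the usual shape of such callers.
 *
 *      while ((pg = uvm_pagealloc(obj, off, NULL, 0)) == NULL) {
 *              if (!uvm_reclaimable())
 *                      return NULL;            give up, nothing reclaimable
 *              uvm_wait("exwait");             sleep; daemon will wake us
 *      }
 */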

/*
 * uvm_kick_pdaemon: perform checks to determine if we need to
 * give the pagedaemon a nudge, and do so if necessary.
 */

void
uvm_kick_pdaemon(void)
{

        if (uvmexp.free + uvmexp.paging < uvmexp.freemin ||
            (uvmexp.free + uvmexp.paging < uvmexp.freetarg &&
             uvmpdpol_needsscan_p())) {
                wakeup(&uvm.pagedaemon);
        }
}
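
/*
 * Illustrative sketch (not from this file, and the call site below is
 * an assumption): allocation and fault paths can nudge the daemon
 * after consuming pages, so scanning starts before the free list is
 * exhausted rather than after.
 *
 *      pg = uvm_pagealloc(obj, off, NULL, 0);
 *      ...
 *      uvm_kick_pdaemon();     wakes the daemon only if below targets
 */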

/*
 * uvmpd_tune: tune paging parameters
 *
 * => called whenever memory is added to (or removed from?) the system
 * => caller must call with page queues locked
 */

static void
uvmpd_tune(void)
{
        UVMHIST_FUNC("uvmpd_tune"); UVMHIST_CALLED(pdhist);

        uvmexp.freemin = uvmexp.npages / 20;

        /* between 16k and 256k */
        /* XXX: what are these values good for? */
        uvmexp.freemin = MAX(uvmexp.freemin, (16*1024) >> PAGE_SHIFT);
        uvmexp.freemin = MIN(uvmexp.freemin, (256*1024) >> PAGE_SHIFT);

        /* Make sure there's always a user page free. */
        if (uvmexp.freemin < uvmexp.reserve_kernel + 1)
                uvmexp.freemin = uvmexp.reserve_kernel + 1;

        uvmexp.freetarg = (uvmexp.freemin * 4) / 3;
        if (uvmexp.freetarg <= uvmexp.freemin)
                uvmexp.freetarg = uvmexp.freemin + 1;

        uvmexp.freetarg += uvm_extrapages;
        uvm_extrapages = 0;

        uvmexp.wiredmax = uvmexp.npages / 3;
        UVMHIST_LOG(pdhist, "<- done, freemin=%d, freetarg=%d, wiredmax=%d",
            uvmexp.freemin, uvmexp.freetarg, uvmexp.wiredmax, 0);
}
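
/*
 * Worked example (illustrative numbers, assuming 4KB pages so
 * PAGE_SHIFT == 12, on a 512MB machine where uvmexp.npages == 131072):
 *
 *      freemin  = 131072 / 20 = 6553, then clamped to the
 *                 [16KB, 256KB] band: MIN(6553, 64) = 64 pages (256KB)
 *      freetarg = (64 * 4) / 3 = 85 pages
 *      wiredmax = 131072 / 3 = 43690 pages
 *
 * The numbers are only a sketch; reserve_kernel and uvm_extrapages
 * can raise freemin/freetarg further.
 */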

/*
 * uvm_pageout: the main loop for the pagedaemon
 */

void
uvm_pageout(void *arg)
{
        int bufcnt, npages = 0;
        int extrapages = 0;
        UVMHIST_FUNC("uvm_pageout"); UVMHIST_CALLED(pdhist);

        UVMHIST_LOG(pdhist,"<starting uvm pagedaemon>", 0, 0, 0, 0);

        /*
         * ensure correct priority and set paging parameters...
         */

        uvm.pagedaemon_proc = curproc;
        uvm_lock_pageq();
        npages = uvmexp.npages;
        uvmpd_tune();
        uvm_unlock_pageq();

        /*
         * main loop
         */

        for (;;) {
                simple_lock(&uvm.pagedaemon_lock);

                UVMHIST_LOG(pdhist," <<SLEEPING>>",0,0,0,0);
                UVM_UNLOCK_AND_WAIT(&uvm.pagedaemon,
                    &uvm.pagedaemon_lock, FALSE, "pgdaemon", 0);
                uvmexp.pdwoke++;
                UVMHIST_LOG(pdhist," <<WOKE UP>>",0,0,0,0);

                /*
                 * now lock page queues and recompute inactive count
                 */

                uvm_lock_pageq();
                if (npages != uvmexp.npages || extrapages != uvm_extrapages) {
                        npages = uvmexp.npages;
                        extrapages = uvm_extrapages;
                        uvmpd_tune();
                }

                uvmpdpol_tune();

                /*
                 * Estimate a hint.  Note that bufmem is returned to
                 * the system only when an entire pool page is empty.
                 */
                bufcnt = uvmexp.freetarg - uvmexp.free;
                if (bufcnt < 0)
                        bufcnt = 0;
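
                /*
                 * Worked example (illustrative numbers): with
                 * freetarg == 256 and free == 200, bufcnt == 56
                 * pages, so the buf_drain() call below asks the
                 * buffer cache to give back 56 << PAGE_SHIFT bytes
                 * (229376 bytes with 4KB pages).
                 */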

                UVMHIST_LOG(pdhist," free/ftarg=%d/%d",
                    uvmexp.free, uvmexp.freetarg, 0,0);

                /*
                 * scan if needed
                 */

                if (uvmexp.free + uvmexp.paging < uvmexp.freetarg ||
                    uvmpdpol_needsscan_p()) {
                        uvmpd_scan();
                }

                /*
                 * if there's any free memory to be had,
                 * wake up any waiters.
                 */

                if (uvmexp.free > uvmexp.reserve_kernel ||
                    uvmexp.paging == 0) {
                        wakeup(&uvmexp.free);
                }

                /*
                 * scan done.  unlock page queues (the only lock we are holding)
                 */

                uvm_unlock_pageq();

                buf_drain(bufcnt << PAGE_SHIFT);

                /*
                 * drain pool resources now that we're not holding any locks
                 */

                pool_drain(0);

                /*
                 * free any cached u-areas we don't need
                 */
                uvm_uarea_drain(TRUE);

        }
        /*NOTREACHED*/
}


/*
 * uvm_aiodone_daemon: main loop for the aiodone daemon.
 */

void
uvm_aiodone_daemon(void *arg)
{
        int s, free;
        struct buf *bp, *nbp;
        UVMHIST_FUNC("uvm_aiodoned"); UVMHIST_CALLED(pdhist);

        for (;;) {

                /*
                 * carefully attempt to go to sleep (without losing "wakeups"!).
                 * we need splbio because we want to make sure the aio_done list
                 * is totally empty before we go to sleep.
                 */

                s = splbio();
                simple_lock(&uvm.aiodoned_lock);
                if (TAILQ_FIRST(&uvm.aio_done) == NULL) {
                        UVMHIST_LOG(pdhist," <<SLEEPING>>",0,0,0,0);
                        UVM_UNLOCK_AND_WAIT(&uvm.aiodoned,
                            &uvm.aiodoned_lock, FALSE, "aiodoned", 0);
                        UVMHIST_LOG(pdhist," <<WOKE UP>>",0,0,0,0);

                        /* relock aiodoned_lock, still at splbio */
                        simple_lock(&uvm.aiodoned_lock);
                }

                /*
                 * check for done aio structures
                 */

                bp = TAILQ_FIRST(&uvm.aio_done);
                if (bp) {
                        TAILQ_INIT(&uvm.aio_done);
                }

                simple_unlock(&uvm.aiodoned_lock);
                splx(s);

                /*
                 * process each i/o that's done.
                 */

                free = uvmexp.free;
                while (bp != NULL) {
                        nbp = TAILQ_NEXT(bp, b_freelist);
                        (*bp->b_iodone)(bp);
                        bp = nbp;
                }
                if (free <= uvmexp.reserve_kernel) {
                        s = uvm_lock_fpageq();
                        wakeup(&uvm.pagedaemon);
                        uvm_unlock_fpageq(s);
                } else {
                        simple_lock(&uvm.pagedaemon_lock);
                        wakeup(&uvmexp.free);
                        simple_unlock(&uvm.pagedaemon_lock);
                }
        }
}
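
/*
 * Illustrative sketch (not from this file): the producer side of the
 * uvm.aio_done handoff.  Roughly this shape lives in the pager code;
 * the exact function is an assumption here, not a quote:
 *
 *      s = splbio();
 *      simple_lock(&uvm.aiodoned_lock);
 *      TAILQ_INSERT_TAIL(&uvm.aio_done, bp, b_freelist);
 *      wakeup(&uvm.aiodoned);
 *      simple_unlock(&uvm.aiodoned_lock);
 *      splx(s);
 *
 * Because this daemon rechecks TAILQ_FIRST() at splbio before
 * sleeping, an entry queued between wakeups is never lost.
 */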

/*
 * uvmpd_trylockowner: trylock the page's owner.
 *
 * => called with pageq locked.
 * => resolve orphaned O->A loaned page.
 * => return the locked simplelock on success.  otherwise, return NULL.
 */

struct simplelock *
uvmpd_trylockowner(struct vm_page *pg)
{
        struct uvm_object *uobj = pg->uobject;
        struct simplelock *slock;

        UVM_LOCK_ASSERT_PAGEQ();
        if (uobj != NULL) {
                slock = &uobj->vmobjlock;
        } else {
                struct vm_anon *anon = pg->uanon;

                KASSERT(anon != NULL);
                slock = &anon->an_lock;
        }

        if (!simple_lock_try(slock)) {
                return NULL;
        }

        if (uobj == NULL) {

                /*
                 * set PQ_ANON if it isn't set already.
                 */

                if ((pg->pqflags & PQ_ANON) == 0) {
                        KASSERT(pg->loan_count > 0);
                        pg->loan_count--;
                        pg->pqflags |= PQ_ANON;
                        /* anon now owns it */
                }
        }

        return slock;
}
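
/*
 * Illustrative sketch (not from this file): the canonical caller
 * pattern, as used by uvmpd_trydropswap() and uvmpd_scan_queue()
 * below.  Lock ordering is pageq -> owner, so only a trylock is
 * safe here:
 *
 *      slock = uvmpd_trylockowner(pg);
 *      if (slock == NULL)
 *              continue;               owner busy, skip this page
 *      if (pg->flags & PG_BUSY) {
 *              simple_unlock(slock);
 *              continue;               page busy, skip it too
 *      }
 *      ... operate on pg with its owner locked ...
 *      simple_unlock(slock);
 */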

#if defined(VMSWAP)
struct swapcluster {
        int swc_slot;
        int swc_nallocated;
        int swc_nused;
        struct vm_page *swc_pages[howmany(MAXPHYS, MIN_PAGE_SIZE)];
};
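
/*
 * Sizing example (illustrative): with MAXPHYS == 64KB and
 * MIN_PAGE_SIZE == 4KB, howmany(MAXPHYS, MIN_PAGE_SIZE) == 16, so a
 * cluster batches up to 16 pages into one swap write.
 */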

static void
swapcluster_init(struct swapcluster *swc)
{

        swc->swc_slot = 0;
}

static int
swapcluster_allocslots(struct swapcluster *swc)
{
        int slot;
        int npages;

        if (swc->swc_slot != 0) {
                return 0;
        }

        /*
         * even with a strange MAXPHYS, the shift implicitly
         * rounds down to a whole number of pages.
         */
        npages = MAXPHYS >> PAGE_SHIFT;
        slot = uvm_swap_alloc(&npages, TRUE);
        if (slot == 0) {
                return ENOMEM;
        }
        swc->swc_slot = slot;
        swc->swc_nallocated = npages;
        swc->swc_nused = 0;

        return 0;
}

static int
swapcluster_add(struct swapcluster *swc, struct vm_page *pg)
{
        int slot;
        struct uvm_object *uobj;

        KASSERT(swc->swc_slot != 0);
        KASSERT(swc->swc_nused < swc->swc_nallocated);
        KASSERT((pg->pqflags & PQ_SWAPBACKED) != 0);

        slot = swc->swc_slot + swc->swc_nused;
        uobj = pg->uobject;
        if (uobj == NULL) {
                LOCK_ASSERT(simple_lock_held(&pg->uanon->an_lock));
                pg->uanon->an_swslot = slot;
        } else {
                int result;

                LOCK_ASSERT(simple_lock_held(&uobj->vmobjlock));
                result = uao_set_swslot(uobj, pg->offset >> PAGE_SHIFT, slot);
                if (result == -1) {
                        return ENOMEM;
                }
        }
        swc->swc_pages[swc->swc_nused] = pg;
        swc->swc_nused++;

        return 0;
}

static void
swapcluster_flush(struct swapcluster *swc, boolean_t now)
{
        int slot;
        int nused;
        int nallocated;
        int error;

        if (swc->swc_slot == 0) {
                return;
        }
        KASSERT(swc->swc_nused <= swc->swc_nallocated);

        slot = swc->swc_slot;
        nused = swc->swc_nused;
        nallocated = swc->swc_nallocated;

        /*
         * if this is the final pageout we could have a few
         * unused swap blocks.  if so, free them now.
         */

        if (nused < nallocated) {
                if (!now) {
                        return;
                }
                uvm_swap_free(slot + nused, nallocated - nused);
        }

        /*
         * now start the pageout.
         */

        uvmexp.pdpageouts++;
        error = uvm_swap_put(slot, swc->swc_pages, nused, 0);
        KASSERT(error == 0);

        /*
         * zero swslot to indicate that we are
         * no longer building a swap-backed cluster.
         */

        swc->swc_slot = 0;
}
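
/*
 * Illustrative sketch (not from this file): the swapcluster life
 * cycle as driven by uvmpd_scan_queue() below.  This condenses the
 * real control flow; error handling is omitted:
 *
 *      struct swapcluster swc;
 *
 *      swapcluster_init(&swc);
 *      for each dirty swap-backed victim page pg {
 *              if (swapcluster_allocslots(&swc))
 *                      continue;               no swap space left
 *              if (swapcluster_add(&swc, pg))
 *                      continue;               slot bookkeeping failed
 *              swapcluster_flush(&swc, FALSE); writes only full clusters
 *      }
 *      swapcluster_flush(&swc, TRUE);          final, possibly short, write
 */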

/*
 * uvmpd_dropswap: free any swap allocated to this page.
 *
 * => called with owner locked.
 * => return TRUE if a page had an associated slot.
 */

static boolean_t
uvmpd_dropswap(struct vm_page *pg)
{
        boolean_t result = FALSE;
        struct vm_anon *anon = pg->uanon;

        if ((pg->pqflags & PQ_ANON) && anon->an_swslot) {
                uvm_swap_free(anon->an_swslot, 1);
                anon->an_swslot = 0;
                pg->flags &= ~PG_CLEAN;
                result = TRUE;
        } else if (pg->pqflags & PQ_AOBJ) {
                int slot = uao_set_swslot(pg->uobject,
                    pg->offset >> PAGE_SHIFT, 0);
                if (slot) {
                        uvm_swap_free(slot, 1);
                        pg->flags &= ~PG_CLEAN;
                        result = TRUE;
                }
        }

        return result;
}

/*
 * uvmpd_trydropswap: try to free any swap allocated to this page.
 *
 * => return TRUE if a slot is successfully freed.
 */

boolean_t
uvmpd_trydropswap(struct vm_page *pg)
{
        struct simplelock *slock;
        boolean_t result;

        if ((pg->flags & PG_BUSY) != 0) {
                return FALSE;
        }

        /*
         * lock the page's owner.
         */

        slock = uvmpd_trylockowner(pg);
        if (slock == NULL) {
                return FALSE;
        }

        /*
         * skip this page if it's busy.
         */

        if ((pg->flags & PG_BUSY) != 0) {
                simple_unlock(slock);
                return FALSE;
        }

        result = uvmpd_dropswap(pg);

        simple_unlock(slock);

        return result;
}

#endif /* defined(VMSWAP) */

/*
 * uvmpd_scan_queue: scan a replace-candidate list for pages
 * to clean or free.
 *
 * => called with page queues locked
 * => we work on meeting our free target by converting inactive pages
 *    into free pages.
 * => we handle the building of swap-backed clusters
 */

static void
uvmpd_scan_queue(void)
{
        struct vm_page *p;
        struct uvm_object *uobj;
        struct vm_anon *anon;
#if defined(VMSWAP)
        struct swapcluster swc;
#endif /* defined(VMSWAP) */
        int dirtyreacts;
        struct simplelock *slock;
        UVMHIST_FUNC("uvmpd_scan_queue"); UVMHIST_CALLED(pdhist);

        /*
         * swslot is non-zero if we are building a swap cluster.  we want
         * to stay in the loop while we have a page to scan or we have
         * a swap-cluster to build.
         */

#if defined(VMSWAP)
        swapcluster_init(&swc);
#endif /* defined(VMSWAP) */

        dirtyreacts = 0;
        uvmpdpol_scaninit();

        while (/* CONSTCOND */ 1) {

                /*
                 * see if we've met the free target.
                 */

                if (uvmexp.free + uvmexp.paging >= uvmexp.freetarg << 2 ||
                    dirtyreacts == UVMPD_NUMDIRTYREACTS) {
                        UVMHIST_LOG(pdhist," met free target: "
                            "exit loop", 0, 0, 0, 0);
                        break;
                }

                p = uvmpdpol_selectvictim();
                if (p == NULL) {
                        break;
                }
                KASSERT(uvmpdpol_pageisqueued_p(p));
                KASSERT(p->wire_count == 0);

                /*
                 * we are below target and have a new page to consider.
                 */

                anon = p->uanon;
                uobj = p->uobject;

                /*
                 * first we attempt to lock the object that this page
                 * belongs to.  if our attempt fails we skip on to
                 * the next page (no harm done).  it is important to
                 * "try" locking the object as we are locking in the
                 * wrong order (pageq -> object) and we don't want to
                 * deadlock.
                 *
                 * the only time we expect to see an ownerless page
                 * (i.e. a page with no uobject and !PQ_ANON) is if an
                 * anon has loaned a page from a uvm_object and the
                 * uvm_object has dropped the ownership.  in that
                 * case, the anon can "take over" the loaned page
                 * and make it its own.
                 */

                slock = uvmpd_trylockowner(p);
                if (slock == NULL) {
                        continue;
                }
                if (p->flags & PG_BUSY) {
                        simple_unlock(slock);
                        uvmexp.pdbusy++;
                        continue;
                }

                /* does the page belong to an object? */
                if (uobj != NULL) {
                        uvmexp.pdobscan++;
                } else {
#if defined(VMSWAP)
                        KASSERT(anon != NULL);
                        uvmexp.pdanscan++;
#else /* defined(VMSWAP) */
                        panic("%s: anon", __func__);
#endif /* defined(VMSWAP) */
                }


                /*
                 * we now have the object and the page queues locked.
                 * if the page is not swap-backed, call the object's
                 * pager to flush and free the page.
                 */

#if defined(READAHEAD_STATS)
                if ((p->pqflags & PQ_READAHEAD) != 0) {
                        p->pqflags &= ~PQ_READAHEAD;
                        uvm_ra_miss.ev_count++;
                }
#endif /* defined(READAHEAD_STATS) */

                if ((p->pqflags & PQ_SWAPBACKED) == 0) {
                        uvm_unlock_pageq();
                        (void) (uobj->pgops->pgo_put)(uobj, p->offset,
                            p->offset + PAGE_SIZE, PGO_CLEANIT|PGO_FREE);
                        uvm_lock_pageq();
                        continue;
                }

                /*
                 * the page is swap-backed.  remove all the permissions
                 * from the page so we can sync the modified info
                 * without any race conditions.  if the page is clean
                 * we can free it now and continue.
                 */

                pmap_page_protect(p, VM_PROT_NONE);
                if ((p->flags & PG_CLEAN) && pmap_clear_modify(p)) {
                        p->flags &= ~(PG_CLEAN);
                }
                if (p->flags & PG_CLEAN) {
                        int slot;
                        int pageidx;

                        pageidx = p->offset >> PAGE_SHIFT;
                        uvm_pagefree(p);
                        uvmexp.pdfreed++;

                        /*
                         * for anons, we need to remove the page
                         * from the anon ourselves.  for aobjs,
                         * pagefree did that for us.
                         */

                        if (anon) {
                                KASSERT(anon->an_swslot != 0);
                                anon->an_page = NULL;
                                slot = anon->an_swslot;
                        } else {
                                slot = uao_find_swslot(uobj, pageidx);
                        }
                        simple_unlock(slock);

                        if (slot > 0) {
                                /* this page is now only in swap. */
                                simple_lock(&uvm.swap_data_lock);
                                KASSERT(uvmexp.swpgonly < uvmexp.swpginuse);
                                uvmexp.swpgonly++;
                                simple_unlock(&uvm.swap_data_lock);
                        }
                        continue;
                }

#if defined(VMSWAP)
                /*
                 * this page is dirty, skip it if we'll have met our
                 * free target when all the current pageouts complete.
                 */

                if (uvmexp.free + uvmexp.paging > uvmexp.freetarg << 2) {
                        simple_unlock(slock);
                        continue;
                }

                /*
                 * free any swap space allocated to the page since
                 * we'll have to write it again with its new data.
                 */

                uvmpd_dropswap(p);

                /*
                 * if all pages in swap are only in swap,
                 * the swap space is full and we can't page out
                 * any more swap-backed pages.  reactivate this page
                 * so that we eventually cycle all pages through
                 * the inactive queue.
                 */

                if (uvm_swapisfull()) {
                        dirtyreacts++;
                        uvm_pageactivate(p);
                        simple_unlock(slock);
                        continue;
                }

                /*
                 * start new swap pageout cluster (if necessary).
                 */

                if (swapcluster_allocslots(&swc)) {
                        simple_unlock(slock);
                        dirtyreacts++; /* XXX */
                        continue;
                }

                /*
                 * at this point, we're definitely going to reuse this
                 * page.  mark the page busy and delayed-free.
                 * we should remove the page from the page queues
                 * so we don't ever look at it again.
                 * adjust counters and such.
                 */

                p->flags |= PG_BUSY;
                UVM_PAGE_OWN(p, "scan_queue");

                p->flags |= PG_PAGEOUT;
                uvmexp.paging++;
                uvm_pagedequeue(p);

                uvmexp.pgswapout++;
                uvm_unlock_pageq();

                /*
                 * add the new page to the cluster.
                 */

                if (swapcluster_add(&swc, p)) {
                        p->flags &= ~(PG_BUSY|PG_PAGEOUT);
                        UVM_PAGE_OWN(p, NULL);
                        uvm_lock_pageq();
                        uvmexp.paging--;
                        dirtyreacts++;
                        uvm_pageactivate(p);
                        simple_unlock(slock);
                        continue;
                }
                simple_unlock(slock);

                swapcluster_flush(&swc, FALSE);
                uvm_lock_pageq();

                /*
                 * the pageout is in progress.  bump counters and set up
                 * for the next loop.
                 */

                uvmexp.pdpending++;

#else /* defined(VMSWAP) */
                uvm_pageactivate(p);
                simple_unlock(slock);
#endif /* defined(VMSWAP) */
        }

#if defined(VMSWAP)
        uvm_unlock_pageq();
        swapcluster_flush(&swc, TRUE);
        uvm_lock_pageq();
#endif /* defined(VMSWAP) */
}

/*
 * uvmpd_scan: scan the page queues and attempt to meet our targets.
 *
 * => called with pageq's locked
 */

static void
uvmpd_scan(void)
{
        int swap_shortage, pages_freed;
        UVMHIST_FUNC("uvmpd_scan"); UVMHIST_CALLED(pdhist);

        uvmexp.pdrevs++;

#ifndef __SWAP_BROKEN

        /*
         * swap out some processes if we are below our free target.
         * we need to unlock the page queues for this.
         */

        if (uvmexp.free < uvmexp.freetarg && uvmexp.nswapdev != 0) {
                uvmexp.pdswout++;
                UVMHIST_LOG(pdhist," free %d < target %d: swapout",
                    uvmexp.free, uvmexp.freetarg, 0, 0);
                uvm_unlock_pageq();
                uvm_swapout_threads();
                uvm_lock_pageq();

        }
#endif

        /*
         * now we want to work on meeting our targets.  first we work on our
         * free target by converting inactive pages into free pages.  then
         * we work on meeting our inactive target by converting active pages
         * to inactive ones.
         */

        UVMHIST_LOG(pdhist, " starting 'free' loop",0,0,0,0);

        pages_freed = uvmexp.pdfreed;
        uvmpd_scan_queue();
        pages_freed = uvmexp.pdfreed - pages_freed;

        /*
         * detect if we're not going to be able to page anything out
         * until we free some swap resources from active pages.
         */

        swap_shortage = 0;
        if (uvmexp.free < uvmexp.freetarg &&
            uvmexp.swpginuse >= uvmexp.swpgavail &&
            !uvm_swapisfull() &&
            pages_freed == 0) {
                swap_shortage = uvmexp.freetarg - uvmexp.free;
        }

        uvmpdpol_balancequeue(swap_shortage);
}
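
/*
 * Worked example (illustrative numbers): with freetarg == 256,
 * free == 200, swap fully committed (swpginuse >= swpgavail) but not
 * full, and a scan pass that freed nothing, swap_shortage == 56, so
 * uvmpdpol_balancequeue() is asked to recover swap slots from about
 * 56 pages' worth of queued pages.
 */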

/*
 * uvm_reclaimable: decide whether to wait for pagedaemon.
 *
 * => return TRUE if it seems worthwhile to do uvm_wait.
 *
 * XXX should be tunable.
 * XXX should consider pools, etc?
 */

boolean_t
uvm_reclaimable(void)
{
        int filepages;
        int active, inactive;

        /*
         * if swap is not full, no problem.
         */

        if (!uvm_swapisfull()) {
                return TRUE;
        }

        /*
         * file-backed pages can be reclaimed even when swap is full.
         * if we have more than 1/16 of pageable memory or 5MB of it,
         * try to reclaim.
         *
         * XXX assume the worst case, ie. all wired pages are file-backed.
         *
         * XXX should consider other reclaimable memory,
         * XXX ie. pools, traditional buffer cache.
         */

        filepages = uvmexp.filepages + uvmexp.execpages - uvmexp.wired;
        uvm_estimatepageable(&active, &inactive);
        if (filepages >= MIN((active + inactive) >> 4,
            5 * 1024 * 1024 >> PAGE_SHIFT)) {
                return TRUE;
        }
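
        /*
         * Worked example (illustrative numbers, 4KB pages): with
         * active + inactive == 40000 pages, the two candidates are
         * 40000 >> 4 == 2500 pages and 5MB >> PAGE_SHIFT == 1280
         * pages; MIN() picks 1280, so >= 1280 reclaimable file
         * pages make waiting look worthwhile.
         */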

        /*
         * kill the process, fail allocation, etc..
         */

        return FALSE;
}

void
uvm_estimatepageable(int *active, int *inactive)
{

        uvmpdpol_estimatepageable(active, inactive);
}