sys/vm/vm_swapout.c
/*-
 * SPDX-License-Identifier: (BSD-4-Clause AND MIT-CMU)
 *
 * Copyright (c) 1991 Regents of the University of California.
 * All rights reserved.
 * Copyright (c) 1994 John S. Dyson
 * All rights reserved.
 * Copyright (c) 1994 David Greenman
 * All rights reserved.
 * Copyright (c) 2005 Yahoo! Technologies Norway AS
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_pageout.c	7.4 (Berkeley) 5/7/91
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_kstack_pages.h"
#include "opt_kstack_max_pages.h"
#include "opt_vm.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/limits.h>
#include <sys/kernel.h>
#include <sys/eventhandler.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/kthread.h>
#include <sys/ktr.h>
#include <sys/mount.h>
#include <sys/racct.h>
#include <sys/resourcevar.h>
#include <sys/refcount.h>
#include <sys/sched.h>
#include <sys/sdt.h>
#include <sys/signalvar.h>
#include <sys/smp.h>
#include <sys/time.h>
#include <sys/vnode.h>
#include <sys/vmmeter.h>
#include <sys/rwlock.h>
#include <sys/sx.h>
#include <sys/sysctl.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_pageout.h>
#include <vm/vm_pager.h>
#include <vm/vm_phys.h>
#include <vm/swap_pager.h>
#include <vm/vm_extern.h>
#include <vm/uma.h>

/* the kernel process "vm_daemon" */
static void vm_daemon(void);
static struct proc *vmproc;

static struct kproc_desc vm_kp = {
	"vmdaemon",
	vm_daemon,
	&vmproc
};
SYSINIT(vmdaemon, SI_SUB_KTHREAD_VM, SI_ORDER_FIRST, kproc_start, &vm_kp);

static int vm_swap_enabled = 1;
static int vm_swap_idle_enabled = 0;

SYSCTL_INT(_vm, VM_SWAPPING_ENABLED, swap_enabled, CTLFLAG_RW,
    &vm_swap_enabled, 0,
    "Enable entire process swapout");
SYSCTL_INT(_vm, OID_AUTO, swap_idle_enabled, CTLFLAG_RW,
    &vm_swap_idle_enabled, 0,
    "Allow swapout on idle criteria");

/*
 * Swap_idle_threshold1 is the guaranteed swapped-in time for a process.
 */
static int swap_idle_threshold1 = 2;
SYSCTL_INT(_vm, OID_AUTO, swap_idle_threshold1, CTLFLAG_RW,
    &swap_idle_threshold1, 0,
    "Guaranteed swapped in time for a process");

/*
 * Swap_idle_threshold2 is the time that a process can be idle before
 * it will be swapped out, if idle swapping is enabled.
 */
static int swap_idle_threshold2 = 10;
SYSCTL_INT(_vm, OID_AUTO, swap_idle_threshold2, CTLFLAG_RW,
    &swap_idle_threshold2, 0,
    "Time before a process will be swapped out");

static int vm_daemon_timeout = 0;
SYSCTL_INT(_vm, OID_AUTO, vmdaemon_timeout, CTLFLAG_RW,
    &vm_daemon_timeout, 0,
    "Time between vmdaemon runs");

static int vm_pageout_req_swapout;	/* XXX */
static int vm_daemon_needed;
static struct mtx vm_daemon_mtx;
/* Allow for use by vm_pageout before vm_daemon is initialized. */
MTX_SYSINIT(vm_daemon, &vm_daemon_mtx, "vm daemon", MTX_DEF);

static int swapped_cnt;
static int swap_inprogress;	/* Pending swap-ins done outside swapper. */
static int last_swapin;

static void swapclear(struct proc *);
static int swapout(struct proc *);
static void vm_swapout_map_deactivate_pages(vm_map_t, long);
static void vm_swapout_object_deactivate(pmap_t, vm_object_t, long);
static void swapout_procs(int action);
static void vm_req_vmdaemon(int req);
static void vm_thread_swapout(struct thread *td);

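/*
 * Attempt to deactivate the given page: skip it if it is wired or cannot
 * be busied, and leave recently referenced pages alone.  An unreferenced
 * inactive page simply loses its mappings, while an unreferenced active
 * page is unmapped and deactivated only when "unmap" allows it.
 */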
static void
vm_swapout_object_deactivate_page(pmap_t pmap, vm_page_t m, bool unmap)
{

	/*
	 * Ignore unreclaimable wired pages.  Repeat the check after busying
	 * since a busy holder may wire the page.
	 */
	if (vm_page_wired(m) || !vm_page_tryxbusy(m))
		return;

	if (vm_page_wired(m) || !pmap_page_exists_quick(pmap, m)) {
		vm_page_xunbusy(m);
		return;
	}
	if (!pmap_is_referenced(m)) {
		if (!vm_page_active(m))
			(void)vm_page_try_remove_all(m);
		else if (unmap && vm_page_try_remove_all(m))
			vm_page_deactivate(m);
	}
	vm_page_xunbusy(m);
}

/*
 * vm_swapout_object_deactivate
 *
 *	Deactivate enough pages to satisfy the inactive target
 *	requirements.
 *
 *	The object and map must be locked.
 */
static void
vm_swapout_object_deactivate(pmap_t pmap, vm_object_t first_object,
    long desired)
{
	vm_object_t backing_object, object;
	vm_page_t m;
	bool unmap;

	VM_OBJECT_ASSERT_LOCKED(first_object);
	if ((first_object->flags & OBJ_FICTITIOUS) != 0)
		return;
	for (object = first_object;; object = backing_object) {
		if (pmap_resident_count(pmap) <= desired)
			goto unlock_return;
		VM_OBJECT_ASSERT_LOCKED(object);
		if ((object->flags & OBJ_UNMANAGED) != 0 ||
		    blockcount_read(&object->paging_in_progress) > 0)
			goto unlock_return;

		unmap = true;
		if (object->shadow_count > 1)
			unmap = false;

		/*
		 * Scan the object's entire memory queue.
		 */
		TAILQ_FOREACH(m, &object->memq, listq) {
			if (pmap_resident_count(pmap) <= desired)
				goto unlock_return;
			if (should_yield())
				goto unlock_return;
			vm_swapout_object_deactivate_page(pmap, m, unmap);
		}
		if ((backing_object = object->backing_object) == NULL)
			goto unlock_return;
		VM_OBJECT_RLOCK(backing_object);
		if (object != first_object)
			VM_OBJECT_RUNLOCK(object);
	}
unlock_return:
	if (object != first_object)
		VM_OBJECT_RUNLOCK(object);
}

/*
 * Deactivate some number of pages in a map; try to do it fairly, but
 * that is really hard to do.
 */
static void
vm_swapout_map_deactivate_pages(vm_map_t map, long desired)
{
	vm_map_entry_t tmpe;
	vm_object_t obj, bigobj;
	int nothingwired;

	if (!vm_map_trylock_read(map))
		return;

	bigobj = NULL;
	nothingwired = TRUE;

	/*
	 * first, search out the biggest object, and try to free pages from
	 * that.
	 */
	VM_MAP_ENTRY_FOREACH(tmpe, map) {
		if ((tmpe->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) {
			obj = tmpe->object.vm_object;
			if (obj != NULL && VM_OBJECT_TRYRLOCK(obj)) {
				if (obj->shadow_count <= 1 &&
				    (bigobj == NULL ||
				    bigobj->resident_page_count <
				    obj->resident_page_count)) {
					if (bigobj != NULL)
						VM_OBJECT_RUNLOCK(bigobj);
					bigobj = obj;
				} else
					VM_OBJECT_RUNLOCK(obj);
			}
		}
		if (tmpe->wired_count > 0)
			nothingwired = FALSE;
	}

	if (bigobj != NULL) {
		vm_swapout_object_deactivate(map->pmap, bigobj, desired);
		VM_OBJECT_RUNLOCK(bigobj);
	}
	/*
	 * Next, hunt around for other pages to deactivate.  We actually
	 * do this search sort of wrong -- .text first is not the best idea.
	 */
	VM_MAP_ENTRY_FOREACH(tmpe, map) {
		if (pmap_resident_count(vm_map_pmap(map)) <= desired)
			break;
		if ((tmpe->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) {
			obj = tmpe->object.vm_object;
			if (obj != NULL) {
				VM_OBJECT_RLOCK(obj);
				vm_swapout_object_deactivate(map->pmap, obj,
				    desired);
				VM_OBJECT_RUNLOCK(obj);
			}
		}
	}

	/*
	 * Remove all mappings if a process is swapped out; this will free
	 * page table pages.
	 */
	if (desired == 0 && nothingwired) {
		pmap_remove(vm_map_pmap(map), vm_map_min(map),
		    vm_map_max(map));
	}

	vm_map_unlock_read(map);
}

/*
 * Swap out requests
 */
#define	VM_SWAP_NORMAL	1
#define	VM_SWAP_IDLE	2

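/*
 * Request a normal swap-out pass from the vm_daemon, if whole-process
 * swapout is enabled.
 */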
void
vm_swapout_run(void)
{

	if (vm_swap_enabled)
		vm_req_vmdaemon(VM_SWAP_NORMAL);
}

/*
 * Idle process swapout -- run once per second when pagedaemons are
 * reclaiming pages.
 */
void
vm_swapout_run_idle(void)
{
	static long lsec;

	if (!vm_swap_idle_enabled || time_second == lsec)
		return;
	vm_req_vmdaemon(VM_SWAP_IDLE);
	lsec = time_second;
}

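/*
 * Record the swap-out request and wake up the vm_daemon, rate-limiting
 * wakeups to at most one per second (with a wrap check on "ticks").
 */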
static void
vm_req_vmdaemon(int req)
{
	static int lastrun = 0;

	mtx_lock(&vm_daemon_mtx);
	vm_pageout_req_swapout |= req;
	if ((ticks > (lastrun + hz)) || (ticks < lastrun)) {
		wakeup(&vm_daemon_needed);
		lastrun = ticks;
	}
	mtx_unlock(&vm_daemon_mtx);
}

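/*
 * The vm_daemon kernel process.  It services swap-out requests from the
 * page daemon and enforces resident set size limits: any process whose
 * RSS exceeds its rlimit (or, with RACCT, its racct allowance) has pages
 * deactivated until it fits.
 */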
static void
vm_daemon(void)
{
	struct rlimit rsslim;
	struct proc *p;
	struct thread *td;
	struct vmspace *vm;
	int breakout, swapout_flags, tryagain, attempts;
#ifdef RACCT
	uint64_t rsize, ravailable;

	if (racct_enable && vm_daemon_timeout == 0)
		vm_daemon_timeout = hz;
#endif

	while (TRUE) {
		mtx_lock(&vm_daemon_mtx);
		msleep(&vm_daemon_needed, &vm_daemon_mtx, PPAUSE, "psleep",
		    vm_daemon_timeout);
		swapout_flags = vm_pageout_req_swapout;
		vm_pageout_req_swapout = 0;
		mtx_unlock(&vm_daemon_mtx);
		if (swapout_flags != 0) {
			/*
			 * Drain the per-CPU page queue batches as a deadlock
			 * avoidance measure.
			 */
			if ((swapout_flags & VM_SWAP_NORMAL) != 0)
				vm_page_pqbatch_drain();
			swapout_procs(swapout_flags);
		}

		/*
		 * Scan the processes for those exceeding their rlimits or
		 * swapped out, and deactivate their pages.
		 */
		tryagain = 0;
		attempts = 0;
again:
		attempts++;
		sx_slock(&allproc_lock);
		FOREACH_PROC_IN_SYSTEM(p) {
			vm_pindex_t limit, size;

			/*
			 * if this is a system process or if we have already
			 * looked at this process, skip it.
			 */
			PROC_LOCK(p);
			if (p->p_state != PRS_NORMAL ||
			    p->p_flag & (P_INEXEC | P_SYSTEM | P_WEXIT)) {
				PROC_UNLOCK(p);
				continue;
			}
			/*
			 * If the process is in a non-running type state
			 * (i.e., any thread is neither runnable, running,
			 * sleeping, nor suspended), don't touch it.
			 */
			breakout = 0;
			FOREACH_THREAD_IN_PROC(p, td) {
				thread_lock(td);
				if (!TD_ON_RUNQ(td) &&
				    !TD_IS_RUNNING(td) &&
				    !TD_IS_SLEEPING(td) &&
				    !TD_IS_SUSPENDED(td)) {
					thread_unlock(td);
					breakout = 1;
					break;
				}
				thread_unlock(td);
			}
			if (breakout) {
				PROC_UNLOCK(p);
				continue;
			}
			/*
			 * get a limit
			 */
			lim_rlimit_proc(p, RLIMIT_RSS, &rsslim);
			limit = OFF_TO_IDX(
			    qmin(rsslim.rlim_cur, rsslim.rlim_max));

			/*
			 * Let processes that are swapped out really be
			 * swapped out: set the limit to nothing (this will
			 * force a swap-out).
			 */
			if ((p->p_flag & P_INMEM) == 0)
				limit = 0;	/* XXX */
			vm = vmspace_acquire_ref(p);
			_PHOLD_LITE(p);
			PROC_UNLOCK(p);
			if (vm == NULL) {
				PRELE(p);
				continue;
			}
			sx_sunlock(&allproc_lock);

			size = vmspace_resident_count(vm);
			if (size >= limit) {
				vm_swapout_map_deactivate_pages(
				    &vm->vm_map, limit);
				size = vmspace_resident_count(vm);
			}
#ifdef RACCT
			if (racct_enable) {
				rsize = IDX_TO_OFF(size);
				PROC_LOCK(p);
				if (p->p_state == PRS_NORMAL)
					racct_set(p, RACCT_RSS, rsize);
				ravailable = racct_get_available(p, RACCT_RSS);
				PROC_UNLOCK(p);
				if (rsize > ravailable) {
					/*
					 * Don't be overly aggressive; this
					 * might be an innocent process,
					 * and the limit could've been exceeded
					 * by some memory hog.  Don't try
					 * to deactivate more than 1/4th
					 * of the process's resident set size.
					 */
					if (attempts <= 8) {
						if (ravailable < rsize -
						    (rsize / 4)) {
							ravailable = rsize -
							    (rsize / 4);
						}
					}
					vm_swapout_map_deactivate_pages(
					    &vm->vm_map,
					    OFF_TO_IDX(ravailable));
					/* Update RSS usage after paging out. */
					size = vmspace_resident_count(vm);
					rsize = IDX_TO_OFF(size);
					PROC_LOCK(p);
					if (p->p_state == PRS_NORMAL)
						racct_set(p, RACCT_RSS, rsize);
					PROC_UNLOCK(p);
					if (rsize > ravailable)
						tryagain = 1;
				}
			}
#endif
			vmspace_free(vm);
			sx_slock(&allproc_lock);
			PRELE(p);
		}
		sx_sunlock(&allproc_lock);
		if (tryagain != 0 && attempts <= 10) {
			maybe_yield();
			goto again;
		}
	}
}

/*
 * Allow a thread's kernel stack to be paged out.
 */
static void
vm_thread_swapout(struct thread *td)
{
	vm_page_t m;
	vm_offset_t kaddr;
	vm_pindex_t pindex;
	int i, pages;

	cpu_thread_swapout(td);
	kaddr = td->td_kstack;
	pages = td->td_kstack_pages;
	pindex = atop(kaddr - VM_MIN_KERNEL_ADDRESS);
	pmap_qremove(kaddr, pages);
	VM_OBJECT_WLOCK(kstack_object);
	for (i = 0; i < pages; i++) {
		m = vm_page_lookup(kstack_object, pindex + i);
		if (m == NULL)
			panic("vm_thread_swapout: kstack already missing?");
		vm_page_dirty(m);
		vm_page_xunbusy_unchecked(m);
		vm_page_unwire(m, PQ_LAUNDRY);
	}
	VM_OBJECT_WUNLOCK(kstack_object);
}

/*
 * Bring the kernel stack for a specified thread back in.
 */
static void
vm_thread_swapin(struct thread *td, int oom_alloc)
{
	vm_page_t ma[KSTACK_MAX_PAGES];
	vm_offset_t kaddr;
	int a, count, i, j, pages, rv __diagused;

	kaddr = td->td_kstack;
	pages = td->td_kstack_pages;
	vm_thread_stack_back(td->td_domain.dr_policy, kaddr, ma, pages,
	    oom_alloc);
	for (i = 0; i < pages;) {
		vm_page_assert_xbusied(ma[i]);
		if (vm_page_all_valid(ma[i])) {
			i++;
			continue;
		}
		vm_object_pip_add(kstack_object, 1);
		for (j = i + 1; j < pages; j++)
			if (vm_page_all_valid(ma[j]))
				break;
		VM_OBJECT_WLOCK(kstack_object);
		rv = vm_pager_has_page(kstack_object, ma[i]->pindex, NULL, &a);
		VM_OBJECT_WUNLOCK(kstack_object);
		KASSERT(rv == 1, ("%s: missing page %p", __func__, ma[i]));
		count = min(a + 1, j - i);
		rv = vm_pager_get_pages(kstack_object, ma + i, count, NULL,
		    NULL);
		KASSERT(rv == VM_PAGER_OK, ("%s: cannot get kstack for proc %d",
		    __func__, td->td_proc->p_pid));
		vm_object_pip_wakeup(kstack_object);
		i += count;
	}
	pmap_qenter(kaddr, ma, pages);
	cpu_thread_swapin(td);
}

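/*
 * Swap a process back into memory, bringing in each thread's kernel
 * stack, and mark it resident again.  If another thread is already
 * swapping the process in, just wait for it to finish.
 */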
void
faultin(struct proc *p)
{
	struct thread *td;
	int oom_alloc;

	PROC_LOCK_ASSERT(p, MA_OWNED);

	/*
	 * If another process is swapping in this process,
	 * just wait until it finishes.
	 */
	if (p->p_flag & P_SWAPPINGIN) {
		while (p->p_flag & P_SWAPPINGIN)
			msleep(&p->p_flag, &p->p_mtx, PVM, "faultin", 0);
		return;
	}

	if ((p->p_flag & P_INMEM) == 0) {
		oom_alloc = (p->p_flag & P_WKILLED) != 0 ? VM_ALLOC_SYSTEM :
		    VM_ALLOC_NORMAL;

		/*
		 * Don't let another thread swap process p out while we are
		 * busy swapping it in.
		 */
		++p->p_lock;
		p->p_flag |= P_SWAPPINGIN;
		PROC_UNLOCK(p);
		sx_xlock(&allproc_lock);
		MPASS(swapped_cnt > 0);
		swapped_cnt--;
		if (curthread != &thread0)
			swap_inprogress++;
		sx_xunlock(&allproc_lock);

		/*
		 * We hold no lock here because the list of threads
		 * cannot change while all threads in the process are
		 * swapped out.
		 */
		FOREACH_THREAD_IN_PROC(p, td)
			vm_thread_swapin(td, oom_alloc);

		if (curthread != &thread0) {
			sx_xlock(&allproc_lock);
			MPASS(swap_inprogress > 0);
			swap_inprogress--;
			last_swapin = ticks;
			sx_xunlock(&allproc_lock);
		}
		PROC_LOCK(p);
		swapclear(p);
		p->p_swtick = ticks;

		/* Allow other threads to swap p out now. */
		wakeup(&p->p_flag);
		--p->p_lock;
	}
}

/*
 * This swapin algorithm attempts to swap in processes only if there
 * is enough space for them.  Of course, if a process waits for a long
 * time, it will be swapped in anyway.
 */

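/*
 * Select the swapped-out process to bring in next.  A process marked
 * P_WKILLED is returned immediately; otherwise, unless "wkilled_only"
 * restricts the search, pick the process with the highest combined
 * swapped-out and sleep time, weighted against its nice value unless a
 * swap-in was explicitly requested.  The chosen process is returned
 * locked; NULL means nothing is eligible.
 */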
static struct proc *
swapper_selector(bool wkilled_only)
{
	struct proc *p, *res;
	struct thread *td;
	int ppri, pri, slptime, swtime;

	sx_assert(&allproc_lock, SA_SLOCKED);
	if (swapped_cnt == 0)
		return (NULL);
	res = NULL;
	ppri = INT_MIN;
	FOREACH_PROC_IN_SYSTEM(p) {
		PROC_LOCK(p);
		if (p->p_state == PRS_NEW || (p->p_flag & (P_SWAPPINGOUT |
		    P_SWAPPINGIN | P_INMEM)) != 0) {
			PROC_UNLOCK(p);
			continue;
		}
		if (p->p_state == PRS_NORMAL && (p->p_flag & P_WKILLED) != 0) {
			/*
			 * A swapped-out process might have mapped a
			 * large portion of the system's pages as
			 * anonymous memory.  There is no way to
			 * release the memory other than to kill the
			 * process, for which we need to swap it in.
			 */
			return (p);
		}
		if (wkilled_only) {
			PROC_UNLOCK(p);
			continue;
		}
		swtime = (ticks - p->p_swtick) / hz;
		FOREACH_THREAD_IN_PROC(p, td) {
			/*
			 * An otherwise runnable thread of a process
			 * swapped out has only the TDI_SWAPPED bit set.
			 */
			thread_lock(td);
			if (td->td_inhibitors == TDI_SWAPPED) {
				slptime = (ticks - td->td_slptick) / hz;
				pri = swtime + slptime;
				if ((td->td_flags & TDF_SWAPINREQ) == 0)
					pri -= p->p_nice * 8;
				/*
				 * if this thread is higher priority
				 * and there is enough space, then select
				 * this process instead of the previous
				 * selection.
				 */
				if (pri > ppri) {
					res = p;
					ppri = pri;
				}
			}
			thread_unlock(td);
		}
		PROC_UNLOCK(p);
	}

	if (res != NULL)
		PROC_LOCK(res);
	return (res);
}

#define	SWAPIN_INTERVAL	(MAXSLP * hz / 2)

/*
 * Limit swapper to swap in one non-WKILLED process in MAXSLP/2
 * interval, assuming that there is:
 * - at least one domain that is not suffering from a shortage of free memory;
 * - no parallel swap-ins;
 * - no other swap-ins in the current SWAPIN_INTERVAL.
 */
static bool
swapper_wkilled_only(void)
{

	return (vm_page_count_min_set(&all_domains) || swap_inprogress > 0 ||
	    (u_int)(ticks - last_swapin) < SWAPIN_INTERVAL);
}

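/*
 * The swapper's main loop, run by thread0.  It repeatedly asks
 * swapper_selector() for a process to bring in and faults it back into
 * memory, sleeping for SWAPIN_INTERVAL when nothing is eligible.
 */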
void
swapper(void)
{
	struct proc *p;

	for (;;) {
		sx_slock(&allproc_lock);
		p = swapper_selector(swapper_wkilled_only());
		sx_sunlock(&allproc_lock);

		if (p == NULL) {
			tsleep(&proc0, PVM, "swapin", SWAPIN_INTERVAL);
		} else {
			PROC_LOCK_ASSERT(p, MA_OWNED);

			/*
			 * Another process may be bringing or may have
			 * already brought this process in while we
			 * traverse all threads.  Or, this process may
			 * have exited or even be swapped out again.
			 */
			if (p->p_state == PRS_NORMAL && (p->p_flag & (P_INMEM |
			    P_SWAPPINGOUT | P_SWAPPINGIN)) == 0) {
				faultin(p);
			}
			PROC_UNLOCK(p);
		}
	}
}

/*
 * First, if any processes have been sleeping or stopped for at least
 * "swap_idle_threshold1" seconds, they are swapped out.  If, however,
 * no such processes exist, then the longest-sleeping or stopped
 * process is swapped out.  Finally, and only as a last resort, if
 * there are no sleeping or stopped processes, the longest-resident
 * process is swapped out.
 */
static void
swapout_procs(int action)
{
	struct proc *p;
	struct thread *td;
	int slptime;
	bool didswap, doswap;

	MPASS((action & (VM_SWAP_NORMAL | VM_SWAP_IDLE)) != 0);

	didswap = false;
	sx_slock(&allproc_lock);
	FOREACH_PROC_IN_SYSTEM(p) {
		/*
		 * Filter out not yet fully constructed processes.  Do
		 * not swap out held processes.  Avoid processes which
		 * are system, exiting, execing, traced, already swapped
		 * out or are in the process of being swapped in or out.
		 */
		PROC_LOCK(p);
		if (p->p_state != PRS_NORMAL || p->p_lock != 0 || (p->p_flag &
		    (P_SYSTEM | P_WEXIT | P_INEXEC | P_STOPPED_SINGLE |
		    P_TRACED | P_SWAPPINGOUT | P_SWAPPINGIN | P_INMEM)) !=
		    P_INMEM) {
			PROC_UNLOCK(p);
			continue;
		}

		/*
		 * Further consideration of this process for swap out
		 * requires iterating over its threads.  We release
		 * allproc_lock here so that process creation and
		 * destruction are not blocked while we iterate.
		 *
		 * To later reacquire allproc_lock and resume
		 * iteration over the allproc list, we will first have
		 * to release the lock on the process.  We place a
		 * hold on the process so that it remains in the
		 * allproc list while it is unlocked.
		 */
		_PHOLD_LITE(p);
		sx_sunlock(&allproc_lock);

		/*
		 * Do not swapout a realtime process.
		 * Guarantee swap_idle_threshold1 time in memory.
		 * If the system is under memory stress, or if we are
		 * swapping idle processes >= swap_idle_threshold2,
		 * then swap the process out.
		 */
		doswap = true;
		FOREACH_THREAD_IN_PROC(p, td) {
			thread_lock(td);
			slptime = (ticks - td->td_slptick) / hz;
			if (PRI_IS_REALTIME(td->td_pri_class) ||
			    slptime < swap_idle_threshold1 ||
			    !thread_safetoswapout(td) ||
			    ((action & VM_SWAP_NORMAL) == 0 &&
			    slptime < swap_idle_threshold2))
				doswap = false;
			thread_unlock(td);
			if (!doswap)
				break;
		}
		if (doswap && swapout(p) == 0)
			didswap = true;

		PROC_UNLOCK(p);
		if (didswap) {
			sx_xlock(&allproc_lock);
			swapped_cnt++;
			sx_downgrade(&allproc_lock);
		} else
			sx_slock(&allproc_lock);
		PRELE(p);
	}
	sx_sunlock(&allproc_lock);

	/*
	 * If we swapped something out, and another process needed memory,
	 * then wakeup the sched process.
	 */
	if (didswap)
		wakeup(&proc0);
}

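/*
 * Mark the process and all of its threads as resident again: clear the
 * swapped-out thread state and make runnable any thread that can run.
 */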
static void
swapclear(struct proc *p)
{
	struct thread *td;

	PROC_LOCK_ASSERT(p, MA_OWNED);

	FOREACH_THREAD_IN_PROC(p, td) {
		thread_lock(td);
		td->td_flags |= TDF_INMEM;
		td->td_flags &= ~TDF_SWAPINREQ;
		TD_CLR_SWAPPED(td);
		if (TD_CAN_RUN(td)) {
			if (setrunnable(td, 0)) {
#ifdef INVARIANTS
				/*
				 * XXX: We just cleared TDI_SWAPPED
				 * above and set TDF_INMEM, so this
				 * should never happen.
				 */
				panic("not waking up swapper");
#endif
			}
		} else
			thread_unlock(td);
	}
	p->p_flag &= ~(P_SWAPPINGIN | P_SWAPPINGOUT);
	p->p_flag |= P_INMEM;
}

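/*
 * Swap out process p: mark it and its threads as swapped and page out
 * each thread's kernel stack.  Returns 0 on success and EBUSY if a
 * thread turned out not to be safe to swap out.
 */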
static int
swapout(struct proc *p)
{
	struct thread *td;

	PROC_LOCK_ASSERT(p, MA_OWNED);

	/*
	 * The states of this process and its threads may have changed
	 * by now.  Assuming that there is only one pageout daemon thread,
	 * this process should still be in memory.
	 */
	KASSERT((p->p_flag & (P_INMEM | P_SWAPPINGOUT | P_SWAPPINGIN)) ==
	    P_INMEM, ("swapout: lost a swapout race?"));

	/*
	 * Remember the resident count.
	 */
	p->p_vmspace->vm_swrss = vmspace_resident_count(p->p_vmspace);

	/*
	 * Check and mark all threads before we proceed.
	 */
	p->p_flag &= ~P_INMEM;
	p->p_flag |= P_SWAPPINGOUT;
	FOREACH_THREAD_IN_PROC(p, td) {
		thread_lock(td);
		if (!thread_safetoswapout(td)) {
			thread_unlock(td);
			swapclear(p);
			return (EBUSY);
		}
		td->td_flags &= ~TDF_INMEM;
		TD_SET_SWAPPED(td);
		thread_unlock(td);
	}
	td = FIRST_THREAD_IN_PROC(p);
	++td->td_ru.ru_nswap;
	PROC_UNLOCK(p);

	/*
	 * This list is stable because all threads are now prevented from
	 * running.  The list is only modified in the context of a running
	 * thread in this process.
	 */
	FOREACH_THREAD_IN_PROC(p, td)
		vm_thread_swapout(td);

	PROC_LOCK(p);
	p->p_flag &= ~P_SWAPPINGOUT;
	p->p_swtick = ticks;
	return (0);
}