sys/vm/vm_glue.c
1 /*-
2 * Copyright (c) 1991, 1993
3 * The Regents of the University of California. All rights reserved.
4 *
5 * This code is derived from software contributed to Berkeley by
6 * The Mach Operating System project at Carnegie-Mellon University.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 4. Neither the name of the University nor the names of its contributors
17 * may be used to endorse or promote products derived from this software
18 * without specific prior written permission.
19 *
20 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
24 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30 * SUCH DAMAGE.
31 *
32 * from: @(#)vm_glue.c 8.6 (Berkeley) 1/5/94
33 *
34 *
35 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
36 * All rights reserved.
37 *
38 * Permission to use, copy, modify and distribute this software and
39 * its documentation is hereby granted, provided that both the copyright
40 * notice and this permission notice appear in all copies of the
41 * software, derivative works or modified versions, and any portions
42 * thereof, and that both notices appear in supporting documentation.
43 *
44 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
45 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
46 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
47 *
48 * Carnegie Mellon requests users of this software to return to
49 *
50 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
51 * School of Computer Science
52 * Carnegie Mellon University
53 * Pittsburgh PA 15213-3890
54 *
55 * any improvements or extensions that they make and grant Carnegie the
56 * rights to redistribute these changes.
57 */
58
59 #include <sys/cdefs.h>
60 __FBSDID("$FreeBSD$");
61
62 #include "opt_vm.h"
63 #include "opt_kstack_pages.h"
64 #include "opt_kstack_max_pages.h"
65 #include "opt_kstack_usage_prof.h"
66
67 #include <sys/param.h>
68 #include <sys/systm.h>
69 #include <sys/limits.h>
70 #include <sys/lock.h>
71 #include <sys/malloc.h>
72 #include <sys/mutex.h>
73 #include <sys/proc.h>
74 #include <sys/racct.h>
75 #include <sys/resourcevar.h>
76 #include <sys/rwlock.h>
77 #include <sys/sched.h>
78 #include <sys/sf_buf.h>
79 #include <sys/shm.h>
80 #include <sys/vmmeter.h>
81 #include <sys/vmem.h>
82 #include <sys/sx.h>
83 #include <sys/sysctl.h>
84 #include <sys/_kstack_cache.h>
85 #include <sys/eventhandler.h>
86 #include <sys/kernel.h>
87 #include <sys/ktr.h>
88 #include <sys/unistd.h>
89
90 #include <vm/vm.h>
91 #include <vm/vm_param.h>
92 #include <vm/pmap.h>
93 #include <vm/vm_map.h>
94 #include <vm/vm_page.h>
95 #include <vm/vm_pageout.h>
96 #include <vm/vm_object.h>
97 #include <vm/vm_kern.h>
98 #include <vm/vm_extern.h>
99 #include <vm/vm_pager.h>
100 #include <vm/swap_pager.h>
101
102 #include <machine/cpu.h>
103
104 #ifndef NO_SWAPPING
105 static int swapout(struct proc *);
106 static void swapclear(struct proc *);
107 static void vm_thread_swapin(struct thread *td);
108 static void vm_thread_swapout(struct thread *td);
109 #endif
110
111 /*
112 * MPSAFE
113 *
114 * WARNING! This code calls vm_map_check_protection() which only checks
115 * the associated vm_map_entry range. It does not determine whether the
116  *	contents of the memory are actually readable or writable.  In most cases
117 * just checking the vm_map_entry is sufficient within the kernel's address
118 * space.
119 */
120 int
121 kernacc(addr, len, rw)
122 void *addr;
123 int len, rw;
124 {
125 boolean_t rv;
126 vm_offset_t saddr, eaddr;
127 vm_prot_t prot;
128
129 KASSERT((rw & ~VM_PROT_ALL) == 0,
130 ("illegal ``rw'' argument to kernacc (%x)\n", rw));
131
132 if ((vm_offset_t)addr + len > kernel_map->max_offset ||
133 (vm_offset_t)addr + len < (vm_offset_t)addr)
134 return (FALSE);
135
136 prot = rw;
137 saddr = trunc_page((vm_offset_t)addr);
138 eaddr = round_page((vm_offset_t)addr + len);
139 vm_map_lock_read(kernel_map);
140 rv = vm_map_check_protection(kernel_map, saddr, eaddr, prot);
141 vm_map_unlock_read(kernel_map);
142 return (rv == TRUE);
143 }
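/*
 * Illustrative sketch: a typical caller (e.g. a /dev/mem-style driver or a
 * debugger back end) validates a kernel range before touching it; the names
 * here are hypothetical:
 *
 *	if (!kernacc((void *)va, len, VM_PROT_READ | VM_PROT_WRITE))
 *		return (EFAULT);
 */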
144
145 /*
146 * MPSAFE
147 *
148 * WARNING! This code calls vm_map_check_protection() which only checks
149 * the associated vm_map_entry range. It does not determine whether the
150  *	contents of the memory are actually readable or writable.  vmapbuf(),
151  *	vm_fault_quick(), or the copyin()/copyout()/su*()/fu*() functions should
152  *	be used in conjunction with this call.
153 */
154 int
155 useracc(addr, len, rw)
156 void *addr;
157 int len, rw;
158 {
159 boolean_t rv;
160 vm_prot_t prot;
161 vm_map_t map;
162
163 KASSERT((rw & ~VM_PROT_ALL) == 0,
164 ("illegal ``rw'' argument to useracc (%x)\n", rw));
165 prot = rw;
166 map = &curproc->p_vmspace->vm_map;
167 if ((vm_offset_t)addr + len > vm_map_max(map) ||
168 (vm_offset_t)addr + len < (vm_offset_t)addr) {
169 return (FALSE);
170 }
171 vm_map_lock_read(map);
172 rv = vm_map_check_protection(map, trunc_page((vm_offset_t)addr),
173 round_page((vm_offset_t)addr + len), prot);
174 vm_map_unlock_read(map);
175 return (rv == TRUE);
176 }
177
178 int
179 vslock(void *addr, size_t len)
180 {
181 vm_offset_t end, last, start;
182 vm_size_t npages;
183 int error;
184
185 last = (vm_offset_t)addr + len;
186 start = trunc_page((vm_offset_t)addr);
187 end = round_page(last);
188 if (last < (vm_offset_t)addr || end < (vm_offset_t)addr)
189 return (EINVAL);
190 npages = atop(end - start);
191 if (npages > vm_page_max_wired)
192 return (ENOMEM);
193 #if 0
194 /*
195 * XXX - not yet
196 *
197 * The limit for transient usage of wired pages should be
198 * larger than for "permanent" wired pages (mlock()).
199 *
200 * Also, the sysctl code, which is the only present user
201 * of vslock(), does a hard loop on EAGAIN.
202 */
203 if (npages + cnt.v_wire_count > vm_page_max_wired)
204 return (EAGAIN);
205 #endif
206 error = vm_map_wire(&curproc->p_vmspace->vm_map, start, end,
207 VM_MAP_WIRE_SYSTEM | VM_MAP_WIRE_NOHOLES);
208 /*
209 * Return EFAULT on error to match copy{in,out}() behaviour
210 * rather than returning ENOMEM like mlock() would.
211 */
212 return (error == KERN_SUCCESS ? 0 : EFAULT);
213 }
214
215 void
216 vsunlock(void *addr, size_t len)
217 {
218
219 /* Rely on the parameter sanity checks performed by vslock(). */
220 (void)vm_map_unwire(&curproc->p_vmspace->vm_map,
221 trunc_page((vm_offset_t)addr), round_page((vm_offset_t)addr + len),
222 VM_MAP_WIRE_SYSTEM | VM_MAP_WIRE_NOHOLES);
223 }
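/*
 * Illustrative sketch (not compiled): how a caller such as a sysctl handler
 * might pair vslock()/vsunlock() around copyout() so that the user buffer
 * stays resident for the duration of the copy.  The helper name and its
 * arguments are hypothetical.
 */
#if 0
static int
export_to_user(void *ubuf, const void *kbuf, size_t len)
{
	int error;

	error = vslock(ubuf, len);		/* wire the user pages */
	if (error != 0)
		return (error);
	error = copyout(kbuf, ubuf, len);	/* pages are resident; no page-in */
	vsunlock(ubuf, len);			/* always unwire */
	return (error);
}
#endif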
224
225 /*
226 * Pin the page contained within the given object at the given offset. If the
227 * page is not resident, allocate and load it using the given object's pager.
228 * Return the pinned page if successful; otherwise, return NULL.
229 */
230 static vm_page_t
231 vm_imgact_hold_page(vm_object_t object, vm_ooffset_t offset)
232 {
233 vm_page_t m, ma[1];
234 vm_pindex_t pindex;
235 int rv;
236
237 VM_OBJECT_WLOCK(object);
238 pindex = OFF_TO_IDX(offset);
239 m = vm_page_grab(object, pindex, VM_ALLOC_NORMAL);
240 if (m->valid != VM_PAGE_BITS_ALL) {
241 ma[0] = m;
242 rv = vm_pager_get_pages(object, ma, 1, 0);
243 m = vm_page_lookup(object, pindex);
244 if (m == NULL)
245 goto out;
246 if (rv != VM_PAGER_OK) {
247 vm_page_lock(m);
248 vm_page_free(m);
249 vm_page_unlock(m);
250 m = NULL;
251 goto out;
252 }
253 }
254 vm_page_xunbusy(m);
255 vm_page_lock(m);
256 vm_page_hold(m);
257 vm_page_activate(m);
258 vm_page_unlock(m);
259 out:
260 VM_OBJECT_WUNLOCK(object);
261 return (m);
262 }
263
264 /*
265 * Return a CPU private mapping to the page at the given offset within the
266 * given object. The page is pinned before it is mapped.
267 */
268 struct sf_buf *
269 vm_imgact_map_page(vm_object_t object, vm_ooffset_t offset)
270 {
271 vm_page_t m;
272
273 m = vm_imgact_hold_page(object, offset);
274 if (m == NULL)
275 return (NULL);
276 sched_pin();
277 return (sf_buf_alloc(m, SFB_CPUPRIVATE));
278 }
279
280 /*
281 * Destroy the given CPU private mapping and unpin the page that it mapped.
282 */
283 void
284 vm_imgact_unmap_page(struct sf_buf *sf)
285 {
286 vm_page_t m;
287
288 m = sf_buf_page(sf);
289 sf_buf_free(sf);
290 sched_unpin();
291 vm_page_lock(m);
292 vm_page_unhold(m);
293 vm_page_unlock(m);
294 }
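/*
 * Illustrative sketch (not compiled): how an image activator might use the
 * two helpers above to copy a small, page-contained range out of an
 * executable's VM object.  The function and variable names are hypothetical.
 */
#if 0
static int
imgact_copy_from_object(vm_object_t obj, vm_ooffset_t off, void *dst,
    size_t len)
{
	struct sf_buf *sf;

	/* The range must not cross a page boundary in this simple sketch. */
	sf = vm_imgact_map_page(obj, off);
	if (sf == NULL)
		return (EIO);
	memcpy(dst, (char *)sf_buf_kva(sf) + (off & PAGE_MASK), len);
	vm_imgact_unmap_page(sf);	/* drops the mapping and the page hold */
	return (0);
}
#endif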
295
296 void
297 vm_sync_icache(vm_map_t map, vm_offset_t va, vm_offset_t sz)
298 {
299
300 pmap_sync_icache(map->pmap, va, sz);
301 }
302
303 struct kstack_cache_entry *kstack_cache;
304 static int kstack_cache_size = 128;
305 static int kstacks;
306 static struct mtx kstack_cache_mtx;
307 MTX_SYSINIT(kstack_cache, &kstack_cache_mtx, "kstkch", MTX_DEF);
308
309 SYSCTL_INT(_vm, OID_AUTO, kstack_cache_size, CTLFLAG_RW, &kstack_cache_size, 0,
310 "");
311 SYSCTL_INT(_vm, OID_AUTO, kstacks, CTLFLAG_RD, &kstacks, 0,
312 "");
313
314 #ifndef KSTACK_MAX_PAGES
315 #define KSTACK_MAX_PAGES 32
316 #endif
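/*
 * KSTACK_PAGES (the default size) and KSTACK_MAX_PAGES normally come from
 * the machine-dependent parameters or from the kernel configuration, e.g.
 * "options KSTACK_PAGES=4"; the fallback above merely caps oversized
 * requests.
 */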
317
318 /*
319 * Create the kernel stack (including pcb for i386) for a new thread.
320  * This routine directly affects the fork performance of a process and
321  * the creation performance of a thread.
322 */
323 int
324 vm_thread_new(struct thread *td, int pages)
325 {
326 vm_object_t ksobj;
327 vm_offset_t ks;
328 vm_page_t m, ma[KSTACK_MAX_PAGES];
329 struct kstack_cache_entry *ks_ce;
330 int i;
331
332 /* Bounds check */
333 if (pages <= 1)
334 pages = KSTACK_PAGES;
335 else if (pages > KSTACK_MAX_PAGES)
336 pages = KSTACK_MAX_PAGES;
337
338 if (pages == KSTACK_PAGES) {
339 mtx_lock(&kstack_cache_mtx);
340 if (kstack_cache != NULL) {
341 ks_ce = kstack_cache;
342 kstack_cache = ks_ce->next_ks_entry;
343 mtx_unlock(&kstack_cache_mtx);
344
345 td->td_kstack_obj = ks_ce->ksobj;
346 td->td_kstack = (vm_offset_t)ks_ce;
347 td->td_kstack_pages = KSTACK_PAGES;
348 return (1);
349 }
350 mtx_unlock(&kstack_cache_mtx);
351 }
352
353 /*
354 * Allocate an object for the kstack.
355 */
356 ksobj = vm_object_allocate(OBJT_DEFAULT, pages);
357
358 /*
359 * Get a kernel virtual address for this thread's kstack.
360 */
361 #if defined(__mips__)
362 /*
363 * We need to align the kstack's mapped address to fit within
364 * a single TLB entry.
365 */
366 if (vmem_xalloc(kernel_arena, (pages + KSTACK_GUARD_PAGES) * PAGE_SIZE,
367 PAGE_SIZE * 2, 0, 0, VMEM_ADDR_MIN, VMEM_ADDR_MAX,
368 M_BESTFIT | M_NOWAIT, &ks)) {
369 ks = 0;
370 }
371 #else
372 ks = kva_alloc((pages + KSTACK_GUARD_PAGES) * PAGE_SIZE);
373 #endif
374 if (ks == 0) {
375 printf("vm_thread_new: kstack allocation failed\n");
376 vm_object_deallocate(ksobj);
377 return (0);
378 }
379
380 atomic_add_int(&kstacks, 1);
381 if (KSTACK_GUARD_PAGES != 0) {
382 pmap_qremove(ks, KSTACK_GUARD_PAGES);
383 ks += KSTACK_GUARD_PAGES * PAGE_SIZE;
384 }
385 td->td_kstack_obj = ksobj;
386 td->td_kstack = ks;
387 /*
388 * Knowing the number of pages allocated is useful when you
389 * want to deallocate them.
390 */
391 td->td_kstack_pages = pages;
392 /*
393  	 * For the length of the stack, link in a real page of RAM for each
394 * page of stack.
395 */
396 VM_OBJECT_WLOCK(ksobj);
397 for (i = 0; i < pages; i++) {
398 /*
399 * Get a kernel stack page.
400 */
401 m = vm_page_grab(ksobj, i, VM_ALLOC_NOBUSY |
402 VM_ALLOC_NORMAL | VM_ALLOC_WIRED);
403 ma[i] = m;
404 m->valid = VM_PAGE_BITS_ALL;
405 }
406 VM_OBJECT_WUNLOCK(ksobj);
407 pmap_qenter(ks, ma, pages);
408 return (1);
409 }
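/*
 * Resulting KVA layout for a newly created stack (guard pages are left
 * unmapped so that an overflow faults instead of silently corrupting the
 * neighbouring allocation):
 *
 *	base of kva_alloc()                td_kstack                       top
 *	|<- KSTACK_GUARD_PAGES, unmapped ->|<- td_kstack_pages, mapped   ->|
 *
 * On machines whose stacks grow down, use of the stack proceeds from the
 * top toward the guard region.
 */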
410
411 static void
412 vm_thread_stack_dispose(vm_object_t ksobj, vm_offset_t ks, int pages)
413 {
414 vm_page_t m;
415 int i;
416
417 atomic_add_int(&kstacks, -1);
418 pmap_qremove(ks, pages);
419 VM_OBJECT_WLOCK(ksobj);
420 for (i = 0; i < pages; i++) {
421 m = vm_page_lookup(ksobj, i);
422 if (m == NULL)
423 panic("vm_thread_dispose: kstack already missing?");
424 vm_page_lock(m);
425 vm_page_unwire(m, 0);
426 vm_page_free(m);
427 vm_page_unlock(m);
428 }
429 VM_OBJECT_WUNLOCK(ksobj);
430 vm_object_deallocate(ksobj);
431 kva_free(ks - (KSTACK_GUARD_PAGES * PAGE_SIZE),
432 (pages + KSTACK_GUARD_PAGES) * PAGE_SIZE);
433 }
434
435 /*
436 * Dispose of a thread's kernel stack.
437 */
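/*
 * Default-sized stacks are not released immediately: while the cache has
 * room they are chained onto kstack_cache, and the kstack_cache_entry
 * (object pointer and next link) is written into the freed stack's own
 * KVA, so caching consumes no extra memory.  The cache is drained on a
 * vm_lowmem event by vm_thread_stack_lowmem().
 */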
438 void
439 vm_thread_dispose(struct thread *td)
440 {
441 vm_object_t ksobj;
442 vm_offset_t ks;
443 struct kstack_cache_entry *ks_ce;
444 int pages;
445
446 pages = td->td_kstack_pages;
447 ksobj = td->td_kstack_obj;
448 ks = td->td_kstack;
449 td->td_kstack = 0;
450 td->td_kstack_pages = 0;
451 if (pages == KSTACK_PAGES && kstacks <= kstack_cache_size) {
452 ks_ce = (struct kstack_cache_entry *)ks;
453 ks_ce->ksobj = ksobj;
454 mtx_lock(&kstack_cache_mtx);
455 ks_ce->next_ks_entry = kstack_cache;
456 kstack_cache = ks_ce;
457 mtx_unlock(&kstack_cache_mtx);
458 return;
459 }
460 vm_thread_stack_dispose(ksobj, ks, pages);
461 }
462
463 static void
464 vm_thread_stack_lowmem(void *nulll)
465 {
466 struct kstack_cache_entry *ks_ce, *ks_ce1;
467
468 mtx_lock(&kstack_cache_mtx);
469 ks_ce = kstack_cache;
470 kstack_cache = NULL;
471 mtx_unlock(&kstack_cache_mtx);
472
473 while (ks_ce != NULL) {
474 ks_ce1 = ks_ce;
475 ks_ce = ks_ce->next_ks_entry;
476
477 vm_thread_stack_dispose(ks_ce1->ksobj, (vm_offset_t)ks_ce1,
478 KSTACK_PAGES);
479 }
480 }
481
482 static void
483 kstack_cache_init(void *nulll)
484 {
485
486 EVENTHANDLER_REGISTER(vm_lowmem, vm_thread_stack_lowmem, NULL,
487 EVENTHANDLER_PRI_ANY);
488 }
489
490 SYSINIT(vm_kstacks, SI_SUB_KTHREAD_INIT, SI_ORDER_ANY, kstack_cache_init, NULL);
491
492 #ifdef KSTACK_USAGE_PROF
493 /*
494 * Track maximum stack used by a thread in kernel.
495 */
496 static int max_kstack_used;
497
498 SYSCTL_INT(_debug, OID_AUTO, max_kstack_used, CTLFLAG_RD,
499 &max_kstack_used, 0,
500      "Maximum stack depth used by a thread in kernel");
501
502 void
503 intr_prof_stack_use(struct thread *td, struct trapframe *frame)
504 {
505 vm_offset_t stack_top;
506 vm_offset_t current;
507 int used, prev_used;
508
509 /*
510 * Testing for interrupted kernel mode isn't strictly
511 * needed. It optimizes the execution, since interrupts from
512 * usermode will have only the trap frame on the stack.
513 */
514 if (TRAPF_USERMODE(frame))
515 return;
516
517 stack_top = td->td_kstack + td->td_kstack_pages * PAGE_SIZE;
518 current = (vm_offset_t)(uintptr_t)&stack_top;
519
520 /*
521  	 * Try to detect if the interrupt is using the kernel thread stack.
522 * Hardware could use a dedicated stack for interrupt handling.
523 */
524 if (stack_top <= current || current < td->td_kstack)
525 return;
526
527 used = stack_top - current;
528 for (;;) {
529 prev_used = max_kstack_used;
530 if (prev_used >= used)
531 break;
532 if (atomic_cmpset_int(&max_kstack_used, prev_used, used))
533 break;
534 }
535 }
536 #endif /* KSTACK_USAGE_PROF */
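/*
 * With "options KSTACK_USAGE_PROF" in the kernel configuration, the
 * high-water mark collected above is exported read-only as
 * debug.max_kstack_used (e.g. "sysctl debug.max_kstack_used").
 */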
537
538 #ifndef NO_SWAPPING
539 /*
540 * Allow a thread's kernel stack to be paged out.
541 */
542 static void
543 vm_thread_swapout(struct thread *td)
544 {
545 vm_object_t ksobj;
546 vm_page_t m;
547 int i, pages;
548
549 cpu_thread_swapout(td);
550 pages = td->td_kstack_pages;
551 ksobj = td->td_kstack_obj;
552 pmap_qremove(td->td_kstack, pages);
553 VM_OBJECT_WLOCK(ksobj);
554 for (i = 0; i < pages; i++) {
555 m = vm_page_lookup(ksobj, i);
556 if (m == NULL)
557 panic("vm_thread_swapout: kstack already missing?");
558 vm_page_dirty(m);
559 vm_page_lock(m);
560 vm_page_unwire(m, 0);
561 vm_page_unlock(m);
562 }
563 VM_OBJECT_WUNLOCK(ksobj);
564 }
565
566 /*
567 * Bring the kernel stack for a specified thread back in.
568 */
569 static void
570 vm_thread_swapin(struct thread *td)
571 {
572 vm_object_t ksobj;
573 vm_page_t ma[KSTACK_MAX_PAGES];
574 int i, j, k, pages, rv;
575
576 pages = td->td_kstack_pages;
577 ksobj = td->td_kstack_obj;
578 VM_OBJECT_WLOCK(ksobj);
579 for (i = 0; i < pages; i++)
580 ma[i] = vm_page_grab(ksobj, i, VM_ALLOC_NORMAL |
581 VM_ALLOC_WIRED);
582 for (i = 0; i < pages; i++) {
583 if (ma[i]->valid != VM_PAGE_BITS_ALL) {
584 vm_page_assert_xbusied(ma[i]);
585 vm_object_pip_add(ksobj, 1);
586 for (j = i + 1; j < pages; j++) {
587 if (ma[j]->valid != VM_PAGE_BITS_ALL)
588 vm_page_assert_xbusied(ma[j]);
589 if (ma[j]->valid == VM_PAGE_BITS_ALL)
590 break;
591 }
592 rv = vm_pager_get_pages(ksobj, ma + i, j - i, 0);
593 if (rv != VM_PAGER_OK)
594 panic("vm_thread_swapin: cannot get kstack for proc: %d",
595 td->td_proc->p_pid);
596 vm_object_pip_wakeup(ksobj);
597 for (k = i; k < j; k++)
598 ma[k] = vm_page_lookup(ksobj, k);
599 vm_page_xunbusy(ma[i]);
600 } else if (vm_page_xbusied(ma[i]))
601 vm_page_xunbusy(ma[i]);
602 }
603 VM_OBJECT_WUNLOCK(ksobj);
604 pmap_qenter(td->td_kstack, ma, pages);
605 cpu_thread_swapin(td);
606 }
607 #endif /* !NO_SWAPPING */
608
609 /*
610 * Implement fork's actions on an address space.
611 * Here we arrange for the address space to be copied or referenced,
612 * allocate a user struct (pcb and kernel stack), then call the
613 * machine-dependent layer to fill those in and make the new process
614 * ready to run. The new process is set up so that it returns directly
615 * to user mode to avoid stack copying and relocation problems.
616 */
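/*
 * The flags mirror rfork(2): RFPROC indicates that a new process is being
 * created, and RFMEM makes parent and child share a single vmspace (as for
 * a vfork-style caller or a kernel thread).  Without RFMEM the child is
 * given "vm2", the copy-on-write vmspace prepared by the caller.
 */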
617 int
618 vm_forkproc(td, p2, td2, vm2, flags)
619 struct thread *td;
620 struct proc *p2;
621 struct thread *td2;
622 struct vmspace *vm2;
623 int flags;
624 {
625 struct proc *p1 = td->td_proc;
626 int error;
627
628 if ((flags & RFPROC) == 0) {
629 /*
630  		 * Divorce the memory, if it is shared, essentially
631  		 * turning memory shared amongst threads into
632  		 * copy-on-write (COW) memory locally.
633 */
634 if ((flags & RFMEM) == 0) {
635 if (p1->p_vmspace->vm_refcnt > 1) {
636 error = vmspace_unshare(p1);
637 if (error)
638 return (error);
639 }
640 }
641 cpu_fork(td, p2, td2, flags);
642 return (0);
643 }
644
645 if (flags & RFMEM) {
646 p2->p_vmspace = p1->p_vmspace;
647 atomic_add_int(&p1->p_vmspace->vm_refcnt, 1);
648 }
649
650 while (vm_page_count_severe()) {
651 VM_WAIT;
652 }
653
654 if ((flags & RFMEM) == 0) {
655 p2->p_vmspace = vm2;
656 if (p1->p_vmspace->vm_shm)
657 shmfork(p1, p2);
658 }
659
660 /*
661 * cpu_fork will copy and update the pcb, set up the kernel stack,
662 * and make the child ready to run.
663 */
664 cpu_fork(td, p2, td2, flags);
665 return (0);
666 }
667
668 /*
669  * Called after a process has been wait(2)'ed upon and is being reaped.
670 * The idea is to reclaim resources that we could not reclaim while
671 * the process was still executing.
672 */
673 void
674 vm_waitproc(p)
675 struct proc *p;
676 {
677
678 vmspace_exitfree(p); /* and clean-out the vmspace */
679 }
680
681 void
682 faultin(p)
683 struct proc *p;
684 {
685 #ifdef NO_SWAPPING
686
687 PROC_LOCK_ASSERT(p, MA_OWNED);
688 if ((p->p_flag & P_INMEM) == 0)
689 panic("faultin: proc swapped out with NO_SWAPPING!");
690 #else /* !NO_SWAPPING */
691 struct thread *td;
692
693 PROC_LOCK_ASSERT(p, MA_OWNED);
694 /*
695 * If another process is swapping in this process,
696 * just wait until it finishes.
697 */
698 if (p->p_flag & P_SWAPPINGIN) {
699 while (p->p_flag & P_SWAPPINGIN)
700 msleep(&p->p_flag, &p->p_mtx, PVM, "faultin", 0);
701 return;
702 }
703 if ((p->p_flag & P_INMEM) == 0) {
704 /*
705 * Don't let another thread swap process p out while we are
706 * busy swapping it in.
707 */
708 ++p->p_lock;
709 p->p_flag |= P_SWAPPINGIN;
710 PROC_UNLOCK(p);
711
712 /*
713 * We hold no lock here because the list of threads
714 * can not change while all threads in the process are
715 * swapped out.
716 */
717 FOREACH_THREAD_IN_PROC(p, td)
718 vm_thread_swapin(td);
719 PROC_LOCK(p);
720 swapclear(p);
721 p->p_swtick = ticks;
722
723 wakeup(&p->p_flag);
724
725 /* Allow other threads to swap p out now. */
726 --p->p_lock;
727 }
728 #endif /* NO_SWAPPING */
729 }
730
731 /*
732 * This swapin algorithm attempts to swap-in processes only if there
733 * is enough space for them. Of course, if a process waits for a long
734 * time, it will be swapped in anyway.
735 */
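/*
 * Candidate selection below reduces to a per-thread score,
 *
 *	pri = swtime + slptime		(seconds swapped out + seconds asleep)
 *	pri -= p_nice * 8		(skipped if TDF_SWAPINREQ is set)
 *
 * and the runnable-but-swapped thread with the highest score is chosen.
 * For example, a nice-0 process swapped out for 30 seconds whose thread
 * has slept 20 seconds scores 50; the same process at nice 20 scores
 * 50 - 160 = -110.
 */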
736 void
737 swapper(void)
738 {
739 struct proc *p;
740 struct thread *td;
741 struct proc *pp;
742 int slptime;
743 int swtime;
744 int ppri;
745 int pri;
746
747 loop:
748 if (vm_page_count_min()) {
749 VM_WAIT;
750 goto loop;
751 }
752
753 pp = NULL;
754 ppri = INT_MIN;
755 sx_slock(&allproc_lock);
756 FOREACH_PROC_IN_SYSTEM(p) {
757 PROC_LOCK(p);
758 if (p->p_state == PRS_NEW ||
759 p->p_flag & (P_SWAPPINGOUT | P_SWAPPINGIN | P_INMEM)) {
760 PROC_UNLOCK(p);
761 continue;
762 }
763 swtime = (ticks - p->p_swtick) / hz;
764 FOREACH_THREAD_IN_PROC(p, td) {
765 /*
766 * An otherwise runnable thread of a process
767 * swapped out has only the TDI_SWAPPED bit set.
768 *
769 */
770 thread_lock(td);
771 if (td->td_inhibitors == TDI_SWAPPED) {
772 slptime = (ticks - td->td_slptick) / hz;
773 pri = swtime + slptime;
774 if ((td->td_flags & TDF_SWAPINREQ) == 0)
775 pri -= p->p_nice * 8;
776 /*
777 * if this thread is higher priority
778 * and there is enough space, then select
779 * this process instead of the previous
780 * selection.
781 */
782 if (pri > ppri) {
783 pp = p;
784 ppri = pri;
785 }
786 }
787 thread_unlock(td);
788 }
789 PROC_UNLOCK(p);
790 }
791 sx_sunlock(&allproc_lock);
792
793 /*
794 * Nothing to do, back to sleep.
795 */
796 if ((p = pp) == NULL) {
797 tsleep(&proc0, PVM, "swapin", MAXSLP * hz / 2);
798 goto loop;
799 }
800 PROC_LOCK(p);
801
802 /*
803 * Another process may be bringing or may have already
804 * brought this process in while we traverse all threads.
805 * Or, this process may even be being swapped out again.
806 */
807 if (p->p_flag & (P_INMEM | P_SWAPPINGOUT | P_SWAPPINGIN)) {
808 PROC_UNLOCK(p);
809 goto loop;
810 }
811
812 /*
813  	 * We would like to bring someone in (only if there is space).
814  	 * [What checks the space?]
815 */
816 faultin(p);
817 PROC_UNLOCK(p);
818 goto loop;
819 }
820
821 void
822 kick_proc0(void)
823 {
824
825 wakeup(&proc0);
826 }
827
828 #ifndef NO_SWAPPING
829
830 /*
831 * Swap_idle_threshold1 is the guaranteed swapped in time for a process
832 */
833 static int swap_idle_threshold1 = 2;
834 SYSCTL_INT(_vm, OID_AUTO, swap_idle_threshold1, CTLFLAG_RW,
835 &swap_idle_threshold1, 0, "Guaranteed swapped in time for a process");
836
837 /*
838 * Swap_idle_threshold2 is the time that a process can be idle before
839 * it will be swapped out, if idle swapping is enabled.
840 */
841 static int swap_idle_threshold2 = 10;
842 SYSCTL_INT(_vm, OID_AUTO, swap_idle_threshold2, CTLFLAG_RW,
843 &swap_idle_threshold2, 0, "Time before a process will be swapped out");
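/*
 * Both thresholds are run-time tunable, e.g. "sysctl vm.swap_idle_threshold2=30".
 * Idle swapping itself is attempted only when the caller of swapout_procs()
 * passes VM_SWAP_IDLE.
 */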
844
845 /*
846 * First, if any processes have been sleeping or stopped for at least
847 * "swap_idle_threshold1" seconds, they are swapped out. If, however,
848 * no such processes exist, then the longest-sleeping or stopped
849 * process is swapped out. Finally, and only as a last resort, if
850 * there are no sleeping or stopped processes, the longest-resident
851 * process is swapped out.
852 */
853 void
854 swapout_procs(action)
855 int action;
856 {
857 struct proc *p;
858 struct thread *td;
859 int didswap = 0;
860
861 retry:
862 sx_slock(&allproc_lock);
863 FOREACH_PROC_IN_SYSTEM(p) {
864 struct vmspace *vm;
865 int minslptime = 100000;
866 int slptime;
867
868 /*
869 * Watch out for a process in
870 * creation. It may have no
871 * address space or lock yet.
872 */
873 if (p->p_state == PRS_NEW)
874 continue;
875 /*
876 * An aio daemon switches its
877 * address space while running.
878 * Perform a quick check whether
879 * a process has P_SYSTEM.
880 */
881 if ((p->p_flag & P_SYSTEM) != 0)
882 continue;
883 /*
884 * Do not swapout a process that
885 * is waiting for VM data
886 * structures as there is a possible
887 * deadlock. Test this first as
888 * this may block.
889 *
890 * Lock the map until swapout
891 * finishes, or a thread of this
892 * process may attempt to alter
893 * the map.
894 */
895 vm = vmspace_acquire_ref(p);
896 if (vm == NULL)
897 continue;
898 if (!vm_map_trylock(&vm->vm_map))
899 goto nextproc1;
900
901 PROC_LOCK(p);
902 if (p->p_lock != 0 ||
903 (p->p_flag & (P_STOPPED_SINGLE|P_TRACED|P_SYSTEM|P_WEXIT)
904 ) != 0) {
905 goto nextproc;
906 }
907 /*
908  		 * Only aiod changes its vmspace; however, it will be
909  		 * skipped because of the if statement above checking
910  		 * for P_SYSTEM.
911 */
912 if ((p->p_flag & (P_INMEM|P_SWAPPINGOUT|P_SWAPPINGIN)) != P_INMEM)
913 goto nextproc;
914
915 switch (p->p_state) {
916 default:
917 /* Don't swap out processes in any sort
918 * of 'special' state. */
919 break;
920
921 case PRS_NORMAL:
922 /*
923  			 * Do not swap out a realtime process.
924  			 * Check all the threads.
925 */
926 FOREACH_THREAD_IN_PROC(p, td) {
927 thread_lock(td);
928 if (PRI_IS_REALTIME(td->td_pri_class)) {
929 thread_unlock(td);
930 goto nextproc;
931 }
932 slptime = (ticks - td->td_slptick) / hz;
933 /*
934 * Guarantee swap_idle_threshold1
935 * time in memory.
936 */
937 if (slptime < swap_idle_threshold1) {
938 thread_unlock(td);
939 goto nextproc;
940 }
941
942 /*
943 * Do not swapout a process if it is
944 * waiting on a critical event of some
945 * kind or there is a thread whose
946 * pageable memory may be accessed.
947 *
948 * This could be refined to support
949 * swapping out a thread.
950 */
951 if (!thread_safetoswapout(td)) {
952 thread_unlock(td);
953 goto nextproc;
954 }
955 /*
956 * If the system is under memory stress,
957 * or if we are swapping
958 * idle processes >= swap_idle_threshold2,
959 * then swap the process out.
960 */
961 if (((action & VM_SWAP_NORMAL) == 0) &&
962 (((action & VM_SWAP_IDLE) == 0) ||
963 (slptime < swap_idle_threshold2))) {
964 thread_unlock(td);
965 goto nextproc;
966 }
967
968 if (minslptime > slptime)
969 minslptime = slptime;
970 thread_unlock(td);
971 }
972
973 /*
974 * If the pageout daemon didn't free enough pages,
975 * or if this process is idle and the system is
976 * configured to swap proactively, swap it out.
977 */
978 if ((action & VM_SWAP_NORMAL) ||
979 ((action & VM_SWAP_IDLE) &&
980 (minslptime > swap_idle_threshold2))) {
981 if (swapout(p) == 0)
982 didswap++;
983 PROC_UNLOCK(p);
984 vm_map_unlock(&vm->vm_map);
985 vmspace_free(vm);
986 sx_sunlock(&allproc_lock);
987 goto retry;
988 }
989 }
990 nextproc:
991 PROC_UNLOCK(p);
992 vm_map_unlock(&vm->vm_map);
993 nextproc1:
994 vmspace_free(vm);
995 continue;
996 }
997 sx_sunlock(&allproc_lock);
998 /*
999 * If we swapped something out, and another process needed memory,
1000 * then wakeup the sched process.
1001 */
1002 if (didswap)
1003 wakeup(&proc0);
1004 }
1005
1006 static void
1007 swapclear(p)
1008 struct proc *p;
1009 {
1010 struct thread *td;
1011
1012 PROC_LOCK_ASSERT(p, MA_OWNED);
1013
1014 FOREACH_THREAD_IN_PROC(p, td) {
1015 thread_lock(td);
1016 td->td_flags |= TDF_INMEM;
1017 td->td_flags &= ~TDF_SWAPINREQ;
1018 TD_CLR_SWAPPED(td);
1019 if (TD_CAN_RUN(td))
1020 if (setrunnable(td)) {
1021 #ifdef INVARIANTS
1022 /*
1023 * XXX: We just cleared TDI_SWAPPED
1024 * above and set TDF_INMEM, so this
1025 * should never happen.
1026 */
1027 panic("not waking up swapper");
1028 #endif
1029 }
1030 thread_unlock(td);
1031 }
1032 p->p_flag &= ~(P_SWAPPINGIN|P_SWAPPINGOUT);
1033 p->p_flag |= P_INMEM;
1034 }
1035
1036 static int
1037 swapout(p)
1038 struct proc *p;
1039 {
1040 struct thread *td;
1041
1042 PROC_LOCK_ASSERT(p, MA_OWNED);
1043 #if defined(SWAP_DEBUG)
1044 printf("swapping out %d\n", p->p_pid);
1045 #endif
1046
1047 /*
1048 * The states of this process and its threads may have changed
1049 * by now. Assuming that there is only one pageout daemon thread,
1050 * this process should still be in memory.
1051 */
1052 KASSERT((p->p_flag & (P_INMEM|P_SWAPPINGOUT|P_SWAPPINGIN)) == P_INMEM,
1053 ("swapout: lost a swapout race?"));
1054
1055 /*
1056 * remember the process resident count
1057 */
1058 p->p_vmspace->vm_swrss = vmspace_resident_count(p->p_vmspace);
1059 /*
1060 * Check and mark all threads before we proceed.
1061 */
1062 p->p_flag &= ~P_INMEM;
1063 p->p_flag |= P_SWAPPINGOUT;
1064 FOREACH_THREAD_IN_PROC(p, td) {
1065 thread_lock(td);
1066 if (!thread_safetoswapout(td)) {
1067 thread_unlock(td);
1068 swapclear(p);
1069 return (EBUSY);
1070 }
1071 td->td_flags &= ~TDF_INMEM;
1072 TD_SET_SWAPPED(td);
1073 thread_unlock(td);
1074 }
1075 td = FIRST_THREAD_IN_PROC(p);
1076 ++td->td_ru.ru_nswap;
1077 PROC_UNLOCK(p);
1078
1079 /*
1080 * This list is stable because all threads are now prevented from
1081 * running. The list is only modified in the context of a running
1082 * thread in this process.
1083 */
1084 FOREACH_THREAD_IN_PROC(p, td)
1085 vm_thread_swapout(td);
1086
1087 PROC_LOCK(p);
1088 p->p_flag &= ~P_SWAPPINGOUT;
1089 p->p_swtick = ticks;
1090 return (0);
1091 }
1092 #endif /* !NO_SWAPPING */