FreeBSD/Linux Kernel Cross Reference
sys/vm/vm_glue.c
1 /*-
2 * Copyright (c) 1991, 1993
3 * The Regents of the University of California. All rights reserved.
4 *
5 * This code is derived from software contributed to Berkeley by
6 * The Mach Operating System project at Carnegie-Mellon University.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 4. Neither the name of the University nor the names of its contributors
17 * may be used to endorse or promote products derived from this software
18 * without specific prior written permission.
19 *
20 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
24 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30 * SUCH DAMAGE.
31 *
32 * from: @(#)vm_glue.c 8.6 (Berkeley) 1/5/94
33 *
34 *
35 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
36 * All rights reserved.
37 *
38 * Permission to use, copy, modify and distribute this software and
39 * its documentation is hereby granted, provided that both the copyright
40 * notice and this permission notice appear in all copies of the
41 * software, derivative works or modified versions, and any portions
42 * thereof, and that both notices appear in supporting documentation.
43 *
44 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
45 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
46 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
47 *
48 * Carnegie Mellon requests users of this software to return to
49 *
50 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
51 * School of Computer Science
52 * Carnegie Mellon University
53 * Pittsburgh PA 15213-3890
54 *
55 * any improvements or extensions that they make and grant Carnegie the
56 * rights to redistribute these changes.
57 */
58
59 #include <sys/cdefs.h>
60 __FBSDID("$FreeBSD: releng/8.1/sys/vm/vm_glue.c 208041 2010-05-13 18:17:01Z kib $");
61
62 #include "opt_vm.h"
63 #include "opt_kstack_pages.h"
64 #include "opt_kstack_max_pages.h"
65
66 #include <sys/param.h>
67 #include <sys/systm.h>
68 #include <sys/limits.h>
69 #include <sys/lock.h>
70 #include <sys/mutex.h>
71 #include <sys/proc.h>
72 #include <sys/resourcevar.h>
73 #include <sys/sched.h>
74 #include <sys/sf_buf.h>
75 #include <sys/shm.h>
76 #include <sys/vmmeter.h>
77 #include <sys/sx.h>
78 #include <sys/sysctl.h>
79
80 #include <sys/eventhandler.h>
81 #include <sys/kernel.h>
82 #include <sys/ktr.h>
83 #include <sys/unistd.h>
84
85 #include <vm/vm.h>
86 #include <vm/vm_param.h>
87 #include <vm/pmap.h>
88 #include <vm/vm_map.h>
89 #include <vm/vm_page.h>
90 #include <vm/vm_pageout.h>
91 #include <vm/vm_object.h>
92 #include <vm/vm_kern.h>
93 #include <vm/vm_extern.h>
94 #include <vm/vm_pager.h>
95 #include <vm/swap_pager.h>
96
97 extern int maxslp;
98
99 /*
100 * System initialization
101 *
102 * Note: proc0 from proc.h
103 */
104 static void vm_init_limits(void *);
105 SYSINIT(vm_limits, SI_SUB_VM_CONF, SI_ORDER_FIRST, vm_init_limits, &proc0);
106
107 /*
108 * THIS MUST BE THE LAST INITIALIZATION ITEM!!!
109 *
110 * Note: run scheduling should be divorced from the vm system.
111 */
112 static void scheduler(void *);
113 SYSINIT(scheduler, SI_SUB_RUN_SCHEDULER, SI_ORDER_ANY, scheduler, NULL);
114
115 #ifndef NO_SWAPPING
116 static int swapout(struct proc *);
117 static void swapclear(struct proc *);
118 #endif
119
120 /*
121 * MPSAFE
122 *
123 * WARNING! This code calls vm_map_check_protection() which only checks
124 * the associated vm_map_entry range. It does not determine whether the
125  * contents of the memory are actually readable or writable. In most cases
126 * just checking the vm_map_entry is sufficient within the kernel's address
127 * space.
128 */
129 int
130 kernacc(addr, len, rw)
131 void *addr;
132 int len, rw;
133 {
134 boolean_t rv;
135 vm_offset_t saddr, eaddr;
136 vm_prot_t prot;
137
138 KASSERT((rw & ~VM_PROT_ALL) == 0,
139 ("illegal ``rw'' argument to kernacc (%x)\n", rw));
140
141 if ((vm_offset_t)addr + len > kernel_map->max_offset ||
142 (vm_offset_t)addr + len < (vm_offset_t)addr)
143 return (FALSE);
144
145 prot = rw;
146 saddr = trunc_page((vm_offset_t)addr);
147 eaddr = round_page((vm_offset_t)addr + len);
148 vm_map_lock_read(kernel_map);
149 rv = vm_map_check_protection(kernel_map, saddr, eaddr, prot);
150 vm_map_unlock_read(kernel_map);
151 return (rv == TRUE);
152 }
153
154 /*
155 * MPSAFE
156 *
157 * WARNING! This code calls vm_map_check_protection() which only checks
158 * the associated vm_map_entry range. It does not determine whether the
159  * contents of the memory are actually readable or writable. vmapbuf(),
160  * vm_fault_quick(), or copyin()/copyout()/su*()/fu*() functions should be
161  * used in conjunction with this call.
162 */
163 int
164 useracc(addr, len, rw)
165 void *addr;
166 int len, rw;
167 {
168 boolean_t rv;
169 vm_prot_t prot;
170 vm_map_t map;
171
172 KASSERT((rw & ~VM_PROT_ALL) == 0,
173 ("illegal ``rw'' argument to useracc (%x)\n", rw));
174 prot = rw;
175 map = &curproc->p_vmspace->vm_map;
176 if ((vm_offset_t)addr + len > vm_map_max(map) ||
177 (vm_offset_t)addr + len < (vm_offset_t)addr) {
178 return (FALSE);
179 }
180 vm_map_lock_read(map);
181 rv = vm_map_check_protection(map, trunc_page((vm_offset_t)addr),
182 round_page((vm_offset_t)addr + len), prot);
183 vm_map_unlock_read(map);
184 return (rv == TRUE);
185 }
186
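/*
 * Illustrative sketch (not part of the original file): a hypothetical
 * caller using useracc() as a cheap pre-check before accessing a user
 * buffer.  Because useracc() only inspects vm_map_entry protections,
 * the copyin() below remains the authoritative check and its error
 * must still be honoured.
 */
static int
example_read_user_buf(void *uaddr, size_t len)
{
	char kbuf[128];

	if (len > sizeof(kbuf))
		return (EINVAL);
	if (!useracc(uaddr, (int)len, VM_PROT_READ))
		return (EFAULT);	/* range not mapped readable at all */
	return (copyin(uaddr, kbuf, len));
}
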
187 int
188 vslock(void *addr, size_t len)
189 {
190 vm_offset_t end, last, start;
191 vm_size_t npages;
192 int error;
193
194 last = (vm_offset_t)addr + len;
195 start = trunc_page((vm_offset_t)addr);
196 end = round_page(last);
197 if (last < (vm_offset_t)addr || end < (vm_offset_t)addr)
198 return (EINVAL);
199 npages = atop(end - start);
200 if (npages > vm_page_max_wired)
201 return (ENOMEM);
202 PROC_LOCK(curproc);
203 if (ptoa(npages +
204 pmap_wired_count(vm_map_pmap(&curproc->p_vmspace->vm_map))) >
205 lim_cur(curproc, RLIMIT_MEMLOCK)) {
206 PROC_UNLOCK(curproc);
207 return (ENOMEM);
208 }
209 PROC_UNLOCK(curproc);
210 #if 0
211 /*
212 * XXX - not yet
213 *
214 * The limit for transient usage of wired pages should be
215 * larger than for "permanent" wired pages (mlock()).
216 *
217 * Also, the sysctl code, which is the only present user
218 * of vslock(), does a hard loop on EAGAIN.
219 */
220 if (npages + cnt.v_wire_count > vm_page_max_wired)
221 return (EAGAIN);
222 #endif
223 error = vm_map_wire(&curproc->p_vmspace->vm_map, start, end,
224 VM_MAP_WIRE_SYSTEM | VM_MAP_WIRE_NOHOLES);
225 /*
226 * Return EFAULT on error to match copy{in,out}() behaviour
227 * rather than returning ENOMEM like mlock() would.
228 */
229 return (error == KERN_SUCCESS ? 0 : EFAULT);
230 }
231
232 void
233 vsunlock(void *addr, size_t len)
234 {
235
236 /* Rely on the parameter sanity checks performed by vslock(). */
237 (void)vm_map_unwire(&curproc->p_vmspace->vm_map,
238 trunc_page((vm_offset_t)addr), round_page((vm_offset_t)addr + len),
239 VM_MAP_WIRE_SYSTEM | VM_MAP_WIRE_NOHOLES);
240 }
241
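/*
 * Illustrative sketch (not part of the original file): the wire, copy,
 * unwire pattern used by callers such as the sysctl old-buffer code.
 * The helper name is hypothetical.  Wiring the user pages first means
 * the copyout() should not have to fault pages in while the caller may
 * be holding locks further up the stack.
 */
static int
example_copyout_wired(const void *kdata, void *udata, size_t len)
{
	int error;

	error = vslock(udata, len);	/* 0, EINVAL, ENOMEM or EFAULT */
	if (error != 0)
		return (error);
	error = copyout(kdata, udata, len);
	vsunlock(udata, len);
	return (error);
}
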
242 /*
243 * Pin the page contained within the given object at the given offset. If the
244 * page is not resident, allocate and load it using the given object's pager.
245 * Return the pinned page if successful; otherwise, return NULL.
246 */
247 static vm_page_t
248 vm_imgact_hold_page(vm_object_t object, vm_ooffset_t offset)
249 {
250 vm_page_t m, ma[1];
251 vm_pindex_t pindex;
252 int rv;
253
254 VM_OBJECT_LOCK(object);
255 pindex = OFF_TO_IDX(offset);
256 m = vm_page_grab(object, pindex, VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
257 if (m->valid != VM_PAGE_BITS_ALL) {
258 ma[0] = m;
259 rv = vm_pager_get_pages(object, ma, 1, 0);
260 m = vm_page_lookup(object, pindex);
261 if (m == NULL)
262 goto out;
263 if (rv != VM_PAGER_OK) {
264 vm_page_lock_queues();
265 vm_page_free(m);
266 vm_page_unlock_queues();
267 m = NULL;
268 goto out;
269 }
270 }
271 vm_page_lock_queues();
272 vm_page_hold(m);
273 vm_page_unlock_queues();
274 vm_page_wakeup(m);
275 out:
276 VM_OBJECT_UNLOCK(object);
277 return (m);
278 }
279
280 /*
281 * Return a CPU private mapping to the page at the given offset within the
282 * given object. The page is pinned before it is mapped.
283 */
284 struct sf_buf *
285 vm_imgact_map_page(vm_object_t object, vm_ooffset_t offset)
286 {
287 vm_page_t m;
288
289 m = vm_imgact_hold_page(object, offset);
290 if (m == NULL)
291 return (NULL);
292 sched_pin();
293 return (sf_buf_alloc(m, SFB_CPUPRIVATE));
294 }
295
296 /*
297 * Destroy the given CPU private mapping and unpin the page that it mapped.
298 */
299 void
300 vm_imgact_unmap_page(struct sf_buf *sf)
301 {
302 vm_page_t m;
303
304 m = sf_buf_page(sf);
305 sf_buf_free(sf);
306 sched_unpin();
307 vm_page_lock_queues();
308 vm_page_unhold(m);
309 vm_page_unlock_queues();
310 }
311
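/*
 * Illustrative sketch (not part of the original file): how an image
 * activator might use the pair above to read a header located at some
 * offset within the VM object backing an executable.  The function and
 * its size handling are hypothetical.  Note that the sf_buf mapping is
 * CPU-private (the thread is pinned), so the caller must not sleep
 * before calling vm_imgact_unmap_page().
 */
static int
example_read_object(vm_object_t object, vm_ooffset_t offset, void *buf,
    size_t len)
{
	struct sf_buf *sf;

	/* Keep the copy within the single page that gets mapped. */
	if (len > (size_t)(PAGE_SIZE - (offset & PAGE_MASK)))
		return (EINVAL);
	sf = vm_imgact_map_page(object, offset);
	if (sf == NULL)
		return (EIO);
	bcopy((char *)sf_buf_kva(sf) + (offset & PAGE_MASK), buf, len);
	vm_imgact_unmap_page(sf);
	return (0);
}
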
312 void
313 vm_sync_icache(vm_map_t map, vm_offset_t va, vm_offset_t sz)
314 {
315
316 pmap_sync_icache(map->pmap, va, sz);
317 }
318
319 struct kstack_cache_entry {
320 vm_object_t ksobj;
321 struct kstack_cache_entry *next_ks_entry;
322 };
323
324 static struct kstack_cache_entry *kstack_cache;
325 static int kstack_cache_size = 128;
326 static int kstacks;
327 static struct mtx kstack_cache_mtx;
328 SYSCTL_INT(_vm, OID_AUTO, kstack_cache_size, CTLFLAG_RW, &kstack_cache_size, 0,
329 "");
330 SYSCTL_INT(_vm, OID_AUTO, kstacks, CTLFLAG_RD, &kstacks, 0,
331 "");
332
333 #ifndef KSTACK_MAX_PAGES
334 #define KSTACK_MAX_PAGES 32
335 #endif
336
337 /*
338 * Create the kernel stack (including pcb for i386) for a new thread.
339 * This routine directly affects the fork performance for a process and
340 * create performance for a thread.
341 */
342 int
343 vm_thread_new(struct thread *td, int pages)
344 {
345 vm_object_t ksobj;
346 vm_offset_t ks;
347 vm_page_t m, ma[KSTACK_MAX_PAGES];
348 struct kstack_cache_entry *ks_ce;
349 int i;
350
351 /* Bounds check */
352 if (pages <= 1)
353 pages = KSTACK_PAGES;
354 else if (pages > KSTACK_MAX_PAGES)
355 pages = KSTACK_MAX_PAGES;
356
357 if (pages == KSTACK_PAGES) {
358 mtx_lock(&kstack_cache_mtx);
359 if (kstack_cache != NULL) {
360 ks_ce = kstack_cache;
361 kstack_cache = ks_ce->next_ks_entry;
362 mtx_unlock(&kstack_cache_mtx);
363
364 td->td_kstack_obj = ks_ce->ksobj;
365 td->td_kstack = (vm_offset_t)ks_ce;
366 td->td_kstack_pages = KSTACK_PAGES;
367 return (1);
368 }
369 mtx_unlock(&kstack_cache_mtx);
370 }
371
372 /*
373 * Allocate an object for the kstack.
374 */
375 ksobj = vm_object_allocate(OBJT_DEFAULT, pages);
376
377 /*
378 * Get a kernel virtual address for this thread's kstack.
379 */
380 ks = kmem_alloc_nofault(kernel_map,
381 (pages + KSTACK_GUARD_PAGES) * PAGE_SIZE);
382 if (ks == 0) {
383 printf("vm_thread_new: kstack allocation failed\n");
384 vm_object_deallocate(ksobj);
385 return (0);
386 }
387
388 atomic_add_int(&kstacks, 1);
389 if (KSTACK_GUARD_PAGES != 0) {
390 pmap_qremove(ks, KSTACK_GUARD_PAGES);
391 ks += KSTACK_GUARD_PAGES * PAGE_SIZE;
392 }
393 td->td_kstack_obj = ksobj;
394 td->td_kstack = ks;
395 /*
396 * Knowing the number of pages allocated is useful when you
397 * want to deallocate them.
398 */
399 td->td_kstack_pages = pages;
400 /*
401 * For the length of the stack, link in a real page of ram for each
402 * page of stack.
403 */
404 VM_OBJECT_LOCK(ksobj);
405 for (i = 0; i < pages; i++) {
406 /*
407 * Get a kernel stack page.
408 */
409 m = vm_page_grab(ksobj, i, VM_ALLOC_NOBUSY |
410 VM_ALLOC_NORMAL | VM_ALLOC_RETRY | VM_ALLOC_WIRED);
411 ma[i] = m;
412 m->valid = VM_PAGE_BITS_ALL;
413 }
414 VM_OBJECT_UNLOCK(ksobj);
415 pmap_qenter(ks, ma, pages);
416 return (1);
417 }
418
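/*
 * Illustrative sketch (not part of the original file): the kernel VA
 * layout that vm_thread_new() sets up, assuming KSTACK_GUARD_PAGES is 1:
 *
 *	ks returned by kmem_alloc_nofault()
 *	|
 *	v
 *	+---------------+--------------------------------------------+
 *	| guard page    | td_kstack: "pages" wired, pmap_qenter()'ed |
 *	| (unmapped by  | pages; the stack grows down towards the    |
 *	| pmap_qremove) | guard page                                 |
 *	+---------------+--------------------------------------------+
 *
 * td_kstack = ks + KSTACK_GUARD_PAGES * PAGE_SIZE, so a stack overflow
 * faults on the unmapped guard page instead of silently overwriting
 * adjacent kernel memory.
 */
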
419 static void
420 vm_thread_stack_dispose(vm_object_t ksobj, vm_offset_t ks, int pages)
421 {
422 vm_page_t m;
423 int i;
424
425 atomic_add_int(&kstacks, -1);
426 pmap_qremove(ks, pages);
427 VM_OBJECT_LOCK(ksobj);
428 for (i = 0; i < pages; i++) {
429 m = vm_page_lookup(ksobj, i);
430 if (m == NULL)
431 panic("vm_thread_dispose: kstack already missing?");
432 vm_page_lock_queues();
433 vm_page_unwire(m, 0);
434 vm_page_free(m);
435 vm_page_unlock_queues();
436 }
437 VM_OBJECT_UNLOCK(ksobj);
438 vm_object_deallocate(ksobj);
439 kmem_free(kernel_map, ks - (KSTACK_GUARD_PAGES * PAGE_SIZE),
440 (pages + KSTACK_GUARD_PAGES) * PAGE_SIZE);
441 }
442
443 /*
444 * Dispose of a thread's kernel stack.
445 */
446 void
447 vm_thread_dispose(struct thread *td)
448 {
449 vm_object_t ksobj;
450 vm_offset_t ks;
451 struct kstack_cache_entry *ks_ce;
452 int pages;
453
454 pages = td->td_kstack_pages;
455 ksobj = td->td_kstack_obj;
456 ks = td->td_kstack;
457 td->td_kstack = 0;
458 td->td_kstack_pages = 0;
459 if (pages == KSTACK_PAGES && kstacks <= kstack_cache_size) {
460 ks_ce = (struct kstack_cache_entry *)ks;
461 ks_ce->ksobj = ksobj;
462 mtx_lock(&kstack_cache_mtx);
463 ks_ce->next_ks_entry = kstack_cache;
464 kstack_cache = ks_ce;
465 mtx_unlock(&kstack_cache_mtx);
466 return;
467 }
468 vm_thread_stack_dispose(ksobj, ks, pages);
469 }
470
471 static void
472 vm_thread_stack_lowmem(void *nulll)
473 {
474 struct kstack_cache_entry *ks_ce, *ks_ce1;
475
476 mtx_lock(&kstack_cache_mtx);
477 ks_ce = kstack_cache;
478 kstack_cache = NULL;
479 mtx_unlock(&kstack_cache_mtx);
480
481 while (ks_ce != NULL) {
482 ks_ce1 = ks_ce;
483 ks_ce = ks_ce->next_ks_entry;
484
485 vm_thread_stack_dispose(ks_ce1->ksobj, (vm_offset_t)ks_ce1,
486 KSTACK_PAGES);
487 }
488 }
489
490 static void
491 kstack_cache_init(void *nulll)
492 {
493
494 EVENTHANDLER_REGISTER(vm_lowmem, vm_thread_stack_lowmem, NULL,
495 EVENTHANDLER_PRI_ANY);
496 }
497
498 MTX_SYSINIT(kstack_cache, &kstack_cache_mtx, "kstkch", MTX_DEF);
499 SYSINIT(vm_kstacks, SI_SUB_KTHREAD_INIT, SI_ORDER_ANY, kstack_cache_init, NULL);
500
501 /*
502 * Allow a thread's kernel stack to be paged out.
503 */
504 void
505 vm_thread_swapout(struct thread *td)
506 {
507 vm_object_t ksobj;
508 vm_page_t m;
509 int i, pages;
510
511 cpu_thread_swapout(td);
512 pages = td->td_kstack_pages;
513 ksobj = td->td_kstack_obj;
514 pmap_qremove(td->td_kstack, pages);
515 VM_OBJECT_LOCK(ksobj);
516 for (i = 0; i < pages; i++) {
517 m = vm_page_lookup(ksobj, i);
518 if (m == NULL)
519 panic("vm_thread_swapout: kstack already missing?");
520 vm_page_lock_queues();
521 vm_page_dirty(m);
522 vm_page_unwire(m, 0);
523 vm_page_unlock_queues();
524 }
525 VM_OBJECT_UNLOCK(ksobj);
526 }
527
528 /*
529 * Bring the kernel stack for a specified thread back in.
530 */
531 void
532 vm_thread_swapin(struct thread *td)
533 {
534 vm_object_t ksobj;
535 vm_page_t ma[KSTACK_MAX_PAGES];
536 int i, j, k, pages, rv;
537
538 pages = td->td_kstack_pages;
539 ksobj = td->td_kstack_obj;
540 VM_OBJECT_LOCK(ksobj);
541 for (i = 0; i < pages; i++)
542 ma[i] = vm_page_grab(ksobj, i, VM_ALLOC_NORMAL | VM_ALLOC_RETRY |
543 VM_ALLOC_WIRED);
544 for (i = 0; i < pages; i++) {
545 if (ma[i]->valid != VM_PAGE_BITS_ALL) {
546 KASSERT(ma[i]->oflags & VPO_BUSY,
547 ("lost busy 1"));
548 vm_object_pip_add(ksobj, 1);
549 for (j = i + 1; j < pages; j++) {
550 KASSERT(ma[j]->valid == VM_PAGE_BITS_ALL ||
551 (ma[j]->oflags & VPO_BUSY),
552 ("lost busy 2"));
553 if (ma[j]->valid == VM_PAGE_BITS_ALL)
554 break;
555 }
556 rv = vm_pager_get_pages(ksobj, ma + i, j - i, 0);
557 if (rv != VM_PAGER_OK)
558 panic("vm_thread_swapin: cannot get kstack for proc: %d",
559 td->td_proc->p_pid);
560 vm_object_pip_wakeup(ksobj);
561 for (k = i; k < j; k++)
562 ma[k] = vm_page_lookup(ksobj, k);
563 vm_page_wakeup(ma[i]);
564 } else if (ma[i]->oflags & VPO_BUSY)
565 vm_page_wakeup(ma[i]);
566 }
567 VM_OBJECT_UNLOCK(ksobj);
568 pmap_qenter(td->td_kstack, ma, pages);
569 cpu_thread_swapin(td);
570 }
571
572 /*
573 * Implement fork's actions on an address space.
574 * Here we arrange for the address space to be copied or referenced,
575 * allocate a user struct (pcb and kernel stack), then call the
576 * machine-dependent layer to fill those in and make the new process
577 * ready to run. The new process is set up so that it returns directly
578 * to user mode to avoid stack copying and relocation problems.
579 */
580 int
581 vm_forkproc(td, p2, td2, vm2, flags)
582 struct thread *td;
583 struct proc *p2;
584 struct thread *td2;
585 struct vmspace *vm2;
586 int flags;
587 {
588 struct proc *p1 = td->td_proc;
589 int error;
590
591 if ((flags & RFPROC) == 0) {
592 /*
593 * Divorce the memory: if it is shared, this
594 * essentially converts memory shared amongst
595 * threads into locally copy-on-write memory.
596 */
597 if ((flags & RFMEM) == 0) {
598 if (p1->p_vmspace->vm_refcnt > 1) {
599 error = vmspace_unshare(p1);
600 if (error)
601 return (error);
602 }
603 }
604 cpu_fork(td, p2, td2, flags);
605 return (0);
606 }
607
608 if (flags & RFMEM) {
609 p2->p_vmspace = p1->p_vmspace;
610 atomic_add_int(&p1->p_vmspace->vm_refcnt, 1);
611 }
612
613 while (vm_page_count_severe()) {
614 VM_WAIT;
615 }
616
617 if ((flags & RFMEM) == 0) {
618 p2->p_vmspace = vm2;
619 if (p1->p_vmspace->vm_shm)
620 shmfork(p1, p2);
621 }
622
623 /*
624 * cpu_fork will copy and update the pcb, set up the kernel stack,
625 * and make the child ready to run.
626 */
627 cpu_fork(td, p2, td2, flags);
628 return (0);
629 }
630
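/*
 * Illustrative sketch (not part of the original file): the flag
 * combinations vm_forkproc() distinguishes, in rfork(2) terms:
 *
 *	RFPROC clear		no new process; if RFMEM is also clear and
 *				the vmspace is shared, unshare it via
 *				vmspace_unshare(), then let cpu_fork()
 *				update the current thread.
 *	RFPROC | RFMEM		the new process shares p1's vmspace
 *				(vfork(2)-style); the refcount is bumped.
 *	RFPROC, RFMEM clear	the new process receives the copy-on-write
 *				vmspace "vm2" prepared by the caller, and
 *				shmfork() propagates SysV shared memory.
 */
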
631 /*
632 * Called after a process has been wait(2)'ed upon and is being reaped.
633 * The idea is to reclaim resources that we could not reclaim while
634 * the process was still executing.
635 */
636 void
637 vm_waitproc(p)
638 struct proc *p;
639 {
640
641 vmspace_exitfree(p); /* and clean-out the vmspace */
642 }
643
644 /*
645 * Set default limits for VM system.
646 * Called for proc 0, and then inherited by all others.
647 *
648 * XXX should probably act directly on proc0.
649 */
650 static void
651 vm_init_limits(udata)
652 void *udata;
653 {
654 struct proc *p = udata;
655 struct plimit *limp;
656 int rss_limit;
657
658 /*
659 * Set up the initial limits on process VM. Set the maximum resident
660 * set size to be half of (reasonably) available memory. Since this
661 * is a soft limit, it comes into effect only when the system is out
662 * of memory - half of main memory helps to favor smaller processes,
663 * and reduces thrashing of the object cache.
664 */
665 limp = p->p_limit;
666 limp->pl_rlimit[RLIMIT_STACK].rlim_cur = dflssiz;
667 limp->pl_rlimit[RLIMIT_STACK].rlim_max = maxssiz;
668 limp->pl_rlimit[RLIMIT_DATA].rlim_cur = dfldsiz;
669 limp->pl_rlimit[RLIMIT_DATA].rlim_max = maxdsiz;
670 /* limit the limit to no less than 2MB */
671 rss_limit = max(cnt.v_free_count, 512);
672 limp->pl_rlimit[RLIMIT_RSS].rlim_cur = ptoa(rss_limit);
673 limp->pl_rlimit[RLIMIT_RSS].rlim_max = RLIM_INFINITY;
674 }
675
676 void
677 faultin(p)
678 struct proc *p;
679 {
680 #ifdef NO_SWAPPING
681
682 PROC_LOCK_ASSERT(p, MA_OWNED);
683 if ((p->p_flag & P_INMEM) == 0)
684 panic("faultin: proc swapped out with NO_SWAPPING!");
685 #else /* !NO_SWAPPING */
686 struct thread *td;
687
688 PROC_LOCK_ASSERT(p, MA_OWNED);
689 /*
690 * If another process is swapping in this process,
691 * just wait until it finishes.
692 */
693 if (p->p_flag & P_SWAPPINGIN) {
694 while (p->p_flag & P_SWAPPINGIN)
695 msleep(&p->p_flag, &p->p_mtx, PVM, "faultin", 0);
696 return;
697 }
698 if ((p->p_flag & P_INMEM) == 0) {
699 /*
700 * Don't let another thread swap process p out while we are
701 * busy swapping it in.
702 */
703 ++p->p_lock;
704 p->p_flag |= P_SWAPPINGIN;
705 PROC_UNLOCK(p);
706
707 /*
708 * We hold no lock here because the list of threads
709 * can not change while all threads in the process are
710 * swapped out.
711 */
712 FOREACH_THREAD_IN_PROC(p, td)
713 vm_thread_swapin(td);
714 PROC_LOCK(p);
715 swapclear(p);
716 p->p_swtick = ticks;
717
718 wakeup(&p->p_flag);
719
720 /* Allow other threads to swap p out now. */
721 --p->p_lock;
722 }
723 #endif /* NO_SWAPPING */
724 }
725
726 /*
727 * This swapin algorithm attempts to swap-in processes only if there
728 * is enough space for them. Of course, if a process waits for a long
729 * time, it will be swapped in anyway.
730 *
731 * Giant is held on entry.
732 */
733 /* ARGSUSED*/
734 static void
735 scheduler(dummy)
736 void *dummy;
737 {
738 struct proc *p;
739 struct thread *td;
740 struct proc *pp;
741 int slptime;
742 int swtime;
743 int ppri;
744 int pri;
745
746 mtx_assert(&Giant, MA_OWNED | MA_NOTRECURSED);
747 mtx_unlock(&Giant);
748
749 loop:
750 if (vm_page_count_min()) {
751 VM_WAIT;
752 goto loop;
753 }
754
755 pp = NULL;
756 ppri = INT_MIN;
757 sx_slock(&allproc_lock);
758 FOREACH_PROC_IN_SYSTEM(p) {
759 PROC_LOCK(p);
760 if (p->p_flag & (P_SWAPPINGOUT | P_SWAPPINGIN | P_INMEM)) {
761 PROC_UNLOCK(p);
762 continue;
763 }
764 swtime = (ticks - p->p_swtick) / hz;
765 FOREACH_THREAD_IN_PROC(p, td) {
766 /*
767 * An otherwise runnable thread of a process
768 * swapped out has only the TDI_SWAPPED bit set.
769 *
770 */
771 thread_lock(td);
772 if (td->td_inhibitors == TDI_SWAPPED) {
773 slptime = (ticks - td->td_slptick) / hz;
774 pri = swtime + slptime;
775 if ((td->td_flags & TDF_SWAPINREQ) == 0)
776 pri -= p->p_nice * 8;
777 /*
778 * if this thread is higher priority
779 * and there is enough space, then select
780 * this process instead of the previous
781 * selection.
782 */
783 if (pri > ppri) {
784 pp = p;
785 ppri = pri;
786 }
787 }
788 thread_unlock(td);
789 }
790 PROC_UNLOCK(p);
791 }
792 sx_sunlock(&allproc_lock);
793
794 /*
795 * Nothing to do, back to sleep.
796 */
797 if ((p = pp) == NULL) {
798 tsleep(&proc0, PVM, "sched", maxslp * hz / 2);
799 goto loop;
800 }
801 PROC_LOCK(p);
802
803 /*
804 * Another process may be bringing or may have already
805 * brought this process in while we traverse all threads.
806 * Or, this process may even be being swapped out again.
807 */
808 if (p->p_flag & (P_INMEM | P_SWAPPINGOUT | P_SWAPPINGIN)) {
809 PROC_UNLOCK(p);
810 goto loop;
811 }
812
813 /*
814 * We would like to bring someone in (but only if there is space).
815 * [What checks the space?]
816 */
817 faultin(p);
818 PROC_UNLOCK(p);
819 goto loop;
820 }
821
822 void
823 kick_proc0(void)
824 {
825
826 wakeup(&proc0);
827 }
828
829 #ifndef NO_SWAPPING
830
831 /*
832 * Swap_idle_threshold1 is the guaranteed swapped in time for a process
833 */
834 static int swap_idle_threshold1 = 2;
835 SYSCTL_INT(_vm, OID_AUTO, swap_idle_threshold1, CTLFLAG_RW,
836 &swap_idle_threshold1, 0, "Guaranteed swapped in time for a process");
837
838 /*
839 * Swap_idle_threshold2 is the time that a process can be idle before
840 * it will be swapped out, if idle swapping is enabled.
841 */
842 static int swap_idle_threshold2 = 10;
843 SYSCTL_INT(_vm, OID_AUTO, swap_idle_threshold2, CTLFLAG_RW,
844 &swap_idle_threshold2, 0, "Time before a process will be swapped out");
845
846 /*
847 * Swapout is driven by the pageout daemon. Very simple, we find eligible
848 * procs and swap out their stacks. We try to always "swap" at least one
849 * process in case we need the room for a swapin.
850 * If any procs have been sleeping/stopped for at least maxslp seconds,
851 * they are swapped. Else, we swap the longest-sleeping or stopped process,
852 * if any, otherwise the longest-resident process.
853 */
854 void
855 swapout_procs(action)
856 int action;
857 {
858 struct proc *p;
859 struct thread *td;
860 int didswap = 0;
861
862 retry:
863 sx_slock(&allproc_lock);
864 FOREACH_PROC_IN_SYSTEM(p) {
865 struct vmspace *vm;
866 int minslptime = 100000;
867 int slptime;
868
869 /*
870 * Watch out for a process in
871 * creation. It may have no
872 * address space or lock yet.
873 */
874 if (p->p_state == PRS_NEW)
875 continue;
876 /*
877 * An aio daemon switches its
878 * address space while running.
879 * Perform a quick check whether
880 * a process has P_SYSTEM.
881 */
882 if ((p->p_flag & P_SYSTEM) != 0)
883 continue;
884 /*
885 * Do not swapout a process that
886 * is waiting for VM data
887 * structures as there is a possible
888 * deadlock. Test this first as
889 * this may block.
890 *
891 * Lock the map until swapout
892 * finishes, or a thread of this
893 * process may attempt to alter
894 * the map.
895 */
896 vm = vmspace_acquire_ref(p);
897 if (vm == NULL)
898 continue;
899 if (!vm_map_trylock(&vm->vm_map))
900 goto nextproc1;
901
902 PROC_LOCK(p);
903 if (p->p_lock != 0 ||
904 (p->p_flag & (P_STOPPED_SINGLE|P_TRACED|P_SYSTEM|P_WEXIT)
905 ) != 0) {
906 goto nextproc;
907 }
908 /*
909 * Only the aio daemon changes its vmspace; however, it will
910 * be skipped because of the if statement above checking
911 * for P_SYSTEM.
912 */
913 if ((p->p_flag & (P_INMEM|P_SWAPPINGOUT|P_SWAPPINGIN)) != P_INMEM)
914 goto nextproc;
915
916 switch (p->p_state) {
917 default:
918 /* Don't swap out processes in any sort
919 * of 'special' state. */
920 break;
921
922 case PRS_NORMAL:
923 /*
924 * do not swapout a realtime process
925 * Check all the thread groups..
926 */
927 FOREACH_THREAD_IN_PROC(p, td) {
928 thread_lock(td);
929 if (PRI_IS_REALTIME(td->td_pri_class)) {
930 thread_unlock(td);
931 goto nextproc;
932 }
933 slptime = (ticks - td->td_slptick) / hz;
934 /*
935 * Guarantee swap_idle_threshold1
936 * time in memory.
937 */
938 if (slptime < swap_idle_threshold1) {
939 thread_unlock(td);
940 goto nextproc;
941 }
942
943 /*
944 * Do not swapout a process if it is
945 * waiting on a critical event of some
946 * kind or there is a thread whose
947 * pageable memory may be accessed.
948 *
949 * This could be refined to support
950 * swapping out a thread.
951 */
952 if (!thread_safetoswapout(td)) {
953 thread_unlock(td);
954 goto nextproc;
955 }
956 /*
957 * If the system is under memory stress,
958 * or if we are swapping
959 * idle processes >= swap_idle_threshold2,
960 * then swap the process out.
961 */
962 if (((action & VM_SWAP_NORMAL) == 0) &&
963 (((action & VM_SWAP_IDLE) == 0) ||
964 (slptime < swap_idle_threshold2))) {
965 thread_unlock(td);
966 goto nextproc;
967 }
968
969 if (minslptime > slptime)
970 minslptime = slptime;
971 thread_unlock(td);
972 }
973
974 /*
975 * If the pageout daemon didn't free enough pages,
976 * or if this process is idle and the system is
977 * configured to swap proactively, swap it out.
978 */
979 if ((action & VM_SWAP_NORMAL) ||
980 ((action & VM_SWAP_IDLE) &&
981 (minslptime > swap_idle_threshold2))) {
982 if (swapout(p) == 0)
983 didswap++;
984 PROC_UNLOCK(p);
985 vm_map_unlock(&vm->vm_map);
986 vmspace_free(vm);
987 sx_sunlock(&allproc_lock);
988 goto retry;
989 }
990 }
991 nextproc:
992 PROC_UNLOCK(p);
993 vm_map_unlock(&vm->vm_map);
994 nextproc1:
995 vmspace_free(vm);
996 continue;
997 }
998 sx_sunlock(&allproc_lock);
999 /*
1000 * If we swapped something out, and another process needed memory,
1001 * then wakeup the sched process.
1002 */
1003 if (didswap)
1004 wakeup(&proc0);
1005 }
1006
1007 static void
1008 swapclear(p)
1009 struct proc *p;
1010 {
1011 struct thread *td;
1012
1013 PROC_LOCK_ASSERT(p, MA_OWNED);
1014
1015 FOREACH_THREAD_IN_PROC(p, td) {
1016 thread_lock(td);
1017 td->td_flags |= TDF_INMEM;
1018 td->td_flags &= ~TDF_SWAPINREQ;
1019 TD_CLR_SWAPPED(td);
1020 if (TD_CAN_RUN(td))
1021 if (setrunnable(td)) {
1022 #ifdef INVARIANTS
1023 /*
1024 * XXX: We just cleared TDI_SWAPPED
1025 * above and set TDF_INMEM, so this
1026 * should never happen.
1027 */
1028 panic("not waking up swapper");
1029 #endif
1030 }
1031 thread_unlock(td);
1032 }
1033 p->p_flag &= ~(P_SWAPPINGIN|P_SWAPPINGOUT);
1034 p->p_flag |= P_INMEM;
1035 }
1036
1037 static int
1038 swapout(p)
1039 struct proc *p;
1040 {
1041 struct thread *td;
1042
1043 PROC_LOCK_ASSERT(p, MA_OWNED);
1044 #if defined(SWAP_DEBUG)
1045 printf("swapping out %d\n", p->p_pid);
1046 #endif
1047
1048 /*
1049 * The states of this process and its threads may have changed
1050 * by now. Assuming that there is only one pageout daemon thread,
1051 * this process should still be in memory.
1052 */
1053 KASSERT((p->p_flag & (P_INMEM|P_SWAPPINGOUT|P_SWAPPINGIN)) == P_INMEM,
1054 ("swapout: lost a swapout race?"));
1055
1056 /*
1057 * remember the process resident count
1058 */
1059 p->p_vmspace->vm_swrss = vmspace_resident_count(p->p_vmspace);
1060 /*
1061 * Check and mark all threads before we proceed.
1062 */
1063 p->p_flag &= ~P_INMEM;
1064 p->p_flag |= P_SWAPPINGOUT;
1065 FOREACH_THREAD_IN_PROC(p, td) {
1066 thread_lock(td);
1067 if (!thread_safetoswapout(td)) {
1068 thread_unlock(td);
1069 swapclear(p);
1070 return (EBUSY);
1071 }
1072 td->td_flags &= ~TDF_INMEM;
1073 TD_SET_SWAPPED(td);
1074 thread_unlock(td);
1075 }
1076 td = FIRST_THREAD_IN_PROC(p);
1077 ++td->td_ru.ru_nswap;
1078 PROC_UNLOCK(p);
1079
1080 /*
1081 * This list is stable because all threads are now prevented from
1082 * running. The list is only modified in the context of a running
1083 * thread in this process.
1084 */
1085 FOREACH_THREAD_IN_PROC(p, td)
1086 vm_thread_swapout(td);
1087
1088 PROC_LOCK(p);
1089 p->p_flag &= ~P_SWAPPINGOUT;
1090 p->p_swtick = ticks;
1091 return (0);
1092 }
1093 #endif /* !NO_SWAPPING */