FreeBSD/Linux Kernel Cross Reference
sys/vm/vm_glue.c
1 /*-
2 * SPDX-License-Identifier: (BSD-3-Clause AND MIT-CMU)
3 *
4 * Copyright (c) 1991, 1993
5 * The Regents of the University of California. All rights reserved.
6 *
7 * This code is derived from software contributed to Berkeley by
8 * The Mach Operating System project at Carnegie-Mellon University.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 * 3. Neither the name of the University nor the names of its contributors
19 * may be used to endorse or promote products derived from this software
20 * without specific prior written permission.
21 *
22 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
23 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
26 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
27 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
28 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
29 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
30 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
31 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32 * SUCH DAMAGE.
33 *
34 * from: @(#)vm_glue.c 8.6 (Berkeley) 1/5/94
35 *
36 *
37 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
38 * All rights reserved.
39 *
40 * Permission to use, copy, modify and distribute this software and
41 * its documentation is hereby granted, provided that both the copyright
42 * notice and this permission notice appear in all copies of the
43 * software, derivative works or modified versions, and any portions
44 * thereof, and that both notices appear in supporting documentation.
45 *
46 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
47 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
48 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
49 *
50 * Carnegie Mellon requests users of this software to return to
51 *
52 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
53 * School of Computer Science
54 * Carnegie Mellon University
55 * Pittsburgh PA 15213-3890
56 *
57 * any improvements or extensions that they make and grant Carnegie the
58 * rights to redistribute these changes.
59 */
60
61 #include <sys/cdefs.h>
62 __FBSDID("$FreeBSD: releng/12.0/sys/vm/vm_glue.c 339998 2018-11-01 15:19:36Z markj $");
63
64 #include "opt_vm.h"
65 #include "opt_kstack_pages.h"
66 #include "opt_kstack_max_pages.h"
67 #include "opt_kstack_usage_prof.h"
68
69 #include <sys/param.h>
70 #include <sys/systm.h>
71 #include <sys/domainset.h>
72 #include <sys/limits.h>
73 #include <sys/lock.h>
74 #include <sys/malloc.h>
75 #include <sys/mutex.h>
76 #include <sys/proc.h>
77 #include <sys/racct.h>
78 #include <sys/resourcevar.h>
79 #include <sys/rwlock.h>
80 #include <sys/sched.h>
81 #include <sys/sf_buf.h>
82 #include <sys/shm.h>
83 #include <sys/vmmeter.h>
84 #include <sys/vmem.h>
85 #include <sys/sx.h>
86 #include <sys/sysctl.h>
87 #include <sys/_kstack_cache.h>
88 #include <sys/eventhandler.h>
89 #include <sys/kernel.h>
90 #include <sys/ktr.h>
91 #include <sys/unistd.h>
92
93 #include <vm/vm.h>
94 #include <vm/vm_param.h>
95 #include <vm/pmap.h>
96 #include <vm/vm_domainset.h>
97 #include <vm/vm_map.h>
98 #include <vm/vm_page.h>
99 #include <vm/vm_pageout.h>
100 #include <vm/vm_object.h>
101 #include <vm/vm_kern.h>
102 #include <vm/vm_extern.h>
103 #include <vm/vm_pager.h>
104 #include <vm/swap_pager.h>
105
106 #include <machine/cpu.h>
107
108 /*
109 * MPSAFE
110 *
111 * WARNING! This code calls vm_map_check_protection() which only checks
112 * the associated vm_map_entry range. It does not determine whether the
113  * contents of the memory are actually readable or writable.  In most cases
114 * just checking the vm_map_entry is sufficient within the kernel's address
115 * space.
116 */
117 int
118 kernacc(void *addr, int len, int rw)
119 {
120 boolean_t rv;
121 vm_offset_t saddr, eaddr;
122 vm_prot_t prot;
123
124 KASSERT((rw & ~VM_PROT_ALL) == 0,
125 ("illegal ``rw'' argument to kernacc (%x)\n", rw));
126
127 if ((vm_offset_t)addr + len > vm_map_max(kernel_map) ||
128 (vm_offset_t)addr + len < (vm_offset_t)addr)
129 return (FALSE);
130
131 prot = rw;
132 saddr = trunc_page((vm_offset_t)addr);
133 eaddr = round_page((vm_offset_t)addr + len);
134 vm_map_lock_read(kernel_map);
135 rv = vm_map_check_protection(kernel_map, saddr, eaddr, prot);
136 vm_map_unlock_read(kernel_map);
137 return (rv == TRUE);
138 }
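
/*
 * Editor's illustrative sketch, not part of vm_glue.c: a hypothetical
 * helper that screens a kernel address with kernacc() before touching it.
 * As the warning above notes, this validates only the map entry's
 * protection, which is normally sufficient within the kernel map.
 */
static int
example_peek_kernel(void *kaddr, long *valp)
{

	if (!kernacc(kaddr, sizeof(*valp), VM_PROT_READ))
		return (EFAULT);
	*valp = *(volatile long *)kaddr;
	return (0);
}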
139
140 /*
141 * MPSAFE
142 *
143 * WARNING! This code calls vm_map_check_protection() which only checks
144 * the associated vm_map_entry range. It does not determine whether the
145  * contents of the memory are actually readable or writable.  vmapbuf(),
146  * vm_fault_quick(), or copyin()/copyout()/su*()/fu*() functions should be
147 * used in conjunction with this call.
148 */
149 int
150 useracc(void *addr, int len, int rw)
151 {
152 boolean_t rv;
153 vm_prot_t prot;
154 vm_map_t map;
155
156 KASSERT((rw & ~VM_PROT_ALL) == 0,
157 ("illegal ``rw'' argument to useracc (%x)\n", rw));
158 prot = rw;
159 map = &curproc->p_vmspace->vm_map;
160 if ((vm_offset_t)addr + len > vm_map_max(map) ||
161 (vm_offset_t)addr + len < (vm_offset_t)addr) {
162 return (FALSE);
163 }
164 vm_map_lock_read(map);
165 rv = vm_map_check_protection(map, trunc_page((vm_offset_t)addr),
166 round_page((vm_offset_t)addr + len), prot);
167 vm_map_unlock_read(map);
168 return (rv == TRUE);
169 }
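
/*
 * Editor's illustrative sketch, not part of vm_glue.c: per the warning
 * above, useracc() only pre-screens map-entry protection, so the
 * hypothetical handler below still relies on copyin() to perform the
 * authoritative, fault-handling copy.
 */
static int
example_fetch_user_arg(void *uaddr, size_t len)
{
	char kbuf[128];

	if (len > sizeof(kbuf))
		return (EINVAL);
	if (!useracc(uaddr, len, VM_PROT_READ))
		return (EFAULT);
	return (copyin(uaddr, kbuf, len));
}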
170
171 int
172 vslock(void *addr, size_t len)
173 {
174 vm_offset_t end, last, start;
175 vm_size_t npages;
176 int error;
177
178 last = (vm_offset_t)addr + len;
179 start = trunc_page((vm_offset_t)addr);
180 end = round_page(last);
181 if (last < (vm_offset_t)addr || end < (vm_offset_t)addr)
182 return (EINVAL);
183 npages = atop(end - start);
184 if (npages > vm_page_max_wired)
185 return (ENOMEM);
186 #if 0
187 /*
188 * XXX - not yet
189 *
190 * The limit for transient usage of wired pages should be
191 * larger than for "permanent" wired pages (mlock()).
192 *
193 * Also, the sysctl code, which is the only present user
194 * of vslock(), does a hard loop on EAGAIN.
195 */
196 if (npages + vm_wire_count() > vm_page_max_wired)
197 return (EAGAIN);
198 #endif
199 error = vm_map_wire(&curproc->p_vmspace->vm_map, start, end,
200 VM_MAP_WIRE_SYSTEM | VM_MAP_WIRE_NOHOLES);
201 if (error == KERN_SUCCESS) {
202 curthread->td_vslock_sz += len;
203 return (0);
204 }
205
206 /*
207 * Return EFAULT on error to match copy{in,out}() behaviour
208 * rather than returning ENOMEM like mlock() would.
209 */
210 return (EFAULT);
211 }
212
213 void
214 vsunlock(void *addr, size_t len)
215 {
216
217 /* Rely on the parameter sanity checks performed by vslock(). */
218 MPASS(curthread->td_vslock_sz >= len);
219 curthread->td_vslock_sz -= len;
220 (void)vm_map_unwire(&curproc->p_vmspace->vm_map,
221 trunc_page((vm_offset_t)addr), round_page((vm_offset_t)addr + len),
222 VM_MAP_WIRE_SYSTEM | VM_MAP_WIRE_NOHOLES);
223 }
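
/*
 * Editor's illustrative sketch, not part of vm_glue.c: the transient
 * wiring pattern used by vslock()'s existing consumer, the sysctl code,
 * expressed as a hypothetical helper.  Wiring the destination lets the
 * copyout() below run without faulting on the user buffer.
 */
static int
example_wired_copyout(const void *kbuf, void *ubuf, size_t len)
{
	int error;

	error = vslock(ubuf, len);
	if (error != 0)
		return (error);
	error = copyout(kbuf, ubuf, len);
	vsunlock(ubuf, len);
	return (error);
}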
224
225 /*
226 * Pin the page contained within the given object at the given offset. If the
227 * page is not resident, allocate and load it using the given object's pager.
228 * Return the pinned page if successful; otherwise, return NULL.
229 */
230 static vm_page_t
231 vm_imgact_hold_page(vm_object_t object, vm_ooffset_t offset)
232 {
233 vm_page_t m;
234 vm_pindex_t pindex;
235 int rv;
236
237 VM_OBJECT_WLOCK(object);
238 pindex = OFF_TO_IDX(offset);
239 m = vm_page_grab(object, pindex, VM_ALLOC_NORMAL | VM_ALLOC_NOBUSY);
240 if (m->valid != VM_PAGE_BITS_ALL) {
241 vm_page_xbusy(m);
242 rv = vm_pager_get_pages(object, &m, 1, NULL, NULL);
243 if (rv != VM_PAGER_OK) {
244 vm_page_lock(m);
245 vm_page_free(m);
246 vm_page_unlock(m);
247 m = NULL;
248 goto out;
249 }
250 vm_page_xunbusy(m);
251 }
252 vm_page_lock(m);
253 vm_page_hold(m);
254 vm_page_activate(m);
255 vm_page_unlock(m);
256 out:
257 VM_OBJECT_WUNLOCK(object);
258 return (m);
259 }
260
261 /*
262 * Return a CPU private mapping to the page at the given offset within the
263 * given object. The page is pinned before it is mapped.
264 */
265 struct sf_buf *
266 vm_imgact_map_page(vm_object_t object, vm_ooffset_t offset)
267 {
268 vm_page_t m;
269
270 m = vm_imgact_hold_page(object, offset);
271 if (m == NULL)
272 return (NULL);
273 sched_pin();
274 return (sf_buf_alloc(m, SFB_CPUPRIVATE));
275 }
276
277 /*
278 * Destroy the given CPU private mapping and unpin the page that it mapped.
279 */
280 void
281 vm_imgact_unmap_page(struct sf_buf *sf)
282 {
283 vm_page_t m;
284
285 m = sf_buf_page(sf);
286 sf_buf_free(sf);
287 sched_unpin();
288 vm_page_lock(m);
289 vm_page_unhold(m);
290 vm_page_unlock(m);
291 }
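
/*
 * Editor's illustrative sketch, not part of vm_glue.c: an image activator
 * can pair the two routines above to read a page-aligned, at most
 * page-sized header out of an executable's VM object.  The helper and its
 * arguments are hypothetical.
 */
static int
example_read_image_header(vm_object_t obj, vm_ooffset_t off, void *hdr,
    size_t len)
{
	struct sf_buf *sf;

	sf = vm_imgact_map_page(obj, off);
	if (sf == NULL)
		return (EIO);
	/* sf_buf_kva() returns the temporary CPU-private kernel mapping. */
	bcopy((void *)sf_buf_kva(sf), hdr, len);
	vm_imgact_unmap_page(sf);
	return (0);
}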
292
293 void
294 vm_sync_icache(vm_map_t map, vm_offset_t va, vm_offset_t sz)
295 {
296
297 pmap_sync_icache(map->pmap, va, sz);
298 }
299
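/*
 * Editor's note, not part of vm_glue.c: struct kstack_cache_entry is
 * declared in <sys/_kstack_cache.h>.  As used below, a cached stack's own
 * memory doubles as its cache entry, roughly:
 *
 *	struct kstack_cache_entry {
 *		vm_object_t ksobj;
 *		struct kstack_cache_entry *next_ks_entry;
 *	};
 */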
300 struct kstack_cache_entry *kstack_cache;
301 static int kstack_cache_size = 128;
302 static int kstacks, kstack_domain_iter;
303 static struct mtx kstack_cache_mtx;
304 MTX_SYSINIT(kstack_cache, &kstack_cache_mtx, "kstkch", MTX_DEF);
305
306 SYSCTL_INT(_vm, OID_AUTO, kstack_cache_size, CTLFLAG_RW, &kstack_cache_size, 0,
307     "Maximum number of cached kernel stacks");
308 SYSCTL_INT(_vm, OID_AUTO, kstacks, CTLFLAG_RD, &kstacks, 0,
309     "Number of kernel stacks currently allocated");
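/*
 * Editor's note, not part of vm_glue.c: both knobs appear under the "vm"
 * sysctl tree, e.g. "sysctl vm.kstack_cache_size=256" to enlarge the
 * cache and "sysctl vm.kstacks" to read the number of allocated stacks.
 */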
310
311 /*
312 * Create the kernel stack (including pcb for i386) for a new thread.
313  * This routine directly affects fork performance for a process and
314  * creation performance for a thread.
315 */
316 int
317 vm_thread_new(struct thread *td, int pages)
318 {
319 vm_object_t ksobj;
320 vm_offset_t ks;
321 vm_page_t ma[KSTACK_MAX_PAGES];
322 struct kstack_cache_entry *ks_ce;
323 int i;
324
325 /* Bounds check */
326 if (pages <= 1)
327 pages = kstack_pages;
328 else if (pages > KSTACK_MAX_PAGES)
329 pages = KSTACK_MAX_PAGES;
330
331 if (pages == kstack_pages && kstack_cache != NULL) {
332 mtx_lock(&kstack_cache_mtx);
333 if (kstack_cache != NULL) {
334 ks_ce = kstack_cache;
335 kstack_cache = ks_ce->next_ks_entry;
336 mtx_unlock(&kstack_cache_mtx);
337
338 td->td_kstack_obj = ks_ce->ksobj;
339 td->td_kstack = (vm_offset_t)ks_ce;
340 td->td_kstack_pages = kstack_pages;
341 return (1);
342 }
343 mtx_unlock(&kstack_cache_mtx);
344 }
345
346 /*
347 * Allocate an object for the kstack.
348 */
349 ksobj = vm_object_allocate(OBJT_DEFAULT, pages);
350
351 /*
352 * Get a kernel virtual address for this thread's kstack.
353 */
354 #if defined(__mips__)
355 /*
356 * We need to align the kstack's mapped address to fit within
357 * a single TLB entry.
358 */
359 if (vmem_xalloc(kernel_arena, (pages + KSTACK_GUARD_PAGES) * PAGE_SIZE,
360 PAGE_SIZE * 2, 0, 0, VMEM_ADDR_MIN, VMEM_ADDR_MAX,
361 M_BESTFIT | M_NOWAIT, &ks)) {
362 ks = 0;
363 }
364 #else
365 ks = kva_alloc((pages + KSTACK_GUARD_PAGES) * PAGE_SIZE);
366 #endif
367 if (ks == 0) {
368 printf("vm_thread_new: kstack allocation failed\n");
369 vm_object_deallocate(ksobj);
370 return (0);
371 }
372
373 /*
374 * Ensure that kstack objects can draw pages from any memory
375 * domain. Otherwise a local memory shortage can block a process
376 * swap-in.
377 */
378 if (vm_ndomains > 1) {
379 ksobj->domain.dr_policy = DOMAINSET_RR();
380 ksobj->domain.dr_iter =
381 atomic_fetchadd_int(&kstack_domain_iter, 1);
382 }
383
384 atomic_add_int(&kstacks, 1);
385 if (KSTACK_GUARD_PAGES != 0) {
386 pmap_qremove(ks, KSTACK_GUARD_PAGES);
387 ks += KSTACK_GUARD_PAGES * PAGE_SIZE;
388 }
389 td->td_kstack_obj = ksobj;
390 td->td_kstack = ks;
391 /*
392 * Knowing the number of pages allocated is useful when you
393 * want to deallocate them.
394 */
395 td->td_kstack_pages = pages;
396 /*
397 * For the length of the stack, link in a real page of ram for each
398 * page of stack.
399 */
400 VM_OBJECT_WLOCK(ksobj);
401 (void)vm_page_grab_pages(ksobj, 0, VM_ALLOC_NORMAL | VM_ALLOC_NOBUSY |
402 VM_ALLOC_WIRED, ma, pages);
403 for (i = 0; i < pages; i++)
404 ma[i]->valid = VM_PAGE_BITS_ALL;
405 VM_OBJECT_WUNLOCK(ksobj);
406 pmap_qenter(ks, ma, pages);
407 return (1);
408 }
409
410 static void
411 vm_thread_stack_dispose(vm_object_t ksobj, vm_offset_t ks, int pages)
412 {
413 vm_page_t m;
414 int i;
415
416 atomic_add_int(&kstacks, -1);
417 pmap_qremove(ks, pages);
418 VM_OBJECT_WLOCK(ksobj);
419 for (i = 0; i < pages; i++) {
420 m = vm_page_lookup(ksobj, i);
421 if (m == NULL)
422 panic("vm_thread_dispose: kstack already missing?");
423 vm_page_lock(m);
424 vm_page_unwire(m, PQ_NONE);
425 vm_page_free(m);
426 vm_page_unlock(m);
427 }
428 VM_OBJECT_WUNLOCK(ksobj);
429 vm_object_deallocate(ksobj);
430 kva_free(ks - (KSTACK_GUARD_PAGES * PAGE_SIZE),
431 (pages + KSTACK_GUARD_PAGES) * PAGE_SIZE);
432 }
433
434 /*
435 * Dispose of a thread's kernel stack.
436 */
437 void
438 vm_thread_dispose(struct thread *td)
439 {
440 vm_object_t ksobj;
441 vm_offset_t ks;
442 struct kstack_cache_entry *ks_ce;
443 int pages;
444
445 pages = td->td_kstack_pages;
446 ksobj = td->td_kstack_obj;
447 ks = td->td_kstack;
448 td->td_kstack = 0;
449 td->td_kstack_pages = 0;
450 if (pages == kstack_pages && kstacks <= kstack_cache_size) {
451 ks_ce = (struct kstack_cache_entry *)ks;
452 ks_ce->ksobj = ksobj;
453 mtx_lock(&kstack_cache_mtx);
454 ks_ce->next_ks_entry = kstack_cache;
455 kstack_cache = ks_ce;
456 mtx_unlock(&kstack_cache_mtx);
457 return;
458 }
459 vm_thread_stack_dispose(ksobj, ks, pages);
460 }
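
/*
 * Editor's illustrative sketch, not part of vm_glue.c: a caller such as
 * the thread allocator pairs the two routines above; the helper and its
 * error handling are hypothetical.
 */
static int
example_thread_stack_lifecycle(struct thread *td)
{

	/* Passing 0 selects the default kstack_pages; a 0 return means failure. */
	if (vm_thread_new(td, 0) == 0)
		return (ENOMEM);
	/* ... the thread runs on td->td_kstack ... */
	vm_thread_dispose(td);	/* May recycle the stack into the cache. */
	return (0);
}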
461
462 static void
463 vm_thread_stack_lowmem(void *nulll)
464 {
465 struct kstack_cache_entry *ks_ce, *ks_ce1;
466
467 mtx_lock(&kstack_cache_mtx);
468 ks_ce = kstack_cache;
469 kstack_cache = NULL;
470 mtx_unlock(&kstack_cache_mtx);
471
472 while (ks_ce != NULL) {
473 ks_ce1 = ks_ce;
474 ks_ce = ks_ce->next_ks_entry;
475
476 vm_thread_stack_dispose(ks_ce1->ksobj, (vm_offset_t)ks_ce1,
477 kstack_pages);
478 }
479 }
480
481 static void
482 kstack_cache_init(void *nulll)
483 {
484
485 EVENTHANDLER_REGISTER(vm_lowmem, vm_thread_stack_lowmem, NULL,
486 EVENTHANDLER_PRI_ANY);
487 }
488
489 SYSINIT(vm_kstacks, SI_SUB_KTHREAD_INIT, SI_ORDER_ANY, kstack_cache_init, NULL);
490
491 #ifdef KSTACK_USAGE_PROF
492 /*
493 * Track maximum stack used by a thread in kernel.
494 */
495 static int max_kstack_used;
496
497 SYSCTL_INT(_debug, OID_AUTO, max_kstack_used, CTLFLAG_RD,
498 &max_kstack_used, 0,
499     "Maximum stack depth used by a thread in kernel");
500
501 void
502 intr_prof_stack_use(struct thread *td, struct trapframe *frame)
503 {
504 vm_offset_t stack_top;
505 vm_offset_t current;
506 int used, prev_used;
507
508 /*
509 * Testing for interrupted kernel mode isn't strictly
510 * needed. It optimizes the execution, since interrupts from
511 * usermode will have only the trap frame on the stack.
512 */
513 if (TRAPF_USERMODE(frame))
514 return;
515
516 stack_top = td->td_kstack + td->td_kstack_pages * PAGE_SIZE;
517 current = (vm_offset_t)(uintptr_t)&stack_top;
518
519 /*
520 * Try to detect if interrupt is using kernel thread stack.
521 * Hardware could use a dedicated stack for interrupt handling.
522 */
523 if (stack_top <= current || current < td->td_kstack)
524 return;
525
526 used = stack_top - current;
527 for (;;) {
528 prev_used = max_kstack_used;
529 if (prev_used >= used)
530 break;
531 if (atomic_cmpset_int(&max_kstack_used, prev_used, used))
532 break;
533 }
534 }
535 #endif /* KSTACK_USAGE_PROF */
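
/*
 * Editor's note, not part of vm_glue.c: the profiling block above is
 * compiled only when the kernel configuration contains
 * "options KSTACK_USAGE_PROF" (hence the opt_kstack_usage_prof.h include
 * at the top of this file); the high-water mark is exported as the
 * read-only sysctl debug.max_kstack_used.
 */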
536
537 /*
538 * Implement fork's actions on an address space.
539 * Here we arrange for the address space to be copied or referenced,
540 * allocate a user struct (pcb and kernel stack), then call the
541 * machine-dependent layer to fill those in and make the new process
542 * ready to run. The new process is set up so that it returns directly
543 * to user mode to avoid stack copying and relocation problems.
544 */
545 int
546 vm_forkproc(struct thread *td, struct proc *p2, struct thread *td2,
547 struct vmspace *vm2, int flags)
548 {
549 struct proc *p1 = td->td_proc;
550 struct domainset *dset;
551 int error;
552
553 if ((flags & RFPROC) == 0) {
554 /*
555 		 * Divorce the memory: if it is shared, this changes
556 		 * memory shared amongst threads into memory that is
557 		 * copied on write locally.
558 */
559 if ((flags & RFMEM) == 0) {
560 if (p1->p_vmspace->vm_refcnt > 1) {
561 error = vmspace_unshare(p1);
562 if (error)
563 return (error);
564 }
565 }
566 cpu_fork(td, p2, td2, flags);
567 return (0);
568 }
569
570 if (flags & RFMEM) {
571 p2->p_vmspace = p1->p_vmspace;
572 atomic_add_int(&p1->p_vmspace->vm_refcnt, 1);
573 }
574 dset = td2->td_domain.dr_policy;
575 while (vm_page_count_severe_set(&dset->ds_mask)) {
576 vm_wait_doms(&dset->ds_mask);
577 }
578
579 if ((flags & RFMEM) == 0) {
580 p2->p_vmspace = vm2;
581 if (p1->p_vmspace->vm_shm)
582 shmfork(p1, p2);
583 }
584
585 /*
586 * cpu_fork will copy and update the pcb, set up the kernel stack,
587 * and make the child ready to run.
588 */
589 cpu_fork(td, p2, td2, flags);
590 return (0);
591 }
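
/*
 * Editor's note, not part of vm_glue.c: the flag combinations handled
 * above correspond roughly to rfork(2)-style requests:
 *	RFPROC | RFMEM	create a new process sharing the parent's vmspace;
 *	RFPROC alone	create a new process with its own copied vmspace (vm2);
 *	RFPROC clear	no new process; additionally clearing RFMEM unshares
 *			the vmspace among the threads of the current process.
 */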
592
593 /*
594 * Called after process has been wait(2)'ed upon and is being reaped.
595 * The idea is to reclaim resources that we could not reclaim while
596 * the process was still executing.
597 */
598 void
599 vm_waitproc(struct proc *p)
600 {
602
603 vmspace_exitfree(p); /* and clean-out the vmspace */
604 }
605
606 void
607 kick_proc0(void)
608 {
609
610 wakeup(&proc0);
611 }