sys/vm/vm_glue.c
/*-
 * SPDX-License-Identifier: (BSD-3-Clause AND MIT-CMU)
 *
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_glue.c	8.6 (Berkeley) 1/5/94
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_vm.h"
#include "opt_kstack_pages.h"
#include "opt_kstack_max_pages.h"
#include "opt_kstack_usage_prof.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/asan.h>
#include <sys/domainset.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/msan.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/racct.h>
#include <sys/refcount.h>
#include <sys/resourcevar.h>
#include <sys/rwlock.h>
#include <sys/sched.h>
#include <sys/sf_buf.h>
#include <sys/shm.h>
#include <sys/smp.h>
#include <sys/vmmeter.h>
#include <sys/vmem.h>
#include <sys/sx.h>
#include <sys/sysctl.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/unistd.h>

#include <vm/uma.h>
#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_domainset.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_object.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/vm_pager.h>
#include <vm/swap_pager.h>

#include <machine/cpu.h>

/*
 * MPSAFE
 *
 * WARNING!  This code calls vm_map_check_protection() which only checks
 * the associated vm_map_entry range.  It does not determine whether the
 * contents of the memory are actually readable or writable.  In most cases
 * just checking the vm_map_entry is sufficient within the kernel's address
 * space.
 */
int
kernacc(void *addr, int len, int rw)
{
	boolean_t rv;
	vm_offset_t saddr, eaddr;
	vm_prot_t prot;

	KASSERT((rw & ~VM_PROT_ALL) == 0,
	    ("illegal ``rw'' argument to kernacc (%x)\n", rw));

	if ((vm_offset_t)addr + len > vm_map_max(kernel_map) ||
	    (vm_offset_t)addr + len < (vm_offset_t)addr)
		return (FALSE);

	prot = rw;
	saddr = trunc_page((vm_offset_t)addr);
	eaddr = round_page((vm_offset_t)addr + len);
	vm_map_lock_read(kernel_map);
	rv = vm_map_check_protection(kernel_map, saddr, eaddr, prot);
	vm_map_unlock_read(kernel_map);
	return (rv == TRUE);
}

/*
 * MPSAFE
 *
 * WARNING!  This code calls vm_map_check_protection() which only checks
 * the associated vm_map_entry range.  It does not determine whether the
 * contents of the memory are actually readable or writable.  vmapbuf(),
 * vm_fault_quick(), or copyin()/copyout()/su*()/fu*() functions should be
 * used in conjunction with this call.
 */
int
useracc(void *addr, int len, int rw)
{
	boolean_t rv;
	vm_prot_t prot;
	vm_map_t map;

	KASSERT((rw & ~VM_PROT_ALL) == 0,
	    ("illegal ``rw'' argument to useracc (%x)\n", rw));
	prot = rw;
	map = &curproc->p_vmspace->vm_map;
	if ((vm_offset_t)addr + len > vm_map_max(map) ||
	    (vm_offset_t)addr + len < (vm_offset_t)addr) {
		return (FALSE);
	}
	vm_map_lock_read(map);
	rv = vm_map_check_protection(map, trunc_page((vm_offset_t)addr),
	    round_page((vm_offset_t)addr + len), prot);
	vm_map_unlock_read(map);
	return (rv == TRUE);
}

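/*
 * Wire the pages backing the given range of user virtual addresses so that
 * they remain resident while the kernel accesses them; the wired length is
 * accounted for in td_vslock_sz.  Returns 0 on success, EINVAL for an
 * invalid range, ENOMEM if the request exceeds vm_page_max_user_wired, and
 * EFAULT otherwise to match copyin()/copyout() behaviour.
 */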
int
vslock(void *addr, size_t len)
{
	vm_offset_t end, last, start;
	vm_size_t npages;
	int error;

	last = (vm_offset_t)addr + len;
	start = trunc_page((vm_offset_t)addr);
	end = round_page(last);
	if (last < (vm_offset_t)addr || end < (vm_offset_t)addr)
		return (EINVAL);
	npages = atop(end - start);
	if (npages > vm_page_max_user_wired)
		return (ENOMEM);
	error = vm_map_wire(&curproc->p_vmspace->vm_map, start, end,
	    VM_MAP_WIRE_SYSTEM | VM_MAP_WIRE_NOHOLES);
	if (error == KERN_SUCCESS) {
		curthread->td_vslock_sz += len;
		return (0);
	}

	/*
	 * Return EFAULT on error to match copy{in,out}() behaviour
	 * rather than returning ENOMEM like mlock() would.
	 */
	return (EFAULT);
}

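/*
 * Unwire a range previously wired by vslock() and drop the corresponding
 * accounting from td_vslock_sz.
 */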
void
vsunlock(void *addr, size_t len)
{

	/* Rely on the parameter sanity checks performed by vslock(). */
	MPASS(curthread->td_vslock_sz >= len);
	curthread->td_vslock_sz -= len;
	(void)vm_map_unwire(&curproc->p_vmspace->vm_map,
	    trunc_page((vm_offset_t)addr), round_page((vm_offset_t)addr + len),
	    VM_MAP_WIRE_SYSTEM | VM_MAP_WIRE_NOHOLES);
}

/*
 * Pin the page contained within the given object at the given offset.  If the
 * page is not resident, allocate and load it using the given object's pager.
 * Return the pinned page if successful; otherwise, return NULL.
 */
static vm_page_t
vm_imgact_hold_page(vm_object_t object, vm_ooffset_t offset)
{
	vm_page_t m;
	vm_pindex_t pindex;

	pindex = OFF_TO_IDX(offset);
	(void)vm_page_grab_valid_unlocked(&m, object, pindex,
	    VM_ALLOC_NORMAL | VM_ALLOC_NOBUSY | VM_ALLOC_WIRED);
	return (m);
}

/*
 * Return a CPU private mapping to the page at the given offset within the
 * given object.  The page is pinned before it is mapped.
 */
struct sf_buf *
vm_imgact_map_page(vm_object_t object, vm_ooffset_t offset)
{
	vm_page_t m;

	m = vm_imgact_hold_page(object, offset);
	if (m == NULL)
		return (NULL);
	sched_pin();
	return (sf_buf_alloc(m, SFB_CPUPRIVATE));
}

/*
 * Destroy the given CPU private mapping and unpin the page that it mapped.
 */
void
vm_imgact_unmap_page(struct sf_buf *sf)
{
	vm_page_t m;

	m = sf_buf_page(sf);
	sf_buf_free(sf);
	sched_unpin();
	vm_page_unwire(m, PQ_ACTIVE);
}

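/*
 * Synchronize the instruction cache for a range of the given map, e.g.
 * after writing instructions into another process's address space.  This
 * is a thin wrapper around the machine-dependent pmap hook.
 */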
void
vm_sync_icache(vm_map_t map, vm_offset_t va, vm_offset_t sz)
{

	pmap_sync_icache(map->pmap, va, sz);
}

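/*
 * kstack_object supplies the physical pages backing all kernel thread
 * stacks, indexed by each stack's offset from VM_MIN_KERNEL_ADDRESS.  The
 * UMA cache zone below keeps a bounded number of fully constructed stacks
 * around for quick reuse.
 */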
vm_object_t kstack_object;
static uma_zone_t kstack_cache;
static int kstack_cache_size;

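/*
 * Handler for the vm.kstack_cache_size sysctl: apply a new cache limit to
 * the UMA zone when the value changes.
 */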
static int
sysctl_kstack_cache_size(SYSCTL_HANDLER_ARGS)
{
	int error, oldsize;

	oldsize = kstack_cache_size;
	error = sysctl_handle_int(oidp, arg1, arg2, req);
	if (error == 0 && req->newptr && oldsize != kstack_cache_size)
		uma_zone_set_maxcache(kstack_cache, kstack_cache_size);
	return (error);
}
SYSCTL_PROC(_vm, OID_AUTO, kstack_cache_size,
    CTLTYPE_INT|CTLFLAG_MPSAFE|CTLFLAG_RW, &kstack_cache_size, 0,
    sysctl_kstack_cache_size, "IU", "Maximum number of cached kernel stacks");

/*
 * Create the kernel stack (including pcb for i386) for a new thread.
 */
static vm_offset_t
vm_thread_stack_create(struct domainset *ds, int pages)
{
	vm_page_t ma[KSTACK_MAX_PAGES];
	vm_offset_t ks;
	int i;

	/*
	 * Get a kernel virtual address for this thread's kstack.
	 */
	ks = kva_alloc((pages + KSTACK_GUARD_PAGES) * PAGE_SIZE);
	if (ks == 0) {
		printf("%s: kstack allocation failed\n", __func__);
		return (0);
	}

	if (KSTACK_GUARD_PAGES != 0) {
		pmap_qremove(ks, KSTACK_GUARD_PAGES);
		ks += KSTACK_GUARD_PAGES * PAGE_SIZE;
	}

	/*
	 * Allocate physical pages to back the stack.
	 */
	vm_thread_stack_back(ds, ks, ma, pages, VM_ALLOC_NORMAL);
	for (i = 0; i < pages; i++)
		vm_page_valid(ma[i]);
	pmap_qenter(ks, ma, pages);

	return (ks);
}

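/*
 * Unmap and free the physical pages backing a kernel stack and release its
 * KVA, including the preceding guard pages.
 */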
static void
vm_thread_stack_dispose(vm_offset_t ks, int pages)
{
	vm_page_t m;
	vm_pindex_t pindex;
	int i;

	pindex = atop(ks - VM_MIN_KERNEL_ADDRESS);

	pmap_qremove(ks, pages);
	VM_OBJECT_WLOCK(kstack_object);
	for (i = 0; i < pages; i++) {
		m = vm_page_lookup(kstack_object, pindex + i);
		if (m == NULL)
			panic("%s: kstack already missing?", __func__);
		vm_page_xbusy_claim(m);
		vm_page_unwire_noq(m);
		vm_page_free(m);
	}
	VM_OBJECT_WUNLOCK(kstack_object);
	kasan_mark((void *)ks, ptoa(pages), ptoa(pages), 0);
	kva_free(ks - (KSTACK_GUARD_PAGES * PAGE_SIZE),
	    (pages + KSTACK_GUARD_PAGES) * PAGE_SIZE);
}

/*
 * Allocate the kernel stack for a new thread.
 */
int
vm_thread_new(struct thread *td, int pages)
{
	vm_offset_t ks;

	/* Bounds check */
	if (pages <= 1)
		pages = kstack_pages;
	else if (pages > KSTACK_MAX_PAGES)
		pages = KSTACK_MAX_PAGES;

	ks = 0;
	if (pages == kstack_pages && kstack_cache != NULL)
		ks = (vm_offset_t)uma_zalloc(kstack_cache, M_NOWAIT);

	/*
	 * Ensure that kstack objects can draw pages from any memory
	 * domain.  Otherwise a local memory shortage can block a process
	 * swap-in.
	 */
	if (ks == 0)
		ks = vm_thread_stack_create(DOMAINSET_PREF(PCPU_GET(domain)),
		    pages);
	if (ks == 0)
		return (0);
	td->td_kstack = ks;
	td->td_kstack_pages = pages;
	kasan_mark((void *)ks, ptoa(pages), ptoa(pages), 0);
	kmsan_mark((void *)ks, ptoa(pages), KMSAN_STATE_UNINIT);
	return (1);
}

/*
 * Dispose of a thread's kernel stack.
 */
void
vm_thread_dispose(struct thread *td)
{
	vm_offset_t ks;
	int pages;

	pages = td->td_kstack_pages;
	ks = td->td_kstack;
	td->td_kstack = 0;
	td->td_kstack_pages = 0;
	kasan_mark((void *)ks, 0, ptoa(pages), KASAN_KSTACK_FREED);
	if (pages == kstack_pages)
		uma_zfree(kstack_cache, (void *)ks);
	else
		vm_thread_stack_dispose(ks, pages);
}

/*
 * Allocate physical pages, following the specified NUMA policy, to back a
 * kernel stack.
 */
void
vm_thread_stack_back(struct domainset *ds, vm_offset_t ks, vm_page_t ma[],
    int npages, int req_class)
{
	vm_pindex_t pindex;
	int n;

	pindex = atop(ks - VM_MIN_KERNEL_ADDRESS);

	VM_OBJECT_WLOCK(kstack_object);
	for (n = 0; n < npages;) {
		if (vm_ndomains > 1)
			kstack_object->domain.dr_policy = ds;

		/*
		 * Use WAITFAIL to force a reset of the domain selection policy
		 * if we had to sleep for pages.
		 */
		n += vm_page_grab_pages(kstack_object, pindex + n,
		    req_class | VM_ALLOC_WIRED | VM_ALLOC_WAITFAIL,
		    &ma[n], npages - n);
	}
	VM_OBJECT_WUNLOCK(kstack_object);
}

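/*
 * UMA import callback: construct up to "cnt" kernel stacks for the cache,
 * preferring the given memory domain when one is specified.
 */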
static int
kstack_import(void *arg, void **store, int cnt, int domain, int flags)
{
	struct domainset *ds;
	int i;

	if (domain == UMA_ANYDOMAIN)
		ds = DOMAINSET_RR();
	else
		ds = DOMAINSET_PREF(domain);

	for (i = 0; i < cnt; i++) {
		store[i] = (void *)vm_thread_stack_create(ds, kstack_pages);
		if (store[i] == NULL)
			break;
	}
	return (i);
}

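/*
 * UMA release callback: return cached kernel stacks to the VM system.
 */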
static void
kstack_release(void *arg, void **store, int cnt)
{
	vm_offset_t ks;
	int i;

	for (i = 0; i < cnt; i++) {
		ks = (vm_offset_t)store[i];
		vm_thread_stack_dispose(ks, kstack_pages);
	}
}

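/*
 * Set up kstack_object and the stack cache zone at boot, sizing the cache
 * relative to the number of CPUs.
 */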
static void
kstack_cache_init(void *null)
{
	kstack_object = vm_object_allocate(OBJT_SWAP,
	    atop(VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS));
	kstack_cache = uma_zcache_create("kstack_cache",
	    kstack_pages * PAGE_SIZE, NULL, NULL, NULL, NULL,
	    kstack_import, kstack_release, NULL,
	    UMA_ZONE_FIRSTTOUCH);
	kstack_cache_size = imax(128, mp_ncpus * 4);
	uma_zone_set_maxcache(kstack_cache, kstack_cache_size);
}
SYSINIT(vm_kstacks, SI_SUB_KMEM, SI_ORDER_ANY, kstack_cache_init, NULL);

#ifdef KSTACK_USAGE_PROF
/*
 * Track maximum stack used by a thread in kernel.
 */
static int max_kstack_used;

SYSCTL_INT(_debug, OID_AUTO, max_kstack_used, CTLFLAG_RD,
    &max_kstack_used, 0,
    "Maximum stack depth used by a thread in kernel");

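/*
 * Sample the current kernel stack depth at interrupt time and record the
 * deepest use observed so far in max_kstack_used.
 */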
void
intr_prof_stack_use(struct thread *td, struct trapframe *frame)
{
	vm_offset_t stack_top;
	vm_offset_t current;
	int used, prev_used;

	/*
	 * Testing for interrupted kernel mode isn't strictly
	 * needed.  It optimizes the execution, since interrupts from
	 * usermode will have only the trap frame on the stack.
	 */
	if (TRAPF_USERMODE(frame))
		return;

	stack_top = td->td_kstack + td->td_kstack_pages * PAGE_SIZE;
	current = (vm_offset_t)(uintptr_t)&stack_top;

	/*
	 * Try to detect if the interrupt is using the kernel thread stack.
	 * Hardware could use a dedicated stack for interrupt handling.
	 */
	if (stack_top <= current || current < td->td_kstack)
		return;

	used = stack_top - current;
	for (;;) {
		prev_used = max_kstack_used;
		if (prev_used >= used)
			break;
		if (atomic_cmpset_int(&max_kstack_used, prev_used, used))
			break;
	}
}
#endif /* KSTACK_USAGE_PROF */

/*
 * Implement fork's actions on an address space.
 * Here we arrange for the address space to be copied or referenced,
 * allocate a user struct (pcb and kernel stack), then call the
 * machine-dependent layer to fill those in and make the new process
 * ready to run.  The new process is set up so that it returns directly
 * to user mode to avoid stack copying and relocation problems.
 */
int
vm_forkproc(struct thread *td, struct proc *p2, struct thread *td2,
    struct vmspace *vm2, int flags)
{
	struct proc *p1 = td->td_proc;
	struct domainset *dset;
	int error;

	if ((flags & RFPROC) == 0) {
		/*
		 * Divorce the memory, if it is shared; essentially
		 * this changes memory shared amongst threads into
		 * COW locally.
		 */
		if ((flags & RFMEM) == 0) {
			error = vmspace_unshare(p1);
			if (error)
				return (error);
		}
		cpu_fork(td, p2, td2, flags);
		return (0);
	}

	if (flags & RFMEM) {
		p2->p_vmspace = p1->p_vmspace;
		refcount_acquire(&p1->p_vmspace->vm_refcnt);
	}
	dset = td2->td_domain.dr_policy;
	while (vm_page_count_severe_set(&dset->ds_mask)) {
		vm_wait_doms(&dset->ds_mask, 0);
	}

	if ((flags & RFMEM) == 0) {
		p2->p_vmspace = vm2;
		if (p1->p_vmspace->vm_shm)
			shmfork(p1, p2);
	}

	/*
	 * cpu_fork will copy and update the pcb, set up the kernel stack,
	 * and make the child ready to run.
	 */
	cpu_fork(td, p2, td2, flags);
	return (0);
}

/*
 * Called after a process has been wait(2)'ed upon and is being reaped.
 * The idea is to reclaim resources that we could not reclaim while
 * the process was still executing.
 */
void
vm_waitproc(struct proc *p)
{

	vmspace_exitfree(p);	/* and clean-out the vmspace */
}

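/*
 * Wake up any thread sleeping on the proc0 wait channel; historically this
 * nudged the in-kernel swapper, which slept on proc0.
 */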
void
kick_proc0(void)
{

	wakeup(&proc0);
}