FreeBSD/Linux Kernel Cross Reference
sys/vm/vm_kern.c
/*-
 * SPDX-License-Identifier: (BSD-3-Clause AND MIT-CMU)
 *
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: @(#)vm_kern.c	8.3 (Berkeley) 1/12/94
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 * Kernel memory management.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_vm.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>		/* for ticks and hz */
#include <sys/domainset.h>
#include <sys/eventhandler.h>
#include <sys/lock.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/rwlock.h>
#include <sys/sysctl.h>
#include <sys/vmem.h>
#include <sys/vmmeter.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_domainset.h>
#include <vm/vm_kern.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_phys.h>
#include <vm/vm_pagequeue.h>
#include <vm/vm_radix.h>
#include <vm/vm_extern.h>
#include <vm/uma.h>

vm_map_t kernel_map;
vm_map_t exec_map;
vm_map_t pipe_map;

const void *zero_region;
CTASSERT((ZERO_REGION_SIZE & PAGE_MASK) == 0);

/* NB: Used by kernel debuggers. */
const u_long vm_maxuser_address = VM_MAXUSER_ADDRESS;

u_int exec_map_entry_size;
u_int exec_map_entries;

SYSCTL_ULONG(_vm, OID_AUTO, min_kernel_address, CTLFLAG_RD,
    SYSCTL_NULL_ULONG_PTR, VM_MIN_KERNEL_ADDRESS, "Min kernel address");

SYSCTL_ULONG(_vm, OID_AUTO, max_kernel_address, CTLFLAG_RD,
#if defined(__arm__) || defined(__sparc64__)
    &vm_max_kernel_address, 0,
#else
    SYSCTL_NULL_ULONG_PTR, VM_MAX_KERNEL_ADDRESS,
#endif
    "Max kernel address");

#if VM_NRESERVLEVEL > 0
#define	KVA_QUANTUM_SHIFT	(VM_LEVEL_0_ORDER + PAGE_SHIFT)
#else
/* On non-superpage architectures we want large import sizes. */
#define	KVA_QUANTUM_SHIFT	(8 + PAGE_SHIFT)
#endif
#define	KVA_QUANTUM		(1 << KVA_QUANTUM_SHIFT)

/*
 *	kva_alloc:
 *
 *	Allocate a virtual address range with no underlying object and
 *	no initial mapping to physical memory.  Any mapping from this
 *	range to physical memory must be explicitly created prior to
 *	its use, typically with pmap_qenter().  Any attempt to create
 *	a mapping on demand through vm_fault() will result in a panic.
 */
vm_offset_t
kva_alloc(vm_size_t size)
{
	vm_offset_t addr;

	size = round_page(size);
	if (vmem_alloc(kernel_arena, size, M_BESTFIT | M_NOWAIT, &addr))
		return (0);

	return (addr);
}
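
/*
 * Illustrative sketch (not part of this file): a typical consumer pairs
 * kva_alloc() with pmap_qenter() to map pages it already owns, and tears
 * the mapping down before releasing the range.  The page array "m" and
 * count "npages" below are hypothetical.
 *
 *	vm_offset_t va;
 *
 *	va = kva_alloc(ptoa(npages));
 *	if (va == 0)
 *		return (ENOMEM);
 *	pmap_qenter(va, m, npages);
 *	... use the mapping at va ...
 *	pmap_qremove(va, npages);
 *	kva_free(va, ptoa(npages));
 */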

/*
 *	kva_free:
 *
 *	Release a region of kernel virtual memory allocated with
 *	kva_alloc().  This releases only the virtual address range; it
 *	does not unmap or free any physical pages that the caller mapped
 *	into the range, so such mappings should be removed (e.g., with
 *	pmap_qremove()) before the range is freed.
 *
 *	This routine may not block on kernel maps.
 */
void
kva_free(vm_offset_t addr, vm_size_t size)
{

	size = round_page(size);
	vmem_free(kernel_arena, addr, size);
}

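/*
 * Allocate a run of contiguous physical pages for the given kernel object.
 * If the first attempt fails and the caller allows sleeping, retry a few
 * times, attempting to reclaim (defragment) contiguous physical memory and
 * waiting for free pages between attempts.
 */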
static vm_page_t
kmem_alloc_contig_pages(vm_object_t object, vm_pindex_t pindex, int domain,
    int pflags, u_long npages, vm_paddr_t low, vm_paddr_t high,
    u_long alignment, vm_paddr_t boundary, vm_memattr_t memattr)
{
	vm_page_t m;
	int tries;
	bool wait;

	VM_OBJECT_ASSERT_WLOCKED(object);

	wait = (pflags & VM_ALLOC_WAITOK) != 0;
	pflags &= ~(VM_ALLOC_NOWAIT | VM_ALLOC_WAITOK | VM_ALLOC_WAITFAIL);
	pflags |= VM_ALLOC_NOWAIT;
	for (tries = wait ? 3 : 1;; tries--) {
		m = vm_page_alloc_contig_domain(object, pindex, domain, pflags,
		    npages, low, high, alignment, boundary, memattr);
		if (m != NULL || tries == 0)
			break;

		VM_OBJECT_WUNLOCK(object);
		if (!vm_page_reclaim_contig_domain(domain, pflags, npages,
		    low, high, alignment, boundary) && wait)
			vm_wait_domain(domain);
		VM_OBJECT_WLOCK(object);
	}
	return (m);
}

/*
 * Allocates a region from the kernel address map and physical pages
 * within the specified address range to the kernel object.  Creates a
 * wired mapping from this region to these pages, and returns the
 * region's starting virtual address.  The allocated pages are not
 * necessarily physically contiguous.  If M_ZERO is specified through the
 * given flags, then the pages are zeroed before they are mapped.
 */
static vm_offset_t
kmem_alloc_attr_domain(int domain, vm_size_t size, int flags, vm_paddr_t low,
    vm_paddr_t high, vm_memattr_t memattr)
{
	vmem_t *vmem;
	vm_object_t object;
	vm_offset_t addr, i, offset;
	vm_page_t m;
	int pflags;
	vm_prot_t prot;

	object = kernel_object;
	size = round_page(size);
	vmem = vm_dom[domain].vmd_kernel_arena;
	if (vmem_alloc(vmem, size, M_BESTFIT | flags, &addr))
		return (0);
	offset = addr - VM_MIN_KERNEL_ADDRESS;
	pflags = malloc2vm_flags(flags) | VM_ALLOC_NOBUSY | VM_ALLOC_WIRED;
	prot = (flags & M_EXEC) != 0 ? VM_PROT_ALL : VM_PROT_RW;
	VM_OBJECT_WLOCK(object);
	for (i = 0; i < size; i += PAGE_SIZE) {
		m = kmem_alloc_contig_pages(object, atop(offset + i),
		    domain, pflags, 1, low, high, PAGE_SIZE, 0, memattr);
		if (m == NULL) {
			VM_OBJECT_WUNLOCK(object);
			kmem_unback(object, addr, i);
			vmem_free(vmem, addr, size);
			return (0);
		}
		KASSERT(vm_phys_domain(m) == domain,
		    ("kmem_alloc_attr_domain: Domain mismatch %d != %d",
		    vm_phys_domain(m), domain));
		if ((flags & M_ZERO) && (m->flags & PG_ZERO) == 0)
			pmap_zero_page(m);
		m->valid = VM_PAGE_BITS_ALL;
		pmap_enter(kernel_pmap, addr + i, m, prot,
		    prot | PMAP_ENTER_WIRED, 0);
	}
	VM_OBJECT_WUNLOCK(object);
	return (addr);
}

vm_offset_t
kmem_alloc_attr(vm_size_t size, int flags, vm_paddr_t low, vm_paddr_t high,
    vm_memattr_t memattr)
{

	return (kmem_alloc_attr_domainset(DOMAINSET_RR(), size, flags, low,
	    high, memattr));
}
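
/*
 * Illustrative sketch (not part of this file): allocating a zeroed, wired
 * buffer whose pages all reside below 4GB, without requiring physical
 * contiguity.  The size used here is arbitrary.
 *
 *	vm_offset_t va;
 *
 *	va = kmem_alloc_attr(128 * 1024, M_WAITOK | M_ZERO, 0,
 *	    0xffffffffUL, VM_MEMATTR_DEFAULT);
 *	...
 *	kmem_free(va, 128 * 1024);
 */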

vm_offset_t
kmem_alloc_attr_domainset(struct domainset *ds, vm_size_t size, int flags,
    vm_paddr_t low, vm_paddr_t high, vm_memattr_t memattr)
{
	struct vm_domainset_iter di;
	vm_offset_t addr;
	int domain;

	vm_domainset_iter_policy_init(&di, ds, &domain, &flags);
	do {
		addr = kmem_alloc_attr_domain(domain, size, flags, low, high,
		    memattr);
		if (addr != 0)
			break;
	} while (vm_domainset_iter_policy(&di, &domain) == 0);

	return (addr);
}

/*
 * Allocates a region from the kernel address map and physically
 * contiguous pages within the specified address range to the kernel
 * object.  Creates a wired mapping from this region to these pages, and
 * returns the region's starting virtual address.  If M_ZERO is specified
 * through the given flags, then the pages are zeroed before they are
 * mapped.
 */
static vm_offset_t
kmem_alloc_contig_domain(int domain, vm_size_t size, int flags, vm_paddr_t low,
    vm_paddr_t high, u_long alignment, vm_paddr_t boundary,
    vm_memattr_t memattr)
{
	vmem_t *vmem;
	vm_object_t object;
	vm_offset_t addr, offset, tmp;
	vm_page_t end_m, m;
	u_long npages;
	int pflags;

	object = kernel_object;
	size = round_page(size);
	vmem = vm_dom[domain].vmd_kernel_arena;
	if (vmem_alloc(vmem, size, flags | M_BESTFIT, &addr))
		return (0);
	offset = addr - VM_MIN_KERNEL_ADDRESS;
	pflags = malloc2vm_flags(flags) | VM_ALLOC_NOBUSY | VM_ALLOC_WIRED;
	npages = atop(size);
	VM_OBJECT_WLOCK(object);
	m = kmem_alloc_contig_pages(object, atop(offset), domain,
	    pflags, npages, low, high, alignment, boundary, memattr);
	if (m == NULL) {
		VM_OBJECT_WUNLOCK(object);
		vmem_free(vmem, addr, size);
		return (0);
	}
	KASSERT(vm_phys_domain(m) == domain,
	    ("kmem_alloc_contig_domain: Domain mismatch %d != %d",
	    vm_phys_domain(m), domain));
	end_m = m + npages;
	tmp = addr;
	for (; m < end_m; m++) {
		if ((flags & M_ZERO) && (m->flags & PG_ZERO) == 0)
			pmap_zero_page(m);
		m->valid = VM_PAGE_BITS_ALL;
		pmap_enter(kernel_pmap, tmp, m, VM_PROT_RW,
		    VM_PROT_RW | PMAP_ENTER_WIRED, 0);
		tmp += PAGE_SIZE;
	}
	VM_OBJECT_WUNLOCK(object);
	return (addr);
}

vm_offset_t
kmem_alloc_contig(vm_size_t size, int flags, vm_paddr_t low, vm_paddr_t high,
    u_long alignment, vm_paddr_t boundary, vm_memattr_t memattr)
{

	return (kmem_alloc_contig_domainset(DOMAINSET_RR(), size, flags, low,
	    high, alignment, boundary, memattr));
}
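
/*
 * Illustrative sketch (not part of this file): a driver might allocate a
 * physically contiguous, page-aligned DMA buffer below 4GB that does not
 * cross a 64KB boundary.  The sizes and constraints here are hypothetical.
 *
 *	vm_offset_t va;
 *
 *	va = kmem_alloc_contig(64 * 1024, M_WAITOK | M_ZERO, 0,
 *	    0xffffffffUL, PAGE_SIZE, 64 * 1024, VM_MEMATTR_DEFAULT);
 *	if (va == 0)
 *		return (ENOMEM);
 *	... program the device with pmap_kextract(va) ...
 *	kmem_free(va, 64 * 1024);
 */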

vm_offset_t
kmem_alloc_contig_domainset(struct domainset *ds, vm_size_t size, int flags,
    vm_paddr_t low, vm_paddr_t high, u_long alignment, vm_paddr_t boundary,
    vm_memattr_t memattr)
{
	struct vm_domainset_iter di;
	vm_offset_t addr;
	int domain;

	vm_domainset_iter_policy_init(&di, ds, &domain, &flags);
	do {
		addr = kmem_alloc_contig_domain(domain, size, flags, low, high,
		    alignment, boundary, memattr);
		if (addr != 0)
			break;
	} while (vm_domainset_iter_policy(&di, &domain) == 0);

	return (addr);
}

/*
 *	kmem_suballoc:
 *
 *	Allocates a map to manage a subrange
 *	of the kernel virtual address space.
 *
 *	Arguments are as follows:
 *
 *	parent		Map to take range from
 *	min, max	Returned endpoints of map
 *	size		Size of range to find
 *	superpage_align	Request that min is superpage aligned
 */
vm_map_t
kmem_suballoc(vm_map_t parent, vm_offset_t *min, vm_offset_t *max,
    vm_size_t size, boolean_t superpage_align)
{
	int ret;
	vm_map_t result;

	size = round_page(size);

	*min = vm_map_min(parent);
	ret = vm_map_find(parent, NULL, 0, min, size, 0, superpage_align ?
	    VMFS_SUPER_SPACE : VMFS_ANY_SPACE, VM_PROT_ALL, VM_PROT_ALL,
	    MAP_ACC_NO_CHARGE);
	if (ret != KERN_SUCCESS)
		panic("kmem_suballoc: bad status return of %d", ret);
	*max = *min + size;
	result = vm_map_create(vm_map_pmap(parent), *min, *max);
	if (result == NULL)
		panic("kmem_suballoc: cannot create submap");
	if (vm_map_submap(parent, *min, *max, result) != KERN_SUCCESS)
		panic("kmem_suballoc: unable to change range to submap");
	return (result);
}
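
/*
 * Illustrative sketch (not part of this file): this is roughly how boot
 * code elsewhere in the kernel carves the pipe submap (declared above)
 * out of kernel_map; treat the use of the maxpipekva tunable here as an
 * assumption.
 *
 *	vm_offset_t minaddr, maxaddr;
 *
 *	pipe_map = kmem_suballoc(kernel_map, &minaddr, &maxaddr,
 *	    maxpipekva, FALSE);
 */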

/*
 *	kmem_malloc_domain:
 *
 *	Allocate wired-down pages in the kernel's address space.
 */
static vm_offset_t
kmem_malloc_domain(int domain, vm_size_t size, int flags)
{
	vmem_t *arena;
	vm_offset_t addr;
	int rv;

	if (__predict_true((flags & M_EXEC) == 0))
		arena = vm_dom[domain].vmd_kernel_arena;
	else
		arena = vm_dom[domain].vmd_kernel_rwx_arena;
	size = round_page(size);
	if (vmem_alloc(arena, size, flags | M_BESTFIT, &addr))
		return (0);

	rv = kmem_back_domain(domain, kernel_object, addr, size, flags);
	if (rv != KERN_SUCCESS) {
		vmem_free(arena, addr, size);
		return (0);
	}
	return (addr);
}

vm_offset_t
kmem_malloc(vm_size_t size, int flags)
{

	return (kmem_malloc_domainset(DOMAINSET_RR(), size, flags));
}

vm_offset_t
kmem_malloc_domainset(struct domainset *ds, vm_size_t size, int flags)
{
	struct vm_domainset_iter di;
	vm_offset_t addr;
	int domain;

	vm_domainset_iter_policy_init(&di, ds, &domain, &flags);
	do {
		addr = kmem_malloc_domain(domain, size, flags);
		if (addr != 0)
			break;
	} while (vm_domainset_iter_policy(&di, &domain) == 0);

	return (addr);
}
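
/*
 * Illustrative sketch (not part of this file): kmem_malloc() is mostly a
 * backend for malloc(9) and UMA, but it can be called directly for large
 * wired allocations; the matching release is kmem_free() with the same
 * size.
 *
 *	vm_offset_t va;
 *
 *	va = kmem_malloc(4 * PAGE_SIZE, M_WAITOK | M_ZERO);
 *	...
 *	kmem_free(va, 4 * PAGE_SIZE);
 */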

/*
 *	kmem_back_domain:
 *
 *	Allocate physical pages from the specified domain for the specified
 *	virtual address range.
 */
int
kmem_back_domain(int domain, vm_object_t object, vm_offset_t addr,
    vm_size_t size, int flags)
{
	vm_offset_t offset, i;
	vm_page_t m, mpred;
	vm_prot_t prot;
	int pflags;

	KASSERT(object == kernel_object,
	    ("kmem_back_domain: only supports kernel object."));

	offset = addr - VM_MIN_KERNEL_ADDRESS;
	pflags = malloc2vm_flags(flags) | VM_ALLOC_NOBUSY | VM_ALLOC_WIRED;
	pflags &= ~(VM_ALLOC_NOWAIT | VM_ALLOC_WAITOK | VM_ALLOC_WAITFAIL);
	if (flags & M_WAITOK)
		pflags |= VM_ALLOC_WAITFAIL;
	prot = (flags & M_EXEC) != 0 ? VM_PROT_ALL : VM_PROT_RW;

	i = 0;
	VM_OBJECT_WLOCK(object);
retry:
	mpred = vm_radix_lookup_le(&object->rtree, atop(offset + i));
	for (; i < size; i += PAGE_SIZE, mpred = m) {
		m = vm_page_alloc_domain_after(object, atop(offset + i),
		    domain, pflags, mpred);

		/*
		 * Ran out of space, free everything up and return.  Don't need
		 * to lock page queues here as we know that the pages we got
		 * aren't on any queues.
		 */
		if (m == NULL) {
			if ((flags & M_NOWAIT) == 0)
				goto retry;
			VM_OBJECT_WUNLOCK(object);
			kmem_unback(object, addr, i);
			return (KERN_NO_SPACE);
		}
		KASSERT(vm_phys_domain(m) == domain,
		    ("kmem_back_domain: Domain mismatch %d != %d",
		    vm_phys_domain(m), domain));
		if (flags & M_ZERO && (m->flags & PG_ZERO) == 0)
			pmap_zero_page(m);
		KASSERT((m->oflags & VPO_UNMANAGED) != 0,
		    ("kmem_malloc: page %p is managed", m));
		m->valid = VM_PAGE_BITS_ALL;
		pmap_enter(kernel_pmap, addr + i, m, prot,
		    prot | PMAP_ENTER_WIRED, 0);
		if (__predict_false((prot & VM_PROT_EXECUTE) != 0))
			m->oflags |= VPO_KMEM_EXEC;
	}
	VM_OBJECT_WUNLOCK(object);

	return (KERN_SUCCESS);
}

/*
 *	kmem_back:
 *
 *	Allocate physical pages for the specified virtual address range.
 */
int
kmem_back(vm_object_t object, vm_offset_t addr, vm_size_t size, int flags)
{
	vm_offset_t end, next, start;
	int domain, rv;

	KASSERT(object == kernel_object,
	    ("kmem_back: only supports kernel object."));

	/* A zero-length request trivially succeeds. */
	rv = KERN_SUCCESS;
	for (start = addr, end = addr + size; addr < end; addr = next) {
		/*
		 * We must ensure that pages backing a given large virtual page
		 * all come from the same physical domain.
		 */
		if (vm_ndomains > 1) {
			domain = (addr >> KVA_QUANTUM_SHIFT) % vm_ndomains;
			while (VM_DOMAIN_EMPTY(domain))
				domain++;
			next = roundup2(addr + 1, KVA_QUANTUM);
			if (next > end || next < start)
				next = end;
		} else {
			domain = 0;
			next = end;
		}
		rv = kmem_back_domain(domain, object, addr, next - addr, flags);
		if (rv != KERN_SUCCESS) {
			kmem_unback(object, start, addr - start);
			break;
		}
	}
	return (rv);
}
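
/*
 * Illustrative sketch (not part of this file): a hypothetical consumer
 * that owns a range "va"/"size" obtained from the kernel arena could
 * populate it with physical pages on demand and release them later.
 *
 *	if (kmem_back(kernel_object, va, size, M_WAITOK | M_ZERO) !=
 *	    KERN_SUCCESS)
 *		return (ENOMEM);
 *	...
 *	kmem_unback(kernel_object, va, size);
 */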

/*
 *	kmem_unback:
 *
 *	Unmap and free the physical pages underlying the specified virtual
 *	address range.
 *
 *	A physical page must exist within the specified object at each index
 *	that is being unmapped.
 */
static struct vmem *
_kmem_unback(vm_object_t object, vm_offset_t addr, vm_size_t size)
{
	struct vmem *arena;
	vm_page_t m, next;
	vm_offset_t end, offset;
	int domain;

	KASSERT(object == kernel_object,
	    ("kmem_unback: only supports kernel object."));

	if (size == 0)
		return (NULL);
	pmap_remove(kernel_pmap, addr, addr + size);
	offset = addr - VM_MIN_KERNEL_ADDRESS;
	end = offset + size;
	VM_OBJECT_WLOCK(object);
	m = vm_page_lookup(object, atop(offset));
	domain = vm_phys_domain(m);
	if (__predict_true((m->oflags & VPO_KMEM_EXEC) == 0))
		arena = vm_dom[domain].vmd_kernel_arena;
	else
		arena = vm_dom[domain].vmd_kernel_rwx_arena;
	for (; offset < end; offset += PAGE_SIZE, m = next) {
		next = vm_page_next(m);
		vm_page_unwire_noq(m);
		vm_page_free(m);
	}
	VM_OBJECT_WUNLOCK(object);

	return (arena);
}

void
kmem_unback(vm_object_t object, vm_offset_t addr, vm_size_t size)
{

	(void)_kmem_unback(object, addr, size);
}

/*
 *	kmem_free:
 *
 *	Free memory allocated with kmem_malloc.  The size must match the
 *	original allocation.
 */
void
kmem_free(vm_offset_t addr, vm_size_t size)
{
	struct vmem *arena;

	size = round_page(size);
	arena = _kmem_unback(kernel_object, addr, size);
	if (arena != NULL)
		vmem_free(arena, addr, size);
}

/*
 *	kmap_alloc_wait:
 *
 *	Allocates pageable memory from a sub-map of the kernel.  If the submap
 *	has no room, the caller sleeps waiting for more memory in the submap.
 *
 *	This routine may block.
 */
vm_offset_t
kmap_alloc_wait(vm_map_t map, vm_size_t size)
{
	vm_offset_t addr;

	size = round_page(size);
	if (!swap_reserve(size))
		return (0);

	for (;;) {
		/*
		 * To make this work for more than one map, use the map's lock
		 * to lock out sleepers/wakers.
		 */
		vm_map_lock(map);
		addr = vm_map_findspace(map, vm_map_min(map), size);
		if (addr + size <= vm_map_max(map))
			break;
		/* no space now; see if we can ever get space */
		if (vm_map_max(map) - vm_map_min(map) < size) {
			vm_map_unlock(map);
			swap_release(size);
			return (0);
		}
		map->needs_wakeup = TRUE;
		vm_map_unlock_and_wait(map, 0);
	}
	vm_map_insert(map, NULL, 0, addr, addr + size, VM_PROT_RW, VM_PROT_RW,
	    MAP_ACC_CHARGED);
	vm_map_unlock(map);
	return (addr);
}
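
/*
 * Illustrative sketch (not part of this file): a consumer of a pageable
 * submap (exec_map and pipe_map, declared above, exist for this purpose)
 * would pair these routines as follows; "bufsize" is hypothetical.
 *
 *	vm_offset_t buf;
 *
 *	buf = kmap_alloc_wait(pipe_map, bufsize);
 *	if (buf == 0)
 *		return (ENOMEM);
 *	...
 *	kmap_free_wakeup(pipe_map, buf, bufsize);
 */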

/*
 *	kmap_free_wakeup:
 *
 *	Returns memory to a submap of the kernel, and wakes up any processes
 *	waiting for memory in that map.
 */
void
kmap_free_wakeup(vm_map_t map, vm_offset_t addr, vm_size_t size)
{

	vm_map_lock(map);
	(void) vm_map_delete(map, trunc_page(addr), round_page(addr + size));
	if (map->needs_wakeup) {
		map->needs_wakeup = FALSE;
		vm_map_wakeup(map);
	}
	vm_map_unlock(map);
}

void
kmem_init_zero_region(void)
{
	vm_offset_t addr, i;
	vm_page_t m;

	/*
	 * Map a single physical page of zeros to a larger virtual range.
	 * This requires less looping in places that want large amounts of
	 * zeros, while not using much more physical resources.
	 */
	addr = kva_alloc(ZERO_REGION_SIZE);
	m = vm_page_alloc(NULL, 0, VM_ALLOC_NORMAL |
	    VM_ALLOC_NOOBJ | VM_ALLOC_WIRED | VM_ALLOC_ZERO);
	if ((m->flags & PG_ZERO) == 0)
		pmap_zero_page(m);
	for (i = 0; i < ZERO_REGION_SIZE; i += PAGE_SIZE)
		pmap_qenter(addr + i, &m, 1);
	pmap_protect(kernel_pmap, addr, addr + ZERO_REGION_SIZE, VM_PROT_READ);

	zero_region = (const void *)addr;
}
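
/*
 * Illustrative sketch (not part of this file): zero_region gives callers
 * a large read-only run of zeros without allocating one, e.g. as a source
 * buffer for copies of zeros or to test whether a buffer is entirely zero.
 *
 *	if (memcmp(buf, zero_region, MIN(len, ZERO_REGION_SIZE)) == 0)
 *		... the first MIN(len, ZERO_REGION_SIZE) bytes are zero ...
 */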

/*
 * Import KVA from the kernel map into the kernel arena.
 */
static int
kva_import(void *unused, vmem_size_t size, int flags, vmem_addr_t *addrp)
{
	vm_offset_t addr;
	int result;

	KASSERT((size % KVA_QUANTUM) == 0,
	    ("kva_import: Size %jd is not a multiple of %d",
	    (intmax_t)size, (int)KVA_QUANTUM));
	addr = vm_map_min(kernel_map);
	result = vm_map_find(kernel_map, NULL, 0, &addr, size, 0,
	    VMFS_SUPER_SPACE, VM_PROT_ALL, VM_PROT_ALL, MAP_NOFAULT);
	if (result != KERN_SUCCESS)
		return (ENOMEM);

	*addrp = addr;

	return (0);
}

/*
 * Import KVA from a parent arena into a per-domain arena.  Imports must be
 * KVA_QUANTUM-aligned and a multiple of KVA_QUANTUM in size.
 */
static int
kva_import_domain(void *arena, vmem_size_t size, int flags, vmem_addr_t *addrp)
{

	KASSERT((size % KVA_QUANTUM) == 0,
	    ("kva_import_domain: Size %jd is not a multiple of %d",
	    (intmax_t)size, (int)KVA_QUANTUM));
	return (vmem_xalloc(arena, size, KVA_QUANTUM, 0, 0, VMEM_ADDR_MIN,
	    VMEM_ADDR_MAX, flags, addrp));
}

/*
 *	kmem_init:
 *
 *	Create the kernel map; insert a mapping covering kernel text,
 *	data, bss, and all space allocated thus far (`bootstrap' data).  The
 *	new map will thus map the range between VM_MIN_KERNEL_ADDRESS and
 *	`start' as allocated, and the range between `start' and `end' as free.
 *	Create the kernel vmem arena and its per-domain children.
 */
void
kmem_init(vm_offset_t start, vm_offset_t end)
{
	vm_map_t m;
	int domain;

	m = vm_map_create(kernel_pmap, VM_MIN_KERNEL_ADDRESS, end);
	m->system_map = 1;
	vm_map_lock(m);
	/* N.B.: cannot use kgdb to debug, starting with this assignment ... */
	kernel_map = m;
	(void) vm_map_insert(m, NULL, (vm_ooffset_t) 0,
#ifdef __amd64__
	    KERNBASE,
#else
	    VM_MIN_KERNEL_ADDRESS,
#endif
	    start, VM_PROT_ALL, VM_PROT_ALL, MAP_NOFAULT);
	/* ... and ending with the completion of the above `insert' */
	vm_map_unlock(m);

	/*
	 * Initialize the kernel_arena.  This can grow on demand.
	 */
	vmem_init(kernel_arena, "kernel arena", 0, 0, PAGE_SIZE, 0, 0);
	vmem_set_import(kernel_arena, kva_import, NULL, NULL, KVA_QUANTUM);

	for (domain = 0; domain < vm_ndomains; domain++) {
		/*
		 * Initialize the per-domain arenas.  These are used to color
		 * the KVA space in a way that ensures that virtual large pages
		 * are backed by memory from the same physical domain,
		 * maximizing the potential for superpage promotion.
		 */
		vm_dom[domain].vmd_kernel_arena = vmem_create(
		    "kernel arena domain", 0, 0, PAGE_SIZE, 0, M_WAITOK);
		vmem_set_import(vm_dom[domain].vmd_kernel_arena,
		    kva_import_domain, NULL, kernel_arena, KVA_QUANTUM);

		/*
		 * In architectures with superpages, maintain separate arenas
		 * for allocations with permissions that differ from the
		 * "standard" read/write permissions used for kernel memory,
		 * so as not to inhibit superpage promotion.
		 */
#if VM_NRESERVLEVEL > 0
		vm_dom[domain].vmd_kernel_rwx_arena = vmem_create(
		    "kernel rwx arena domain", 0, 0, PAGE_SIZE, 0, M_WAITOK);
		vmem_set_import(vm_dom[domain].vmd_kernel_rwx_arena,
		    kva_import_domain, (vmem_release_t *)vmem_xfree,
		    kernel_arena, KVA_QUANTUM);
#else
		vm_dom[domain].vmd_kernel_rwx_arena =
		    vm_dom[domain].vmd_kernel_arena;
#endif
	}
}

/*
 *	kmem_bootstrap_free:
 *
 *	Free pages backing preloaded data (e.g., kernel modules) to the
 *	system.  Currently only supported on platforms that create a
 *	vm_phys segment for preloaded data.
 */
void
kmem_bootstrap_free(vm_offset_t start, vm_size_t size)
{
#if defined(__i386__) || defined(__amd64__)
	struct vm_domain *vmd;
	vm_offset_t end, va;
	vm_paddr_t pa;
	vm_page_t m;

	end = trunc_page(start + size);
	start = round_page(start);

	for (va = start; va < end; va += PAGE_SIZE) {
		pa = pmap_kextract(va);
		m = PHYS_TO_VM_PAGE(pa);

		vmd = vm_pagequeue_domain(m);
		vm_domain_free_lock(vmd);
		vm_phys_free_pages(m, 0);
		vm_domain_free_unlock(vmd);

		vm_domain_freecnt_inc(vmd, 1);
		vm_cnt.v_page_count++;
	}
	pmap_remove(kernel_pmap, start, end);
	(void)vmem_add(kernel_arena, start, end - start, M_WAITOK);
#endif
}

/*
 * Allow userspace to directly trigger the VM drain routine for testing
 * purposes.
 */
static int
debug_vm_lowmem(SYSCTL_HANDLER_ARGS)
{
	int error, i;

	i = 0;
	error = sysctl_handle_int(oidp, &i, 0, req);
	if (error != 0)
		return (error);
	if ((i & ~(VM_LOW_KMEM | VM_LOW_PAGES)) != 0)
		return (EINVAL);
	if (i != 0)
		EVENTHANDLER_INVOKE(vm_lowmem, i);
	return (0);
}

SYSCTL_PROC(_debug, OID_AUTO, vm_lowmem,
    CTLTYPE_INT | CTLFLAG_MPSAFE | CTLFLAG_RW, 0, 0, debug_vm_lowmem, "I",
    "set to trigger vm_lowmem event with given flags");

static int
debug_uma_reclaim(SYSCTL_HANDLER_ARGS)
{
	int error, i;

	i = 0;
	error = sysctl_handle_int(oidp, &i, 0, req);
	if (error != 0)
		return (error);
	if (i != 0)
		uma_reclaim();
	return (0);
}

SYSCTL_PROC(_debug, OID_AUTO, uma_reclaim,
    CTLTYPE_INT | CTLFLAG_MPSAFE | CTLFLAG_RW, 0, 0, debug_uma_reclaim, "I",
    "set to generate request to reclaim uma caches");