/* $NetBSD: uvm_km.c,v 1.162 2022/08/06 05:55:37 chs Exp $ */

/*
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * Copyright (c) 1991, 1993, The Regents of the University of California.
 *
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vm_kern.c   8.3 (Berkeley) 1/12/94
 * from: Id: uvm_km.c,v 1.1.2.14 1998/02/06 05:19:27 chs Exp
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 * uvm_km.c: handle kernel memory allocation and management
 */

/*
 * overview of kernel memory management:
 *
 * the kernel virtual address space is mapped by "kernel_map."  kernel_map
 * starts at VM_MIN_KERNEL_ADDRESS and goes to VM_MAX_KERNEL_ADDRESS.
 * note that VM_MIN_KERNEL_ADDRESS is equal to vm_map_min(kernel_map).
 *
 * the kernel_map has several "submaps."  submaps can only appear in
 * the kernel_map (user processes can't use them).  submaps "take over"
 * the management of a sub-range of the kernel's address space.  submaps
 * are typically allocated at boot time and are never released.  kernel
 * virtual address space that is mapped by a submap is locked by the
 * submap's lock -- not the kernel_map's lock.
 *
 * thus, the useful feature of submaps is that they allow us to break
 * up the locking and protection of the kernel address space into smaller
 * chunks.
 *
 * the vm system has several standard kernel submaps/arenas, including:
 *   kmem_arena => used for kmem/pool (memoryallocators(9))
 *   pager_map => used to map "buf" structures into kernel space
 *   exec_map => used during exec to handle exec args
 *   etc...
 *
 * The kmem_arena is a "special submap", as it lives in a fixed map entry
 * within the kernel_map and is controlled by vmem(9).
 *
 * the kernel allocates its private memory out of special uvm_objects whose
 * reference count is set to UVM_OBJ_KERN (thus indicating that the objects
 * are "special" and never die).  all kernel objects should be thought of
 * as large, fixed-sized, sparsely populated uvm_objects.  each kernel
 * object is equal to the size of kernel virtual address space (i.e. the
 * value "VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS").
 *
 * note that just because a kernel object spans the entire kernel virtual
 * address space doesn't mean that it has to be mapped into the entire space.
 * large chunks of a kernel object's space go unused either because
 * that area of kernel VM is unmapped, or there is some other type of
 * object mapped into that range (e.g. a vnode).  for submaps' kernel
 * objects, the only parts of the object that can ever be populated are
 * the offsets managed by the submap.
 *
 * note that the "offset" in a kernel object is always the kernel virtual
 * address minus VM_MIN_KERNEL_ADDRESS (aka vm_map_min(kernel_map)).
 * example:
 *   suppose VM_MIN_KERNEL_ADDRESS is 0xf8000000 and the kernel does a
 *   uvm_km_alloc(kernel_map, PAGE_SIZE) [allocate 1 wired down page in the
 *   kernel map].  if uvm_km_alloc returns virtual address 0xf8235000,
 *   then that means that the page at offset 0x235000 in kernel_object is
 *   mapped at 0xf8235000.
 *
 * kernel objects have one other special property: when the kernel virtual
 * memory mapping them is unmapped, the backing memory in the object is
 * freed right away.  this is done with the uvm_km_pgremove() function.
 * this has to be done because there is no backing store for kernel pages
 * and no need to save them after they are no longer referenced.
 *
 * Generic arenas:
 *
 * kmem_arena:
 *	Main arena controlling the kernel KVA used by other arenas.
 *
 * kmem_va_arena:
 *	Implements quantum caching in order to speed up allocations and
 *	reduce fragmentation.  Used by the kmem(9) subsystem and by pool(9),
 *	unless a pool is created with a custom meta-data allocator.
 *
 * Arenas for meta-data allocations are used by vmem(9) and pool(9).
 * These arenas cannot use the quantum cache.  However, kmem_va_meta_arena
 * compensates for this by importing larger chunks from kmem_arena.
 *
 * kmem_va_meta_arena:
 *	Space for meta-data.
 *
 * kmem_meta_arena:
 *	Imports from kmem_va_meta_arena.  Allocations from this arena are
 *	backed with pages.
 *
 * Arena stacking:
 *
 *	kmem_arena
 *		kmem_va_arena
 *		kmem_va_meta_arena
 *			kmem_meta_arena
 *
 * (an illustrative allocation path through these arenas is sketched in the
 * comment that follows)
 */
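
/*
 * Illustrative sketch (editor's addition, not part of the build): how a
 * typical allocation travels through the arenas described above.  A kmem(9)
 * request obtains KVA from kmem_va_arena (the quantum cache), which in turn
 * imports from kmem_arena, and that KVA is then backed with physical pages;
 * "len" and "p" are hypothetical names.
 *
 *	void *p;
 *
 *	p = kmem_alloc(len, KM_SLEEP);	(KVA from kmem_va_arena, wired pages behind it)
 *	...use the buffer...
 *	kmem_free(p, len);		(pages freed, KVA returned to the arena)
 */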

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_km.c,v 1.162 2022/08/06 05:55:37 chs Exp $");

#include "opt_uvmhist.h"

#include "opt_kmempages.h"

#ifndef NKMEMPAGES
#define NKMEMPAGES 0
#endif

/*
 * Defaults for the lower and upper bounds of the kmem_arena page count.
 * Can be overridden by kernel config options.
 */
#ifndef NKMEMPAGES_MIN
#define NKMEMPAGES_MIN NKMEMPAGES_MIN_DEFAULT
#endif

#ifndef NKMEMPAGES_MAX
#define NKMEMPAGES_MAX NKMEMPAGES_MAX_DEFAULT
#endif


#include <sys/param.h>
#include <sys/systm.h>
#include <sys/atomic.h>
#include <sys/proc.h>
#include <sys/pool.h>
#include <sys/vmem.h>
#include <sys/vmem_impl.h>
#include <sys/kmem.h>
#include <sys/msan.h>

#include <uvm/uvm.h>

/*
 * global data structures
 */

struct vm_map *kernel_map = NULL;

/*
 * local data structures
 */

static struct vm_map kernel_map_store;
static struct vm_map_entry kernel_image_mapent_store;
static struct vm_map_entry kernel_kmem_mapent_store;

int nkmempages = 0;
vaddr_t kmembase;
vsize_t kmemsize;

static struct vmem kmem_arena_store;
vmem_t *kmem_arena = NULL;
static struct vmem kmem_va_arena_store;
vmem_t *kmem_va_arena;

/*
 * kmeminit_nkmempages: calculate the size of kmem_arena.
 */
void
kmeminit_nkmempages(void)
{
        int npages;

        if (nkmempages != 0) {
                /*
                 * It's already been set (by us being here before);
                 * bail out now.
                 */
                return;
        }

#if defined(NKMEMPAGES_MAX_UNLIMITED) && !defined(KMSAN)
        npages = physmem;
#else

#if defined(KMSAN)
        npages = (physmem / 4);
#elif defined(PMAP_MAP_POOLPAGE)
        npages = (physmem / 4);
#else
        npages = (physmem / 3) * 2;
#endif /* defined(PMAP_MAP_POOLPAGE) */

#if !defined(NKMEMPAGES_MAX_UNLIMITED)
        if (npages > NKMEMPAGES_MAX)
                npages = NKMEMPAGES_MAX;
#endif

#endif

        if (npages < NKMEMPAGES_MIN)
                npages = NKMEMPAGES_MIN;

        nkmempages = npages;
}
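
/*
 * Worked example (editor's addition; the numbers are illustrative): on a
 * machine with 1 GiB of RAM and 4 KiB pages, physmem is 262144 pages.
 * With PMAP_MAP_POOLPAGE defined, the calculation above picks
 * physmem / 4 = 65536 pages, i.e. a 256 MiB kmem_arena, which is then
 * clamped to the NKMEMPAGES_MIN/NKMEMPAGES_MAX bounds.  Without
 * PMAP_MAP_POOLPAGE it would be (physmem / 3) * 2 = 174762 pages
 * (about 683 MiB) before clamping.
 */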

/*
 * uvm_km_bootstrap: init kernel maps and objects to reflect reality (i.e.
 * KVM already allocated for text, data, bss, and static data structures).
 *
 * => KVM is defined by VM_MIN_KERNEL_ADDRESS/VM_MAX_KERNEL_ADDRESS.
 *    we assume that [vmin -> start] has already been allocated and that
 *    "end" is the end.
 */

void
uvm_km_bootstrap(vaddr_t start, vaddr_t end)
{
        bool kmem_arena_small;
        vaddr_t base = VM_MIN_KERNEL_ADDRESS;
        struct uvm_map_args args;
        int error;

        UVMHIST_FUNC(__func__);
        UVMHIST_CALLARGS(maphist, "start=%#jx end=%#jx", start, end, 0,0);

        kmeminit_nkmempages();
        kmemsize = (vsize_t)nkmempages * PAGE_SIZE;
        kmem_arena_small = kmemsize < 64 * 1024 * 1024;

        UVMHIST_LOG(maphist, "kmemsize=%#jx", kmemsize, 0,0,0);

        /*
         * next, init kernel memory objects.
         */

        /* kernel_object: for pageable anonymous kernel memory */
        uvm_kernel_object = uao_create(VM_MAX_KERNEL_ADDRESS -
            VM_MIN_KERNEL_ADDRESS, UAO_FLAG_KERNOBJ);

        /*
         * init the map and reserve any space that might already
         * have been allocated kernel space before installing.
         */

        uvm_map_setup(&kernel_map_store, base, end, VM_MAP_PAGEABLE);
        kernel_map_store.pmap = pmap_kernel();
        if (start != base) {
                error = uvm_map_prepare(&kernel_map_store,
                    base, start - base,
                    NULL, UVM_UNKNOWN_OFFSET, 0,
                    UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_NONE,
                    UVM_ADV_RANDOM, UVM_FLAG_FIXED), &args);
                if (!error) {
                        kernel_image_mapent_store.flags =
                            UVM_MAP_KERNEL | UVM_MAP_STATIC | UVM_MAP_NOMERGE;
                        error = uvm_map_enter(&kernel_map_store, &args,
                            &kernel_image_mapent_store);
                }

                if (error)
                        panic(
                            "uvm_km_bootstrap: could not reserve space for kernel");

                kmembase = args.uma_start + args.uma_size;
        } else {
                kmembase = base;
        }

        error = uvm_map_prepare(&kernel_map_store,
            kmembase, kmemsize,
            NULL, UVM_UNKNOWN_OFFSET, 0,
            UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_NONE,
            UVM_ADV_RANDOM, UVM_FLAG_FIXED), &args);
        if (!error) {
                kernel_kmem_mapent_store.flags =
                    UVM_MAP_KERNEL | UVM_MAP_STATIC | UVM_MAP_NOMERGE;
                error = uvm_map_enter(&kernel_map_store, &args,
                    &kernel_kmem_mapent_store);
        }

        if (error)
                panic("uvm_km_bootstrap: could not reserve kernel kmem");

        /*
         * install!
         */

        kernel_map = &kernel_map_store;

        pool_subsystem_init();

        kmem_arena = vmem_init(&kmem_arena_store, "kmem",
            kmembase, kmemsize, PAGE_SIZE, NULL, NULL, NULL,
            0, VM_NOSLEEP | VM_BOOTSTRAP, IPL_VM);
#ifdef PMAP_GROWKERNEL
        /*
         * kmem_arena VA allocations happen independently of uvm_map.
         * grow kernel to accommodate the kmem_arena.
         */
        if (uvm_maxkaddr < kmembase + kmemsize) {
                uvm_maxkaddr = pmap_growkernel(kmembase + kmemsize);
                KASSERTMSG(uvm_maxkaddr >= kmembase + kmemsize,
                    "%#"PRIxVADDR" %#"PRIxVADDR" %#"PRIxVSIZE,
                    uvm_maxkaddr, kmembase, kmemsize);
        }
#endif

        vmem_subsystem_init(kmem_arena);

        UVMHIST_LOG(maphist, "kmem vmem created (base=%#jx, size=%#jx)",
            kmembase, kmemsize, 0,0);

        kmem_va_arena = vmem_init(&kmem_va_arena_store, "kva",
            0, 0, PAGE_SIZE, vmem_alloc, vmem_free, kmem_arena,
            (kmem_arena_small ? 4 : VMEM_QCACHE_IDX_MAX) * PAGE_SIZE,
            VM_NOSLEEP, IPL_VM);

        UVMHIST_LOG(maphist, "<- done", 0,0,0,0);
}

/*
 * uvm_km_init: init the kernel maps virtual memory caches
 * and start the pool/kmem allocator.
 */
void
uvm_km_init(void)
{
        kmem_init();
}

/*
 * uvm_km_suballoc: allocate a submap in the kernel map.  once a submap
 * is allocated all references to that area of VM must go through it.  this
 * allows the locking of VAs in kernel_map to be broken up into regions.
 *
 * => if `fixed' is true, *vmin specifies where the region described
 *    by the submap must start
 * => if submap is non-NULL we use that as the submap, otherwise we
 *    alloc a new map
 */

struct vm_map *
uvm_km_suballoc(struct vm_map *map, vaddr_t *vmin /* IN/OUT */,
    vaddr_t *vmax /* OUT */, vsize_t size, int flags, bool fixed,
    struct vm_map *submap)
{
        int mapflags = UVM_FLAG_NOMERGE | (fixed ? UVM_FLAG_FIXED : 0);
        UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);

        KASSERT(vm_map_pmap(map) == pmap_kernel());

        size = round_page(size);        /* round up to pagesize */

        /*
         * first allocate a blank spot in the parent map
         */

        if (uvm_map(map, vmin, size, NULL, UVM_UNKNOWN_OFFSET, 0,
            UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_NONE,
            UVM_ADV_RANDOM, mapflags)) != 0) {
                panic("%s: unable to allocate space in parent map", __func__);
        }

        /*
         * set VM bounds (vmin is filled in by uvm_map)
         */

        *vmax = *vmin + size;

        /*
         * add references to pmap and create or init the submap
         */

        pmap_reference(vm_map_pmap(map));
        if (submap == NULL) {
                submap = kmem_alloc(sizeof(*submap), KM_SLEEP);
        }
        uvm_map_setup(submap, *vmin, *vmax, flags);
        submap->pmap = vm_map_pmap(map);

        /*
         * now let uvm_map_submap plug it in...
         */

        if (uvm_map_submap(map, *vmin, *vmax, submap) != 0)
                panic("uvm_km_suballoc: submap allocation failed");

        return(submap);
}
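
/*
 * Illustrative sketch (editor's addition): machine-dependent boot code
 * typically carves fixed-purpose submaps out of kernel_map with the
 * function above, e.g. something like the following for exec argument
 * space ("minaddr" and "maxaddr" are local variables in that code):
 *
 *	vaddr_t minaddr = 0, maxaddr;
 *
 *	exec_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
 *	    16 * NCARGS, VM_MAP_PAGEABLE, false, NULL);
 */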

/*
 * uvm_km_pgremove: remove pages from a kernel uvm_object and KVA.
 */

void
uvm_km_pgremove(vaddr_t startva, vaddr_t endva)
{
        struct uvm_object * const uobj = uvm_kernel_object;
        const voff_t start = startva - vm_map_min(kernel_map);
        const voff_t end = endva - vm_map_min(kernel_map);
        struct vm_page *pg;
        voff_t curoff, nextoff;
        int swpgonlydelta = 0;
        UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);

        KASSERT(VM_MIN_KERNEL_ADDRESS <= startva);
        KASSERT(startva < endva);
        KASSERT(endva <= VM_MAX_KERNEL_ADDRESS);

        rw_enter(uobj->vmobjlock, RW_WRITER);
        pmap_remove(pmap_kernel(), startva, endva);
        for (curoff = start; curoff < end; curoff = nextoff) {
                nextoff = curoff + PAGE_SIZE;
                pg = uvm_pagelookup(uobj, curoff);
                if (pg != NULL && pg->flags & PG_BUSY) {
                        uvm_pagewait(pg, uobj->vmobjlock, "km_pgrm");
                        rw_enter(uobj->vmobjlock, RW_WRITER);
                        nextoff = curoff;
                        continue;
                }

                /*
                 * free the swap slot, then the page.
                 */

                if (pg == NULL &&
                    uao_find_swslot(uobj, curoff >> PAGE_SHIFT) > 0) {
                        swpgonlydelta++;
                }
                uao_dropswap(uobj, curoff >> PAGE_SHIFT);
                if (pg != NULL) {
                        uvm_pagefree(pg);
                }
        }
        rw_exit(uobj->vmobjlock);

        if (swpgonlydelta > 0) {
                KASSERT(uvmexp.swpgonly >= swpgonlydelta);
                atomic_add_int(&uvmexp.swpgonly, -swpgonlydelta);
        }
}


/*
 * uvm_km_pgremove_intrsafe: like uvm_km_pgremove(), but for non-object-backed
 * regions.
 *
 * => when you unmap a part of anonymous kernel memory you want to toss
 *    the pages right away.  (this is called from uvm_unmap_...).
 * => none of the pages will ever be busy, and none of them will ever
 *    be on the active or inactive queues (because they have no object).
 */

void
uvm_km_pgremove_intrsafe(struct vm_map *map, vaddr_t start, vaddr_t end)
{
#define __PGRM_BATCH 16
        struct vm_page *pg;
        paddr_t pa[__PGRM_BATCH];
        int npgrm, i;
        vaddr_t va, batch_vastart;

        UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);

        KASSERT(VM_MAP_IS_KERNEL(map));
        KASSERTMSG(vm_map_min(map) <= start,
            "vm_map_min(map) [%#"PRIxVADDR"] <= start [%#"PRIxVADDR"]"
            " (size=%#"PRIxVSIZE")",
            vm_map_min(map), start, end - start);
        KASSERT(start < end);
        KASSERT(end <= vm_map_max(map));

        for (va = start; va < end;) {
                batch_vastart = va;
                /* create a batch of at most __PGRM_BATCH pages to free */
                for (i = 0;
                     i < __PGRM_BATCH && va < end;
                     va += PAGE_SIZE) {
                        if (!pmap_extract(pmap_kernel(), va, &pa[i])) {
                                continue;
                        }
                        i++;
                }
                npgrm = i;
                /* now remove the mappings */
                pmap_kremove(batch_vastart, va - batch_vastart);
                /* and free the pages */
                for (i = 0; i < npgrm; i++) {
                        pg = PHYS_TO_VM_PAGE(pa[i]);
                        KASSERT(pg);
                        KASSERT(pg->uobject == NULL && pg->uanon == NULL);
                        KASSERT((pg->flags & PG_BUSY) == 0);
                        uvm_pagefree(pg);
                }
        }
#undef __PGRM_BATCH
}

#if defined(DEBUG)
void
uvm_km_check_empty(struct vm_map *map, vaddr_t start, vaddr_t end)
{
        vaddr_t va;
        UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);

        KDASSERT(VM_MAP_IS_KERNEL(map));
        KDASSERT(vm_map_min(map) <= start);
        KDASSERT(start < end);
        KDASSERT(end <= vm_map_max(map));

        for (va = start; va < end; va += PAGE_SIZE) {
                paddr_t pa;

                if (pmap_extract(pmap_kernel(), va, &pa)) {
                        panic("uvm_km_check_empty: va %p has pa %#llx",
                            (void *)va, (long long)pa);
                }
                /*
                 * kernel_object should not have pages for the corresponding
                 * region.  check it.
                 *
                 * why trylock?  because:
                 * - caller might not want to block.
                 * - we can recurse when allocating radix_node for
                 *   kernel_object.
                 */
                if (rw_tryenter(uvm_kernel_object->vmobjlock, RW_READER)) {
                        struct vm_page *pg;

                        pg = uvm_pagelookup(uvm_kernel_object,
                            va - vm_map_min(kernel_map));
                        rw_exit(uvm_kernel_object->vmobjlock);
                        if (pg) {
                                panic("uvm_km_check_empty: "
                                    "has page hashed at %p",
                                    (const void *)va);
                        }
                }
        }
}
#endif /* defined(DEBUG) */

/*
 * uvm_km_alloc: allocate an area of kernel memory.
 *
 * => NOTE: we can return 0 even if we can wait if there is not enough
 *    free VM space in the map... caller should be prepared to handle
 *    this case.
 * => we return KVA of memory allocated
 */

vaddr_t
uvm_km_alloc(struct vm_map *map, vsize_t size, vsize_t align, uvm_flag_t flags)
{
        vaddr_t kva, loopva;
        vaddr_t offset;
        vsize_t loopsize;
        struct vm_page *pg;
        struct uvm_object *obj;
        int pgaflags;
        vm_prot_t prot, vaprot;
        UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);

        KASSERT(vm_map_pmap(map) == pmap_kernel());
        KASSERT((flags & UVM_KMF_TYPEMASK) == UVM_KMF_WIRED ||
            (flags & UVM_KMF_TYPEMASK) == UVM_KMF_PAGEABLE ||
            (flags & UVM_KMF_TYPEMASK) == UVM_KMF_VAONLY);
        KASSERT((flags & UVM_KMF_VAONLY) != 0 || (flags & UVM_KMF_COLORMATCH) == 0);
        KASSERT((flags & UVM_KMF_COLORMATCH) == 0 || (flags & UVM_KMF_VAONLY) != 0);

        /*
         * setup for call
         */

        kva = vm_map_min(map);  /* hint */
        size = round_page(size);
        obj = (flags & UVM_KMF_PAGEABLE) ? uvm_kernel_object : NULL;
        UVMHIST_LOG(maphist," (map=%#jx, obj=%#jx, size=%#jx, flags=%#jx)",
            (uintptr_t)map, (uintptr_t)obj, size, flags);

        /*
         * allocate some virtual space
         */

        vaprot = (flags & UVM_KMF_EXEC) ? UVM_PROT_ALL : UVM_PROT_RW;
        if (__predict_false(uvm_map(map, &kva, size, obj, UVM_UNKNOWN_OFFSET,
            align, UVM_MAPFLAG(vaprot, UVM_PROT_ALL, UVM_INH_NONE,
            UVM_ADV_RANDOM,
            (flags & (UVM_KMF_TRYLOCK | UVM_KMF_NOWAIT | UVM_KMF_WAITVA
            | UVM_KMF_COLORMATCH)))) != 0)) {
                UVMHIST_LOG(maphist, "<- done (no VM)",0,0,0,0);
                return(0);
        }

        /*
         * if all we wanted was VA, return now
         */

        if (flags & (UVM_KMF_VAONLY | UVM_KMF_PAGEABLE)) {
                UVMHIST_LOG(maphist,"<- done valloc (kva=%#jx)", kva,0,0,0);
                return(kva);
        }

        /*
         * recover object offset from virtual address
         */

        offset = kva - vm_map_min(kernel_map);
        UVMHIST_LOG(maphist, " kva=%#jx, offset=%#jx", kva, offset,0,0);

        /*
         * now allocate and map in the memory... note that we are the only ones
         * who should ever get a handle on this area of VM.
         */

        loopva = kva;
        loopsize = size;

        pgaflags = UVM_FLAG_COLORMATCH;
        if (flags & UVM_KMF_NOWAIT)
                pgaflags |= UVM_PGA_USERESERVE;
        if (flags & UVM_KMF_ZERO)
                pgaflags |= UVM_PGA_ZERO;
        prot = VM_PROT_READ | VM_PROT_WRITE;
        if (flags & UVM_KMF_EXEC)
                prot |= VM_PROT_EXECUTE;
        while (loopsize) {
                KASSERTMSG(!pmap_extract(pmap_kernel(), loopva, NULL),
                    "loopva=%#"PRIxVADDR, loopva);

                pg = uvm_pagealloc_strat(NULL, offset, NULL, pgaflags,
#ifdef UVM_KM_VMFREELIST
                    UVM_PGA_STRAT_ONLY, UVM_KM_VMFREELIST
#else
                    UVM_PGA_STRAT_NORMAL, 0
#endif
                    );

                /*
                 * out of memory?
                 */

                if (__predict_false(pg == NULL)) {
                        if ((flags & UVM_KMF_NOWAIT) ||
                            ((flags & UVM_KMF_CANFAIL) && !uvm_reclaimable())) {
                                /* free everything! */
                                uvm_km_free(map, kva, size,
                                    flags & UVM_KMF_TYPEMASK);
                                return (0);
                        } else {
                                uvm_wait("km_getwait2");        /* sleep here */
                                continue;
                        }
                }

                pg->flags &= ~PG_BUSY;  /* new page */
                UVM_PAGE_OWN(pg, NULL);

                /*
                 * map it in
                 */

                pmap_kenter_pa(loopva, VM_PAGE_TO_PHYS(pg),
                    prot, PMAP_KMPAGE);
                loopva += PAGE_SIZE;
                offset += PAGE_SIZE;
                loopsize -= PAGE_SIZE;
        }

        pmap_update(pmap_kernel());

        if ((flags & UVM_KMF_ZERO) == 0) {
                kmsan_orig((void *)kva, size, KMSAN_TYPE_UVM, __RET_ADDR);
                kmsan_mark((void *)kva, size, KMSAN_STATE_UNINIT);
        }

        UVMHIST_LOG(maphist,"<- done (kva=%#jx)", kva,0,0,0);
        return(kva);
}
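
/*
 * Illustrative sketch (editor's addition, not part of the build): a typical
 * wired allocation and release through the interfaces above; "size" and
 * "va" are hypothetical names, and size is assumed page-rounded.
 *
 *	vaddr_t va;
 *
 *	va = uvm_km_alloc(kernel_map, size, 0,
 *	    UVM_KMF_WIRED | UVM_KMF_ZERO | UVM_KMF_CANFAIL);
 *	if (va == 0)
 *		return ENOMEM;
 *	...use the memory at va...
 *	uvm_km_free(kernel_map, va, size, UVM_KMF_WIRED);
 */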

/*
 * uvm_km_protect: change the protection of an allocated area
 */

int
uvm_km_protect(struct vm_map *map, vaddr_t addr, vsize_t size, vm_prot_t prot)
{
        return uvm_map_protect(map, addr, addr + round_page(size), prot, false);
}
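
/*
 * Illustrative sketch (editor's addition): a caller that has populated a
 * wired allocation with executable text can drop write permission
 * afterwards; "map", "text_va" and "text_size" are hypothetical names:
 *
 *	error = uvm_km_protect(map, text_va, text_size,
 *	    VM_PROT_READ | VM_PROT_EXECUTE);
 */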

/*
 * uvm_km_free: free an area of kernel memory
 */

void
uvm_km_free(struct vm_map *map, vaddr_t addr, vsize_t size, uvm_flag_t flags)
{
        UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);

        KASSERT((flags & UVM_KMF_TYPEMASK) == UVM_KMF_WIRED ||
            (flags & UVM_KMF_TYPEMASK) == UVM_KMF_PAGEABLE ||
            (flags & UVM_KMF_TYPEMASK) == UVM_KMF_VAONLY);
        KASSERT((addr & PAGE_MASK) == 0);
        KASSERT(vm_map_pmap(map) == pmap_kernel());

        size = round_page(size);

        if (flags & UVM_KMF_PAGEABLE) {
                uvm_km_pgremove(addr, addr + size);
        } else if (flags & UVM_KMF_WIRED) {
                /*
                 * Note: uvm_km_pgremove_intrsafe() relies on the mappings
                 * still being present (it uses pmap_extract() to find the
                 * pages), so the KVA is only unmapped afterwards.  See the
                 * comment below about KVA visibility.
                 */
                uvm_km_pgremove_intrsafe(map, addr, addr + size);
        }

        /*
         * Note: uvm_unmap_remove() calls pmap_update() for us, before
         * KVA becomes globally available.
         */

        uvm_unmap1(map, addr, addr + size, UVM_FLAG_VAONLY);
}

/* Sanity; must specify both or none. */
#if (defined(PMAP_MAP_POOLPAGE) || defined(PMAP_UNMAP_POOLPAGE)) && \
    (!defined(PMAP_MAP_POOLPAGE) || !defined(PMAP_UNMAP_POOLPAGE))
#error Must specify MAP and UNMAP together.
#endif

#if defined(PMAP_ALLOC_POOLPAGE) && \
    !defined(PMAP_MAP_POOLPAGE) && !defined(PMAP_UNMAP_POOLPAGE)
#error Must specify ALLOC with MAP and UNMAP
#endif

int
uvm_km_kmem_alloc(vmem_t *vm, vmem_size_t size, vm_flag_t flags,
    vmem_addr_t *addr)
{
        struct vm_page *pg;
        vmem_addr_t va;
        int rc;
        vaddr_t loopva;
        vsize_t loopsize;

        size = round_page(size);

#if defined(PMAP_MAP_POOLPAGE)
        if (size == PAGE_SIZE) {
again:
#ifdef PMAP_ALLOC_POOLPAGE
                pg = PMAP_ALLOC_POOLPAGE((flags & VM_SLEEP) ?
                    0 : UVM_PGA_USERESERVE);
#else
                pg = uvm_pagealloc(NULL, 0, NULL,
                    (flags & VM_SLEEP) ? 0 : UVM_PGA_USERESERVE);
#endif /* PMAP_ALLOC_POOLPAGE */
                if (__predict_false(pg == NULL)) {
                        if (flags & VM_SLEEP) {
                                uvm_wait("plpg");
                                goto again;
                        }
                        return ENOMEM;
                }
                va = PMAP_MAP_POOLPAGE(VM_PAGE_TO_PHYS(pg));
                KASSERT(va != 0);
                *addr = va;
                return 0;
        }
#endif /* PMAP_MAP_POOLPAGE */

        rc = vmem_alloc(vm, size, flags, &va);
        if (rc != 0)
                return rc;

#ifdef PMAP_GROWKERNEL
        /*
         * These VA allocations happen independently of uvm_map
         * so this allocation must not extend beyond the current limit.
         */
        KASSERTMSG(uvm_maxkaddr >= va + size,
            "%#"PRIxVADDR" %#"PRIxPTR" %#zx",
            uvm_maxkaddr, va, size);
#endif

        loopva = va;
        loopsize = size;

        while (loopsize) {
                paddr_t pa __diagused;
                KASSERTMSG(!pmap_extract(pmap_kernel(), loopva, &pa),
                    "loopva=%#"PRIxVADDR" loopsize=%#"PRIxVSIZE
                    " pa=%#"PRIxPADDR" vmem=%p",
                    loopva, loopsize, pa, vm);

                pg = uvm_pagealloc(NULL, loopva, NULL,
                    UVM_FLAG_COLORMATCH
                    | ((flags & VM_SLEEP) ? 0 : UVM_PGA_USERESERVE));
                if (__predict_false(pg == NULL)) {
                        if (flags & VM_SLEEP) {
                                uvm_wait("plpg");
                                continue;
                        } else {
                                uvm_km_pgremove_intrsafe(kernel_map, va,
                                    va + size);
                                vmem_free(vm, va, size);
                                return ENOMEM;
                        }
                }

                pg->flags &= ~PG_BUSY;  /* new page */
                UVM_PAGE_OWN(pg, NULL);
                pmap_kenter_pa(loopva, VM_PAGE_TO_PHYS(pg),
                    VM_PROT_READ|VM_PROT_WRITE, PMAP_KMPAGE);

                loopva += PAGE_SIZE;
                loopsize -= PAGE_SIZE;
        }
        pmap_update(pmap_kernel());

        *addr = va;

        return 0;
}
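
/*
 * Illustrative sketch (editor's addition): this is the backing allocator
 * used by kmem(9)/pool(9) for page-sized and larger requests; a caller
 * roughly does the following ("size" and "va" are hypothetical names):
 *
 *	vmem_addr_t va;
 *	int error;
 *
 *	error = uvm_km_kmem_alloc(kmem_va_arena, round_page(size),
 *	    VM_SLEEP | VM_INSTANTFIT, &va);
 *	if (error == 0) {
 *		...use (void *)va...
 *		uvm_km_kmem_free(kmem_va_arena, va, round_page(size));
 *	}
 */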

void
uvm_km_kmem_free(vmem_t *vm, vmem_addr_t addr, size_t size)
{

        size = round_page(size);
#if defined(PMAP_UNMAP_POOLPAGE)
        if (size == PAGE_SIZE) {
                paddr_t pa;

                pa = PMAP_UNMAP_POOLPAGE(addr);
                uvm_pagefree(PHYS_TO_VM_PAGE(pa));
                return;
        }
#endif /* PMAP_UNMAP_POOLPAGE */
        uvm_km_pgremove_intrsafe(kernel_map, addr, addr + size);
        pmap_update(pmap_kernel());

        vmem_free(vm, addr, size);
}

bool
uvm_km_va_starved_p(void)
{
        vmem_size_t total;
        vmem_size_t free;

        if (kmem_arena == NULL)
                return false;

        total = vmem_size(kmem_arena, VMEM_ALLOC|VMEM_FREE);
        free = vmem_size(kmem_arena, VMEM_FREE);

        return (free < (total / 10));
}
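
/*
 * Illustrative sketch (editor's addition): the predicate above reports
 * whether less than 10% of kmem_arena is free.  A consumer such as the
 * page daemon can use it to decide whether to keep draining caches even
 * when physical pages are not scarce, e.g.:
 *
 *	if (uvm_km_va_starved_p()) {
 *		...keep reclaiming so cached KVA returns to kmem_arena...
 *	}
 */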