FreeBSD/Linux Kernel Cross Reference
sys/uvm/uvm_km.c
1 /* $NetBSD: uvm_km.c,v 1.92 2006/11/01 10:18:27 yamt Exp $ */
2
3 /*
4 * Copyright (c) 1997 Charles D. Cranor and Washington University.
5 * Copyright (c) 1991, 1993, The Regents of the University of California.
6 *
7 * All rights reserved.
8 *
9 * This code is derived from software contributed to Berkeley by
10 * The Mach Operating System project at Carnegie-Mellon University.
11 *
12 * Redistribution and use in source and binary forms, with or without
13 * modification, are permitted provided that the following conditions
14 * are met:
15 * 1. Redistributions of source code must retain the above copyright
16 * notice, this list of conditions and the following disclaimer.
17 * 2. Redistributions in binary form must reproduce the above copyright
18 * notice, this list of conditions and the following disclaimer in the
19 * documentation and/or other materials provided with the distribution.
20 * 3. All advertising materials mentioning features or use of this software
21 * must display the following acknowledgement:
22 * This product includes software developed by Charles D. Cranor,
23 * Washington University, the University of California, Berkeley and
24 * its contributors.
25 * 4. Neither the name of the University nor the names of its contributors
26 * may be used to endorse or promote products derived from this software
27 * without specific prior written permission.
28 *
29 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
30 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
31 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
32 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
33 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
34 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
35 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
36 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
37 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
38 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
39 * SUCH DAMAGE.
40 *
41 * @(#)vm_kern.c 8.3 (Berkeley) 1/12/94
42 * from: Id: uvm_km.c,v 1.1.2.14 1998/02/06 05:19:27 chs Exp
43 *
44 *
45 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
46 * All rights reserved.
47 *
48 * Permission to use, copy, modify and distribute this software and
49 * its documentation is hereby granted, provided that both the copyright
50 * notice and this permission notice appear in all copies of the
51 * software, derivative works or modified versions, and any portions
52 * thereof, and that both notices appear in supporting documentation.
53 *
54 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
55 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
56 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
57 *
58 * Carnegie Mellon requests users of this software to return to
59 *
60 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
61 * School of Computer Science
62 * Carnegie Mellon University
63 * Pittsburgh PA 15213-3890
64 *
65 * any improvements or extensions that they make and grant Carnegie the
66 * rights to redistribute these changes.
67 */
68
69 /*
70 * uvm_km.c: handle kernel memory allocation and management
71 */
72
73 /*
74 * overview of kernel memory management:
75 *
76 * the kernel virtual address space is mapped by "kernel_map." kernel_map
77 * starts at VM_MIN_KERNEL_ADDRESS and goes to VM_MAX_KERNEL_ADDRESS.
78 * note that VM_MIN_KERNEL_ADDRESS is equal to vm_map_min(kernel_map).
79 *
80 * the kernel_map has several "submaps." submaps can only appear in
81 * the kernel_map (user processes can't use them). submaps "take over"
82 * the management of a sub-range of the kernel's address space. submaps
83 * are typically allocated at boot time and are never released. kernel
84 * virtual address space that is mapped by a submap is locked by the
85 * submap's lock -- not the kernel_map's lock.
86 *
87 * thus, the useful feature of submaps is that they allow us to break
88 * up the locking and protection of the kernel address space into smaller
89 * chunks.
90 *
91 * the vm system has several standard kernel submaps, including:
92 * kmem_map => contains only wired kernel memory for the kernel
93 * malloc. *** access to kmem_map must be protected
94 * by splvm() because we are allowed to call malloc()
95 * at interrupt time ***
96 * mb_map => memory for large mbufs, *** protected by splvm ***
97 * pager_map => used to map "buf" structures into kernel space
98 * exec_map => used during exec to handle exec args
99 * etc...
100 *
101 * the kernel allocates its private memory out of special uvm_objects whose
102 * reference count is set to UVM_OBJ_KERN (thus indicating that the objects
103 * are "special" and never die). all kernel objects should be thought of
104 * as large, fixed-sized, sparsely populated uvm_objects. each kernel
 105 * object is the size of the kernel virtual address space (i.e. the
106 * value "VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS").
107 *
 108 * note that just because a kernel object spans the entire kernel virtual
109 * address space doesn't mean that it has to be mapped into the entire space.
110 * large chunks of a kernel object's space go unused either because
111 * that area of kernel VM is unmapped, or there is some other type of
 112 * object mapped into that range (e.g. a vnode). for a submap's kernel
 113 * object, the only part of the object that can ever be populated is the
 114 * range of offsets that is managed by the submap.
115 *
116 * note that the "offset" in a kernel object is always the kernel virtual
117 * address minus the VM_MIN_KERNEL_ADDRESS (aka vm_map_min(kernel_map)).
118 * example:
119 * suppose VM_MIN_KERNEL_ADDRESS is 0xf8000000 and the kernel does a
120 * uvm_km_alloc(kernel_map, PAGE_SIZE) [allocate 1 wired down page in the
121 * kernel map]. if uvm_km_alloc returns virtual address 0xf8235000,
122 * then that means that the page at offset 0x235000 in kernel_object is
123 * mapped at 0xf8235000.
124 *
 125 * kernel objects have one other special property: when the kernel virtual
126 * memory mapping them is unmapped, the backing memory in the object is
127 * freed right away. this is done with the uvm_km_pgremove() function.
128 * this has to be done because there is no backing store for kernel pages
129 * and no need to save them after they are no longer referenced.
130 */
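/*
 * a minimal sketch of the offset computation described in the example
 * above, written out as a helper.  the name km_kva_to_offset() is
 * hypothetical; the code in this file simply computes
 * "kva - vm_map_min(kernel_map)" inline where it is needed.
 */
#if 0	/* illustrative sketch only, not compiled */
static inline voff_t
km_kva_to_offset(vaddr_t kva)
{

	/* e.g. 0xf8235000 - 0xf8000000 == 0x235000 */
	return (voff_t)(kva - vm_map_min(kernel_map));
}
#endif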
131
132 #include <sys/cdefs.h>
133 __KERNEL_RCSID(0, "$NetBSD: uvm_km.c,v 1.92 2006/11/01 10:18:27 yamt Exp $");
134
135 #include "opt_uvmhist.h"
136
137 #include <sys/param.h>
138 #include <sys/malloc.h>
139 #include <sys/systm.h>
140 #include <sys/proc.h>
141 #include <sys/pool.h>
142
143 #include <uvm/uvm.h>
144
145 /*
146 * global data structures
147 */
148
149 struct vm_map *kernel_map = NULL;
150
151 /*
152 * local data structues
153 */
154
155 static struct vm_map_kernel kernel_map_store;
156 static struct vm_map_entry kernel_first_mapent_store;
157
158 #if !defined(PMAP_MAP_POOLPAGE)
159
160 /*
161 * kva cache
162 *
163 * XXX maybe it's better to do this at the uvm_map layer.
164 */
165
166 #define KM_VACACHE_SIZE (32 * PAGE_SIZE) /* XXX tune */
167
168 static void *km_vacache_alloc(struct pool *, int);
169 static void km_vacache_free(struct pool *, void *);
170 static void km_vacache_init(struct vm_map *, const char *, size_t);
171
172 /* XXX */
173 #define KM_VACACHE_POOL_TO_MAP(pp) \
174 ((struct vm_map *)((char *)(pp) - \
175 offsetof(struct vm_map_kernel, vmk_vacache)))
176
177 static void *
178 km_vacache_alloc(struct pool *pp, int flags)
179 {
180 vaddr_t va;
181 size_t size;
182 struct vm_map *map;
183 size = pp->pr_alloc->pa_pagesz;
184
185 map = KM_VACACHE_POOL_TO_MAP(pp);
186
187 va = vm_map_min(map); /* hint */
188 if (uvm_map(map, &va, size, NULL, UVM_UNKNOWN_OFFSET, size,
189 UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_NONE,
190 UVM_ADV_RANDOM, UVM_FLAG_QUANTUM |
191 ((flags & PR_WAITOK) ? UVM_FLAG_WAITVA :
192 UVM_FLAG_TRYLOCK | UVM_FLAG_NOWAIT))))
193 return NULL;
194
195 return (void *)va;
196 }
197
198 static void
199 km_vacache_free(struct pool *pp, void *v)
200 {
201 vaddr_t va = (vaddr_t)v;
202 size_t size = pp->pr_alloc->pa_pagesz;
203 struct vm_map *map;
204
205 map = KM_VACACHE_POOL_TO_MAP(pp);
206 uvm_unmap1(map, va, va + size, UVM_FLAG_QUANTUM|UVM_FLAG_VAONLY);
207 }
208
209 /*
210 * km_vacache_init: initialize kva cache.
211 */
212
213 static void
214 km_vacache_init(struct vm_map *map, const char *name, size_t size)
215 {
216 struct vm_map_kernel *vmk;
217 struct pool *pp;
218 struct pool_allocator *pa;
219
220 KASSERT(VM_MAP_IS_KERNEL(map));
221 KASSERT(size < (vm_map_max(map) - vm_map_min(map)) / 2); /* sanity */
222
223 vmk = vm_map_to_kernel(map);
224 pp = &vmk->vmk_vacache;
225 pa = &vmk->vmk_vacache_allocator;
226 memset(pa, 0, sizeof(*pa));
227 pa->pa_alloc = km_vacache_alloc;
228 pa->pa_free = km_vacache_free;
229 pa->pa_pagesz = (unsigned int)size;
230 pa->pa_backingmap = map;
231 pa->pa_backingmapptr = NULL;
232 pool_init(pp, PAGE_SIZE, 0, 0, PR_NOTOUCH | PR_RECURSIVE, name, pa);
233 }
234
235 void
236 uvm_km_vacache_init(struct vm_map *map, const char *name, size_t size)
237 {
238
239 map->flags |= VM_MAP_VACACHE;
240 if (size == 0)
241 size = KM_VACACHE_SIZE;
242 km_vacache_init(map, name, size);
243 }
244
245 #else /* !defined(PMAP_MAP_POOLPAGE) */
246
247 void
248 uvm_km_vacache_init(struct vm_map *map, const char *name, size_t size)
249 {
250
251 /* nothing */
252 }
253
254 #endif /* !defined(PMAP_MAP_POOLPAGE) */
255
256 void
257 uvm_km_va_drain(struct vm_map *map, uvm_flag_t flags)
258 {
259 struct vm_map_kernel *vmk = vm_map_to_kernel(map);
260 const boolean_t intrsafe = (map->flags & VM_MAP_INTRSAFE) != 0;
261 int s = 0xdeadbeaf; /* XXX: gcc */
262
263 if (intrsafe) {
264 s = splvm();
265 }
266 callback_run_roundrobin(&vmk->vmk_reclaim_callback, NULL);
267 if (intrsafe) {
268 splx(s);
269 }
270 }
271
272 /*
273 * uvm_km_init: init kernel maps and objects to reflect reality (i.e.
274 * KVM already allocated for text, data, bss, and static data structures).
275 *
276 * => KVM is defined by VM_MIN_KERNEL_ADDRESS/VM_MAX_KERNEL_ADDRESS.
277 * we assume that [vmin -> start] has already been allocated and that
278 * "end" is the end.
279 */
280
281 void
282 uvm_km_init(vaddr_t start, vaddr_t end)
283 {
284 vaddr_t base = VM_MIN_KERNEL_ADDRESS;
285
286 /*
287 * next, init kernel memory objects.
288 */
289
290 /* kernel_object: for pageable anonymous kernel memory */
291 uao_init();
292 uvm.kernel_object = uao_create(VM_MAX_KERNEL_ADDRESS -
293 VM_MIN_KERNEL_ADDRESS, UAO_FLAG_KERNOBJ);
294
295 /*
296 * init the map and reserve any space that might already
297 * have been allocated kernel space before installing.
298 */
299
300 uvm_map_setup_kernel(&kernel_map_store, base, end, VM_MAP_PAGEABLE);
301 kernel_map_store.vmk_map.pmap = pmap_kernel();
302 if (start != base) {
303 int error;
304 struct uvm_map_args args;
305
306 error = uvm_map_prepare(&kernel_map_store.vmk_map,
307 base, start - base,
308 NULL, UVM_UNKNOWN_OFFSET, 0,
309 UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_NONE,
310 UVM_ADV_RANDOM, UVM_FLAG_FIXED), &args);
311 if (!error) {
312 kernel_first_mapent_store.flags =
313 UVM_MAP_KERNEL | UVM_MAP_FIRST;
314 error = uvm_map_enter(&kernel_map_store.vmk_map, &args,
315 &kernel_first_mapent_store);
316 }
317
318 if (error)
319 panic(
320 "uvm_km_init: could not reserve space for kernel");
321 }
322
323 /*
324 * install!
325 */
326
327 kernel_map = &kernel_map_store.vmk_map;
328 uvm_km_vacache_init(kernel_map, "kvakernel", 0);
329 }
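/*
 * a minimal sketch of how machine-dependent startup code calls
 * uvm_km_init() exactly once, passing the first unallocated kernel
 * virtual address and the end of usable KVA.  the function name and
 * parameter below are hypothetical.
 */
#if 0	/* illustrative sketch only, not compiled */
static void
example_md_vm_init(vaddr_t first_free_kva)
{

	/* [VM_MIN_KERNEL_ADDRESS .. first_free_kva) is already in use */
	uvm_km_init(first_free_kva, VM_MAX_KERNEL_ADDRESS);
}
#endif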
330
331 /*
332 * uvm_km_suballoc: allocate a submap in the kernel map. once a submap
333 * is allocated all references to that area of VM must go through it. this
334 * allows the locking of VAs in kernel_map to be broken up into regions.
335 *
336 * => if `fixed' is true, *vmin specifies where the region described
337 * by the submap must start
338 * => if submap is non NULL we use that as the submap, otherwise we
339 * alloc a new map
340 */
341
342 struct vm_map *
343 uvm_km_suballoc(struct vm_map *map, vaddr_t *vmin /* IN/OUT */,
344 vaddr_t *vmax /* OUT */, vsize_t size, int flags, boolean_t fixed,
345 struct vm_map_kernel *submap)
346 {
347 int mapflags = UVM_FLAG_NOMERGE | (fixed ? UVM_FLAG_FIXED : 0);
348
349 KASSERT(vm_map_pmap(map) == pmap_kernel());
350
351 size = round_page(size); /* round up to pagesize */
352 size += uvm_mapent_overhead(size, flags);
353
354 /*
355 * first allocate a blank spot in the parent map
356 */
357
358 if (uvm_map(map, vmin, size, NULL, UVM_UNKNOWN_OFFSET, 0,
359 UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_NONE,
360 UVM_ADV_RANDOM, mapflags)) != 0) {
361 panic("uvm_km_suballoc: unable to allocate space in parent map");
362 }
363
364 /*
365 * set VM bounds (vmin is filled in by uvm_map)
366 */
367
368 *vmax = *vmin + size;
369
370 /*
371 * add references to pmap and create or init the submap
372 */
373
374 pmap_reference(vm_map_pmap(map));
375 if (submap == NULL) {
376 submap = malloc(sizeof(*submap), M_VMMAP, M_WAITOK);
377 if (submap == NULL)
378 panic("uvm_km_suballoc: unable to create submap");
379 }
380 uvm_map_setup_kernel(submap, *vmin, *vmax, flags);
381 submap->vmk_map.pmap = vm_map_pmap(map);
382
383 /*
 384 * now let uvm_map_submap plug it in...
385 */
386
387 if (uvm_map_submap(map, *vmin, *vmax, &submap->vmk_map) != 0)
388 panic("uvm_km_suballoc: submap allocation failed");
389
390 return(&submap->vmk_map);
391 }
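/*
 * a minimal sketch of a typical uvm_km_suballoc() caller: carve a
 * submap out of kernel_map at boot time and enable its kva cache.
 * the names example_map, example_map_store and the 32MB size are
 * hypothetical; real callers live in subsystem and MD init code.
 */
#if 0	/* illustrative sketch only, not compiled */
static struct vm_map_kernel example_map_store;

static void
example_submap_init(void)
{
	vaddr_t minaddr = 0, maxaddr;
	struct vm_map *example_map;

	example_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
	    32 * 1024 * 1024, VM_MAP_INTRSAFE, FALSE, &example_map_store);
	uvm_km_vacache_init(example_map, "kvaexample", 0);
}
#endif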
392
393 /*
394 * uvm_km_pgremove: remove pages from a kernel uvm_object.
395 *
396 * => when you unmap a part of anonymous kernel memory you want to toss
397 * the pages right away. (this gets called from uvm_unmap_...).
398 */
399
400 void
401 uvm_km_pgremove(vaddr_t startva, vaddr_t endva)
402 {
403 struct uvm_object * const uobj = uvm.kernel_object;
404 const voff_t start = startva - vm_map_min(kernel_map);
405 const voff_t end = endva - vm_map_min(kernel_map);
406 struct vm_page *pg;
407 voff_t curoff, nextoff;
408 int swpgonlydelta = 0;
409 UVMHIST_FUNC("uvm_km_pgremove"); UVMHIST_CALLED(maphist);
410
411 KASSERT(VM_MIN_KERNEL_ADDRESS <= startva);
412 KASSERT(startva < endva);
413 KASSERT(endva <= VM_MAX_KERNEL_ADDRESS);
414
415 simple_lock(&uobj->vmobjlock);
416
417 for (curoff = start; curoff < end; curoff = nextoff) {
418 nextoff = curoff + PAGE_SIZE;
419 pg = uvm_pagelookup(uobj, curoff);
420 if (pg != NULL && pg->flags & PG_BUSY) {
421 pg->flags |= PG_WANTED;
422 UVM_UNLOCK_AND_WAIT(pg, &uobj->vmobjlock, 0,
423 "km_pgrm", 0);
424 simple_lock(&uobj->vmobjlock);
425 nextoff = curoff;
426 continue;
427 }
428
429 /*
430 * free the swap slot, then the page.
431 */
432
433 if (pg == NULL &&
434 uao_find_swslot(uobj, curoff >> PAGE_SHIFT) > 0) {
435 swpgonlydelta++;
436 }
437 uao_dropswap(uobj, curoff >> PAGE_SHIFT);
438 if (pg != NULL) {
439 uvm_lock_pageq();
440 uvm_pagefree(pg);
441 uvm_unlock_pageq();
442 }
443 }
444 simple_unlock(&uobj->vmobjlock);
445
446 if (swpgonlydelta > 0) {
447 simple_lock(&uvm.swap_data_lock);
448 KASSERT(uvmexp.swpgonly >= swpgonlydelta);
449 uvmexp.swpgonly -= swpgonlydelta;
450 simple_unlock(&uvm.swap_data_lock);
451 }
452 }
453
454
455 /*
 456 * uvm_km_pgremove_intrsafe: like uvm_km_pgremove(), but for non-object-backed
457 * regions.
458 *
459 * => when you unmap a part of anonymous kernel memory you want to toss
460 * the pages right away. (this is called from uvm_unmap_...).
461 * => none of the pages will ever be busy, and none of them will ever
462 * be on the active or inactive queues (because they have no object).
463 */
464
465 void
466 uvm_km_pgremove_intrsafe(vaddr_t start, vaddr_t end)
467 {
468 struct vm_page *pg;
469 paddr_t pa;
470 UVMHIST_FUNC("uvm_km_pgremove_intrsafe"); UVMHIST_CALLED(maphist);
471
472 KASSERT(VM_MIN_KERNEL_ADDRESS <= start);
473 KASSERT(start < end);
474 KASSERT(end <= VM_MAX_KERNEL_ADDRESS);
475
476 for (; start < end; start += PAGE_SIZE) {
477 if (!pmap_extract(pmap_kernel(), start, &pa)) {
478 continue;
479 }
480 pg = PHYS_TO_VM_PAGE(pa);
481 KASSERT(pg);
482 KASSERT(pg->uobject == NULL && pg->uanon == NULL);
483 uvm_pagefree(pg);
484 }
485 }
486
487 #if defined(DEBUG)
488 void
489 uvm_km_check_empty(vaddr_t start, vaddr_t end, boolean_t intrsafe)
490 {
491 vaddr_t va;
492 paddr_t pa;
493
494 KDASSERT(VM_MIN_KERNEL_ADDRESS <= start);
495 KDASSERT(start < end);
496 KDASSERT(end <= VM_MAX_KERNEL_ADDRESS);
497
498 for (va = start; va < end; va += PAGE_SIZE) {
499 if (pmap_extract(pmap_kernel(), va, &pa)) {
500 panic("uvm_km_check_empty: va %p has pa 0x%llx",
501 (void *)va, (long long)pa);
502 }
503 if (!intrsafe) {
504 const struct vm_page *pg;
505
506 simple_lock(&uvm.kernel_object->vmobjlock);
507 pg = uvm_pagelookup(uvm.kernel_object,
508 va - vm_map_min(kernel_map));
509 simple_unlock(&uvm.kernel_object->vmobjlock);
510 if (pg) {
511 panic("uvm_km_check_empty: "
512 "has page hashed at %p", (const void *)va);
513 }
514 }
515 }
516 }
517 #endif /* defined(DEBUG) */
518
519 /*
520 * uvm_km_alloc: allocate an area of kernel memory.
521 *
 522 * => NOTE: we can return 0, even if we can wait, if there is not enough
523 * free VM space in the map... caller should be prepared to handle
524 * this case.
525 * => we return KVA of memory allocated
526 */
527
528 vaddr_t
529 uvm_km_alloc(struct vm_map *map, vsize_t size, vsize_t align, uvm_flag_t flags)
530 {
531 vaddr_t kva, loopva;
532 vaddr_t offset;
533 vsize_t loopsize;
534 struct vm_page *pg;
535 struct uvm_object *obj;
536 int pgaflags;
537 vm_prot_t prot;
538 UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);
539
540 KASSERT(vm_map_pmap(map) == pmap_kernel());
541 KASSERT((flags & UVM_KMF_TYPEMASK) == UVM_KMF_WIRED ||
542 (flags & UVM_KMF_TYPEMASK) == UVM_KMF_PAGEABLE ||
543 (flags & UVM_KMF_TYPEMASK) == UVM_KMF_VAONLY);
544
545 /*
546 * setup for call
547 */
548
549 kva = vm_map_min(map); /* hint */
550 size = round_page(size);
551 obj = (flags & UVM_KMF_PAGEABLE) ? uvm.kernel_object : NULL;
552 UVMHIST_LOG(maphist," (map=0x%x, obj=0x%x, size=0x%x, flags=%d)",
553 map, obj, size, flags);
554
555 /*
556 * allocate some virtual space
557 */
558
559 if (__predict_false(uvm_map(map, &kva, size, obj, UVM_UNKNOWN_OFFSET,
560 align, UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_NONE,
561 UVM_ADV_RANDOM,
562 (flags & (UVM_KMF_TRYLOCK | UVM_KMF_NOWAIT | UVM_KMF_WAITVA))
563 | UVM_FLAG_QUANTUM)) != 0)) {
564 UVMHIST_LOG(maphist, "<- done (no VM)",0,0,0,0);
565 return(0);
566 }
567
568 /*
569 * if all we wanted was VA, return now
570 */
571
572 if (flags & (UVM_KMF_VAONLY | UVM_KMF_PAGEABLE)) {
573 UVMHIST_LOG(maphist,"<- done valloc (kva=0x%x)", kva,0,0,0);
574 return(kva);
575 }
576
577 /*
578 * recover object offset from virtual address
579 */
580
581 offset = kva - vm_map_min(kernel_map);
582 UVMHIST_LOG(maphist, " kva=0x%x, offset=0x%x", kva, offset,0,0);
583
584 /*
585 * now allocate and map in the memory... note that we are the only ones
 586 * who should ever get a handle on this area of VM.
587 */
588
589 loopva = kva;
590 loopsize = size;
591
592 pgaflags = UVM_PGA_USERESERVE;
593 if (flags & UVM_KMF_ZERO)
594 pgaflags |= UVM_PGA_ZERO;
595 prot = VM_PROT_READ | VM_PROT_WRITE;
596 if (flags & UVM_KMF_EXEC)
597 prot |= VM_PROT_EXECUTE;
598 while (loopsize) {
599 KASSERT(!pmap_extract(pmap_kernel(), loopva, NULL));
600
601 pg = uvm_pagealloc(NULL, offset, NULL, pgaflags);
602
603 /*
604 * out of memory?
605 */
606
607 if (__predict_false(pg == NULL)) {
608 if ((flags & UVM_KMF_NOWAIT) ||
609 ((flags & UVM_KMF_CANFAIL) && !uvm_reclaimable())) {
610 /* free everything! */
611 uvm_km_free(map, kva, size,
612 flags & UVM_KMF_TYPEMASK);
613 return (0);
614 } else {
615 uvm_wait("km_getwait2"); /* sleep here */
616 continue;
617 }
618 }
619
620 pg->flags &= ~PG_BUSY; /* new page */
621 UVM_PAGE_OWN(pg, NULL);
622
623 /*
624 * map it in
625 */
626
627 pmap_kenter_pa(loopva, VM_PAGE_TO_PHYS(pg), prot);
628 loopva += PAGE_SIZE;
629 offset += PAGE_SIZE;
630 loopsize -= PAGE_SIZE;
631 }
632
633 pmap_update(pmap_kernel());
634
635 UVMHIST_LOG(maphist,"<- done (kva=0x%x)", kva,0,0,0);
636 return(kva);
637 }
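/*
 * a minimal sketch of a typical wired allocation with uvm_km_alloc():
 * the caller must be prepared for a zero return (see the NOTE above)
 * and must free with the same UVM_KMF_* type flag it allocated with.
 * the function name example_wired_use() is hypothetical.
 */
#if 0	/* illustrative sketch only, not compiled */
static void
example_wired_use(vsize_t len)
{
	vaddr_t kva;

	len = round_page(len);
	kva = uvm_km_alloc(kernel_map, len, 0,
	    UVM_KMF_WIRED | UVM_KMF_ZERO | UVM_KMF_CANFAIL);
	if (kva == 0)
		return;		/* out of KVA or physical memory */
	/* ... use len bytes of zeroed, wired memory at kva ... */
	uvm_km_free(kernel_map, kva, len, UVM_KMF_WIRED);
}
#endif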
638
639 /*
640 * uvm_km_free: free an area of kernel memory
641 */
642
643 void
644 uvm_km_free(struct vm_map *map, vaddr_t addr, vsize_t size, uvm_flag_t flags)
645 {
646
647 KASSERT((flags & UVM_KMF_TYPEMASK) == UVM_KMF_WIRED ||
648 (flags & UVM_KMF_TYPEMASK) == UVM_KMF_PAGEABLE ||
649 (flags & UVM_KMF_TYPEMASK) == UVM_KMF_VAONLY);
650 KASSERT((addr & PAGE_MASK) == 0);
651 KASSERT(vm_map_pmap(map) == pmap_kernel());
652
653 size = round_page(size);
654
655 if (flags & UVM_KMF_PAGEABLE) {
656 uvm_km_pgremove(addr, addr + size);
657 pmap_remove(pmap_kernel(), addr, addr + size);
658 } else if (flags & UVM_KMF_WIRED) {
659 uvm_km_pgremove_intrsafe(addr, addr + size);
660 pmap_kremove(addr, size);
661 }
662
663 uvm_unmap1(map, addr, addr + size, UVM_FLAG_QUANTUM|UVM_FLAG_VAONLY);
664 }
665
666 /* Sanity; must specify both or none. */
667 #if (defined(PMAP_MAP_POOLPAGE) || defined(PMAP_UNMAP_POOLPAGE)) && \
668 (!defined(PMAP_MAP_POOLPAGE) || !defined(PMAP_UNMAP_POOLPAGE))
669 #error Must specify MAP and UNMAP together.
670 #endif
671
672 /*
673 * uvm_km_alloc_poolpage: allocate a page for the pool allocator
674 *
675 * => if the pmap specifies an alternate mapping method, we use it.
676 */
677
678 /* ARGSUSED */
679 vaddr_t
680 uvm_km_alloc_poolpage_cache(struct vm_map *map, boolean_t waitok)
681 {
682 #if defined(PMAP_MAP_POOLPAGE)
683 return uvm_km_alloc_poolpage(map, waitok);
684 #else
685 struct vm_page *pg;
686 struct pool *pp = &vm_map_to_kernel(map)->vmk_vacache;
687 vaddr_t va;
688 int s = 0xdeadbeaf; /* XXX: gcc */
689 const boolean_t intrsafe = (map->flags & VM_MAP_INTRSAFE) != 0;
690
691 if ((map->flags & VM_MAP_VACACHE) == 0)
692 return uvm_km_alloc_poolpage(map, waitok);
693
694 if (intrsafe)
695 s = splvm();
696 va = (vaddr_t)pool_get(pp, waitok ? PR_WAITOK : PR_NOWAIT);
697 if (intrsafe)
698 splx(s);
699 if (va == 0)
700 return 0;
701 KASSERT(!pmap_extract(pmap_kernel(), va, NULL));
702 again:
703 pg = uvm_pagealloc(NULL, 0, NULL, UVM_PGA_USERESERVE);
704 if (__predict_false(pg == NULL)) {
705 if (waitok) {
706 uvm_wait("plpg");
707 goto again;
708 } else {
709 if (intrsafe)
710 s = splvm();
711 pool_put(pp, (void *)va);
712 if (intrsafe)
713 splx(s);
714 return 0;
715 }
716 }
717 pmap_kenter_pa(va, VM_PAGE_TO_PHYS(pg), VM_PROT_READ|VM_PROT_WRITE);
718 pmap_update(pmap_kernel());
719
720 return va;
721 #endif /* PMAP_MAP_POOLPAGE */
722 }
723
724 vaddr_t
725 uvm_km_alloc_poolpage(struct vm_map *map, boolean_t waitok)
726 {
727 #if defined(PMAP_MAP_POOLPAGE)
728 struct vm_page *pg;
729 vaddr_t va;
730
731 again:
732 pg = uvm_pagealloc(NULL, 0, NULL, UVM_PGA_USERESERVE);
733 if (__predict_false(pg == NULL)) {
734 if (waitok) {
735 uvm_wait("plpg");
736 goto again;
737 } else
738 return (0);
739 }
740 va = PMAP_MAP_POOLPAGE(VM_PAGE_TO_PHYS(pg));
741 if (__predict_false(va == 0))
742 uvm_pagefree(pg);
743 return (va);
744 #else
745 vaddr_t va;
746 int s = 0xdeadbeaf; /* XXX: gcc */
747 const boolean_t intrsafe = (map->flags & VM_MAP_INTRSAFE) != 0;
748
749 if (intrsafe)
750 s = splvm();
751 va = uvm_km_alloc(map, PAGE_SIZE, 0,
752 (waitok ? 0 : UVM_KMF_NOWAIT | UVM_KMF_TRYLOCK) | UVM_KMF_WIRED);
753 if (intrsafe)
754 splx(s);
755 return (va);
756 #endif /* PMAP_MAP_POOLPAGE */
757 }
758
759 /*
760 * uvm_km_free_poolpage: free a previously allocated pool page
761 *
762 * => if the pmap specifies an alternate unmapping method, we use it.
763 */
764
765 /* ARGSUSED */
766 void
767 uvm_km_free_poolpage_cache(struct vm_map *map, vaddr_t addr)
768 {
769 #if defined(PMAP_UNMAP_POOLPAGE)
770 uvm_km_free_poolpage(map, addr);
771 #else
772 struct pool *pp;
773 int s = 0xdeadbeaf; /* XXX: gcc */
774 const boolean_t intrsafe = (map->flags & VM_MAP_INTRSAFE) != 0;
775
776 if ((map->flags & VM_MAP_VACACHE) == 0) {
777 uvm_km_free_poolpage(map, addr);
778 return;
779 }
780
781 KASSERT(pmap_extract(pmap_kernel(), addr, NULL));
782 uvm_km_pgremove_intrsafe(addr, addr + PAGE_SIZE);
783 pmap_kremove(addr, PAGE_SIZE);
784 #if defined(DEBUG)
785 pmap_update(pmap_kernel());
786 #endif
787 KASSERT(!pmap_extract(pmap_kernel(), addr, NULL));
788 pp = &vm_map_to_kernel(map)->vmk_vacache;
789 if (intrsafe)
790 s = splvm();
791 pool_put(pp, (void *)addr);
792 if (intrsafe)
793 splx(s);
794 #endif
795 }
796
797 /* ARGSUSED */
798 void
799 uvm_km_free_poolpage(struct vm_map *map, vaddr_t addr)
800 {
801 #if defined(PMAP_UNMAP_POOLPAGE)
802 paddr_t pa;
803
804 pa = PMAP_UNMAP_POOLPAGE(addr);
805 uvm_pagefree(PHYS_TO_VM_PAGE(pa));
806 #else
807 int s = 0xdeadbeaf; /* XXX: gcc */
808 const boolean_t intrsafe = (map->flags & VM_MAP_INTRSAFE) != 0;
809
810 if (intrsafe)
811 s = splvm();
812 uvm_km_free(map, addr, PAGE_SIZE, UVM_KMF_WIRED);
813 if (intrsafe)
814 splx(s);
815 #endif /* PMAP_UNMAP_POOLPAGE */
816 }
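/*
 * a minimal sketch of how a pool backend can be built on top of the
 * poolpage routines above, in the shape of the pool_allocator callbacks
 * (compare km_vacache_alloc/km_vacache_free earlier in this file).
 * the names example_page_alloc() and example_page_free() are
 * hypothetical, and kmem_map is assumed to be the backing submap.
 */
#if 0	/* illustrative sketch only, not compiled */
static void *
example_page_alloc(struct pool *pp, int flags)
{

	return (void *)uvm_km_alloc_poolpage_cache(kmem_map,
	    (flags & PR_WAITOK) != 0);
}

static void
example_page_free(struct pool *pp, void *v)
{

	uvm_km_free_poolpage_cache(kmem_map, (vaddr_t)v);
}
#endif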