FreeBSD/Linux Kernel Cross Reference
sys/uvm/uvm_km.c
1 /* $NetBSD: uvm_km.c,v 1.77.2.1 2005/12/06 20:00:12 riz Exp $ */
2
3 /*
4 * Copyright (c) 1997 Charles D. Cranor and Washington University.
5 * Copyright (c) 1991, 1993, The Regents of the University of California.
6 *
7 * All rights reserved.
8 *
9 * This code is derived from software contributed to Berkeley by
10 * The Mach Operating System project at Carnegie-Mellon University.
11 *
12 * Redistribution and use in source and binary forms, with or without
13 * modification, are permitted provided that the following conditions
14 * are met:
15 * 1. Redistributions of source code must retain the above copyright
16 * notice, this list of conditions and the following disclaimer.
17 * 2. Redistributions in binary form must reproduce the above copyright
18 * notice, this list of conditions and the following disclaimer in the
19 * documentation and/or other materials provided with the distribution.
20 * 3. All advertising materials mentioning features or use of this software
21 * must display the following acknowledgement:
22 * This product includes software developed by Charles D. Cranor,
23 * Washington University, the University of California, Berkeley and
24 * its contributors.
25 * 4. Neither the name of the University nor the names of its contributors
26 * may be used to endorse or promote products derived from this software
27 * without specific prior written permission.
28 *
29 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
30 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
31 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
32 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
33 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
34 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
35 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
36 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
37 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
38 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
39 * SUCH DAMAGE.
40 *
41 * @(#)vm_kern.c 8.3 (Berkeley) 1/12/94
42 * from: Id: uvm_km.c,v 1.1.2.14 1998/02/06 05:19:27 chs Exp
43 *
44 *
45 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
46 * All rights reserved.
47 *
48 * Permission to use, copy, modify and distribute this software and
49 * its documentation is hereby granted, provided that both the copyright
50 * notice and this permission notice appear in all copies of the
51 * software, derivative works or modified versions, and any portions
52 * thereof, and that both notices appear in supporting documentation.
53 *
54 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
55 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
56 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
57 *
58 * Carnegie Mellon requests users of this software to return to
59 *
60 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
61 * School of Computer Science
62 * Carnegie Mellon University
63 * Pittsburgh PA 15213-3890
64 *
65 * any improvements or extensions that they make and grant Carnegie the
66 * rights to redistribute these changes.
67 */
68
69 /*
70 * uvm_km.c: handle kernel memory allocation and management
71 */
72
73 /*
74 * overview of kernel memory management:
75 *
76 * the kernel virtual address space is mapped by "kernel_map." kernel_map
77 * starts at VM_MIN_KERNEL_ADDRESS and goes to VM_MAX_KERNEL_ADDRESS.
78 * note that VM_MIN_KERNEL_ADDRESS is equal to vm_map_min(kernel_map).
79 *
80 * the kernel_map has several "submaps." submaps can only appear in
81 * the kernel_map (user processes can't use them). submaps "take over"
82 * the management of a sub-range of the kernel's address space. submaps
83 * are typically allocated at boot time and are never released. kernel
84 * virtual address space that is mapped by a submap is locked by the
85 * submap's lock -- not the kernel_map's lock.
86 *
87 * thus, the useful feature of submaps is that they allow us to break
88 * up the locking and protection of the kernel address space into smaller
89 * chunks.
90 *
91 * the vm system has several standard kernel submaps, including:
92 * kmem_map => contains only wired kernel memory for the kernel
93 * malloc. *** access to kmem_map must be protected
94 * by splvm() because we are allowed to call malloc()
95 * at interrupt time ***
96 * mb_map => memory for large mbufs, *** protected by splvm ***
97 * pager_map => used to map "buf" structures into kernel space
98 * exec_map => used during exec to handle exec args
99 * etc...
100 *
101 * the kernel allocates its private memory out of special uvm_objects whose
102 * reference count is set to UVM_OBJ_KERN (thus indicating that the objects
103 * are "special" and never die). all kernel objects should be thought of
104 * as large, fixed-size, sparsely populated uvm_objects. each kernel
105 * object is the size of the kernel virtual address space (i.e. the
106 * value "VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS").
107 *
108 * most kernel private memory lives in kernel_object. the only exception
109 * to this is for memory that belongs to submaps that must be protected
110 * by splvm(). pages in these submaps are not assigned to an object.
111 *
112 * note that just because a kernel object spans the entire kernel virtual
113 * address space doesn't mean that it has to be mapped into the entire space.
114 * large chunks of a kernel object's space go unused either because
115 * that area of kernel VM is unmapped, or there is some other type of
116 * object mapped into that range (e.g. a vnode). for submaps' kernel
117 * objects, the only parts of the object that can ever be populated are the
118 * offsets that are managed by the submap.
119 *
120 * note that the "offset" in a kernel object is always the kernel virtual
121 * address minus the VM_MIN_KERNEL_ADDRESS (aka vm_map_min(kernel_map)).
122 * example:
123 * suppose VM_MIN_KERNEL_ADDRESS is 0xf8000000 and the kernel does a
124 * uvm_km_alloc(kernel_map, PAGE_SIZE) [allocate 1 wired down page in the
125 * kernel map]. if uvm_km_alloc returns virtual address 0xf8235000,
126 * then that means that the page at offset 0x235000 in kernel_object is
127 * mapped at 0xf8235000.
128 *
129 * kernel objects have one other special property: when the kernel virtual
130 * memory mapping them is unmapped, the backing memory in the object is
131 * freed right away. this is done with the uvm_km_pgremove() function.
132 * this has to be done because there is no backing store for kernel pages
133 * and no need to save them after they are no longer referenced.
134 */
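
/*
 * for illustration, the offset arithmetic described above is just a
 * subtraction. assuming "kva" holds an address previously returned by a
 * kernel_map allocation backed by kernel_object, the resident page behind
 * it (if any) could be looked up roughly like this:
 *
 *	voff_t offset = kva - vm_map_min(kernel_map);
 *	struct vm_page *pg;
 *
 *	simple_lock(&uvm.kernel_object->vmobjlock);
 *	pg = uvm_pagelookup(uvm.kernel_object, offset);
 *	simple_unlock(&uvm.kernel_object->vmobjlock);
 */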
135
136 #include <sys/cdefs.h>
137 __KERNEL_RCSID(0, "$NetBSD: uvm_km.c,v 1.77.2.1 2005/12/06 20:00:12 riz Exp $");
138
139 #include "opt_uvmhist.h"
140
141 #include <sys/param.h>
142 #include <sys/malloc.h>
143 #include <sys/systm.h>
144 #include <sys/proc.h>
145 #include <sys/pool.h>
146
147 #include <uvm/uvm.h>
148
149 /*
150 * global data structures
151 */
152
153 struct vm_map *kernel_map = NULL;
154
155 /*
156 * local data structures
157 */
158
159 static struct vm_map_kernel kernel_map_store;
160 static struct vm_map_entry kernel_first_mapent_store;
161
162 #if !defined(PMAP_MAP_POOLPAGE)
163
164 /*
165 * kva cache
166 *
167 * XXX maybe it's better to do this at the uvm_map layer.
168 */
169
170 #define KM_VACACHE_SIZE (32 * PAGE_SIZE) /* XXX tune */
171
172 static void *km_vacache_alloc(struct pool *, int);
173 static void km_vacache_free(struct pool *, void *);
174 static void km_vacache_init(struct vm_map *, const char *, size_t);
175
176 /* XXX: recover the vm_map whose vm_map_kernel embeds the given pool */
177 #define KM_VACACHE_POOL_TO_MAP(pp) \
178 ((struct vm_map *)((char *)(pp) - \
179 offsetof(struct vm_map_kernel, vmk_vacache)))
180
181 static void *
182 km_vacache_alloc(struct pool *pp, int flags)
183 {
184 vaddr_t va;
185 size_t size;
186 struct vm_map *map;
187 #if defined(DEBUG)
188 vaddr_t loopva;
189 #endif
190 size = pp->pr_alloc->pa_pagesz;
191
192 map = KM_VACACHE_POOL_TO_MAP(pp);
193
194 va = vm_map_min(map); /* hint */
195 if (uvm_map(map, &va, size, NULL, UVM_UNKNOWN_OFFSET, size,
196 UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_NONE,
197 UVM_ADV_RANDOM, UVM_FLAG_QUANTUM |
198 ((flags & PR_WAITOK) ? 0 : UVM_FLAG_TRYLOCK | UVM_FLAG_NOWAIT))))
199 return NULL;
200
201 #if defined(DEBUG)
202 for (loopva = va; loopva < va + size; loopva += PAGE_SIZE) {
203 if (pmap_extract(pmap_kernel(), loopva, NULL))
204 panic("km_vacache_alloc: has mapping");
205 }
206 #endif
207
208 return (void *)va;
209 }
210
211 static void
212 km_vacache_free(struct pool *pp, void *v)
213 {
214 vaddr_t va = (vaddr_t)v;
215 size_t size = pp->pr_alloc->pa_pagesz;
216 struct vm_map *map;
217 #if defined(DEBUG)
218 vaddr_t loopva;
219
220 for (loopva = va; loopva < va + size; loopva += PAGE_SIZE) {
221 if (pmap_extract(pmap_kernel(), loopva, NULL))
222 panic("km_vacache_free: has mapping");
223 }
224 #endif
225 map = KM_VACACHE_POOL_TO_MAP(pp);
226 uvm_unmap1(map, va, va + size, UVM_FLAG_QUANTUM);
227 }
228
229 /*
230 * km_vacache_init: initialize kva cache.
231 */
232
233 static void
234 km_vacache_init(struct vm_map *map, const char *name, size_t size)
235 {
236 struct vm_map_kernel *vmk;
237 struct pool *pp;
238 struct pool_allocator *pa;
239
240 KASSERT(VM_MAP_IS_KERNEL(map));
241 KASSERT(size < (vm_map_max(map) - vm_map_min(map)) / 2); /* sanity */
242
243 vmk = vm_map_to_kernel(map);
244 pp = &vmk->vmk_vacache;
245 pa = &vmk->vmk_vacache_allocator;
246 memset(pa, 0, sizeof(*pa));
247 pa->pa_alloc = km_vacache_alloc;
248 pa->pa_free = km_vacache_free;
249 pa->pa_pagesz = (unsigned int)size;
250 pool_init(pp, PAGE_SIZE, 0, 0, PR_NOTOUCH | PR_RECURSIVE, name, pa);
251
252 /* XXX for now.. */
253 pool_sethiwat(pp, 0);
254 }
255
256 void
257 uvm_km_vacache_init(struct vm_map *map, const char *name, size_t size)
258 {
259
260 map->flags |= VM_MAP_VACACHE;
261 if (size == 0)
262 size = KM_VACACHE_SIZE;
263 km_vacache_init(map, name, size);
264 }
265
266 #else /* !defined(PMAP_MAP_POOLPAGE) */
267
268 void
269 uvm_km_vacache_init(struct vm_map *map, const char *name, size_t size)
270 {
271
272 /* nothing */
273 }
274
275 #endif /* !defined(PMAP_MAP_POOLPAGE) */
276
277 /*
278 * uvm_km_init: init kernel maps and objects to reflect reality (i.e.
279 * KVM already allocated for text, data, bss, and static data structures).
280 *
281 * => KVM is defined by VM_MIN_KERNEL_ADDRESS/VM_MAX_KERNEL_ADDRESS.
282 * we assume that [min -> start] has already been allocated and that
283 * "end" is the end.
284 */
285
286 void
287 uvm_km_init(start, end)
288 vaddr_t start, end;
289 {
290 vaddr_t base = VM_MIN_KERNEL_ADDRESS;
291
292 /*
293 * next, init kernel memory objects.
294 */
295
296 /* kernel_object: for pageable anonymous kernel memory */
297 uao_init();
298 uvm.kernel_object = uao_create(VM_MAX_KERNEL_ADDRESS -
299 VM_MIN_KERNEL_ADDRESS, UAO_FLAG_KERNOBJ);
300
301 /*
302 * init the map and reserve any space that might already
303 * have been allocated kernel space before installing.
304 */
305
306 uvm_map_setup_kernel(&kernel_map_store, base, end, VM_MAP_PAGEABLE);
307 kernel_map_store.vmk_map.pmap = pmap_kernel();
308 if (start != base) {
309 int error;
310 struct uvm_map_args args;
311
312 error = uvm_map_prepare(&kernel_map_store.vmk_map,
313 base, start - base,
314 NULL, UVM_UNKNOWN_OFFSET, 0,
315 UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_NONE,
316 UVM_ADV_RANDOM, UVM_FLAG_FIXED), &args);
317 if (!error) {
318 kernel_first_mapent_store.flags =
319 UVM_MAP_KERNEL | UVM_MAP_FIRST;
320 error = uvm_map_enter(&kernel_map_store.vmk_map, &args,
321 &kernel_first_mapent_store);
322 }
323
324 if (error)
325 panic(
326 "uvm_km_init: could not reserve space for kernel");
327 }
328
329 /*
330 * install!
331 */
332
333 kernel_map = &kernel_map_store.vmk_map;
334 uvm_km_vacache_init(kernel_map, "kvakernel", 0);
335 }
336
337 /*
338 * uvm_km_suballoc: allocate a submap in the kernel map. once a submap
339 * is allocated all references to that area of VM must go through it. this
340 * allows the locking of VAs in kernel_map to be broken up into regions.
341 *
342 * => if `fixed' is true, *min specifies where the region described
343 * by the submap must start
344 * => if submap is non NULL we use that as the submap, otherwise we
345 * alloc a new map
346 */
347 struct vm_map *
348 uvm_km_suballoc(map, min, max, size, flags, fixed, submap)
349 struct vm_map *map;
350 vaddr_t *min, *max; /* IN/OUT, OUT */
351 vsize_t size;
352 int flags;
353 boolean_t fixed;
354 struct vm_map_kernel *submap;
355 {
356 int mapflags = UVM_FLAG_NOMERGE | (fixed ? UVM_FLAG_FIXED : 0);
357
358 KASSERT(vm_map_pmap(map) == pmap_kernel());
359
360 size = round_page(size); /* round up to pagesize */
361
362 /*
363 * first allocate a blank spot in the parent map
364 */
365
366 if (uvm_map(map, min, size, NULL, UVM_UNKNOWN_OFFSET, 0,
367 UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_NONE,
368 UVM_ADV_RANDOM, mapflags)) != 0) {
369 panic("uvm_km_suballoc: unable to allocate space in parent map");
370 }
371
372 /*
373 * set VM bounds (min is filled in by uvm_map)
374 */
375
376 *max = *min + size;
377
378 /*
379 * add references to pmap and create or init the submap
380 */
381
382 pmap_reference(vm_map_pmap(map));
383 if (submap == NULL) {
384 submap = malloc(sizeof(*submap), M_VMMAP, M_WAITOK);
385 if (submap == NULL)
386 panic("uvm_km_suballoc: unable to create submap");
387 }
388 uvm_map_setup_kernel(submap, *min, *max, flags);
389 submap->vmk_map.pmap = vm_map_pmap(map);
390
391 /*
392 * now let uvm_map_submap plug in it...
393 */
394
395 if (uvm_map_submap(map, *min, *max, &submap->vmk_map) != 0)
396 panic("uvm_km_suballoc: submap allocation failed");
397
398 return(&submap->vmk_map);
399 }
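
/*
 * a rough usage sketch (the names "foo_map", "foo_map_store", "foo_min",
 * "foo_max" and "foo_size" are hypothetical, not taken from this file):
 * a boot-time caller typically carves a wired, interrupt-safe submap out
 * of kernel_map along these lines,
 *
 *	static struct vm_map_kernel foo_map_store;
 *	struct vm_map *foo_map;
 *	vaddr_t foo_min, foo_max;
 *
 *	foo_map = uvm_km_suballoc(kernel_map, &foo_min, &foo_max,
 *	    round_page(foo_size), VM_MAP_INTRSAFE, FALSE, &foo_map_store);
 *
 * after which allocations in the [foo_min, foo_max) range are managed by
 * (and locked through) foo_map rather than kernel_map.
 */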
400
401 /*
402 * uvm_km_pgremove: remove pages from a kernel uvm_object.
403 *
404 * => when you unmap a part of anonymous kernel memory you want to toss
405 * the pages right away. (this gets called from uvm_unmap_...).
406 */
407
408 void
409 uvm_km_pgremove(uobj, start, end)
410 struct uvm_object *uobj;
411 vaddr_t start, end;
412 {
413 struct vm_page *pg;
414 voff_t curoff, nextoff;
415 int swpgonlydelta = 0;
416 UVMHIST_FUNC("uvm_km_pgremove"); UVMHIST_CALLED(maphist);
417
418 KASSERT(uobj->pgops == &aobj_pager);
419 simple_lock(&uobj->vmobjlock);
420
421 for (curoff = start; curoff < end; curoff = nextoff) {
422 nextoff = curoff + PAGE_SIZE;
423 pg = uvm_pagelookup(uobj, curoff);
424 if (pg != NULL && pg->flags & PG_BUSY) {
425 pg->flags |= PG_WANTED;
426 UVM_UNLOCK_AND_WAIT(pg, &uobj->vmobjlock, 0,
427 "km_pgrm", 0);
428 simple_lock(&uobj->vmobjlock);
429 nextoff = curoff;
430 continue;
431 }
432
433 /*
434 * free the swap slot, then the page.
435 */
436
437 if (pg == NULL &&
438 uao_find_swslot(uobj, curoff >> PAGE_SHIFT) > 0) {
439 swpgonlydelta++;
440 }
441 uao_dropswap(uobj, curoff >> PAGE_SHIFT);
442 if (pg != NULL) {
443 uvm_lock_pageq();
444 uvm_pagefree(pg);
445 uvm_unlock_pageq();
446 }
447 }
448 simple_unlock(&uobj->vmobjlock);
449
450 if (swpgonlydelta > 0) {
451 simple_lock(&uvm.swap_data_lock);
452 KASSERT(uvmexp.swpgonly >= swpgonlydelta);
453 uvmexp.swpgonly -= swpgonlydelta;
454 simple_unlock(&uvm.swap_data_lock);
455 }
456 }
457
458
459 /*
460 * uvm_km_pgremove_intrsafe: like uvm_km_pgremove(), but for "intrsafe"
461 * maps
462 *
463 * => when you unmap a part of anonymous kernel memory you want to toss
464 * the pages right away. (this is called from uvm_unmap_...).
465 * => none of the pages will ever be busy, and none of them will ever
466 * be on the active or inactive queues (because they have no object).
467 */
468
469 void
470 uvm_km_pgremove_intrsafe(start, end)
471 vaddr_t start, end;
472 {
473 struct vm_page *pg;
474 paddr_t pa;
475 UVMHIST_FUNC("uvm_km_pgremove_intrsafe"); UVMHIST_CALLED(maphist);
476
477 for (; start < end; start += PAGE_SIZE) {
478 if (!pmap_extract(pmap_kernel(), start, &pa)) {
479 continue;
480 }
481 pg = PHYS_TO_VM_PAGE(pa);
482 KASSERT(pg);
483 KASSERT(pg->uobject == NULL && pg->uanon == NULL);
484 uvm_pagefree(pg);
485 }
486 }
487
488
489 /*
490 * uvm_km_kmemalloc: lower level kernel memory allocator for malloc()
491 *
492 * => we map wired memory into the specified map using the obj passed in
493 * => NOTE: we can return 0 even if we can wait if there is not enough
494 * free VM space in the map... caller should be prepared to handle
495 * this case.
496 * => we return KVA of memory allocated
497 * => align,prefer - passed on to uvm_map()
498 * => flags: NOWAIT, VALLOC - just allocate VA, TRYLOCK - fail if we can't
499 * lock the map
500 */
501
502 vaddr_t
503 uvm_km_kmemalloc1(map, obj, size, align, prefer, flags)
504 struct vm_map *map;
505 struct uvm_object *obj;
506 vsize_t size;
507 vsize_t align;
508 voff_t prefer;
509 int flags;
510 {
511 vaddr_t kva, loopva;
512 vaddr_t offset;
513 vsize_t loopsize;
514 struct vm_page *pg;
515 UVMHIST_FUNC("uvm_km_kmemalloc"); UVMHIST_CALLED(maphist);
516
517 UVMHIST_LOG(maphist," (map=0x%x, obj=0x%x, size=0x%x, flags=%d)",
518 map, obj, size, flags);
519 KASSERT(vm_map_pmap(map) == pmap_kernel());
520
521 /*
522 * setup for call
523 */
524
525 size = round_page(size);
526 kva = vm_map_min(map); /* hint */
527
528 /*
529 * allocate some virtual space
530 */
531
532 if (__predict_false(uvm_map(map, &kva, size, obj, prefer, align,
533 UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_NONE,
534 UVM_ADV_RANDOM,
535 (flags & (UVM_KMF_TRYLOCK | UVM_KMF_NOWAIT))
536 | UVM_FLAG_QUANTUM))
537 != 0)) {
538 UVMHIST_LOG(maphist, "<- done (no VM)",0,0,0,0);
539 return(0);
540 }
541
542 /*
543 * if all we wanted was VA, return now
544 */
545
546 if (flags & UVM_KMF_VALLOC) {
547 UVMHIST_LOG(maphist,"<- done valloc (kva=0x%x)", kva,0,0,0);
548 return(kva);
549 }
550
551 /*
552 * recover object offset from virtual address
553 */
554
555 offset = kva - vm_map_min(kernel_map);
556 UVMHIST_LOG(maphist, " kva=0x%x, offset=0x%x", kva, offset,0,0);
557
558 /*
559 * now allocate and map in the memory... note that we are the only ones
560 * who should ever get a handle on this area of VM.
561 */
562
563 loopva = kva;
564 loopsize = size;
565 while (loopsize) {
566 if (obj) {
567 simple_lock(&obj->vmobjlock);
568 }
569 pg = uvm_pagealloc(obj, offset, NULL, UVM_PGA_USERESERVE);
570 if (__predict_true(pg != NULL)) {
571 pg->flags &= ~PG_BUSY; /* new page */
572 UVM_PAGE_OWN(pg, NULL);
573 }
574 if (obj) {
575 simple_unlock(&obj->vmobjlock);
576 }
577
578 /*
579 * out of memory?
580 */
581
582 if (__predict_false(pg == NULL)) {
583 if ((flags & UVM_KMF_NOWAIT) ||
584 ((flags & UVM_KMF_CANFAIL) && uvm_swapisfull())) {
585 /* free everything! */
586 uvm_unmap1(map, kva, kva + size,
587 UVM_FLAG_QUANTUM);
588 return (0);
589 } else {
590 uvm_wait("km_getwait2"); /* sleep here */
591 continue;
592 }
593 }
594
595 /*
596 * map it in
597 */
598
599 if (obj == NULL) {
600 pmap_kenter_pa(loopva, VM_PAGE_TO_PHYS(pg),
601 VM_PROT_READ | VM_PROT_WRITE);
602 } else {
603 pmap_enter(map->pmap, loopva, VM_PAGE_TO_PHYS(pg),
604 UVM_PROT_ALL,
605 PMAP_WIRED | VM_PROT_READ | VM_PROT_WRITE);
606 }
607 loopva += PAGE_SIZE;
608 offset += PAGE_SIZE;
609 loopsize -= PAGE_SIZE;
610 }
611
612 pmap_update(pmap_kernel());
613
614 UVMHIST_LOG(maphist,"<- done (kva=0x%x)", kva,0,0,0);
615 return(kva);
616 }
617
618 /*
619 * uvm_km_free: free an area of kernel memory
620 */
621
622 void
623 uvm_km_free(map, addr, size)
624 struct vm_map *map;
625 vaddr_t addr;
626 vsize_t size;
627 {
628 uvm_unmap1(map, trunc_page(addr), round_page(addr+size),
629 UVM_FLAG_QUANTUM);
630 }
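
/*
 * a rough usage sketch: memory obtained from uvm_km_kmemalloc() is
 * returned with uvm_km_free() on the same map. e.g. a caller wanting
 * "len" bytes (a hypothetical size) of wired, object-less memory from
 * kmem_map, honoring the splvm() rule noted at the top of this file,
 * might do something like:
 *
 *	int s;
 *	vaddr_t va;
 *
 *	s = splvm();
 *	va = uvm_km_kmemalloc(kmem_map, NULL, round_page(len),
 *	    UVM_KMF_NOWAIT);
 *	splx(s);
 *	if (va == 0)
 *		return (ENOMEM);
 *	...
 *	uvm_km_free(kmem_map, va, round_page(len));	/* also under splvm() */
 */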
631
632 /*
633 * uvm_km_alloc1: allocate wired down memory in the kernel map.
634 *
635 * => we can sleep if needed
636 */
637
638 vaddr_t
639 uvm_km_alloc1(map, size, zeroit)
640 struct vm_map *map;
641 vsize_t size;
642 boolean_t zeroit;
643 {
644 vaddr_t kva, loopva, offset;
645 struct vm_page *pg;
646 UVMHIST_FUNC("uvm_km_alloc1"); UVMHIST_CALLED(maphist);
647
648 UVMHIST_LOG(maphist,"(map=0x%x, size=0x%x)", map, size,0,0);
649 KASSERT(vm_map_pmap(map) == pmap_kernel());
650
651 size = round_page(size);
652 kva = vm_map_min(map); /* hint */
653
654 /*
655 * allocate some virtual space
656 */
657
658 if (__predict_false(uvm_map(map, &kva, size, uvm.kernel_object,
659 UVM_UNKNOWN_OFFSET, 0, UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL,
660 UVM_INH_NONE, UVM_ADV_RANDOM,
661 UVM_FLAG_QUANTUM)) != 0)) {
662 UVMHIST_LOG(maphist,"<- done (no VM)",0,0,0,0);
663 return(0);
664 }
665
666 /*
667 * recover object offset from virtual address
668 */
669
670 offset = kva - vm_map_min(kernel_map);
671 UVMHIST_LOG(maphist," kva=0x%x, offset=0x%x", kva, offset,0,0);
672
673 /*
674 * now allocate the memory.
675 */
676
677 loopva = kva;
678 while (size) {
679 simple_lock(&uvm.kernel_object->vmobjlock);
680 KASSERT(uvm_pagelookup(uvm.kernel_object, offset) == NULL);
681 pg = uvm_pagealloc(uvm.kernel_object, offset, NULL, 0);
682 if (pg) {
683 pg->flags &= ~PG_BUSY;
684 UVM_PAGE_OWN(pg, NULL);
685 }
686 simple_unlock(&uvm.kernel_object->vmobjlock);
687 if (pg == NULL) {
688 uvm_wait("km_alloc1w");
689 continue;
690 }
691 pmap_enter(map->pmap, loopva, VM_PAGE_TO_PHYS(pg),
692 UVM_PROT_ALL, PMAP_WIRED | VM_PROT_READ | VM_PROT_WRITE);
693 loopva += PAGE_SIZE;
694 offset += PAGE_SIZE;
695 size -= PAGE_SIZE;
696 }
697 pmap_update(map->pmap);
698
699 /*
700 * zero on request (note that "size" is now zero due to the above loop
701 * so we need to subtract kva from loopva to reconstruct the size).
702 */
703
704 if (zeroit)
705 memset((caddr_t)kva, 0, loopva - kva);
706 UVMHIST_LOG(maphist,"<- done (kva=0x%x)", kva,0,0,0);
707 return(kva);
708 }
709
710 /*
711 * uvm_km_valloc1: allocate zero-fill memory in the kernel's address space
712 *
713 * => memory is not allocated until fault time
714 * => the align, prefer and flags parameters are passed on to uvm_map().
715 *
716 * Note: this function is also the backend for these macros:
717 * uvm_km_valloc
718 * uvm_km_valloc_wait
719 * uvm_km_valloc_prefer
720 * uvm_km_valloc_prefer_wait
721 * uvm_km_valloc_align
722 */
723
724 vaddr_t
725 uvm_km_valloc1(map, size, align, prefer, flags)
726 struct vm_map *map;
727 vsize_t size;
728 vsize_t align;
729 voff_t prefer;
730 uvm_flag_t flags;
731 {
732 vaddr_t kva;
733 int error;
734 UVMHIST_FUNC("uvm_km_valloc1"); UVMHIST_CALLED(maphist);
735
736 UVMHIST_LOG(maphist, "(map=0x%x, size=0x%x, align=0x%x, prefer=0x%x)",
737 map, size, align, prefer);
738
739 KASSERT(vm_map_pmap(map) == pmap_kernel());
740
741 size = round_page(size);
742 /*
743 * Check if requested size is larger than the map, in which
744 * case we can't succeed.
745 */
746 if (size > vm_map_max(map) - vm_map_min(map))
747 return (0);
748
749 flags |= UVM_FLAG_QUANTUM;
750 if ((flags & UVM_KMF_NOWAIT) == 0) /* XXX */
751 flags |= UVM_FLAG_WAITVA; /* XXX */
752
753 kva = vm_map_min(map); /* hint */
754
755 /*
756 * allocate some virtual space. will be demand filled
757 * by kernel_object.
758 */
759
760 error = uvm_map(map, &kva, size, uvm.kernel_object,
761 prefer, align, UVM_MAPFLAG(UVM_PROT_ALL,
762 UVM_PROT_ALL, UVM_INH_NONE, UVM_ADV_RANDOM, flags));
763
764 KASSERT(error == 0 || (flags & UVM_KMF_NOWAIT) != 0);
765
766 if (error) {
767 return 0;
768 }
769
770 UVMHIST_LOG(maphist,"<- done (kva=0x%x)", kva,0,0,0);
771
772 return (kva);
773 }
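
/*
 * a rough usage sketch: uvm_km_valloc1() (usually reached via the
 * uvm_km_valloc* wrappers below) only reserves kernel VA backed by
 * kernel_object; no physical pages are allocated here. e.g. for a
 * pageable buffer of hypothetical size "len",
 *
 *	vaddr_t va = uvm_km_valloc_wait(kernel_map, round_page(len));
 *
 * the first access to each page of [va, va + len) then faults in a
 * zero-filled page from kernel_object on demand.
 */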
774
775 /* Function definitions for binary compatibility */
776 vaddr_t
777 uvm_km_kmemalloc(struct vm_map *map, struct uvm_object *obj,
778 vsize_t sz, int flags)
779 {
780 return uvm_km_kmemalloc1(map, obj, sz, 0, UVM_UNKNOWN_OFFSET, flags);
781 }
782
783 vaddr_t uvm_km_valloc(struct vm_map *map, vsize_t sz)
784 {
785 return uvm_km_valloc1(map, sz, 0, UVM_UNKNOWN_OFFSET, UVM_KMF_NOWAIT);
786 }
787
788 vaddr_t uvm_km_valloc_align(struct vm_map *map, vsize_t sz, vsize_t align)
789 {
790 return uvm_km_valloc1(map, sz, align, UVM_UNKNOWN_OFFSET, UVM_KMF_NOWAIT);
791 }
792
793 vaddr_t uvm_km_valloc_prefer_wait(struct vm_map *map, vsize_t sz, voff_t prefer)
794 {
795 return uvm_km_valloc1(map, sz, 0, prefer, 0);
796 }
797
798 vaddr_t uvm_km_valloc_wait(struct vm_map *map, vsize_t sz)
799 {
800 return uvm_km_valloc1(map, sz, 0, UVM_UNKNOWN_OFFSET, 0);
801 }
802
803 /* Sanity; must specify both or none. */
804 #if (defined(PMAP_MAP_POOLPAGE) || defined(PMAP_UNMAP_POOLPAGE)) && \
805 (!defined(PMAP_MAP_POOLPAGE) || !defined(PMAP_UNMAP_POOLPAGE))
806 #error Must specify MAP and UNMAP together.
807 #endif
808
809 /*
810 * uvm_km_alloc_poolpage: allocate a page for the pool allocator
811 *
812 * => if the pmap specifies an alternate mapping method, we use it.
813 */
814
815 /* ARGSUSED */
816 vaddr_t
817 uvm_km_alloc_poolpage_cache(map, obj, waitok)
818 struct vm_map *map;
819 struct uvm_object *obj;
820 boolean_t waitok;
821 {
822 #if defined(PMAP_MAP_POOLPAGE)
823 return uvm_km_alloc_poolpage1(map, obj, waitok);
824 #else
825 struct vm_page *pg;
826 struct pool *pp = &vm_map_to_kernel(map)->vmk_vacache;
827 vaddr_t va;
828 int s = 0xdeadbeaf; /* XXX: gcc */
829 const boolean_t intrsafe = (map->flags & VM_MAP_INTRSAFE) != 0;
830
831 if ((map->flags & VM_MAP_VACACHE) == 0)
832 return uvm_km_alloc_poolpage1(map, obj, waitok);
833
834 if (intrsafe)
835 s = splvm();
836 va = (vaddr_t)pool_get(pp, waitok ? PR_WAITOK : PR_NOWAIT);
837 if (intrsafe)
838 splx(s);
839 if (va == 0)
840 return 0;
841 KASSERT(!pmap_extract(pmap_kernel(), va, NULL));
842 again:
843 pg = uvm_pagealloc(NULL, 0, NULL, UVM_PGA_USERESERVE);
844 if (__predict_false(pg == NULL)) {
845 if (waitok) {
846 uvm_wait("plpg");
847 goto again;
848 } else {
849 if (intrsafe)
850 s = splvm();
851 pool_put(pp, (void *)va);
852 if (intrsafe)
853 splx(s);
854 return 0;
855 }
856 }
857 pmap_kenter_pa(va, VM_PAGE_TO_PHYS(pg),
858 VM_PROT_READ|VM_PROT_WRITE);
859 pmap_update(pmap_kernel());
860
861 return va;
862 #endif /* PMAP_MAP_POOLPAGE */
863 }
864
865 vaddr_t
866 uvm_km_alloc_poolpage1(map, obj, waitok)
867 struct vm_map *map;
868 struct uvm_object *obj;
869 boolean_t waitok;
870 {
871 #if defined(PMAP_MAP_POOLPAGE)
872 struct vm_page *pg;
873 vaddr_t va;
874
875 again:
876 pg = uvm_pagealloc(NULL, 0, NULL, UVM_PGA_USERESERVE);
877 if (__predict_false(pg == NULL)) {
878 if (waitok) {
879 uvm_wait("plpg");
880 goto again;
881 } else
882 return (0);
883 }
884 va = PMAP_MAP_POOLPAGE(VM_PAGE_TO_PHYS(pg));
885 if (__predict_false(va == 0))
886 uvm_pagefree(pg);
887 return (va);
888 #else
889 vaddr_t va;
890 int s = 0xdeadbeaf; /* XXX: gcc */
891 const boolean_t intrsafe = (map->flags & VM_MAP_INTRSAFE) != 0;
892
893 if (intrsafe)
894 s = splvm();
895 va = uvm_km_kmemalloc(map, obj, PAGE_SIZE,
896 waitok ? 0 : UVM_KMF_NOWAIT | UVM_KMF_TRYLOCK);
897 if (intrsafe)
898 splx(s);
899 return (va);
900 #endif /* PMAP_MAP_POOLPAGE */
901 }
902
903 /*
904 * uvm_km_free_poolpage: free a previously allocated pool page
905 *
906 * => if the pmap specifies an alternate unmapping method, we use it.
907 */
908
909 /* ARGSUSED */
910 void
911 uvm_km_free_poolpage_cache(map, addr)
912 struct vm_map *map;
913 vaddr_t addr;
914 {
915 #if defined(PMAP_UNMAP_POOLPAGE)
916 uvm_km_free_poolpage1(map, addr);
917 #else
918 struct pool *pp;
919 int s = 0xdeadbeaf; /* XXX: gcc */
920 const boolean_t intrsafe = (map->flags & VM_MAP_INTRSAFE) != 0;
921
922 if ((map->flags & VM_MAP_VACACHE) == 0) {
923 uvm_km_free_poolpage1(map, addr);
924 return;
925 }
926
927 KASSERT(pmap_extract(pmap_kernel(), addr, NULL));
928 uvm_km_pgremove_intrsafe(addr, addr + PAGE_SIZE);
929 pmap_kremove(addr, PAGE_SIZE);
930 #if defined(DEBUG)
931 pmap_update(pmap_kernel());
932 #endif
933 KASSERT(!pmap_extract(pmap_kernel(), addr, NULL));
934 pp = &vm_map_to_kernel(map)->vmk_vacache;
935 if (intrsafe)
936 s = splvm();
937 pool_put(pp, (void *)addr);
938 if (intrsafe)
939 splx(s);
940 #endif
941 }
942
943 /* ARGSUSED */
944 void
945 uvm_km_free_poolpage1(map, addr)
946 struct vm_map *map;
947 vaddr_t addr;
948 {
949 #if defined(PMAP_UNMAP_POOLPAGE)
950 paddr_t pa;
951
952 pa = PMAP_UNMAP_POOLPAGE(addr);
953 uvm_pagefree(PHYS_TO_VM_PAGE(pa));
954 #else
955 int s = 0xdeadbeaf; /* XXX: gcc */
956 const boolean_t intrsafe = (map->flags & VM_MAP_INTRSAFE) != 0;
957
958 if (intrsafe)
959 s = splvm();
960 uvm_km_free(map, addr, PAGE_SIZE);
961 if (intrsafe)
962 splx(s);
963 #endif /* PMAP_UNMAP_POOLPAGE */
964 }