sys/vm/vm_kern.c
/*-
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_kern.c	8.3 (Berkeley) 1/12/94
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 *	Kernel memory management.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>		/* for ticks and hz */
#include <sys/eventhandler.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/sysctl.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_extern.h>
#include <vm/uma.h>

vm_map_t kernel_map = 0;
vm_map_t kmem_map = 0;
vm_map_t exec_map = 0;
vm_map_t pipe_map;
vm_map_t buffer_map = 0;

const void *zero_region;
CTASSERT((ZERO_REGION_SIZE & PAGE_MASK) == 0);

/*
 *	kmem_alloc_nofault:
 *
 *	Allocate a virtual address range with no underlying object and
 *	no initial mapping to physical memory.  Any mapping from this
 *	range to physical memory must be explicitly created prior to
 *	its use, typically with pmap_qenter().  Any attempt to create
 *	a mapping on demand through vm_fault() will result in a panic.
 */
vm_offset_t
kmem_alloc_nofault(vm_map_t map, vm_size_t size)
{
        vm_offset_t addr;
        int result;

        size = round_page(size);
        addr = vm_map_min(map);
        result = vm_map_find(map, NULL, 0, &addr, size, VMFS_ANY_SPACE,
            VM_PROT_ALL, VM_PROT_ALL, MAP_NOFAULT);
        if (result != KERN_SUCCESS) {
                return (0);
        }
        return (addr);
}
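
/*
 * Illustrative sketch, not part of the original file: per the comment
 * above, a caller must back the returned range by hand.  The page "m"
 * and its origin are assumptions made for the example.
 *
 *	vm_offset_t va;
 *	vm_page_t m;
 *
 *	va = kmem_alloc_nofault(kernel_map, PAGE_SIZE);
 *	if (va == 0)
 *		return (ENOMEM);
 *	(obtain a wired page m, e.g. from vm_page_alloc())
 *	pmap_qenter(va, &m, 1);		(create the mapping explicitly)
 *	(use the page through va, then tear the mapping down)
 *	pmap_qremove(va, 1);
 *	kmem_free(kernel_map, va, PAGE_SIZE);
 */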

/*
 *	kmem_alloc_nofault_space:
 *
 *	Allocate a virtual address range with no underlying object and
 *	no initial mapping to physical memory within the specified
 *	address space.  Any mapping from this range to physical memory
 *	must be explicitly created prior to its use, typically with
 *	pmap_qenter().  Any attempt to create a mapping on demand
 *	through vm_fault() will result in a panic.
 */
vm_offset_t
kmem_alloc_nofault_space(vm_map_t map, vm_size_t size, int find_space)
{
        vm_offset_t addr;
        int result;

        size = round_page(size);
        addr = vm_map_min(map);
        result = vm_map_find(map, NULL, 0, &addr, size, find_space,
            VM_PROT_ALL, VM_PROT_ALL, MAP_NOFAULT);
        if (result != KERN_SUCCESS) {
                return (0);
        }
        return (addr);
}

/*
 *	Allocate wired-down memory in the kernel's address map
 *	or a submap.
 */
vm_offset_t
kmem_alloc(vm_map_t map, vm_size_t size)
{
        vm_offset_t addr;
        vm_offset_t offset;
        vm_offset_t i;

        size = round_page(size);

        /*
         * Use the kernel object for wired-down kernel pages.  Assume that no
         * region of the kernel object is referenced more than once.
         */

        /*
         * Locate sufficient space in the map.  This will give us the final
         * virtual address for the new memory, and thus will tell us the
         * offset within the kernel map.
         */
        vm_map_lock(map);
        if (vm_map_findspace(map, vm_map_min(map), size, &addr)) {
                vm_map_unlock(map);
                return (0);
        }
        offset = addr - VM_MIN_KERNEL_ADDRESS;
        vm_object_reference(kernel_object);
        vm_map_insert(map, kernel_object, offset, addr, addr + size,
            VM_PROT_ALL, VM_PROT_ALL, 0);
        vm_map_unlock(map);

        /*
         * Guarantee that there are pages already in this object before
         * calling vm_map_wire.  This is to prevent the following
         * scenario:
         *
         * 1) Threads have swapped out, so that there is a pager for the
         *    kernel_object.
         * 2) The kmsg zone is empty, and so we are kmem_allocing a new
         *    page for it.
         * 3) vm_map_wire calls vm_fault; there is no page, but there is
         *    a pager, so we call pager_data_request.  But the kmsg zone
         *    is empty, so we must kmem_alloc.
         * 4) goto 1
         * 5) Even if the kmsg zone is not empty: when we get the data
         *    back from the pager, it will be (very stale) non-zero data.
         *    kmem_alloc is defined to return zero-filled memory.
         *
         * We're intentionally not activating the pages we allocate to prevent
         * a race with page-out.  vm_map_wire will wire the pages.
         */
        VM_OBJECT_LOCK(kernel_object);
        for (i = 0; i < size; i += PAGE_SIZE) {
                vm_page_t mem;

                mem = vm_page_grab(kernel_object, OFF_TO_IDX(offset + i),
                    VM_ALLOC_NOBUSY | VM_ALLOC_ZERO | VM_ALLOC_RETRY);
                mem->valid = VM_PAGE_BITS_ALL;
                KASSERT((mem->flags & PG_UNMANAGED) != 0,
                    ("kmem_alloc: page %p is managed", mem));
        }
        VM_OBJECT_UNLOCK(kernel_object);

        /*
         * And finally, mark the data as non-pageable.
         */
        (void) vm_map_wire(map, addr, addr + size,
            VM_MAP_WIRE_SYSTEM | VM_MAP_WIRE_NOHOLES);

        return (addr);
}
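
/*
 * Illustrative sketch, not code from this file: pairing kmem_alloc()
 * with kmem_free().  The returned memory is wired and zero-filled;
 * a failed allocation returns 0.
 *
 *	vm_offset_t buf;
 *	vm_size_t len = 4 * PAGE_SIZE;
 *
 *	buf = kmem_alloc(kernel_map, len);
 *	if (buf == 0)
 *		return (ENOMEM);
 *	(use len bytes at buf)
 *	kmem_free(kernel_map, buf, len);
 */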

/*
 *	kmem_free:
 *
 *	Release a region of kernel virtual memory allocated
 *	with kmem_alloc, and return the physical pages
 *	associated with that region.
 *
 *	This routine may not block on kernel maps.
 */
void
kmem_free(vm_map_t map, vm_offset_t addr, vm_size_t size)
{

        (void) vm_map_remove(map, trunc_page(addr), round_page(addr + size));
}

/*
 *	kmem_suballoc:
 *
 *	Allocates a map to manage a subrange
 *	of the kernel virtual address space.
 *
 *	Arguments are as follows:
 *
 *	parent		Map to take range from
 *	min, max	Returned endpoints of map
 *	size		Size of range to find
 *	superpage_align	Request that min is superpage aligned
 */
vm_map_t
kmem_suballoc(vm_map_t parent, vm_offset_t *min, vm_offset_t *max,
    vm_size_t size, boolean_t superpage_align)
{
        int ret;
        vm_map_t result;

        size = round_page(size);

        *min = vm_map_min(parent);
        ret = vm_map_find(parent, NULL, 0, min, size, superpage_align ?
            VMFS_ALIGNED_SPACE : VMFS_ANY_SPACE, VM_PROT_ALL, VM_PROT_ALL,
            MAP_ACC_NO_CHARGE);
        if (ret != KERN_SUCCESS)
                panic("kmem_suballoc: bad status return of %d", ret);
        *max = *min + size;
        result = vm_map_create(vm_map_pmap(parent), *min, *max);
        if (result == NULL)
                panic("kmem_suballoc: cannot create submap");
        if (vm_map_submap(parent, *min, *max, result) != KERN_SUCCESS)
                panic("kmem_suballoc: unable to change range to submap");
        return (result);
}
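
/*
 * Illustrative sketch, an assumed caller rather than code from this
 * file: machine-dependent startup code typically carves submaps such
 * as exec_map out of kernel_map this way.  The size "16 * PAGE_SIZE"
 * is arbitrary, chosen only for the example.
 *
 *	vm_offset_t minaddr, maxaddr;
 *
 *	exec_map = kmem_suballoc(kernel_map, &minaddr, &maxaddr,
 *	    16 * PAGE_SIZE, FALSE);
 */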

/*
 *	kmem_malloc:
 *
 *	Allocate wired-down memory in the kernel's address map for the higher
 *	level kernel memory allocator (kern/kern_malloc.c).  We cannot use
 *	kmem_alloc() because we may need to allocate memory at interrupt
 *	level where we cannot block (canwait == FALSE).
 *
 *	This routine has its own private kernel submap (kmem_map) and object
 *	(kmem_object).  This, combined with the fact that only malloc uses
 *	this routine, ensures that we will never block in map or object waits.
 *
 *	We don't worry about expanding the map (adding entries) since entries
 *	for wired maps are statically allocated.
 *
 *	`map' is ONLY allowed to be kmem_map or one of the mbuf submaps to
 *	which we never free.
 */
vm_offset_t
kmem_malloc(vm_map_t map, vm_size_t size, int flags)
{
        vm_offset_t addr;
        int i, rv;

        size = round_page(size);
        addr = vm_map_min(map);

        /*
         * Locate sufficient space in the map.  This will give us the final
         * virtual address for the new memory, and thus will tell us the
         * offset within the kernel map.
         */
        vm_map_lock(map);
        if (vm_map_findspace(map, vm_map_min(map), size, &addr)) {
                vm_map_unlock(map);
                if ((flags & M_NOWAIT) == 0) {
                        for (i = 0; i < 8; i++) {
                                EVENTHANDLER_INVOKE(vm_lowmem, 0);
                                uma_reclaim();
                                vm_map_lock(map);
                                if (vm_map_findspace(map, vm_map_min(map),
                                    size, &addr) == 0) {
                                        break;
                                }
                                vm_map_unlock(map);
                                tsleep(&i, 0, "nokva", (hz / 4) * (i + 1));
                        }
                        if (i == 8) {
                                panic("kmem_malloc(%ld): kmem_map too small: %ld total allocated",
                                    (long)size, (long)map->size);
                        }
                } else {
                        return (0);
                }
        }

        rv = kmem_back(map, addr, size, flags);
        vm_map_unlock(map);
        return (rv == KERN_SUCCESS ? addr : 0);
}
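
/*
 * Illustrative sketch, an assumption rather than code from this file:
 * as the comment above notes, malloc(9) in kern/kern_malloc.c is the
 * intended caller.  A direct, non-blocking request for 64KB of zeroed,
 * wired memory would look roughly like this:
 *
 *	vm_offset_t va;
 *
 *	va = kmem_malloc(kmem_map, 64 * 1024, M_ZERO | M_NOWAIT);
 *	if (va == 0)
 *		(allocation failed without sleeping)
 */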

/*
 *	kmem_back:
 *
 *	Allocate physical pages for the specified virtual address range.
 */
int
kmem_back(vm_map_t map, vm_offset_t addr, vm_size_t size, int flags)
{
        vm_offset_t offset, i;
        vm_map_entry_t entry;
        vm_page_t m;
        int pflags;

        /*
         * XXX the map must be locked for write on entry, but there's
         * no easy way to assert that.
         */

        offset = addr - VM_MIN_KERNEL_ADDRESS;
        vm_object_reference(kmem_object);
        vm_map_insert(map, kmem_object, offset, addr, addr + size,
            VM_PROT_ALL, VM_PROT_ALL, 0);

        if ((flags & (M_NOWAIT | M_USE_RESERVE)) == M_NOWAIT)
                pflags = VM_ALLOC_INTERRUPT | VM_ALLOC_WIRED;
        else
                pflags = VM_ALLOC_SYSTEM | VM_ALLOC_WIRED;

        if (flags & M_ZERO)
                pflags |= VM_ALLOC_ZERO;

        VM_OBJECT_LOCK(kmem_object);
        for (i = 0; i < size; i += PAGE_SIZE) {
retry:
                m = vm_page_alloc(kmem_object, OFF_TO_IDX(offset + i), pflags);

                /*
                 * Ran out of space, free everything up and return.  Don't need
                 * to lock page queues here as we know that the pages we got
                 * aren't on any queues.
                 */
                if (m == NULL) {
                        if ((flags & M_NOWAIT) == 0) {
                                VM_OBJECT_UNLOCK(kmem_object);
                                vm_map_unlock(map);
                                VM_WAIT;
                                vm_map_lock(map);
                                VM_OBJECT_LOCK(kmem_object);
                                goto retry;
                        }
                        /*
                         * Free the pages before removing the map entry.
                         * They are already marked busy.  Calling
                         * vm_map_delete before the pages have been freed
                         * or unbusied will cause a deadlock.
                         */
                        while (i != 0) {
                                i -= PAGE_SIZE;
                                m = vm_page_lookup(kmem_object,
                                    OFF_TO_IDX(offset + i));
                                vm_page_lock_queues();
                                vm_page_unwire(m, 0);
                                vm_page_free(m);
                                vm_page_unlock_queues();
                        }
                        VM_OBJECT_UNLOCK(kmem_object);
                        vm_map_delete(map, addr, addr + size);
                        return (KERN_NO_SPACE);
                }
                if ((flags & M_ZERO) && (m->flags & PG_ZERO) == 0)
                        pmap_zero_page(m);
                m->valid = VM_PAGE_BITS_ALL;
                KASSERT((m->flags & PG_UNMANAGED) != 0,
                    ("kmem_malloc: page %p is managed", m));
        }
        VM_OBJECT_UNLOCK(kmem_object);

        /*
         * Mark map entry as non-pageable.  Assert: vm_map_insert() will never
         * be able to extend the previous entry so there will be a new entry
         * exactly corresponding to this address range and it will have
         * wired_count == 0.
         */
        if (!vm_map_lookup_entry(map, addr, &entry) ||
            entry->start != addr || entry->end != addr + size ||
            entry->wired_count != 0)
                panic("kmem_malloc: entry not found or misaligned");
        entry->wired_count = 1;

        /*
         * At this point, the kmem_object must be unlocked because
         * vm_map_simplify_entry() calls vm_object_deallocate(), which
         * locks the kmem_object.
         */
        vm_map_simplify_entry(map, entry);

        /*
         * Loop thru pages, entering them in the pmap.
         */
        VM_OBJECT_LOCK(kmem_object);
        for (i = 0; i < size; i += PAGE_SIZE) {
                m = vm_page_lookup(kmem_object, OFF_TO_IDX(offset + i));
                /*
                 * Because this is kernel_pmap, this call will not block.
                 */
                pmap_enter(kernel_pmap, addr + i, VM_PROT_ALL, m, VM_PROT_ALL,
                    TRUE);
                vm_page_wakeup(m);
        }
        VM_OBJECT_UNLOCK(kmem_object);

        return (KERN_SUCCESS);
}

/*
 *	kmem_alloc_wait:
 *
 *	Allocates pageable memory from a sub-map of the kernel.  If the submap
 *	has no room, the caller sleeps waiting for more memory in the submap.
 *
 *	This routine may block.
 */
vm_offset_t
kmem_alloc_wait(vm_map_t map, vm_size_t size)
{
        vm_offset_t addr;

        size = round_page(size);
        if (!swap_reserve(size))
                return (0);

        for (;;) {
                /*
                 * To make this work for more than one map, use the map's lock
                 * to lock out sleepers/wakers.
                 */
                vm_map_lock(map);
                if (vm_map_findspace(map, vm_map_min(map), size, &addr) == 0)
                        break;
                /* no space now; see if we can ever get space */
                if (vm_map_max(map) - vm_map_min(map) < size) {
                        vm_map_unlock(map);
                        swap_release(size);
                        return (0);
                }
                map->needs_wakeup = TRUE;
                vm_map_unlock_and_wait(map, 0);
        }
        vm_map_insert(map, NULL, 0, addr, addr + size, VM_PROT_ALL,
            VM_PROT_ALL, MAP_ACC_CHARGED);
        vm_map_unlock(map);
        return (addr);
}
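
/*
 * Illustrative sketch, an assumption rather than code from this file:
 * a caller allocates pageable KVA from a dedicated submap (exec_map is
 * used here as a plausible example) and releases it with
 * kmem_free_wakeup() so that blocked allocators are woken.  "len" is
 * a hypothetical size.
 *
 *	vm_offset_t kva;
 *
 *	kva = kmem_alloc_wait(exec_map, round_page(len));
 *	if (kva == 0)
 *		return (ENOMEM);
 *	(use the pageable range at kva)
 *	kmem_free_wakeup(exec_map, kva, round_page(len));
 */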

/*
 *	kmem_free_wakeup:
 *
 *	Returns memory to a submap of the kernel, and wakes up any processes
 *	waiting for memory in that map.
 */
void
kmem_free_wakeup(vm_map_t map, vm_offset_t addr, vm_size_t size)
{

        vm_map_lock(map);
        (void) vm_map_delete(map, trunc_page(addr), round_page(addr + size));
        if (map->needs_wakeup) {
                map->needs_wakeup = FALSE;
                vm_map_wakeup(map);
        }
        vm_map_unlock(map);
}

static void
kmem_init_zero_region(void)
{
        vm_offset_t addr, i;
        vm_page_t m;
        int error;

        /*
         * Map a single physical page of zeros to a larger virtual range.
         * This requires less looping in places that want large amounts of
         * zeros, while not using much more physical resources.
         */
        addr = kmem_alloc_nofault(kernel_map, ZERO_REGION_SIZE);
        m = vm_page_alloc(NULL, OFF_TO_IDX(addr - VM_MIN_KERNEL_ADDRESS),
            VM_ALLOC_NOOBJ | VM_ALLOC_WIRED | VM_ALLOC_ZERO);
        if ((m->flags & PG_ZERO) == 0)
                pmap_zero_page(m);
        for (i = 0; i < ZERO_REGION_SIZE; i += PAGE_SIZE)
                pmap_qenter(addr + i, &m, 1);
        error = vm_map_protect(kernel_map, addr, addr + ZERO_REGION_SIZE,
            VM_PROT_READ, TRUE);
        KASSERT(error == 0, ("error=%d", error));

        zero_region = (const void *)addr;
}
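
/*
 * Illustrative sketch, an assumption rather than code from this file:
 * a consumer can hand out zeros without maintaining a private zeroed
 * buffer by copying from zero_region in ZERO_REGION_SIZE chunks, e.g.
 * when satisfying a read of all-zero data into a uio:
 *
 *	while (uio->uio_resid > 0 && error == 0)
 *		error = uiomove(__DECONST(void *, zero_region),
 *		    MIN(uio->uio_resid, ZERO_REGION_SIZE), uio);
 */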

/*
 *	kmem_init:
 *
 *	Create the kernel map; insert a mapping covering kernel text,
 *	data, bss, and all space allocated thus far (`bootstrap' data).  The
 *	new map will thus map the range between VM_MIN_KERNEL_ADDRESS and
 *	`start' as allocated, and the range between `start' and `end' as free.
 */
void
kmem_init(vm_offset_t start, vm_offset_t end)
{
        vm_map_t m;

        m = vm_map_create(kernel_pmap, VM_MIN_KERNEL_ADDRESS, end);
        m->system_map = 1;
        vm_map_lock(m);
        /* N.B.: cannot use kgdb to debug, starting with this assignment ... */
        kernel_map = m;
        (void) vm_map_insert(m, NULL, (vm_ooffset_t) 0,
#ifdef __amd64__
            KERNBASE,
#else
            VM_MIN_KERNEL_ADDRESS,
#endif
            start, VM_PROT_ALL, VM_PROT_ALL, MAP_NOFAULT);
        /* ... and ending with the completion of the above `insert' */
        vm_map_unlock(m);

        kmem_init_zero_region();
}

#ifdef DIAGNOSTIC
/*
 * Allow userspace to directly trigger the VM drain routine for testing
 * purposes.
 */
static int
debug_vm_lowmem(SYSCTL_HANDLER_ARGS)
{
        int error, i;

        i = 0;
        error = sysctl_handle_int(oidp, &i, 0, req);
        if (error)
                return (error);
        if (i)
                EVENTHANDLER_INVOKE(vm_lowmem, 0);
        return (0);
}

SYSCTL_PROC(_debug, OID_AUTO, vm_lowmem, CTLTYPE_INT | CTLFLAG_RW, 0, 0,
    debug_vm_lowmem, "I", "set to trigger vm_lowmem event");
#endif