sys/vm/vm_page.c
1 /*-
2 * Copyright (c) 1991 Regents of the University of California.
3 * All rights reserved.
4 * Copyright (c) 1998 Matthew Dillon. All Rights Reserved.
5 *
6 * This code is derived from software contributed to Berkeley by
7 * The Mach Operating System project at Carnegie-Mellon University.
8 *
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
11 * are met:
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
17 * 4. Neither the name of the University nor the names of its contributors
18 * may be used to endorse or promote products derived from this software
19 * without specific prior written permission.
20 *
21 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
22 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
25 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
26 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
27 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
28 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
29 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
30 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
31 * SUCH DAMAGE.
32 *
33 * from: @(#)vm_page.c 7.4 (Berkeley) 5/7/91
34 */
35
36 /*-
37 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
38 * All rights reserved.
39 *
40 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
41 *
42 * Permission to use, copy, modify and distribute this software and
43 * its documentation is hereby granted, provided that both the copyright
44 * notice and this permission notice appear in all copies of the
45 * software, derivative works or modified versions, and any portions
46 * thereof, and that both notices appear in supporting documentation.
47 *
48 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
49 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
50 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
51 *
52 * Carnegie Mellon requests users of this software to return to
53 *
54 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
55 * School of Computer Science
56 * Carnegie Mellon University
57 * Pittsburgh PA 15213-3890
58 *
59 * any improvements or extensions that they make and grant Carnegie the
60 * rights to redistribute these changes.
61 */
62
63 /*
64 * GENERAL RULES ON VM_PAGE MANIPULATION
65 *
66 * - A page queue lock is required when adding or removing a page from a
67 * page queue regardless of other locks or the busy state of a page.
68 *
69 * * In general, no thread besides the page daemon can acquire or
70 * hold more than one page queue lock at a time.
71 *
72 * * The page daemon can acquire and hold any pair of page queue
73 * locks in any order.
74 *
75 * - The object lock is required when inserting or removing
76 * pages from an object (vm_page_insert() or vm_page_remove()).
77 *
78 */
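/*
 * For illustration, a minimal sketch of this locking discipline, assuming
 * a hypothetical caller that inserts a freshly allocated page and then
 * enqueues it ("obj" and "pindex" are made-up names; the routines are the
 * ones defined below and in vm_page.h):
 *
 *	VM_OBJECT_WLOCK(obj);			(object lock for insertion)
 *	m = vm_page_alloc(obj, pindex, VM_ALLOC_NORMAL);
 *	if (m != NULL) {
 *		vm_page_lock(m);		(page lock before queue ops)
 *		vm_page_activate(m);		(takes a page queue lock)
 *		vm_page_unlock(m);
 *		vm_page_xunbusy(m);		(allocated exclusive busied)
 *	}
 *	VM_OBJECT_WUNLOCK(obj);
 */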
79
80 /*
81 * Resident memory management module.
82 */
83
84 #include <sys/cdefs.h>
85 __FBSDID("$FreeBSD$");
86
87 #include "opt_vm.h"
88
89 #include <sys/param.h>
90 #include <sys/systm.h>
91 #include <sys/lock.h>
92 #include <sys/kernel.h>
93 #include <sys/limits.h>
94 #include <sys/malloc.h>
95 #include <sys/mman.h>
96 #include <sys/msgbuf.h>
97 #include <sys/mutex.h>
98 #include <sys/proc.h>
99 #include <sys/rwlock.h>
100 #include <sys/sysctl.h>
101 #include <sys/vmmeter.h>
102 #include <sys/vnode.h>
103
104 #include <vm/vm.h>
105 #include <vm/pmap.h>
106 #include <vm/vm_param.h>
107 #include <vm/vm_kern.h>
108 #include <vm/vm_object.h>
109 #include <vm/vm_page.h>
110 #include <vm/vm_pageout.h>
111 #include <vm/vm_pager.h>
112 #include <vm/vm_phys.h>
113 #include <vm/vm_radix.h>
114 #include <vm/vm_reserv.h>
115 #include <vm/vm_extern.h>
116 #include <vm/uma.h>
117 #include <vm/uma_int.h>
118
119 #include <machine/md_var.h>
120
121 /*
122 * Associated with each page of user-allocatable memory is a
123 * page structure.
124 */
125
126 struct vm_domain vm_dom[MAXMEMDOM];
127 struct mtx_padalign vm_page_queue_free_mtx;
128
129 struct mtx_padalign pa_lock[PA_LOCK_COUNT];
130
131 vm_page_t vm_page_array;
132 long vm_page_array_size;
133 long first_page;
134 int vm_page_zero_count;
135
136 static int boot_pages = UMA_BOOT_PAGES;
137 TUNABLE_INT("vm.boot_pages", &boot_pages);
138 SYSCTL_INT(_vm, OID_AUTO, boot_pages, CTLFLAG_RD, &boot_pages, 0,
139 "number of pages allocated for bootstrapping the VM system");
140
141 static int pa_tryrelock_restart;
142 SYSCTL_INT(_vm, OID_AUTO, tryrelock_restart, CTLFLAG_RD,
143 &pa_tryrelock_restart, 0, "Number of tryrelock restarts");
144
145 static uma_zone_t fakepg_zone;
146
147 static struct vnode *vm_page_alloc_init(vm_page_t m);
148 static void vm_page_cache_turn_free(vm_page_t m);
149 static void vm_page_clear_dirty_mask(vm_page_t m, vm_page_bits_t pagebits);
150 static void vm_page_enqueue(int queue, vm_page_t m);
151 static void vm_page_init_fakepg(void *dummy);
152 static int vm_page_insert_after(vm_page_t m, vm_object_t object,
153 vm_pindex_t pindex, vm_page_t mpred);
154 static void vm_page_insert_radixdone(vm_page_t m, vm_object_t object,
155 vm_page_t mpred);
156
157 SYSINIT(vm_page, SI_SUB_VM, SI_ORDER_SECOND, vm_page_init_fakepg, NULL);
158
159 static void
160 vm_page_init_fakepg(void *dummy)
161 {
162
163 fakepg_zone = uma_zcreate("fakepg", sizeof(struct vm_page), NULL, NULL,
164 NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE | UMA_ZONE_VM);
165 }
166
167 /* Make sure that u_long is at least 64 bits when PAGE_SIZE is 32K. */
168 #if PAGE_SIZE == 32768
169 #ifdef CTASSERT
170 CTASSERT(sizeof(u_long) >= 8);
171 #endif
172 #endif
173
174 /*
175 * Try to acquire a physical address lock while a pmap is locked. If
176 * the trylock fails, we drop the pmap lock, acquire the PA lock, and
177 * then reacquire the pmap lock, caching the locked pa in *locked.
178 * The caller must then restart its loop, as the mapping may have changed.
179 */
180 int
181 vm_page_pa_tryrelock(pmap_t pmap, vm_paddr_t pa, vm_paddr_t *locked)
182 {
183 vm_paddr_t lockpa;
184
185 lockpa = *locked;
186 *locked = pa;
187 if (lockpa) {
188 PA_LOCK_ASSERT(lockpa, MA_OWNED);
189 if (PA_LOCKPTR(pa) == PA_LOCKPTR(lockpa))
190 return (0);
191 PA_UNLOCK(lockpa);
192 }
193 if (PA_TRYLOCK(pa))
194 return (0);
195 PMAP_UNLOCK(pmap);
196 atomic_add_int(&pa_tryrelock_restart, 1);
197 PA_LOCK(pa);
198 PMAP_LOCK(pmap);
199 return (EAGAIN);
200 }
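
/*
 * An illustrative sketch of the retry protocol (the enclosing pmap code
 * and the page table walk are elided; "pte_to_pa()" is a made-up stand-in
 * for reading the translation while the pmap lock is held, since
 * pmap_extract() itself would try to take that lock):
 *
 *	vm_paddr_t locked_pa = 0, pa;
 *
 *	PMAP_LOCK(pmap);
 * retry:
 *	pa = pte_to_pa(pmap, va);
 *	if (vm_page_pa_tryrelock(pmap, pa, &locked_pa))
 *		goto retry;
 *	... operate on the vm_page at "pa" with its PA lock held ...
 *	if (locked_pa != 0)
 *		PA_UNLOCK(locked_pa);
 *	PMAP_UNLOCK(pmap);
 */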
201
202 /*
203 * vm_set_page_size:
204 *
205 * Sets the page size, perhaps based upon the memory
206 * size. Must be called before any use of page-size
207 * dependent functions.
208 */
209 void
210 vm_set_page_size(void)
211 {
212 if (cnt.v_page_size == 0)
213 cnt.v_page_size = PAGE_SIZE;
214 if (((cnt.v_page_size - 1) & cnt.v_page_size) != 0)
215 panic("vm_set_page_size: page size not a power of two");
216 }
217
218 /*
219 * vm_page_blacklist_lookup:
220 *
221 * See if a physical address in this page has been listed
222 * in the blacklist tunable. Entries in the tunable are
223 * separated by spaces or commas. If an invalid integer is
224 * encountered then the rest of the string is skipped.
225 */
226 static int
227 vm_page_blacklist_lookup(char *list, vm_paddr_t pa)
228 {
229 vm_paddr_t bad;
230 char *cp, *pos;
231
232 for (pos = list; *pos != '\0'; pos = cp) {
233 bad = strtoq(pos, &cp, 0);
234 if (*cp != '\0') {
235 if (*cp == ' ' || *cp == ',') {
236 cp++;
237 if (cp == pos)
238 continue;
239 } else
240 break;
241 }
242 if (pa == trunc_page(bad))
243 return (1);
244 }
245 return (0);
246 }
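
/*
 * For example (addresses hypothetical), the tunable is typically set from
 * loader.conf, and each entry is truncated to a page boundary by the
 * comparison above:
 *
 *	vm.blacklist="0x7f654000,0x7f655000 0x12340000"
 */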
247
248 static void
249 vm_page_domain_init(struct vm_domain *vmd)
250 {
251 struct vm_pagequeue *pq;
252 int i;
253
254 *__DECONST(char **, &vmd->vmd_pagequeues[PQ_INACTIVE].pq_name) =
255 "vm inactive pagequeue";
256 *__DECONST(u_int **, &vmd->vmd_pagequeues[PQ_INACTIVE].pq_vcnt) =
257 &cnt.v_inactive_count;
258 *__DECONST(char **, &vmd->vmd_pagequeues[PQ_ACTIVE].pq_name) =
259 "vm active pagequeue";
260 *__DECONST(u_int **, &vmd->vmd_pagequeues[PQ_ACTIVE].pq_vcnt) =
261 &cnt.v_active_count;
262 vmd->vmd_page_count = 0;
263 vmd->vmd_free_count = 0;
264 vmd->vmd_segs = 0;
265 vmd->vmd_oom = FALSE;
266 vmd->vmd_pass = 0;
267 for (i = 0; i < PQ_COUNT; i++) {
268 pq = &vmd->vmd_pagequeues[i];
269 TAILQ_INIT(&pq->pq_pl);
270 mtx_init(&pq->pq_mutex, pq->pq_name, "vm pagequeue",
271 MTX_DEF | MTX_DUPOK);
272 }
273 }
274
275 /*
276 * vm_page_startup:
277 *
278 * Initializes the resident memory module. Allocates physical memory for
279 * bootstrapping UMA and some data structures that are used to manage
280 * physical pages. Initializes these structures, and populates the free
281 * page queues.
282 */
283 vm_offset_t
284 vm_page_startup(vm_offset_t vaddr)
285 {
286 vm_offset_t mapped;
287 vm_paddr_t high_avail, low_avail, page_range, size;
288 vm_paddr_t new_end;
289 int i;
290 vm_paddr_t pa;
291 vm_paddr_t last_pa;
292 char *list;
293
294 /* the biggest memory array is the second group of pages */
295 vm_paddr_t end;
296 vm_paddr_t biggestsize;
297 int biggestone;
298
299 biggestsize = 0;
300 biggestone = 0;
301 vaddr = round_page(vaddr);
302
303 for (i = 0; phys_avail[i + 1]; i += 2) {
304 phys_avail[i] = round_page(phys_avail[i]);
305 phys_avail[i + 1] = trunc_page(phys_avail[i + 1]);
306 }
307
308 #ifdef XEN
309 /*
310 * There is no obvious reason why i386 PV Xen needs vm_page structs
311 * created for these pseudo-physical addresses. XXX
312 */
313 vm_phys_add_seg(0, phys_avail[0]);
314 #endif
315
316 for (i = 0; phys_avail[i + 1]; i += 2) {
317 size = phys_avail[i + 1] - phys_avail[i];
318 if (size > biggestsize) {
319 biggestone = i;
320 biggestsize = size;
321 }
322 }
323
324 end = phys_avail[biggestone+1];
325
326 /*
327 * Initialize the page and queue locks.
328 */
329 mtx_init(&vm_page_queue_free_mtx, "vm page free queue", NULL, MTX_DEF);
330 for (i = 0; i < PA_LOCK_COUNT; i++)
331 mtx_init(&pa_lock[i], "vm page", NULL, MTX_DEF);
332 for (i = 0; i < vm_ndomains; i++)
333 vm_page_domain_init(&vm_dom[i]);
334
335 /*
336 * Allocate memory for use when bootstrapping the kernel memory
337 * allocator.
338 */
339 new_end = end - (boot_pages * UMA_SLAB_SIZE);
340 new_end = trunc_page(new_end);
341 mapped = pmap_map(&vaddr, new_end, end,
342 VM_PROT_READ | VM_PROT_WRITE);
343 bzero((void *)mapped, end - new_end);
344 uma_startup((void *)mapped, boot_pages);
345
346 #if defined(__amd64__) || defined(__i386__) || defined(__arm__) || \
347 defined(__mips__)
348 /*
349 * Allocate a bitmap to indicate that an arbitrary physical page
350 * needs to be included in a minidump.
351 *
352 * The amd64 port needs this to indicate which direct map pages
353 * need to be dumped, via calls to dump_add_page()/dump_drop_page().
354 *
355 * However, i386 still uses this workspace internally in the
356 * minidump code. In theory, the hooks are not needed on i386,
357 * but they are included in case the sf_buf code decides to use them.
358 */
359 last_pa = 0;
360 for (i = 0; dump_avail[i + 1] != 0; i += 2)
361 if (dump_avail[i + 1] > last_pa)
362 last_pa = dump_avail[i + 1];
363 page_range = last_pa / PAGE_SIZE;
364 vm_page_dump_size = round_page(roundup2(page_range, NBBY) / NBBY);
365 new_end -= vm_page_dump_size;
366 vm_page_dump = (void *)(uintptr_t)pmap_map(&vaddr, new_end,
367 new_end + vm_page_dump_size, VM_PROT_READ | VM_PROT_WRITE);
368 bzero((void *)vm_page_dump, vm_page_dump_size);
369 #endif
370 #if defined(__amd64__) || defined(__mips__)
371 /*
372 * Include the UMA bootstrap pages and vm_page_dump in a crash dump.
373 * When pmap_map() uses the direct map, they are not automatically
374 * included.
375 */
376 for (pa = new_end; pa < end; pa += PAGE_SIZE)
377 dump_add_page(pa);
378 #endif
379 phys_avail[biggestone + 1] = new_end;
380 #ifdef __amd64__
381 /*
382 * Request that the physical pages underlying the message buffer be
383 * included in a crash dump. Since the message buffer is accessed
384 * through the direct map, they are not automatically included.
385 */
386 pa = DMAP_TO_PHYS((vm_offset_t)msgbufp->msg_ptr);
387 last_pa = pa + round_page(msgbufsize);
388 while (pa < last_pa) {
389 dump_add_page(pa);
390 pa += PAGE_SIZE;
391 }
392 #endif
393 /*
394 * Compute the number of pages of memory that will be available for
395 * use, taking into account the overhead of a page structure per page.
396 * In other words, solve
397 * "available physical memory" - round_page(page_range *
398 * sizeof(struct vm_page)) = page_range * PAGE_SIZE
399 * for page_range.
400 */
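/*
 * For a rough, illustrative sense of scale (the numbers are assumptions,
 * not taken from this file): with a 4096-byte PAGE_SIZE and a struct
 * vm_page of about 100 bytes, each managed page costs roughly 4196 bytes
 * of physical memory, so the vm_page array consumes about 2.4% of the
 * memory that it describes.
 */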
401 low_avail = phys_avail[0];
402 high_avail = phys_avail[1];
403 for (i = 0; i < vm_phys_nsegs; i++) {
404 if (vm_phys_segs[i].start < low_avail)
405 low_avail = vm_phys_segs[i].start;
406 if (vm_phys_segs[i].end > high_avail)
407 high_avail = vm_phys_segs[i].end;
408 }
409 /* Skip the first chunk. It is already accounted for. */
410 for (i = 2; phys_avail[i + 1] != 0; i += 2) {
411 if (phys_avail[i] < low_avail)
412 low_avail = phys_avail[i];
413 if (phys_avail[i + 1] > high_avail)
414 high_avail = phys_avail[i + 1];
415 }
416 first_page = low_avail / PAGE_SIZE;
417 #ifdef VM_PHYSSEG_SPARSE
418 size = 0;
419 for (i = 0; i < vm_phys_nsegs; i++)
420 size += vm_phys_segs[i].end - vm_phys_segs[i].start;
421 for (i = 0; phys_avail[i + 1] != 0; i += 2)
422 size += phys_avail[i + 1] - phys_avail[i];
423 #elif defined(VM_PHYSSEG_DENSE)
424 size = high_avail - low_avail;
425 #else
426 #error "Either VM_PHYSSEG_DENSE or VM_PHYSSEG_SPARSE must be defined."
427 #endif
428
429 #ifdef VM_PHYSSEG_DENSE
430 /*
431 * In the VM_PHYSSEG_DENSE case, the number of pages can account for
432 * the overhead of a page structure per page only if vm_page_array is
433 * allocated from the last physical memory chunk. Otherwise, we must
434 * allocate page structures representing the physical memory
435 * underlying vm_page_array, even though they will not be used.
436 */
437 if (new_end != high_avail)
438 page_range = size / PAGE_SIZE;
439 else
440 #endif
441 {
442 page_range = size / (PAGE_SIZE + sizeof(struct vm_page));
443
444 /*
445 * If the partial bytes remaining are large enough for
446 * a page (PAGE_SIZE) without a corresponding
447 * 'struct vm_page', then new_end will contain an
448 * extra page after subtracting the length of the VM
449 * page array. Compensate by subtracting an extra
450 * page from new_end.
451 */
452 if (size % (PAGE_SIZE + sizeof(struct vm_page)) >= PAGE_SIZE) {
453 if (new_end == high_avail)
454 high_avail -= PAGE_SIZE;
455 new_end -= PAGE_SIZE;
456 }
457 }
458 end = new_end;
459
460 /*
461 * Reserve an unmapped guard page to trap access to vm_page_array[-1].
462 * However, because this page is allocated from KVM, out-of-bounds
463 * accesses using the direct map will not be trapped.
464 */
465 vaddr += PAGE_SIZE;
466
467 /*
468 * Allocate physical memory for the page structures, and map it.
469 */
470 new_end = trunc_page(end - page_range * sizeof(struct vm_page));
471 mapped = pmap_map(&vaddr, new_end, end,
472 VM_PROT_READ | VM_PROT_WRITE);
473 vm_page_array = (vm_page_t) mapped;
474 #if VM_NRESERVLEVEL > 0
475 /*
476 * Allocate physical memory for the reservation management system's
477 * data structures, and map it.
478 */
479 if (high_avail == end)
480 high_avail = new_end;
481 new_end = vm_reserv_startup(&vaddr, new_end, high_avail);
482 #endif
483 #if defined(__amd64__) || defined(__mips__)
484 /*
485 * Include vm_page_array and vm_reserv_array in a crash dump.
486 */
487 for (pa = new_end; pa < end; pa += PAGE_SIZE)
488 dump_add_page(pa);
489 #endif
490 phys_avail[biggestone + 1] = new_end;
491
492 /*
493 * Add physical memory segments corresponding to the available
494 * physical pages.
495 */
496 for (i = 0; phys_avail[i + 1] != 0; i += 2)
497 vm_phys_add_seg(phys_avail[i], phys_avail[i + 1]);
498
499 /*
500 * Clear all of the page structures
501 */
502 bzero((caddr_t) vm_page_array, page_range * sizeof(struct vm_page));
503 for (i = 0; i < page_range; i++)
504 vm_page_array[i].order = VM_NFREEORDER;
505 vm_page_array_size = page_range;
506
507 /*
508 * Initialize the physical memory allocator.
509 */
510 vm_phys_init();
511
512 /*
513 * Add every available physical page that is not blacklisted to
514 * the free lists.
515 */
516 cnt.v_page_count = 0;
517 cnt.v_free_count = 0;
518 list = getenv("vm.blacklist");
519 for (i = 0; phys_avail[i + 1] != 0; i += 2) {
520 pa = phys_avail[i];
521 last_pa = phys_avail[i + 1];
522 while (pa < last_pa) {
523 if (list != NULL &&
524 vm_page_blacklist_lookup(list, pa))
525 printf("Skipping page with pa 0x%jx\n",
526 (uintmax_t)pa);
527 else
528 vm_phys_add_page(pa);
529 pa += PAGE_SIZE;
530 }
531 }
532 freeenv(list);
533 #if VM_NRESERVLEVEL > 0
534 /*
535 * Initialize the reservation management system.
536 */
537 vm_reserv_init();
538 #endif
539 return (vaddr);
540 }
541
542 void
543 vm_page_reference(vm_page_t m)
544 {
545
546 vm_page_aflag_set(m, PGA_REFERENCED);
547 }
548
549 /*
550 * vm_page_busy_downgrade:
551 *
552 * Downgrade an exclusive busy page into a single shared busy page.
553 */
554 void
555 vm_page_busy_downgrade(vm_page_t m)
556 {
557 u_int x;
558 bool locked;
559
560 vm_page_assert_xbusied(m);
561 locked = mtx_owned(vm_page_lockptr(m));
562
563 for (;;) {
564 x = m->busy_lock;
565 x &= VPB_BIT_WAITERS;
566 if (x != 0 && !locked)
567 vm_page_lock(m);
568 if (atomic_cmpset_rel_int(&m->busy_lock,
569 VPB_SINGLE_EXCLUSIVER | x, VPB_SHARERS_WORD(1)))
570 break;
571 if (x != 0 && !locked)
572 vm_page_unlock(m);
573 }
574 if (x != 0) {
575 wakeup(m);
576 if (!locked)
577 vm_page_unlock(m);
578 }
579 }
580
581 /*
582 * vm_page_sbusied:
583 *
584 * Return a positive value if the page is shared busied, 0 otherwise.
585 */
586 int
587 vm_page_sbusied(vm_page_t m)
588 {
589 u_int x;
590
591 x = m->busy_lock;
592 return ((x & VPB_BIT_SHARED) != 0 && x != VPB_UNBUSIED);
593 }
594
595 /*
596 * vm_page_sunbusy:
597 *
598 * Shared unbusy a page.
599 */
600 void
601 vm_page_sunbusy(vm_page_t m)
602 {
603 u_int x;
604
605 vm_page_assert_sbusied(m);
606
607 for (;;) {
608 x = m->busy_lock;
609 if (VPB_SHARERS(x) > 1) {
610 if (atomic_cmpset_int(&m->busy_lock, x,
611 x - VPB_ONE_SHARER))
612 break;
613 continue;
614 }
615 if ((x & VPB_BIT_WAITERS) == 0) {
616 KASSERT(x == VPB_SHARERS_WORD(1),
617 ("vm_page_sunbusy: invalid lock state"));
618 if (atomic_cmpset_int(&m->busy_lock,
619 VPB_SHARERS_WORD(1), VPB_UNBUSIED))
620 break;
621 continue;
622 }
623 KASSERT(x == (VPB_SHARERS_WORD(1) | VPB_BIT_WAITERS),
624 ("vm_page_sunbusy: invalid lock state for waiters"));
625
626 vm_page_lock(m);
627 if (!atomic_cmpset_int(&m->busy_lock, x, VPB_UNBUSIED)) {
628 vm_page_unlock(m);
629 continue;
630 }
631 wakeup(m);
632 vm_page_unlock(m);
633 break;
634 }
635 }
636
637 /*
638 * vm_page_busy_sleep:
639 *
640 * Sleep and release the page lock, using the page pointer as wchan.
641 * This is used to implement the hard path of the busying mechanism.
642 *
643 * The given page must be locked.
644 *
645 * If nonshared is true, sleep only if the page is xbusy.
646 */
647 void
648 vm_page_busy_sleep(vm_page_t m, const char *wmesg, bool nonshared)
649 {
650 u_int x;
651
652 vm_page_assert_locked(m);
653
654 x = m->busy_lock;
655 if (x == VPB_UNBUSIED || (nonshared && (x & VPB_BIT_SHARED) != 0) ||
656 ((x & VPB_BIT_WAITERS) == 0 &&
657 !atomic_cmpset_int(&m->busy_lock, x, x | VPB_BIT_WAITERS))) {
658 vm_page_unlock(m);
659 return;
660 }
661 msleep(m, vm_page_lockptr(m), PVM | PDROP, wmesg, 0);
662 }
663
664 /*
665 * vm_page_trysbusy:
666 *
667 * Try to shared busy a page.
668 * Returns 1 if the operation succeeds, 0 otherwise.
669 * The operation never sleeps.
670 */
671 int
672 vm_page_trysbusy(vm_page_t m)
673 {
674 u_int x;
675
676 for (;;) {
677 x = m->busy_lock;
678 if ((x & VPB_BIT_SHARED) == 0)
679 return (0);
680 if (atomic_cmpset_acq_int(&m->busy_lock, x, x + VPB_ONE_SHARER))
681 return (1);
682 }
683 }
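
/*
 * A hedged consumer sketch combining the busying primitives above; it
 * mirrors the shape of vm_page_sleep_if_busy() below. The function name
 * and the wait message are hypothetical.
 */
static void
example_sbusy_wait(vm_object_t object, vm_pindex_t pindex)
{
	vm_page_t m;

	VM_OBJECT_WLOCK(object);
	while ((m = vm_page_lookup(object, pindex)) != NULL &&
	    !vm_page_trysbusy(m)) {
		/*
		 * Take the page lock before sleeping and drop the object
		 * lock so that the busy holder can make progress;
		 * vm_page_busy_sleep() releases the page lock itself.
		 */
		vm_page_lock(m);
		VM_OBJECT_WUNLOCK(object);
		vm_page_busy_sleep(m, "exsbwt", false);
		VM_OBJECT_WLOCK(object);
	}
	if (m != NULL) {
		/* ... examine the shared busied page ... */
		vm_page_sunbusy(m);
	}
	VM_OBJECT_WUNLOCK(object);
}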
684
685 /*
686 * vm_page_xunbusy_hard:
687 *
688 * Called when the first attempt to exclusively unbusy a page fails.
689 * It is assumed that the waiters bit is on.
690 */
691 void
692 vm_page_xunbusy_hard(vm_page_t m)
693 {
694
695 vm_page_assert_xbusied(m);
696
697 vm_page_lock(m);
698 atomic_store_rel_int(&m->busy_lock, VPB_UNBUSIED);
699 wakeup(m);
700 vm_page_unlock(m);
701 }
702
703 /*
704 * vm_page_flash:
705 *
706 * Wakeup anyone waiting for the page.
707 * The ownership bits do not change.
708 *
709 * The given page must be locked.
710 */
711 void
712 vm_page_flash(vm_page_t m)
713 {
714 u_int x;
715
716 vm_page_lock_assert(m, MA_OWNED);
717
718 for (;;) {
719 x = m->busy_lock;
720 if ((x & VPB_BIT_WAITERS) == 0)
721 return;
722 if (atomic_cmpset_int(&m->busy_lock, x,
723 x & (~VPB_BIT_WAITERS)))
724 break;
725 }
726 wakeup(m);
727 }
728
729 /*
730 * Keep a page from being freed by the page daemon. This has much the
731 * same effect as wiring, except at much lower overhead, and it should
732 * be used only for *very* temporary holding
733 * ("wiring").
734 */
735 void
736 vm_page_hold(vm_page_t mem)
737 {
738
739 vm_page_lock_assert(mem, MA_OWNED);
740 mem->hold_count++;
741 }
742
743 void
744 vm_page_unhold(vm_page_t mem)
745 {
746
747 vm_page_lock_assert(mem, MA_OWNED);
748 KASSERT(mem->hold_count >= 1, ("vm_page_unhold: hold count < 0!!!"));
749 --mem->hold_count;
750 if (mem->hold_count == 0 && (mem->flags & PG_UNHOLDFREE) != 0)
751 vm_page_free_toq(mem);
752 }
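
/*
 * An illustrative (hypothetical) use of the hold protocol above: pin a
 * page across a short window in which its lock cannot be held. This
 * follows the same locking pattern as vm_page_unhold_pages() below.
 */
static void
example_hold_briefly(vm_page_t m)
{

	vm_page_lock(m);
	vm_page_hold(m);	/* prevent the page from being freed */
	vm_page_unlock(m);

	/* ... briefly use the page without its lock held ... */

	vm_page_lock(m);
	vm_page_unhold(m);	/* frees the page if PG_UNHOLDFREE is set */
	vm_page_unlock(m);
}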
753
754 /*
755 * vm_page_unhold_pages:
756 *
757 * Unhold each of the pages that is referenced by the given array.
758 */
759 void
760 vm_page_unhold_pages(vm_page_t *ma, int count)
761 {
762 struct mtx *mtx, *new_mtx;
763
764 mtx = NULL;
765 for (; count != 0; count--) {
766 /*
767 * Avoid releasing and reacquiring the same page lock.
768 */
769 new_mtx = vm_page_lockptr(*ma);
770 if (mtx != new_mtx) {
771 if (mtx != NULL)
772 mtx_unlock(mtx);
773 mtx = new_mtx;
774 mtx_lock(mtx);
775 }
776 vm_page_unhold(*ma);
777 ma++;
778 }
779 if (mtx != NULL)
780 mtx_unlock(mtx);
781 }
782
783 vm_page_t
784 PHYS_TO_VM_PAGE(vm_paddr_t pa)
785 {
786 vm_page_t m;
787
788 #ifdef VM_PHYSSEG_SPARSE
789 m = vm_phys_paddr_to_vm_page(pa);
790 if (m == NULL)
791 m = vm_phys_fictitious_to_vm_page(pa);
792 return (m);
793 #elif defined(VM_PHYSSEG_DENSE)
794 long pi;
795
796 pi = atop(pa);
797 if (pi >= first_page && (pi - first_page) < vm_page_array_size) {
798 m = &vm_page_array[pi - first_page];
799 return (m);
800 }
801 return (vm_phys_fictitious_to_vm_page(pa));
802 #else
803 #error "Either VM_PHYSSEG_DENSE or VM_PHYSSEG_SPARSE must be defined."
804 #endif
805 }
806
807 /*
808 * vm_page_getfake:
809 *
810 * Create a fictitious page with the specified physical address and
811 * memory attribute. The memory attribute is the only machine-
812 * dependent aspect of a fictitious page that must be initialized.
813 */
814 vm_page_t
815 vm_page_getfake(vm_paddr_t paddr, vm_memattr_t memattr)
816 {
817 vm_page_t m;
818
819 m = uma_zalloc(fakepg_zone, M_WAITOK | M_ZERO);
820 vm_page_initfake(m, paddr, memattr);
821 return (m);
822 }
823
824 void
825 vm_page_initfake(vm_page_t m, vm_paddr_t paddr, vm_memattr_t memattr)
826 {
827
828 if ((m->flags & PG_FICTITIOUS) != 0) {
829 /*
830 * The page's memattr might have changed since the
831 * previous initialization. Update the pmap to the
832 * new memattr.
833 */
834 goto memattr;
835 }
836 m->phys_addr = paddr;
837 m->queue = PQ_NONE;
838 /* Fictitious pages don't use "segind". */
839 m->flags = PG_FICTITIOUS;
840 /* Fictitious pages don't use "order" or "pool". */
841 m->oflags = VPO_UNMANAGED;
842 m->busy_lock = VPB_SINGLE_EXCLUSIVER;
843 m->wire_count = 1;
844 pmap_page_init(m);
845 memattr:
846 pmap_page_set_memattr(m, memattr);
847 }
848
849 /*
850 * vm_page_putfake:
851 *
852 * Release a fictitious page.
853 */
854 void
855 vm_page_putfake(vm_page_t m)
856 {
857
858 KASSERT((m->oflags & VPO_UNMANAGED) != 0, ("managed %p", m));
859 KASSERT((m->flags & PG_FICTITIOUS) != 0,
860 ("vm_page_putfake: bad page %p", m));
861 uma_zfree(fakepg_zone, m);
862 }
863
864 /*
865 * vm_page_updatefake:
866 *
867 * Update the given fictitious page to the specified physical address and
868 * memory attribute.
869 */
870 void
871 vm_page_updatefake(vm_page_t m, vm_paddr_t paddr, vm_memattr_t memattr)
872 {
873
874 KASSERT((m->flags & PG_FICTITIOUS) != 0,
875 ("vm_page_updatefake: bad page %p", m));
876 m->phys_addr = paddr;
877 pmap_page_set_memattr(m, memattr);
878 }
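
/*
 * A hedged sketch of the fictitious page lifecycle, in the style of a
 * device pager exposing a hardware region; the physical address and the
 * memory attribute are made up for the example.
 */
static void
example_fictitious_lifecycle(void)
{
	vm_page_t m;

	m = vm_page_getfake((vm_paddr_t)0xd0000000, VM_MEMATTR_UNCACHEABLE);
	/* ... hand "m" to a pager, insert it into an object, etc. ... */
	vm_page_putfake(m);
}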
879
880 /*
881 * vm_page_free:
882 *
883 * Free a page.
884 */
885 void
886 vm_page_free(vm_page_t m)
887 {
888
889 m->flags &= ~PG_ZERO;
890 vm_page_free_toq(m);
891 }
892
893 /*
894 * vm_page_free_zero:
895 *
896 * Free a page to the zeroed-pages queue
897 */
898 void
899 vm_page_free_zero(vm_page_t m)
900 {
901
902 m->flags |= PG_ZERO;
903 vm_page_free_toq(m);
904 }
905
906 /*
907 * Unbusy and handle the page queueing for a page from the VOP_GETPAGES()
908 * array that is not the requested page.
909 */
910 void
911 vm_page_readahead_finish(vm_page_t m)
912 {
913
914 if (m->valid != 0) {
915 /*
916 * Since the page is not the requested page, whether
917 * it should be activated or deactivated is not
918 * obvious. Empirical results have shown that
919 * deactivating the page is usually the best choice,
920 * unless the page is wanted by another thread.
921 */
922 vm_page_lock(m);
923 if ((m->busy_lock & VPB_BIT_WAITERS) != 0)
924 vm_page_activate(m);
925 else
926 vm_page_deactivate(m);
927 vm_page_unlock(m);
928 vm_page_xunbusy(m);
929 } else {
930 /*
931 * Free the completely invalid page. Such a page state
932 * occurs due to a short read operation that did not
933 * cover our page at all, or when a read error
934 * happens.
935 */
936 vm_page_lock(m);
937 vm_page_free(m);
938 vm_page_unlock(m);
939 }
940 }
941
942 /*
943 * vm_page_sleep_if_busy:
944 *
945 * Sleep and release the page queues lock if the page is busied.
946 * Returns TRUE if the thread slept.
947 *
948 * The given page must be unlocked and the object containing it must
949 * be locked.
950 */
951 int
952 vm_page_sleep_if_busy(vm_page_t m, const char *msg)
953 {
954 vm_object_t obj;
955
956 vm_page_lock_assert(m, MA_NOTOWNED);
957 VM_OBJECT_ASSERT_WLOCKED(m->object);
958
959 if (vm_page_busied(m)) {
960 /*
961 * The page-specific object must be cached because page
962 * identity can change during the sleep, which could cause
963 * us to re-lock a different object.
964 * It is assumed that a reference to the object is already
965 * held by the caller.
966 */
967 obj = m->object;
968 vm_page_lock(m);
969 VM_OBJECT_WUNLOCK(obj);
970 vm_page_busy_sleep(m, msg, false);
971 VM_OBJECT_WLOCK(obj);
972 return (TRUE);
973 }
974 return (FALSE);
975 }
976
977 /*
978 * vm_page_dirty_KBI: [ internal use only ]
979 *
980 * Set all bits in the page's dirty field.
981 *
982 * The object containing the specified page must be locked if the
983 * call is made from the machine-independent layer.
984 *
985 * See vm_page_clear_dirty_mask().
986 *
987 * This function should only be called by vm_page_dirty().
988 */
989 void
990 vm_page_dirty_KBI(vm_page_t m)
991 {
992
993 /* These assertions refer to this operation by its public name. */
994 KASSERT((m->flags & PG_CACHED) == 0,
995 ("vm_page_dirty: page in cache!"));
996 KASSERT(!VM_PAGE_IS_FREE(m),
997 ("vm_page_dirty: page is free!"));
998 KASSERT(m->valid == VM_PAGE_BITS_ALL,
999 ("vm_page_dirty: page is invalid!"));
1000 m->dirty = VM_PAGE_BITS_ALL;
1001 }
1002
1003 /*
1004 * vm_page_insert: [ internal use only ]
1005 *
1006 * Inserts the given mem entry into the object and object list.
1007 *
1008 * The object must be locked.
1009 */
1010 int
1011 vm_page_insert(vm_page_t m, vm_object_t object, vm_pindex_t pindex)
1012 {
1013 vm_page_t mpred;
1014
1015 VM_OBJECT_ASSERT_WLOCKED(object);
1016 mpred = vm_radix_lookup_le(&object->rtree, pindex);
1017 return (vm_page_insert_after(m, object, pindex, mpred));
1018 }
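
/*
 * Note that insertion can fail with a nonzero return value if the radix
 * trie cannot allocate a node. A sketch of caller-side handling, patterned
 * after the recovery in vm_page_alloc() below ("obj" is an assumed locked
 * object and "m" a freshly allocated page):
 *
 *	if (vm_page_insert(m, obj, pindex) != 0) {
 *		m->oflags = VPO_UNMANAGED;
 *		m->busy_lock = VPB_UNBUSIED;
 *		vm_page_free(m);
 *		return (ENOMEM);
 *	}
 */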
1019
1020 /*
1021 * vm_page_insert_after:
1022 *
1023 * Inserts the page "m" into the specified object at offset "pindex".
1024 *
1025 * The page "mpred" must immediately precede the offset "pindex" within
1026 * the specified object.
1027 *
1028 * The object must be locked.
1029 */
1030 static int
1031 vm_page_insert_after(vm_page_t m, vm_object_t object, vm_pindex_t pindex,
1032 vm_page_t mpred)
1033 {
1034 vm_page_t msucc;
1035
1036 VM_OBJECT_ASSERT_WLOCKED(object);
1037 KASSERT(m->object == NULL,
1038 ("vm_page_insert_after: page already inserted"));
1039 if (mpred != NULL) {
1040 KASSERT(mpred->object == object,
1041 ("vm_page_insert_after: object doesn't contain mpred"));
1042 KASSERT(mpred->pindex < pindex,
1043 ("vm_page_insert_after: mpred doesn't precede pindex"));
1044 msucc = TAILQ_NEXT(mpred, listq);
1045 } else
1046 msucc = TAILQ_FIRST(&object->memq);
1047 if (msucc != NULL)
1048 KASSERT(msucc->pindex > pindex,
1049 ("vm_page_insert_after: msucc doesn't succeed pindex"));
1050
1051 /*
1052 * Record the object/offset pair in this page
1053 */
1054 m->object = object;
1055 m->pindex = pindex;
1056
1057 /*
1058 * Now link into the object's ordered list of backed pages.
1059 */
1060 if (vm_radix_insert(&object->rtree, m)) {
1061 m->object = NULL;
1062 m->pindex = 0;
1063 return (1);
1064 }
1065 vm_page_insert_radixdone(m, object, mpred);
1066 return (0);
1067 }
1068
1069 /*
1070 * vm_page_insert_radixdone:
1071 *
1072 * Complete page "m" insertion into the specified object after the
1073 * radix trie hooking.
1074 *
1075 * The page "mpred" must precede the offset "m->pindex" within the
1076 * specified object.
1077 *
1078 * The object must be locked.
1079 */
1080 static void
1081 vm_page_insert_radixdone(vm_page_t m, vm_object_t object, vm_page_t mpred)
1082 {
1083
1084 VM_OBJECT_ASSERT_WLOCKED(object);
1085 KASSERT(object != NULL && m->object == object,
1086 ("vm_page_insert_radixdone: page %p has inconsistent object", m));
1087 if (mpred != NULL) {
1088 KASSERT(mpred->object == object,
1089 ("vm_page_insert_after: object doesn't contain mpred"));
1090 KASSERT(mpred->pindex < m->pindex,
1091 ("vm_page_insert_after: mpred doesn't precede pindex"));
1092 }
1093
1094 if (mpred != NULL)
1095 TAILQ_INSERT_AFTER(&object->memq, mpred, m, listq);
1096 else
1097 TAILQ_INSERT_HEAD(&object->memq, m, listq);
1098
1099 /*
1100 * Show that the object has one more resident page.
1101 */
1102 object->resident_page_count++;
1103
1104 /*
1105 * Hold the vnode until the last page is released.
1106 */
1107 if (object->resident_page_count == 1 && object->type == OBJT_VNODE)
1108 vhold(object->handle);
1109
1110 /*
1111 * Since we are inserting a new and possibly dirty page,
1112 * update the object's OBJ_MIGHTBEDIRTY flag.
1113 */
1114 if (pmap_page_is_write_mapped(m))
1115 vm_object_set_writeable_dirty(object);
1116 }
1117
1118 /*
1119 * vm_page_remove:
1120 *
1121 * Removes the given mem entry from the object/offset-page
1122 * table and the object page list, but does not invalidate/terminate
1123 * the backing store.
1124 *
1125 * The object must be locked. The page must be locked if it is managed.
1126 */
1127 void
1128 vm_page_remove(vm_page_t m)
1129 {
1130 vm_object_t object;
1131 boolean_t lockacq;
1132
1133 if ((m->oflags & VPO_UNMANAGED) == 0)
1134 vm_page_lock_assert(m, MA_OWNED);
1135 if ((object = m->object) == NULL)
1136 return;
1137 VM_OBJECT_ASSERT_WLOCKED(object);
1138 if (vm_page_xbusied(m)) {
1139 lockacq = FALSE;
1140 if ((m->oflags & VPO_UNMANAGED) != 0 &&
1141 !mtx_owned(vm_page_lockptr(m))) {
1142 lockacq = TRUE;
1143 vm_page_lock(m);
1144 }
1145 vm_page_flash(m);
1146 atomic_store_rel_int(&m->busy_lock, VPB_UNBUSIED);
1147 if (lockacq)
1148 vm_page_unlock(m);
1149 }
1150
1151 /*
1152 * Now remove from the object's list of backed pages.
1153 */
1154 vm_radix_remove(&object->rtree, m->pindex);
1155 TAILQ_REMOVE(&object->memq, m, listq);
1156
1157 /*
1158 * And show that the object has one fewer resident page.
1159 */
1160 object->resident_page_count--;
1161
1162 /*
1163 * The vnode may now be recycled.
1164 */
1165 if (object->resident_page_count == 0 && object->type == OBJT_VNODE)
1166 vdrop(object->handle);
1167
1168 m->object = NULL;
1169 }
1170
1171 /*
1172 * vm_page_lookup:
1173 *
1174 * Returns the page associated with the object/offset
1175 * pair specified; if none is found, NULL is returned.
1176 *
1177 * The object must be locked.
1178 */
1179 vm_page_t
1180 vm_page_lookup(vm_object_t object, vm_pindex_t pindex)
1181 {
1182
1183 VM_OBJECT_ASSERT_LOCKED(object);
1184 return (vm_radix_lookup(&object->rtree, pindex));
1185 }
1186
1187 /*
1188 * vm_page_find_least:
1189 *
1190 * Returns the page associated with the object with least pindex
1191 * greater than or equal to the parameter pindex, or NULL.
1192 *
1193 * The object must be locked.
1194 */
1195 vm_page_t
1196 vm_page_find_least(vm_object_t object, vm_pindex_t pindex)
1197 {
1198 vm_page_t m;
1199
1200 VM_OBJECT_ASSERT_LOCKED(object);
1201 if ((m = TAILQ_FIRST(&object->memq)) != NULL && m->pindex < pindex)
1202 m = vm_radix_lookup_ge(&object->rtree, pindex);
1203 return (m);
1204 }
1205
1206 /*
1207 * Returns the given page's successor (by pindex) within the object if it is
1208 * resident; if none is found, NULL is returned.
1209 *
1210 * The object must be locked.
1211 */
1212 vm_page_t
1213 vm_page_next(vm_page_t m)
1214 {
1215 vm_page_t next;
1216
1217 VM_OBJECT_ASSERT_WLOCKED(m->object);
1218 if ((next = TAILQ_NEXT(m, listq)) != NULL) {
1219 MPASS(next->object == m->object);
1220 if (next->pindex != m->pindex + 1)
1221 next = NULL;
1222 }
1223 return (next);
1224 }
1225
1226 /*
1227 * Returns the given page's predecessor (by pindex) within the object if it is
1228 * resident; if none is found, NULL is returned.
1229 *
1230 * The object must be locked.
1231 */
1232 vm_page_t
1233 vm_page_prev(vm_page_t m)
1234 {
1235 vm_page_t prev;
1236
1237 VM_OBJECT_ASSERT_WLOCKED(m->object);
1238 if ((prev = TAILQ_PREV(m, pglist, listq)) != NULL) {
1239 MPASS(prev->object == m->object);
1240 if (prev->pindex != m->pindex - 1)
1241 prev = NULL;
1242 }
1243 return (prev);
1244 }
1245
1246 /*
1247 * Uses the page mnew as a replacement for an existing page at index
1248 * pindex which must be already present in the object.
1249 *
1250 * The existing page must not be on a paging queue.
1251 */
1252 vm_page_t
1253 vm_page_replace(vm_page_t mnew, vm_object_t object, vm_pindex_t pindex)
1254 {
1255 vm_page_t mold, mpred;
1256
1257 VM_OBJECT_ASSERT_WLOCKED(object);
1258
1259 /*
1260 * This function mostly follows vm_page_insert() and
1261 * vm_page_remove() without the radix, object count and vnode
1262 * dance. See those functions for further comments.
1263 */
1264 mpred = vm_radix_lookup(&object->rtree, pindex);
1265 KASSERT(mpred != NULL,
1266 ("vm_page_replace: replacing page not present with pindex"));
1267 mpred = TAILQ_PREV(mpred, respgs, listq);
1268 if (mpred != NULL)
1269 KASSERT(mpred->pindex < pindex,
1270 ("vm_page_insert_after: mpred doesn't precede pindex"));
1271
1272 mnew->object = object;
1273 mnew->pindex = pindex;
1274 mold = vm_radix_replace(&object->rtree, mnew);
1275 KASSERT(mold->queue == PQ_NONE,
1276 ("vm_page_replace: mold is on a paging queue"));
1277
1278 /* Detach the old page from the resident tailq. */
1279 TAILQ_REMOVE(&object->memq, mold, listq);
1280
1281 mold->object = NULL;
1282 vm_page_xunbusy(mold);
1283
1284 /* Insert the new page in the resident tailq. */
1285 if (mpred != NULL)
1286 TAILQ_INSERT_AFTER(&object->memq, mpred, mnew, listq);
1287 else
1288 TAILQ_INSERT_HEAD(&object->memq, mnew, listq);
1289 if (pmap_page_is_write_mapped(mnew))
1290 vm_object_set_writeable_dirty(object);
1291 return (mold);
1292 }
1293
1294 /*
1295 * vm_page_rename:
1296 *
1297 * Move the given memory entry from its
1298 * current object to the specified target object/offset.
1299 *
1300 * Note: swap associated with the page must be invalidated by the move. We
1301 * have to do this for several reasons: (1) we aren't freeing the
1302 * page, (2) we are dirtying the page, (3) the VM system is probably
1303 * moving the page from object A to B, and will then later move
1304 * the backing store from A to B and we can't have a conflict.
1305 *
1306 * Note: we *always* dirty the page. It is necessary both for the
1307 * fact that we moved it, and because we may be invalidating
1308 * swap. If the page is on the cache, we have to deactivate it
1309 * or vm_page_dirty() will panic. Dirty pages are not allowed
1310 * on the cache.
1311 *
1312 * The objects must be locked.
1313 */
1314 int
1315 vm_page_rename(vm_page_t m, vm_object_t new_object, vm_pindex_t new_pindex)
1316 {
1317 vm_page_t mpred;
1318 vm_pindex_t opidx;
1319
1320 VM_OBJECT_ASSERT_WLOCKED(new_object);
1321
1322 mpred = vm_radix_lookup_le(&new_object->rtree, new_pindex);
1323 KASSERT(mpred == NULL || mpred->pindex != new_pindex,
1324 ("vm_page_rename: pindex already renamed"));
1325
1326 /*
1327 * Create a custom version of vm_page_insert() that does not depend
1328 * on mpred and can cheat on the implementation aspects of the
1329 * function.
1330 */
1331 opidx = m->pindex;
1332 m->pindex = new_pindex;
1333 if (vm_radix_insert(&new_object->rtree, m)) {
1334 m->pindex = opidx;
1335 return (1);
1336 }
1337
1338 /*
1339 * The operation cannot fail anymore. The removal must happen before
1340 * the listq iterator is tainted.
1341 */
1342 m->pindex = opidx;
1343 vm_page_lock(m);
1344 vm_page_remove(m);
1345
1346 /* Return back to the new pindex to complete vm_page_insert(). */
1347 m->pindex = new_pindex;
1348 m->object = new_object;
1349 vm_page_unlock(m);
1350 vm_page_insert_radixdone(m, new_object, mpred);
1351 vm_page_dirty(m);
1352 return (0);
1353 }
1354
1355 /*
1356 * Convert all of the given object's cached pages that have a
1357 * pindex within the given range into free pages. If the value
1358 * zero is given for "end", then the range's upper bound is
1359 * infinity. If the given object is backed by a vnode and it
1360 * transitions from having one or more cached pages to none, the
1361 * vnode's hold count is reduced.
1362 */
1363 void
1364 vm_page_cache_free(vm_object_t object, vm_pindex_t start, vm_pindex_t end)
1365 {
1366 vm_page_t m;
1367 boolean_t empty;
1368
1369 mtx_lock(&vm_page_queue_free_mtx);
1370 if (__predict_false(vm_radix_is_empty(&object->cache))) {
1371 mtx_unlock(&vm_page_queue_free_mtx);
1372 return;
1373 }
1374 while ((m = vm_radix_lookup_ge(&object->cache, start)) != NULL) {
1375 if (end != 0 && m->pindex >= end)
1376 break;
1377 vm_radix_remove(&object->cache, m->pindex);
1378 vm_page_cache_turn_free(m);
1379 }
1380 empty = vm_radix_is_empty(&object->cache);
1381 mtx_unlock(&vm_page_queue_free_mtx);
1382 if (object->type == OBJT_VNODE && empty)
1383 vdrop(object->handle);
1384 }
1385
1386 /*
1387 * Returns the cached page that is associated with the given
1388 * object and offset. If, however, none exists, returns NULL.
1389 *
1390 * The free page queue must be locked.
1391 */
1392 static inline vm_page_t
1393 vm_page_cache_lookup(vm_object_t object, vm_pindex_t pindex)
1394 {
1395
1396 mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
1397 return (vm_radix_lookup(&object->cache, pindex));
1398 }
1399
1400 /*
1401 * Remove the given cached page from its containing object's
1402 * collection of cached pages.
1403 *
1404 * The free page queue must be locked.
1405 */
1406 static void
1407 vm_page_cache_remove(vm_page_t m)
1408 {
1409
1410 mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
1411 KASSERT((m->flags & PG_CACHED) != 0,
1412 ("vm_page_cache_remove: page %p is not cached", m));
1413 vm_radix_remove(&m->object->cache, m->pindex);
1414 m->object = NULL;
1415 cnt.v_cache_count--;
1416 }
1417
1418 /*
1419 * Transfer all of the cached pages with offset greater than or
1420 * equal to 'offidxstart' from the original object's cache to the
1421 * new object's cache. However, any cached pages with offset
1422 * greater than or equal to the new object's size are kept in the
1423 * original object. Initially, the new object's cache must be
1424 * empty. Offset 'offidxstart' in the original object must
1425 * correspond to offset zero in the new object.
1426 *
1427 * The new object must be locked.
1428 */
1429 void
1430 vm_page_cache_transfer(vm_object_t orig_object, vm_pindex_t offidxstart,
1431 vm_object_t new_object)
1432 {
1433 vm_page_t m;
1434
1435 /*
1436 * Insertion into an object's collection of cached pages
1437 * requires the object to be locked. In contrast, removal does
1438 * not.
1439 */
1440 VM_OBJECT_ASSERT_WLOCKED(new_object);
1441 KASSERT(vm_radix_is_empty(&new_object->cache),
1442 ("vm_page_cache_transfer: object %p has cached pages",
1443 new_object));
1444 mtx_lock(&vm_page_queue_free_mtx);
1445 while ((m = vm_radix_lookup_ge(&orig_object->cache,
1446 offidxstart)) != NULL) {
1447 /*
1448 * Transfer all of the pages with offset greater than or
1449 * equal to 'offidxstart' from the original object's
1450 * cache to the new object's cache.
1451 */
1452 if ((m->pindex - offidxstart) >= new_object->size)
1453 break;
1454 vm_radix_remove(&orig_object->cache, m->pindex);
1455 /* Update the page's object and offset. */
1456 m->object = new_object;
1457 m->pindex -= offidxstart;
1458 if (vm_radix_insert(&new_object->cache, m))
1459 vm_page_cache_turn_free(m);
1460 }
1461 mtx_unlock(&vm_page_queue_free_mtx);
1462 }
1463
1464 /*
1465 * Returns TRUE if a cached page is associated with the given object and
1466 * offset, and FALSE otherwise.
1467 *
1468 * The object must be locked.
1469 */
1470 boolean_t
1471 vm_page_is_cached(vm_object_t object, vm_pindex_t pindex)
1472 {
1473 vm_page_t m;
1474
1475 /*
1476 * Insertion into an object's collection of cached pages requires the
1477 * object to be locked. Therefore, if the object is locked and the
1478 * object's collection is empty, there is no need to acquire the free
1479 * page queues lock in order to prove that the specified page doesn't
1480 * exist.
1481 */
1482 VM_OBJECT_ASSERT_WLOCKED(object);
1483 if (__predict_true(vm_object_cache_is_empty(object)))
1484 return (FALSE);
1485 mtx_lock(&vm_page_queue_free_mtx);
1486 m = vm_page_cache_lookup(object, pindex);
1487 mtx_unlock(&vm_page_queue_free_mtx);
1488 return (m != NULL);
1489 }
1490
1491 /*
1492 * vm_page_alloc:
1493 *
1494 * Allocate and return a page that is associated with the specified
1495 * object and offset pair. By default, this page is exclusive busied.
1496 *
1497 * The caller must always specify an allocation class.
1498 *
1499 * allocation classes:
1500 * VM_ALLOC_NORMAL normal process request
1501 * VM_ALLOC_SYSTEM system *really* needs a page
1502 * VM_ALLOC_INTERRUPT interrupt time request
1503 *
1504 * optional allocation flags:
1505 * VM_ALLOC_COUNT(number) the number of additional pages that the caller
1506 * intends to allocate
1507 * VM_ALLOC_IFCACHED return page only if it is cached
1508 * VM_ALLOC_IFNOTCACHED return NULL, do not reactivate if the page
1509 * is cached
1510 * VM_ALLOC_NOBUSY do not exclusive busy the page
1511 * VM_ALLOC_NODUMP do not include the page in a kernel core dump
1512 * VM_ALLOC_NOOBJ page is not associated with an object and
1513 * should not be exclusive busy
1514 * VM_ALLOC_SBUSY shared busy the allocated page
1515 * VM_ALLOC_WIRED wire the allocated page
1516 * VM_ALLOC_ZERO prefer a zeroed page
1517 *
1518 * This routine may not sleep.
1519 */
1520 vm_page_t
1521 vm_page_alloc(vm_object_t object, vm_pindex_t pindex, int req)
1522 {
1523 struct vnode *vp = NULL;
1524 vm_object_t m_object;
1525 vm_page_t m, mpred;
1526 int flags, req_class;
1527
1528 mpred = 0; /* XXX: pacify gcc */
1529 KASSERT((object != NULL) == ((req & VM_ALLOC_NOOBJ) == 0) &&
1530 (object != NULL || (req & VM_ALLOC_SBUSY) == 0) &&
1531 ((req & (VM_ALLOC_NOBUSY | VM_ALLOC_SBUSY)) !=
1532 (VM_ALLOC_NOBUSY | VM_ALLOC_SBUSY)),
1533 ("vm_page_alloc: inconsistent object(%p)/req(%x)", (void *)object,
1534 req));
1535 if (object != NULL)
1536 VM_OBJECT_ASSERT_WLOCKED(object);
1537
1538 req_class = req & VM_ALLOC_CLASS_MASK;
1539
1540 /*
1541 * The page daemon is allowed to dig deeper into the free page list.
1542 */
1543 if (curproc == pageproc && req_class != VM_ALLOC_INTERRUPT)
1544 req_class = VM_ALLOC_SYSTEM;
1545
1546 if (object != NULL) {
1547 mpred = vm_radix_lookup_le(&object->rtree, pindex);
1548 KASSERT(mpred == NULL || mpred->pindex != pindex,
1549 ("vm_page_alloc: pindex already allocated"));
1550 }
1551
1552 /*
1553 * The page allocation request can come from consumers that already
1554 * hold the free page queue mutex, like vm_page_insert() in
1555 * vm_page_cache().
1556 */
1557 mtx_lock_flags(&vm_page_queue_free_mtx, MTX_RECURSE);
1558 if (cnt.v_free_count + cnt.v_cache_count > cnt.v_free_reserved ||
1559 (req_class == VM_ALLOC_SYSTEM &&
1560 cnt.v_free_count + cnt.v_cache_count > cnt.v_interrupt_free_min) ||
1561 (req_class == VM_ALLOC_INTERRUPT &&
1562 cnt.v_free_count + cnt.v_cache_count > 0)) {
1563 /*
1564 * Allocate from the free queue if the number of free pages
1565 * exceeds the minimum for the request class.
1566 */
1567 if (object != NULL &&
1568 (m = vm_page_cache_lookup(object, pindex)) != NULL) {
1569 if ((req & VM_ALLOC_IFNOTCACHED) != 0) {
1570 mtx_unlock(&vm_page_queue_free_mtx);
1571 return (NULL);
1572 }
1573 if (vm_phys_unfree_page(m))
1574 vm_phys_set_pool(VM_FREEPOOL_DEFAULT, m, 0);
1575 #if VM_NRESERVLEVEL > 0
1576 else if (!vm_reserv_reactivate_page(m))
1577 #else
1578 else
1579 #endif
1580 panic("vm_page_alloc: cache page %p is missing"
1581 " from the free queue", m);
1582 } else if ((req & VM_ALLOC_IFCACHED) != 0) {
1583 mtx_unlock(&vm_page_queue_free_mtx);
1584 return (NULL);
1585 #if VM_NRESERVLEVEL > 0
1586 } else if (object == NULL || (object->flags & (OBJ_COLORED |
1587 OBJ_FICTITIOUS)) != OBJ_COLORED || (m =
1588 vm_reserv_alloc_page(object, pindex, mpred)) == NULL) {
1589 #else
1590 } else {
1591 #endif
1592 m = vm_phys_alloc_pages(object != NULL ?
1593 VM_FREEPOOL_DEFAULT : VM_FREEPOOL_DIRECT, 0);
1594 #if VM_NRESERVLEVEL > 0
1595 if (m == NULL && vm_reserv_reclaim_inactive()) {
1596 m = vm_phys_alloc_pages(object != NULL ?
1597 VM_FREEPOOL_DEFAULT : VM_FREEPOOL_DIRECT,
1598 0);
1599 }
1600 #endif
1601 }
1602 } else {
1603 /*
1604 * Not allocatable, give up.
1605 */
1606 mtx_unlock(&vm_page_queue_free_mtx);
1607 atomic_add_int(&vm_pageout_deficit,
1608 max((u_int)req >> VM_ALLOC_COUNT_SHIFT, 1));
1609 pagedaemon_wakeup();
1610 return (NULL);
1611 }
1612
1613 /*
1614 * At this point we had better have found a good page.
1615 */
1616 KASSERT(m != NULL, ("vm_page_alloc: missing page"));
1617 KASSERT(m->queue == PQ_NONE,
1618 ("vm_page_alloc: page %p has unexpected queue %d", m, m->queue));
1619 KASSERT(m->wire_count == 0, ("vm_page_alloc: page %p is wired", m));
1620 KASSERT(m->hold_count == 0, ("vm_page_alloc: page %p is held", m));
1621 KASSERT(!vm_page_busied(m), ("vm_page_alloc: page %p is busy", m));
1622 KASSERT(m->dirty == 0, ("vm_page_alloc: page %p is dirty", m));
1623 KASSERT(pmap_page_get_memattr(m) == VM_MEMATTR_DEFAULT,
1624 ("vm_page_alloc: page %p has unexpected memattr %d", m,
1625 pmap_page_get_memattr(m)));
1626 if ((m->flags & PG_CACHED) != 0) {
1627 KASSERT((m->flags & PG_ZERO) == 0,
1628 ("vm_page_alloc: cached page %p is PG_ZERO", m));
1629 KASSERT(m->valid != 0,
1630 ("vm_page_alloc: cached page %p is invalid", m));
1631 if (m->object == object && m->pindex == pindex)
1632 cnt.v_reactivated++;
1633 else
1634 m->valid = 0;
1635 m_object = m->object;
1636 vm_page_cache_remove(m);
1637 if (m_object->type == OBJT_VNODE &&
1638 vm_object_cache_is_empty(m_object))
1639 vp = m_object->handle;
1640 } else {
1641 KASSERT(VM_PAGE_IS_FREE(m),
1642 ("vm_page_alloc: page %p is not free", m));
1643 KASSERT(m->valid == 0,
1644 ("vm_page_alloc: free page %p is valid", m));
1645 vm_phys_freecnt_adj(m, -1);
1646 }
1647
1648 /*
1649 * Only the PG_ZERO flag is inherited. The PG_CACHED or PG_FREE flag
1650 * must be cleared before the free page queues lock is released.
1651 */
1652 flags = 0;
1653 if (m->flags & PG_ZERO) {
1654 vm_page_zero_count--;
1655 if (req & VM_ALLOC_ZERO)
1656 flags = PG_ZERO;
1657 }
1658 if (req & VM_ALLOC_NODUMP)
1659 flags |= PG_NODUMP;
1660 m->flags = flags;
1661 mtx_unlock(&vm_page_queue_free_mtx);
1662 m->aflags = 0;
1663 m->oflags = object == NULL || (object->flags & OBJ_UNMANAGED) != 0 ?
1664 VPO_UNMANAGED : 0;
1665 m->busy_lock = VPB_UNBUSIED;
1666 if ((req & (VM_ALLOC_NOBUSY | VM_ALLOC_NOOBJ | VM_ALLOC_SBUSY)) == 0)
1667 m->busy_lock = VPB_SINGLE_EXCLUSIVER;
1668 if ((req & VM_ALLOC_SBUSY) != 0)
1669 m->busy_lock = VPB_SHARERS_WORD(1);
1670 if (req & VM_ALLOC_WIRED) {
1671 /*
1672 * The page lock is not required for wiring a page until that
1673 * page is inserted into the object.
1674 */
1675 atomic_add_int(&cnt.v_wire_count, 1);
1676 m->wire_count = 1;
1677 }
1678 m->act_count = 0;
1679
1680 if (object != NULL) {
1681 if (vm_page_insert_after(m, object, pindex, mpred)) {
1682 /* See the comment below about hold count. */
1683 if (vp != NULL)
1684 vdrop(vp);
1685 pagedaemon_wakeup();
1686 if (req & VM_ALLOC_WIRED) {
1687 atomic_subtract_int(&cnt.v_wire_count, 1);
1688 m->wire_count = 0;
1689 }
1690 m->object = NULL;
1691 m->oflags = VPO_UNMANAGED;
1692 m->busy_lock = VPB_UNBUSIED;
1693 vm_page_free(m);
1694 return (NULL);
1695 }
1696
1697 /* Ignore device objects; the pager sets "memattr" for them. */
1698 if (object->memattr != VM_MEMATTR_DEFAULT &&
1699 (object->flags & OBJ_FICTITIOUS) == 0)
1700 pmap_page_set_memattr(m, object->memattr);
1701 } else
1702 m->pindex = pindex;
1703
1704 /*
1705 * The following call to vdrop() must come after the above call
1706 * to vm_page_insert() in case both affect the same object and
1707 * vnode. Otherwise, the affected vnode's hold count could
1708 * temporarily become zero.
1709 */
1710 if (vp != NULL)
1711 vdrop(vp);
1712
1713 /*
1714 * Don't wakeup too often - wakeup the pageout daemon when
1715 * we would be nearly out of memory.
1716 */
1717 if (vm_paging_needed())
1718 pagedaemon_wakeup();
1719
1720 return (m);
1721 }
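
/*
 * A hedged sketch of a common vm_page_alloc() calling pattern: request a
 * preferably zeroed, wired page, retry through VM_WAIT on shortage, and
 * zero the page only when PG_ZERO was not inherited. The function name is
 * made up; the page is returned wired and exclusive busied.
 */
static vm_page_t
example_alloc_zeroed(vm_object_t obj, vm_pindex_t pindex)
{
	vm_page_t m;

	VM_OBJECT_WLOCK(obj);
	while ((m = vm_page_alloc(obj, pindex, VM_ALLOC_NORMAL |
	    VM_ALLOC_WIRED | VM_ALLOC_ZERO)) == NULL) {
		VM_OBJECT_WUNLOCK(obj);
		VM_WAIT;		/* sleep until pages are freed */
		VM_OBJECT_WLOCK(obj);
	}
	if ((m->flags & PG_ZERO) == 0)
		pmap_zero_page(m);	/* VM_ALLOC_ZERO is only a preference */
	VM_OBJECT_WUNLOCK(obj);
	return (m);
}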
1722
1723 static void
1724 vm_page_alloc_contig_vdrop(struct spglist *lst)
1725 {
1726
1727 while (!SLIST_EMPTY(lst)) {
1728 vdrop((struct vnode *)SLIST_FIRST(lst)->plinks.s.pv);
1729 SLIST_REMOVE_HEAD(lst, plinks.s.ss);
1730 }
1731 }
1732
1733 /*
1734 * vm_page_alloc_contig:
1735 *
1736 * Allocate a contiguous set of physical pages of the given size "npages"
1737 * from the free lists. All of the physical pages must be at or above
1738 * the given physical address "low" and below the given physical address
1739 * "high". The given value "alignment" determines the alignment of the
1740 * first physical page in the set. If the given value "boundary" is
1741 * non-zero, then the set of physical pages cannot cross any physical
1742 * address boundary that is a multiple of that value. Both "alignment"
1743 * and "boundary" must be a power of two.
1744 *
1745 * If the specified memory attribute, "memattr", is VM_MEMATTR_DEFAULT,
1746 * then the memory attribute setting for the physical pages is configured
1747 * to the object's memory attribute setting. Otherwise, the memory
1748 * attribute setting for the physical pages is configured to "memattr",
1749 * overriding the object's memory attribute setting. However, if the
1750 * object's memory attribute setting is not VM_MEMATTR_DEFAULT, then the
1751 * memory attribute setting for the physical pages cannot be configured
1752 * to VM_MEMATTR_DEFAULT.
1753 *
1754 * The caller must always specify an allocation class.
1755 *
1756 * allocation classes:
1757 * VM_ALLOC_NORMAL normal process request
1758 * VM_ALLOC_SYSTEM system *really* needs a page
1759 * VM_ALLOC_INTERRUPT interrupt time request
1760 *
1761 * optional allocation flags:
1762 * VM_ALLOC_NOBUSY do not exclusive busy the page
1763 * VM_ALLOC_NODUMP do not include the page in a kernel core dump
1764 * VM_ALLOC_NOOBJ page is not associated with an object and
1765 * should not be exclusive busy
1766 * VM_ALLOC_SBUSY shared busy the allocated page
1767 * VM_ALLOC_WIRED wire the allocated page
1768 * VM_ALLOC_ZERO prefer a zeroed page
1769 *
1770 * This routine may not sleep.
1771 */
1772 vm_page_t
1773 vm_page_alloc_contig(vm_object_t object, vm_pindex_t pindex, int req,
1774 u_long npages, vm_paddr_t low, vm_paddr_t high, u_long alignment,
1775 vm_paddr_t boundary, vm_memattr_t memattr)
1776 {
1777 struct vnode *drop;
1778 struct spglist deferred_vdrop_list;
1779 vm_page_t m, m_tmp, m_ret;
1780 u_int flags, oflags;
1781 int req_class;
1782
1783 KASSERT((object != NULL) == ((req & VM_ALLOC_NOOBJ) == 0) &&
1784 (object != NULL || (req & VM_ALLOC_SBUSY) == 0) &&
1785 ((req & (VM_ALLOC_NOBUSY | VM_ALLOC_SBUSY)) !=
1786 (VM_ALLOC_NOBUSY | VM_ALLOC_SBUSY)),
1787 ("vm_page_alloc: inconsistent object(%p)/req(%x)", (void *)object,
1788 req));
1789 if (object != NULL) {
1790 VM_OBJECT_ASSERT_WLOCKED(object);
1791 KASSERT(object->type == OBJT_PHYS,
1792 ("vm_page_alloc_contig: object %p isn't OBJT_PHYS",
1793 object));
1794 }
1795 KASSERT(npages > 0, ("vm_page_alloc_contig: npages is zero"));
1796 req_class = req & VM_ALLOC_CLASS_MASK;
1797
1798 /*
1799 * The page daemon is allowed to dig deeper into the free page list.
1800 */
1801 if (curproc == pageproc && req_class != VM_ALLOC_INTERRUPT)
1802 req_class = VM_ALLOC_SYSTEM;
1803
1804 SLIST_INIT(&deferred_vdrop_list);
1805 mtx_lock(&vm_page_queue_free_mtx);
1806 if (cnt.v_free_count + cnt.v_cache_count >= npages +
1807 cnt.v_free_reserved || (req_class == VM_ALLOC_SYSTEM &&
1808 cnt.v_free_count + cnt.v_cache_count >= npages +
1809 cnt.v_interrupt_free_min) || (req_class == VM_ALLOC_INTERRUPT &&
1810 cnt.v_free_count + cnt.v_cache_count >= npages)) {
1811 #if VM_NRESERVLEVEL > 0
1812 retry:
1813 if (object == NULL || (object->flags & OBJ_COLORED) == 0 ||
1814 (m_ret = vm_reserv_alloc_contig(object, pindex, npages,
1815 low, high, alignment, boundary)) == NULL)
1816 #endif
1817 m_ret = vm_phys_alloc_contig(npages, low, high,
1818 alignment, boundary);
1819 } else {
1820 mtx_unlock(&vm_page_queue_free_mtx);
1821 atomic_add_int(&vm_pageout_deficit, npages);
1822 pagedaemon_wakeup();
1823 return (NULL);
1824 }
1825 if (m_ret != NULL)
1826 for (m = m_ret; m < &m_ret[npages]; m++) {
1827 drop = vm_page_alloc_init(m);
1828 if (drop != NULL) {
1829 /*
1830 * Enqueue the vnode for deferred vdrop().
1831 */
1832 m->plinks.s.pv = drop;
1833 SLIST_INSERT_HEAD(&deferred_vdrop_list, m,
1834 plinks.s.ss);
1835 }
1836 }
1837 else {
1838 #if VM_NRESERVLEVEL > 0
1839 if (vm_reserv_reclaim_contig(npages, low, high, alignment,
1840 boundary))
1841 goto retry;
1842 #endif
1843 }
1844 mtx_unlock(&vm_page_queue_free_mtx);
1845 if (m_ret == NULL)
1846 return (NULL);
1847
1848 /*
1849 * Initialize the pages. Only the PG_ZERO flag is inherited.
1850 */
1851 flags = 0;
1852 if ((req & VM_ALLOC_ZERO) != 0)
1853 flags = PG_ZERO;
1854 if ((req & VM_ALLOC_NODUMP) != 0)
1855 flags |= PG_NODUMP;
1856 if ((req & VM_ALLOC_WIRED) != 0)
1857 atomic_add_int(&cnt.v_wire_count, npages);
1858 oflags = VPO_UNMANAGED;
1859 if (object != NULL) {
1860 if (object->memattr != VM_MEMATTR_DEFAULT &&
1861 memattr == VM_MEMATTR_DEFAULT)
1862 memattr = object->memattr;
1863 }
1864 for (m = m_ret; m < &m_ret[npages]; m++) {
1865 m->aflags = 0;
1866 m->flags = (m->flags | PG_NODUMP) & flags;
1867 m->busy_lock = VPB_UNBUSIED;
1868 if (object != NULL) {
1869 if ((req & (VM_ALLOC_NOBUSY | VM_ALLOC_SBUSY)) == 0)
1870 m->busy_lock = VPB_SINGLE_EXCLUSIVER;
1871 if ((req & VM_ALLOC_SBUSY) != 0)
1872 m->busy_lock = VPB_SHARERS_WORD(1);
1873 }
1874 if ((req & VM_ALLOC_WIRED) != 0)
1875 m->wire_count = 1;
1876 /* Unmanaged pages don't use "act_count". */
1877 m->oflags = oflags;
1878 if (object != NULL) {
1879 if (vm_page_insert(m, object, pindex)) {
1880 vm_page_alloc_contig_vdrop(
1881 &deferred_vdrop_list);
1882 if (vm_paging_needed())
1883 pagedaemon_wakeup();
1884 if ((req & VM_ALLOC_WIRED) != 0)
1885 atomic_subtract_int(&cnt.v_wire_count,
1886 npages);
1887 for (m_tmp = m, m = m_ret;
1888 m < &m_ret[npages]; m++) {
1889 if ((req & VM_ALLOC_WIRED) != 0)
1890 m->wire_count = 0;
1891 if (m >= m_tmp) {
1892 m->object = NULL;
1893 m->oflags |= VPO_UNMANAGED;
1894 }
1895 m->busy_lock = VPB_UNBUSIED;
1896 vm_page_free(m);
1897 }
1898 return (NULL);
1899 }
1900 } else
1901 m->pindex = pindex;
1902 if (memattr != VM_MEMATTR_DEFAULT)
1903 pmap_page_set_memattr(m, memattr);
1904 pindex++;
1905 }
1906 vm_page_alloc_contig_vdrop(&deferred_vdrop_list);
1907 if (vm_paging_needed())
1908 pagedaemon_wakeup();
1909 return (m_ret);
1910 }
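/*
 * Editor's note: an illustrative vm_page_alloc_contig() caller, not code
 * from this file.  It requests 16 wired, physically contiguous pages below
 * 4 GB, aligned to 64 KB, e.g. for a DMA buffer; every flag and bound here
 * is an assumption chosen for the example.  Note that VM_ALLOC_ZERO only
 * preserves PG_ZERO; the caller must still zero pages lacking that flag.
 */
vm_page_t m;

m = vm_page_alloc_contig(NULL, 0, VM_ALLOC_NOOBJ | VM_ALLOC_NORMAL |
    VM_ALLOC_WIRED | VM_ALLOC_ZERO, 16, 0, (vm_paddr_t)1 << 32,
    64 * 1024, 0, VM_MEMATTR_DEFAULT);
if (m == NULL) {
	/* Contiguous pages are scarce; a caller may retry or fall back. */
}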
1911
1912 /*
1913 * Initialize a page that has been freshly dequeued from a freelist.
1914  * The caller has to vdrop() the returned vnode, if it is not NULL.
1915 *
1916 * This function may only be used to initialize unmanaged pages.
1917 *
1918 * To be called with vm_page_queue_free_mtx held.
1919 */
1920 static struct vnode *
1921 vm_page_alloc_init(vm_page_t m)
1922 {
1923 struct vnode *drop;
1924 vm_object_t m_object;
1925
1926 KASSERT(m->queue == PQ_NONE,
1927 ("vm_page_alloc_init: page %p has unexpected queue %d",
1928 m, m->queue));
1929 KASSERT(m->wire_count == 0,
1930 ("vm_page_alloc_init: page %p is wired", m));
1931 KASSERT(m->hold_count == 0,
1932 ("vm_page_alloc_init: page %p is held", m));
1933 KASSERT(!vm_page_busied(m),
1934 ("vm_page_alloc_init: page %p is busy", m));
1935 KASSERT(m->dirty == 0,
1936 ("vm_page_alloc_init: page %p is dirty", m));
1937 KASSERT(pmap_page_get_memattr(m) == VM_MEMATTR_DEFAULT,
1938 ("vm_page_alloc_init: page %p has unexpected memattr %d",
1939 m, pmap_page_get_memattr(m)));
1940 mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
1941 drop = NULL;
1942 if ((m->flags & PG_CACHED) != 0) {
1943 KASSERT((m->flags & PG_ZERO) == 0,
1944 ("vm_page_alloc_init: cached page %p is PG_ZERO", m));
1945 m->valid = 0;
1946 m_object = m->object;
1947 vm_page_cache_remove(m);
1948 if (m_object->type == OBJT_VNODE &&
1949 vm_object_cache_is_empty(m_object))
1950 drop = m_object->handle;
1951 } else {
1952 KASSERT(VM_PAGE_IS_FREE(m),
1953 ("vm_page_alloc_init: page %p is not free", m));
1954 KASSERT(m->valid == 0,
1955 ("vm_page_alloc_init: free page %p is valid", m));
1956 vm_phys_freecnt_adj(m, -1);
1957 if ((m->flags & PG_ZERO) != 0)
1958 vm_page_zero_count--;
1959 }
1960 /* Don't clear the PG_ZERO flag; we'll need it later. */
1961 m->flags &= PG_ZERO;
1962 return (drop);
1963 }
1964
1965 /*
1966 * vm_page_alloc_freelist:
1967 *
1968 * Allocate a physical page from the specified free page list.
1969 *
1970 * The caller must always specify an allocation class.
1971 *
1972 * allocation classes:
1973 * VM_ALLOC_NORMAL normal process request
1974 * VM_ALLOC_SYSTEM system *really* needs a page
1975 * VM_ALLOC_INTERRUPT interrupt time request
1976 *
1977 * optional allocation flags:
1978 * VM_ALLOC_COUNT(number) the number of additional pages that the caller
1979 * intends to allocate
1980 * VM_ALLOC_WIRED wire the allocated page
1981 * VM_ALLOC_ZERO prefer a zeroed page
1982 *
1983 * This routine may not sleep.
1984 */
1985 vm_page_t
1986 vm_page_alloc_freelist(int flind, int req)
1987 {
1988 struct vnode *drop;
1989 vm_page_t m;
1990 u_int flags;
1991 int req_class;
1992
1993 req_class = req & VM_ALLOC_CLASS_MASK;
1994
1995 /*
1996 * The page daemon is allowed to dig deeper into the free page list.
1997 */
1998 if (curproc == pageproc && req_class != VM_ALLOC_INTERRUPT)
1999 req_class = VM_ALLOC_SYSTEM;
2000
2001 /*
2002 * Do not allocate reserved pages unless the req has asked for it.
2003 */
2004 mtx_lock_flags(&vm_page_queue_free_mtx, MTX_RECURSE);
2005 if (cnt.v_free_count + cnt.v_cache_count > cnt.v_free_reserved ||
2006 (req_class == VM_ALLOC_SYSTEM &&
2007 cnt.v_free_count + cnt.v_cache_count > cnt.v_interrupt_free_min) ||
2008 (req_class == VM_ALLOC_INTERRUPT &&
2009 cnt.v_free_count + cnt.v_cache_count > 0))
2010 m = vm_phys_alloc_freelist_pages(flind, VM_FREEPOOL_DIRECT, 0);
2011 else {
2012 mtx_unlock(&vm_page_queue_free_mtx);
2013 atomic_add_int(&vm_pageout_deficit,
2014 max((u_int)req >> VM_ALLOC_COUNT_SHIFT, 1));
2015 pagedaemon_wakeup();
2016 return (NULL);
2017 }
2018 if (m == NULL) {
2019 mtx_unlock(&vm_page_queue_free_mtx);
2020 return (NULL);
2021 }
2022 drop = vm_page_alloc_init(m);
2023 mtx_unlock(&vm_page_queue_free_mtx);
2024
2025 /*
2026 * Initialize the page. Only the PG_ZERO flag is inherited.
2027 */
2028 m->aflags = 0;
2029 flags = 0;
2030 if ((req & VM_ALLOC_ZERO) != 0)
2031 flags = PG_ZERO;
2032 m->flags &= flags;
2033 if ((req & VM_ALLOC_WIRED) != 0) {
2034 /*
2035 * The page lock is not required for wiring a page that does
2036 * not belong to an object.
2037 */
2038 atomic_add_int(&cnt.v_wire_count, 1);
2039 m->wire_count = 1;
2040 }
2041 /* Unmanaged pages don't use "act_count". */
2042 m->oflags = VPO_UNMANAGED;
2043 if (drop != NULL)
2044 vdrop(drop);
2045 if (vm_paging_needed())
2046 pagedaemon_wakeup();
2047 return (m);
2048 }
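/*
 * Editor's note: an illustrative sketch, not code from this file, of a
 * machine-dependent caller taking one wired page from a specific physical
 * freelist; the freelist index 0 is an assumption made for the example.
 */
vm_page_t m;

m = vm_page_alloc_freelist(0, VM_ALLOC_NORMAL | VM_ALLOC_WIRED |
    VM_ALLOC_ZERO);
if (m != NULL && (m->flags & PG_ZERO) == 0)
	pmap_zero_page(m);	/* VM_ALLOC_ZERO only states a preference. */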
2049
2050 /*
2051 * vm_wait: (also see VM_WAIT macro)
2052 *
2053 * Sleep until free pages are available for allocation.
2054 * - Called in various places before memory allocations.
2055 */
2056 void
2057 vm_wait(void)
2058 {
2059
2060 mtx_lock(&vm_page_queue_free_mtx);
2061 if (curproc == pageproc) {
2062 vm_pageout_pages_needed = 1;
2063 msleep(&vm_pageout_pages_needed, &vm_page_queue_free_mtx,
2064 PDROP | PSWP, "VMWait", 0);
2065 } else {
2066 if (!vm_pages_needed) {
2067 vm_pages_needed = 1;
2068 wakeup(&vm_pages_needed);
2069 }
2070 msleep(&cnt.v_free_count, &vm_page_queue_free_mtx, PDROP | PVM,
2071 "vmwait", 0);
2072 }
2073 }
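/*
 * Editor's note: a sketch of the canonical vm_wait() caller pattern, not
 * code from this file: retry an allocation after sleeping in VM_WAIT,
 * dropping the object lock across the sleep (compare vm_page_grab() below).
 */
vm_page_t m;

while ((m = vm_page_alloc(object, pindex, VM_ALLOC_NORMAL)) == NULL) {
	VM_OBJECT_WUNLOCK(object);
	VM_WAIT;
	VM_OBJECT_WLOCK(object);
}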
2074
2075 /*
2076 * vm_waitpfault: (also see VM_WAITPFAULT macro)
2077 *
2078 * Sleep until free pages are available for allocation.
2079 * - Called only in vm_fault so that processes page faulting
2080 * can be easily tracked.
2081 * - Sleeps at a lower priority than vm_wait() so that vm_wait()ing
2082 * processes will be able to grab memory first. Do not change
2083 * this balance without careful testing first.
2084 */
2085 void
2086 vm_waitpfault(void)
2087 {
2088
2089 mtx_lock(&vm_page_queue_free_mtx);
2090 if (!vm_pages_needed) {
2091 vm_pages_needed = 1;
2092 wakeup(&vm_pages_needed);
2093 }
2094 msleep(&cnt.v_free_count, &vm_page_queue_free_mtx, PDROP | PUSER,
2095 "pfault", 0);
2096 }
2097
2098 struct vm_pagequeue *
2099 vm_page_pagequeue(vm_page_t m)
2100 {
2101
2102 return (&vm_phys_domain(m)->vmd_pagequeues[m->queue]);
2103 }
2104
2105 /*
2106 * vm_page_dequeue:
2107 *
2108 * Remove the given page from its current page queue.
2109 *
2110 * The page must be locked.
2111 */
2112 void
2113 vm_page_dequeue(vm_page_t m)
2114 {
2115 struct vm_pagequeue *pq;
2116
2117 vm_page_lock_assert(m, MA_OWNED);
2118 KASSERT(m->queue != PQ_NONE,
2119 ("vm_page_dequeue: page %p is not queued", m));
2120 pq = vm_page_pagequeue(m);
2121 vm_pagequeue_lock(pq);
2122 m->queue = PQ_NONE;
2123 TAILQ_REMOVE(&pq->pq_pl, m, plinks.q);
2124 vm_pagequeue_cnt_dec(pq);
2125 vm_pagequeue_unlock(pq);
2126 }
2127
2128 /*
2129 * vm_page_dequeue_locked:
2130 *
2131 * Remove the given page from its current page queue.
2132 *
2133 * The page and page queue must be locked.
2134 */
2135 void
2136 vm_page_dequeue_locked(vm_page_t m)
2137 {
2138 struct vm_pagequeue *pq;
2139
2140 vm_page_lock_assert(m, MA_OWNED);
2141 pq = vm_page_pagequeue(m);
2142 vm_pagequeue_assert_locked(pq);
2143 m->queue = PQ_NONE;
2144 TAILQ_REMOVE(&pq->pq_pl, m, plinks.q);
2145 vm_pagequeue_cnt_dec(pq);
2146 }
2147
2148 /*
2149 * vm_page_enqueue:
2150 *
2151 * Add the given page to the specified page queue.
2152 *
2153 * The page must be locked.
2154 */
2155 static void
2156 vm_page_enqueue(int queue, vm_page_t m)
2157 {
2158 struct vm_pagequeue *pq;
2159
2160 vm_page_lock_assert(m, MA_OWNED);
2161 pq = &vm_phys_domain(m)->vmd_pagequeues[queue];
2162 vm_pagequeue_lock(pq);
2163 m->queue = queue;
2164 TAILQ_INSERT_TAIL(&pq->pq_pl, m, plinks.q);
2165 vm_pagequeue_cnt_inc(pq);
2166 vm_pagequeue_unlock(pq);
2167 }
2168
2169 /*
2170 * vm_page_requeue:
2171 *
2172 * Move the given page to the tail of its current page queue.
2173 *
2174 * The page must be locked.
2175 */
2176 void
2177 vm_page_requeue(vm_page_t m)
2178 {
2179 struct vm_pagequeue *pq;
2180
2181 vm_page_lock_assert(m, MA_OWNED);
2182 KASSERT(m->queue != PQ_NONE,
2183 ("vm_page_requeue: page %p is not queued", m));
2184 pq = vm_page_pagequeue(m);
2185 vm_pagequeue_lock(pq);
2186 TAILQ_REMOVE(&pq->pq_pl, m, plinks.q);
2187 TAILQ_INSERT_TAIL(&pq->pq_pl, m, plinks.q);
2188 vm_pagequeue_unlock(pq);
2189 }
2190
2191 /*
2192 * vm_page_requeue_locked:
2193 *
2194 * Move the given page to the tail of its current page queue.
2195 *
2196 * The page queue must be locked.
2197 */
2198 void
2199 vm_page_requeue_locked(vm_page_t m)
2200 {
2201 struct vm_pagequeue *pq;
2202
2203 KASSERT(m->queue != PQ_NONE,
2204 ("vm_page_requeue_locked: page %p is not queued", m));
2205 pq = vm_page_pagequeue(m);
2206 vm_pagequeue_assert_locked(pq);
2207 TAILQ_REMOVE(&pq->pq_pl, m, plinks.q);
2208 TAILQ_INSERT_TAIL(&pq->pq_pl, m, plinks.q);
2209 }
2210
2211 /*
2212 * vm_page_activate:
2213 *
2214 * Put the specified page on the active list (if appropriate).
2215 * Ensure that act_count is at least ACT_INIT but do not otherwise
2216 * mess with it.
2217 *
2218 * The page must be locked.
2219 */
2220 void
2221 vm_page_activate(vm_page_t m)
2222 {
2223 int queue;
2224
2225 vm_page_lock_assert(m, MA_OWNED);
2226 if ((queue = m->queue) != PQ_ACTIVE) {
2227 if (m->wire_count == 0 && (m->oflags & VPO_UNMANAGED) == 0) {
2228 if (m->act_count < ACT_INIT)
2229 m->act_count = ACT_INIT;
2230 if (queue != PQ_NONE)
2231 vm_page_dequeue(m);
2232 vm_page_enqueue(PQ_ACTIVE, m);
2233 } else
2234 KASSERT(queue == PQ_NONE,
2235 ("vm_page_activate: wired page %p is queued", m));
2236 } else {
2237 if (m->act_count < ACT_INIT)
2238 m->act_count = ACT_INIT;
2239 }
2240 }
2241
2242 /*
2243 * vm_page_free_wakeup:
2244 *
2245 * Helper routine for vm_page_free_toq() and vm_page_cache(). This
2246 * routine is called when a page has been added to the cache or free
2247 * queues.
2248 *
2249 * The page queues must be locked.
2250 */
2251 static inline void
2252 vm_page_free_wakeup(void)
2253 {
2254
2255 mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
2256 /*
2257 * If the pageout daemon needs pages, then tell it that there are
2258 * some free.
2259 */
2260 if (vm_pageout_pages_needed &&
2261 cnt.v_cache_count + cnt.v_free_count >= cnt.v_pageout_free_min) {
2262 wakeup(&vm_pageout_pages_needed);
2263 vm_pageout_pages_needed = 0;
2264 }
2265 /*
2266 * Wake up processes that are waiting on memory if we hit a
2267 * high water mark, and wake up the scheduler process if we have
2268 * lots of memory; that process will swap processes back in.
2269 */
2270 if (vm_pages_needed && !vm_page_count_min()) {
2271 vm_pages_needed = 0;
2272 wakeup(&cnt.v_free_count);
2273 }
2274 }
2275
2276 /*
2277 * Turn a cached page into a free page, by changing its attributes.
2278 * Keep the statistics up-to-date.
2279 *
2280 * The free page queue must be locked.
2281 */
2282 static void
2283 vm_page_cache_turn_free(vm_page_t m)
2284 {
2285
2286 mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
2287
2288 m->object = NULL;
2289 m->valid = 0;
2290 /* Clear PG_CACHED and set PG_FREE. */
2291 m->flags ^= PG_CACHED | PG_FREE;
2292 KASSERT((m->flags & (PG_CACHED | PG_FREE)) == PG_FREE,
2293 ("vm_page_cache_free: page %p has inconsistent flags", m));
2294 cnt.v_cache_count--;
2295 vm_phys_freecnt_adj(m, 1);
2296 }
2297
2298 /*
2299 * vm_page_free_toq:
2300 *
2301 * Returns the given page to the free list,
2302 * disassociating it from any VM object.
2303 *
2304 * The object must be locked. The page must be locked if it is managed.
2305 */
2306 void
2307 vm_page_free_toq(vm_page_t m)
2308 {
2309
2310 if ((m->oflags & VPO_UNMANAGED) == 0) {
2311 vm_page_lock_assert(m, MA_OWNED);
2312 KASSERT(!pmap_page_is_mapped(m),
2313 ("vm_page_free_toq: freeing mapped page %p", m));
2314 } else
2315 KASSERT(m->queue == PQ_NONE,
2316 ("vm_page_free_toq: unmanaged page %p is queued", m));
2317 PCPU_INC(cnt.v_tfree);
2318
2319 if (VM_PAGE_IS_FREE(m))
2320 panic("vm_page_free: freeing free page %p", m);
2321 else if (vm_page_sbusied(m))
2322 panic("vm_page_free: freeing busy page %p", m);
2323
2324 /*
2325 * Unqueue, then remove page. Note that we cannot destroy
2326 * the page here because we do not want to call the pager's
2327 * callback routine until after we've put the page on the
2328 * appropriate free queue.
2329 */
2330 vm_page_remque(m);
2331 vm_page_remove(m);
2332
2333 /*
2334 * If the page is fictitious, remove the object association and
2335 * return; otherwise, delay removal of the object association.
2336 */
2337 if ((m->flags & PG_FICTITIOUS) != 0) {
2338 return;
2339 }
2340
2341 m->valid = 0;
2342 vm_page_undirty(m);
2343
2344 if (m->wire_count != 0)
2345 panic("vm_page_free: freeing wired page %p", m);
2346 if (m->hold_count != 0) {
2347 m->flags &= ~PG_ZERO;
2348 KASSERT((m->flags & PG_UNHOLDFREE) == 0,
2349 ("vm_page_free: freeing PG_UNHOLDFREE page %p", m));
2350 m->flags |= PG_UNHOLDFREE;
2351 } else {
2352 /*
2353 * Restore the default memory attribute to the page.
2354 */
2355 if (pmap_page_get_memattr(m) != VM_MEMATTR_DEFAULT)
2356 pmap_page_set_memattr(m, VM_MEMATTR_DEFAULT);
2357
2358 /*
2359 * Insert the page into the physical memory allocator's
2360 * cache/free page queues.
2361 */
2362 mtx_lock(&vm_page_queue_free_mtx);
2363 m->flags |= PG_FREE;
2364 vm_phys_freecnt_adj(m, 1);
2365 #if VM_NRESERVLEVEL > 0
2366 if (!vm_reserv_free_page(m))
2367 #else
2368 if (TRUE)
2369 #endif
2370 vm_phys_free_pages(m, 0);
2371 if ((m->flags & PG_ZERO) != 0)
2372 ++vm_page_zero_count;
2373 else
2374 vm_page_zero_idle_wakeup();
2375 vm_page_free_wakeup();
2376 mtx_unlock(&vm_page_queue_free_mtx);
2377 }
2378 }
2379
2380 /*
2381 * vm_page_wire:
2382 *
2383 * Mark this page as wired down by yet
2384 * another map, removing it from paging queues
2385 * as necessary.
2386 *
2387 * If the page is fictitious, then its wire count must remain one.
2388 *
2389 * The page must be locked.
2390 */
2391 void
2392 vm_page_wire(vm_page_t m)
2393 {
2394
2395 /*
2396 * Only bump the wire statistics if the page is not already wired,
2397 * and only unqueue the page if it is on some queue (if it is unmanaged
2398 * it is already off the queues).
2399 */
2400 vm_page_lock_assert(m, MA_OWNED);
2401 if ((m->flags & PG_FICTITIOUS) != 0) {
2402 KASSERT(m->wire_count == 1,
2403 ("vm_page_wire: fictitious page %p's wire count isn't one",
2404 m));
2405 return;
2406 }
2407 if (m->wire_count == 0) {
2408 KASSERT((m->oflags & VPO_UNMANAGED) == 0 ||
2409 m->queue == PQ_NONE,
2410 ("vm_page_wire: unmanaged page %p is queued", m));
2411 vm_page_remque(m);
2412 atomic_add_int(&cnt.v_wire_count, 1);
2413 }
2414 m->wire_count++;
2415 KASSERT(m->wire_count != 0, ("vm_page_wire: wire_count overflow m=%p", m));
2416 }
2417
2418 /*
2419 * vm_page_unwire:
2420 *
2421 * Release one wiring of the specified page, potentially enabling it to be
2422 * paged again. If paging is enabled, then the value of the parameter
2423 * "activate" determines to which queue the page is added. If "activate" is
2424 * non-zero, then the page is added to the active queue. Otherwise, it is
2425 * added to the inactive queue.
2426 *
2427 * However, unless the page belongs to an object, it is not enqueued because
2428 * it cannot be paged out.
2429 *
2430 * If a page is fictitious, then its wire count must always be one.
2431 *
2432 * A managed page must be locked.
2433 */
2434 void
2435 vm_page_unwire(vm_page_t m, int activate)
2436 {
2437
2438 if ((m->oflags & VPO_UNMANAGED) == 0)
2439 vm_page_lock_assert(m, MA_OWNED);
2440 if ((m->flags & PG_FICTITIOUS) != 0) {
2441 KASSERT(m->wire_count == 1,
2442 ("vm_page_unwire: fictitious page %p's wire count isn't one", m));
2443 return;
2444 }
2445 if (m->wire_count > 0) {
2446 m->wire_count--;
2447 if (m->wire_count == 0) {
2448 atomic_subtract_int(&cnt.v_wire_count, 1);
2449 if ((m->oflags & VPO_UNMANAGED) != 0 ||
2450 m->object == NULL)
2451 return;
2452 if (!activate)
2453 m->flags &= ~PG_WINATCFLS;
2454 vm_page_enqueue(activate ? PQ_ACTIVE : PQ_INACTIVE, m);
2455 }
2456 } else
2457 panic("vm_page_unwire: page %p's wire count is zero", m);
2458 }
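/*
 * Editor's note: a sketch of the usual wire/unwire pairing for a managed
 * page "m" (an illustration, not code from this file).  Both calls require
 * the page lock; unwiring with "activate" == 0 places the page on the
 * inactive queue once its last wiring is released.
 */
vm_page_lock(m);
vm_page_wire(m);
vm_page_unlock(m);
/* ... access the page while it is exempt from paging ... */
vm_page_lock(m);
vm_page_unwire(m, 0);
vm_page_unlock(m);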
2459
2460 /*
2461 * Move the specified page to the inactive queue.
2462 *
2463 * Many pages placed on the inactive queue should actually go
2464 * into the cache, but it is difficult to figure out which. What
2465 * we do instead, if the inactive target is well met, is to put
2466 * clean pages at the head of the inactive queue instead of the tail.
2467 * This will cause them to be moved to the cache more quickly and
2468 * if not actively re-referenced, reclaimed more quickly. If we just
2469 * stick these pages at the end of the inactive queue, heavy filesystem
2470 * meta-data accesses can cause an unnecessary paging load on memory bound
2471 * processes. This optimization causes one-time-use metadata to be
2472 * reused more quickly.
2473 *
2474 * Normally athead is 0 resulting in LRU operation. athead is set
2475 * to 1 if we want this page to be 'as if it were placed in the cache',
2476 * except without unmapping it from the process address space.
2477 *
2478 * The page must be locked.
2479 */
2480 static inline void
2481 _vm_page_deactivate(vm_page_t m, int athead)
2482 {
2483 struct vm_pagequeue *pq;
2484 int queue;
2485
2486 vm_page_assert_locked(m);
2487
2488 /*
2489 * Ignore if the page is already inactive, unless it is unlikely to be
2490 * reactivated.
2491 */
2492 if ((queue = m->queue) == PQ_INACTIVE && !athead)
2493 return;
2494 if (m->wire_count == 0 && (m->oflags & VPO_UNMANAGED) == 0) {
2495 pq = &vm_phys_domain(m)->vmd_pagequeues[PQ_INACTIVE];
2496 /* Avoid multiple acquisitions of the inactive queue lock. */
2497 if (queue == PQ_INACTIVE) {
2498 vm_pagequeue_lock(pq);
2499 vm_page_dequeue_locked(m);
2500 } else {
2501 if (queue != PQ_NONE)
2502 vm_page_dequeue(m);
2503 m->flags &= ~PG_WINATCFLS;
2504 vm_pagequeue_lock(pq);
2505 }
2506 m->queue = PQ_INACTIVE;
2507 if (athead)
2508 TAILQ_INSERT_HEAD(&pq->pq_pl, m, plinks.q);
2509 else
2510 TAILQ_INSERT_TAIL(&pq->pq_pl, m, plinks.q);
2511 vm_pagequeue_cnt_inc(pq);
2512 vm_pagequeue_unlock(pq);
2513 }
2514 }
2515
2516 /*
2517 * Move the specified page to the inactive queue.
2518 *
2519 * The page must be locked.
2520 */
2521 void
2522 vm_page_deactivate(vm_page_t m)
2523 {
2524
2525 _vm_page_deactivate(m, 0);
2526 }
2527
2528 /*
2529 * vm_page_try_to_cache:
2530 *
2531 * Returns 0 on failure, 1 on success
2532 */
2533 int
2534 vm_page_try_to_cache(vm_page_t m)
2535 {
2536
2537 vm_page_lock_assert(m, MA_OWNED);
2538 VM_OBJECT_ASSERT_WLOCKED(m->object);
2539 if (m->dirty || m->hold_count || m->wire_count ||
2540 (m->oflags & VPO_UNMANAGED) != 0 || vm_page_busied(m))
2541 return (0);
2542 pmap_remove_all(m);
2543 if (m->dirty)
2544 return (0);
2545 vm_page_cache(m);
2546 return (1);
2547 }
2548
2549 /*
2550 * vm_page_try_to_free()
2551 *
2552 * Attempt to free the page. If we cannot free it, we do nothing.
2553 * 1 is returned on success, 0 on failure.
2554 */
2555 int
2556 vm_page_try_to_free(vm_page_t m)
2557 {
2558
2559 vm_page_lock_assert(m, MA_OWNED);
2560 if (m->object != NULL)
2561 VM_OBJECT_ASSERT_WLOCKED(m->object);
2562 if (m->dirty || m->hold_count || m->wire_count ||
2563 (m->oflags & VPO_UNMANAGED) != 0 || vm_page_busied(m))
2564 return (0);
2565 pmap_remove_all(m);
2566 if (m->dirty)
2567 return (0);
2568 vm_page_free(m);
2569 return (1);
2570 }
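/*
 * Editor's note: an illustrative caller sketch, not code from this file:
 * opportunistically reclaim a clean, idle page "m", falling back to
 * deactivation when it cannot be freed.  The page lock and the object's
 * write lock are assumed to be held.
 */
if (!vm_page_try_to_free(m))
	vm_page_deactivate(m);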
2571
2572 /*
2573 * vm_page_cache
2574 *
2575 * Put the specified page onto the page cache queue (if appropriate).
2576 *
2577 * The object and page must be locked.
2578 */
2579 void
2580 vm_page_cache(vm_page_t m)
2581 {
2582 vm_object_t object;
2583 boolean_t cache_was_empty;
2584
2585 vm_page_lock_assert(m, MA_OWNED);
2586 object = m->object;
2587 VM_OBJECT_ASSERT_WLOCKED(object);
2588 if (vm_page_busied(m) || (m->oflags & VPO_UNMANAGED) ||
2589 m->hold_count || m->wire_count)
2590 panic("vm_page_cache: attempting to cache busy page");
2591 KASSERT(!pmap_page_is_mapped(m),
2592 ("vm_page_cache: page %p is mapped", m));
2593 KASSERT(m->dirty == 0, ("vm_page_cache: page %p is dirty", m));
2594 if (m->valid == 0 || object->type == OBJT_DEFAULT ||
2595 (object->type == OBJT_SWAP &&
2596 !vm_pager_has_page(object, m->pindex, NULL, NULL))) {
2597 /*
2598 * Hypothesis: A cache-eligible page belonging to a
2599 * default object or swap object but without a backing
2600 * store must be zero filled.
2601 */
2602 vm_page_free(m);
2603 return;
2604 }
2605 KASSERT((m->flags & PG_CACHED) == 0,
2606 ("vm_page_cache: page %p is already cached", m));
2607
2608 /*
2609 * Remove the page from the paging queues.
2610 */
2611 vm_page_remque(m);
2612
2613 /*
2614 * Remove the page from the object's collection of resident
2615 * pages.
2616 */
2617 vm_radix_remove(&object->rtree, m->pindex);
2618 TAILQ_REMOVE(&object->memq, m, listq);
2619 object->resident_page_count--;
2620
2621 /*
2622 * Restore the default memory attribute to the page.
2623 */
2624 if (pmap_page_get_memattr(m) != VM_MEMATTR_DEFAULT)
2625 pmap_page_set_memattr(m, VM_MEMATTR_DEFAULT);
2626
2627 /*
2628 * Insert the page into the object's collection of cached pages
2629 * and the physical memory allocator's cache/free page queues.
2630 */
2631 m->flags &= ~PG_ZERO;
2632 mtx_lock(&vm_page_queue_free_mtx);
2633 cache_was_empty = vm_radix_is_empty(&object->cache);
2634 if (vm_radix_insert(&object->cache, m)) {
2635 mtx_unlock(&vm_page_queue_free_mtx);
2636 if (object->type == OBJT_VNODE &&
2637 object->resident_page_count == 0)
2638 vdrop(object->handle);
2639 m->object = NULL;
2640 vm_page_free(m);
2641 return;
2642 }
2643
2644 /*
2645 * The above call to vm_radix_insert() could reclaim the one pre-
2646 * existing cached page from this object, resulting in a call to
2647 * vdrop().
2648 */
2649 if (!cache_was_empty)
2650 cache_was_empty = vm_radix_is_singleton(&object->cache);
2651
2652 m->flags |= PG_CACHED;
2653 cnt.v_cache_count++;
2654 PCPU_INC(cnt.v_tcached);
2655 #if VM_NRESERVLEVEL > 0
2656 if (!vm_reserv_free_page(m)) {
2657 #else
2658 if (TRUE) {
2659 #endif
2660 vm_phys_set_pool(VM_FREEPOOL_CACHE, m, 0);
2661 vm_phys_free_pages(m, 0);
2662 }
2663 vm_page_free_wakeup();
2664 mtx_unlock(&vm_page_queue_free_mtx);
2665
2666 /*
2667 * Increment the vnode's hold count if this is the object's only
2668 * cached page. Decrement the vnode's hold count if this was
2669 * the object's only resident page.
2670 */
2671 if (object->type == OBJT_VNODE) {
2672 if (cache_was_empty && object->resident_page_count != 0)
2673 vhold(object->handle);
2674 else if (!cache_was_empty && object->resident_page_count == 0)
2675 vdrop(object->handle);
2676 }
2677 }
2678
2679 /*
2680 * vm_page_advise
2681 *
2682 * Deactivate or do nothing, as appropriate. This routine is used
2683 * by madvise() and vop_stdadvise().
2684 *
2685 * The object and page must be locked.
2686 */
2687 void
2688 vm_page_advise(vm_page_t m, int advice)
2689 {
2690
2691 vm_page_assert_locked(m);
2692 VM_OBJECT_ASSERT_WLOCKED(m->object);
2693 if (advice == MADV_FREE)
2694 /*
2695 * Mark the page clean. This will allow the page to be freed
2696 * up by the system. However, such pages are often reused
2697 * quickly by malloc() so we do not do anything that would
2698 * cause a page fault if we can help it.
2699 *
2700 * Specifically, we do not try to actually free the page now
2701 * nor do we try to put it in the cache (which would cause a
2702 * page fault on reuse).
2703 *
2704 * But we do make the page as freeable as we can without
2705 * actually taking the step of unmapping it.
2706 */
2707 vm_page_undirty(m);
2708 else if (advice != MADV_DONTNEED)
2709 return;
2710
2711 /*
2712 * Clear any references to the page. Otherwise, the page daemon will
2713 * immediately reactivate the page.
2714 */
2715 vm_page_aflag_clear(m, PGA_REFERENCED);
2716
2717 if (advice != MADV_FREE && m->dirty == 0 && pmap_is_modified(m))
2718 vm_page_dirty(m);
2719
2720 /*
2721 * Place clean pages at the head of the inactive queue rather than the
2722 * tail, thus defeating the queue's LRU operation and ensuring that the
2723 * page will be reused quickly.
2724 */
2725 _vm_page_deactivate(m, m->dirty == 0);
2726 }
2727
2728 /*
2729 * Grab a page, waiting until we are woken up due to the page
2730 * changing state. We keep on waiting if the page continues
2731 * to be in the object. If the page doesn't exist, first allocate it
2732 * and then conditionally zero it.
2733 *
2734 * This routine may sleep.
2735 *
2736 * The object must be locked on entry. The lock will, however, be released
2737 * and reacquired if the routine sleeps.
2738 */
2739 vm_page_t
2740 vm_page_grab(vm_object_t object, vm_pindex_t pindex, int allocflags)
2741 {
2742 vm_page_t m;
2743 int sleep;
2744
2745 VM_OBJECT_ASSERT_WLOCKED(object);
2746 KASSERT((allocflags & VM_ALLOC_SBUSY) == 0 ||
2747 (allocflags & VM_ALLOC_IGN_SBUSY) != 0,
2748 ("vm_page_grab: VM_ALLOC_SBUSY/VM_ALLOC_IGN_SBUSY mismatch"));
2749 retrylookup:
2750 if ((m = vm_page_lookup(object, pindex)) != NULL) {
2751 sleep = (allocflags & VM_ALLOC_IGN_SBUSY) != 0 ?
2752 vm_page_xbusied(m) : vm_page_busied(m);
2753 if (sleep) {
2754 /*
2755 * Reference the page before unlocking and
2756 * sleeping so that the page daemon is less
2757 * likely to reclaim it.
2758 */
2759 vm_page_aflag_set(m, PGA_REFERENCED);
2760 vm_page_lock(m);
2761 VM_OBJECT_WUNLOCK(object);
2762 vm_page_busy_sleep(m, "pgrbwt", (allocflags &
2763 VM_ALLOC_IGN_SBUSY) != 0);
2764 VM_OBJECT_WLOCK(object);
2765 goto retrylookup;
2766 } else {
2767 if ((allocflags & VM_ALLOC_WIRED) != 0) {
2768 vm_page_lock(m);
2769 vm_page_wire(m);
2770 vm_page_unlock(m);
2771 }
2772 if ((allocflags &
2773 (VM_ALLOC_NOBUSY | VM_ALLOC_SBUSY)) == 0)
2774 vm_page_xbusy(m);
2775 if ((allocflags & VM_ALLOC_SBUSY) != 0)
2776 vm_page_sbusy(m);
2777 return (m);
2778 }
2779 }
2780 m = vm_page_alloc(object, pindex, allocflags & ~VM_ALLOC_IGN_SBUSY);
2781 if (m == NULL) {
2782 VM_OBJECT_WUNLOCK(object);
2783 VM_WAIT;
2784 VM_OBJECT_WLOCK(object);
2785 goto retrylookup;
2786 } else if (m->valid != 0)
2787 return (m);
2788 if (allocflags & VM_ALLOC_ZERO && (m->flags & PG_ZERO) == 0)
2789 pmap_zero_page(m);
2790 return (m);
2791 }
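/*
 * Editor's note: an illustrative vm_page_grab() caller, not code from this
 * file.  The returned page is exclusive busy (and wired, given
 * VM_ALLOC_WIRED); a freshly allocated page has m->valid == 0.  This
 * sketch simply zero-fills any page that is not fully valid, which only
 * suits anonymous memory.
 */
vm_page_t m;

VM_OBJECT_WLOCK(object);
m = vm_page_grab(object, pindex, VM_ALLOC_NORMAL | VM_ALLOC_WIRED);
if (m->valid != VM_PAGE_BITS_ALL) {
	pmap_zero_page(m);
	m->valid = VM_PAGE_BITS_ALL;
}
vm_page_xunbusy(m);
VM_OBJECT_WUNLOCK(object);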
2792
2793 /*
2794 * Mapping function for valid or dirty bits in a page.
2795 *
2796 * Inputs are required to range within a page.
2797 */
2798 vm_page_bits_t
2799 vm_page_bits(int base, int size)
2800 {
2801 int first_bit;
2802 int last_bit;
2803
2804 KASSERT(
2805 base + size <= PAGE_SIZE,
2806 ("vm_page_bits: illegal base/size %d/%d", base, size)
2807 );
2808
2809 if (size == 0) /* handle degenerate case */
2810 return (0);
2811
2812 first_bit = base >> DEV_BSHIFT;
2813 last_bit = (base + size - 1) >> DEV_BSHIFT;
2814
2815 return (((vm_page_bits_t)2 << last_bit) -
2816 ((vm_page_bits_t)1 << first_bit));
2817 }
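/*
 * Editor's note, a worked example: with DEV_BSIZE == 512 (DEV_BSHIFT == 9),
 * vm_page_bits(512, 1024) computes first_bit = 1 and last_bit =
 * (512 + 1024 - 1) >> 9 = 2, so the result is (2 << 2) - (1 << 1) =
 * 8 - 2 = 0x6, i.e., bits 1 and 2: one bit for each 512-byte block
 * covered by the range.
 */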
2818
2819 /*
2820 * vm_page_set_valid_range:
2821 *
2822 * Sets portions of a page valid. The arguments are expected
2823 * to be DEV_BSIZE aligned, but if they aren't, the bitmap is
2824 * inclusive of any partial chunks touched by the range. The
2825 * invalid portion of such chunks will be zeroed.
2826 *
2827 * (base + size) must be less than or equal to PAGE_SIZE.
2828 */
2829 void
2830 vm_page_set_valid_range(vm_page_t m, int base, int size)
2831 {
2832 int endoff, frag;
2833
2834 VM_OBJECT_ASSERT_WLOCKED(m->object);
2835 if (size == 0) /* handle degenerate case */
2836 return;
2837
2838 /*
2839 * If the base is not DEV_BSIZE aligned and the valid
2840 * bit is clear, we have to zero out a portion of the
2841 * first block.
2842 */
2843 if ((frag = base & ~(DEV_BSIZE - 1)) != base &&
2844 (m->valid & (1 << (base >> DEV_BSHIFT))) == 0)
2845 pmap_zero_page_area(m, frag, base - frag);
2846
2847 /*
2848 * If the ending offset is not DEV_BSIZE aligned and the
2849 * valid bit is clear, we have to zero out a portion of
2850 * the last block.
2851 */
2852 endoff = base + size;
2853 if ((frag = endoff & ~(DEV_BSIZE - 1)) != endoff &&
2854 (m->valid & (1 << (endoff >> DEV_BSHIFT))) == 0)
2855 pmap_zero_page_area(m, endoff,
2856 DEV_BSIZE - (endoff & (DEV_BSIZE - 1)));
2857
2858 /*
2859 * Assert that no previously invalid block that is now being validated
2860 * is already dirty.
2861 */
2862 KASSERT((~m->valid & vm_page_bits(base, size) & m->dirty) == 0,
2863 ("vm_page_set_valid_range: page %p is dirty", m));
2864
2865 /*
2866 * Set valid bits inclusive of any overlap.
2867 */
2868 m->valid |= vm_page_bits(base, size);
2869 }
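/*
 * Editor's note, a worked example for the function above: with DEV_BSIZE
 * == 512, a call with base == 100 and size == 1000 covers bytes 100..1099.
 * If block 0 is invalid, bytes 0..99 are zeroed; the range ends at 1100,
 * so if block 2 (bytes 1024..1535) is invalid, bytes 1100..1535 are
 * zeroed; finally, vm_page_bits(100, 1000) sets valid bits 0 through 2.
 */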
2870
2871 /*
2872 * Clear the given bits from the specified page's dirty field.
2873 */
2874 static __inline void
2875 vm_page_clear_dirty_mask(vm_page_t m, vm_page_bits_t pagebits)
2876 {
2877 uintptr_t addr;
2878 #if PAGE_SIZE < 16384
2879 int shift;
2880 #endif
2881
2882 /*
2883 * If the object is locked and the page is neither exclusive busy nor
2884 * write mapped, then the page's dirty field cannot possibly be
2885 * set by a concurrent pmap operation.
2886 */
2887 VM_OBJECT_ASSERT_WLOCKED(m->object);
2888 if (!vm_page_xbusied(m) && !pmap_page_is_write_mapped(m))
2889 m->dirty &= ~pagebits;
2890 else {
2891 /*
2892 * The pmap layer can call vm_page_dirty() without
2893 * holding a distinguished lock. The combination of
2894 * the object's lock and an atomic operation suffice
2895 * to guarantee consistency of the page dirty field.
2896 *
2897 * For the PAGE_SIZE == 32768 case, the compiler already
2898 * properly aligns the dirty field, so no forcible
2899 * alignment is needed. We only require the existence of
2900 * atomic_clear_64 when the page size is 32768.
2901 */
2902 addr = (uintptr_t)&m->dirty;
2903 #if PAGE_SIZE == 32768
2904 atomic_clear_64((uint64_t *)addr, pagebits);
2905 #elif PAGE_SIZE == 16384
2906 atomic_clear_32((uint32_t *)addr, pagebits);
2907 #else /* PAGE_SIZE <= 8192 */
2908 /*
2909 * Use a trick to perform a 32-bit atomic operation on the
2910 * containing aligned word, so as not to depend on the
2911 * existence of atomic_clear_{8, 16}.
2912 */
2913 shift = addr & (sizeof(uint32_t) - 1);
2914 #if BYTE_ORDER == BIG_ENDIAN
2915 shift = (sizeof(uint32_t) - sizeof(m->dirty) - shift) * NBBY;
2916 #else
2917 shift *= NBBY;
2918 #endif
2919 addr &= ~(sizeof(uint32_t) - 1);
2920 atomic_clear_32((uint32_t *)addr, pagebits << shift);
2921 #endif /* PAGE_SIZE */
2922 }
2923 }
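/*
 * Editor's note, a worked example of the sub-word trick above: with
 * PAGE_SIZE == 4096, m->dirty is a single byte.  On a little-endian
 * machine, if &m->dirty ends in 0x3, then shift == 3 * NBBY == 24, addr is
 * rounded down to the containing 32-bit word, and atomic_clear_32() clears
 * "pagebits << 24", touching only the byte that holds the dirty field.
 */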
2924
2925 /*
2926 * vm_page_set_validclean:
2927 *
2928 * Sets portions of a page valid and clean. The arguments are
2929 * expected to be DEV_BSIZE aligned, but if they aren't, the bitmap
2930 * is inclusive of any partial chunks touched by the range. The
2931 * invalid portion of such chunks will be zeroed.
2932 *
2933 * (base + size) must be less than or equal to PAGE_SIZE.
2934 */
2935 void
2936 vm_page_set_validclean(vm_page_t m, int base, int size)
2937 {
2938 vm_page_bits_t oldvalid, pagebits;
2939 int endoff, frag;
2940
2941 VM_OBJECT_ASSERT_WLOCKED(m->object);
2942 if (size == 0) /* handle degenerate case */
2943 return;
2944
2945 /*
2946 * If the base is not DEV_BSIZE aligned and the valid
2947 * bit is clear, we have to zero out a portion of the
2948 * first block.
2949 */
2950 if ((frag = base & ~(DEV_BSIZE - 1)) != base &&
2951 (m->valid & ((vm_page_bits_t)1 << (base >> DEV_BSHIFT))) == 0)
2952 pmap_zero_page_area(m, frag, base - frag);
2953
2954 /*
2955 * If the ending offset is not DEV_BSIZE aligned and the
2956 * valid bit is clear, we have to zero out a portion of
2957 * the last block.
2958 */
2959 endoff = base + size;
2960 if ((frag = endoff & ~(DEV_BSIZE - 1)) != endoff &&
2961 (m->valid & ((vm_page_bits_t)1 << (endoff >> DEV_BSHIFT))) == 0)
2962 pmap_zero_page_area(m, endoff,
2963 DEV_BSIZE - (endoff & (DEV_BSIZE - 1)));
2964
2965 /*
2966 * Set valid, clear dirty bits. If validating the entire
2967 * page we can safely clear the pmap modify bit. We also
2968 * use this opportunity to clear the VPO_NOSYNC flag. If a process
2969 * takes a write fault on a MAP_NOSYNC memory area the flag will
2970 * be set again.
2971 *
2972 * We set valid bits inclusive of any overlap, but we can only
2973 * clear dirty bits for DEV_BSIZE chunks that are fully within
2974 * the range.
2975 */
2976 oldvalid = m->valid;
2977 pagebits = vm_page_bits(base, size);
2978 m->valid |= pagebits;
2979 #if 0 /* NOT YET */
2980 if ((frag = base & (DEV_BSIZE - 1)) != 0) {
2981 frag = DEV_BSIZE - frag;
2982 base += frag;
2983 size -= frag;
2984 if (size < 0)
2985 size = 0;
2986 }
2987 pagebits = vm_page_bits(base, size & (DEV_BSIZE - 1));
2988 #endif
2989 if (base == 0 && size == PAGE_SIZE) {
2990 /*
2991 * The page can only be modified within the pmap if it is
2992 * mapped, and it can only be mapped if it was previously
2993 * fully valid.
2994 */
2995 if (oldvalid == VM_PAGE_BITS_ALL)
2996 /*
2997 * Perform the pmap_clear_modify() first. Otherwise,
2998 * a concurrent pmap operation, such as
2999 * pmap_protect(), could clear a modification in the
3000 * pmap and set the dirty field on the page before
3001 * pmap_clear_modify() had begun and after the dirty
3002 * field was cleared here.
3003 */
3004 pmap_clear_modify(m);
3005 m->dirty = 0;
3006 m->oflags &= ~VPO_NOSYNC;
3007 } else if (oldvalid != VM_PAGE_BITS_ALL)
3008 m->dirty &= ~pagebits;
3009 else
3010 vm_page_clear_dirty_mask(m, pagebits);
3011 }
3012
3013 void
3014 vm_page_clear_dirty(vm_page_t m, int base, int size)
3015 {
3016
3017 vm_page_clear_dirty_mask(m, vm_page_bits(base, size));
3018 }
3019
3020 /*
3021 * vm_page_set_invalid:
3022 *
3023 * Invalidates DEV_BSIZE'd chunks within a page. Both the
3024 * valid and dirty bits for the affected areas are cleared.
3025 */
3026 void
3027 vm_page_set_invalid(vm_page_t m, int base, int size)
3028 {
3029 vm_page_bits_t bits;
3030 vm_object_t object;
3031
3032 object = m->object;
3033 VM_OBJECT_ASSERT_WLOCKED(object);
3034 if (object->type == OBJT_VNODE && base == 0 && IDX_TO_OFF(m->pindex) +
3035 size >= object->un_pager.vnp.vnp_size)
3036 bits = VM_PAGE_BITS_ALL;
3037 else
3038 bits = vm_page_bits(base, size);
3039 if (object->ref_count != 0 && m->valid == VM_PAGE_BITS_ALL &&
3040 bits != 0)
3041 pmap_remove_all(m);
3042 KASSERT((bits == 0 && m->valid == VM_PAGE_BITS_ALL) ||
3043 !pmap_page_is_mapped(m),
3044 ("vm_page_set_invalid: page %p is mapped", m));
3045 m->valid &= ~bits;
3046 m->dirty &= ~bits;
3047 }
3048
3049 /*
3050 * vm_page_zero_invalid()
3051 *
3052 * The kernel assumes that the invalid portions of a page contain
3053 * garbage, but such pages can be mapped into memory by user code.
3054 * When this occurs, we must zero out the non-valid portions of the
3055 * page so user code sees what it expects.
3056 *
3057 * Pages are most often semi-valid when the end of a file is mapped
3058 * into memory and the file's size is not page aligned.
3059 */
3060 void
3061 vm_page_zero_invalid(vm_page_t m, boolean_t setvalid)
3062 {
3063 int b;
3064 int i;
3065
3066 VM_OBJECT_ASSERT_WLOCKED(m->object);
3067 /*
3068 * Scan the valid bits looking for invalid sections that
3069 * must be zeroed. Invalid sub-DEV_BSIZE'd areas (where the
3070 * valid bit may be set) have already been zeroed by
3071 * vm_page_set_validclean().
3072 */
3073 for (b = i = 0; i <= PAGE_SIZE / DEV_BSIZE; ++i) {
3074 if (i == (PAGE_SIZE / DEV_BSIZE) ||
3075 (m->valid & ((vm_page_bits_t)1 << i))) {
3076 if (i > b) {
3077 pmap_zero_page_area(m,
3078 b << DEV_BSHIFT, (i - b) << DEV_BSHIFT);
3079 }
3080 b = i + 1;
3081 }
3082 }
3083
3084 /*
3085 * setvalid is TRUE when we can safely set the zeroed areas
3086 * as being valid. We can do this if there are no cache consistency
3087 * issues, e.g., it is OK to do this with UFS, but not with NFS.
3088 */
3089 if (setvalid)
3090 m->valid = VM_PAGE_BITS_ALL;
3091 }
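/*
 * Editor's note, a worked example: a 2000-byte file mapped on a 4 KB page
 * with DEV_BSIZE == 512 typically has valid bits 0..3 set (bytes 0..2047,
 * with the tail of block 3 zeroed by vm_page_set_validclean()).  The loop
 * above then zeroes blocks 4..7 (bytes 2048..4095) so that user code
 * reading past end-of-file sees zeros rather than stale memory.
 */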
3092
3093 /*
3094 * vm_page_is_valid:
3095 *
3096 * Is (partial) page valid? Note that the case where size == 0
3097 * will return FALSE in the degenerate case where the page is
3098 * entirely invalid, and TRUE otherwise.
3099 */
3100 int
3101 vm_page_is_valid(vm_page_t m, int base, int size)
3102 {
3103 vm_page_bits_t bits;
3104
3105 VM_OBJECT_ASSERT_LOCKED(m->object);
3106 bits = vm_page_bits(base, size);
3107 return (m->valid != 0 && (m->valid & bits) == bits);
3108 }
3109
3110 /*
3111 * vm_page_ps_is_valid:
3112 *
3113 * Returns TRUE if the entire (super)page is valid and FALSE otherwise.
3114 */
3115 boolean_t
3116 vm_page_ps_is_valid(vm_page_t m)
3117 {
3118 int i, npages;
3119
3120 VM_OBJECT_ASSERT_LOCKED(m->object);
3121 npages = atop(pagesizes[m->psind]);
3122
3123 /*
3124 * The physically contiguous pages that make up a superpage, i.e., a
3125 * page with a page size index ("psind") greater than zero, will
3126 * occupy adjacent entries in vm_page_array[].
3127 */
3128 for (i = 0; i < npages; i++) {
3129 if (m[i].valid != VM_PAGE_BITS_ALL)
3130 return (FALSE);
3131 }
3132 return (TRUE);
3133 }
3134
3135 /*
3136 * Set the page's dirty bits if the page is modified.
3137 */
3138 void
3139 vm_page_test_dirty(vm_page_t m)
3140 {
3141
3142 VM_OBJECT_ASSERT_WLOCKED(m->object);
3143 if (m->dirty != VM_PAGE_BITS_ALL && pmap_is_modified(m))
3144 vm_page_dirty(m);
3145 }
3146
3147 void
3148 vm_page_lock_KBI(vm_page_t m, const char *file, int line)
3149 {
3150
3151 mtx_lock_flags_(vm_page_lockptr(m), 0, file, line);
3152 }
3153
3154 void
3155 vm_page_unlock_KBI(vm_page_t m, const char *file, int line)
3156 {
3157
3158 mtx_unlock_flags_(vm_page_lockptr(m), 0, file, line);
3159 }
3160
3161 int
3162 vm_page_trylock_KBI(vm_page_t m, const char *file, int line)
3163 {
3164
3165 return (mtx_trylock_flags_(vm_page_lockptr(m), 0, file, line));
3166 }
3167
3168 #if defined(INVARIANTS) || defined(INVARIANT_SUPPORT)
3169 void
3170 vm_page_assert_locked_KBI(vm_page_t m, const char *file, int line)
3171 {
3172
3173 vm_page_lock_assert_KBI(m, MA_OWNED, file, line);
3174 }
3175
3176 void
3177 vm_page_lock_assert_KBI(vm_page_t m, int a, const char *file, int line)
3178 {
3179
3180 mtx_assert_(vm_page_lockptr(m), a, file, line);
3181 }
3182 #endif
3183
3184 #ifdef INVARIANTS
3185 void
3186 vm_page_object_lock_assert(vm_page_t m)
3187 {
3188
3189 /*
3190 * Certain of the page's fields may only be modified by the
3191 * holder of the containing object's lock or the exclusive busy
3192 * holder. Unfortunately, the holder of the write busy is
3193 * not recorded, and thus cannot be checked here.
3194 */
3195 if (m->object != NULL && !vm_page_xbusied(m))
3196 VM_OBJECT_ASSERT_WLOCKED(m->object);
3197 }
3198
3199 void
3200 vm_page_assert_pga_writeable(vm_page_t m, uint8_t bits)
3201 {
3202
3203 if ((bits & PGA_WRITEABLE) == 0)
3204 return;
3205
3206 /*
3207 * The PGA_WRITEABLE flag can only be set if the page is
3208 * managed, is exclusively busied or the object is locked.
3209 * Currently, this flag is only set by pmap_enter().
3210 */
3211 KASSERT((m->oflags & VPO_UNMANAGED) == 0,
3212 ("PGA_WRITEABLE on unmanaged page"));
3213 if (!vm_page_xbusied(m))
3214 VM_OBJECT_ASSERT_LOCKED(m->object);
3215 }
3216 #endif
3217
3218 #include "opt_ddb.h"
3219 #ifdef DDB
3220 #include <sys/kernel.h>
3221
3222 #include <ddb/ddb.h>
3223
3224 DB_SHOW_COMMAND(page, vm_page_print_page_info)
3225 {
3226 db_printf("cnt.v_free_count: %d\n", cnt.v_free_count);
3227 db_printf("cnt.v_cache_count: %d\n", cnt.v_cache_count);
3228 db_printf("cnt.v_inactive_count: %d\n", cnt.v_inactive_count);
3229 db_printf("cnt.v_active_count: %d\n", cnt.v_active_count);
3230 db_printf("cnt.v_wire_count: %d\n", cnt.v_wire_count);
3231 db_printf("cnt.v_free_reserved: %d\n", cnt.v_free_reserved);
3232 db_printf("cnt.v_free_min: %d\n", cnt.v_free_min);
3233 db_printf("cnt.v_free_target: %d\n", cnt.v_free_target);
3234 db_printf("cnt.v_cache_min: %d\n", cnt.v_cache_min);
3235 db_printf("cnt.v_inactive_target: %d\n", cnt.v_inactive_target);
3236 }
3237
3238 DB_SHOW_COMMAND(pageq, vm_page_print_pageq_info)
3239 {
3240 int dom;
3241
3242 db_printf("pq_free %d pq_cache %d\n",
3243 cnt.v_free_count, cnt.v_cache_count);
3244 for (dom = 0; dom < vm_ndomains; dom++) {
3245 db_printf(
3246 "dom %d page_cnt %d free %d pq_act %d pq_inact %d pass %d\n",
3247 dom,
3248 vm_dom[dom].vmd_page_count,
3249 vm_dom[dom].vmd_free_count,
3250 vm_dom[dom].vmd_pagequeues[PQ_ACTIVE].pq_cnt,
3251 vm_dom[dom].vmd_pagequeues[PQ_INACTIVE].pq_cnt,
3252 vm_dom[dom].vmd_pass);
3253 }
3254 }
3255
3256 DB_SHOW_COMMAND(pginfo, vm_page_print_pginfo)
3257 {
3258 vm_page_t m;
3259 boolean_t phys;
3260
3261 if (!have_addr) {
3262 db_printf("show pginfo addr\n");
3263 return;
3264 }
3265
3266 phys = strchr(modif, 'p') != NULL;
3267 if (phys)
3268 m = PHYS_TO_VM_PAGE(addr);
3269 else
3270 m = (vm_page_t)addr;
3271 db_printf(
3272 "page %p obj %p pidx 0x%jx phys 0x%jx q %d hold %d wire %d\n"
3273 " af 0x%x of 0x%x f 0x%x act %d busy %x valid 0x%x dirty 0x%x\n",
3274 m, m->object, (uintmax_t)m->pindex, (uintmax_t)m->phys_addr,
3275 m->queue, m->hold_count, m->wire_count, m->aflags, m->oflags,
3276 m->flags, m->act_count, m->busy_lock, m->valid, m->dirty);
3277 }
3278 #endif /* DDB */