sys/vm/vm_reserv.c
/*-
 * Copyright (c) 2002-2006 Rice University
 * Copyright (c) 2007-2008 Alan L. Cox <alc@cs.rice.edu>
 * All rights reserved.
 *
 * This software was developed for the FreeBSD Project by Alan L. Cox,
 * Olivier Crameri, Peter Druschel, Sitaram Iyer, and Juan Navarro.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT
 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY
 * WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Superpage reservation management module
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_vm.h"

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/queue.h>
#include <sys/sbuf.h>
#include <sys/sysctl.h>
#include <sys/systm.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_phys.h>
#include <vm/vm_reserv.h>

/*
 * The reservation system supports the speculative allocation of large physical
 * pages ("superpages").  Speculative allocation enables the fully automatic
 * utilization of superpages by the virtual memory system.  In other words, no
 * programmatic directives are required to use superpages.
 */

#if VM_NRESERVLEVEL > 0

/*
 * The number of small pages that are contained in a level 0 reservation
 */
#define VM_LEVEL_0_NPAGES       (1 << VM_LEVEL_0_ORDER)

/*
 * The number of bits by which a physical address is shifted to obtain the
 * reservation number
 */
#define VM_LEVEL_0_SHIFT        (VM_LEVEL_0_ORDER + PAGE_SHIFT)

/*
 * The size of a level 0 reservation in bytes
 */
#define VM_LEVEL_0_SIZE         (1 << VM_LEVEL_0_SHIFT)
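
/*
 * Illustrative values (assumed for this example, not defined in this file):
 * on amd64, where VM_LEVEL_0_ORDER is 9 and PAGE_SHIFT is 12, a level 0
 * reservation spans VM_LEVEL_0_NPAGES = 512 small pages, VM_LEVEL_0_SHIFT
 * is 21, and VM_LEVEL_0_SIZE is 2MB, the size of an amd64 superpage.
 */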

/*
 * Computes the index of the small page underlying the given (object, pindex)
 * within the reservation's array of small pages.
 */
#define VM_RESERV_INDEX(object, pindex) \
    (((object)->pg_color + (pindex)) & (VM_LEVEL_0_NPAGES - 1))
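
/*
 * For example (values assumed for illustration): with VM_LEVEL_0_NPAGES == 512
 * and an object whose pg_color is 0, pindex 515 maps to index
 * (0 + 515) & 511 == 3, i.e., the fourth small page of the reservation.
 */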

/*
 * The reservation structure
 *
 * A reservation structure is constructed whenever a large physical page is
 * speculatively allocated to an object.  The reservation provides the small
 * physical pages for the range [pindex, pindex + VM_LEVEL_0_NPAGES) of offsets
 * within that object.  The reservation's "popcnt" tracks the number of these
 * small physical pages that are in use at any given time.  Whenever the
 * reservation is not fully utilized, it appears in the queue of partially-
 * populated reservations.  The reservation always appears on the containing
 * object's list of reservations.
 *
 * A partially-populated reservation can be broken and reclaimed at any time.
 */
struct vm_reserv {
        TAILQ_ENTRY(vm_reserv) partpopq;        /* links for partpopq */
        LIST_ENTRY(vm_reserv) objq;             /* links for object's rvq */
        vm_object_t     object;                 /* containing object */
        vm_pindex_t     pindex;                 /* offset within object */
        vm_page_t       pages;                  /* first page of a superpage */
        int             popcnt;                 /* # of pages in use */
        char            inpartpopq;             /* reservation is in partpopq */
};

/*
 * The reservation array
 *
 * This array is analogous in function to vm_page_array.  It differs in the
 * respect that it may contain a greater number of reservation structures
 * than there are (physical) superpages.  The surplus, "invalid" reservation
 * structures exist to trade off space for time in the implementation of
 * vm_reserv_from_page().  Invalid reservation structures are distinguishable
 * from "valid" reservation structures by inspecting the reservation's "pages"
 * field.  Invalid reservation structures have a NULL "pages" field.
 *
 * vm_reserv_from_page() maps a small (physical) page to an element of this
 * array by computing a physical reservation number from the page's physical
 * address.  The physical reservation number is used as the array index.
 *
 * An "active" reservation is a valid reservation structure that has a non-NULL
 * "object" field and a non-zero "popcnt" field.  In other words, every active
 * reservation belongs to a particular object.  Moreover, every active
 * reservation has an entry in the containing object's list of reservations.
 */
static vm_reserv_t vm_reserv_array;

/*
 * The partially-populated reservation queue
 *
 * This queue enables the fast recovery of an unused cached or free small page
 * from a partially-populated reservation.  The reservation at the head of
 * this queue is the least recently changed, partially-populated reservation.
 *
 * Access to this queue is synchronized by the free page queue lock.
 */
static TAILQ_HEAD(, vm_reserv) vm_rvq_partpop =
    TAILQ_HEAD_INITIALIZER(vm_rvq_partpop);

static SYSCTL_NODE(_vm, OID_AUTO, reserv, CTLFLAG_RD, 0, "Reservation Info");

static long vm_reserv_broken;
SYSCTL_LONG(_vm_reserv, OID_AUTO, broken, CTLFLAG_RD,
    &vm_reserv_broken, 0, "Cumulative number of broken reservations");

static long vm_reserv_freed;
SYSCTL_LONG(_vm_reserv, OID_AUTO, freed, CTLFLAG_RD,
    &vm_reserv_freed, 0, "Cumulative number of freed reservations");

static int sysctl_vm_reserv_partpopq(SYSCTL_HANDLER_ARGS);

SYSCTL_OID(_vm_reserv, OID_AUTO, partpopq, CTLTYPE_STRING | CTLFLAG_RD, NULL, 0,
    sysctl_vm_reserv_partpopq, "A", "Partially-populated reservation queues");

static long vm_reserv_reclaimed;
SYSCTL_LONG(_vm_reserv, OID_AUTO, reclaimed, CTLFLAG_RD,
    &vm_reserv_reclaimed, 0, "Cumulative number of reclaimed reservations");

static void        vm_reserv_depopulate(vm_reserv_t rv);
static vm_reserv_t vm_reserv_from_page(vm_page_t m);
static boolean_t   vm_reserv_has_pindex(vm_reserv_t rv,
                       vm_pindex_t pindex);
static void        vm_reserv_populate(vm_reserv_t rv);
static void        vm_reserv_reclaim(vm_reserv_t rv);

/*
 * Describes the current state of the partially-populated reservation queue.
 */
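/*
 * Hypothetical output (numbers invented for illustration; with
 * VM_NRESERVLEVEL == 1, only level -1, the queue itself, is printed):
 *
 *      LEVEL     SIZE  NUMBER
 *
 *         -1:  14336K,     28
 */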
static int
sysctl_vm_reserv_partpopq(SYSCTL_HANDLER_ARGS)
{
        struct sbuf sbuf;
        vm_reserv_t rv;
        char *cbuf;
        const int cbufsize = (VM_NRESERVLEVEL + 1) * 81;
        int counter, error, level, unused_pages;

        cbuf = malloc(cbufsize, M_TEMP, M_WAITOK | M_ZERO);
        sbuf_new(&sbuf, cbuf, cbufsize, SBUF_FIXEDLEN);
        sbuf_printf(&sbuf, "\nLEVEL     SIZE  NUMBER\n\n");
        for (level = -1; level <= VM_NRESERVLEVEL - 2; level++) {
                counter = 0;
                unused_pages = 0;
                mtx_lock(&vm_page_queue_free_mtx);
                TAILQ_FOREACH(rv, &vm_rvq_partpop/*[level]*/, partpopq) {
                        counter++;
                        unused_pages += VM_LEVEL_0_NPAGES - rv->popcnt;
                }
                mtx_unlock(&vm_page_queue_free_mtx);
                sbuf_printf(&sbuf, "%5d: %6dK, %6d\n", level,
                    unused_pages * ((int)PAGE_SIZE / 1024), counter);
        }
        sbuf_finish(&sbuf);
        error = SYSCTL_OUT(req, sbuf_data(&sbuf), sbuf_len(&sbuf));
        sbuf_delete(&sbuf);
        free(cbuf, M_TEMP);
        return (error);
}

/*
 * Reduces the given reservation's population count.  If the population count
 * becomes zero, the reservation is destroyed.  Otherwise, the reservation is
 * moved to the tail of the partially-populated reservation queue.
 *
 * The free page queue lock must be held.
 */
static void
vm_reserv_depopulate(vm_reserv_t rv)
{

        mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
        KASSERT(rv->object != NULL,
            ("vm_reserv_depopulate: reserv %p is free", rv));
        KASSERT(rv->popcnt > 0,
            ("vm_reserv_depopulate: reserv %p's popcnt is corrupted", rv));
        if (rv->inpartpopq) {
                TAILQ_REMOVE(&vm_rvq_partpop, rv, partpopq);
                rv->inpartpopq = FALSE;
        }
        rv->popcnt--;
        if (rv->popcnt == 0) {
                LIST_REMOVE(rv, objq);
                rv->object = NULL;
                vm_phys_free_pages(rv->pages, VM_LEVEL_0_ORDER);
                vm_reserv_freed++;
        } else {
                rv->inpartpopq = TRUE;
                TAILQ_INSERT_TAIL(&vm_rvq_partpop, rv, partpopq);
        }
}

/*
 * Returns the reservation to which the given page might belong.
 */
static __inline vm_reserv_t
vm_reserv_from_page(vm_page_t m)
{

        return (&vm_reserv_array[VM_PAGE_TO_PHYS(m) >> VM_LEVEL_0_SHIFT]);
}
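
/*
 * Worked example (addresses invented for illustration): with
 * VM_LEVEL_0_SHIFT == 21, a page at physical address 0x40203000 lies in the
 * 2MB chunk starting at 0x40200000 and maps to array index
 * 0x40203000 >> 21 == 0x201.  The page only "might" belong to the returned
 * reservation because that element is meaningful only if it is valid and
 * active, i.e., its "pages" and "object" fields are non-NULL.
 */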

/*
 * Returns TRUE if the given reservation contains the given page index and
 * FALSE otherwise.
 */
static __inline boolean_t
vm_reserv_has_pindex(vm_reserv_t rv, vm_pindex_t pindex)
{

        return (((pindex - rv->pindex) & ~(VM_LEVEL_0_NPAGES - 1)) == 0);
}
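
/*
 * For example (values assumed for illustration): if rv->pindex == 1024 and
 * VM_LEVEL_0_NPAGES == 512, then any pindex in [1024, 1536) yields
 * (pindex - rv->pindex) & ~511 == 0, so vm_reserv_has_pindex() returns TRUE
 * exactly for the 512 offsets that the reservation covers.
 */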

/*
 * Increases the given reservation's population count.  Unless the reservation
 * becomes fully populated, it is also moved to the tail of the partially-
 * populated reservation queue.
 *
 * The free page queue must be locked.
 */
static void
vm_reserv_populate(vm_reserv_t rv)
{

        mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
        KASSERT(rv->object != NULL,
            ("vm_reserv_populate: reserv %p is free", rv));
        KASSERT(rv->popcnt < VM_LEVEL_0_NPAGES,
            ("vm_reserv_populate: reserv %p is already full", rv));
        if (rv->inpartpopq) {
                TAILQ_REMOVE(&vm_rvq_partpop, rv, partpopq);
                rv->inpartpopq = FALSE;
        }
        rv->popcnt++;
        if (rv->popcnt < VM_LEVEL_0_NPAGES) {
                rv->inpartpopq = TRUE;
                TAILQ_INSERT_TAIL(&vm_rvq_partpop, rv, partpopq);
        }
}

/*
 * Allocates a page from an existing or newly created reservation.
 *
 * The object and free page queue must be locked.
 */
vm_page_t
vm_reserv_alloc_page(vm_object_t object, vm_pindex_t pindex)
{
        vm_page_t m, mpred, msucc;
        vm_pindex_t first, leftcap, rightcap;
        vm_reserv_t rv;

        mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);

        /*
         * Is a reservation fundamentally impossible?
         */
        VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
        if (pindex < VM_RESERV_INDEX(object, pindex) ||
            pindex >= object->size)
                return (NULL);

        /*
         * Look for an existing reservation.
         */
        msucc = NULL;
        mpred = object->root;
        while (mpred != NULL) {
                KASSERT(mpred->pindex != pindex,
                    ("vm_reserv_alloc_page: pindex already allocated"));
                rv = vm_reserv_from_page(mpred);
                if (rv->object == object && vm_reserv_has_pindex(rv, pindex)) {
                        m = &rv->pages[VM_RESERV_INDEX(object, pindex)];
                        /* Handle vm_page_rename(m, new_object, ...). */
                        if ((m->flags & (PG_CACHED | PG_FREE)) == 0)
                                return (NULL);
                        vm_reserv_populate(rv);
                        return (m);
                } else if (mpred->pindex < pindex) {
                        if (msucc != NULL ||
                            (msucc = TAILQ_NEXT(mpred, listq)) == NULL)
                                break;
                        KASSERT(msucc->pindex != pindex,
                            ("vm_reserv_alloc_page: pindex already allocated"));
                        rv = vm_reserv_from_page(msucc);
                        if (rv->object == object &&
                            vm_reserv_has_pindex(rv, pindex)) {
                                m = &rv->pages[VM_RESERV_INDEX(object, pindex)];
                                /* Handle vm_page_rename(m, new_object, ...). */
                                if ((m->flags & (PG_CACHED | PG_FREE)) == 0)
                                        return (NULL);
                                vm_reserv_populate(rv);
                                return (m);
                        } else if (pindex < msucc->pindex)
                                break;
                } else if (msucc == NULL) {
                        msucc = mpred;
                        mpred = TAILQ_PREV(msucc, pglist, listq);
                        continue;
                }
                msucc = NULL;
                mpred = object->root = vm_page_splay(pindex, object->root);
        }

        /*
         * Determine the first index to the left that can be used.
         */
        if (mpred == NULL)
                leftcap = 0;
        else if ((rv = vm_reserv_from_page(mpred))->object != object)
                leftcap = mpred->pindex + 1;
        else
                leftcap = rv->pindex + VM_LEVEL_0_NPAGES;

        /*
         * Determine the first index to the right that cannot be used.
         */
        if (msucc == NULL)
                rightcap = pindex + VM_LEVEL_0_NPAGES;
        else if ((rv = vm_reserv_from_page(msucc))->object != object)
                rightcap = msucc->pindex;
        else
                rightcap = rv->pindex;

        /*
         * Determine if a reservation fits between the first index to
         * the left that can be used and the first index to the right
         * that cannot be used.
         */
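        /*
         * For example (values assumed for illustration): with
         * VM_LEVEL_0_NPAGES == 512, pg_color == 0, and pindex == 515, the
         * prospective reservation starts at first == 515 - 3 == 512 and
         * would cover [512, 1024); it fits only if leftcap <= 512 and
         * rightcap >= 1024.
         */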
        first = pindex - VM_RESERV_INDEX(object, pindex);
        if (first < leftcap || first + VM_LEVEL_0_NPAGES > rightcap)
                return (NULL);

        /*
         * Would a new reservation extend past the end of the given object?
         */
        if (object->size < first + VM_LEVEL_0_NPAGES) {
                /*
                 * Don't allocate a new reservation if the object is a vnode or
                 * backed by another object that is a vnode.
                 */
                if (object->type == OBJT_VNODE ||
                    (object->backing_object != NULL &&
                    object->backing_object->type == OBJT_VNODE))
                        return (NULL);
                /* Speculate that the object may grow. */
        }

        /*
         * Allocate a new reservation.
         */
        m = vm_phys_alloc_pages(VM_FREEPOOL_DEFAULT, VM_LEVEL_0_ORDER);
        if (m != NULL) {
                rv = vm_reserv_from_page(m);
                KASSERT(rv->pages == m,
                    ("vm_reserv_alloc_page: reserv %p's pages is corrupted",
                    rv));
                KASSERT(rv->object == NULL,
                    ("vm_reserv_alloc_page: reserv %p isn't free", rv));
                LIST_INSERT_HEAD(&object->rvq, rv, objq);
                rv->object = object;
                rv->pindex = first;
                KASSERT(rv->popcnt == 0,
                    ("vm_reserv_alloc_page: reserv %p's popcnt is corrupted",
                    rv));
                KASSERT(!rv->inpartpopq,
                    ("vm_reserv_alloc_page: reserv %p's inpartpopq is TRUE",
                    rv));
                vm_reserv_populate(rv);
                m = &rv->pages[VM_RESERV_INDEX(object, pindex)];
        }
        return (m);
}

/*
 * Breaks all reservations belonging to the given object.
 */
void
vm_reserv_break_all(vm_object_t object)
{
        vm_reserv_t rv;
        int i;

        mtx_lock(&vm_page_queue_free_mtx);
        while ((rv = LIST_FIRST(&object->rvq)) != NULL) {
                KASSERT(rv->object == object,
                    ("vm_reserv_break_all: reserv %p is corrupted", rv));
                if (rv->inpartpopq) {
                        TAILQ_REMOVE(&vm_rvq_partpop, rv, partpopq);
                        rv->inpartpopq = FALSE;
                }
                LIST_REMOVE(rv, objq);
                rv->object = NULL;
                for (i = 0; i < VM_LEVEL_0_NPAGES; i++) {
                        if ((rv->pages[i].flags & (PG_CACHED | PG_FREE)) != 0)
                                vm_phys_free_pages(&rv->pages[i], 0);
                        else
                                rv->popcnt--;
                }
                KASSERT(rv->popcnt == 0,
                    ("vm_reserv_break_all: reserv %p's popcnt is corrupted",
                    rv));
                vm_reserv_broken++;
        }
        mtx_unlock(&vm_page_queue_free_mtx);
}

/*
 * Frees the given page if it belongs to a reservation.  Returns TRUE if the
 * page is freed and FALSE otherwise.
 *
 * The free page queue lock must be held.
 */
boolean_t
vm_reserv_free_page(vm_page_t m)
{
        vm_reserv_t rv;

        mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
        rv = vm_reserv_from_page(m);
        if (rv->object == NULL)
                return (FALSE);
        if ((m->flags & PG_CACHED) != 0 && m->pool != VM_FREEPOOL_CACHE)
                vm_phys_set_pool(VM_FREEPOOL_CACHE, rv->pages,
                    VM_LEVEL_0_ORDER);
        vm_reserv_depopulate(rv);
        return (TRUE);
}

/*
 * Initializes the reservation management system.  Specifically, initializes
 * the reservation array.
 *
 * Requires that vm_page_array and first_page are initialized!
 */
void
vm_reserv_init(void)
{
        vm_paddr_t paddr;
        int i;

        /*
         * Initialize the reservation array.  Specifically, initialize the
         * "pages" field for every element that has an underlying superpage.
         */
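        /*
         * For example (addresses invented for illustration): given a
         * phys_avail segment [0x00101000, 0x00700000) and a 2MB
         * VM_LEVEL_0_SIZE, roundup2() yields 0x00200000, so the loop below
         * initializes the elements for the superpages at 0x00200000 and
         * 0x00400000; the partial chunk ending at 0x00700000 is skipped.
         */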
        for (i = 0; phys_avail[i + 1] != 0; i += 2) {
                paddr = roundup2(phys_avail[i], VM_LEVEL_0_SIZE);
                while (paddr + VM_LEVEL_0_SIZE <= phys_avail[i + 1]) {
                        vm_reserv_array[paddr >> VM_LEVEL_0_SHIFT].pages =
                            PHYS_TO_VM_PAGE(paddr);
                        paddr += VM_LEVEL_0_SIZE;
                }
        }
}

/*
 * Returns a reservation level if the given page belongs to a fully-populated
 * reservation and -1 otherwise.
 */
int
vm_reserv_level_iffullpop(vm_page_t m)
{
        vm_reserv_t rv;

        rv = vm_reserv_from_page(m);
        return (rv->popcnt == VM_LEVEL_0_NPAGES ? 0 : -1);
}

/*
 * Prepares for the reactivation of a cached page.
 *
 * First, suppose that the given page "m" was allocated individually, i.e., not
 * as part of a reservation, and cached.  Then, suppose that a reservation
 * containing "m" is allocated to the same object.  Although "m" and the
 * reservation belong to the same object, "m"'s pindex may not match the
 * reservation's.
 *
 * The free page queue must be locked.
 */
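/*
 * Hypothetical mismatch (values invented for illustration): the reservation
 * covers pindexes [512, 1024), but the cached page "m", whose physical page
 * happens to lie within the reservation's superpage, has pindex 2000.  The
 * test below fails, so the reservation is broken: every page except "m" is
 * released, and TRUE is still returned because "m" does belong to a
 * reservation.
 */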
boolean_t
vm_reserv_reactivate_page(vm_page_t m)
{
        vm_reserv_t rv;
        int i, m_index;

        mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
        rv = vm_reserv_from_page(m);
        if (rv->object == NULL)
                return (FALSE);
        KASSERT((m->flags & PG_CACHED) != 0,
            ("vm_reserv_reactivate_page: page %p is not cached", m));
        if (m->object == rv->object &&
            m->pindex - rv->pindex == VM_RESERV_INDEX(m->object, m->pindex))
                vm_reserv_populate(rv);
        else {
                KASSERT(rv->inpartpopq,
                    ("vm_reserv_reactivate_page: reserv %p's inpartpopq is FALSE",
                    rv));
                TAILQ_REMOVE(&vm_rvq_partpop, rv, partpopq);
                rv->inpartpopq = FALSE;
                LIST_REMOVE(rv, objq);
                rv->object = NULL;
                /* Don't vm_phys_free_pages(m, 0). */
                m_index = m - rv->pages;
                for (i = 0; i < m_index; i++) {
                        if ((rv->pages[i].flags & (PG_CACHED | PG_FREE)) != 0)
                                vm_phys_free_pages(&rv->pages[i], 0);
                        else
                                rv->popcnt--;
                }
                /* The "i++" skips over "m" itself. */
                for (i++; i < VM_LEVEL_0_NPAGES; i++) {
                        if ((rv->pages[i].flags & (PG_CACHED | PG_FREE)) != 0)
                                vm_phys_free_pages(&rv->pages[i], 0);
                        else
                                rv->popcnt--;
                }
                KASSERT(rv->popcnt == 0,
                    ("vm_reserv_reactivate_page: reserv %p's popcnt is corrupted",
                    rv));
                vm_reserv_broken++;
        }
        return (TRUE);
}

/*
 * Breaks the given partially-populated reservation, releasing its cached and
 * free pages to the physical memory allocator.
 *
 * The free page queue lock must be held.
 */
static void
vm_reserv_reclaim(vm_reserv_t rv)
{
        int i;

        mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
        KASSERT(rv->inpartpopq,
            ("vm_reserv_reclaim: reserv %p's inpartpopq is corrupted", rv));
        TAILQ_REMOVE(&vm_rvq_partpop, rv, partpopq);
        rv->inpartpopq = FALSE;
        KASSERT(rv->object != NULL,
            ("vm_reserv_reclaim: reserv %p is free", rv));
        LIST_REMOVE(rv, objq);
        rv->object = NULL;
        for (i = 0; i < VM_LEVEL_0_NPAGES; i++) {
                if ((rv->pages[i].flags & (PG_CACHED | PG_FREE)) != 0)
                        vm_phys_free_pages(&rv->pages[i], 0);
                else
                        rv->popcnt--;
        }
        KASSERT(rv->popcnt == 0,
            ("vm_reserv_reclaim: reserv %p's popcnt is corrupted", rv));
        vm_reserv_reclaimed++;
}

/*
 * Breaks the reservation at the head of the partially-populated reservation
 * queue, releasing its cached and free pages to the physical memory
 * allocator.  Returns TRUE if a reservation is broken and FALSE otherwise.
 *
 * The free page queue lock must be held.
 */
boolean_t
vm_reserv_reclaim_inactive(void)
{
        vm_reserv_t rv;

        mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
        if ((rv = TAILQ_FIRST(&vm_rvq_partpop)) != NULL) {
                vm_reserv_reclaim(rv);
                return (TRUE);
        }
        return (FALSE);
}

/*
 * Searches the partially-populated reservation queue for the least recently
 * active reservation whose unused pages, i.e., cached or free pages, satisfy
 * the given request for contiguous physical memory.  If a satisfactory
 * reservation is found, it is broken.  Returns TRUE if a reservation is
 * broken and FALSE otherwise.
 *
 * The free page queue lock must be held.
 */
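/*
 * Worked example of the boundary test below (values invented for
 * illustration): with boundary == 0x400000 (4MB), a candidate run starting at
 * pa == 0x3ff000 with size == 0x2000 ends at 0x400fff; since
 * (0x3ff000 ^ 0x400fff) & ~0x3fffff == 0x400000 is non-zero, the run crosses
 * a 4MB boundary, and the search for a suitable run starts over.
 */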
boolean_t
vm_reserv_reclaim_contig(vm_paddr_t size, vm_paddr_t low, vm_paddr_t high,
    unsigned long alignment, unsigned long boundary)
{
        vm_paddr_t pa, pa_length;
        vm_reserv_t rv;
        int i;

        mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
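        /*
         * A partially-populated reservation has at least one page in use, so
         * it can contribute at most VM_LEVEL_0_SIZE - PAGE_SIZE bytes of
         * unused pages; larger requests cannot be satisfied from this queue.
         */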
        if (size > VM_LEVEL_0_SIZE - PAGE_SIZE)
                return (FALSE);
        TAILQ_FOREACH(rv, &vm_rvq_partpop, partpopq) {
                pa = VM_PAGE_TO_PHYS(&rv->pages[VM_LEVEL_0_NPAGES - 1]);
                if (pa + PAGE_SIZE - size < low) {
                        /* This entire reservation is too low; go to next. */
                        continue;
                }
                pa_length = 0;
                for (i = 0; i < VM_LEVEL_0_NPAGES; i++)
                        if ((rv->pages[i].flags & (PG_CACHED | PG_FREE)) != 0) {
                                pa_length += PAGE_SIZE;
                                if (pa_length == PAGE_SIZE) {
                                        pa = VM_PAGE_TO_PHYS(&rv->pages[i]);
                                        if (pa + size > high) {
                                                /* Skip to next reservation. */
                                                break;
                                        } else if (pa < low ||
                                            (pa & (alignment - 1)) != 0 ||
                                            ((pa ^ (pa + size - 1)) &
                                            ~(boundary - 1)) != 0)
                                                pa_length = 0;
                                }
                                if (pa_length >= size) {
                                        vm_reserv_reclaim(rv);
                                        return (TRUE);
                                }
                        } else
                                pa_length = 0;
        }
        return (FALSE);
}

/*
 * Transfers the reservation underlying the given page to a new object.
 *
 * The object must be locked.
 */
void
vm_reserv_rename(vm_page_t m, vm_object_t new_object, vm_object_t old_object,
    vm_pindex_t old_object_offset)
{
        vm_reserv_t rv;

        VM_OBJECT_LOCK_ASSERT(new_object, MA_OWNED);
        rv = vm_reserv_from_page(m);
        if (rv->object == old_object) {
                mtx_lock(&vm_page_queue_free_mtx);
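                /*
                 * Recheck under the free page queue lock, which synchronizes
                 * reservation state; the unlocked test above only serves to
                 * avoid acquiring the lock needlessly.
                 */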
                if (rv->object == old_object) {
                        LIST_REMOVE(rv, objq);
                        LIST_INSERT_HEAD(&new_object->rvq, rv, objq);
                        rv->object = new_object;
                        rv->pindex -= old_object_offset;
                }
                mtx_unlock(&vm_page_queue_free_mtx);
        }
}

/*
 * Allocates the virtual and physical memory required by the reservation
 * management system's data structures, in particular, the reservation array.
 */
vm_paddr_t
vm_reserv_startup(vm_offset_t *vaddr, vm_paddr_t end, vm_paddr_t high_water)
{
        vm_paddr_t new_end;
        size_t size;

        /*
         * Calculate the size (in bytes) of the reservation array.  Round up
         * from "high_water" because every small page is mapped to an element
         * in the reservation array based on its physical address.  Thus, the
         * number of elements in the reservation array can be greater than the
         * number of superpages.
         */
        size = howmany(high_water, VM_LEVEL_0_SIZE) * sizeof(struct vm_reserv);
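        /*
         * For example (illustrative values, not from this file): with a
         * "high_water" of 4GB and 2MB superpages, howmany() yields 2048
         * elements; assuming sizeof(struct vm_reserv) is roughly 64 bytes
         * on a 64-bit machine, the array occupies about 128KB.
         */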

        /*
         * Allocate and map the physical memory for the reservation array.  The
         * next available virtual address is returned by reference.
         */
        new_end = end - round_page(size);
        vm_reserv_array = (void *)(uintptr_t)pmap_map(vaddr, new_end, end,
            VM_PROT_READ | VM_PROT_WRITE);
        bzero(vm_reserv_array, size);

        /*
         * Return the next available physical address.
         */
        return (new_end);
}

#endif  /* VM_NRESERVLEVEL > 0 */