FreeBSD/Linux Kernel Cross Reference
sys/vm/vm_phys.c
/*-
 * Copyright (c) 2002-2006 Rice University
 * Copyright (c) 2007 Alan L. Cox <alc@cs.rice.edu>
 * All rights reserved.
 *
 * This software was developed for the FreeBSD Project by Alan L. Cox,
 * Olivier Crameri, Peter Druschel, Sitaram Iyer, and Juan Navarro.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT
 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY
 * WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
31
32 #include <sys/cdefs.h>
33 __FBSDID("$FreeBSD: releng/8.4/sys/vm/vm_phys.c 226897 2011-10-29 06:28:18Z attilio $");
34
35 #include "opt_ddb.h"
36 #include "opt_vm.h"
37
38 #include <sys/param.h>
39 #include <sys/systm.h>
40 #include <sys/lock.h>
41 #include <sys/kernel.h>
42 #include <sys/malloc.h>
43 #include <sys/mutex.h>
44 #include <sys/queue.h>
45 #include <sys/sbuf.h>
46 #include <sys/sysctl.h>
47 #include <sys/vmmeter.h>
48 #include <sys/vnode.h>
49
50 #include <ddb/ddb.h>
51
52 #include <vm/vm.h>
53 #include <vm/vm_param.h>
54 #include <vm/vm_kern.h>
55 #include <vm/vm_object.h>
56 #include <vm/vm_page.h>
57 #include <vm/vm_phys.h>
58 #include <vm/vm_reserv.h>
59
struct vm_freelist {
	struct pglist pl;
	int lcnt;
};

struct vm_phys_seg {
	vm_paddr_t start;
	vm_paddr_t end;
	vm_page_t first_page;
	struct vm_freelist (*free_queues)[VM_NFREEPOOL][VM_NFREEORDER];
};

static struct vm_phys_seg vm_phys_segs[VM_PHYSSEG_MAX];

static int vm_phys_nsegs;

static struct vm_freelist
    vm_phys_free_queues[VM_NFREELIST][VM_NFREEPOOL][VM_NFREEORDER];
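
/*
 * The free queues form a three-level table indexed by free list, pool,
 * and buddy order.  For example, the queue of order-3 (eight-page)
 * blocks in the default pool of the default free list is reached with
 *
 *	fl = vm_phys_free_queues[VM_FREELIST_DEFAULT][VM_FREEPOOL_DEFAULT];
 *
 * after which fl[3].pl is the block queue and fl[3].lcnt its length.
 */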

static int vm_nfreelists = VM_FREELIST_DEFAULT + 1;

static int cnt_prezero;
SYSCTL_INT(_vm_stats_misc, OID_AUTO, cnt_prezero, CTLFLAG_RD,
    &cnt_prezero, 0, "The number of physical pages prezeroed at idle time");

static int sysctl_vm_phys_free(SYSCTL_HANDLER_ARGS);
SYSCTL_OID(_vm, OID_AUTO, phys_free, CTLTYPE_STRING | CTLFLAG_RD,
    NULL, 0, sysctl_vm_phys_free, "A", "Phys Free Info");

static int sysctl_vm_phys_segs(SYSCTL_HANDLER_ARGS);
SYSCTL_OID(_vm, OID_AUTO, phys_segs, CTLTYPE_STRING | CTLFLAG_RD,
    NULL, 0, sysctl_vm_phys_segs, "A", "Phys Seg Info");

static void vm_phys_create_seg(vm_paddr_t start, vm_paddr_t end, int flind);
static int vm_phys_paddr_to_segind(vm_paddr_t pa);
static void vm_phys_split_pages(vm_page_t m, int oind, struct vm_freelist *fl,
    int order);

/*
 * Outputs the state of the physical memory allocator, specifically,
 * the amount of physical memory in each free list.
 */
static int
sysctl_vm_phys_free(SYSCTL_HANDLER_ARGS)
{
	struct sbuf sbuf;
	struct vm_freelist *fl;
	char *cbuf;
	const int cbufsize = vm_nfreelists*(VM_NFREEORDER + 1)*81;
	int error, flind, oind, pind;

	cbuf = malloc(cbufsize, M_TEMP, M_WAITOK | M_ZERO);
	sbuf_new(&sbuf, cbuf, cbufsize, SBUF_FIXEDLEN);
	for (flind = 0; flind < vm_nfreelists; flind++) {
		sbuf_printf(&sbuf, "\nFREE LIST %d:\n"
		    "\n  ORDER (SIZE)  |  NUMBER"
		    "\n              ", flind);
		for (pind = 0; pind < VM_NFREEPOOL; pind++)
			sbuf_printf(&sbuf, "  |  POOL %d", pind);
		sbuf_printf(&sbuf, "\n--            ");
		for (pind = 0; pind < VM_NFREEPOOL; pind++)
			sbuf_printf(&sbuf, "-- --      ");
		sbuf_printf(&sbuf, "--\n");
		for (oind = VM_NFREEORDER - 1; oind >= 0; oind--) {
			sbuf_printf(&sbuf, "  %2d (%6dK)", oind,
			    1 << (PAGE_SHIFT - 10 + oind));
			for (pind = 0; pind < VM_NFREEPOOL; pind++) {
				fl = vm_phys_free_queues[flind][pind];
				sbuf_printf(&sbuf, "  |  %6d", fl[oind].lcnt);
			}
			sbuf_printf(&sbuf, "\n");
		}
	}
	sbuf_finish(&sbuf);
	error = SYSCTL_OUT(req, sbuf_data(&sbuf), sbuf_len(&sbuf));
	sbuf_delete(&sbuf);
	free(cbuf, M_TEMP);
	return (error);
}

/*
 * Outputs the set of physical memory segments.
 */
static int
sysctl_vm_phys_segs(SYSCTL_HANDLER_ARGS)
{
	struct sbuf sbuf;
	struct vm_phys_seg *seg;
	char *cbuf;
	const int cbufsize = VM_PHYSSEG_MAX*(VM_NFREEORDER + 1)*81;
	int error, segind;

	cbuf = malloc(cbufsize, M_TEMP, M_WAITOK | M_ZERO);
	sbuf_new(&sbuf, cbuf, cbufsize, SBUF_FIXEDLEN);
	for (segind = 0; segind < vm_phys_nsegs; segind++) {
		sbuf_printf(&sbuf, "\nSEGMENT %d:\n\n", segind);
		seg = &vm_phys_segs[segind];
		sbuf_printf(&sbuf, "start:     %#jx\n",
		    (uintmax_t)seg->start);
		sbuf_printf(&sbuf, "end:       %#jx\n",
		    (uintmax_t)seg->end);
		sbuf_printf(&sbuf, "free list: %p\n", seg->free_queues);
	}
	sbuf_finish(&sbuf);
	error = SYSCTL_OUT(req, sbuf_data(&sbuf), sbuf_len(&sbuf));
	sbuf_delete(&sbuf);
	free(cbuf, M_TEMP);
	return (error);
}

/*
 * Create a physical memory segment.
 */
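/*
 * With VM_PHYSSEG_SPARSE, vm_page structures are allocated densely in
 * vm_page_array, so the pages-so-far count computed below indexes the
 * first unused array slot; with a dense vm_page_array, first_page comes
 * directly from the PHYS_TO_VM_PAGE() mapping of the start address.
 */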
static void
vm_phys_create_seg(vm_paddr_t start, vm_paddr_t end, int flind)
{
	struct vm_phys_seg *seg;
#ifdef VM_PHYSSEG_SPARSE
	long pages;
	int segind;

	pages = 0;
	for (segind = 0; segind < vm_phys_nsegs; segind++) {
		seg = &vm_phys_segs[segind];
		pages += atop(seg->end - seg->start);
	}
#endif
	KASSERT(vm_phys_nsegs < VM_PHYSSEG_MAX,
	    ("vm_phys_create_seg: increase VM_PHYSSEG_MAX"));
	seg = &vm_phys_segs[vm_phys_nsegs++];
	seg->start = start;
	seg->end = end;
#ifdef VM_PHYSSEG_SPARSE
	seg->first_page = &vm_page_array[pages];
#else
	seg->first_page = PHYS_TO_VM_PAGE(start);
#endif
	seg->free_queues = &vm_phys_free_queues[flind];
}

/*
 * Initialize the physical memory allocator.
 */
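/*
 * phys_avail[] is an array of {start, end} physical address pairs,
 * terminated by a zero end address; e.g. (values hypothetical)
 *
 *	phys_avail[] = { 0x1000, 0x9f000, 0x100000, 0x40000000, 0, 0 };
 *
 * Each range becomes one segment, except that a range straddling the
 * 16MB ISA DMA limit or VM_HIGHMEM_ADDRESS is split at that cutoff so
 * that the pieces can be placed on different free lists.
 */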
void
vm_phys_init(void)
{
	struct vm_freelist *fl;
	int flind, i, oind, pind;

	for (i = 0; phys_avail[i + 1] != 0; i += 2) {
#ifdef	VM_FREELIST_ISADMA
		if (phys_avail[i] < 16777216) {
			if (phys_avail[i + 1] > 16777216) {
				vm_phys_create_seg(phys_avail[i], 16777216,
				    VM_FREELIST_ISADMA);
				vm_phys_create_seg(16777216, phys_avail[i + 1],
				    VM_FREELIST_DEFAULT);
			} else {
				vm_phys_create_seg(phys_avail[i],
				    phys_avail[i + 1], VM_FREELIST_ISADMA);
			}
			if (VM_FREELIST_ISADMA >= vm_nfreelists)
				vm_nfreelists = VM_FREELIST_ISADMA + 1;
		} else
#endif
#ifdef	VM_FREELIST_HIGHMEM
		if (phys_avail[i + 1] > VM_HIGHMEM_ADDRESS) {
			if (phys_avail[i] < VM_HIGHMEM_ADDRESS) {
				vm_phys_create_seg(phys_avail[i],
				    VM_HIGHMEM_ADDRESS, VM_FREELIST_DEFAULT);
				vm_phys_create_seg(VM_HIGHMEM_ADDRESS,
				    phys_avail[i + 1], VM_FREELIST_HIGHMEM);
			} else {
				vm_phys_create_seg(phys_avail[i],
				    phys_avail[i + 1], VM_FREELIST_HIGHMEM);
			}
			if (VM_FREELIST_HIGHMEM >= vm_nfreelists)
				vm_nfreelists = VM_FREELIST_HIGHMEM + 1;
		} else
#endif
		vm_phys_create_seg(phys_avail[i], phys_avail[i + 1],
		    VM_FREELIST_DEFAULT);
	}
	for (flind = 0; flind < vm_nfreelists; flind++) {
		for (pind = 0; pind < VM_NFREEPOOL; pind++) {
			fl = vm_phys_free_queues[flind][pind];
			for (oind = 0; oind < VM_NFREEORDER; oind++)
				TAILQ_INIT(&fl[oind].pl);
		}
	}
}

/*
 * Split a contiguous, power of two-sized set of physical pages.
 */
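/*
 * Example (assuming PAGE_SHIFT == 12): splitting an order-2 block "m"
 * to satisfy an order-0 request first frees m[2] at order 1 (pages 2-3,
 * the upper 8KB half) and then m[1] at order 0 (page 1), leaving page
 * m[0] as the order-0 page the caller dequeued.
 */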
static __inline void
vm_phys_split_pages(vm_page_t m, int oind, struct vm_freelist *fl, int order)
{
	vm_page_t m_buddy;

	while (oind > order) {
		oind--;
		m_buddy = &m[1 << oind];
		KASSERT(m_buddy->order == VM_NFREEORDER,
		    ("vm_phys_split_pages: page %p has unexpected order %d",
		    m_buddy, m_buddy->order));
		m_buddy->order = oind;
		TAILQ_INSERT_HEAD(&fl[oind].pl, m_buddy, pageq);
		fl[oind].lcnt++;
	}
}

/*
 * Initialize a physical page and add it to the free lists.
 */
void
vm_phys_add_page(vm_paddr_t pa)
{
	vm_page_t m;

	cnt.v_page_count++;
	m = vm_phys_paddr_to_vm_page(pa);
	m->phys_addr = pa;
	m->segind = vm_phys_paddr_to_segind(pa);
	m->flags = PG_FREE;
	KASSERT(m->order == VM_NFREEORDER,
	    ("vm_phys_add_page: page %p has unexpected order %d",
	    m, m->order));
	m->pool = VM_FREEPOOL_DEFAULT;
	pmap_page_init(m);
	mtx_lock(&vm_page_queue_free_mtx);
	cnt.v_free_count++;
	vm_phys_free_pages(m, 0);
	mtx_unlock(&vm_page_queue_free_mtx);
}

/*
 * Allocate a contiguous, power of two-sized set of physical pages
 * from the free lists.
 *
 * The free page queues must be locked.
 */
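/*
 * Hypothetical usage sketch: allocate a single page from the default
 * pool, trying each free list in turn:
 *
 *	mtx_lock(&vm_page_queue_free_mtx);
 *	m = vm_phys_alloc_pages(VM_FREEPOOL_DEFAULT, 0);
 *	mtx_unlock(&vm_page_queue_free_mtx);
 *
 * A NULL return means that no free block of the requested or any larger
 * order exists on any free list.
 */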
vm_page_t
vm_phys_alloc_pages(int pool, int order)
{
	vm_page_t m;
	int flind;

	for (flind = 0; flind < vm_nfreelists; flind++) {
		m = vm_phys_alloc_freelist_pages(flind, pool, order);
		if (m != NULL)
			return (m);
	}
	return (NULL);
}

/*
 * Find and dequeue a free page on the given free list, with the
 * specified pool and order.
 */
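/*
 * Hypothetical example: a consumer that must stay below 16MB could
 * request an order-4 (16-page) block from the ISA DMA free list alone:
 *
 *	m = vm_phys_alloc_freelist_pages(VM_FREELIST_ISADMA,
 *	    VM_FREEPOOL_DEFAULT, 4);
 *
 * Unlike vm_phys_alloc_pages(), there is no fallback to other free
 * lists; only the pools within the given list are searched.
 */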
vm_page_t
vm_phys_alloc_freelist_pages(int flind, int pool, int order)
{
	struct vm_freelist *fl;
	struct vm_freelist *alt;
	int oind, pind;
	vm_page_t m;

	KASSERT(flind < VM_NFREELIST,
	    ("vm_phys_alloc_freelist_pages: freelist %d is out of range", flind));
	KASSERT(pool < VM_NFREEPOOL,
	    ("vm_phys_alloc_freelist_pages: pool %d is out of range", pool));
	KASSERT(order < VM_NFREEORDER,
	    ("vm_phys_alloc_freelist_pages: order %d is out of range", order));
	mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
	fl = vm_phys_free_queues[flind][pool];
	for (oind = order; oind < VM_NFREEORDER; oind++) {
		m = TAILQ_FIRST(&fl[oind].pl);
		if (m != NULL) {
			TAILQ_REMOVE(&fl[oind].pl, m, pageq);
			fl[oind].lcnt--;
			m->order = VM_NFREEORDER;
			vm_phys_split_pages(m, oind, fl, order);
			return (m);
		}
	}

	/*
	 * The given pool was empty.  Find the largest
	 * contiguous, power-of-two-sized set of pages in any
	 * pool.  Transfer these pages to the given pool, and
	 * use them to satisfy the allocation.
	 */
	for (oind = VM_NFREEORDER - 1; oind >= order; oind--) {
		for (pind = 0; pind < VM_NFREEPOOL; pind++) {
			alt = vm_phys_free_queues[flind][pind];
			m = TAILQ_FIRST(&alt[oind].pl);
			if (m != NULL) {
				TAILQ_REMOVE(&alt[oind].pl, m, pageq);
				alt[oind].lcnt--;
				m->order = VM_NFREEORDER;
				vm_phys_set_pool(pool, m, oind);
				vm_phys_split_pages(m, oind, fl, order);
				return (m);
			}
		}
	}
	return (NULL);
}

/*
 * Allocate physical memory from phys_avail[].
 */
vm_paddr_t
vm_phys_bootstrap_alloc(vm_size_t size, unsigned long alignment)
{
	vm_paddr_t pa;
	int i;

	size = round_page(size);
	for (i = 0; phys_avail[i + 1] != 0; i += 2) {
		if (phys_avail[i + 1] - phys_avail[i] < size)
			continue;
		pa = phys_avail[i];
		phys_avail[i] += size;
		return (pa);
	}
	panic("vm_phys_bootstrap_alloc");
}

/*
 * Find the vm_page corresponding to the given physical address.
 */
vm_page_t
vm_phys_paddr_to_vm_page(vm_paddr_t pa)
{
	struct vm_phys_seg *seg;
	int segind;

	for (segind = 0; segind < vm_phys_nsegs; segind++) {
		seg = &vm_phys_segs[segind];
		if (pa >= seg->start && pa < seg->end)
			return (&seg->first_page[atop(pa - seg->start)]);
	}
	return (NULL);
}

/*
 * Find the segment containing the given physical address.
 */
static int
vm_phys_paddr_to_segind(vm_paddr_t pa)
{
	struct vm_phys_seg *seg;
	int segind;

	for (segind = 0; segind < vm_phys_nsegs; segind++) {
		seg = &vm_phys_segs[segind];
		if (pa >= seg->start && pa < seg->end)
			return (segind);
	}
	panic("vm_phys_paddr_to_segind: paddr %#jx is not in any segment",
	    (uintmax_t)pa);
}

/*
 * Free a contiguous, power of two-sized set of physical pages.
 *
 * The free page queues must be locked.
 */
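/*
 * Example (assuming PAGE_SHIFT == 12): freeing the order-0 page at
 * 0x5000, its buddy is at 0x5000 ^ 0x1000 == 0x4000; if that page is
 * also free at order 0, the two merge into the order-1 block at 0x4000
 * (the low order bits of the address are cleared), whose buddy in turn
 * is 0x4000 ^ 0x2000 == 0x6000, and so on until a buddy is busy, the
 * buddy lies outside the segment, or the maximum order is reached.
 */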
void
vm_phys_free_pages(vm_page_t m, int order)
{
	struct vm_freelist *fl;
	struct vm_phys_seg *seg;
	vm_paddr_t pa, pa_buddy;
	vm_page_t m_buddy;

	KASSERT(m->order == VM_NFREEORDER,
	    ("vm_phys_free_pages: page %p has unexpected order %d",
	    m, m->order));
	KASSERT(m->pool < VM_NFREEPOOL,
	    ("vm_phys_free_pages: page %p has unexpected pool %d",
	    m, m->pool));
	KASSERT(order < VM_NFREEORDER,
	    ("vm_phys_free_pages: order %d is out of range", order));
	mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
	pa = VM_PAGE_TO_PHYS(m);
	seg = &vm_phys_segs[m->segind];
	while (order < VM_NFREEORDER - 1) {
		pa_buddy = pa ^ (1 << (PAGE_SHIFT + order));
		if (pa_buddy < seg->start ||
		    pa_buddy >= seg->end)
			break;
		m_buddy = &seg->first_page[atop(pa_buddy - seg->start)];
		if (m_buddy->order != order)
			break;
		fl = (*seg->free_queues)[m_buddy->pool];
		TAILQ_REMOVE(&fl[m_buddy->order].pl, m_buddy, pageq);
		fl[m_buddy->order].lcnt--;
		m_buddy->order = VM_NFREEORDER;
		if (m_buddy->pool != m->pool)
			vm_phys_set_pool(m->pool, m_buddy, order);
		order++;
		pa &= ~((1 << (PAGE_SHIFT + order)) - 1);
		m = &seg->first_page[atop(pa - seg->start)];
	}
	m->order = order;
	fl = (*seg->free_queues)[m->pool];
	TAILQ_INSERT_TAIL(&fl[order].pl, m, pageq);
	fl[order].lcnt++;
}

/*
 * Set the pool for a contiguous, power of two-sized set of physical pages.
 */
void
vm_phys_set_pool(int pool, vm_page_t m, int order)
{
	vm_page_t m_tmp;

	for (m_tmp = m; m_tmp < &m[1 << order]; m_tmp++)
		m_tmp->pool = pool;
}

/*
 * Search for the given physical page "m" in the free lists.  If the search
 * succeeds, remove "m" from the free lists and return TRUE.  Otherwise, return
 * FALSE, indicating that "m" is not in the free lists.
 *
 * The free page queues must be locked.
 */
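/*
 * Example (assuming PAGE_SHIFT == 12): extracting page "m" at 0x3000
 * from a free order-2 block at 0x0000 returns the halves that do not
 * contain "m" to the free lists: first 0x0000 as an order-1 block, then
 * 0x2000 as an order-0 page, leaving exactly the page at 0x3000 removed.
 */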
boolean_t
vm_phys_unfree_page(vm_page_t m)
{
	struct vm_freelist *fl;
	struct vm_phys_seg *seg;
	vm_paddr_t pa, pa_half;
	vm_page_t m_set, m_tmp;
	int order;

	mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);

	/*
	 * First, find the contiguous, power of two-sized set of free
	 * physical pages containing the given physical page "m" and
	 * assign it to "m_set".
	 */
	seg = &vm_phys_segs[m->segind];
	for (m_set = m, order = 0; m_set->order == VM_NFREEORDER &&
	    order < VM_NFREEORDER - 1; ) {
		order++;
		pa = m->phys_addr & (~(vm_paddr_t)0 << (PAGE_SHIFT + order));
		if (pa >= seg->start)
			m_set = &seg->first_page[atop(pa - seg->start)];
		else
			return (FALSE);
	}
	if (m_set->order < order)
		return (FALSE);
	if (m_set->order == VM_NFREEORDER)
		return (FALSE);
	KASSERT(m_set->order < VM_NFREEORDER,
	    ("vm_phys_unfree_page: page %p has unexpected order %d",
	    m_set, m_set->order));

	/*
	 * Next, remove "m_set" from the free lists.  Finally, extract
	 * "m" from "m_set" using an iterative algorithm: While "m_set"
	 * is larger than a page, shrink "m_set" by returning the half
	 * of "m_set" that does not contain "m" to the free lists.
	 */
	fl = (*seg->free_queues)[m_set->pool];
	order = m_set->order;
	TAILQ_REMOVE(&fl[order].pl, m_set, pageq);
	fl[order].lcnt--;
	m_set->order = VM_NFREEORDER;
	while (order > 0) {
		order--;
		pa_half = m_set->phys_addr ^ (1 << (PAGE_SHIFT + order));
		if (m->phys_addr < pa_half)
			m_tmp = &seg->first_page[atop(pa_half - seg->start)];
		else {
			m_tmp = m_set;
			m_set = &seg->first_page[atop(pa_half - seg->start)];
		}
		m_tmp->order = order;
		TAILQ_INSERT_HEAD(&fl[order].pl, m_tmp, pageq);
		fl[order].lcnt++;
	}
	KASSERT(m_set == m, ("vm_phys_unfree_page: fatal inconsistency"));
	return (TRUE);
}

/*
 * Try to zero one physical page.  Used by an idle priority thread.
 */
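/*
 * The static cursor (fl, flind, oind, pind) survives across calls, so
 * each invocation resumes the scan where the previous one stopped
 * rather than always rescanning the same queues from the start.
 */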
boolean_t
vm_phys_zero_pages_idle(void)
{
	static struct vm_freelist *fl = vm_phys_free_queues[0][0];
	static int flind, oind, pind;
	vm_page_t m, m_tmp;

	mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
	for (;;) {
		TAILQ_FOREACH_REVERSE(m, &fl[oind].pl, pglist, pageq) {
			for (m_tmp = m; m_tmp < &m[1 << oind]; m_tmp++) {
				if ((m_tmp->flags & (PG_CACHED | PG_ZERO)) == 0) {
					vm_phys_unfree_page(m_tmp);
					cnt.v_free_count--;
					mtx_unlock(&vm_page_queue_free_mtx);
					pmap_zero_page_idle(m_tmp);
					m_tmp->flags |= PG_ZERO;
					mtx_lock(&vm_page_queue_free_mtx);
					cnt.v_free_count++;
					vm_phys_free_pages(m_tmp, 0);
					vm_page_zero_count++;
					cnt_prezero++;
					return (TRUE);
				}
			}
		}
		oind++;
		if (oind == VM_NFREEORDER) {
			oind = 0;
			pind++;
			if (pind == VM_NFREEPOOL) {
				pind = 0;
				flind++;
				if (flind == vm_nfreelists)
					flind = 0;
			}
			fl = vm_phys_free_queues[flind][pind];
		}
	}
}

/*
 * Allocate a contiguous set of physical pages of the given size
 * "npages" from the free lists.  All of the physical pages must be at
 * or above the given physical address "low" and below the given
 * physical address "high".  The given value "alignment" determines the
 * alignment of the first physical page in the set.  If the given value
 * "boundary" is non-zero, then the set of physical pages cannot cross
 * any physical address boundary that is a multiple of that value.  Both
 * "alignment" and "boundary" must be a power of two.
 */
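/*
 * Example (assuming PAGE_SHIFT == 12): with npages == 2 (size 0x2000),
 * alignment == PAGE_SIZE, and boundary == 0x10000, a candidate at
 * pa == 0xf000 is rejected because
 *
 *	(0xf000 ^ 0x10fff) & ~0xffff == 0x10000 != 0
 *
 * i.e. the run [0xf000, 0x10fff] would cross the 64KB boundary at
 * 0x10000, while a candidate at pa == 0xe000 passes, since its run
 * [0xe000, 0xffff] stays within one 64KB-aligned region.
 */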
vm_page_t
vm_phys_alloc_contig(unsigned long npages, vm_paddr_t low, vm_paddr_t high,
    unsigned long alignment, unsigned long boundary)
{
	struct vm_freelist *fl;
	struct vm_phys_seg *seg;
	struct vnode *vp;
	vm_paddr_t pa, pa_last, size;
	vm_page_t deferred_vdrop_list, m, m_ret;
	int flind, i, oind, order, pind;

	size = npages << PAGE_SHIFT;
	KASSERT(size != 0,
	    ("vm_phys_alloc_contig: size must not be 0"));
	KASSERT((alignment & (alignment - 1)) == 0,
	    ("vm_phys_alloc_contig: alignment must be a power of 2"));
	KASSERT((boundary & (boundary - 1)) == 0,
	    ("vm_phys_alloc_contig: boundary must be a power of 2"));
	deferred_vdrop_list = NULL;
	/* Compute the queue that is the best fit for npages. */
	for (order = 0; (1 << order) < npages; order++);
	mtx_lock(&vm_page_queue_free_mtx);
#if VM_NRESERVLEVEL > 0
retry:
#endif
	for (flind = 0; flind < vm_nfreelists; flind++) {
		for (oind = min(order, VM_NFREEORDER - 1); oind < VM_NFREEORDER; oind++) {
			for (pind = 0; pind < VM_NFREEPOOL; pind++) {
				fl = vm_phys_free_queues[flind][pind];
				TAILQ_FOREACH(m_ret, &fl[oind].pl, pageq) {
					/*
					 * A free list may contain physical pages
					 * from one or more segments.
					 */
					seg = &vm_phys_segs[m_ret->segind];
					if (seg->start > high ||
					    low >= seg->end)
						continue;

					/*
					 * Is the size of this allocation request
					 * larger than the largest block size?
					 */
					if (order >= VM_NFREEORDER) {
						/*
						 * Determine if a sufficient number
						 * of subsequent blocks to satisfy
						 * the allocation request are free.
						 */
						pa = VM_PAGE_TO_PHYS(m_ret);
						pa_last = pa + size;
						for (;;) {
							pa += 1 << (PAGE_SHIFT + VM_NFREEORDER - 1);
							if (pa >= pa_last)
								break;
							if (pa < seg->start ||
							    pa >= seg->end)
								break;
							m = &seg->first_page[atop(pa - seg->start)];
							if (m->order != VM_NFREEORDER - 1)
								break;
						}
						/* If not, continue to the next block. */
						if (pa < pa_last)
							continue;
					}

					/*
					 * Determine if the blocks are within the given range,
					 * satisfy the given alignment, and do not cross the
					 * given boundary.
					 */
					pa = VM_PAGE_TO_PHYS(m_ret);
					if (pa >= low &&
					    pa + size <= high &&
					    (pa & (alignment - 1)) == 0 &&
					    ((pa ^ (pa + size - 1)) & ~(boundary - 1)) == 0)
						goto done;
				}
			}
		}
	}
#if VM_NRESERVLEVEL > 0
	if (vm_reserv_reclaim_contig(size, low, high, alignment, boundary))
		goto retry;
#endif
	mtx_unlock(&vm_page_queue_free_mtx);
	return (NULL);
done:
	for (m = m_ret; m < &m_ret[npages]; m = &m[1 << oind]) {
		fl = (*seg->free_queues)[m->pool];
		TAILQ_REMOVE(&fl[m->order].pl, m, pageq);
		fl[m->order].lcnt--;
		m->order = VM_NFREEORDER;
	}
	if (m_ret->pool != VM_FREEPOOL_DEFAULT)
		vm_phys_set_pool(VM_FREEPOOL_DEFAULT, m_ret, oind);
	fl = (*seg->free_queues)[m_ret->pool];
	vm_phys_split_pages(m_ret, oind, fl, order);
	for (i = 0; i < npages; i++) {
		m = &m_ret[i];
		vp = vm_page_alloc_init(m);
		if (vp != NULL) {
			/*
			 * Enqueue the vnode for deferred vdrop().
			 *
			 * Unmanaged pages don't use "pageq", so it
			 * can be safely abused to construct a short-
			 * lived queue of vnodes.
			 */
			m->pageq.tqe_prev = (void *)vp;
			m->pageq.tqe_next = deferred_vdrop_list;
			deferred_vdrop_list = m;
		}
	}
	for (; i < roundup2(npages, 1 << imin(oind, order)); i++) {
		m = &m_ret[i];
		KASSERT(m->order == VM_NFREEORDER,
		    ("vm_phys_alloc_contig: page %p has unexpected order %d",
		    m, m->order));
		vm_phys_free_pages(m, 0);
	}
	mtx_unlock(&vm_page_queue_free_mtx);
	while (deferred_vdrop_list != NULL) {
		vdrop((struct vnode *)deferred_vdrop_list->pageq.tqe_prev);
		deferred_vdrop_list = deferred_vdrop_list->pageq.tqe_next;
	}
	return (m_ret);
}

#ifdef DDB
/*
 * Show the number of physical pages in each of the free lists.
 */
DB_SHOW_COMMAND(freepages, db_show_freepages)
{
	struct vm_freelist *fl;
	int flind, oind, pind;

	for (flind = 0; flind < vm_nfreelists; flind++) {
		db_printf("FREE LIST %d:\n"
		    "\n  ORDER (SIZE)  |  NUMBER"
		    "\n              ", flind);
		for (pind = 0; pind < VM_NFREEPOOL; pind++)
			db_printf("  |  POOL %d", pind);
		db_printf("\n--            ");
		for (pind = 0; pind < VM_NFREEPOOL; pind++)
			db_printf("-- --      ");
		db_printf("--\n");
		for (oind = VM_NFREEORDER - 1; oind >= 0; oind--) {
			db_printf("  %2.2d (%6.6dK)", oind,
			    1 << (PAGE_SHIFT - 10 + oind));
			for (pind = 0; pind < VM_NFREEPOOL; pind++) {
				fl = vm_phys_free_queues[flind][pind];
				db_printf("  |  %6.6d", fl[oind].lcnt);
			}
			db_printf("\n");
		}
		db_printf("\n");
	}
}
#endif