FreeBSD/Linux Kernel Cross Reference
sys/vm/vm_contig.c


/*
 * Copyright (c) 1991 Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by the University of
 *      California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *      from: @(#)vm_page.c     7.4 (Berkeley) 5/7/91
 */

/*
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: releng/5.2/sys/vm/vm_contig.c 121226 2003-10-18 21:09:21Z alc $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/vmmeter.h>
#include <sys/vnode.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_pager.h>
#include <vm/vm_extern.h>

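/*
 * Attempt to launder pages on the given page queue (PQ_INACTIVE or
 * PQ_ACTIVE) so that more contiguous physical memory becomes available:
 * dirty vnode-backed pages are cleaned synchronously through
 * vm_object_page_clean(), dirty swap- or default-object pages are flushed
 * with vm_pageout_flush(), and clean, unbusied, unheld pages are moved to
 * the cache.  The caller holds the page queues lock; the vnode path drops
 * and reacquires it around the vnode I/O.  Returns TRUE as soon as a page
 * has been laundered or slept on (the caller should rescan), FALSE once
 * the whole queue has been traversed without laundering anything.
 */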
static int
vm_contig_launder(int queue)
{
        vm_object_t object;
        vm_page_t m, m_tmp, next;
        struct vnode *vp;

        for (m = TAILQ_FIRST(&vm_page_queues[queue].pl); m != NULL; m = next) {
                next = TAILQ_NEXT(m, pageq);
                KASSERT(m->queue == queue,
                    ("vm_contig_launder: page %p's queue is not %d", m, queue));
                if (!VM_OBJECT_TRYLOCK(m->object))
                        continue;
                if (vm_page_sleep_if_busy(m, TRUE, "vpctw0")) {
                        VM_OBJECT_UNLOCK(m->object);
                        vm_page_lock_queues();
                        return (TRUE);
                }
                vm_page_test_dirty(m);
                if (m->dirty) {
                        object = m->object;
                        if (object->type == OBJT_VNODE) {
                                vm_page_unlock_queues();
                                vp = object->handle;
                                VM_OBJECT_UNLOCK(object);
                                vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, curthread);
                                VM_OBJECT_LOCK(object);
                                vm_object_page_clean(object, 0, 0, OBJPC_SYNC);
                                VM_OBJECT_UNLOCK(object);
                                VOP_UNLOCK(vp, 0, curthread);
                                vm_page_lock_queues();
                                return (TRUE);
                        } else if (object->type == OBJT_SWAP ||
                                   object->type == OBJT_DEFAULT) {
                                m_tmp = m;
                                vm_pageout_flush(&m_tmp, 1, 0);
                                VM_OBJECT_UNLOCK(object);
                                return (TRUE);
                        }
                } else if (m->busy == 0 && m->hold_count == 0)
                        vm_page_cache(m);
                VM_OBJECT_UNLOCK(m->object);
        }
        return (FALSE);
}

/*
 * This interface is for merging with malloc() someday.
 * Even if we never implement compaction so that contiguous allocation
 * works after initialization time, malloc()'s data structures are good
 * for statistics and for allocations of less than a page.
 */
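/*
 * contigmalloc1() scans vm_page_array for a physically contiguous run of
 * free or cached pages that lies within [low, high), starts on a multiple
 * of "alignment", and does not cross a multiple of "boundary".  Up to two
 * passes are made; when a scan fails, the inactive and active queues are
 * laundered via vm_contig_launder() and the scan is retried.  On success
 * the pages are pulled off the free/cache queues, inserted into
 * kernel_object at a newly allocated virtual address range in "map",
 * wired, and the kernel virtual address is returned; otherwise NULL.
 */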
static void *
contigmalloc1(
        unsigned long size,     /* should be size_t here and for malloc() */
        struct malloc_type *type,
        int flags,
        vm_paddr_t low,
        vm_paddr_t high,
        unsigned long alignment,
        unsigned long boundary,
        vm_map_t map)
{
        int i, s, start;
        vm_paddr_t phys;
        vm_object_t object;
        vm_offset_t addr, tmp_addr;
        int pass, pqtype;
        vm_page_t pga = vm_page_array;

        size = round_page(size);
        if (size == 0)
                panic("contigmalloc1: size must not be 0");
        if ((alignment & (alignment - 1)) != 0)
                panic("contigmalloc1: alignment must be a power of 2");
        if ((boundary & (boundary - 1)) != 0)
                panic("contigmalloc1: boundary must be a power of 2");

        start = 0;
        for (pass = 0; pass <= 1; pass++) {
                s = splvm();
                vm_page_lock_queues();
again:
                /*
                 * Find first page in array that is free, within range,
                 * aligned, and such that the boundary won't be crossed.
                 */
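                /*
                 * The boundary test XORs the physical addresses of the first
                 * and last byte of the candidate run; masking the result with
                 * ~(boundary - 1) keeps only the bits at or above the boundary
                 * granularity, so a nonzero value means the run would straddle
                 * a boundary multiple.  For example, with boundary = 0x10000
                 * and size = 0x2000, a run at phys = 0x3f000 ends at 0x40fff:
                 * 0x3f000 ^ 0x40fff = 0x7ffff, and 0x7ffff & ~0xffff = 0x70000,
                 * so it is rejected; a run at 0x40000 gives 0x40000 ^ 0x41fff =
                 * 0x1fff, which masks to zero and is accepted.  A boundary of 0
                 * masks everything away and imposes no restriction.
                 */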
                for (i = start; i < cnt.v_page_count; i++) {
                        phys = VM_PAGE_TO_PHYS(&pga[i]);
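                        /*
                         * The free and cache queues are subdivided by page
                         * color, so subtracting m->pc from m->queue recovers
                         * the base queue type for comparison against PQ_FREE
                         * and PQ_CACHE.
                         */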
                        pqtype = pga[i].queue - pga[i].pc;
                        if (((pqtype == PQ_FREE) || (pqtype == PQ_CACHE)) &&
                            (phys >= low) && (phys < high) &&
                            ((phys & (alignment - 1)) == 0) &&
                            (((phys ^ (phys + size - 1)) & ~(boundary - 1)) == 0))
                                break;
                }

                /*
                 * If the above failed or we will exceed the upper bound, fail.
                 */
                if ((i == cnt.v_page_count) ||
                        ((VM_PAGE_TO_PHYS(&pga[i]) + size) > high)) {
again1:
                        if (vm_contig_launder(PQ_INACTIVE))
                                goto again1;
                        if (vm_contig_launder(PQ_ACTIVE))
                                goto again1;
                        vm_page_unlock_queues();
                        splx(s);
                        continue;
                }
                start = i;

                /*
                 * Check successive pages for contiguous and free.
                 */
                for (i = start + 1; i < (start + size / PAGE_SIZE); i++) {
                        pqtype = pga[i].queue - pga[i].pc;
                        if ((VM_PAGE_TO_PHYS(&pga[i]) !=
                            (VM_PAGE_TO_PHYS(&pga[i - 1]) + PAGE_SIZE)) ||
                            ((pqtype != PQ_FREE) && (pqtype != PQ_CACHE))) {
                                start++;
                                goto again;
                        }
                }
                for (i = start; i < (start + size / PAGE_SIZE); i++) {
                        vm_page_t m = &pga[i];

                        if ((m->queue - m->pc) == PQ_CACHE) {
                                object = m->object;
                                if (!VM_OBJECT_TRYLOCK(object)) {
                                        start++;
                                        goto again;
                                }
                                vm_page_busy(m);
                                vm_page_free(m);
                                VM_OBJECT_UNLOCK(object);
                        }
                        mtx_lock_spin(&vm_page_queue_free_mtx);
                        vm_pageq_remove_nowakeup(m);
                        m->valid = VM_PAGE_BITS_ALL;
                        if (m->flags & PG_ZERO)
                                vm_page_zero_count--;
                        /* Don't clear the PG_ZERO flag, we'll need it later. */
                        m->flags &= PG_ZERO;
                        KASSERT(m->dirty == 0,
                            ("contigmalloc1: page %p was dirty", m));
                        m->wire_count = 0;
                        m->busy = 0;
                        m->object = NULL;
                        mtx_unlock_spin(&vm_page_queue_free_mtx);
                }
                vm_page_unlock_queues();
                /*
                 * We've found a contiguous chunk that meets our requirements.
                 * Allocate kernel VM, unfree and assign the physical pages to
                 * it, and return the kernel VM pointer.
                 */
                vm_map_lock(map);
                if (vm_map_findspace(map, vm_map_min(map), size, &addr) !=
                    KERN_SUCCESS) {
                        /*
                         * XXX We almost never run out of kernel virtual
                         * space, so we don't make the allocated memory
                         * above available.
                         */
                        vm_map_unlock(map);
                        splx(s);
                        return (NULL);
                }
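                /*
                 * kernel_object backs the entire kernel virtual address range,
                 * so the offset of this mapping within the object is simply
                 * the virtual address minus VM_MIN_KERNEL_ADDRESS; the
                 * vm_page_insert() loop below uses the same translation.
                 */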
                vm_object_reference(kernel_object);
                vm_map_insert(map, kernel_object, addr - VM_MIN_KERNEL_ADDRESS,
                    addr, addr + size, VM_PROT_ALL, VM_PROT_ALL, 0);
                vm_map_unlock(map);

                tmp_addr = addr;
                VM_OBJECT_LOCK(kernel_object);
                for (i = start; i < (start + size / PAGE_SIZE); i++) {
                        vm_page_t m = &pga[i];
                        vm_page_insert(m, kernel_object,
                                OFF_TO_IDX(tmp_addr - VM_MIN_KERNEL_ADDRESS));
                        if ((flags & M_ZERO) && !(m->flags & PG_ZERO))
                                pmap_zero_page(m);
                        m->flags = 0;
                        tmp_addr += PAGE_SIZE;
                }
                VM_OBJECT_UNLOCK(kernel_object);
                vm_map_wire(map, addr, addr + size,
                    VM_MAP_WIRE_SYSTEM|VM_MAP_WIRE_NOHOLES);

                splx(s);
                return ((void *)addr);
        }
        return (NULL);
}

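/*
 * Allocate "size" bytes (rounded up to a whole number of pages) of wired,
 * physically contiguous kernel memory whose physical addresses lie in
 * [low, high), start on a multiple of "alignment", and do not cross a
 * multiple of "boundary".  Alignment and boundary must be powers of two;
 * a boundary of 0 imposes no restriction.  M_ZERO is honored.  Giant is
 * acquired around contigmalloc1(), which does the work in kernel_map.
 * Returns a kernel virtual address, or NULL on failure.
 */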
void *
contigmalloc(
        unsigned long size,     /* should be size_t here and for malloc() */
        struct malloc_type *type,
        int flags,
        vm_paddr_t low,
        vm_paddr_t high,
        unsigned long alignment,
        unsigned long boundary)
{
        void * ret;

        mtx_lock(&Giant);
        ret = contigmalloc1(size, type, flags, low, high, alignment, boundary,
            kernel_map);
        mtx_unlock(&Giant);
        return (ret);
}

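/*
 * Release memory previously obtained from contigmalloc().  In this version
 * the caller must hold Giant (GIANT_REQUIRED); the virtual range is simply
 * returned to kernel_map with kmem_free().
 */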
void
contigfree(void *addr, unsigned long size, struct malloc_type *type)
{
        GIANT_REQUIRED;
        kmem_free(kernel_map, (vm_offset_t)addr, size);
}

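/*
 * Wrapper around contigmalloc1(): allocates "size" bytes of physically
 * contiguous memory in kernel_map with M_NOWAIT semantics, charged to
 * M_DEVBUF, with no boundary restriction.  The caller must hold Giant.
 */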
vm_offset_t
vm_page_alloc_contig(
        vm_offset_t size,
        vm_paddr_t low,
        vm_paddr_t high,
        vm_offset_t alignment)
{
        vm_offset_t ret;

        GIANT_REQUIRED;
        ret = ((vm_offset_t)contigmalloc1(size, M_DEVBUF, M_NOWAIT, low, high,
                                          alignment, 0ul, kernel_map));
        return (ret);
}
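
/*
 * Minimal usage sketch (hypothetical, not part of the original file): a
 * driver-style caller allocates a 64 KB DMA buffer that must sit below
 * 4 GB, be page-aligned, and not cross a 64 KB boundary, then frees it.
 * The names example_dma_buf, example_attach, example_detach, and the
 * EXAMPLE_ONLY guard are invented for illustration.
 */
#ifdef EXAMPLE_ONLY
static void *example_dma_buf;

static int
example_attach(void)
{
        /* Zeroed, no-sleep allocation: 64 KB below 4 GB, 64 KB boundary. */
        example_dma_buf = contigmalloc(64 * 1024, M_DEVBUF,
            M_NOWAIT | M_ZERO, 0, 0xffffffffUL, PAGE_SIZE, 64 * 1024);
        if (example_dma_buf == NULL)
                return (1);             /* allocation failed */
        return (0);
}

static void
example_detach(void)
{
        /* contigfree() asserts Giant in this version of the code. */
        mtx_lock(&Giant);
        contigfree(example_dma_buf, 64 * 1024, M_DEVBUF);
        mtx_unlock(&Giant);
}
#endif /* EXAMPLE_ONLY */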
