FreeBSD/Linux Kernel Cross Reference
sys/mm/memblock.c


    1 /*
    2  * Procedures for maintaining information about logical memory blocks.
    3  *
    4  * Peter Bergner, IBM Corp.     June 2001.
    5  * Copyright (C) 2001 Peter Bergner.
    6  *
    7  *      This program is free software; you can redistribute it and/or
    8  *      modify it under the terms of the GNU General Public License
    9  *      as published by the Free Software Foundation; either version
   10  *      2 of the License, or (at your option) any later version.
   11  */
   12 
   13 #include <linux/kernel.h>
   14 #include <linux/slab.h>
   15 #include <linux/init.h>
   16 #include <linux/bitops.h>
   17 #include <linux/poison.h>
   18 #include <linux/pfn.h>
   19 #include <linux/debugfs.h>
   20 #include <linux/seq_file.h>
   21 #include <linux/memblock.h>
   22 
   23 static struct memblock_region memblock_memory_init_regions[INIT_MEMBLOCK_REGIONS] __initdata_memblock;
   24 static struct memblock_region memblock_reserved_init_regions[INIT_MEMBLOCK_REGIONS] __initdata_memblock;
   25 
   26 struct memblock memblock __initdata_memblock = {
   27         .memory.regions         = memblock_memory_init_regions,
   28         .memory.cnt             = 1,    /* empty dummy entry */
   29         .memory.max             = INIT_MEMBLOCK_REGIONS,
   30 
   31         .reserved.regions       = memblock_reserved_init_regions,
   32         .reserved.cnt           = 1,    /* empty dummy entry */
   33         .reserved.max           = INIT_MEMBLOCK_REGIONS,
   34 
   35         .current_limit          = MEMBLOCK_ALLOC_ANYWHERE,
   36 };
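
/*
 * A minimal sketch (not part of this file) of how an architecture's early
 * boot code typically fills these tables.  The addresses are made up, and
 * SZ_512M/SZ_1M are assumed to come from <linux/sizes.h>.
 */
#if 0	/* illustrative only */
void __init example_arch_memblock_init(void)
{
	/* First describe all RAM the firmware reported... */
	memblock_add(0x00000000, SZ_512M);
	/* ...then carve out what must never be handed to allocations. */
	memblock_reserve(0x00000000, SZ_1M);	/* e.g. low firmware area */
	/* Resizing is safe only once the reservations are known. */
	memblock_allow_resize();
}
#endif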
   37 
   38 int memblock_debug __initdata_memblock;
   39 static int memblock_can_resize __initdata_memblock;
   40 static int memblock_memory_in_slab __initdata_memblock = 0;
   41 static int memblock_reserved_in_slab __initdata_memblock = 0;
   42 
   43 /* inline so we don't get a warning when pr_debug is compiled out */
   44 static __init_memblock const char *
   45 memblock_type_name(struct memblock_type *type)
   46 {
   47         if (type == &memblock.memory)
   48                 return "memory";
   49         else if (type == &memblock.reserved)
   50                 return "reserved";
   51         else
   52                 return "unknown";
   53 }
   54 
   55 /* adjust *@size so that (@base + *@size) doesn't overflow, return new size */
   56 static inline phys_addr_t memblock_cap_size(phys_addr_t base, phys_addr_t *size)
   57 {
   58         return *size = min(*size, (phys_addr_t)ULLONG_MAX - base);
   59 }
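
/*
 * Worked example for memblock_cap_size(): with a 64-bit phys_addr_t, base
 * 0xfffffffffffff000 and *size 0x2000 would wrap around; min() clamps
 * *size to ULLONG_MAX - base == 0xfff, so base + *size tops out at
 * ULLONG_MAX instead of overflowing.
 */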
   60 
   61 /*
   62  * Address comparison utilities
   63  */
   64 static unsigned long __init_memblock memblock_addrs_overlap(phys_addr_t base1, phys_addr_t size1,
   65                                        phys_addr_t base2, phys_addr_t size2)
   66 {
   67         return ((base1 < (base2 + size2)) && (base2 < (base1 + size1)));
   68 }
   69 
   70 static long __init_memblock memblock_overlaps_region(struct memblock_type *type,
   71                                         phys_addr_t base, phys_addr_t size)
   72 {
   73         unsigned long i;
   74 
   75         for (i = 0; i < type->cnt; i++) {
   76                 phys_addr_t rgnbase = type->regions[i].base;
   77                 phys_addr_t rgnsize = type->regions[i].size;
   78                 if (memblock_addrs_overlap(base, size, rgnbase, rgnsize))
   79                         break;
   80         }
   81 
   82         return (i < type->cnt) ? i : -1;
   83 }
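
/*
 * Note that memblock_addrs_overlap() treats regions as half-open
 * intervals: [0x1000, 0x2000) and [0x2000, 0x3000) do not overlap,
 * because base2 < base1 + size1 (0x2000 < 0x2000) is false.
 */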
   84 
   85 /**
   86  * memblock_find_in_range_node - find free area in given range and node
   87  * @start: start of candidate range
   88  * @end: end of candidate range, can be %MEMBLOCK_ALLOC_{ANYWHERE|ACCESSIBLE}
   89  * @size: size of free area to find
   90  * @align: alignment of free area to find
   91  * @nid: nid of the free area to find, %MAX_NUMNODES for any node
   92  *
   93  * Find @size free area aligned to @align in the specified range and node.
   94  *
   95  * RETURNS:
   96  * Found address on success, %0 on failure.
   97  */
   98 phys_addr_t __init_memblock memblock_find_in_range_node(phys_addr_t start,
   99                                         phys_addr_t end, phys_addr_t size,
  100                                         phys_addr_t align, int nid)
  101 {
  102         phys_addr_t this_start, this_end, cand;
  103         u64 i;
  104 
  105         /* pump up @end */
  106         if (end == MEMBLOCK_ALLOC_ACCESSIBLE)
  107                 end = memblock.current_limit;
  108 
  109         /* avoid allocating the first page */
  110         start = max_t(phys_addr_t, start, PAGE_SIZE);
  111         end = max(start, end);
  112 
  113         for_each_free_mem_range_reverse(i, nid, &this_start, &this_end, NULL) {
  114                 this_start = clamp(this_start, start, end);
  115                 this_end = clamp(this_end, start, end);
  116 
  117                 if (this_end < size)
  118                         continue;
  119 
  120                 cand = round_down(this_end - size, align);
  121                 if (cand >= this_start)
  122                         return cand;
  123         }
  124         return 0;
  125 }
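
/*
 * Placement example: if the only free range is [0x1000, 0x9000) and the
 * caller asks for size 0x2000 aligned to 0x1000, the candidate is
 * round_down(0x9000 - 0x2000, 0x1000) == 0x7000, i.e. the block
 * [0x7000, 0x9000) at the top of the range is returned -- allocations
 * grow downward.
 */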
  126 
  127 /**
  128  * memblock_find_in_range - find free area in given range
  129  * @start: start of candidate range
  130  * @end: end of candidate range, can be %MEMBLOCK_ALLOC_{ANYWHERE|ACCESSIBLE}
  131  * @size: size of free area to find
  132  * @align: alignment of free area to find
  133  *
  134  * Find @size free area aligned to @align in the specified range.
  135  *
  136  * RETURNS:
  137  * Found address on success, %0 on failure.
  138  */
  139 phys_addr_t __init_memblock memblock_find_in_range(phys_addr_t start,
  140                                         phys_addr_t end, phys_addr_t size,
  141                                         phys_addr_t align)
  142 {
  143         return memblock_find_in_range_node(start, end, size, align,
  144                                            MAX_NUMNODES);
  145 }
  146 
  147 static void __init_memblock memblock_remove_region(struct memblock_type *type, unsigned long r)
  148 {
  149         type->total_size -= type->regions[r].size;
  150         memmove(&type->regions[r], &type->regions[r + 1],
  151                 (type->cnt - (r + 1)) * sizeof(type->regions[r]));
  152         type->cnt--;
  153 
  154         /* Special case for empty arrays */
  155         if (type->cnt == 0) {
  156                 WARN_ON(type->total_size != 0);
  157                 type->cnt = 1;
  158                 type->regions[0].base = 0;
  159                 type->regions[0].size = 0;
  160                 memblock_set_region_node(&type->regions[0], MAX_NUMNODES);
  161         }
  162 }
  163 
  164 phys_addr_t __init_memblock get_allocated_memblock_reserved_regions_info(
  165                                         phys_addr_t *addr)
  166 {
  167         if (memblock.reserved.regions == memblock_reserved_init_regions)
  168                 return 0;
  169 
  170         *addr = __pa(memblock.reserved.regions);
  171 
  172         return PAGE_ALIGN(sizeof(struct memblock_region) *
  173                           memblock.reserved.max);
  174 }
  175 
  176 /**
  177  * memblock_double_array - double the size of the memblock regions array
  178  * @type: memblock type of the regions array being doubled
  179  * @new_area_start: starting address of memory range to avoid overlap with
  180  * @new_area_size: size of memory range to avoid overlap with
  181  *
  182  * Double the size of the @type regions array. If memblock is being used to
  183  * allocate memory for a new reserved regions array and there is a previously
   184  * allocated memory range [@new_area_start, @new_area_start + @new_area_size)
  185  * waiting to be reserved, ensure the memory used by the new array does
  186  * not overlap.
  187  *
  188  * RETURNS:
  189  * 0 on success, -1 on failure.
  190  */
  191 static int __init_memblock memblock_double_array(struct memblock_type *type,
  192                                                 phys_addr_t new_area_start,
  193                                                 phys_addr_t new_area_size)
  194 {
  195         struct memblock_region *new_array, *old_array;
  196         phys_addr_t old_alloc_size, new_alloc_size;
  197         phys_addr_t old_size, new_size, addr;
  198         int use_slab = slab_is_available();
  199         int *in_slab;
  200 
  201         /* We don't allow resizing until we know about the reserved regions
  202          * of memory that aren't suitable for allocation
  203          */
  204         if (!memblock_can_resize)
  205                 return -1;
  206 
  207         /* Calculate new doubled size */
  208         old_size = type->max * sizeof(struct memblock_region);
  209         new_size = old_size << 1;
   210         /*
   211          * We need to allocate the new array aligned to PAGE_SIZE
   212          * so that it can be freed completely later.
   213          */
  214         old_alloc_size = PAGE_ALIGN(old_size);
  215         new_alloc_size = PAGE_ALIGN(new_size);
  216 
  217         /* Retrieve the slab flag */
  218         if (type == &memblock.memory)
  219                 in_slab = &memblock_memory_in_slab;
  220         else
  221                 in_slab = &memblock_reserved_in_slab;
  222 
   223         /* Try to find some space for it.
   224          *
   225          * WARNING: We assume that either slab_is_available() is true and we
   226          * use the slab, or we use MEMBLOCK for allocations.  That means this
   227          * is unsafe to use while bootmem is active (unless bootmem itself is
   228          * implemented on top of MEMBLOCK, which isn't the case yet).
   229          *
   230          * This should not be an issue for now, however, as we currently only
   231          * call into MEMBLOCK while it's still active, or much later, when
   232          * slab is available, for memory-hotplug operations.
   233          */
  234         if (use_slab) {
  235                 new_array = kmalloc(new_size, GFP_KERNEL);
  236                 addr = new_array ? __pa(new_array) : 0;
  237         } else {
  238                 /* only exclude range when trying to double reserved.regions */
  239                 if (type != &memblock.reserved)
  240                         new_area_start = new_area_size = 0;
  241 
  242                 addr = memblock_find_in_range(new_area_start + new_area_size,
  243                                                 memblock.current_limit,
  244                                                 new_alloc_size, PAGE_SIZE);
  245                 if (!addr && new_area_size)
  246                         addr = memblock_find_in_range(0,
  247                                 min(new_area_start, memblock.current_limit),
  248                                 new_alloc_size, PAGE_SIZE);
  249 
  250                 new_array = addr ? __va(addr) : NULL;
  251         }
  252         if (!addr) {
  253                 pr_err("memblock: Failed to double %s array from %ld to %ld entries !\n",
  254                        memblock_type_name(type), type->max, type->max * 2);
  255                 return -1;
  256         }
  257 
   258         memblock_dbg("memblock: %s is doubled to %ld at [%#010llx-%#010llx]\n",
  259                         memblock_type_name(type), type->max * 2, (u64)addr,
  260                         (u64)addr + new_size - 1);
  261 
  262         /*
  263          * Found space, we now need to move the array over before we add the
  264          * reserved region since it may be our reserved array itself that is
  265          * full.
  266          */
  267         memcpy(new_array, type->regions, old_size);
  268         memset(new_array + type->max, 0, old_size);
  269         old_array = type->regions;
  270         type->regions = new_array;
  271         type->max <<= 1;
  272 
  273         /* Free old array. We needn't free it if the array is the static one */
  274         if (*in_slab)
  275                 kfree(old_array);
  276         else if (old_array != memblock_memory_init_regions &&
  277                  old_array != memblock_reserved_init_regions)
  278                 memblock_free(__pa(old_array), old_alloc_size);
  279 
   280         /*
   281          * Reserve the new array if it was allocated from memblock.
   282          * Otherwise, we needn't do it.
   283          */
  284         if (!use_slab)
  285                 BUG_ON(memblock_reserve(addr, new_alloc_size));
  286 
  287         /* Update slab flag */
  288         *in_slab = use_slab;
  289 
  290         return 0;
  291 }
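
/*
 * Sizing example (assuming 128 initial entries of 32 bytes each, which is
 * typical but configuration-dependent): old_size is 4KiB, so the first
 * doubling allocates a page-aligned 8KiB block, copies the live entries,
 * zeroes the new half, and only afterwards reserves the block itself --
 * the reservation must come last because the array that overflowed may be
 * the reserved array.
 */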
  292 
  293 /**
  294  * memblock_merge_regions - merge neighboring compatible regions
  295  * @type: memblock type to scan
  296  *
  297  * Scan @type and merge neighboring compatible regions.
  298  */
  299 static void __init_memblock memblock_merge_regions(struct memblock_type *type)
  300 {
  301         int i = 0;
  302 
  303         /* cnt never goes below 1 */
  304         while (i < type->cnt - 1) {
  305                 struct memblock_region *this = &type->regions[i];
  306                 struct memblock_region *next = &type->regions[i + 1];
  307 
  308                 if (this->base + this->size != next->base ||
  309                     memblock_get_region_node(this) !=
  310                     memblock_get_region_node(next)) {
  311                         BUG_ON(this->base + this->size > next->base);
  312                         i++;
  313                         continue;
  314                 }
  315 
  316                 this->size += next->size;
  317                 /* move forward from next + 1, index of which is i + 2 */
  318                 memmove(next, next + 1, (type->cnt - (i + 2)) * sizeof(*next));
  319                 type->cnt--;
  320         }
  321 }
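
/*
 * Merge example: [0x0, 0x1000) followed by [0x1000, 0x2000) on the same
 * node collapses into the single region [0x0, 0x2000); the same pair on
 * different nodes stays split, because the node IDs differ even though
 * the addresses abut.
 */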
  322 
  323 /**
  324  * memblock_insert_region - insert new memblock region
  325  * @type: memblock type to insert into
  326  * @idx: index for the insertion point
  327  * @base: base address of the new region
  328  * @size: size of the new region
  329  *
  330  * Insert new memblock region [@base,@base+@size) into @type at @idx.
   331  * @type must already have extra room to accommodate the new region.
  332  */
  333 static void __init_memblock memblock_insert_region(struct memblock_type *type,
  334                                                    int idx, phys_addr_t base,
  335                                                    phys_addr_t size, int nid)
  336 {
  337         struct memblock_region *rgn = &type->regions[idx];
  338 
  339         BUG_ON(type->cnt >= type->max);
  340         memmove(rgn + 1, rgn, (type->cnt - idx) * sizeof(*rgn));
  341         rgn->base = base;
  342         rgn->size = size;
  343         memblock_set_region_node(rgn, nid);
  344         type->cnt++;
  345         type->total_size += size;
  346 }
  347 
  348 /**
  349  * memblock_add_region - add new memblock region
  350  * @type: memblock type to add new region into
  351  * @base: base address of the new region
  352  * @size: size of the new region
  353  * @nid: nid of the new region
  354  *
  355  * Add new memblock region [@base,@base+@size) into @type.  The new region
  356  * is allowed to overlap with existing ones - overlaps don't affect already
  357  * existing regions.  @type is guaranteed to be minimal (all neighbouring
  358  * compatible regions are merged) after the addition.
  359  *
  360  * RETURNS:
  361  * 0 on success, -errno on failure.
  362  */
  363 static int __init_memblock memblock_add_region(struct memblock_type *type,
  364                                 phys_addr_t base, phys_addr_t size, int nid)
  365 {
  366         bool insert = false;
  367         phys_addr_t obase = base;
  368         phys_addr_t end = base + memblock_cap_size(base, &size);
  369         int i, nr_new;
  370 
  371         if (!size)
  372                 return 0;
  373 
  374         /* special case for empty array */
  375         if (type->regions[0].size == 0) {
  376                 WARN_ON(type->cnt != 1 || type->total_size);
  377                 type->regions[0].base = base;
  378                 type->regions[0].size = size;
  379                 memblock_set_region_node(&type->regions[0], nid);
  380                 type->total_size = size;
  381                 return 0;
  382         }
  383 repeat:
  384         /*
  385          * The following is executed twice.  Once with %false @insert and
  386          * then with %true.  The first counts the number of regions needed
   387          * to accommodate the new area.  The second actually inserts them.
  388          */
  389         base = obase;
  390         nr_new = 0;
  391 
  392         for (i = 0; i < type->cnt; i++) {
  393                 struct memblock_region *rgn = &type->regions[i];
  394                 phys_addr_t rbase = rgn->base;
  395                 phys_addr_t rend = rbase + rgn->size;
  396 
  397                 if (rbase >= end)
  398                         break;
  399                 if (rend <= base)
  400                         continue;
  401                 /*
  402                  * @rgn overlaps.  If it separates the lower part of new
  403                  * area, insert that portion.
  404                  */
  405                 if (rbase > base) {
  406                         nr_new++;
  407                         if (insert)
  408                                 memblock_insert_region(type, i++, base,
  409                                                        rbase - base, nid);
  410                 }
  411                 /* area below @rend is dealt with, forget about it */
  412                 base = min(rend, end);
  413         }
  414 
  415         /* insert the remaining portion */
  416         if (base < end) {
  417                 nr_new++;
  418                 if (insert)
  419                         memblock_insert_region(type, i, base, end - base, nid);
  420         }
  421 
  422         /*
  423          * If this was the first round, resize array and repeat for actual
  424          * insertions; otherwise, merge and return.
  425          */
  426         if (!insert) {
  427                 while (type->cnt + nr_new > type->max)
  428                         if (memblock_double_array(type, obase, size) < 0)
  429                                 return -ENOMEM;
  430                 insert = true;
  431                 goto repeat;
  432         } else {
  433                 memblock_merge_regions(type);
  434                 return 0;
  435         }
  436 }
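
/*
 * Two-pass example: with an existing region [0x2000, 0x3000), adding
 * [0x1000, 0x4000) first counts nr_new == 2 for the uncovered pieces
 * [0x1000, 0x2000) and [0x3000, 0x4000), grows the array if needed, then
 * inserts both on the second pass; the final merge collapses all three
 * back into the single region [0x1000, 0x4000).
 */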
  437 
  438 int __init_memblock memblock_add_node(phys_addr_t base, phys_addr_t size,
  439                                        int nid)
  440 {
  441         return memblock_add_region(&memblock.memory, base, size, nid);
  442 }
  443 
  444 int __init_memblock memblock_add(phys_addr_t base, phys_addr_t size)
  445 {
  446         return memblock_add_region(&memblock.memory, base, size, MAX_NUMNODES);
  447 }
  448 
  449 /**
  450  * memblock_isolate_range - isolate given range into disjoint memblocks
  451  * @type: memblock type to isolate range for
  452  * @base: base of range to isolate
  453  * @size: size of range to isolate
  454  * @start_rgn: out parameter for the start of isolated region
  455  * @end_rgn: out parameter for the end of isolated region
  456  *
  457  * Walk @type and ensure that regions don't cross the boundaries defined by
  458  * [@base,@base+@size).  Crossing regions are split at the boundaries,
  459  * which may create at most two more regions.  The index of the first
   460  * region inside the range is returned in *@start_rgn, one past the last in *@end_rgn.
  461  *
  462  * RETURNS:
  463  * 0 on success, -errno on failure.
  464  */
  465 static int __init_memblock memblock_isolate_range(struct memblock_type *type,
  466                                         phys_addr_t base, phys_addr_t size,
  467                                         int *start_rgn, int *end_rgn)
  468 {
  469         phys_addr_t end = base + memblock_cap_size(base, &size);
  470         int i;
  471 
  472         *start_rgn = *end_rgn = 0;
  473 
  474         if (!size)
  475                 return 0;
  476 
  477         /* we'll create at most two more regions */
  478         while (type->cnt + 2 > type->max)
  479                 if (memblock_double_array(type, base, size) < 0)
  480                         return -ENOMEM;
  481 
  482         for (i = 0; i < type->cnt; i++) {
  483                 struct memblock_region *rgn = &type->regions[i];
  484                 phys_addr_t rbase = rgn->base;
  485                 phys_addr_t rend = rbase + rgn->size;
  486 
  487                 if (rbase >= end)
  488                         break;
  489                 if (rend <= base)
  490                         continue;
  491 
  492                 if (rbase < base) {
  493                         /*
  494                          * @rgn intersects from below.  Split and continue
  495                          * to process the next region - the new top half.
  496                          */
  497                         rgn->base = base;
  498                         rgn->size -= base - rbase;
  499                         type->total_size -= base - rbase;
  500                         memblock_insert_region(type, i, rbase, base - rbase,
  501                                                memblock_get_region_node(rgn));
  502                 } else if (rend > end) {
  503                         /*
  504                          * @rgn intersects from above.  Split and redo the
  505                          * current region - the new bottom half.
  506                          */
  507                         rgn->base = end;
  508                         rgn->size -= end - rbase;
  509                         type->total_size -= end - rbase;
  510                         memblock_insert_region(type, i--, rbase, end - rbase,
  511                                                memblock_get_region_node(rgn));
  512                 } else {
  513                         /* @rgn is fully contained, record it */
  514                         if (!*end_rgn)
  515                                 *start_rgn = i;
  516                         *end_rgn = i + 1;
  517                 }
  518         }
  519 
  520         return 0;
  521 }
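
/*
 * Isolation example: with a single region [0x0, 0x4000), isolating
 * [0x1000, 0x3000) splits it into [0x0, 0x1000), [0x1000, 0x3000) and
 * [0x3000, 0x4000); *start_rgn == 1 and *end_rgn == 2, so the regions
 * with indices in [*start_rgn, *end_rgn) lie entirely inside the range.
 */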
  522 
  523 static int __init_memblock __memblock_remove(struct memblock_type *type,
  524                                              phys_addr_t base, phys_addr_t size)
  525 {
  526         int start_rgn, end_rgn;
  527         int i, ret;
  528 
  529         ret = memblock_isolate_range(type, base, size, &start_rgn, &end_rgn);
  530         if (ret)
  531                 return ret;
  532 
  533         for (i = end_rgn - 1; i >= start_rgn; i--)
  534                 memblock_remove_region(type, i);
  535         return 0;
  536 }
  537 
  538 int __init_memblock memblock_remove(phys_addr_t base, phys_addr_t size)
  539 {
  540         return __memblock_remove(&memblock.memory, base, size);
  541 }
  542 
  543 int __init_memblock memblock_free(phys_addr_t base, phys_addr_t size)
  544 {
  545         memblock_dbg("   memblock_free: [%#016llx-%#016llx] %pF\n",
  546                      (unsigned long long)base,
  547                      (unsigned long long)base + size,
  548                      (void *)_RET_IP_);
  549 
  550         return __memblock_remove(&memblock.reserved, base, size);
  551 }
  552 
  553 int __init_memblock memblock_reserve(phys_addr_t base, phys_addr_t size)
  554 {
  555         struct memblock_type *_rgn = &memblock.reserved;
  556 
  557         memblock_dbg("memblock_reserve: [%#016llx-%#016llx] %pF\n",
  558                      (unsigned long long)base,
  559                      (unsigned long long)base + size,
  560                      (void *)_RET_IP_);
  561 
  562         return memblock_add_region(_rgn, base, size, MAX_NUMNODES);
  563 }
  564 
  565 /**
  566  * __next_free_mem_range - next function for for_each_free_mem_range()
  567  * @idx: pointer to u64 loop variable
   568  * @nid: node selector, %MAX_NUMNODES for all nodes
  569  * @out_start: ptr to phys_addr_t for start address of the range, can be %NULL
  570  * @out_end: ptr to phys_addr_t for end address of the range, can be %NULL
  571  * @out_nid: ptr to int for nid of the range, can be %NULL
  572  *
  573  * Find the first free area from *@idx which matches @nid, fill the out
   574  * parameters, and update *@idx for the next iteration.  The lower 32 bits
   575  * of *@idx contain the index into the memory region array and the upper
   576  * 32 bits index the areas before each reserved region.  For example, if
   577  * the reserved regions look like the following,
  578  *
  579  *      0:[0-16), 1:[32-48), 2:[128-130)
  580  *
   581  * The upper 32 bits index the following free areas.
  582  *
  583  *      0:[0-0), 1:[16-32), 2:[48-128), 3:[130-MAX)
  584  *
  585  * As both region arrays are sorted, the function advances the two indices
  586  * in lockstep and returns each intersection.
  587  */
  588 void __init_memblock __next_free_mem_range(u64 *idx, int nid,
  589                                            phys_addr_t *out_start,
  590                                            phys_addr_t *out_end, int *out_nid)
  591 {
  592         struct memblock_type *mem = &memblock.memory;
  593         struct memblock_type *rsv = &memblock.reserved;
  594         int mi = *idx & 0xffffffff;
  595         int ri = *idx >> 32;
  596 
  597         for ( ; mi < mem->cnt; mi++) {
  598                 struct memblock_region *m = &mem->regions[mi];
  599                 phys_addr_t m_start = m->base;
  600                 phys_addr_t m_end = m->base + m->size;
  601 
  602                 /* only memory regions are associated with nodes, check it */
  603                 if (nid != MAX_NUMNODES && nid != memblock_get_region_node(m))
  604                         continue;
  605 
  606                 /* scan areas before each reservation for intersection */
  607                 for ( ; ri < rsv->cnt + 1; ri++) {
  608                         struct memblock_region *r = &rsv->regions[ri];
  609                         phys_addr_t r_start = ri ? r[-1].base + r[-1].size : 0;
  610                         phys_addr_t r_end = ri < rsv->cnt ? r->base : ULLONG_MAX;
  611 
  612                         /* if ri advanced past mi, break out to advance mi */
  613                         if (r_start >= m_end)
  614                                 break;
  615                         /* if the two regions intersect, we're done */
  616                         if (m_start < r_end) {
  617                                 if (out_start)
  618                                         *out_start = max(m_start, r_start);
  619                                 if (out_end)
  620                                         *out_end = min(m_end, r_end);
  621                                 if (out_nid)
  622                                         *out_nid = memblock_get_region_node(m);
  623                                 /*
  624                                  * The region which ends first is advanced
  625                                  * for the next iteration.
  626                                  */
  627                                 if (m_end <= r_end)
  628                                         mi++;
  629                                 else
  630                                         ri++;
  631                                 *idx = (u32)mi | (u64)ri << 32;
  632                                 return;
  633                         }
  634                 }
  635         }
  636 
  637         /* signal end of iteration */
  638         *idx = ULLONG_MAX;
  639 }
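
/*
 * A sketch of typical use through the for_each_free_mem_range() wrapper
 * from <linux/memblock.h> (the function and message are illustrative):
 */
#if 0	/* illustrative only */
static void __init example_dump_free_ranges(void)
{
	phys_addr_t start, end;
	u64 i;

	for_each_free_mem_range(i, MAX_NUMNODES, &start, &end, NULL)
		pr_info("free: [%#llx-%#llx)\n",
			(unsigned long long)start, (unsigned long long)end);
}
#endif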
  640 
  641 /**
  642  * __next_free_mem_range_rev - next function for for_each_free_mem_range_reverse()
  643  * @idx: pointer to u64 loop variable
   644  * @nid: node selector, %MAX_NUMNODES for all nodes
  645  * @out_start: ptr to phys_addr_t for start address of the range, can be %NULL
  646  * @out_end: ptr to phys_addr_t for end address of the range, can be %NULL
  647  * @out_nid: ptr to int for nid of the range, can be %NULL
  648  *
  649  * Reverse of __next_free_mem_range().
  650  */
  651 void __init_memblock __next_free_mem_range_rev(u64 *idx, int nid,
  652                                            phys_addr_t *out_start,
  653                                            phys_addr_t *out_end, int *out_nid)
  654 {
  655         struct memblock_type *mem = &memblock.memory;
  656         struct memblock_type *rsv = &memblock.reserved;
  657         int mi = *idx & 0xffffffff;
  658         int ri = *idx >> 32;
  659 
  660         if (*idx == (u64)ULLONG_MAX) {
  661                 mi = mem->cnt - 1;
  662                 ri = rsv->cnt;
  663         }
  664 
  665         for ( ; mi >= 0; mi--) {
  666                 struct memblock_region *m = &mem->regions[mi];
  667                 phys_addr_t m_start = m->base;
  668                 phys_addr_t m_end = m->base + m->size;
  669 
  670                 /* only memory regions are associated with nodes, check it */
  671                 if (nid != MAX_NUMNODES && nid != memblock_get_region_node(m))
  672                         continue;
  673 
  674                 /* scan areas before each reservation for intersection */
  675                 for ( ; ri >= 0; ri--) {
  676                         struct memblock_region *r = &rsv->regions[ri];
  677                         phys_addr_t r_start = ri ? r[-1].base + r[-1].size : 0;
  678                         phys_addr_t r_end = ri < rsv->cnt ? r->base : ULLONG_MAX;
  679 
  680                         /* if ri advanced past mi, break out to advance mi */
  681                         if (r_end <= m_start)
  682                                 break;
  683                         /* if the two regions intersect, we're done */
  684                         if (m_end > r_start) {
  685                                 if (out_start)
  686                                         *out_start = max(m_start, r_start);
  687                                 if (out_end)
  688                                         *out_end = min(m_end, r_end);
  689                                 if (out_nid)
  690                                         *out_nid = memblock_get_region_node(m);
  691 
  692                                 if (m_start >= r_start)
  693                                         mi--;
  694                                 else
  695                                         ri--;
  696                                 *idx = (u32)mi | (u64)ri << 32;
  697                                 return;
  698                         }
  699                 }
  700         }
  701 
  702         *idx = ULLONG_MAX;
  703 }
  704 
  705 #ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
  706 /*
   707  * Common iterator interface used to define for_each_mem_pfn_range().
  708  */
  709 void __init_memblock __next_mem_pfn_range(int *idx, int nid,
  710                                 unsigned long *out_start_pfn,
  711                                 unsigned long *out_end_pfn, int *out_nid)
  712 {
  713         struct memblock_type *type = &memblock.memory;
  714         struct memblock_region *r;
  715 
  716         while (++*idx < type->cnt) {
  717                 r = &type->regions[*idx];
  718 
  719                 if (PFN_UP(r->base) >= PFN_DOWN(r->base + r->size))
  720                         continue;
  721                 if (nid == MAX_NUMNODES || nid == r->nid)
  722                         break;
  723         }
  724         if (*idx >= type->cnt) {
  725                 *idx = -1;
  726                 return;
  727         }
  728 
  729         if (out_start_pfn)
  730                 *out_start_pfn = PFN_UP(r->base);
  731         if (out_end_pfn)
  732                 *out_end_pfn = PFN_DOWN(r->base + r->size);
  733         if (out_nid)
  734                 *out_nid = r->nid;
  735 }
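
/*
 * A sketch of typical use through the for_each_mem_pfn_range() wrapper
 * from <linux/memblock.h> (function and message are illustrative):
 */
#if 0	/* illustrative only */
static void __init example_dump_pfn_ranges(void)
{
	unsigned long start_pfn, end_pfn;
	int i, nid;

	for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid)
		pr_info("node %d: pfn range [%lu-%lu)\n",
			nid, start_pfn, end_pfn);
}
#endif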
  736 
  737 /**
  738  * memblock_set_node - set node ID on memblock regions
  739  * @base: base of area to set node ID for
  740  * @size: size of area to set node ID for
  741  * @nid: node ID to set
  742  *
  743  * Set the nid of memblock memory regions in [@base,@base+@size) to @nid.
  744  * Regions which cross the area boundaries are split as necessary.
  745  *
  746  * RETURNS:
  747  * 0 on success, -errno on failure.
  748  */
  749 int __init_memblock memblock_set_node(phys_addr_t base, phys_addr_t size,
  750                                       int nid)
  751 {
  752         struct memblock_type *type = &memblock.memory;
  753         int start_rgn, end_rgn;
  754         int i, ret;
  755 
  756         ret = memblock_isolate_range(type, base, size, &start_rgn, &end_rgn);
  757         if (ret)
  758                 return ret;
  759 
  760         for (i = start_rgn; i < end_rgn; i++)
  761                 memblock_set_region_node(&type->regions[i], nid);
  762 
  763         memblock_merge_regions(type);
  764         return 0;
  765 }
  766 #endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
  767 
  768 static phys_addr_t __init memblock_alloc_base_nid(phys_addr_t size,
  769                                         phys_addr_t align, phys_addr_t max_addr,
  770                                         int nid)
  771 {
  772         phys_addr_t found;
  773 
  774         /* align @size to avoid excessive fragmentation on reserved array */
  775         size = round_up(size, align);
  776 
  777         found = memblock_find_in_range_node(0, max_addr, size, align, nid);
  778         if (found && !memblock_reserve(found, size))
  779                 return found;
  780 
  781         return 0;
  782 }
  783 
  784 phys_addr_t __init memblock_alloc_nid(phys_addr_t size, phys_addr_t align, int nid)
  785 {
  786         return memblock_alloc_base_nid(size, align, MEMBLOCK_ALLOC_ACCESSIBLE, nid);
  787 }
  788 
  789 phys_addr_t __init __memblock_alloc_base(phys_addr_t size, phys_addr_t align, phys_addr_t max_addr)
  790 {
  791         return memblock_alloc_base_nid(size, align, max_addr, MAX_NUMNODES);
  792 }
  793 
  794 phys_addr_t __init memblock_alloc_base(phys_addr_t size, phys_addr_t align, phys_addr_t max_addr)
  795 {
  796         phys_addr_t alloc;
  797 
  798         alloc = __memblock_alloc_base(size, align, max_addr);
  799 
  800         if (alloc == 0)
  801                 panic("ERROR: Failed to allocate 0x%llx bytes below 0x%llx.\n",
  802                       (unsigned long long) size, (unsigned long long) max_addr);
  803 
  804         return alloc;
  805 }
  806 
  807 phys_addr_t __init memblock_alloc(phys_addr_t size, phys_addr_t align)
  808 {
  809         return memblock_alloc_base(size, align, MEMBLOCK_ALLOC_ACCESSIBLE);
  810 }
  811 
  812 phys_addr_t __init memblock_alloc_try_nid(phys_addr_t size, phys_addr_t align, int nid)
  813 {
  814         phys_addr_t res = memblock_alloc_nid(size, align, nid);
  815 
  816         if (res)
  817                 return res;
  818         return memblock_alloc_base(size, align, MEMBLOCK_ALLOC_ACCESSIBLE);
  819 }
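
/*
 * How the allocation entry points above relate, as a sketch (SZ_1M is
 * assumed from <linux/sizes.h>; the sizes are arbitrary):
 */
#if 0	/* illustrative only */
static void __init example_alloc(void)
{
	/* Panics on failure -- for allocations boot cannot survive without. */
	phys_addr_t table = memblock_alloc(SZ_1M, PAGE_SIZE);

	/* Returns 0 on failure instead, so the caller can fall back. */
	phys_addr_t opt = __memblock_alloc_base(SZ_1M, PAGE_SIZE,
						MEMBLOCK_ALLOC_ACCESSIBLE);

	if (!opt)
		pr_warn("optional table not allocated\n");
}
#endif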
  820 
  821 
  822 /*
  823  * Remaining API functions
  824  */
  825 
  826 phys_addr_t __init memblock_phys_mem_size(void)
  827 {
  828         return memblock.memory.total_size;
  829 }
  830 
  831 /* lowest address */
  832 phys_addr_t __init_memblock memblock_start_of_DRAM(void)
  833 {
  834         return memblock.memory.regions[0].base;
  835 }
  836 
  837 phys_addr_t __init_memblock memblock_end_of_DRAM(void)
  838 {
  839         int idx = memblock.memory.cnt - 1;
  840 
  841         return (memblock.memory.regions[idx].base + memblock.memory.regions[idx].size);
  842 }
  843 
  844 void __init memblock_enforce_memory_limit(phys_addr_t limit)
  845 {
  846         unsigned long i;
  847         phys_addr_t max_addr = (phys_addr_t)ULLONG_MAX;
  848 
  849         if (!limit)
  850                 return;
  851 
  852         /* find out max address */
  853         for (i = 0; i < memblock.memory.cnt; i++) {
  854                 struct memblock_region *r = &memblock.memory.regions[i];
  855 
  856                 if (limit <= r->size) {
  857                         max_addr = r->base + limit;
  858                         break;
  859                 }
  860                 limit -= r->size;
  861         }
  862 
  863         /* truncate both memory and reserved regions */
  864         __memblock_remove(&memblock.memory, max_addr, (phys_addr_t)ULLONG_MAX);
  865         __memblock_remove(&memblock.reserved, max_addr, (phys_addr_t)ULLONG_MAX);
  866 }
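
/*
 * Worked example: with memory regions 0:[0x0, 0x1000) and
 * 1:[0x2000, 0x3000) and limit == 0x1800, region 0 consumes 0x1000 of the
 * limit, the remaining 0x800 lands in region 1, so max_addr == 0x2800 and
 * everything at or above 0x2800 is removed from both tables.  Note that
 * @limit counts bytes of actual memory, not an address.
 */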
  867 
  868 static int __init_memblock memblock_search(struct memblock_type *type, phys_addr_t addr)
  869 {
  870         unsigned int left = 0, right = type->cnt;
  871 
  872         do {
  873                 unsigned int mid = (right + left) / 2;
  874 
  875                 if (addr < type->regions[mid].base)
  876                         right = mid;
  877                 else if (addr >= (type->regions[mid].base +
  878                                   type->regions[mid].size))
  879                         left = mid + 1;
  880                 else
  881                         return mid;
  882         } while (left < right);
  883         return -1;
  884 }
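
/*
 * Search example: with regions 0:[0x0, 0x1000) and 1:[0x2000, 0x3000),
 * looking up 0x2800 probes mid == 1 and returns 1 immediately; looking up
 * 0x1800 (in the hole) shrinks the window to empty and returns -1.
 */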
  885 
  886 int __init memblock_is_reserved(phys_addr_t addr)
  887 {
  888         return memblock_search(&memblock.reserved, addr) != -1;
  889 }
  890 
  891 int __init_memblock memblock_is_memory(phys_addr_t addr)
  892 {
  893         return memblock_search(&memblock.memory, addr) != -1;
  894 }
  895 
  896 /**
  897  * memblock_is_region_memory - check if a region is a subset of memory
  898  * @base: base of region to check
  899  * @size: size of region to check
  900  *
  901  * Check if the region [@base, @base+@size) is a subset of a memory block.
  902  *
  903  * RETURNS:
  904  * 0 if false, non-zero if true
  905  */
  906 int __init_memblock memblock_is_region_memory(phys_addr_t base, phys_addr_t size)
  907 {
  908         int idx = memblock_search(&memblock.memory, base);
  909         phys_addr_t end = base + memblock_cap_size(base, &size);
  910 
  911         if (idx == -1)
  912                 return 0;
  913         return memblock.memory.regions[idx].base <= base &&
  914                 (memblock.memory.regions[idx].base +
  915                  memblock.memory.regions[idx].size) >= end;
  916 }
  917 
  918 /**
  919  * memblock_is_region_reserved - check if a region intersects reserved memory
  920  * @base: base of region to check
  921  * @size: size of region to check
  922  *
  923  * Check if the region [@base, @base+@size) intersects a reserved memory block.
  924  *
  925  * RETURNS:
  926  * 0 if false, non-zero if true
  927  */
  928 int __init_memblock memblock_is_region_reserved(phys_addr_t base, phys_addr_t size)
  929 {
  930         memblock_cap_size(base, &size);
  931         return memblock_overlaps_region(&memblock.reserved, base, size) >= 0;
  932 }
  933 
  934 void __init_memblock memblock_trim_memory(phys_addr_t align)
  935 {
  936         int i;
  937         phys_addr_t start, end, orig_start, orig_end;
  938         struct memblock_type *mem = &memblock.memory;
  939 
  940         for (i = 0; i < mem->cnt; i++) {
  941                 orig_start = mem->regions[i].base;
  942                 orig_end = mem->regions[i].base + mem->regions[i].size;
  943                 start = round_up(orig_start, align);
  944                 end = round_down(orig_end, align);
  945 
  946                 if (start == orig_start && end == orig_end)
  947                         continue;
  948 
  949                 if (start < end) {
  950                         mem->regions[i].base = start;
  951                         mem->regions[i].size = end - start;
  952                 } else {
  953                         memblock_remove_region(mem, i);
  954                         i--;
  955                 }
  956         }
  957 }
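
/*
 * Trim example with align == 0x1000: [0xfff, 0x3001) is shrunk inward to
 * [0x1000, 0x3000), while [0xfff, 0x1fff) rounds to start == end == 0x1000
 * and is removed outright, since no aligned page fits inside it.
 */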
  958 
  959 void __init_memblock memblock_set_current_limit(phys_addr_t limit)
  960 {
  961         memblock.current_limit = limit;
  962 }
  963 
  964 static void __init_memblock memblock_dump(struct memblock_type *type, char *name)
  965 {
  966         unsigned long long base, size;
  967         int i;
  968 
  969         pr_info(" %s.cnt  = 0x%lx\n", name, type->cnt);
  970 
  971         for (i = 0; i < type->cnt; i++) {
  972                 struct memblock_region *rgn = &type->regions[i];
  973                 char nid_buf[32] = "";
  974 
  975                 base = rgn->base;
  976                 size = rgn->size;
  977 #ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
  978                 if (memblock_get_region_node(rgn) != MAX_NUMNODES)
  979                         snprintf(nid_buf, sizeof(nid_buf), " on node %d",
  980                                  memblock_get_region_node(rgn));
  981 #endif
  982                 pr_info(" %s[%#x]\t[%#016llx-%#016llx], %#llx bytes%s\n",
  983                         name, i, base, base + size - 1, size, nid_buf);
  984         }
  985 }
  986 
  987 void __init_memblock __memblock_dump_all(void)
  988 {
  989         pr_info("MEMBLOCK configuration:\n");
  990         pr_info(" memory size = %#llx reserved size = %#llx\n",
  991                 (unsigned long long)memblock.memory.total_size,
  992                 (unsigned long long)memblock.reserved.total_size);
  993 
  994         memblock_dump(&memblock.memory, "memory");
  995         memblock_dump(&memblock.reserved, "reserved");
  996 }
  997 
  998 void __init memblock_allow_resize(void)
  999 {
 1000         memblock_can_resize = 1;
 1001 }
 1002 
 1003 static int __init early_memblock(char *p)
 1004 {
 1005         if (p && strstr(p, "debug"))
 1006                 memblock_debug = 1;
 1007         return 0;
 1008 }
 1009 early_param("memblock", early_memblock);
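
/*
 * Passing "memblock=debug" on the kernel command line sets memblock_debug
 * above, which makes the memblock_dbg() calls in this file print their
 * messages during boot.
 */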
 1010 
 1011 #if defined(CONFIG_DEBUG_FS) && !defined(CONFIG_ARCH_DISCARD_MEMBLOCK)
 1012 
 1013 static int memblock_debug_show(struct seq_file *m, void *private)
 1014 {
 1015         struct memblock_type *type = m->private;
 1016         struct memblock_region *reg;
 1017         int i;
 1018 
 1019         for (i = 0; i < type->cnt; i++) {
 1020                 reg = &type->regions[i];
 1021                 seq_printf(m, "%4d: ", i);
 1022                 if (sizeof(phys_addr_t) == 4)
 1023                         seq_printf(m, "0x%08lx..0x%08lx\n",
 1024                                    (unsigned long)reg->base,
 1025                                    (unsigned long)(reg->base + reg->size - 1));
 1026                 else
 1027                         seq_printf(m, "0x%016llx..0x%016llx\n",
 1028                                    (unsigned long long)reg->base,
 1029                                    (unsigned long long)(reg->base + reg->size - 1));
 1030 
 1031         }
 1032         return 0;
 1033 }
 1034 
 1035 static int memblock_debug_open(struct inode *inode, struct file *file)
 1036 {
 1037         return single_open(file, memblock_debug_show, inode->i_private);
 1038 }
 1039 
 1040 static const struct file_operations memblock_debug_fops = {
 1041         .open = memblock_debug_open,
 1042         .read = seq_read,
 1043         .llseek = seq_lseek,
 1044         .release = single_release,
 1045 };
 1046 
 1047 static int __init memblock_init_debugfs(void)
 1048 {
 1049         struct dentry *root = debugfs_create_dir("memblock", NULL);
 1050         if (!root)
 1051                 return -ENXIO;
 1052         debugfs_create_file("memory", S_IRUGO, root, &memblock.memory, &memblock_debug_fops);
 1053         debugfs_create_file("reserved", S_IRUGO, root, &memblock.reserved, &memblock_debug_fops);
 1054 
 1055         return 0;
 1056 }
 1057 __initcall(memblock_init_debugfs);
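
/*
 * With debugfs mounted (conventionally at /sys/kernel/debug), the files
 * created above can be read directly, e.g.:
 *
 *	cat /sys/kernel/debug/memblock/memory
 *	cat /sys/kernel/debug/memblock/reserved
 */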
 1058 
 1059 #endif /* CONFIG_DEBUG_FS */

This page is part of the FreeBSD/Linux Kernel Cross-Reference and was automatically generated using a modified version of the LXR engine.