FreeBSD/Linux Kernel Cross Reference
sys/mm/bootmem.c

/*
 *  bootmem - A boot-time physical memory allocator and configurator
 *
 *  Copyright (C) 1999 Ingo Molnar
 *                1999 Kanoj Sarcar, SGI
 *                2008 Johannes Weiner
 *
 * Access to this subsystem has to be serialized externally (which is true
 * for the boot process anyway).
 */
#include <linux/init.h>
#include <linux/pfn.h>
#include <linux/slab.h>
#include <linux/bootmem.h>
#include <linux/export.h>
#include <linux/kmemleak.h>
#include <linux/range.h>
#include <linux/memblock.h>

#include <asm/bug.h>
#include <asm/io.h>
#include <asm/processor.h>

#include "internal.h"

#ifndef CONFIG_NEED_MULTIPLE_NODES
struct pglist_data __refdata contig_page_data = {
        .bdata = &bootmem_node_data[0]
};
EXPORT_SYMBOL(contig_page_data);
#endif

unsigned long max_low_pfn;
unsigned long min_low_pfn;
unsigned long max_pfn;

bootmem_data_t bootmem_node_data[MAX_NUMNODES] __initdata;

static struct list_head bdata_list __initdata = LIST_HEAD_INIT(bdata_list);

static int bootmem_debug;

static int __init bootmem_debug_setup(char *buf)
{
        bootmem_debug = 1;
        return 0;
}
early_param("bootmem_debug", bootmem_debug_setup);

#define bdebug(fmt, args...) ({                         \
        if (unlikely(bootmem_debug))                    \
                printk(KERN_INFO                        \
                        "bootmem::%s " fmt,             \
                        __func__, ## args);             \
})
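
/*
 * Example (illustrative, not part of the source): booting with
 * "bootmem_debug" on the kernel command line enables the bdebug()
 * tracing above, producing lines of the form
 *
 *      bootmem::alloc_bootmem_bdata nid=0 size=1000 [1 pages] align=40 goal=1000000 limit=0
 *
 * where the values shown here are made up for illustration.
 */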

static unsigned long __init bootmap_bytes(unsigned long pages)
{
        unsigned long bytes = DIV_ROUND_UP(pages, 8);

        return ALIGN(bytes, sizeof(long));
}

/**
 * bootmem_bootmap_pages - calculate bitmap size in pages
 * @pages: number of pages the bitmap has to represent
 */
unsigned long __init bootmem_bootmap_pages(unsigned long pages)
{
        unsigned long bytes = bootmap_bytes(pages);

        return PAGE_ALIGN(bytes) >> PAGE_SHIFT;
}
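
/*
 * Worked example (illustrative): with 4 KiB pages, a node spanning
 * 128 MiB covers 32768 page frames.  One bit per frame needs
 * DIV_ROUND_UP(32768, 8) = 4096 bytes, so the bitmap fits in a single
 * page and bootmem_bootmap_pages(32768) returns 1.
 */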

/*
 * link bdata in order
 */
static void __init link_bootmem(bootmem_data_t *bdata)
{
        bootmem_data_t *ent;

        list_for_each_entry(ent, &bdata_list, list) {
                if (bdata->node_min_pfn < ent->node_min_pfn) {
                        list_add_tail(&bdata->list, &ent->list);
                        return;
                }
        }

        list_add_tail(&bdata->list, &bdata_list);
}

/*
 * Called once to set up the allocator itself.
 */
static unsigned long __init init_bootmem_core(bootmem_data_t *bdata,
        unsigned long mapstart, unsigned long start, unsigned long end)
{
        unsigned long mapsize;

        mminit_validate_memmodel_limits(&start, &end);
        bdata->node_bootmem_map = phys_to_virt(PFN_PHYS(mapstart));
        bdata->node_min_pfn = start;
        bdata->node_low_pfn = end;
        link_bootmem(bdata);

        /*
         * Initially all pages are reserved - setup_arch() has to
         * register free RAM areas explicitly.
         */
        mapsize = bootmap_bytes(end - start);
        memset(bdata->node_bootmem_map, 0xff, mapsize);

        bdebug("nid=%td start=%lx map=%lx end=%lx mapsize=%lx\n",
                bdata - bootmem_node_data, start, mapstart, end, mapsize);

        return mapsize;
}

/**
 * init_bootmem_node - register a node as boot memory
 * @pgdat: node to register
 * @freepfn: pfn where the bitmap for this node is to be placed
 * @startpfn: first pfn on the node
 * @endpfn: first pfn after the node
 *
 * Returns the number of bytes needed to hold the bitmap for this node.
 */
unsigned long __init init_bootmem_node(pg_data_t *pgdat, unsigned long freepfn,
                                unsigned long startpfn, unsigned long endpfn)
{
        return init_bootmem_core(pgdat->bdata, freepfn, startpfn, endpfn);
}

/**
 * init_bootmem - register boot memory
 * @start: pfn where the bitmap is to be placed
 * @pages: number of available physical pages
 *
 * Returns the number of bytes needed to hold the bitmap.
 */
unsigned long __init init_bootmem(unsigned long start, unsigned long pages)
{
        max_low_pfn = pages;
        min_low_pfn = start;
        return init_bootmem_core(NODE_DATA(0)->bdata, start, 0, pages);
}
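
/*
 * Example (hedged sketch of a typical UMA setup_arch() sequence;
 * bitmap_pfn and first_free_pfn are hypothetical values an arch derives
 * from its memory map):
 *
 *      init_bootmem(bitmap_pfn, max_low_pfn);
 *      free_bootmem(PFN_PHYS(first_free_pfn),
 *                   PFN_PHYS(max_low_pfn - first_free_pfn));
 *      reserve_bootmem(__pa(_text), _end - _text, BOOTMEM_DEFAULT);
 *
 * init_bootmem() starts with everything reserved, free_bootmem() then
 * opens up the usable RAM, and reserve_bootmem() re-protects the kernel
 * image before any boot-time allocations are made.
 */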

/**
 * free_bootmem_late - free bootmem pages directly to page allocator
 * @physaddr: starting physical address of the range
 * @size: size of the range in bytes
 *
 * This is only useful when the bootmem allocator has already been torn
 * down, but we are still initializing the system.  Pages are given directly
 * to the page allocator, no bootmem metadata is updated because it is gone.
 */
void __init free_bootmem_late(unsigned long physaddr, unsigned long size)
{
        unsigned long cursor, end;

        kmemleak_free_part(__va(physaddr), size);

        cursor = PFN_UP(physaddr);
        end = PFN_DOWN(physaddr + size);

        for (; cursor < end; cursor++) {
                __free_pages_bootmem(pfn_to_page(cursor), 0);
                totalram_pages++;
        }
}
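
/*
 * Example (illustrative, not in the original source): code that ran
 * early in boot can hand back a no-longer-needed scratch buffer after
 * the bootmem bitmaps have been retired; scratch_phys and SCRATCH_SIZE
 * are hypothetical names:
 *
 *      free_bootmem_late(scratch_phys, SCRATCH_SIZE);
 *
 * Note that only fully contained pages are freed: the start is rounded
 * up with PFN_UP() and the end rounded down with PFN_DOWN().
 */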

static unsigned long __init free_all_bootmem_core(bootmem_data_t *bdata)
{
        struct page *page;
        unsigned long start, end, pages, count = 0;

        if (!bdata->node_bootmem_map)
                return 0;

        start = bdata->node_min_pfn;
        end = bdata->node_low_pfn;

        bdebug("nid=%td start=%lx end=%lx\n",
                bdata - bootmem_node_data, start, end);

        while (start < end) {
                unsigned long *map, idx, vec;
                unsigned shift;

                map = bdata->node_bootmem_map;
                idx = start - bdata->node_min_pfn;
                shift = idx & (BITS_PER_LONG - 1);
                /*
                 * vec holds at most BITS_PER_LONG map bits,
                 * bit 0 corresponds to start.
                 */
                vec = ~map[idx / BITS_PER_LONG];

                if (shift) {
                        vec >>= shift;
                        if (end - start >= BITS_PER_LONG)
                                vec |= ~map[idx / BITS_PER_LONG + 1] <<
                                        (BITS_PER_LONG - shift);
                }
                /*
                 * If we have a properly aligned and fully unreserved
                 * BITS_PER_LONG block of pages in front of us, free
                 * it in one go.
                 */
                if (IS_ALIGNED(start, BITS_PER_LONG) && vec == ~0UL) {
                        int order = ilog2(BITS_PER_LONG);

                        __free_pages_bootmem(pfn_to_page(start), order);
                        count += BITS_PER_LONG;
                        start += BITS_PER_LONG;
                } else {
                        unsigned long cur = start;

                        start = ALIGN(start + 1, BITS_PER_LONG);
                        while (vec && cur != start) {
                                if (vec & 1) {
                                        page = pfn_to_page(cur);
                                        __free_pages_bootmem(page, 0);
                                        count++;
                                }
                                vec >>= 1;
                                ++cur;
                        }
                }
        }

        page = virt_to_page(bdata->node_bootmem_map);
        pages = bdata->node_low_pfn - bdata->node_min_pfn;
        pages = bootmem_bootmap_pages(pages);
        count += pages;
        while (pages--)
                __free_pages_bootmem(page++, 0);

        bdebug("nid=%td released=%lx\n", bdata - bootmem_node_data, count);

        return count;
}

static void reset_node_lowmem_managed_pages(pg_data_t *pgdat)
{
        struct zone *z;

        /*
         * In free_area_init_core(), a highmem zone's managed_pages is set
         * to present_pages, and the bootmem allocator doesn't allocate
         * from highmem zones.  So there's no need to recalculate
         * managed_pages because all highmem pages will be managed by the
         * buddy system.  Here "highmem zone" also includes the highmem
         * movable zone.
         */
        for (z = pgdat->node_zones; z < pgdat->node_zones + MAX_NR_ZONES; z++)
                if (!is_highmem(z))
                        z->managed_pages = 0;
}

/**
 * free_all_bootmem_node - release a node's free pages to the buddy allocator
 * @pgdat: node to be released
 *
 * Returns the number of pages actually released.
 */
unsigned long __init free_all_bootmem_node(pg_data_t *pgdat)
{
        register_page_bootmem_info_node(pgdat);
        reset_node_lowmem_managed_pages(pgdat);
        return free_all_bootmem_core(pgdat->bdata);
}

/**
 * free_all_bootmem - release free pages to the buddy allocator
 *
 * Returns the number of pages actually released.
 */
unsigned long __init free_all_bootmem(void)
{
        unsigned long total_pages = 0;
        bootmem_data_t *bdata;
        struct pglist_data *pgdat;

        for_each_online_pgdat(pgdat)
                reset_node_lowmem_managed_pages(pgdat);

        list_for_each_entry(bdata, &bdata_list, list)
                total_pages += free_all_bootmem_core(bdata);

        return total_pages;
}
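
/*
 * Example (sketch of the common mem_init() idiom; not part of this
 * file): once setup is complete, the arch hands all remaining bootmem
 * pages over to the buddy allocator in one call:
 *
 *      totalram_pages += free_all_bootmem();
 *
 * After this point the bitmaps themselves have been freed as well, so
 * the bootmem APIs above must no longer be used.
 */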

static void __init __free(bootmem_data_t *bdata,
                        unsigned long sidx, unsigned long eidx)
{
        unsigned long idx;

        bdebug("nid=%td start=%lx end=%lx\n", bdata - bootmem_node_data,
                sidx + bdata->node_min_pfn,
                eidx + bdata->node_min_pfn);

        if (bdata->hint_idx > sidx)
                bdata->hint_idx = sidx;

        for (idx = sidx; idx < eidx; idx++)
                if (!test_and_clear_bit(idx, bdata->node_bootmem_map))
                        BUG();
}

static int __init __reserve(bootmem_data_t *bdata, unsigned long sidx,
                        unsigned long eidx, int flags)
{
        unsigned long idx;
        int exclusive = flags & BOOTMEM_EXCLUSIVE;

        bdebug("nid=%td start=%lx end=%lx flags=%x\n",
                bdata - bootmem_node_data,
                sidx + bdata->node_min_pfn,
                eidx + bdata->node_min_pfn,
                flags);

        for (idx = sidx; idx < eidx; idx++)
                if (test_and_set_bit(idx, bdata->node_bootmem_map)) {
                        if (exclusive) {
                                __free(bdata, sidx, idx);
                                return -EBUSY;
                        }
                        bdebug("silent double reserve of PFN %lx\n",
                                idx + bdata->node_min_pfn);
                }
        return 0;
}

static int __init mark_bootmem_node(bootmem_data_t *bdata,
                                unsigned long start, unsigned long end,
                                int reserve, int flags)
{
        unsigned long sidx, eidx;

        bdebug("nid=%td start=%lx end=%lx reserve=%d flags=%x\n",
                bdata - bootmem_node_data, start, end, reserve, flags);

        BUG_ON(start < bdata->node_min_pfn);
        BUG_ON(end > bdata->node_low_pfn);

        sidx = start - bdata->node_min_pfn;
        eidx = end - bdata->node_min_pfn;

        if (reserve)
                return __reserve(bdata, sidx, eidx, flags);
        else
                __free(bdata, sidx, eidx);
        return 0;
}

static int __init mark_bootmem(unsigned long start, unsigned long end,
                                int reserve, int flags)
{
        unsigned long pos;
        bootmem_data_t *bdata;

        pos = start;
        list_for_each_entry(bdata, &bdata_list, list) {
                int err;
                unsigned long max;

                if (pos < bdata->node_min_pfn ||
                    pos >= bdata->node_low_pfn) {
                        BUG_ON(pos != start);
                        continue;
                }

                max = min(bdata->node_low_pfn, end);

                err = mark_bootmem_node(bdata, pos, max, reserve, flags);
                if (reserve && err) {
                        mark_bootmem(start, pos, 0, 0);
                        return err;
                }

                if (max == end)
                        return 0;
                pos = bdata->node_low_pfn;
        }
        BUG();
}

/**
 * free_bootmem_node - mark a page range as usable
 * @pgdat: node the range resides on
 * @physaddr: starting physical address of the range
 * @size: size of the range in bytes
 *
 * Partial pages will be considered reserved and left as they are.
 *
 * The range must reside completely on the specified node.
 */
void __init free_bootmem_node(pg_data_t *pgdat, unsigned long physaddr,
                              unsigned long size)
{
        unsigned long start, end;

        kmemleak_free_part(__va(physaddr), size);

        start = PFN_UP(physaddr);
        end = PFN_DOWN(physaddr + size);

        mark_bootmem_node(pgdat->bdata, start, end, 0, 0);
}

/**
 * free_bootmem - mark a page range as usable
 * @physaddr: starting physical address of the range
 * @size: size of the range in bytes
 *
 * Partial pages will be considered reserved and left as they are.
 *
 * The range must be contiguous but may span node boundaries.
 */
void __init free_bootmem(unsigned long physaddr, unsigned long size)
{
        unsigned long start, end;

        kmemleak_free_part(__va(physaddr), size);

        start = PFN_UP(physaddr);
        end = PFN_DOWN(physaddr + size);

        mark_bootmem(start, end, 0, 0);
}
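
/*
 * Example (hedged sketch; for_each_usable_region(), region_start() and
 * region_size() are hypothetical stand-ins for an arch's firmware memory
 * map iterator): an arch typically opens up every usable RAM range after
 * init_bootmem() has marked everything reserved:
 *
 *      for_each_usable_region(i)
 *              free_bootmem(region_start(i), region_size(i));
 */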

/**
 * reserve_bootmem_node - mark a page range as reserved
 * @pgdat: node the range resides on
 * @physaddr: starting physical address of the range
 * @size: size of the range in bytes
 * @flags: reservation flags (see linux/bootmem.h)
 *
 * Partial pages will be reserved.
 *
 * The range must reside completely on the specified node.
 */
int __init reserve_bootmem_node(pg_data_t *pgdat, unsigned long physaddr,
                                 unsigned long size, int flags)
{
        unsigned long start, end;

        start = PFN_DOWN(physaddr);
        end = PFN_UP(physaddr + size);

        return mark_bootmem_node(pgdat->bdata, start, end, 1, flags);
}

/**
 * reserve_bootmem - mark a page range as reserved
 * @addr: starting physical address of the range
 * @size: size of the range in bytes
 * @flags: reservation flags (see linux/bootmem.h)
 *
 * Partial pages will be reserved.
 *
 * The range must be contiguous but may span node boundaries.
 */
int __init reserve_bootmem(unsigned long addr, unsigned long size,
                            int flags)
{
        unsigned long start, end;

        start = PFN_DOWN(addr);
        end = PFN_UP(addr + size);

        return mark_bootmem(start, end, 1, flags);
}
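
/*
 * Example (illustrative; initrd_start_phys and initrd_size are
 * hypothetical names for values usually taken from the boot loader):
 * protecting an initial ramdisk so later boot-time allocations cannot
 * clobber it:
 *
 *      if (reserve_bootmem(initrd_start_phys, initrd_size,
 *                          BOOTMEM_EXCLUSIVE) < 0)
 *              printk(KERN_WARNING "initrd overlaps reserved memory\n");
 *
 * BOOTMEM_EXCLUSIVE makes the call fail with -EBUSY instead of silently
 * double-reserving; BOOTMEM_DEFAULT would accept the overlap.
 */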

static unsigned long __init align_idx(struct bootmem_data *bdata,
                                      unsigned long idx, unsigned long step)
{
        unsigned long base = bdata->node_min_pfn;

        /*
         * Align the index with respect to the node start so that the
         * combination of both satisfies the requested alignment.
         */

        return ALIGN(base + idx, step) - base;
}
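
/*
 * Worked example (illustrative): with node_min_pfn = 3, idx = 6 and
 * step = 4, ALIGN(3 + 6, 4) - 3 = 12 - 3 = 9.  The returned index 9
 * corresponds to absolute pfn 12, which satisfies the 4-page alignment
 * even though the node itself does not start on an aligned pfn.
 */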

static unsigned long __init align_off(struct bootmem_data *bdata,
                                      unsigned long off, unsigned long align)
{
        unsigned long base = PFN_PHYS(bdata->node_min_pfn);

        /* Same as align_idx for byte offsets */

        return ALIGN(base + off, align) - base;
}

static void * __init alloc_bootmem_bdata(struct bootmem_data *bdata,
                                        unsigned long size, unsigned long align,
                                        unsigned long goal, unsigned long limit)
{
        unsigned long fallback = 0;
        unsigned long min, max, start, sidx, midx, step;

        bdebug("nid=%td size=%lx [%lu pages] align=%lx goal=%lx limit=%lx\n",
                bdata - bootmem_node_data, size, PAGE_ALIGN(size) >> PAGE_SHIFT,
                align, goal, limit);

        BUG_ON(!size);
        BUG_ON(align & (align - 1));
        BUG_ON(limit && goal + size > limit);

        if (!bdata->node_bootmem_map)
                return NULL;

        min = bdata->node_min_pfn;
        max = bdata->node_low_pfn;

        goal >>= PAGE_SHIFT;
        limit >>= PAGE_SHIFT;

        if (limit && max > limit)
                max = limit;
        if (max <= min)
                return NULL;

        step = max(align >> PAGE_SHIFT, 1UL);

        if (goal && min < goal && goal < max)
                start = ALIGN(goal, step);
        else
                start = ALIGN(min, step);

        sidx = start - bdata->node_min_pfn;
        midx = max - bdata->node_min_pfn;

        if (bdata->hint_idx > sidx) {
                /*
                 * Handle the valid case of sidx being zero and still
                 * catch the fallback below.
                 */
                fallback = sidx + 1;
                sidx = align_idx(bdata, bdata->hint_idx, step);
        }

        while (1) {
                int merge;
                void *region;
                unsigned long eidx, i, start_off, end_off;
find_block:
                sidx = find_next_zero_bit(bdata->node_bootmem_map, midx, sidx);
                sidx = align_idx(bdata, sidx, step);
                eidx = sidx + PFN_UP(size);

                if (sidx >= midx || eidx > midx)
                        break;

                for (i = sidx; i < eidx; i++)
                        if (test_bit(i, bdata->node_bootmem_map)) {
                                sidx = align_idx(bdata, i, step);
                                if (sidx == i)
                                        sidx += step;
                                goto find_block;
                        }

                if (bdata->last_end_off & (PAGE_SIZE - 1) &&
                                PFN_DOWN(bdata->last_end_off) + 1 == sidx)
                        start_off = align_off(bdata, bdata->last_end_off, align);
                else
                        start_off = PFN_PHYS(sidx);

                merge = PFN_DOWN(start_off) < sidx;
                end_off = start_off + size;

                bdata->last_end_off = end_off;
                bdata->hint_idx = PFN_UP(end_off);

                /*
                 * Reserve the area now:
                 */
                if (__reserve(bdata, PFN_DOWN(start_off) + merge,
                                PFN_UP(end_off), BOOTMEM_EXCLUSIVE))
                        BUG();

                region = phys_to_virt(PFN_PHYS(bdata->node_min_pfn) +
                                start_off);
                memset(region, 0, size);
                /*
                 * The min_count is set to 0 so that bootmem allocated blocks
                 * are never reported as leaks.
                 */
                kmemleak_alloc(region, size, 0, 0);
                return region;
        }

        if (fallback) {
                sidx = align_idx(bdata, fallback - 1, step);
                fallback = 0;
                goto find_block;
        }

        return NULL;
}

static void * __init alloc_bootmem_core(unsigned long size,
                                        unsigned long align,
                                        unsigned long goal,
                                        unsigned long limit)
{
        bootmem_data_t *bdata;
        void *region;

        if (WARN_ON_ONCE(slab_is_available()))
                return kzalloc(size, GFP_NOWAIT);

        list_for_each_entry(bdata, &bdata_list, list) {
                if (goal && bdata->node_low_pfn <= PFN_DOWN(goal))
                        continue;
                if (limit && bdata->node_min_pfn >= PFN_DOWN(limit))
                        break;

                region = alloc_bootmem_bdata(bdata, size, align, goal, limit);
                if (region)
                        return region;
        }

        return NULL;
}

static void * __init ___alloc_bootmem_nopanic(unsigned long size,
                                              unsigned long align,
                                              unsigned long goal,
                                              unsigned long limit)
{
        void *ptr;

restart:
        ptr = alloc_bootmem_core(size, align, goal, limit);
        if (ptr)
                return ptr;
        if (goal) {
                goal = 0;
                goto restart;
        }

        return NULL;
}

/**
 * __alloc_bootmem_nopanic - allocate boot memory without panicking
 * @size: size of the request in bytes
 * @align: alignment of the region
 * @goal: preferred starting address of the region
 *
 * The goal is dropped if it can not be satisfied and the allocation will
 * fall back to memory below @goal.
 *
 * Allocation may happen on any node in the system.
 *
 * Returns NULL on failure.
 */
void * __init __alloc_bootmem_nopanic(unsigned long size, unsigned long align,
                                        unsigned long goal)
{
        unsigned long limit = 0;

        return ___alloc_bootmem_nopanic(size, align, goal, limit);
}

static void * __init ___alloc_bootmem(unsigned long size, unsigned long align,
                                        unsigned long goal, unsigned long limit)
{
        void *mem = ___alloc_bootmem_nopanic(size, align, goal, limit);

        if (mem)
                return mem;
        /*
         * Whoops, we cannot satisfy the allocation request.
         */
        printk(KERN_ALERT "bootmem alloc of %lu bytes failed!\n", size);
        panic("Out of memory");
        return NULL;
}

/**
 * __alloc_bootmem - allocate boot memory
 * @size: size of the request in bytes
 * @align: alignment of the region
 * @goal: preferred starting address of the region
 *
 * The goal is dropped if it can not be satisfied and the allocation will
 * fall back to memory below @goal.
 *
 * Allocation may happen on any node in the system.
 *
 * The function panics if the request can not be satisfied.
 */
void * __init __alloc_bootmem(unsigned long size, unsigned long align,
                              unsigned long goal)
{
        unsigned long limit = 0;

        return ___alloc_bootmem(size, align, goal, limit);
}
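
/*
 * Example (hedged sketch; table and table_bytes are hypothetical): a
 * boot-time hash table is typically allocated with a goal that keeps
 * precious DMA memory free:
 *
 *      table = __alloc_bootmem(table_bytes, PAGE_SIZE,
 *                              __pa(MAX_DMA_ADDRESS));
 *
 * The alloc_bootmem*() convenience macros in linux/bootmem.h expand to
 * calls of this shape; on failure the function panics instead of
 * returning NULL.
 */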

void * __init ___alloc_bootmem_node_nopanic(pg_data_t *pgdat,
                                unsigned long size, unsigned long align,
                                unsigned long goal, unsigned long limit)
{
        void *ptr;

        if (WARN_ON_ONCE(slab_is_available()))
                return kzalloc(size, GFP_NOWAIT);
again:

        /* do not panic in alloc_bootmem_bdata() */
        if (limit && goal + size > limit)
                limit = 0;

        ptr = alloc_bootmem_bdata(pgdat->bdata, size, align, goal, limit);
        if (ptr)
                return ptr;

        ptr = alloc_bootmem_core(size, align, goal, limit);
        if (ptr)
                return ptr;

        if (goal) {
                goal = 0;
                goto again;
        }

        return NULL;
}

void * __init __alloc_bootmem_node_nopanic(pg_data_t *pgdat, unsigned long size,
                                   unsigned long align, unsigned long goal)
{
        if (WARN_ON_ONCE(slab_is_available()))
                return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id);

        return ___alloc_bootmem_node_nopanic(pgdat, size, align, goal, 0);
}

void * __init ___alloc_bootmem_node(pg_data_t *pgdat, unsigned long size,
                                    unsigned long align, unsigned long goal,
                                    unsigned long limit)
{
        void *ptr;

        ptr = ___alloc_bootmem_node_nopanic(pgdat, size, align, goal, limit);
        if (ptr)
                return ptr;

        printk(KERN_ALERT "bootmem alloc of %lu bytes failed!\n", size);
        panic("Out of memory");
        return NULL;
}

/**
 * __alloc_bootmem_node - allocate boot memory from a specific node
 * @pgdat: node to allocate from
 * @size: size of the request in bytes
 * @align: alignment of the region
 * @goal: preferred starting address of the region
 *
 * The goal is dropped if it can not be satisfied and the allocation will
 * fall back to memory below @goal.
 *
 * Allocation may fall back to any node in the system if the specified node
 * can not hold the requested memory.
 *
 * The function panics if the request can not be satisfied.
 */
void * __init __alloc_bootmem_node(pg_data_t *pgdat, unsigned long size,
                                   unsigned long align, unsigned long goal)
{
        if (WARN_ON_ONCE(slab_is_available()))
                return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id);

        return ___alloc_bootmem_node(pgdat, size, align, goal, 0);
}
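
/*
 * Example (hedged sketch; nid and array_size are hypothetical): per-node
 * metadata is usually placed on the node it describes so that later
 * accesses stay local:
 *
 *      map = __alloc_bootmem_node(NODE_DATA(nid), array_size,
 *                                 SMP_CACHE_BYTES, __pa(MAX_DMA_ADDRESS));
 */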

void * __init __alloc_bootmem_node_high(pg_data_t *pgdat, unsigned long size,
                                   unsigned long align, unsigned long goal)
{
#ifdef MAX_DMA32_PFN
        unsigned long end_pfn;

        if (WARN_ON_ONCE(slab_is_available()))
                return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id);

        /* update goal according to MAX_DMA32_PFN */
        end_pfn = pgdat->node_start_pfn + pgdat->node_spanned_pages;

        if (end_pfn > MAX_DMA32_PFN + (128 >> (20 - PAGE_SHIFT)) &&
            (goal >> PAGE_SHIFT) < MAX_DMA32_PFN) {
                void *ptr;
                unsigned long new_goal;

                new_goal = MAX_DMA32_PFN << PAGE_SHIFT;
                ptr = alloc_bootmem_bdata(pgdat->bdata, size, align,
                                                 new_goal, 0);
                if (ptr)
                        return ptr;
        }
#endif

        return __alloc_bootmem_node(pgdat, size, align, goal);
}

#ifndef ARCH_LOW_ADDRESS_LIMIT
#define ARCH_LOW_ADDRESS_LIMIT  0xffffffffUL
#endif

/**
 * __alloc_bootmem_low - allocate low boot memory
 * @size: size of the request in bytes
 * @align: alignment of the region
 * @goal: preferred starting address of the region
 *
 * The goal is dropped if it can not be satisfied and the allocation will
 * fall back to memory below @goal.
 *
 * Allocation may happen on any node in the system.
 *
 * The function panics if the request can not be satisfied.
 */
void * __init __alloc_bootmem_low(unsigned long size, unsigned long align,
                                  unsigned long goal)
{
        return ___alloc_bootmem(size, align, goal, ARCH_LOW_ADDRESS_LIMIT);
}
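
/*
 * Example (illustrative; buf and buf_size are hypothetical): a buffer
 * that must be visible to a device limited to 32-bit physical addresses
 * can come from the low allocator, which caps the search at
 * ARCH_LOW_ADDRESS_LIMIT:
 *
 *      buf = __alloc_bootmem_low(buf_size, PAGE_SIZE, 0);
 */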

/**
 * __alloc_bootmem_low_node - allocate low boot memory from a specific node
 * @pgdat: node to allocate from
 * @size: size of the request in bytes
 * @align: alignment of the region
 * @goal: preferred starting address of the region
 *
 * The goal is dropped if it can not be satisfied and the allocation will
 * fall back to memory below @goal.
 *
 * Allocation may fall back to any node in the system if the specified node
 * can not hold the requested memory.
 *
 * The function panics if the request can not be satisfied.
 */
void * __init __alloc_bootmem_low_node(pg_data_t *pgdat, unsigned long size,
                                       unsigned long align, unsigned long goal)
{
        if (WARN_ON_ONCE(slab_is_available()))
                return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id);

        return ___alloc_bootmem_node(pgdat, size, align,
                                     goal, ARCH_LOW_ADDRESS_LIMIT);
}

This page is part of the FreeBSD/Linux Kernel Cross-Reference, and was automatically generated using a modified version of the LXR engine.