
FreeBSD/Linux Kernel Cross Reference
sys/mm/page_alloc.c


    1 /*
    2  *  linux/mm/page_alloc.c
    3  *
    4  *  Manages the free list; the system allocates free pages here.
    5  *  Note that kmalloc() lives in slab.c
    6  *
    7  *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
    8  *  Swap reorganised 29.12.95, Stephen Tweedie
    9  *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
   10  *  Reshaped it to be a zoned allocator, Ingo Molnar, Red Hat, 1999
   11  *  Discontiguous memory support, Kanoj Sarcar, SGI, Nov 1999
   12  *  Zone balancing, Kanoj Sarcar, SGI, Jan 2000
   13  *  Per cpu hot/cold page lists, bulk allocation, Martin J. Bligh, Sept 2002
   14  *          (lots of bits borrowed from Ingo Molnar & Andrew Morton)
   15  */
   16 
   17 #include <linux/stddef.h>
   18 #include <linux/mm.h>
   19 #include <linux/swap.h>
   20 #include <linux/interrupt.h>
   21 #include <linux/pagemap.h>
   22 #include <linux/jiffies.h>
   23 #include <linux/bootmem.h>
   24 #include <linux/memblock.h>
   25 #include <linux/compiler.h>
   26 #include <linux/kernel.h>
   27 #include <linux/kmemcheck.h>
   28 #include <linux/module.h>
   29 #include <linux/suspend.h>
   30 #include <linux/pagevec.h>
   31 #include <linux/blkdev.h>
   32 #include <linux/slab.h>
   33 #include <linux/ratelimit.h>
   34 #include <linux/oom.h>
   35 #include <linux/notifier.h>
   36 #include <linux/topology.h>
   37 #include <linux/sysctl.h>
   38 #include <linux/cpu.h>
   39 #include <linux/cpuset.h>
   40 #include <linux/memory_hotplug.h>
   41 #include <linux/nodemask.h>
   42 #include <linux/vmalloc.h>
   43 #include <linux/vmstat.h>
   44 #include <linux/mempolicy.h>
   45 #include <linux/stop_machine.h>
   46 #include <linux/sort.h>
   47 #include <linux/pfn.h>
   48 #include <linux/backing-dev.h>
   49 #include <linux/fault-inject.h>
   50 #include <linux/page-isolation.h>
   51 #include <linux/page_cgroup.h>
   52 #include <linux/debugobjects.h>
   53 #include <linux/kmemleak.h>
   54 #include <linux/compaction.h>
   55 #include <trace/events/kmem.h>
   56 #include <linux/ftrace_event.h>
   57 #include <linux/memcontrol.h>
   58 #include <linux/prefetch.h>
   59 #include <linux/migrate.h>
   60 #include <linux/page-debug-flags.h>
   61 
   62 #include <asm/tlbflush.h>
   63 #include <asm/div64.h>
   64 #include "internal.h"
   65 
   66 #ifdef CONFIG_USE_PERCPU_NUMA_NODE_ID
   67 DEFINE_PER_CPU(int, numa_node);
   68 EXPORT_PER_CPU_SYMBOL(numa_node);
   69 #endif
   70 
   71 #ifdef CONFIG_HAVE_MEMORYLESS_NODES
   72 /*
   73  * N.B., Do NOT reference the '_numa_mem_' per cpu variable directly.
   74  * It will not be defined when CONFIG_HAVE_MEMORYLESS_NODES is not defined.
   75  * Use the accessor functions set_numa_mem(), numa_mem_id() and cpu_to_mem()
   76  * defined in <linux/topology.h>.
   77  */
   78 DEFINE_PER_CPU(int, _numa_mem_);                /* Kernel "local memory" node */
   79 EXPORT_PER_CPU_SYMBOL(_numa_mem_);
   80 #endif
   81 
   82 /*
   83  * Array of node states.
   84  */
   85 nodemask_t node_states[NR_NODE_STATES] __read_mostly = {
   86         [N_POSSIBLE] = NODE_MASK_ALL,
   87         [N_ONLINE] = { { [0] = 1UL } },
   88 #ifndef CONFIG_NUMA
   89         [N_NORMAL_MEMORY] = { { [0] = 1UL } },
   90 #ifdef CONFIG_HIGHMEM
   91         [N_HIGH_MEMORY] = { { [0] = 1UL } },
   92 #endif
   93 #ifdef CONFIG_MOVABLE_NODE
   94         [N_MEMORY] = { { [0] = 1UL } },
   95 #endif
   96         [N_CPU] = { { [0] = 1UL } },
   97 #endif  /* NUMA */
   98 };
   99 EXPORT_SYMBOL(node_states);
  100 
  101 unsigned long totalram_pages __read_mostly;
  102 unsigned long totalreserve_pages __read_mostly;
  103 /*
  104  * When calculating the number of globally allowed dirty pages, there
  105  * is a certain number of per-zone reserves that should not be
  106  * considered dirtyable memory.  This is the sum of those reserves
  107  * over all existing zones that contribute dirtyable memory.
  108  */
  109 unsigned long dirty_balance_reserve __read_mostly;
  110 
  111 int percpu_pagelist_fraction;
  112 gfp_t gfp_allowed_mask __read_mostly = GFP_BOOT_MASK;
  113 
  114 #ifdef CONFIG_PM_SLEEP
  115 /*
  116  * The following functions are used by the suspend/hibernate code to temporarily
  117  * change gfp_allowed_mask in order to avoid using I/O during memory allocations
  118  * while devices are suspended.  To avoid races with the suspend/hibernate code,
  119  * they should always be called with pm_mutex held (gfp_allowed_mask also should
  120  * only be modified with pm_mutex held, unless the suspend/hibernate code is
  121  * guaranteed not to run in parallel with that modification).
  122  */
  123 
  124 static gfp_t saved_gfp_mask;
  125 
  126 void pm_restore_gfp_mask(void)
  127 {
  128         WARN_ON(!mutex_is_locked(&pm_mutex));
  129         if (saved_gfp_mask) {
  130                 gfp_allowed_mask = saved_gfp_mask;
  131                 saved_gfp_mask = 0;
  132         }
  133 }
  134 
  135 void pm_restrict_gfp_mask(void)
  136 {
  137         WARN_ON(!mutex_is_locked(&pm_mutex));
  138         WARN_ON(saved_gfp_mask);
  139         saved_gfp_mask = gfp_allowed_mask;
  140         gfp_allowed_mask &= ~GFP_IOFS;
  141 }
  142 
  143 bool pm_suspended_storage(void)
  144 {
  145         if ((gfp_allowed_mask & GFP_IOFS) == GFP_IOFS)
  146                 return false;
  147         return true;
  148 }
  149 #endif /* CONFIG_PM_SLEEP */
  150 
  151 #ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
  152 int pageblock_order __read_mostly;
  153 #endif
  154 
  155 static void __free_pages_ok(struct page *page, unsigned int order);
  156 
  157 /*
  158  * results with 256, 32 in the lowmem_reserve sysctl:
  159  *      1G machine -> (16M dma, 800M-16M normal, 1G-800M high)
  160  *      1G machine -> (16M dma, 784M normal, 224M high)
  161  *      NORMAL allocation will leave 784M/256 of ram reserved in the ZONE_DMA
  162  *      HIGHMEM allocation will leave 224M/32 of ram reserved in ZONE_NORMAL
   163  *      HIGHMEM allocation will leave (224M+784M)/256 of ram reserved in ZONE_DMA
  164  *
  165  * TBD: should special case ZONE_DMA32 machines here - in those we normally
  166  * don't need any ZONE_NORMAL reservation
  167  */
  168 int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES-1] = {
  169 #ifdef CONFIG_ZONE_DMA
  170          256,
  171 #endif
  172 #ifdef CONFIG_ZONE_DMA32
  173          256,
  174 #endif
  175 #ifdef CONFIG_HIGHMEM
  176          32,
  177 #endif
  178          32,
  179 };
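
As a worked instance of the ratios above (illustrative arithmetic, not from the source): with the default ratio of 256 and the 784M ZONE_NORMAL of the 1G example, a NORMAL allocation keeps roughly 784M/256 ~= 3M of ZONE_DMA in reserve; with the ratio of 32 and 224M of ZONE_HIGHMEM, a HIGHMEM allocation keeps 224M/32 = 7M of ZONE_NORMAL, and (224M+784M)/256 ~= 4M of ZONE_DMA, in reserve.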
  180 
  181 EXPORT_SYMBOL(totalram_pages);
  182 
  183 static char * const zone_names[MAX_NR_ZONES] = {
  184 #ifdef CONFIG_ZONE_DMA
  185          "DMA",
  186 #endif
  187 #ifdef CONFIG_ZONE_DMA32
  188          "DMA32",
  189 #endif
  190          "Normal",
  191 #ifdef CONFIG_HIGHMEM
  192          "HighMem",
  193 #endif
  194          "Movable",
  195 };
  196 
  197 int min_free_kbytes = 1024;
  198 
  199 static unsigned long __meminitdata nr_kernel_pages;
  200 static unsigned long __meminitdata nr_all_pages;
  201 static unsigned long __meminitdata dma_reserve;
  202 
  203 #ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
  204 static unsigned long __meminitdata arch_zone_lowest_possible_pfn[MAX_NR_ZONES];
  205 static unsigned long __meminitdata arch_zone_highest_possible_pfn[MAX_NR_ZONES];
  206 static unsigned long __initdata required_kernelcore;
  207 static unsigned long __initdata required_movablecore;
  208 static unsigned long __meminitdata zone_movable_pfn[MAX_NUMNODES];
  209 
  210 /* movable_zone is the "real" zone pages in ZONE_MOVABLE are taken from */
  211 int movable_zone;
  212 EXPORT_SYMBOL(movable_zone);
  213 #endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
  214 
  215 #if MAX_NUMNODES > 1
  216 int nr_node_ids __read_mostly = MAX_NUMNODES;
  217 int nr_online_nodes __read_mostly = 1;
  218 EXPORT_SYMBOL(nr_node_ids);
  219 EXPORT_SYMBOL(nr_online_nodes);
  220 #endif
  221 
  222 int page_group_by_mobility_disabled __read_mostly;
  223 
  224 void set_pageblock_migratetype(struct page *page, int migratetype)
  225 {
  226 
  227         if (unlikely(page_group_by_mobility_disabled))
  228                 migratetype = MIGRATE_UNMOVABLE;
  229 
  230         set_pageblock_flags_group(page, (unsigned long)migratetype,
  231                                         PB_migrate, PB_migrate_end);
  232 }
  233 
  234 bool oom_killer_disabled __read_mostly;
  235 
  236 #ifdef CONFIG_DEBUG_VM
  237 static int page_outside_zone_boundaries(struct zone *zone, struct page *page)
  238 {
  239         int ret = 0;
  240         unsigned seq;
  241         unsigned long pfn = page_to_pfn(page);
  242 
  243         do {
  244                 seq = zone_span_seqbegin(zone);
  245                 if (pfn >= zone->zone_start_pfn + zone->spanned_pages)
  246                         ret = 1;
  247                 else if (pfn < zone->zone_start_pfn)
  248                         ret = 1;
  249         } while (zone_span_seqretry(zone, seq));
  250 
  251         return ret;
  252 }
  253 
  254 static int page_is_consistent(struct zone *zone, struct page *page)
  255 {
  256         if (!pfn_valid_within(page_to_pfn(page)))
  257                 return 0;
  258         if (zone != page_zone(page))
  259                 return 0;
  260 
  261         return 1;
  262 }
  263 /*
  264  * Temporary debugging check for pages not lying within a given zone.
  265  */
  266 static int bad_range(struct zone *zone, struct page *page)
  267 {
  268         if (page_outside_zone_boundaries(zone, page))
  269                 return 1;
  270         if (!page_is_consistent(zone, page))
  271                 return 1;
  272 
  273         return 0;
  274 }
  275 #else
  276 static inline int bad_range(struct zone *zone, struct page *page)
  277 {
  278         return 0;
  279 }
  280 #endif
  281 
  282 static void bad_page(struct page *page)
  283 {
  284         static unsigned long resume;
  285         static unsigned long nr_shown;
  286         static unsigned long nr_unshown;
  287 
  288         /* Don't complain about poisoned pages */
  289         if (PageHWPoison(page)) {
  290                 reset_page_mapcount(page); /* remove PageBuddy */
  291                 return;
  292         }
  293 
  294         /*
  295          * Allow a burst of 60 reports, then keep quiet for that minute;
  296          * or allow a steady drip of one report per second.
  297          */
  298         if (nr_shown == 60) {
  299                 if (time_before(jiffies, resume)) {
  300                         nr_unshown++;
  301                         goto out;
  302                 }
  303                 if (nr_unshown) {
  304                         printk(KERN_ALERT
  305                               "BUG: Bad page state: %lu messages suppressed\n",
  306                                 nr_unshown);
  307                         nr_unshown = 0;
  308                 }
  309                 nr_shown = 0;
  310         }
  311         if (nr_shown++ == 0)
  312                 resume = jiffies + 60 * HZ;
  313 
  314         printk(KERN_ALERT "BUG: Bad page state in process %s  pfn:%05lx\n",
  315                 current->comm, page_to_pfn(page));
  316         dump_page(page);
  317 
  318         print_modules();
  319         dump_stack();
  320 out:
  321         /* Leave bad fields for debug, except PageBuddy could make trouble */
  322         reset_page_mapcount(page); /* remove PageBuddy */
  323         add_taint(TAINT_BAD_PAGE);
  324 }
  325 
  326 /*
  327  * Higher-order pages are called "compound pages".  They are structured thusly:
  328  *
  329  * The first PAGE_SIZE page is called the "head page".
  330  *
  331  * The remaining PAGE_SIZE pages are called "tail pages".
  332  *
  333  * All pages have PG_compound set.  All tail pages have their ->first_page
  334  * pointing at the head page.
  335  *
  336  * The first tail page's ->lru.next holds the address of the compound page's
  337  * put_page() function.  Its ->lru.prev holds the order of allocation.
  338  * This usage means that zero-order pages may not be compound.
  339  */
  340 
  341 static void free_compound_page(struct page *page)
  342 {
  343         __free_pages_ok(page, compound_order(page));
  344 }
  345 
  346 void prep_compound_page(struct page *page, unsigned long order)
  347 {
  348         int i;
  349         int nr_pages = 1 << order;
  350 
  351         set_compound_page_dtor(page, free_compound_page);
  352         set_compound_order(page, order);
  353         __SetPageHead(page);
  354         for (i = 1; i < nr_pages; i++) {
  355                 struct page *p = page + i;
  356                 __SetPageTail(p);
  357                 set_page_count(p, 0);
  358                 p->first_page = page;
  359         }
  360 }
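
The sketch below is an editorial illustration, not part of the original file: it shows how the head/tail layout described above is normally produced and observed by a caller. The helper name is hypothetical; only alloc_pages()/__free_pages() with __GFP_COMP (which reaches prep_compound_page() via prep_new_page() later in this file) and the headers already included above are assumed.

static void compound_layout_sketch(void)        /* hypothetical, illustration only */
{
        /* order-2 compound allocation: one head page plus three tail pages */
        struct page *head = alloc_pages(GFP_KERNEL | __GFP_COMP, 2);
        int i;

        if (!head)
                return;

        /* prep_compound_page() recorded the order on the head page */
        VM_BUG_ON(compound_order(head) != 2);

        /* every tail page carries PG_tail and points back at the head */
        for (i = 1; i < (1 << 2); i++)
                VM_BUG_ON(!PageTail(head + i) || (head + i)->first_page != head);

        __free_pages(head, 2);          /* releases the whole compound page */
}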
  361 
  362 /* update __split_huge_page_refcount if you change this function */
  363 static int destroy_compound_page(struct page *page, unsigned long order)
  364 {
  365         int i;
  366         int nr_pages = 1 << order;
  367         int bad = 0;
  368 
  369         if (unlikely(compound_order(page) != order)) {
  370                 bad_page(page);
  371                 bad++;
  372         }
  373 
  374         __ClearPageHead(page);
  375 
  376         for (i = 1; i < nr_pages; i++) {
  377                 struct page *p = page + i;
  378 
  379                 if (unlikely(!PageTail(p) || (p->first_page != page))) {
  380                         bad_page(page);
  381                         bad++;
  382                 }
  383                 __ClearPageTail(p);
  384         }
  385 
  386         return bad;
  387 }
  388 
  389 static inline void prep_zero_page(struct page *page, int order, gfp_t gfp_flags)
  390 {
  391         int i;
  392 
  393         /*
  394          * clear_highpage() will use KM_USER0, so it's a bug to use __GFP_ZERO
  395          * and __GFP_HIGHMEM from hard or soft interrupt context.
  396          */
  397         VM_BUG_ON((gfp_flags & __GFP_HIGHMEM) && in_interrupt());
  398         for (i = 0; i < (1 << order); i++)
  399                 clear_highpage(page + i);
  400 }
  401 
  402 #ifdef CONFIG_DEBUG_PAGEALLOC
  403 unsigned int _debug_guardpage_minorder;
  404 
  405 static int __init debug_guardpage_minorder_setup(char *buf)
  406 {
  407         unsigned long res;
  408 
  409         if (kstrtoul(buf, 10, &res) < 0 ||  res > MAX_ORDER / 2) {
  410                 printk(KERN_ERR "Bad debug_guardpage_minorder value\n");
  411                 return 0;
  412         }
  413         _debug_guardpage_minorder = res;
  414         printk(KERN_INFO "Setting debug_guardpage_minorder to %lu\n", res);
  415         return 0;
  416 }
  417 __setup("debug_guardpage_minorder=", debug_guardpage_minorder_setup);
  418 
  419 static inline void set_page_guard_flag(struct page *page)
  420 {
  421         __set_bit(PAGE_DEBUG_FLAG_GUARD, &page->debug_flags);
  422 }
  423 
  424 static inline void clear_page_guard_flag(struct page *page)
  425 {
  426         __clear_bit(PAGE_DEBUG_FLAG_GUARD, &page->debug_flags);
  427 }
  428 #else
  429 static inline void set_page_guard_flag(struct page *page) { }
  430 static inline void clear_page_guard_flag(struct page *page) { }
  431 #endif
  432 
  433 static inline void set_page_order(struct page *page, int order)
  434 {
  435         set_page_private(page, order);
  436         __SetPageBuddy(page);
  437 }
  438 
  439 static inline void rmv_page_order(struct page *page)
  440 {
  441         __ClearPageBuddy(page);
  442         set_page_private(page, 0);
  443 }
  444 
  445 /*
  446  * Locate the struct page for both the matching buddy in our
   447  * pair (buddy1) and the combined order-(O+1) page they form (page).
  448  *
  449  * 1) Any buddy B1 will have an order O twin B2 which satisfies
  450  * the following equation:
  451  *     B2 = B1 ^ (1 << O)
   452  * For example, if the starting buddy (B1) is #8, its order-1
   453  * buddy (B2) is #10:
   454  *     B2 = 8 ^ (1 << 1) = 8 ^ 2 = 10
  455  *
  456  * 2) Any buddy B will have an order O+1 parent P which
  457  * satisfies the following equation:
  458  *     P = B & ~(1 << O)
  459  *
  460  * Assumption: *_mem_map is contiguous at least up to MAX_ORDER
  461  */
  462 static inline unsigned long
  463 __find_buddy_index(unsigned long page_idx, unsigned int order)
  464 {
  465         return page_idx ^ (1 << order);
  466 }
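
To make the two identities above concrete, the comment's own example can be worked through; the note below is an editorial illustration and is not part of the original source.

/*
 * For page index 8 at order 1:
 *
 *     buddy index  = 8 ^ (1 << 1)  = 10     (what __find_buddy_index(8, 1) returns)
 *     parent index = 8 & ~(1 << 1) = 8      (index of the combined order-2 page)
 *
 * and 10 & ~(1 << 1) is also 8, so both buddies agree on the page they merge
 * into.  __free_one_page() below computes the same parent index as
 * "combined_idx = buddy_idx & page_idx", which is equivalent because the two
 * indices differ only in bit O.
 */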
  467 
  468 /*
   469  * This function checks whether a page is free && is the buddy;
   470  * we can coalesce a page and its buddy if
  471  * (a) the buddy is not in a hole &&
  472  * (b) the buddy is in the buddy system &&
  473  * (c) a page and its buddy have the same order &&
  474  * (d) a page and its buddy are in the same zone.
  475  *
   476  * For recording whether a page is in the buddy system, we set ->_mapcount to -2.
  477  * Setting, clearing, and testing _mapcount -2 is serialized by zone->lock.
  478  *
  479  * For recording page's order, we use page_private(page).
  480  */
  481 static inline int page_is_buddy(struct page *page, struct page *buddy,
  482                                                                 int order)
  483 {
  484         if (!pfn_valid_within(page_to_pfn(buddy)))
  485                 return 0;
  486 
  487         if (page_zone_id(page) != page_zone_id(buddy))
  488                 return 0;
  489 
  490         if (page_is_guard(buddy) && page_order(buddy) == order) {
  491                 VM_BUG_ON(page_count(buddy) != 0);
  492                 return 1;
  493         }
  494 
  495         if (PageBuddy(buddy) && page_order(buddy) == order) {
  496                 VM_BUG_ON(page_count(buddy) != 0);
  497                 return 1;
  498         }
  499         return 0;
  500 }
  501 
  502 /*
  503  * Freeing function for a buddy system allocator.
  504  *
   505  * The concept of a buddy system is to maintain a direct-mapped table
  506  * (containing bit values) for memory blocks of various "orders".
  507  * The bottom level table contains the map for the smallest allocatable
  508  * units of memory (here, pages), and each level above it describes
  509  * pairs of units from the levels below, hence, "buddies".
  510  * At a high level, all that happens here is marking the table entry
  511  * at the bottom level available, and propagating the changes upward
  512  * as necessary, plus some accounting needed to play nicely with other
  513  * parts of the VM system.
   514  * At each level, we keep a list of pages, which are heads of contiguous
   515  * free runs of length (1 << order) and marked with _mapcount -2. The page's
   516  * order is recorded in the page_private(page) field.
  517  * So when we are allocating or freeing one, we can derive the state of the
  518  * other.  That is, if we allocate a small block, and both were
  519  * free, the remainder of the region must be split into blocks.
  520  * If a block is freed, and its buddy is also free, then this
  521  * triggers coalescing into a block of larger size.
  522  *
  523  * -- nyc
  524  */
  525 
  526 static inline void __free_one_page(struct page *page,
  527                 struct zone *zone, unsigned int order,
  528                 int migratetype)
  529 {
  530         unsigned long page_idx;
  531         unsigned long combined_idx;
  532         unsigned long uninitialized_var(buddy_idx);
  533         struct page *buddy;
  534 
  535         if (unlikely(PageCompound(page)))
  536                 if (unlikely(destroy_compound_page(page, order)))
  537                         return;
  538 
  539         VM_BUG_ON(migratetype == -1);
  540 
  541         page_idx = page_to_pfn(page) & ((1 << MAX_ORDER) - 1);
  542 
  543         VM_BUG_ON(page_idx & ((1 << order) - 1));
  544         VM_BUG_ON(bad_range(zone, page));
  545 
  546         while (order < MAX_ORDER-1) {
  547                 buddy_idx = __find_buddy_index(page_idx, order);
  548                 buddy = page + (buddy_idx - page_idx);
  549                 if (!page_is_buddy(page, buddy, order))
  550                         break;
  551                 /*
  552                  * Our buddy is free or it is CONFIG_DEBUG_PAGEALLOC guard page,
  553                  * merge with it and move up one order.
  554                  */
  555                 if (page_is_guard(buddy)) {
  556                         clear_page_guard_flag(buddy);
  557                         set_page_private(page, 0);
  558                         __mod_zone_freepage_state(zone, 1 << order,
  559                                                   migratetype);
  560                 } else {
  561                         list_del(&buddy->lru);
  562                         zone->free_area[order].nr_free--;
  563                         rmv_page_order(buddy);
  564                 }
  565                 combined_idx = buddy_idx & page_idx;
  566                 page = page + (combined_idx - page_idx);
  567                 page_idx = combined_idx;
  568                 order++;
  569         }
  570         set_page_order(page, order);
  571 
  572         /*
  573          * If this is not the largest possible page, check if the buddy
  574          * of the next-highest order is free. If it is, it's possible
   575          * that pages are being freed that will coalesce soon. In case
   576          * that is happening, add the free page to the tail of the list
  577          * so it's less likely to be used soon and more likely to be merged
  578          * as a higher order page
  579          */
  580         if ((order < MAX_ORDER-2) && pfn_valid_within(page_to_pfn(buddy))) {
  581                 struct page *higher_page, *higher_buddy;
  582                 combined_idx = buddy_idx & page_idx;
  583                 higher_page = page + (combined_idx - page_idx);
  584                 buddy_idx = __find_buddy_index(combined_idx, order + 1);
  585                 higher_buddy = higher_page + (buddy_idx - combined_idx);
  586                 if (page_is_buddy(higher_page, higher_buddy, order + 1)) {
  587                         list_add_tail(&page->lru,
  588                                 &zone->free_area[order].free_list[migratetype]);
  589                         goto out;
  590                 }
  591         }
  592 
  593         list_add(&page->lru, &zone->free_area[order].free_list[migratetype]);
  594 out:
  595         zone->free_area[order].nr_free++;
  596 }
  597 
  598 static inline int free_pages_check(struct page *page)
  599 {
  600         if (unlikely(page_mapcount(page) |
  601                 (page->mapping != NULL)  |
  602                 (atomic_read(&page->_count) != 0) |
  603                 (page->flags & PAGE_FLAGS_CHECK_AT_FREE) |
  604                 (mem_cgroup_bad_page_check(page)))) {
  605                 bad_page(page);
  606                 return 1;
  607         }
  608         reset_page_last_nid(page);
  609         if (page->flags & PAGE_FLAGS_CHECK_AT_PREP)
  610                 page->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
  611         return 0;
  612 }
  613 
  614 /*
  615  * Frees a number of pages from the PCP lists
  616  * Assumes all pages on list are in same zone, and of same order.
  617  * count is the number of pages to free.
  618  *
  619  * If the zone was previously in an "all pages pinned" state then look to
  620  * see if this freeing clears that state.
  621  *
  622  * And clear the zone's pages_scanned counter, to hold off the "all pages are
  623  * pinned" detection logic.
  624  */
  625 static void free_pcppages_bulk(struct zone *zone, int count,
  626                                         struct per_cpu_pages *pcp)
  627 {
  628         int migratetype = 0;
  629         int batch_free = 0;
  630         int to_free = count;
  631 
  632         spin_lock(&zone->lock);
  633         zone->all_unreclaimable = 0;
  634         zone->pages_scanned = 0;
  635 
  636         while (to_free) {
  637                 struct page *page;
  638                 struct list_head *list;
  639 
  640                 /*
  641                  * Remove pages from lists in a round-robin fashion. A
  642                  * batch_free count is maintained that is incremented when an
  643                  * empty list is encountered.  This is so more pages are freed
  644                  * off fuller lists instead of spinning excessively around empty
  645                  * lists
  646                  */
  647                 do {
  648                         batch_free++;
  649                         if (++migratetype == MIGRATE_PCPTYPES)
  650                                 migratetype = 0;
  651                         list = &pcp->lists[migratetype];
  652                 } while (list_empty(list));
  653 
  654                 /* This is the only non-empty list. Free them all. */
  655                 if (batch_free == MIGRATE_PCPTYPES)
  656                         batch_free = to_free;
  657 
  658                 do {
  659                         int mt; /* migratetype of the to-be-freed page */
  660 
  661                         page = list_entry(list->prev, struct page, lru);
   662                         /* must delete as __free_one_page() manipulates the list */
  663                         list_del(&page->lru);
  664                         mt = get_freepage_migratetype(page);
  665                         /* MIGRATE_MOVABLE list may include MIGRATE_RESERVEs */
  666                         __free_one_page(page, zone, 0, mt);
  667                         trace_mm_page_pcpu_drain(page, 0, mt);
  668                         if (likely(get_pageblock_migratetype(page) != MIGRATE_ISOLATE)) {
  669                                 __mod_zone_page_state(zone, NR_FREE_PAGES, 1);
  670                                 if (is_migrate_cma(mt))
  671                                         __mod_zone_page_state(zone, NR_FREE_CMA_PAGES, 1);
  672                         }
  673                 } while (--to_free && --batch_free && !list_empty(list));
  674         }
  675         spin_unlock(&zone->lock);
  676 }
  677 
  678 static void free_one_page(struct zone *zone, struct page *page, int order,
  679                                 int migratetype)
  680 {
  681         spin_lock(&zone->lock);
  682         zone->all_unreclaimable = 0;
  683         zone->pages_scanned = 0;
  684 
  685         __free_one_page(page, zone, order, migratetype);
  686         if (unlikely(migratetype != MIGRATE_ISOLATE))
  687                 __mod_zone_freepage_state(zone, 1 << order, migratetype);
  688         spin_unlock(&zone->lock);
  689 }
  690 
  691 static bool free_pages_prepare(struct page *page, unsigned int order)
  692 {
  693         int i;
  694         int bad = 0;
  695 
  696         trace_mm_page_free(page, order);
  697         kmemcheck_free_shadow(page, order);
  698 
  699         if (PageAnon(page))
  700                 page->mapping = NULL;
  701         for (i = 0; i < (1 << order); i++)
  702                 bad += free_pages_check(page + i);
  703         if (bad)
  704                 return false;
  705 
  706         if (!PageHighMem(page)) {
  707                 debug_check_no_locks_freed(page_address(page),PAGE_SIZE<<order);
  708                 debug_check_no_obj_freed(page_address(page),
  709                                            PAGE_SIZE << order);
  710         }
  711         arch_free_page(page, order);
  712         kernel_map_pages(page, 1 << order, 0);
  713 
  714         return true;
  715 }
  716 
  717 static void __free_pages_ok(struct page *page, unsigned int order)
  718 {
  719         unsigned long flags;
  720         int migratetype;
  721 
  722         if (!free_pages_prepare(page, order))
  723                 return;
  724 
  725         local_irq_save(flags);
  726         __count_vm_events(PGFREE, 1 << order);
  727         migratetype = get_pageblock_migratetype(page);
  728         set_freepage_migratetype(page, migratetype);
  729         free_one_page(page_zone(page), page, order, migratetype);
  730         local_irq_restore(flags);
  731 }
  732 
  733 /*
  734  * Read access to zone->managed_pages is safe because it's unsigned long,
  735  * but we still need to serialize writers. Currently all callers of
   736  * __free_pages_bootmem() except put_page_bootmem() run only at boot
   737  * time. So for shorter boot time, we shift the burden of serializing
   738  * writers to put_page_bootmem().
  739  */
  740 void __meminit __free_pages_bootmem(struct page *page, unsigned int order)
  741 {
  742         unsigned int nr_pages = 1 << order;
  743         unsigned int loop;
  744 
  745         prefetchw(page);
  746         for (loop = 0; loop < nr_pages; loop++) {
  747                 struct page *p = &page[loop];
  748 
  749                 if (loop + 1 < nr_pages)
  750                         prefetchw(p + 1);
  751                 __ClearPageReserved(p);
  752                 set_page_count(p, 0);
  753         }
  754 
  755         page_zone(page)->managed_pages += 1 << order;
  756         set_page_refcounted(page);
  757         __free_pages(page, order);
  758 }
  759 
  760 #ifdef CONFIG_CMA
   761 /* Free whole pageblock and set its migration type to MIGRATE_CMA. */
  762 void __init init_cma_reserved_pageblock(struct page *page)
  763 {
  764         unsigned i = pageblock_nr_pages;
  765         struct page *p = page;
  766 
  767         do {
  768                 __ClearPageReserved(p);
  769                 set_page_count(p, 0);
  770         } while (++p, --i);
  771 
  772         set_page_refcounted(page);
  773         set_pageblock_migratetype(page, MIGRATE_CMA);
  774         __free_pages(page, pageblock_order);
  775         totalram_pages += pageblock_nr_pages;
  776 }
  777 #endif
  778 
  779 /*
  780  * The order of subdivision here is critical for the IO subsystem.
  781  * Please do not alter this order without good reasons and regression
  782  * testing. Specifically, as large blocks of memory are subdivided,
  783  * the order in which smaller blocks are delivered depends on the order
  784  * they're subdivided in this function. This is the primary factor
  785  * influencing the order in which pages are delivered to the IO
  786  * subsystem according to empirical testing, and this is also justified
  787  * by considering the behavior of a buddy system containing a single
  788  * large block of memory acted on by a series of small allocations.
  789  * This behavior is a critical factor in sglist merging's success.
  790  *
  791  * -- nyc
  792  */
  793 static inline void expand(struct zone *zone, struct page *page,
  794         int low, int high, struct free_area *area,
  795         int migratetype)
  796 {
  797         unsigned long size = 1 << high;
  798 
  799         while (high > low) {
  800                 area--;
  801                 high--;
  802                 size >>= 1;
  803                 VM_BUG_ON(bad_range(zone, &page[size]));
  804 
  805 #ifdef CONFIG_DEBUG_PAGEALLOC
  806                 if (high < debug_guardpage_minorder()) {
  807                         /*
   808                          * Mark as guard pages (or page), so they can be
   809                          * merged back into the allocator when the buddy is freed.
   810                          * Corresponding page table entries will not be touched;
   811                          * the pages stay not present in the virtual address space.
  812                          */
  813                         INIT_LIST_HEAD(&page[size].lru);
  814                         set_page_guard_flag(&page[size]);
  815                         set_page_private(&page[size], high);
  816                         /* Guard pages are not available for any usage */
  817                         __mod_zone_freepage_state(zone, -(1 << high),
  818                                                   migratetype);
  819                         continue;
  820                 }
  821 #endif
  822                 list_add(&page[size].lru, &area->free_list[migratetype]);
  823                 area->nr_free++;
  824                 set_page_order(&page[size], high);
  825         }
  826 }
  827 
  828 /*
  829  * This page is about to be returned from the page allocator
  830  */
  831 static inline int check_new_page(struct page *page)
  832 {
  833         if (unlikely(page_mapcount(page) |
  834                 (page->mapping != NULL)  |
  835                 (atomic_read(&page->_count) != 0)  |
  836                 (page->flags & PAGE_FLAGS_CHECK_AT_PREP) |
  837                 (mem_cgroup_bad_page_check(page)))) {
  838                 bad_page(page);
  839                 return 1;
  840         }
  841         return 0;
  842 }
  843 
  844 static int prep_new_page(struct page *page, int order, gfp_t gfp_flags)
  845 {
  846         int i;
  847 
  848         for (i = 0; i < (1 << order); i++) {
  849                 struct page *p = page + i;
  850                 if (unlikely(check_new_page(p)))
  851                         return 1;
  852         }
  853 
  854         set_page_private(page, 0);
  855         set_page_refcounted(page);
  856 
  857         arch_alloc_page(page, order);
  858         kernel_map_pages(page, 1 << order, 1);
  859 
  860         if (gfp_flags & __GFP_ZERO)
  861                 prep_zero_page(page, order, gfp_flags);
  862 
  863         if (order && (gfp_flags & __GFP_COMP))
  864                 prep_compound_page(page, order);
  865 
  866         return 0;
  867 }
  868 
  869 /*
  870  * Go through the free lists for the given migratetype and remove
  871  * the smallest available page from the freelists
  872  */
  873 static inline
  874 struct page *__rmqueue_smallest(struct zone *zone, unsigned int order,
  875                                                 int migratetype)
  876 {
  877         unsigned int current_order;
  878         struct free_area * area;
  879         struct page *page;
  880 
  881         /* Find a page of the appropriate size in the preferred list */
  882         for (current_order = order; current_order < MAX_ORDER; ++current_order) {
  883                 area = &(zone->free_area[current_order]);
  884                 if (list_empty(&area->free_list[migratetype]))
  885                         continue;
  886 
  887                 page = list_entry(area->free_list[migratetype].next,
  888                                                         struct page, lru);
  889                 list_del(&page->lru);
  890                 rmv_page_order(page);
  891                 area->nr_free--;
  892                 expand(zone, page, order, current_order, area, migratetype);
  893                 return page;
  894         }
  895 
  896         return NULL;
  897 }
  898 
  899 
  900 /*
   901  * This array describes the order in which free lists are fallen back to
   902  * when the free lists for the desired migrate type are depleted.
  903  */
  904 static int fallbacks[MIGRATE_TYPES][4] = {
  905         [MIGRATE_UNMOVABLE]   = { MIGRATE_RECLAIMABLE, MIGRATE_MOVABLE,     MIGRATE_RESERVE },
  906         [MIGRATE_RECLAIMABLE] = { MIGRATE_UNMOVABLE,   MIGRATE_MOVABLE,     MIGRATE_RESERVE },
  907 #ifdef CONFIG_CMA
  908         [MIGRATE_MOVABLE]     = { MIGRATE_CMA,         MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE, MIGRATE_RESERVE },
  909         [MIGRATE_CMA]         = { MIGRATE_RESERVE }, /* Never used */
  910 #else
  911         [MIGRATE_MOVABLE]     = { MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE,   MIGRATE_RESERVE },
  912 #endif
  913         [MIGRATE_RESERVE]     = { MIGRATE_RESERVE }, /* Never used */
  914         [MIGRATE_ISOLATE]     = { MIGRATE_RESERVE }, /* Never used */
  915 };
  916 
  917 /*
  918  * Move the free pages in a range to the free lists of the requested type.
   919  * Note that start_page and end_page need not be aligned on a pageblock
  920  * boundary. If alignment is required, use move_freepages_block()
  921  */
  922 int move_freepages(struct zone *zone,
  923                           struct page *start_page, struct page *end_page,
  924                           int migratetype)
  925 {
  926         struct page *page;
  927         unsigned long order;
  928         int pages_moved = 0;
  929 
  930 #ifndef CONFIG_HOLES_IN_ZONE
  931         /*
  932          * page_zone is not safe to call in this context when
  933          * CONFIG_HOLES_IN_ZONE is set. This bug check is probably redundant
  934          * anyway as we check zone boundaries in move_freepages_block().
  935          * Remove at a later date when no bug reports exist related to
  936          * grouping pages by mobility
  937          */
  938         BUG_ON(page_zone(start_page) != page_zone(end_page));
  939 #endif
  940 
  941         for (page = start_page; page <= end_page;) {
  942                 /* Make sure we are not inadvertently changing nodes */
  943                 VM_BUG_ON(page_to_nid(page) != zone_to_nid(zone));
  944 
  945                 if (!pfn_valid_within(page_to_pfn(page))) {
  946                         page++;
  947                         continue;
  948                 }
  949 
  950                 if (!PageBuddy(page)) {
  951                         page++;
  952                         continue;
  953                 }
  954 
  955                 order = page_order(page);
  956                 list_move(&page->lru,
  957                           &zone->free_area[order].free_list[migratetype]);
  958                 set_freepage_migratetype(page, migratetype);
  959                 page += 1 << order;
  960                 pages_moved += 1 << order;
  961         }
  962 
  963         return pages_moved;
  964 }
  965 
  966 int move_freepages_block(struct zone *zone, struct page *page,
  967                                 int migratetype)
  968 {
  969         unsigned long start_pfn, end_pfn;
  970         struct page *start_page, *end_page;
  971 
  972         start_pfn = page_to_pfn(page);
  973         start_pfn = start_pfn & ~(pageblock_nr_pages-1);
  974         start_page = pfn_to_page(start_pfn);
  975         end_page = start_page + pageblock_nr_pages - 1;
  976         end_pfn = start_pfn + pageblock_nr_pages - 1;
  977 
  978         /* Do not cross zone boundaries */
  979         if (start_pfn < zone->zone_start_pfn)
  980                 start_page = page;
  981         if (end_pfn >= zone->zone_start_pfn + zone->spanned_pages)
  982                 return 0;
  983 
  984         return move_freepages(zone, start_page, end_page, migratetype);
  985 }
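
As an illustration of the masking above (not from the original source; pageblock_nr_pages == 512 is assumed purely for the arithmetic):

/*
 * Suppose page_to_pfn(page) == 1000 and pageblock_nr_pages == 512:
 *
 *     start_pfn = 1000 & ~(512 - 1) = 512
 *     end_pfn   = 512 + 512 - 1     = 1023
 *
 * so the whole pageblock of pfns 512..1023 is handed to move_freepages().
 * If start_pfn fell before the zone, the walk would instead begin at the
 * page that was passed in; if end_pfn fell past the zone, nothing would be
 * moved at all.
 */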
  986 
  987 static void change_pageblock_range(struct page *pageblock_page,
  988                                         int start_order, int migratetype)
  989 {
  990         int nr_pageblocks = 1 << (start_order - pageblock_order);
  991 
  992         while (nr_pageblocks--) {
  993                 set_pageblock_migratetype(pageblock_page, migratetype);
  994                 pageblock_page += pageblock_nr_pages;
  995         }
  996 }
  997 
  998 /* Remove an element from the buddy allocator from the fallback list */
  999 static inline struct page *
 1000 __rmqueue_fallback(struct zone *zone, int order, int start_migratetype)
 1001 {
 1002         struct free_area * area;
 1003         int current_order;
 1004         struct page *page;
 1005         int migratetype, i;
 1006 
 1007         /* Find the largest possible block of pages in the other list */
 1008         for (current_order = MAX_ORDER-1; current_order >= order;
 1009                                                 --current_order) {
 1010                 for (i = 0;; i++) {
 1011                         migratetype = fallbacks[start_migratetype][i];
 1012 
 1013                         /* MIGRATE_RESERVE handled later if necessary */
 1014                         if (migratetype == MIGRATE_RESERVE)
 1015                                 break;
 1016 
 1017                         area = &(zone->free_area[current_order]);
 1018                         if (list_empty(&area->free_list[migratetype]))
 1019                                 continue;
 1020 
 1021                         page = list_entry(area->free_list[migratetype].next,
 1022                                         struct page, lru);
 1023                         area->nr_free--;
 1024 
 1025                         /*
 1026                          * If breaking a large block of pages, move all free
 1027                          * pages to the preferred allocation list. If falling
 1028                          * back for a reclaimable kernel allocation, be more
 1029                          * aggressive about taking ownership of free pages
 1030                          *
  1031                          * On the other hand, never change the migration
  1032                          * type of MIGRATE_CMA pageblocks nor move CMA
  1033                          * pages to different free lists. We don't
 1034                          * want unmovable pages to be allocated from
 1035                          * MIGRATE_CMA areas.
 1036                          */
 1037                         if (!is_migrate_cma(migratetype) &&
 1038                             (unlikely(current_order >= pageblock_order / 2) ||
 1039                              start_migratetype == MIGRATE_RECLAIMABLE ||
 1040                              page_group_by_mobility_disabled)) {
 1041                                 int pages;
 1042                                 pages = move_freepages_block(zone, page,
 1043                                                                 start_migratetype);
 1044 
 1045                                 /* Claim the whole block if over half of it is free */
 1046                                 if (pages >= (1 << (pageblock_order-1)) ||
 1047                                                 page_group_by_mobility_disabled)
 1048                                         set_pageblock_migratetype(page,
 1049                                                                 start_migratetype);
 1050 
 1051                                 migratetype = start_migratetype;
 1052                         }
 1053 
 1054                         /* Remove the page from the freelists */
 1055                         list_del(&page->lru);
 1056                         rmv_page_order(page);
 1057 
 1058                         /* Take ownership for orders >= pageblock_order */
 1059                         if (current_order >= pageblock_order &&
 1060                             !is_migrate_cma(migratetype))
 1061                                 change_pageblock_range(page, current_order,
 1062                                                         start_migratetype);
 1063 
 1064                         expand(zone, page, order, current_order, area,
 1065                                is_migrate_cma(migratetype)
 1066                              ? migratetype : start_migratetype);
 1067 
 1068                         trace_mm_page_alloc_extfrag(page, order, current_order,
 1069                                 start_migratetype, migratetype);
 1070 
 1071                         return page;
 1072                 }
 1073         }
 1074 
 1075         return NULL;
 1076 }
 1077 
 1078 /*
 1079  * Do the hard work of removing an element from the buddy allocator.
 1080  * Call me with the zone->lock already held.
 1081  */
 1082 static struct page *__rmqueue(struct zone *zone, unsigned int order,
 1083                                                 int migratetype)
 1084 {
 1085         struct page *page;
 1086 
 1087 retry_reserve:
 1088         page = __rmqueue_smallest(zone, order, migratetype);
 1089 
 1090         if (unlikely(!page) && migratetype != MIGRATE_RESERVE) {
 1091                 page = __rmqueue_fallback(zone, order, migratetype);
 1092 
 1093                 /*
 1094                  * Use MIGRATE_RESERVE rather than fail an allocation. goto
 1095                  * is used because __rmqueue_smallest is an inline function
 1096                  * and we want just one call site
 1097                  */
 1098                 if (!page) {
 1099                         migratetype = MIGRATE_RESERVE;
 1100                         goto retry_reserve;
 1101                 }
 1102         }
 1103 
 1104         trace_mm_page_alloc_zone_locked(page, order, migratetype);
 1105         return page;
 1106 }
 1107 
 1108 /*
 1109  * Obtain a specified number of elements from the buddy allocator, all under
 1110  * a single hold of the lock, for efficiency.  Add them to the supplied list.
 1111  * Returns the number of new pages which were placed at *list.
 1112  */
 1113 static int rmqueue_bulk(struct zone *zone, unsigned int order,
 1114                         unsigned long count, struct list_head *list,
 1115                         int migratetype, int cold)
 1116 {
 1117         int mt = migratetype, i;
 1118 
 1119         spin_lock(&zone->lock);
 1120         for (i = 0; i < count; ++i) {
 1121                 struct page *page = __rmqueue(zone, order, migratetype);
 1122                 if (unlikely(page == NULL))
 1123                         break;
 1124 
 1125                 /*
 1126                  * Split buddy pages returned by expand() are received here
  1127                  * in physical page order. The page is added to the caller's
  1128                  * list and the list head then moves forward. From the caller's
  1129                  * perspective, the linked list is ordered by page number under
  1130                  * some conditions. This is useful for IO devices that can
 1131                  * merge IO requests if the physical pages are ordered
 1132                  * properly.
 1133                  */
 1134                 if (likely(cold == 0))
 1135                         list_add(&page->lru, list);
 1136                 else
 1137                         list_add_tail(&page->lru, list);
 1138                 if (IS_ENABLED(CONFIG_CMA)) {
 1139                         mt = get_pageblock_migratetype(page);
 1140                         if (!is_migrate_cma(mt) && mt != MIGRATE_ISOLATE)
 1141                                 mt = migratetype;
 1142                 }
 1143                 set_freepage_migratetype(page, mt);
 1144                 list = &page->lru;
 1145                 if (is_migrate_cma(mt))
 1146                         __mod_zone_page_state(zone, NR_FREE_CMA_PAGES,
 1147                                               -(1 << order));
 1148         }
 1149         __mod_zone_page_state(zone, NR_FREE_PAGES, -(i << order));
 1150         spin_unlock(&zone->lock);
 1151         return i;
 1152 }
 1153 
 1154 #ifdef CONFIG_NUMA
 1155 /*
 1156  * Called from the vmstat counter updater to drain pagesets of this
 1157  * currently executing processor on remote nodes after they have
 1158  * expired.
 1159  *
 1160  * Note that this function must be called with the thread pinned to
 1161  * a single processor.
 1162  */
 1163 void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp)
 1164 {
 1165         unsigned long flags;
 1166         int to_drain;
 1167 
 1168         local_irq_save(flags);
 1169         if (pcp->count >= pcp->batch)
 1170                 to_drain = pcp->batch;
 1171         else
 1172                 to_drain = pcp->count;
 1173         if (to_drain > 0) {
 1174                 free_pcppages_bulk(zone, to_drain, pcp);
 1175                 pcp->count -= to_drain;
 1176         }
 1177         local_irq_restore(flags);
 1178 }
 1179 #endif
 1180 
 1181 /*
 1182  * Drain pages of the indicated processor.
 1183  *
 1184  * The processor must either be the current processor and the
 1185  * thread pinned to the current processor or a processor that
 1186  * is not online.
 1187  */
 1188 static void drain_pages(unsigned int cpu)
 1189 {
 1190         unsigned long flags;
 1191         struct zone *zone;
 1192 
 1193         for_each_populated_zone(zone) {
 1194                 struct per_cpu_pageset *pset;
 1195                 struct per_cpu_pages *pcp;
 1196 
 1197                 local_irq_save(flags);
 1198                 pset = per_cpu_ptr(zone->pageset, cpu);
 1199 
 1200                 pcp = &pset->pcp;
 1201                 if (pcp->count) {
 1202                         free_pcppages_bulk(zone, pcp->count, pcp);
 1203                         pcp->count = 0;
 1204                 }
 1205                 local_irq_restore(flags);
 1206         }
 1207 }
 1208 
 1209 /*
 1210  * Spill all of this CPU's per-cpu pages back into the buddy allocator.
 1211  */
 1212 void drain_local_pages(void *arg)
 1213 {
 1214         drain_pages(smp_processor_id());
 1215 }
 1216 
 1217 /*
 1218  * Spill all the per-cpu pages from all CPUs back into the buddy allocator.
 1219  *
 1220  * Note that this code is protected against sending an IPI to an offline
 1221  * CPU but does not guarantee sending an IPI to newly hotplugged CPUs:
 1222  * on_each_cpu_mask() blocks hotplug and won't talk to offlined CPUs but
 1223  * nothing keeps CPUs from showing up after we populated the cpumask and
 1224  * before the call to on_each_cpu_mask().
 1225  */
 1226 void drain_all_pages(void)
 1227 {
 1228         int cpu;
 1229         struct per_cpu_pageset *pcp;
 1230         struct zone *zone;
 1231 
 1232         /*
  1233          * Allocate in the BSS so we won't require allocation in
 1234          * direct reclaim path for CONFIG_CPUMASK_OFFSTACK=y
 1235          */
 1236         static cpumask_t cpus_with_pcps;
 1237 
 1238         /*
  1239          * We don't care about racing with a CPU hotplug event,
  1240          * as the offline notification will cause the notified
  1241          * cpu to drain that CPU's pcps, and on_each_cpu_mask
  1242          * disables preemption as part of its processing.
 1243          */
 1244         for_each_online_cpu(cpu) {
 1245                 bool has_pcps = false;
 1246                 for_each_populated_zone(zone) {
 1247                         pcp = per_cpu_ptr(zone->pageset, cpu);
 1248                         if (pcp->pcp.count) {
 1249                                 has_pcps = true;
 1250                                 break;
 1251                         }
 1252                 }
 1253                 if (has_pcps)
 1254                         cpumask_set_cpu(cpu, &cpus_with_pcps);
 1255                 else
 1256                         cpumask_clear_cpu(cpu, &cpus_with_pcps);
 1257         }
 1258         on_each_cpu_mask(&cpus_with_pcps, drain_local_pages, NULL, 1);
 1259 }
 1260 
 1261 #ifdef CONFIG_HIBERNATION
 1262 
 1263 void mark_free_pages(struct zone *zone)
 1264 {
 1265         unsigned long pfn, max_zone_pfn;
 1266         unsigned long flags;
 1267         int order, t;
 1268         struct list_head *curr;
 1269 
 1270         if (!zone->spanned_pages)
 1271                 return;
 1272 
 1273         spin_lock_irqsave(&zone->lock, flags);
 1274 
 1275         max_zone_pfn = zone->zone_start_pfn + zone->spanned_pages;
 1276         for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
 1277                 if (pfn_valid(pfn)) {
 1278                         struct page *page = pfn_to_page(pfn);
 1279 
 1280                         if (!swsusp_page_is_forbidden(page))
 1281                                 swsusp_unset_page_free(page);
 1282                 }
 1283 
 1284         for_each_migratetype_order(order, t) {
 1285                 list_for_each(curr, &zone->free_area[order].free_list[t]) {
 1286                         unsigned long i;
 1287 
 1288                         pfn = page_to_pfn(list_entry(curr, struct page, lru));
 1289                         for (i = 0; i < (1UL << order); i++)
 1290                                 swsusp_set_page_free(pfn_to_page(pfn + i));
 1291                 }
 1292         }
 1293         spin_unlock_irqrestore(&zone->lock, flags);
 1294 }
 1295 #endif /* CONFIG_PM */
 1296 
 1297 /*
 1298  * Free a 0-order page
 1299  * cold == 1 ? free a cold page : free a hot page
 1300  */
 1301 void free_hot_cold_page(struct page *page, int cold)
 1302 {
 1303         struct zone *zone = page_zone(page);
 1304         struct per_cpu_pages *pcp;
 1305         unsigned long flags;
 1306         int migratetype;
 1307 
 1308         if (!free_pages_prepare(page, 0))
 1309                 return;
 1310 
 1311         migratetype = get_pageblock_migratetype(page);
 1312         set_freepage_migratetype(page, migratetype);
 1313         local_irq_save(flags);
 1314         __count_vm_event(PGFREE);
 1315 
 1316         /*
 1317          * We only track unmovable, reclaimable and movable on pcp lists.
 1318          * Free ISOLATE pages back to the allocator because they are being
  1319          * offlined, but treat RESERVE as movable pages so we can get those
  1320          * areas back if necessary. Otherwise, we may have to free
  1321          * excessively into the page allocator.
 1322          */
 1323         if (migratetype >= MIGRATE_PCPTYPES) {
 1324                 if (unlikely(migratetype == MIGRATE_ISOLATE)) {
 1325                         free_one_page(zone, page, 0, migratetype);
 1326                         goto out;
 1327                 }
 1328                 migratetype = MIGRATE_MOVABLE;
 1329         }
 1330 
 1331         pcp = &this_cpu_ptr(zone->pageset)->pcp;
 1332         if (cold)
 1333                 list_add_tail(&page->lru, &pcp->lists[migratetype]);
 1334         else
 1335                 list_add(&page->lru, &pcp->lists[migratetype]);
 1336         pcp->count++;
 1337         if (pcp->count >= pcp->high) {
 1338                 free_pcppages_bulk(zone, pcp->batch, pcp);
 1339                 pcp->count -= pcp->batch;
 1340         }
 1341 
 1342 out:
 1343         local_irq_restore(flags);
 1344 }
 1345 
 1346 /*
 1347  * Free a list of 0-order pages
 1348  */
 1349 void free_hot_cold_page_list(struct list_head *list, int cold)
 1350 {
 1351         struct page *page, *next;
 1352 
 1353         list_for_each_entry_safe(page, next, list, lru) {
 1354                 trace_mm_page_free_batched(page, cold);
 1355                 free_hot_cold_page(page, cold);
 1356         }
 1357 }
 1358 
 1359 /*
 1360  * split_page takes a non-compound higher-order page, and splits it into
  1361  * n (1<<order) sub-pages: page[0..n-1]
 1362  * Each sub-page must be freed individually.
 1363  *
 1364  * Note: this is probably too low level an operation for use in drivers.
 1365  * Please consult with lkml before using this in your driver.
 1366  */
 1367 void split_page(struct page *page, unsigned int order)
 1368 {
 1369         int i;
 1370 
 1371         VM_BUG_ON(PageCompound(page));
 1372         VM_BUG_ON(!page_count(page));
 1373 
 1374 #ifdef CONFIG_KMEMCHECK
 1375         /*
 1376          * Split shadow pages too, because free(page[0]) would
 1377          * otherwise free the whole shadow.
 1378          */
 1379         if (kmemcheck_page_is_tracked(page))
 1380                 split_page(virt_to_page(page[0].shadow), order);
 1381 #endif
 1382 
 1383         for (i = 1; i < (1 << order); i++)
 1384                 set_page_refcounted(page + i);
 1385 }
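
A minimal usage sketch (editorial, not part of the original file); the function name is hypothetical and only the standard alloc_pages()/__free_page() calls are assumed:

static void split_page_usage_sketch(void)       /* hypothetical, illustration only */
{
        /* a non-compound order-2 allocation: note, no __GFP_COMP */
        struct page *page = alloc_pages(GFP_KERNEL, 2);
        int i;

        if (!page)
                return;

        /* turn the order-2 block into four independently refcounted pages */
        split_page(page, 2);

        /* each sub-page must now be freed on its own */
        for (i = 0; i < (1 << 2); i++)
                __free_page(page + i);
}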
 1386 
 1387 static int __isolate_free_page(struct page *page, unsigned int order)
 1388 {
 1389         unsigned long watermark;
 1390         struct zone *zone;
 1391         int mt;
 1392 
 1393         BUG_ON(!PageBuddy(page));
 1394 
 1395         zone = page_zone(page);
 1396         mt = get_pageblock_migratetype(page);
 1397 
 1398         if (mt != MIGRATE_ISOLATE) {
 1399                 /* Obey watermarks as if the page was being allocated */
 1400                 watermark = low_wmark_pages(zone) + (1 << order);
 1401                 if (!zone_watermark_ok(zone, 0, watermark, 0, 0))
 1402                         return 0;
 1403 
 1404                 __mod_zone_freepage_state(zone, -(1UL << order), mt);
 1405         }
 1406 
 1407         /* Remove page from free list */
 1408         list_del(&page->lru);
 1409         zone->free_area[order].nr_free--;
 1410         rmv_page_order(page);
 1411 
 1412         /* Set the pageblock if the isolated page is at least a pageblock */
 1413         if (order >= pageblock_order - 1) {
 1414                 struct page *endpage = page + (1 << order) - 1;
 1415                 for (; page < endpage; page += pageblock_nr_pages) {
 1416                         int mt = get_pageblock_migratetype(page);
 1417                         if (mt != MIGRATE_ISOLATE && !is_migrate_cma(mt))
 1418                                 set_pageblock_migratetype(page,
 1419                                                           MIGRATE_MOVABLE);
 1420                 }
 1421         }
 1422 
 1423         return 1UL << order;
 1424 }
 1425 
 1426 /*
 1427  * Similar to split_page except the page is already free. As this is only
 1428  * being used for migration, the migratetype of the block also changes.
 1429  * As this is called with interrupts disabled, the caller is responsible
 1430  * for calling arch_alloc_page() and kernel_map_pages() after interrupts
 1431  * are enabled.
 1432  *
 1433  * Note: this is probably too low level an operation for use in drivers.
 1434  * Please consult with lkml before using this in your driver.
 1435  */
 1436 int split_free_page(struct page *page)
 1437 {
 1438         unsigned int order;
 1439         int nr_pages;
 1440 
 1441         order = page_order(page);
 1442 
 1443         nr_pages = __isolate_free_page(page, order);
 1444         if (!nr_pages)
 1445                 return 0;
 1446 
 1447         /* Split into individual pages */
 1448         set_page_refcounted(page);
 1449         split_page(page, order);
 1450         return nr_pages;
 1451 }
 1452 
 1453 /*
 1454  * Really, prep_compound_page() should be called from __rmqueue_bulk().  But
 1455  * we cheat by calling it from here, in the order > 0 path.  Saves a branch
 1456  * or two.
 1457  */
 1458 static inline
 1459 struct page *buffered_rmqueue(struct zone *preferred_zone,
 1460                         struct zone *zone, int order, gfp_t gfp_flags,
 1461                         int migratetype)
 1462 {
 1463         unsigned long flags;
 1464         struct page *page;
 1465         int cold = !!(gfp_flags & __GFP_COLD);
 1466 
 1467 again:
 1468         if (likely(order == 0)) {
 1469                 struct per_cpu_pages *pcp;
 1470                 struct list_head *list;
 1471 
 1472                 local_irq_save(flags);
 1473                 pcp = &this_cpu_ptr(zone->pageset)->pcp;
 1474                 list = &pcp->lists[migratetype];
 1475                 if (list_empty(list)) {
 1476                         pcp->count += rmqueue_bulk(zone, 0,
 1477                                         pcp->batch, list,
 1478                                         migratetype, cold);
 1479                         if (unlikely(list_empty(list)))
 1480                                 goto failed;
 1481                 }
 1482 
 1483                 if (cold)
 1484                         page = list_entry(list->prev, struct page, lru);
 1485                 else
 1486                         page = list_entry(list->next, struct page, lru);
 1487 
 1488                 list_del(&page->lru);
 1489                 pcp->count--;
 1490         } else {
 1491                 if (unlikely(gfp_flags & __GFP_NOFAIL)) {
 1492                         /*
 1493                          * __GFP_NOFAIL is not to be used in new code.
 1494                          *
 1495                          * All __GFP_NOFAIL callers should be fixed so that they
 1496                          * properly detect and handle allocation failures.
 1497                          *
 1498                          * We most definitely don't want callers attempting to
 1499                          * allocate greater than order-1 page units with
 1500                          * __GFP_NOFAIL.
 1501                          */
 1502                         WARN_ON_ONCE(order > 1);
 1503                 }
 1504                 spin_lock_irqsave(&zone->lock, flags);
 1505                 page = __rmqueue(zone, order, migratetype);
 1506                 spin_unlock(&zone->lock);
 1507                 if (!page)
 1508                         goto failed;
 1509                 __mod_zone_freepage_state(zone, -(1 << order),
 1510                                           get_pageblock_migratetype(page));
 1511         }
 1512 
 1513         __count_zone_vm_events(PGALLOC, zone, 1 << order);
 1514         zone_statistics(preferred_zone, zone, gfp_flags);
 1515         local_irq_restore(flags);
 1516 
 1517         VM_BUG_ON(bad_range(zone, page));
 1518         if (prep_new_page(page, order, gfp_flags))
 1519                 goto again;
 1520         return page;
 1521 
 1522 failed:
 1523         local_irq_restore(flags);
 1524         return NULL;
 1525 }
 1526 
 1527 #ifdef CONFIG_FAIL_PAGE_ALLOC
 1528 
 1529 static struct {
 1530         struct fault_attr attr;
 1531 
 1532         u32 ignore_gfp_highmem;
 1533         u32 ignore_gfp_wait;
 1534         u32 min_order;
 1535 } fail_page_alloc = {
 1536         .attr = FAULT_ATTR_INITIALIZER,
 1537         .ignore_gfp_wait = 1,
 1538         .ignore_gfp_highmem = 1,
 1539         .min_order = 1,
 1540 };
 1541 
 1542 static int __init setup_fail_page_alloc(char *str)
 1543 {
 1544         return setup_fault_attr(&fail_page_alloc.attr, str);
 1545 }
 1546 __setup("fail_page_alloc=", setup_fail_page_alloc);
 1547 
 1548 static bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
 1549 {
 1550         if (order < fail_page_alloc.min_order)
 1551                 return false;
 1552         if (gfp_mask & __GFP_NOFAIL)
 1553                 return false;
 1554         if (fail_page_alloc.ignore_gfp_highmem && (gfp_mask & __GFP_HIGHMEM))
 1555                 return false;
 1556         if (fail_page_alloc.ignore_gfp_wait && (gfp_mask & __GFP_WAIT))
 1557                 return false;
 1558 
 1559         return should_fail(&fail_page_alloc.attr, 1 << order);
 1560 }
 1561 
 1562 #ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
 1563 
 1564 static int __init fail_page_alloc_debugfs(void)
 1565 {
 1566         umode_t mode = S_IFREG | S_IRUSR | S_IWUSR;
 1567         struct dentry *dir;
 1568 
 1569         dir = fault_create_debugfs_attr("fail_page_alloc", NULL,
 1570                                         &fail_page_alloc.attr);
 1571         if (IS_ERR(dir))
 1572                 return PTR_ERR(dir);
 1573 
 1574         if (!debugfs_create_bool("ignore-gfp-wait", mode, dir,
 1575                                 &fail_page_alloc.ignore_gfp_wait))
 1576                 goto fail;
 1577         if (!debugfs_create_bool("ignore-gfp-highmem", mode, dir,
 1578                                 &fail_page_alloc.ignore_gfp_highmem))
 1579                 goto fail;
 1580         if (!debugfs_create_u32("min-order", mode, dir,
 1581                                 &fail_page_alloc.min_order))
 1582                 goto fail;
 1583 
 1584         return 0;
 1585 fail:
 1586         debugfs_remove_recursive(dir);
 1587 
 1588         return -ENOMEM;
 1589 }
 1590 
 1591 late_initcall(fail_page_alloc_debugfs);
 1592 
 1593 #endif /* CONFIG_FAULT_INJECTION_DEBUG_FS */
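      /*
       * Usage note (illustrative; Documentation/fault-injection/ is the
       * authoritative reference): the boot parameter takes the generic
       * fault_attr format,
       *
       *     fail_page_alloc=<interval>,<probability>,<space>,<times>
       *
       * and with CONFIG_FAULT_INJECTION_DEBUG_FS the generic knobs plus the
       * three attributes created above appear under
       * /sys/kernel/debug/fail_page_alloc/, e.g.
       *
       *     echo 1 > /sys/kernel/debug/fail_page_alloc/ignore-gfp-wait
       *     echo 2 > /sys/kernel/debug/fail_page_alloc/min-order
       */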
 1594 
 1595 #else /* CONFIG_FAIL_PAGE_ALLOC */
 1596 
 1597 static inline bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
 1598 {
 1599         return false;
 1600 }
 1601 
 1602 #endif /* CONFIG_FAIL_PAGE_ALLOC */
 1603 
 1604 /*
 1605  * Return true if free pages are above 'mark'. This takes into account the order
 1606  * of the allocation.
 1607  */
 1608 static bool __zone_watermark_ok(struct zone *z, int order, unsigned long mark,
 1609                       int classzone_idx, int alloc_flags, long free_pages)
 1610 {
 1611         /* free_pages may go negative - that's OK */
 1612         long min = mark;
 1613         long lowmem_reserve = z->lowmem_reserve[classzone_idx];
 1614         int o;
 1615 
 1616         free_pages -= (1 << order) - 1;
 1617         if (alloc_flags & ALLOC_HIGH)
 1618                 min -= min / 2;
 1619         if (alloc_flags & ALLOC_HARDER)
 1620                 min -= min / 4;
 1621 #ifdef CONFIG_CMA
 1622         /* If allocation can't use CMA areas don't use free CMA pages */
 1623         if (!(alloc_flags & ALLOC_CMA))
 1624                 free_pages -= zone_page_state(z, NR_FREE_CMA_PAGES);
 1625 #endif
 1626         if (free_pages <= min + lowmem_reserve)
 1627                 return false;
 1628         for (o = 0; o < order; o++) {
 1629                 /* At the next order, this order's pages become unavailable */
 1630                 free_pages -= z->free_area[o].nr_free << o;
 1631 
 1632                 /* Require fewer higher order pages to be free */
 1633                 min >>= 1;
 1634 
 1635                 if (free_pages <= min)
 1636                         return false;
 1637         }
 1638         return true;
 1639 }
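      /*
       * Worked example (hypothetical numbers): consider an order-2 request
       * against a zone with mark = 128, neither ALLOC_HIGH nor ALLOC_HARDER,
       * lowmem_reserve = 0, and 200 free pages of which 120 are order-0
       * pages and 40 sit in order-1 blocks.  free_pages starts at
       * 200 - 3 = 197 > 128, so the overall check passes; at order 0,
       * 197 - 120 = 77 must exceed 128/2 = 64 (it does); at order 1,
       * 77 - 40 = 37 must exceed 64/2 = 32 (it does).  The watermark test
       * succeeds because enough memory remains in blocks of order 2 or
       * larger.
       */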
 1640 
 1641 bool zone_watermark_ok(struct zone *z, int order, unsigned long mark,
 1642                       int classzone_idx, int alloc_flags)
 1643 {
 1644         return __zone_watermark_ok(z, order, mark, classzone_idx, alloc_flags,
 1645                                         zone_page_state(z, NR_FREE_PAGES));
 1646 }
 1647 
 1648 bool zone_watermark_ok_safe(struct zone *z, int order, unsigned long mark,
 1649                       int classzone_idx, int alloc_flags)
 1650 {
 1651         long free_pages = zone_page_state(z, NR_FREE_PAGES);
 1652 
 1653         if (z->percpu_drift_mark && free_pages < z->percpu_drift_mark)
 1654                 free_pages = zone_page_state_snapshot(z, NR_FREE_PAGES);
 1655 
 1656         return __zone_watermark_ok(z, order, mark, classzone_idx, alloc_flags,
 1657                                                                 free_pages);
 1658 }
 1659 
 1660 #ifdef CONFIG_NUMA
 1661 /*
 1662  * zlc_setup - Setup for "zonelist cache".  Uses cached zone data to
 1663  * skip over zones that are not allowed by the cpuset, or that have
 1664  * been recently (in last second) found to be nearly full.  See further
 1665  * comments in mmzone.h.  Reduces cache footprint of zonelist scans
 1666  * that have to skip over a lot of full or unallowed zones.
 1667  *
 1668  * If the zonelist cache is present in the passed in zonelist, then
 1669  * returns a pointer to the allowed node mask (either the current
 1670  * task's mems_allowed, or node_states[N_MEMORY].)
 1671  *
 1672  * If the zonelist cache is not available for this zonelist, does
 1673  * nothing and returns NULL.
 1674  *
 1675  * If the fullzones BITMAP in the zonelist cache is stale (more than
 1676  * a second since last zap'd) then we zap it out (clear its bits.)
 1677  *
 1678  * We hold off even calling zlc_setup, until after we've checked the
 1679  * first zone in the zonelist, on the theory that most allocations will
 1680  * be satisfied from that first zone, so best to examine that zone as
 1681  * quickly as we can.
 1682  */
 1683 static nodemask_t *zlc_setup(struct zonelist *zonelist, int alloc_flags)
 1684 {
 1685         struct zonelist_cache *zlc;     /* cached zonelist speedup info */
 1686         nodemask_t *allowednodes;       /* zonelist_cache approximation */
 1687 
 1688         zlc = zonelist->zlcache_ptr;
 1689         if (!zlc)
 1690                 return NULL;
 1691 
 1692         if (time_after(jiffies, zlc->last_full_zap + HZ)) {
 1693                 bitmap_zero(zlc->fullzones, MAX_ZONES_PER_ZONELIST);
 1694                 zlc->last_full_zap = jiffies;
 1695         }
 1696 
 1697         allowednodes = !in_interrupt() && (alloc_flags & ALLOC_CPUSET) ?
 1698                                         &cpuset_current_mems_allowed :
 1699                                         &node_states[N_MEMORY];
 1700         return allowednodes;
 1701 }
 1702 
 1703 /*
 1704  * Given 'z' scanning a zonelist, run a couple of quick checks to see
 1705  * if it is worth looking at further for free memory:
 1706  *  1) Check that the zone isn't thought to be full (doesn't have its
 1707  *     bit set in the zonelist_cache fullzones BITMAP).
 1708  *  2) Check that the zone's node (obtained from the zonelist_cache
 1709  *     z_to_n[] mapping) is allowed in the passed in allowednodes mask.
 1710  * Return true (non-zero) if zone is worth looking at further, or
 1711  * else return false (zero) if it is not.
 1712  *
 1713  * This check -ignores- the distinction between various watermarks,
 1714  * such as GFP_HIGH, GFP_ATOMIC, PF_MEMALLOC, ...  If a zone is
 1715  * found to be full for any variation of these watermarks, it will
 1716  * be considered full for up to one second by all requests, unless
 1717  * we are so low on memory on all allowed nodes that we are forced
 1718  * into the second scan of the zonelist.
 1719  *
 1720  * In the second scan we ignore this zonelist cache and exactly
 1721  * apply the watermarks to all zones, even if it is slower to do so.
 1722  * We are low on memory in the second scan, and should leave no stone
 1723  * unturned looking for a free page.
 1724  */
 1725 static int zlc_zone_worth_trying(struct zonelist *zonelist, struct zoneref *z,
 1726                                                 nodemask_t *allowednodes)
 1727 {
 1728         struct zonelist_cache *zlc;     /* cached zonelist speedup info */
 1729         int i;                          /* index of *z in zonelist zones */
 1730         int n;                          /* node that zone *z is on */
 1731 
 1732         zlc = zonelist->zlcache_ptr;
 1733         if (!zlc)
 1734                 return 1;
 1735 
 1736         i = z - zonelist->_zonerefs;
 1737         n = zlc->z_to_n[i];
 1738 
 1739         /* This zone is worth trying if it is allowed but not full */
 1740         return node_isset(n, *allowednodes) && !test_bit(i, zlc->fullzones);
 1741 }
 1742 
 1743 /*
 1744  * Given 'z' scanning a zonelist, set the corresponding bit in
 1745  * zlc->fullzones, so that subsequent attempts to allocate a page
 1746  * from that zone don't waste time re-examining it.
 1747  */
 1748 static void zlc_mark_zone_full(struct zonelist *zonelist, struct zoneref *z)
 1749 {
 1750         struct zonelist_cache *zlc;     /* cached zonelist speedup info */
 1751         int i;                          /* index of *z in zonelist zones */
 1752 
 1753         zlc = zonelist->zlcache_ptr;
 1754         if (!zlc)
 1755                 return;
 1756 
 1757         i = z - zonelist->_zonerefs;
 1758 
 1759         set_bit(i, zlc->fullzones);
 1760 }
 1761 
 1762 /*
 1763  * clear all zones full, called after direct reclaim makes progress so that
 1764  * a zone that was recently full is not skipped over for up to a second
 1765  */
 1766 static void zlc_clear_zones_full(struct zonelist *zonelist)
 1767 {
 1768         struct zonelist_cache *zlc;     /* cached zonelist speedup info */
 1769 
 1770         zlc = zonelist->zlcache_ptr;
 1771         if (!zlc)
 1772                 return;
 1773 
 1774         bitmap_zero(zlc->fullzones, MAX_ZONES_PER_ZONELIST);
 1775 }
 1776 
 1777 static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone)
 1778 {
 1779         return node_isset(local_zone->node, zone->zone_pgdat->reclaim_nodes);
 1780 }
 1781 
 1782 static void __paginginit init_zone_allows_reclaim(int nid)
 1783 {
 1784         int i;
 1785 
 1786         for_each_online_node(i)
 1787                 if (node_distance(nid, i) <= RECLAIM_DISTANCE)
 1788                         node_set(i, NODE_DATA(nid)->reclaim_nodes);
 1789                 else
 1790                         zone_reclaim_mode = 1;
 1791 }
 1792 
 1793 #else   /* CONFIG_NUMA */
 1794 
 1795 static nodemask_t *zlc_setup(struct zonelist *zonelist, int alloc_flags)
 1796 {
 1797         return NULL;
 1798 }
 1799 
 1800 static int zlc_zone_worth_trying(struct zonelist *zonelist, struct zoneref *z,
 1801                                 nodemask_t *allowednodes)
 1802 {
 1803         return 1;
 1804 }
 1805 
 1806 static void zlc_mark_zone_full(struct zonelist *zonelist, struct zoneref *z)
 1807 {
 1808 }
 1809 
 1810 static void zlc_clear_zones_full(struct zonelist *zonelist)
 1811 {
 1812 }
 1813 
 1814 static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone)
 1815 {
 1816         return true;
 1817 }
 1818 
 1819 static inline void init_zone_allows_reclaim(int nid)
 1820 {
 1821 }
 1822 #endif  /* CONFIG_NUMA */
 1823 
 1824 /*
 1825  * get_page_from_freelist goes through the zonelist trying to allocate
 1826  * a page.
 1827  */
 1828 static struct page *
 1829 get_page_from_freelist(gfp_t gfp_mask, nodemask_t *nodemask, unsigned int order,
 1830                 struct zonelist *zonelist, int high_zoneidx, int alloc_flags,
 1831                 struct zone *preferred_zone, int migratetype)
 1832 {
 1833         struct zoneref *z;
 1834         struct page *page = NULL;
 1835         int classzone_idx;
 1836         struct zone *zone;
 1837         nodemask_t *allowednodes = NULL;/* zonelist_cache approximation */
 1838         int zlc_active = 0;             /* set if using zonelist_cache */
 1839         int did_zlc_setup = 0;          /* just call zlc_setup() one time */
 1840 
 1841         classzone_idx = zone_idx(preferred_zone);
 1842 zonelist_scan:
 1843         /*
 1844          * Scan zonelist, looking for a zone with enough free.
 1845          * See also cpuset_zone_allowed() comment in kernel/cpuset.c.
 1846          */
 1847         for_each_zone_zonelist_nodemask(zone, z, zonelist,
 1848                                                 high_zoneidx, nodemask) {
 1849                 if (IS_ENABLED(CONFIG_NUMA) && zlc_active &&
 1850                         !zlc_zone_worth_trying(zonelist, z, allowednodes))
 1851                                 continue;
 1852                 if ((alloc_flags & ALLOC_CPUSET) &&
 1853                         !cpuset_zone_allowed_softwall(zone, gfp_mask))
 1854                                 continue;
 1855                 /*
 1856                  * When allocating a page cache page for writing, we
 1857                  * want to get it from a zone that is within its dirty
 1858                  * limit, such that no single zone holds more than its
 1859                  * proportional share of globally allowed dirty pages.
 1860                  * The dirty limits take into account the zone's
 1861                  * lowmem reserves and high watermark so that kswapd
 1862                  * should be able to balance it without having to
 1863                  * write pages from its LRU list.
 1864                  *
 1865                  * This may look like it could increase pressure on
 1866                  * lower zones by failing allocations in higher zones
 1867                  * before they are full.  But the pages that do spill
 1868                  * over are limited as the lower zones are protected
 1869                  * by this very same mechanism.  It should not become
 1870                  * a practical burden to them.
 1871                  *
 1872                  * XXX: For now, allow allocations to potentially
 1873                  * exceed the per-zone dirty limit in the slowpath
 1874                  * (ALLOC_WMARK_LOW unset) before going into reclaim,
 1875                  * which is important when on a NUMA setup the allowed
 1876                  * zones are together not big enough to reach the
 1877                  * global limit.  The proper fix for these situations
 1878                  * will require awareness of zones in the
 1879                  * dirty-throttling and the flusher threads.
 1880                  */
 1881                 if ((alloc_flags & ALLOC_WMARK_LOW) &&
 1882                     (gfp_mask & __GFP_WRITE) && !zone_dirty_ok(zone))
 1883                         goto this_zone_full;
 1884 
 1885                 BUILD_BUG_ON(ALLOC_NO_WATERMARKS < NR_WMARK);
 1886                 if (!(alloc_flags & ALLOC_NO_WATERMARKS)) {
 1887                         unsigned long mark;
 1888                         int ret;
 1889 
 1890                         mark = zone->watermark[alloc_flags & ALLOC_WMARK_MASK];
 1891                         if (zone_watermark_ok(zone, order, mark,
 1892                                     classzone_idx, alloc_flags))
 1893                                 goto try_this_zone;
 1894 
 1895                         if (IS_ENABLED(CONFIG_NUMA) &&
 1896                                         !did_zlc_setup && nr_online_nodes > 1) {
 1897                                 /*
 1898                                  * we do zlc_setup if there are multiple nodes
 1899                                  * and before considering the first zone allowed
 1900                                  * by the cpuset.
 1901                                  */
 1902                                 allowednodes = zlc_setup(zonelist, alloc_flags);
 1903                                 zlc_active = 1;
 1904                                 did_zlc_setup = 1;
 1905                         }
 1906 
 1907                         if (zone_reclaim_mode == 0 ||
 1908                             !zone_allows_reclaim(preferred_zone, zone))
 1909                                 goto this_zone_full;
 1910 
 1911                         /*
 1912                          * As we may have just activated ZLC, check if the first
 1913                          * eligible zone has failed zone_reclaim recently.
 1914                          */
 1915                         if (IS_ENABLED(CONFIG_NUMA) && zlc_active &&
 1916                                 !zlc_zone_worth_trying(zonelist, z, allowednodes))
 1917                                 continue;
 1918 
 1919                         ret = zone_reclaim(zone, gfp_mask, order);
 1920                         switch (ret) {
 1921                         case ZONE_RECLAIM_NOSCAN:
 1922                                 /* did not scan */
 1923                                 continue;
 1924                         case ZONE_RECLAIM_FULL:
 1925                                 /* scanned but unreclaimable */
 1926                                 continue;
 1927                         default:
 1928                                 /* did we reclaim enough */
 1929                                 if (!zone_watermark_ok(zone, order, mark,
 1930                                                 classzone_idx, alloc_flags))
 1931                                         goto this_zone_full;
 1932                         }
 1933                 }
 1934 
 1935 try_this_zone:
 1936                 page = buffered_rmqueue(preferred_zone, zone, order,
 1937                                                 gfp_mask, migratetype);
 1938                 if (page)
 1939                         break;
 1940 this_zone_full:
 1941                 if (IS_ENABLED(CONFIG_NUMA))
 1942                         zlc_mark_zone_full(zonelist, z);
 1943         }
 1944 
 1945         if (unlikely(IS_ENABLED(CONFIG_NUMA) && page == NULL && zlc_active)) {
 1946                 /* Disable zlc cache for second zonelist scan */
 1947                 zlc_active = 0;
 1948                 goto zonelist_scan;
 1949         }
 1950 
 1951         if (page)
 1952                 /*
 1953                  * page->pfmemalloc is set when ALLOC_NO_WATERMARKS was
 1954                  * necessary to allocate the page. The expectation is
 1955                  * that the caller is taking steps that will free more
 1956                  * memory. The caller should avoid the page being used
 1957                  * for !PFMEMALLOC purposes.
 1958                  */
 1959                 page->pfmemalloc = !!(alloc_flags & ALLOC_NO_WATERMARKS);
 1960 
 1961         return page;
 1962 }
 1963 
 1964 /*
 1965  * Large machines with many possible nodes should not always dump per-node
 1966  * meminfo in irq context.
 1967  */
 1968 static inline bool should_suppress_show_mem(void)
 1969 {
 1970         bool ret = false;
 1971 
 1972 #if NODES_SHIFT > 8
 1973         ret = in_interrupt();
 1974 #endif
 1975         return ret;
 1976 }
 1977 
 1978 static DEFINE_RATELIMIT_STATE(nopage_rs,
 1979                 DEFAULT_RATELIMIT_INTERVAL,
 1980                 DEFAULT_RATELIMIT_BURST);
 1981 
 1982 void warn_alloc_failed(gfp_t gfp_mask, int order, const char *fmt, ...)
 1983 {
 1984         unsigned int filter = SHOW_MEM_FILTER_NODES;
 1985 
 1986         if ((gfp_mask & __GFP_NOWARN) || !__ratelimit(&nopage_rs) ||
 1987             debug_guardpage_minorder() > 0)
 1988                 return;
 1989 
 1990         /*
 1991          * This documents exceptions given to allocations in certain
 1992          * contexts that are allowed to allocate outside current's set
 1993          * of allowed nodes.
 1994          */
 1995         if (!(gfp_mask & __GFP_NOMEMALLOC))
 1996                 if (test_thread_flag(TIF_MEMDIE) ||
 1997                     (current->flags & (PF_MEMALLOC | PF_EXITING)))
 1998                         filter &= ~SHOW_MEM_FILTER_NODES;
 1999         if (in_interrupt() || !(gfp_mask & __GFP_WAIT))
 2000                 filter &= ~SHOW_MEM_FILTER_NODES;
 2001 
 2002         if (fmt) {
 2003                 struct va_format vaf;
 2004                 va_list args;
 2005 
 2006                 va_start(args, fmt);
 2007 
 2008                 vaf.fmt = fmt;
 2009                 vaf.va = &args;
 2010 
 2011                 pr_warn("%pV", &vaf);
 2012 
 2013                 va_end(args);
 2014         }
 2015 
 2016         pr_warn("%s: page allocation failure: order:%d, mode:0x%x\n",
 2017                 current->comm, order, gfp_mask);
 2018 
 2019         dump_stack();
 2020         if (!should_suppress_show_mem())
 2021                 show_mem(filter);
 2022 }
 2023 
 2024 static inline int
 2025 should_alloc_retry(gfp_t gfp_mask, unsigned int order,
 2026                                 unsigned long did_some_progress,
 2027                                 unsigned long pages_reclaimed)
 2028 {
 2029         /* Do not loop if specifically requested */
 2030         if (gfp_mask & __GFP_NORETRY)
 2031                 return 0;
 2032 
 2033         /* Always retry if specifically requested */
 2034         if (gfp_mask & __GFP_NOFAIL)
 2035                 return 1;
 2036 
 2037         /*
 2038          * Suspend converts GFP_KERNEL to __GFP_WAIT which can prevent reclaim
 2039          * making forward progress without invoking OOM. Suspend also disables
 2040          * storage devices so kswapd will not help. Bail if we are suspending.
 2041          */
 2042         if (!did_some_progress && pm_suspended_storage())
 2043                 return 0;
 2044 
 2045         /*
 2046          * In this implementation, order <= PAGE_ALLOC_COSTLY_ORDER
 2047          * means __GFP_NOFAIL, but that may not be true in other
 2048          * implementations.
 2049          */
 2050         if (order <= PAGE_ALLOC_COSTLY_ORDER)
 2051                 return 1;
 2052 
 2053         /*
 2054          * For order > PAGE_ALLOC_COSTLY_ORDER, if __GFP_REPEAT is
 2055          * specified, then we retry until we no longer reclaim any pages
 2056          * (above), or we've reclaimed an order of pages at least as
 2057          * large as the allocation's order. In both cases, if the
 2058          * allocation still fails, we stop retrying.
 2059          */
 2060         if (gfp_mask & __GFP_REPEAT && pages_reclaimed < (1 << order))
 2061                 return 1;
 2062 
 2063         return 0;
 2064 }
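      /*
       * Worked example (hypothetical requests): an order-4 allocation with
       * __GFP_REPEAT is above PAGE_ALLOC_COSTLY_ORDER, so this function
       * keeps asking for retries only until at least 1 << 4 = 16 pages have
       * been reclaimed in total; an order-1 GFP_KERNEL allocation, by
       * contrast, hits the costly-order clause above and keeps being retried
       * (unless storage is suspended and reclaim made no progress).
       */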
 2065 
 2066 static inline struct page *
 2067 __alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order,
 2068         struct zonelist *zonelist, enum zone_type high_zoneidx,
 2069         nodemask_t *nodemask, struct zone *preferred_zone,
 2070         int migratetype)
 2071 {
 2072         struct page *page;
 2073 
 2074         /* Acquire the OOM killer lock for the zones in zonelist */
 2075         if (!try_set_zonelist_oom(zonelist, gfp_mask)) {
 2076                 schedule_timeout_uninterruptible(1);
 2077                 return NULL;
 2078         }
 2079 
 2080         /*
 2081          * Go through the zonelist yet one more time, keep very high watermark
 2082          * here, this is only to catch a parallel oom killing, we must fail if
 2083          * we're still under heavy pressure.
 2084          */
 2085         page = get_page_from_freelist(gfp_mask|__GFP_HARDWALL, nodemask,
 2086                 order, zonelist, high_zoneidx,
 2087                 ALLOC_WMARK_HIGH|ALLOC_CPUSET,
 2088                 preferred_zone, migratetype);
 2089         if (page)
 2090                 goto out;
 2091 
 2092         if (!(gfp_mask & __GFP_NOFAIL)) {
 2093                 /* The OOM killer will not help higher order allocs */
 2094                 if (order > PAGE_ALLOC_COSTLY_ORDER)
 2095                         goto out;
 2096                 /* The OOM killer does not needlessly kill tasks for lowmem */
 2097                 if (high_zoneidx < ZONE_NORMAL)
 2098                         goto out;
 2099                 /*
 2100                  * GFP_THISNODE contains __GFP_NORETRY and we never hit this.
 2101                  * Sanity check for bare calls of __GFP_THISNODE, not real OOM.
 2102                  * The caller should handle page allocation failure by itself if
 2103                  * it specifies __GFP_THISNODE.
 2104                  * Note: Hugepage uses it but will hit PAGE_ALLOC_COSTLY_ORDER.
 2105                  */
 2106                 if (gfp_mask & __GFP_THISNODE)
 2107                         goto out;
 2108         }
 2109         /* Exhausted what can be done so it's blamo time */
 2110         out_of_memory(zonelist, gfp_mask, order, nodemask, false);
 2111 
 2112 out:
 2113         clear_zonelist_oom(zonelist, gfp_mask);
 2114         return page;
 2115 }
 2116 
 2117 #ifdef CONFIG_COMPACTION
 2118 /* Try memory compaction for high-order allocations before reclaim */
 2119 static struct page *
 2120 __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
 2121         struct zonelist *zonelist, enum zone_type high_zoneidx,
 2122         nodemask_t *nodemask, int alloc_flags, struct zone *preferred_zone,
 2123         int migratetype, bool sync_migration,
 2124         bool *contended_compaction, bool *deferred_compaction,
 2125         unsigned long *did_some_progress)
 2126 {
 2127         if (!order)
 2128                 return NULL;
 2129 
 2130         if (compaction_deferred(preferred_zone, order)) {
 2131                 *deferred_compaction = true;
 2132                 return NULL;
 2133         }
 2134 
 2135         current->flags |= PF_MEMALLOC;
 2136         *did_some_progress = try_to_compact_pages(zonelist, order, gfp_mask,
 2137                                                 nodemask, sync_migration,
 2138                                                 contended_compaction);
 2139         current->flags &= ~PF_MEMALLOC;
 2140 
 2141         if (*did_some_progress != COMPACT_SKIPPED) {
 2142                 struct page *page;
 2143 
 2144                 /* Page migration frees to the PCP lists but we want merging */
 2145                 drain_pages(get_cpu());
 2146                 put_cpu();
 2147 
 2148                 page = get_page_from_freelist(gfp_mask, nodemask,
 2149                                 order, zonelist, high_zoneidx,
 2150                                 alloc_flags & ~ALLOC_NO_WATERMARKS,
 2151                                 preferred_zone, migratetype);
 2152                 if (page) {
 2153                         preferred_zone->compact_blockskip_flush = false;
 2154                         preferred_zone->compact_considered = 0;
 2155                         preferred_zone->compact_defer_shift = 0;
 2156                         if (order >= preferred_zone->compact_order_failed)
 2157                                 preferred_zone->compact_order_failed = order + 1;
 2158                         count_vm_event(COMPACTSUCCESS);
 2159                         return page;
 2160                 }
 2161 
 2162                 /*
 2163                  * It's bad if compaction run occurs and fails.
 2164                  * The most likely reason is that pages exist,
 2165                  * but not enough to satisfy watermarks.
 2166                  */
 2167                 count_vm_event(COMPACTFAIL);
 2168 
 2169                 /*
 2170                  * As async compaction considers a subset of pageblocks, only
 2171                  * defer if the failure was a sync compaction failure.
 2172                  */
 2173                 if (sync_migration)
 2174                         defer_compaction(preferred_zone, order);
 2175 
 2176                 cond_resched();
 2177         }
 2178 
 2179         return NULL;
 2180 }
 2181 #else
 2182 static inline struct page *
 2183 __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
 2184         struct zonelist *zonelist, enum zone_type high_zoneidx,
 2185         nodemask_t *nodemask, int alloc_flags, struct zone *preferred_zone,
 2186         int migratetype, bool sync_migration,
 2187         bool *contended_compaction, bool *deferred_compaction,
 2188         unsigned long *did_some_progress)
 2189 {
 2190         return NULL;
 2191 }
 2192 #endif /* CONFIG_COMPACTION */
 2193 
 2194 /* Perform direct synchronous page reclaim */
 2195 static int
 2196 __perform_reclaim(gfp_t gfp_mask, unsigned int order, struct zonelist *zonelist,
 2197                   nodemask_t *nodemask)
 2198 {
 2199         struct reclaim_state reclaim_state;
 2200         int progress;
 2201 
 2202         cond_resched();
 2203 
 2204         /* We now go into synchronous reclaim */
 2205         cpuset_memory_pressure_bump();
 2206         current->flags |= PF_MEMALLOC;
 2207         lockdep_set_current_reclaim_state(gfp_mask);
 2208         reclaim_state.reclaimed_slab = 0;
 2209         current->reclaim_state = &reclaim_state;
 2210 
 2211         progress = try_to_free_pages(zonelist, order, gfp_mask, nodemask);
 2212 
 2213         current->reclaim_state = NULL;
 2214         lockdep_clear_current_reclaim_state();
 2215         current->flags &= ~PF_MEMALLOC;
 2216 
 2217         cond_resched();
 2218 
 2219         return progress;
 2220 }
 2221 
 2222 /* The really slow allocator path where we enter direct reclaim */
 2223 static inline struct page *
 2224 __alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order,
 2225         struct zonelist *zonelist, enum zone_type high_zoneidx,
 2226         nodemask_t *nodemask, int alloc_flags, struct zone *preferred_zone,
 2227         int migratetype, unsigned long *did_some_progress)
 2228 {
 2229         struct page *page = NULL;
 2230         bool drained = false;
 2231 
 2232         *did_some_progress = __perform_reclaim(gfp_mask, order, zonelist,
 2233                                                nodemask);
 2234         if (unlikely(!(*did_some_progress)))
 2235                 return NULL;
 2236 
 2237         /* After successful reclaim, reconsider all zones for allocation */
 2238         if (IS_ENABLED(CONFIG_NUMA))
 2239                 zlc_clear_zones_full(zonelist);
 2240 
 2241 retry:
 2242         page = get_page_from_freelist(gfp_mask, nodemask, order,
 2243                                         zonelist, high_zoneidx,
 2244                                         alloc_flags & ~ALLOC_NO_WATERMARKS,
 2245                                         preferred_zone, migratetype);
 2246 
 2247         /*
 2248          * If an allocation failed after direct reclaim, it could be because
 2249          * pages are pinned on the per-cpu lists. Drain them and try again
 2250          */
 2251         if (!page && !drained) {
 2252                 drain_all_pages();
 2253                 drained = true;
 2254                 goto retry;
 2255         }
 2256 
 2257         return page;
 2258 }
 2259 
 2260 /*
 2261  * This is called in the allocator slow-path if the allocation request is of
 2262  * sufficient urgency to ignore watermarks and take other desperate measures
 2263  */
 2264 static inline struct page *
 2265 __alloc_pages_high_priority(gfp_t gfp_mask, unsigned int order,
 2266         struct zonelist *zonelist, enum zone_type high_zoneidx,
 2267         nodemask_t *nodemask, struct zone *preferred_zone,
 2268         int migratetype)
 2269 {
 2270         struct page *page;
 2271 
 2272         do {
 2273                 page = get_page_from_freelist(gfp_mask, nodemask, order,
 2274                         zonelist, high_zoneidx, ALLOC_NO_WATERMARKS,
 2275                         preferred_zone, migratetype);
 2276 
 2277                 if (!page && gfp_mask & __GFP_NOFAIL)
 2278                         wait_iff_congested(preferred_zone, BLK_RW_ASYNC, HZ/50);
 2279         } while (!page && (gfp_mask & __GFP_NOFAIL));
 2280 
 2281         return page;
 2282 }
 2283 
 2284 static inline
 2285 void wake_all_kswapd(unsigned int order, struct zonelist *zonelist,
 2286                                                 enum zone_type high_zoneidx,
 2287                                                 enum zone_type classzone_idx)
 2288 {
 2289         struct zoneref *z;
 2290         struct zone *zone;
 2291 
 2292         for_each_zone_zonelist(zone, z, zonelist, high_zoneidx)
 2293                 wakeup_kswapd(zone, order, classzone_idx);
 2294 }
 2295 
 2296 static inline int
 2297 gfp_to_alloc_flags(gfp_t gfp_mask)
 2298 {
 2299         int alloc_flags = ALLOC_WMARK_MIN | ALLOC_CPUSET;
 2300         const gfp_t wait = gfp_mask & __GFP_WAIT;
 2301 
 2302         /* __GFP_HIGH is assumed to be the same as ALLOC_HIGH to save a branch. */
 2303         BUILD_BUG_ON(__GFP_HIGH != (__force gfp_t) ALLOC_HIGH);
 2304 
 2305         /*
 2306          * The caller may dip into page reserves a bit more if the caller
 2307          * cannot run direct reclaim, or if the caller has realtime scheduling
 2308          * policy or is asking for __GFP_HIGH memory.  GFP_ATOMIC requests will
 2309          * set both ALLOC_HARDER (!wait) and ALLOC_HIGH (__GFP_HIGH).
 2310          */
 2311         alloc_flags |= (__force int) (gfp_mask & __GFP_HIGH);
 2312 
 2313         if (!wait) {
 2314                 /*
 2315                  * Not worth trying to allocate harder for
 2316                  * __GFP_NOMEMALLOC even if it can't schedule.
 2317                  */
 2318                 if (!(gfp_mask & __GFP_NOMEMALLOC))
 2319                         alloc_flags |= ALLOC_HARDER;
 2320                 /*
 2321                  * Ignore cpuset if GFP_ATOMIC (!wait) rather than fail alloc.
 2322                  * See also cpuset_zone_allowed() comment in kernel/cpuset.c.
 2323                  */
 2324                 alloc_flags &= ~ALLOC_CPUSET;
 2325         } else if (unlikely(rt_task(current)) && !in_interrupt())
 2326                 alloc_flags |= ALLOC_HARDER;
 2327 
 2328         if (likely(!(gfp_mask & __GFP_NOMEMALLOC))) {
 2329                 if (gfp_mask & __GFP_MEMALLOC)
 2330                         alloc_flags |= ALLOC_NO_WATERMARKS;
 2331                 else if (in_serving_softirq() && (current->flags & PF_MEMALLOC))
 2332                         alloc_flags |= ALLOC_NO_WATERMARKS;
 2333                 else if (!in_interrupt() &&
 2334                                 ((current->flags & PF_MEMALLOC) ||
 2335                                  unlikely(test_thread_flag(TIF_MEMDIE))))
 2336                         alloc_flags |= ALLOC_NO_WATERMARKS;
 2337         }
 2338 #ifdef CONFIG_CMA
 2339         if (allocflags_to_migratetype(gfp_mask) == MIGRATE_MOVABLE)
 2340                 alloc_flags |= ALLOC_CMA;
 2341 #endif
 2342         return alloc_flags;
 2343 }
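      /*
       * Worked example derived from the code above: a GFP_ATOMIC request
       * has __GFP_HIGH set and __GFP_WAIT clear, so it starts from
       * ALLOC_WMARK_MIN | ALLOC_CPUSET, picks up ALLOC_HIGH from __GFP_HIGH,
       * gains ALLOC_HARDER because it cannot wait, and has ALLOC_CPUSET
       * cleared -- ending up as ALLOC_WMARK_MIN | ALLOC_HIGH | ALLOC_HARDER.
       */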
 2344 
 2345 bool gfp_pfmemalloc_allowed(gfp_t gfp_mask)
 2346 {
 2347         return !!(gfp_to_alloc_flags(gfp_mask) & ALLOC_NO_WATERMARKS);
 2348 }
 2349 
 2350 static inline struct page *
 2351 __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
 2352         struct zonelist *zonelist, enum zone_type high_zoneidx,
 2353         nodemask_t *nodemask, struct zone *preferred_zone,
 2354         int migratetype)
 2355 {
 2356         const gfp_t wait = gfp_mask & __GFP_WAIT;
 2357         struct page *page = NULL;
 2358         int alloc_flags;
 2359         unsigned long pages_reclaimed = 0;
 2360         unsigned long did_some_progress;
 2361         bool sync_migration = false;
 2362         bool deferred_compaction = false;
 2363         bool contended_compaction = false;
 2364 
 2365         /*
 2366          * In the slowpath, we sanity check order to avoid ever trying to
 2367          * reclaim >= MAX_ORDER areas which will never succeed. Callers may
 2368          * be using allocators in order of preference for an area that is
 2369          * too large.
 2370          */
 2371         if (order >= MAX_ORDER) {
 2372                 WARN_ON_ONCE(!(gfp_mask & __GFP_NOWARN));
 2373                 return NULL;
 2374         }
 2375 
 2376         /*
 2377          * GFP_THISNODE (meaning __GFP_THISNODE, __GFP_NORETRY and
 2378          * __GFP_NOWARN set) should not cause reclaim since the subsystem
 2379          * (f.e. slab) using GFP_THISNODE may choose to trigger reclaim
 2380          * using a larger set of nodes after it has established that the
 2381          * allowed per node queues are empty and that nodes are
 2382          * over allocated.
 2383          */
 2384         if (IS_ENABLED(CONFIG_NUMA) &&
 2385                         (gfp_mask & GFP_THISNODE) == GFP_THISNODE)
 2386                 goto nopage;
 2387 
 2388 restart:
 2389         if (!(gfp_mask & __GFP_NO_KSWAPD))
 2390                 wake_all_kswapd(order, zonelist, high_zoneidx,
 2391                                                 zone_idx(preferred_zone));
 2392 
 2393         /*
 2394          * OK, we're below the kswapd watermark and have kicked background
 2395          * reclaim. Now things get more complex, so set up alloc_flags according
 2396          * to how we want to proceed.
 2397          */
 2398         alloc_flags = gfp_to_alloc_flags(gfp_mask);
 2399 
 2400         /*
 2401          * Find the true preferred zone if the allocation is unconstrained by
 2402          * cpusets.
 2403          */
 2404         if (!(alloc_flags & ALLOC_CPUSET) && !nodemask)
 2405                 first_zones_zonelist(zonelist, high_zoneidx, NULL,
 2406                                         &preferred_zone);
 2407 
 2408 rebalance:
 2409         /* This is the last chance, in general, before the goto nopage. */
 2410         page = get_page_from_freelist(gfp_mask, nodemask, order, zonelist,
 2411                         high_zoneidx, alloc_flags & ~ALLOC_NO_WATERMARKS,
 2412                         preferred_zone, migratetype);
 2413         if (page)
 2414                 goto got_pg;
 2415 
 2416         /* Allocate without watermarks if the context allows */
 2417         if (alloc_flags & ALLOC_NO_WATERMARKS) {
 2418                 /*
 2419                  * Ignore mempolicies if ALLOC_NO_WATERMARKS on the grounds
 2420                  * the allocation is high priority and these types of
 2421                  * allocations are system rather than user oriented.
 2422                  */
 2423                 zonelist = node_zonelist(numa_node_id(), gfp_mask);
 2424 
 2425                 page = __alloc_pages_high_priority(gfp_mask, order,
 2426                                 zonelist, high_zoneidx, nodemask,
 2427                                 preferred_zone, migratetype);
 2428                 if (page) {
 2429                         goto got_pg;
 2430                 }
 2431         }
 2432 
 2433         /* Atomic allocations - we can't balance anything */
 2434         if (!wait)
 2435                 goto nopage;
 2436 
 2437         /* Avoid recursion of direct reclaim */
 2438         if (current->flags & PF_MEMALLOC)
 2439                 goto nopage;
 2440 
 2441         /* Avoid allocations with no watermarks from looping endlessly */
 2442         if (test_thread_flag(TIF_MEMDIE) && !(gfp_mask & __GFP_NOFAIL))
 2443                 goto nopage;
 2444 
 2445         /*
 2446          * Try direct compaction. The first pass is asynchronous. Subsequent
 2447          * attempts after direct reclaim are synchronous
 2448          */
 2449         page = __alloc_pages_direct_compact(gfp_mask, order,
 2450                                         zonelist, high_zoneidx,
 2451                                         nodemask,
 2452                                         alloc_flags, preferred_zone,
 2453                                         migratetype, sync_migration,
 2454                                         &contended_compaction,
 2455                                         &deferred_compaction,
 2456                                         &did_some_progress);
 2457         if (page)
 2458                 goto got_pg;
 2459         sync_migration = true;
 2460 
 2461         /*
 2462          * If compaction is deferred for high-order allocations, it is because
 2463  * sync compaction recently failed. If this is the case and the caller
 2464          * requested a movable allocation that does not heavily disrupt the
 2465          * system then fail the allocation instead of entering direct reclaim.
 2466          */
 2467         if ((deferred_compaction || contended_compaction) &&
 2468                                                 (gfp_mask & __GFP_NO_KSWAPD))
 2469                 goto nopage;
 2470 
 2471         /* Try direct reclaim and then allocating */
 2472         page = __alloc_pages_direct_reclaim(gfp_mask, order,
 2473                                         zonelist, high_zoneidx,
 2474                                         nodemask,
 2475                                         alloc_flags, preferred_zone,
 2476                                         migratetype, &did_some_progress);
 2477         if (page)
 2478                 goto got_pg;
 2479 
 2480         /*
 2481          * If we failed to make any progress reclaiming, then we are
 2482          * running out of options and have to consider going OOM
 2483          */
 2484         if (!did_some_progress) {
 2485                 if ((gfp_mask & __GFP_FS) && !(gfp_mask & __GFP_NORETRY)) {
 2486                         if (oom_killer_disabled)
 2487                                 goto nopage;
 2488                         /* Coredumps can quickly deplete all memory reserves */
 2489                         if ((current->flags & PF_DUMPCORE) &&
 2490                             !(gfp_mask & __GFP_NOFAIL))
 2491                                 goto nopage;
 2492                         page = __alloc_pages_may_oom(gfp_mask, order,
 2493                                         zonelist, high_zoneidx,
 2494                                         nodemask, preferred_zone,
 2495                                         migratetype);
 2496                         if (page)
 2497                                 goto got_pg;
 2498 
 2499                         if (!(gfp_mask & __GFP_NOFAIL)) {
 2500                                 /*
 2501                                  * The oom killer is not called for high-order
 2502                                  * allocations that may fail, so if no progress
 2503                                  * is being made, there are no other options and
 2504                                  * retrying is unlikely to help.
 2505                                  */
 2506                                 if (order > PAGE_ALLOC_COSTLY_ORDER)
 2507                                         goto nopage;
 2508                                 /*
 2509                                  * The oom killer is not called for lowmem
 2510                                  * allocations to prevent needlessly killing
 2511                                  * innocent tasks.
 2512                                  */
 2513                                 if (high_zoneidx < ZONE_NORMAL)
 2514                                         goto nopage;
 2515                         }
 2516 
 2517                         goto restart;
 2518                 }
 2519         }
 2520 
 2521         /* Check if we should retry the allocation */
 2522         pages_reclaimed += did_some_progress;
 2523         if (should_alloc_retry(gfp_mask, order, did_some_progress,
 2524                                                 pages_reclaimed)) {
 2525                 /* Wait for some write requests to complete then retry */
 2526                 wait_iff_congested(preferred_zone, BLK_RW_ASYNC, HZ/50);
 2527                 goto rebalance;
 2528         } else {
 2529                 /*
 2530                  * High-order allocations do not necessarily loop after direct
 2531                  * reclaim, and reclaim/compaction depends on compaction being
 2532                  * called after reclaim, so call it directly here if necessary.
 2533                  */
 2534                 page = __alloc_pages_direct_compact(gfp_mask, order,
 2535                                         zonelist, high_zoneidx,
 2536                                         nodemask,
 2537                                         alloc_flags, preferred_zone,
 2538                                         migratetype, sync_migration,
 2539                                         &contended_compaction,
 2540                                         &deferred_compaction,
 2541                                         &did_some_progress);
 2542                 if (page)
 2543                         goto got_pg;
 2544         }
 2545 
 2546 nopage:
 2547         warn_alloc_failed(gfp_mask, order, NULL);
 2548         return page;
 2549 got_pg:
 2550         if (kmemcheck_enabled)
 2551                 kmemcheck_pagealloc_alloc(page, order, gfp_mask);
 2552 
 2553         return page;
 2554 }
 2555 
 2556 /*
 2557  * This is the 'heart' of the zoned buddy allocator.
 2558  */
 2559 struct page *
 2560 __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
 2561                         struct zonelist *zonelist, nodemask_t *nodemask)
 2562 {
 2563         enum zone_type high_zoneidx = gfp_zone(gfp_mask);
 2564         struct zone *preferred_zone;
 2565         struct page *page = NULL;
 2566         int migratetype = allocflags_to_migratetype(gfp_mask);
 2567         unsigned int cpuset_mems_cookie;
 2568         int alloc_flags = ALLOC_WMARK_LOW|ALLOC_CPUSET;
 2569         struct mem_cgroup *memcg = NULL;
 2570 
 2571         gfp_mask &= gfp_allowed_mask;
 2572 
 2573         lockdep_trace_alloc(gfp_mask);
 2574 
 2575         might_sleep_if(gfp_mask & __GFP_WAIT);
 2576 
 2577         if (should_fail_alloc_page(gfp_mask, order))
 2578                 return NULL;
 2579 
 2580         /*
 2581          * Check the zones suitable for the gfp_mask contain at least one
 2582          * valid zone. It's possible to have an empty zonelist as a result
 2583          * of GFP_THISNODE and a memoryless node
 2584          */
 2585         if (unlikely(!zonelist->_zonerefs->zone))
 2586                 return NULL;
 2587 
 2588         /*
 2589          * Will only have any effect when __GFP_KMEMCG is set.  This is
 2590          * verified in the (always inline) callee
 2591          */
 2592         if (!memcg_kmem_newpage_charge(gfp_mask, &memcg, order))
 2593                 return NULL;
 2594 
 2595 retry_cpuset:
 2596         cpuset_mems_cookie = get_mems_allowed();
 2597 
 2598         /* The preferred zone is used for statistics later */
 2599         first_zones_zonelist(zonelist, high_zoneidx,
 2600                                 nodemask ? : &cpuset_current_mems_allowed,
 2601                                 &preferred_zone);
 2602         if (!preferred_zone)
 2603                 goto out;
 2604 
 2605 #ifdef CONFIG_CMA
 2606         if (allocflags_to_migratetype(gfp_mask) == MIGRATE_MOVABLE)
 2607                 alloc_flags |= ALLOC_CMA;
 2608 #endif
 2609         /* First allocation attempt */
 2610         page = get_page_from_freelist(gfp_mask|__GFP_HARDWALL, nodemask, order,
 2611                         zonelist, high_zoneidx, alloc_flags,
 2612                         preferred_zone, migratetype);
 2613         if (unlikely(!page))
 2614                 page = __alloc_pages_slowpath(gfp_mask, order,
 2615                                 zonelist, high_zoneidx, nodemask,
 2616                                 preferred_zone, migratetype);
 2617 
 2618         trace_mm_page_alloc(page, order, gfp_mask, migratetype);
 2619 
 2620 out:
 2621         /*
 2622          * When updating a task's mems_allowed, it is possible to race with
 2623          * parallel threads in such a way that an allocation can fail while
 2624          * the mask is being updated. If a page allocation is about to fail,
 2625          * check if the cpuset changed during allocation and if so, retry.
 2626          */
 2627         if (unlikely(!put_mems_allowed(cpuset_mems_cookie) && !page))
 2628                 goto retry_cpuset;
 2629 
 2630         memcg_kmem_commit_charge(page, memcg, order);
 2631 
 2632         return page;
 2633 }
 2634 EXPORT_SYMBOL(__alloc_pages_nodemask);
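      /*
       * Illustrative sketch (not part of this file): most callers reach
       * __alloc_pages_nodemask() through the alloc_pages()/alloc_pages_node()
       * wrappers in gfp.h rather than calling it directly.  The function
       * below is hypothetical.
       */
      static void example_order0_alloc(void)
      {
              /* Order-0, may sleep: resolves to __alloc_pages_nodemask(). */
              struct page *page = alloc_pages(GFP_KERNEL, 0);

              if (!page)
                      return;

              /* ... use the page via page_address(page) ... */

              __free_pages(page, 0);
      }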
 2635 
 2636 /*
 2637  * Common helper functions.
 2638  */
 2639 unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order)
 2640 {
 2641         struct page *page;
 2642 
 2643         /*
 2644          * __get_free_pages() returns a 32-bit address, which cannot represent
 2645          * a highmem page
 2646          */
 2647         VM_BUG_ON((gfp_mask & __GFP_HIGHMEM) != 0);
 2648 
 2649         page = alloc_pages(gfp_mask, order);
 2650         if (!page)
 2651                 return 0;
 2652         return (unsigned long) page_address(page);
 2653 }
 2654 EXPORT_SYMBOL(__get_free_pages);
 2655 
 2656 unsigned long get_zeroed_page(gfp_t gfp_mask)
 2657 {
 2658         return __get_free_pages(gfp_mask | __GFP_ZERO, 0);
 2659 }
 2660 EXPORT_SYMBOL(get_zeroed_page);
 2661 
 2662 void __free_pages(struct page *page, unsigned int order)
 2663 {
 2664         if (put_page_testzero(page)) {
 2665                 if (order == 0)
 2666                         free_hot_cold_page(page, 0);
 2667                 else
 2668                         __free_pages_ok(page, order);
 2669         }
 2670 }
 2671 
 2672 EXPORT_SYMBOL(__free_pages);
 2673 
 2674 void free_pages(unsigned long addr, unsigned int order)
 2675 {
 2676         if (addr != 0) {
 2677                 VM_BUG_ON(!virt_addr_valid((void *)addr));
 2678                 __free_pages(virt_to_page((void *)addr), order);
 2679         }
 2680 }
 2681 
 2682 EXPORT_SYMBOL(free_pages);
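      /*
       * Illustrative sketch (not part of this file): the address-based
       * helpers above pair up as shown; the function name is hypothetical.
       */
      static void example_address_helpers(void)
      {
              unsigned long buf  = __get_free_pages(GFP_KERNEL, 1); /* 2 pages */
              unsigned long zero = get_zeroed_page(GFP_KERNEL);     /* 1 page  */

              if (buf)
                      free_pages(buf, 1);  /* order must match the allocation */
              if (zero)
                      free_page(zero);     /* shorthand for free_pages(addr, 0) */
      }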
 2683 
 2684 /*
 2685  * __free_memcg_kmem_pages and free_memcg_kmem_pages will free
 2686  * pages allocated with __GFP_KMEMCG.
 2687  *
 2688  * Those pages are accounted to a particular memcg, embedded in the
 2689  * corresponding page_cgroup. To avoid adding a hit in the allocator to search
 2690  * for that information only to find out that it is NULL for users who have no
 2691  * interest in that whatsoever, we provide these functions.
 2692  *
 2693  * The caller knows better which flags it relies on.
 2694  */
 2695 void __free_memcg_kmem_pages(struct page *page, unsigned int order)
 2696 {
 2697         memcg_kmem_uncharge_pages(page, order);
 2698         __free_pages(page, order);
 2699 }
 2700 
 2701 void free_memcg_kmem_pages(unsigned long addr, unsigned int order)
 2702 {
 2703         if (addr != 0) {
 2704                 VM_BUG_ON(!virt_addr_valid((void *)addr));
 2705                 __free_memcg_kmem_pages(virt_to_page((void *)addr), order);
 2706         }
 2707 }
 2708 
 2709 static void *make_alloc_exact(unsigned long addr, unsigned order, size_t size)
 2710 {
 2711         if (addr) {
 2712                 unsigned long alloc_end = addr + (PAGE_SIZE << order);
 2713                 unsigned long used = addr + PAGE_ALIGN(size);
 2714 
 2715                 split_page(virt_to_page((void *)addr), order);
 2716                 while (used < alloc_end) {
 2717                         free_page(used);
 2718                         used += PAGE_SIZE;
 2719                 }
 2720         }
 2721         return (void *)addr;
 2722 }
 2723 
 2724 /**
 2725  * alloc_pages_exact - allocate an exact number of physically-contiguous pages.
 2726  * @size: the number of bytes to allocate
 2727  * @gfp_mask: GFP flags for the allocation
 2728  *
 2729  * This function is similar to alloc_pages(), except that it allocates the
 2730  * minimum number of pages to satisfy the request.  alloc_pages() can only
 2731  * allocate memory in power-of-two pages.
 2732  *
 2733  * This function is also limited by MAX_ORDER.
 2734  *
 2735  * Memory allocated by this function must be released by free_pages_exact().
 2736  */
 2737 void *alloc_pages_exact(size_t size, gfp_t gfp_mask)
 2738 {
 2739         unsigned int order = get_order(size);
 2740         unsigned long addr;
 2741 
 2742         addr = __get_free_pages(gfp_mask, order);
 2743         return make_alloc_exact(addr, order, size);
 2744 }
 2745 EXPORT_SYMBOL(alloc_pages_exact);
 2746 
 2747 /**
 2748  * alloc_pages_exact_nid - allocate an exact number of physically-contiguous
 2749  *                         pages on a node.
 2750  * @nid: the preferred node ID where memory should be allocated
 2751  * @size: the number of bytes to allocate
 2752  * @gfp_mask: GFP flags for the allocation
 2753  *
 2754  * Like alloc_pages_exact(), but tries to allocate on node nid first before
 2755  * falling back.
 2756  * Note this is not alloc_pages_exact_node(), which allocates on a specific
 2757  * node but is not exact.
 2758  */
 2759 void *alloc_pages_exact_nid(int nid, size_t size, gfp_t gfp_mask)
 2760 {
 2761         unsigned order = get_order(size);
 2762         struct page *p = alloc_pages_node(nid, gfp_mask, order);
 2763         if (!p)
 2764                 return NULL;
 2765         return make_alloc_exact((unsigned long)page_address(p), order, size);
 2766 }
 2767 EXPORT_SYMBOL(alloc_pages_exact_nid);
 2768 
 2769 /**
 2770  * free_pages_exact - release memory allocated via alloc_pages_exact()
 2771  * @virt: the value returned by alloc_pages_exact.
 2772  * @size: size of allocation, same value as passed to alloc_pages_exact().
 2773  *
 2774  * Release the memory allocated by a previous call to alloc_pages_exact.
 2775  */
 2776 void free_pages_exact(void *virt, size_t size)
 2777 {
 2778         unsigned long addr = (unsigned long)virt;
 2779         unsigned long end = addr + PAGE_ALIGN(size);
 2780 
 2781         while (addr < end) {
 2782                 free_page(addr);
 2783                 addr += PAGE_SIZE;
 2784         }
 2785 }
 2786 EXPORT_SYMBOL(free_pages_exact);
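/*
 * [Editor's sketch -- not part of the original file; the size is a made-up
 * example.]  A request for 5 pages' worth of memory is rounded up by
 * alloc_pages_exact() to an order-3 (8 page) block, split by
 * make_alloc_exact(), and the 3 trailing pages are handed straight back to
 * the buddy allocator, so the caller holds exactly 5 pages.
 */
static int __maybe_unused example_alloc_pages_exact(void)
{
        size_t size = 5 * PAGE_SIZE;    /* not a power-of-two number of pages */
        void *buf;

        buf = alloc_pages_exact(size, GFP_KERNEL);
        if (!buf)
                return -ENOMEM;

        /* ... use buf ... */

        /* Must be released with the same size, not with free_pages()/an order. */
        free_pages_exact(buf, size);
        return 0;
}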
 2787 
 2788 static unsigned int nr_free_zone_pages(int offset)
 2789 {
 2790         struct zoneref *z;
 2791         struct zone *zone;
 2792 
 2793         /* Just pick one node, since fallback list is circular */
 2794         unsigned int sum = 0;
 2795 
 2796         struct zonelist *zonelist = node_zonelist(numa_node_id(), GFP_KERNEL);
 2797 
 2798         for_each_zone_zonelist(zone, z, zonelist, offset) {
 2799                 unsigned long size = zone->present_pages;
 2800                 unsigned long high = high_wmark_pages(zone);
 2801                 if (size > high)
 2802                         sum += size - high;
 2803         }
 2804 
 2805         return sum;
 2806 }
 2807 
 2808 /*
 2809  * Amount of free RAM allocatable within ZONE_DMA and ZONE_NORMAL
 2810  */
 2811 unsigned int nr_free_buffer_pages(void)
 2812 {
 2813         return nr_free_zone_pages(gfp_zone(GFP_USER));
 2814 }
 2815 EXPORT_SYMBOL_GPL(nr_free_buffer_pages);
 2816 
 2817 /*
 2818  * Amount of free RAM allocatable within all zones
 2819  */
 2820 unsigned int nr_free_pagecache_pages(void)
 2821 {
 2822         return nr_free_zone_pages(gfp_zone(GFP_HIGHUSER_MOVABLE));
 2823 }
 2824 
 2825 static inline void show_node(struct zone *zone)
 2826 {
 2827         if (IS_ENABLED(CONFIG_NUMA))
 2828                 printk("Node %d ", zone_to_nid(zone));
 2829 }
 2830 
 2831 void si_meminfo(struct sysinfo *val)
 2832 {
 2833         val->totalram = totalram_pages;
 2834         val->sharedram = 0;
 2835         val->freeram = global_page_state(NR_FREE_PAGES);
 2836         val->bufferram = nr_blockdev_pages();
 2837         val->totalhigh = totalhigh_pages;
 2838         val->freehigh = nr_free_highpages();
 2839         val->mem_unit = PAGE_SIZE;
 2840 }
 2841 
 2842 EXPORT_SYMBOL(si_meminfo);
 2843 
 2844 #ifdef CONFIG_NUMA
 2845 void si_meminfo_node(struct sysinfo *val, int nid)
 2846 {
 2847         pg_data_t *pgdat = NODE_DATA(nid);
 2848 
 2849         val->totalram = pgdat->node_present_pages;
 2850         val->freeram = node_page_state(nid, NR_FREE_PAGES);
 2851 #ifdef CONFIG_HIGHMEM
 2852         val->totalhigh = pgdat->node_zones[ZONE_HIGHMEM].present_pages;
 2853         val->freehigh = zone_page_state(&pgdat->node_zones[ZONE_HIGHMEM],
 2854                         NR_FREE_PAGES);
 2855 #else
 2856         val->totalhigh = 0;
 2857         val->freehigh = 0;
 2858 #endif
 2859         val->mem_unit = PAGE_SIZE;
 2860 }
 2861 #endif
 2862 
 2863 /*
 2864  * Determine whether the node should be displayed or not, depending on whether
 2865  * SHOW_MEM_FILTER_NODES was passed to show_free_areas().
 2866  */
 2867 bool skip_free_areas_node(unsigned int flags, int nid)
 2868 {
 2869         bool ret = false;
 2870         unsigned int cpuset_mems_cookie;
 2871 
 2872         if (!(flags & SHOW_MEM_FILTER_NODES))
 2873                 goto out;
 2874 
 2875         do {
 2876                 cpuset_mems_cookie = get_mems_allowed();
 2877                 ret = !node_isset(nid, cpuset_current_mems_allowed);
 2878         } while (!put_mems_allowed(cpuset_mems_cookie));
 2879 out:
 2880         return ret;
 2881 }
 2882 
 2883 #define K(x) ((x) << (PAGE_SHIFT-10))
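/*
 * [Editor's note, not in the original.]  K() converts a page count into
 * kilobytes: with 4 KB pages (PAGE_SHIFT == 12) it is x << 2, i.e. x * 4.
 */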
 2884 
 2885 static void show_migration_types(unsigned char type)
 2886 {
 2887         static const char types[MIGRATE_TYPES] = {
 2888                 [MIGRATE_UNMOVABLE]     = 'U',
 2889                 [MIGRATE_RECLAIMABLE]   = 'E',
 2890                 [MIGRATE_MOVABLE]       = 'M',
 2891                 [MIGRATE_RESERVE]       = 'R',
 2892 #ifdef CONFIG_CMA
 2893                 [MIGRATE_CMA]           = 'C',
 2894 #endif
 2895                 [MIGRATE_ISOLATE]       = 'I',
 2896         };
 2897         char tmp[MIGRATE_TYPES + 1];
 2898         char *p = tmp;
 2899         int i;
 2900 
 2901         for (i = 0; i < MIGRATE_TYPES; i++) {
 2902                 if (type & (1 << i))
 2903                         *p++ = types[i];
 2904         }
 2905 
 2906         *p = '\0';
 2907         printk("(%s) ", tmp);
 2908 }
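/*
 * [Editor's note, not in the original.]  In the per-order free-list dump
 * produced below, the letters printed by show_migration_types() decode as
 * U = unmovable, E = reclaimable, M = movable, R = reserve, C = CMA,
 * I = isolate.  A fragment such as "37*4kB (UEM)" therefore means 37 free
 * order-0 blocks spread over the unmovable, reclaimable and movable lists.
 */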
 2909 
 2910 /*
 2911  * Show the free area list (used by the Shift+Scroll Lock console dump and
 2912  * by out-of-memory and allocation-failure reports). For each zone the
 2913  * per-order free counts are printed, together with the migrate types that
 2914  * still have free blocks of that order. Nodes that are not allowed by
 2915  * current's cpuset are suppressed if SHOW_MEM_FILTER_NODES is passed.
 2916  */
 2917 void show_free_areas(unsigned int filter)
 2918 {
 2919         int cpu;
 2920         struct zone *zone;
 2921 
 2922         for_each_populated_zone(zone) {
 2923                 if (skip_free_areas_node(filter, zone_to_nid(zone)))
 2924                         continue;
 2925                 show_node(zone);
 2926                 printk("%s per-cpu:\n", zone->name);
 2927 
 2928                 for_each_online_cpu(cpu) {
 2929                         struct per_cpu_pageset *pageset;
 2930 
 2931                         pageset = per_cpu_ptr(zone->pageset, cpu);
 2932 
 2933                         printk("CPU %4d: hi:%5d, btch:%4d usd:%4d\n",
 2934                                cpu, pageset->pcp.high,
 2935                                pageset->pcp.batch, pageset->pcp.count);
 2936                 }
 2937         }
 2938 
 2939         printk("active_anon:%lu inactive_anon:%lu isolated_anon:%lu\n"
 2940                 " active_file:%lu inactive_file:%lu isolated_file:%lu\n"
 2941                 " unevictable:%lu"
 2942                 " dirty:%lu writeback:%lu unstable:%lu\n"
 2943                 " free:%lu slab_reclaimable:%lu slab_unreclaimable:%lu\n"
 2944                 " mapped:%lu shmem:%lu pagetables:%lu bounce:%lu\n"
 2945                 " free_cma:%lu\n",
 2946                 global_page_state(NR_ACTIVE_ANON),
 2947                 global_page_state(NR_INACTIVE_ANON),
 2948                 global_page_state(NR_ISOLATED_ANON),
 2949                 global_page_state(NR_ACTIVE_FILE),
 2950                 global_page_state(NR_INACTIVE_FILE),
 2951                 global_page_state(NR_ISOLATED_FILE),
 2952                 global_page_state(NR_UNEVICTABLE),
 2953                 global_page_state(NR_FILE_DIRTY),
 2954                 global_page_state(NR_WRITEBACK),
 2955                 global_page_state(NR_UNSTABLE_NFS),
 2956                 global_page_state(NR_FREE_PAGES),
 2957                 global_page_state(NR_SLAB_RECLAIMABLE),
 2958                 global_page_state(NR_SLAB_UNRECLAIMABLE),
 2959                 global_page_state(NR_FILE_MAPPED),
 2960                 global_page_state(NR_SHMEM),
 2961                 global_page_state(NR_PAGETABLE),
 2962                 global_page_state(NR_BOUNCE),
 2963                 global_page_state(NR_FREE_CMA_PAGES));
 2964 
 2965         for_each_populated_zone(zone) {
 2966                 int i;
 2967 
 2968                 if (skip_free_areas_node(filter, zone_to_nid(zone)))
 2969                         continue;
 2970                 show_node(zone);
 2971                 printk("%s"
 2972                         " free:%lukB"
 2973                         " min:%lukB"
 2974                         " low:%lukB"
 2975                         " high:%lukB"
 2976                         " active_anon:%lukB"
 2977                         " inactive_anon:%lukB"
 2978                         " active_file:%lukB"
 2979                         " inactive_file:%lukB"
 2980                         " unevictable:%lukB"
 2981                         " isolated(anon):%lukB"
 2982                         " isolated(file):%lukB"
 2983                         " present:%lukB"
 2984                         " managed:%lukB"
 2985                         " mlocked:%lukB"
 2986                         " dirty:%lukB"
 2987                         " writeback:%lukB"
 2988                         " mapped:%lukB"
 2989                         " shmem:%lukB"
 2990                         " slab_reclaimable:%lukB"
 2991                         " slab_unreclaimable:%lukB"
 2992                         " kernel_stack:%lukB"
 2993                         " pagetables:%lukB"
 2994                         " unstable:%lukB"
 2995                         " bounce:%lukB"
 2996                         " free_cma:%lukB"
 2997                         " writeback_tmp:%lukB"
 2998                         " pages_scanned:%lu"
 2999                         " all_unreclaimable? %s"
 3000                         "\n",
 3001                         zone->name,
 3002                         K(zone_page_state(zone, NR_FREE_PAGES)),
 3003                         K(min_wmark_pages(zone)),
 3004                         K(low_wmark_pages(zone)),
 3005                         K(high_wmark_pages(zone)),
 3006                         K(zone_page_state(zone, NR_ACTIVE_ANON)),
 3007                         K(zone_page_state(zone, NR_INACTIVE_ANON)),
 3008                         K(zone_page_state(zone, NR_ACTIVE_FILE)),
 3009                         K(zone_page_state(zone, NR_INACTIVE_FILE)),
 3010                         K(zone_page_state(zone, NR_UNEVICTABLE)),
 3011                         K(zone_page_state(zone, NR_ISOLATED_ANON)),
 3012                         K(zone_page_state(zone, NR_ISOLATED_FILE)),
 3013                         K(zone->present_pages),
 3014                         K(zone->managed_pages),
 3015                         K(zone_page_state(zone, NR_MLOCK)),
 3016                         K(zone_page_state(zone, NR_FILE_DIRTY)),
 3017                         K(zone_page_state(zone, NR_WRITEBACK)),
 3018                         K(zone_page_state(zone, NR_FILE_MAPPED)),
 3019                         K(zone_page_state(zone, NR_SHMEM)),
 3020                         K(zone_page_state(zone, NR_SLAB_RECLAIMABLE)),
 3021                         K(zone_page_state(zone, NR_SLAB_UNRECLAIMABLE)),
 3022                         zone_page_state(zone, NR_KERNEL_STACK) *
 3023                                 THREAD_SIZE / 1024,
 3024                         K(zone_page_state(zone, NR_PAGETABLE)),
 3025                         K(zone_page_state(zone, NR_UNSTABLE_NFS)),
 3026                         K(zone_page_state(zone, NR_BOUNCE)),
 3027                         K(zone_page_state(zone, NR_FREE_CMA_PAGES)),
 3028                         K(zone_page_state(zone, NR_WRITEBACK_TEMP)),
 3029                         zone->pages_scanned,
 3030                         (zone->all_unreclaimable ? "yes" : "no")
 3031                         );
 3032                 printk("lowmem_reserve[]:");
 3033                 for (i = 0; i < MAX_NR_ZONES; i++)
 3034                         printk(" %lu", zone->lowmem_reserve[i]);
 3035                 printk("\n");
 3036         }
 3037 
 3038         for_each_populated_zone(zone) {
 3039                 unsigned long nr[MAX_ORDER], flags, order, total = 0;
 3040                 unsigned char types[MAX_ORDER];
 3041 
 3042                 if (skip_free_areas_node(filter, zone_to_nid(zone)))
 3043                         continue;
 3044                 show_node(zone);
 3045                 printk("%s: ", zone->name);
 3046 
 3047                 spin_lock_irqsave(&zone->lock, flags);
 3048                 for (order = 0; order < MAX_ORDER; order++) {
 3049                         struct free_area *area = &zone->free_area[order];
 3050                         int type;
 3051 
 3052                         nr[order] = area->nr_free;
 3053                         total += nr[order] << order;
 3054 
 3055                         types[order] = 0;
 3056                         for (type = 0; type < MIGRATE_TYPES; type++) {
 3057                                 if (!list_empty(&area->free_list[type]))
 3058                                         types[order] |= 1 << type;
 3059                         }
 3060                 }
 3061                 spin_unlock_irqrestore(&zone->lock, flags);
 3062                 for (order = 0; order < MAX_ORDER; order++) {
 3063                         printk("%lu*%lukB ", nr[order], K(1UL) << order);
 3064                         if (nr[order])
 3065                                 show_migration_types(types[order]);
 3066                 }
 3067                 printk("= %lukB\n", K(total));
 3068         }
 3069 
 3070         printk("%ld total pagecache pages\n", global_page_state(NR_FILE_PAGES));
 3071 
 3072         show_swap_cache_info();
 3073 }
 3074 
 3075 static void zoneref_set_zone(struct zone *zone, struct zoneref *zoneref)
 3076 {
 3077         zoneref->zone = zone;
 3078         zoneref->zone_idx = zone_idx(zone);
 3079 }
 3080 
 3081 /*
 3082  * Builds allocation fallback zone lists.
 3083  *
 3084  * Add all populated zones of a node to the zonelist.
 3085  */
 3086 static int build_zonelists_node(pg_data_t *pgdat, struct zonelist *zonelist,
 3087                                 int nr_zones, enum zone_type zone_type)
 3088 {
 3089         struct zone *zone;
 3090 
 3091         BUG_ON(zone_type >= MAX_NR_ZONES);
 3092         zone_type++;
 3093 
 3094         do {
 3095                 zone_type--;
 3096                 zone = pgdat->node_zones + zone_type;
 3097                 if (populated_zone(zone)) {
 3098                         zoneref_set_zone(zone,
 3099                                 &zonelist->_zonerefs[nr_zones++]);
 3100                         check_highest_zone(zone_type);
 3101                 }
 3102 
 3103         } while (zone_type);
 3104         return nr_zones;
 3105 }
 3106 
 3107 
 3108 /*
 3109  *  zonelist_order:
 3110  *  0 = automatic detection of better ordering.
 3111  *  1 = order by ([node] distance, -zonetype)
 3112  *  2 = order by (-zonetype, [node] distance)
 3113  *
 3114  *  If not NUMA, ZONELIST_ORDER_ZONE and ZONELIST_ORDER_NODE will create
 3115  *  the same zonelist. So only NUMA can configure this param.
 3116  */
 3117 #define ZONELIST_ORDER_DEFAULT  0
 3118 #define ZONELIST_ORDER_NODE     1
 3119 #define ZONELIST_ORDER_ZONE     2
 3120 
 3121 /* zonelist order in the kernel.
 3122  * set_zonelist_order() will set this to NODE or ZONE.
 3123  */
 3124 static int current_zonelist_order = ZONELIST_ORDER_DEFAULT;
 3125 static char zonelist_order_name[3][8] = {"Default", "Node", "Zone"};
 3126 
 3127 
 3128 #ifdef CONFIG_NUMA
 3129 /* The zonelist order requested by the user (boot option or sysctl) */
 3130 static int user_zonelist_order = ZONELIST_ORDER_DEFAULT;
 3131 /* string for sysctl */
 3132 #define NUMA_ZONELIST_ORDER_LEN 16
 3133 char numa_zonelist_order[16] = "default";
 3134 
 3135 /*
 3136  * Interface for configuring the zonelist ordering.
 3137  * Command line option "numa_zonelist_order":
 3138  *      "[dD]efault"    - default, automatic configuration
 3139  *      "[nN]ode"       - order by node locality, then by zone within node
 3140  *      "[zZ]one"       - order by zone, then by node locality within zone
 3141  */
 3142 
 3143 static int __parse_numa_zonelist_order(char *s)
 3144 {
 3145         if (*s == 'd' || *s == 'D') {
 3146                 user_zonelist_order = ZONELIST_ORDER_DEFAULT;
 3147         } else if (*s == 'n' || *s == 'N') {
 3148                 user_zonelist_order = ZONELIST_ORDER_NODE;
 3149         } else if (*s == 'z' || *s == 'Z') {
 3150                 user_zonelist_order = ZONELIST_ORDER_ZONE;
 3151         } else {
 3152                 printk(KERN_WARNING
 3153                         "Ignoring invalid numa_zonelist_order value:  "
 3154                         "%s\n", s);
 3155                 return -EINVAL;
 3156         }
 3157         return 0;
 3158 }
 3159 
 3160 static __init int setup_numa_zonelist_order(char *s)
 3161 {
 3162         int ret;
 3163 
 3164         if (!s)
 3165                 return 0;
 3166 
 3167         ret = __parse_numa_zonelist_order(s);
 3168         if (ret == 0)
 3169                 strlcpy(numa_zonelist_order, s, NUMA_ZONELIST_ORDER_LEN);
 3170 
 3171         return ret;
 3172 }
 3173 early_param("numa_zonelist_order", setup_numa_zonelist_order);
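/*
 * [Editor's note, not in the original; paths/values are illustrative.]
 * The ordering is normally selected either at boot:
 *
 *      numa_zonelist_order=zone        (kernel command line, handled by
 *                                       setup_numa_zonelist_order() above)
 *
 * or at run time via the vm sysctl, typically exposed as
 *
 *      echo node > /proc/sys/vm/numa_zonelist_order
 *
 * which lands in numa_zonelist_order_handler() below and rebuilds the
 * zonelists only when the parsed value actually changes.
 */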
 3174 
 3175 /*
 3176  * sysctl handler for numa_zonelist_order
 3177  */
 3178 int numa_zonelist_order_handler(ctl_table *table, int write,
 3179                 void __user *buffer, size_t *length,
 3180                 loff_t *ppos)
 3181 {
 3182         char saved_string[NUMA_ZONELIST_ORDER_LEN];
 3183         int ret;
 3184         static DEFINE_MUTEX(zl_order_mutex);
 3185 
 3186         mutex_lock(&zl_order_mutex);
 3187         if (write)
 3188                 strcpy(saved_string, (char*)table->data);
 3189         ret = proc_dostring(table, write, buffer, length, ppos);
 3190         if (ret)
 3191                 goto out;
 3192         if (write) {
 3193                 int oldval = user_zonelist_order;
 3194                 if (__parse_numa_zonelist_order((char*)table->data)) {
 3195                         /*
 3196                          * bogus value.  restore saved string
 3197                          */
 3198                         strncpy((char*)table->data, saved_string,
 3199                                 NUMA_ZONELIST_ORDER_LEN);
 3200                         user_zonelist_order = oldval;
 3201                 } else if (oldval != user_zonelist_order) {
 3202                         mutex_lock(&zonelists_mutex);
 3203                         build_all_zonelists(NULL, NULL);
 3204                         mutex_unlock(&zonelists_mutex);
 3205                 }
 3206         }
 3207 out:
 3208         mutex_unlock(&zl_order_mutex);
 3209         return ret;
 3210 }
 3211 
 3212 
 3213 #define MAX_NODE_LOAD (nr_online_nodes)
 3214 static int node_load[MAX_NUMNODES];
 3215 
 3216 /**
 3217  * find_next_best_node - find the next node that should appear in a given node's fallback list
 3218  * @node: node whose fallback list we're appending
 3219  * @used_node_mask: nodemask_t of already used nodes
 3220  *
 3221  * We use a number of factors to determine the next node that should appear
 3222  * on a given node's fallback list.  The node must not already appear in
 3223  * @node's fallback list, and it should be the next closest node according
 3224  * to the distance array (which contains arbitrary distance values from each
 3225  * node to each node in the system).  Nodes with no CPUs are also preferred,
 3226  * since presumably they will see very little allocation pressure of their
 3227  * own otherwise.
 3228  * Returns -1 if no node is found. (A worked example follows the function.)
 3229  */
 3230 static int find_next_best_node(int node, nodemask_t *used_node_mask)
 3231 {
 3232         int n, val;
 3233         int min_val = INT_MAX;
 3234         int best_node = -1;
 3235         const struct cpumask *tmp = cpumask_of_node(0);
 3236 
 3237         /* Use the local node if we haven't already */
 3238         if (!node_isset(node, *used_node_mask)) {
 3239                 node_set(node, *used_node_mask);
 3240                 return node;
 3241         }
 3242 
 3243         for_each_node_state(n, N_MEMORY) {
 3244 
 3245                 /* Don't want a node to appear more than once */
 3246                 if (node_isset(n, *used_node_mask))
 3247                         continue;
 3248 
 3249                 /* Use the distance array to find the distance */
 3250                 val = node_distance(node, n);
 3251 
 3252                 /* Penalize nodes under us ("prefer the next node") */
 3253                 val += (n < node);
 3254 
 3255                 /* Give preference to headless and unused nodes */
 3256                 tmp = cpumask_of_node(n);
 3257                 if (!cpumask_empty(tmp))
 3258                         val += PENALTY_FOR_NODE_WITH_CPUS;
 3259 
 3260                 /* Slight preference for less loaded node */
 3261                 val *= (MAX_NODE_LOAD*MAX_NUMNODES);
 3262                 val += node_load[n];
 3263 
 3264                 if (val < min_val) {
 3265                         min_val = val;
 3266                         best_node = n;
 3267                 }
 3268         }
 3269 
 3270         if (best_node >= 0)
 3271                 node_set(best_node, *used_node_mask);
 3272 
 3273         return best_node;
 3274 }
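/*
 * [Editor's worked example, not in the original; all numbers are made up.]
 * Suppose node 1 is building its fallback list, nodes 0 and 2 are both still
 * unused and both have CPUs, and node_distance(1, 0) == node_distance(1, 2)
 * == 20.  With PENALTY_FOR_NODE_WITH_CPUS == 1 the scores are:
 *
 *      node 0: (20 + 1 + 1) * MAX_NODE_LOAD * MAX_NUMNODES + node_load[0]
 *      node 2: (20 + 0 + 1) * MAX_NODE_LOAD * MAX_NUMNODES + node_load[2]
 *
 * The extra "+1" for node 0 is the (n < node) term ("prefer the next node"),
 * so node 2 is picked first.  node_load[], updated by build_zonelists(),
 * then rotates the choice among equally distant nodes so that no single
 * remote node absorbs every other node's fallback traffic.
 */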
 3275 
 3276 
 3277 /*
 3278  * Build zonelists ordered by node and zones within node.
 3279  * This results in maximum locality--normal zone overflows into local
 3280  * DMA zone, if any--but risks exhausting DMA zone.
 3281  */
 3282 static void build_zonelists_in_node_order(pg_data_t *pgdat, int node)
 3283 {
 3284         int j;
 3285         struct zonelist *zonelist;
 3286 
 3287         zonelist = &pgdat->node_zonelists[0];
 3288         for (j = 0; zonelist->_zonerefs[j].zone != NULL; j++)
 3289                 ;
 3290         j = build_zonelists_node(NODE_DATA(node), zonelist, j,
 3291                                                         MAX_NR_ZONES - 1);
 3292         zonelist->_zonerefs[j].zone = NULL;
 3293         zonelist->_zonerefs[j].zone_idx = 0;
 3294 }
 3295 
 3296 /*
 3297  * Build gfp_thisnode zonelists
 3298  */
 3299 static void build_thisnode_zonelists(pg_data_t *pgdat)
 3300 {
 3301         int j;
 3302         struct zonelist *zonelist;
 3303 
 3304         zonelist = &pgdat->node_zonelists[1];
 3305         j = build_zonelists_node(pgdat, zonelist, 0, MAX_NR_ZONES - 1);
 3306         zonelist->_zonerefs[j].zone = NULL;
 3307         zonelist->_zonerefs[j].zone_idx = 0;
 3308 }
 3309 
 3310 /*
 3311  * Build zonelists ordered by zone and nodes within zones.
 3312  * This conserves the DMA zone[s] until all Normal memory is exhausted,
 3313  * but at the cost of overflowing to a remote node while memory may still
 3314  * be available in the local DMA zone.
 3315  */
 3316 static int node_order[MAX_NUMNODES];
 3317 
 3318 static void build_zonelists_in_zone_order(pg_data_t *pgdat, int nr_nodes)
 3319 {
 3320         int pos, j, node;
 3321         int zone_type;          /* needs to be signed */
 3322         struct zone *z;
 3323         struct zonelist *zonelist;
 3324 
 3325         zonelist = &pgdat->node_zonelists[0];
 3326         pos = 0;
 3327         for (zone_type = MAX_NR_ZONES - 1; zone_type >= 0; zone_type--) {
 3328                 for (j = 0; j < nr_nodes; j++) {
 3329                         node = node_order[j];
 3330                         z = &NODE_DATA(node)->node_zones[zone_type];
 3331                         if (populated_zone(z)) {
 3332                                 zoneref_set_zone(z,
 3333                                         &zonelist->_zonerefs[pos++]);
 3334                                 check_highest_zone(zone_type);
 3335                         }
 3336                 }
 3337         }
 3338         zonelist->_zonerefs[pos].zone = NULL;
 3339         zonelist->_zonerefs[pos].zone_idx = 0;
 3340 }
 3341 
 3342 static int default_zonelist_order(void)
 3343 {
 3344         int nid, zone_type;
 3345         unsigned long low_kmem_size,total_size;
 3346         struct zone *z;
 3347         int average_size;
 3348         /*
 3349          * ZONE_DMA and ZONE_DMA32 can be a very small part of the system.
 3350          * If they are really small and heavily used, the system can fall
 3351          * into OOM very easily.
 3352          * This function detects the ZONE_DMA/DMA32 size and configures the zonelist order.
 3353          */
 3354         /* Is there a ZONE_NORMAL? (e.g. ppc may have only a DMA zone) */
 3355         low_kmem_size = 0;
 3356         total_size = 0;
 3357         for_each_online_node(nid) {
 3358                 for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++) {
 3359                         z = &NODE_DATA(nid)->node_zones[zone_type];
 3360                         if (populated_zone(z)) {
 3361                                 if (zone_type < ZONE_NORMAL)
 3362                                         low_kmem_size += z->present_pages;
 3363                                 total_size += z->present_pages;
 3364                         } else if (zone_type == ZONE_NORMAL) {
 3365                                 /*
 3366                                  * If any node has only lowmem, then node order
 3367                                  * is preferred to allow kernel allocations
 3368                                  * locally; otherwise, they can easily infringe
 3369                                  * on other nodes when there is an abundance of
 3370                                  * lowmem available to allocate from.
 3371                                  */
 3372                                 return ZONELIST_ORDER_NODE;
 3373                         }
 3374                 }
 3375         }
 3376         if (!low_kmem_size ||  /* there is no DMA area. */
 3377             low_kmem_size > total_size/2) /* DMA/DMA32 is big. */
 3378                 return ZONELIST_ORDER_NODE;
 3379         /*
 3380          * Look into each node's configuration.
 3381          * If DMA/DMA32 memory makes up a very large share of a node's
 3382          * local memory, NODE_ORDER may be suitable.
 3383          */
 3384         average_size = total_size /
 3385                                 (nodes_weight(node_states[N_MEMORY]) + 1);
 3386         for_each_online_node(nid) {
 3387                 low_kmem_size = 0;
 3388                 total_size = 0;
 3389                 for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++) {
 3390                         z = &NODE_DATA(nid)->node_zones[zone_type];
 3391                         if (populated_zone(z)) {
 3392                                 if (zone_type < ZONE_NORMAL)
 3393                                         low_kmem_size += z->present_pages;
 3394                                 total_size += z->present_pages;
 3395                         }
 3396                 }
 3397                 if (low_kmem_size &&
 3398                     total_size > average_size && /* ignore small node */
 3399                     low_kmem_size > total_size * 70/100)
 3400                         return ZONELIST_ORDER_NODE;
 3401         }
 3402         return ZONELIST_ORDER_ZONE;
 3403 }
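/*
 * [Editor's note, not in the original; the memory layouts are hypothetical.]
 * Example: a 2-node, 64 GB x86-64 machine where node 0 carries 16 MB of
 * ZONE_DMA and ~4 GB of ZONE_DMA32.  low_kmem_size (~4 GB) is well under
 * half of total_size and under 70% of node 0's memory, so
 * ZONELIST_ORDER_ZONE is returned and the small low zones are protected.
 * A box whose memory sits almost entirely below 4 GB instead has
 * low_kmem_size > total_size/2 and gets ZONELIST_ORDER_NODE, favouring
 * locality over protecting lowmem.
 */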
 3404 
 3405 static void set_zonelist_order(void)
 3406 {
 3407         if (user_zonelist_order == ZONELIST_ORDER_DEFAULT)
 3408                 current_zonelist_order = default_zonelist_order();
 3409         else
 3410                 current_zonelist_order = user_zonelist_order;
 3411 }
 3412 
 3413 static void build_zonelists(pg_data_t *pgdat)
 3414 {
 3415         int j, node, load;
 3416         enum zone_type i;
 3417         nodemask_t used_mask;
 3418         int local_node, prev_node;
 3419         struct zonelist *zonelist;
 3420         int order = current_zonelist_order;
 3421 
 3422         /* initialize zonelists */
 3423         for (i = 0; i < MAX_ZONELISTS; i++) {
 3424                 zonelist = pgdat->node_zonelists + i;
 3425                 zonelist->_zonerefs[0].zone = NULL;
 3426                 zonelist->_zonerefs[0].zone_idx = 0;
 3427         }
 3428 
 3429         /* NUMA-aware ordering of nodes */
 3430         local_node = pgdat->node_id;
 3431         load = nr_online_nodes;
 3432         prev_node = local_node;
 3433         nodes_clear(used_mask);
 3434 
 3435         memset(node_order, 0, sizeof(node_order));
 3436         j = 0;
 3437 
 3438         while ((node = find_next_best_node(local_node, &used_mask)) >= 0) {
 3439                 /*
 3440                  * We don't want to pressure a particular node, so we add
 3441                  * a penalty to the first node in the same distance group
 3442                  * in order to make the selection round-robin.
 3443                  */
 3444                 if (node_distance(local_node, node) !=
 3445                     node_distance(local_node, prev_node))
 3446                         node_load[node] = load;
 3447 
 3448                 prev_node = node;
 3449                 load--;
 3450                 if (order == ZONELIST_ORDER_NODE)
 3451                         build_zonelists_in_node_order(pgdat, node);
 3452                 else
 3453                         node_order[j++] = node; /* remember order */
 3454         }
 3455 
 3456         if (order == ZONELIST_ORDER_ZONE) {
 3457                 /* calculate node order -- i.e., DMA last! */
 3458                 build_zonelists_in_zone_order(pgdat, j);
 3459         }
 3460 
 3461         build_thisnode_zonelists(pgdat);
 3462 }
 3463 
 3464 /* Construct the zonelist performance cache - see mmzone.h for details */
 3465 static void build_zonelist_cache(pg_data_t *pgdat)
 3466 {
 3467         struct zonelist *zonelist;
 3468         struct zonelist_cache *zlc;
 3469         struct zoneref *z;
 3470 
 3471         zonelist = &pgdat->node_zonelists[0];
 3472         zonelist->zlcache_ptr = zlc = &zonelist->zlcache;
 3473         bitmap_zero(zlc->fullzones, MAX_ZONES_PER_ZONELIST);
 3474         for (z = zonelist->_zonerefs; z->zone; z++)
 3475                 zlc->z_to_n[z - zonelist->_zonerefs] = zonelist_node_idx(z);
 3476 }
 3477 
 3478 #ifdef CONFIG_HAVE_MEMORYLESS_NODES
 3479 /*
 3480  * Return node id of node used for "local" allocations.
 3481  * I.e., first node id of first zone in arg node's generic zonelist.
 3482  * Used for initializing percpu 'numa_mem', which is used primarily
 3483  * for kernel allocations, so use GFP_KERNEL flags to locate zonelist.
 3484  */
 3485 int local_memory_node(int node)
 3486 {
 3487         struct zone *zone;
 3488 
 3489         (void)first_zones_zonelist(node_zonelist(node, GFP_KERNEL),
 3490                                    gfp_zone(GFP_KERNEL),
 3491                                    NULL,
 3492                                    &zone);
 3493         return zone->node;
 3494 }
 3495 #endif
 3496 
 3497 #else   /* CONFIG_NUMA */
 3498 
 3499 static void set_zonelist_order(void)
 3500 {
 3501         current_zonelist_order = ZONELIST_ORDER_ZONE;
 3502 }
 3503 
 3504 static void build_zonelists(pg_data_t *pgdat)
 3505 {
 3506         int node, local_node;
 3507         enum zone_type j;
 3508         struct zonelist *zonelist;
 3509 
 3510         local_node = pgdat->node_id;
 3511 
 3512         zonelist = &pgdat->node_zonelists[0];
 3513         j = build_zonelists_node(pgdat, zonelist, 0, MAX_NR_ZONES - 1);
 3514 
 3515         /*
 3516          * Now we build the zonelist so that it contains the zones
 3517          * of all the other nodes.
 3518          * We don't want to pressure a particular node, so when
 3519          * building the zones for node N, we make sure that the
 3520          * zones coming right after the local ones are those from
 3521          * node N+1 (modulo N)
 3522          */
 3523         for (node = local_node + 1; node < MAX_NUMNODES; node++) {
 3524                 if (!node_online(node))
 3525                         continue;
 3526                 j = build_zonelists_node(NODE_DATA(node), zonelist, j,
 3527                                                         MAX_NR_ZONES - 1);
 3528         }
 3529         for (node = 0; node < local_node; node++) {
 3530                 if (!node_online(node))
 3531                         continue;
 3532                 j = build_zonelists_node(NODE_DATA(node), zonelist, j,
 3533                                                         MAX_NR_ZONES - 1);
 3534         }
 3535 
 3536         zonelist->_zonerefs[j].zone = NULL;
 3537         zonelist->_zonerefs[j].zone_idx = 0;
 3538 }
 3539 
 3540 /* non-NUMA variant of zonelist performance cache - just NULL zlcache_ptr */
 3541 static void build_zonelist_cache(pg_data_t *pgdat)
 3542 {
 3543         pgdat->node_zonelists[0].zlcache_ptr = NULL;
 3544 }
 3545 
 3546 #endif  /* CONFIG_NUMA */
 3547 
 3548 /*
 3549  * Boot pageset table. One per cpu which is going to be used for all
 3550  * zones and all nodes. The parameters will be set in such a way
 3551  * that an item put on a list will immediately be handed over to
 3552  * the buddy list. This is safe since pageset manipulation is done
 3553  * with interrupts disabled.
 3554  *
 3555  * The boot_pagesets must be kept even after bootup is complete for
 3556  * unused processors and/or zones. They do play a role for bootstrapping
 3557  * hotplugged processors.
 3558  *
 3559  * zoneinfo_show() and maybe other functions do
 3560  * not check if the processor is online before following the pageset pointer.
 3561  * Other parts of the kernel may not check if the zone is available.
 3562  */
 3563 static void setup_pageset(struct per_cpu_pageset *p, unsigned long batch);
 3564 static DEFINE_PER_CPU(struct per_cpu_pageset, boot_pageset);
 3565 static void setup_zone_pageset(struct zone *zone);
 3566 
 3567 /*
 3568  * Global mutex to protect against size modification of zonelists
 3569  * as well as to serialize pageset setup for the new populated zone.
 3570  */
 3571 DEFINE_MUTEX(zonelists_mutex);
 3572 
 3573 /* The return value is an int only because stop_machine() requires it */
 3574 static int __build_all_zonelists(void *data)
 3575 {
 3576         int nid;
 3577         int cpu;
 3578         pg_data_t *self = data;
 3579 
 3580 #ifdef CONFIG_NUMA
 3581         memset(node_load, 0, sizeof(node_load));
 3582 #endif
 3583 
 3584         if (self && !node_online(self->node_id)) {
 3585                 build_zonelists(self);
 3586                 build_zonelist_cache(self);
 3587         }
 3588 
 3589         for_each_online_node(nid) {
 3590                 pg_data_t *pgdat = NODE_DATA(nid);
 3591 
 3592                 build_zonelists(pgdat);
 3593                 build_zonelist_cache(pgdat);
 3594         }
 3595 
 3596         /*
 3597          * Initialize the boot_pagesets that are going to be used
 3598          * for bootstrapping processors. The real pagesets for
 3599          * each zone will be allocated later when the per cpu
 3600          * allocator is available.
 3601          *
 3602          * boot_pagesets are used also for bootstrapping offline
 3603          * cpus if the system is already booted because the pagesets
 3604          * are needed to initialize allocators on a specific cpu too.
 3605          * F.e. the percpu allocator needs the page allocator which
 3606          * needs the percpu allocator in order to allocate its pagesets
 3607          * (a chicken-egg dilemma).
 3608          */
 3609         for_each_possible_cpu(cpu) {
 3610                 setup_pageset(&per_cpu(boot_pageset, cpu), 0);
 3611 
 3612 #ifdef CONFIG_HAVE_MEMORYLESS_NODES
 3613                 /*
 3614                  * We now know the "local memory node" for each node--
 3615                  * i.e., the node of the first zone in the generic zonelist.
 3616                  * Set up numa_mem percpu variable for on-line cpus.  During
 3617                  * boot, only the boot cpu should be on-line;  we'll init the
 3618                  * secondary cpus' numa_mem as they come on-line.  During
 3619                  * node/memory hotplug, we'll fixup all on-line cpus.
 3620                  */
 3621                 if (cpu_online(cpu))
 3622                         set_cpu_numa_mem(cpu, local_memory_node(cpu_to_node(cpu)));
 3623 #endif
 3624         }
 3625 
 3626         return 0;
 3627 }
 3628 
 3629 /*
 3630  * Always called with zonelists_mutex held,
 3631  * unless system_state == SYSTEM_BOOTING.
 3632  */
 3633 void __ref build_all_zonelists(pg_data_t *pgdat, struct zone *zone)
 3634 {
 3635         set_zonelist_order();
 3636 
 3637         if (system_state == SYSTEM_BOOTING) {
 3638                 __build_all_zonelists(NULL);
 3639                 mminit_verify_zonelist();
 3640                 cpuset_init_current_mems_allowed();
 3641         } else {
 3642                 /* we have to stop all cpus to guarantee there is no user
 3643                    of zonelist */
 3644 #ifdef CONFIG_MEMORY_HOTPLUG
 3645                 if (zone)
 3646                         setup_zone_pageset(zone);
 3647 #endif
 3648                 stop_machine(__build_all_zonelists, pgdat, NULL);
 3649                 /* cpuset refresh routine should be here */
 3650         }
 3651         vm_total_pages = nr_free_pagecache_pages();
 3652         /*
 3653          * Disable grouping by mobility if the number of pages in the
 3654          * system is too low to allow the mechanism to work. It would be
 3655          * more accurate, but expensive, to check this per-zone. This check
 3656          * is also made on memory hot-add, so a system can start with
 3657          * mobility grouping disabled and enable it later.
 3658          */
 3659         if (vm_total_pages < (pageblock_nr_pages * MIGRATE_TYPES))
 3660                 page_group_by_mobility_disabled = 1;
 3661         else
 3662                 page_group_by_mobility_disabled = 0;
 3663 
 3664         printk("Built %i zonelists in %s order, mobility grouping %s.  "
 3665                 "Total pages: %ld\n",
 3666                         nr_online_nodes,
 3667                         zonelist_order_name[current_zonelist_order],
 3668                         page_group_by_mobility_disabled ? "off" : "on",
 3669                         vm_total_pages);
 3670 #ifdef CONFIG_NUMA
 3671         printk("Policy zone: %s\n", zone_names[policy_zone]);
 3672 #endif
 3673 }
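/*
 * [Editor's note, not in the original; numbers are illustrative.]  The
 * printk above shows up in dmesg as a line of the form:
 *
 *      Built 2 zonelists in Node order, mobility grouping on.  Total pages: 2041967
 *      Policy zone: Normal
 */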
 3674 
 3675 /*
 3676  * Helper functions to size the waitqueue hash table.
 3677  * Essentially these want to choose hash table sizes sufficiently
 3678  * large so that collisions trying to wait on pages are rare.
 3679  * But in fact, the number of active page waitqueues on typical
 3680  * systems is ridiculously low, less than 200, so this sizing is
 3681  * still conservative even though it may seem large.
 3682  *
 3683  * The constant PAGES_PER_WAITQUEUE specifies the ratio of pages to
 3684  * waitqueues, i.e. the size of the waitq table given the number of pages.
 3685  */
 3686 #define PAGES_PER_WAITQUEUE     256
 3687 
 3688 #ifndef CONFIG_MEMORY_HOTPLUG
 3689 static inline unsigned long wait_table_hash_nr_entries(unsigned long pages)
 3690 {
 3691         unsigned long size = 1;
 3692 
 3693         pages /= PAGES_PER_WAITQUEUE;
 3694 
 3695         while (size < pages)
 3696                 size <<= 1;
 3697 
 3698         /*
 3699          * Once we have dozens or even hundreds of threads sleeping
 3700          * on IO we've got bigger problems than wait queue collision.
 3701          * Limit the size of the wait table to a reasonable size.
 3702          */
 3703         size = min(size, 4096UL);
 3704 
 3705         return max(size, 4UL);
 3706 }
 3707 #else
 3708 /*
 3709  * A zone's size might be changed by hot-add, so it is not possible to determine
 3710  * a suitable size for its wait_table.  So we use the maximum size now.
 3711  *
 3712  * The max wait table size = 4096 x sizeof(wait_queue_head_t).   ie:
 3713  *
 3714  *    i386 (preemption config)    : 4096 x 16 = 64Kbyte.
 3715  *    ia64, x86-64 (no preemption): 4096 x 20 = 80Kbyte.
 3716  *    ia64, x86-64 (preemption)   : 4096 x 24 = 96Kbyte.
 3717  *
 3718  * The maximum entries are prepared when a zone's memory is (512K + 256) pages
 3719  * or more by the traditional way. (See above).  It equals:
 3720  *
 3721  *    i386, x86-64, powerpc(4K page size) : =  ( 2G + 1M)byte.
 3722  *    ia64(16K page size)                 : =  ( 8G + 4M)byte.
 3723  *    powerpc (64K page size)             : =  (32G +16M)byte.
 3724  */
 3725 static inline unsigned long wait_table_hash_nr_entries(unsigned long pages)
 3726 {
 3727         return 4096UL;
 3728 }
 3729 #endif
 3730 
 3731 /*
 3732  * This is an integer logarithm so that shifts can be used later
 3733  * to extract the more random high bits from the multiplicative
 3734  * hash function before the remainder is taken.
 3735  */
 3736 static inline unsigned long wait_table_bits(unsigned long size)
 3737 {
 3738         return ffz(~size);
 3739 }
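/*
 * [Editor's worked example, not in the original; assumes 4 KB pages and no
 * memory hotplug.]  For a zone of 524288 pages (2 GB):
 *
 *      524288 / PAGES_PER_WAITQUEUE = 524288 / 256 = 2048
 *
 * which is already a power of two, so wait_table_hash_nr_entries() returns
 * 2048 (inside the [4, 4096] clamp), and wait_table_bits(2048) = ffz(~2048)
 * = 11, i.e. an 11-bit hash index.
 */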
 3740 
 3741 #define LONG_ALIGN(x) (((x)+(sizeof(long))-1)&~((sizeof(long))-1))
 3742 
 3743 /*
 3744  * Check if a pageblock contains reserved pages
 3745  */
 3746 static int pageblock_is_reserved(unsigned long start_pfn, unsigned long end_pfn)
 3747 {
 3748         unsigned long pfn;
 3749 
 3750         for (pfn = start_pfn; pfn < end_pfn; pfn++) {
 3751                 if (!pfn_valid_within(pfn) || PageReserved(pfn_to_page(pfn)))
 3752                         return 1;
 3753         }
 3754         return 0;
 3755 }
 3756 
 3757 /*
 3758  * Mark a number of pageblocks as MIGRATE_RESERVE. The number
 3759  * of blocks reserved is based on min_wmark_pages(zone). The memory within
 3760  * the reserve will tend to store contiguous free pages. Setting min_free_kbytes
 3761  * higher will lead to a bigger reserve, which will get freed as contiguous
 3762  * blocks as reclaim kicks in.
 3763  */
 3764 static void setup_zone_migrate_reserve(struct zone *zone)
 3765 {
 3766         unsigned long start_pfn, pfn, end_pfn, block_end_pfn;
 3767         struct page *page;
 3768         unsigned long block_migratetype;
 3769         int reserve;
 3770 
 3771         /*
 3772          * Get the start pfn, end pfn and the number of blocks to reserve
 3773          * We have to be careful to be aligned to pageblock_nr_pages to
 3774          * make sure that we always check pfn_valid for the first page in
 3775          * the block.
 3776          */
 3777         start_pfn = zone->zone_start_pfn;
 3778         end_pfn = start_pfn + zone->spanned_pages;
 3779         start_pfn = roundup(start_pfn, pageblock_nr_pages);
 3780         reserve = roundup(min_wmark_pages(zone), pageblock_nr_pages) >>
 3781                                                         pageblock_order;
 3782 
 3783         /*
 3784          * Reserve blocks are generally in place to help high-order atomic
 3785          * allocations that are short-lived. A min_free_kbytes value that
 3786          * would result in more than 2 reserve blocks for atomic allocations
 3787          * is assumed to be in place to help anti-fragmentation for the
 3788          * future allocation of hugepages at runtime.
 3789          */
 3790         reserve = min(2, reserve);
 3791 
 3792         for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
 3793                 if (!pfn_valid(pfn))
 3794                         continue;
 3795                 page = pfn_to_page(pfn);
 3796 
 3797                 /* Watch out for overlapping nodes */
 3798                 if (page_to_nid(page) != zone_to_nid(zone))
 3799                         continue;
 3800 
 3801                 block_migratetype = get_pageblock_migratetype(page);
 3802 
 3803                 /* Only test what is necessary when the reserves are not met */
 3804                 if (reserve > 0) {
 3805                         /*
 3806                          * Blocks with reserved pages will never free, skip
 3807                          * them.
 3808                          */
 3809                         block_end_pfn = min(pfn + pageblock_nr_pages, end_pfn);
 3810                         if (pageblock_is_reserved(pfn, block_end_pfn))
 3811                                 continue;
 3812 
 3813                         /* If this block is reserved, account for it */
 3814                         if (block_migratetype == MIGRATE_RESERVE) {
 3815                                 reserve--;
 3816                                 continue;
 3817                         }
 3818 
 3819                         /* Suitable for reserving if this block is movable */
 3820                         if (block_migratetype == MIGRATE_MOVABLE) {
 3821                                 set_pageblock_migratetype(page,
 3822                                                         MIGRATE_RESERVE);
 3823                                 move_freepages_block(zone, page,
 3824                                                         MIGRATE_RESERVE);
 3825                                 reserve--;
 3826                                 continue;
 3827                         }
 3828                 }
 3829 
 3830                 /*
 3831                  * If the reserve is met and this is a previous reserved block,
 3832                  * take it back
 3833                  */
 3834                 if (block_migratetype == MIGRATE_RESERVE) {
 3835                         set_pageblock_migratetype(page, MIGRATE_MOVABLE);
 3836                         move_freepages_block(zone, page, MIGRATE_MOVABLE);
 3837                 }
 3838         }
 3839 }
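/*
 * [Editor's worked example, not in the original; the 512-page (2 MB)
 * pageblock is an x86-64 style assumption.]  With min_wmark_pages(zone) ==
 * 1500 and pageblock_order == 9:
 *
 *      reserve = roundup(1500, 512) >> 9 = 1536 >> 9 = 3, then min(2, 3) = 2
 *
 * so at most two whole pageblocks near the start of the zone are marked
 * MIGRATE_RESERVE; raising min_free_kbytes can never push this above 2.
 */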
 3840 
 3841 /*
 3842  * Initially all pages are reserved - free ones are freed
 3843  * up by free_all_bootmem() once the early boot process is
 3844  * done. Non-atomic initialization, single-pass.
 3845  */
 3846 void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
 3847                 unsigned long start_pfn, enum memmap_context context)
 3848 {
 3849         struct page *page;
 3850         unsigned long end_pfn = start_pfn + size;
 3851         unsigned long pfn;
 3852         struct zone *z;
 3853 
 3854         if (highest_memmap_pfn < end_pfn - 1)
 3855                 highest_memmap_pfn = end_pfn - 1;
 3856 
 3857         z = &NODE_DATA(nid)->node_zones[zone];
 3858         for (pfn = start_pfn; pfn < end_pfn; pfn++) {
 3859                 /*
 3860                  * There can be holes in boot-time mem_map[]s
 3861                  * handed to this function.  They do not
 3862                  * exist on hotplugged memory.
 3863                  */
 3864                 if (context == MEMMAP_EARLY) {
 3865                         if (!early_pfn_valid(pfn))
 3866                                 continue;
 3867                         if (!early_pfn_in_nid(pfn, nid))
 3868                                 continue;
 3869                 }
 3870                 page = pfn_to_page(pfn);
 3871                 set_page_links(page, zone, nid, pfn);
 3872                 mminit_verify_page_links(page, zone, nid, pfn);
 3873                 init_page_count(page);
 3874                 reset_page_mapcount(page);
 3875                 reset_page_last_nid(page);
 3876                 SetPageReserved(page);
 3877                 /*
 3878                  * Mark the block movable so that blocks are reserved for
 3879                  * movable at startup. This will force kernel allocations
 3880                  * to reserve their blocks rather than leaking throughout
 3881                  * the address space during boot when many long-lived
 3882                  * kernel allocations are made. Later some blocks near
 3883                  * the start are marked MIGRATE_RESERVE by
 3884                  * setup_zone_migrate_reserve()
 3885                  *
 3886                  * The pageblock bitmap is created for the zone's valid pfn
 3887                  * range, but the memmap can cover invalid pages (for alignment),
 3888                  * so check here that set_pageblock_migratetype() is not called
 3889                  * for a pfn outside the zone.
 3890                  */
 3891                 if ((z->zone_start_pfn <= pfn)
 3892                     && (pfn < z->zone_start_pfn + z->spanned_pages)
 3893                     && !(pfn & (pageblock_nr_pages - 1)))
 3894                         set_pageblock_migratetype(page, MIGRATE_MOVABLE);
 3895 
 3896                 INIT_LIST_HEAD(&page->lru);
 3897 #ifdef WANT_PAGE_VIRTUAL
 3898                 /* The shift won't overflow because ZONE_NORMAL is below 4G. */
 3899                 if (!is_highmem_idx(zone))
 3900                         set_page_address(page, __va(pfn << PAGE_SHIFT));
 3901 #endif
 3902         }
 3903 }
 3904 
 3905 static void __meminit zone_init_free_lists(struct zone *zone)
 3906 {
 3907         int order, t;
 3908         for_each_migratetype_order(order, t) {
 3909                 INIT_LIST_HEAD(&zone->free_area[order].free_list[t]);
 3910                 zone->free_area[order].nr_free = 0;
 3911         }
 3912 }
 3913 
 3914 #ifndef __HAVE_ARCH_MEMMAP_INIT
 3915 #define memmap_init(size, nid, zone, start_pfn) \
 3916         memmap_init_zone((size), (nid), (zone), (start_pfn), MEMMAP_EARLY)
 3917 #endif
 3918 
 3919 static int __meminit zone_batchsize(struct zone *zone)
 3920 {
 3921 #ifdef CONFIG_MMU
 3922         int batch;
 3923 
 3924         /*
 3925          * The per-cpu-pages pools are set to around 1/1000th of the
 3926          * size of the zone.  But no more than 1/2 of a meg (512 KB).
 3927          *
 3928          * OK, so we don't know how big the cache is.  So guess.
 3929          */
 3930         batch = zone->present_pages / 1024;
 3931         if (batch * PAGE_SIZE > 512 * 1024)
 3932                 batch = (512 * 1024) / PAGE_SIZE;
 3933         batch /= 4;             /* We effectively *= 4 below */
 3934         if (batch < 1)
 3935                 batch = 1;
 3936 
 3937         /*
 3938          * Clamp the batch to a 2^n - 1 value. Having a power
 3939          * of 2 value was found to be more likely to have
 3940          * suboptimal cache aliasing properties in some cases.
 3941          *
 3942          * For example if 2 tasks are alternately allocating
 3943          * batches of pages, one task can end up with a lot
 3944          * of pages of one half of the possible page colors
 3945          * and the other with pages of the other colors.
 3946          */
 3947         batch = rounddown_pow_of_two(batch + batch/2) - 1;
 3948 
 3949         return batch;
 3950 
 3951 #else
 3952         /* The deferral and batching of frees should be suppressed under NOMMU
 3953          * conditions.
 3954          *
 3955          * The problem is that NOMMU needs to be able to allocate large chunks
 3956          * of contiguous memory as there's no hardware page translation to
 3957          * assemble apparent contiguous memory from discontiguous pages.
 3958          *
 3959          * Queueing large contiguous runs of pages for batching, however,
 3960          * causes the pages to actually be freed in smaller chunks.  As there
 3961          * can be a significant delay between the individual batches being
 3962          * recycled, this leads to the once large chunks of space being
 3963          * fragmented and becoming unavailable for high-order allocations.
 3964          */
 3965         return 0;
 3966 #endif
 3967 }
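/*
 * [Editor's worked example, not in the original; assumes 4 KB pages.]
 * For a zone with 262144 present pages (1 GB):
 *
 *      batch = 262144 / 1024                     = 256
 *      256 * PAGE_SIZE > 512 KB, so batch        = (512 * 1024) / 4096 = 128
 *      batch /= 4                                = 32
 *      rounddown_pow_of_two(32 + 32/2) - 1       = 32 - 1 = 31
 *
 * which is the familiar "LIFO batch:31" value printed by zone_pcp_init()
 * at boot.
 */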
 3968 
 3969 static void setup_pageset(struct per_cpu_pageset *p, unsigned long batch)
 3970 {
 3971         struct per_cpu_pages *pcp;
 3972         int migratetype;
 3973 
 3974         memset(p, 0, sizeof(*p));
 3975 
 3976         pcp = &p->pcp;
 3977         pcp->count = 0;
 3978         pcp->high = 6 * batch;
 3979         pcp->batch = max(1UL, 1 * batch);
 3980         for (migratetype = 0; migratetype < MIGRATE_PCPTYPES; migratetype++)
 3981                 INIT_LIST_HEAD(&pcp->lists[migratetype]);
 3982 }
 3983 
 3984 /*
 3985  * setup_pagelist_highmark() sets the high water mark for hot per_cpu_pagelist
 3986  * to the value high for the pageset p.
 3987  */
 3988 
 3989 static void setup_pagelist_highmark(struct per_cpu_pageset *p,
 3990                                 unsigned long high)
 3991 {
 3992         struct per_cpu_pages *pcp;
 3993 
 3994         pcp = &p->pcp;
 3995         pcp->high = high;
 3996         pcp->batch = max(1UL, high/4);
 3997         if ((high/4) > (PAGE_SHIFT * 8))
 3998                 pcp->batch = PAGE_SHIFT * 8;
 3999 }
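/*
 * [Editor's worked example, not in the original; assumes 4 KB pages.]
 * If percpu_pagelist_fraction is set to 8 for a 262144-page zone,
 * setup_zone_pageset() below calls this with high = 262144 / 8 = 32768;
 * batch = max(1, 32768/4) = 8192 exceeds PAGE_SHIFT * 8 = 96, so the batch
 * is clamped to 96 while the high-water mark stays at 32768 pages.
 */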
 4000 
 4001 static void __meminit setup_zone_pageset(struct zone *zone)
 4002 {
 4003         int cpu;
 4004 
 4005         zone->pageset = alloc_percpu(struct per_cpu_pageset);
 4006 
 4007         for_each_possible_cpu(cpu) {
 4008                 struct per_cpu_pageset *pcp = per_cpu_ptr(zone->pageset, cpu);
 4009 
 4010                 setup_pageset(pcp, zone_batchsize(zone));
 4011 
 4012                 if (percpu_pagelist_fraction)
 4013                         setup_pagelist_highmark(pcp,
 4014                                 (zone->present_pages /
 4015                                         percpu_pagelist_fraction));
 4016         }
 4017 }
 4018 
 4019 /*
 4020  * Allocate per cpu pagesets and initialize them.
 4021  * Before this call only boot pagesets were available.
 4022  */
 4023 void __init setup_per_cpu_pageset(void)
 4024 {
 4025         struct zone *zone;
 4026 
 4027         for_each_populated_zone(zone)
 4028                 setup_zone_pageset(zone);
 4029 }
 4030 
 4031 static noinline __init_refok
 4032 int zone_wait_table_init(struct zone *zone, unsigned long zone_size_pages)
 4033 {
 4034         int i;
 4035         struct pglist_data *pgdat = zone->zone_pgdat;
 4036         size_t alloc_size;
 4037 
 4038         /*
 4039          * The per-page waitqueue mechanism uses hashed waitqueues
 4040          * per zone.
 4041          */
 4042         zone->wait_table_hash_nr_entries =
 4043                  wait_table_hash_nr_entries(zone_size_pages);
 4044         zone->wait_table_bits =
 4045                 wait_table_bits(zone->wait_table_hash_nr_entries);
 4046         alloc_size = zone->wait_table_hash_nr_entries
 4047                                         * sizeof(wait_queue_head_t);
 4048 
 4049         if (!slab_is_available()) {
 4050                 zone->wait_table = (wait_queue_head_t *)
 4051                         alloc_bootmem_node_nopanic(pgdat, alloc_size);
 4052         } else {
 4053                 /*
 4054                  * This case means that a zone whose size was 0 gets new memory
 4055                  * via memory hot-add.
 4056                  * But it may be that an entire new node was hot-added.  In
 4057                  * that case vmalloc() will not be able to use the new node's
 4058                  * memory - ideally this wait_table would be allocated from
 4059                  * the new node itself.
 4060                  * Using the new node's memory for it will require further
 4061                  * work.
 4062                  */
 4063                 zone->wait_table = vmalloc(alloc_size);
 4064         }
 4065         if (!zone->wait_table)
 4066                 return -ENOMEM;
 4067 
 4068         for(i = 0; i < zone->wait_table_hash_nr_entries; ++i)
 4069                 init_waitqueue_head(zone->wait_table + i);
 4070 
 4071         return 0;
 4072 }
 4073 
 4074 static __meminit void zone_pcp_init(struct zone *zone)
 4075 {
 4076         /*
 4077          * per cpu subsystem is not up at this point. The following code
 4078          * relies on the ability of the linker to provide the
 4079          * offset of a (static) per cpu variable into the per cpu area.
 4080          */
 4081         zone->pageset = &boot_pageset;
 4082 
 4083         if (zone->present_pages)
 4084                 printk(KERN_DEBUG "  %s zone: %lu pages, LIFO batch:%u\n",
 4085                         zone->name, zone->present_pages,
 4086                                          zone_batchsize(zone));
 4087 }
 4088 
 4089 int __meminit init_currently_empty_zone(struct zone *zone,
 4090                                         unsigned long zone_start_pfn,
 4091                                         unsigned long size,
 4092                                         enum memmap_context context)
 4093 {
 4094         struct pglist_data *pgdat = zone->zone_pgdat;
 4095         int ret;
 4096         ret = zone_wait_table_init(zone, size);
 4097         if (ret)
 4098                 return ret;
 4099         pgdat->nr_zones = zone_idx(zone) + 1;
 4100 
 4101         zone->zone_start_pfn = zone_start_pfn;
 4102 
 4103         mminit_dprintk(MMINIT_TRACE, "memmap_init",
 4104                         "Initialising map node %d zone %lu pfns %lu -> %lu\n",
 4105                         pgdat->node_id,
 4106                         (unsigned long)zone_idx(zone),
 4107                         zone_start_pfn, (zone_start_pfn + size));
 4108 
 4109         zone_init_free_lists(zone);
 4110 
 4111         return 0;
 4112 }
 4113 
 4114 #ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
 4115 #ifndef CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID
 4116 /*
 4117  * Required by SPARSEMEM. Given a PFN, return what node the PFN is on.
 4118  * Architectures may implement their own version but if add_active_range()
 4119  * was used and there are no special requirements, this is a convenient
 4120  * alternative
 4121  */
 4122 int __meminit __early_pfn_to_nid(unsigned long pfn)
 4123 {
 4124         unsigned long start_pfn, end_pfn;
 4125         int i, nid;
 4126 
 4127         for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid)
 4128                 if (start_pfn <= pfn && pfn < end_pfn)
 4129                         return nid;
 4130         /* This is a memory hole */
 4131         return -1;
 4132 }
 4133 #endif /* CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID */
 4134 
 4135 int __meminit early_pfn_to_nid(unsigned long pfn)
 4136 {
 4137         int nid;
 4138 
 4139         nid = __early_pfn_to_nid(pfn);
 4140         if (nid >= 0)
 4141                 return nid;
 4142         /* just returns 0 */
 4143         return 0;
 4144 }
 4145 
 4146 #ifdef CONFIG_NODES_SPAN_OTHER_NODES
 4147 bool __meminit early_pfn_in_nid(unsigned long pfn, int node)
 4148 {
 4149         int nid;
 4150 
 4151         nid = __early_pfn_to_nid(pfn);
 4152         if (nid >= 0 && nid != node)
 4153                 return false;
 4154         return true;
 4155 }
 4156 #endif
 4157 
 4158 /**
 4159  * free_bootmem_with_active_regions - Call free_bootmem_node for each active range
 4160  * @nid: The node to free memory on. If MAX_NUMNODES, all nodes are freed.
 4161  * @max_low_pfn: The highest PFN that will be passed to free_bootmem_node
 4162  *
 4163  * If an architecture guarantees that all ranges registered with
 4164  * add_active_ranges() contain no holes and may be freed, this
 4165  * function may be used instead of calling free_bootmem() manually.
 4166  */
 4167 void __init free_bootmem_with_active_regions(int nid, unsigned long max_low_pfn)
 4168 {
 4169         unsigned long start_pfn, end_pfn;
 4170         int i, this_nid;
 4171 
 4172         for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, &this_nid) {
 4173                 start_pfn = min(start_pfn, max_low_pfn);
 4174                 end_pfn = min(end_pfn, max_low_pfn);
 4175 
 4176                 if (start_pfn < end_pfn)
 4177                         free_bootmem_node(NODE_DATA(this_nid),
 4178                                           PFN_PHYS(start_pfn),
 4179                                           (end_pfn - start_pfn) << PAGE_SHIFT);
 4180         }
 4181 }
 4182 
 4183 /**
 4184  * sparse_memory_present_with_active_regions - Call memory_present for each active range
 4185  * @nid: The node to call memory_present for. If MAX_NUMNODES, all nodes will be used.
 4186  *
 4187  * If an architecture guarantees that all ranges registered with
 4188  * add_active_ranges() contain no holes and may be freed, this
 4189  * function may be used instead of calling memory_present() manually.
 4190  */
 4191 void __init sparse_memory_present_with_active_regions(int nid)
 4192 {
 4193         unsigned long start_pfn, end_pfn;
 4194         int i, this_nid;
 4195 
 4196         for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, &this_nid)
 4197                 memory_present(this_nid, start_pfn, end_pfn);
 4198 }
 4199 
 4200 /**
 4201  * get_pfn_range_for_nid - Return the start and end page frames for a node
 4202  * @nid: The nid to return the range for. If MAX_NUMNODES, the min and max PFN are returned.
 4203  * @start_pfn: Passed by reference. On return, it will have the node start_pfn.
 4204  * @end_pfn: Passed by reference. On return, it will have the node end_pfn.
 4205  *
 4206  * It returns the start and end page frame of a node based on information
 4207  * provided by an arch calling add_active_range(). If called for a node
 4208  * with no available memory, a warning is printed and the start and end
 4209  * PFNs will be 0.
 4210  */
 4211 void __meminit get_pfn_range_for_nid(unsigned int nid,
 4212                         unsigned long *start_pfn, unsigned long *end_pfn)
 4213 {
 4214         unsigned long this_start_pfn, this_end_pfn;
 4215         int i;
 4216 
 4217         *start_pfn = -1UL;
 4218         *end_pfn = 0;
 4219 
 4220         for_each_mem_pfn_range(i, nid, &this_start_pfn, &this_end_pfn, NULL) {
 4221                 *start_pfn = min(*start_pfn, this_start_pfn);
 4222                 *end_pfn = max(*end_pfn, this_end_pfn);
 4223         }
 4224 
 4225         if (*start_pfn == -1UL)
 4226                 *start_pfn = 0;
 4227 }
 4228 
 4229 /*
 4230  * This finds a zone that can be used for ZONE_MOVABLE pages. The
 4231  * assumption is made that zones within a node are ordered in monotonic
 4232  * increasing memory addresses so that the "highest" populated zone is used
 4233  */
 4234 static void __init find_usable_zone_for_movable(void)
 4235 {
 4236         int zone_index;
 4237         for (zone_index = MAX_NR_ZONES - 1; zone_index >= 0; zone_index--) {
 4238                 if (zone_index == ZONE_MOVABLE)
 4239                         continue;
 4240 
 4241                 if (arch_zone_highest_possible_pfn[zone_index] >
 4242                                 arch_zone_lowest_possible_pfn[zone_index])
 4243                         break;
 4244         }
 4245 
 4246         VM_BUG_ON(zone_index == -1);
 4247         movable_zone = zone_index;
 4248 }
 4249 
 4250 /*
 4251  * The zone ranges provided by the architecture do not include ZONE_MOVABLE
 4252  * because it is sized independently of the architecture. Unlike the other zones,
 4253  * the starting point for ZONE_MOVABLE is not fixed. It may be different
 4254  * in each node depending on the size of each node and how evenly kernelcore
 4255  * is distributed. This helper function adjusts the zone ranges
 4256  * provided by the architecture for a given node by using the end of the
 4257  * highest usable zone for ZONE_MOVABLE. This preserves the assumption that
 4258  * zones within a node are in order of monotonically increasing memory addresses.
 4259  */
 4260 static void __meminit adjust_zone_range_for_zone_movable(int nid,
 4261                                         unsigned long zone_type,
 4262                                         unsigned long node_start_pfn,
 4263                                         unsigned long node_end_pfn,
 4264                                         unsigned long *zone_start_pfn,
 4265                                         unsigned long *zone_end_pfn)
 4266 {
 4267         /* Only adjust if ZONE_MOVABLE is on this node */
 4268         if (zone_movable_pfn[nid]) {
 4269                 /* Size ZONE_MOVABLE */
 4270                 if (zone_type == ZONE_MOVABLE) {
 4271                         *zone_start_pfn = zone_movable_pfn[nid];
 4272                         *zone_end_pfn = min(node_end_pfn,
 4273                                 arch_zone_highest_possible_pfn[movable_zone]);
 4274 
 4275                 /* Adjust for ZONE_MOVABLE starting within this range */
 4276                 } else if (*zone_start_pfn < zone_movable_pfn[nid] &&
 4277                                 *zone_end_pfn > zone_movable_pfn[nid]) {
 4278                         *zone_end_pfn = zone_movable_pfn[nid];
 4279 
 4280                 /* Check if this whole range is within ZONE_MOVABLE */
 4281                 } else if (*zone_start_pfn >= zone_movable_pfn[nid])
 4282                         *zone_start_pfn = *zone_end_pfn;
 4283         }
 4284 }
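/*
 * Worked example (editorial illustration, with made-up PFNs): suppose a node
 * spans PFNs [0x100000, 0x200000) and zone_movable_pfn[nid] is 0x180000.
 * For ZONE_MOVABLE the function returns the range starting at 0x180000 and
 * ending at min(node_end_pfn, arch_zone_highest_possible_pfn[movable_zone]).
 * A kernel zone that originally covered [0x100000, 0x200000) straddles
 * 0x180000, so its end is truncated to 0x180000.  A kernel zone lying
 * entirely at or above 0x180000 gets *zone_start_pfn set to *zone_end_pfn,
 * i.e. it becomes empty on this node.
 */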
 4285 
 4286 /*
 4287  * Return the number of pages a zone spans in a node, including holes
 4288  * present_pages = zone_spanned_pages_in_node() - zone_absent_pages_in_node()
 4289  */
 4290 static unsigned long __meminit zone_spanned_pages_in_node(int nid,
 4291                                         unsigned long zone_type,
 4292                                         unsigned long *ignored)
 4293 {
 4294         unsigned long node_start_pfn, node_end_pfn;
 4295         unsigned long zone_start_pfn, zone_end_pfn;
 4296 
 4297         /* Get the start and end of the node and zone */
 4298         get_pfn_range_for_nid(nid, &node_start_pfn, &node_end_pfn);
 4299         zone_start_pfn = arch_zone_lowest_possible_pfn[zone_type];
 4300         zone_end_pfn = arch_zone_highest_possible_pfn[zone_type];
 4301         adjust_zone_range_for_zone_movable(nid, zone_type,
 4302                                 node_start_pfn, node_end_pfn,
 4303                                 &zone_start_pfn, &zone_end_pfn);
 4304 
 4305         /* Check that this node has pages within the zone's required range */
 4306         if (zone_end_pfn < node_start_pfn || zone_start_pfn > node_end_pfn)
 4307                 return 0;
 4308 
 4309         /* Move the zone boundaries inside the node if necessary */
 4310         zone_end_pfn = min(zone_end_pfn, node_end_pfn);
 4311         zone_start_pfn = max(zone_start_pfn, node_start_pfn);
 4312 
 4313         /* Return the spanned pages */
 4314         return zone_end_pfn - zone_start_pfn;
 4315 }
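/*
 * Worked example (editorial illustration, with made-up PFNs): if the
 * architectural range for a zone is [0x1000, 0x100000) and a node spans
 * [0x200000, 0x400000), then zone_end_pfn < node_start_pfn and the node
 * contributes 0 spanned pages to that zone.  If the node instead spans
 * [0x80000, 0x200000), the boundaries are clamped to [0x80000, 0x100000)
 * and 0x80000 spanned pages are returned.
 */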
 4316 
 4317 /*
 4318  * Return the number of holes in a range on a node. If nid is MAX_NUMNODES,
 4319  * then all holes in the requested range will be accounted for.
 4320  */
 4321 unsigned long __meminit __absent_pages_in_range(int nid,
 4322                                 unsigned long range_start_pfn,
 4323                                 unsigned long range_end_pfn)
 4324 {
 4325         unsigned long nr_absent = range_end_pfn - range_start_pfn;
 4326         unsigned long start_pfn, end_pfn;
 4327         int i;
 4328 
 4329         for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
 4330                 start_pfn = clamp(start_pfn, range_start_pfn, range_end_pfn);
 4331                 end_pfn = clamp(end_pfn, range_start_pfn, range_end_pfn);
 4332                 nr_absent -= end_pfn - start_pfn;
 4333         }
 4334         return nr_absent;
 4335 }
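/*
 * Worked example (editorial illustration): for a request covering PFNs
 * [0, 1000) on a node whose registered ranges are [0, 200) and [500, 800),
 * nr_absent starts at 1000, the first range subtracts 200 and the second
 * subtracts 300, so 500 PFNs are reported as holes.
 */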
 4336 
 4337 /**
 4338  * absent_pages_in_range - Return number of page frames in holes within a range
 4339  * @start_pfn: The start PFN to start searching for holes
 4340  * @end_pfn: The end PFN to stop searching for holes
 4341  *
 4342  * It returns the number of page frames in memory holes within a range.
 4343  */
 4344 unsigned long __init absent_pages_in_range(unsigned long start_pfn,
 4345                                                         unsigned long end_pfn)
 4346 {
 4347         return __absent_pages_in_range(MAX_NUMNODES, start_pfn, end_pfn);
 4348 }
 4349 
 4350 /* Return the number of page frames in holes in a zone on a node */
 4351 static unsigned long __meminit zone_absent_pages_in_node(int nid,
 4352                                         unsigned long zone_type,
 4353                                         unsigned long *ignored)
 4354 {
 4355         unsigned long zone_low = arch_zone_lowest_possible_pfn[zone_type];
 4356         unsigned long zone_high = arch_zone_highest_possible_pfn[zone_type];
 4357         unsigned long node_start_pfn, node_end_pfn;
 4358         unsigned long zone_start_pfn, zone_end_pfn;
 4359 
 4360         get_pfn_range_for_nid(nid, &node_start_pfn, &node_end_pfn);
 4361         zone_start_pfn = clamp(node_start_pfn, zone_low, zone_high);
 4362         zone_end_pfn = clamp(node_end_pfn, zone_low, zone_high);
 4363 
 4364         adjust_zone_range_for_zone_movable(nid, zone_type,
 4365                         node_start_pfn, node_end_pfn,
 4366                         &zone_start_pfn, &zone_end_pfn);
 4367         return __absent_pages_in_range(nid, zone_start_pfn, zone_end_pfn);
 4368 }
 4369 
 4370 #else /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
 4371 static inline unsigned long __meminit zone_spanned_pages_in_node(int nid,
 4372                                         unsigned long zone_type,
 4373                                         unsigned long *zones_size)
 4374 {
 4375         return zones_size[zone_type];
 4376 }
 4377 
 4378 static inline unsigned long __meminit zone_absent_pages_in_node(int nid,
 4379                                                 unsigned long zone_type,
 4380                                                 unsigned long *zholes_size)
 4381 {
 4382         if (!zholes_size)
 4383                 return 0;
 4384 
 4385         return zholes_size[zone_type];
 4386 }
 4387 
 4388 #endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
 4389 
 4390 static void __meminit calculate_node_totalpages(struct pglist_data *pgdat,
 4391                 unsigned long *zones_size, unsigned long *zholes_size)
 4392 {
 4393         unsigned long realtotalpages, totalpages = 0;
 4394         enum zone_type i;
 4395 
 4396         for (i = 0; i < MAX_NR_ZONES; i++)
 4397                 totalpages += zone_spanned_pages_in_node(pgdat->node_id, i,
 4398                                                                 zones_size);
 4399         pgdat->node_spanned_pages = totalpages;
 4400 
 4401         realtotalpages = totalpages;
 4402         for (i = 0; i < MAX_NR_ZONES; i++)
 4403                 realtotalpages -=
 4404                         zone_absent_pages_in_node(pgdat->node_id, i,
 4405                                                                 zholes_size);
 4406         pgdat->node_present_pages = realtotalpages;
 4407         printk(KERN_DEBUG "On node %d totalpages: %lu\n", pgdat->node_id,
 4408                                                         realtotalpages);
 4409 }
 4410 
 4411 #ifndef CONFIG_SPARSEMEM
 4412 /*
 4413  * Calculate the size of the zone->blockflags rounded to an unsigned long
 4414  * Start by making sure zonesize is a multiple of pageblock_nr_pages by rounding
 4415  * up. Then use NR_PAGEBLOCK_BITS bits per pageblock, and finally
 4416  * round what is now in bits up to the nearest long in bits, then return it in
 4417  * bytes.
 4418  */
 4419 static unsigned long __init usemap_size(unsigned long zonesize)
 4420 {
 4421         unsigned long usemapsize;
 4422 
 4423         usemapsize = roundup(zonesize, pageblock_nr_pages);
 4424         usemapsize = usemapsize >> pageblock_order;
 4425         usemapsize *= NR_PAGEBLOCK_BITS;
 4426         usemapsize = roundup(usemapsize, 8 * sizeof(unsigned long));
 4427 
 4428         return usemapsize / 8;
 4429 }
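/*
 * Worked example (editorial illustration; assumes pageblock_order == 9,
 * NR_PAGEBLOCK_BITS == 4 and 64-bit longs, all of which depend on the
 * configuration): a 262144-page zone (1 GiB of 4 KiB pages) is already a
 * multiple of 512 pages, giving 512 pageblocks, 512 * 4 == 2048 bits,
 * which is already a multiple of 64 bits, so usemap_size() returns
 * 2048 / 8 == 256 bytes.
 */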
 4430 
 4431 static void __init setup_usemap(struct pglist_data *pgdat,
 4432                                 struct zone *zone, unsigned long zonesize)
 4433 {
 4434         unsigned long usemapsize = usemap_size(zonesize);
 4435         zone->pageblock_flags = NULL;
 4436         if (usemapsize)
 4437                 zone->pageblock_flags = alloc_bootmem_node_nopanic(pgdat,
 4438                                                                    usemapsize);
 4439 }
 4440 #else
 4441 static inline void setup_usemap(struct pglist_data *pgdat,
 4442                                 struct zone *zone, unsigned long zonesize) {}
 4443 #endif /* CONFIG_SPARSEMEM */
 4444 
 4445 #ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
 4446 
 4447 /* Initialise the number of pages represented by NR_PAGEBLOCK_BITS */
 4448 void __init set_pageblock_order(void)
 4449 {
 4450         unsigned int order;
 4451 
 4452         /* Check that pageblock_nr_pages has not already been setup */
 4453         if (pageblock_order)
 4454                 return;
 4455 
 4456         if (HPAGE_SHIFT > PAGE_SHIFT)
 4457                 order = HUGETLB_PAGE_ORDER;
 4458         else
 4459                 order = MAX_ORDER - 1;
 4460 
 4461         /*
 4462          * Assume the largest contiguous order of interest is a huge page.
 4463          * This value may be variable depending on boot parameters on IA64 and
 4464          * powerpc.
 4465          */
 4466         pageblock_order = order;
 4467 }
 4468 #else /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */
 4469 
 4470 /*
 4471  * When CONFIG_HUGETLB_PAGE_SIZE_VARIABLE is not set, set_pageblock_order()
 4472  * is unused as pageblock_order is set at compile-time. See
 4473  * include/linux/pageblock-flags.h for the values of pageblock_order based on
 4474  * the kernel config
 4475  */
 4476 void __init set_pageblock_order(void)
 4477 {
 4478 }
 4479 
 4480 #endif /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */
 4481 
 4482 static unsigned long __paginginit calc_memmap_size(unsigned long spanned_pages,
 4483                                                    unsigned long present_pages)
 4484 {
 4485         unsigned long pages = spanned_pages;
 4486 
 4487         /*
 4488          * Provide a more accurate estimation if there are holes within
 4489          * the zone and SPARSEMEM is in use. If there are holes within the
 4490          * zone, each populated memory region may cost us one or two extra
 4491          * memmap pages due to alignment because memmap pages for each
 4492          * populated regions may not naturally algined on page boundary.
 4493          * So the (present_pages >> 4) heuristic is a tradeoff for that.
 4494          */
 4495         if (spanned_pages > present_pages + (present_pages >> 4) &&
 4496             IS_ENABLED(CONFIG_SPARSEMEM))
 4497                 pages = present_pages;
 4498 
 4499         return PAGE_ALIGN(pages * sizeof(struct page)) >> PAGE_SHIFT;
 4500 }
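/*
 * Worked example (editorial illustration; assumes 4 KiB pages and
 * sizeof(struct page) == 64): with spanned_pages == 300000 and
 * present_pages == 262144, present_pages + (present_pages >> 4) == 278528,
 * which is less than 300000, so under CONFIG_SPARSEMEM the estimate is
 * based on present_pages: 262144 * 64 bytes == 16 MiB, i.e. 4096 memmap
 * pages.
 */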
 4501 
 4502 /*
 4503  * Set up the zone data structures:
 4504  *   - mark all pages reserved
 4505  *   - mark all memory queues empty
 4506  *   - clear the memory bitmaps
 4507  *
 4508  * NOTE: pgdat should get zeroed by caller.
 4509  */
 4510 static void __paginginit free_area_init_core(struct pglist_data *pgdat,
 4511                 unsigned long *zones_size, unsigned long *zholes_size)
 4512 {
 4513         enum zone_type j;
 4514         int nid = pgdat->node_id;
 4515         unsigned long zone_start_pfn = pgdat->node_start_pfn;
 4516         int ret;
 4517 
 4518         pgdat_resize_init(pgdat);
 4519 #ifdef CONFIG_NUMA_BALANCING
 4520         spin_lock_init(&pgdat->numabalancing_migrate_lock);
 4521         pgdat->numabalancing_migrate_nr_pages = 0;
 4522         pgdat->numabalancing_migrate_next_window = jiffies;
 4523 #endif
 4524         init_waitqueue_head(&pgdat->kswapd_wait);
 4525         init_waitqueue_head(&pgdat->pfmemalloc_wait);
 4526         pgdat_page_cgroup_init(pgdat);
 4527 
 4528         for (j = 0; j < MAX_NR_ZONES; j++) {
 4529                 struct zone *zone = pgdat->node_zones + j;
 4530                 unsigned long size, realsize, freesize, memmap_pages;
 4531 
 4532                 size = zone_spanned_pages_in_node(nid, j, zones_size);
 4533                 realsize = freesize = size - zone_absent_pages_in_node(nid, j,
 4534                                                                 zholes_size);
 4535 
 4536                 /*
 4537                  * Adjust freesize so that it accounts for how much memory
 4538                  * is used by this zone for memmap. This affects the watermark
 4539                  * and per-cpu initialisations
 4540                  */
 4541                 memmap_pages = calc_memmap_size(size, realsize);
 4542                 if (freesize >= memmap_pages) {
 4543                         freesize -= memmap_pages;
 4544                         if (memmap_pages)
 4545                                 printk(KERN_DEBUG
 4546                                        "  %s zone: %lu pages used for memmap\n",
 4547                                        zone_names[j], memmap_pages);
 4548                 } else
 4549                         printk(KERN_WARNING
 4550                                 "  %s zone: %lu pages exceeds freesize %lu\n",
 4551                                 zone_names[j], memmap_pages, freesize);
 4552 
 4553                 /* Account for reserved pages */
 4554                 if (j == 0 && freesize > dma_reserve) {
 4555                         freesize -= dma_reserve;
 4556                         printk(KERN_DEBUG "  %s zone: %lu pages reserved\n",
 4557                                         zone_names[0], dma_reserve);
 4558                 }
 4559 
 4560                 if (!is_highmem_idx(j))
 4561                         nr_kernel_pages += freesize;
 4562                 /* Charge for highmem memmap if there are enough kernel pages */
 4563                 else if (nr_kernel_pages > memmap_pages * 2)
 4564                         nr_kernel_pages -= memmap_pages;
 4565                 nr_all_pages += freesize;
 4566 
 4567                 zone->spanned_pages = size;
 4568                 zone->present_pages = freesize;
 4569                 /*
 4570                  * Set an approximate value for lowmem here, it will be adjusted
 4571                  * when the bootmem allocator frees pages into the buddy system.
 4572                  * And all highmem pages will be managed by the buddy system.
 4573                  */
 4574                 zone->managed_pages = is_highmem_idx(j) ? realsize : freesize;
 4575 #ifdef CONFIG_NUMA
 4576                 zone->node = nid;
 4577                 zone->min_unmapped_pages = (freesize*sysctl_min_unmapped_ratio)
 4578                                                 / 100;
 4579                 zone->min_slab_pages = (freesize * sysctl_min_slab_ratio) / 100;
 4580 #endif
 4581                 zone->name = zone_names[j];
 4582                 spin_lock_init(&zone->lock);
 4583                 spin_lock_init(&zone->lru_lock);
 4584                 zone_seqlock_init(zone);
 4585                 zone->zone_pgdat = pgdat;
 4586 
 4587                 zone_pcp_init(zone);
 4588                 lruvec_init(&zone->lruvec);
 4589                 if (!size)
 4590                         continue;
 4591 
 4592                 set_pageblock_order();
 4593                 setup_usemap(pgdat, zone, size);
 4594                 ret = init_currently_empty_zone(zone, zone_start_pfn,
 4595                                                 size, MEMMAP_EARLY);
 4596                 BUG_ON(ret);
 4597                 memmap_init(size, nid, j, zone_start_pfn);
 4598                 zone_start_pfn += size;
 4599         }
 4600 }
 4601 
 4602 static void __init_refok alloc_node_mem_map(struct pglist_data *pgdat)
 4603 {
 4604         /* Skip empty nodes */
 4605         if (!pgdat->node_spanned_pages)
 4606                 return;
 4607 
 4608 #ifdef CONFIG_FLAT_NODE_MEM_MAP
 4609         /* ia64 gets its own node_mem_map, before this, without bootmem */
 4610         if (!pgdat->node_mem_map) {
 4611                 unsigned long size, start, end;
 4612                 struct page *map;
 4613 
 4614                 /*
 4615                  * The zone's endpoints aren't required to be MAX_ORDER
 4616                  * aligned but the node_mem_map endpoints must be in order
 4617                  * for the buddy allocator to function correctly.
 4618                  */
 4619                 start = pgdat->node_start_pfn & ~(MAX_ORDER_NR_PAGES - 1);
 4620                 end = pgdat->node_start_pfn + pgdat->node_spanned_pages;
 4621                 end = ALIGN(end, MAX_ORDER_NR_PAGES);
 4622                 size =  (end - start) * sizeof(struct page);
 4623                 map = alloc_remap(pgdat->node_id, size);
 4624                 if (!map)
 4625                         map = alloc_bootmem_node_nopanic(pgdat, size);
 4626                 pgdat->node_mem_map = map + (pgdat->node_start_pfn - start);
 4627         }
 4628 #ifndef CONFIG_NEED_MULTIPLE_NODES
 4629         /*
 4630          * With no DISCONTIG, the global mem_map is just set as node 0's
 4631          */
 4632         if (pgdat == NODE_DATA(0)) {
 4633                 mem_map = NODE_DATA(0)->node_mem_map;
 4634 #ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
 4635                 if (page_to_pfn(mem_map) != pgdat->node_start_pfn)
 4636                         mem_map -= (pgdat->node_start_pfn - ARCH_PFN_OFFSET);
 4637 #endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
 4638         }
 4639 #endif
 4640 #endif /* CONFIG_FLAT_NODE_MEM_MAP */
 4641 }
 4642 
 4643 void __paginginit free_area_init_node(int nid, unsigned long *zones_size,
 4644                 unsigned long node_start_pfn, unsigned long *zholes_size)
 4645 {
 4646         pg_data_t *pgdat = NODE_DATA(nid);
 4647 
 4648         /* pg_data_t should be reset to zero when it's allocated */
 4649         WARN_ON(pgdat->nr_zones || pgdat->classzone_idx);
 4650 
 4651         pgdat->node_id = nid;
 4652         pgdat->node_start_pfn = node_start_pfn;
 4653         init_zone_allows_reclaim(nid);
 4654         calculate_node_totalpages(pgdat, zones_size, zholes_size);
 4655 
 4656         alloc_node_mem_map(pgdat);
 4657 #ifdef CONFIG_FLAT_NODE_MEM_MAP
 4658         printk(KERN_DEBUG "free_area_init_node: node %d, pgdat %08lx, node_mem_map %08lx\n",
 4659                 nid, (unsigned long)pgdat,
 4660                 (unsigned long)pgdat->node_mem_map);
 4661 #endif
 4662 
 4663         free_area_init_core(pgdat, zones_size, zholes_size);
 4664 }
 4665 
 4666 #ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
 4667 
 4668 #if MAX_NUMNODES > 1
 4669 /*
 4670  * Figure out the number of possible node ids.
 4671  */
 4672 static void __init setup_nr_node_ids(void)
 4673 {
 4674         unsigned int node;
 4675         unsigned int highest = 0;
 4676 
 4677         for_each_node_mask(node, node_possible_map)
 4678                 highest = node;
 4679         nr_node_ids = highest + 1;
 4680 }
 4681 #else
 4682 static inline void setup_nr_node_ids(void)
 4683 {
 4684 }
 4685 #endif
 4686 
 4687 /**
 4688  * node_map_pfn_alignment - determine the maximum internode alignment
 4689  *
 4690  * This function should be called after node map is populated and sorted.
 4691  * It calculates the maximum power of two alignment which can distinguish
 4692  * all the nodes.
 4693  *
 4694  * For example, if all nodes are 1GiB and aligned to 1GiB, the return value
 4695  * would indicate 1GiB alignment with (1 << (30 - PAGE_SHIFT)).  If the
 4696  * nodes are shifted by 256MiB, it would indicate 256MiB.  Note that if only the last node is
 4697  * shifted, 1GiB is enough and this function will indicate so.
 4698  *
 4699  * This is used to test whether pfn -> nid mapping of the chosen memory
 4700  * model has fine enough granularity to avoid incorrect mapping for the
 4701  * populated node map.
 4702  *
 4703  * Returns the determined alignment in pfn's.  0 if there is no alignment
 4704  * requirement (single node).
 4705  */
 4706 unsigned long __init node_map_pfn_alignment(void)
 4707 {
 4708         unsigned long accl_mask = 0, last_end = 0;
 4709         unsigned long start, end, mask;
 4710         int last_nid = -1;
 4711         int i, nid;
 4712 
 4713         for_each_mem_pfn_range(i, MAX_NUMNODES, &start, &end, &nid) {
 4714                 if (!start || last_nid < 0 || last_nid == nid) {
 4715                         last_nid = nid;
 4716                         last_end = end;
 4717                         continue;
 4718                 }
 4719 
 4720                 /*
 4721                  * Start with a mask granular enough to pin-point to the
 4722                  * start pfn and tick off bits one-by-one until it becomes
 4723                  * too coarse to separate the current node from the last.
 4724                  */
 4725                 mask = ~((1 << __ffs(start)) - 1);
 4726                 while (mask && last_end <= (start & (mask << 1)))
 4727                         mask <<= 1;
 4728 
 4729                 /* accumulate all internode masks */
 4730                 accl_mask |= mask;
 4731         }
 4732 
 4733         /* convert mask to number of pages */
 4734         return ~accl_mask + 1;
 4735 }
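/*
 * Worked example (editorial illustration; 4 KiB pages assumed): two 1 GiB
 * nodes covering PFNs [0, 0x40000) and [0x40000, 0x80000).  When the walk
 * reaches the second node, start == 0x40000, so mask begins as
 * ~((1 << 18) - 1); last_end (0x40000) is not <= (start & (mask << 1)),
 * so the mask is not widened further.  accl_mask ends up ~0x3ffff and the
 * function returns 0x40000 PFNs, i.e. 1 GiB alignment, matching the 1 GiB
 * example in the comment above.
 */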
 4736 
 4737 /* Find the lowest pfn for a node */
 4738 static unsigned long __init find_min_pfn_for_node(int nid)
 4739 {
 4740         unsigned long min_pfn = ULONG_MAX;
 4741         unsigned long start_pfn;
 4742         int i;
 4743 
 4744         for_each_mem_pfn_range(i, nid, &start_pfn, NULL, NULL)
 4745                 min_pfn = min(min_pfn, start_pfn);
 4746 
 4747         if (min_pfn == ULONG_MAX) {
 4748                 printk(KERN_WARNING
 4749                         "Could not find start_pfn for node %d\n", nid);
 4750                 return 0;
 4751         }
 4752 
 4753         return min_pfn;
 4754 }
 4755 
 4756 /**
 4757  * find_min_pfn_with_active_regions - Find the minimum PFN registered
 4758  *
 4759  * It returns the minimum PFN based on information provided via
 4760  * add_active_range().
 4761  */
 4762 unsigned long __init find_min_pfn_with_active_regions(void)
 4763 {
 4764         return find_min_pfn_for_node(MAX_NUMNODES);
 4765 }
 4766 
 4767 /*
 4768  * early_calculate_totalpages()
 4769  * Sum pages in active regions for movable zone.
 4770  * Populate N_MEMORY for calculating usable_nodes.
 4771  */
 4772 static unsigned long __init early_calculate_totalpages(void)
 4773 {
 4774         unsigned long totalpages = 0;
 4775         unsigned long start_pfn, end_pfn;
 4776         int i, nid;
 4777 
 4778         for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid) {
 4779                 unsigned long pages = end_pfn - start_pfn;
 4780 
 4781                 totalpages += pages;
 4782                 if (pages)
 4783                         node_set_state(nid, N_MEMORY);
 4784         }
 4785         return totalpages;
 4786 }
 4787 
 4788 /*
 4789  * Find the PFN at which the Movable zone begins in each node. Kernel memory
 4790  * is spread evenly between nodes as long as the nodes have enough
 4791  * memory. When they don't, some nodes will have more kernelcore than
 4792  * others
 4793  */
 4794 static void __init find_zone_movable_pfns_for_nodes(void)
 4795 {
 4796         int i, nid;
 4797         unsigned long usable_startpfn;
 4798         unsigned long kernelcore_node, kernelcore_remaining;
 4799         /* save the state before borrow the nodemask */
 4800         nodemask_t saved_node_state = node_states[N_MEMORY];
 4801         unsigned long totalpages = early_calculate_totalpages();
 4802         int usable_nodes = nodes_weight(node_states[N_MEMORY]);
 4803 
 4804         /*
 4805          * If movablecore was specified, calculate the corresponding
 4806          * size of kernelcore so that memory usable for
 4807          * any allocation type is evenly spread. If both kernelcore
 4808          * and movablecore are specified, then the value of kernelcore
 4809          * will be used for required_kernelcore if it's greater than
 4810          * what movablecore would have allowed.
 4811          */
 4812         if (required_movablecore) {
 4813                 unsigned long corepages;
 4814 
 4815                 /*
 4816                  * Round-up so that ZONE_MOVABLE is at least as large as what
 4817                  * was requested by the user
 4818                  */
 4819                 required_movablecore =
 4820                         roundup(required_movablecore, MAX_ORDER_NR_PAGES);
 4821                 corepages = totalpages - required_movablecore;
 4822 
 4823                 required_kernelcore = max(required_kernelcore, corepages);
 4824         }
 4825 
 4826         /* If kernelcore was not specified, there is no ZONE_MOVABLE */
 4827         if (!required_kernelcore)
 4828                 goto out;
 4829 
 4830         /* usable_startpfn is the lowest possible pfn ZONE_MOVABLE can be at */
 4831         find_usable_zone_for_movable();
 4832         usable_startpfn = arch_zone_lowest_possible_pfn[movable_zone];
 4833 
 4834 restart:
 4835         /* Spread kernelcore memory as evenly as possible throughout nodes */
 4836         kernelcore_node = required_kernelcore / usable_nodes;
 4837         for_each_node_state(nid, N_MEMORY) {
 4838                 unsigned long start_pfn, end_pfn;
 4839 
 4840                 /*
 4841                  * Recalculate kernelcore_node if the division per node
 4842                  * now exceeds what is necessary to satisfy the requested
 4843                  * amount of memory for the kernel
 4844                  */
 4845                 if (required_kernelcore < kernelcore_node)
 4846                         kernelcore_node = required_kernelcore / usable_nodes;
 4847 
 4848                 /*
 4849                  * As the map is walked, we track how much memory is usable
 4850                  * by the kernel using kernelcore_remaining. When it is
 4851                  * 0, the rest of the node is usable by ZONE_MOVABLE
 4852                  */
 4853                 kernelcore_remaining = kernelcore_node;
 4854 
 4855                 /* Go through each range of PFNs within this node */
 4856                 for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
 4857                         unsigned long size_pages;
 4858 
 4859                         start_pfn = max(start_pfn, zone_movable_pfn[nid]);
 4860                         if (start_pfn >= end_pfn)
 4861                                 continue;
 4862 
 4863                         /* Account for what is only usable for kernelcore */
 4864                         if (start_pfn < usable_startpfn) {
 4865                                 unsigned long kernel_pages;
 4866                                 kernel_pages = min(end_pfn, usable_startpfn)
 4867                                                                 - start_pfn;
 4868 
 4869                                 kernelcore_remaining -= min(kernel_pages,
 4870                                                         kernelcore_remaining);
 4871                                 required_kernelcore -= min(kernel_pages,
 4872                                                         required_kernelcore);
 4873 
 4874                                 /* Continue if range is now fully accounted */
 4875                                 if (end_pfn <= usable_startpfn) {
 4876 
 4877                                         /*
 4878                                          * Push zone_movable_pfn to the end so
 4879                                          * that if we have to rebalance
 4880                                          * kernelcore across nodes, we will
 4881                                          * not double account here
 4882                                          */
 4883                                         zone_movable_pfn[nid] = end_pfn;
 4884                                         continue;
 4885                                 }
 4886                                 start_pfn = usable_startpfn;
 4887                         }
 4888 
 4889                         /*
 4890                          * The usable PFN range for ZONE_MOVABLE is from
 4891                          * start_pfn->end_pfn. Calculate size_pages as the
 4892                          * number of pages used as kernelcore
 4893                          */
 4894                         size_pages = end_pfn - start_pfn;
 4895                         if (size_pages > kernelcore_remaining)
 4896                                 size_pages = kernelcore_remaining;
 4897                         zone_movable_pfn[nid] = start_pfn + size_pages;
 4898 
 4899                         /*
 4900                          * Some kernelcore has been met, update counts and
 4901                          * break if the kernelcore for this node has been
 4902                          * satisfied
 4903                          */
 4904                         required_kernelcore -= min(required_kernelcore,
 4905                                                                 size_pages);
 4906                         kernelcore_remaining -= size_pages;
 4907                         if (!kernelcore_remaining)
 4908                                 break;
 4909                 }
 4910         }
 4911 
 4912         /*
 4913          * If there is still required_kernelcore, we do another pass with one
 4914          * less node in the count. This will push zone_movable_pfn[nid] further
 4915          * along on the nodes that still have memory until kernelcore is
 4916          * satisfied
 4917          */
 4918         usable_nodes--;
 4919         if (usable_nodes && required_kernelcore > usable_nodes)
 4920                 goto restart;
 4921 
 4922         /* Align start of ZONE_MOVABLE on all nids to MAX_ORDER_NR_PAGES */
 4923         for (nid = 0; nid < MAX_NUMNODES; nid++)
 4924                 zone_movable_pfn[nid] =
 4925                         roundup(zone_movable_pfn[nid], MAX_ORDER_NR_PAGES);
 4926 
 4927 out:
 4928         /* restore the node_state */
 4929         node_states[N_MEMORY] = saved_node_state;
 4930 }
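/*
 * Worked example (editorial illustration): booting with kernelcore=2G on a
 * machine with two 4 GiB nodes whose memory lies entirely above
 * usable_startpfn gives kernelcore_node == 1 GiB per node.  Each node's
 * zone_movable_pfn[] then ends up roughly 1 GiB past the node's first PFN
 * (rounded up to MAX_ORDER_NR_PAGES), leaving about 3 GiB per node for
 * ZONE_MOVABLE.
 */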
 4931 
 4932 /* Any regular or high memory on that node ? */
 4933 static void check_for_memory(pg_data_t *pgdat, int nid)
 4934 {
 4935         enum zone_type zone_type;
 4936 
 4937         if (N_MEMORY == N_NORMAL_MEMORY)
 4938                 return;
 4939 
 4940         for (zone_type = 0; zone_type <= ZONE_MOVABLE - 1; zone_type++) {
 4941                 struct zone *zone = &pgdat->node_zones[zone_type];
 4942                 if (zone->present_pages) {
 4943                         node_set_state(nid, N_HIGH_MEMORY);
 4944                         if (N_NORMAL_MEMORY != N_HIGH_MEMORY &&
 4945                             zone_type <= ZONE_NORMAL)
 4946                                 node_set_state(nid, N_NORMAL_MEMORY);
 4947                         break;
 4948                 }
 4949         }
 4950 }
 4951 
 4952 /**
 4953  * free_area_init_nodes - Initialise all pg_data_t and zone data
 4954  * @max_zone_pfn: an array of max PFNs for each zone
 4955  *
 4956  * This will call free_area_init_node() for each active node in the system.
 4957  * Using the page ranges provided by add_active_range(), the sizes of each
 4958  * zone in each node, and of their holes, are calculated. If the maximum PFNs
 4959  * of two adjacent zones match, the higher zone is assumed to be empty.
 4960  * For example, if arch_max_dma_pfn == arch_max_dma32_pfn, it is assumed
 4961  * that arch_max_dma32_pfn has no pages. It is also assumed that a zone
 4962  * starts where the previous one ended. For example, ZONE_DMA32 starts
 4963  * at arch_max_dma_pfn.
 4964  */
 4965 void __init free_area_init_nodes(unsigned long *max_zone_pfn)
 4966 {
 4967         unsigned long start_pfn, end_pfn;
 4968         int i, nid;
 4969 
 4970         /* Record where the zone boundaries are */
 4971         memset(arch_zone_lowest_possible_pfn, 0,
 4972                                 sizeof(arch_zone_lowest_possible_pfn));
 4973         memset(arch_zone_highest_possible_pfn, 0,
 4974                                 sizeof(arch_zone_highest_possible_pfn));
 4975         arch_zone_lowest_possible_pfn[0] = find_min_pfn_with_active_regions();
 4976         arch_zone_highest_possible_pfn[0] = max_zone_pfn[0];
 4977         for (i = 1; i < MAX_NR_ZONES; i++) {
 4978                 if (i == ZONE_MOVABLE)
 4979                         continue;
 4980                 arch_zone_lowest_possible_pfn[i] =
 4981                         arch_zone_highest_possible_pfn[i-1];
 4982                 arch_zone_highest_possible_pfn[i] =
 4983                         max(max_zone_pfn[i], arch_zone_lowest_possible_pfn[i]);
 4984         }
 4985         arch_zone_lowest_possible_pfn[ZONE_MOVABLE] = 0;
 4986         arch_zone_highest_possible_pfn[ZONE_MOVABLE] = 0;
 4987 
 4988         /* Find the PFNs that ZONE_MOVABLE begins at in each node */
 4989         memset(zone_movable_pfn, 0, sizeof(zone_movable_pfn));
 4990         find_zone_movable_pfns_for_nodes();
 4991 
 4992         /* Print out the zone ranges */
 4993         printk("Zone ranges:\n");
 4994         for (i = 0; i < MAX_NR_ZONES; i++) {
 4995                 if (i == ZONE_MOVABLE)
 4996                         continue;
 4997                 printk(KERN_CONT "  %-8s ", zone_names[i]);
 4998                 if (arch_zone_lowest_possible_pfn[i] ==
 4999                                 arch_zone_highest_possible_pfn[i])
 5000                         printk(KERN_CONT "empty\n");
 5001                 else
 5002                         printk(KERN_CONT "[mem %0#10lx-%0#10lx]\n",
 5003                                 arch_zone_lowest_possible_pfn[i] << PAGE_SHIFT,
 5004                                 (arch_zone_highest_possible_pfn[i]
 5005                                         << PAGE_SHIFT) - 1);
 5006         }
 5007 
 5008         /* Print out the PFNs ZONE_MOVABLE begins at in each node */
 5009         printk("Movable zone start for each node\n");
 5010         for (i = 0; i < MAX_NUMNODES; i++) {
 5011                 if (zone_movable_pfn[i])
 5012                         printk("  Node %d: %#010lx\n", i,
 5013                                zone_movable_pfn[i] << PAGE_SHIFT);
 5014         }
 5015 
 5016         /* Print out the early node map */
 5017         printk("Early memory node ranges\n");
 5018         for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid)
 5019                 printk("  node %3d: [mem %#010lx-%#010lx]\n", nid,
 5020                        start_pfn << PAGE_SHIFT, (end_pfn << PAGE_SHIFT) - 1);
 5021 
 5022         /* Initialise every node */
 5023         mminit_verify_pageflags_layout();
 5024         setup_nr_node_ids();
 5025         for_each_online_node(nid) {
 5026                 pg_data_t *pgdat = NODE_DATA(nid);
 5027                 free_area_init_node(nid, NULL,
 5028                                 find_min_pfn_for_node(nid), NULL);
 5029 
 5030                 /* Any memory on that node */
 5031                 if (pgdat->node_present_pages)
 5032                         node_set_state(nid, N_MEMORY);
 5033                 check_for_memory(pgdat, nid);
 5034         }
 5035 }
 5036 
 5037 static int __init cmdline_parse_core(char *p, unsigned long *core)
 5038 {
 5039         unsigned long long coremem;
 5040         if (!p)
 5041                 return -EINVAL;
 5042 
 5043         coremem = memparse(p, &p);
 5044         *core = coremem >> PAGE_SHIFT;
 5045 
 5046         /* Paranoid check that UL is enough for the coremem value */
 5047         WARN_ON((coremem >> PAGE_SHIFT) > ULONG_MAX);
 5048 
 5049         return 0;
 5050 }
 5051 
 5052 /*
 5053  * kernelcore=size sets the amount of memory for use for allocations that
 5054  * cannot be reclaimed or migrated.
 5055  */
 5056 static int __init cmdline_parse_kernelcore(char *p)
 5057 {
 5058         return cmdline_parse_core(p, &required_kernelcore);
 5059 }
 5060 
 5061 /*
 5062  * movablecore=size sets the amount of memory for use for allocations that
 5063  * can be reclaimed or migrated.
 5064  */
 5065 static int __init cmdline_parse_movablecore(char *p)
 5066 {
 5067         return cmdline_parse_core(p, &required_movablecore);
 5068 }
 5069 
 5070 early_param("kernelcore", cmdline_parse_kernelcore);
 5071 early_param("movablecore", cmdline_parse_movablecore);
 5072 
 5073 #endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
 5074 
 5075 /**
 5076  * set_dma_reserve - set the specified number of pages reserved in the first zone
 5077  * @new_dma_reserve: The number of pages to mark reserved
 5078  *
 5079  * The per-cpu batchsize and zone watermarks are determined by present_pages.
 5080  * In the DMA zone, a significant percentage may be consumed by kernel image
 5081  * and other unfreeable allocations which can skew the watermarks badly. This
 5082  * function may optionally be used to account for unfreeable pages in the
 5083  * first zone (e.g., ZONE_DMA). The effect will be lower watermarks and
 5084  * smaller per-cpu batchsize.
 5085  */
 5086 void __init set_dma_reserve(unsigned long new_dma_reserve)
 5087 {
 5088         dma_reserve = new_dma_reserve;
 5089 }
 5090 
 5091 void __init free_area_init(unsigned long *zones_size)
 5092 {
 5093         free_area_init_node(0, zones_size,
 5094                         __pa(PAGE_OFFSET) >> PAGE_SHIFT, NULL);
 5095 }
 5096 
 5097 static int page_alloc_cpu_notify(struct notifier_block *self,
 5098                                  unsigned long action, void *hcpu)
 5099 {
 5100         int cpu = (unsigned long)hcpu;
 5101 
 5102         if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) {
 5103                 lru_add_drain_cpu(cpu);
 5104                 drain_pages(cpu);
 5105 
 5106                 /*
 5107                  * Spill the event counters of the dead processor
 5108                  * into the current processors event counters.
 5109                  * This artificially elevates the count of the current
 5110                  * processor.
 5111                  */
 5112                 vm_events_fold_cpu(cpu);
 5113 
 5114                 /*
 5115                  * Zero the differential counters of the dead processor
 5116                  * so that the vm statistics are consistent.
 5117                  *
 5118                  * This is only okay since the processor is dead and cannot
 5119                  * race with what we are doing.
 5120                  */
 5121                 refresh_cpu_vm_stats(cpu);
 5122         }
 5123         return NOTIFY_OK;
 5124 }
 5125 
 5126 void __init page_alloc_init(void)
 5127 {
 5128         hotcpu_notifier(page_alloc_cpu_notify, 0);
 5129 }
 5130 
 5131 /*
 5132  * calculate_totalreserve_pages - called when sysctl_lowmem_reserve_ratio
 5133  *      or min_free_kbytes changes.
 5134  */
 5135 static void calculate_totalreserve_pages(void)
 5136 {
 5137         struct pglist_data *pgdat;
 5138         unsigned long reserve_pages = 0;
 5139         enum zone_type i, j;
 5140 
 5141         for_each_online_pgdat(pgdat) {
 5142                 for (i = 0; i < MAX_NR_ZONES; i++) {
 5143                         struct zone *zone = pgdat->node_zones + i;
 5144                         unsigned long max = 0;
 5145 
 5146                         /* Find valid and maximum lowmem_reserve in the zone */
 5147                         for (j = i; j < MAX_NR_ZONES; j++) {
 5148                                 if (zone->lowmem_reserve[j] > max)
 5149                                         max = zone->lowmem_reserve[j];
 5150                         }
 5151 
 5152                         /* we treat the high watermark as reserved pages. */
 5153                         max += high_wmark_pages(zone);
 5154 
 5155                         if (max > zone->present_pages)
 5156                                 max = zone->present_pages;
 5157                         reserve_pages += max;
 5158                         /*
 5159                          * Lowmem reserves are not available to
 5160                          * GFP_HIGHUSER page cache allocations and
 5161                          * kswapd tries to balance zones to their high
 5162                          * watermark.  As a result, neither should be
 5163                          * regarded as dirtyable memory, to prevent a
 5164                          * situation where reclaim has to clean pages
 5165                          * in order to balance the zones.
 5166                          */
 5167                         zone->dirty_balance_reserve = max;
 5168                 }
 5169         }
 5170         dirty_balance_reserve = reserve_pages;
 5171         totalreserve_pages = reserve_pages;
 5172 }
 5173 
 5174 /*
 5175  * setup_per_zone_lowmem_reserve - called whenever
 5176  *      sysctl_lowmem_reserve_ratio changes.  Ensures that each zone
 5177  *      has a correct pages reserved value, so an adequate number of
 5178  *      pages are left in the zone after a successful __alloc_pages().
 5179  */
 5180 static void setup_per_zone_lowmem_reserve(void)
 5181 {
 5182         struct pglist_data *pgdat;
 5183         enum zone_type j, idx;
 5184 
 5185         for_each_online_pgdat(pgdat) {
 5186                 for (j = 0; j < MAX_NR_ZONES; j++) {
 5187                         struct zone *zone = pgdat->node_zones + j;
 5188                         unsigned long present_pages = zone->present_pages;
 5189 
 5190                         zone->lowmem_reserve[j] = 0;
 5191 
 5192                         idx = j;
 5193                         while (idx) {
 5194                                 struct zone *lower_zone;
 5195 
 5196                                 idx--;
 5197 
 5198                                 if (sysctl_lowmem_reserve_ratio[idx] < 1)
 5199                                         sysctl_lowmem_reserve_ratio[idx] = 1;
 5200 
 5201                                 lower_zone = pgdat->node_zones + idx;
 5202                                 lower_zone->lowmem_reserve[j] = present_pages /
 5203                                         sysctl_lowmem_reserve_ratio[idx];
 5204                                 present_pages += lower_zone->present_pages;
 5205                         }
 5206                 }
 5207         }
 5208 
 5209         /* update totalreserve_pages */
 5210         calculate_totalreserve_pages();
 5211 }
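/*
 * Worked example (editorial illustration; the ratios below are typical
 * defaults, not taken from this file): with
 * sysctl_lowmem_reserve_ratio[ZONE_NORMAL] == 32 and
 * sysctl_lowmem_reserve_ratio[ZONE_DMA] == 256, a 262144-page highmem zone
 * (j == ZONE_HIGHMEM) makes the inner loop first set
 * NORMAL->lowmem_reserve[ZONE_HIGHMEM] = 262144 / 32 == 8192 pages, then
 * add the Normal zone's own present pages before computing
 * DMA->lowmem_reserve[ZONE_HIGHMEM] = (262144 + normal_pages) / 256.
 */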
 5212 
 5213 static void __setup_per_zone_wmarks(void)
 5214 {
 5215         unsigned long pages_min = min_free_kbytes >> (PAGE_SHIFT - 10);
 5216         unsigned long lowmem_pages = 0;
 5217         struct zone *zone;
 5218         unsigned long flags;
 5219 
 5220         /* Calculate total number of !ZONE_HIGHMEM pages */
 5221         for_each_zone(zone) {
 5222                 if (!is_highmem(zone))
 5223                         lowmem_pages += zone->present_pages;
 5224         }
 5225 
 5226         for_each_zone(zone) {
 5227                 u64 tmp;
 5228 
 5229                 spin_lock_irqsave(&zone->lock, flags);
 5230                 tmp = (u64)pages_min * zone->present_pages;
 5231                 do_div(tmp, lowmem_pages);
 5232                 if (is_highmem(zone)) {
 5233                         /*
 5234                          * __GFP_HIGH and PF_MEMALLOC allocations usually don't
 5235                          * need highmem pages, so cap pages_min to a small
 5236                          * value here.
 5237                          *
 5238                          * The WMARK_HIGH-WMARK_LOW and (WMARK_LOW-WMARK_MIN)
 5239                          * deltas control async page reclaim, and so should
 5240                          * not be capped for highmem.
 5241                          */
 5242                         int min_pages;
 5243 
 5244                         min_pages = zone->present_pages / 1024;
 5245                         if (min_pages < SWAP_CLUSTER_MAX)
 5246                                 min_pages = SWAP_CLUSTER_MAX;
 5247                         if (min_pages > 128)
 5248                                 min_pages = 128;
 5249                         zone->watermark[WMARK_MIN] = min_pages;
 5250                 } else {
 5251                         /*
 5252                          * If it's a lowmem zone, reserve a number of pages
 5253                          * proportionate to the zone's size.
 5254                          */
 5255                         zone->watermark[WMARK_MIN] = tmp;
 5256                 }
 5257 
 5258                 zone->watermark[WMARK_LOW]  = min_wmark_pages(zone) + (tmp >> 2);
 5259                 zone->watermark[WMARK_HIGH] = min_wmark_pages(zone) + (tmp >> 1);
 5260 
 5261                 setup_zone_migrate_reserve(zone);
 5262                 spin_unlock_irqrestore(&zone->lock, flags);
 5263         }
 5264 
 5265         /* update totalreserve_pages */
 5266         calculate_totalreserve_pages();
 5267 }
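/*
 * Worked example (editorial illustration; assumes 4 KiB pages): with
 * min_free_kbytes == 4096, pages_min == 4096 >> 2 == 1024.  A lowmem zone
 * holding half of all lowmem gets tmp == 512, so WMARK_MIN == 512,
 * WMARK_LOW == 512 + 128 == 640 and WMARK_HIGH == 512 + 256 == 768.  A
 * highmem zone with 262144 present pages would instead have WMARK_MIN
 * capped at 128 pages (262144 / 1024 == 256, limited to 128).
 */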
 5268 
 5269 /**
 5270  * setup_per_zone_wmarks - called when min_free_kbytes changes
 5271  * or when memory is hot-{added|removed}
 5272  *
 5273  * Ensures that the watermark[min,low,high] values for each zone are set
 5274  * correctly with respect to min_free_kbytes.
 5275  */
 5276 void setup_per_zone_wmarks(void)
 5277 {
 5278         mutex_lock(&zonelists_mutex);
 5279         __setup_per_zone_wmarks();
 5280         mutex_unlock(&zonelists_mutex);
 5281 }
 5282 
 5283 /*
 5284  * The inactive anon list should be small enough that the VM never has to
 5285  * do too much work, but large enough that each inactive page has a chance
 5286  * to be referenced again before it is swapped out.
 5287  *
 5288  * The inactive_anon ratio is the target ratio of ACTIVE_ANON to
 5289  * INACTIVE_ANON pages on this zone's LRU, maintained by the
 5290  * pageout code. A zone->inactive_ratio of 3 means 3:1 or 25% of
 5291  * the anonymous pages are kept on the inactive list.
 5292  *
 5293  * total     target    max
 5294  * memory    ratio     inactive anon
 5295  * -------------------------------------
 5296  *   10MB       1         5MB
 5297  *  100MB       1        50MB
 5298  *    1GB       3       250MB
 5299  *   10GB      10       0.9GB
 5300  *  100GB      31         3GB
 5301  *    1TB     101        10GB
 5302  *   10TB     320        32GB
 5303  */
 5304 static void __meminit calculate_zone_inactive_ratio(struct zone *zone)
 5305 {
 5306         unsigned int gb, ratio;
 5307 
 5308         /* Zone size in gigabytes */
 5309         gb = zone->present_pages >> (30 - PAGE_SHIFT);
 5310         if (gb)
 5311                 ratio = int_sqrt(10 * gb);
 5312         else
 5313                 ratio = 1;
 5314 
 5315         zone->inactive_ratio = ratio;
 5316 }
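/*
 * Worked example (editorial illustration): a 1 GiB zone gives gb == 1 and
 * inactive_ratio = int_sqrt(10) == 3; a 10 GiB zone gives
 * int_sqrt(100) == 10; zones smaller than 1 GiB fall back to a ratio of 1,
 * matching the table above.
 */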
 5317 
 5318 static void __meminit setup_per_zone_inactive_ratio(void)
 5319 {
 5320         struct zone *zone;
 5321 
 5322         for_each_zone(zone)
 5323                 calculate_zone_inactive_ratio(zone);
 5324 }
 5325 
 5326 /*
 5327  * Initialise min_free_kbytes.
 5328  *
 5329  * For small machines we want it small (128k min).  For large machines
 5330  * we want it large (64MB max).  But it is not linear, because network
 5331  * bandwidth does not increase linearly with machine size.  We use
 5332  *
 5333  *      min_free_kbytes = 4 * sqrt(lowmem_kbytes), for better accuracy:
 5334  *      min_free_kbytes = sqrt(lowmem_kbytes * 16)
 5335  *
 5336  * which yields
 5337  *
 5338  * 16MB:        512k
 5339  * 32MB:        724k
 5340  * 64MB:        1024k
 5341  * 128MB:       1448k
 5342  * 256MB:       2048k
 5343  * 512MB:       2896k
 5344  * 1024MB:      4096k
 5345  * 2048MB:      5792k
 5346  * 4096MB:      8192k
 5347  * 8192MB:      11584k
 5348  * 16384MB:     16384k
 5349  */
 5350 int __meminit init_per_zone_wmark_min(void)
 5351 {
 5352         unsigned long lowmem_kbytes;
 5353 
 5354         lowmem_kbytes = nr_free_buffer_pages() * (PAGE_SIZE >> 10);
 5355 
 5356         min_free_kbytes = int_sqrt(lowmem_kbytes * 16);
 5357         if (min_free_kbytes < 128)
 5358                 min_free_kbytes = 128;
 5359         if (min_free_kbytes > 65536)
 5360                 min_free_kbytes = 65536;
 5361         setup_per_zone_wmarks();
 5362         refresh_zone_stat_thresholds();
 5363         setup_per_zone_lowmem_reserve();
 5364         setup_per_zone_inactive_ratio();
 5365         return 0;
 5366 }
 5367 module_init(init_per_zone_wmark_min)
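/*
 * Worked example (editorial illustration): roughly 4 GiB of lowmem gives
 * lowmem_kbytes ~= 4194304, so min_free_kbytes = int_sqrt(4194304 * 16)
 * = int_sqrt(67108864) = 8192 kB, matching the "4096MB: 8192k" row in the
 * table above.  The clamps keep the result between 128 kB and 65536 kB
 * (64 MB).
 */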
 5368 
 5369 /*
 5370  * min_free_kbytes_sysctl_handler - just a wrapper around proc_dointvec() so 
 5371  *      that we can call setup_per_zone_wmarks() whenever min_free_kbytes
 5372  *      changes.
 5373  */
 5374 int min_free_kbytes_sysctl_handler(ctl_table *table, int write, 
 5375         void __user *buffer, size_t *length, loff_t *ppos)
 5376 {
 5377         proc_dointvec(table, write, buffer, length, ppos);
 5378         if (write)
 5379                 setup_per_zone_wmarks();
 5380         return 0;
 5381 }
 5382 
 5383 #ifdef CONFIG_NUMA
 5384 int sysctl_min_unmapped_ratio_sysctl_handler(ctl_table *table, int write,
 5385         void __user *buffer, size_t *length, loff_t *ppos)
 5386 {
 5387         struct zone *zone;
 5388         int rc;
 5389 
 5390         rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
 5391         if (rc)
 5392                 return rc;
 5393 
 5394         for_each_zone(zone)
 5395                 zone->min_unmapped_pages = (zone->present_pages *
 5396                                 sysctl_min_unmapped_ratio) / 100;
 5397         return 0;
 5398 }
 5399 
 5400 int sysctl_min_slab_ratio_sysctl_handler(ctl_table *table, int write,
 5401         void __user *buffer, size_t *length, loff_t *ppos)
 5402 {
 5403         struct zone *zone;
 5404         int rc;
 5405 
 5406         rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
 5407         if (rc)
 5408                 return rc;
 5409 
 5410         for_each_zone(zone)
 5411                 zone->min_slab_pages = (zone->present_pages *
 5412                                 sysctl_min_slab_ratio) / 100;
 5413         return 0;
 5414 }
 5415 #endif
 5416 
 5417 /*
 5418  * lowmem_reserve_ratio_sysctl_handler - just a wrapper around
 5419  *      proc_dointvec() so that we can call setup_per_zone_lowmem_reserve()
 5420  *      whenever sysctl_lowmem_reserve_ratio changes.
 5421  *
 5422  * The reserve ratio has no relation to the minimum watermarks. The
 5423  * lowmem reserve ratio is only meaningful in relation to the zone
 5424  * sizes established at boot time.
 5425  */
 5426 int lowmem_reserve_ratio_sysctl_handler(ctl_table *table, int write,
 5427         void __user *buffer, size_t *length, loff_t *ppos)
 5428 {
 5429         proc_dointvec_minmax(table, write, buffer, length, ppos);
 5430         setup_per_zone_lowmem_reserve();
 5431         return 0;
 5432 }
 5433 
 5434 /*
 5435  * percpu_pagelist_fraction - changes pcp->high for each zone on each
 5436  * cpu.  It sets the fraction of a zone's total pages that a hot per-cpu
 5437  * pagelist may hold before pages are flushed back to the buddy allocator.
 5438  */
 5439 
 5440 int percpu_pagelist_fraction_sysctl_handler(ctl_table *table, int write,
 5441         void __user *buffer, size_t *length, loff_t *ppos)
 5442 {
 5443         struct zone *zone;
 5444         unsigned int cpu;
 5445         int ret;
 5446 
 5447         ret = proc_dointvec_minmax(table, write, buffer, length, ppos);
 5448         if (!write || (ret < 0))
 5449                 return ret;
 5450         for_each_populated_zone(zone) {
 5451                 for_each_possible_cpu(cpu) {
 5452                         unsigned long  high;
 5453                         high = zone->present_pages / percpu_pagelist_fraction;
 5454                         setup_pagelist_highmark(
 5455                                 per_cpu_ptr(zone->pageset, cpu), high);
 5456                 }
 5457         }
 5458         return 0;
 5459 }
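
/*
 * A worked example with hypothetical numbers: for a zone of 262144 present
 * pages (1GB with 4KB pages) and percpu_pagelist_fraction set to 64, each
 * CPU's hot pagelist gets
 *
 *      high = 262144 / 64 = 4096 pages (16MB)
 *
 * passed to setup_pagelist_highmark(); roughly, once a per-cpu list grows
 * past that mark, pages are returned to the buddy lists in batches.
 */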
 5460 
 5461 int hashdist = HASHDIST_DEFAULT;
 5462 
 5463 #ifdef CONFIG_NUMA
 5464 static int __init set_hashdist(char *str)
 5465 {
 5466         if (!str)
 5467                 return 0;
 5468         hashdist = simple_strtoul(str, &str, 0);
 5469         return 1;
 5470 }
 5471 __setup("hashdist=", set_hashdist);
 5472 #endif
 5473 
 5474 /*
 5475  * allocate a large system hash table from bootmem
 5476  * - it is assumed that the hash table must contain an exact power-of-2
 5477  *   quantity of entries
 5478  * - limit is the number of hash buckets, not the total allocation size
 5479  */
 5480 void *__init alloc_large_system_hash(const char *tablename,
 5481                                      unsigned long bucketsize,
 5482                                      unsigned long numentries,
 5483                                      int scale,
 5484                                      int flags,
 5485                                      unsigned int *_hash_shift,
 5486                                      unsigned int *_hash_mask,
 5487                                      unsigned long low_limit,
 5488                                      unsigned long high_limit)
 5489 {
 5490         unsigned long long max = high_limit;
 5491         unsigned long log2qty, size;
 5492         void *table = NULL;
 5493 
 5494         /* allow the kernel cmdline to have a say */
 5495         if (!numentries) {
 5496                 /* round applicable memory size up to nearest megabyte */
 5497                 numentries = nr_kernel_pages;
 5498                 numentries += (1UL << (20 - PAGE_SHIFT)) - 1;
 5499                 numentries >>= 20 - PAGE_SHIFT;
 5500                 numentries <<= 20 - PAGE_SHIFT;
 5501 
 5502                 /* limit to 1 bucket per 2^scale bytes of low memory */
 5503                 if (scale > PAGE_SHIFT)
 5504                         numentries >>= (scale - PAGE_SHIFT);
 5505                 else
 5506                         numentries <<= (PAGE_SHIFT - scale);
 5507 
 5508                 /* Make sure we've got at least a 0-order allocation... */
 5509                 if (unlikely(flags & HASH_SMALL)) {
 5510                         /* Makes no sense without HASH_EARLY */
 5511                         WARN_ON(!(flags & HASH_EARLY));
 5512                         if (!(numentries >> *_hash_shift)) {
 5513                                 numentries = 1UL << *_hash_shift;
 5514                                 BUG_ON(!numentries);
 5515                         }
 5516                 } else if (unlikely((numentries * bucketsize) < PAGE_SIZE))
 5517                         numentries = PAGE_SIZE / bucketsize;
 5518         }
 5519         numentries = roundup_pow_of_two(numentries);
 5520 
 5521         /* limit allocation size to 1/16 total memory by default */
 5522         if (max == 0) {
 5523                 max = ((unsigned long long)nr_all_pages << PAGE_SHIFT) >> 4;
 5524                 do_div(max, bucketsize);
 5525         }
 5526         max = min(max, 0x80000000ULL);
 5527 
 5528         if (numentries < low_limit)
 5529                 numentries = low_limit;
 5530         if (numentries > max)
 5531                 numentries = max;
 5532 
 5533         log2qty = ilog2(numentries);
 5534 
 5535         do {
 5536                 size = bucketsize << log2qty;
 5537                 if (flags & HASH_EARLY)
 5538                         table = alloc_bootmem_nopanic(size);
 5539                 else if (hashdist)
 5540                         table = __vmalloc(size, GFP_ATOMIC, PAGE_KERNEL);
 5541                 else {
 5542                         /*
 5543                          * If bucketsize is not a power of two, we may need
 5544                          * to free some pages at the end of the hash table;
 5545                          * alloc_pages_exact() does that automatically.
 5546                          */
 5547                         if (get_order(size) < MAX_ORDER) {
 5548                                 table = alloc_pages_exact(size, GFP_ATOMIC);
 5549                                 kmemleak_alloc(table, size, 1, GFP_ATOMIC);
 5550                         }
 5551                 }
 5552         } while (!table && size > PAGE_SIZE && --log2qty);
 5553 
 5554         if (!table)
 5555                 panic("Failed to allocate %s hash table\n", tablename);
 5556 
 5557         printk(KERN_INFO "%s hash table entries: %ld (order: %d, %lu bytes)\n",
 5558                tablename,
 5559                (1UL << log2qty),
 5560                ilog2(size) - PAGE_SHIFT,
 5561                size);
 5562 
 5563         if (_hash_shift)
 5564                 *_hash_shift = log2qty;
 5565         if (_hash_mask)
 5566                 *_hash_mask = (1 << log2qty) - 1;
 5567 
 5568         return table;
 5569 }
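
/*
 * A worked sizing example with hypothetical inputs: numentries == 0,
 * nr_kernel_pages = 262144 (1GB of 4KB pages), scale = 17, bucketsize = 16
 * and no explicit limits.  The code above then computes roughly
 *
 *      numentries = 262144                     (already a whole number of MB)
 *      numentries >>= (17 - PAGE_SHIFT)        = 262144 >> 5 = 8192
 *      roundup_pow_of_two(8192)                = 8192
 *      log2qty = 13,  size = 16 << 13          = 131072 bytes (32 pages)
 *
 * so the table ends up with 8192 buckets, *_hash_shift = 13 and
 * *_hash_mask = 0x1fff.
 */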
 5570 
 5571 /* Return a pointer to the bitmap storing bits affecting a block of pages */
 5572 static inline unsigned long *get_pageblock_bitmap(struct zone *zone,
 5573                                                         unsigned long pfn)
 5574 {
 5575 #ifdef CONFIG_SPARSEMEM
 5576         return __pfn_to_section(pfn)->pageblock_flags;
 5577 #else
 5578         return zone->pageblock_flags;
 5579 #endif /* CONFIG_SPARSEMEM */
 5580 }
 5581 
 5582 static inline int pfn_to_bitidx(struct zone *zone, unsigned long pfn)
 5583 {
 5584 #ifdef CONFIG_SPARSEMEM
 5585         pfn &= (PAGES_PER_SECTION-1);
 5586         return (pfn >> pageblock_order) * NR_PAGEBLOCK_BITS;
 5587 #else
 5588         pfn = pfn - round_down(zone->zone_start_pfn, pageblock_nr_pages);
 5589         return (pfn >> pageblock_order) * NR_PAGEBLOCK_BITS;
 5590 #endif /* CONFIG_SPARSEMEM */
 5591 }
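
/*
 * A worked example with assumed values (pageblock_order = 10,
 * NR_PAGEBLOCK_BITS = 4, SPARSEMEM with PAGES_PER_SECTION = 32768): for
 * pfn 40000,
 *
 *      pfn & (PAGES_PER_SECTION - 1) = 40000 & 32767 = 7232
 *      bitidx = (7232 >> 10) * 4     = 7 * 4         = 28
 *
 * so the flags for that page's pageblock start at bit 28 of its section's
 * pageblock_flags bitmap.
 */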
 5592 
 5593 /**
 5594  * get_pageblock_flags_group - Return the requested group of flags for the pageblock_nr_pages block of pages
 5595  * @page: The page within the block of interest
 5596  * @start_bitidx: The first bit of interest to retrieve
 5597  * @end_bitidx: The last bit of interest
 5598  * returns pageblock_bits flags
 5599  */
 5600 unsigned long get_pageblock_flags_group(struct page *page,
 5601                                         int start_bitidx, int end_bitidx)
 5602 {
 5603         struct zone *zone;
 5604         unsigned long *bitmap;
 5605         unsigned long pfn, bitidx;
 5606         unsigned long flags = 0;
 5607         unsigned long value = 1;
 5608 
 5609         zone = page_zone(page);
 5610         pfn = page_to_pfn(page);
 5611         bitmap = get_pageblock_bitmap(zone, pfn);
 5612         bitidx = pfn_to_bitidx(zone, pfn);
 5613 
 5614         for (; start_bitidx <= end_bitidx; start_bitidx++, value <<= 1)
 5615                 if (test_bit(bitidx + start_bitidx, bitmap))
 5616                         flags |= value;
 5617 
 5618         return flags;
 5619 }
 5620 
 5621 /**
 5622  * set_pageblock_flags_group - Set the requested group of flags for a pageblock_nr_pages block of pages
 5623  * @page: The page within the block of interest
 5624  * @start_bitidx: The first bit of interest
 5625  * @end_bitidx: The last bit of interest
 5626  * @flags: The flags to set
 5627  */
 5628 void set_pageblock_flags_group(struct page *page, unsigned long flags,
 5629                                         int start_bitidx, int end_bitidx)
 5630 {
 5631         struct zone *zone;
 5632         unsigned long *bitmap;
 5633         unsigned long pfn, bitidx;
 5634         unsigned long value = 1;
 5635 
 5636         zone = page_zone(page);
 5637         pfn = page_to_pfn(page);
 5638         bitmap = get_pageblock_bitmap(zone, pfn);
 5639         bitidx = pfn_to_bitidx(zone, pfn);
 5640         VM_BUG_ON(pfn < zone->zone_start_pfn);
 5641         VM_BUG_ON(pfn >= zone->zone_start_pfn + zone->spanned_pages);
 5642 
 5643         for (; start_bitidx <= end_bitidx; start_bitidx++, value <<= 1)
 5644                 if (flags & value)
 5645                         __set_bit(bitidx + start_bitidx, bitmap);
 5646                 else
 5647                         __clear_bit(bitidx + start_bitidx, bitmap);
 5648 }
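
/*
 * For example, the pageblock migratetype helpers used throughout this file
 * are built on this interface; set_pageblock_migratetype() boils down to
 * roughly
 *
 *      set_pageblock_flags_group(page, (unsigned long)migratetype,
 *                                PB_migrate, PB_migrate_end);
 *
 * and get_pageblock_migratetype() reads the same bit range back via
 * get_pageblock_flags_group().
 */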
 5649 
 5650 /*
 5651  * This function checks whether the pageblock includes unmovable pages.
 5652  * If @count is not zero, it is okay to include up to @count unmovable pages.
 5653  *
 5654  * The PageLRU check without isolation or the lru_lock can race, so a
 5655  * MIGRATE_MOVABLE block might still include unmovable pages. This means
 5656  * this function cannot be expected to be exact.
 5657  */
 5658 bool has_unmovable_pages(struct zone *zone, struct page *page, int count,
 5659                          bool skip_hwpoisoned_pages)
 5660 {
 5661         unsigned long pfn, iter, found;
 5662         int mt;
 5663 
 5664         /*
 5665          * To avoid noisy results, lru_add_drain_all() should be called first.
 5666          * A ZONE_MOVABLE zone never contains unmovable pages.
 5667          */
 5668         if (zone_idx(zone) == ZONE_MOVABLE)
 5669                 return false;
 5670         mt = get_pageblock_migratetype(page);
 5671         if (mt == MIGRATE_MOVABLE || is_migrate_cma(mt))
 5672                 return false;
 5673 
 5674         pfn = page_to_pfn(page);
 5675         for (found = 0, iter = 0; iter < pageblock_nr_pages; iter++) {
 5676                 unsigned long check = pfn + iter;
 5677 
 5678                 if (!pfn_valid_within(check))
 5679                         continue;
 5680 
 5681                 page = pfn_to_page(check);
 5682                 /*
 5683                  * We can't use page_count without pinning the page
 5684                  * because another CPU could free the compound page.
 5685                  * This check already skips compound tails of THP
 5686                  * because their page->_count is always zero.
 5687                  */
 5688                 if (!atomic_read(&page->_count)) {
 5689                         if (PageBuddy(page))
 5690                                 iter += (1 << page_order(page)) - 1;
 5691                         continue;
 5692                 }
 5693 
 5694                 /*
 5695                  * A HWPoisoned page may not be in the buddy system, and
 5696                  * its page_count() may not be 0.
 5697                  */
 5698                 if (skip_hwpoisoned_pages && PageHWPoison(page))
 5699                         continue;
 5700 
 5701                 if (!PageLRU(page))
 5702                         found++;
 5703                 /*
 5704                  * If there are RECLAIMABLE pages, we would need to check them,
 5705                  * but memory offlining does not currently call shrink_slab();
 5706                  * this still needs to be fixed.
 5707                  */
 5708                 /*
 5709                  * If the page is not RAM, page_count() should be 0 and
 5710                  * no further checks are needed. This is a _used_, non-movable page.
 5711                  *
 5712                  * The problematic case is PG_reserved pages. PG_reserved
 5713                  * is set at boot both for memory hole pages and for _used_
 5714                  * kernel pages.
 5715                  */
 5716                 if (found > count)
 5717                         return true;
 5718         }
 5719         return false;
 5720 }
 5721 
 5722 bool is_pageblock_removable_nolock(struct page *page)
 5723 {
 5724         struct zone *zone;
 5725         unsigned long pfn;
 5726 
 5727         /*
 5728          * We have to be careful here because we are iterating over memory
 5729          * sections, which are not zone aware, so we might end up outside of
 5730          * the zone but still within the section.
 5731          * We also have to be careful about the node: if the node is offline,
 5732          * its NODE_DATA will be NULL - see page_zone.
 5733          */
 5734         if (!node_online(page_to_nid(page)))
 5735                 return false;
 5736 
 5737         zone = page_zone(page);
 5738         pfn = page_to_pfn(page);
 5739         if (zone->zone_start_pfn > pfn ||
 5740                         zone->zone_start_pfn + zone->spanned_pages <= pfn)
 5741                 return false;
 5742 
 5743         return !has_unmovable_pages(zone, page, 0, true);
 5744 }
 5745 
 5746 #ifdef CONFIG_CMA
 5747 
 5748 static unsigned long pfn_max_align_down(unsigned long pfn)
 5749 {
 5750         return pfn & ~(max_t(unsigned long, MAX_ORDER_NR_PAGES,
 5751                              pageblock_nr_pages) - 1);
 5752 }
 5753 
 5754 static unsigned long pfn_max_align_up(unsigned long pfn)
 5755 {
 5756         return ALIGN(pfn, max_t(unsigned long, MAX_ORDER_NR_PAGES,
 5757                                 pageblock_nr_pages));
 5758 }
 5759 
 5760 /* [start, end) must belong to a single zone. */
 5761 static int __alloc_contig_migrate_range(struct compact_control *cc,
 5762                                         unsigned long start, unsigned long end)
 5763 {
 5764         /* This function is based on compact_zone() from compaction.c. */
 5765         unsigned long nr_reclaimed;
 5766         unsigned long pfn = start;
 5767         unsigned int tries = 0;
 5768         int ret = 0;
 5769 
 5770         migrate_prep();
 5771 
 5772         while (pfn < end || !list_empty(&cc->migratepages)) {
 5773                 if (fatal_signal_pending(current)) {
 5774                         ret = -EINTR;
 5775                         break;
 5776                 }
 5777 
 5778                 if (list_empty(&cc->migratepages)) {
 5779                         cc->nr_migratepages = 0;
 5780                         pfn = isolate_migratepages_range(cc->zone, cc,
 5781                                                          pfn, end, true);
 5782                         if (!pfn) {
 5783                                 ret = -EINTR;
 5784                                 break;
 5785                         }
 5786                         tries = 0;
 5787                 } else if (++tries == 5) {
 5788                         ret = ret < 0 ? ret : -EBUSY;
 5789                         break;
 5790                 }
 5791 
 5792                 nr_reclaimed = reclaim_clean_pages_from_list(cc->zone,
 5793                                                         &cc->migratepages);
 5794                 cc->nr_migratepages -= nr_reclaimed;
 5795 
 5796                 ret = migrate_pages(&cc->migratepages,
 5797                                     alloc_migrate_target,
 5798                                     0, false, MIGRATE_SYNC,
 5799                                     MR_CMA);
 5800         }
 5801 
 5802         putback_movable_pages(&cc->migratepages);
 5803         return ret > 0 ? 0 : ret;
 5804 }
 5805 
 5806 /**
 5807  * alloc_contig_range() -- tries to allocate given range of pages
 5808  * @start:      start PFN to allocate
 5809  * @end:        one-past-the-last PFN to allocate
 5810  * @migratetype:        migratetype of the underlying pageblocks (either
 5811  *                      #MIGRATE_MOVABLE or #MIGRATE_CMA).  All pageblocks
 5812  *                      in range must have the same migratetype and it must
 5813  *                      be either of the two.
 5814  *
 5815  * The PFN range does not have to be pageblock or MAX_ORDER_NR_PAGES
 5816  * aligned; however, it is the caller's responsibility to guarantee that
 5817  * we are the only thread that changes the migrate type of the pageblocks
 5818  * the pages fall in.
 5819  *
 5820  * The PFN range must belong to a single zone.
 5821  *
 5822  * Returns zero on success or a negative error code.  On success all
 5823  * pages whose PFN is in [start, end) are allocated for the caller and
 5824  * need to be freed with free_contig_range().
 5825  */
 5826 int alloc_contig_range(unsigned long start, unsigned long end,
 5827                        unsigned migratetype)
 5828 {
 5829         unsigned long outer_start, outer_end;
 5830         int ret = 0, order;
 5831 
 5832         struct compact_control cc = {
 5833                 .nr_migratepages = 0,
 5834                 .order = -1,
 5835                 .zone = page_zone(pfn_to_page(start)),
 5836                 .sync = true,
 5837                 .ignore_skip_hint = true,
 5838         };
 5839         INIT_LIST_HEAD(&cc.migratepages);
 5840 
 5841         /*
 5842          * What we do here is mark all pageblocks in the range as
 5843          * MIGRATE_ISOLATE.  Because pageblocks and max-order pages may
 5844          * have different sizes, and because of the way the page allocator
 5845          * works, we align the range to the larger of the two so that
 5846          * the page allocator won't try to merge buddies from
 5847          * different pageblocks and change MIGRATE_ISOLATE to some
 5848          * other migration type.
 5849          *
 5850          * Once the pageblocks are marked as MIGRATE_ISOLATE, we
 5851          * migrate the pages from the unaligned range (ie. the pages that
 5852          * we are interested in).  This puts all the pages in the
 5853          * range back into the page allocator as MIGRATE_ISOLATE.
 5854          *
 5855          * When this is done, we take the pages in the range from the page
 5856          * allocator, removing them from the buddy system.  This way the
 5857          * page allocator will never consider using them.
 5858          *
 5859          * This lets us mark the pageblocks back as
 5860          * MIGRATE_CMA/MIGRATE_MOVABLE so that free pages in the
 5861          * aligned range, but not in the unaligned original range, are
 5862          * put back into the page allocator for the buddy system to use.
 5863          */
 5864 
 5865         ret = start_isolate_page_range(pfn_max_align_down(start),
 5866                                        pfn_max_align_up(end), migratetype,
 5867                                        false);
 5868         if (ret)
 5869                 return ret;
 5870 
 5871         ret = __alloc_contig_migrate_range(&cc, start, end);
 5872         if (ret)
 5873                 goto done;
 5874 
 5875         /*
 5876          * Pages from [start, end) are within MAX_ORDER_NR_PAGES
 5877          * aligned blocks that are marked as MIGRATE_ISOLATE.  What's
 5878          * more, all pages in [start, end) are free in the page allocator.
 5879          * What we are going to do is allocate all pages from
 5880          * [start, end) (that is, remove them from the page allocator).
 5881          *
 5882          * The only problem is that pages at the beginning and at the
 5883          * end of the range of interest may not be aligned with the pages
 5884          * the page allocator holds, ie. they can be part of higher-order
 5885          * pages.  Because of this, we reserve the bigger range and,
 5886          * once this is done, free the pages we are not interested in.
 5887          *
 5888          * We don't have to hold zone->lock here because the pages are
 5889          * isolated and thus won't be removed from the buddy system.
 5890          */
 5891 
 5892         lru_add_drain_all();
 5893         drain_all_pages();
 5894 
 5895         order = 0;
 5896         outer_start = start;
 5897         while (!PageBuddy(pfn_to_page(outer_start))) {
 5898                 if (++order >= MAX_ORDER) {
 5899                         ret = -EBUSY;
 5900                         goto done;
 5901                 }
 5902                 outer_start &= ~0UL << order;
 5903         }
 5904 
 5905         /* Make sure the range is really isolated. */
 5906         if (test_pages_isolated(outer_start, end, false)) {
 5907                 pr_warn("alloc_contig_range test_pages_isolated(%lx, %lx) failed\n",
 5908                        outer_start, end);
 5909                 ret = -EBUSY;
 5910                 goto done;
 5911         }
 5912 
 5913 
 5914         /* Grab isolated pages from freelists. */
 5915         outer_end = isolate_freepages_range(&cc, outer_start, end);
 5916         if (!outer_end) {
 5917                 ret = -EBUSY;
 5918                 goto done;
 5919         }
 5920 
 5921         /* Free head and tail (if any) */
 5922         if (start != outer_start)
 5923                 free_contig_range(outer_start, start - outer_start);
 5924         if (end != outer_end)
 5925                 free_contig_range(end, outer_end - end);
 5926 
 5927 done:
 5928         undo_isolate_page_range(pfn_max_align_down(start),
 5929                                 pfn_max_align_up(end), migratetype);
 5930         return ret;
 5931 }
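
/*
 * A minimal usage sketch with a hypothetical caller and pfn range: a
 * driver needing 1024 physically contiguous pages from a MIGRATE_CMA
 * region could do something like
 *
 *      int err;
 *
 *      err = alloc_contig_range(start_pfn, start_pfn + 1024, MIGRATE_CMA);
 *      if (err)
 *              return err;
 *      use_pages(pfn_to_page(start_pfn), 1024);
 *      free_contig_range(start_pfn, 1024);
 *
 * where start_pfn and use_pages() are stand-ins for the caller's own
 * bookkeeping.  In this kernel the CMA allocator uses this interface on
 * behalf of drivers.
 */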
 5932 
 5933 void free_contig_range(unsigned long pfn, unsigned nr_pages)
 5934 {
 5935         unsigned int count = 0;
 5936 
 5937         for (; nr_pages--; pfn++) {
 5938                 struct page *page = pfn_to_page(pfn);
 5939 
 5940                 count += page_count(page) != 1;
 5941                 __free_page(page);
 5942         }
 5943         WARN(count != 0, "%d pages are still in use!\n", count);
 5944 }
 5945 #endif
 5946 
 5947 #ifdef CONFIG_MEMORY_HOTPLUG
 5948 static int __meminit __zone_pcp_update(void *data)
 5949 {
 5950         struct zone *zone = data;
 5951         int cpu;
 5952         unsigned long batch = zone_batchsize(zone), flags;
 5953 
 5954         for_each_possible_cpu(cpu) {
 5955                 struct per_cpu_pageset *pset;
 5956                 struct per_cpu_pages *pcp;
 5957 
 5958                 pset = per_cpu_ptr(zone->pageset, cpu);
 5959                 pcp = &pset->pcp;
 5960 
 5961                 local_irq_save(flags);
 5962                 if (pcp->count > 0)
 5963                         free_pcppages_bulk(zone, pcp->count, pcp);
 5964                 drain_zonestat(zone, pset);
 5965                 setup_pageset(pset, batch);
 5966                 local_irq_restore(flags);
 5967         }
 5968         return 0;
 5969 }
 5970 
 5971 void __meminit zone_pcp_update(struct zone *zone)
 5972 {
 5973         stop_machine(__zone_pcp_update, zone, NULL);
 5974 }
 5975 #endif
 5976 
 5977 void zone_pcp_reset(struct zone *zone)
 5978 {
 5979         unsigned long flags;
 5980         int cpu;
 5981         struct per_cpu_pageset *pset;
 5982 
 5983         /* avoid races with drain_pages()  */
 5984         local_irq_save(flags);
 5985         if (zone->pageset != &boot_pageset) {
 5986                 for_each_online_cpu(cpu) {
 5987                         pset = per_cpu_ptr(zone->pageset, cpu);
 5988                         drain_zonestat(zone, pset);
 5989                 }
 5990                 free_percpu(zone->pageset);
 5991                 zone->pageset = &boot_pageset;
 5992         }
 5993         local_irq_restore(flags);
 5994 }
 5995 
 5996 #ifdef CONFIG_MEMORY_HOTREMOVE
 5997 /*
 5998  * All pages in the range must be isolated before calling this.
 5999  */
 6000 void
 6001 __offline_isolated_pages(unsigned long start_pfn, unsigned long end_pfn)
 6002 {
 6003         struct page *page;
 6004         struct zone *zone;
 6005         int order, i;
 6006         unsigned long pfn;
 6007         unsigned long flags;
 6008         /* find the first valid pfn */
 6009         for (pfn = start_pfn; pfn < end_pfn; pfn++)
 6010                 if (pfn_valid(pfn))
 6011                         break;
 6012         if (pfn == end_pfn)
 6013                 return;
 6014         zone = page_zone(pfn_to_page(pfn));
 6015         spin_lock_irqsave(&zone->lock, flags);
 6016         pfn = start_pfn;
 6017         while (pfn < end_pfn) {
 6018                 if (!pfn_valid(pfn)) {
 6019                         pfn++;
 6020                         continue;
 6021                 }
 6022                 page = pfn_to_page(pfn);
 6023                 /*
 6024                  * A HWPoisoned page may not be in the buddy system, and
 6025                  * its page_count() may not be 0.
 6026                  */
 6027                 if (unlikely(!PageBuddy(page) && PageHWPoison(page))) {
 6028                         pfn++;
 6029                         SetPageReserved(page);
 6030                         continue;
 6031                 }
 6032 
 6033                 BUG_ON(page_count(page));
 6034                 BUG_ON(!PageBuddy(page));
 6035                 order = page_order(page);
 6036 #ifdef CONFIG_DEBUG_VM
 6037                 printk(KERN_INFO "remove from free list %lx %d %lx\n",
 6038                        pfn, 1 << order, end_pfn);
 6039 #endif
 6040                 list_del(&page->lru);
 6041                 rmv_page_order(page);
 6042                 zone->free_area[order].nr_free--;
 6043                 for (i = 0; i < (1 << order); i++)
 6044                         SetPageReserved((page+i));
 6045                 pfn += (1 << order);
 6046         }
 6047         spin_unlock_irqrestore(&zone->lock, flags);
 6048 }
 6049 #endif
 6050 
 6051 #ifdef CONFIG_MEMORY_FAILURE
 6052 bool is_free_buddy_page(struct page *page)
 6053 {
 6054         struct zone *zone = page_zone(page);
 6055         unsigned long pfn = page_to_pfn(page);
 6056         unsigned long flags;
 6057         int order;
 6058 
 6059         spin_lock_irqsave(&zone->lock, flags);
 6060         for (order = 0; order < MAX_ORDER; order++) {
 6061                 struct page *page_head = page - (pfn & ((1 << order) - 1));
 6062 
 6063                 if (PageBuddy(page_head) && page_order(page_head) >= order)
 6064                         break;
 6065         }
 6066         spin_unlock_irqrestore(&zone->lock, flags);
 6067 
 6068         return order < MAX_ORDER;
 6069 }
 6070 #endif
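
/*
 * A worked example of the probe in is_free_buddy_page(), for a page at
 * pfn 0x1234: the loop checks the candidate buddy heads
 *
 *      order 0..2: pfn 0x1234
 *      order 3..4: pfn 0x1230
 *      order 5:    pfn 0x1220
 *      ...
 *
 * and reports the page as part of a free buddy block as soon as one of
 * those heads is PageBuddy with a recorded order of at least the probed
 * order, since such a block necessarily covers pfn 0x1234.
 */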
 6071 
 6072 static const struct trace_print_flags pageflag_names[] = {
 6073         {1UL << PG_locked,              "locked"        },
 6074         {1UL << PG_error,               "error"         },
 6075         {1UL << PG_referenced,          "referenced"    },
 6076         {1UL << PG_uptodate,            "uptodate"      },
 6077         {1UL << PG_dirty,               "dirty"         },
 6078         {1UL << PG_lru,                 "lru"           },
 6079         {1UL << PG_active,              "active"        },
 6080         {1UL << PG_slab,                "slab"          },
 6081         {1UL << PG_owner_priv_1,        "owner_priv_1"  },
 6082         {1UL << PG_arch_1,              "arch_1"        },
 6083         {1UL << PG_reserved,            "reserved"      },
 6084         {1UL << PG_private,             "private"       },
 6085         {1UL << PG_private_2,           "private_2"     },
 6086         {1UL << PG_writeback,           "writeback"     },
 6087 #ifdef CONFIG_PAGEFLAGS_EXTENDED
 6088         {1UL << PG_head,                "head"          },
 6089         {1UL << PG_tail,                "tail"          },
 6090 #else
 6091         {1UL << PG_compound,            "compound"      },
 6092 #endif
 6093         {1UL << PG_swapcache,           "swapcache"     },
 6094         {1UL << PG_mappedtodisk,        "mappedtodisk"  },
 6095         {1UL << PG_reclaim,             "reclaim"       },
 6096         {1UL << PG_swapbacked,          "swapbacked"    },
 6097         {1UL << PG_unevictable,         "unevictable"   },
 6098 #ifdef CONFIG_MMU
 6099         {1UL << PG_mlocked,             "mlocked"       },
 6100 #endif
 6101 #ifdef CONFIG_ARCH_USES_PG_UNCACHED
 6102         {1UL << PG_uncached,            "uncached"      },
 6103 #endif
 6104 #ifdef CONFIG_MEMORY_FAILURE
 6105         {1UL << PG_hwpoison,            "hwpoison"      },
 6106 #endif
 6107 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 6108         {1UL << PG_compound_lock,       "compound_lock" },
 6109 #endif
 6110 };
 6111 
 6112 static void dump_page_flags(unsigned long flags)
 6113 {
 6114         const char *delim = "";
 6115         unsigned long mask;
 6116         int i;
 6117 
 6118         BUILD_BUG_ON(ARRAY_SIZE(pageflag_names) != __NR_PAGEFLAGS);
 6119 
 6120         printk(KERN_ALERT "page flags: %#lx(", flags);
 6121 
 6122         /* remove zone id */
 6123         flags &= (1UL << NR_PAGEFLAGS) - 1;
 6124 
 6125         for (i = 0; i < ARRAY_SIZE(pageflag_names) && flags; i++) {
 6126 
 6127                 mask = pageflag_names[i].mask;
 6128                 if ((flags & mask) != mask)
 6129                         continue;
 6130 
 6131                 flags &= ~mask;
 6132                 printk("%s%s", delim, pageflag_names[i].name);
 6133                 delim = "|";
 6134         }
 6135 
 6136         /* check for left over flags */
 6137         if (flags)
 6138                 printk("%s%#lx", delim, flags);
 6139 
 6140         printk(")\n");
 6141 }
 6142 
 6143 void dump_page(struct page *page)
 6144 {
 6145         printk(KERN_ALERT
 6146                "page:%p count:%d mapcount:%d mapping:%p index:%#lx\n",
 6147                 page, atomic_read(&page->_count), page_mapcount(page),
 6148                 page->mapping, page->index);
 6149         dump_page_flags(page->flags);
 6150         mem_cgroup_print_bad_page(page);
 6151 }
