
FreeBSD/Linux Kernel Cross Reference
linux/mm/vmstat.c


    1 /*
    2  *  linux/mm/vmstat.c
    3  *
    4  *  Manages VM statistics
    5  *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
    6  *
    7  *  zoned VM statistics
    8  *  Copyright (C) 2006 Silicon Graphics, Inc.,
    9  *              Christoph Lameter <christoph@lameter.com>
   10  */
   11 #include <linux/fs.h>
   12 #include <linux/mm.h>
   13 #include <linux/err.h>
   14 #include <linux/module.h>
   15 #include <linux/slab.h>
   16 #include <linux/cpu.h>
   17 #include <linux/vmstat.h>
   18 #include <linux/sched.h>
   19 #include <linux/math64.h>
   20 #include <linux/writeback.h>
   21 #include <linux/compaction.h>
   22 
   23 #ifdef CONFIG_VM_EVENT_COUNTERS
   24 DEFINE_PER_CPU(struct vm_event_state, vm_event_states) = {{0}};
   25 EXPORT_PER_CPU_SYMBOL(vm_event_states);
   26 
   27 static void sum_vm_events(unsigned long *ret)
   28 {
   29         int cpu;
   30         int i;
   31 
   32         memset(ret, 0, NR_VM_EVENT_ITEMS * sizeof(unsigned long));
   33 
   34         for_each_online_cpu(cpu) {
   35                 struct vm_event_state *this = &per_cpu(vm_event_states, cpu);
   36 
   37                 for (i = 0; i < NR_VM_EVENT_ITEMS; i++)
   38                         ret[i] += this->event[i];
   39         }
   40 }
   41 
   42 /*
   43  * Accumulate the vm event counters across all CPUs.
   44  * The result is unavoidably approximate - it can change
   45  * during and after execution of this function.
    46  */
   47 void all_vm_events(unsigned long *ret)
   48 {
   49         get_online_cpus();
   50         sum_vm_events(ret);
   51         put_online_cpus();
   52 }
   53 EXPORT_SYMBOL_GPL(all_vm_events);
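
/*
 * Editorial sketch: a minimal user-space model of the per-CPU event
 * counters above, using a fixed array in place of per_cpu() and a
 * plain loop in place of for_each_online_cpu().  The names NCPU,
 * NEVENTS and model_* are illustrative, not kernel API; as with
 * sum_vm_events(), the total is only approximate if other threads
 * keep counting while it runs.
 */
#include <string.h>

#define NCPU    4
#define NEVENTS 8

static unsigned long model_events[NCPU][NEVENTS];   /* per-"CPU" counters */

static void model_sum_events(unsigned long *ret)
{
        /* Same shape as sum_vm_events(): zero, then fold each CPU. */
        memset(ret, 0, NEVENTS * sizeof(unsigned long));
        for (int cpu = 0; cpu < NCPU; cpu++)
                for (int i = 0; i < NEVENTS; i++)
                        ret[i] += model_events[cpu][i];
}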
   54 
   55 #ifdef CONFIG_HOTPLUG
   56 /*
   57  * Fold the foreign cpu events into our own.
   58  *
    59  * This adds to the events on one processor
    60  * but keeps the global counts constant.
   61  */
   62 void vm_events_fold_cpu(int cpu)
   63 {
   64         struct vm_event_state *fold_state = &per_cpu(vm_event_states, cpu);
   65         int i;
   66 
   67         for (i = 0; i < NR_VM_EVENT_ITEMS; i++) {
   68                 count_vm_events(i, fold_state->event[i]);
   69                 fold_state->event[i] = 0;
   70         }
   71 }
   72 #endif /* CONFIG_HOTPLUG */
   73 
   74 #endif /* CONFIG_VM_EVENT_COUNTERS */
   75 
   76 /*
   77  * Manage combined zone based / global counters
   78  *
   79  * vm_stat contains the global counters
   80  */
   81 atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS] __cacheline_aligned_in_smp;
   82 EXPORT_SYMBOL(vm_stat);
   83 
   84 #ifdef CONFIG_SMP
   85 
   86 int calculate_pressure_threshold(struct zone *zone)
   87 {
   88         int threshold;
   89         int watermark_distance;
   90 
   91         /*
   92          * As vmstats are not up to date, there is drift between the estimated
   93          * and real values. For high thresholds and a high number of CPUs, it
   94          * is possible for the min watermark to be breached while the estimated
   95          * value looks fine. The pressure threshold is a reduced value such
   96          * that even the maximum amount of drift will not accidentally breach
   97          * the min watermark
   98          */
   99         watermark_distance = low_wmark_pages(zone) - min_wmark_pages(zone);
  100         threshold = max(1, (int)(watermark_distance / num_online_cpus()));
  101 
  102         /*
  103          * Maximum threshold is 125
  104          */
  105         threshold = min(125, threshold);
  106 
  107         return threshold;
  108 }
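
/*
 * Editorial worked example (illustrative numbers): if the min->low
 * watermark gap is 1024 pages and 16 CPUs are online, the pressure
 * threshold is max(1, 1024 / 16) = 64.  Even if every CPU's diff sits
 * just below 64 pages, the total unaccounted drift (16 * 64 = 1024)
 * stays within the gap, so an estimate that still looks at or above
 * the low watermark can never hide a breach of the min watermark.
 */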
  109 
  110 int calculate_normal_threshold(struct zone *zone)
  111 {
  112         int threshold;
  113         int mem;        /* memory in 128 MB units */
  114 
  115         /*
  116          * The threshold scales with the number of processors and the amount
  117          * of memory per zone. More memory means that we can defer updates for
   118          * longer; more processors could lead to more contention.
  119          * fls() is used to have a cheap way of logarithmic scaling.
  120          *
  121          * Some sample thresholds:
  122          *
  123          * Threshold    Processors      (fls)   Zonesize        fls(mem+1)
  124          * ------------------------------------------------------------------
  125          * 8            1               1       0.9-1 GB        4
  126          * 16           2               2       0.9-1 GB        4
  127          * 20           2               2       1-2 GB          5
  128          * 24           2               2       2-4 GB          6
  129          * 28           2               2       4-8 GB          7
  130          * 32           2               2       8-16 GB         8
  131          * 4            2               2       <128M           1
  132          * 30           4               3       2-4 GB          5
  133          * 48           4               3       8-16 GB         8
  134          * 32           8               4       1-2 GB          4
  135          * 32           8               4       0.9-1GB         4
  136          * 10           16              5       <128M           1
  137          * 40           16              5       900M            4
  138          * 70           64              7       2-4 GB          5
  139          * 84           64              7       4-8 GB          6
  140          * 108          512             9       4-8 GB          6
  141          * 125          1024            10      8-16 GB         8
  142          * 125          1024            10      16-32 GB        9
  143          */
  144 
  145         mem = zone->present_pages >> (27 - PAGE_SHIFT);
  146 
  147         threshold = 2 * fls(num_online_cpus()) * (1 + fls(mem));
  148 
  149         /*
  150          * Maximum threshold is 125
  151          */
  152         threshold = min(125, threshold);
  153 
  154         return threshold;
  155 }
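
/*
 * Editorial worked example: 2 online CPUs and a 1.5 GB zone.
 * fls(2) = 2; mem = 1536 MB / 128 MB = 12 and fls(12) = 4, so
 * threshold = 2 * 2 * (1 + 4) = 20, matching the "1-2 GB" row of
 * the table above.
 */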
  156 
  157 /*
  158  * Refresh the thresholds for each zone.
  159  */
  160 void refresh_zone_stat_thresholds(void)
  161 {
  162         struct zone *zone;
  163         int cpu;
  164         int threshold;
  165 
  166         for_each_populated_zone(zone) {
  167                 unsigned long max_drift, tolerate_drift;
  168 
  169                 threshold = calculate_normal_threshold(zone);
  170 
  171                 for_each_online_cpu(cpu)
  172                         per_cpu_ptr(zone->pageset, cpu)->stat_threshold
  173                                                         = threshold;
  174 
  175                 /*
  176                  * Only set percpu_drift_mark if there is a danger that
  177                  * NR_FREE_PAGES reports the low watermark is ok when in fact
  178                  * the min watermark could be breached by an allocation
  179                  */
  180                 tolerate_drift = low_wmark_pages(zone) - min_wmark_pages(zone);
  181                 max_drift = num_online_cpus() * threshold;
  182                 if (max_drift > tolerate_drift)
  183                         zone->percpu_drift_mark = high_wmark_pages(zone) +
  184                                         max_drift;
  185         }
  186 }
  187 
  188 void set_pgdat_percpu_threshold(pg_data_t *pgdat,
  189                                 int (*calculate_pressure)(struct zone *))
  190 {
  191         struct zone *zone;
  192         int cpu;
  193         int threshold;
  194         int i;
  195 
  196         for (i = 0; i < pgdat->nr_zones; i++) {
  197                 zone = &pgdat->node_zones[i];
  198                 if (!zone->percpu_drift_mark)
  199                         continue;
  200 
  201                 threshold = (*calculate_pressure)(zone);
  202                 for_each_possible_cpu(cpu)
  203                         per_cpu_ptr(zone->pageset, cpu)->stat_threshold
  204                                                         = threshold;
  205         }
  206 }
  207 
  208 /*
  209  * For use when we know that interrupts are disabled.
  210  */
  211 void __mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
  212                                 int delta)
  213 {
  214         struct per_cpu_pageset __percpu *pcp = zone->pageset;
  215         s8 __percpu *p = pcp->vm_stat_diff + item;
  216         long x;
  217         long t;
  218 
  219         x = delta + __this_cpu_read(*p);
  220 
  221         t = __this_cpu_read(pcp->stat_threshold);
  222 
  223         if (unlikely(x > t || x < -t)) {
  224                 zone_page_state_add(x, zone, item);
  225                 x = 0;
  226         }
  227         __this_cpu_write(*p, x);
  228 }
  229 EXPORT_SYMBOL(__mod_zone_page_state);
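
/*
 * Editorial sketch: the diff/threshold scheme of __mod_zone_page_state()
 * modelled in user space, with a C11 atomic standing in for the zone's
 * vm_stat counter and a thread-local char for the per-CPU diff.  All
 * names are illustrative.
 */
#include <stdatomic.h>

static atomic_long model_vm_stat;               /* shared, like vm_stat   */
static _Thread_local signed char model_diff;    /* like pcp->vm_stat_diff */
static const signed char model_threshold = 32;  /* like stat_threshold    */

static void model_mod_state(long delta)
{
        long x = delta + model_diff;

        /* Spill to the shared counter only when the local diff overflows;
         * cheap thread-local updates absorb the common case. */
        if (x > model_threshold || x < -model_threshold) {
                atomic_fetch_add(&model_vm_stat, x);
                x = 0;
        }
        model_diff = x;
}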
  230 
  231 /*
  232  * Optimized increment and decrement functions.
  233  *
  234  * These are only for a single page and therefore can take a struct page *
  235  * argument instead of struct zone *. This allows the inclusion of the code
  236  * generated for page_zone(page) into the optimized functions.
  237  *
  238  * No overflow check is necessary and therefore the differential can be
  239  * incremented or decremented in place which may allow the compilers to
  240  * generate better code.
  241  * The increment or decrement is known and therefore one boundary check can
  242  * be omitted.
  243  *
  244  * NOTE: These functions are very performance sensitive. Change only
  245  * with care.
  246  *
  247  * Some processors have inc/dec instructions that are atomic vs an interrupt.
  248  * However, the code must first determine the differential location in a zone
  249  * based on the processor number and then inc/dec the counter. There is no
  250  * guarantee without disabling preemption that the processor will not change
  251  * in between and therefore the atomicity vs. interrupt cannot be exploited
  252  * in a useful way here.
  253  */
  254 void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
  255 {
  256         struct per_cpu_pageset __percpu *pcp = zone->pageset;
  257         s8 __percpu *p = pcp->vm_stat_diff + item;
  258         s8 v, t;
  259 
  260         v = __this_cpu_inc_return(*p);
  261         t = __this_cpu_read(pcp->stat_threshold);
  262         if (unlikely(v > t)) {
  263                 s8 overstep = t >> 1;
  264 
  265                 zone_page_state_add(v + overstep, zone, item);
  266                 __this_cpu_write(*p, -overstep);
  267         }
  268 }
  269 
  270 void __inc_zone_page_state(struct page *page, enum zone_stat_item item)
  271 {
  272         __inc_zone_state(page_zone(page), item);
  273 }
  274 EXPORT_SYMBOL(__inc_zone_page_state);
  275 
  276 void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
  277 {
  278         struct per_cpu_pageset __percpu *pcp = zone->pageset;
  279         s8 __percpu *p = pcp->vm_stat_diff + item;
  280         s8 v, t;
  281 
  282         v = __this_cpu_dec_return(*p);
  283         t = __this_cpu_read(pcp->stat_threshold);
   284         if (unlikely(v < -t)) {
  285                 s8 overstep = t >> 1;
  286 
  287                 zone_page_state_add(v - overstep, zone, item);
  288                 __this_cpu_write(*p, overstep);
  289         }
  290 }
  291 
  292 void __dec_zone_page_state(struct page *page, enum zone_stat_item item)
  293 {
  294         __dec_zone_state(page_zone(page), item);
  295 }
  296 EXPORT_SYMBOL(__dec_zone_page_state);
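
/*
 * Editorial note on the "overstep" above: when an increment pushes the
 * per-CPU diff past the threshold t, the handler credits v + t/2 to
 * the zone counter and restarts the diff at -t/2 rather than 0.  A
 * counter that keeps growing then needs about 1.5 * t further
 * increments, not t, before it must touch the shared cacheline again.
 */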
  297 
  298 #ifdef CONFIG_HAVE_CMPXCHG_LOCAL
  299 /*
  300  * If we have cmpxchg_local support then we do not need to incur the overhead
  301  * that comes with local_irq_save/restore if we use this_cpu_cmpxchg.
  302  *
  303  * mod_state() modifies the zone counter state through atomic per cpu
  304  * operations.
  305  *
   306  * Overstep mode specifies how overstep should be handled:
   307  *     0       No overstepping
   308  *     1       Overstepping half of threshold
   309  *     -1      Overstepping minus half of threshold
   310  */
  311 static inline void mod_state(struct zone *zone,
  312        enum zone_stat_item item, int delta, int overstep_mode)
  313 {
  314         struct per_cpu_pageset __percpu *pcp = zone->pageset;
  315         s8 __percpu *p = pcp->vm_stat_diff + item;
  316         long o, n, t, z;
  317 
  318         do {
  319                 z = 0;  /* overflow to zone counters */
  320 
  321                 /*
  322                  * The fetching of the stat_threshold is racy. We may apply
   323                  * a counter threshold to the wrong cpu if we get
  324                  * rescheduled while executing here. However, the next
  325                  * counter update will apply the threshold again and
  326                  * therefore bring the counter under the threshold again.
  327                  *
   328                  * Most of the time the thresholds are the same anyway
  329                  * for all cpus in a zone.
  330                  */
  331                 t = this_cpu_read(pcp->stat_threshold);
  332 
  333                 o = this_cpu_read(*p);
  334                 n = delta + o;
  335 
  336                 if (n > t || n < -t) {
   337                         int os = overstep_mode * (t >> 1);
  338 
  339                         /* Overflow must be added to zone counters */
  340                         z = n + os;
  341                         n = -os;
  342                 }
  343         } while (this_cpu_cmpxchg(*p, o, n) != o);
  344 
  345         if (z)
  346                 zone_page_state_add(z, zone, item);
  347 }
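
/*
 * Editorial sketch: the retry pattern mod_state() relies on, modelled
 * with C11 atomic_compare_exchange_weak in place of this_cpu_cmpxchg().
 * The name model_cmpxchg_add is illustrative.
 */
#include <stdatomic.h>

static void model_cmpxchg_add(_Atomic long *p, long delta)
{
        long o, n;

        do {
                o = atomic_load(p);     /* like this_cpu_read(*p)        */
                n = o + delta;          /* compute the replacement value */
                /* Publish n only if *p is still o; otherwise retry with
                 * a freshly observed value. */
        } while (!atomic_compare_exchange_weak(p, &o, n));
}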
  348 
  349 void mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
  350                                         int delta)
  351 {
  352         mod_state(zone, item, delta, 0);
  353 }
  354 EXPORT_SYMBOL(mod_zone_page_state);
  355 
  356 void inc_zone_state(struct zone *zone, enum zone_stat_item item)
  357 {
  358         mod_state(zone, item, 1, 1);
  359 }
  360 
  361 void inc_zone_page_state(struct page *page, enum zone_stat_item item)
  362 {
  363         mod_state(page_zone(page), item, 1, 1);
  364 }
  365 EXPORT_SYMBOL(inc_zone_page_state);
  366 
  367 void dec_zone_page_state(struct page *page, enum zone_stat_item item)
  368 {
  369         mod_state(page_zone(page), item, -1, -1);
  370 }
  371 EXPORT_SYMBOL(dec_zone_page_state);
  372 #else
  373 /*
  374  * Use interrupt disable to serialize counter updates
  375  */
  376 void mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
  377                                         int delta)
  378 {
  379         unsigned long flags;
  380 
  381         local_irq_save(flags);
  382         __mod_zone_page_state(zone, item, delta);
  383         local_irq_restore(flags);
  384 }
  385 EXPORT_SYMBOL(mod_zone_page_state);
  386 
  387 void inc_zone_state(struct zone *zone, enum zone_stat_item item)
  388 {
  389         unsigned long flags;
  390 
  391         local_irq_save(flags);
  392         __inc_zone_state(zone, item);
  393         local_irq_restore(flags);
  394 }
  395 
  396 void inc_zone_page_state(struct page *page, enum zone_stat_item item)
  397 {
  398         unsigned long flags;
  399         struct zone *zone;
  400 
  401         zone = page_zone(page);
  402         local_irq_save(flags);
  403         __inc_zone_state(zone, item);
  404         local_irq_restore(flags);
  405 }
  406 EXPORT_SYMBOL(inc_zone_page_state);
  407 
  408 void dec_zone_page_state(struct page *page, enum zone_stat_item item)
  409 {
  410         unsigned long flags;
  411 
  412         local_irq_save(flags);
  413         __dec_zone_page_state(page, item);
  414         local_irq_restore(flags);
  415 }
  416 EXPORT_SYMBOL(dec_zone_page_state);
  417 #endif
  418 
  419 /*
  420  * Update the zone counters for one cpu.
  421  *
  422  * The cpu specified must be either the current cpu or a processor that
  423  * is not online. If it is the current cpu then the execution thread must
  424  * be pinned to the current cpu.
  425  *
  426  * Note that refresh_cpu_vm_stats strives to only access
  427  * node local memory. The per cpu pagesets on remote zones are placed
  428  * in the memory local to the processor using that pageset. So the
  429  * loop over all zones will access a series of cachelines local to
  430  * the processor.
  431  *
  432  * The call to zone_page_state_add updates the cachelines with the
  433  * statistics in the remote zone struct as well as the global cachelines
   434  * with the global counters. This can cause remote node cache line
   435  * bouncing, so it should only be done when necessary.
  436  */
  437 void refresh_cpu_vm_stats(int cpu)
  438 {
  439         struct zone *zone;
  440         int i;
  441         int global_diff[NR_VM_ZONE_STAT_ITEMS] = { 0, };
  442 
  443         for_each_populated_zone(zone) {
  444                 struct per_cpu_pageset *p;
  445 
  446                 p = per_cpu_ptr(zone->pageset, cpu);
  447 
  448                 for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
  449                         if (p->vm_stat_diff[i]) {
  450                                 unsigned long flags;
  451                                 int v;
  452 
  453                                 local_irq_save(flags);
  454                                 v = p->vm_stat_diff[i];
  455                                 p->vm_stat_diff[i] = 0;
  456                                 local_irq_restore(flags);
  457                                 atomic_long_add(v, &zone->vm_stat[i]);
  458                                 global_diff[i] += v;
  459 #ifdef CONFIG_NUMA
  460                                 /* 3 seconds idle till flush */
  461                                 p->expire = 3;
  462 #endif
  463                         }
  464                 cond_resched();
  465 #ifdef CONFIG_NUMA
  466                 /*
  467                  * Deal with draining the remote pageset of this
  468                  * processor
  469                  *
  470                  * Check if there are pages remaining in this pageset
  471                  * if not then there is nothing to expire.
  472                  */
  473                 if (!p->expire || !p->pcp.count)
  474                         continue;
  475 
  476                 /*
  477                  * We never drain zones local to this processor.
  478                  */
  479                 if (zone_to_nid(zone) == numa_node_id()) {
  480                         p->expire = 0;
  481                         continue;
  482                 }
  483 
  484                 p->expire--;
  485                 if (p->expire)
  486                         continue;
  487 
  488                 if (p->pcp.count)
  489                         drain_zone_pages(zone, &p->pcp);
  490 #endif
  491         }
  492 
  493         for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
  494                 if (global_diff[i])
  495                         atomic_long_add(global_diff[i], &vm_stat[i]);
  496 }
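
/*
 * Editorial note: the fold above is deliberately two-phase.  Each
 * zone's diffs are pushed into that zone's own counters as they are
 * found, but the heavily shared global vm_stat[] array is updated only
 * once, from global_diff[], after all zones have been walked.
 */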
  497 
  498 void drain_zonestat(struct zone *zone, struct per_cpu_pageset *pset)
  499 {
  500         int i;
  501 
  502         for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
  503                 if (pset->vm_stat_diff[i]) {
  504                         int v = pset->vm_stat_diff[i];
  505                         pset->vm_stat_diff[i] = 0;
  506                         atomic_long_add(v, &zone->vm_stat[i]);
  507                         atomic_long_add(v, &vm_stat[i]);
  508                 }
  509 }
  510 #endif
  511 
  512 #ifdef CONFIG_NUMA
  513 /*
  514  * zonelist = the list of zones passed to the allocator
  515  * z        = the zone from which the allocation occurred.
  516  *
  517  * Must be called with interrupts disabled.
  518  *
  519  * When __GFP_OTHER_NODE is set assume the node of the preferred
  520  * zone is the local node. This is useful for daemons who allocate
  521  * memory on behalf of other processes.
  522  */
  523 void zone_statistics(struct zone *preferred_zone, struct zone *z, gfp_t flags)
  524 {
  525         if (z->zone_pgdat == preferred_zone->zone_pgdat) {
  526                 __inc_zone_state(z, NUMA_HIT);
  527         } else {
  528                 __inc_zone_state(z, NUMA_MISS);
  529                 __inc_zone_state(preferred_zone, NUMA_FOREIGN);
  530         }
  531         if (z->node == ((flags & __GFP_OTHER_NODE) ?
  532                         preferred_zone->node : numa_node_id()))
  533                 __inc_zone_state(z, NUMA_LOCAL);
  534         else
  535                 __inc_zone_state(z, NUMA_OTHER);
  536 }
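
/*
 * Editorial note: each allocation is classified on two independent
 * axes here - NUMA_HIT or NUMA_MISS records whether the preferred
 * zone's node was used (a miss also charges NUMA_FOREIGN to the
 * preferred zone), while NUMA_LOCAL or NUMA_OTHER records whether the
 * zone used belongs to the allocating node.
 */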
  537 #endif
  538 
  539 #ifdef CONFIG_COMPACTION
  540 
  541 struct contig_page_info {
  542         unsigned long free_pages;
  543         unsigned long free_blocks_total;
  544         unsigned long free_blocks_suitable;
  545 };
  546 
  547 /*
  548  * Calculate the number of free pages in a zone, how many contiguous
  549  * pages are free and how many are large enough to satisfy an allocation of
  550  * the target size. Note that this function makes no attempt to estimate
  551  * how many suitable free blocks there *might* be if MOVABLE pages were
  552  * migrated. Calculating that is possible, but expensive and can be
  553  * figured out from userspace
  554  */
  555 static void fill_contig_page_info(struct zone *zone,
  556                                 unsigned int suitable_order,
  557                                 struct contig_page_info *info)
  558 {
  559         unsigned int order;
  560 
  561         info->free_pages = 0;
  562         info->free_blocks_total = 0;
  563         info->free_blocks_suitable = 0;
  564 
  565         for (order = 0; order < MAX_ORDER; order++) {
  566                 unsigned long blocks;
  567 
  568                 /* Count number of free blocks */
  569                 blocks = zone->free_area[order].nr_free;
  570                 info->free_blocks_total += blocks;
  571 
  572                 /* Count free base pages */
  573                 info->free_pages += blocks << order;
  574 
  575                 /* Count the suitable free blocks */
  576                 if (order >= suitable_order)
  577                         info->free_blocks_suitable += blocks <<
  578                                                 (order - suitable_order);
  579         }
  580 }
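
/*
 * Editorial worked example (illustrative numbers): with
 * nr_free = {10, 4, 2} for orders 0..2 and suitable_order = 1,
 * free_blocks_total = 16, free_pages = 10 + 8 + 8 = 26, and
 * free_blocks_suitable = 4 + (2 << 1) = 8, because every order-2
 * block can also be split into two order-1 blocks.
 */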
  581 
  582 /*
  583  * A fragmentation index only makes sense if an allocation of a requested
  584  * size would fail. If that is true, the fragmentation index indicates
  585  * whether external fragmentation or a lack of memory was the problem.
  586  * The value can be used to determine if page reclaim or compaction
  587  * should be used
  588  */
  589 static int __fragmentation_index(unsigned int order, struct contig_page_info *info)
  590 {
  591         unsigned long requested = 1UL << order;
  592 
  593         if (!info->free_blocks_total)
  594                 return 0;
  595 
  596         /* Fragmentation index only makes sense when a request would fail */
  597         if (info->free_blocks_suitable)
  598                 return -1000;
  599 
  600         /*
  601          * Index is between 0 and 1 so return within 3 decimal places
  602          *
  603          * 0 => allocation would fail due to lack of memory
  604          * 1 => allocation would fail due to fragmentation
  605          */
   606         return 1000 - div_u64(1000 + div_u64(info->free_pages * 1000ULL, requested), info->free_blocks_total);
  607 }
  608 
  609 /* Same as __fragmentation index but allocs contig_page_info on stack */
  610 int fragmentation_index(struct zone *zone, unsigned int order)
  611 {
  612         struct contig_page_info info;
  613 
  614         fill_contig_page_info(zone, order, &info);
  615         return __fragmentation_index(order, &info);
  616 }
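
/*
 * Editorial worked example (illustrative numbers): order = 2, so
 * requested = 4; free_pages = 100, free_blocks_total = 50,
 * free_blocks_suitable = 0.  The index is
 * 1000 - (1000 + 100 * 1000 / 4) / 50 = 1000 - 520 = 480, reported
 * as 0.480: the hypothetical failure is roughly half lack of memory,
 * half external fragmentation.
 */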
  617 #endif
  618 
  619 #if defined(CONFIG_PROC_FS) || defined(CONFIG_COMPACTION)
  620 #include <linux/proc_fs.h>
  621 #include <linux/seq_file.h>
  622 
  623 static char * const migratetype_names[MIGRATE_TYPES] = {
  624         "Unmovable",
  625         "Reclaimable",
  626         "Movable",
  627         "Reserve",
  628 #ifdef CONFIG_CMA
  629         "CMA",
  630 #endif
  631         "Isolate",
  632 };
  633 
  634 static void *frag_start(struct seq_file *m, loff_t *pos)
  635 {
  636         pg_data_t *pgdat;
  637         loff_t node = *pos;
  638         for (pgdat = first_online_pgdat();
  639              pgdat && node;
  640              pgdat = next_online_pgdat(pgdat))
  641                 --node;
  642 
  643         return pgdat;
  644 }
  645 
  646 static void *frag_next(struct seq_file *m, void *arg, loff_t *pos)
  647 {
  648         pg_data_t *pgdat = (pg_data_t *)arg;
  649 
  650         (*pos)++;
  651         return next_online_pgdat(pgdat);
  652 }
  653 
  654 static void frag_stop(struct seq_file *m, void *arg)
  655 {
  656 }
  657 
  658 /* Walk all the zones in a node and print using a callback */
  659 static void walk_zones_in_node(struct seq_file *m, pg_data_t *pgdat,
  660                 void (*print)(struct seq_file *m, pg_data_t *, struct zone *))
  661 {
  662         struct zone *zone;
  663         struct zone *node_zones = pgdat->node_zones;
  664         unsigned long flags;
  665 
  666         for (zone = node_zones; zone - node_zones < MAX_NR_ZONES; ++zone) {
  667                 if (!populated_zone(zone))
  668                         continue;
  669 
  670                 spin_lock_irqsave(&zone->lock, flags);
  671                 print(m, pgdat, zone);
  672                 spin_unlock_irqrestore(&zone->lock, flags);
  673         }
  674 }
  675 #endif
  676 
  677 #if defined(CONFIG_PROC_FS) || defined(CONFIG_SYSFS) || defined(CONFIG_NUMA)
  678 #ifdef CONFIG_ZONE_DMA
  679 #define TEXT_FOR_DMA(xx) xx "_dma",
  680 #else
  681 #define TEXT_FOR_DMA(xx)
  682 #endif
  683 
  684 #ifdef CONFIG_ZONE_DMA32
  685 #define TEXT_FOR_DMA32(xx) xx "_dma32",
  686 #else
  687 #define TEXT_FOR_DMA32(xx)
  688 #endif
  689 
  690 #ifdef CONFIG_HIGHMEM
  691 #define TEXT_FOR_HIGHMEM(xx) xx "_high",
  692 #else
  693 #define TEXT_FOR_HIGHMEM(xx)
  694 #endif
  695 
  696 #define TEXTS_FOR_ZONES(xx) TEXT_FOR_DMA(xx) TEXT_FOR_DMA32(xx) xx "_normal", \
  697                                         TEXT_FOR_HIGHMEM(xx) xx "_movable",
  698 
  699 const char * const vmstat_text[] = {
  700         /* Zoned VM counters */
  701         "nr_free_pages",
  702         "nr_inactive_anon",
  703         "nr_active_anon",
  704         "nr_inactive_file",
  705         "nr_active_file",
  706         "nr_unevictable",
  707         "nr_mlock",
  708         "nr_anon_pages",
  709         "nr_mapped",
  710         "nr_file_pages",
  711         "nr_dirty",
  712         "nr_writeback",
  713         "nr_slab_reclaimable",
  714         "nr_slab_unreclaimable",
  715         "nr_page_table_pages",
  716         "nr_kernel_stack",
  717         "nr_unstable",
  718         "nr_bounce",
  719         "nr_vmscan_write",
  720         "nr_vmscan_immediate_reclaim",
  721         "nr_writeback_temp",
  722         "nr_isolated_anon",
  723         "nr_isolated_file",
  724         "nr_shmem",
  725         "nr_dirtied",
  726         "nr_written",
  727 
  728 #ifdef CONFIG_NUMA
  729         "numa_hit",
  730         "numa_miss",
  731         "numa_foreign",
  732         "numa_interleave",
  733         "numa_local",
  734         "numa_other",
  735 #endif
  736         "nr_anon_transparent_hugepages",
  737         "nr_free_cma",
  738         "nr_dirty_threshold",
  739         "nr_dirty_background_threshold",
  740 
  741 #ifdef CONFIG_VM_EVENT_COUNTERS
  742         "pgpgin",
  743         "pgpgout",
  744         "pswpin",
  745         "pswpout",
  746 
  747         TEXTS_FOR_ZONES("pgalloc")
  748 
  749         "pgfree",
  750         "pgactivate",
  751         "pgdeactivate",
  752 
  753         "pgfault",
  754         "pgmajfault",
  755 
  756         TEXTS_FOR_ZONES("pgrefill")
  757         TEXTS_FOR_ZONES("pgsteal_kswapd")
  758         TEXTS_FOR_ZONES("pgsteal_direct")
  759         TEXTS_FOR_ZONES("pgscan_kswapd")
  760         TEXTS_FOR_ZONES("pgscan_direct")
  761         "pgscan_direct_throttle",
  762 
  763 #ifdef CONFIG_NUMA
  764         "zone_reclaim_failed",
  765 #endif
  766         "pginodesteal",
  767         "slabs_scanned",
  768         "kswapd_inodesteal",
  769         "kswapd_low_wmark_hit_quickly",
  770         "kswapd_high_wmark_hit_quickly",
  771         "kswapd_skip_congestion_wait",
  772         "pageoutrun",
  773         "allocstall",
  774 
  775         "pgrotated",
  776 
  777 #ifdef CONFIG_NUMA_BALANCING
  778         "numa_pte_updates",
  779         "numa_hint_faults",
  780         "numa_hint_faults_local",
  781         "numa_pages_migrated",
  782 #endif
  783 #ifdef CONFIG_MIGRATION
  784         "pgmigrate_success",
  785         "pgmigrate_fail",
  786 #endif
  787 #ifdef CONFIG_COMPACTION
  788         "compact_migrate_scanned",
  789         "compact_free_scanned",
  790         "compact_isolated",
  791         "compact_stall",
  792         "compact_fail",
  793         "compact_success",
  794 #endif
  795 
  796 #ifdef CONFIG_HUGETLB_PAGE
  797         "htlb_buddy_alloc_success",
  798         "htlb_buddy_alloc_fail",
  799 #endif
  800         "unevictable_pgs_culled",
  801         "unevictable_pgs_scanned",
  802         "unevictable_pgs_rescued",
  803         "unevictable_pgs_mlocked",
  804         "unevictable_pgs_munlocked",
  805         "unevictable_pgs_cleared",
  806         "unevictable_pgs_stranded",
  807 
  808 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
  809         "thp_fault_alloc",
  810         "thp_fault_fallback",
  811         "thp_collapse_alloc",
  812         "thp_collapse_alloc_failed",
  813         "thp_split",
  814         "thp_zero_page_alloc",
  815         "thp_zero_page_alloc_failed",
  816 #endif
  817 
   818 #endif /* CONFIG_VM_EVENT_COUNTERS */
  819 };
  820 #endif /* CONFIG_PROC_FS || CONFIG_SYSFS || CONFIG_NUMA */
  821 
  822 
  823 #ifdef CONFIG_PROC_FS
  824 static void frag_show_print(struct seq_file *m, pg_data_t *pgdat,
  825                                                 struct zone *zone)
  826 {
  827         int order;
  828 
  829         seq_printf(m, "Node %d, zone %8s ", pgdat->node_id, zone->name);
  830         for (order = 0; order < MAX_ORDER; ++order)
  831                 seq_printf(m, "%6lu ", zone->free_area[order].nr_free);
  832         seq_putc(m, '\n');
  833 }
  834 
  835 /*
  836  * This walks the free areas for each zone.
  837  */
  838 static int frag_show(struct seq_file *m, void *arg)
  839 {
  840         pg_data_t *pgdat = (pg_data_t *)arg;
  841         walk_zones_in_node(m, pgdat, frag_show_print);
  842         return 0;
  843 }
  844 
  845 static void pagetypeinfo_showfree_print(struct seq_file *m,
  846                                         pg_data_t *pgdat, struct zone *zone)
  847 {
  848         int order, mtype;
  849 
  850         for (mtype = 0; mtype < MIGRATE_TYPES; mtype++) {
  851                 seq_printf(m, "Node %4d, zone %8s, type %12s ",
  852                                         pgdat->node_id,
  853                                         zone->name,
  854                                         migratetype_names[mtype]);
  855                 for (order = 0; order < MAX_ORDER; ++order) {
  856                         unsigned long freecount = 0;
  857                         struct free_area *area;
  858                         struct list_head *curr;
  859 
  860                         area = &(zone->free_area[order]);
  861 
  862                         list_for_each(curr, &area->free_list[mtype])
  863                                 freecount++;
  864                         seq_printf(m, "%6lu ", freecount);
  865                 }
  866                 seq_putc(m, '\n');
  867         }
  868 }
  869 
   870 /* Print out the free pages at each order for each migratetype */
  871 static int pagetypeinfo_showfree(struct seq_file *m, void *arg)
  872 {
  873         int order;
  874         pg_data_t *pgdat = (pg_data_t *)arg;
  875 
  876         /* Print header */
  877         seq_printf(m, "%-43s ", "Free pages count per migrate type at order");
  878         for (order = 0; order < MAX_ORDER; ++order)
  879                 seq_printf(m, "%6d ", order);
  880         seq_putc(m, '\n');
  881 
  882         walk_zones_in_node(m, pgdat, pagetypeinfo_showfree_print);
  883 
  884         return 0;
  885 }
  886 
  887 static void pagetypeinfo_showblockcount_print(struct seq_file *m,
  888                                         pg_data_t *pgdat, struct zone *zone)
  889 {
  890         int mtype;
  891         unsigned long pfn;
  892         unsigned long start_pfn = zone->zone_start_pfn;
  893         unsigned long end_pfn = start_pfn + zone->spanned_pages;
  894         unsigned long count[MIGRATE_TYPES] = { 0, };
  895 
  896         for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
  897                 struct page *page;
  898 
  899                 if (!pfn_valid(pfn))
  900                         continue;
  901 
  902                 page = pfn_to_page(pfn);
  903 
  904                 /* Watch for unexpected holes punched in the memmap */
  905                 if (!memmap_valid_within(pfn, page, zone))
  906                         continue;
  907 
  908                 mtype = get_pageblock_migratetype(page);
  909 
  910                 if (mtype < MIGRATE_TYPES)
  911                         count[mtype]++;
  912         }
  913 
  914         /* Print counts */
  915         seq_printf(m, "Node %d, zone %8s ", pgdat->node_id, zone->name);
  916         for (mtype = 0; mtype < MIGRATE_TYPES; mtype++)
  917                 seq_printf(m, "%12lu ", count[mtype]);
  918         seq_putc(m, '\n');
  919 }
  920 
  921 /* Print out the free pages at each order for each migratetype */
  922 static int pagetypeinfo_showblockcount(struct seq_file *m, void *arg)
  923 {
  924         int mtype;
  925         pg_data_t *pgdat = (pg_data_t *)arg;
  926 
  927         seq_printf(m, "\n%-23s", "Number of blocks type ");
  928         for (mtype = 0; mtype < MIGRATE_TYPES; mtype++)
  929                 seq_printf(m, "%12s ", migratetype_names[mtype]);
  930         seq_putc(m, '\n');
  931         walk_zones_in_node(m, pgdat, pagetypeinfo_showblockcount_print);
  932 
  933         return 0;
  934 }
  935 
  936 /*
  937  * This prints out statistics in relation to grouping pages by mobility.
  938  * It is expensive to collect so do not constantly read the file.
  939  */
  940 static int pagetypeinfo_show(struct seq_file *m, void *arg)
  941 {
  942         pg_data_t *pgdat = (pg_data_t *)arg;
  943 
  944         /* check memoryless node */
  945         if (!node_state(pgdat->node_id, N_MEMORY))
  946                 return 0;
  947 
  948         seq_printf(m, "Page block order: %d\n", pageblock_order);
  949         seq_printf(m, "Pages per block:  %lu\n", pageblock_nr_pages);
  950         seq_putc(m, '\n');
  951         pagetypeinfo_showfree(m, pgdat);
  952         pagetypeinfo_showblockcount(m, pgdat);
  953 
  954         return 0;
  955 }
  956 
  957 static const struct seq_operations fragmentation_op = {
  958         .start  = frag_start,
  959         .next   = frag_next,
  960         .stop   = frag_stop,
  961         .show   = frag_show,
  962 };
  963 
  964 static int fragmentation_open(struct inode *inode, struct file *file)
  965 {
  966         return seq_open(file, &fragmentation_op);
  967 }
  968 
  969 static const struct file_operations fragmentation_file_operations = {
  970         .open           = fragmentation_open,
  971         .read           = seq_read,
  972         .llseek         = seq_lseek,
  973         .release        = seq_release,
  974 };
  975 
  976 static const struct seq_operations pagetypeinfo_op = {
  977         .start  = frag_start,
  978         .next   = frag_next,
  979         .stop   = frag_stop,
  980         .show   = pagetypeinfo_show,
  981 };
  982 
  983 static int pagetypeinfo_open(struct inode *inode, struct file *file)
  984 {
  985         return seq_open(file, &pagetypeinfo_op);
  986 }
  987 
  988 static const struct file_operations pagetypeinfo_file_ops = {
  989         .open           = pagetypeinfo_open,
  990         .read           = seq_read,
  991         .llseek         = seq_lseek,
  992         .release        = seq_release,
  993 };
  994 
  995 static void zoneinfo_show_print(struct seq_file *m, pg_data_t *pgdat,
  996                                                         struct zone *zone)
  997 {
  998         int i;
  999         seq_printf(m, "Node %d, zone %8s", pgdat->node_id, zone->name);
 1000         seq_printf(m,
 1001                    "\n  pages free     %lu"
 1002                    "\n        min      %lu"
 1003                    "\n        low      %lu"
 1004                    "\n        high     %lu"
 1005                    "\n        scanned  %lu"
 1006                    "\n        spanned  %lu"
 1007                    "\n        present  %lu"
 1008                    "\n        managed  %lu",
 1009                    zone_page_state(zone, NR_FREE_PAGES),
 1010                    min_wmark_pages(zone),
 1011                    low_wmark_pages(zone),
 1012                    high_wmark_pages(zone),
 1013                    zone->pages_scanned,
 1014                    zone->spanned_pages,
 1015                    zone->present_pages,
 1016                    zone->managed_pages);
 1017 
 1018         for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
 1019                 seq_printf(m, "\n    %-12s %lu", vmstat_text[i],
 1020                                 zone_page_state(zone, i));
 1021 
 1022         seq_printf(m,
 1023                    "\n        protection: (%lu",
 1024                    zone->lowmem_reserve[0]);
 1025         for (i = 1; i < ARRAY_SIZE(zone->lowmem_reserve); i++)
 1026                 seq_printf(m, ", %lu", zone->lowmem_reserve[i]);
 1027         seq_printf(m,
 1028                    ")"
 1029                    "\n  pagesets");
 1030         for_each_online_cpu(i) {
 1031                 struct per_cpu_pageset *pageset;
 1032 
 1033                 pageset = per_cpu_ptr(zone->pageset, i);
 1034                 seq_printf(m,
 1035                            "\n    cpu: %i"
 1036                            "\n              count: %i"
 1037                            "\n              high:  %i"
 1038                            "\n              batch: %i",
 1039                            i,
 1040                            pageset->pcp.count,
 1041                            pageset->pcp.high,
 1042                            pageset->pcp.batch);
 1043 #ifdef CONFIG_SMP
 1044                 seq_printf(m, "\n  vm stats threshold: %d",
 1045                                 pageset->stat_threshold);
 1046 #endif
 1047         }
 1048         seq_printf(m,
 1049                    "\n  all_unreclaimable: %u"
 1050                    "\n  start_pfn:         %lu"
 1051                    "\n  inactive_ratio:    %u",
 1052                    zone->all_unreclaimable,
 1053                    zone->zone_start_pfn,
 1054                    zone->inactive_ratio);
 1055         seq_putc(m, '\n');
 1056 }
 1057 
 1058 /*
 1059  * Output information about zones in @pgdat.
 1060  */
 1061 static int zoneinfo_show(struct seq_file *m, void *arg)
 1062 {
 1063         pg_data_t *pgdat = (pg_data_t *)arg;
 1064         walk_zones_in_node(m, pgdat, zoneinfo_show_print);
 1065         return 0;
 1066 }
 1067 
 1068 static const struct seq_operations zoneinfo_op = {
 1069         .start  = frag_start, /* iterate over all zones. The same as in
 1070                                * fragmentation. */
 1071         .next   = frag_next,
 1072         .stop   = frag_stop,
 1073         .show   = zoneinfo_show,
 1074 };
 1075 
 1076 static int zoneinfo_open(struct inode *inode, struct file *file)
 1077 {
 1078         return seq_open(file, &zoneinfo_op);
 1079 }
 1080 
 1081 static const struct file_operations proc_zoneinfo_file_operations = {
 1082         .open           = zoneinfo_open,
 1083         .read           = seq_read,
 1084         .llseek         = seq_lseek,
 1085         .release        = seq_release,
 1086 };
 1087 
 1088 enum writeback_stat_item {
 1089         NR_DIRTY_THRESHOLD,
 1090         NR_DIRTY_BG_THRESHOLD,
 1091         NR_VM_WRITEBACK_STAT_ITEMS,
 1092 };
 1093 
 1094 static void *vmstat_start(struct seq_file *m, loff_t *pos)
 1095 {
 1096         unsigned long *v;
 1097         int i, stat_items_size;
 1098 
 1099         if (*pos >= ARRAY_SIZE(vmstat_text))
 1100                 return NULL;
 1101         stat_items_size = NR_VM_ZONE_STAT_ITEMS * sizeof(unsigned long) +
 1102                           NR_VM_WRITEBACK_STAT_ITEMS * sizeof(unsigned long);
 1103 
 1104 #ifdef CONFIG_VM_EVENT_COUNTERS
 1105         stat_items_size += sizeof(struct vm_event_state);
 1106 #endif
 1107 
 1108         v = kmalloc(stat_items_size, GFP_KERNEL);
 1109         m->private = v;
 1110         if (!v)
 1111                 return ERR_PTR(-ENOMEM);
 1112         for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
 1113                 v[i] = global_page_state(i);
 1114         v += NR_VM_ZONE_STAT_ITEMS;
 1115 
 1116         global_dirty_limits(v + NR_DIRTY_BG_THRESHOLD,
 1117                             v + NR_DIRTY_THRESHOLD);
 1118         v += NR_VM_WRITEBACK_STAT_ITEMS;
 1119 
 1120 #ifdef CONFIG_VM_EVENT_COUNTERS
 1121         all_vm_events(v);
 1122         v[PGPGIN] /= 2;         /* sectors -> kbytes */
 1123         v[PGPGOUT] /= 2;
 1124 #endif
 1125         return (unsigned long *)m->private + *pos;
 1126 }
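
/*
 * Editorial note: the snapshot built above is one flat array laid out
 * to mirror vmstat_text[]: NR_VM_ZONE_STAT_ITEMS zone counters, then
 * the two writeback thresholds, then the vm_event_state counters.
 * Subsequent vmstat_next()/vmstat_show() calls just index this array
 * with *pos, so one read of /proc/vmstat sees a single snapshot.
 */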
 1127 
 1128 static void *vmstat_next(struct seq_file *m, void *arg, loff_t *pos)
 1129 {
 1130         (*pos)++;
 1131         if (*pos >= ARRAY_SIZE(vmstat_text))
 1132                 return NULL;
 1133         return (unsigned long *)m->private + *pos;
 1134 }
 1135 
 1136 static int vmstat_show(struct seq_file *m, void *arg)
 1137 {
 1138         unsigned long *l = arg;
 1139         unsigned long off = l - (unsigned long *)m->private;
 1140 
 1141         seq_printf(m, "%s %lu\n", vmstat_text[off], *l);
 1142         return 0;
 1143 }
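
/*
 * Editorial note: vmstat_show() emits one "name value" pair per seq
 * element, so /proc/vmstat reads as plain two-column text, e.g.
 * (values illustrative):
 *
 *     nr_free_pages 81925
 *     pgpgin 2407082
 *     pgfault 9177430
 */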
 1144 
 1145 static void vmstat_stop(struct seq_file *m, void *arg)
 1146 {
 1147         kfree(m->private);
 1148         m->private = NULL;
 1149 }
 1150 
 1151 static const struct seq_operations vmstat_op = {
 1152         .start  = vmstat_start,
 1153         .next   = vmstat_next,
 1154         .stop   = vmstat_stop,
 1155         .show   = vmstat_show,
 1156 };
 1157 
 1158 static int vmstat_open(struct inode *inode, struct file *file)
 1159 {
 1160         return seq_open(file, &vmstat_op);
 1161 }
 1162 
 1163 static const struct file_operations proc_vmstat_file_operations = {
 1164         .open           = vmstat_open,
 1165         .read           = seq_read,
 1166         .llseek         = seq_lseek,
 1167         .release        = seq_release,
 1168 };
 1169 #endif /* CONFIG_PROC_FS */
 1170 
 1171 #ifdef CONFIG_SMP
 1172 static DEFINE_PER_CPU(struct delayed_work, vmstat_work);
 1173 int sysctl_stat_interval __read_mostly = HZ;
 1174 
 1175 static void vmstat_update(struct work_struct *w)
 1176 {
 1177         refresh_cpu_vm_stats(smp_processor_id());
 1178         schedule_delayed_work(&__get_cpu_var(vmstat_work),
 1179                 round_jiffies_relative(sysctl_stat_interval));
 1180 }
 1181 
 1182 static void __cpuinit start_cpu_timer(int cpu)
 1183 {
 1184         struct delayed_work *work = &per_cpu(vmstat_work, cpu);
 1185 
 1186         INIT_DEFERRABLE_WORK(work, vmstat_update);
 1187         schedule_delayed_work_on(cpu, work, __round_jiffies_relative(HZ, cpu));
 1188 }
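
/*
 * Editorial note: vmstat_update() reschedules itself, so once
 * start_cpu_timer() has seeded the first run, every online CPU keeps
 * folding its own differentials into the global counters every
 * sysctl_stat_interval jiffies (one second by default) with no
 * central timer.
 */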
 1189 
 1190 /*
  1191  * Use the cpu notifier to ensure that the thresholds are recalculated
 1192  * when necessary.
 1193  */
 1194 static int __cpuinit vmstat_cpuup_callback(struct notifier_block *nfb,
 1195                 unsigned long action,
 1196                 void *hcpu)
 1197 {
 1198         long cpu = (long)hcpu;
 1199 
 1200         switch (action) {
 1201         case CPU_ONLINE:
 1202         case CPU_ONLINE_FROZEN:
 1203                 refresh_zone_stat_thresholds();
 1204                 start_cpu_timer(cpu);
 1205                 node_set_state(cpu_to_node(cpu), N_CPU);
 1206                 break;
 1207         case CPU_DOWN_PREPARE:
 1208         case CPU_DOWN_PREPARE_FROZEN:
 1209                 cancel_delayed_work_sync(&per_cpu(vmstat_work, cpu));
 1210                 per_cpu(vmstat_work, cpu).work.func = NULL;
 1211                 break;
 1212         case CPU_DOWN_FAILED:
 1213         case CPU_DOWN_FAILED_FROZEN:
 1214                 start_cpu_timer(cpu);
 1215                 break;
 1216         case CPU_DEAD:
 1217         case CPU_DEAD_FROZEN:
 1218                 refresh_zone_stat_thresholds();
 1219                 break;
 1220         default:
 1221                 break;
 1222         }
 1223         return NOTIFY_OK;
 1224 }
 1225 
 1226 static struct notifier_block __cpuinitdata vmstat_notifier =
 1227         { &vmstat_cpuup_callback, NULL, 0 };
 1228 #endif
 1229 
 1230 static int __init setup_vmstat(void)
 1231 {
 1232 #ifdef CONFIG_SMP
 1233         int cpu;
 1234 
 1235         register_cpu_notifier(&vmstat_notifier);
 1236 
 1237         for_each_online_cpu(cpu)
 1238                 start_cpu_timer(cpu);
 1239 #endif
 1240 #ifdef CONFIG_PROC_FS
 1241         proc_create("buddyinfo", S_IRUGO, NULL, &fragmentation_file_operations);
 1242         proc_create("pagetypeinfo", S_IRUGO, NULL, &pagetypeinfo_file_ops);
 1243         proc_create("vmstat", S_IRUGO, NULL, &proc_vmstat_file_operations);
 1244         proc_create("zoneinfo", S_IRUGO, NULL, &proc_zoneinfo_file_operations);
 1245 #endif
 1246         return 0;
 1247 }
 1248 module_init(setup_vmstat)
 1249 
 1250 #if defined(CONFIG_DEBUG_FS) && defined(CONFIG_COMPACTION)
 1251 #include <linux/debugfs.h>
 1252 
 1253 
 1254 /*
 1255  * Return an index indicating how much of the available free memory is
 1256  * unusable for an allocation of the requested size.
 1257  */
 1258 static int unusable_free_index(unsigned int order,
 1259                                 struct contig_page_info *info)
 1260 {
 1261         /* No free memory is interpreted as all free memory is unusable */
 1262         if (info->free_pages == 0)
 1263                 return 1000;
 1264 
 1265         /*
 1266          * Index should be a value between 0 and 1. Return a value to 3
 1267          * decimal places.
 1268          *
 1269          * 0 => no fragmentation
 1270          * 1 => high fragmentation
 1271          */
 1272         return div_u64((info->free_pages - (info->free_blocks_suitable << order)) * 1000ULL, info->free_pages);
 1273 
 1274 }
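
/*
 * Editorial worked example (illustrative numbers): order = 2,
 * free_pages = 100, free_blocks_suitable = 10.  The index is
 * (100 - (10 << 2)) * 1000 / 100 = 600, printed as 0.600: 60% of the
 * free memory is unusable for an order-2 allocation.
 */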
 1275 
 1276 static void unusable_show_print(struct seq_file *m,
 1277                                         pg_data_t *pgdat, struct zone *zone)
 1278 {
 1279         unsigned int order;
 1280         int index;
 1281         struct contig_page_info info;
 1282 
 1283         seq_printf(m, "Node %d, zone %8s ",
 1284                                 pgdat->node_id,
 1285                                 zone->name);
 1286         for (order = 0; order < MAX_ORDER; ++order) {
 1287                 fill_contig_page_info(zone, order, &info);
 1288                 index = unusable_free_index(order, &info);
 1289                 seq_printf(m, "%d.%03d ", index / 1000, index % 1000);
 1290         }
 1291 
 1292         seq_putc(m, '\n');
 1293 }
 1294 
 1295 /*
 1296  * Display unusable free space index
 1297  *
 1298  * The unusable free space index measures how much of the available free
 1299  * memory cannot be used to satisfy an allocation of a given size and is a
 1300  * value between 0 and 1. The higher the value, the more of free memory is
 1301  * unusable and by implication, the worse the external fragmentation is. This
 1302  * can be expressed as a percentage by multiplying by 100.
 1303  */
 1304 static int unusable_show(struct seq_file *m, void *arg)
 1305 {
 1306         pg_data_t *pgdat = (pg_data_t *)arg;
 1307 
 1308         /* check memoryless node */
 1309         if (!node_state(pgdat->node_id, N_MEMORY))
 1310                 return 0;
 1311 
 1312         walk_zones_in_node(m, pgdat, unusable_show_print);
 1313 
 1314         return 0;
 1315 }
 1316 
 1317 static const struct seq_operations unusable_op = {
 1318         .start  = frag_start,
 1319         .next   = frag_next,
 1320         .stop   = frag_stop,
 1321         .show   = unusable_show,
 1322 };
 1323 
 1324 static int unusable_open(struct inode *inode, struct file *file)
 1325 {
 1326         return seq_open(file, &unusable_op);
 1327 }
 1328 
 1329 static const struct file_operations unusable_file_ops = {
 1330         .open           = unusable_open,
 1331         .read           = seq_read,
 1332         .llseek         = seq_lseek,
 1333         .release        = seq_release,
 1334 };
 1335 
 1336 static void extfrag_show_print(struct seq_file *m,
 1337                                         pg_data_t *pgdat, struct zone *zone)
 1338 {
 1339         unsigned int order;
 1340         int index;
 1341 
 1342         /* Alloc on stack as interrupts are disabled for zone walk */
 1343         struct contig_page_info info;
 1344 
 1345         seq_printf(m, "Node %d, zone %8s ",
 1346                                 pgdat->node_id,
 1347                                 zone->name);
 1348         for (order = 0; order < MAX_ORDER; ++order) {
 1349                 fill_contig_page_info(zone, order, &info);
 1350                 index = __fragmentation_index(order, &info);
 1351                 seq_printf(m, "%d.%03d ", index / 1000, index % 1000);
 1352         }
 1353 
 1354         seq_putc(m, '\n');
 1355 }
 1356 
 1357 /*
 1358  * Display fragmentation index for orders that allocations would fail for
 1359  */
 1360 static int extfrag_show(struct seq_file *m, void *arg)
 1361 {
 1362         pg_data_t *pgdat = (pg_data_t *)arg;
 1363 
 1364         walk_zones_in_node(m, pgdat, extfrag_show_print);
 1365 
 1366         return 0;
 1367 }
 1368 
 1369 static const struct seq_operations extfrag_op = {
 1370         .start  = frag_start,
 1371         .next   = frag_next,
 1372         .stop   = frag_stop,
 1373         .show   = extfrag_show,
 1374 };
 1375 
 1376 static int extfrag_open(struct inode *inode, struct file *file)
 1377 {
 1378         return seq_open(file, &extfrag_op);
 1379 }
 1380 
 1381 static const struct file_operations extfrag_file_ops = {
 1382         .open           = extfrag_open,
 1383         .read           = seq_read,
 1384         .llseek         = seq_lseek,
 1385         .release        = seq_release,
 1386 };
 1387 
 1388 static int __init extfrag_debug_init(void)
 1389 {
 1390         struct dentry *extfrag_debug_root;
 1391 
 1392         extfrag_debug_root = debugfs_create_dir("extfrag", NULL);
 1393         if (!extfrag_debug_root)
 1394                 return -ENOMEM;
 1395 
 1396         if (!debugfs_create_file("unusable_index", 0444,
 1397                         extfrag_debug_root, NULL, &unusable_file_ops))
 1398                 goto fail;
 1399 
 1400         if (!debugfs_create_file("extfrag_index", 0444,
 1401                         extfrag_debug_root, NULL, &extfrag_file_ops))
 1402                 goto fail;
 1403 
 1404         return 0;
 1405 fail:
 1406         debugfs_remove_recursive(extfrag_debug_root);
 1407         return -ENOMEM;
 1408 }
 1409 
 1410 module_init(extfrag_debug_init);
 1411 #endif

This page is part of the FreeBSD/Linux Linux Kernel Cross-Reference, and was automatically generated using a modified version of the LXR engine.