FreeBSD/Linux Kernel Cross Reference
sys/kernel/cpuset.c


    1 /*
    2  *  kernel/cpuset.c
    3  *
    4  *  Processor and Memory placement constraints for sets of tasks.
    5  *
    6  *  Copyright (C) 2003 BULL SA.
    7  *  Copyright (C) 2004-2007 Silicon Graphics, Inc.
    8  *  Copyright (C) 2006 Google, Inc
    9  *
   10  *  Portions derived from Patrick Mochel's sysfs code.
   11  *  sysfs is Copyright (c) 2001-3 Patrick Mochel
   12  *
   13  *  2003-10-10 Written by Simon Derr.
   14  *  2003-10-22 Updates by Stephen Hemminger.
   15  *  2004 May-July Rework by Paul Jackson.
   16  *  2006 Rework by Paul Menage to use generic cgroups
   17  *  2008 Rework of the scheduler domains and CPU hotplug handling
   18  *       by Max Krasnyansky
   19  *
   20  *  This file is subject to the terms and conditions of the GNU General Public
   21  *  License.  See the file COPYING in the main directory of the Linux
   22  *  distribution for more details.
   23  */
   24 
   25 #include <linux/cpu.h>
   26 #include <linux/cpumask.h>
   27 #include <linux/cpuset.h>
   28 #include <linux/err.h>
   29 #include <linux/errno.h>
   30 #include <linux/file.h>
   31 #include <linux/fs.h>
   32 #include <linux/init.h>
   33 #include <linux/interrupt.h>
   34 #include <linux/kernel.h>
   35 #include <linux/kmod.h>
   36 #include <linux/list.h>
   37 #include <linux/mempolicy.h>
   38 #include <linux/mm.h>
   39 #include <linux/memory.h>
   40 #include <linux/export.h>
   41 #include <linux/mount.h>
   42 #include <linux/namei.h>
   43 #include <linux/pagemap.h>
   44 #include <linux/proc_fs.h>
   45 #include <linux/rcupdate.h>
   46 #include <linux/sched.h>
   47 #include <linux/seq_file.h>
   48 #include <linux/security.h>
   49 #include <linux/slab.h>
   50 #include <linux/spinlock.h>
   51 #include <linux/stat.h>
   52 #include <linux/string.h>
   53 #include <linux/time.h>
   54 #include <linux/backing-dev.h>
   55 #include <linux/sort.h>
   56 
   57 #include <asm/uaccess.h>
   58 #include <linux/atomic.h>
   59 #include <linux/mutex.h>
   60 #include <linux/workqueue.h>
   61 #include <linux/cgroup.h>
   62 
   63 /*
   64  * Workqueue for cpuset related tasks.
   65  *
    66  * Using the kevent workqueue may cause a deadlock when memory_migrate
    67  * is set, so we create a separate workqueue thread for cpuset.
   68  */
   69 static struct workqueue_struct *cpuset_wq;
   70 
   71 /*
    72  * Tracks how many cpusets are currently defined in the system.
   73  * When there is only one cpuset (the root cpuset) we can
   74  * short circuit some hooks.
   75  */
   76 int number_of_cpusets __read_mostly;
   77 
   78 /* Forward declare cgroup structures */
   79 struct cgroup_subsys cpuset_subsys;
   80 struct cpuset;
   81 
   82 /* See "Frequency meter" comments, below. */
   83 
   84 struct fmeter {
   85         int cnt;                /* unprocessed events count */
   86         int val;                /* most recent output value */
   87         time_t time;            /* clock (secs) when val computed */
   88         spinlock_t lock;        /* guards read or write of above */
   89 };
   90 
   91 struct cpuset {
   92         struct cgroup_subsys_state css;
   93 
   94         unsigned long flags;            /* "unsigned long" so bitops work */
   95         cpumask_var_t cpus_allowed;     /* CPUs allowed to tasks in cpuset */
   96         nodemask_t mems_allowed;        /* Memory Nodes allowed to tasks */
   97 
   98         struct cpuset *parent;          /* my parent */
   99 
  100         struct fmeter fmeter;           /* memory_pressure filter */
  101 
  102         /* partition number for rebuild_sched_domains() */
  103         int pn;
  104 
  105         /* for custom sched domain */
  106         int relax_domain_level;
  107 
  108         /* used for walking a cpuset hierarchy */
  109         struct list_head stack_list;
  110 };
  111 
  112 /* Retrieve the cpuset for a cgroup */
  113 static inline struct cpuset *cgroup_cs(struct cgroup *cont)
  114 {
  115         return container_of(cgroup_subsys_state(cont, cpuset_subsys_id),
  116                             struct cpuset, css);
  117 }
  118 
  119 /* Retrieve the cpuset for a task */
  120 static inline struct cpuset *task_cs(struct task_struct *task)
  121 {
  122         return container_of(task_subsys_state(task, cpuset_subsys_id),
  123                             struct cpuset, css);
  124 }
  125 
  126 #ifdef CONFIG_NUMA
  127 static inline bool task_has_mempolicy(struct task_struct *task)
  128 {
  129         return task->mempolicy;
  130 }
  131 #else
  132 static inline bool task_has_mempolicy(struct task_struct *task)
  133 {
  134         return false;
  135 }
  136 #endif
  137 
  138 
  139 /* bits in struct cpuset flags field */
  140 typedef enum {
  141         CS_CPU_EXCLUSIVE,
  142         CS_MEM_EXCLUSIVE,
  143         CS_MEM_HARDWALL,
  144         CS_MEMORY_MIGRATE,
  145         CS_SCHED_LOAD_BALANCE,
  146         CS_SPREAD_PAGE,
  147         CS_SPREAD_SLAB,
  148 } cpuset_flagbits_t;
  149 
  150 /* the type of hotplug event */
  151 enum hotplug_event {
  152         CPUSET_CPU_OFFLINE,
  153         CPUSET_MEM_OFFLINE,
  154 };
  155 
  156 /* convenient tests for these bits */
  157 static inline int is_cpu_exclusive(const struct cpuset *cs)
  158 {
  159         return test_bit(CS_CPU_EXCLUSIVE, &cs->flags);
  160 }
  161 
  162 static inline int is_mem_exclusive(const struct cpuset *cs)
  163 {
  164         return test_bit(CS_MEM_EXCLUSIVE, &cs->flags);
  165 }
  166 
  167 static inline int is_mem_hardwall(const struct cpuset *cs)
  168 {
  169         return test_bit(CS_MEM_HARDWALL, &cs->flags);
  170 }
  171 
  172 static inline int is_sched_load_balance(const struct cpuset *cs)
  173 {
  174         return test_bit(CS_SCHED_LOAD_BALANCE, &cs->flags);
  175 }
  176 
  177 static inline int is_memory_migrate(const struct cpuset *cs)
  178 {
  179         return test_bit(CS_MEMORY_MIGRATE, &cs->flags);
  180 }
  181 
  182 static inline int is_spread_page(const struct cpuset *cs)
  183 {
  184         return test_bit(CS_SPREAD_PAGE, &cs->flags);
  185 }
  186 
  187 static inline int is_spread_slab(const struct cpuset *cs)
  188 {
  189         return test_bit(CS_SPREAD_SLAB, &cs->flags);
  190 }
  191 
  192 static struct cpuset top_cpuset = {
  193         .flags = ((1 << CS_CPU_EXCLUSIVE) | (1 << CS_MEM_EXCLUSIVE)),
  194 };
  195 
  196 /*
  197  * There are two global mutexes guarding cpuset structures.  The first
  198  * is the main control groups cgroup_mutex, accessed via
  199  * cgroup_lock()/cgroup_unlock().  The second is the cpuset-specific
  200  * callback_mutex, below. They can nest.  It is ok to first take
  201  * cgroup_mutex, then nest callback_mutex.  We also require taking
  202  * task_lock() when dereferencing a task's cpuset pointer.  See "The
  203  * task_lock() exception", at the end of this comment.
  204  *
  205  * A task must hold both mutexes to modify cpusets.  If a task
  206  * holds cgroup_mutex, then it blocks others wanting that mutex,
  207  * ensuring that it is the only task able to also acquire callback_mutex
  208  * and be able to modify cpusets.  It can perform various checks on
  209  * the cpuset structure first, knowing nothing will change.  It can
  210  * also allocate memory while just holding cgroup_mutex.  While it is
  211  * performing these checks, various callback routines can briefly
  212  * acquire callback_mutex to query cpusets.  Once it is ready to make
  213  * the changes, it takes callback_mutex, blocking everyone else.
  214  *
  215  * Calls to the kernel memory allocator can not be made while holding
  216  * callback_mutex, as that would risk double tripping on callback_mutex
  217  * from one of the callbacks into the cpuset code from within
  218  * __alloc_pages().
  219  *
  220  * If a task is only holding callback_mutex, then it has read-only
  221  * access to cpusets.
  222  *
   223  * Now, the task_struct fields mems_allowed and mempolicy may be changed
   224  * by other tasks, so we use alloc_lock in the task_struct to protect
   225  * them.
  226  *
  227  * The cpuset_common_file_read() handlers only hold callback_mutex across
  228  * small pieces of code, such as when reading out possibly multi-word
  229  * cpumasks and nodemasks.
  230  *
  231  * Accessing a task's cpuset should be done in accordance with the
  232  * guidelines for accessing subsystem state in kernel/cgroup.c
  233  */
  234 
  235 static DEFINE_MUTEX(callback_mutex);
  236 
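A minimal sketch of the nesting order described above (editor's illustration, not part of this file; the writer function is hypothetical, but cgroup_lock()/cgroup_unlock() and callback_mutex are the locks the comment discusses):

static void cpuset_writer_sketch(struct cpuset *cs, struct cpuset *trialcs)
{
        cgroup_lock();                          /* outer lock: cgroup_mutex */
        /* validation and memory allocation are fine with only cgroup_mutex held */
        mutex_lock(&callback_mutex);            /* inner lock: no allocations from here on */
        cpumask_copy(cs->cpus_allowed, trialcs->cpus_allowed);
        mutex_unlock(&callback_mutex);
        cgroup_unlock();
}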
  237 /*
  238  * cpuset_buffer_lock protects both the cpuset_name and cpuset_nodelist
  239  * buffers.  They are statically allocated to prevent using excess stack
  240  * when calling cpuset_print_task_mems_allowed().
  241  */
  242 #define CPUSET_NAME_LEN         (128)
  243 #define CPUSET_NODELIST_LEN     (256)
  244 static char cpuset_name[CPUSET_NAME_LEN];
  245 static char cpuset_nodelist[CPUSET_NODELIST_LEN];
  246 static DEFINE_SPINLOCK(cpuset_buffer_lock);
  247 
  248 /*
  249  * This is ugly, but preserves the userspace API for existing cpuset
  250  * users. If someone tries to mount the "cpuset" filesystem, we
  251  * silently switch it to mount "cgroup" instead
  252  */
  253 static struct dentry *cpuset_mount(struct file_system_type *fs_type,
  254                          int flags, const char *unused_dev_name, void *data)
  255 {
  256         struct file_system_type *cgroup_fs = get_fs_type("cgroup");
  257         struct dentry *ret = ERR_PTR(-ENODEV);
  258         if (cgroup_fs) {
  259                 char mountopts[] =
  260                         "cpuset,noprefix,"
  261                         "release_agent=/sbin/cpuset_release_agent";
  262                 ret = cgroup_fs->mount(cgroup_fs, flags,
  263                                            unused_dev_name, mountopts);
  264                 put_filesystem(cgroup_fs);
  265         }
  266         return ret;
  267 }
  268 
  269 static struct file_system_type cpuset_fs_type = {
  270         .name = "cpuset",
  271         .mount = cpuset_mount,
  272 };
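A hedged userspace illustration of the rewrite above (the mount point /dev/cpuset is only an example; any empty directory works, and the program needs root):

#include <stdio.h>
#include <sys/mount.h>

int main(void)
{
        /* What legacy cpuset tools do... */
        if (mount("cpuset", "/dev/cpuset", "cpuset", 0, NULL) != 0)
                perror("mount cpuset");

        /* ...which the kernel silently turns into roughly:
         * mount("cgroup", "/dev/cpuset", "cgroup", 0,
         *       "cpuset,noprefix,release_agent=/sbin/cpuset_release_agent");
         */
        return 0;
}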
  273 
  274 /*
   275  * Return in pmask the portion of a cpuset's cpus_allowed that
  276  * are online.  If none are online, walk up the cpuset hierarchy
  277  * until we find one that does have some online cpus.  If we get
  278  * all the way to the top and still haven't found any online cpus,
  279  * return cpu_online_mask.  Or if passed a NULL cs from an exit'ing
  280  * task, return cpu_online_mask.
  281  *
  282  * One way or another, we guarantee to return some non-empty subset
  283  * of cpu_online_mask.
  284  *
  285  * Call with callback_mutex held.
  286  */
  287 
  288 static void guarantee_online_cpus(const struct cpuset *cs,
  289                                   struct cpumask *pmask)
  290 {
  291         while (cs && !cpumask_intersects(cs->cpus_allowed, cpu_online_mask))
  292                 cs = cs->parent;
  293         if (cs)
  294                 cpumask_and(pmask, cs->cpus_allowed, cpu_online_mask);
  295         else
  296                 cpumask_copy(pmask, cpu_online_mask);
  297         BUG_ON(!cpumask_intersects(pmask, cpu_online_mask));
  298 }
  299 
  300 /*
   301  * Return in *pmask the portion of a cpuset's mems_allowed that
  302  * are online, with memory.  If none are online with memory, walk
  303  * up the cpuset hierarchy until we find one that does have some
  304  * online mems.  If we get all the way to the top and still haven't
  305  * found any online mems, return node_states[N_MEMORY].
  306  *
  307  * One way or another, we guarantee to return some non-empty subset
  308  * of node_states[N_MEMORY].
  309  *
  310  * Call with callback_mutex held.
  311  */
  312 
  313 static void guarantee_online_mems(const struct cpuset *cs, nodemask_t *pmask)
  314 {
  315         while (cs && !nodes_intersects(cs->mems_allowed,
  316                                         node_states[N_MEMORY]))
  317                 cs = cs->parent;
  318         if (cs)
  319                 nodes_and(*pmask, cs->mems_allowed,
  320                                         node_states[N_MEMORY]);
  321         else
  322                 *pmask = node_states[N_MEMORY];
  323         BUG_ON(!nodes_intersects(*pmask, node_states[N_MEMORY]));
  324 }
  325 
  326 /*
  327  * update task's spread flag if cpuset's page/slab spread flag is set
  328  *
  329  * Called with callback_mutex/cgroup_mutex held
  330  */
  331 static void cpuset_update_task_spread_flag(struct cpuset *cs,
  332                                         struct task_struct *tsk)
  333 {
  334         if (is_spread_page(cs))
  335                 tsk->flags |= PF_SPREAD_PAGE;
  336         else
  337                 tsk->flags &= ~PF_SPREAD_PAGE;
  338         if (is_spread_slab(cs))
  339                 tsk->flags |= PF_SPREAD_SLAB;
  340         else
  341                 tsk->flags &= ~PF_SPREAD_SLAB;
  342 }
  343 
  344 /*
  345  * is_cpuset_subset(p, q) - Is cpuset p a subset of cpuset q?
  346  *
  347  * One cpuset is a subset of another if all its allowed CPUs and
  348  * Memory Nodes are a subset of the other, and its exclusive flags
  349  * are only set if the other's are set.  Call holding cgroup_mutex.
  350  */
  351 
  352 static int is_cpuset_subset(const struct cpuset *p, const struct cpuset *q)
  353 {
  354         return  cpumask_subset(p->cpus_allowed, q->cpus_allowed) &&
  355                 nodes_subset(p->mems_allowed, q->mems_allowed) &&
  356                 is_cpu_exclusive(p) <= is_cpu_exclusive(q) &&
  357                 is_mem_exclusive(p) <= is_mem_exclusive(q);
  358 }
  359 
  360 /**
  361  * alloc_trial_cpuset - allocate a trial cpuset
  362  * @cs: the cpuset that the trial cpuset duplicates
  363  */
  364 static struct cpuset *alloc_trial_cpuset(const struct cpuset *cs)
  365 {
  366         struct cpuset *trial;
  367 
  368         trial = kmemdup(cs, sizeof(*cs), GFP_KERNEL);
  369         if (!trial)
  370                 return NULL;
  371 
  372         if (!alloc_cpumask_var(&trial->cpus_allowed, GFP_KERNEL)) {
  373                 kfree(trial);
  374                 return NULL;
  375         }
  376         cpumask_copy(trial->cpus_allowed, cs->cpus_allowed);
  377 
  378         return trial;
  379 }
  380 
  381 /**
  382  * free_trial_cpuset - free the trial cpuset
  383  * @trial: the trial cpuset to be freed
  384  */
  385 static void free_trial_cpuset(struct cpuset *trial)
  386 {
  387         free_cpumask_var(trial->cpus_allowed);
  388         kfree(trial);
  389 }
  390 
  391 /*
  392  * validate_change() - Used to validate that any proposed cpuset change
  393  *                     follows the structural rules for cpusets.
  394  *
  395  * If we replaced the flag and mask values of the current cpuset
  396  * (cur) with those values in the trial cpuset (trial), would
  397  * our various subset and exclusive rules still be valid?  Presumes
  398  * cgroup_mutex held.
  399  *
  400  * 'cur' is the address of an actual, in-use cpuset.  Operations
  401  * such as list traversal that depend on the actual address of the
  402  * cpuset in the list must use cur below, not trial.
  403  *
  404  * 'trial' is the address of bulk structure copy of cur, with
  405  * perhaps one or more of the fields cpus_allowed, mems_allowed,
  406  * or flags changed to new, trial values.
  407  *
  408  * Return 0 if valid, -errno if not.
  409  */
  410 
  411 static int validate_change(const struct cpuset *cur, const struct cpuset *trial)
  412 {
  413         struct cgroup *cont;
  414         struct cpuset *c, *par;
  415 
  416         /* Each of our child cpusets must be a subset of us */
  417         list_for_each_entry(cont, &cur->css.cgroup->children, sibling) {
  418                 if (!is_cpuset_subset(cgroup_cs(cont), trial))
  419                         return -EBUSY;
  420         }
  421 
  422         /* Remaining checks don't apply to root cpuset */
  423         if (cur == &top_cpuset)
  424                 return 0;
  425 
  426         par = cur->parent;
  427 
  428         /* We must be a subset of our parent cpuset */
  429         if (!is_cpuset_subset(trial, par))
  430                 return -EACCES;
  431 
  432         /*
  433          * If either I or some sibling (!= me) is exclusive, we can't
  434          * overlap
  435          */
  436         list_for_each_entry(cont, &par->css.cgroup->children, sibling) {
  437                 c = cgroup_cs(cont);
  438                 if ((is_cpu_exclusive(trial) || is_cpu_exclusive(c)) &&
  439                     c != cur &&
  440                     cpumask_intersects(trial->cpus_allowed, c->cpus_allowed))
  441                         return -EINVAL;
  442                 if ((is_mem_exclusive(trial) || is_mem_exclusive(c)) &&
  443                     c != cur &&
  444                     nodes_intersects(trial->mems_allowed, c->mems_allowed))
  445                         return -EINVAL;
  446         }
  447 
  448         /* Cpusets with tasks can't have empty cpus_allowed or mems_allowed */
  449         if (cgroup_task_count(cur->css.cgroup)) {
  450                 if (cpumask_empty(trial->cpus_allowed) ||
  451                     nodes_empty(trial->mems_allowed)) {
  452                         return -ENOSPC;
  453                 }
  454         }
  455 
  456         return 0;
  457 }
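As an illustrative failure mode of the exclusivity rule above (hypothetical setup, assuming a cpuset mount at /dev/cpuset as in the earlier mount example): if sibling 'a' is cpu_exclusive with cpus 0-3, writing an overlapping range into sibling 'b' fails.

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
        int fd = open("/dev/cpuset/b/cpus", O_WRONLY);

        if (fd < 0)
                return 1;
        /* "2-5" overlaps the cpu_exclusive sibling /dev/cpuset/a (cpus 0-3),
         * so validate_change() rejects it and the write fails with EINVAL. */
        if (write(fd, "2-5", 3) < 0)
                perror("write b/cpus");         /* expect: Invalid argument */
        close(fd);
        return 0;
}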
  458 
  459 #ifdef CONFIG_SMP
  460 /*
  461  * Helper routine for generate_sched_domains().
  462  * Do cpusets a, b have overlapping cpus_allowed masks?
  463  */
  464 static int cpusets_overlap(struct cpuset *a, struct cpuset *b)
  465 {
  466         return cpumask_intersects(a->cpus_allowed, b->cpus_allowed);
  467 }
  468 
  469 static void
  470 update_domain_attr(struct sched_domain_attr *dattr, struct cpuset *c)
  471 {
  472         if (dattr->relax_domain_level < c->relax_domain_level)
  473                 dattr->relax_domain_level = c->relax_domain_level;
  474         return;
  475 }
  476 
  477 static void
  478 update_domain_attr_tree(struct sched_domain_attr *dattr, struct cpuset *c)
  479 {
  480         LIST_HEAD(q);
  481 
  482         list_add(&c->stack_list, &q);
  483         while (!list_empty(&q)) {
  484                 struct cpuset *cp;
  485                 struct cgroup *cont;
  486                 struct cpuset *child;
  487 
  488                 cp = list_first_entry(&q, struct cpuset, stack_list);
  489                 list_del(q.next);
  490 
  491                 if (cpumask_empty(cp->cpus_allowed))
  492                         continue;
  493 
  494                 if (is_sched_load_balance(cp))
  495                         update_domain_attr(dattr, cp);
  496 
  497                 list_for_each_entry(cont, &cp->css.cgroup->children, sibling) {
  498                         child = cgroup_cs(cont);
  499                         list_add_tail(&child->stack_list, &q);
  500                 }
  501         }
  502 }
  503 
  504 /*
  505  * generate_sched_domains()
  506  *
   507  * This function builds a partial partition of the system's CPUs.
  508  * A 'partial partition' is a set of non-overlapping subsets whose
  509  * union is a subset of that set.
  510  * The output of this function needs to be passed to kernel/sched.c
  511  * partition_sched_domains() routine, which will rebuild the scheduler's
  512  * load balancing domains (sched domains) as specified by that partial
  513  * partition.
  514  *
  515  * See "What is sched_load_balance" in Documentation/cgroups/cpusets.txt
  516  * for a background explanation of this.
  517  *
  518  * Does not return errors, on the theory that the callers of this
  519  * routine would rather not worry about failures to rebuild sched
  520  * domains when operating in the severe memory shortage situations
  521  * that could cause allocation failures below.
  522  *
  523  * Must be called with cgroup_lock held.
  524  *
  525  * The three key local variables below are:
  526  *    q  - a linked-list queue of cpuset pointers, used to implement a
  527  *         top-down scan of all cpusets.  This scan loads a pointer
  528  *         to each cpuset marked is_sched_load_balance into the
   529  *         array 'csa'.  For our purposes, rebuilding the scheduler's
  530  *         sched domains, we can ignore !is_sched_load_balance cpusets.
  531  *  csa  - (for CpuSet Array) Array of pointers to all the cpusets
  532  *         that need to be load balanced, for convenient iterative
  533  *         access by the subsequent code that finds the best partition,
   534  *         i.e. the set of domains (subsets) of CPUs such that the
  535  *         cpus_allowed of every cpuset marked is_sched_load_balance
  536  *         is a subset of one of these domains, while there are as
  537  *         many such domains as possible, each as small as possible.
  538  * doms  - Conversion of 'csa' to an array of cpumasks, for passing to
  539  *         the kernel/sched.c routine partition_sched_domains() in a
  540  *         convenient format, that can be easily compared to the prior
  541  *         value to determine what partition elements (sched domains)
  542  *         were changed (added or removed.)
  543  *
  544  * Finding the best partition (set of domains):
  545  *      The triple nested loops below over i, j, k scan over the
  546  *      load balanced cpusets (using the array of cpuset pointers in
  547  *      csa[]) looking for pairs of cpusets that have overlapping
  548  *      cpus_allowed, but which don't have the same 'pn' partition
   549  *      number, and puts them in the same partition.  It keeps
  550  *      looping on the 'restart' label until it can no longer find
  551  *      any such pairs.
  552  *
  553  *      The union of the cpus_allowed masks from the set of
  554  *      all cpusets having the same 'pn' value then form the one
  555  *      element of the partition (one sched domain) to be passed to
  556  *      partition_sched_domains().
  557  */
  558 static int generate_sched_domains(cpumask_var_t **domains,
  559                         struct sched_domain_attr **attributes)
  560 {
  561         LIST_HEAD(q);           /* queue of cpusets to be scanned */
  562         struct cpuset *cp;      /* scans q */
  563         struct cpuset **csa;    /* array of all cpuset ptrs */
  564         int csn;                /* how many cpuset ptrs in csa so far */
  565         int i, j, k;            /* indices for partition finding loops */
  566         cpumask_var_t *doms;    /* resulting partition; i.e. sched domains */
  567         struct sched_domain_attr *dattr;  /* attributes for custom domains */
  568         int ndoms = 0;          /* number of sched domains in result */
  569         int nslot;              /* next empty doms[] struct cpumask slot */
  570 
  571         doms = NULL;
  572         dattr = NULL;
  573         csa = NULL;
  574 
  575         /* Special case for the 99% of systems with one, full, sched domain */
  576         if (is_sched_load_balance(&top_cpuset)) {
  577                 ndoms = 1;
  578                 doms = alloc_sched_domains(ndoms);
  579                 if (!doms)
  580                         goto done;
  581 
  582                 dattr = kmalloc(sizeof(struct sched_domain_attr), GFP_KERNEL);
  583                 if (dattr) {
  584                         *dattr = SD_ATTR_INIT;
  585                         update_domain_attr_tree(dattr, &top_cpuset);
  586                 }
  587                 cpumask_copy(doms[0], top_cpuset.cpus_allowed);
  588 
  589                 goto done;
  590         }
  591 
  592         csa = kmalloc(number_of_cpusets * sizeof(cp), GFP_KERNEL);
  593         if (!csa)
  594                 goto done;
  595         csn = 0;
  596 
  597         list_add(&top_cpuset.stack_list, &q);
  598         while (!list_empty(&q)) {
  599                 struct cgroup *cont;
  600                 struct cpuset *child;   /* scans child cpusets of cp */
  601 
  602                 cp = list_first_entry(&q, struct cpuset, stack_list);
  603                 list_del(q.next);
  604 
  605                 if (cpumask_empty(cp->cpus_allowed))
  606                         continue;
  607 
  608                 /*
  609                  * All child cpusets contain a subset of the parent's cpus, so
  610                  * just skip them, and then we call update_domain_attr_tree()
  611                  * to calc relax_domain_level of the corresponding sched
  612                  * domain.
  613                  */
  614                 if (is_sched_load_balance(cp)) {
  615                         csa[csn++] = cp;
  616                         continue;
  617                 }
  618 
  619                 list_for_each_entry(cont, &cp->css.cgroup->children, sibling) {
  620                         child = cgroup_cs(cont);
  621                         list_add_tail(&child->stack_list, &q);
  622                 }
  623         }
  624 
  625         for (i = 0; i < csn; i++)
  626                 csa[i]->pn = i;
  627         ndoms = csn;
  628 
  629 restart:
  630         /* Find the best partition (set of sched domains) */
  631         for (i = 0; i < csn; i++) {
  632                 struct cpuset *a = csa[i];
  633                 int apn = a->pn;
  634 
  635                 for (j = 0; j < csn; j++) {
  636                         struct cpuset *b = csa[j];
  637                         int bpn = b->pn;
  638 
  639                         if (apn != bpn && cpusets_overlap(a, b)) {
  640                                 for (k = 0; k < csn; k++) {
  641                                         struct cpuset *c = csa[k];
  642 
  643                                         if (c->pn == bpn)
  644                                                 c->pn = apn;
  645                                 }
  646                                 ndoms--;        /* one less element */
  647                                 goto restart;
  648                         }
  649                 }
  650         }
  651 
  652         /*
  653          * Now we know how many domains to create.
  654          * Convert <csn, csa> to <ndoms, doms> and populate cpu masks.
  655          */
  656         doms = alloc_sched_domains(ndoms);
  657         if (!doms)
  658                 goto done;
  659 
  660         /*
  661          * The rest of the code, including the scheduler, can deal with
  662          * dattr==NULL case. No need to abort if alloc fails.
  663          */
  664         dattr = kmalloc(ndoms * sizeof(struct sched_domain_attr), GFP_KERNEL);
  665 
  666         for (nslot = 0, i = 0; i < csn; i++) {
  667                 struct cpuset *a = csa[i];
  668                 struct cpumask *dp;
  669                 int apn = a->pn;
  670 
  671                 if (apn < 0) {
  672                         /* Skip completed partitions */
  673                         continue;
  674                 }
  675 
  676                 dp = doms[nslot];
  677 
  678                 if (nslot == ndoms) {
  679                         static int warnings = 10;
  680                         if (warnings) {
  681                                 printk(KERN_WARNING
  682                                  "rebuild_sched_domains confused:"
  683                                   " nslot %d, ndoms %d, csn %d, i %d,"
  684                                   " apn %d\n",
  685                                   nslot, ndoms, csn, i, apn);
  686                                 warnings--;
  687                         }
  688                         continue;
  689                 }
  690 
  691                 cpumask_clear(dp);
  692                 if (dattr)
  693                         *(dattr + nslot) = SD_ATTR_INIT;
  694                 for (j = i; j < csn; j++) {
  695                         struct cpuset *b = csa[j];
  696 
  697                         if (apn == b->pn) {
  698                                 cpumask_or(dp, dp, b->cpus_allowed);
  699                                 if (dattr)
  700                                         update_domain_attr_tree(dattr + nslot, b);
  701 
  702                                 /* Done with this partition */
  703                                 b->pn = -1;
  704                         }
  705                 }
  706                 nslot++;
  707         }
  708         BUG_ON(nslot != ndoms);
  709 
  710 done:
  711         kfree(csa);
  712 
  713         /*
  714          * Fallback to the default domain if kmalloc() failed.
  715          * See comments in partition_sched_domains().
  716          */
  717         if (doms == NULL)
  718                 ndoms = 1;
  719 
  720         *domains    = doms;
  721         *attributes = dattr;
  722         return ndoms;
  723 }
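A hedged userspace sketch of how the two-domain case falls out of the code above (paths assume the /dev/cpuset mount from the earlier example; the child cpuset names 'a' and 'b' and an 8-CPU machine are made up):

#include <stdio.h>
#include <sys/stat.h>
#include <sys/types.h>

/* Write a string to a cpuset control file (illustrative helper). */
static void put(const char *path, const char *val)
{
        FILE *f = fopen(path, "w");

        if (!f) {
                perror(path);
                return;
        }
        fputs(val, f);
        fclose(f);
}

int main(void)
{
        /* Stop load balancing across the whole machine... */
        put("/dev/cpuset/sched_load_balance", "0");

        /* ...and balance two non-overlapping halves separately.  Since their
         * cpus_allowed never intersect, the restart loop above never merges
         * their 'pn' values and generate_sched_domains() returns ndoms == 2,
         * one domain for cpus 0-3 and one for cpus 4-7. */
        mkdir("/dev/cpuset/a", 0755);
        put("/dev/cpuset/a/cpus", "0-3");
        mkdir("/dev/cpuset/b", 0755);
        put("/dev/cpuset/b/cpus", "4-7");
        return 0;
}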
  724 
  725 /*
  726  * Rebuild scheduler domains.
  727  *
  728  * Call with neither cgroup_mutex held nor within get_online_cpus().
  729  * Takes both cgroup_mutex and get_online_cpus().
  730  *
  731  * Cannot be directly called from cpuset code handling changes
  732  * to the cpuset pseudo-filesystem, because it cannot be called
  733  * from code that already holds cgroup_mutex.
  734  */
  735 static void do_rebuild_sched_domains(struct work_struct *unused)
  736 {
  737         struct sched_domain_attr *attr;
  738         cpumask_var_t *doms;
  739         int ndoms;
  740 
  741         get_online_cpus();
  742 
  743         /* Generate domain masks and attrs */
  744         cgroup_lock();
  745         ndoms = generate_sched_domains(&doms, &attr);
  746         cgroup_unlock();
  747 
  748         /* Have scheduler rebuild the domains */
  749         partition_sched_domains(ndoms, doms, attr);
  750 
  751         put_online_cpus();
  752 }
  753 #else /* !CONFIG_SMP */
  754 static void do_rebuild_sched_domains(struct work_struct *unused)
  755 {
  756 }
  757 
  758 static int generate_sched_domains(cpumask_var_t **domains,
  759                         struct sched_domain_attr **attributes)
  760 {
  761         *domains = NULL;
  762         return 1;
  763 }
  764 #endif /* CONFIG_SMP */
  765 
  766 static DECLARE_WORK(rebuild_sched_domains_work, do_rebuild_sched_domains);
  767 
  768 /*
  769  * Rebuild scheduler domains, asynchronously via workqueue.
  770  *
  771  * If the flag 'sched_load_balance' of any cpuset with non-empty
  772  * 'cpus' changes, or if the 'cpus' allowed changes in any cpuset
  773  * which has that flag enabled, or if any cpuset with a non-empty
  774  * 'cpus' is removed, then call this routine to rebuild the
  775  * scheduler's dynamic sched domains.
  776  *
  777  * The rebuild_sched_domains() and partition_sched_domains()
  778  * routines must nest cgroup_lock() inside get_online_cpus(),
  779  * but such cpuset changes as these must nest that locking the
  780  * other way, holding cgroup_lock() for much of the code.
  781  *
  782  * So in order to avoid an ABBA deadlock, the cpuset code handling
  783  * these user changes delegates the actual sched domain rebuilding
  784  * to a separate workqueue thread, which ends up processing the
  785  * above do_rebuild_sched_domains() function.
  786  */
  787 static void async_rebuild_sched_domains(void)
  788 {
  789         queue_work(cpuset_wq, &rebuild_sched_domains_work);
  790 }
  791 
  792 /*
  793  * Accomplishes the same scheduler domain rebuild as the above
  794  * async_rebuild_sched_domains(), however it directly calls the
  795  * rebuild routine synchronously rather than calling it via an
  796  * asynchronous work thread.
  797  *
  798  * This can only be called from code that is not holding
  799  * cgroup_mutex (not nested in a cgroup_lock() call.)
  800  */
  801 void rebuild_sched_domains(void)
  802 {
  803         do_rebuild_sched_domains(NULL);
  804 }
  805 
  806 /**
  807  * cpuset_test_cpumask - test a task's cpus_allowed versus its cpuset's
  808  * @tsk: task to test
  809  * @scan: struct cgroup_scanner contained in its struct cpuset_hotplug_scanner
  810  *
  811  * Call with cgroup_mutex held.  May take callback_mutex during call.
  812  * Called for each task in a cgroup by cgroup_scan_tasks().
   813  * Return nonzero if this task's cpus_allowed mask should be changed (in other
  814  * words, if its mask is not equal to its cpuset's mask).
  815  */
  816 static int cpuset_test_cpumask(struct task_struct *tsk,
  817                                struct cgroup_scanner *scan)
  818 {
  819         return !cpumask_equal(&tsk->cpus_allowed,
  820                         (cgroup_cs(scan->cg))->cpus_allowed);
  821 }
  822 
  823 /**
  824  * cpuset_change_cpumask - make a task's cpus_allowed the same as its cpuset's
  825  * @tsk: task to test
  826  * @scan: struct cgroup_scanner containing the cgroup of the task
  827  *
  828  * Called by cgroup_scan_tasks() for each task in a cgroup whose
  829  * cpus_allowed mask needs to be changed.
  830  *
  831  * We don't need to re-check for the cgroup/cpuset membership, since we're
  832  * holding cgroup_lock() at this point.
  833  */
  834 static void cpuset_change_cpumask(struct task_struct *tsk,
  835                                   struct cgroup_scanner *scan)
  836 {
  837         set_cpus_allowed_ptr(tsk, ((cgroup_cs(scan->cg))->cpus_allowed));
  838 }
  839 
  840 /**
  841  * update_tasks_cpumask - Update the cpumasks of tasks in the cpuset.
  842  * @cs: the cpuset in which each task's cpus_allowed mask needs to be changed
  843  * @heap: if NULL, defer allocating heap memory to cgroup_scan_tasks()
  844  *
  845  * Called with cgroup_mutex held
  846  *
  847  * The cgroup_scan_tasks() function will scan all the tasks in a cgroup,
  848  * calling callback functions for each.
  849  *
  850  * No return value. It's guaranteed that cgroup_scan_tasks() always returns 0
  851  * if @heap != NULL.
  852  */
  853 static void update_tasks_cpumask(struct cpuset *cs, struct ptr_heap *heap)
  854 {
  855         struct cgroup_scanner scan;
  856 
  857         scan.cg = cs->css.cgroup;
  858         scan.test_task = cpuset_test_cpumask;
  859         scan.process_task = cpuset_change_cpumask;
  860         scan.heap = heap;
  861         cgroup_scan_tasks(&scan);
  862 }
  863 
  864 /**
  865  * update_cpumask - update the cpus_allowed mask of a cpuset and all tasks in it
  866  * @cs: the cpuset to consider
  867  * @buf: buffer of cpu numbers written to this cpuset
  868  */
  869 static int update_cpumask(struct cpuset *cs, struct cpuset *trialcs,
  870                           const char *buf)
  871 {
  872         struct ptr_heap heap;
  873         int retval;
  874         int is_load_balanced;
  875 
  876         /* top_cpuset.cpus_allowed tracks cpu_online_mask; it's read-only */
  877         if (cs == &top_cpuset)
  878                 return -EACCES;
  879 
  880         /*
  881          * An empty cpus_allowed is ok only if the cpuset has no tasks.
  882          * Since cpulist_parse() fails on an empty mask, we special case
  883          * that parsing.  The validate_change() call ensures that cpusets
  884          * with tasks have cpus.
  885          */
  886         if (!*buf) {
  887                 cpumask_clear(trialcs->cpus_allowed);
  888         } else {
  889                 retval = cpulist_parse(buf, trialcs->cpus_allowed);
  890                 if (retval < 0)
  891                         return retval;
  892 
  893                 if (!cpumask_subset(trialcs->cpus_allowed, cpu_active_mask))
  894                         return -EINVAL;
  895         }
  896         retval = validate_change(cs, trialcs);
  897         if (retval < 0)
  898                 return retval;
  899 
  900         /* Nothing to do if the cpus didn't change */
  901         if (cpumask_equal(cs->cpus_allowed, trialcs->cpus_allowed))
  902                 return 0;
  903 
  904         retval = heap_init(&heap, PAGE_SIZE, GFP_KERNEL, NULL);
  905         if (retval)
  906                 return retval;
  907 
  908         is_load_balanced = is_sched_load_balance(trialcs);
  909 
  910         mutex_lock(&callback_mutex);
  911         cpumask_copy(cs->cpus_allowed, trialcs->cpus_allowed);
  912         mutex_unlock(&callback_mutex);
  913 
  914         /*
  915          * Scan tasks in the cpuset, and update the cpumasks of any
  916          * that need an update.
  917          */
  918         update_tasks_cpumask(cs, &heap);
  919 
  920         heap_free(&heap);
  921 
  922         if (is_load_balanced)
  923                 async_rebuild_sched_domains();
  924         return 0;
  925 }
  926 
  927 /*
  928  * cpuset_migrate_mm
  929  *
  930  *    Migrate memory region from one set of nodes to another.
  931  *
   932  *    Temporarily set the task's mems_allowed to the target nodes of migration,
  933  *    so that the migration code can allocate pages on these nodes.
  934  *
  935  *    Call holding cgroup_mutex, so current's cpuset won't change
   936  *    during this call, as cgroup_mutex holds off any cpuset_attach()
  937  *    calls.  Therefore we don't need to take task_lock around the
  938  *    call to guarantee_online_mems(), as we know no one is changing
  939  *    our task's cpuset.
  940  *
  941  *    While the mm_struct we are migrating is typically from some
  942  *    other task, the task_struct mems_allowed that we are hacking
  943  *    is for our current task, which must allocate new pages for that
  944  *    migrating memory region.
  945  */
  946 
  947 static void cpuset_migrate_mm(struct mm_struct *mm, const nodemask_t *from,
  948                                                         const nodemask_t *to)
  949 {
  950         struct task_struct *tsk = current;
  951 
  952         tsk->mems_allowed = *to;
  953 
  954         do_migrate_pages(mm, from, to, MPOL_MF_MOVE_ALL);
  955 
   956         guarantee_online_mems(task_cs(tsk), &tsk->mems_allowed);
  957 }
  958 
  959 /*
  960  * cpuset_change_task_nodemask - change task's mems_allowed and mempolicy
  961  * @tsk: the task to change
   962  * @newmems: the new nodes to be set for the task
  963  *
  964  * In order to avoid seeing no nodes if the old and new nodes are disjoint,
  965  * we structure updates as setting all new allowed nodes, then clearing newly
  966  * disallowed ones.
  967  */
  968 static void cpuset_change_task_nodemask(struct task_struct *tsk,
  969                                         nodemask_t *newmems)
  970 {
  971         bool need_loop;
  972 
  973         /*
  974          * Allow tasks that have access to memory reserves because they have
  975          * been OOM killed to get memory anywhere.
  976          */
  977         if (unlikely(test_thread_flag(TIF_MEMDIE)))
  978                 return;
  979         if (current->flags & PF_EXITING) /* Let dying task have memory */
  980                 return;
  981 
  982         task_lock(tsk);
  983         /*
  984          * Determine if a loop is necessary if another thread is doing
  985          * get_mems_allowed().  If at least one node remains unchanged and
  986          * tsk does not have a mempolicy, then an empty nodemask will not be
  987          * possible when mems_allowed is larger than a word.
  988          */
  989         need_loop = task_has_mempolicy(tsk) ||
  990                         !nodes_intersects(*newmems, tsk->mems_allowed);
  991 
  992         if (need_loop)
  993                 write_seqcount_begin(&tsk->mems_allowed_seq);
  994 
  995         nodes_or(tsk->mems_allowed, tsk->mems_allowed, *newmems);
  996         mpol_rebind_task(tsk, newmems, MPOL_REBIND_STEP1);
  997 
  998         mpol_rebind_task(tsk, newmems, MPOL_REBIND_STEP2);
  999         tsk->mems_allowed = *newmems;
 1000 
 1001         if (need_loop)
 1002                 write_seqcount_end(&tsk->mems_allowed_seq);
 1003 
 1004         task_unlock(tsk);
 1005 }
 1006 
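The seqcount written above pairs with lockless readers; a minimal sketch of that reader side, modelled on the get_mems_allowed()/put_mems_allowed() helpers of this kernel generation (treat the exact helper shape as an assumption):

static nodemask_t read_mems_allowed_sketch(struct task_struct *tsk)
{
        nodemask_t nodes;
        unsigned int seq;

        do {
                seq = read_seqcount_begin(&tsk->mems_allowed_seq);
                nodes = tsk->mems_allowed;
                /* Retry if the writer ran write_seqcount_begin()/end() in
                 * between, so a half-updated (possibly empty) mask is never
                 * observed. */
        } while (read_seqcount_retry(&tsk->mems_allowed_seq, seq));

        return nodes;
}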
 1007 /*
  1008  * Update the task's mems_allowed, rebind its mempolicy and its vmas'
  1009  * mempolicies to the cpuset's new mems_allowed, and migrate pages to new nodes if
 1010  * memory_migrate flag is set. Called with cgroup_mutex held.
 1011  */
 1012 static void cpuset_change_nodemask(struct task_struct *p,
 1013                                    struct cgroup_scanner *scan)
 1014 {
 1015         struct mm_struct *mm;
 1016         struct cpuset *cs;
 1017         int migrate;
 1018         const nodemask_t *oldmem = scan->data;
 1019         static nodemask_t newmems;      /* protected by cgroup_mutex */
 1020 
 1021         cs = cgroup_cs(scan->cg);
 1022         guarantee_online_mems(cs, &newmems);
 1023 
 1024         cpuset_change_task_nodemask(p, &newmems);
 1025 
 1026         mm = get_task_mm(p);
 1027         if (!mm)
 1028                 return;
 1029 
 1030         migrate = is_memory_migrate(cs);
 1031 
 1032         mpol_rebind_mm(mm, &cs->mems_allowed);
 1033         if (migrate)
 1034                 cpuset_migrate_mm(mm, oldmem, &cs->mems_allowed);
 1035         mmput(mm);
 1036 }
 1037 
 1038 static void *cpuset_being_rebound;
 1039 
 1040 /**
 1041  * update_tasks_nodemask - Update the nodemasks of tasks in the cpuset.
 1042  * @cs: the cpuset in which each task's mems_allowed mask needs to be changed
 1043  * @oldmem: old mems_allowed of cpuset cs
 1044  * @heap: if NULL, defer allocating heap memory to cgroup_scan_tasks()
 1045  *
 1046  * Called with cgroup_mutex held
 1047  * No return value. It's guaranteed that cgroup_scan_tasks() always returns 0
 1048  * if @heap != NULL.
 1049  */
 1050 static void update_tasks_nodemask(struct cpuset *cs, const nodemask_t *oldmem,
 1051                                  struct ptr_heap *heap)
 1052 {
 1053         struct cgroup_scanner scan;
 1054 
 1055         cpuset_being_rebound = cs;              /* causes mpol_dup() rebind */
 1056 
 1057         scan.cg = cs->css.cgroup;
 1058         scan.test_task = NULL;
 1059         scan.process_task = cpuset_change_nodemask;
 1060         scan.heap = heap;
 1061         scan.data = (nodemask_t *)oldmem;
 1062 
 1063         /*
 1064          * The mpol_rebind_mm() call takes mmap_sem, which we couldn't
 1065          * take while holding tasklist_lock.  Forks can happen - the
 1066          * mpol_dup() cpuset_being_rebound check will catch such forks,
 1067          * and rebind their vma mempolicies too.  Because we still hold
 1068          * the global cgroup_mutex, we know that no other rebind effort
 1069          * will be contending for the global variable cpuset_being_rebound.
 1070          * It's ok if we rebind the same mm twice; mpol_rebind_mm()
 1071          * is idempotent.  Also migrate pages in each mm to new nodes.
 1072          */
 1073         cgroup_scan_tasks(&scan);
 1074 
 1075         /* We're done rebinding vmas to this cpuset's new mems_allowed. */
 1076         cpuset_being_rebound = NULL;
 1077 }
 1078 
 1079 /*
 1080  * Handle user request to change the 'mems' memory placement
 1081  * of a cpuset.  Needs to validate the request, update the
  1082  * cpuset's mems_allowed, and for each task in the cpuset,
  1083  * update mems_allowed and rebind the task's mempolicy and any vma
  1084  * mempolicies; and if the cpuset is marked 'memory_migrate',
  1085  * migrate the task's pages to the new memory.
 1086  *
 1087  * Call with cgroup_mutex held.  May take callback_mutex during call.
 1088  * Will take tasklist_lock, scan tasklist for tasks in cpuset cs,
  1089  * lock each such task's mm->mmap_sem, scan its vmas and rebind
  1090  * their mempolicies to the cpuset's new mems_allowed.
 1091  */
 1092 static int update_nodemask(struct cpuset *cs, struct cpuset *trialcs,
 1093                            const char *buf)
 1094 {
 1095         NODEMASK_ALLOC(nodemask_t, oldmem, GFP_KERNEL);
 1096         int retval;
 1097         struct ptr_heap heap;
 1098 
 1099         if (!oldmem)
 1100                 return -ENOMEM;
 1101 
 1102         /*
  1103  * top_cpuset.mems_allowed tracks node_states[N_MEMORY];
 1104          * it's read-only
 1105          */
 1106         if (cs == &top_cpuset) {
 1107                 retval = -EACCES;
 1108                 goto done;
 1109         }
 1110 
 1111         /*
 1112          * An empty mems_allowed is ok iff there are no tasks in the cpuset.
 1113          * Since nodelist_parse() fails on an empty mask, we special case
 1114          * that parsing.  The validate_change() call ensures that cpusets
 1115          * with tasks have memory.
 1116          */
 1117         if (!*buf) {
 1118                 nodes_clear(trialcs->mems_allowed);
 1119         } else {
 1120                 retval = nodelist_parse(buf, trialcs->mems_allowed);
 1121                 if (retval < 0)
 1122                         goto done;
 1123 
 1124                 if (!nodes_subset(trialcs->mems_allowed,
 1125                                 node_states[N_MEMORY])) {
  1126                         retval = -EINVAL;
 1127                         goto done;
 1128                 }
 1129         }
 1130         *oldmem = cs->mems_allowed;
 1131         if (nodes_equal(*oldmem, trialcs->mems_allowed)) {
 1132                 retval = 0;             /* Too easy - nothing to do */
 1133                 goto done;
 1134         }
 1135         retval = validate_change(cs, trialcs);
 1136         if (retval < 0)
 1137                 goto done;
 1138 
 1139         retval = heap_init(&heap, PAGE_SIZE, GFP_KERNEL, NULL);
 1140         if (retval < 0)
 1141                 goto done;
 1142 
 1143         mutex_lock(&callback_mutex);
 1144         cs->mems_allowed = trialcs->mems_allowed;
 1145         mutex_unlock(&callback_mutex);
 1146 
 1147         update_tasks_nodemask(cs, oldmem, &heap);
 1148 
 1149         heap_free(&heap);
 1150 done:
 1151         NODEMASK_FREE(oldmem);
 1152         return retval;
 1153 }
 1154 
 1155 int current_cpuset_is_being_rebound(void)
 1156 {
 1157         return task_cs(current) == cpuset_being_rebound;
 1158 }
 1159 
 1160 static int update_relax_domain_level(struct cpuset *cs, s64 val)
 1161 {
 1162 #ifdef CONFIG_SMP
 1163         if (val < -1 || val >= sched_domain_level_max)
 1164                 return -EINVAL;
 1165 #endif
 1166 
 1167         if (val != cs->relax_domain_level) {
 1168                 cs->relax_domain_level = val;
 1169                 if (!cpumask_empty(cs->cpus_allowed) &&
 1170                     is_sched_load_balance(cs))
 1171                         async_rebuild_sched_domains();
 1172         }
 1173 
 1174         return 0;
 1175 }
 1176 
 1177 /*
 1178  * cpuset_change_flag - make a task's spread flags the same as its cpuset's
 1179  * @tsk: task to be updated
 1180  * @scan: struct cgroup_scanner containing the cgroup of the task
 1181  *
 1182  * Called by cgroup_scan_tasks() for each task in a cgroup.
 1183  *
 1184  * We don't need to re-check for the cgroup/cpuset membership, since we're
 1185  * holding cgroup_lock() at this point.
 1186  */
 1187 static void cpuset_change_flag(struct task_struct *tsk,
 1188                                 struct cgroup_scanner *scan)
 1189 {
 1190         cpuset_update_task_spread_flag(cgroup_cs(scan->cg), tsk);
 1191 }
 1192 
 1193 /*
 1194  * update_tasks_flags - update the spread flags of tasks in the cpuset.
  1195  * @cs: the cpuset in which each task's spread flags need to be changed
 1196  * @heap: if NULL, defer allocating heap memory to cgroup_scan_tasks()
 1197  *
 1198  * Called with cgroup_mutex held
 1199  *
 1200  * The cgroup_scan_tasks() function will scan all the tasks in a cgroup,
 1201  * calling callback functions for each.
 1202  *
 1203  * No return value. It's guaranteed that cgroup_scan_tasks() always returns 0
 1204  * if @heap != NULL.
 1205  */
 1206 static void update_tasks_flags(struct cpuset *cs, struct ptr_heap *heap)
 1207 {
 1208         struct cgroup_scanner scan;
 1209 
 1210         scan.cg = cs->css.cgroup;
 1211         scan.test_task = NULL;
 1212         scan.process_task = cpuset_change_flag;
 1213         scan.heap = heap;
 1214         cgroup_scan_tasks(&scan);
 1215 }
 1216 
 1217 /*
 1218  * update_flag - read a 0 or a 1 in a file and update associated flag
 1219  * bit:         the bit to update (see cpuset_flagbits_t)
 1220  * cs:          the cpuset to update
 1221  * turning_on:  whether the flag is being set or cleared
 1222  *
 1223  * Call with cgroup_mutex held.
 1224  */
 1225 
 1226 static int update_flag(cpuset_flagbits_t bit, struct cpuset *cs,
 1227                        int turning_on)
 1228 {
 1229         struct cpuset *trialcs;
 1230         int balance_flag_changed;
 1231         int spread_flag_changed;
 1232         struct ptr_heap heap;
 1233         int err;
 1234 
 1235         trialcs = alloc_trial_cpuset(cs);
 1236         if (!trialcs)
 1237                 return -ENOMEM;
 1238 
 1239         if (turning_on)
 1240                 set_bit(bit, &trialcs->flags);
 1241         else
 1242                 clear_bit(bit, &trialcs->flags);
 1243 
 1244         err = validate_change(cs, trialcs);
 1245         if (err < 0)
 1246                 goto out;
 1247 
 1248         err = heap_init(&heap, PAGE_SIZE, GFP_KERNEL, NULL);
 1249         if (err < 0)
 1250                 goto out;
 1251 
 1252         balance_flag_changed = (is_sched_load_balance(cs) !=
 1253                                 is_sched_load_balance(trialcs));
 1254 
 1255         spread_flag_changed = ((is_spread_slab(cs) != is_spread_slab(trialcs))
 1256                         || (is_spread_page(cs) != is_spread_page(trialcs)));
 1257 
 1258         mutex_lock(&callback_mutex);
 1259         cs->flags = trialcs->flags;
 1260         mutex_unlock(&callback_mutex);
 1261 
 1262         if (!cpumask_empty(trialcs->cpus_allowed) && balance_flag_changed)
 1263                 async_rebuild_sched_domains();
 1264 
 1265         if (spread_flag_changed)
 1266                 update_tasks_flags(cs, &heap);
 1267         heap_free(&heap);
 1268 out:
 1269         free_trial_cpuset(trialcs);
 1270         return err;
 1271 }
 1272 
 1273 /*
 1274  * Frequency meter - How fast is some event occurring?
 1275  *
 1276  * These routines manage a digitally filtered, constant time based,
 1277  * event frequency meter.  There are four routines:
 1278  *   fmeter_init() - initialize a frequency meter.
 1279  *   fmeter_markevent() - called each time the event happens.
 1280  *   fmeter_getrate() - returns the recent rate of such events.
 1281  *   fmeter_update() - internal routine used to update fmeter.
 1282  *
 1283  * A common data structure is passed to each of these routines,
 1284  * which is used to keep track of the state required to manage the
 1285  * frequency meter and its digital filter.
 1286  *
 1287  * The filter works on the number of events marked per unit time.
 1288  * The filter is single-pole low-pass recursive (IIR).  The time unit
 1289  * is 1 second.  Arithmetic is done using 32-bit integers scaled to
 1290  * simulate 3 decimal digits of precision (multiplied by 1000).
 1291  *
 1292  * With an FM_COEF of 933, and a time base of 1 second, the filter
 1293  * has a half-life of 10 seconds, meaning that if the events quit
 1294  * happening, then the rate returned from the fmeter_getrate()
 1295  * will be cut in half each 10 seconds, until it converges to zero.
 1296  *
 1297  * It is not worth doing a real infinitely recursive filter.  If more
 1298  * than FM_MAXTICKS ticks have elapsed since the last filter event,
 1299  * just compute FM_MAXTICKS ticks worth, by which point the level
 1300  * will be stable.
 1301  *
 1302  * Limit the count of unprocessed events to FM_MAXCNT, so as to avoid
 1303  * arithmetic overflow in the fmeter_update() routine.
 1304  *
 1305  * Given the simple 32 bit integer arithmetic used, this meter works
 1306  * best for reporting rates between one per millisecond (msec) and
 1307  * one per 32 (approx) seconds.  At constant rates faster than one
 1308  * per msec it maxes out at values just under 1,000,000.  At constant
 1309  * rates between one per msec, and one per second it will stabilize
 1310  * to a value N*1000, where N is the rate of events per second.
 1311  * At constant rates between one per second and one per 32 seconds,
 1312  * it will be choppy, moving up on the seconds that have an event,
 1313  * and then decaying until the next event.  At rates slower than
 1314  * about one in 32 seconds, it decays all the way back to zero between
 1315  * each event.
 1316  */
 1317 
 1318 #define FM_COEF 933             /* coefficient for half-life of 10 secs */
 1319 #define FM_MAXTICKS ((time_t)99) /* useless computing more ticks than this */
 1320 #define FM_MAXCNT 1000000       /* limit cnt to avoid overflow */
 1321 #define FM_SCALE 1000           /* faux fixed point scale */
 1322 
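A small userspace simulation of the decay arithmetic described above (editor's sketch; constants copied from the #defines).  It confirms the claimed 10-second half-life; a steady rate of N events/sec likewise settles near N * FM_SCALE, since the ~6.7% per-second decay balances the (FM_SCALE - FM_COEF) * N added each second.

#include <stdio.h>

#define FM_COEF 933
#define FM_SCALE 1000

int main(void)
{
        int val = 600000;       /* some filtered rate at time zero */
        int secs;

        for (secs = 1; secs <= 20; secs++) {
                /* one idle second: same scaling as the loop in fmeter_update() */
                val = (FM_COEF * val) / FM_SCALE;
                if (secs == 10 || secs == 20)
                        printf("after %2d idle seconds: %d\n", secs, val);
        }
        /* prints roughly 300000, then roughly 150000: halved every ~10 s */
        return 0;
}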
 1323 /* Initialize a frequency meter */
 1324 static void fmeter_init(struct fmeter *fmp)
 1325 {
 1326         fmp->cnt = 0;
 1327         fmp->val = 0;
 1328         fmp->time = 0;
 1329         spin_lock_init(&fmp->lock);
 1330 }
 1331 
 1332 /* Internal meter update - process cnt events and update value */
 1333 static void fmeter_update(struct fmeter *fmp)
 1334 {
 1335         time_t now = get_seconds();
 1336         time_t ticks = now - fmp->time;
 1337 
 1338         if (ticks == 0)
 1339                 return;
 1340 
 1341         ticks = min(FM_MAXTICKS, ticks);
 1342         while (ticks-- > 0)
 1343                 fmp->val = (FM_COEF * fmp->val) / FM_SCALE;
 1344         fmp->time = now;
 1345 
 1346         fmp->val += ((FM_SCALE - FM_COEF) * fmp->cnt) / FM_SCALE;
 1347         fmp->cnt = 0;
 1348 }
 1349 
 1350 /* Process any previous ticks, then bump cnt by one (times scale). */
 1351 static void fmeter_markevent(struct fmeter *fmp)
 1352 {
 1353         spin_lock(&fmp->lock);
 1354         fmeter_update(fmp);
 1355         fmp->cnt = min(FM_MAXCNT, fmp->cnt + FM_SCALE);
 1356         spin_unlock(&fmp->lock);
 1357 }
 1358 
 1359 /* Process any previous ticks, then return current value. */
 1360 static int fmeter_getrate(struct fmeter *fmp)
 1361 {
 1362         int val;
 1363 
 1364         spin_lock(&fmp->lock);
 1365         fmeter_update(fmp);
 1366         val = fmp->val;
 1367         spin_unlock(&fmp->lock);
 1368         return val;
 1369 }
 1370 
 1371 /*
 1372  * Protected by cgroup_lock. The nodemasks must be stored globally because
 1373  * dynamically allocating them is not allowed in can_attach, and they must
 1374  * persist until attach.
 1375  */
 1376 static cpumask_var_t cpus_attach;
 1377 static nodemask_t cpuset_attach_nodemask_from;
 1378 static nodemask_t cpuset_attach_nodemask_to;
 1379 
 1380 /* Called by cgroups to determine if a cpuset is usable; cgroup_mutex held */
 1381 static int cpuset_can_attach(struct cgroup *cgrp, struct cgroup_taskset *tset)
 1382 {
 1383         struct cpuset *cs = cgroup_cs(cgrp);
 1384         struct task_struct *task;
 1385         int ret;
 1386 
 1387         if (cpumask_empty(cs->cpus_allowed) || nodes_empty(cs->mems_allowed))
 1388                 return -ENOSPC;
 1389 
 1390         cgroup_taskset_for_each(task, cgrp, tset) {
 1391                 /*
 1392                  * Kthreads bound to specific cpus cannot be moved to a new
 1393                  * cpuset; we cannot change their cpu affinity and
 1394                  * isolating such threads by their set of allowed nodes is
 1395                  * unnecessary.  Thus, cpusets are not applicable for such
 1396                  * threads.  This prevents checking for success of
 1397                  * set_cpus_allowed_ptr() on all attached tasks before
 1398                  * cpus_allowed may be changed.
 1399                  */
 1400                 if (task->flags & PF_THREAD_BOUND)
 1401                         return -EINVAL;
 1402                 if ((ret = security_task_setscheduler(task)))
 1403                         return ret;
 1404         }
 1405 
 1406         /* prepare for attach */
 1407         if (cs == &top_cpuset)
 1408                 cpumask_copy(cpus_attach, cpu_possible_mask);
 1409         else
 1410                 guarantee_online_cpus(cs, cpus_attach);
 1411 
 1412         guarantee_online_mems(cs, &cpuset_attach_nodemask_to);
 1413 
 1414         return 0;
 1415 }
 1416 
 1417 static void cpuset_attach(struct cgroup *cgrp, struct cgroup_taskset *tset)
 1418 {
 1419         struct mm_struct *mm;
 1420         struct task_struct *task;
 1421         struct task_struct *leader = cgroup_taskset_first(tset);
 1422         struct cgroup *oldcgrp = cgroup_taskset_cur_cgroup(tset);
 1423         struct cpuset *cs = cgroup_cs(cgrp);
 1424         struct cpuset *oldcs = cgroup_cs(oldcgrp);
 1425 
 1426         cgroup_taskset_for_each(task, cgrp, tset) {
 1427                 /*
 1428                  * can_attach beforehand should guarantee that this doesn't
 1429                  * fail.  TODO: have a better way to handle failure here
 1430                  */
 1431                 WARN_ON_ONCE(set_cpus_allowed_ptr(task, cpus_attach));
 1432 
 1433                 cpuset_change_task_nodemask(task, &cpuset_attach_nodemask_to);
 1434                 cpuset_update_task_spread_flag(cs, task);
 1435         }
 1436 
 1437         /*
 1438          * Change mm, possibly for multiple threads in a threadgroup. This is
 1439          * expensive and may sleep.
 1440          */
 1441         cpuset_attach_nodemask_from = oldcs->mems_allowed;
 1442         cpuset_attach_nodemask_to = cs->mems_allowed;
 1443         mm = get_task_mm(leader);
 1444         if (mm) {
 1445                 mpol_rebind_mm(mm, &cpuset_attach_nodemask_to);
 1446                 if (is_memory_migrate(cs))
 1447                         cpuset_migrate_mm(mm, &cpuset_attach_nodemask_from,
 1448                                           &cpuset_attach_nodemask_to);
 1449                 mmput(mm);
 1450         }
 1451 }
 1452 
 1453 /* The various types of files and directories in a cpuset file system */
 1454 
 1455 typedef enum {
 1456         FILE_MEMORY_MIGRATE,
 1457         FILE_CPULIST,
 1458         FILE_MEMLIST,
 1459         FILE_CPU_EXCLUSIVE,
 1460         FILE_MEM_EXCLUSIVE,
 1461         FILE_MEM_HARDWALL,
 1462         FILE_SCHED_LOAD_BALANCE,
 1463         FILE_SCHED_RELAX_DOMAIN_LEVEL,
 1464         FILE_MEMORY_PRESSURE_ENABLED,
 1465         FILE_MEMORY_PRESSURE,
 1466         FILE_SPREAD_PAGE,
 1467         FILE_SPREAD_SLAB,
 1468 } cpuset_filetype_t;
 1469 
 1470 static int cpuset_write_u64(struct cgroup *cgrp, struct cftype *cft, u64 val)
 1471 {
 1472         int retval = 0;
 1473         struct cpuset *cs = cgroup_cs(cgrp);
 1474         cpuset_filetype_t type = cft->private;
 1475 
 1476         if (!cgroup_lock_live_group(cgrp))
 1477                 return -ENODEV;
 1478 
 1479         switch (type) {
 1480         case FILE_CPU_EXCLUSIVE:
 1481                 retval = update_flag(CS_CPU_EXCLUSIVE, cs, val);
 1482                 break;
 1483         case FILE_MEM_EXCLUSIVE:
 1484                 retval = update_flag(CS_MEM_EXCLUSIVE, cs, val);
 1485                 break;
 1486         case FILE_MEM_HARDWALL:
 1487                 retval = update_flag(CS_MEM_HARDWALL, cs, val);
 1488                 break;
 1489         case FILE_SCHED_LOAD_BALANCE:
 1490                 retval = update_flag(CS_SCHED_LOAD_BALANCE, cs, val);
 1491                 break;
 1492         case FILE_MEMORY_MIGRATE:
 1493                 retval = update_flag(CS_MEMORY_MIGRATE, cs, val);
 1494                 break;
 1495         case FILE_MEMORY_PRESSURE_ENABLED:
 1496                 cpuset_memory_pressure_enabled = !!val;
 1497                 break;
 1498         case FILE_MEMORY_PRESSURE:
 1499                 retval = -EACCES;
 1500                 break;
 1501         case FILE_SPREAD_PAGE:
 1502                 retval = update_flag(CS_SPREAD_PAGE, cs, val);
 1503                 break;
 1504         case FILE_SPREAD_SLAB:
 1505                 retval = update_flag(CS_SPREAD_SLAB, cs, val);
 1506                 break;
 1507         default:
 1508                 retval = -EINVAL;
 1509                 break;
 1510         }
 1511         cgroup_unlock();
 1512         return retval;
 1513 }
 1514 
 1515 static int cpuset_write_s64(struct cgroup *cgrp, struct cftype *cft, s64 val)
 1516 {
 1517         int retval = 0;
 1518         struct cpuset *cs = cgroup_cs(cgrp);
 1519         cpuset_filetype_t type = cft->private;
 1520 
 1521         if (!cgroup_lock_live_group(cgrp))
 1522                 return -ENODEV;
 1523 
 1524         switch (type) {
 1525         case FILE_SCHED_RELAX_DOMAIN_LEVEL:
 1526                 retval = update_relax_domain_level(cs, val);
 1527                 break;
 1528         default:
 1529                 retval = -EINVAL;
 1530                 break;
 1531         }
 1532         cgroup_unlock();
 1533         return retval;
 1534 }
 1535 
 1536 /*
 1537  * Common handling for a write to a "cpus" or "mems" file.
 1538  */
 1539 static int cpuset_write_resmask(struct cgroup *cgrp, struct cftype *cft,
 1540                                 const char *buf)
 1541 {
 1542         int retval = 0;
 1543         struct cpuset *cs = cgroup_cs(cgrp);
 1544         struct cpuset *trialcs;
 1545 
 1546         if (!cgroup_lock_live_group(cgrp))
 1547                 return -ENODEV;
 1548 
 1549         trialcs = alloc_trial_cpuset(cs);
 1550         if (!trialcs) {
 1551                 retval = -ENOMEM;
 1552                 goto out;
 1553         }
 1554 
 1555         switch (cft->private) {
 1556         case FILE_CPULIST:
 1557                 retval = update_cpumask(cs, trialcs, buf);
 1558                 break;
 1559         case FILE_MEMLIST:
 1560                 retval = update_nodemask(cs, trialcs, buf);
 1561                 break;
 1562         default:
 1563                 retval = -EINVAL;
 1564                 break;
 1565         }
 1566 
 1567         free_trial_cpuset(trialcs);
 1568 out:
 1569         cgroup_unlock();
 1570         return retval;
 1571 }
 1572 
 1573 /*
 1574  * These ascii lists should be read in a single call, by using a user
 1575  * buffer large enough to hold the entire map.  If read in smaller
 1576  * chunks, there is no guarantee of atomicity.  Since the display format
 1577  * used, list of ranges of sequential numbers, is variable length,
 1578  * and since these maps can change value dynamically, one could read
 1579  * gibberish by doing partial reads while a list was changing.
 1580  * A single large read to a buffer that crosses a page boundary is
 1581  * ok, because the result being copied to user land is not recomputed
 1582  * across a page fault.
 1583  */
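
/*
 * A minimal, standalone user-space sketch (wrapped in #if 0, not part of
 * the kernel build) following the advice above: read the whole list in a
 * single read() call with a buffer large enough for the longest possible
 * output.  The path below is an assumption - it depends on where and how
 * the cpuset hierarchy is mounted (the file may also be named just "cpus"
 * on a legacy cpuset mount).
 */
#if 0
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
        /* Assumed path; adjust to the local cpuset/cgroup mount point. */
        const char *path = "/sys/fs/cgroup/cpuset/cpuset.cpus";
        char buf[4096];                 /* one page holds the entire list */
        ssize_t n;
        int fd;

        fd = open(path, O_RDONLY);
        if (fd < 0) {
                perror("open");
                return 1;
        }

        /* One large read, so the formatted list is seen as a whole. */
        n = read(fd, buf, sizeof(buf) - 1);
        if (n < 0) {
                perror("read");
                close(fd);
                return 1;
        }
        buf[n] = '\0';
        printf("cpus: %s", buf);
        close(fd);
        return 0;
}
#endif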
 1584 
 1585 static size_t cpuset_sprintf_cpulist(char *page, struct cpuset *cs)
 1586 {
 1587         size_t count;
 1588 
 1589         mutex_lock(&callback_mutex);
 1590         count = cpulist_scnprintf(page, PAGE_SIZE, cs->cpus_allowed);
 1591         mutex_unlock(&callback_mutex);
 1592 
 1593         return count;
 1594 }
 1595 
 1596 static size_t cpuset_sprintf_memlist(char *page, struct cpuset *cs)
 1597 {
 1598         size_t count;
 1599 
 1600         mutex_lock(&callback_mutex);
 1601         count = nodelist_scnprintf(page, PAGE_SIZE, cs->mems_allowed);
 1602         mutex_unlock(&callback_mutex);
 1603 
 1604         return count;
 1605 }
 1606 
 1607 static ssize_t cpuset_common_file_read(struct cgroup *cont,
 1608                                        struct cftype *cft,
 1609                                        struct file *file,
 1610                                        char __user *buf,
 1611                                        size_t nbytes, loff_t *ppos)
 1612 {
 1613         struct cpuset *cs = cgroup_cs(cont);
 1614         cpuset_filetype_t type = cft->private;
 1615         char *page;
 1616         ssize_t retval = 0;
 1617         char *s;
 1618 
 1619         if (!(page = (char *)__get_free_page(GFP_TEMPORARY)))
 1620                 return -ENOMEM;
 1621 
 1622         s = page;
 1623 
 1624         switch (type) {
 1625         case FILE_CPULIST:
 1626                 s += cpuset_sprintf_cpulist(s, cs);
 1627                 break;
 1628         case FILE_MEMLIST:
 1629                 s += cpuset_sprintf_memlist(s, cs);
 1630                 break;
 1631         default:
 1632                 retval = -EINVAL;
 1633                 goto out;
 1634         }
 1635         *s++ = '\n';
 1636 
 1637         retval = simple_read_from_buffer(buf, nbytes, ppos, page, s - page);
 1638 out:
 1639         free_page((unsigned long)page);
 1640         return retval;
 1641 }
 1642 
 1643 static u64 cpuset_read_u64(struct cgroup *cont, struct cftype *cft)
 1644 {
 1645         struct cpuset *cs = cgroup_cs(cont);
 1646         cpuset_filetype_t type = cft->private;
 1647         switch (type) {
 1648         case FILE_CPU_EXCLUSIVE:
 1649                 return is_cpu_exclusive(cs);
 1650         case FILE_MEM_EXCLUSIVE:
 1651                 return is_mem_exclusive(cs);
 1652         case FILE_MEM_HARDWALL:
 1653                 return is_mem_hardwall(cs);
 1654         case FILE_SCHED_LOAD_BALANCE:
 1655                 return is_sched_load_balance(cs);
 1656         case FILE_MEMORY_MIGRATE:
 1657                 return is_memory_migrate(cs);
 1658         case FILE_MEMORY_PRESSURE_ENABLED:
 1659                 return cpuset_memory_pressure_enabled;
 1660         case FILE_MEMORY_PRESSURE:
 1661                 return fmeter_getrate(&cs->fmeter);
 1662         case FILE_SPREAD_PAGE:
 1663                 return is_spread_page(cs);
 1664         case FILE_SPREAD_SLAB:
 1665                 return is_spread_slab(cs);
 1666         default:
 1667                 BUG();
 1668         }
 1669 
 1670         /* Unreachable but makes gcc happy */
 1671         return 0;
 1672 }
 1673 
 1674 static s64 cpuset_read_s64(struct cgroup *cont, struct cftype *cft)
 1675 {
 1676         struct cpuset *cs = cgroup_cs(cont);
 1677         cpuset_filetype_t type = cft->private;
 1678         switch (type) {
 1679         case FILE_SCHED_RELAX_DOMAIN_LEVEL:
 1680                 return cs->relax_domain_level;
 1681         default:
 1682                 BUG();
 1683         }
 1684 
 1685  * Unreachable but makes gcc happy */
 1686         return 0;
 1687 }
 1688 
 1689 
 1690 /*
 1691  * for the common functions, 'private' gives the type of file
 1692  */
 1693 
 1694 static struct cftype files[] = {
 1695         {
 1696                 .name = "cpus",
 1697                 .read = cpuset_common_file_read,
 1698                 .write_string = cpuset_write_resmask,
 1699                 .max_write_len = (100U + 6 * NR_CPUS),
 1700                 .private = FILE_CPULIST,
 1701         },
 1702 
 1703         {
 1704                 .name = "mems",
 1705                 .read = cpuset_common_file_read,
 1706                 .write_string = cpuset_write_resmask,
 1707                 .max_write_len = (100U + 6 * MAX_NUMNODES),
 1708                 .private = FILE_MEMLIST,
 1709         },
 1710 
 1711         {
 1712                 .name = "cpu_exclusive",
 1713                 .read_u64 = cpuset_read_u64,
 1714                 .write_u64 = cpuset_write_u64,
 1715                 .private = FILE_CPU_EXCLUSIVE,
 1716         },
 1717 
 1718         {
 1719                 .name = "mem_exclusive",
 1720                 .read_u64 = cpuset_read_u64,
 1721                 .write_u64 = cpuset_write_u64,
 1722                 .private = FILE_MEM_EXCLUSIVE,
 1723         },
 1724 
 1725         {
 1726                 .name = "mem_hardwall",
 1727                 .read_u64 = cpuset_read_u64,
 1728                 .write_u64 = cpuset_write_u64,
 1729                 .private = FILE_MEM_HARDWALL,
 1730         },
 1731 
 1732         {
 1733                 .name = "sched_load_balance",
 1734                 .read_u64 = cpuset_read_u64,
 1735                 .write_u64 = cpuset_write_u64,
 1736                 .private = FILE_SCHED_LOAD_BALANCE,
 1737         },
 1738 
 1739         {
 1740                 .name = "sched_relax_domain_level",
 1741                 .read_s64 = cpuset_read_s64,
 1742                 .write_s64 = cpuset_write_s64,
 1743                 .private = FILE_SCHED_RELAX_DOMAIN_LEVEL,
 1744         },
 1745 
 1746         {
 1747                 .name = "memory_migrate",
 1748                 .read_u64 = cpuset_read_u64,
 1749                 .write_u64 = cpuset_write_u64,
 1750                 .private = FILE_MEMORY_MIGRATE,
 1751         },
 1752 
 1753         {
 1754                 .name = "memory_pressure",
 1755                 .read_u64 = cpuset_read_u64,
 1756                 .write_u64 = cpuset_write_u64,
 1757                 .private = FILE_MEMORY_PRESSURE,
 1758                 .mode = S_IRUGO,
 1759         },
 1760 
 1761         {
 1762                 .name = "memory_spread_page",
 1763                 .read_u64 = cpuset_read_u64,
 1764                 .write_u64 = cpuset_write_u64,
 1765                 .private = FILE_SPREAD_PAGE,
 1766         },
 1767 
 1768         {
 1769                 .name = "memory_spread_slab",
 1770                 .read_u64 = cpuset_read_u64,
 1771                 .write_u64 = cpuset_write_u64,
 1772                 .private = FILE_SPREAD_SLAB,
 1773         },
 1774 
 1775         {
 1776                 .name = "memory_pressure_enabled",
 1777                 .flags = CFTYPE_ONLY_ON_ROOT,
 1778                 .read_u64 = cpuset_read_u64,
 1779                 .write_u64 = cpuset_write_u64,
 1780                 .private = FILE_MEMORY_PRESSURE_ENABLED,
 1781         },
 1782 
 1783         { }     /* terminate */
 1784 };
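
/*
 * A hedged user-space sketch (wrapped in #if 0, not part of the kernel
 * build) of how the boolean files in the table above are driven: writing
 * the string "1" to, e.g., the memory_migrate file ends up in
 * cpuset_write_u64() with cft->private == FILE_MEMORY_MIGRATE.  The path
 * and the "mygroup" directory are assumptions for illustration; the
 * "cpuset." prefix applies when the controller is mounted through cgroups.
 */
#if 0
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
        /* Assumed path; depends on the cgroup/cpuset mount and group name. */
        const char *path = "/sys/fs/cgroup/cpuset/mygroup/cpuset.memory_migrate";
        int fd = open(path, O_WRONLY);

        if (fd < 0) {
                perror("open");
                return 1;
        }
        /* "1" turns the flag on; update_flag() then sets CS_MEMORY_MIGRATE. */
        if (write(fd, "1", 1) != 1) {
                perror("write");
                close(fd);
                return 1;
        }
        close(fd);
        return 0;
}
#endif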
 1785 
 1786 /*
 1787  *      cpuset_css_alloc - allocate a cpuset css
 1788  *      cont:   control group that the new cpuset will be part of
 1789  */
 1790 
 1791 static struct cgroup_subsys_state *cpuset_css_alloc(struct cgroup *cont)
 1792 {
 1793         struct cgroup *parent_cg = cont->parent;
 1794         struct cgroup *tmp_cg;
 1795         struct cpuset *parent, *cs;
 1796 
 1797         if (!parent_cg)
 1798                 return &top_cpuset.css;
 1799         parent = cgroup_cs(parent_cg);
 1800 
 1801         cs = kmalloc(sizeof(*cs), GFP_KERNEL);
 1802         if (!cs)
 1803                 return ERR_PTR(-ENOMEM);
 1804         if (!alloc_cpumask_var(&cs->cpus_allowed, GFP_KERNEL)) {
 1805                 kfree(cs);
 1806                 return ERR_PTR(-ENOMEM);
 1807         }
 1808 
 1809         cs->flags = 0;
 1810         if (is_spread_page(parent))
 1811                 set_bit(CS_SPREAD_PAGE, &cs->flags);
 1812         if (is_spread_slab(parent))
 1813                 set_bit(CS_SPREAD_SLAB, &cs->flags);
 1814         set_bit(CS_SCHED_LOAD_BALANCE, &cs->flags);
 1815         cpumask_clear(cs->cpus_allowed);
 1816         nodes_clear(cs->mems_allowed);
 1817         fmeter_init(&cs->fmeter);
 1818         cs->relax_domain_level = -1;
 1819 
 1820         cs->parent = parent;
 1821         number_of_cpusets++;
 1822 
 1823         if (!test_bit(CGRP_CPUSET_CLONE_CHILDREN, &cont->flags))
 1824                 goto skip_clone;
 1825 
 1826         /*
 1827          * Clone @parent's configuration if CGRP_CPUSET_CLONE_CHILDREN is
 1828          * set.  This flag handling is implemented in cgroup core for
 1829  * historical reasons - the flag may be specified during mount.
 1830          *
 1831          * Currently, if any sibling cpusets have exclusive cpus or mem, we
 1832          * refuse to clone the configuration - thereby refusing the task to
 1833          * be entered, and as a result refusing the sys_unshare() or
 1834          * clone() which initiated it.  If this becomes a problem for some
 1835          * users who wish to allow that scenario, then this could be
 1836          * changed to grant parent->cpus_allowed-sibling_cpus_exclusive
 1837          * (and likewise for mems) to the new cgroup.
 1838          */
 1839         list_for_each_entry(tmp_cg, &parent_cg->children, sibling) {
 1840                 struct cpuset *tmp_cs = cgroup_cs(tmp_cg);
 1841 
 1842                 if (is_mem_exclusive(tmp_cs) || is_cpu_exclusive(tmp_cs))
 1843                         goto skip_clone;
 1844         }
 1845 
 1846         mutex_lock(&callback_mutex);
 1847         cs->mems_allowed = parent->mems_allowed;
 1848         cpumask_copy(cs->cpus_allowed, parent->cpus_allowed);
 1849         mutex_unlock(&callback_mutex);
 1850 skip_clone:
 1851         return &cs->css;
 1852 }
 1853 
 1854 /*
 1855  * If the cpuset being removed has its flag 'sched_load_balance'
 1856  * enabled, then simulate turning sched_load_balance off, which
 1857  * will call async_rebuild_sched_domains().
 1858  */
 1859 
 1860 static void cpuset_css_free(struct cgroup *cont)
 1861 {
 1862         struct cpuset *cs = cgroup_cs(cont);
 1863 
 1864         if (is_sched_load_balance(cs))
 1865                 update_flag(CS_SCHED_LOAD_BALANCE, cs, 0);
 1866 
 1867         number_of_cpusets--;
 1868         free_cpumask_var(cs->cpus_allowed);
 1869         kfree(cs);
 1870 }
 1871 
 1872 struct cgroup_subsys cpuset_subsys = {
 1873         .name = "cpuset",
 1874         .css_alloc = cpuset_css_alloc,
 1875         .css_free = cpuset_css_free,
 1876         .can_attach = cpuset_can_attach,
 1877         .attach = cpuset_attach,
 1878         .subsys_id = cpuset_subsys_id,
 1879         .base_cftypes = files,
 1880         .early_init = 1,
 1881 };
 1882 
 1883 /**
 1884  * cpuset_init - initialize cpusets at system boot
 1885  *
 1886  * Description: Initialize top_cpuset and the cpuset internal file system.
 1887  **/
 1888 
 1889 int __init cpuset_init(void)
 1890 {
 1891         int err = 0;
 1892 
 1893         if (!alloc_cpumask_var(&top_cpuset.cpus_allowed, GFP_KERNEL))
 1894                 BUG();
 1895 
 1896         cpumask_setall(top_cpuset.cpus_allowed);
 1897         nodes_setall(top_cpuset.mems_allowed);
 1898 
 1899         fmeter_init(&top_cpuset.fmeter);
 1900         set_bit(CS_SCHED_LOAD_BALANCE, &top_cpuset.flags);
 1901         top_cpuset.relax_domain_level = -1;
 1902 
 1903         err = register_filesystem(&cpuset_fs_type);
 1904         if (err < 0)
 1905                 return err;
 1906 
 1907         if (!alloc_cpumask_var(&cpus_attach, GFP_KERNEL))
 1908                 BUG();
 1909 
 1910         number_of_cpusets = 1;
 1911         return 0;
 1912 }
 1913 
 1914 /**
 1915  * cpuset_do_move_task - move a given task to another cpuset
 1916  * @tsk: pointer to the task_struct of the task to move
 1917  * @scan: struct cgroup_scanner contained in its struct cpuset_hotplug_scanner
 1918  *
 1919  * Called by cgroup_scan_tasks() for each task in a cgroup.
 1920  * This callback cannot stop the walk; every task in the cgroup is moved.
 1921  */
 1922 static void cpuset_do_move_task(struct task_struct *tsk,
 1923                                 struct cgroup_scanner *scan)
 1924 {
 1925         struct cgroup *new_cgroup = scan->data;
 1926 
 1927         cgroup_attach_task(new_cgroup, tsk);
 1928 }
 1929 
 1930 /**
 1931  * move_member_tasks_to_cpuset - move tasks from one cpuset to another
 1932  * @from: cpuset in which the tasks currently reside
 1933  * @to: cpuset to which the tasks will be moved
 1934  *
 1935  * Called with cgroup_mutex held
 1936  * callback_mutex must not be held, as cpuset_attach() will take it.
 1937  *
 1938  * The cgroup_scan_tasks() function will scan all the tasks in a cgroup,
 1939  * calling callback functions for each.
 1940  */
 1941 static void move_member_tasks_to_cpuset(struct cpuset *from, struct cpuset *to)
 1942 {
 1943         struct cgroup_scanner scan;
 1944 
 1945         scan.cg = from->css.cgroup;
 1946         scan.test_task = NULL; /* select all tasks in cgroup */
 1947         scan.process_task = cpuset_do_move_task;
 1948         scan.heap = NULL;
 1949         scan.data = to->css.cgroup;
 1950 
 1951         if (cgroup_scan_tasks(&scan))
 1952                 printk(KERN_ERR "move_member_tasks_to_cpuset: "
 1953                                 "cgroup_scan_tasks failed\n");
 1954 }
 1955 
 1956 /*
 1957  * If CPU and/or memory hotplug handlers, below, unplug any CPUs
 1958  * or memory nodes, we need to walk over the cpuset hierarchy,
 1959  * removing that CPU or node from all cpusets.  If this removes the
 1960  * last CPU or node from a cpuset, then move the tasks in the empty
 1961  * cpuset to its next-highest non-empty parent.
 1962  *
 1963  * Called with cgroup_mutex held
 1964  * callback_mutex must not be held, as cpuset_attach() will take it.
 1965  */
 1966 static void remove_tasks_in_empty_cpuset(struct cpuset *cs)
 1967 {
 1968         struct cpuset *parent;
 1969 
 1970         /*
 1971          * The cgroup's css_sets list is in use if there are tasks
 1972          * in the cpuset; the list is empty if there are none;
 1973  * the cs->css.refcnt always seems to be 0.
 1974          */
 1975         if (list_empty(&cs->css.cgroup->css_sets))
 1976                 return;
 1977 
 1978         /*
 1979  * Find its next-highest non-empty parent (the top cpuset
 1980  * has online cpus, so it can't be empty).
 1981          */
 1982         parent = cs->parent;
 1983         while (cpumask_empty(parent->cpus_allowed) ||
 1984                         nodes_empty(parent->mems_allowed))
 1985                 parent = parent->parent;
 1986 
 1987         move_member_tasks_to_cpuset(cs, parent);
 1988 }
 1989 
 1990 /*
 1991  * Helper function to traverse cpusets.
 1992  * It can be used to walk the cpuset tree from top to bottom, completing
 1993  * one layer before dropping down to the next (thus always processing a
 1994  * node before any of its children).
 1995  */
 1996 static struct cpuset *cpuset_next(struct list_head *queue)
 1997 {
 1998         struct cpuset *cp;
 1999         struct cpuset *child;   /* scans child cpusets of cp */
 2000         struct cgroup *cont;
 2001 
 2002         if (list_empty(queue))
 2003                 return NULL;
 2004 
 2005         cp = list_first_entry(queue, struct cpuset, stack_list);
 2006         list_del(queue->next);
 2007         list_for_each_entry(cont, &cp->css.cgroup->children, sibling) {
 2008                 child = cgroup_cs(cont);
 2009                 list_add_tail(&child->stack_list, queue);
 2010         }
 2011 
 2012         return cp;
 2013 }
 2014 
 2015 
 2016 /*
 2017  * Walk the specified cpuset subtree upon a hotplug operation (CPU/Memory
 2018  * online/offline) and update the cpusets accordingly.
 2019  * For regular CPU/Mem hotplug, look for empty cpusets; the tasks of such
 2020  * a cpuset must be moved to a parent cpuset.
 2021  *
 2022  * Called with cgroup_mutex held.  We take callback_mutex to modify
 2023  * cpus_allowed and mems_allowed.
 2024  *
 2025  * This walk processes the tree from top to bottom, completing one layer
 2026  * before dropping down to the next.  It always processes a node before
 2027  * any of its children.
 2028  *
 2029  * In the case of memory hot-unplug, it will remove nodes from N_MEMORY
 2030  * if all present pages from a node are offlined.
 2031  */
 2032 static void
 2033 scan_cpusets_upon_hotplug(struct cpuset *root, enum hotplug_event event)
 2034 {
 2035         LIST_HEAD(queue);
 2036         struct cpuset *cp;              /* scans cpusets being updated */
 2037         static nodemask_t oldmems;      /* protected by cgroup_mutex */
 2038 
 2039         list_add_tail((struct list_head *)&root->stack_list, &queue);
 2040 
 2041         switch (event) {
 2042         case CPUSET_CPU_OFFLINE:
 2043                 while ((cp = cpuset_next(&queue)) != NULL) {
 2044 
 2045                         /* Continue past cpusets with all cpus online */
 2046                         if (cpumask_subset(cp->cpus_allowed, cpu_active_mask))
 2047                                 continue;
 2048 
 2049                         /* Remove offline cpus from this cpuset. */
 2050                         mutex_lock(&callback_mutex);
 2051                         cpumask_and(cp->cpus_allowed, cp->cpus_allowed,
 2052                                                         cpu_active_mask);
 2053                         mutex_unlock(&callback_mutex);
 2054 
 2055                         /* Move tasks from the empty cpuset to a parent */
 2056                         if (cpumask_empty(cp->cpus_allowed))
 2057                                 remove_tasks_in_empty_cpuset(cp);
 2058                         else
 2059                                 update_tasks_cpumask(cp, NULL);
 2060                 }
 2061                 break;
 2062 
 2063         case CPUSET_MEM_OFFLINE:
 2064                 while ((cp = cpuset_next(&queue)) != NULL) {
 2065 
 2066                         /* Continue past cpusets with all mems online */
 2067                         if (nodes_subset(cp->mems_allowed,
 2068                                         node_states[N_MEMORY]))
 2069                                 continue;
 2070 
 2071                         oldmems = cp->mems_allowed;
 2072 
 2073                         /* Remove offline mems from this cpuset. */
 2074                         mutex_lock(&callback_mutex);
 2075                         nodes_and(cp->mems_allowed, cp->mems_allowed,
 2076                                                 node_states[N_MEMORY]);
 2077                         mutex_unlock(&callback_mutex);
 2078 
 2079                         /* Move tasks from the empty cpuset to a parent */
 2080                         if (nodes_empty(cp->mems_allowed))
 2081                                 remove_tasks_in_empty_cpuset(cp);
 2082                         else
 2083                                 update_tasks_nodemask(cp, &oldmems, NULL);
 2084                 }
 2085         }
 2086 }
 2087 
 2088 /*
 2089  * The top_cpuset tracks what CPUs and Memory Nodes are online,
 2090  * period.  This is necessary in order to make cpusets transparent
 2091  * (of no effect) on systems that are actively using CPU hotplug
 2092  * but making no active use of cpusets.
 2093  *
 2094  * The only exception to this is suspend/resume, where we don't
 2095  * modify cpusets at all.
 2096  *
 2097  * This routine ensures that top_cpuset.cpus_allowed tracks
 2098  * cpu_active_mask on each CPU hotplug (cpuhp) event.
 2099  *
 2100  * Called within get_online_cpus().  Needs to call cgroup_lock()
 2101  * before calling generate_sched_domains().
 2102  *
 2103  * @cpu_online: Indicates whether this is a CPU online event (true) or
 2104  * a CPU offline event (false).
 2105  */
 2106 void cpuset_update_active_cpus(bool cpu_online)
 2107 {
 2108         struct sched_domain_attr *attr;
 2109         cpumask_var_t *doms;
 2110         int ndoms;
 2111 
 2112         cgroup_lock();
 2113         mutex_lock(&callback_mutex);
 2114         cpumask_copy(top_cpuset.cpus_allowed, cpu_active_mask);
 2115         mutex_unlock(&callback_mutex);
 2116 
 2117         if (!cpu_online)
 2118                 scan_cpusets_upon_hotplug(&top_cpuset, CPUSET_CPU_OFFLINE);
 2119 
 2120         ndoms = generate_sched_domains(&doms, &attr);
 2121         cgroup_unlock();
 2122 
 2123         /* Have scheduler rebuild the domains */
 2124         partition_sched_domains(ndoms, doms, attr);
 2125 }
 2126 
 2127 #ifdef CONFIG_MEMORY_HOTPLUG
 2128 /*
 2129  * Keep top_cpuset.mems_allowed tracking node_states[N_MEMORY].
 2130  * Call this routine anytime after node_states[N_MEMORY] changes.
 2131  * See cpuset_update_active_cpus() for CPU hotplug handling.
 2132  */
 2133 static int cpuset_track_online_nodes(struct notifier_block *self,
 2134                                 unsigned long action, void *arg)
 2135 {
 2136         static nodemask_t oldmems;      /* protected by cgroup_mutex */
 2137 
 2138         cgroup_lock();
 2139         switch (action) {
 2140         case MEM_ONLINE:
 2141                 oldmems = top_cpuset.mems_allowed;
 2142                 mutex_lock(&callback_mutex);
 2143                 top_cpuset.mems_allowed = node_states[N_MEMORY];
 2144                 mutex_unlock(&callback_mutex);
 2145                 update_tasks_nodemask(&top_cpuset, &oldmems, NULL);
 2146                 break;
 2147         case MEM_OFFLINE:
 2148                 /*
 2149                  * needn't update top_cpuset.mems_allowed explicitly because
 2150                  * scan_cpusets_upon_hotplug() will update it.
 2151                  */
 2152                 scan_cpusets_upon_hotplug(&top_cpuset, CPUSET_MEM_OFFLINE);
 2153                 break;
 2154         default:
 2155                 break;
 2156         }
 2157         cgroup_unlock();
 2158 
 2159         return NOTIFY_OK;
 2160 }
 2161 #endif
 2162 
 2163 /**
 2164  * cpuset_init_smp - initialize cpus_allowed
 2165  *
 2166  * Description: Finish initializing top_cpuset after the cpu and node maps are initialized
 2167  **/
 2168 
 2169 void __init cpuset_init_smp(void)
 2170 {
 2171         cpumask_copy(top_cpuset.cpus_allowed, cpu_active_mask);
 2172         top_cpuset.mems_allowed = node_states[N_MEMORY];
 2173 
 2174         hotplug_memory_notifier(cpuset_track_online_nodes, 10);
 2175 
 2176         cpuset_wq = create_singlethread_workqueue("cpuset");
 2177         BUG_ON(!cpuset_wq);
 2178 }
 2179 
 2180 /**
 2181  * cpuset_cpus_allowed - return cpus_allowed mask from a task's cpuset.
 2182  * @tsk: pointer to task_struct from which to obtain cpuset->cpus_allowed.
 2183  * @pmask: pointer to struct cpumask variable to receive cpus_allowed set.
 2184  *
 2185  * Description: Returns the cpumask_var_t cpus_allowed of the cpuset
 2186  * attached to the specified @tsk.  Guaranteed to return some non-empty
 2187  * subset of cpu_online_mask, even if this means going outside the
 2188  * task's cpuset.
 2189  **/
 2190 
 2191 void cpuset_cpus_allowed(struct task_struct *tsk, struct cpumask *pmask)
 2192 {
 2193         mutex_lock(&callback_mutex);
 2194         task_lock(tsk);
 2195         guarantee_online_cpus(task_cs(tsk), pmask);
 2196         task_unlock(tsk);
 2197         mutex_unlock(&callback_mutex);
 2198 }
 2199 
 2200 void cpuset_cpus_allowed_fallback(struct task_struct *tsk)
 2201 {
 2202         const struct cpuset *cs;
 2203 
 2204         rcu_read_lock();
 2205         cs = task_cs(tsk);
 2206         if (cs)
 2207                 do_set_cpus_allowed(tsk, cs->cpus_allowed);
 2208         rcu_read_unlock();
 2209 
 2210         /*
 2211          * We own tsk->cpus_allowed, nobody can change it under us.
 2212          *
 2213          * But we used cs && cs->cpus_allowed lockless and thus can
 2214          * race with cgroup_attach_task() or update_cpumask() and get
 2215          * the wrong tsk->cpus_allowed. However, both cases imply the
 2216          * subsequent cpuset_change_cpumask()->set_cpus_allowed_ptr()
 2217          * which takes task_rq_lock().
 2218          *
 2219          * If we are called after it dropped the lock we must see all
 2220  * changes in task_cs()->cpus_allowed. Otherwise we can temporarily
 2221          * set any mask even if it is not right from task_cs() pov,
 2222          * the pending set_cpus_allowed_ptr() will fix things.
 2223          *
 2224  * select_fallback_rq() will fix things up and set cpu_possible_mask
 2225          * if required.
 2226          */
 2227 }
 2228 
 2229 void cpuset_init_current_mems_allowed(void)
 2230 {
 2231         nodes_setall(current->mems_allowed);
 2232 }
 2233 
 2234 /**
 2235  * cpuset_mems_allowed - return mems_allowed mask from a task's cpuset.
 2236  * @tsk: pointer to task_struct from which to obtain cpuset->mems_allowed.
 2237  *
 2238  * Description: Returns the nodemask_t mems_allowed of the cpuset
 2239  * attached to the specified @tsk.  Guaranteed to return some non-empty
 2240  * subset of node_states[N_MEMORY], even if this means going outside the
 2241  * task's cpuset.
 2242  **/
 2243 
 2244 nodemask_t cpuset_mems_allowed(struct task_struct *tsk)
 2245 {
 2246         nodemask_t mask;
 2247 
 2248         mutex_lock(&callback_mutex);
 2249         task_lock(tsk);
 2250         guarantee_online_mems(task_cs(tsk), &mask);
 2251         task_unlock(tsk);
 2252         mutex_unlock(&callback_mutex);
 2253 
 2254         return mask;
 2255 }
 2256 
 2257 /**
 2258  * cpuset_nodemask_valid_mems_allowed - check nodemask vs. current mems_allowed
 2259  * @nodemask: the nodemask to be checked
 2260  *
 2261  * Are any of the nodes in the nodemask allowed in current->mems_allowed?
 2262  */
 2263 int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask)
 2264 {
 2265         return nodes_intersects(*nodemask, current->mems_allowed);
 2266 }
 2267 
 2268 /*
 2269  * nearest_hardwall_ancestor() - Returns the nearest mem_exclusive or
 2270  * mem_hardwall ancestor to the specified cpuset.  Call holding
 2271  * callback_mutex.  If no ancestor is mem_exclusive or mem_hardwall
 2272  * (an unusual configuration), then returns the root cpuset.
 2273  */
 2274 static const struct cpuset *nearest_hardwall_ancestor(const struct cpuset *cs)
 2275 {
 2276         while (!(is_mem_exclusive(cs) || is_mem_hardwall(cs)) && cs->parent)
 2277                 cs = cs->parent;
 2278         return cs;
 2279 }
 2280 
 2281 /**
 2282  * cpuset_node_allowed_softwall - Can we allocate on a memory node?
 2283  * @node: is this an allowed node?
 2284  * @gfp_mask: memory allocation flags
 2285  *
 2286  * If we're in interrupt, yes, we can always allocate.  If __GFP_THISNODE is
 2287  * set, yes, we can always allocate.  If node is in our task's mems_allowed,
 2288  * yes.  If it's not a __GFP_HARDWALL request and this node is in the nearest
 2289  * hardwalled cpuset ancestor to this task's cpuset, yes.  If the task has been
 2290  * OOM killed and has access to memory reserves as specified by the TIF_MEMDIE
 2291  * flag, yes.
 2292  * Otherwise, no.
 2293  *
 2294  * If __GFP_HARDWALL is set, cpuset_node_allowed_softwall() reduces to
 2295  * cpuset_node_allowed_hardwall().  Otherwise, cpuset_node_allowed_softwall()
 2296  * might sleep, and might allow a node from an enclosing cpuset.
 2297  *
 2298  * cpuset_node_allowed_hardwall() only handles the simpler case of hardwall
 2299  * cpusets, and never sleeps.
 2300  *
 2301  * The __GFP_THISNODE placement logic is really handled elsewhere,
 2302  * by forcibly using a zonelist starting at a specified node, and by
 2303  * (in get_page_from_freelist()) refusing to consider the zones for
 2304  * any node on the zonelist except the first.  By the time any such
 2305  * calls get to this routine, we should just shut up and say 'yes'.
 2306  *
 2307  * GFP_USER allocations are marked with the __GFP_HARDWALL bit,
 2308  * and do not allow allocations outside the current task's cpuset
 2309  * unless the task has been OOM killed and is marked TIF_MEMDIE.
 2310  * GFP_KERNEL allocations are not so marked, so can escape to the
 2311  * nearest enclosing hardwalled ancestor cpuset.
 2312  *
 2313  * Scanning up parent cpusets requires callback_mutex.  The
 2314  * __alloc_pages() routine only calls here with __GFP_HARDWALL bit
 2315  * _not_ set if it's a GFP_KERNEL allocation, and all nodes in the
 2316  * current task's mems_allowed came up empty on the first pass over
 2317  * the zonelist.  So only GFP_KERNEL allocations, if all nodes in the
 2318  * cpuset are short of memory, might require taking the callback_mutex
 2319  * mutex.
 2320  *
 2321  * The first call here from mm/page_alloc:get_page_from_freelist()
 2322  * has __GFP_HARDWALL set in gfp_mask, enforcing hardwall cpusets,
 2323  * so no allocation on a node outside the cpuset is allowed (unless
 2324  * in interrupt, of course).
 2325  *
 2326  * The second pass through get_page_from_freelist() doesn't even call
 2327  * here for GFP_ATOMIC calls.  For those calls, the __alloc_pages()
 2328  * variable 'wait' is not set, and the bit ALLOC_CPUSET is not set
 2329  * in alloc_flags.  That logic and the checks below have the combined
 2330  * effect that:
 2331  *      in_interrupt - any node ok (current task context irrelevant)
 2332  *      GFP_ATOMIC   - any node ok
 2333  *      TIF_MEMDIE   - any node ok
 2334  *      GFP_KERNEL   - any node in enclosing hardwalled cpuset ok
 2335  *      GFP_USER     - only nodes in current task's mems_allowed ok.
 2336  *
 2337  * Rule:
 2338  *    Don't call cpuset_node_allowed_softwall if you can't sleep, unless you
 2339  *    pass in the __GFP_HARDWALL flag set in gfp_flag, which disables
 2340  *    the code that might scan up ancestor cpusets and sleep.
 2341  */
 2342 int __cpuset_node_allowed_softwall(int node, gfp_t gfp_mask)
 2343 {
 2344         const struct cpuset *cs;        /* current cpuset ancestors */
 2345         int allowed;                    /* is allocation in zone z allowed? */
 2346 
 2347         if (in_interrupt() || (gfp_mask & __GFP_THISNODE))
 2348                 return 1;
 2349         might_sleep_if(!(gfp_mask & __GFP_HARDWALL));
 2350         if (node_isset(node, current->mems_allowed))
 2351                 return 1;
 2352         /*
 2353          * Allow tasks that have access to memory reserves because they have
 2354          * been OOM killed to get memory anywhere.
 2355          */
 2356         if (unlikely(test_thread_flag(TIF_MEMDIE)))
 2357                 return 1;
 2358         if (gfp_mask & __GFP_HARDWALL)  /* If hardwall request, stop here */
 2359                 return 0;
 2360 
 2361         if (current->flags & PF_EXITING) /* Let dying task have memory */
 2362                 return 1;
 2363 
 2364         /* Not hardwall and node outside mems_allowed: scan up cpusets */
 2365         mutex_lock(&callback_mutex);
 2366 
 2367         task_lock(current);
 2368         cs = nearest_hardwall_ancestor(task_cs(current));
 2369         task_unlock(current);
 2370 
 2371         allowed = node_isset(node, cs->mems_allowed);
 2372         mutex_unlock(&callback_mutex);
 2373         return allowed;
 2374 }
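
/*
 * A hedged, illustrative sketch (wrapped in #if 0, not taken from any
 * kernel call site) of how an allocation path might honour the rule above:
 * contexts that cannot sleep use the hardwall check, while sleepable
 * GFP_KERNEL-style callers may let the softwall check scan up the cpuset
 * hierarchy.  cpuset_node_allowed_softwall()/_hardwall() are the inline
 * wrappers (in include/linux/cpuset.h) around the functions in this file.
 */
#if 0
#include <linux/cpuset.h>
#include <linux/gfp.h>

static int node_ok_for_alloc(int node, gfp_t gfp_mask)
{
        /* No sleeping allowed?  Only the hardwall variant is safe here. */
        if (!(gfp_mask & __GFP_WAIT))
                return cpuset_node_allowed_hardwall(node, gfp_mask);

        /* Sleepable context: the softwall check may walk up ancestors. */
        return cpuset_node_allowed_softwall(node, gfp_mask);
}
#endif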
 2375 
 2376 /*
 2377  * cpuset_node_allowed_hardwall - Can we allocate on a memory node?
 2378  * @node: is this an allowed node?
 2379  * @gfp_mask: memory allocation flags
 2380  *
 2381  * If we're in interrupt, yes, we can always allocate.  If __GFP_THISNODE is
 2382  * set, yes, we can always allocate.  If node is in our task's mems_allowed,
 2383  * yes.  If the task has been OOM killed and has access to memory reserves as
 2384  * specified by the TIF_MEMDIE flag, yes.
 2385  * Otherwise, no.
 2386  *
 2387  * The __GFP_THISNODE placement logic is really handled elsewhere,
 2388  * by forcibly using a zonelist starting at a specified node, and by
 2389  * (in get_page_from_freelist()) refusing to consider the zones for
 2390  * any node on the zonelist except the first.  By the time any such
 2391  * calls get to this routine, we should just shut up and say 'yes'.
 2392  *
 2393  * Unlike the cpuset_node_allowed_softwall() variant, above,
 2394  * this variant requires that the node be in the current task's
 2395  * mems_allowed or that we're in interrupt.  It does not scan up the
 2396  * cpuset hierarchy for the nearest enclosing mem_exclusive cpuset.
 2397  * It never sleeps.
 2398  */
 2399 int __cpuset_node_allowed_hardwall(int node, gfp_t gfp_mask)
 2400 {
 2401         if (in_interrupt() || (gfp_mask & __GFP_THISNODE))
 2402                 return 1;
 2403         if (node_isset(node, current->mems_allowed))
 2404                 return 1;
 2405         /*
 2406          * Allow tasks that have access to memory reserves because they have
 2407          * been OOM killed to get memory anywhere.
 2408          */
 2409         if (unlikely(test_thread_flag(TIF_MEMDIE)))
 2410                 return 1;
 2411         return 0;
 2412 }
 2413 
 2414 /**
 2415  * cpuset_unlock - release lock on cpuset changes
 2416  *
 2417  * Undo the lock taken in a previous cpuset_lock() call.
 2418  */
 2419 
 2420 void cpuset_unlock(void)
 2421 {
 2422         mutex_unlock(&callback_mutex);
 2423 }
 2424 
 2425 /**
 2426  * cpuset_mem_spread_node() - On which node to begin search for a file page
 2427  * cpuset_slab_spread_node() - On which node to begin search for a slab page
 2428  *
 2429  * If a task is marked PF_SPREAD_PAGE or PF_SPREAD_SLAB (as for
 2430  * tasks in a cpuset with is_spread_page or is_spread_slab set),
 2431  * and if the memory allocation used cpuset_mem_spread_node()
 2432  * to determine on which node to start looking, as it will for
 2433  * certain page cache or slab cache pages such as used for file
 2434  * system buffers and inode caches, then instead of starting the search
 2435  * for a free page on the local node, the starting node is spread
 2436  * around the task's mems_allowed nodes.
 2437  *
 2438  * We don't have to worry about the returned node being offline
 2439  * because "it can't happen", and even if it did, it would be ok.
 2440  *
 2441  * The routines calling guarantee_online_mems() are careful to
 2442  * only set nodes in task->mems_allowed that are online.  So it
 2443  * should not be possible for the following code to return an
 2444  * offline node.  But if it did, that would be ok, as this routine
 2445  * is not returning the node where the allocation must be, only
 2446  * the node where the search should start.  The zonelist passed to
 2447  * __alloc_pages() will include all nodes.  If the slab allocator
 2448  * is passed an offline node, it will fall back to the local node.
 2449  * See kmem_cache_alloc_node().
 2450  */
 2451 
 2452 static int cpuset_spread_node(int *rotor)
 2453 {
 2454         int node;
 2455 
 2456         node = next_node(*rotor, current->mems_allowed);
 2457         if (node == MAX_NUMNODES)
 2458                 node = first_node(current->mems_allowed);
 2459         *rotor = node;
 2460         return node;
 2461 }
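
/*
 * A minimal, standalone user-space sketch (wrapped in #if 0, not part of
 * the kernel build) of the rotor behaviour above, using a plain bitmask in
 * place of a nodemask: each call returns the next allowed node after the
 * previous one and wraps back to the first allowed node, so successive
 * allocations are spread round-robin over the allowed nodes.
 */
#if 0
#include <stdio.h>

#define NNODES 8

static int next_allowed(int prev, unsigned int allowed)
{
        int n;

        for (n = prev + 1; n < NNODES; n++)     /* like next_node() */
                if (allowed & (1u << n))
                        return n;
        for (n = 0; n < NNODES; n++)            /* wrap, like first_node() */
                if (allowed & (1u << n))
                        return n;
        return -1;
}

int main(void)
{
        unsigned int allowed = 0x2d;    /* nodes 0, 2, 3 and 5 allowed */
        int rotor = -1;
        int i;

        for (i = 0; i < 8; i++) {
                rotor = next_allowed(rotor, allowed);
                printf("%d ", rotor);
        }
        printf("\n");                   /* prints: 0 2 3 5 0 2 3 5 */
        return 0;
}
#endif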
 2462 
 2463 int cpuset_mem_spread_node(void)
 2464 {
 2465         if (current->cpuset_mem_spread_rotor == NUMA_NO_NODE)
 2466                 current->cpuset_mem_spread_rotor =
 2467                         node_random(&current->mems_allowed);
 2468 
 2469         return cpuset_spread_node(&current->cpuset_mem_spread_rotor);
 2470 }
 2471 
 2472 int cpuset_slab_spread_node(void)
 2473 {
 2474         if (current->cpuset_slab_spread_rotor == NUMA_NO_NODE)
 2475                 current->cpuset_slab_spread_rotor =
 2476                         node_random(&current->mems_allowed);
 2477 
 2478         return cpuset_spread_node(&current->cpuset_slab_spread_rotor);
 2479 }
 2480 
 2481 EXPORT_SYMBOL_GPL(cpuset_mem_spread_node);
 2482 
 2483 /**
 2484  * cpuset_mems_allowed_intersects - Does @tsk1's mems_allowed intersect @tsk2's?
 2485  * @tsk1: pointer to task_struct of some task.
 2486  * @tsk2: pointer to task_struct of some other task.
 2487  *
 2488  * Description: Return true if @tsk1's mems_allowed intersects the
 2489  * mems_allowed of @tsk2.  Used by the OOM killer to determine if
 2490  * one of the task's memory usage might impact the memory available
 2491  * to the other.
 2492  **/
 2493 
 2494 int cpuset_mems_allowed_intersects(const struct task_struct *tsk1,
 2495                                    const struct task_struct *tsk2)
 2496 {
 2497         return nodes_intersects(tsk1->mems_allowed, tsk2->mems_allowed);
 2498 }
 2499 
 2500 /**
 2501  * cpuset_print_task_mems_allowed - prints task's cpuset and mems_allowed
 2502  * @tsk: pointer to task_struct of some task.
 2503  *
 2504  * Description: Prints @tsk's name, cpuset name, and cached copy of its
 2505  * mems_allowed to the kernel log.  Must hold task_lock(tsk) to allow
 2506  * dereferencing task_cs(tsk).
 2507  */
 2508 void cpuset_print_task_mems_allowed(struct task_struct *tsk)
 2509 {
 2510         struct dentry *dentry;
 2511 
 2512         dentry = task_cs(tsk)->css.cgroup->dentry;
 2513         spin_lock(&cpuset_buffer_lock);
 2514         snprintf(cpuset_name, CPUSET_NAME_LEN,
 2515                  dentry ? (const char *)dentry->d_name.name : "/");
 2516         nodelist_scnprintf(cpuset_nodelist, CPUSET_NODELIST_LEN,
 2517                            tsk->mems_allowed);
 2518         printk(KERN_INFO "%s cpuset=%s mems_allowed=%s\n",
 2519                tsk->comm, cpuset_name, cpuset_nodelist);
 2520         spin_unlock(&cpuset_buffer_lock);
 2521 }
 2522 
 2523 /*
 2524  * Collection of memory_pressure is suppressed unless
 2525  * this flag is enabled by writing "1" to the special
 2526  * cpuset file 'memory_pressure_enabled' in the root cpuset.
 2527  */
 2528 
 2529 int cpuset_memory_pressure_enabled __read_mostly;
 2530 
 2531 /**
 2532  * cpuset_memory_pressure_bump - keep stats of per-cpuset reclaims.
 2533  *
 2534  * Keep a running average of the rate of synchronous (direct)
 2535  * page reclaim efforts initiated by tasks in each cpuset.
 2536  *
 2537  * This represents the rate at which some task in the cpuset
 2538  * ran low on memory on all nodes it was allowed to use, and
 2539  * had to enter the kernel's page reclaim code in an effort to
 2540  * create more free memory by tossing clean pages or swapping
 2541  * or writing dirty pages.
 2542  *
 2543  * Display to user space in the per-cpuset read-only file
 2544  * "memory_pressure".  Value displayed is an integer
 2545  * representing the recent rate of entry into the synchronous
 2546  * (direct) page reclaim by any task attached to the cpuset.
 2547  **/
 2548 
 2549 void __cpuset_memory_pressure_bump(void)
 2550 {
 2551         task_lock(current);
 2552         fmeter_markevent(&task_cs(current)->fmeter);
 2553         task_unlock(current);
 2554 }
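
/*
 * A minimal, standalone user-space sketch (wrapped in #if 0, not part of
 * the kernel build) that reads the resulting per-cpuset "memory_pressure"
 * value.  The value is the fmeter output: N*1000 corresponds to roughly N
 * direct-reclaim events per second, and it is only collected once
 * memory_pressure_enabled has been set in the root cpuset.  The path and
 * the "mygroup" directory are assumptions for illustration.
 */
#if 0
#include <stdio.h>

int main(void)
{
        /* Assumed path; depends on the cgroup/cpuset mount and group name. */
        const char *path = "/sys/fs/cgroup/cpuset/mygroup/cpuset.memory_pressure";
        FILE *f = fopen(path, "r");
        long rate;

        if (!f) {
                perror("fopen");
                return 1;
        }
        if (fscanf(f, "%ld", &rate) == 1)
                printf("direct reclaim rate: about %ld.%03ld events/sec\n",
                       rate / 1000, rate % 1000);
        fclose(f);
        return 0;
}
#endif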
 2555 
 2556 #ifdef CONFIG_PROC_PID_CPUSET
 2557 /*
 2558  * proc_cpuset_show()
 2559  *  - Print task's cpuset path into seq_file.
 2560  *  - Used for /proc/<pid>/cpuset.
 2561  *  - No need to task_lock(tsk) on this tsk->cpuset reference, as it
 2562  *    doesn't really matter if tsk->cpuset changes after we read it,
 2563  *    and we take cgroup_mutex, keeping cpuset_attach() from changing it
 2564  *    anyway.
 2565  */
 2566 static int proc_cpuset_show(struct seq_file *m, void *unused_v)
 2567 {
 2568         struct pid *pid;
 2569         struct task_struct *tsk;
 2570         char *buf;
 2571         struct cgroup_subsys_state *css;
 2572         int retval;
 2573 
 2574         retval = -ENOMEM;
 2575         buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
 2576         if (!buf)
 2577                 goto out;
 2578 
 2579         retval = -ESRCH;
 2580         pid = m->private;
 2581         tsk = get_pid_task(pid, PIDTYPE_PID);
 2582         if (!tsk)
 2583                 goto out_free;
 2584 
 2585         retval = -EINVAL;
 2586         cgroup_lock();
 2587         css = task_subsys_state(tsk, cpuset_subsys_id);
 2588         retval = cgroup_path(css->cgroup, buf, PAGE_SIZE);
 2589         if (retval < 0)
 2590                 goto out_unlock;
 2591         seq_puts(m, buf);
 2592         seq_putc(m, '\n');
 2593 out_unlock:
 2594         cgroup_unlock();
 2595         put_task_struct(tsk);
 2596 out_free:
 2597         kfree(buf);
 2598 out:
 2599         return retval;
 2600 }
 2601 
 2602 static int cpuset_open(struct inode *inode, struct file *file)
 2603 {
 2604         struct pid *pid = PROC_I(inode)->pid;
 2605         return single_open(file, proc_cpuset_show, pid);
 2606 }
 2607 
 2608 const struct file_operations proc_cpuset_operations = {
 2609         .open           = cpuset_open,
 2610         .read           = seq_read,
 2611         .llseek         = seq_lseek,
 2612         .release        = single_release,
 2613 };
 2614 #endif /* CONFIG_PROC_PID_CPUSET */
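
/*
 * A minimal, standalone user-space sketch (wrapped in #if 0, not part of
 * the kernel build): with CONFIG_PROC_PID_CPUSET enabled, the handler above
 * makes a task's cpuset path readable from /proc/<pid>/cpuset, so
 * /proc/self/cpuset reports the current task's cpuset.
 */
#if 0
#include <stdio.h>

int main(void)
{
        char line[256];
        FILE *f = fopen("/proc/self/cpuset", "r");

        if (!f) {
                perror("fopen");
                return 1;
        }
        if (fgets(line, sizeof(line), f))
                printf("cpuset path: %s", line); /* e.g. "/" for the root cpuset */
        fclose(f);
        return 0;
}
#endif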
 2615 
 2616 /* Display task mems_allowed in /proc/<pid>/status file. */
 2617 void cpuset_task_status_allowed(struct seq_file *m, struct task_struct *task)
 2618 {
 2619         seq_printf(m, "Mems_allowed:\t");
 2620         seq_nodemask(m, &task->mems_allowed);
 2621         seq_printf(m, "\n");
 2622         seq_printf(m, "Mems_allowed_list:\t");
 2623         seq_nodemask_list(m, &task->mems_allowed);
 2624         seq_printf(m, "\n");
 2625 }
