FreeBSD/Linux Kernel Cross Reference
sys/kernel/workqueue.c


    1 /*
    2  * kernel/workqueue.c - generic async execution with shared worker pool
    3  *
    4  * Copyright (C) 2002           Ingo Molnar
    5  *
    6  *   Derived from the taskqueue/keventd code by:
    7  *     David Woodhouse <dwmw2@infradead.org>
    8  *     Andrew Morton
    9  *     Kai Petzke <wpp@marie.physik.tu-berlin.de>
   10  *     Theodore Ts'o <tytso@mit.edu>
   11  *
   12  * Made to use alloc_percpu by Christoph Lameter.
   13  *
   14  * Copyright (C) 2010           SUSE Linux Products GmbH
   15  * Copyright (C) 2010           Tejun Heo <tj@kernel.org>
   16  *
   17  * This is the generic async execution mechanism.  Work items are
   18  * executed in process context.  The worker pool is shared and
   19  * automatically managed.  There is one worker pool for each CPU and
   20  * one extra for works which are better served by workers which are
   21  * not bound to any specific CPU.
   22  *
   23  * Please read Documentation/workqueue.txt for details.
   24  */
   25 
   26 #include <linux/export.h>
   27 #include <linux/kernel.h>
   28 #include <linux/sched.h>
   29 #include <linux/init.h>
   30 #include <linux/signal.h>
   31 #include <linux/completion.h>
   32 #include <linux/workqueue.h>
   33 #include <linux/slab.h>
   34 #include <linux/cpu.h>
   35 #include <linux/notifier.h>
   36 #include <linux/kthread.h>
   37 #include <linux/hardirq.h>
   38 #include <linux/mempolicy.h>
   39 #include <linux/freezer.h>
   40 #include <linux/kallsyms.h>
   41 #include <linux/debug_locks.h>
   42 #include <linux/lockdep.h>
   43 #include <linux/idr.h>
   44 
   45 #include "workqueue_sched.h"
   46 
   47 enum {
   48         /*
   49          * global_cwq flags
   50          *
   51          * A bound gcwq is either associated or disassociated with its CPU.
   52          * While associated (!DISASSOCIATED), all workers are bound to the
   53          * CPU and none has %WORKER_UNBOUND set and concurrency management
   54          * is in effect.
   55          *
   56          * While DISASSOCIATED, the cpu may be offline and all workers have
   57          * %WORKER_UNBOUND set and concurrency management disabled, and may
   58          * be executing on any CPU.  The gcwq behaves as an unbound one.
   59          *
   60          * Note that DISASSOCIATED can be flipped only while holding
   61          * assoc_mutex of all pools on the gcwq to avoid changing binding
   62          * state while create_worker() is in progress.
   63          */
   64         GCWQ_DISASSOCIATED      = 1 << 0,       /* cpu can't serve workers */
   65         GCWQ_FREEZING           = 1 << 1,       /* freeze in progress */
   66 
   67         /* pool flags */
   68         POOL_MANAGE_WORKERS     = 1 << 0,       /* need to manage workers */
   69         POOL_MANAGING_WORKERS   = 1 << 1,       /* managing workers */
   70 
   71         /* worker flags */
   72         WORKER_STARTED          = 1 << 0,       /* started */
   73         WORKER_DIE              = 1 << 1,       /* die die die */
   74         WORKER_IDLE             = 1 << 2,       /* is idle */
   75         WORKER_PREP             = 1 << 3,       /* preparing to run works */
   76         WORKER_CPU_INTENSIVE    = 1 << 6,       /* cpu intensive */
   77         WORKER_UNBOUND          = 1 << 7,       /* worker is unbound */
   78 
   79         WORKER_NOT_RUNNING      = WORKER_PREP | WORKER_UNBOUND |
   80                                   WORKER_CPU_INTENSIVE,
   81 
   82         NR_WORKER_POOLS         = 2,            /* # worker pools per gcwq */
   83 
   84         BUSY_WORKER_HASH_ORDER  = 6,            /* 64 pointers */
   85         BUSY_WORKER_HASH_SIZE   = 1 << BUSY_WORKER_HASH_ORDER,
   86         BUSY_WORKER_HASH_MASK   = BUSY_WORKER_HASH_SIZE - 1,
   87 
   88         MAX_IDLE_WORKERS_RATIO  = 4,            /* 1/4 of busy can be idle */
   89         IDLE_WORKER_TIMEOUT     = 300 * HZ,     /* keep idle ones for 5 mins */
   90 
   91         MAYDAY_INITIAL_TIMEOUT  = HZ / 100 >= 2 ? HZ / 100 : 2,
   92                                                 /* call for help after 10ms
   93                                                    (min two ticks) */
   94         MAYDAY_INTERVAL         = HZ / 10,      /* and then every 100ms */
   95         CREATE_COOLDOWN         = HZ,           /* time to breathe after fail */
   96 
   97         /*
   98          * Rescue workers are used only in emergencies and are shared by
   99          * all cpus.  Give them nice level -20.
  100          */
  101         RESCUER_NICE_LEVEL      = -20,
  102         HIGHPRI_NICE_LEVEL      = -20,
  103 };
  104 
  105 /*
  106  * Structure fields follow one of the following exclusion rules.
  107  *
  108  * I: Modifiable by initialization/destruction paths and read-only for
  109  *    everyone else.
  110  *
  111  * P: Preemption protected.  Disabling preemption is enough and should
  112  *    only be modified and accessed from the local cpu.
  113  *
  114  * L: gcwq->lock protected.  Access with gcwq->lock held.
  115  *
  116  * X: During normal operation, modification requires gcwq->lock and
  117  *    should be done only from local cpu.  Either disabling preemption
  118  *    on local cpu or grabbing gcwq->lock is enough for read access.
  119  *    If GCWQ_DISASSOCIATED is set, it's identical to L.
  120  *
  121  * F: wq->flush_mutex protected.
  122  *
  123  * W: workqueue_lock protected.
  124  */
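
/*
 * A minimal sketch of the "L:" rule above (editorial illustration, not
 * part of the kernel source; pool_has_pending() is hypothetical):
 * fields marked L: may only be touched with gcwq->lock held.
 */
#if 0
static int pool_has_pending(struct worker_pool *pool)
{
        struct global_cwq *gcwq = pool->gcwq;
        int ret;

        spin_lock_irq(&gcwq->lock);
        /* pool->worklist is L: protected - only walk it under the lock */
        ret = !list_empty(&pool->worklist);
        spin_unlock_irq(&gcwq->lock);
        return ret;
}
#endif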
  125 
  126 struct global_cwq;
  127 struct worker_pool;
  128 
  129 /*
  130  * The poor guys doing the actual heavy lifting.  All on-duty workers
  131  * are either serving the manager role, on idle list or on busy hash.
  132  */
  133 struct worker {
  134         /* on idle list while idle, on busy hash table while busy */
  135         union {
  136                 struct list_head        entry;  /* L: while idle */
  137                 struct hlist_node       hentry; /* L: while busy */
  138         };
  139 
  140         struct work_struct      *current_work;  /* L: work being processed */
  141         struct cpu_workqueue_struct *current_cwq; /* L: current_work's cwq */
  142         struct list_head        scheduled;      /* L: scheduled works */
  143         struct task_struct      *task;          /* I: worker task */
  144         struct worker_pool      *pool;          /* I: the associated pool */
  145         /* 64 bytes boundary on 64bit, 32 on 32bit */
  146         unsigned long           last_active;    /* L: last active timestamp */
  147         unsigned int            flags;          /* X: flags */
  148         int                     id;             /* I: worker id */
  149 
  150         /* for rebinding worker to CPU */
  151         struct work_struct      rebind_work;    /* L: for busy worker */
  152 };
  153 
  154 struct worker_pool {
  155         struct global_cwq       *gcwq;          /* I: the owning gcwq */
  156         unsigned int            flags;          /* X: flags */
  157 
  158         struct list_head        worklist;       /* L: list of pending works */
  159         int                     nr_workers;     /* L: total number of workers */
  160 
  161         /* nr_idle includes the ones off idle_list for rebinding */
  162         int                     nr_idle;        /* L: currently idle ones */
  163 
  164         struct list_head        idle_list;      /* X: list of idle workers */
  165         struct timer_list       idle_timer;     /* L: worker idle timeout */
  166         struct timer_list       mayday_timer;   /* L: SOS timer for workers */
  167 
  168         struct mutex            assoc_mutex;    /* protect GCWQ_DISASSOCIATED */
  169         struct ida              worker_ida;     /* L: for worker IDs */
  170 };
  171 
  172 /*
  173  * Global per-cpu workqueue.  There's one and only one for each cpu
  174  * and all works are queued and processed here regardless of their
  175  * target workqueues.
  176  */
  177 struct global_cwq {
  178         spinlock_t              lock;           /* the gcwq lock */
  179         unsigned int            cpu;            /* I: the associated cpu */
  180         unsigned int            flags;          /* L: GCWQ_* flags */
  181 
  182         /* workers are chained either in busy_hash or pool idle_list */
  183         struct hlist_head       busy_hash[BUSY_WORKER_HASH_SIZE];
  184                                                 /* L: hash of busy workers */
  185 
  186         struct worker_pool      pools[NR_WORKER_POOLS];
  187                                                 /* normal and highpri pools */
  188 } ____cacheline_aligned_in_smp;
  189 
  190 /*
  191  * The per-CPU workqueue.  The lower WORK_STRUCT_FLAG_BITS of
  192  * work_struct->data are used for flags and thus cwqs need to be
  193  * aligned to 1 << WORK_STRUCT_FLAG_BITS so the flag bits stay free.
  194  */
  195 struct cpu_workqueue_struct {
  196         struct worker_pool      *pool;          /* I: the associated pool */
  197         struct workqueue_struct *wq;            /* I: the owning workqueue */
  198         int                     work_color;     /* L: current color */
  199         int                     flush_color;    /* L: flushing color */
  200         int                     nr_in_flight[WORK_NR_COLORS];
  201                                                 /* L: nr of in_flight works */
  202         int                     nr_active;      /* L: nr of active works */
  203         int                     max_active;     /* L: max active works */
  204         struct list_head        delayed_works;  /* L: delayed works */
  205 };
  206 
  207 /*
  208  * Structure used to wait for workqueue flush.
  209  */
  210 struct wq_flusher {
  211         struct list_head        list;           /* F: list of flushers */
  212         int                     flush_color;    /* F: flush color waiting for */
  213         struct completion       done;           /* flush completion */
  214 };
  215 
  216 /*
  217  * All cpumasks are assumed to be always set on UP and thus can't be
  218  * used to determine whether there's something to be done.
  219  */
  220 #ifdef CONFIG_SMP
  221 typedef cpumask_var_t mayday_mask_t;
  222 #define mayday_test_and_set_cpu(cpu, mask)      \
  223         cpumask_test_and_set_cpu((cpu), (mask))
  224 #define mayday_clear_cpu(cpu, mask)             cpumask_clear_cpu((cpu), (mask))
  225 #define for_each_mayday_cpu(cpu, mask)          for_each_cpu((cpu), (mask))
  226 #define alloc_mayday_mask(maskp, gfp)           zalloc_cpumask_var((maskp), (gfp))
  227 #define free_mayday_mask(mask)                  free_cpumask_var((mask))
  228 #else
  229 typedef unsigned long mayday_mask_t;
  230 #define mayday_test_and_set_cpu(cpu, mask)      test_and_set_bit(0, &(mask))
  231 #define mayday_clear_cpu(cpu, mask)             clear_bit(0, &(mask))
  232 #define for_each_mayday_cpu(cpu, mask)          if ((cpu) = 0, (mask))
  233 #define alloc_mayday_mask(maskp, gfp)           true
  234 #define free_mayday_mask(mask)                  do { } while (0)
  235 #endif
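
/*
 * Sketch of how the mayday abstraction above is used (hedged; loosely
 * based on the send_mayday/rescuer logic further down this file, which
 * is not shown in this excerpt):
 */
#if 0
        /* work side: flag @cpu and kick the rescuer once */
        if (!mayday_test_and_set_cpu(cpu, wq->mayday_mask))
                wake_up_process(wq->rescuer->task);

        /* rescuer side: drain and clear the flagged cpus */
        for_each_mayday_cpu(cpu, wq->mayday_mask) {
                mayday_clear_cpu(cpu, wq->mayday_mask);
                /* ... process the works of get_cwq(cpu, wq) ... */
        }
#endif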
  236 
  237 /*
  238  * The externally visible workqueue abstraction is an array of
  239  * per-CPU workqueues:
  240  */
  241 struct workqueue_struct {
  242         unsigned int            flags;          /* W: WQ_* flags */
  243         union {
  244                 struct cpu_workqueue_struct __percpu    *pcpu;
  245                 struct cpu_workqueue_struct             *single;
  246                 unsigned long                           v;
  247         } cpu_wq;                               /* I: cwq's */
  248         struct list_head        list;           /* W: list of all workqueues */
  249 
  250         struct mutex            flush_mutex;    /* protects wq flushing */
  251         int                     work_color;     /* F: current work color */
  252         int                     flush_color;    /* F: current flush color */
  253         atomic_t                nr_cwqs_to_flush; /* flush in progress */
  254         struct wq_flusher       *first_flusher; /* F: first flusher */
  255         struct list_head        flusher_queue;  /* F: flush waiters */
  256         struct list_head        flusher_overflow; /* F: flush overflow list */
  257 
  258         mayday_mask_t           mayday_mask;    /* cpus requesting rescue */
  259         struct worker           *rescuer;       /* I: rescue worker */
  260 
  261         int                     nr_drainers;    /* W: drain in progress */
  262         int                     saved_max_active; /* W: saved cwq max_active */
  263 #ifdef CONFIG_LOCKDEP
  264         struct lockdep_map      lockdep_map;
  265 #endif
  266         char                    name[];         /* I: workqueue name */
  267 };
  268 
  269 struct workqueue_struct *system_wq __read_mostly;
  270 EXPORT_SYMBOL_GPL(system_wq);
  271 struct workqueue_struct *system_highpri_wq __read_mostly;
  272 EXPORT_SYMBOL_GPL(system_highpri_wq);
  273 struct workqueue_struct *system_long_wq __read_mostly;
  274 EXPORT_SYMBOL_GPL(system_long_wq);
  275 struct workqueue_struct *system_unbound_wq __read_mostly;
  276 EXPORT_SYMBOL_GPL(system_unbound_wq);
  277 struct workqueue_struct *system_freezable_wq __read_mostly;
  278 EXPORT_SYMBOL_GPL(system_freezable_wq);
  279 
  280 #define CREATE_TRACE_POINTS
  281 #include <trace/events/workqueue.h>
  282 
  283 #define for_each_worker_pool(pool, gcwq)                                \
  284         for ((pool) = &(gcwq)->pools[0];                                \
  285              (pool) < &(gcwq)->pools[NR_WORKER_POOLS]; (pool)++)
  286 
  287 #define for_each_busy_worker(worker, i, pos, gcwq)                      \
  288         for (i = 0; i < BUSY_WORKER_HASH_SIZE; i++)                     \
  289                 hlist_for_each_entry(worker, pos, &gcwq->busy_hash[i], hentry)
  290 
  291 static inline int __next_gcwq_cpu(int cpu, const struct cpumask *mask,
  292                                   unsigned int sw)
  293 {
  294         if (cpu < nr_cpu_ids) {
  295                 if (sw & 1) {
  296                         cpu = cpumask_next(cpu, mask);
  297                         if (cpu < nr_cpu_ids)
  298                                 return cpu;
  299                 }
  300                 if (sw & 2)
  301                         return WORK_CPU_UNBOUND;
  302         }
  303         return WORK_CPU_NONE;
  304 }
  305 
  306 static inline int __next_wq_cpu(int cpu, const struct cpumask *mask,
  307                                 struct workqueue_struct *wq)
  308 {
  309         return __next_gcwq_cpu(cpu, mask, !(wq->flags & WQ_UNBOUND) ? 1 : 2);
  310 }
  311 
  312 /*
  313  * CPU iterators
  314  *
  315  * An extra gcwq is defined for an invalid cpu number
  316  * (WORK_CPU_UNBOUND) to host workqueues which are not bound to any
  317  * specific CPU.  The following iterators are similar to
  318  * for_each_*_cpu() iterators but also consider the unbound gcwq.
  319  *
  320  * for_each_gcwq_cpu()          : possible CPUs + WORK_CPU_UNBOUND
  321  * for_each_online_gcwq_cpu()   : online CPUs + WORK_CPU_UNBOUND
  322  * for_each_cwq_cpu()           : possible CPUs for bound workqueues,
  323  *                                WORK_CPU_UNBOUND for unbound workqueues
  324  */
  325 #define for_each_gcwq_cpu(cpu)                                          \
  326         for ((cpu) = __next_gcwq_cpu(-1, cpu_possible_mask, 3);         \
  327              (cpu) < WORK_CPU_NONE;                                     \
  328              (cpu) = __next_gcwq_cpu((cpu), cpu_possible_mask, 3))
  329 
  330 #define for_each_online_gcwq_cpu(cpu)                                   \
  331         for ((cpu) = __next_gcwq_cpu(-1, cpu_online_mask, 3);           \
  332              (cpu) < WORK_CPU_NONE;                                     \
  333              (cpu) = __next_gcwq_cpu((cpu), cpu_online_mask, 3))
  334 
  335 #define for_each_cwq_cpu(cpu, wq)                                       \
  336         for ((cpu) = __next_wq_cpu(-1, cpu_possible_mask, (wq));        \
  337              (cpu) < WORK_CPU_NONE;                                     \
  338              (cpu) = __next_wq_cpu((cpu), cpu_possible_mask, (wq)))
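
/*
 * Usage sketch for the iterators above (editorial illustration, a code
 * fragment rather than a complete function): the final iteration of
 * for_each_gcwq_cpu() visits the unbound gcwq, so cpu-indexed
 * operations must check for WORK_CPU_UNBOUND.
 */
#if 0
        unsigned int cpu;

        for_each_gcwq_cpu(cpu) {
                struct global_cwq *gcwq = get_gcwq(cpu);

                if (gcwq->cpu == WORK_CPU_UNBOUND)
                        continue;       /* the extra, unbound gcwq */
                /* ... per-cpu handling ... */
        }
#endif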
  339 
  340 #ifdef CONFIG_DEBUG_OBJECTS_WORK
  341 
  342 static struct debug_obj_descr work_debug_descr;
  343 
  344 static void *work_debug_hint(void *addr)
  345 {
  346         return ((struct work_struct *) addr)->func;
  347 }
  348 
  349 /*
  350  * fixup_init is called when:
  351  * - an active object is initialized
  352  */
  353 static int work_fixup_init(void *addr, enum debug_obj_state state)
  354 {
  355         struct work_struct *work = addr;
  356 
  357         switch (state) {
  358         case ODEBUG_STATE_ACTIVE:
  359                 cancel_work_sync(work);
  360                 debug_object_init(work, &work_debug_descr);
  361                 return 1;
  362         default:
  363                 return 0;
  364         }
  365 }
  366 
  367 /*
  368  * fixup_activate is called when:
  369  * - an active object is activated
  370  * - an unknown object is activated (might be a statically initialized object)
  371  */
  372 static int work_fixup_activate(void *addr, enum debug_obj_state state)
  373 {
  374         struct work_struct *work = addr;
  375 
  376         switch (state) {
  377 
  378         case ODEBUG_STATE_NOTAVAILABLE:
  379                 /*
  380                  * This is not really a fixup. The work struct was
  381                  * statically initialized. We just make sure that it
  382                  * is tracked in the object tracker.
  383                  */
  384                 if (test_bit(WORK_STRUCT_STATIC_BIT, work_data_bits(work))) {
  385                         debug_object_init(work, &work_debug_descr);
  386                         debug_object_activate(work, &work_debug_descr);
  387                         return 0;
  388                 }
  389                 WARN_ON_ONCE(1);
  390                 return 0;
  391 
  392         case ODEBUG_STATE_ACTIVE:
  393                 WARN_ON(1);
  394 
  395         default:
  396                 return 0;
  397         }
  398 }
  399 
  400 /*
  401  * fixup_free is called when:
  402  * - an active object is freed
  403  */
  404 static int work_fixup_free(void *addr, enum debug_obj_state state)
  405 {
  406         struct work_struct *work = addr;
  407 
  408         switch (state) {
  409         case ODEBUG_STATE_ACTIVE:
  410                 cancel_work_sync(work);
  411                 debug_object_free(work, &work_debug_descr);
  412                 return 1;
  413         default:
  414                 return 0;
  415         }
  416 }
  417 
  418 static struct debug_obj_descr work_debug_descr = {
  419         .name           = "work_struct",
  420         .debug_hint     = work_debug_hint,
  421         .fixup_init     = work_fixup_init,
  422         .fixup_activate = work_fixup_activate,
  423         .fixup_free     = work_fixup_free,
  424 };
  425 
  426 static inline void debug_work_activate(struct work_struct *work)
  427 {
  428         debug_object_activate(work, &work_debug_descr);
  429 }
  430 
  431 static inline void debug_work_deactivate(struct work_struct *work)
  432 {
  433         debug_object_deactivate(work, &work_debug_descr);
  434 }
  435 
  436 void __init_work(struct work_struct *work, int onstack)
  437 {
  438         if (onstack)
  439                 debug_object_init_on_stack(work, &work_debug_descr);
  440         else
  441                 debug_object_init(work, &work_debug_descr);
  442 }
  443 EXPORT_SYMBOL_GPL(__init_work);
  444 
  445 void destroy_work_on_stack(struct work_struct *work)
  446 {
  447         debug_object_free(work, &work_debug_descr);
  448 }
  449 EXPORT_SYMBOL_GPL(destroy_work_on_stack);
  450 
  451 #else
  452 static inline void debug_work_activate(struct work_struct *work) { }
  453 static inline void debug_work_deactivate(struct work_struct *work) { }
  454 #endif
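
/*
 * Sketch of the on-stack protocol that __init_work() and
 * destroy_work_on_stack() above support (editorial illustration;
 * INIT_WORK_ONSTACK(), schedule_work() and flush_work() come from
 * linux/workqueue.h, my_work_fn is hypothetical):
 */
#if 0
        struct work_struct work;

        INIT_WORK_ONSTACK(&work, my_work_fn);   /* __init_work(&work, 1) */
        schedule_work(&work);
        flush_work(&work);      /* must finish before the stack unwinds */
        destroy_work_on_stack(&work);
#endif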
  455 
  456 /* Serializes the accesses to the list of workqueues. */
  457 static DEFINE_SPINLOCK(workqueue_lock);
  458 static LIST_HEAD(workqueues);
  459 static bool workqueue_freezing;         /* W: have wqs started freezing? */
  460 
  461 /*
  462  * The almighty global cpu workqueues.  nr_running is the only field
  463  * which is expected to be used frequently by other cpus via
  464  * try_to_wake_up().  Put it in a separate cacheline.
  465  */
  466 static DEFINE_PER_CPU(struct global_cwq, global_cwq);
  467 static DEFINE_PER_CPU_SHARED_ALIGNED(atomic_t, pool_nr_running[NR_WORKER_POOLS]);
  468 
  469 /*
  470  * Global cpu workqueue and nr_running counter for unbound gcwq.  The
  471  * gcwq is always online, has GCWQ_DISASSOCIATED set, and all its
  472  * workers have WORKER_UNBOUND set.
  473  */
  474 static struct global_cwq unbound_global_cwq;
  475 static atomic_t unbound_pool_nr_running[NR_WORKER_POOLS] = {
  476         [0 ... NR_WORKER_POOLS - 1]     = ATOMIC_INIT(0),       /* always 0 */
  477 };
  478 
  479 static int worker_thread(void *__worker);
  480 
  481 static int worker_pool_pri(struct worker_pool *pool)
  482 {
  483         return pool - pool->gcwq->pools;
  484 }
  485 
  486 static struct global_cwq *get_gcwq(unsigned int cpu)
  487 {
  488         if (cpu != WORK_CPU_UNBOUND)
  489                 return &per_cpu(global_cwq, cpu);
  490         else
  491                 return &unbound_global_cwq;
  492 }
  493 
  494 static atomic_t *get_pool_nr_running(struct worker_pool *pool)
  495 {
  496         int cpu = pool->gcwq->cpu;
  497         int idx = worker_pool_pri(pool);
  498 
  499         if (cpu != WORK_CPU_UNBOUND)
  500                 return &per_cpu(pool_nr_running, cpu)[idx];
  501         else
  502                 return &unbound_pool_nr_running[idx];
  503 }
  504 
  505 static struct cpu_workqueue_struct *get_cwq(unsigned int cpu,
  506                                             struct workqueue_struct *wq)
  507 {
  508         if (!(wq->flags & WQ_UNBOUND)) {
  509                 if (likely(cpu < nr_cpu_ids))
  510                         return per_cpu_ptr(wq->cpu_wq.pcpu, cpu);
  511         } else if (likely(cpu == WORK_CPU_UNBOUND))
  512                 return wq->cpu_wq.single;
  513         return NULL;
  514 }
  515 
  516 static unsigned int work_color_to_flags(int color)
  517 {
  518         return color << WORK_STRUCT_COLOR_SHIFT;
  519 }
  520 
  521 static int get_work_color(struct work_struct *work)
  522 {
  523         return (*work_data_bits(work) >> WORK_STRUCT_COLOR_SHIFT) &
  524                 ((1 << WORK_STRUCT_COLOR_BITS) - 1);
  525 }
  526 
  527 static int work_next_color(int color)
  528 {
  529         return (color + 1) % WORK_NR_COLORS;
  530 }
  531 
  532 /*
  533  * While queued, %WORK_STRUCT_CWQ is set and the non-flag bits of a work's data
  534  * contain the pointer to the queued cwq.  Once execution starts, the flag
  535  * is cleared and the high bits contain OFFQ flags and CPU number.
  536  *
  537  * set_work_cwq(), set_work_cpu_and_clear_pending(), mark_work_canceling()
  538  * and clear_work_data() can be used to set the cwq, cpu or clear
  539  * work->data.  These functions should only be called while the work is
  540  * owned - i.e. while the PENDING bit is set.
  541  *
  542  * get_work_[g]cwq() can be used to obtain the gcwq or cwq corresponding to
  543  * a work.  gcwq is available once the work has been queued anywhere after
  544  * initialization until it is sync canceled.  cwq is available only while
  545  * the work item is queued.
  546  *
  547  * %WORK_OFFQ_CANCELING is used to mark a work item which is being
  548  * canceled.  While being canceled, a work item may have its PENDING set
  549  * but stay off timer and worklist for arbitrarily long and nobody should
  550  * try to steal the PENDING bit.
  551  */
  552 static inline void set_work_data(struct work_struct *work, unsigned long data,
  553                                  unsigned long flags)
  554 {
  555         BUG_ON(!work_pending(work));
  556         atomic_long_set(&work->data, data | flags | work_static(work));
  557 }
  558 
  559 static void set_work_cwq(struct work_struct *work,
  560                          struct cpu_workqueue_struct *cwq,
  561                          unsigned long extra_flags)
  562 {
  563         set_work_data(work, (unsigned long)cwq,
  564                       WORK_STRUCT_PENDING | WORK_STRUCT_CWQ | extra_flags);
  565 }
  566 
  567 static void set_work_cpu_and_clear_pending(struct work_struct *work,
  568                                            unsigned int cpu)
  569 {
  570         /*
  571          * The following wmb is paired with the implied mb in
  572          * test_and_set_bit(PENDING) and ensures all updates to @work made
  573          * here are visible to and precede any updates by the next PENDING
  574          * owner.
  575          */
  576         smp_wmb();
  577         set_work_data(work, (unsigned long)cpu << WORK_OFFQ_CPU_SHIFT, 0);
  578 }
  579 
  580 static void clear_work_data(struct work_struct *work)
  581 {
  582         smp_wmb();      /* see set_work_cpu_and_clear_pending() */
  583         set_work_data(work, WORK_STRUCT_NO_CPU, 0);
  584 }
  585 
  586 static struct cpu_workqueue_struct *get_work_cwq(struct work_struct *work)
  587 {
  588         unsigned long data = atomic_long_read(&work->data);
  589 
  590         if (data & WORK_STRUCT_CWQ)
  591                 return (void *)(data & WORK_STRUCT_WQ_DATA_MASK);
  592         else
  593                 return NULL;
  594 }
  595 
  596 static struct global_cwq *get_work_gcwq(struct work_struct *work)
  597 {
  598         unsigned long data = atomic_long_read(&work->data);
  599         unsigned int cpu;
  600 
  601         if (data & WORK_STRUCT_CWQ)
  602                 return ((struct cpu_workqueue_struct *)
  603                         (data & WORK_STRUCT_WQ_DATA_MASK))->pool->gcwq;
  604 
  605         cpu = data >> WORK_OFFQ_CPU_SHIFT;
  606         if (cpu == WORK_CPU_NONE)
  607                 return NULL;
  608 
  609         BUG_ON(cpu >= nr_cpu_ids && cpu != WORK_CPU_UNBOUND);
  610         return get_gcwq(cpu);
  611 }
  612 
  613 static void mark_work_canceling(struct work_struct *work)
  614 {
  615         struct global_cwq *gcwq = get_work_gcwq(work);
  616         unsigned long cpu = gcwq ? gcwq->cpu : WORK_CPU_NONE;
  617 
  618         set_work_data(work, (cpu << WORK_OFFQ_CPU_SHIFT) | WORK_OFFQ_CANCELING,
  619                       WORK_STRUCT_PENDING);
  620 }
  621 
  622 static bool work_is_canceling(struct work_struct *work)
  623 {
  624         unsigned long data = atomic_long_read(&work->data);
  625 
  626         return !(data & WORK_STRUCT_CWQ) && (data & WORK_OFFQ_CANCELING);
  627 }
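
/*
 * Worked example of the work->data encoding handled above (editorial
 * note; the bit definitions live in linux/workqueue.h).  While queued:
 *
 *     data = cwq | WORK_STRUCT_CWQ | WORK_STRUCT_PENDING | color flags
 *
 * so get_work_cwq() recovers the cwq by masking with
 * WORK_STRUCT_WQ_DATA_MASK.  After execution starts:
 *
 *     data = (cpu << WORK_OFFQ_CPU_SHIFT) | WORK_OFFQ_* flags
 *
 * so get_work_gcwq() recovers the last gcwq from the cpu number and
 * work_is_canceling() only needs to test WORK_OFFQ_CANCELING.
 */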
  628 
  629 /*
  630  * Policy functions.  These define the policies on how the global worker
  631  * pools are managed.  Unless noted otherwise, these functions assume that
  632  * they're being called with gcwq->lock held.
  633  */
  634 
  635 static bool __need_more_worker(struct worker_pool *pool)
  636 {
  637         return !atomic_read(get_pool_nr_running(pool));
  638 }
  639 
  640 /*
  641  * Need to wake up a worker?  Called from anything but currently
  642  * running workers.
  643  *
  644  * Note that, because unbound workers never contribute to nr_running, this
  645  * function will always return %true for unbound gcwq as long as the
  646  * worklist isn't empty.
  647  */
  648 static bool need_more_worker(struct worker_pool *pool)
  649 {
  650         return !list_empty(&pool->worklist) && __need_more_worker(pool);
  651 }
  652 
  653 /* Can I start working?  Called from busy but !running workers. */
  654 static bool may_start_working(struct worker_pool *pool)
  655 {
  656         return pool->nr_idle;
  657 }
  658 
  659 /* Do I need to keep working?  Called from currently running workers. */
  660 static bool keep_working(struct worker_pool *pool)
  661 {
  662         atomic_t *nr_running = get_pool_nr_running(pool);
  663 
  664         return !list_empty(&pool->worklist) && atomic_read(nr_running) <= 1;
  665 }
  666 
  667 /* Do we need a new worker?  Called from manager. */
  668 static bool need_to_create_worker(struct worker_pool *pool)
  669 {
  670         return need_more_worker(pool) && !may_start_working(pool);
  671 }
  672 
  673 /* Do I need to be the manager? */
  674 static bool need_to_manage_workers(struct worker_pool *pool)
  675 {
  676         return need_to_create_worker(pool) ||
  677                 (pool->flags & POOL_MANAGE_WORKERS);
  678 }
  679 
  680 /* Do we have too many workers and should some go away? */
  681 static bool too_many_workers(struct worker_pool *pool)
  682 {
  683         bool managing = pool->flags & POOL_MANAGING_WORKERS;
  684         int nr_idle = pool->nr_idle + managing; /* manager is considered idle */
  685         int nr_busy = pool->nr_workers - nr_idle;
  686 
  687         /*
  688          * nr_idle and idle_list may disagree if idle rebinding is in
  689          * progress.  Never return %true if idle_list is empty.
  690          */
  691         if (list_empty(&pool->idle_list))
  692                 return false;
  693 
  694         return nr_idle > 2 && (nr_idle - 2) * MAX_IDLE_WORKERS_RATIO >= nr_busy;
  695 }
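
/*
 * Worked example for too_many_workers() (editorial note): with
 * MAX_IDLE_WORKERS_RATIO = 4 and 16 busy workers, the pool is
 * considered over-staffed once nr_idle reaches 2 + 16 / 4 = 6,
 * since (6 - 2) * 4 >= 16.
 */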
  696 
  697 /*
  698  * Wake up functions.
  699  */
  700 
  701 /* Return the first worker.  Safe with preemption disabled */
  702 static struct worker *first_worker(struct worker_pool *pool)
  703 {
  704         if (unlikely(list_empty(&pool->idle_list)))
  705                 return NULL;
  706 
  707         return list_first_entry(&pool->idle_list, struct worker, entry);
  708 }
  709 
  710 /**
  711  * wake_up_worker - wake up an idle worker
  712  * @pool: worker pool to wake worker from
  713  *
  714  * Wake up the first idle worker of @pool.
  715  *
  716  * CONTEXT:
  717  * spin_lock_irq(gcwq->lock).
  718  */
  719 static void wake_up_worker(struct worker_pool *pool)
  720 {
  721         struct worker *worker = first_worker(pool);
  722 
  723         if (likely(worker))
  724                 wake_up_process(worker->task);
  725 }
  726 
  727 /**
  728  * wq_worker_waking_up - a worker is waking up
  729  * @task: task waking up
  730  * @cpu: CPU @task is waking up to
  731  *
  732  * This function is called during try_to_wake_up() when a worker is
  733  * being awoken.
  734  *
  735  * CONTEXT:
  736  * spin_lock_irq(rq->lock)
  737  */
  738 void wq_worker_waking_up(struct task_struct *task, unsigned int cpu)
  739 {
  740         struct worker *worker = kthread_data(task);
  741 
  742         if (!(worker->flags & WORKER_NOT_RUNNING)) {
  743                 WARN_ON_ONCE(worker->pool->gcwq->cpu != cpu);
  744                 atomic_inc(get_pool_nr_running(worker->pool));
  745         }
  746 }
  747 
  748 /**
  749  * wq_worker_sleeping - a worker is going to sleep
  750  * @task: task going to sleep
  751  * @cpu: CPU in question, must be the current CPU number
  752  *
  753  * This function is called during schedule() when a busy worker is
  754  * going to sleep.  Worker on the same cpu can be woken up by
  755  * returning pointer to its task.
  756  *
  757  * CONTEXT:
  758  * spin_lock_irq(rq->lock)
  759  *
  760  * RETURNS:
  761  * Worker task on @cpu to wake up, %NULL if none.
  762  */
  763 struct task_struct *wq_worker_sleeping(struct task_struct *task,
  764                                        unsigned int cpu)
  765 {
  766         struct worker *worker = kthread_data(task), *to_wakeup = NULL;
  767         struct worker_pool *pool = worker->pool;
  768         atomic_t *nr_running = get_pool_nr_running(pool);
  769 
  770         if (worker->flags & WORKER_NOT_RUNNING)
  771                 return NULL;
  772 
  773         /* this can only happen on the local cpu */
  774         BUG_ON(cpu != raw_smp_processor_id());
  775 
  776         /*
  777          * The counterpart of the following dec_and_test, implied mb,
  778          * worklist not empty test sequence is in insert_work().
  779          * Please read comment there.
  780          *
  781          * NOT_RUNNING is clear.  This means that we're bound to and
  782          * running on the local cpu w/ rq lock held and preemption
  783          * disabled, which in turn means that no one else could be
  784          * manipulating idle_list, so dereferencing idle_list without gcwq
  785          * lock is safe.
  786          */
  787         if (atomic_dec_and_test(nr_running) && !list_empty(&pool->worklist))
  788                 to_wakeup = first_worker(pool);
  789         return to_wakeup ? to_wakeup->task : NULL;
  790 }
  791 
  792 /**
  793  * worker_set_flags - set worker flags and adjust nr_running accordingly
  794  * @worker: self
  795  * @flags: flags to set
  796  * @wakeup: wakeup an idle worker if necessary
  797  *
  798  * Set @flags in @worker->flags and adjust nr_running accordingly.  If
  799  * nr_running becomes zero and @wakeup is %true, an idle worker is
  800  * woken up.
  801  *
  802  * CONTEXT:
  803  * spin_lock_irq(gcwq->lock)
  804  */
  805 static inline void worker_set_flags(struct worker *worker, unsigned int flags,
  806                                     bool wakeup)
  807 {
  808         struct worker_pool *pool = worker->pool;
  809 
  810         WARN_ON_ONCE(worker->task != current);
  811 
  812         /*
  813          * If transitioning into NOT_RUNNING, adjust nr_running and
  814          * wake up an idle worker as necessary if requested by
  815          * @wakeup.
  816          */
  817         if ((flags & WORKER_NOT_RUNNING) &&
  818             !(worker->flags & WORKER_NOT_RUNNING)) {
  819                 atomic_t *nr_running = get_pool_nr_running(pool);
  820 
  821                 if (wakeup) {
  822                         if (atomic_dec_and_test(nr_running) &&
  823                             !list_empty(&pool->worklist))
  824                                 wake_up_worker(pool);
  825                 } else
  826                         atomic_dec(nr_running);
  827         }
  828 
  829         worker->flags |= flags;
  830 }
  831 
  832 /**
  833  * worker_clr_flags - clear worker flags and adjust nr_running accordingly
  834  * @worker: self
  835  * @flags: flags to clear
  836  *
  837  * Clear @flags in @worker->flags and adjust nr_running accordingly.
  838  *
  839  * CONTEXT:
  840  * spin_lock_irq(gcwq->lock)
  841  */
  842 static inline void worker_clr_flags(struct worker *worker, unsigned int flags)
  843 {
  844         struct worker_pool *pool = worker->pool;
  845         unsigned int oflags = worker->flags;
  846 
  847         WARN_ON_ONCE(worker->task != current);
  848 
  849         worker->flags &= ~flags;
  850 
  851         /*
  852          * If transitioning out of NOT_RUNNING, increment nr_running.  Note
  853          * that the nested NOT_RUNNING is not a noop.  NOT_RUNNING is a mask
  854          * of multiple flags, not a single flag.
  855          */
  856         if ((flags & WORKER_NOT_RUNNING) && (oflags & WORKER_NOT_RUNNING))
  857                 if (!(worker->flags & WORKER_NOT_RUNNING))
  858                         atomic_inc(get_pool_nr_running(pool));
  859 }
  860 
  861 /**
  862  * busy_worker_head - return the busy hash head for a work
  863  * @gcwq: gcwq of interest
  864  * @work: work to be hashed
  865  *
  866  * Return hash head of @gcwq for @work.
  867  *
  868  * CONTEXT:
  869  * spin_lock_irq(gcwq->lock).
  870  *
  871  * RETURNS:
  872  * Pointer to the hash head.
  873  */
  874 static struct hlist_head *busy_worker_head(struct global_cwq *gcwq,
  875                                            struct work_struct *work)
  876 {
  877         const int base_shift = ilog2(sizeof(struct work_struct));
  878         unsigned long v = (unsigned long)work;
  879 
  880         /* simple shift and fold hash, do we need something better? */
  881         v >>= base_shift;
  882         v += v >> BUSY_WORKER_HASH_ORDER;
  883         v &= BUSY_WORKER_HASH_MASK;
  884 
  885         return &gcwq->busy_hash[v];
  886 }
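
/*
 * Worked example for busy_worker_head() (editorial note, assuming a
 * 32-byte work_struct so that base_shift == 5): a work at address A
 * lands in bucket ((A >> 5) + (A >> 11)) & 63.
 */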
  887 
  888 /**
  889  * __find_worker_executing_work - find worker which is executing a work
  890  * @gcwq: gcwq of interest
  891  * @bwh: hash head as returned by busy_worker_head()
  892  * @work: work to find worker for
  893  *
  894  * Find a worker which is executing @work on @gcwq.  @bwh should be
  895  * the hash head obtained by calling busy_worker_head() with the same
  896  * work.
  897  *
  898  * CONTEXT:
  899  * spin_lock_irq(gcwq->lock).
  900  *
  901  * RETURNS:
  902  * Pointer to worker which is executing @work if found, NULL
  903  * otherwise.
  904  */
  905 static struct worker *__find_worker_executing_work(struct global_cwq *gcwq,
  906                                                    struct hlist_head *bwh,
  907                                                    struct work_struct *work)
  908 {
  909         struct worker *worker;
  910         struct hlist_node *tmp;
  911 
  912         hlist_for_each_entry(worker, tmp, bwh, hentry)
  913                 if (worker->current_work == work)
  914                         return worker;
  915         return NULL;
  916 }
  917 
  918 /**
  919  * find_worker_executing_work - find worker which is executing a work
  920  * @gcwq: gcwq of interest
  921  * @work: work to find worker for
  922  *
  923  * Find a worker which is executing @work on @gcwq.  This function is
  924  * identical to __find_worker_executing_work() except that this
  925  * function calculates @bwh itself.
  926  *
  927  * CONTEXT:
  928  * spin_lock_irq(gcwq->lock).
  929  *
  930  * RETURNS:
  931  * Pointer to worker which is executing @work if found, NULL
  932  * otherwise.
  933  */
  934 static struct worker *find_worker_executing_work(struct global_cwq *gcwq,
  935                                                  struct work_struct *work)
  936 {
  937         return __find_worker_executing_work(gcwq, busy_worker_head(gcwq, work),
  938                                             work);
  939 }
  940 
  941 /**
  942  * move_linked_works - move linked works to a list
  943  * @work: start of series of works to be scheduled
  944  * @head: target list to append @work to
  945  * @nextp: out parameter for nested worklist walking
  946  *
  947  * Schedule linked works starting from @work to @head.  Work series to
  948  * be scheduled starts at @work and includes any consecutive work with
  949  * WORK_STRUCT_LINKED set in its predecessor.
  950  *
  951  * If @nextp is not NULL, it's updated to point to the next work of
  952  * the last scheduled work.  This allows move_linked_works() to be
  953  * nested inside outer list_for_each_entry_safe().
  954  *
  955  * CONTEXT:
  956  * spin_lock_irq(gcwq->lock).
  957  */
  958 static void move_linked_works(struct work_struct *work, struct list_head *head,
  959                               struct work_struct **nextp)
  960 {
  961         struct work_struct *n;
  962 
  963         /*
  964          * Linked worklist will always end before the end of the list,
  965          * use NULL for list head.
  966          */
  967         list_for_each_entry_safe_from(work, n, NULL, entry) {
  968                 list_move_tail(&work->entry, head);
  969                 if (!(*work_data_bits(work) & WORK_STRUCT_LINKED))
  970                         break;
  971         }
  972 
  973         /*
  974          * If we're already inside safe list traversal and have moved
  975          * multiple works to the scheduled queue, the next position
  976          * needs to be updated.
  977          */
  978         if (nextp)
  979                 *nextp = n;
  980 }
  981 
  982 static void cwq_activate_delayed_work(struct work_struct *work)
  983 {
  984         struct cpu_workqueue_struct *cwq = get_work_cwq(work);
  985 
  986         trace_workqueue_activate_work(work);
  987         move_linked_works(work, &cwq->pool->worklist, NULL);
  988         __clear_bit(WORK_STRUCT_DELAYED_BIT, work_data_bits(work));
  989         cwq->nr_active++;
  990 }
  991 
  992 static void cwq_activate_first_delayed(struct cpu_workqueue_struct *cwq)
  993 {
  994         struct work_struct *work = list_first_entry(&cwq->delayed_works,
  995                                                     struct work_struct, entry);
  996 
  997         cwq_activate_delayed_work(work);
  998 }
  999 
 1000 /**
 1001  * cwq_dec_nr_in_flight - decrement cwq's nr_in_flight
 1002  * @cwq: cwq of interest
 1003  * @color: color of work which left the queue
 1004  *
 1005  * A work either has completed or is removed from pending queue,
 1006  * decrement nr_in_flight of its cwq and handle workqueue flushing.
 1007  *
 1008  * CONTEXT:
 1009  * spin_lock_irq(gcwq->lock).
 1010  */
 1011 static void cwq_dec_nr_in_flight(struct cpu_workqueue_struct *cwq, int color)
 1012 {
 1013         /* ignore uncolored works */
 1014         if (color == WORK_NO_COLOR)
 1015                 return;
 1016 
 1017         cwq->nr_in_flight[color]--;
 1018 
 1019         cwq->nr_active--;
 1020         if (!list_empty(&cwq->delayed_works)) {
 1021                 /* one down, submit a delayed one */
 1022                 if (cwq->nr_active < cwq->max_active)
 1023                         cwq_activate_first_delayed(cwq);
 1024         }
 1025 
 1026         /* is flush in progress and are we at the flushing tip? */
 1027         if (likely(cwq->flush_color != color))
 1028                 return;
 1029 
 1030         /* are there still in-flight works? */
 1031         if (cwq->nr_in_flight[color])
 1032                 return;
 1033 
 1034         /* this cwq is done, clear flush_color */
 1035         cwq->flush_color = -1;
 1036 
 1037         /*
 1038          * If this was the last cwq, wake up the first flusher.  It
 1039          * will handle the rest.
 1040          */
 1041         if (atomic_dec_and_test(&cwq->wq->nr_cwqs_to_flush))
 1042                 complete(&cwq->wq->first_flusher->done);
 1043 }
 1044 
 1045 /**
 1046  * try_to_grab_pending - steal work item from worklist and disable irq
 1047  * @work: work item to steal
 1048  * @is_dwork: @work is a delayed_work
 1049  * @flags: place to store irq state
 1050  *
 1051  * Try to grab PENDING bit of @work.  This function can handle @work in any
 1052  * stable state - idle, on timer or on worklist.  Return values are
 1053  *
 1054  *  1           if @work was pending and we successfully stole PENDING
 1055  *  0           if @work was idle and we claimed PENDING
 1056  *  -EAGAIN     if PENDING couldn't be grabbed at the moment, safe to busy-retry
 1057  *  -ENOENT     if someone else is canceling @work, this state may persist
 1058  *              for arbitrarily long
 1059  *
 1060  * On >= 0 return, the caller owns @work's PENDING bit.  To avoid getting
 1061  * interrupted while holding PENDING and @work off queue, irq must be
 1062  * disabled on entry.  This, combined with delayed_work->timer being
 1063  * irqsafe, ensures that we return -EAGAIN only for a finite short period of time.
 1064  *
 1065  * On successful return, >= 0, irq is disabled and the caller is
 1066  * responsible for releasing it using local_irq_restore(*@flags).
 1067  *
 1068  * This function is safe to call from any context including IRQ handler.
 1069  */
 1070 static int try_to_grab_pending(struct work_struct *work, bool is_dwork,
 1071                                unsigned long *flags)
 1072 {
 1073         struct global_cwq *gcwq;
 1074 
 1075         local_irq_save(*flags);
 1076 
 1077         /* try to steal the timer if it exists */
 1078         if (is_dwork) {
 1079                 struct delayed_work *dwork = to_delayed_work(work);
 1080 
 1081                 /*
 1082                  * dwork->timer is irqsafe.  If del_timer() fails, it's
 1083                  * guaranteed that the timer is not queued anywhere and not
 1084                  * running on the local CPU.
 1085                  */
 1086                 if (likely(del_timer(&dwork->timer)))
 1087                         return 1;
 1088         }
 1089 
 1090         /* try to claim PENDING the normal way */
 1091         if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work)))
 1092                 return 0;
 1093 
 1094         /*
 1095          * The queueing is in progress, or it is already queued. Try to
 1096          * steal it from ->worklist without clearing WORK_STRUCT_PENDING.
 1097          */
 1098         gcwq = get_work_gcwq(work);
 1099         if (!gcwq)
 1100                 goto fail;
 1101 
 1102         spin_lock(&gcwq->lock);
 1103         if (!list_empty(&work->entry)) {
 1104                 /*
 1105                  * This work is queued, but perhaps we locked the wrong gcwq.
 1106                  * In that case we must see the new value after rmb(), see
 1107                  * insert_work()->wmb().
 1108                  */
 1109                 smp_rmb();
 1110                 if (gcwq == get_work_gcwq(work)) {
 1111                         debug_work_deactivate(work);
 1112 
 1113                         /*
 1114                          * A delayed work item cannot be grabbed directly
 1115                          * because it might have linked NO_COLOR work items
 1116                          * which, if left on the delayed_list, will confuse
 1117                          * cwq->nr_active management later on and cause
 1118                          * stall.  Make sure the work item is activated
 1119                          * before grabbing.
 1120                          */
 1121                         if (*work_data_bits(work) & WORK_STRUCT_DELAYED)
 1122                                 cwq_activate_delayed_work(work);
 1123 
 1124                         list_del_init(&work->entry);
 1125                         cwq_dec_nr_in_flight(get_work_cwq(work),
 1126                                 get_work_color(work));
 1127 
 1128                         spin_unlock(&gcwq->lock);
 1129                         return 1;
 1130                 }
 1131         }
 1132         spin_unlock(&gcwq->lock);
 1133 fail:
 1134         local_irq_restore(*flags);
 1135         if (work_is_canceling(work))
 1136                 return -ENOENT;
 1137         cpu_relax();
 1138         return -EAGAIN;
 1139 }
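
/*
 * Sketch of the caller-side pattern for try_to_grab_pending()
 * (editorial illustration, a fragment loosely modeled on the
 * cancellation paths later in this file):
 */
#if 0
        unsigned long flags;
        int ret;

        do {
                ret = try_to_grab_pending(work, is_dwork, &flags);
                /* -ENOENT means someone else is canceling; real callers
                 * wait for that to finish instead of spinning */
        } while (unlikely(ret == -EAGAIN));

        if (ret >= 0) {
                /* we own PENDING and irqs are off */
                local_irq_restore(flags);
        }
#endif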
 1140 
 1141 /**
 1142  * insert_work - insert a work into gcwq
 1143  * @cwq: cwq @work belongs to
 1144  * @work: work to insert
 1145  * @head: insertion point
 1146  * @extra_flags: extra WORK_STRUCT_* flags to set
 1147  *
 1148  * Insert @work which belongs to @cwq into @gcwq after @head.
 1149  * @extra_flags is or'd to work_struct flags.
 1150  *
 1151  * CONTEXT:
 1152  * spin_lock_irq(gcwq->lock).
 1153  */
 1154 static void insert_work(struct cpu_workqueue_struct *cwq,
 1155                         struct work_struct *work, struct list_head *head,
 1156                         unsigned int extra_flags)
 1157 {
 1158         struct worker_pool *pool = cwq->pool;
 1159 
 1160         /* we own @work, set data and link */
 1161         set_work_cwq(work, cwq, extra_flags);
 1162 
 1163         /*
 1164          * Ensure that we get the right work->data if we see the
 1165          * result of list_add() below, see try_to_grab_pending().
 1166          */
 1167         smp_wmb();
 1168 
 1169         list_add_tail(&work->entry, head);
 1170 
 1171         /*
 1172          * Ensure either wq_worker_sleeping() sees the above
 1173          * list_add_tail() or we see zero nr_running to avoid workers
 1174          * lying around lazily while there are works to be processed.
 1175          */
 1176         smp_mb();
 1177 
 1178         if (__need_more_worker(pool))
 1179                 wake_up_worker(pool);
 1180 }
 1181 
 1182 /*
 1183  * Test whether @work is being queued from another work executing on the
 1184  * same workqueue.  This is rather expensive and should only be used from
 1185  * cold paths.
 1186  */
 1187 static bool is_chained_work(struct workqueue_struct *wq)
 1188 {
 1189         unsigned long flags;
 1190         unsigned int cpu;
 1191 
 1192         for_each_gcwq_cpu(cpu) {
 1193                 struct global_cwq *gcwq = get_gcwq(cpu);
 1194                 struct worker *worker;
 1195                 struct hlist_node *pos;
 1196                 int i;
 1197 
 1198                 spin_lock_irqsave(&gcwq->lock, flags);
 1199                 for_each_busy_worker(worker, i, pos, gcwq) {
 1200                         if (worker->task != current)
 1201                                 continue;
 1202                         spin_unlock_irqrestore(&gcwq->lock, flags);
 1203                         /*
 1204                          * I'm @worker, no locking necessary.  See if @work
 1205                          * is headed to the same workqueue.
 1206                          */
 1207                         return worker->current_cwq->wq == wq;
 1208                 }
 1209                 spin_unlock_irqrestore(&gcwq->lock, flags);
 1210         }
 1211         return false;
 1212 }
 1213 
 1214 static void __queue_work(unsigned int cpu, struct workqueue_struct *wq,
 1215                          struct work_struct *work)
 1216 {
 1217         struct global_cwq *gcwq;
 1218         struct cpu_workqueue_struct *cwq;
 1219         struct list_head *worklist;
 1220         unsigned int work_flags;
 1221         unsigned int req_cpu = cpu;
 1222 
 1223         /*
 1224          * While a work item is PENDING && off queue, a task trying to
 1225          * steal the PENDING will busy-loop waiting for it to either get
 1226          * queued or lose PENDING.  Grabbing PENDING and queueing should
 1227          * happen with IRQ disabled.
 1228          */
 1229         WARN_ON_ONCE(!irqs_disabled());
 1230 
 1231         debug_work_activate(work);
 1232 
 1233         /* if dying, only works from the same workqueue are allowed */
 1234         if (unlikely(wq->flags & WQ_DRAINING) &&
 1235             WARN_ON_ONCE(!is_chained_work(wq)))
 1236                 return;
 1237 
 1238         /* determine gcwq to use */
 1239         if (!(wq->flags & WQ_UNBOUND)) {
 1240                 struct global_cwq *last_gcwq;
 1241 
 1242                 if (cpu == WORK_CPU_UNBOUND)
 1243                         cpu = raw_smp_processor_id();
 1244 
 1245                 /*
 1246                  * It's multi cpu.  If @work was previously on a different
 1247                  * cpu, it might still be running there, in which case the
 1248                  * work needs to be queued on that cpu to guarantee
 1249                  * non-reentrancy.
 1250                  */
 1251                 gcwq = get_gcwq(cpu);
 1252                 last_gcwq = get_work_gcwq(work);
 1253 
 1254                 if (last_gcwq && last_gcwq != gcwq) {
 1255                         struct worker *worker;
 1256 
 1257                         spin_lock(&last_gcwq->lock);
 1258 
 1259                         worker = find_worker_executing_work(last_gcwq, work);
 1260 
 1261                         if (worker && worker->current_cwq->wq == wq)
 1262                                 gcwq = last_gcwq;
 1263                         else {
 1264                                 /* meh... not running there, queue here */
 1265                                 spin_unlock(&last_gcwq->lock);
 1266                                 spin_lock(&gcwq->lock);
 1267                         }
 1268                 } else {
 1269                         spin_lock(&gcwq->lock);
 1270                 }
 1271         } else {
 1272                 gcwq = get_gcwq(WORK_CPU_UNBOUND);
 1273                 spin_lock(&gcwq->lock);
 1274         }
 1275 
 1276         /* gcwq determined, get cwq and queue */
 1277         cwq = get_cwq(gcwq->cpu, wq);
 1278         trace_workqueue_queue_work(req_cpu, cwq, work);
 1279 
 1280         if (WARN_ON(!list_empty(&work->entry))) {
 1281                 spin_unlock(&gcwq->lock);
 1282                 return;
 1283         }
 1284 
 1285         cwq->nr_in_flight[cwq->work_color]++;
 1286         work_flags = work_color_to_flags(cwq->work_color);
 1287 
 1288         if (likely(cwq->nr_active < cwq->max_active)) {
 1289                 trace_workqueue_activate_work(work);
 1290                 cwq->nr_active++;
 1291                 worklist = &cwq->pool->worklist;
 1292         } else {
 1293                 work_flags |= WORK_STRUCT_DELAYED;
 1294                 worklist = &cwq->delayed_works;
 1295         }
 1296 
 1297         insert_work(cwq, work, worklist, work_flags);
 1298 
 1299         spin_unlock(&gcwq->lock);
 1300 }
 1301 
 1302 /**
 1303  * queue_work_on - queue work on specific cpu
 1304  * @cpu: CPU number to execute work on
 1305  * @wq: workqueue to use
 1306  * @work: work to queue
 1307  *
 1308  * Returns %false if @work was already on a queue, %true otherwise.
 1309  *
 1310  * We queue the work to a specific CPU; the caller must ensure it
 1311  * can't go away.
 1312  */
 1313 bool queue_work_on(int cpu, struct workqueue_struct *wq,
 1314                    struct work_struct *work)
 1315 {
 1316         bool ret = false;
 1317         unsigned long flags;
 1318 
 1319         local_irq_save(flags);
 1320 
 1321         if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
 1322                 __queue_work(cpu, wq, work);
 1323                 ret = true;
 1324         }
 1325 
 1326         local_irq_restore(flags);
 1327         return ret;
 1328 }
 1329 EXPORT_SYMBOL_GPL(queue_work_on);
 1330 
 1331 /**
 1332  * queue_work - queue work on a workqueue
 1333  * @wq: workqueue to use
 1334  * @work: work to queue
 1335  *
 1336  * Returns %false if @work was already on a queue, %true otherwise.
 1337  *
 1338  * We queue the work to the CPU on which it was submitted, but if the CPU dies
 1339  * it can be processed by another CPU.
 1340  */
 1341 bool queue_work(struct workqueue_struct *wq, struct work_struct *work)
 1342 {
 1343         return queue_work_on(WORK_CPU_UNBOUND, wq, work);
 1344 }
 1345 EXPORT_SYMBOL_GPL(queue_work);
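
/*
 * Minimal usage sketch for queue_work() (editorial illustration;
 * my_work_fn and my_work are hypothetical):
 */
#if 0
static void my_work_fn(struct work_struct *work)
{
        pr_info("my_work_fn: running in process context\n");
}
static DECLARE_WORK(my_work, my_work_fn);

        /* safe from any context, including hardirq: */
        queue_work(system_wq, &my_work);
#endif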
 1346 
 1347 void delayed_work_timer_fn(unsigned long __data)
 1348 {
 1349         struct delayed_work *dwork = (struct delayed_work *)__data;
 1350         struct cpu_workqueue_struct *cwq = get_work_cwq(&dwork->work);
 1351 
 1352         /* should have been called from irqsafe timer with irq already off */
 1353         __queue_work(dwork->cpu, cwq->wq, &dwork->work);
 1354 }
 1355 EXPORT_SYMBOL_GPL(delayed_work_timer_fn);
 1356 
 1357 static void __queue_delayed_work(int cpu, struct workqueue_struct *wq,
 1358                                 struct delayed_work *dwork, unsigned long delay)
 1359 {
 1360         struct timer_list *timer = &dwork->timer;
 1361         struct work_struct *work = &dwork->work;
 1362         unsigned int lcpu;
 1363 
 1364         WARN_ON_ONCE(timer->function != delayed_work_timer_fn ||
 1365                      timer->data != (unsigned long)dwork);
 1366         WARN_ON_ONCE(timer_pending(timer));
 1367         WARN_ON_ONCE(!list_empty(&work->entry));
 1368 
 1369         /*
 1370          * If @delay is 0, queue @dwork->work immediately.  This is for
 1371          * both optimization and correctness.  The earliest @timer can
 1372          * expire is on the closest next tick and delayed_work users depend
 1373          * on that there's no such delay when @delay is 0.
 1374          */
 1375         if (!delay) {
 1376                 __queue_work(cpu, wq, &dwork->work);
 1377                 return;
 1378         }
 1379 
 1380         timer_stats_timer_set_start_info(&dwork->timer);
 1381 
 1382         /*
 1383          * This stores cwq for the moment, for the timer_fn.  Note that the
 1384          * work's gcwq is preserved to allow reentrance detection for
 1385          * delayed works.
 1386          */
 1387         if (!(wq->flags & WQ_UNBOUND)) {
 1388                 struct global_cwq *gcwq = get_work_gcwq(work);
 1389 
 1390                 /*
 1391                  * If we cannot get the last gcwq from @work directly,
 1392                  * select the last CPU such that it avoids unnecessarily
 1393                  * triggering the non-reentrancy check in __queue_work().
 1394                  */
 1395                 lcpu = cpu;
 1396                 if (gcwq)
 1397                         lcpu = gcwq->cpu;
 1398                 if (lcpu == WORK_CPU_UNBOUND)
 1399                         lcpu = raw_smp_processor_id();
 1400         } else {
 1401                 lcpu = WORK_CPU_UNBOUND;
 1402         }
 1403 
 1404         set_work_cwq(work, get_cwq(lcpu, wq), 0);
 1405 
 1406         dwork->cpu = cpu;
 1407         timer->expires = jiffies + delay;
 1408 
 1409         if (unlikely(cpu != WORK_CPU_UNBOUND))
 1410                 add_timer_on(timer, cpu);
 1411         else
 1412                 add_timer(timer);
 1413 }
 1414 
 1415 /**
 1416  * queue_delayed_work_on - queue work on specific CPU after delay
 1417  * @cpu: CPU number to execute work on
 1418  * @wq: workqueue to use
 1419  * @dwork: work to queue
 1420  * @delay: number of jiffies to wait before queueing
 1421  *
 1422  * Returns %false if @dwork was already on a queue, %true otherwise.  If
 1423  * @delay is zero and @dwork is idle, it will be scheduled for immediate
 1424  * execution.
 1425  */
 1426 bool queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
 1427                            struct delayed_work *dwork, unsigned long delay)
 1428 {
 1429         struct work_struct *work = &dwork->work;
 1430         bool ret = false;
 1431         unsigned long flags;
 1432 
 1433         /* read the comment in __queue_work() */
 1434         local_irq_save(flags);
 1435 
 1436         if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
 1437                 __queue_delayed_work(cpu, wq, dwork, delay);
 1438                 ret = true;
 1439         }
 1440 
 1441         local_irq_restore(flags);
 1442         return ret;
 1443 }
 1444 EXPORT_SYMBOL_GPL(queue_delayed_work_on);
 1445 
 1446 /**
 1447  * queue_delayed_work - queue work on a workqueue after delay
 1448  * @wq: workqueue to use
 1449  * @dwork: delayable work to queue
 1450  * @delay: number of jiffies to wait before queueing
 1451  *
 1452  * Equivalent to queue_delayed_work_on() but tries to use the local CPU.
 1453  */
 1454 bool queue_delayed_work(struct workqueue_struct *wq,
 1455                         struct delayed_work *dwork, unsigned long delay)
 1456 {
 1457         return queue_delayed_work_on(WORK_CPU_UNBOUND, wq, dwork, delay);
 1458 }
 1459 EXPORT_SYMBOL_GPL(queue_delayed_work);
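
/*
 * Editor's example (not part of workqueue.c): a self-rearming poll using
 * queue_delayed_work().  DECLARE_DELAYED_WORK() initializes dwork->timer
 * with delayed_work_timer_fn, which the WARN_ON_ONCE() checks in
 * __queue_delayed_work() above depend on.  Names are hypothetical.
 */
static void example_poll_fn(struct work_struct *work);
static DECLARE_DELAYED_WORK(example_poll_work, example_poll_fn);

static void example_poll_fn(struct work_struct *work)
{
        /* ... poll hardware ..., then run again one second from now */
        queue_delayed_work(system_wq, &example_poll_work, HZ);
}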
 1460 
 1461 /**
 1462  * mod_delayed_work_on - modify delay of or queue a delayed work on specific CPU
 1463  * @cpu: CPU number to execute work on
 1464  * @wq: workqueue to use
 1465  * @dwork: work to queue
 1466  * @delay: number of jiffies to wait before queueing
 1467  *
 1468  * If @dwork is idle, equivalent to queue_delayed_work_on(); otherwise,
 1469  * modify @dwork's timer so that it expires after @delay.  If @delay is
 1470  * zero, @dwork is guaranteed to be scheduled immediately regardless of its
 1471  * current state.
 1472  *
 1473  * Returns %false if @dwork was idle and queued, %true if @dwork was
 1474  * pending and its timer was modified.
 1475  *
 1476  * This function is safe to call from any context including IRQ handler.
 1477  * See try_to_grab_pending() for details.
 1478  */
 1479 bool mod_delayed_work_on(int cpu, struct workqueue_struct *wq,
 1480                          struct delayed_work *dwork, unsigned long delay)
 1481 {
 1482         unsigned long flags;
 1483         int ret;
 1484 
 1485         do {
 1486                 ret = try_to_grab_pending(&dwork->work, true, &flags);
 1487         } while (unlikely(ret == -EAGAIN));
 1488 
 1489         if (likely(ret >= 0)) {
 1490                 __queue_delayed_work(cpu, wq, dwork, delay);
 1491                 local_irq_restore(flags);
 1492         }
 1493 
 1494         /* -ENOENT from try_to_grab_pending() becomes %true */
 1495         return ret;
 1496 }
 1497 EXPORT_SYMBOL_GPL(mod_delayed_work_on);
 1498 
 1499 /**
 1500  * mod_delayed_work - modify delay of or queue a delayed work
 1501  * @wq: workqueue to use
 1502  * @dwork: work to queue
 1503  * @delay: number of jiffies to wait before queueing
 1504  *
 1505  * Equivalent to mod_delayed_work_on() on the local CPU.
 1506  */
 1507 bool mod_delayed_work(struct workqueue_struct *wq, struct delayed_work *dwork,
 1508                       unsigned long delay)
 1509 {
 1510         return mod_delayed_work_on(WORK_CPU_UNBOUND, wq, dwork, delay);
 1511 }
 1512 EXPORT_SYMBOL_GPL(mod_delayed_work);
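
/*
 * Editor's example (not part of workqueue.c): mod_delayed_work() as a
 * debounce primitive.  Every call pushes the expiry out to @delay from
 * now whether or not the work was already pending, so the handler runs
 * only after a quiet period.  Names are hypothetical.
 */
static void example_flush_fn(struct work_struct *work)
{
        /* write out data accumulated during the burst */
}
static DECLARE_DELAYED_WORK(example_flush_work, example_flush_fn);

static void example_note_activity(void)
{
        /* (re)start a 100ms quiet-period timer on every event */
        mod_delayed_work(system_wq, &example_flush_work,
                         msecs_to_jiffies(100));
}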
 1513 
 1514 /**
 1515  * worker_enter_idle - enter idle state
 1516  * @worker: worker which is entering idle state
 1517  *
 1518  * @worker is entering idle state.  Update stats and idle timer if
 1519  * necessary.
 1520  *
 1521  * LOCKING:
 1522  * spin_lock_irq(gcwq->lock).
 1523  */
 1524 static void worker_enter_idle(struct worker *worker)
 1525 {
 1526         struct worker_pool *pool = worker->pool;
 1527         struct global_cwq *gcwq = pool->gcwq;
 1528 
 1529         BUG_ON(worker->flags & WORKER_IDLE);
 1530         BUG_ON(!list_empty(&worker->entry) &&
 1531                (worker->hentry.next || worker->hentry.pprev));
 1532 
 1533         /* can't use worker_set_flags(); this is also called from start_worker() */
 1534         worker->flags |= WORKER_IDLE;
 1535         pool->nr_idle++;
 1536         worker->last_active = jiffies;
 1537 
 1538         /* idle_list is LIFO */
 1539         list_add(&worker->entry, &pool->idle_list);
 1540 
 1541         if (too_many_workers(pool) && !timer_pending(&pool->idle_timer))
 1542                 mod_timer(&pool->idle_timer, jiffies + IDLE_WORKER_TIMEOUT);
 1543 
 1544         /*
 1545          * Sanity check nr_running.  Because gcwq_unbind_fn() releases
 1546          * gcwq->lock between setting %WORKER_UNBOUND and zapping
 1547          * nr_running, the warning may trigger spuriously.  Check only when
 1548          * unbind is not in progress.
 1549          */
 1550         WARN_ON_ONCE(!(gcwq->flags & GCWQ_DISASSOCIATED) &&
 1551                      pool->nr_workers == pool->nr_idle &&
 1552                      atomic_read(get_pool_nr_running(pool)));
 1553 }
 1554 
 1555 /**
 1556  * worker_leave_idle - leave idle state
 1557  * @worker: worker which is leaving idle state
 1558  *
 1559  * @worker is leaving idle state.  Update stats.
 1560  *
 1561  * LOCKING:
 1562  * spin_lock_irq(gcwq->lock).
 1563  */
 1564 static void worker_leave_idle(struct worker *worker)
 1565 {
 1566         struct worker_pool *pool = worker->pool;
 1567 
 1568         BUG_ON(!(worker->flags & WORKER_IDLE));
 1569         worker_clr_flags(worker, WORKER_IDLE);
 1570         pool->nr_idle--;
 1571         list_del_init(&worker->entry);
 1572 }
 1573 
 1574 /**
 1575  * worker_maybe_bind_and_lock - bind worker to its cpu if possible and lock gcwq
 1576  * @worker: self
 1577  *
 1578  * Works which are scheduled while the cpu is online must at least be
 1579  * scheduled to a worker which is bound to the cpu so that if they are
 1580  * flushed from cpu callbacks while cpu is going down, they are
 1581  * guaranteed to execute on the cpu.
 1582  *
 1583  * This function is to be used by rogue workers and rescuers to bind
 1584  * themselves to the target cpu and may race with cpu going down or
 1585  * coming online.  kthread_bind() can't be used because it may put the
 1586  * worker to already dead cpu and set_cpus_allowed_ptr() can't be used
 1587  * verbatim as it's best effort and blocking and gcwq may be
 1588  * [dis]associated in the meantime.
 1589  *
 1590  * This function tries set_cpus_allowed_ptr(), locks gcwq and verifies the
 1591  * binding against %GCWQ_DISASSOCIATED which is set during
 1592  * %CPU_DOWN_PREPARE and cleared during %CPU_ONLINE, so if the worker
 1593  * enters idle state or fetches works without dropping lock, it can
 1594  * guarantee the scheduling requirement described in the first paragraph.
 1595  *
 1596  * CONTEXT:
 1597  * Might sleep.  Called without any lock but returns with gcwq->lock
 1598  * held.
 1599  *
 1600  * RETURNS:
 1601  * %true if the associated gcwq is online (@worker is successfully
 1602  * bound), %false if offline.
 1603  */
 1604 static bool worker_maybe_bind_and_lock(struct worker *worker)
 1605 __acquires(&gcwq->lock)
 1606 {
 1607         struct global_cwq *gcwq = worker->pool->gcwq;
 1608         struct task_struct *task = worker->task;
 1609 
 1610         while (true) {
 1611                 /*
 1612                  * The following call may fail, succeed, or succeed
 1613                  * without actually migrating the task to the cpu if
 1614                  * it races with a cpu hotunplug operation.  Verify
 1615                  * against GCWQ_DISASSOCIATED.
 1616                  */
 1617                 if (!(gcwq->flags & GCWQ_DISASSOCIATED))
 1618                         set_cpus_allowed_ptr(task, get_cpu_mask(gcwq->cpu));
 1619 
 1620                 spin_lock_irq(&gcwq->lock);
 1621                 if (gcwq->flags & GCWQ_DISASSOCIATED)
 1622                         return false;
 1623                 if (task_cpu(task) == gcwq->cpu &&
 1624                     cpumask_equal(&current->cpus_allowed,
 1625                                   get_cpu_mask(gcwq->cpu)))
 1626                         return true;
 1627                 spin_unlock_irq(&gcwq->lock);
 1628 
 1629                 /*
 1630                  * We've raced with CPU hot[un]plug.  Give it a breather
 1631                  * and retry migration.  cond_resched() is required here;
 1632                  * otherwise, we might deadlock against cpu_stop trying to
 1633                  * bring down the CPU on non-preemptive kernel.
 1634                  */
 1635                 cpu_relax();
 1636                 cond_resched();
 1637         }
 1638 }
 1639 
 1640 /*
 1641  * Rebind an idle @worker to its CPU.  worker_thread() will test
 1642  * list_empty(@worker->entry) before leaving idle and call this function.
 1643  */
 1644 static void idle_worker_rebind(struct worker *worker)
 1645 {
 1646         struct global_cwq *gcwq = worker->pool->gcwq;
 1647 
 1648         /* CPU may go down again in between, clear UNBOUND only on success */
 1649         if (worker_maybe_bind_and_lock(worker))
 1650                 worker_clr_flags(worker, WORKER_UNBOUND);
 1651 
 1652         /* rebind complete, become available again */
 1653         list_add(&worker->entry, &worker->pool->idle_list);
 1654         spin_unlock_irq(&gcwq->lock);
 1655 }
 1656 
 1657 /*
 1658  * Function for @worker->rebind.work used to rebind unbound busy workers to
 1659  * the associated cpu which is coming back online.  This is scheduled by
 1660  * cpu up but can race with other cpu hotplug operations and may be
 1661  * executed twice without intervening cpu down.
 1662  */
 1663 static void busy_worker_rebind_fn(struct work_struct *work)
 1664 {
 1665         struct worker *worker = container_of(work, struct worker, rebind_work);
 1666         struct global_cwq *gcwq = worker->pool->gcwq;
 1667 
 1668         if (worker_maybe_bind_and_lock(worker))
 1669                 worker_clr_flags(worker, WORKER_UNBOUND);
 1670 
 1671         spin_unlock_irq(&gcwq->lock);
 1672 }
 1673 
 1674 /**
 1675  * rebind_workers - rebind all workers of a gcwq to the associated CPU
 1676  * @gcwq: gcwq of interest
 1677  *
 1678  * @gcwq->cpu is coming online.  Rebind all workers to the CPU.  Rebinding
 1679  * is different for idle and busy ones.
 1680  *
 1681  * Idle ones will be removed from the idle_list and woken up.  They will
 1682  * add themselves back after completing rebind.  This ensures that the
 1683  * idle_list doesn't contain any unbound workers when re-bound busy workers
 1684  * try to perform local wake-ups for concurrency management.
 1685  *
 1686  * Busy workers can rebind after they finish their current work items.
 1687  * Queueing the rebind work item at the head of the scheduled list is
 1688  * enough.  Note that nr_running will be properly bumped as busy workers
 1689  * rebind.
 1690  *
 1691  * On return, all non-manager workers are scheduled for rebind - see
 1692  * manage_workers() for the manager special case.  Any idle worker
 1693  * including the manager will not appear on @idle_list until rebind is
 1694  * complete, making local wake-ups safe.
 1695  */
 1696 static void rebind_workers(struct global_cwq *gcwq)
 1697 {
 1698         struct worker_pool *pool;
 1699         struct worker *worker, *n;
 1700         struct hlist_node *pos;
 1701         int i;
 1702 
 1703         lockdep_assert_held(&gcwq->lock);
 1704 
 1705         for_each_worker_pool(pool, gcwq)
 1706                 lockdep_assert_held(&pool->assoc_mutex);
 1707 
 1708         /* dequeue and kick idle ones */
 1709         for_each_worker_pool(pool, gcwq) {
 1710                 list_for_each_entry_safe(worker, n, &pool->idle_list, entry) {
 1711                         /*
 1712                          * idle workers should be off @pool->idle_list
 1713                          * until rebind is complete to avoid receiving
 1714                          * premature local wake-ups.
 1715                          */
 1716                         list_del_init(&worker->entry);
 1717 
 1718                         /*
 1719                          * worker_thread() will see the above dequeuing
 1720                          * and call idle_worker_rebind().
 1721                          */
 1722                         wake_up_process(worker->task);
 1723                 }
 1724         }
 1725 
 1726         /* rebind busy workers */
 1727         for_each_busy_worker(worker, i, pos, gcwq) {
 1728                 struct work_struct *rebind_work = &worker->rebind_work;
 1729                 struct workqueue_struct *wq;
 1730 
 1731                 if (test_and_set_bit(WORK_STRUCT_PENDING_BIT,
 1732                                      work_data_bits(rebind_work)))
 1733                         continue;
 1734 
 1735                 debug_work_activate(rebind_work);
 1736 
 1737                 /*
 1738                  * wq doesn't really matter but let's keep @worker->pool
 1739                  * and @cwq->pool consistent for sanity.
 1740                  */
 1741                 if (worker_pool_pri(worker->pool))
 1742                         wq = system_highpri_wq;
 1743                 else
 1744                         wq = system_wq;
 1745 
 1746                 insert_work(get_cwq(gcwq->cpu, wq), rebind_work,
 1747                         worker->scheduled.next,
 1748                         work_color_to_flags(WORK_NO_COLOR));
 1749         }
 1750 }
 1751 
 1752 static struct worker *alloc_worker(void)
 1753 {
 1754         struct worker *worker;
 1755 
 1756         worker = kzalloc(sizeof(*worker), GFP_KERNEL);
 1757         if (worker) {
 1758                 INIT_LIST_HEAD(&worker->entry);
 1759                 INIT_LIST_HEAD(&worker->scheduled);
 1760                 INIT_WORK(&worker->rebind_work, busy_worker_rebind_fn);
 1761                 /* on creation a worker is in !idle && prep state */
 1762                 worker->flags = WORKER_PREP;
 1763         }
 1764         return worker;
 1765 }
 1766 
 1767 /**
 1768  * create_worker - create a new workqueue worker
 1769  * @pool: pool the new worker will belong to
 1770  *
 1771  * Create a new worker which is bound to @pool.  The returned worker
 1772  * can be started by calling start_worker() or destroyed using
 1773  * destroy_worker().
 1774  *
 1775  * CONTEXT:
 1776  * Might sleep.  Does GFP_KERNEL allocations.
 1777  *
 1778  * RETURNS:
 1779  * Pointer to the newly created worker, or %NULL on failure.
 1780  */
 1781 static struct worker *create_worker(struct worker_pool *pool)
 1782 {
 1783         struct global_cwq *gcwq = pool->gcwq;
 1784         const char *pri = worker_pool_pri(pool) ? "H" : "";
 1785         struct worker *worker = NULL;
 1786         int id = -1;
 1787 
 1788         spin_lock_irq(&gcwq->lock);
 1789         while (ida_get_new(&pool->worker_ida, &id)) {
 1790                 spin_unlock_irq(&gcwq->lock);
 1791                 if (!ida_pre_get(&pool->worker_ida, GFP_KERNEL))
 1792                         goto fail;
 1793                 spin_lock_irq(&gcwq->lock);
 1794         }
 1795         spin_unlock_irq(&gcwq->lock);
 1796 
 1797         worker = alloc_worker();
 1798         if (!worker)
 1799                 goto fail;
 1800 
 1801         worker->pool = pool;
 1802         worker->id = id;
 1803 
 1804         if (gcwq->cpu != WORK_CPU_UNBOUND)
 1805                 worker->task = kthread_create_on_node(worker_thread,
 1806                                         worker, cpu_to_node(gcwq->cpu),
 1807                                         "kworker/%u:%d%s", gcwq->cpu, id, pri);
 1808         else
 1809                 worker->task = kthread_create(worker_thread, worker,
 1810                                               "kworker/u:%d%s", id, pri);
 1811         if (IS_ERR(worker->task))
 1812                 goto fail;
 1813 
 1814         if (worker_pool_pri(pool))
 1815                 set_user_nice(worker->task, HIGHPRI_NICE_LEVEL);
 1816 
 1817         /*
 1818          * Determine CPU binding of the new worker depending on
 1819          * %GCWQ_DISASSOCIATED.  The caller is responsible for ensuring the
 1820          * flag remains stable across this function.  See the comments
 1821          * above the flag definition for details.
 1822          *
 1823          * As an unbound worker may later become a regular one if CPU comes
 1824          * online, make sure every worker has %PF_THREAD_BOUND set.
 1825          */
 1826         if (!(gcwq->flags & GCWQ_DISASSOCIATED)) {
 1827                 kthread_bind(worker->task, gcwq->cpu);
 1828         } else {
 1829                 worker->task->flags |= PF_THREAD_BOUND;
 1830                 worker->flags |= WORKER_UNBOUND;
 1831         }
 1832 
 1833         return worker;
 1834 fail:
 1835         if (id >= 0) {
 1836                 spin_lock_irq(&gcwq->lock);
 1837                 ida_remove(&pool->worker_ida, id);
 1838                 spin_unlock_irq(&gcwq->lock);
 1839         }
 1840         kfree(worker);
 1841         return NULL;
 1842 }
 1843 
 1844 /**
 1845  * start_worker - start a newly created worker
 1846  * @worker: worker to start
 1847  *
 1848  * Make the gcwq aware of @worker and start it.
 1849  *
 1850  * CONTEXT:
 1851  * spin_lock_irq(gcwq->lock).
 1852  */
 1853 static void start_worker(struct worker *worker)
 1854 {
 1855         worker->flags |= WORKER_STARTED;
 1856         worker->pool->nr_workers++;
 1857         worker_enter_idle(worker);
 1858         wake_up_process(worker->task);
 1859 }
 1860 
 1861 /**
 1862  * destroy_worker - destroy a workqueue worker
 1863  * @worker: worker to be destroyed
 1864  *
 1865  * Destroy @worker and adjust @gcwq stats accordingly.
 1866  *
 1867  * CONTEXT:
 1868  * spin_lock_irq(gcwq->lock) which is released and regrabbed.
 1869  */
 1870 static void destroy_worker(struct worker *worker)
 1871 {
 1872         struct worker_pool *pool = worker->pool;
 1873         struct global_cwq *gcwq = pool->gcwq;
 1874         int id = worker->id;
 1875 
 1876         /* sanity check frenzy */
 1877         BUG_ON(worker->current_work);
 1878         BUG_ON(!list_empty(&worker->scheduled));
 1879 
 1880         if (worker->flags & WORKER_STARTED)
 1881                 pool->nr_workers--;
 1882         if (worker->flags & WORKER_IDLE)
 1883                 pool->nr_idle--;
 1884 
 1885         list_del_init(&worker->entry);
 1886         worker->flags |= WORKER_DIE;
 1887 
 1888         spin_unlock_irq(&gcwq->lock);
 1889 
 1890         kthread_stop(worker->task);
 1891         kfree(worker);
 1892 
 1893         spin_lock_irq(&gcwq->lock);
 1894         ida_remove(&pool->worker_ida, id);
 1895 }
 1896 
 1897 static void idle_worker_timeout(unsigned long __pool)
 1898 {
 1899         struct worker_pool *pool = (void *)__pool;
 1900         struct global_cwq *gcwq = pool->gcwq;
 1901 
 1902         spin_lock_irq(&gcwq->lock);
 1903 
 1904         if (too_many_workers(pool)) {
 1905                 struct worker *worker;
 1906                 unsigned long expires;
 1907 
 1908                 /* idle_list is kept in LIFO order, check the last one */
 1909                 worker = list_entry(pool->idle_list.prev, struct worker, entry);
 1910                 expires = worker->last_active + IDLE_WORKER_TIMEOUT;
 1911 
 1912                 if (time_before(jiffies, expires))
 1913                         mod_timer(&pool->idle_timer, expires);
 1914                 else {
 1915                         /* it's been idle for too long, wake up manager */
 1916                         pool->flags |= POOL_MANAGE_WORKERS;
 1917                         wake_up_worker(pool);
 1918                 }
 1919         }
 1920 
 1921         spin_unlock_irq(&gcwq->lock);
 1922 }
 1923 
 1924 static bool send_mayday(struct work_struct *work)
 1925 {
 1926         struct cpu_workqueue_struct *cwq = get_work_cwq(work);
 1927         struct workqueue_struct *wq = cwq->wq;
 1928         unsigned int cpu;
 1929 
 1930         if (!(wq->flags & WQ_RESCUER))
 1931                 return false;
 1932 
 1933         /* mayday mayday mayday */
 1934         cpu = cwq->pool->gcwq->cpu;
 1935         /* WORK_CPU_UNBOUND can't be set in cpumask, use cpu 0 instead */
 1936         if (cpu == WORK_CPU_UNBOUND)
 1937                 cpu = 0;
 1938         if (!mayday_test_and_set_cpu(cpu, wq->mayday_mask))
 1939                 wake_up_process(wq->rescuer->task);
 1940         return true;
 1941 }
 1942 
 1943 static void gcwq_mayday_timeout(unsigned long __pool)
 1944 {
 1945         struct worker_pool *pool = (void *)__pool;
 1946         struct global_cwq *gcwq = pool->gcwq;
 1947         struct work_struct *work;
 1948 
 1949         spin_lock_irq(&gcwq->lock);
 1950 
 1951         if (need_to_create_worker(pool)) {
 1952                 /*
 1953                  * We've been trying to create a new worker but
 1954                  * haven't been successful.  We might be hitting an
 1955                  * allocation deadlock.  Send distress signals to
 1956                  * rescuers.
 1957                  */
 1958                 list_for_each_entry(work, &pool->worklist, entry)
 1959                         send_mayday(work);
 1960         }
 1961 
 1962         spin_unlock_irq(&gcwq->lock);
 1963 
 1964         mod_timer(&pool->mayday_timer, jiffies + MAYDAY_INTERVAL);
 1965 }
 1966 
 1967 /**
 1968  * maybe_create_worker - create a new worker if necessary
 1969  * @pool: pool to create a new worker for
 1970  *
 1971  * Create a new worker for @pool if necessary.  @pool is guaranteed to
 1972  * have at least one idle worker on return from this function.  If
 1973  * creating a new worker takes longer than MAYDAY_INTERVAL, mayday is
 1974  * sent to all rescuers with works scheduled on @pool to resolve
 1975  * possible allocation deadlock.
 1976  *
 1977  * On return, need_to_create_worker() is guaranteed to be false and
 1978  * may_start_working() true.
 1979  *
 1980  * LOCKING:
 1981  * spin_lock_irq(gcwq->lock) which may be released and regrabbed
 1982  * multiple times.  Does GFP_KERNEL allocations.  Called only from
 1983  * manager.
 1984  *
 1985  * RETURNS:
 1986  * false if no action was taken and gcwq->lock stayed locked, true
 1987  * otherwise.
 1988  */
 1989 static bool maybe_create_worker(struct worker_pool *pool)
 1990 __releases(&gcwq->lock)
 1991 __acquires(&gcwq->lock)
 1992 {
 1993         struct global_cwq *gcwq = pool->gcwq;
 1994 
 1995         if (!need_to_create_worker(pool))
 1996                 return false;
 1997 restart:
 1998         spin_unlock_irq(&gcwq->lock);
 1999 
 2000         /* if we don't make progress in MAYDAY_INITIAL_TIMEOUT, call for help */
 2001         mod_timer(&pool->mayday_timer, jiffies + MAYDAY_INITIAL_TIMEOUT);
 2002 
 2003         while (true) {
 2004                 struct worker *worker;
 2005 
 2006                 worker = create_worker(pool);
 2007                 if (worker) {
 2008                         del_timer_sync(&pool->mayday_timer);
 2009                         spin_lock_irq(&gcwq->lock);
 2010                         start_worker(worker);
 2011                         BUG_ON(need_to_create_worker(pool));
 2012                         return true;
 2013                 }
 2014 
 2015                 if (!need_to_create_worker(pool))
 2016                         break;
 2017 
 2018                 __set_current_state(TASK_INTERRUPTIBLE);
 2019                 schedule_timeout(CREATE_COOLDOWN);
 2020 
 2021                 if (!need_to_create_worker(pool))
 2022                         break;
 2023         }
 2024 
 2025         del_timer_sync(&pool->mayday_timer);
 2026         spin_lock_irq(&gcwq->lock);
 2027         if (need_to_create_worker(pool))
 2028                 goto restart;
 2029         return true;
 2030 }
 2031 
 2032 /**
 2033  * maybe_destroy_worker - destroy workers which have been idle for a while
 2034  * @pool: pool to destroy workers for
 2035  *
 2036  * Destroy @pool workers which have been idle for longer than
 2037  * IDLE_WORKER_TIMEOUT.
 2038  *
 2039  * LOCKING:
 2040  * spin_lock_irq(gcwq->lock) which may be released and regrabbed
 2041  * multiple times.  Called only from manager.
 2042  *
 2043  * RETURNS:
 2044  * false if no action was taken and gcwq->lock stayed locked, true
 2045  * otherwise.
 2046  */
 2047 static bool maybe_destroy_workers(struct worker_pool *pool)
 2048 {
 2049         bool ret = false;
 2050 
 2051         while (too_many_workers(pool)) {
 2052                 struct worker *worker;
 2053                 unsigned long expires;
 2054 
 2055                 worker = list_entry(pool->idle_list.prev, struct worker, entry);
 2056                 expires = worker->last_active + IDLE_WORKER_TIMEOUT;
 2057 
 2058                 if (time_before(jiffies, expires)) {
 2059                         mod_timer(&pool->idle_timer, expires);
 2060                         break;
 2061                 }
 2062 
 2063                 destroy_worker(worker);
 2064                 ret = true;
 2065         }
 2066 
 2067         return ret;
 2068 }
 2069 
 2070 /**
 2071  * manage_workers - manage worker pool
 2072  * @worker: self
 2073  *
 2074  * Assume the manager role and manage the gcwq worker pool @worker belongs
 2075  * to.  At any given time, there can be only zero or one manager per
 2076  * gcwq.  The exclusion is handled automatically by this function.
 2077  *
 2078  * The caller can safely start processing works on false return.  On
 2079  * true return, it's guaranteed that need_to_create_worker() is false
 2080  * and may_start_working() is true.
 2081  *
 2082  * CONTEXT:
 2083  * spin_lock_irq(gcwq->lock) which may be released and regrabbed
 2084  * multiple times.  Does GFP_KERNEL allocations.
 2085  *
 2086  * RETURNS:
 2087  * false if no action was taken and gcwq->lock stayed locked, true if
 2088  * some action was taken.
 2089  */
 2090 static bool manage_workers(struct worker *worker)
 2091 {
 2092         struct worker_pool *pool = worker->pool;
 2093         bool ret = false;
 2094 
 2095         if (pool->flags & POOL_MANAGING_WORKERS)
 2096                 return ret;
 2097 
 2098         pool->flags |= POOL_MANAGING_WORKERS;
 2099 
 2100         /*
 2101          * To simplify both worker management and CPU hotplug, hold off
 2102          * management while hotplug is in progress.  CPU hotplug path can't
 2103          * grab %POOL_MANAGING_WORKERS to achieve this because that can
 2104          * lead to idle worker depletion (all become busy thinking someone
 2105          * else is managing) which in turn can result in deadlock under
 2106          * extreme circumstances.  Use @pool->assoc_mutex to synchronize
 2107          * manager against CPU hotplug.
 2108          *
 2109          * assoc_mutex is almost always free unless CPU hotplug is in
 2110          * progress, so trylock first without dropping @gcwq->lock.
 2111          */
 2112         if (unlikely(!mutex_trylock(&pool->assoc_mutex))) {
 2113                 spin_unlock_irq(&pool->gcwq->lock);
 2114                 mutex_lock(&pool->assoc_mutex);
 2115                 /*
 2116                  * CPU hotplug could have happened while we were waiting
 2117                  * for assoc_mutex.  Hotplug itself can't handle us
 2118                  * because manager isn't either on idle or busy list, and
 2119                  * @gcwq's state and ours could have deviated.
 2120                  *
 2121                  * As hotplug is now excluded via assoc_mutex, we can
 2122                  * simply try to bind.  It will succeed or fail depending
 2123                  * on @gcwq's current state.  Try it and adjust
 2124                  * %WORKER_UNBOUND accordingly.
 2125                  */
 2126                 if (worker_maybe_bind_and_lock(worker))
 2127                         worker->flags &= ~WORKER_UNBOUND;
 2128                 else
 2129                         worker->flags |= WORKER_UNBOUND;
 2130 
 2131                 ret = true;
 2132         }
 2133 
 2134         pool->flags &= ~POOL_MANAGE_WORKERS;
 2135 
 2136         /*
 2137          * Destroy and then create so that may_start_working() is true
 2138          * on return.
 2139          */
 2140         ret |= maybe_destroy_workers(pool);
 2141         ret |= maybe_create_worker(pool);
 2142 
 2143         pool->flags &= ~POOL_MANAGING_WORKERS;
 2144         mutex_unlock(&pool->assoc_mutex);
 2145         return ret;
 2146 }
 2147 
 2148 /**
 2149  * process_one_work - process single work
 2150  * @worker: self
 2151  * @work: work to process
 2152  *
 2153  * Process @work.  This function contains all the logic necessary to
 2154  * process a single work, including synchronization against and
 2155  * interaction with other workers on the same cpu, queueing and
 2156  * flushing.  As long as the context requirement is met, any worker can
 2157  * call this function to process a work.
 2158  *
 2159  * CONTEXT:
 2160  * spin_lock_irq(gcwq->lock) which is released and regrabbed.
 2161  */
 2162 static void process_one_work(struct worker *worker, struct work_struct *work)
 2163 __releases(&gcwq->lock)
 2164 __acquires(&gcwq->lock)
 2165 {
 2166         struct cpu_workqueue_struct *cwq = get_work_cwq(work);
 2167         struct worker_pool *pool = worker->pool;
 2168         struct global_cwq *gcwq = pool->gcwq;
 2169         struct hlist_head *bwh = busy_worker_head(gcwq, work);
 2170         bool cpu_intensive = cwq->wq->flags & WQ_CPU_INTENSIVE;
 2171         work_func_t f = work->func;
 2172         int work_color;
 2173         struct worker *collision;
 2174 #ifdef CONFIG_LOCKDEP
 2175         /*
 2176          * It is permissible to free the struct work_struct from
 2177          * inside the function that is called from it; we need to
 2178          * take this into account for lockdep too.  To avoid bogus "held
 2179          * lock freed" warnings as well as problems when looking into
 2180          * work->lockdep_map, make a copy and use that here.
 2181          */
 2182         struct lockdep_map lockdep_map;
 2183 
 2184         lockdep_copy_map(&lockdep_map, &work->lockdep_map);
 2185 #endif
 2186         /*
 2187          * Ensure we're on the correct CPU.  DISASSOCIATED test is
 2188          * necessary to avoid spurious warnings from rescuers servicing the
 2189          * unbound or a disassociated gcwq.
 2190          */
 2191         WARN_ON_ONCE(!(worker->flags & WORKER_UNBOUND) &&
 2192                      !(gcwq->flags & GCWQ_DISASSOCIATED) &&
 2193                      raw_smp_processor_id() != gcwq->cpu);
 2194 
 2195         /*
 2196          * A single work shouldn't be executed concurrently by
 2197          * multiple workers on a single cpu.  Check whether anyone is
 2198          * already processing the work.  If so, defer the work to the
 2199          * currently executing one.
 2200          */
 2201         collision = __find_worker_executing_work(gcwq, bwh, work);
 2202         if (unlikely(collision)) {
 2203                 move_linked_works(work, &collision->scheduled, NULL);
 2204                 return;
 2205         }
 2206 
 2207         /* claim and dequeue */
 2208         debug_work_deactivate(work);
 2209         hlist_add_head(&worker->hentry, bwh);
 2210         worker->current_work = work;
 2211         worker->current_cwq = cwq;
 2212         work_color = get_work_color(work);
 2213 
 2214         list_del_init(&work->entry);
 2215 
 2216         /*
 2217          * CPU intensive works don't participate in concurrency
 2218          * management.  They're the scheduler's responsibility.
 2219          */
 2220         if (unlikely(cpu_intensive))
 2221                 worker_set_flags(worker, WORKER_CPU_INTENSIVE, true);
 2222 
 2223         /*
 2224          * Unbound gcwq isn't concurrency managed and work items should be
 2225          * executed ASAP.  Wake up another worker if necessary.
 2226          */
 2227         if ((worker->flags & WORKER_UNBOUND) && need_more_worker(pool))
 2228                 wake_up_worker(pool);
 2229 
 2230         /*
 2231          * Record the last CPU and clear PENDING which should be the last
 2232          * update to @work.  Also, do this inside @gcwq->lock so that
 2233          * PENDING and queued state changes happen together while IRQ is
 2234          * disabled.
 2235          */
 2236         set_work_cpu_and_clear_pending(work, gcwq->cpu);
 2237 
 2238         spin_unlock_irq(&gcwq->lock);
 2239 
 2240         lock_map_acquire_read(&cwq->wq->lockdep_map);
 2241         lock_map_acquire(&lockdep_map);
 2242         trace_workqueue_execute_start(work);
 2243         f(work);
 2244         /*
 2245          * While we must be careful to not use "work" after this, the trace
 2246          * point will only record its address.
 2247          */
 2248         trace_workqueue_execute_end(work);
 2249         lock_map_release(&lockdep_map);
 2250         lock_map_release(&cwq->wq->lockdep_map);
 2251 
 2252         if (unlikely(in_atomic() || lockdep_depth(current) > 0)) {
 2253                 pr_err("BUG: workqueue leaked lock or atomic: %s/0x%08x/%d\n"
 2254                        "     last function: %pf\n",
 2255                        current->comm, preempt_count(), task_pid_nr(current), f);
 2256                 debug_show_held_locks(current);
 2257                 dump_stack();
 2258         }
 2259 
 2260         spin_lock_irq(&gcwq->lock);
 2261 
 2262         /* clear cpu intensive status */
 2263         if (unlikely(cpu_intensive))
 2264                 worker_clr_flags(worker, WORKER_CPU_INTENSIVE);
 2265 
 2266         /* we're done with it, release */
 2267         hlist_del_init(&worker->hentry);
 2268         worker->current_work = NULL;
 2269         worker->current_cwq = NULL;
 2270         cwq_dec_nr_in_flight(cwq, work_color);
 2271 }
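
/*
 * Editor's example (not part of workqueue.c): creating a workqueue whose
 * work items skip concurrency management via WQ_CPU_INTENSIVE, as
 * handled in process_one_work() above.  Long-running, CPU-bound items
 * queued here won't stall other per-cpu works from starting.  Names are
 * hypothetical.
 */
static struct workqueue_struct *example_crunch_wq;

static int __init example_crunch_init(void)
{
        example_crunch_wq = alloc_workqueue("example_crunch",
                                            WQ_CPU_INTENSIVE, 0);
        return example_crunch_wq ? 0 : -ENOMEM;
}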
 2272 
 2273 /**
 2274  * process_scheduled_works - process scheduled works
 2275  * @worker: self
 2276  *
 2277  * Process all scheduled works.  Please note that the scheduled list
 2278  * may change while processing a work, so this function repeatedly
 2279  * fetches a work from the top and executes it.
 2280  *
 2281  * CONTEXT:
 2282  * spin_lock_irq(gcwq->lock) which may be released and regrabbed
 2283  * multiple times.
 2284  */
 2285 static void process_scheduled_works(struct worker *worker)
 2286 {
 2287         while (!list_empty(&worker->scheduled)) {
 2288                 struct work_struct *work = list_first_entry(&worker->scheduled,
 2289                                                 struct work_struct, entry);
 2290                 process_one_work(worker, work);
 2291         }
 2292 }
 2293 
 2294 /**
 2295  * worker_thread - the worker thread function
 2296  * @__worker: self
 2297  *
 2298  * The gcwq worker thread function.  There's a single dynamic pool of
 2299  * these for each cpu.  These workers process all works regardless of
 2300  * their specific target workqueue.  The only exception is works which
 2301  * belong to workqueues with a rescuer, which will be explained in
 2302  * rescuer_thread().
 2303  */
 2304 static int worker_thread(void *__worker)
 2305 {
 2306         struct worker *worker = __worker;
 2307         struct worker_pool *pool = worker->pool;
 2308         struct global_cwq *gcwq = pool->gcwq;
 2309 
 2310         /* tell the scheduler that this is a workqueue worker */
 2311         worker->task->flags |= PF_WQ_WORKER;
 2312 woke_up:
 2313         spin_lock_irq(&gcwq->lock);
 2314 
 2315         /* we are off idle list if destruction or rebind is requested */
 2316         if (unlikely(list_empty(&worker->entry))) {
 2317                 spin_unlock_irq(&gcwq->lock);
 2318 
 2319                 /* if DIE is set, destruction is requested */
 2320                 if (worker->flags & WORKER_DIE) {
 2321                         worker->task->flags &= ~PF_WQ_WORKER;
 2322                         return 0;
 2323                 }
 2324 
 2325                 /* otherwise, rebind */
 2326                 idle_worker_rebind(worker);
 2327                 goto woke_up;
 2328         }
 2329 
 2330         worker_leave_idle(worker);
 2331 recheck:
 2332         /* no more worker necessary? */
 2333         if (!need_more_worker(pool))
 2334                 goto sleep;
 2335 
 2336         /* do we need to manage? */
 2337         if (unlikely(!may_start_working(pool)) && manage_workers(worker))
 2338                 goto recheck;
 2339 
 2340         /*
 2341          * ->scheduled list can only be filled while a worker is
 2342          * preparing to process a work or actually processing it.
 2343          * Make sure nobody diddled with it while I was sleeping.
 2344          */
 2345         BUG_ON(!list_empty(&worker->scheduled));
 2346 
 2347         /*
 2348          * When control reaches this point, we're guaranteed to have
 2349          * at least one idle worker or that someone else has already
 2350          * assumed the manager role.
 2351          */
 2352         worker_clr_flags(worker, WORKER_PREP);
 2353 
 2354         do {
 2355                 struct work_struct *work =
 2356                         list_first_entry(&pool->worklist,
 2357                                          struct work_struct, entry);
 2358 
 2359                 if (likely(!(*work_data_bits(work) & WORK_STRUCT_LINKED))) {
 2360                         /* optimization path, not strictly necessary */
 2361                         process_one_work(worker, work);
 2362                         if (unlikely(!list_empty(&worker->scheduled)))
 2363                                 process_scheduled_works(worker);
 2364                 } else {
 2365                         move_linked_works(work, &worker->scheduled, NULL);
 2366                         process_scheduled_works(worker);
 2367                 }
 2368         } while (keep_working(pool));
 2369 
 2370         worker_set_flags(worker, WORKER_PREP, false);
 2371 sleep:
 2372         if (unlikely(need_to_manage_workers(pool)) && manage_workers(worker))
 2373                 goto recheck;
 2374 
 2375         /*
 2376          * gcwq->lock is held and there's no work to process and no
 2377          * need to manage, sleep.  Workers are woken up only while
 2378          * holding gcwq->lock or from local cpu, so setting the
 2379          * current state before releasing gcwq->lock is enough to
 2380          * prevent losing any event.
 2381          */
 2382         worker_enter_idle(worker);
 2383         __set_current_state(TASK_INTERRUPTIBLE);
 2384         spin_unlock_irq(&gcwq->lock);
 2385         schedule();
 2386         goto woke_up;
 2387 }
 2388 
 2389 /**
 2390  * rescuer_thread - the rescuer thread function
 2391  * @__wq: the associated workqueue
 2392  *
 2393  * Workqueue rescuer thread function.  There's one rescuer for each
 2394  * workqueue which has WQ_RESCUER set.
 2395  *
 2396  * Regular work processing on a gcwq may block trying to create a new
 2397  * worker, which uses a GFP_KERNEL allocation and thus has a slight
 2398  * chance of developing into a deadlock if some works currently on the
 2399  * same queue need to be processed to satisfy that allocation.  This
 2400  * is the problem the rescuer solves.
 2401  *
 2402  * When such a condition is possible, the gcwq summons rescuers of all
 2403  * workqueues which have works queued on the gcwq and lets them process
 2404  * those works so that forward progress can be guaranteed.
 2405  *
 2406  * This should happen rarely.
 2407  */
 2408 static int rescuer_thread(void *__wq)
 2409 {
 2410         struct workqueue_struct *wq = __wq;
 2411         struct worker *rescuer = wq->rescuer;
 2412         struct list_head *scheduled = &rescuer->scheduled;
 2413         bool is_unbound = wq->flags & WQ_UNBOUND;
 2414         unsigned int cpu;
 2415 
 2416         set_user_nice(current, RESCUER_NICE_LEVEL);
 2417 repeat:
 2418         set_current_state(TASK_INTERRUPTIBLE);
 2419 
 2420         if (kthread_should_stop()) {
 2421                 __set_current_state(TASK_RUNNING);
 2422                 return 0;
 2423         }
 2424 
 2425         /*
 2426          * See whether any cpu is asking for help.  Unbound
 2427          * workqueues use cpu 0 in mayday_mask for WORK_CPU_UNBOUND.
 2428          */
 2429         for_each_mayday_cpu(cpu, wq->mayday_mask) {
 2430                 unsigned int tcpu = is_unbound ? WORK_CPU_UNBOUND : cpu;
 2431                 struct cpu_workqueue_struct *cwq = get_cwq(tcpu, wq);
 2432                 struct worker_pool *pool = cwq->pool;
 2433                 struct global_cwq *gcwq = pool->gcwq;
 2434                 struct work_struct *work, *n;
 2435 
 2436                 __set_current_state(TASK_RUNNING);
 2437                 mayday_clear_cpu(cpu, wq->mayday_mask);
 2438 
 2439                 /* migrate to the target cpu if possible */
 2440                 rescuer->pool = pool;
 2441                 worker_maybe_bind_and_lock(rescuer);
 2442 
 2443                 /*
 2444                  * Slurp in all works issued via this workqueue and
 2445                  * process'em.
 2446                  */
 2447                 BUG_ON(!list_empty(&rescuer->scheduled));
 2448                 list_for_each_entry_safe(work, n, &pool->worklist, entry)
 2449                         if (get_work_cwq(work) == cwq)
 2450                                 move_linked_works(work, scheduled, &n);
 2451 
 2452                 process_scheduled_works(rescuer);
 2453 
 2454                 /*
 2455                  * Leave this gcwq.  If keep_working() is %true, notify a
 2456                  * regular worker; otherwise, we end up with 0 concurrency
 2457                  * and stalling the execution.
 2458                  */
 2459                 if (keep_working(pool))
 2460                         wake_up_worker(pool);
 2461 
 2462                 spin_unlock_irq(&gcwq->lock);
 2463         }
 2464 
 2465         schedule();
 2466         goto repeat;
 2467 }
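
/*
 * Editor's example (not part of workqueue.c): a workqueue usable on the
 * memory-reclaim path.  In this version of the code, passing
 * WQ_MEM_RECLAIM to alloc_workqueue() sets up the rescuer machinery
 * serviced by rescuer_thread() above, so one pre-allocated rescuer
 * guarantees forward progress even when worker creation stalls on
 * allocation.  Names are hypothetical.
 */
static struct workqueue_struct *example_reclaim_wq;

static int __init example_reclaim_init(void)
{
        example_reclaim_wq = alloc_workqueue("example_reclaim",
                                             WQ_MEM_RECLAIM, 1);
        return example_reclaim_wq ? 0 : -ENOMEM;
}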
 2468 
 2469 struct wq_barrier {
 2470         struct work_struct      work;
 2471         struct completion       done;
 2472 };
 2473 
 2474 static void wq_barrier_func(struct work_struct *work)
 2475 {
 2476         struct wq_barrier *barr = container_of(work, struct wq_barrier, work);
 2477         complete(&barr->done);
 2478 }
 2479 
 2480 /**
 2481  * insert_wq_barrier - insert a barrier work
 2482  * @cwq: cwq to insert barrier into
 2483  * @barr: wq_barrier to insert
 2484  * @target: target work to attach @barr to
 2485  * @worker: worker currently executing @target, NULL if @target is not executing
 2486  *
 2487  * @barr is linked to @target such that @barr is completed only after
 2488  * @target finishes execution.  Please note that the ordering
 2489  * guarantee is observed only with respect to @target and on the local
 2490  * cpu.
 2491  *
 2492  * Currently, a queued barrier can't be canceled.  This is because
 2493  * try_to_grab_pending() can't determine whether the work to be
 2494  * grabbed is at the head of the queue and thus can't clear the LINKED
 2495  * flag of the previous work, while there must be a valid next work
 2496  * after a work with the LINKED flag set.
 2497  *
 2498  * Note that when @worker is non-NULL, @target may be modified
 2499  * underneath us, so we can't reliably determine cwq from @target.
 2500  *
 2501  * CONTEXT:
 2502  * spin_lock_irq(gcwq->lock).
 2503  */
 2504 static void insert_wq_barrier(struct cpu_workqueue_struct *cwq,
 2505                               struct wq_barrier *barr,
 2506                               struct work_struct *target, struct worker *worker)
 2507 {
 2508         struct list_head *head;
 2509         unsigned int linked = 0;
 2510 
 2511         /*
 2512          * debugobject calls are safe here even with gcwq->lock locked
 2513          * as we know for sure that this will not trigger any of the
 2514          * checks and call back into the fixup functions where we
 2515          * might deadlock.
 2516          */
 2517         INIT_WORK_ONSTACK(&barr->work, wq_barrier_func);
 2518         __set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(&barr->work));
 2519         init_completion(&barr->done);
 2520 
 2521         /*
 2522          * If @target is currently being executed, schedule the
 2523          * barrier to the worker; otherwise, put it after @target.
 2524          */
 2525         if (worker)
 2526                 head = worker->scheduled.next;
 2527         else {
 2528                 unsigned long *bits = work_data_bits(target);
 2529 
 2530                 head = target->entry.next;
 2531                 /* there can already be other linked works, inherit and set */
 2532                 linked = *bits & WORK_STRUCT_LINKED;
 2533                 __set_bit(WORK_STRUCT_LINKED_BIT, bits);
 2534         }
 2535 
 2536         debug_work_activate(&barr->work);
 2537         insert_work(cwq, &barr->work, head,
 2538                     work_color_to_flags(WORK_NO_COLOR) | linked);
 2539 }
 2540 
 2541 /**
 2542  * flush_workqueue_prep_cwqs - prepare cwqs for workqueue flushing
 2543  * @wq: workqueue being flushed
 2544  * @flush_color: new flush color, < 0 for no-op
 2545  * @work_color: new work color, < 0 for no-op
 2546  *
 2547  * Prepare cwqs for workqueue flushing.
 2548  *
 2549  * If @flush_color is non-negative, flush_color on all cwqs should be
 2550  * -1.  If no cwq has in-flight commands at the specified color, all
 2551  * cwq->flush_color's stay at -1 and %false is returned.  If any cwq
 2552  * has in-flight commands, its cwq->flush_color is set to
 2553  * @flush_color, @wq->nr_cwqs_to_flush is updated accordingly, cwq
 2554  * wakeup logic is armed and %true is returned.
 2555  *
 2556  * The caller should have initialized @wq->first_flusher prior to
 2557  * calling this function with non-negative @flush_color.  If
 2558  * @flush_color is negative, no flush color update is done and %false
 2559  * is returned.
 2560  *
 2561  * If @work_color is non-negative, all cwqs should have the same
 2562  * work_color which is previous to @work_color and all will be
 2563  * advanced to @work_color.
 2564  *
 2565  * CONTEXT:
 2566  * mutex_lock(wq->flush_mutex).
 2567  *
 2568  * RETURNS:
 2569  * %true if @flush_color >= 0 and there's something to flush.  %false
 2570  * otherwise.
 2571  */
 2572 static bool flush_workqueue_prep_cwqs(struct workqueue_struct *wq,
 2573                                       int flush_color, int work_color)
 2574 {
 2575         bool wait = false;
 2576         unsigned int cpu;
 2577 
 2578         if (flush_color >= 0) {
 2579                 BUG_ON(atomic_read(&wq->nr_cwqs_to_flush));
 2580                 atomic_set(&wq->nr_cwqs_to_flush, 1);
 2581         }
 2582 
 2583         for_each_cwq_cpu(cpu, wq) {
 2584                 struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
 2585                 struct global_cwq *gcwq = cwq->pool->gcwq;
 2586 
 2587                 spin_lock_irq(&gcwq->lock);
 2588 
 2589                 if (flush_color >= 0) {
 2590                         BUG_ON(cwq->flush_color != -1);
 2591 
 2592                         if (cwq->nr_in_flight[flush_color]) {
 2593                                 cwq->flush_color = flush_color;
 2594                                 atomic_inc(&wq->nr_cwqs_to_flush);
 2595                                 wait = true;
 2596                         }
 2597                 }
 2598 
 2599                 if (work_color >= 0) {
 2600                         BUG_ON(work_color != work_next_color(cwq->work_color));
 2601                         cwq->work_color = work_color;
 2602                 }
 2603 
 2604                 spin_unlock_irq(&gcwq->lock);
 2605         }
 2606 
 2607         if (flush_color >= 0 && atomic_dec_and_test(&wq->nr_cwqs_to_flush))
 2608                 complete(&wq->first_flusher->done);
 2609 
 2610         return wait;
 2611 }
 2612 
 2613 /**
 2614  * flush_workqueue - ensure that any scheduled work has run to completion.
 2615  * @wq: workqueue to flush
 2616  *
 2617  * Forces execution of the workqueue and blocks until its completion.
 2618  * This is typically used in driver shutdown handlers.
 2619  *
 2620  * We sleep until all works which were queued on entry have been handled,
 2621  * but we are not livelocked by new incoming ones.
 2622  */
 2623 void flush_workqueue(struct workqueue_struct *wq)
 2624 {
 2625         struct wq_flusher this_flusher = {
 2626                 .list = LIST_HEAD_INIT(this_flusher.list),
 2627                 .flush_color = -1,
 2628                 .done = COMPLETION_INITIALIZER_ONSTACK(this_flusher.done),
 2629         };
 2630         int next_color;
 2631 
 2632         lock_map_acquire(&wq->lockdep_map);
 2633         lock_map_release(&wq->lockdep_map);
 2634 
 2635         mutex_lock(&wq->flush_mutex);
 2636 
 2637         /*
 2638          * Start-to-wait phase
 2639          */
 2640         next_color = work_next_color(wq->work_color);
 2641 
 2642         if (next_color != wq->flush_color) {
 2643                 /*
 2644                  * Color space is not full.  The current work_color
 2645                  * becomes our flush_color and work_color is advanced
 2646                  * by one.
 2647                  */
 2648                 BUG_ON(!list_empty(&wq->flusher_overflow));
 2649                 this_flusher.flush_color = wq->work_color;
 2650                 wq->work_color = next_color;
 2651 
 2652                 if (!wq->first_flusher) {
 2653                         /* no flush in progress, become the first flusher */
 2654                         BUG_ON(wq->flush_color != this_flusher.flush_color);
 2655 
 2656                         wq->first_flusher = &this_flusher;
 2657 
 2658                         if (!flush_workqueue_prep_cwqs(wq, wq->flush_color,
 2659                                                        wq->work_color)) {
 2660                                 /* nothing to flush, done */
 2661                                 wq->flush_color = next_color;
 2662                                 wq->first_flusher = NULL;
 2663                                 goto out_unlock;
 2664                         }
 2665                 } else {
 2666                         /* wait in queue */
 2667                         BUG_ON(wq->flush_color == this_flusher.flush_color);
 2668                         list_add_tail(&this_flusher.list, &wq->flusher_queue);
 2669                         flush_workqueue_prep_cwqs(wq, -1, wq->work_color);
 2670                 }
 2671         } else {
 2672                 /*
 2673                  * Oops, color space is full, wait on overflow queue.
 2674                  * The next flush completion will assign us
 2675                  * flush_color and transfer to flusher_queue.
 2676                  */
 2677                 list_add_tail(&this_flusher.list, &wq->flusher_overflow);
 2678         }
 2679 
 2680         mutex_unlock(&wq->flush_mutex);
 2681 
 2682         wait_for_completion(&this_flusher.done);
 2683 
 2684         /*
 2685          * Wake-up-and-cascade phase
 2686          *
 2687          * First flushers are responsible for cascading flushes and
 2688          * handling overflow.  Non-first flushers can simply return.
 2689          */
 2690         if (wq->first_flusher != &this_flusher)
 2691                 return;
 2692 
 2693         mutex_lock(&wq->flush_mutex);
 2694 
 2695         /* we might have raced, check again with mutex held */
 2696         if (wq->first_flusher != &this_flusher)
 2697                 goto out_unlock;
 2698 
 2699         wq->first_flusher = NULL;
 2700 
 2701         BUG_ON(!list_empty(&this_flusher.list));
 2702         BUG_ON(wq->flush_color != this_flusher.flush_color);
 2703 
 2704         while (true) {
 2705                 struct wq_flusher *next, *tmp;
 2706 
 2707                 /* complete all the flushers sharing the current flush color */
 2708                 list_for_each_entry_safe(next, tmp, &wq->flusher_queue, list) {
 2709                         if (next->flush_color != wq->flush_color)
 2710                                 break;
 2711                         list_del_init(&next->list);
 2712                         complete(&next->done);
 2713                 }
 2714 
 2715                 BUG_ON(!list_empty(&wq->flusher_overflow) &&
 2716                        wq->flush_color != work_next_color(wq->work_color));
 2717 
 2718                 /* this flush_color is finished, advance by one */
 2719                 wq->flush_color = work_next_color(wq->flush_color);
 2720 
 2721                 /* one color has been freed, handle overflow queue */
 2722                 if (!list_empty(&wq->flusher_overflow)) {
 2723                         /*
 2724                          * Assign the same color to all overflowed
 2725                          * flushers, advance work_color and append to
 2726                          * flusher_queue.  This is the start-to-wait
 2727                          * phase for these overflowed flushers.
 2728                          */
 2729                         list_for_each_entry(tmp, &wq->flusher_overflow, list)
 2730                                 tmp->flush_color = wq->work_color;
 2731 
 2732                         wq->work_color = work_next_color(wq->work_color);
 2733 
 2734                         list_splice_tail_init(&wq->flusher_overflow,
 2735                                               &wq->flusher_queue);
 2736                         flush_workqueue_prep_cwqs(wq, -1, wq->work_color);
 2737                 }
 2738 
 2739                 if (list_empty(&wq->flusher_queue)) {
 2740                         BUG_ON(wq->flush_color != wq->work_color);
 2741                         break;
 2742                 }
 2743 
 2744                 /*
 2745                  * Need to flush more colors.  Make the next flusher
 2746                  * the new first flusher and arm cwqs.
 2747                  */
 2748                 BUG_ON(wq->flush_color == wq->work_color);
 2749                 BUG_ON(wq->flush_color != next->flush_color);
 2750 
 2751                 list_del_init(&next->list);
 2752                 wq->first_flusher = next;
 2753 
 2754                 if (flush_workqueue_prep_cwqs(wq, wq->flush_color, -1))
 2755                         break;
 2756 
 2757                 /*
 2758                  * Meh... this color is already done, clear first
 2759                  * flusher and repeat cascading.
 2760                  */
 2761                 wq->first_flusher = NULL;
 2762         }
 2763 
 2764 out_unlock:
 2765         mutex_unlock(&wq->flush_mutex);
 2766 }
 2767 EXPORT_SYMBOL_GPL(flush_workqueue);
 2768 
 2769 /**
 2770  * drain_workqueue - drain a workqueue
 2771  * @wq: workqueue to drain
 2772  *
 2773  * Wait until the workqueue becomes empty.  While draining is in progress,
 2774  * only chain queueing is allowed.  IOW, only currently pending or running
 2775  * work items on @wq can queue further work items on it.  @wq is flushed
 2776  * repeatedly until it becomes empty.  The number of flushes is determined
 2777  * by the depth of chaining and should be relatively small.  Whine if it
 2778  * takes too long.
 2779  */
 2780 void drain_workqueue(struct workqueue_struct *wq)
 2781 {
 2782         unsigned int flush_cnt = 0;
 2783         unsigned int cpu;
 2784 
 2785         /*
 2786          * __queue_work() needs to test whether there are drainers; it is much
 2787          * hotter than drain_workqueue() and already looks at @wq->flags.  Set
 2788          * WQ_DRAINING so that the queueing path doesn't have to check nr_drainers.
 2789          */
 2790         spin_lock(&workqueue_lock);
 2791         if (!wq->nr_drainers++)
 2792                 wq->flags |= WQ_DRAINING;
 2793         spin_unlock(&workqueue_lock);
 2794 reflush:
 2795         flush_workqueue(wq);
 2796 
 2797         for_each_cwq_cpu(cpu, wq) {
 2798                 struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
 2799                 bool drained;
 2800 
 2801                 spin_lock_irq(&cwq->pool->gcwq->lock);
 2802                 drained = !cwq->nr_active && list_empty(&cwq->delayed_works);
 2803                 spin_unlock_irq(&cwq->pool->gcwq->lock);
 2804 
 2805                 if (drained)
 2806                         continue;
 2807 
 2808                 if (++flush_cnt == 10 ||
 2809                     (flush_cnt % 100 == 0 && flush_cnt <= 1000))
 2810                         pr_warn("workqueue %s: flush on destruction isn't complete after %u tries\n",
 2811                                 wq->name, flush_cnt);
 2812                 goto reflush;
 2813         }
 2814 
 2815         spin_lock(&workqueue_lock);
 2816         if (!--wq->nr_drainers)
 2817                 wq->flags &= ~WQ_DRAINING;
 2818         spin_unlock(&workqueue_lock);
 2819 }
 2820 EXPORT_SYMBOL_GPL(drain_workqueue);
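
/*
 * Illustrative sketch (not part of the original file; the ex_* names are
 * hypothetical and the #if 0 guard keeps it out of any build): a work item
 * that chain-queues itself a bounded number of times.  drain_workqueue()
 * permits exactly this kind of queueing and reflushes until the chain dies.
 */
#if 0
static struct workqueue_struct *ex_wq;
static atomic_t ex_chain_left = ATOMIC_INIT(3);

static void ex_chain_fn(struct work_struct *work)
{
	/* requeueing from the running work item is "chain queueing" */
	if (atomic_dec_return(&ex_chain_left) > 0)
		queue_work(ex_wq, work);
}

static DECLARE_WORK(ex_chain_work, ex_chain_fn);

static void ex_drain_example(void)
{
	queue_work(ex_wq, &ex_chain_work);
	drain_workqueue(ex_wq);		/* returns once the chain has run dry */
}
#endif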
 2821 
 2822 static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr)
 2823 {
 2824         struct worker *worker = NULL;
 2825         struct global_cwq *gcwq;
 2826         struct cpu_workqueue_struct *cwq;
 2827 
 2828         might_sleep();
 2829         gcwq = get_work_gcwq(work);
 2830         if (!gcwq)
 2831                 return false;
 2832 
 2833         spin_lock_irq(&gcwq->lock);
 2834         if (!list_empty(&work->entry)) {
 2835                 /*
 2836                  * See the comment near try_to_grab_pending()->smp_rmb().
 2837                  * If it was re-queued to a different gcwq under us, we
 2838                  * are not going to wait.
 2839                  */
 2840                 smp_rmb();
 2841                 cwq = get_work_cwq(work);
 2842                 if (unlikely(!cwq || gcwq != cwq->pool->gcwq))
 2843                         goto already_gone;
 2844         } else {
 2845                 worker = find_worker_executing_work(gcwq, work);
 2846                 if (!worker)
 2847                         goto already_gone;
 2848                 cwq = worker->current_cwq;
 2849         }
 2850 
 2851         insert_wq_barrier(cwq, barr, work, worker);
 2852         spin_unlock_irq(&gcwq->lock);
 2853 
 2854         /*
 2855          * If @max_active is 1 or rescuer is in use, flushing another work
 2856          * item on the same workqueue may lead to deadlock.  Make sure the
 2857          * flusher is not running on the same workqueue by verifying write
 2858          * access.
 2859          */
 2860         if (cwq->wq->saved_max_active == 1 || cwq->wq->flags & WQ_RESCUER)
 2861                 lock_map_acquire(&cwq->wq->lockdep_map);
 2862         else
 2863                 lock_map_acquire_read(&cwq->wq->lockdep_map);
 2864         lock_map_release(&cwq->wq->lockdep_map);
 2865 
 2866         return true;
 2867 already_gone:
 2868         spin_unlock_irq(&gcwq->lock);
 2869         return false;
 2870 }
 2871 
 2872 /**
 2873  * flush_work - wait for a work to finish executing the last queueing instance
 2874  * @work: the work to flush
 2875  *
 2876  * Wait until @work has finished execution.  @work is guaranteed to be idle
 2877  * on return if it hasn't been requeued since flush started.
 2878  *
 2879  * RETURNS:
 2880  * %true if flush_work() waited for the work to finish execution,
 2881  * %false if it was already idle.
 2882  */
 2883 bool flush_work(struct work_struct *work)
 2884 {
 2885         struct wq_barrier barr;
 2886 
 2887         lock_map_acquire(&work->lockdep_map);
 2888         lock_map_release(&work->lockdep_map);
 2889 
 2890         if (start_flush_work(work, &barr)) {
 2891                 wait_for_completion(&barr.done);
 2892                 destroy_work_on_stack(&barr.work);
 2893                 return true;
 2894         } else {
 2895                 return false;
 2896         }
 2897 }
 2898 EXPORT_SYMBOL_GPL(flush_work);
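
/*
 * Illustrative sketch (hypothetical ex_* names, compiled out by #if 0):
 * flush_work() waits only for the last queueing instance, so the return
 * value distinguishes "waited" from "was already idle".
 */
#if 0
static void ex_log_fn(struct work_struct *work)
{
	pr_info("ex_log_fn ran\n");
}

static DECLARE_WORK(ex_log_work, ex_log_fn);

static void ex_flush_example(void)
{
	schedule_work(&ex_log_work);
	if (flush_work(&ex_log_work))
		pr_info("flush_work waited for ex_log_fn\n");
	else
		pr_info("ex_log_work was already idle\n");
}
#endif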
 2899 
 2900 static bool __cancel_work_timer(struct work_struct *work, bool is_dwork)
 2901 {
 2902         unsigned long flags;
 2903         int ret;
 2904 
 2905         do {
 2906                 ret = try_to_grab_pending(work, is_dwork, &flags);
 2907                 /*
 2908                  * If someone else is canceling, wait for the same event it
 2909                  * would be waiting for before retrying.
 2910                  */
 2911                 if (unlikely(ret == -ENOENT))
 2912                         flush_work(work);
 2913         } while (unlikely(ret < 0));
 2914 
 2915         /* tell other tasks trying to grab @work to back off */
 2916         mark_work_canceling(work);
 2917         local_irq_restore(flags);
 2918 
 2919         flush_work(work);
 2920         clear_work_data(work);
 2921         return ret;
 2922 }
 2923 
 2924 /**
 2925  * cancel_work_sync - cancel a work and wait for it to finish
 2926  * @work: the work to cancel
 2927  *
 2928  * Cancel @work and wait for its execution to finish.  This function
 2929  * can be used even if the work re-queues itself or migrates to
 2930  * another workqueue.  On return from this function, @work is
 2931  * guaranteed to be not pending or executing on any CPU.
 2932  *
 2933  * cancel_work_sync(&delayed_work->work) must not be used for
 2934  * delayed_work's.  Use cancel_delayed_work_sync() instead.
 2935  *
 2936  * The caller must ensure that the workqueue on which @work was last
 2937  * queued can't be destroyed before this function returns.
 2938  *
 2939  * RETURNS:
 2940  * %true if @work was pending, %false otherwise.
 2941  */
 2942 bool cancel_work_sync(struct work_struct *work)
 2943 {
 2944         return __cancel_work_timer(work, false);
 2945 }
 2946 EXPORT_SYMBOL_GPL(cancel_work_sync);
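
/*
 * Illustrative teardown sketch (hypothetical ex_* names, compiled out by
 * #if 0): cancel_work_sync() is the usual way to make it safe to free an
 * object that embeds a work item, even if the handler requeues itself.
 */
#if 0
struct ex_dev {
	struct work_struct reset_work;
};

static void ex_dev_release(struct ex_dev *dev)
{
	/*
	 * On return reset_work is neither pending nor running on any
	 * CPU, so freeing the containing object cannot race with it.
	 */
	cancel_work_sync(&dev->reset_work);
	kfree(dev);
}
#endif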
 2947 
 2948 /**
 2949  * flush_delayed_work - wait for a dwork to finish executing the last queueing
 2950  * @dwork: the delayed work to flush
 2951  *
 2952  * Delayed timer is cancelled and the pending work is queued for
 2953  * immediate execution.  Like flush_work(), this function only
 2954  * considers the last queueing instance of @dwork.
 2955  *
 2956  * RETURNS:
 2957  * %true if flush_work() waited for the work to finish execution,
 2958  * %false if it was already idle.
 2959  */
 2960 bool flush_delayed_work(struct delayed_work *dwork)
 2961 {
 2962         local_irq_disable();
 2963         if (del_timer_sync(&dwork->timer))
 2964                 __queue_work(dwork->cpu,
 2965                              get_work_cwq(&dwork->work)->wq, &dwork->work);
 2966         local_irq_enable();
 2967         return flush_work(&dwork->work);
 2968 }
 2969 EXPORT_SYMBOL(flush_delayed_work);
 2970 
 2971 /**
 2972  * cancel_delayed_work - cancel a delayed work
 2973  * @dwork: delayed_work to cancel
 2974  *
 2975  * Kill off a pending delayed_work.  Returns %true if @dwork was pending
 2976  * and canceled; %false if it wasn't pending.  Note that the work callback
 2977  * function may still be running on return, unless it returns %true and the
 2978  * work doesn't re-arm itself.  Explicitly flush or use
 2979  * cancel_delayed_work_sync() to wait on it.
 2980  *
 2981  * This function is safe to call from any context including IRQ handler.
 2982  */
 2983 bool cancel_delayed_work(struct delayed_work *dwork)
 2984 {
 2985         unsigned long flags;
 2986         int ret;
 2987 
 2988         do {
 2989                 ret = try_to_grab_pending(&dwork->work, true, &flags);
 2990         } while (unlikely(ret == -EAGAIN));
 2991 
 2992         if (unlikely(ret < 0))
 2993                 return false;
 2994 
 2995         set_work_cpu_and_clear_pending(&dwork->work, work_cpu(&dwork->work));
 2996         local_irq_restore(flags);
 2997         return ret;
 2998 }
 2999 EXPORT_SYMBOL(cancel_delayed_work);
 3000 
 3001 /**
 3002  * cancel_delayed_work_sync - cancel a delayed work and wait for it to finish
 3003  * @dwork: the delayed work to cancel
 3004  *
 3005  * This is cancel_work_sync() for delayed works.
 3006  *
 3007  * RETURNS:
 3008  * %true if @dwork was pending, %false otherwise.
 3009  */
 3010 bool cancel_delayed_work_sync(struct delayed_work *dwork)
 3011 {
 3012         return __cancel_work_timer(&dwork->work, true);
 3013 }
 3014 EXPORT_SYMBOL(cancel_delayed_work_sync);
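
/*
 * Illustrative sketch (hypothetical ex_* names, compiled out by #if 0):
 * a self-rearming poller and the two ways to stop it.  The async cancel
 * is IRQ-safe but may leave the handler running; the _sync variant waits
 * and also copes with the self-rearm.
 */
#if 0
static void ex_poll_fn(struct work_struct *work);
static DECLARE_DELAYED_WORK(ex_poll_work, ex_poll_fn);

static void ex_poll_fn(struct work_struct *work)
{
	schedule_delayed_work(&ex_poll_work, HZ);	/* rearm in ~1s */
}

static void ex_stop_polling_fast(void)	/* callable from IRQ context */
{
	cancel_delayed_work(&ex_poll_work);
}

static void ex_stop_polling_for_good(void)	/* must be able to sleep */
{
	cancel_delayed_work_sync(&ex_poll_work);
}
#endif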
 3015 
 3016 /**
 3017  * schedule_work_on - put work task on a specific cpu
 3018  * @cpu: cpu to put the work task on
 3019  * @work: job to be done
 3020  *
 3021  * This puts a job on a specific cpu.
 3022  */
 3023 bool schedule_work_on(int cpu, struct work_struct *work)
 3024 {
 3025         return queue_work_on(cpu, system_wq, work);
 3026 }
 3027 EXPORT_SYMBOL(schedule_work_on);
 3028 
 3029 /**
 3030  * schedule_work - put work task in global workqueue
 3031  * @work: job to be done
 3032  *
 3033  * Returns %false if @work was already on the kernel-global workqueue and
 3034  * %true otherwise.
 3035  *
 3036  * This puts a job in the kernel-global workqueue if it was not already
 3037  * queued and leaves it in the same position on the kernel-global
 3038  * workqueue otherwise.
 3039  */
 3040 bool schedule_work(struct work_struct *work)
 3041 {
 3042         return queue_work(system_wq, work);
 3043 }
 3044 EXPORT_SYMBOL(schedule_work);
 3045 
 3046 /**
 3047  * schedule_delayed_work_on - queue work in global workqueue on CPU after delay
 3048  * @cpu: cpu to use
 3049  * @dwork: job to be done
 3050  * @delay: number of jiffies to wait
 3051  *
 3052  * After waiting for a given time this puts a job in the kernel-global
 3053  * workqueue on the specified CPU.
 3054  */
 3055 bool schedule_delayed_work_on(int cpu, struct delayed_work *dwork,
 3056                               unsigned long delay)
 3057 {
 3058         return queue_delayed_work_on(cpu, system_wq, dwork, delay);
 3059 }
 3060 EXPORT_SYMBOL(schedule_delayed_work_on);
 3061 
 3062 /**
 3063  * schedule_delayed_work - put work task in global workqueue after delay
 3064  * @dwork: job to be done
 3065  * @delay: number of jiffies to wait or 0 for immediate execution
 3066  *
 3067  * After waiting for a given time this puts a job in the kernel-global
 3068  * workqueue.
 3069  */
 3070 bool schedule_delayed_work(struct delayed_work *dwork, unsigned long delay)
 3071 {
 3072         return queue_delayed_work(system_wq, dwork, delay);
 3073 }
 3074 EXPORT_SYMBOL(schedule_delayed_work);
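
/*
 * Illustrative sketch of the schedule_*() wrappers above (hypothetical
 * ex_* names, compiled out by #if 0); they all target system_wq.
 */
#if 0
static void ex_tick_fn(struct work_struct *work)
{
	pr_info("tick on cpu %d\n", raw_smp_processor_id());
}

static DECLARE_WORK(ex_tick_work, ex_tick_fn);
static DECLARE_WORK(ex_pinned_work, ex_tick_fn);
static DECLARE_DELAYED_WORK(ex_tock_work, ex_tick_fn);

static void ex_schedule_examples(void)
{
	/* %true only when the item was not already pending */
	if (!schedule_work(&ex_tick_work))
		pr_info("ex_tick_work was already queued\n");

	/* run on CPU 0's worker pool of the system workqueue */
	schedule_work_on(0, &ex_pinned_work);

	/* fire after roughly half a second */
	schedule_delayed_work(&ex_tock_work, msecs_to_jiffies(500));
}
#endif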
 3075 
 3076 /**
 3077  * schedule_on_each_cpu - execute a function synchronously on each online CPU
 3078  * @func: the function to call
 3079  *
 3080  * schedule_on_each_cpu() executes @func on each online CPU using the
 3081  * system workqueue and blocks until all CPUs have completed.
 3082  * schedule_on_each_cpu() is very slow.
 3083  *
 3084  * RETURNS:
 3085  * 0 on success, -errno on failure.
 3086  */
 3087 int schedule_on_each_cpu(work_func_t func)
 3088 {
 3089         int cpu;
 3090         struct work_struct __percpu *works;
 3091 
 3092         works = alloc_percpu(struct work_struct);
 3093         if (!works)
 3094                 return -ENOMEM;
 3095 
 3096         get_online_cpus();
 3097 
 3098         for_each_online_cpu(cpu) {
 3099                 struct work_struct *work = per_cpu_ptr(works, cpu);
 3100 
 3101                 INIT_WORK(work, func);
 3102                 schedule_work_on(cpu, work);
 3103         }
 3104 
 3105         for_each_online_cpu(cpu)
 3106                 flush_work(per_cpu_ptr(works, cpu));
 3107 
 3108         put_online_cpus();
 3109         free_percpu(works);
 3110         return 0;
 3111 }
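
/*
 * Illustrative sketch (hypothetical ex_* names, compiled out by #if 0):
 * schedule_on_each_cpu() blocks until the handler has run everywhere, so
 * it suits slow-path state changes, not hot paths.
 */
#if 0
static void ex_percpu_fn(struct work_struct *work)
{
	pr_info("ran on cpu %d\n", raw_smp_processor_id());
}

static int ex_run_everywhere(void)
{
	return schedule_on_each_cpu(ex_percpu_fn);	/* 0 or -errno */
}
#endif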
 3112 
 3113 /**
 3114  * flush_scheduled_work - ensure that any scheduled work has run to completion.
 3115  *
 3116  * Forces execution of the kernel-global workqueue and blocks until its
 3117  * completion.
 3118  *
 3119  * Think twice before calling this function!  It's very easy to get into
 3120  * trouble if you don't take great care.  Either of the following situations
 3121  * will lead to deadlock:
 3122  *
 3123  *      One of the work items currently on the workqueue needs to acquire
 3124  *      a lock held by your code or its caller.
 3125  *
 3126  *      Your code is running in the context of a work routine.
 3127  *
 3128  * They will be detected by lockdep when they occur, but the first might not
 3129  * occur very often.  It depends on what work items are on the workqueue and
 3130  * what locks they need, which you have no control over.
 3131  *
 3132  * In most situations flushing the entire workqueue is overkill; you merely
 3133  * need to know that a particular work item isn't queued and isn't running.
 3134  * In such cases you should use cancel_delayed_work_sync() or
 3135  * cancel_work_sync() instead.
 3136  */
 3137 void flush_scheduled_work(void)
 3138 {
 3139         flush_workqueue(system_wq);
 3140 }
 3141 EXPORT_SYMBOL(flush_scheduled_work);
 3142 
 3143 /**
 3144  * execute_in_process_context - reliably execute the routine with user context
 3145  * @fn:         the function to execute
 3146  * @ew:         guaranteed storage for the execute work structure (must
 3147  *              be available when the work executes)
 3148  *
 3149  * Executes the function immediately if process context is available,
 3150  * otherwise schedules the function for delayed execution.
 3151  *
 3152  * Returns:     0 - function was executed
 3153  *              1 - function was scheduled for execution
 3154  */
 3155 int execute_in_process_context(work_func_t fn, struct execute_work *ew)
 3156 {
 3157         if (!in_interrupt()) {
 3158                 fn(&ew->work);
 3159                 return 0;
 3160         }
 3161 
 3162         INIT_WORK(&ew->work, fn);
 3163         schedule_work(&ew->work);
 3164 
 3165         return 1;
 3166 }
 3167 EXPORT_SYMBOL_GPL(execute_in_process_context);
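
/*
 * Illustrative sketch (hypothetical ex_* names, compiled out by #if 0):
 * deferring a cleanup that must run in process context.  The embedded
 * execute_work provides the storage the deferred path needs.
 */
#if 0
struct ex_obj {
	struct execute_work ew;
};

static void ex_obj_cleanup(struct work_struct *work)
{
	struct ex_obj *obj = container_of(work, struct ex_obj, ew.work);

	kfree(obj);
}

static void ex_obj_free(struct ex_obj *obj)
{
	/* runs immediately unless called from interrupt context */
	execute_in_process_context(ex_obj_cleanup, &obj->ew);
}
#endif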
 3168 
 3169 int keventd_up(void)
 3170 {
 3171         return system_wq != NULL;
 3172 }
 3173 
 3174 static int alloc_cwqs(struct workqueue_struct *wq)
 3175 {
 3176         /*
 3177          * cwqs are force-aligned according to WORK_STRUCT_FLAG_BITS.
 3178          * Make sure that the alignment isn't lower than that of
 3179          * unsigned long long.
 3180          */
 3181         const size_t size = sizeof(struct cpu_workqueue_struct);
 3182         const size_t align = max_t(size_t, 1 << WORK_STRUCT_FLAG_BITS,
 3183                                    __alignof__(unsigned long long));
 3184 
 3185         if (!(wq->flags & WQ_UNBOUND))
 3186                 wq->cpu_wq.pcpu = __alloc_percpu(size, align);
 3187         else {
 3188                 void *ptr;
 3189 
 3190                 /*
 3191                  * Allocate enough room to align cwq and put an extra
 3192                  * pointer at the end pointing back to the originally
 3193                  * allocated pointer which will be used for free.
 3194                  */
 3195                 ptr = kzalloc(size + align + sizeof(void *), GFP_KERNEL);
 3196                 if (ptr) {
 3197                         wq->cpu_wq.single = PTR_ALIGN(ptr, align);
 3198                         *(void **)(wq->cpu_wq.single + 1) = ptr;
 3199                 }
 3200         }
 3201 
 3202         /* just in case, make sure it's actually aligned */
 3203         BUG_ON(!IS_ALIGNED(wq->cpu_wq.v, align));
 3204         return wq->cpu_wq.v ? 0 : -ENOMEM;
 3205 }
 3206 
 3207 static void free_cwqs(struct workqueue_struct *wq)
 3208 {
 3209         if (!(wq->flags & WQ_UNBOUND))
 3210                 free_percpu(wq->cpu_wq.pcpu);
 3211         else if (wq->cpu_wq.single) {
 3212                 /* the pointer to free is stored right after the cwq */
 3213                 kfree(*(void **)(wq->cpu_wq.single + 1));
 3214         }
 3215 }
 3216 
 3217 static int wq_clamp_max_active(int max_active, unsigned int flags,
 3218                                const char *name)
 3219 {
 3220         int lim = flags & WQ_UNBOUND ? WQ_UNBOUND_MAX_ACTIVE : WQ_MAX_ACTIVE;
 3221 
 3222         if (max_active < 1 || max_active > lim)
 3223                 pr_warn("workqueue: max_active %d requested for %s is out of range, clamping between %d and %d\n",
 3224                         max_active, name, 1, lim);
 3225 
 3226         return clamp_val(max_active, 1, lim);
 3227 }
 3228 
 3229 struct workqueue_struct *__alloc_workqueue_key(const char *fmt,
 3230                                                unsigned int flags,
 3231                                                int max_active,
 3232                                                struct lock_class_key *key,
 3233                                                const char *lock_name, ...)
 3234 {
 3235         va_list args, args1;
 3236         struct workqueue_struct *wq;
 3237         unsigned int cpu;
 3238         size_t namelen;
 3239 
 3240         /* determine namelen, allocate wq and format name */
 3241         va_start(args, lock_name);
 3242         va_copy(args1, args);
 3243         namelen = vsnprintf(NULL, 0, fmt, args) + 1;
 3244 
 3245         wq = kzalloc(sizeof(*wq) + namelen, GFP_KERNEL);
 3246         if (!wq)
 3247                 goto err;
 3248 
 3249         vsnprintf(wq->name, namelen, fmt, args1);
 3250         va_end(args);
 3251         va_end(args1);
 3252 
 3253         /*
 3254          * Workqueues which may be used during memory reclaim should
 3255          * have a rescuer to guarantee forward progress.
 3256          */
 3257         if (flags & WQ_MEM_RECLAIM)
 3258                 flags |= WQ_RESCUER;
 3259 
 3260         max_active = max_active ?: WQ_DFL_ACTIVE;
 3261         max_active = wq_clamp_max_active(max_active, flags, wq->name);
 3262 
 3263         /* init wq */
 3264         wq->flags = flags;
 3265         wq->saved_max_active = max_active;
 3266         mutex_init(&wq->flush_mutex);
 3267         atomic_set(&wq->nr_cwqs_to_flush, 0);
 3268         INIT_LIST_HEAD(&wq->flusher_queue);
 3269         INIT_LIST_HEAD(&wq->flusher_overflow);
 3270 
 3271         lockdep_init_map(&wq->lockdep_map, lock_name, key, 0);
 3272         INIT_LIST_HEAD(&wq->list);
 3273 
 3274         if (alloc_cwqs(wq) < 0)
 3275                 goto err;
 3276 
 3277         for_each_cwq_cpu(cpu, wq) {
 3278                 struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
 3279                 struct global_cwq *gcwq = get_gcwq(cpu);
 3280                 int pool_idx = (bool)(flags & WQ_HIGHPRI);
 3281 
 3282                 BUG_ON((unsigned long)cwq & WORK_STRUCT_FLAG_MASK);
 3283                 cwq->pool = &gcwq->pools[pool_idx];
 3284                 cwq->wq = wq;
 3285                 cwq->flush_color = -1;
 3286                 cwq->max_active = max_active;
 3287                 INIT_LIST_HEAD(&cwq->delayed_works);
 3288         }
 3289 
 3290         if (flags & WQ_RESCUER) {
 3291                 struct worker *rescuer;
 3292 
 3293                 if (!alloc_mayday_mask(&wq->mayday_mask, GFP_KERNEL))
 3294                         goto err;
 3295 
 3296                 wq->rescuer = rescuer = alloc_worker();
 3297                 if (!rescuer)
 3298                         goto err;
 3299 
 3300                 rescuer->task = kthread_create(rescuer_thread, wq, "%s",
 3301                                                wq->name);
 3302                 if (IS_ERR(rescuer->task))
 3303                         goto err;
 3304 
 3305                 rescuer->task->flags |= PF_THREAD_BOUND;
 3306                 wake_up_process(rescuer->task);
 3307         }
 3308 
 3309         /*
 3310          * workqueue_lock protects global freeze state and workqueues
 3311          * list.  Grab it, set max_active accordingly and add the new
 3312          * workqueue to workqueues list.
 3313          */
 3314         spin_lock(&workqueue_lock);
 3315 
 3316         if (workqueue_freezing && wq->flags & WQ_FREEZABLE)
 3317                 for_each_cwq_cpu(cpu, wq)
 3318                         get_cwq(cpu, wq)->max_active = 0;
 3319 
 3320         list_add(&wq->list, &workqueues);
 3321 
 3322         spin_unlock(&workqueue_lock);
 3323 
 3324         return wq;
 3325 err:
 3326         if (wq) {
 3327                 free_cwqs(wq);
 3328                 free_mayday_mask(wq->mayday_mask);
 3329                 kfree(wq->rescuer);
 3330                 kfree(wq);
 3331         }
 3332         return NULL;
 3333 }
 3334 EXPORT_SYMBOL_GPL(__alloc_workqueue_key);
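
/*
 * Illustrative sketch (hypothetical ex_* names, compiled out by #if 0):
 * callers normally go through the alloc_workqueue() wrapper, which
 * supplies the lockdep key and name formatting for this function.
 */
#if 0
static struct workqueue_struct *ex_io_wq;

static int ex_setup(void)
{
	/*
	 * WQ_MEM_RECLAIM requests a rescuer so the queue makes forward
	 * progress under memory pressure; max_active = 1 limits it to a
	 * single in-flight work item per CPU.
	 */
	ex_io_wq = alloc_workqueue("ex_io", WQ_MEM_RECLAIM, 1);
	return ex_io_wq ? 0 : -ENOMEM;
}

static void ex_teardown(void)
{
	destroy_workqueue(ex_io_wq);	/* drains pending work first */
}
#endif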
 3335 
 3336 /**
 3337  * destroy_workqueue - safely terminate a workqueue
 3338  * @wq: target workqueue
 3339  *
 3340  * Safely destroy a workqueue. All work currently pending will be done first.
 3341  */
 3342 void destroy_workqueue(struct workqueue_struct *wq)
 3343 {
 3344         unsigned int cpu;
 3345 
 3346         /* drain it before proceeding with destruction */
 3347         drain_workqueue(wq);
 3348 
 3349         /*
 3350          * wq list is used to freeze wq, remove from list after
 3351          * flushing is complete in case freeze races us.
 3352          */
 3353         spin_lock(&workqueue_lock);
 3354         list_del(&wq->list);
 3355         spin_unlock(&workqueue_lock);
 3356 
 3357         /* sanity check */
 3358         for_each_cwq_cpu(cpu, wq) {
 3359                 struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
 3360                 int i;
 3361 
 3362                 for (i = 0; i < WORK_NR_COLORS; i++)
 3363                         BUG_ON(cwq->nr_in_flight[i]);
 3364                 BUG_ON(cwq->nr_active);
 3365                 BUG_ON(!list_empty(&cwq->delayed_works));
 3366         }
 3367 
 3368         if (wq->flags & WQ_RESCUER) {
 3369                 kthread_stop(wq->rescuer->task);
 3370                 free_mayday_mask(wq->mayday_mask);
 3371                 kfree(wq->rescuer);
 3372         }
 3373 
 3374         free_cwqs(wq);
 3375         kfree(wq);
 3376 }
 3377 EXPORT_SYMBOL_GPL(destroy_workqueue);
 3378 
 3379 /**
 3380  * cwq_set_max_active - adjust max_active of a cwq
 3381  * @cwq: target cpu_workqueue_struct
 3382  * @max_active: new max_active value.
 3383  *
 3384  * Set @cwq->max_active to @max_active and activate delayed works if
 3385  * increased.
 3386  *
 3387  * CONTEXT:
 3388  * spin_lock_irq(gcwq->lock).
 3389  */
 3390 static void cwq_set_max_active(struct cpu_workqueue_struct *cwq, int max_active)
 3391 {
 3392         cwq->max_active = max_active;
 3393 
 3394         while (!list_empty(&cwq->delayed_works) &&
 3395                cwq->nr_active < cwq->max_active)
 3396                 cwq_activate_first_delayed(cwq);
 3397 }
 3398 
 3399 /**
 3400  * workqueue_set_max_active - adjust max_active of a workqueue
 3401  * @wq: target workqueue
 3402  * @max_active: new max_active value.
 3403  *
 3404  * Set max_active of @wq to @max_active.
 3405  *
 3406  * CONTEXT:
 3407  * Don't call from IRQ context.
 3408  */
 3409 void workqueue_set_max_active(struct workqueue_struct *wq, int max_active)
 3410 {
 3411         unsigned int cpu;
 3412 
 3413         max_active = wq_clamp_max_active(max_active, wq->flags, wq->name);
 3414 
 3415         spin_lock(&workqueue_lock);
 3416 
 3417         wq->saved_max_active = max_active;
 3418 
 3419         for_each_cwq_cpu(cpu, wq) {
 3420                 struct global_cwq *gcwq = get_gcwq(cpu);
 3421 
 3422                 spin_lock_irq(&gcwq->lock);
 3423 
 3424                 if (!(wq->flags & WQ_FREEZABLE) ||
 3425                     !(gcwq->flags & GCWQ_FREEZING))
 3426                         cwq_set_max_active(get_cwq(gcwq->cpu, wq), max_active);
 3427 
 3428                 spin_unlock_irq(&gcwq->lock);
 3429         }
 3430 
 3431         spin_unlock(&workqueue_lock);
 3432 }
 3433 EXPORT_SYMBOL_GPL(workqueue_set_max_active);
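
/*
 * Illustrative sketch (hypothetical ex_* name, compiled out by #if 0):
 * raising max_active immediately activates queued-but-delayed work items
 * up to the new bound on each CPU, via cwq_set_max_active() above.
 */
#if 0
static void ex_open_throttle(struct workqueue_struct *wq)
{
	workqueue_set_max_active(wq, 16);
}
#endif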
 3434 
 3435 /**
 3436  * workqueue_congested - test whether a workqueue is congested
 3437  * @cpu: CPU in question
 3438  * @wq: target workqueue
 3439  *
 3440  * Test whether @wq's cpu workqueue for @cpu is congested.  There is
 3441  * no synchronization around this function and the test result is
 3442  * unreliable and only useful as advisory hints or for debugging.
 3443  *
 3444  * RETURNS:
 3445  * %true if congested, %false otherwise.
 3446  */
 3447 bool workqueue_congested(unsigned int cpu, struct workqueue_struct *wq)
 3448 {
 3449         struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
 3450 
 3451         return !list_empty(&cwq->delayed_works);
 3452 }
 3453 EXPORT_SYMBOL_GPL(workqueue_congested);
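
/*
 * Illustrative sketch (hypothetical ex_* name, compiled out by #if 0):
 * since the result is advisory, treat congestion only as a hint, e.g. to
 * shed optional work.
 */
#if 0
static bool ex_try_queue(struct workqueue_struct *wq, struct work_struct *w)
{
	/* the answer may already be stale by the time we act on it */
	if (workqueue_congested(raw_smp_processor_id(), wq))
		return false;
	return queue_work(wq, w);
}
#endif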
 3454 
 3455 /**
 3456  * work_cpu - return the last known associated cpu for @work
 3457  * @work: the work of interest
 3458  *
 3459  * RETURNS:
 3460  * CPU number if @work was ever queued.  WORK_CPU_NONE otherwise.
 3461  */
 3462 unsigned int work_cpu(struct work_struct *work)
 3463 {
 3464         struct global_cwq *gcwq = get_work_gcwq(work);
 3465 
 3466         return gcwq ? gcwq->cpu : WORK_CPU_NONE;
 3467 }
 3468 EXPORT_SYMBOL_GPL(work_cpu);
 3469 
 3470 /**
 3471  * work_busy - test whether a work is currently pending or running
 3472  * @work: the work to be tested
 3473  *
 3474  * Test whether @work is currently pending or running.  There is no
 3475  * synchronization around this function and the test result is
 3476  * unreliable and only useful as advisory hints or for debugging.
 3477  * Especially for reentrant wqs, the pending state might hide the
 3478  * running state.
 3479  *
 3480  * RETURNS:
 3481  * OR'd bitmask of WORK_BUSY_* bits.
 3482  */
 3483 unsigned int work_busy(struct work_struct *work)
 3484 {
 3485         struct global_cwq *gcwq = get_work_gcwq(work);
 3486         unsigned long flags;
 3487         unsigned int ret = 0;
 3488 
 3489         if (!gcwq)
 3490                 return 0;
 3491 
 3492         spin_lock_irqsave(&gcwq->lock, flags);
 3493 
 3494         if (work_pending(work))
 3495                 ret |= WORK_BUSY_PENDING;
 3496         if (find_worker_executing_work(gcwq, work))
 3497                 ret |= WORK_BUSY_RUNNING;
 3498 
 3499         spin_unlock_irqrestore(&gcwq->lock, flags);
 3500 
 3501         return ret;
 3502 }
 3503 EXPORT_SYMBOL_GPL(work_busy);
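
/*
 * Illustrative sketch (hypothetical ex_* name, compiled out by #if 0):
 * decoding the advisory work_busy() bitmask, e.g. for debug output.
 */
#if 0
static void ex_report_busy(struct work_struct *work)
{
	unsigned int busy = work_busy(work);

	pr_info("work %p:%s%s\n", work,
		busy & WORK_BUSY_PENDING ? " pending" : "",
		busy & WORK_BUSY_RUNNING ? " running" : "");
}
#endif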
 3504 
 3505 /*
 3506  * CPU hotplug.
 3507  *
 3508  * There are two challenges in supporting CPU hotplug.  Firstly, there
 3509  * are a lot of assumptions on strong associations among work, cwq and
 3510  * gcwq which make migrating pending and scheduled works very
 3511  * difficult to implement without impacting hot paths.  Secondly,
 3512  * gcwqs serve a mix of short, long and very long running work items,
 3513  * making blocked draining impractical.
 3514  *
 3515  * This is solved by allowing a gcwq to be disassociated from its CPU,
 3516  * running as an unbound one, and allowing it to be reattached later if
 3517  * the cpu comes back online.
 3518  */
 3519 
 3520 /* claim manager positions of all pools */
 3521 static void gcwq_claim_assoc_and_lock(struct global_cwq *gcwq)
 3522 {
 3523         struct worker_pool *pool;
 3524 
 3525         for_each_worker_pool(pool, gcwq)
 3526                 mutex_lock_nested(&pool->assoc_mutex, pool - gcwq->pools);
 3527         spin_lock_irq(&gcwq->lock);
 3528 }
 3529 
 3530 /* release manager positions */
 3531 static void gcwq_release_assoc_and_unlock(struct global_cwq *gcwq)
 3532 {
 3533         struct worker_pool *pool;
 3534 
 3535         spin_unlock_irq(&gcwq->lock);
 3536         for_each_worker_pool(pool, gcwq)
 3537                 mutex_unlock(&pool->assoc_mutex);
 3538 }
 3539 
 3540 static void gcwq_unbind_fn(struct work_struct *work)
 3541 {
 3542         struct global_cwq *gcwq = get_gcwq(smp_processor_id());
 3543         struct worker_pool *pool;
 3544         struct worker *worker;
 3545         struct hlist_node *pos;
 3546         int i;
 3547 
 3548         BUG_ON(gcwq->cpu != smp_processor_id());
 3549 
 3550         gcwq_claim_assoc_and_lock(gcwq);
 3551 
 3552         /*
 3553          * We've claimed all manager positions.  Make all workers unbound
 3554          * and set DISASSOCIATED.  Before this, all workers except for the
 3555          * ones which are still executing works from before the last CPU
 3556          * down must be on the cpu.  After this, they may become diasporas.
 3557          */
 3558         for_each_worker_pool(pool, gcwq)
 3559                 list_for_each_entry(worker, &pool->idle_list, entry)
 3560                         worker->flags |= WORKER_UNBOUND;
 3561 
 3562         for_each_busy_worker(worker, i, pos, gcwq)
 3563                 worker->flags |= WORKER_UNBOUND;
 3564 
 3565         gcwq->flags |= GCWQ_DISASSOCIATED;
 3566 
 3567         gcwq_release_assoc_and_unlock(gcwq);
 3568 
 3569         /*
 3570          * Call schedule() so that we cross rq->lock and thus can guarantee
 3571          * sched callbacks see the %WORKER_UNBOUND flag.  This is necessary
 3572          * as scheduler callbacks may be invoked from other cpus.
 3573          */
 3574         schedule();
 3575 
 3576         /*
 3577          * Sched callbacks are disabled now.  Zap nr_running.  After this,
 3578          * nr_running stays zero and need_more_worker() and keep_working()
 3579          * are always true as long as the worklist is not empty.  @gcwq now
 3580          * behaves as an unbound gcwq (in terms of concurrency management),
 3581          * which is served by workers tied to the CPU.
 3582          *
 3583          * On return from this function, the current worker would trigger
 3584          * unbound chain execution of pending work items if other workers
 3585          * didn't already.
 3586          */
 3587         for_each_worker_pool(pool, gcwq)
 3588                 atomic_set(get_pool_nr_running(pool), 0);
 3589 }
 3590 
 3591 /*
 3592  * Workqueues should be brought up before normal priority CPU notifiers.
 3593  * This will be registered high priority CPU notifier.
 3594  */
 3595 static int __cpuinit workqueue_cpu_up_callback(struct notifier_block *nfb,
 3596                                                unsigned long action,
 3597                                                void *hcpu)
 3598 {
 3599         unsigned int cpu = (unsigned long)hcpu;
 3600         struct global_cwq *gcwq = get_gcwq(cpu);
 3601         struct worker_pool *pool;
 3602 
 3603         switch (action & ~CPU_TASKS_FROZEN) {
 3604         case CPU_UP_PREPARE:
 3605                 for_each_worker_pool(pool, gcwq) {
 3606                         struct worker *worker;
 3607 
 3608                         if (pool->nr_workers)
 3609                                 continue;
 3610 
 3611                         worker = create_worker(pool);
 3612                         if (!worker)
 3613                                 return NOTIFY_BAD;
 3614 
 3615                         spin_lock_irq(&gcwq->lock);
 3616                         start_worker(worker);
 3617                         spin_unlock_irq(&gcwq->lock);
 3618                 }
 3619                 break;
 3620 
 3621         case CPU_DOWN_FAILED:
 3622         case CPU_ONLINE:
 3623                 gcwq_claim_assoc_and_lock(gcwq);
 3624                 gcwq->flags &= ~GCWQ_DISASSOCIATED;
 3625                 rebind_workers(gcwq);
 3626                 gcwq_release_assoc_and_unlock(gcwq);
 3627                 break;
 3628         }
 3629         return NOTIFY_OK;
 3630 }
 3631 
 3632 /*
 3633  * Workqueues should be brought down after normal priority CPU notifiers.
 3634  * This will be registered as low priority CPU notifier.
 3635  */
 3636 static int __cpuinit workqueue_cpu_down_callback(struct notifier_block *nfb,
 3637                                                  unsigned long action,
 3638                                                  void *hcpu)
 3639 {
 3640         unsigned int cpu = (unsigned long)hcpu;
 3641         struct work_struct unbind_work;
 3642 
 3643         switch (action & ~CPU_TASKS_FROZEN) {
 3644         case CPU_DOWN_PREPARE:
 3645                 /* unbinding should happen on the local CPU */
 3646                 INIT_WORK_ONSTACK(&unbind_work, gcwq_unbind_fn);
 3647                 queue_work_on(cpu, system_highpri_wq, &unbind_work);
 3648                 flush_work(&unbind_work);
 3649                 break;
 3650         }
 3651         return NOTIFY_OK;
 3652 }
 3653 
 3654 #ifdef CONFIG_SMP
 3655 
 3656 struct work_for_cpu {
 3657         struct work_struct work;
 3658         long (*fn)(void *);
 3659         void *arg;
 3660         long ret;
 3661 };
 3662 
 3663 static void work_for_cpu_fn(struct work_struct *work)
 3664 {
 3665         struct work_for_cpu *wfc = container_of(work, struct work_for_cpu, work);
 3666 
 3667         wfc->ret = wfc->fn(wfc->arg);
 3668 }
 3669 
 3670 /**
 3671  * work_on_cpu - run a function in user context on a particular cpu
 3672  * @cpu: the cpu to run on
 3673  * @fn: the function to run
 3674  * @arg: the function arg
 3675  *
 3676  * This will return the value @fn returns.
 3677  * It is up to the caller to ensure that the cpu doesn't go offline.
 3678  * The caller must not hold any locks which would prevent @fn from completing.
 3679  */
 3680 long work_on_cpu(unsigned int cpu, long (*fn)(void *), void *arg)
 3681 {
 3682         struct work_for_cpu wfc = { .fn = fn, .arg = arg };
 3683 
 3684         INIT_WORK_ONSTACK(&wfc.work, work_for_cpu_fn);
 3685         schedule_work_on(cpu, &wfc.work);
 3686         flush_work(&wfc.work);
 3687         return wfc.ret;
 3688 }
 3689 EXPORT_SYMBOL_GPL(work_on_cpu);
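
/*
 * Illustrative sketch (hypothetical ex_* names, compiled out by #if 0):
 * the caller keeps the CPU online around work_on_cpu(), e.g. with
 * get_online_cpus().
 */
#if 0
static long ex_whoami(void *arg)
{
	return raw_smp_processor_id();	/* runs pinned to the target cpu */
}

static void ex_work_on_cpu_example(void)
{
	get_online_cpus();
	if (cpu_online(1))
		pr_info("ran on cpu %ld\n", work_on_cpu(1, ex_whoami, NULL));
	put_online_cpus();
}
#endif
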
 3690 #endif /* CONFIG_SMP */
 3691 
 3692 #ifdef CONFIG_FREEZER
 3693 
 3694 /**
 3695  * freeze_workqueues_begin - begin freezing workqueues
 3696  *
 3697  * Start freezing workqueues.  After this function returns, all freezable
 3698  * workqueues will queue new works to their frozen_works list instead of
 3699  * gcwq->worklist.
 3700  *
 3701  * CONTEXT:
 3702  * Grabs and releases workqueue_lock and gcwq->lock's.
 3703  */
 3704 void freeze_workqueues_begin(void)
 3705 {
 3706         unsigned int cpu;
 3707 
 3708         spin_lock(&workqueue_lock);
 3709 
 3710         BUG_ON(workqueue_freezing);
 3711         workqueue_freezing = true;
 3712 
 3713         for_each_gcwq_cpu(cpu) {
 3714                 struct global_cwq *gcwq = get_gcwq(cpu);
 3715                 struct workqueue_struct *wq;
 3716 
 3717                 spin_lock_irq(&gcwq->lock);
 3718 
 3719                 BUG_ON(gcwq->flags & GCWQ_FREEZING);
 3720                 gcwq->flags |= GCWQ_FREEZING;
 3721 
 3722                 list_for_each_entry(wq, &workqueues, list) {
 3723                         struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
 3724 
 3725                         if (cwq && wq->flags & WQ_FREEZABLE)
 3726                                 cwq->max_active = 0;
 3727                 }
 3728 
 3729                 spin_unlock_irq(&gcwq->lock);
 3730         }
 3731 
 3732         spin_unlock(&workqueue_lock);
 3733 }
 3734 
 3735 /**
 3736  * freeze_workqueues_busy - are freezable workqueues still busy?
 3737  *
 3738  * Check whether freezing is complete.  This function must be called
 3739  * between freeze_workqueues_begin() and thaw_workqueues().
 3740  *
 3741  * CONTEXT:
 3742  * Grabs and releases workqueue_lock.
 3743  *
 3744  * RETURNS:
 3745  * %true if some freezable workqueues are still busy.  %false if freezing
 3746  * is complete.
 3747  */
 3748 bool freeze_workqueues_busy(void)
 3749 {
 3750         unsigned int cpu;
 3751         bool busy = false;
 3752 
 3753         spin_lock(&workqueue_lock);
 3754 
 3755         BUG_ON(!workqueue_freezing);
 3756 
 3757         for_each_gcwq_cpu(cpu) {
 3758                 struct workqueue_struct *wq;
 3759                 /*
 3760                  * nr_active is monotonically decreasing.  It's safe
 3761                  * to peek without lock.
 3762                  */
 3763                 list_for_each_entry(wq, &workqueues, list) {
 3764                         struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
 3765 
 3766                         if (!cwq || !(wq->flags & WQ_FREEZABLE))
 3767                                 continue;
 3768 
 3769                         BUG_ON(cwq->nr_active < 0);
 3770                         if (cwq->nr_active) {
 3771                                 busy = true;
 3772                                 goto out_unlock;
 3773                         }
 3774                 }
 3775         }
 3776 out_unlock:
 3777         spin_unlock(&workqueue_lock);
 3778         return busy;
 3779 }
 3780 
 3781 /**
 3782  * thaw_workqueues - thaw workqueues
 3783  *
 3784  * Thaw workqueues.  Normal queueing is restored and all collected
 3785  * frozen works are transferred to their respective gcwq worklists.
 3786  *
 3787  * CONTEXT:
 3788  * Grabs and releases workqueue_lock and gcwq->lock's.
 3789  */
 3790 void thaw_workqueues(void)
 3791 {
 3792         unsigned int cpu;
 3793 
 3794         spin_lock(&workqueue_lock);
 3795 
 3796         if (!workqueue_freezing)
 3797                 goto out_unlock;
 3798 
 3799         for_each_gcwq_cpu(cpu) {
 3800                 struct global_cwq *gcwq = get_gcwq(cpu);
 3801                 struct worker_pool *pool;
 3802                 struct workqueue_struct *wq;
 3803 
 3804                 spin_lock_irq(&gcwq->lock);
 3805 
 3806                 BUG_ON(!(gcwq->flags & GCWQ_FREEZING));
 3807                 gcwq->flags &= ~GCWQ_FREEZING;
 3808 
 3809                 list_for_each_entry(wq, &workqueues, list) {
 3810                         struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
 3811 
 3812                         if (!cwq || !(wq->flags & WQ_FREEZABLE))
 3813                                 continue;
 3814 
 3815                         /* restore max_active and repopulate worklist */
 3816                         cwq_set_max_active(cwq, wq->saved_max_active);
 3817                 }
 3818 
 3819                 for_each_worker_pool(pool, gcwq)
 3820                         wake_up_worker(pool);
 3821 
 3822                 spin_unlock_irq(&gcwq->lock);
 3823         }
 3824 
 3825         workqueue_freezing = false;
 3826 out_unlock:
 3827         spin_unlock(&workqueue_lock);
 3828 }
 3829 #endif /* CONFIG_FREEZER */
 3830 
 3831 static int __init init_workqueues(void)
 3832 {
 3833         unsigned int cpu;
 3834         int i;
 3835 
 3836         /* make sure we have enough bits for OFFQ CPU number */
 3837         BUILD_BUG_ON((1LU << (BITS_PER_LONG - WORK_OFFQ_CPU_SHIFT)) <
 3838                      WORK_CPU_LAST);
 3839 
 3840         cpu_notifier(workqueue_cpu_up_callback, CPU_PRI_WORKQUEUE_UP);
 3841         hotcpu_notifier(workqueue_cpu_down_callback, CPU_PRI_WORKQUEUE_DOWN);
 3842 
 3843         /* initialize gcwqs */
 3844         for_each_gcwq_cpu(cpu) {
 3845                 struct global_cwq *gcwq = get_gcwq(cpu);
 3846                 struct worker_pool *pool;
 3847 
 3848                 spin_lock_init(&gcwq->lock);
 3849                 gcwq->cpu = cpu;
 3850                 gcwq->flags |= GCWQ_DISASSOCIATED;
 3851 
 3852                 for (i = 0; i < BUSY_WORKER_HASH_SIZE; i++)
 3853                         INIT_HLIST_HEAD(&gcwq->busy_hash[i]);
 3854 
 3855                 for_each_worker_pool(pool, gcwq) {
 3856                         pool->gcwq = gcwq;
 3857                         INIT_LIST_HEAD(&pool->worklist);
 3858                         INIT_LIST_HEAD(&pool->idle_list);
 3859 
 3860                         init_timer_deferrable(&pool->idle_timer);
 3861                         pool->idle_timer.function = idle_worker_timeout;
 3862                         pool->idle_timer.data = (unsigned long)pool;
 3863 
 3864                         setup_timer(&pool->mayday_timer, gcwq_mayday_timeout,
 3865                                     (unsigned long)pool);
 3866 
 3867                         mutex_init(&pool->assoc_mutex);
 3868                         ida_init(&pool->worker_ida);
 3869                 }
 3870         }
 3871 
 3872         /* create the initial worker */
 3873         for_each_online_gcwq_cpu(cpu) {
 3874                 struct global_cwq *gcwq = get_gcwq(cpu);
 3875                 struct worker_pool *pool;
 3876 
 3877                 if (cpu != WORK_CPU_UNBOUND)
 3878                         gcwq->flags &= ~GCWQ_DISASSOCIATED;
 3879 
 3880                 for_each_worker_pool(pool, gcwq) {
 3881                         struct worker *worker;
 3882 
 3883                         worker = create_worker(pool);
 3884                         BUG_ON(!worker);
 3885                         spin_lock_irq(&gcwq->lock);
 3886                         start_worker(worker);
 3887                         spin_unlock_irq(&gcwq->lock);
 3888                 }
 3889         }
 3890 
 3891         system_wq = alloc_workqueue("events", 0, 0);
 3892         system_highpri_wq = alloc_workqueue("events_highpri", WQ_HIGHPRI, 0);
 3893         system_long_wq = alloc_workqueue("events_long", 0, 0);
 3894         system_unbound_wq = alloc_workqueue("events_unbound", WQ_UNBOUND,
 3895                                             WQ_UNBOUND_MAX_ACTIVE);
 3896         system_freezable_wq = alloc_workqueue("events_freezable",
 3897                                               WQ_FREEZABLE, 0);
 3898         BUG_ON(!system_wq || !system_highpri_wq || !system_long_wq ||
 3899                !system_unbound_wq || !system_freezable_wq);
 3900         return 0;
 3901 }
 3902 early_initcall(init_workqueues);
