FreeBSD/Linux Kernel Cross Reference
sys/kernel/slow-work.c


/* Worker thread pool for slow items, such as filesystem lookups or mkdirs
 *
 * Copyright (C) 2008 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public Licence
 * as published by the Free Software Foundation; either version
 * 2 of the Licence, or (at your option) any later version.
 *
 * See Documentation/slow-work.txt
 */

#include <linux/module.h>
#include <linux/slow-work.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/wait.h>

#define SLOW_WORK_CULL_TIMEOUT (5 * HZ) /* cull threads 5s after running out of
                                         * things to do */
#define SLOW_WORK_OOM_TIMEOUT (5 * HZ)  /* can't start new threads for 5s after
                                         * OOM */

static void slow_work_cull_timeout(unsigned long);
static void slow_work_oom_timeout(unsigned long);

#ifdef CONFIG_SYSCTL
static int slow_work_min_threads_sysctl(struct ctl_table *, int,
                                        void __user *, size_t *, loff_t *);

static int slow_work_max_threads_sysctl(struct ctl_table *, int,
                                        void __user *, size_t *, loff_t *);
#endif

/*
 * The pool of threads has at least min threads in it as long as someone is
 * using the facility, and may have as many as max.
 *
 * A portion of the pool may be processing very slow operations.
 */
static unsigned slow_work_min_threads = 2;
static unsigned slow_work_max_threads = 4;
static unsigned vslow_work_proportion = 50; /* % of threads that may process
                                             * very slow work */

#ifdef CONFIG_SYSCTL
static const int slow_work_min_min_threads = 2;
static int slow_work_max_max_threads = 255;
static const int slow_work_min_vslow = 1;
static const int slow_work_max_vslow = 99;

ctl_table slow_work_sysctls[] = {
        {
                .ctl_name       = CTL_UNNUMBERED,
                .procname       = "min-threads",
                .data           = &slow_work_min_threads,
                .maxlen         = sizeof(unsigned),
                .mode           = 0644,
                .proc_handler   = slow_work_min_threads_sysctl,
                .extra1         = (void *) &slow_work_min_min_threads,
                .extra2         = &slow_work_max_threads,
        },
        {
                .ctl_name       = CTL_UNNUMBERED,
                .procname       = "max-threads",
                .data           = &slow_work_max_threads,
                .maxlen         = sizeof(unsigned),
                .mode           = 0644,
                .proc_handler   = slow_work_max_threads_sysctl,
                .extra1         = &slow_work_min_threads,
                .extra2         = (void *) &slow_work_max_max_threads,
        },
        {
                .ctl_name       = CTL_UNNUMBERED,
                .procname       = "vslow-percentage",
                .data           = &vslow_work_proportion,
                .maxlen         = sizeof(unsigned),
                .mode           = 0644,
                .proc_handler   = &proc_dointvec_minmax,
                .extra1         = (void *) &slow_work_min_vslow,
                .extra2         = (void *) &slow_work_max_vslow,
        },
        { .ctl_name = 0 }
};
#endif

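/*
 * Illustrative note: slow_work_sysctls[] is registered from outside this
 * file, so the exact procfs location is not visible here.  Assuming it is
 * hooked in under the "kernel" sysctl directory as "slow-work" (where that
 * registration lives is not shown in this file), the pool could be tuned at
 * run time along these lines:
 *
 *      echo 8  > /proc/sys/kernel/slow-work/max-threads
 *      echo 50 > /proc/sys/kernel/slow-work/vslow-percentage
 *
 * proc_dointvec_minmax() and the two handlers named in the table clamp any
 * written value to the extra1/extra2 bounds given above.
 */
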
/*
 * The active state of the thread pool
 */
static atomic_t slow_work_thread_count;
static atomic_t vslow_work_executing_count;

static bool slow_work_may_not_start_new_thread;
static bool slow_work_cull; /* cull a thread due to lack of activity */
static DEFINE_TIMER(slow_work_cull_timer, slow_work_cull_timeout, 0, 0);
static DEFINE_TIMER(slow_work_oom_timer, slow_work_oom_timeout, 0, 0);
static struct slow_work slow_work_new_thread; /* new thread starter */

/*
 * The queues of work items and the lock governing access to them.  These are
 * shared between all the CPUs.  It doesn't make sense to have per-CPU queues
 * as the number of threads bears no relation to the number of CPUs.
 *
 * There are two queues of work items: one for slow work items, and one for
 * very slow work items.
 */
static LIST_HEAD(slow_work_queue);
static LIST_HEAD(vslow_work_queue);
static DEFINE_SPINLOCK(slow_work_queue_lock);

/*
 * The thread controls.  A variable used to signal to the threads that they
 * should exit when the queue is empty, a waitqueue used by the threads to wait
 * for signals, and a completion set by the last thread to exit.
 */
static bool slow_work_threads_should_exit;
static DECLARE_WAIT_QUEUE_HEAD(slow_work_thread_wq);
static DECLARE_COMPLETION(slow_work_last_thread_exited);

/*
 * The number of users of the thread pool and its lock.  Whilst this is zero we
 * have no threads hanging around, and when this reaches zero, we wait for all
 * active or queued work items to complete and kill all the threads we do have.
 */
static int slow_work_user_count;
static DEFINE_MUTEX(slow_work_user_lock);

/*
 * Calculate the maximum number of active threads in the pool that are
 * permitted to process very slow work items.
 *
 * The answer is rounded up to at least 1, but may not equal or exceed the
 * maximum number of the threads in the pool.  This means we always have at
 * least one thread that can process slow work items, and we always have at
 * least one thread that won't get tied up doing so.
 */
static unsigned slow_work_calc_vsmax(void)
{
        unsigned vsmax;

        vsmax = atomic_read(&slow_work_thread_count) * vslow_work_proportion;
        vsmax /= 100;
        vsmax = max(vsmax, 1U);
        return min(vsmax, slow_work_max_threads - 1);
}

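/*
 * Illustrative worked example of slow_work_calc_vsmax() with the default
 * settings (vslow_work_proportion = 50, slow_work_max_threads = 4):
 *
 *      2 threads in the pool:  2 * 50 / 100 = 1, so vsmax = 1
 *      3 threads in the pool:  3 * 50 / 100 = 1, so vsmax = 1
 *      4 threads in the pool:  4 * 50 / 100 = 2, so vsmax = 2
 *
 * In each case at least one thread remains free for ordinary slow items and
 * at least one thread is permitted to take on a very slow item.
 */
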
/*
 * Attempt to execute stuff queued on a slow thread.  Return true if we managed
 * it, false if there was nothing to do.
 */
static bool slow_work_execute(void)
{
        struct slow_work *work = NULL;
        unsigned vsmax;
        bool very_slow;

        vsmax = slow_work_calc_vsmax();

        /* see if we can schedule a new thread to be started if we're not
         * keeping up with the work */
        if (!waitqueue_active(&slow_work_thread_wq) &&
            (!list_empty(&slow_work_queue) || !list_empty(&vslow_work_queue)) &&
            atomic_read(&slow_work_thread_count) < slow_work_max_threads &&
            !slow_work_may_not_start_new_thread)
                slow_work_enqueue(&slow_work_new_thread);

        /* find something to execute */
        spin_lock_irq(&slow_work_queue_lock);
        if (!list_empty(&vslow_work_queue) &&
            atomic_read(&vslow_work_executing_count) < vsmax) {
                work = list_entry(vslow_work_queue.next,
                                  struct slow_work, link);
                if (test_and_set_bit_lock(SLOW_WORK_EXECUTING, &work->flags))
                        BUG();
                list_del_init(&work->link);
                atomic_inc(&vslow_work_executing_count);
                very_slow = true;
        } else if (!list_empty(&slow_work_queue)) {
                work = list_entry(slow_work_queue.next,
                                  struct slow_work, link);
                if (test_and_set_bit_lock(SLOW_WORK_EXECUTING, &work->flags))
                        BUG();
                list_del_init(&work->link);
                very_slow = false;
        } else {
                very_slow = false; /* avoid the compiler warning */
        }
        spin_unlock_irq(&slow_work_queue_lock);

        if (!work)
                return false;

        if (!test_and_clear_bit(SLOW_WORK_PENDING, &work->flags))
                BUG();

        work->ops->execute(work);

        if (very_slow)
                atomic_dec(&vslow_work_executing_count);
        clear_bit_unlock(SLOW_WORK_EXECUTING, &work->flags);

        /* if someone tried to enqueue the item whilst we were executing it,
         * then it'll be left unenqueued to avoid multiple threads trying to
         * execute it simultaneously
         *
         * there is, however, a race between us testing the pending flag and
         * getting the spinlock, and between the enqueuer setting the pending
         * flag and getting the spinlock, so we use a deferral bit to tell us
         * if the enqueuer got there first
         */
        if (test_bit(SLOW_WORK_PENDING, &work->flags)) {
                spin_lock_irq(&slow_work_queue_lock);

                if (!test_bit(SLOW_WORK_EXECUTING, &work->flags) &&
                    test_and_clear_bit(SLOW_WORK_ENQ_DEFERRED, &work->flags))
                        goto auto_requeue;

                spin_unlock_irq(&slow_work_queue_lock);
        }

        work->ops->put_ref(work);
        return true;

auto_requeue:
        /* we must complete the enqueue operation
         * - we transfer our ref on the item back to the appropriate queue
         * - don't wake another thread up as we're awake already
         */
        if (test_bit(SLOW_WORK_VERY_SLOW, &work->flags))
                list_add_tail(&work->link, &vslow_work_queue);
        else
                list_add_tail(&work->link, &slow_work_queue);
        spin_unlock_irq(&slow_work_queue_lock);
        return true;
}

/**
 * slow_work_enqueue - Schedule a slow work item for processing
 * @work: The work item to queue
 *
 * Schedule a slow work item for processing.  If the item is already undergoing
 * execution, this guarantees not to re-enter the execution routine until the
 * first execution finishes.
 *
 * The item is pinned by this function as it retains a reference to it, managed
 * through the item operations.  The item is unpinned once it has been
 * executed.
 *
 * An item may hog the thread that is running it for a relatively large amount
 * of time, sufficient, for example, to perform several lookup, mkdir, create
 * and setxattr operations.  It may sleep on I/O and may sleep to obtain locks.
 *
 * Conversely, if a number of items are awaiting processing, it may take some
 * time before any given item is given attention.  The number of threads in the
 * pool may be increased to deal with demand, but only up to a limit.
 *
 * If SLOW_WORK_VERY_SLOW is set on the work item, then it will be placed in
 * the very slow queue, from which only a portion of the threads will be
 * allowed to pick items to execute.  This ensures that very slow items won't
 * overly block ones that are just ordinarily slow.
 *
 * Returns 0 if successful, -EAGAIN if not.
 */
int slow_work_enqueue(struct slow_work *work)
{
        unsigned long flags;

        BUG_ON(slow_work_user_count <= 0);
        BUG_ON(!work);
        BUG_ON(!work->ops);
        BUG_ON(!work->ops->get_ref);

        /* when honouring an enqueue request, we only promise that we will run
         * the work function in the future; we do not promise to run it once
         * per enqueue request
         *
         * we use the PENDING bit to merge together repeat requests without
         * having to disable IRQs and take the spinlock, whilst still
         * maintaining our promise
         */
        if (!test_and_set_bit_lock(SLOW_WORK_PENDING, &work->flags)) {
                spin_lock_irqsave(&slow_work_queue_lock, flags);

                /* we promise that we will not attempt to execute the work
                 * function in more than one thread simultaneously
                 *
                 * this, however, leaves us with a problem if we're asked to
                 * enqueue the work whilst someone is executing the work
                 * function as simply queueing the work immediately means that
                 * another thread may try executing it whilst it is already
                 * under execution
                 *
                 * to deal with this, we set the ENQ_DEFERRED bit instead of
                 * enqueueing, and the thread currently executing the work
                 * function will enqueue the work item when the work function
                 * returns and it has cleared the EXECUTING bit
                 */
                if (test_bit(SLOW_WORK_EXECUTING, &work->flags)) {
                        set_bit(SLOW_WORK_ENQ_DEFERRED, &work->flags);
                } else {
                        if (work->ops->get_ref(work) < 0)
                                goto cant_get_ref;
                        if (test_bit(SLOW_WORK_VERY_SLOW, &work->flags))
                                list_add_tail(&work->link, &vslow_work_queue);
                        else
                                list_add_tail(&work->link, &slow_work_queue);
                        wake_up(&slow_work_thread_wq);
                }

                spin_unlock_irqrestore(&slow_work_queue_lock, flags);
        }
        return 0;

cant_get_ref:
        spin_unlock_irqrestore(&slow_work_queue_lock, flags);
        return -EAGAIN;
}
EXPORT_SYMBOL(slow_work_enqueue);

/*
 * Schedule a cull of the thread pool at some time in the near future
 */
static void slow_work_schedule_cull(void)
{
        mod_timer(&slow_work_cull_timer,
                  round_jiffies(jiffies + SLOW_WORK_CULL_TIMEOUT));
}

/*
 * Worker thread culling algorithm
 */
static bool slow_work_cull_thread(void)
{
        unsigned long flags;
        bool do_cull = false;

        spin_lock_irqsave(&slow_work_queue_lock, flags);

        if (slow_work_cull) {
                slow_work_cull = false;

                if (list_empty(&slow_work_queue) &&
                    list_empty(&vslow_work_queue) &&
                    atomic_read(&slow_work_thread_count) >
                    slow_work_min_threads) {
                        slow_work_schedule_cull();
                        do_cull = true;
                }
        }

        spin_unlock_irqrestore(&slow_work_queue_lock, flags);
        return do_cull;
}

/*
 * Determine if there is slow work available for dispatch
 */
static inline bool slow_work_available(int vsmax)
{
        return !list_empty(&slow_work_queue) ||
                (!list_empty(&vslow_work_queue) &&
                 atomic_read(&vslow_work_executing_count) < vsmax);
}

/*
 * Worker thread dispatcher
 */
static int slow_work_thread(void *_data)
{
        int vsmax;

        DEFINE_WAIT(wait);

        set_freezable();
        set_user_nice(current, -5);

        for (;;) {
                vsmax = vslow_work_proportion;
                vsmax *= atomic_read(&slow_work_thread_count);
                vsmax /= 100;

                prepare_to_wait_exclusive(&slow_work_thread_wq, &wait,
                                          TASK_INTERRUPTIBLE);
                if (!freezing(current) &&
                    !slow_work_threads_should_exit &&
                    !slow_work_available(vsmax) &&
                    !slow_work_cull)
                        schedule();
                finish_wait(&slow_work_thread_wq, &wait);

                try_to_freeze();

                vsmax = vslow_work_proportion;
                vsmax *= atomic_read(&slow_work_thread_count);
                vsmax /= 100;

                if (slow_work_available(vsmax) && slow_work_execute()) {
                        cond_resched();
                        if (list_empty(&slow_work_queue) &&
                            list_empty(&vslow_work_queue) &&
                            atomic_read(&slow_work_thread_count) >
                            slow_work_min_threads)
                                slow_work_schedule_cull();
                        continue;
                }

                if (slow_work_threads_should_exit)
                        break;

                if (slow_work_cull && slow_work_cull_thread())
                        break;
        }

        if (atomic_dec_and_test(&slow_work_thread_count))
                complete_and_exit(&slow_work_last_thread_exited, 0);
        return 0;
}

/*
 * Handle thread cull timer expiration
 */
static void slow_work_cull_timeout(unsigned long data)
{
        slow_work_cull = true;
        wake_up(&slow_work_thread_wq);
}

/*
 * Get a reference on slow work thread starter
 */
static int slow_work_new_thread_get_ref(struct slow_work *work)
{
        return 0;
}

/*
 * Drop a reference on slow work thread starter
 */
static void slow_work_new_thread_put_ref(struct slow_work *work)
{
}

/*
 * Start a new slow work thread
 */
static void slow_work_new_thread_execute(struct slow_work *work)
{
        struct task_struct *p;

        if (slow_work_threads_should_exit)
                return;

        if (atomic_read(&slow_work_thread_count) >= slow_work_max_threads)
                return;

        if (!mutex_trylock(&slow_work_user_lock))
                return;

        slow_work_may_not_start_new_thread = true;
        atomic_inc(&slow_work_thread_count);
        p = kthread_run(slow_work_thread, NULL, "kslowd");
        if (IS_ERR(p)) {
                printk(KERN_DEBUG "Slow work thread pool: OOM\n");
                if (atomic_dec_and_test(&slow_work_thread_count))
                        BUG(); /* we're running on a slow work thread... */
                mod_timer(&slow_work_oom_timer,
                          round_jiffies(jiffies + SLOW_WORK_OOM_TIMEOUT));
        } else {
                /* ratelimit the starting of new threads */
                mod_timer(&slow_work_oom_timer, jiffies + 1);
        }

        mutex_unlock(&slow_work_user_lock);
}

static const struct slow_work_ops slow_work_new_thread_ops = {
        .get_ref        = slow_work_new_thread_get_ref,
        .put_ref        = slow_work_new_thread_put_ref,
        .execute        = slow_work_new_thread_execute,
};

/*
 * post-OOM new thread start suppression expiration
 */
static void slow_work_oom_timeout(unsigned long data)
{
        slow_work_may_not_start_new_thread = false;
}

#ifdef CONFIG_SYSCTL
/*
 * Handle adjustment of the minimum number of threads
 */
static int slow_work_min_threads_sysctl(struct ctl_table *table, int write,
                                        void __user *buffer,
                                        size_t *lenp, loff_t *ppos)
{
        int ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
        int n;

        if (ret == 0) {
                mutex_lock(&slow_work_user_lock);
                if (slow_work_user_count > 0) {
                        /* see if we need to start or stop threads */
                        n = atomic_read(&slow_work_thread_count) -
                                slow_work_min_threads;

                        if (n < 0 && !slow_work_may_not_start_new_thread)
                                slow_work_enqueue(&slow_work_new_thread);
                        else if (n > 0)
                                slow_work_schedule_cull();
                }
                mutex_unlock(&slow_work_user_lock);
        }

        return ret;
}

/*
 * Handle adjustment of the maximum number of threads
 */
static int slow_work_max_threads_sysctl(struct ctl_table *table, int write,
                                        void __user *buffer,
                                        size_t *lenp, loff_t *ppos)
{
        int ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
        int n;

        if (ret == 0) {
                mutex_lock(&slow_work_user_lock);
                if (slow_work_user_count > 0) {
                        /* see if we need to stop threads */
                        n = slow_work_max_threads -
                                atomic_read(&slow_work_thread_count);

                        if (n < 0)
                                slow_work_schedule_cull();
                }
                mutex_unlock(&slow_work_user_lock);
        }

        return ret;
}
#endif /* CONFIG_SYSCTL */

/**
 * slow_work_register_user - Register a user of the facility
 *
 * Register a user of the facility, starting up the initial threads if there
 * aren't any other users at this point.  This will return 0 if successful, or
 * an error if not.
 */
int slow_work_register_user(void)
{
        struct task_struct *p;
        int loop;

        mutex_lock(&slow_work_user_lock);

        if (slow_work_user_count == 0) {
                printk(KERN_NOTICE "Slow work thread pool: Starting up\n");
                init_completion(&slow_work_last_thread_exited);

                slow_work_threads_should_exit = false;
                slow_work_init(&slow_work_new_thread,
                               &slow_work_new_thread_ops);
                slow_work_may_not_start_new_thread = false;
                slow_work_cull = false;

                /* start the minimum number of threads */
                for (loop = 0; loop < slow_work_min_threads; loop++) {
                        atomic_inc(&slow_work_thread_count);
                        p = kthread_run(slow_work_thread, NULL, "kslowd");
                        if (IS_ERR(p))
                                goto error;
                }
                printk(KERN_NOTICE "Slow work thread pool: Ready\n");
        }

        slow_work_user_count++;
        mutex_unlock(&slow_work_user_lock);
        return 0;

error:
        if (atomic_dec_and_test(&slow_work_thread_count))
                complete(&slow_work_last_thread_exited);
        if (loop > 0) {
                printk(KERN_ERR "Slow work thread pool:"
                       " Aborting startup on ENOMEM\n");
                slow_work_threads_should_exit = true;
                wake_up_all(&slow_work_thread_wq);
                wait_for_completion(&slow_work_last_thread_exited);
                printk(KERN_ERR "Slow work thread pool: Aborted\n");
        }
        mutex_unlock(&slow_work_user_lock);
        return PTR_ERR(p);
}
EXPORT_SYMBOL(slow_work_register_user);

/**
 * slow_work_unregister_user - Unregister a user of the facility
 *
 * Unregister a user of the facility, killing all the threads if this was the
 * last one.
 */
void slow_work_unregister_user(void)
{
        mutex_lock(&slow_work_user_lock);

        BUG_ON(slow_work_user_count <= 0);

        slow_work_user_count--;
        if (slow_work_user_count == 0) {
                printk(KERN_NOTICE "Slow work thread pool: Shutting down\n");
                slow_work_threads_should_exit = true;
                del_timer_sync(&slow_work_cull_timer);
                del_timer_sync(&slow_work_oom_timer);
                wake_up_all(&slow_work_thread_wq);
                wait_for_completion(&slow_work_last_thread_exited);
                printk(KERN_NOTICE "Slow work thread pool:"
                       " Shut down complete\n");
        }

        mutex_unlock(&slow_work_user_lock);
}
EXPORT_SYMBOL(slow_work_unregister_user);

/*
 * Initialise the slow work facility
 */
static int __init init_slow_work(void)
{
        unsigned nr_cpus = num_possible_cpus();

        if (slow_work_max_threads < nr_cpus)
                slow_work_max_threads = nr_cpus;
#ifdef CONFIG_SYSCTL
        if (slow_work_max_max_threads < nr_cpus * 2)
                slow_work_max_max_threads = nr_cpus * 2;
#endif
        return 0;
}

subsys_initcall(init_slow_work);
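
For orientation, the fragment below sketches how a client of this facility might drive the API exported above: slow_work_register_user(), slow_work_init(), slow_work_enqueue() and slow_work_unregister_user(), with a struct slow_work embedded in the caller's own item and a slow_work_ops table supplying get_ref/put_ref/execute. It is only an illustrative sketch, not part of kernel/slow-work.c: everything named my_* is hypothetical, and the kref-based pinning is just one way to satisfy the reference contract described in the slow_work_enqueue() comment.

#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/slab.h>
#include <linux/slow-work.h>

struct my_slow_item {
        struct kref             ref;    /* pins the item while queued/executing */
        struct slow_work        work;   /* record handed to the slow-work pool */
        /* ... per-item state ... */
};

static void my_item_release(struct kref *ref)
{
        /* assumes the item was kmalloc()'d by its creator */
        kfree(container_of(ref, struct my_slow_item, ref));
}

/* called by slow_work_enqueue(); a negative return makes the enqueue fail */
static int my_item_get_ref(struct slow_work *work)
{
        kref_get(&container_of(work, struct my_slow_item, work)->ref);
        return 0;
}

/* called when the pool drops its reference after execution */
static void my_item_put_ref(struct slow_work *work)
{
        kref_put(&container_of(work, struct my_slow_item, work)->ref,
                 my_item_release);
}

/* runs in a kslowd thread; may sleep, do I/O and take mutexes */
static void my_item_execute(struct slow_work *work)
{
        /* ... the actual slow operation ... */
}

static const struct slow_work_ops my_item_ops = {
        .get_ref        = my_item_get_ref,
        .put_ref        = my_item_put_ref,
        .execute        = my_item_execute,
};

static int my_user_start(struct my_slow_item *item)
{
        int ret;

        ret = slow_work_register_user();        /* ensure kslowd threads exist */
        if (ret < 0)
                return ret;

        kref_init(&item->ref);                  /* caller's own reference */
        slow_work_init(&item->work, &my_item_ops);
        return slow_work_enqueue(&item->work);  /* 0 on success, -EAGAIN if not */
}

static void my_user_stop(void)
{
        slow_work_unregister_user();            /* last user shuts the pool down */
}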
