FreeBSD/Linux Kernel Cross Reference
sys/kernel/rcutiny.c


/*
 * Read-Copy Update mechanism for mutual exclusion, the Bloatwatch edition.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright IBM Corporation, 2008
 *
 * Author: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
 *
 * For detailed explanation of Read-Copy Update mechanism see -
 *              Documentation/RCU
 */
#include <linux/completion.h>
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <linux/rcupdate.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/time.h>
#include <linux/cpu.h>
#include <linux/prefetch.h>

#ifdef CONFIG_RCU_TRACE
#include <trace/events/rcu.h>
#endif /* #ifdef CONFIG_RCU_TRACE */

#include "rcu.h"
/* Forward declarations for rcutiny_plugin.h. */
struct rcu_ctrlblk;
static void invoke_rcu_callbacks(void);
static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp);
static void rcu_process_callbacks(struct softirq_action *unused);
static void __call_rcu(struct rcu_head *head,
                       void (*func)(struct rcu_head *rcu),
                       struct rcu_ctrlblk *rcp);

#include "rcutiny_plugin.h"

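/*
 * Nesting depth of interrupt and task-level non-idle usage on this
 * (sole) CPU.  Zero means the CPU is idle from RCU's perspective,
 * i.e., in an extended quiescent state; see the DYNTICK_TASK_*
 * macros for the encoding of the task-level portion.
 */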
static long long rcu_dynticks_nesting = DYNTICK_TASK_EXIT_IDLE;

/* Common code for rcu_idle_enter() and rcu_irq_exit(), see kernel/rcutree.c. */
static void rcu_idle_enter_common(long long newval)
{
        if (newval) {
                RCU_TRACE(trace_rcu_dyntick("--=",
                                            rcu_dynticks_nesting, newval));
                rcu_dynticks_nesting = newval;
                return;
        }
        RCU_TRACE(trace_rcu_dyntick("Start", rcu_dynticks_nesting, newval));
        if (!is_idle_task(current)) {
                struct task_struct *idle = idle_task(smp_processor_id());

                RCU_TRACE(trace_rcu_dyntick("Error on entry: not idle task",
                                            rcu_dynticks_nesting, newval));
                ftrace_dump(DUMP_ALL);
                WARN_ONCE(1, "Current pid: %d comm: %s / Idle pid: %d comm: %s",
                          current->pid, current->comm,
                          idle->pid, idle->comm); /* must be idle task! */
        }
        rcu_sched_qs(0); /* implies rcu_bh_qs(0) */
        barrier();
        rcu_dynticks_nesting = newval;
}

/*
 * Enter idle, which is an extended quiescent state if we have fully
 * entered that mode (i.e., if the new value of dynticks_nesting is zero).
 */
void rcu_idle_enter(void)
{
        unsigned long flags;
        long long newval;

        local_irq_save(flags);
        WARN_ON_ONCE((rcu_dynticks_nesting & DYNTICK_TASK_NEST_MASK) == 0);
        if ((rcu_dynticks_nesting & DYNTICK_TASK_NEST_MASK) ==
            DYNTICK_TASK_NEST_VALUE)
                newval = 0;
        else
                newval = rcu_dynticks_nesting - DYNTICK_TASK_NEST_VALUE;
        rcu_idle_enter_common(newval);
        local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(rcu_idle_enter);

/*
 * Exit an interrupt handler towards idle.
 */
void rcu_irq_exit(void)
{
        unsigned long flags;
        long long newval;

        local_irq_save(flags);
        newval = rcu_dynticks_nesting - 1;
        WARN_ON_ONCE(newval < 0);
        rcu_idle_enter_common(newval);
        local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(rcu_irq_exit);

/* Common code for rcu_idle_exit() and rcu_irq_enter(), see kernel/rcutree.c. */
static void rcu_idle_exit_common(long long oldval)
{
        if (oldval) {
                RCU_TRACE(trace_rcu_dyntick("++=",
                                            oldval, rcu_dynticks_nesting));
                return;
        }
        RCU_TRACE(trace_rcu_dyntick("End", oldval, rcu_dynticks_nesting));
        if (!is_idle_task(current)) {
                struct task_struct *idle = idle_task(smp_processor_id());

                RCU_TRACE(trace_rcu_dyntick("Error on exit: not idle task",
                          oldval, rcu_dynticks_nesting));
                ftrace_dump(DUMP_ALL);
                WARN_ONCE(1, "Current pid: %d comm: %s / Idle pid: %d comm: %s",
                          current->pid, current->comm,
                          idle->pid, idle->comm); /* must be idle task! */
        }
}

/*
 * Exit idle, so that we are no longer in an extended quiescent state.
 */
void rcu_idle_exit(void)
{
        unsigned long flags;
        long long oldval;

        local_irq_save(flags);
        oldval = rcu_dynticks_nesting;
        WARN_ON_ONCE(rcu_dynticks_nesting < 0);
        if (rcu_dynticks_nesting & DYNTICK_TASK_NEST_MASK)
                rcu_dynticks_nesting += DYNTICK_TASK_NEST_VALUE;
        else
                rcu_dynticks_nesting = DYNTICK_TASK_EXIT_IDLE;
        rcu_idle_exit_common(oldval);
        local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(rcu_idle_exit);

/*
 * Enter an interrupt handler, moving away from idle.
 */
void rcu_irq_enter(void)
{
        unsigned long flags;
        long long oldval;

        local_irq_save(flags);
        oldval = rcu_dynticks_nesting;
        rcu_dynticks_nesting++;
        WARN_ON_ONCE(rcu_dynticks_nesting == 0);
        rcu_idle_exit_common(oldval);
        local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(rcu_irq_enter);
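
/*
 * Illustrative call sequence (not from this file): the idle loop and
 * interrupt entry/exit code keep rcu_dynticks_nesting balanced, e.g.:
 *
 *      rcu_idle_enter();       // task level: CPU idle, RCU stops watching
 *      ...                     // interrupt arrives while idle
 *      rcu_irq_enter();        // nesting 0 -> 1, RCU watching again
 *      ...                     // handler body may use RCU
 *      rcu_irq_exit();         // nesting 1 -> 0, back to idle
 *      ...
 *      rcu_idle_exit();        // task level: CPU non-idle again
 */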

#ifdef CONFIG_DEBUG_LOCK_ALLOC

/*
 * Test whether RCU thinks that the current CPU is idle.
 */
int rcu_is_cpu_idle(void)
{
        return !rcu_dynticks_nesting;
}
EXPORT_SYMBOL(rcu_is_cpu_idle);

#endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
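
/*
 * Note: this is compiled only for lockdep builds because lockdep-based
 * checks such as rcu_read_lock_held() (kernel/rcupdate.c) consult
 * rcu_is_cpu_idle() to report "not held" from extended quiescent states.
 */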

/*
 * Test whether the current CPU was interrupted from idle.  Nested
 * interrupts don't count; we must be running at the first interrupt
 * level.
 */
int rcu_is_cpu_rrupt_from_idle(void)
{
        return rcu_dynticks_nesting <= 1;
}
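
/*
 * For example (values illustrative): while idle, rcu_dynticks_nesting
 * is 0; in a first-level interrupt taken from idle it is 1; in a nested
 * interrupt it is 2 or more.  Hence "<= 1" means that any interrupt we
 * are running in must have come directly from idle.
 */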

/*
 * Helper function for rcu_sched_qs() and rcu_bh_qs().  Called with
 * irqs disabled to avoid confusion due to interrupt handlers invoking
 * call_rcu().
 */
static int rcu_qsctr_help(struct rcu_ctrlblk *rcp)
{
        if (rcp->rcucblist != NULL &&
            rcp->donetail != rcp->curtail) {
                rcp->donetail = rcp->curtail;
                return 1;
        }

        return 0;
}

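/*
 * Sketch of the callback list managed by rcu_qsctr_help() and
 * __rcu_process_callbacks() (illustration, not from the original file).
 * ->donetail and ->curtail point to ->next fields (or to ->rcucblist
 * itself when the corresponding sublist is empty):
 *
 *      rcucblist -> cb1 -> cb2 -> cb3 -> NULL
 *                        ^               ^
 *                    donetail         curtail
 *
 * Callbacks preceding *donetail (here cb1) have had a grace period
 * elapse and may be invoked; the rest (cb2, cb3) still wait.
 */
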
/*
 * Record an rcu quiescent state.  And an rcu_bh quiescent state while we
 * are at it, given that any rcu quiescent state is also an rcu_bh
 * quiescent state.  Use "+" instead of "||" to defeat short circuiting.
 */
void rcu_sched_qs(int cpu)
{
        unsigned long flags;

        local_irq_save(flags);
        if (rcu_qsctr_help(&rcu_sched_ctrlblk) +
            rcu_qsctr_help(&rcu_bh_ctrlblk))
                invoke_rcu_callbacks();
        local_irq_restore(flags);
}

/*
 * Record an rcu_bh quiescent state.
 */
void rcu_bh_qs(int cpu)
{
        unsigned long flags;

        local_irq_save(flags);
        if (rcu_qsctr_help(&rcu_bh_ctrlblk))
                invoke_rcu_callbacks();
        local_irq_restore(flags);
}

/*
 * Check to see if the scheduling-clock interrupt came from an extended
 * quiescent state, and, if so, tell RCU about it.  This function must
 * be called from hardirq context.  It is normally called from the
 * scheduling-clock interrupt.
 */
void rcu_check_callbacks(int cpu, int user)
{
        if (user || rcu_is_cpu_rrupt_from_idle())
                rcu_sched_qs(cpu);
        else if (!in_softirq())
                rcu_bh_qs(cpu);
        rcu_preempt_check_callbacks();
}
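
/*
 * Typical caller (outside this file): update_process_times() in
 * kernel/timer.c invokes rcu_check_callbacks(cpu, user_tick) on each
 * scheduling-clock tick, so a tick taken in user mode or from idle
 * counts as a quiescent state for rcu_sched.
 */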

/*
 * Invoke the RCU callbacks on the specified rcu_ctrlblk structure
 * whose grace period has elapsed.
 */
static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp)
{
        char *rn = NULL;
        struct rcu_head *next, *list;
        unsigned long flags;
        RCU_TRACE(int cb_count = 0);

        /* If no RCU callbacks ready to invoke, just return. */
        if (&rcp->rcucblist == rcp->donetail) {
                RCU_TRACE(trace_rcu_batch_start(rcp->name, 0, 0, -1));
                RCU_TRACE(trace_rcu_batch_end(rcp->name, 0,
                                              ACCESS_ONCE(rcp->rcucblist),
                                              need_resched(),
                                              is_idle_task(current),
                                              rcu_is_callbacks_kthread()));
                return;
        }

        /* Move the ready-to-invoke callbacks to a local list. */
        local_irq_save(flags);
        RCU_TRACE(trace_rcu_batch_start(rcp->name, 0, rcp->qlen, -1));
        list = rcp->rcucblist;
        rcp->rcucblist = *rcp->donetail;
        *rcp->donetail = NULL;
        if (rcp->curtail == rcp->donetail)
                rcp->curtail = &rcp->rcucblist;
        rcu_preempt_remove_callbacks(rcp);
        rcp->donetail = &rcp->rcucblist;
        local_irq_restore(flags);

        /* Invoke the callbacks on the local list. */
        RCU_TRACE(rn = rcp->name);
        while (list) {
                next = list->next;
                prefetch(next);
                debug_rcu_head_unqueue(list);
                local_bh_disable();
                __rcu_reclaim(rn, list);
                local_bh_enable();
                list = next;
                RCU_TRACE(cb_count++);
        }
        RCU_TRACE(rcu_trace_sub_qlen(rcp, cb_count));
        RCU_TRACE(trace_rcu_batch_end(rcp->name, cb_count, 0, need_resched(),
                                      is_idle_task(current),
                                      rcu_is_callbacks_kthread()));
}

static void rcu_process_callbacks(struct softirq_action *unused)
{
        __rcu_process_callbacks(&rcu_sched_ctrlblk);
        __rcu_process_callbacks(&rcu_bh_ctrlblk);
        rcu_preempt_process_callbacks();
}
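
/*
 * rcu_process_callbacks() runs in RCU_SOFTIRQ context; in the
 * non-preemptible Tiny RCU configuration, invoke_rcu_callbacks()
 * simply raises that softirq (see rcutiny_plugin.h for details, which
 * vary with CONFIG_TINY_PREEMPT_RCU and RCU priority boosting).
 */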

/*
 * Wait for a grace period to elapse.  But it is illegal to invoke
 * synchronize_sched() from within an RCU read-side critical section.
 * Therefore, any legal call to synchronize_sched() is a quiescent
 * state, and so on a UP system, synchronize_sched() need do nothing.
 * Ditto for synchronize_rcu_bh().  (But Lai Jiangshan points out the
 * benefits of doing might_sleep() to reduce latency.)
 *
 * Cool, huh?  (Due to Josh Triplett.)
 *
 * But we want to make this a static inline later.  The cond_resched()
 * currently makes this problematic.
 */
void synchronize_sched(void)
{
        rcu_lockdep_assert(!lock_is_held(&rcu_bh_lock_map) &&
                           !lock_is_held(&rcu_lock_map) &&
                           !lock_is_held(&rcu_sched_lock_map),
                           "Illegal synchronize_sched() in RCU read-side critical section");
        cond_resched();
}
EXPORT_SYMBOL_GPL(synchronize_sched);
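
/*
 * Example updater (illustrative; "foo", "global_foo", and foo_replace()
 * are hypothetical, not from this file):
 *
 *      struct foo { int a; };
 *      static struct foo __rcu *global_foo;
 *
 *      static void foo_replace(struct foo *newp)
 *      {
 *              struct foo *oldp = rcu_dereference_protected(global_foo, 1);
 *
 *              rcu_assign_pointer(global_foo, newp);   // publish new version
 *              synchronize_sched();    // wait for pre-existing readers
 *              kfree(oldp);            // no reader can still see oldp
 *      }
 */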

/*
 * Helper function for call_rcu() and call_rcu_bh().
 */
static void __call_rcu(struct rcu_head *head,
                       void (*func)(struct rcu_head *rcu),
                       struct rcu_ctrlblk *rcp)
{
        unsigned long flags;

        debug_rcu_head_queue(head);
        head->func = func;
        head->next = NULL;

        local_irq_save(flags);
        *rcp->curtail = head;
        rcp->curtail = &head->next;
        RCU_TRACE(rcp->qlen++);
        local_irq_restore(flags);
}

/*
 * Post an RCU callback to be invoked after the end of an RCU-sched grace
 * period.  But since we have but one CPU, that would be after any
 * subsequent quiescent state.
 */
void call_rcu_sched(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
{
        __call_rcu(head, func, &rcu_sched_ctrlblk);
}
EXPORT_SYMBOL_GPL(call_rcu_sched);
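
/*
 * Example caller (illustrative; "struct foo" and foo_free() are
 * hypothetical): embed an rcu_head in the protected structure and
 * recover it with container_of() in the callback:
 *
 *      struct foo {
 *              int a;
 *              struct rcu_head rh;
 *      };
 *
 *      static void foo_free(struct rcu_head *rhp)
 *      {
 *              kfree(container_of(rhp, struct foo, rh));
 *      }
 *
 *      // after unlinking p from all RCU-visible structures:
 *      call_rcu_sched(&p->rh, foo_free);
 */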

/*
 * Post an RCU bottom-half callback to be invoked after any subsequent
 * quiescent state.
 */
void call_rcu_bh(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
{
        __call_rcu(head, func, &rcu_bh_ctrlblk);
}
EXPORT_SYMBOL_GPL(call_rcu_bh);
