FreeBSD/Linux Kernel Cross Reference
sys/kern/subr_epoch.c


    1 /*-
    2  * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
    3  *
    4  * Copyright (c) 2018, Matthew Macy <mmacy@freebsd.org>
    5  *
    6  * Redistribution and use in source and binary forms, with or without
    7  * modification, are permitted provided that the following conditions
    8  * are met:
    9  * 1. Redistributions of source code must retain the above copyright
   10  *    notice, this list of conditions and the following disclaimer.
   11  * 2. Redistributions in binary form must reproduce the above copyright
   12  *    notice, this list of conditions and the following disclaimer in the
   13  *    documentation and/or other materials provided with the distribution.
   14  *
   15  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
   16  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   17  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   18  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
   19  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   20  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
   21  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   22  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   23  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   24  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   25  * SUCH DAMAGE.
   26  *
   27  */
   28 
   29 #include <sys/cdefs.h>
   30 __FBSDID("$FreeBSD$");
   31 
   32 #include <sys/param.h>
   33 #include <sys/systm.h>
   34 #include <sys/counter.h>
   35 #include <sys/epoch.h>
   36 #include <sys/gtaskqueue.h>
   37 #include <sys/kernel.h>
   38 #include <sys/limits.h>
   39 #include <sys/lock.h>
   40 #include <sys/malloc.h>
   41 #include <sys/mutex.h>
   42 #include <sys/pcpu.h>
   43 #include <sys/proc.h>
   44 #include <sys/sched.h>
   45 #include <sys/sx.h>
   46 #include <sys/smp.h>
   47 #include <sys/sysctl.h>
   48 #include <sys/turnstile.h>
   49 #ifdef EPOCH_TRACE
   50 #include <machine/stdarg.h>
   51 #include <sys/stack.h>
   52 #include <sys/tree.h>
   53 #endif
   54 #include <vm/vm.h>
   55 #include <vm/vm_extern.h>
   56 #include <vm/vm_kern.h>
   57 #include <vm/uma.h>
   58 
   59 #include <ck_epoch.h>
   60 
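/*
 * Epoch records are padded to EPOCH_ALIGN so that records belonging to
 * different CPUs do not share a cache line (false sharing).  On amd64
 * two cache lines are used, presumably to stay clear of the
 * adjacent-cacheline hardware prefetcher.
 */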
   61 #ifdef __amd64__
   62 #define EPOCH_ALIGN CACHE_LINE_SIZE*2
   63 #else
   64 #define EPOCH_ALIGN CACHE_LINE_SIZE
   65 #endif
   66 
   67 TAILQ_HEAD (epoch_tdlist, epoch_tracker);
   68 typedef struct epoch_record {
   69         ck_epoch_record_t er_record;
   70         struct epoch_context er_drain_ctx;
   71         struct epoch *er_parent;
   72         volatile struct epoch_tdlist er_tdlist;
   73         volatile uint32_t er_gen;
   74         uint32_t er_cpuid;
   75 #ifdef INVARIANTS
   76         /* Used to verify record ownership for non-preemptible epochs. */
   77         struct thread *er_td;
   78 #endif
   79 } __aligned(EPOCH_ALIGN)     *epoch_record_t;
   80 
   81 struct epoch {
   82         struct ck_epoch e_epoch __aligned(EPOCH_ALIGN);
   83         epoch_record_t e_pcpu_record;
   84         int     e_in_use;
   85         int     e_flags;
   86         struct sx e_drain_sx;
   87         struct mtx e_drain_mtx;
   88         volatile int e_drain_count;
   89         const char *e_name;
   90 };
   91 
   92 /* arbitrary --- needs benchmarking */
   93 #define MAX_ADAPTIVE_SPIN 100
   94 #define MAX_EPOCHS 64
   95 
   96 CTASSERT(sizeof(ck_epoch_entry_t) == sizeof(struct epoch_context));
   97 SYSCTL_NODE(_kern, OID_AUTO, epoch, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
   98     "epoch information");
   99 SYSCTL_NODE(_kern_epoch, OID_AUTO, stats, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
  100     "epoch stats");
  101 
  102 /* Stats. */
  103 static counter_u64_t block_count;
  104 
  105 SYSCTL_COUNTER_U64(_kern_epoch_stats, OID_AUTO, nblocked, CTLFLAG_RW,
  106     &block_count, "# of times a thread was in an epoch when epoch_wait was called");
  107 static counter_u64_t migrate_count;
  108 
  109 SYSCTL_COUNTER_U64(_kern_epoch_stats, OID_AUTO, migrations, CTLFLAG_RW,
   110     &migrate_count, "# of times a thread was migrated to another CPU in epoch_wait");
  111 static counter_u64_t turnstile_count;
  112 
  113 SYSCTL_COUNTER_U64(_kern_epoch_stats, OID_AUTO, ncontended, CTLFLAG_RW,
  114     &turnstile_count, "# of times a thread was blocked on a lock in an epoch during an epoch_wait");
  115 static counter_u64_t switch_count;
  116 
  117 SYSCTL_COUNTER_U64(_kern_epoch_stats, OID_AUTO, switches, CTLFLAG_RW,
  118     &switch_count, "# of times a thread voluntarily context switched in epoch_wait");
  119 static counter_u64_t epoch_call_count;
  120 
  121 SYSCTL_COUNTER_U64(_kern_epoch_stats, OID_AUTO, epoch_calls, CTLFLAG_RW,
  122     &epoch_call_count, "# of times a callback was deferred");
  123 static counter_u64_t epoch_call_task_count;
  124 
  125 SYSCTL_COUNTER_U64(_kern_epoch_stats, OID_AUTO, epoch_call_tasks, CTLFLAG_RW,
  126     &epoch_call_task_count, "# of times a callback task was run");
  127 
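/*
 * The counters above hang off the kern.epoch.stats sysctl node declared
 * earlier, so the statistics can be inspected at run time, e.g. with
 * "sysctl kern.epoch.stats" from userland.
 */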
  128 TAILQ_HEAD (threadlist, thread);
  129 
  130 CK_STACK_CONTAINER(struct ck_epoch_entry, stack_entry,
  131     ck_epoch_entry_container)
  132 
  133 static struct epoch epoch_array[MAX_EPOCHS];
  134 
  135 DPCPU_DEFINE(struct grouptask, epoch_cb_task);
  136 DPCPU_DEFINE(int, epoch_cb_count);
  137 
  138 static __read_mostly int inited;
  139 __read_mostly epoch_t global_epoch;
  140 __read_mostly epoch_t global_epoch_preempt;
  141 
  142 static void epoch_call_task(void *context __unused);
  143 static  uma_zone_t pcpu_zone_record;
  144 
  145 static struct sx epoch_sx;
  146 
  147 #define EPOCH_LOCK() sx_xlock(&epoch_sx)
  148 #define EPOCH_UNLOCK() sx_xunlock(&epoch_sx)
  149 
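/*
 * Return the record of the given epoch for the CPU the calling thread
 * is currently executing on.  The caller must prevent migration, e.g.
 * by running in a critical section or while sched_pin()ned, for the
 * result to remain stable.
 */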
  150 static epoch_record_t
  151 epoch_currecord(epoch_t epoch)
  152 {
  153 
  154         return (zpcpu_get(epoch->e_pcpu_record));
  155 }
  156 
  157 #ifdef EPOCH_TRACE
  158 struct stackentry {
  159         RB_ENTRY(stackentry) se_node;
  160         struct stack se_stack;
  161 };
  162 
  163 static int
  164 stackentry_compare(struct stackentry *a, struct stackentry *b)
  165 {
  166 
  167         if (a->se_stack.depth > b->se_stack.depth)
  168                 return (1);
  169         if (a->se_stack.depth < b->se_stack.depth)
  170                 return (-1);
  171         for (int i = 0; i < a->se_stack.depth; i++) {
  172                 if (a->se_stack.pcs[i] > b->se_stack.pcs[i])
  173                         return (1);
  174                 if (a->se_stack.pcs[i] < b->se_stack.pcs[i])
  175                         return (-1);
  176         }
  177 
  178         return (0);
  179 }
  180 
  181 RB_HEAD(stacktree, stackentry) epoch_stacks = RB_INITIALIZER(&epoch_stacks);
  182 RB_GENERATE_STATIC(stacktree, stackentry, se_node, stackentry_compare);
  183 
  184 static struct mtx epoch_stacks_lock;
  185 MTX_SYSINIT(epochstacks, &epoch_stacks_lock, "epoch_stacks", MTX_DEF);
  186 
  187 static bool epoch_trace_stack_print = true;
  188 SYSCTL_BOOL(_kern_epoch, OID_AUTO, trace_stack_print, CTLFLAG_RWTUN,
  189     &epoch_trace_stack_print, 0, "Print stack traces on epoch reports");
  190 
  191 static void epoch_trace_report(const char *fmt, ...) __printflike(1, 2);
  192 static inline void
  193 epoch_trace_report(const char *fmt, ...)
  194 {
  195         va_list ap;
  196         struct stackentry se, *new;
  197 
  198         stack_save(&se.se_stack);
  199 
  200         /* Tree is never reduced - go lockless. */
  201         if (RB_FIND(stacktree, &epoch_stacks, &se) != NULL)
  202                 return;
  203 
  204         new = malloc(sizeof(*new), M_STACK, M_NOWAIT);
  205         if (new != NULL) {
  206                 bcopy(&se.se_stack, &new->se_stack, sizeof(struct stack));
  207 
  208                 mtx_lock(&epoch_stacks_lock);
  209                 new = RB_INSERT(stacktree, &epoch_stacks, new);
  210                 mtx_unlock(&epoch_stacks_lock);
  211                 if (new != NULL)
  212                         free(new, M_STACK);
  213         }
  214 
  215         va_start(ap, fmt);
  216         (void)vprintf(fmt, ap);
  217         va_end(ap);
  218         if (epoch_trace_stack_print)
  219                 stack_print_ddb(&se.se_stack);
  220 }
  221 
  222 static inline void
  223 epoch_trace_enter(struct thread *td, epoch_t epoch, epoch_tracker_t et,
  224     const char *file, int line)
  225 {
  226         epoch_tracker_t iet;
  227 
  228         SLIST_FOREACH(iet, &td->td_epochs, et_tlink) {
  229                 if (iet->et_epoch != epoch)
  230                         continue;
  231                 epoch_trace_report("Recursively entering epoch %s "
  232                     "at %s:%d, previously entered at %s:%d\n",
  233                     epoch->e_name, file, line,
  234                     iet->et_file, iet->et_line);
  235         }
  236         et->et_epoch = epoch;
  237         et->et_file = file;
  238         et->et_line = line;
  239         et->et_flags = 0;
  240         SLIST_INSERT_HEAD(&td->td_epochs, et, et_tlink);
  241 }
  242 
  243 static inline void
  244 epoch_trace_exit(struct thread *td, epoch_t epoch, epoch_tracker_t et,
  245     const char *file, int line)
  246 {
  247 
  248         if (SLIST_FIRST(&td->td_epochs) != et) {
   249                 epoch_trace_report("Exiting epoch %s in non-nested order "
  250                     "at %s:%d. Most recently entered %s at %s:%d\n",
  251                     epoch->e_name,
  252                     file, line,
  253                     SLIST_FIRST(&td->td_epochs)->et_epoch->e_name,
  254                     SLIST_FIRST(&td->td_epochs)->et_file,
  255                     SLIST_FIRST(&td->td_epochs)->et_line);
  256                 /* This will panic if et is not anywhere on td_epochs. */
  257                 SLIST_REMOVE(&td->td_epochs, et, epoch_tracker, et_tlink);
  258         } else
  259                 SLIST_REMOVE_HEAD(&td->td_epochs, et_tlink);
  260         if (et->et_flags & ET_REPORT_EXIT)
  261                 printf("Td %p exiting epoch %s at %s:%d\n", td, epoch->e_name,
  262                     file, line);
  263 }
  264 
  265 /* Used by assertions that check thread state before going to sleep. */
  266 void
  267 epoch_trace_list(struct thread *td)
  268 {
  269         epoch_tracker_t iet;
  270 
  271         SLIST_FOREACH(iet, &td->td_epochs, et_tlink)
  272                 printf("Epoch %s entered at %s:%d\n", iet->et_epoch->e_name,
  273                     iet->et_file, iet->et_line);
  274 }
  275 
  276 void
  277 epoch_where_report(epoch_t epoch)
  278 {
  279         epoch_record_t er;
  280         struct epoch_tracker *tdwait;
  281 
  282         MPASS(epoch != NULL);
  283         MPASS((epoch->e_flags & EPOCH_PREEMPT) != 0);
  284         MPASS(!THREAD_CAN_SLEEP());
  285         critical_enter();
  286         er = epoch_currecord(epoch);
  287         TAILQ_FOREACH(tdwait, &er->er_tdlist, et_link)
  288                 if (tdwait->et_td == curthread)
  289                         break;
  290         critical_exit();
  291         if (tdwait != NULL) {
  292                 tdwait->et_flags |= ET_REPORT_EXIT;
  293                 printf("Td %p entered epoch %s at %s:%d\n", curthread,
  294                     epoch->e_name, tdwait->et_file, tdwait->et_line);
  295         }
  296 }
  297 #endif /* EPOCH_TRACE */
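/*
 * The tracing facility above is compiled in only when the kernel is
 * built with EPOCH_TRACE.  A minimal kernel-config sketch (assuming a
 * platform with stack(9) support) might be:
 *
 *	options	EPOCH_TRACE
 *	options	STACK
 */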
  298 
  299 static void
  300 epoch_init(void *arg __unused)
  301 {
  302         int cpu;
  303 
  304         block_count = counter_u64_alloc(M_WAITOK);
  305         migrate_count = counter_u64_alloc(M_WAITOK);
  306         turnstile_count = counter_u64_alloc(M_WAITOK);
  307         switch_count = counter_u64_alloc(M_WAITOK);
  308         epoch_call_count = counter_u64_alloc(M_WAITOK);
  309         epoch_call_task_count = counter_u64_alloc(M_WAITOK);
  310 
  311         pcpu_zone_record = uma_zcreate("epoch_record pcpu",
  312             sizeof(struct epoch_record), NULL, NULL, NULL, NULL,
  313             UMA_ALIGN_PTR, UMA_ZONE_PCPU);
  314         CPU_FOREACH(cpu) {
  315                 GROUPTASK_INIT(DPCPU_ID_PTR(cpu, epoch_cb_task), 0,
  316                     epoch_call_task, NULL);
  317                 taskqgroup_attach_cpu(qgroup_softirq,
  318                     DPCPU_ID_PTR(cpu, epoch_cb_task), NULL, cpu, NULL, NULL,
  319                     "epoch call task");
  320         }
  321 #ifdef EPOCH_TRACE
  322         SLIST_INIT(&thread0.td_epochs);
  323 #endif
  324         sx_init(&epoch_sx, "epoch-sx");
  325         inited = 1;
  326         global_epoch = epoch_alloc("Global", 0);
  327         global_epoch_preempt = epoch_alloc("Global preemptible", EPOCH_PREEMPT);
  328 }
  329 SYSINIT(epoch, SI_SUB_EPOCH, SI_ORDER_FIRST, epoch_init, NULL);
  330 
  331 #if !defined(EARLY_AP_STARTUP)
  332 static void
  333 epoch_init_smp(void *dummy __unused)
  334 {
  335         inited = 2;
  336 }
  337 SYSINIT(epoch_smp, SI_SUB_SMP + 1, SI_ORDER_FIRST, epoch_init_smp, NULL);
  338 #endif
  339 
  340 static void
  341 epoch_ctor(epoch_t epoch)
  342 {
  343         epoch_record_t er;
  344         int cpu;
  345 
  346         epoch->e_pcpu_record = uma_zalloc_pcpu(pcpu_zone_record, M_WAITOK);
  347         CPU_FOREACH(cpu) {
  348                 er = zpcpu_get_cpu(epoch->e_pcpu_record, cpu);
  349                 bzero(er, sizeof(*er));
  350                 ck_epoch_register(&epoch->e_epoch, &er->er_record, NULL);
  351                 TAILQ_INIT((struct threadlist *)(uintptr_t)&er->er_tdlist);
  352                 er->er_cpuid = cpu;
  353                 er->er_parent = epoch;
  354         }
  355 }
  356 
  357 static void
  358 epoch_adjust_prio(struct thread *td, u_char prio)
  359 {
  360 
  361         thread_lock(td);
  362         sched_prio(td, prio);
  363         thread_unlock(td);
  364 }
  365 
  366 epoch_t
  367 epoch_alloc(const char *name, int flags)
  368 {
  369         epoch_t epoch;
  370         int i;
  371 
  372         MPASS(name != NULL);
  373 
  374         if (__predict_false(!inited))
  375                 panic("%s called too early in boot", __func__);
  376 
  377         EPOCH_LOCK();
  378 
  379         /*
   380          * Find a free index in the epoch array.  If all entries are
   381          * in use, the allocation fails and NULL is returned.
  382          */
  383         for (i = 0;; i++) {
  384                 /*
  385                  * If too many epochs are currently allocated,
  386                  * return NULL.
  387                  */
  388                 if (i == MAX_EPOCHS) {
  389                         epoch = NULL;
  390                         goto done;
  391                 }
  392                 if (epoch_array[i].e_in_use == 0)
  393                         break;
  394         }
  395 
  396         epoch = epoch_array + i;
  397         ck_epoch_init(&epoch->e_epoch);
  398         epoch_ctor(epoch);
  399         epoch->e_flags = flags;
  400         epoch->e_name = name;
  401         sx_init(&epoch->e_drain_sx, "epoch-drain-sx");
  402         mtx_init(&epoch->e_drain_mtx, "epoch-drain-mtx", NULL, MTX_DEF);
  403 
  404         /*
  405          * Set e_in_use last, because when this field is set the
  406          * epoch_call_task() function will start scanning this epoch
  407          * structure.
  408          */
  409         atomic_store_rel_int(&epoch->e_in_use, 1);
  410 done:
  411         EPOCH_UNLOCK();
  412         return (epoch);
  413 }
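/*
 * Usage sketch (hypothetical names, not part of this file): a
 * subsystem typically allocates its epoch once at boot, at
 * SI_SUB_EPOCH or later so that epoch_init() has already run:
 *
 *	static epoch_t foo_epoch;
 *
 *	static void
 *	foo_init(void *dummy __unused)
 *	{
 *		foo_epoch = epoch_alloc("foo", EPOCH_PREEMPT);
 *	}
 *	SYSINIT(foo_epoch_init, SI_SUB_EPOCH, SI_ORDER_ANY, foo_init, NULL);
 */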
  414 
  415 void
  416 epoch_free(epoch_t epoch)
  417 {
  418 #ifdef INVARIANTS
  419         int cpu;
  420 #endif
  421 
  422         EPOCH_LOCK();
  423 
  424         MPASS(epoch->e_in_use != 0);
  425 
  426         epoch_drain_callbacks(epoch);
  427 
  428         atomic_store_rel_int(&epoch->e_in_use, 0);
  429         /*
   430          * Make sure the epoch_call_task() function sees e_in_use equal
   431          * to zero, by calling epoch_wait() on the global_epoch:
  432          */
  433         epoch_wait(global_epoch);
  434 #ifdef INVARIANTS
  435         CPU_FOREACH(cpu) {
  436                 epoch_record_t er;
  437 
  438                 er = zpcpu_get_cpu(epoch->e_pcpu_record, cpu);
  439 
  440                 /*
  441                  * Sanity check: none of the records should be in use anymore.
  442                  * We drained callbacks above and freeing the pcpu records is
  443                  * imminent.
  444                  */
  445                 MPASS(er->er_td == NULL);
  446                 MPASS(TAILQ_EMPTY(&er->er_tdlist));
  447         }
  448 #endif
  449         uma_zfree_pcpu(pcpu_zone_record, epoch->e_pcpu_record);
  450         mtx_destroy(&epoch->e_drain_mtx);
  451         sx_destroy(&epoch->e_drain_sx);
  452         memset(epoch, 0, sizeof(*epoch));
  453 
  454         EPOCH_UNLOCK();
  455 }
  456 
  457 #define INIT_CHECK(epoch)                                       \
  458         do {                                                    \
  459                 if (__predict_false((epoch) == NULL))           \
  460                         return;                                 \
  461         } while (0)
  462 
  463 void
  464 _epoch_enter_preempt(epoch_t epoch, epoch_tracker_t et EPOCH_FILE_LINE)
  465 {
  466         struct epoch_record *er;
  467         struct thread *td;
  468 
  469         MPASS(cold || epoch != NULL);
  470         td = curthread;
  471         MPASS(kstack_contains(td, (vm_offset_t)et, sizeof(*et)));
  472 
  473         INIT_CHECK(epoch);
  474         MPASS(epoch->e_flags & EPOCH_PREEMPT);
  475 
  476 #ifdef EPOCH_TRACE
  477         epoch_trace_enter(td, epoch, et, file, line);
  478 #endif
  479         et->et_td = td;
  480         THREAD_NO_SLEEPING();
  481         critical_enter();
  482         sched_pin();
  483         et->et_old_priority = td->td_priority;
  484         er = epoch_currecord(epoch);
  485         /* Record-level tracking is reserved for non-preemptible epochs. */
  486         MPASS(er->er_td == NULL);
  487         TAILQ_INSERT_TAIL(&er->er_tdlist, et, et_link);
  488         ck_epoch_begin(&er->er_record, &et->et_section);
  489         critical_exit();
  490 }
  491 
  492 void
  493 epoch_enter(epoch_t epoch)
  494 {
  495         epoch_record_t er;
  496 
  497         MPASS(cold || epoch != NULL);
  498         INIT_CHECK(epoch);
  499         critical_enter();
  500         er = epoch_currecord(epoch);
  501 #ifdef INVARIANTS
  502         if (er->er_record.active == 0) {
  503                 MPASS(er->er_td == NULL);
  504                 er->er_td = curthread;
  505         } else {
  506                 /* We've recursed, just make sure our accounting isn't wrong. */
  507                 MPASS(er->er_td == curthread);
  508         }
  509 #endif
  510         ck_epoch_begin(&er->er_record, NULL);
  511 }
  512 
  513 void
  514 _epoch_exit_preempt(epoch_t epoch, epoch_tracker_t et EPOCH_FILE_LINE)
  515 {
  516         struct epoch_record *er;
  517         struct thread *td;
  518 
  519         INIT_CHECK(epoch);
  520         td = curthread;
  521         critical_enter();
  522         sched_unpin();
  523         THREAD_SLEEPING_OK();
  524         er = epoch_currecord(epoch);
  525         MPASS(epoch->e_flags & EPOCH_PREEMPT);
  526         MPASS(et != NULL);
  527         MPASS(et->et_td == td);
  528 #ifdef INVARIANTS
  529         et->et_td = (void*)0xDEADBEEF;
  530         /* Record-level tracking is reserved for non-preemptible epochs. */
  531         MPASS(er->er_td == NULL);
  532 #endif
  533         ck_epoch_end(&er->er_record, &et->et_section);
  534         TAILQ_REMOVE(&er->er_tdlist, et, et_link);
  535         er->er_gen++;
  536         if (__predict_false(et->et_old_priority != td->td_priority))
  537                 epoch_adjust_prio(td, et->et_old_priority);
  538         critical_exit();
  539 #ifdef EPOCH_TRACE
  540         epoch_trace_exit(td, epoch, et, file, line);
  541 #endif
  542 }
  543 
  544 void
  545 epoch_exit(epoch_t epoch)
  546 {
  547         epoch_record_t er;
  548 
  549         INIT_CHECK(epoch);
  550         er = epoch_currecord(epoch);
  551         ck_epoch_end(&er->er_record, NULL);
  552 #ifdef INVARIANTS
  553         MPASS(er->er_td == curthread);
  554         if (er->er_record.active == 0)
  555                 er->er_td = NULL;
  556 #endif
  557         critical_exit();
  558 }
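/*
 * Reader-side usage sketch (hypothetical names, not part of this
 * file): a preemptible section is bracketed by epoch_enter_preempt()
 * and epoch_exit_preempt() with a stack-resident tracker, and makes a
 * lockless traversal of a concurrently modified list safe:
 *
 *	struct epoch_tracker et;
 *
 *	epoch_enter_preempt(foo_epoch, &et);
 *	CK_LIST_FOREACH(f, &foo_list, link)
 *		foo_inspect(f);
 *	epoch_exit_preempt(foo_epoch, &et);
 *
 * Non-preemptible epochs use the plain epoch_enter()/epoch_exit()
 * pair above, which runs the section inside a critical section.
 */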
  559 
  560 /*
   561  * epoch_block_handler_preempt() is a callback invoked by the CK code when
   562  * another thread is currently in an epoch section.
  563  */
  564 static void
  565 epoch_block_handler_preempt(struct ck_epoch *global __unused,
  566     ck_epoch_record_t *cr, void *arg __unused)
  567 {
  568         epoch_record_t record;
  569         struct thread *td, *owner, *curwaittd;
  570         struct epoch_tracker *tdwait;
  571         struct turnstile *ts;
  572         struct lock_object *lock;
  573         int spincount, gen;
  574         int locksheld __unused;
  575 
  576         record = __containerof(cr, struct epoch_record, er_record);
  577         td = curthread;
  578         locksheld = td->td_locks;
  579         spincount = 0;
  580         counter_u64_add(block_count, 1);
  581         /*
   582          * We lost a race and there are no longer any threads
   583          * on the CPU in an epoch section.
  584          */
  585         if (TAILQ_EMPTY(&record->er_tdlist))
  586                 return;
  587 
  588         if (record->er_cpuid != curcpu) {
  589                 /*
  590                  * If the head of the list is running, we can wait for it
  591                  * to remove itself from the list and thus save us the
   592                  * overhead of a migration.
  593                  */
  594                 gen = record->er_gen;
  595                 thread_unlock(td);
  596                 /*
   597                  * We can't actually check if the waiting thread is running,
  598                  * so we simply poll for it to exit before giving up and
  599                  * migrating.
  600                  */
  601                 do {
  602                         cpu_spinwait();
  603                 } while (!TAILQ_EMPTY(&record->er_tdlist) &&
  604                                  gen == record->er_gen &&
  605                                  spincount++ < MAX_ADAPTIVE_SPIN);
  606                 thread_lock(td);
  607                 /*
   608                  * If the generation has changed we can poll again;
  609                  * otherwise we need to migrate.
  610                  */
  611                 if (gen != record->er_gen)
  612                         return;
  613                 /*
   614                  * Being on the same CPU as the record on which we
   615                  * need to wait gives us access to the thread
  616                  * list associated with that CPU. We can then examine the
  617                  * oldest thread in the queue and wait on its turnstile
  618                  * until it resumes and so on until a grace period
  619                  * elapses.
  620                  *
  621                  */
  622                 counter_u64_add(migrate_count, 1);
  623                 sched_bind(td, record->er_cpuid);
  624                 /*
  625                  * At this point we need to return to the ck code
  626                  * to scan to see if a grace period has elapsed.
  627                  * We can't move on to check the thread list, because
  628                  * in the meantime new threads may have arrived that
  629                  * in fact belong to a different epoch.
  630                  */
  631                 return;
  632         }
  633         /*
   634          * Try to find a thread in an epoch section on this CPU
   635          * waiting on a turnstile.  Otherwise raise the scheduling
   636          * priority of any lower-priority threads in the section
   637          * (higher prio value) to our own so that they can run.
  638          */
  639         TAILQ_FOREACH(tdwait, &record->er_tdlist, et_link) {
  640                 /*
  641                  * Propagate our priority to any other waiters to prevent us
  642                  * from starving them. They will have their original priority
   643                  * restored on exit from epoch_wait().
  644                  */
  645                 curwaittd = tdwait->et_td;
  646                 if (!TD_IS_INHIBITED(curwaittd) && curwaittd->td_priority > td->td_priority) {
  647                         critical_enter();
  648                         thread_unlock(td);
  649                         thread_lock(curwaittd);
  650                         sched_prio(curwaittd, td->td_priority);
  651                         thread_unlock(curwaittd);
  652                         thread_lock(td);
  653                         critical_exit();
  654                 }
  655                 if (TD_IS_INHIBITED(curwaittd) && TD_ON_LOCK(curwaittd) &&
  656                     ((ts = curwaittd->td_blocked) != NULL)) {
  657                         /*
   658                          * We unlock td to allow turnstile_wait() to
   659                          * reacquire the thread lock.  Before unlocking we
   660                          * enter a critical section so that we cannot be
   661                          * preempted once the thread lock is dropped, as
   662                          * that would allow curwaittd to run.
  663                          */
  664                         critical_enter();
  665                         thread_unlock(td);
  666 
  667                         if (turnstile_lock(ts, &lock, &owner)) {
  668                                 if (ts == curwaittd->td_blocked) {
  669                                         MPASS(TD_IS_INHIBITED(curwaittd) &&
  670                                             TD_ON_LOCK(curwaittd));
  671                                         critical_exit();
  672                                         turnstile_wait(ts, owner,
  673                                             curwaittd->td_tsqueue);
  674                                         counter_u64_add(turnstile_count, 1);
  675                                         thread_lock(td);
  676                                         return;
  677                                 }
  678                                 turnstile_unlock(ts, lock);
  679                         }
  680                         thread_lock(td);
  681                         critical_exit();
  682                         KASSERT(td->td_locks == locksheld,
  683                             ("%d extra locks held", td->td_locks - locksheld));
  684                 }
  685         }
  686         /*
  687          * We didn't find any threads actually blocked on a lock
  688          * so we have nothing to do except context switch away.
  689          */
  690         counter_u64_add(switch_count, 1);
  691         mi_switch(SW_VOL | SWT_RELINQUISH);
  692         /*
   693          * It is important that the thread lock is dropped while
   694          * yielding, to allow other threads to acquire the lock
   695          * pointed to by TDQ_LOCKPTR(td).  Currently mi_switch()
   696          * unlocks the thread lock before returning.  Otherwise a
   697          * deadlock-like situation could arise.
  698          */
  699         thread_lock(td);
  700 }
  701 
  702 void
  703 epoch_wait_preempt(epoch_t epoch)
  704 {
  705         struct thread *td;
  706         int was_bound;
  707         int old_cpu;
  708         int old_pinned;
  709         u_char old_prio;
  710         int locks __unused;
  711 
  712         MPASS(cold || epoch != NULL);
  713         INIT_CHECK(epoch);
  714         td = curthread;
  715 #ifdef INVARIANTS
  716         locks = curthread->td_locks;
  717         MPASS(epoch->e_flags & EPOCH_PREEMPT);
  718         if ((epoch->e_flags & EPOCH_LOCKED) == 0)
  719                 WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
  720                     "epoch_wait() can be long running");
  721         KASSERT(!in_epoch(epoch), ("epoch_wait_preempt() called in the middle "
  722             "of an epoch section of the same epoch"));
  723 #endif
  724         DROP_GIANT();
  725         thread_lock(td);
  726 
  727         old_cpu = PCPU_GET(cpuid);
  728         old_pinned = td->td_pinned;
  729         old_prio = td->td_priority;
  730         was_bound = sched_is_bound(td);
  731         sched_unbind(td);
  732         td->td_pinned = 0;
  733         sched_bind(td, old_cpu);
  734 
  735         ck_epoch_synchronize_wait(&epoch->e_epoch, epoch_block_handler_preempt,
  736             NULL);
  737 
  738         /* restore CPU binding, if any */
  739         if (was_bound != 0) {
  740                 sched_bind(td, old_cpu);
  741         } else {
  742                 /* get thread back to initial CPU, if any */
  743                 if (old_pinned != 0)
  744                         sched_bind(td, old_cpu);
  745                 sched_unbind(td);
  746         }
  747         /* restore pinned after bind */
  748         td->td_pinned = old_pinned;
  749 
  750         /* restore thread priority */
  751         sched_prio(td, old_prio);
  752         thread_unlock(td);
  753         PICKUP_GIANT();
  754         KASSERT(td->td_locks == locks,
  755             ("%d residual locks held", td->td_locks - locks));
  756 }
  757 
  758 static void
  759 epoch_block_handler(struct ck_epoch *g __unused, ck_epoch_record_t *c __unused,
  760     void *arg __unused)
  761 {
  762         cpu_spinwait();
  763 }
  764 
  765 void
  766 epoch_wait(epoch_t epoch)
  767 {
  768 
  769         MPASS(cold || epoch != NULL);
  770         INIT_CHECK(epoch);
  771         MPASS(epoch->e_flags == 0);
  772         critical_enter();
  773         ck_epoch_synchronize_wait(&epoch->e_epoch, epoch_block_handler, NULL);
  774         critical_exit();
  775 }
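/*
 * Writer-side usage sketch (hypothetical names, not part of this
 * file): unlink the object under the writer lock, wait for readers
 * still inside a section to leave, then free it.  Since foo_epoch was
 * allocated with EPOCH_PREEMPT in the earlier sketch, the sleepable
 * epoch_wait_preempt() variant is used; plain epoch_wait() is only
 * for non-preemptible epochs:
 *
 *	mtx_lock(&foo_lock);
 *	CK_LIST_REMOVE(f, link);
 *	mtx_unlock(&foo_lock);
 *	epoch_wait_preempt(foo_epoch);
 *	free(f, M_FOO);
 *
 * A grace period elapses inside epoch_wait_preempt(), so no reader can
 * still hold a reference to f once it is freed.
 */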
  776 
  777 void
  778 epoch_call(epoch_t epoch, epoch_callback_t callback, epoch_context_t ctx)
  779 {
  780         epoch_record_t er;
  781         ck_epoch_entry_t *cb;
  782 
  783         cb = (void *)ctx;
  784 
  785         MPASS(callback);
  786         /* too early in boot to have epoch set up */
  787         if (__predict_false(epoch == NULL))
  788                 goto boottime;
  789 #if !defined(EARLY_AP_STARTUP)
  790         if (__predict_false(inited < 2))
  791                 goto boottime;
  792 #endif
  793 
  794         critical_enter();
  795         *DPCPU_PTR(epoch_cb_count) += 1;
  796         er = epoch_currecord(epoch);
  797         ck_epoch_call(&er->er_record, cb, (ck_epoch_cb_t *)callback);
  798         critical_exit();
  799         return;
  800 boottime:
  801         callback(ctx);
  802 }
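/*
 * Deferred-free usage sketch (hypothetical names, not part of this
 * file): instead of waiting synchronously, embed an epoch_context in
 * the object and let epoch_call() run the destructor after a grace
 * period:
 *
 *	struct foo {
 *		CK_LIST_ENTRY(foo) link;
 *		struct epoch_context ctx;
 *	};
 *
 *	static void
 *	foo_destroy(epoch_context_t ctx)
 *	{
 *		struct foo *f = __containerof(ctx, struct foo, ctx);
 *
 *		free(f, M_FOO);
 *	}
 *
 *	...
 *	CK_LIST_REMOVE(f, link);
 *	epoch_call(foo_epoch, foo_destroy, &f->ctx);
 */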
  803 
  804 static void
  805 epoch_call_task(void *arg __unused)
  806 {
  807         ck_stack_entry_t *cursor, *head, *next;
  808         ck_epoch_record_t *record;
  809         epoch_record_t er;
  810         epoch_t epoch;
  811         ck_stack_t cb_stack;
  812         int i, npending, total;
  813 
  814         ck_stack_init(&cb_stack);
  815         critical_enter();
  816         epoch_enter(global_epoch);
  817         for (total = i = 0; i != MAX_EPOCHS; i++) {
  818                 epoch = epoch_array + i;
  819                 if (__predict_false(
  820                     atomic_load_acq_int(&epoch->e_in_use) == 0))
  821                         continue;
  822                 er = epoch_currecord(epoch);
  823                 record = &er->er_record;
  824                 if ((npending = record->n_pending) == 0)
  825                         continue;
  826                 ck_epoch_poll_deferred(record, &cb_stack);
  827                 total += npending - record->n_pending;
  828         }
  829         epoch_exit(global_epoch);
  830         *DPCPU_PTR(epoch_cb_count) -= total;
  831         critical_exit();
  832 
  833         counter_u64_add(epoch_call_count, total);
  834         counter_u64_add(epoch_call_task_count, 1);
  835 
  836         head = ck_stack_batch_pop_npsc(&cb_stack);
  837         for (cursor = head; cursor != NULL; cursor = next) {
  838                 struct ck_epoch_entry *entry =
  839                     ck_epoch_entry_container(cursor);
  840 
  841                 next = CK_STACK_NEXT(cursor);
  842                 entry->function(entry);
  843         }
  844 }
  845 
  846 static int
  847 in_epoch_verbose_preempt(epoch_t epoch, int dump_onfail)
  848 {
  849         epoch_record_t er;
  850         struct epoch_tracker *tdwait;
  851         struct thread *td;
  852 
  853         MPASS(epoch != NULL);
  854         MPASS((epoch->e_flags & EPOCH_PREEMPT) != 0);
  855         td = curthread;
  856         if (THREAD_CAN_SLEEP())
  857                 return (0);
  858         critical_enter();
  859         er = epoch_currecord(epoch);
  860         TAILQ_FOREACH(tdwait, &er->er_tdlist, et_link)
  861                 if (tdwait->et_td == td) {
  862                         critical_exit();
  863                         return (1);
  864                 }
  865 #ifdef INVARIANTS
  866         if (dump_onfail) {
  867                 MPASS(td->td_pinned);
  868                 printf("cpu: %d id: %d\n", curcpu, td->td_tid);
  869                 TAILQ_FOREACH(tdwait, &er->er_tdlist, et_link)
  870                         printf("td_tid: %d ", tdwait->et_td->td_tid);
  871                 printf("\n");
  872         }
  873 #endif
  874         critical_exit();
  875         return (0);
  876 }
  877 
  878 #ifdef INVARIANTS
  879 static void
  880 epoch_assert_nocpu(epoch_t epoch, struct thread *td)
  881 {
  882         epoch_record_t er;
  883         int cpu;
  884         bool crit;
  885 
  886         crit = td->td_critnest > 0;
  887 
  888         /* Check for a critical section mishap. */
  889         CPU_FOREACH(cpu) {
  890                 er = zpcpu_get_cpu(epoch->e_pcpu_record, cpu);
  891                 KASSERT(er->er_td != td,
  892                     ("%s critical section in epoch '%s', from cpu %d",
  893                     (crit ? "exited" : "re-entered"), epoch->e_name, cpu));
  894         }
  895 }
  896 #else
  897 #define epoch_assert_nocpu(e, td) do {} while (0)
  898 #endif
  899 
  900 int
  901 in_epoch_verbose(epoch_t epoch, int dump_onfail)
  902 {
  903         epoch_record_t er;
  904         struct thread *td;
  905 
  906         if (__predict_false((epoch) == NULL))
  907                 return (0);
  908         if ((epoch->e_flags & EPOCH_PREEMPT) != 0)
  909                 return (in_epoch_verbose_preempt(epoch, dump_onfail));
  910 
  911         /*
   912          * Being in a critical section is a necessary condition for
   913          * correctly being inside a non-preemptible epoch, so if the
   914          * thread is not in one, it is definitely not in this epoch.
  915          */
  916         td = curthread;
  917         if (td->td_critnest == 0) {
  918                 epoch_assert_nocpu(epoch, td);
  919                 return (0);
  920         }
  921 
  922         /*
  923          * The current cpu is in a critical section, so the epoch record will be
  924          * stable for the rest of this function.  Knowing that the record is not
  925          * active is sufficient for knowing whether we're in this epoch or not,
  926          * since it's a pcpu record.
  927          */
  928         er = epoch_currecord(epoch);
  929         if (er->er_record.active == 0) {
  930                 epoch_assert_nocpu(epoch, td);
  931                 return (0);
  932         }
  933 
  934         MPASS(er->er_td == td);
  935         return (1);
  936 }
  937 
  938 int
  939 in_epoch(epoch_t epoch)
  940 {
  941         return (in_epoch_verbose(epoch, 0));
  942 }
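/*
 * in_epoch() is chiefly an assertion aid.  A hypothetical consumer can
 * assert that a lookup helper is only ever called from within its
 * epoch section (sketch, not part of this file):
 *
 *	static struct foo *
 *	foo_lookup(int id)
 *	{
 *		struct foo *f;
 *
 *		MPASS(in_epoch(foo_epoch));
 *		CK_LIST_FOREACH(f, &foo_list, link)
 *			if (f->id == id)
 *				return (f);
 *		return (NULL);
 *	}
 */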
  943 
  944 static void
  945 epoch_drain_cb(struct epoch_context *ctx)
  946 {
  947         struct epoch *epoch =
  948             __containerof(ctx, struct epoch_record, er_drain_ctx)->er_parent;
  949 
  950         if (atomic_fetchadd_int(&epoch->e_drain_count, -1) == 1) {
  951                 mtx_lock(&epoch->e_drain_mtx);
  952                 wakeup(epoch);
  953                 mtx_unlock(&epoch->e_drain_mtx);
  954         }
  955 }
  956 
  957 void
  958 epoch_drain_callbacks(epoch_t epoch)
  959 {
  960         epoch_record_t er;
  961         struct thread *td;
  962         int was_bound;
  963         int old_pinned;
  964         int old_cpu;
  965         int cpu;
  966 
  967         WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
  968             "epoch_drain_callbacks() may sleep!");
  969 
  970         /* too early in boot to have epoch set up */
  971         if (__predict_false(epoch == NULL))
  972                 return;
  973 #if !defined(EARLY_AP_STARTUP)
  974         if (__predict_false(inited < 2))
  975                 return;
  976 #endif
  977         DROP_GIANT();
  978 
  979         sx_xlock(&epoch->e_drain_sx);
  980         mtx_lock(&epoch->e_drain_mtx);
  981 
  982         td = curthread;
  983         thread_lock(td);
  984         old_cpu = PCPU_GET(cpuid);
  985         old_pinned = td->td_pinned;
  986         was_bound = sched_is_bound(td);
  987         sched_unbind(td);
  988         td->td_pinned = 0;
  989 
  990         CPU_FOREACH(cpu)
  991                 epoch->e_drain_count++;
  992         CPU_FOREACH(cpu) {
  993                 er = zpcpu_get_cpu(epoch->e_pcpu_record, cpu);
  994                 sched_bind(td, cpu);
  995                 epoch_call(epoch, &epoch_drain_cb, &er->er_drain_ctx);
  996         }
  997 
  998         /* restore CPU binding, if any */
  999         if (was_bound != 0) {
 1000                 sched_bind(td, old_cpu);
 1001         } else {
 1002                 /* get thread back to initial CPU, if any */
 1003                 if (old_pinned != 0)
 1004                         sched_bind(td, old_cpu);
 1005                 sched_unbind(td);
 1006         }
 1007         /* restore pinned after bind */
 1008         td->td_pinned = old_pinned;
 1009 
 1010         thread_unlock(td);
 1011 
 1012         while (epoch->e_drain_count != 0)
 1013                 msleep(epoch, &epoch->e_drain_mtx, PZERO, "EDRAIN", 0);
 1014 
 1015         mtx_unlock(&epoch->e_drain_mtx);
 1016         sx_xunlock(&epoch->e_drain_sx);
 1017 
 1018         PICKUP_GIANT();
 1019 }
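/*
 * Teardown usage sketch (hypothetical names, not part of this file):
 * code that is about to unload must ensure no deferred callbacks still
 * reference it.  epoch_free() already drains internally, so an
 * explicit epoch_drain_callbacks() is only needed when callback
 * targets are destroyed before the epoch itself:
 *
 *	epoch_drain_callbacks(foo_epoch);
 *	foo_destroy_caches();
 *	epoch_free(foo_epoch);
 */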
