
FreeBSD/Linux Kernel Cross Reference
sys/kern/subr_epoch.c


/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2018, Matthew Macy <mmacy@freebsd.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: releng/12.0/sys/kern/subr_epoch.c 337525 2018-08-09 05:18:27Z mmacy $");

#include <sys/param.h>
#include <sys/types.h>
#include <sys/systm.h>
#include <sys/counter.h>
#include <sys/epoch.h>
#include <sys/gtaskqueue.h>
#include <sys/kernel.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/pcpu.h>
#include <sys/proc.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/sysctl.h>
#include <sys/turnstile.h>
#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>
#include <vm/uma.h>

#include <ck_epoch.h>

static MALLOC_DEFINE(M_EPOCH, "epoch", "epoch based reclamation");

/* arbitrary --- needs benchmarking */
#define MAX_ADAPTIVE_SPIN 100
#define MAX_EPOCHS 64

CTASSERT(sizeof(ck_epoch_entry_t) == sizeof(struct epoch_context));
SYSCTL_NODE(_kern, OID_AUTO, epoch, CTLFLAG_RW, 0, "epoch information");
SYSCTL_NODE(_kern_epoch, OID_AUTO, stats, CTLFLAG_RW, 0, "epoch stats");

/* Stats. */
static counter_u64_t block_count;

SYSCTL_COUNTER_U64(_kern_epoch_stats, OID_AUTO, nblocked, CTLFLAG_RW,
    &block_count, "# of times a thread was in an epoch when epoch_wait was called");
static counter_u64_t migrate_count;

SYSCTL_COUNTER_U64(_kern_epoch_stats, OID_AUTO, migrations, CTLFLAG_RW,
    &migrate_count, "# of times thread was migrated to another CPU in epoch_wait");
static counter_u64_t turnstile_count;

SYSCTL_COUNTER_U64(_kern_epoch_stats, OID_AUTO, ncontended, CTLFLAG_RW,
    &turnstile_count, "# of times a thread was blocked on a lock in an epoch during an epoch_wait");
static counter_u64_t switch_count;

SYSCTL_COUNTER_U64(_kern_epoch_stats, OID_AUTO, switches, CTLFLAG_RW,
    &switch_count, "# of times a thread voluntarily context switched in epoch_wait");
static counter_u64_t epoch_call_count;

SYSCTL_COUNTER_U64(_kern_epoch_stats, OID_AUTO, epoch_calls, CTLFLAG_RW,
    &epoch_call_count, "# of times a callback was deferred");
static counter_u64_t epoch_call_task_count;

SYSCTL_COUNTER_U64(_kern_epoch_stats, OID_AUTO, epoch_call_tasks, CTLFLAG_RW,
    &epoch_call_task_count, "# of times a callback task was run");

TAILQ_HEAD (threadlist, thread);

CK_STACK_CONTAINER(struct ck_epoch_entry, stack_entry,
    ck_epoch_entry_container)

epoch_t allepochs[MAX_EPOCHS];

DPCPU_DEFINE(struct grouptask, epoch_cb_task);
DPCPU_DEFINE(int, epoch_cb_count);

static __read_mostly int inited;
static __read_mostly int epoch_count;
__read_mostly epoch_t global_epoch;
__read_mostly epoch_t global_epoch_preempt;

static void epoch_call_task(void *context __unused);
static uma_zone_t pcpu_zone_record;

static void
epoch_init(void *arg __unused)
{
        int cpu;

        block_count = counter_u64_alloc(M_WAITOK);
        migrate_count = counter_u64_alloc(M_WAITOK);
        turnstile_count = counter_u64_alloc(M_WAITOK);
        switch_count = counter_u64_alloc(M_WAITOK);
        epoch_call_count = counter_u64_alloc(M_WAITOK);
        epoch_call_task_count = counter_u64_alloc(M_WAITOK);

        pcpu_zone_record = uma_zcreate("epoch_record pcpu", sizeof(struct epoch_record),
            NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_PCPU);
        CPU_FOREACH(cpu) {
                GROUPTASK_INIT(DPCPU_ID_PTR(cpu, epoch_cb_task), 0, epoch_call_task, NULL);
                taskqgroup_attach_cpu(qgroup_softirq, DPCPU_ID_PTR(cpu, epoch_cb_task), NULL, cpu, -1, "epoch call task");
        }
        inited = 1;
        global_epoch = epoch_alloc(0);
        global_epoch_preempt = epoch_alloc(EPOCH_PREEMPT);
}
SYSINIT(epoch, SI_SUB_TASKQ + 1, SI_ORDER_FIRST, epoch_init, NULL);

#if !defined(EARLY_AP_STARTUP)
static void
epoch_init_smp(void *dummy __unused)
{
        inited = 2;
}
SYSINIT(epoch_smp, SI_SUB_SMP + 1, SI_ORDER_FIRST, epoch_init_smp, NULL);
#endif

static void
epoch_ctor(epoch_t epoch)
{
        epoch_record_t er;
        int cpu;

        epoch->e_pcpu_record = uma_zalloc_pcpu(pcpu_zone_record, M_WAITOK);
        CPU_FOREACH(cpu) {
                er = zpcpu_get_cpu(epoch->e_pcpu_record, cpu);
                bzero(er, sizeof(*er));
                ck_epoch_register(&epoch->e_epoch, &er->er_record, NULL);
                TAILQ_INIT((struct threadlist *)(uintptr_t)&er->er_tdlist);
                er->er_cpuid = cpu;
        }
}

epoch_t
epoch_alloc(int flags)
{
        epoch_t epoch;

        if (__predict_false(!inited))
                panic("%s called too early in boot", __func__);
        epoch = malloc(sizeof(struct epoch), M_EPOCH, M_ZERO | M_WAITOK);
        ck_epoch_init(&epoch->e_epoch);
        epoch_ctor(epoch);
        MPASS(epoch_count < MAX_EPOCHS - 2);
        epoch->e_flags = flags;
        epoch->e_idx = epoch_count;
        allepochs[epoch_count++] = epoch;
        return (epoch);
}
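
/*
 * Example (illustrative sketch, not part of the original file): a
 * subsystem typically allocates one epoch at initialization and keeps
 * it for the lifetime of the data it protects.  "foo_epoch" and
 * "foo_modinit" are hypothetical names; any SYSINIT subsystem that
 * runs after SI_SUB_TASKQ (when epochs become usable) would do.
 *
 *        static epoch_t foo_epoch;
 *
 *        static void
 *        foo_modinit(void *arg __unused)
 *        {
 *                foo_epoch = epoch_alloc(EPOCH_PREEMPT);
 *        }
 *        SYSINIT(foo, SI_SUB_DRIVERS, SI_ORDER_ANY, foo_modinit, NULL);
 */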

void
epoch_free(epoch_t epoch)
{
#ifdef INVARIANTS
        struct epoch_record *er;
        int cpu;

        CPU_FOREACH(cpu) {
                er = zpcpu_get_cpu(epoch->e_pcpu_record, cpu);
                MPASS(TAILQ_EMPTY(&er->er_tdlist));
        }
#endif
        allepochs[epoch->e_idx] = NULL;
        epoch_wait(global_epoch);
        uma_zfree_pcpu(pcpu_zone_record, epoch->e_pcpu_record);
        free(epoch, M_EPOCH);
}

void
epoch_enter_preempt_KBI(epoch_t epoch, epoch_tracker_t et)
{

        epoch_enter_preempt(epoch, et);
}

void
epoch_exit_preempt_KBI(epoch_t epoch, epoch_tracker_t et)
{

        epoch_exit_preempt(epoch, et);
}

void
epoch_enter_KBI(epoch_t epoch)
{

        epoch_enter(epoch);
}

void
epoch_exit_KBI(epoch_t epoch)
{

        epoch_exit(epoch);
}
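
/*
 * Example (illustrative sketch, not part of the original file): the
 * read side brackets a lock-free lookup with an epoch section.  In a
 * preemptible epoch the caller supplies an on-stack tracker.
 * "foo_epoch", "foo_lookup", and "key" are hypothetical names.
 *
 *        struct epoch_tracker et;
 *        struct foo *obj;
 *
 *        epoch_enter_preempt(foo_epoch, &et);
 *        obj = foo_lookup(key);
 *        ... obj cannot be reclaimed until we exit the section ...
 *        epoch_exit_preempt(foo_epoch, &et);
 */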

/*
 * epoch_block_handler_preempt() is a callback from the ck code when
 * another thread is currently in an epoch section.
 */
static void
epoch_block_handler_preempt(struct ck_epoch *global __unused, ck_epoch_record_t *cr,
    void *arg __unused)
{
        epoch_record_t record;
        struct thread *td, *owner, *curwaittd;
        struct epoch_thread *tdwait;
        struct turnstile *ts;
        struct lock_object *lock;
        int spincount, gen;
        int locksheld __unused;

        record = __containerof(cr, struct epoch_record, er_record);
        td = curthread;
        locksheld = td->td_locks;
        spincount = 0;
        counter_u64_add(block_count, 1);
        /*
         * We lost a race and there are no longer any threads
         * on the CPU in an epoch section.
         */
        if (TAILQ_EMPTY(&record->er_tdlist))
                return;

        if (record->er_cpuid != curcpu) {
                /*
                 * If the head of the list is running, we can wait for it
                 * to remove itself from the list and thus save us the
                 * overhead of a migration.
                 */
                gen = record->er_gen;
                thread_unlock(td);
                /*
                 * We can't actually check if the waiting thread is running
                 * so we simply poll for it to exit before giving up and
                 * migrating.
                 */
                do {
                        cpu_spinwait();
                } while (!TAILQ_EMPTY(&record->er_tdlist) &&
                    gen == record->er_gen &&
                    spincount++ < MAX_ADAPTIVE_SPIN);
                thread_lock(td);
                /*
                 * If the generation has changed, we can return and poll
                 * again; otherwise we need to migrate.
                 */
                if (gen != record->er_gen)
                        return;
                /*
                 * Being on the same CPU as that of the record on which
                 * we need to wait allows us access to the thread
                 * list associated with that CPU. We can then examine the
                 * oldest thread in the queue and wait on its turnstile
                 * until it resumes and so on until a grace period
                 * elapses.
                 */
                counter_u64_add(migrate_count, 1);
                sched_bind(td, record->er_cpuid);
                /*
                 * At this point we need to return to the ck code
                 * to scan to see if a grace period has elapsed.
                 * We can't move on to check the thread list, because
                 * in the meantime new threads may have arrived that
                 * in fact belong to a different epoch.
                 */
                return;
        }
        /*
         * Try to find a thread in an epoch section on this CPU
         * waiting on a turnstile. Otherwise find the lowest
         * priority thread (highest prio value) and drop our priority
         * to match to allow it to run.
         */
        TAILQ_FOREACH(tdwait, &record->er_tdlist, et_link) {
                /*
                 * Propagate our priority to any other waiters to prevent us
                 * from starving them. They will have their original priority
                 * restored on exit from epoch_wait().
                 */
                curwaittd = tdwait->et_td;
                if (!TD_IS_INHIBITED(curwaittd) && curwaittd->td_priority > td->td_priority) {
                        critical_enter();
                        thread_unlock(td);
                        thread_lock(curwaittd);
                        sched_prio(curwaittd, td->td_priority);
                        thread_unlock(curwaittd);
                        thread_lock(td);
                        critical_exit();
                }
                if (TD_IS_INHIBITED(curwaittd) && TD_ON_LOCK(curwaittd) &&
                    ((ts = curwaittd->td_blocked) != NULL)) {
                        /*
                         * We unlock td to allow turnstile_wait() to reacquire
                         * the thread lock. Before unlocking it we enter a
                         * critical section to prevent preemption after we
                         * reenable interrupts by dropping the thread lock, in
                         * order to prevent curwaittd from getting to run.
                         */
                        critical_enter();
                        thread_unlock(td);
                        owner = turnstile_lock(ts, &lock);
                        /*
                         * A non-NULL owner pointer indicates that the lock
                         * succeeded. Only if we hold the lock and the
                         * turnstile we locked is still the one that curwaittd
                         * is blocked on can we continue. Otherwise the
                         * turnstile pointer has been changed out from
                         * underneath us, as in the case where the lock holder
                         * has signalled curwaittd, and we need to move on.
                         */
                        if (owner != NULL && ts == curwaittd->td_blocked) {
                                MPASS(TD_IS_INHIBITED(curwaittd) && TD_ON_LOCK(curwaittd));
                                critical_exit();
                                turnstile_wait(ts, owner, curwaittd->td_tsqueue);
                                counter_u64_add(turnstile_count, 1);
                                thread_lock(td);
                                return;
                        } else if (owner != NULL)
                                turnstile_unlock(ts, lock);
                        thread_lock(td);
                        critical_exit();
                        KASSERT(td->td_locks == locksheld,
                            ("%d extra locks held", td->td_locks - locksheld));
                }
        }
        /*
         * We didn't find any threads actually blocked on a lock
         * so we have nothing to do except context switch away.
         */
        counter_u64_add(switch_count, 1);
        mi_switch(SW_VOL | SWT_RELINQUISH, NULL);

        /*
         * Release the thread lock while yielding to
         * allow other threads to acquire the lock
         * pointed to by TDQ_LOCKPTR(td). Otherwise a
         * deadlock-like situation might occur. (HPS)
         */
        thread_unlock(td);
        thread_lock(td);
}

void
epoch_wait_preempt(epoch_t epoch)
{
        struct thread *td;
        int was_bound;
        int old_cpu;
        int old_pinned;
        u_char old_prio;
        int locks __unused;

        MPASS(cold || epoch != NULL);
        INIT_CHECK(epoch);
        td = curthread;
#ifdef INVARIANTS
        locks = curthread->td_locks;
        MPASS(epoch->e_flags & EPOCH_PREEMPT);
        if ((epoch->e_flags & EPOCH_LOCKED) == 0)
                WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
                    "epoch_wait() can be long running");
        KASSERT(!in_epoch(epoch),
            ("epoch_wait_preempt() called in the middle "
            "of an epoch section of the same epoch"));
#endif
        thread_lock(td);
        DROP_GIANT();

        old_cpu = PCPU_GET(cpuid);
        old_pinned = td->td_pinned;
        old_prio = td->td_priority;
        was_bound = sched_is_bound(td);
        sched_unbind(td);
        td->td_pinned = 0;
        sched_bind(td, old_cpu);

        ck_epoch_synchronize_wait(&epoch->e_epoch, epoch_block_handler_preempt, NULL);

        /* restore CPU binding, if any */
        if (was_bound != 0) {
                sched_bind(td, old_cpu);
        } else {
                /* get thread back to initial CPU, if any */
                if (old_pinned != 0)
                        sched_bind(td, old_cpu);
                sched_unbind(td);
        }
        /* restore pinned after bind */
        td->td_pinned = old_pinned;

        /* restore thread priority */
        sched_prio(td, old_prio);
        thread_unlock(td);
        PICKUP_GIANT();
        KASSERT(td->td_locks == locks,
            ("%d residual locks held", td->td_locks - locks));
}
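
/*
 * Example (illustrative sketch, not part of the original file): the
 * classic writer pattern unlinks an object under the writer lock,
 * waits for a grace period, and only then frees it.  Readers that
 * entered the epoch before the unlink may still be dereferencing the
 * object; epoch_wait_preempt() returns once they have all exited.
 * "foo_lock", "obj", "link", and M_FOO are hypothetical; the list
 * must be safe for concurrent lock-free readers, e.g. the CK_LIST
 * variants from <ck_queue.h>.
 *
 *        mtx_lock(&foo_lock);
 *        CK_LIST_REMOVE(obj, link);
 *        mtx_unlock(&foo_lock);
 *        epoch_wait_preempt(foo_epoch);
 *        free(obj, M_FOO);
 */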

static void
epoch_block_handler(struct ck_epoch *g __unused, ck_epoch_record_t *c __unused,
    void *arg __unused)
{
        cpu_spinwait();
}

void
epoch_wait(epoch_t epoch)
{

        MPASS(cold || epoch != NULL);
        INIT_CHECK(epoch);
        MPASS(epoch->e_flags == 0);
        critical_enter();
        ck_epoch_synchronize_wait(&epoch->e_epoch, epoch_block_handler, NULL);
        critical_exit();
}

void
epoch_call(epoch_t epoch, epoch_context_t ctx, void (*callback)(epoch_context_t))
{
        epoch_record_t er;
        ck_epoch_entry_t *cb;

        cb = (void *)ctx;

        MPASS(callback);
        /* too early in boot to have epoch set up */
        if (__predict_false(epoch == NULL))
                goto boottime;
#if !defined(EARLY_AP_STARTUP)
        if (__predict_false(inited < 2))
                goto boottime;
#endif

        critical_enter();
        *DPCPU_PTR(epoch_cb_count) += 1;
        er = epoch_currecord(epoch);
        ck_epoch_call(&er->er_record, cb, (ck_epoch_cb_t *)callback);
        critical_exit();
        return;
boottime:
        callback(ctx);
}
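
/*
 * Example (illustrative sketch, not part of the original file): the
 * non-blocking alternative to epoch_wait() embeds a struct
 * epoch_context in the object to be freed and recovers the object in
 * the callback with __containerof().  "struct foo", "foo_destroy",
 * and M_FOO are hypothetical.
 *
 *        struct foo {
 *                ...
 *                struct epoch_context f_ctx;
 *        };
 *
 *        static void
 *        foo_destroy(epoch_context_t ctx)
 *        {
 *                struct foo *f;
 *
 *                f = __containerof(ctx, struct foo, f_ctx);
 *                free(f, M_FOO);
 *        }
 *
 *        ...
 *        epoch_call(foo_epoch, &f->f_ctx, foo_destroy);
 */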

static void
epoch_call_task(void *arg __unused)
{
        ck_stack_entry_t *cursor, *head, *next;
        ck_epoch_record_t *record;
        epoch_record_t er;
        epoch_t epoch;
        ck_stack_t cb_stack;
        int i, npending, total;

        ck_stack_init(&cb_stack);
        critical_enter();
        epoch_enter(global_epoch);
        for (total = i = 0; i < epoch_count; i++) {
                if (__predict_false((epoch = allepochs[i]) == NULL))
                        continue;
                er = epoch_currecord(epoch);
                record = &er->er_record;
                if ((npending = record->n_pending) == 0)
                        continue;
                ck_epoch_poll_deferred(record, &cb_stack);
                total += npending - record->n_pending;
        }
        epoch_exit(global_epoch);
        *DPCPU_PTR(epoch_cb_count) -= total;
        critical_exit();

        counter_u64_add(epoch_call_count, total);
        counter_u64_add(epoch_call_task_count, 1);

        head = ck_stack_batch_pop_npsc(&cb_stack);
        for (cursor = head; cursor != NULL; cursor = next) {
                struct ck_epoch_entry *entry =
                    ck_epoch_entry_container(cursor);

                next = CK_STACK_NEXT(cursor);
                entry->function(entry);
        }
}

int
in_epoch_verbose(epoch_t epoch, int dump_onfail)
{
        struct epoch_thread *tdwait;
        struct thread *td;
        epoch_record_t er;

        td = curthread;
        if (td->td_epochnest == 0)
                return (0);
        if (__predict_false((epoch) == NULL))
                return (0);
        critical_enter();
        er = epoch_currecord(epoch);
        TAILQ_FOREACH(tdwait, &er->er_tdlist, et_link)
                if (tdwait->et_td == td) {
                        critical_exit();
                        return (1);
                }
#ifdef INVARIANTS
        if (dump_onfail) {
                MPASS(td->td_pinned);
                printf("cpu: %d id: %d\n", curcpu, td->td_tid);
                TAILQ_FOREACH(tdwait, &er->er_tdlist, et_link)
                        printf("td_tid: %d ", tdwait->et_td->td_tid);
                printf("\n");
        }
#endif
        critical_exit();
        return (0);
}

int
in_epoch(epoch_t epoch)
{
        return (in_epoch_verbose(epoch, 0));
}
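
/*
 * Example (illustrative sketch, not part of the original file):
 * in_epoch() is typically used in assertions to document that a
 * function must be called from within a section of the given epoch
 * ("foo_epoch" is hypothetical):
 *
 *        MPASS(in_epoch(foo_epoch));
 */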

void
epoch_adjust_prio(struct thread *td, u_char prio)
{
        thread_lock(td);
        sched_prio(td, prio);
        thread_unlock(td);
}
