The Design and Implementation of the FreeBSD Operating System, Second Edition
Now available: The Design and Implementation of the FreeBSD Operating System (Second Edition)


[ source navigation ] [ diff markup ] [ identifier search ] [ freetext search ] [ file search ] [ list types ] [ track identifier ]

FreeBSD/Linux Kernel Cross Reference
sys/kern/subr_pcu.c

Version: -  FREEBSD  -  FREEBSD-13-STABLE  -  FREEBSD-13-0  -  FREEBSD-12-STABLE  -  FREEBSD-12-0  -  FREEBSD-11-STABLE  -  FREEBSD-11-0  -  FREEBSD-10-STABLE  -  FREEBSD-10-0  -  FREEBSD-9-STABLE  -  FREEBSD-9-0  -  FREEBSD-8-STABLE  -  FREEBSD-8-0  -  FREEBSD-7-STABLE  -  FREEBSD-7-0  -  FREEBSD-6-STABLE  -  FREEBSD-6-0  -  FREEBSD-5-STABLE  -  FREEBSD-5-0  -  FREEBSD-4-STABLE  -  FREEBSD-3-STABLE  -  FREEBSD22  -  l41  -  OPENBSD  -  linux-2.6  -  MK84  -  PLAN9  -  xnu-8792 
SearchContext: -  none  -  3  -  10 

    1 /*      $NetBSD: subr_pcu.c,v 1.27 2022/10/26 23:38:57 riastradh Exp $  */
    2 
    3 /*-
    4  * Copyright (c) 2011, 2014 The NetBSD Foundation, Inc.
    5  * All rights reserved.
    6  *
    7  * This code is derived from software contributed to The NetBSD Foundation
    8  * by Mindaugas Rasiukevicius.
    9  *
   10  * Redistribution and use in source and binary forms, with or without
   11  * modification, are permitted provided that the following conditions
   12  * are met:
   13  * 1. Redistributions of source code must retain the above copyright
   14  *    notice, this list of conditions and the following disclaimer.
   15  * 2. Redistributions in binary form must reproduce the above copyright
   16  *    notice, this list of conditions and the following disclaimer in the
   17  *    documentation and/or other materials provided with the distribution.
   18  *
   19  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
   20  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
   21  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
   22  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
   23  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
   24  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
   25  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
   26  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
   27  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
   28  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
   29  * POSSIBILITY OF SUCH DAMAGE.
   30  */
   31 
   32 /*
   33  * Per CPU Unit (PCU) is an interface to manage synchronization of any
   34  * per CPU context (unit) tied with LWP context.  Typical use: FPU state.
   35  *
   36  * Concurrency notes:
   37  *
   38  *      PCU state may be loaded only by the current LWP, that is, curlwp.
   39  *      Therefore, only LWP itself can set a CPU for lwp_t::l_pcu_cpu[id].
   40  *
   41  *      There are some important rules about operation calls.  The request
   42  *      for a PCU release can be from a) the owner LWP (regardless whether
   43  *      the PCU state is on the current CPU or remote CPU) b) any other LWP
   44  *      running on that CPU (in such case, the owner LWP is on a remote CPU
   45  *      or sleeping).
   46  *
   47  *      In any case, the PCU state can *only* be changed from the current
   48  *      CPU.  If said PCU state is on the remote CPU, a cross-call will be
   49  *      sent by the owner LWP.  Therefore struct cpu_info::ci_pcu_curlwp[id]
   50  *      may only be changed by the current CPU and lwp_t::l_pcu_cpu[id] may
   51  *      only be cleared by the CPU which has the PCU state loaded.
   52  */
   53 
   54 #include <sys/cdefs.h>
   55 __KERNEL_RCSID(0, "$NetBSD: subr_pcu.c,v 1.27 2022/10/26 23:38:57 riastradh Exp $");
   56 
   57 #include <sys/param.h>
   58 #include <sys/cpu.h>
   59 #include <sys/lwp.h>
   60 #include <sys/pcu.h>
   61 #include <sys/ipi.h>
   62 
   63 #if PCU_UNIT_COUNT > 0
   64 
   65 static inline void pcu_do_op(const pcu_ops_t *, lwp_t * const, const int);
   66 static void pcu_lwp_op(const pcu_ops_t *, lwp_t *, const int);
   67 
   68 /*
   69  * Internal PCU commands for the pcu_do_op() function.
   70  */
   71 #define PCU_CMD_SAVE            0x01    /* save PCU state to the LWP */
   72 #define PCU_CMD_RELEASE         0x02    /* release PCU state on the CPU */
   73 
/*
 * Message structure for another CPU passed via ipi(9).
 */
typedef struct {
        const pcu_ops_t *pcu;           /* operations for the PCU unit */
        lwp_t *         owner;          /* LWP owning the PCU state */
        const int       flags;          /* PCU_CMD_* command(s) to run */
} pcu_ipi_msg_t;
   82 
   83 /*
   84  * PCU IPIs run at IPL_HIGH (aka IPL_PCU in this code).
   85  */
   86 #define splpcu          splhigh
   87 
   88 /*
   89  * pcu_available_p: true if lwp is allowed to use PCU state.
   90  */
   91 static inline bool __diagused
   92 pcu_available_p(struct lwp *l)
   93 {
   94 
   95         /* XXX Not sure this is safe unless l is locked!  */
   96         return (l->l_flag & (LW_SYSTEM|LW_SYSTEM_FPU)) != LW_SYSTEM;
   97 }
   98 
/*
 * pcu_switchpoint: release PCU state if the LWP is being run on another CPU.
 * This routine is called on each context switch by mi_switch().
 */
void
pcu_switchpoint(lwp_t *l)
{
        const uint32_t pcu_valid = l->l_pcu_valid;
        int s;

        KASSERTMSG(l == curlwp, "l %p != curlwp %p", l, curlwp);

        if (__predict_true(pcu_valid == 0)) {
                /* PCUs are not in use. */
                return;
        }
        s = splpcu();
        for (u_int id = 0; id < PCU_UNIT_COUNT; id++) {
                if ((pcu_valid & (1U << id)) == 0) {
                        continue;
                }
                struct cpu_info * const pcu_ci = l->l_pcu_cpu[id];
                if (pcu_ci == l->l_cpu) {
                        /* State already loaded on this CPU - keep it. */
                        KASSERT(pcu_ci->ci_pcu_curlwp[id] == l);
                        continue;
                }
                /*
                 * State is not on this CPU: invoke the MD release hook
                 * (presumably disables the unit so a later use faults
                 * into pcu_load() - MD-specific, confirm per port).
                 */
                const pcu_ops_t * const pcu = pcu_ops_md_defs[id];
                pcu->pcu_state_release(l);
        }
        splx(s);
}
  130 
  131 /*
  132  * pcu_discard_all: discard PCU state of the given LWP.
  133  *
  134  * Used by exec and LWP exit.
  135  */
  136 void
  137 pcu_discard_all(lwp_t *l)
  138 {
  139         const uint32_t pcu_valid = l->l_pcu_valid;
  140 
  141         /*
  142          * The check for LSIDL here is to catch the case where the LWP exits
  143          * due to an error in the LWP creation path before it ever runs.
  144          */
  145         KASSERT(l == curlwp || l->l_stat == LSIDL ||
  146                 (!pcu_available_p(l) && pcu_valid == 0));
  147 
  148         if (__predict_true(pcu_valid == 0)) {
  149                 /* PCUs are not in use. */
  150                 return;
  151         }
  152         for (u_int id = 0; id < PCU_UNIT_COUNT; id++) {
  153                 if ((pcu_valid & (1U << id)) == 0) {
  154                         continue;
  155                 }
  156                 if (__predict_true(l->l_pcu_cpu[id] == NULL)) {
  157                         continue;
  158                 }
  159                 const pcu_ops_t * const pcu = pcu_ops_md_defs[id];
  160                 pcu_lwp_op(pcu, l, PCU_CMD_RELEASE);
  161         }
  162         l->l_pcu_valid = 0;
  163 }
  164 
  165 /*
  166  * pcu_save_all: save PCU state of the given LWP so that eg. coredump can
  167  * examine it.
  168  */
  169 void
  170 pcu_save_all(lwp_t *l)
  171 {
  172         const uint32_t pcu_valid = l->l_pcu_valid;
  173         int flags = PCU_CMD_SAVE;
  174 
  175         /* If LW_WCORE, we are also releasing the state. */
  176         if (__predict_false(l->l_flag & LW_WCORE)) {
  177                 flags |= PCU_CMD_RELEASE;
  178         }
  179 
  180         /*
  181          * Normally we save for the current LWP, but sometimes we get called
  182          * with a different LWP (forking a system LWP or doing a coredump of
  183          * a process with multiple threads) and we need to deal with that.
  184          */
  185         KASSERT(l == curlwp || ((!pcu_available_p(l) ||
  186             (curlwp->l_proc == l->l_proc && l->l_stat == LSSUSPENDED)) &&
  187             pcu_valid == 0));
  188 
  189         if (__predict_true(pcu_valid == 0)) {
  190                 /* PCUs are not in use. */
  191                 return;
  192         }
  193         for (u_int id = 0; id < PCU_UNIT_COUNT; id++) {
  194                 if ((pcu_valid & (1U << id)) == 0) {
  195                         continue;
  196                 }
  197                 if (__predict_true(l->l_pcu_cpu[id] == NULL)) {
  198                         continue;
  199                 }
  200                 const pcu_ops_t * const pcu = pcu_ops_md_defs[id];
  201                 pcu_lwp_op(pcu, l, flags);
  202         }
  203 }
  204 
/*
 * pcu_do_op: save/release PCU state on the current CPU.
 *
 * => Must be called at IPL_PCU or from the interrupt.
 */
static inline void
pcu_do_op(const pcu_ops_t *pcu, lwp_t * const l, const int flags)
{
        struct cpu_info * const ci = curcpu();
        const u_int id = pcu->pcu_id;

        /* The owner's state must be loaded on this CPU. */
        KASSERT(l->l_pcu_cpu[id] == ci);

        if (flags & PCU_CMD_SAVE) {
                /* Save the unit's state into the LWP (MD hook). */
                pcu->pcu_state_save(l);
        }
        if (flags & PCU_CMD_RELEASE) {
                /*
                 * Drop ownership: MD release hook first, then clear both
                 * directions of the CPU <-> LWP binding (per the rules in
                 * the file header, only this CPU may do so).
                 */
                pcu->pcu_state_release(l);
                ci->ci_pcu_curlwp[id] = NULL;
                l->l_pcu_cpu[id] = NULL;
        }
}
  227 
/*
 * pcu_cpu_ipi: helper routine to call pcu_do_op() via ipi(9).
 * Runs on the CPU that holds (or held) the owner's PCU state.
 */
static void
pcu_cpu_ipi(void *arg)
{
        const pcu_ipi_msg_t *pcu_msg = arg;
        const pcu_ops_t *pcu = pcu_msg->pcu;
        const u_int id = pcu->pcu_id;
        lwp_t *l = pcu_msg->owner;

        KASSERT(pcu_msg->owner != NULL);

        if (curcpu()->ci_pcu_curlwp[id] != l) {
                /*
                 * Different ownership: another LWP raced with us and
                 * already performed the save and release.  There is
                 * nothing to do.
                 */
                KASSERT(l->l_pcu_cpu[id] == NULL);
                return;
        }
        pcu_do_op(pcu, l, pcu_msg->flags);
}
  251 
/*
 * pcu_lwp_op: perform PCU state save, release or both operations on LWP.
 *
 * Handles all three ownership cases: state on the current CPU (direct
 * call), state on a remote CPU (synchronous IPI), or no state (race lost).
 */
static void
pcu_lwp_op(const pcu_ops_t *pcu, lwp_t *l, const int flags)
{
        const u_int id = pcu->pcu_id;
        struct cpu_info *ci;
        int s;

        /*
         * Caller should have re-checked if there is any state to manage.
         * Block the interrupts and inspect again, since cross-call sent
         * by remote CPU could have changed the state.
         */
        s = splpcu();
        ci = l->l_pcu_cpu[id];
        if (ci == curcpu()) {
                /*
                 * State is on the current CPU - just perform the operations.
                 */
                KASSERTMSG(ci->ci_pcu_curlwp[id] == l,
                    "%s: cpu%u: pcu_curlwp[%u] (%p) != l (%p)",
                     __func__, cpu_index(ci), id, ci->ci_pcu_curlwp[id], l);
                pcu_do_op(pcu, l, flags);
                splx(s);
                return;
        }
        if (__predict_false(ci == NULL)) {
                /* Cross-call has won the race - no state to manage. */
                splx(s);
                return;
        }

        /*
         * The state is on the remote CPU: perform the operation(s) there.
         * Note: the IPL must be dropped before ipi_wait(), otherwise the
         * remote handler could never complete.
         */
        pcu_ipi_msg_t pcu_msg = { .pcu = pcu, .owner = l, .flags = flags };
        ipi_msg_t ipi_msg = { .func = pcu_cpu_ipi, .arg = &pcu_msg };
        ipi_unicast(&ipi_msg, ci);
        splx(s);

        /* Wait for completion. */
        ipi_wait(&ipi_msg);

        /* On release, the remote CPU must have cleared the binding. */
        KASSERT((flags & PCU_CMD_RELEASE) == 0 || l->l_pcu_cpu[id] == NULL);
}
  299 
/*
 * pcu_load: load/initialize the PCU state of current LWP on current CPU.
 */
void
pcu_load(const pcu_ops_t *pcu)
{
        lwp_t *oncpu_lwp, * const l = curlwp;
        const u_int id = pcu->pcu_id;
        struct cpu_info *ci, *curci;
        int s;

        KASSERT(!cpu_intr_p() && !cpu_softintr_p());

        s = splpcu();
        curci = curcpu();
        ci = l->l_pcu_cpu[id];

        /* Does this CPU already have our PCU state loaded? */
        if (ci == curci) {
                /*
                 * Fault reoccurred while the PCU state is loaded and
                 * therefore PCU should be re-enabled.  This happens
                 * if LWP is context switched to another CPU and then
                 * switched back to the original CPU while the state
                 * on that CPU has not been changed by other LWPs.
                 *
                 * It may also happen due to instruction "bouncing" on
                 * some architectures.
                 */
                KASSERT(curci->ci_pcu_curlwp[id] == l);
                KASSERT(pcu_valid_p(pcu, l));
                pcu->pcu_state_load(l, PCU_VALID | PCU_REENABLE);
                splx(s);
                return;
        }

        /* If PCU state of this LWP is on the remote CPU - save it there. */
        if (ci) {
                pcu_ipi_msg_t pcu_msg = { .pcu = pcu, .owner = l,
                    .flags = PCU_CMD_SAVE | PCU_CMD_RELEASE };
                ipi_msg_t ipi_msg = { .func = pcu_cpu_ipi, .arg = &pcu_msg };
                ipi_unicast(&ipi_msg, ci);
                splx(s);

                /*
                 * Wait for completion, re-enter IPL_PCU and re-fetch
                 * the current CPU.
                 */
                ipi_wait(&ipi_msg);
                s = splpcu();
                curci = curcpu();
        }
        /* After the cross-call (or from the start) no CPU holds our state. */
        KASSERT(l->l_pcu_cpu[id] == NULL);

        /* Save the PCU state on the current CPU, if there is any. */
        if ((oncpu_lwp = curci->ci_pcu_curlwp[id]) != NULL) {
                pcu_do_op(pcu, oncpu_lwp, PCU_CMD_SAVE | PCU_CMD_RELEASE);
                KASSERT(curci->ci_pcu_curlwp[id] == NULL);
        }

        /*
         * Finally, load the state for this LWP on this CPU.  Indicate to
         * the load function whether PCU state was valid before this call.
         */
        const bool valid = ((1U << id) & l->l_pcu_valid) != 0;
        pcu->pcu_state_load(l, valid ? PCU_VALID : 0);
        curci->ci_pcu_curlwp[id] = l;
        l->l_pcu_cpu[id] = curci;
        l->l_pcu_valid |= (1U << id);
        splx(s);
}
  371 
  372 /*
  373  * pcu_discard: discard the PCU state of the given LWP.  If "valid"
  374  * parameter is true, then keep considering the PCU state as valid.
  375  */
  376 void
  377 pcu_discard(const pcu_ops_t *pcu, lwp_t *l, bool valid)
  378 {
  379         const u_int id = pcu->pcu_id;
  380 
  381         KASSERT(!cpu_intr_p() && !cpu_softintr_p());
  382 
  383         if (__predict_false(valid)) {
  384                 l->l_pcu_valid |= (1U << id);
  385         } else {
  386                 l->l_pcu_valid &= ~(1U << id);
  387         }
  388         if (__predict_true(l->l_pcu_cpu[id] == NULL)) {
  389                 return;
  390         }
  391         pcu_lwp_op(pcu, l, PCU_CMD_RELEASE);
  392 }
  393 
  394 /*
  395  * pcu_save_lwp: save PCU state to the given LWP.
  396  */
  397 void
  398 pcu_save(const pcu_ops_t *pcu, lwp_t *l)
  399 {
  400         const u_int id = pcu->pcu_id;
  401 
  402         KASSERT(!cpu_intr_p() && !cpu_softintr_p());
  403 
  404         if (__predict_true(l->l_pcu_cpu[id] == NULL)) {
  405                 return;
  406         }
  407         pcu_lwp_op(pcu, l, PCU_CMD_SAVE | PCU_CMD_RELEASE);
  408 }
  409 
  410 /*
  411  * pcu_save_all_on_cpu: save all PCU states on the current CPU.
  412  */
  413 void
  414 pcu_save_all_on_cpu(void)
  415 {
  416         int s;
  417 
  418         s = splpcu();
  419         for (u_int id = 0; id < PCU_UNIT_COUNT; id++) {
  420                 const pcu_ops_t * const pcu = pcu_ops_md_defs[id];
  421                 lwp_t *l;
  422 
  423                 if ((l = curcpu()->ci_pcu_curlwp[id]) != NULL) {
  424                         pcu_do_op(pcu, l, PCU_CMD_SAVE | PCU_CMD_RELEASE);
  425                 }
  426         }
  427         splx(s);
  428 }
  429 
  430 /*
  431  * pcu_valid_p: return true if PCU state is considered valid.  Generally,
  432  * it always becomes "valid" when pcu_load() is called.
  433  */
  434 bool
  435 pcu_valid_p(const pcu_ops_t *pcu, const lwp_t *l)
  436 {
  437         const u_int id = pcu->pcu_id;
  438 
  439         return (l->l_pcu_valid & (1U << id)) != 0;
  440 }
  441 
  442 #endif /* PCU_UNIT_COUNT > 0 */

Cache object: 298b725e63f15e2cf10857ac851eef9d


[ source navigation ] [ diff markup ] [ identifier search ] [ freetext search ] [ file search ] [ list types ] [ track identifier ]


This page is part of the FreeBSD/Linux Linux Kernel Cross-Reference, and was automatically generated using a modified version of the LXR engine.