FreeBSD/Linux Kernel Cross Reference
sys/kern/kern_cpu.c


/*      $NetBSD: kern_cpu.c,v 1.36.4.2 2008/11/13 00:04:07 snj Exp $    */

/*-
 * Copyright (c) 2007, 2008 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*-
 * Copyright (c)2007 YAMAMOTO Takashi,
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: kern_cpu.c,v 1.36.4.2 2008/11/13 00:04:07 snj Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/idle.h>
#include <sys/sched.h>
#include <sys/intr.h>
#include <sys/conf.h>
#include <sys/cpu.h>
#include <sys/cpuio.h>
#include <sys/proc.h>
#include <sys/percpu.h>
#include <sys/kernel.h>
#include <sys/kauth.h>
#include <sys/xcall.h>
#include <sys/pool.h>
#include <sys/kmem.h>
#include <sys/select.h>
#include <sys/namei.h>
#include <sys/callout.h>

#include <uvm/uvm_extern.h>

void    cpuctlattach(int);

static void     cpu_xc_online(struct cpu_info *);
static void     cpu_xc_offline(struct cpu_info *);

dev_type_ioctl(cpuctl_ioctl);

const struct cdevsw cpuctl_cdevsw = {
        nullopen, nullclose, nullread, nullwrite, cpuctl_ioctl,
        nullstop, notty, nopoll, nommap, nokqfilter,
        D_OTHER | D_MPSAFE
};

kmutex_t cpu_lock;
int     ncpu;
int     ncpuonline;
bool    mp_online;
struct  cpuqueue cpu_queue = CIRCLEQ_HEAD_INITIALIZER(cpu_queue);

static struct cpu_info *cpu_infos[MAXCPUS];

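/*
 * Attach the machine-independent state for a newly discovered CPU:
 * record it in cpu_infos[] and cpu_queue, create its idle LWP, and
 * initialize the per-CPU subsystems (percpu, softint, callout, xcall,
 * pool cache, select, namei cache).
 */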
int
mi_cpu_attach(struct cpu_info *ci)
{
        int error;

        ci->ci_index = ncpu;
        cpu_infos[cpu_index(ci)] = ci;
        CIRCLEQ_INSERT_TAIL(&cpu_queue, ci, ci_data.cpu_qchain);
        TAILQ_INIT(&ci->ci_data.cpu_ld_locks);
        __cpu_simple_lock_init(&ci->ci_data.cpu_ld_lock);

        sched_cpuattach(ci);

        error = create_idle_lwp(ci);
        if (error != 0) {
                /* XXX revert sched_cpuattach */
                return error;
        }

        if (ci == curcpu())
                ci->ci_data.cpu_onproc = curlwp;
        else
                ci->ci_data.cpu_onproc = ci->ci_data.cpu_idlelwp;

        percpu_init_cpu(ci);
        softint_init(ci);
        callout_init_cpu(ci);
        xc_init_cpu(ci);
        pool_cache_cpu_init(ci);
        selsysinit(ci);
        cache_cpu_init(ci);
        TAILQ_INIT(&ci->ci_data.cpu_biodone);
        ncpu++;
        ncpuonline++;

        return 0;
}

void
cpuctlattach(int dummy)
{

}

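/*
 * Handle ioctls on the cpuctl device: set or query the online state of
 * a single CPU, map a sequential index to a CPU id, or return the
 * number of attached CPUs.  All operations run under cpu_lock.
 */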
int
cpuctl_ioctl(dev_t dev, u_long cmd, void *data, int flag, lwp_t *l)
{
        CPU_INFO_ITERATOR cii;
        cpustate_t *cs;
        struct cpu_info *ci;
        int error, i;
        u_int id;

        error = 0;

        mutex_enter(&cpu_lock);
        switch (cmd) {
        case IOC_CPU_SETSTATE:
                cs = data;
                error = kauth_authorize_system(l->l_cred,
                    KAUTH_SYSTEM_CPU, KAUTH_REQ_SYSTEM_CPU_SETSTATE, cs, NULL,
                    NULL);
                if (error != 0)
                        break;
                if (cs->cs_id >= __arraycount(cpu_infos) ||
                    (ci = cpu_lookup(cs->cs_id)) == NULL) {
                        error = ESRCH;
                        break;
                }
                if (!cs->cs_intr) {
                        error = EOPNOTSUPP;
                        break;
                }
                error = cpu_setstate(ci, cs->cs_online);
                break;

        case IOC_CPU_GETSTATE:
                cs = data;
                id = cs->cs_id;
                memset(cs, 0, sizeof(*cs));
                cs->cs_id = id;
                if (cs->cs_id >= __arraycount(cpu_infos) ||
                    (ci = cpu_lookup(id)) == NULL) {
                        error = ESRCH;
                        break;
                }
                if ((ci->ci_schedstate.spc_flags & SPCF_OFFLINE) != 0)
                        cs->cs_online = false;
                else
                        cs->cs_online = true;
                cs->cs_intr = true;
                cs->cs_lastmod = ci->ci_schedstate.spc_lastmod;
                break;

        case IOC_CPU_MAPID:
                i = 0;
                for (CPU_INFO_FOREACH(cii, ci)) {
                        if (i++ == *(int *)data)
                                break;
                }
                if (ci == NULL)
                        error = ESRCH;
                else
                        *(int *)data = cpu_index(ci);
                break;

        case IOC_CPU_GETCOUNT:
                *(int *)data = ncpu;
                break;

        default:
                error = ENOTTY;
                break;
        }
        mutex_exit(&cpu_lock);

        return error;
}

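/*
 * Look up a CPU by index; returns NULL if no CPU is attached at idx.
 */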
struct cpu_info *
cpu_lookup(u_int idx)
{
        struct cpu_info *ci = cpu_infos[idx];

        KASSERT(idx < __arraycount(cpu_infos));
        KASSERT(ci == NULL || cpu_index(ci) == idx);

        return ci;
}

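/*
 * Take a CPU offline.  Runs in the xcall thread on the target CPU:
 * mark the CPU offline, then migrate all runnable non-bound LWPs to
 * another online CPU, honouring any affinity mask.  On failure the
 * SPCF_OFFLINE flag is cleared again and cpu_setstate() reports EBUSY.
 */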
static void
cpu_xc_offline(struct cpu_info *ci)
{
        struct schedstate_percpu *spc, *mspc = NULL;
        struct cpu_info *target_ci;
        struct lwp *l;
        CPU_INFO_ITERATOR cii;
        int s;

        /*
         * Thread which sent unicast (separate context) is holding
         * the cpu_lock for us.
         */
        spc = &ci->ci_schedstate;
        s = splsched();
        spc->spc_flags |= SPCF_OFFLINE;
        splx(s);

        /* Take the first available CPU for the migration */
        for (CPU_INFO_FOREACH(cii, target_ci)) {
                mspc = &target_ci->ci_schedstate;
                if ((mspc->spc_flags & SPCF_OFFLINE) == 0)
                        break;
        }
        KASSERT(target_ci != NULL);

        /*
         * Migrate all non-bound threads to the other CPU.  Note that this
         * runs from the xcall thread, thus handling of LSONPROC is not needed.
         */
        mutex_enter(proc_lock);
        LIST_FOREACH(l, &alllwp, l_list) {
                struct cpu_info *mci;

                lwp_lock(l);
                if (l->l_cpu != ci || (l->l_pflag & (LP_BOUND | LP_INTR))) {
                        lwp_unlock(l);
                        continue;
                }
                /* Normal case - no affinity */
                if ((l->l_flag & LW_AFFINITY) == 0) {
                        lwp_migrate(l, target_ci);
                        continue;
                }
                /* Affinity is set, find an online CPU in the set */
                KASSERT(l->l_affinity != NULL);
                for (CPU_INFO_FOREACH(cii, mci)) {
                        mspc = &mci->ci_schedstate;
                        if ((mspc->spc_flags & SPCF_OFFLINE) == 0 &&
                            kcpuset_isset(cpu_index(mci), l->l_affinity))
                                break;
                }
                if (mci == NULL) {
                        lwp_unlock(l);
                        mutex_exit(proc_lock);
                        goto fail;
                }
                lwp_migrate(l, mci);
        }
        mutex_exit(proc_lock);

#ifdef __HAVE_MD_CPU_OFFLINE
        cpu_offline_md();
#endif
        return;
fail:
        /* Just unset the SPCF_OFFLINE flag, caller will check */
        s = splsched();
        spc->spc_flags &= ~SPCF_OFFLINE;
        splx(s);
}

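/*
 * Bring a CPU back online by clearing SPCF_OFFLINE.  Like
 * cpu_xc_offline(), this runs in the target CPU's xcall thread.
 */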
static void
cpu_xc_online(struct cpu_info *ci)
{
        struct schedstate_percpu *spc;
        int s;

        spc = &ci->ci_schedstate;
        s = splsched();
        spc->spc_flags &= ~SPCF_OFFLINE;
        splx(s);
}

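/*
 * Set the online state of a CPU.  Refuses to take the last online CPU
 * of a processor set offline, and performs the actual state change via
 * a cross-call to the target CPU.  Called with cpu_lock held.
 */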
int
cpu_setstate(struct cpu_info *ci, bool online)
{
        struct schedstate_percpu *spc;
        CPU_INFO_ITERATOR cii;
        struct cpu_info *ci2;
        uint64_t where;
        xcfunc_t func;
        int nonline;

        spc = &ci->ci_schedstate;

        KASSERT(mutex_owned(&cpu_lock));

        if (online) {
                if ((spc->spc_flags & SPCF_OFFLINE) == 0)
                        return 0;
                func = (xcfunc_t)cpu_xc_online;
                ncpuonline++;
        } else {
                if ((spc->spc_flags & SPCF_OFFLINE) != 0)
                        return 0;
                nonline = 0;
                /*
                 * Ensure that at least one CPU within the processor set
                 * stays online.  Revisit this later.
                 */
                for (CPU_INFO_FOREACH(cii, ci2)) {
                        if ((ci2->ci_schedstate.spc_flags & SPCF_OFFLINE) != 0)
                                continue;
                        if (ci2->ci_schedstate.spc_psid != spc->spc_psid)
                                continue;
                        nonline++;
                }
                if (nonline == 1)
                        return EBUSY;
                func = (xcfunc_t)cpu_xc_offline;
                ncpuonline--;
        }

        where = xc_unicast(0, func, ci, NULL, ci);
        xc_wait(where);
        if (online) {
                KASSERT((spc->spc_flags & SPCF_OFFLINE) == 0);
        } else if ((spc->spc_flags & SPCF_OFFLINE) == 0) {
                /* If was not set offline, then it is busy */
                return EBUSY;
        }

        spc->spc_lastmod = time_second;
        return 0;
}
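
The ioctls handled above are normally driven from userland through the
cpuctl device; NetBSD's cpuctl(8) utility is the usual consumer.  Below
is a minimal, hypothetical sketch of such a caller.  It assumes the
NetBSD-style /dev/cpuctl device node and the cpustate_t/IOC_CPU_*
definitions from <sys/cpuio.h>, and abbreviates error handling.

#include <sys/ioctl.h>
#include <sys/cpuio.h>

#include <err.h>
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
        cpustate_t cs;
        int fd, i, id, ncpu;

        fd = open("/dev/cpuctl", O_RDONLY);     /* assumed device path */
        if (fd == -1)
                err(1, "open /dev/cpuctl");

        /* IOC_CPU_GETCOUNT: the kernel writes ncpu into an int. */
        if (ioctl(fd, IOC_CPU_GETCOUNT, &ncpu) == -1)
                err(1, "IOC_CPU_GETCOUNT");

        for (i = 0; i < ncpu; i++) {
                /* IOC_CPU_MAPID: sequential index in, CPU id out. */
                id = i;
                if (ioctl(fd, IOC_CPU_MAPID, &id) == -1)
                        err(1, "IOC_CPU_MAPID");

                /* IOC_CPU_GETSTATE: fetch that CPU's online state. */
                cs.cs_id = id;
                if (ioctl(fd, IOC_CPU_GETSTATE, &cs) == -1)
                        err(1, "IOC_CPU_GETSTATE");
                printf("cpu%d: %s\n", id,
                    cs.cs_online ? "online" : "offline");
        }

        /*
         * Offlining a CPU is the mirror image and needs privilege;
         * cs_intr must be true or the kernel returns EOPNOTSUPP:
         *   cs.cs_online = false; cs.cs_intr = true;
         *   ioctl(fd, IOC_CPU_SETSTATE, &cs);
         */
        close(fd);
        return 0;
}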
