FreeBSD/Linux Kernel Cross Reference
sys/kern/kern_cpu.c

/*      $NetBSD: kern_cpu.c,v 1.93 2020/10/08 09:16:13 rin Exp $        */

/*-
 * Copyright (c) 2007, 2008, 2009, 2010, 2012, 2019 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*-
 * Copyright (c)2007 YAMAMOTO Takashi,
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * CPU related routines not shared with rump.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: kern_cpu.c,v 1.93 2020/10/08 09:16:13 rin Exp $");

#ifdef _KERNEL_OPT
#include "opt_cpu_ucode.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/idle.h>
#include <sys/sched.h>
#include <sys/intr.h>
#include <sys/conf.h>
#include <sys/cpu.h>
#include <sys/cpuio.h>
#include <sys/proc.h>
#include <sys/percpu.h>
#include <sys/kernel.h>
#include <sys/kauth.h>
#include <sys/xcall.h>
#include <sys/pool.h>
#include <sys/kmem.h>
#include <sys/select.h>
#include <sys/namei.h>
#include <sys/callout.h>
#include <sys/pcu.h>

#include <uvm/uvm_extern.h>

#include "ioconf.h"

/*
 * If the port has stated that cpu_data is the first thing in cpu_info,
 * verify that the claim is true. This will prevent them from getting out
 * of sync.
 */
#ifdef __HAVE_CPU_DATA_FIRST
CTASSERT(offsetof(struct cpu_info, ci_data) == 0);
#else
CTASSERT(offsetof(struct cpu_info, ci_data) != 0);
#endif

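/*
 * Hook through which compat code can handle cpuctl ioctls not recognized
 * below; the default handler simply returns ENOSYS.
 */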
int (*compat_cpuctl_ioctl)(struct lwp *, u_long, void *) = (void *)enosys;

static void     cpu_xc_online(struct cpu_info *, void *);
static void     cpu_xc_offline(struct cpu_info *, void *);

dev_type_ioctl(cpuctl_ioctl);

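/*
 * Character device switch for the cpuctl device: only ioctl does real
 * work, all other entry points are null or unsupported stubs.
 */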
const struct cdevsw cpuctl_cdevsw = {
        .d_open = nullopen,
        .d_close = nullclose,
        .d_read = nullread,
        .d_write = nullwrite,
        .d_ioctl = cpuctl_ioctl,
        .d_stop = nullstop,
        .d_tty = notty,
        .d_poll = nopoll,
        .d_mmap = nommap,
        .d_kqfilter = nokqfilter,
        .d_discard = nodiscard,
        .d_flag = D_OTHER | D_MPSAFE
};

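/*
 * mi_cpu_attach: machine-independent attachment of a CPU.  Assigns the
 * cpu_info an index, records it in cpu_infos, creates its idle LWP and
 * initializes the per-CPU subsystems (percpu, softint, callout, xcall,
 * pool cache, select, namei cache) before counting it as online.
 */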
int
mi_cpu_attach(struct cpu_info *ci)
{
        int error;

        KASSERT(maxcpus > 0);

        if ((ci->ci_index = ncpu) >= maxcpus)
                panic("Too many CPUs.  Increase MAXCPUS?");
        kcpuset_set(kcpuset_attached, cpu_index(ci));

        /*
         * Create a convenience cpuset of just ourselves.
         */
        kcpuset_create(&ci->ci_data.cpu_kcpuset, true);
        kcpuset_set(ci->ci_data.cpu_kcpuset, cpu_index(ci));

        TAILQ_INIT(&ci->ci_data.cpu_ld_locks);
        __cpu_simple_lock_init(&ci->ci_data.cpu_ld_lock);

        /* This is useful for, e.g., per-cpu evcnt */
        snprintf(ci->ci_data.cpu_name, sizeof(ci->ci_data.cpu_name), "cpu%d",
            cpu_index(ci));

        if (__predict_false(cpu_infos == NULL)) {
                size_t ci_bufsize = (maxcpus + 1) * sizeof(struct cpu_info *);
                cpu_infos = kmem_zalloc(ci_bufsize, KM_SLEEP);
        }
        cpu_infos[cpu_index(ci)] = ci;

        sched_cpuattach(ci);

        error = create_idle_lwp(ci);
        if (error != 0) {
                /* XXX revert sched_cpuattach */
                return error;
        }

        if (ci == curcpu())
                ci->ci_onproc = curlwp;
        else
                ci->ci_onproc = ci->ci_data.cpu_idlelwp;

        percpu_init_cpu(ci);
        softint_init(ci);
        callout_init_cpu(ci);
        xc_init_cpu(ci);
        pool_cache_cpu_init(ci);
        selsysinit(ci);
        cache_cpu_init(ci);
        TAILQ_INIT(&ci->ci_data.cpu_biodone);
        ncpu++;
        ncpuonline++;

        return 0;
}

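/*
 * cpuctlattach: attach routine for the cpuctl device (see ioconf).  By
 * the time it runs, the boot CPU must already have been attached, so
 * cpu_infos is expected to exist.
 */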
void
cpuctlattach(int dummy __unused)
{

        KASSERT(cpu_infos != NULL);
}

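/*
 * cpuctl_ioctl: handle ioctls on the cpuctl device.  All requests are
 * serialized by cpu_lock; changing CPU state and applying microcode
 * require the corresponding kauth authorizations.
 *
 * Illustrative sketch only (not part of this file): a userland consumer
 * such as cpuctl(8) would typically drive this interface along these
 * lines, assuming the device node is /dev/cpuctl:
 *
 *      int fd = open("/dev/cpuctl", O_RDWR);
 *      int n;
 *      ioctl(fd, IOC_CPU_GETCOUNT, &n);        (number of attached CPUs)
 *      cpustate_t cs = { .cs_id = 1 };
 *      ioctl(fd, IOC_CPU_GETSTATE, &cs);       (query online/intr state)
 *      cs.cs_online = false;
 *      ioctl(fd, IOC_CPU_SETSTATE, &cs);       (take CPU 1 offline, privileged)
 */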
int
cpuctl_ioctl(dev_t dev, u_long cmd, void *data, int flag, lwp_t *l)
{
        CPU_INFO_ITERATOR cii;
        cpustate_t *cs;
        struct cpu_info *ci;
        int error, i;
        u_int id;

        error = 0;

        mutex_enter(&cpu_lock);
        switch (cmd) {
        case IOC_CPU_SETSTATE:
                cs = data;
                error = kauth_authorize_system(l->l_cred,
                    KAUTH_SYSTEM_CPU, KAUTH_REQ_SYSTEM_CPU_SETSTATE, cs, NULL,
                    NULL);
                if (error != 0)
                        break;
                if (cs->cs_id >= maxcpus ||
                    (ci = cpu_lookup(cs->cs_id)) == NULL) {
                        error = ESRCH;
                        break;
                }
                cpu_setintr(ci, cs->cs_intr);   /* XXX neglect errors */
                error = cpu_setstate(ci, cs->cs_online);
                break;

        case IOC_CPU_GETSTATE:
                cs = data;
                id = cs->cs_id;
                memset(cs, 0, sizeof(*cs));
                cs->cs_id = id;
                if (cs->cs_id >= maxcpus ||
                    (ci = cpu_lookup(id)) == NULL) {
                        error = ESRCH;
                        break;
                }
                if ((ci->ci_schedstate.spc_flags & SPCF_OFFLINE) != 0)
                        cs->cs_online = false;
                else
                        cs->cs_online = true;
                if ((ci->ci_schedstate.spc_flags & SPCF_NOINTR) != 0)
                        cs->cs_intr = false;
                else
                        cs->cs_intr = true;
                cs->cs_lastmod = (int32_t)ci->ci_schedstate.spc_lastmod;
                cs->cs_lastmodhi = (int32_t)
                    (ci->ci_schedstate.spc_lastmod >> 32);
                cs->cs_intrcnt = cpu_intr_count(ci) + 1;
                cs->cs_hwid = ci->ci_cpuid;
                break;

        case IOC_CPU_MAPID:
                i = 0;
                for (CPU_INFO_FOREACH(cii, ci)) {
                        if (i++ == *(int *)data)
                                break;
                }
                if (ci == NULL)
                        error = ESRCH;
                else
                        *(int *)data = cpu_index(ci);
                break;

        case IOC_CPU_GETCOUNT:
                *(int *)data = ncpu;
                break;

#ifdef CPU_UCODE
        case IOC_CPU_UCODE_GET_VERSION:
                error = cpu_ucode_get_version((struct cpu_ucode_version *)data);
                break;

        case IOC_CPU_UCODE_APPLY:
                error = kauth_authorize_machdep(l->l_cred,
                    KAUTH_MACHDEP_CPU_UCODE_APPLY,
                    NULL, NULL, NULL, NULL);
                if (error != 0)
                        break;
                error = cpu_ucode_apply((const struct cpu_ucode *)data);
                break;
#endif

        default:
                error = (*compat_cpuctl_ioctl)(l, cmd, data);
                break;
        }
        mutex_exit(&cpu_lock);

        return error;
}

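/*
 * cpu_lookup: map a CPU index to its cpu_info; returns NULL if no CPU
 * is attached at that index.
 */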
struct cpu_info *
cpu_lookup(u_int idx)
{
        struct cpu_info *ci;

        /*
         * cpu_infos is a NULL terminated array of MAXCPUS + 1 entries,
         * so an index of MAXCPUS here is ok.  See mi_cpu_attach.
         */
        KASSERT(idx <= maxcpus);

        if (__predict_false(cpu_infos == NULL)) {
                KASSERT(idx == 0);
                return curcpu();
        }

        ci = cpu_infos[idx];
        KASSERT(ci == NULL || cpu_index(ci) == idx);
        KASSERTMSG(idx < maxcpus || ci == NULL, "idx %d ci %p", idx, ci);

        return ci;
}

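/*
 * cpu_xc_offline: cross-call handler that marks a CPU offline and
 * migrates all unbound, non-interrupt LWPs to another online CPU,
 * honouring affinity masks where they are set.
 */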
static void
cpu_xc_offline(struct cpu_info *ci, void *unused)
{
        struct schedstate_percpu *spc, *mspc = NULL;
        struct cpu_info *target_ci;
        struct lwp *l;
        CPU_INFO_ITERATOR cii;
        int s;

        /*
         * Thread that made the cross call (separate context) holds
         * cpu_lock on our behalf.
         */
        spc = &ci->ci_schedstate;
        s = splsched();
        spc->spc_flags |= SPCF_OFFLINE;
        splx(s);

        /* Take the first available CPU for the migration. */
        for (CPU_INFO_FOREACH(cii, target_ci)) {
                mspc = &target_ci->ci_schedstate;
                if ((mspc->spc_flags & SPCF_OFFLINE) == 0)
                        break;
        }
        KASSERT(target_ci != NULL);

        /*
         * Migrate all non-bound threads to the other CPU.  Note that this
         * runs from the xcall thread, thus handling of LSONPROC is not needed.
         */
        mutex_enter(&proc_lock);
        LIST_FOREACH(l, &alllwp, l_list) {
                struct cpu_info *mci;

                lwp_lock(l);
                if (l->l_cpu != ci || (l->l_pflag & (LP_BOUND | LP_INTR))) {
                        lwp_unlock(l);
                        continue;
                }
                /* Regular case - no affinity. */
                if (l->l_affinity == NULL) {
                        lwp_migrate(l, target_ci);
                        continue;
                }
                /* Affinity is set, find an online CPU in the set. */
                for (CPU_INFO_FOREACH(cii, mci)) {
                        mspc = &mci->ci_schedstate;
                        if ((mspc->spc_flags & SPCF_OFFLINE) == 0 &&
                            kcpuset_isset(l->l_affinity, cpu_index(mci)))
                                break;
                }
                if (mci == NULL) {
                        lwp_unlock(l);
                        mutex_exit(&proc_lock);
                        goto fail;
                }
                lwp_migrate(l, mci);
        }
        mutex_exit(&proc_lock);

#if PCU_UNIT_COUNT > 0
        pcu_save_all_on_cpu();
#endif

#ifdef __HAVE_MD_CPU_OFFLINE
        cpu_offline_md();
#endif
        return;
fail:
        /* Just unset the SPCF_OFFLINE flag, caller will check */
        s = splsched();
        spc->spc_flags &= ~SPCF_OFFLINE;
        splx(s);
}

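/*
 * cpu_xc_online: cross-call handler that clears the offline flag,
 * allowing the scheduler to use the CPU again.
 */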
static void
cpu_xc_online(struct cpu_info *ci, void *unused)
{
        struct schedstate_percpu *spc;
        int s;

        spc = &ci->ci_schedstate;
        s = splsched();
        spc->spc_flags &= ~SPCF_OFFLINE;
        splx(s);
}

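/*
 * cpu_setstate: bring a CPU online or take it offline.  Called with
 * cpu_lock held; refuses to offline the last online CPU in a processor
 * set and performs the actual state change via a cross call.
 */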
int
cpu_setstate(struct cpu_info *ci, bool online)
{
        struct schedstate_percpu *spc;
        CPU_INFO_ITERATOR cii;
        struct cpu_info *ci2;
        uint64_t where;
        xcfunc_t func;
        int nonline;

        spc = &ci->ci_schedstate;

        KASSERT(mutex_owned(&cpu_lock));

        if (online) {
                if ((spc->spc_flags & SPCF_OFFLINE) == 0)
                        return 0;
                func = (xcfunc_t)cpu_xc_online;
        } else {
                if ((spc->spc_flags & SPCF_OFFLINE) != 0)
                        return 0;
                nonline = 0;
                /*
                 * Ensure that at least one CPU within the processor set
                 * stays online.  Revisit this later.
                 */
                for (CPU_INFO_FOREACH(cii, ci2)) {
                        if ((ci2->ci_schedstate.spc_flags & SPCF_OFFLINE) != 0)
                                continue;
                        if (ci2->ci_schedstate.spc_psid != spc->spc_psid)
                                continue;
                        nonline++;
                }
                if (nonline == 1)
                        return EBUSY;
                func = (xcfunc_t)cpu_xc_offline;
        }

        where = xc_unicast(0, func, ci, NULL, ci);
        xc_wait(where);
        if (online) {
                KASSERT((spc->spc_flags & SPCF_OFFLINE) == 0);
                ncpuonline++;
        } else {
                if ((spc->spc_flags & SPCF_OFFLINE) == 0) {
                        /* If it was not set offline, then it is busy */
                        return EBUSY;
                }
                ncpuonline--;
        }

        spc->spc_lastmod = time_second;
        return 0;
}

#if defined(__HAVE_INTR_CONTROL)
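/*
 * cpu_xc_intr/cpu_xc_nointr: cross-call handlers that clear or set
 * SPCF_NOINTR on the target CPU at splsched.
 */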
static void
cpu_xc_intr(struct cpu_info *ci, void *unused)
{
        struct schedstate_percpu *spc;
        int s;

        spc = &ci->ci_schedstate;
        s = splsched();
        spc->spc_flags &= ~SPCF_NOINTR;
        splx(s);
}

static void
cpu_xc_nointr(struct cpu_info *ci, void *unused)
{
        struct schedstate_percpu *spc;
        int s;

        spc = &ci->ci_schedstate;
        s = splsched();
        spc->spc_flags |= SPCF_NOINTR;
        splx(s);
}

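/*
 * cpu_setintr: enable or disable device interrupt handling on a CPU.
 * Called with cpu_lock held; the primary CPU cannot have interrupts
 * disabled, and at least one CPU must keep handling interrupts.
 */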
int
cpu_setintr(struct cpu_info *ci, bool intr)
{
        struct schedstate_percpu *spc;
        CPU_INFO_ITERATOR cii;
        struct cpu_info *ci2;
        uint64_t where;
        xcfunc_t func;
        int nintr;

        spc = &ci->ci_schedstate;

        KASSERT(mutex_owned(&cpu_lock));

        if (intr) {
                if ((spc->spc_flags & SPCF_NOINTR) == 0)
                        return 0;
                func = (xcfunc_t)cpu_xc_intr;
        } else {
                if (CPU_IS_PRIMARY(ci)) /* XXX kern/45117 */
                        return EINVAL;
                if ((spc->spc_flags & SPCF_NOINTR) != 0)
                        return 0;
                /*
                 * Ensure that at least one CPU within the system
                 * is handling device interrupts.
                 */
                nintr = 0;
                for (CPU_INFO_FOREACH(cii, ci2)) {
                        if ((ci2->ci_schedstate.spc_flags & SPCF_NOINTR) != 0)
                                continue;
                        if (ci2 == ci)
                                continue;
                        nintr++;
                }
                if (nintr == 0)
                        return EBUSY;
                func = (xcfunc_t)cpu_xc_nointr;
        }

        where = xc_unicast(0, func, ci, NULL, ci);
        xc_wait(where);
        if (intr) {
                KASSERT((spc->spc_flags & SPCF_NOINTR) == 0);
        } else if ((spc->spc_flags & SPCF_NOINTR) == 0) {
                /* If SPCF_NOINTR was not set, then the CPU is busy */
                return EBUSY;
        }

        /* Direct interrupts away from the CPU and record the change. */
        cpu_intr_redistribute();
        spc->spc_lastmod = time_second;
        return 0;
}
#else   /* __HAVE_INTR_CONTROL */
int
cpu_setintr(struct cpu_info *ci, bool intr)
{

        return EOPNOTSUPP;
}

u_int
cpu_intr_count(struct cpu_info *ci)
{

        return 0;       /* 0 == "don't know" */
}
#endif  /* __HAVE_INTR_CONTROL */

#ifdef CPU_UCODE
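/*
 * cpu_ucode_load: open the named microcode image with cpu_ucode_md_open,
 * allocate a buffer and read the whole blob into sc->sc_blob, releasing
 * any previously loaded blob first.
 */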
int
cpu_ucode_load(struct cpu_ucode_softc *sc, const char *fwname)
{
        firmware_handle_t fwh;
        int error;

        if (sc->sc_blob != NULL) {
                firmware_free(sc->sc_blob, sc->sc_blobsize);
                sc->sc_blob = NULL;
                sc->sc_blobsize = 0;
        }

        error = cpu_ucode_md_open(&fwh, sc->loader_version, fwname);
        if (error != 0) {
#ifdef DEBUG
                printf("ucode: firmware_open(%s) failed: %i\n", fwname, error);
#endif
                goto err0;
        }

        sc->sc_blobsize = firmware_get_size(fwh);
        if (sc->sc_blobsize == 0) {
                error = EFTYPE;
                firmware_close(fwh);
                goto err0;
        }
        sc->sc_blob = firmware_malloc(sc->sc_blobsize);
        if (sc->sc_blob == NULL) {
                error = ENOMEM;
                firmware_close(fwh);
                goto err0;
        }

        error = firmware_read(fwh, 0, sc->sc_blob, sc->sc_blobsize);
        firmware_close(fwh);
        if (error != 0)
                goto err1;

        return 0;

err1:
        firmware_free(sc->sc_blob, sc->sc_blobsize);
        sc->sc_blob = NULL;
        sc->sc_blobsize = 0;
err0:
        return error;
}
#endif
