FreeBSD/Linux Kernel Cross Reference
sys/amd64/amd64/mp_machdep.c


    1 /*-
    2  * Copyright (c) 1996, by Steve Passe
    3  * Copyright (c) 2003, by Peter Wemm
    4  * All rights reserved.
    5  *
    6  * Redistribution and use in source and binary forms, with or without
    7  * modification, are permitted provided that the following conditions
    8  * are met:
    9  * 1. Redistributions of source code must retain the above copyright
   10  *    notice, this list of conditions and the following disclaimer.
   11  * 2. The name of the developer may NOT be used to endorse or promote products
   12  *    derived from this software without specific prior written permission.
   13  *
   14  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
   15  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   16  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   17  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
   18  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   19  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
   20  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   21  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   22  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   23  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   24  * SUCH DAMAGE.
   25  */
   26 
   27 #include <sys/cdefs.h>
   28 __FBSDID("$FreeBSD: releng/5.3/sys/amd64/amd64/mp_machdep.c 146167 2005-05-13 00:02:47Z nectar $");
   29 
   30 #include "opt_cpu.h"
   31 #include "opt_kstack_pages.h"
   32 
   33 #include <sys/param.h>
   34 #include <sys/systm.h>
   35 #include <sys/bus.h>
   36 #ifdef GPROF 
   37 #include <sys/gmon.h>
   38 #endif
   39 #include <sys/kernel.h>
   40 #include <sys/ktr.h>
   41 #include <sys/lock.h>
   42 #include <sys/malloc.h>
   43 #include <sys/memrange.h>
   44 #include <sys/mutex.h>
   45 #include <sys/pcpu.h>
   46 #include <sys/proc.h>
   47 #include <sys/smp.h>
   48 #include <sys/sysctl.h>
   49 
   50 #include <vm/vm.h>
   51 #include <vm/vm_param.h>
   52 #include <vm/pmap.h>
   53 #include <vm/vm_kern.h>
   54 #include <vm/vm_extern.h>
   55 
   56 #include <machine/apicreg.h>
   57 #include <machine/clock.h>
   58 #include <machine/md_var.h>
   59 #include <machine/pcb.h>
   60 #include <machine/psl.h>
   61 #include <machine/smp.h>
   62 #include <machine/specialreg.h>
   63 #include <machine/tss.h>
   64 
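      /*
       * Warm-boot handoff locations: physical 0x467 (0x40:0x67 in the BIOS
       * data area) holds the real-mode CS:IP that a warm reset resumes at,
       * and writing BIOS_WARM into the CMOS shutdown status byte (offset
       * BIOS_RESET) asks the BIOS to jump through that vector instead of
       * performing a full POST.  The KERNBASE offsets reach this low
       * physical memory through the kernel's mapping of it.
       */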
   65 #define WARMBOOT_TARGET         0
   66 #define WARMBOOT_OFF            (KERNBASE + 0x0467)
   67 #define WARMBOOT_SEG            (KERNBASE + 0x0469)
   68 
   69 #define CMOS_REG                (0x70)
   70 #define CMOS_DATA               (0x71)
   71 #define BIOS_RESET              (0x0f)
   72 #define BIOS_WARM               (0x0a)
   73 
   74 /* lock region used by kernel profiling */
   75 int     mcount_lock;
   76 
   77 int     mp_naps;                /* # of Application Processors */
   78 int     boot_cpu_id = -1;       /* designated BSP */
   79 extern  int nkpt;
   80 
   81 /*
   82  * CPU topology map data structures for HTT.
   83  */
   84 static struct cpu_group mp_groups[MAXCPU];
   85 static struct cpu_top mp_top;
   86 
   87 /* AP uses this during bootstrap.  Do not staticize.  */
   88 char *bootSTK;
   89 static int bootAP;
   90 
   91 /* Free these after use */
   92 void *bootstacks[MAXCPU];
   93 
   94 /* Hotwire a 0->4MB V==P mapping */
   95 extern pt_entry_t *KPTphys;
   96 
   97 /* SMP page table page */
   98 extern pt_entry_t *SMPpt;
   99 
  100 struct pcb stoppcbs[MAXCPU];
  101 
  102 /* Variables needed for SMP tlb shootdown. */
  103 vm_offset_t smp_tlb_addr1;
  104 vm_offset_t smp_tlb_addr2;
  105 volatile int smp_tlb_wait;
  106 
  107 extern inthand_t IDTVEC(fast_syscall), IDTVEC(fast_syscall32);
  108 
  109 /*
  110  * Local data and functions.
  111  */
  112 
  113 static u_int logical_cpus;
  114 
  115 /* used to hold the APs until we are ready to release them */
  116 static struct mtx ap_boot_mtx;
  117 
  118 /* Set to 1 once we're ready to let the APs out of the pen. */
  119 static volatile int aps_ready = 0;
  120 
  121 /*
  122  * Store data from cpu_add() until later in the boot when we actually setup
  123  * the APs.
  124  */
  125 struct cpu_info {
  126         int     cpu_present:1;
  127         int     cpu_bsp:1;
  128 } static cpu_info[MAXCPU];
  129 static int cpu_apic_ids[MAXCPU];
  130 
  131 static u_int boot_address;
  132 
  133 static void     set_logical_apic_ids(void);
  134 static int      start_all_aps(void);
  135 static int      start_ap(int apic_id);
  136 static void     release_aps(void *dummy);
  137 
  138 static int      hlt_logical_cpus;
  139 static u_int    hyperthreading_cpus;
  140 static cpumask_t        hyperthreading_cpus_mask;
  141 static int      hyperthreading_allowed;
  142 static struct   sysctl_ctx_list logical_cpu_clist;
  143 static u_int    bootMP_size;
  144 
  145 static void
  146 mem_range_AP_init(void)
  147 {
  148         if (mem_range_softc.mr_op && mem_range_softc.mr_op->initAP)
  149                 mem_range_softc.mr_op->initAP(&mem_range_softc);
  150 }
  151 
  152 void
  153 mp_topology(void)
  154 {
  155         struct cpu_group *group;
  156         int logical_cpus;
  157         int apic_id;
  158         int groups;
  159         int cpu;
  160 
  161         /* Build the smp_topology map. */
  162         /* Nothing to do if there is no HTT support. */
  163         if ((cpu_feature & CPUID_HTT) == 0)
  164                 return;
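              /*
               * With HTT present, CPUID leaf 1 reports the number of logical
               * CPUs per physical package in EBX bits 23:16 (cpu_procinfo).
               */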
  165         logical_cpus = (cpu_procinfo & CPUID_HTT_CORES) >> 16;
  166         if (logical_cpus <= 1)
  167                 return;
  168         group = &mp_groups[0];
  169         groups = 1;
  170         for (cpu = 0, apic_id = 0; apic_id < MAXCPU; apic_id++) {
  171                 if (!cpu_info[apic_id].cpu_present)
  172                         continue;
  173                 /*
  174                  * If the current group has members and we're not a logical
  175                  * cpu, create a new group.
  176                  */
  177                 if (group->cg_count != 0 && (apic_id % logical_cpus) == 0) {
  178                         group++;
  179                         groups++;
  180                 }
  181                 group->cg_count++;
  182                 group->cg_mask |= 1 << cpu;
  183                 cpu++;
  184         }
  185 
  186         mp_top.ct_count = groups;
  187         mp_top.ct_group = mp_groups;
  188         smp_topology = &mp_top;
  189 }
  190 
  191 
  192 /*
  193  * Calculate usable address in base memory for AP trampoline code.
  194  */
  195 u_int
  196 mp_bootaddress(u_int basemem)
  197 {
  198 
  199         bootMP_size = mptramp_end - mptramp_start;
  200         boot_address = trunc_page(basemem * 1024); /* round down to 4k boundary */
  201         if (((basemem * 1024) - boot_address) < bootMP_size)
  202                 boot_address -= PAGE_SIZE;      /* not enough, lower by 4k */
  203         /* The 3 page table pages (PML4, PDP, PD) sit just below the trampoline. */
  204         mptramp_pagetables = boot_address - (PAGE_SIZE * 3);
  205 
  206         return mptramp_pagetables;
  207 }
  208 
  209 void
  210 cpu_add(u_int apic_id, char boot_cpu)
  211 {
  212 
  213         if (apic_id >= MAXCPU) {
  214                 printf("SMP: CPU %d exceeds maximum CPU %d, ignoring\n",
  215                     apic_id, MAXCPU - 1);
  216                 return;
  217         }
  218         KASSERT(cpu_info[apic_id].cpu_present == 0, ("CPU %d added twice",
  219             apic_id));
  220         cpu_info[apic_id].cpu_present = 1;
  221         if (boot_cpu) {
  222                 KASSERT(boot_cpu_id == -1,
  223                     ("CPU %d claims to be BSP, but CPU %d already is", apic_id,
  224                     boot_cpu_id));
  225                 boot_cpu_id = apic_id;
  226                 cpu_info[apic_id].cpu_bsp = 1;
  227         }
  228         mp_ncpus++;
  229         if (apic_id > mp_maxid)
  230                 mp_maxid = apic_id;
  231         if (bootverbose)
  232                 printf("SMP: Added CPU %d (%s)\n", apic_id, boot_cpu ? "BSP" :
  233                     "AP");
  234         
  235 }
  236 
  237 void
  238 cpu_mp_setmaxid(void)
  239 {
  240 
  241         /*
  242          * mp_maxid should be already set by calls to cpu_add().
  243          * Just sanity check its value here.
  244          */
  245         if (mp_ncpus == 0)
  246                 KASSERT(mp_maxid == 0,
  247                     ("%s: mp_ncpus is zero, but mp_maxid is not", __func__));
  248         else if (mp_ncpus == 1)
  249                 mp_maxid = 0;
  250         else
  251                 KASSERT(mp_maxid >= mp_ncpus - 1,
  252                     ("%s: counters out of sync: max %d, count %d", __func__,
  253                         mp_maxid, mp_ncpus));
  254                 
  255 }
  256 
  257 int
  258 cpu_mp_probe(void)
  259 {
  260 
  261         /*
  262          * Always record BSP in CPU map so that the mbuf init code works
  263          * correctly.
  264          */
  265         all_cpus = 1;
  266         if (mp_ncpus == 0) {
  267                 /*
  268                  * No CPUs were found, so this must be a UP system.  Set up
  269                  * the variables to represent a system with a single CPU
  270                  * with an id of 0.
  271                  */
  272                 mp_ncpus = 1;
  273                 return (0);
  274         }
  275 
  276         /* At least one CPU was found. */
  277         if (mp_ncpus == 1) {
  278                 /*
  279                  * One CPU was found, so this must be a UP system with
  280                  * an I/O APIC.
  281                  */
  282                 mp_maxid = 0;
  283                 return (0);
  284         }
  285 
  286         /* At least two CPUs were found. */
  287         return (1);
  288 }
  289 
  290 /*
  291  * Initialize the IPI handlers and start up the APs.
  292  */
  293 void
  294 cpu_mp_start(void)
  295 {
  296         int i;
  297         u_int threads_per_cache, p[4];
  298 
  299         /* Initialize the logical ID to APIC ID table. */
  300         for (i = 0; i < MAXCPU; i++)
  301                 cpu_apic_ids[i] = -1;
  302 
  303         /* Install an inter-CPU IPI for TLB invalidation */
  304         setidt(IPI_INVLTLB, IDTVEC(invltlb), SDT_SYSIGT, SEL_KPL, 0);
  305         setidt(IPI_INVLPG, IDTVEC(invlpg), SDT_SYSIGT, SEL_KPL, 0);
  306         setidt(IPI_INVLRNG, IDTVEC(invlrng), SDT_SYSIGT, SEL_KPL, 0);
  307 
  308         /* Install an inter-CPU IPI for forwarding hardclock() */
  309         setidt(IPI_HARDCLOCK, IDTVEC(hardclock), SDT_SYSIGT, SEL_KPL, 0);
  310         
  311         /* Install an inter-CPU IPI for forwarding statclock() */
  312         setidt(IPI_STATCLOCK, IDTVEC(statclock), SDT_SYSIGT, SEL_KPL, 0);
  313         
  314         /* Install an inter-CPU IPI for all-CPU rendezvous */
  315         setidt(IPI_RENDEZVOUS, IDTVEC(rendezvous), SDT_SYSIGT, SEL_KPL, 0);
  316 
  317         /* Install an inter-CPU IPI for forcing an additional software trap */
  318         setidt(IPI_AST, IDTVEC(cpuast), SDT_SYSIGT, SEL_KPL, 0);
  319 
  320         /* Install an inter-CPU IPI for CPU stop/restart */
  321         setidt(IPI_STOP, IDTVEC(cpustop), SDT_SYSIGT, SEL_KPL, 0);
  322 
  323         /* Set boot_cpu_id if needed. */
  324         if (boot_cpu_id == -1) {
  325                 boot_cpu_id = PCPU_GET(apic_id);
  326                 cpu_info[boot_cpu_id].cpu_bsp = 1;
  327         } else
  328                 KASSERT(boot_cpu_id == PCPU_GET(apic_id),
  329                     ("BSP's APIC ID doesn't match boot_cpu_id"));
  330         cpu_apic_ids[0] = boot_cpu_id;
  331 
  332         /* Start each Application Processor */
  333         start_all_aps();
  334 
  335         /* Setup the initial logical CPUs info. */
  336         logical_cpus = logical_cpus_mask = 0;
  337         if (cpu_feature & CPUID_HTT)
  338                 logical_cpus = (cpu_procinfo & CPUID_HTT_CORES) >> 16;
  339 
  340         /*
  341          * Work out if hyperthreading is *really* enabled.  This
  342          * is made really ugly by the fact that processors lie: Dual
  343          * core processors claim to be hyperthreaded even when they're
  344          * not, presumably because they want to be treated the same
  345          * way as HTT with respect to per-cpu software licensing.
  346          * At the time of writing (May 12, 2005) the only hyperthreaded
  347          * cpus are from Intel, and Intel's dual-core processors can be
  348          * identified via the "deterministic cache parameters" cpuid
  349          * calls.
  350          */
  351         /*
  352          * First determine if this is an Intel processor which claims
  353          * to have hyperthreading support.
  354          */
  355         if ((cpu_feature & CPUID_HTT) &&
  356             (strcmp(cpu_vendor, "GenuineIntel") == 0)) {
  357                 /*
  358                  * If the "deterministic cache parameters" cpuid calls
  359                  * are available, use them.
  360                  */
  361                 if (cpu_high >= 4) {
  362                         /* Ask the processor about up to 32 caches. */
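                              /*
                               * For each cache level, leaf 4 returns in EAX
                               * bits 25:14 the maximum number of logical
                               * processors sharing that cache minus one, and
                               * in bits 4:0 the cache type (0 = no more caches).
                               */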
  363                         for (i = 0; i < 32; i++) {
  364                                 cpuid_count(4, i, p);
  365                                 threads_per_cache = ((p[0] & 0x3ffc000) >> 14) + 1;
  366                                 if (hyperthreading_cpus < threads_per_cache)
  367                                         hyperthreading_cpus = threads_per_cache;
  368                                 if ((p[0] & 0x1f) == 0)
  369                                         break;
  370                         }
  371                 }
  372 
  373                 /*
  374                  * If the deterministic cache parameters are not
  375                  * available, or if no caches were reported to exist,
  376                  * just accept what the HTT flag indicated.
  377                  */
  378                 if (hyperthreading_cpus == 0)
  379                         hyperthreading_cpus = logical_cpus;
  380         }
  381 
  382         set_logical_apic_ids();
  383 }
  384 
  385 
  386 /*
  387  * Print various information about the SMP system hardware and setup.
  388  */
  389 void
  390 cpu_mp_announce(void)
  391 {
  392         int i, x;
  393 
  394         /* List CPUs */
  395         printf(" cpu0 (BSP): APIC ID: %2d\n", boot_cpu_id);
  396         for (i = 1, x = 0; x < MAXCPU; x++) {
  397                 if (cpu_info[x].cpu_present && !cpu_info[x].cpu_bsp) {
  398                         KASSERT(i < mp_ncpus,
  399                             ("mp_ncpus and actual cpus are out of whack"));
  400                         printf(" cpu%d (AP): APIC ID: %2d\n", i++, x);
  401                 }
  402         }
  403 }
  404 
  405 /*
  406  * AP CPUs call this to initialize themselves.
  407  */
  408 void
  409 init_secondary(void)
  410 {
  411         struct pcpu *pc;
  412         u_int64_t msr, cr0;
  413         int cpu, gsel_tss;
  414 
  415         /* Set by the startup code for us to use */
  416         cpu = bootAP;
  417 
  418         /* Init tss */
  419         common_tss[cpu] = common_tss[0];
  420         common_tss[cpu].tss_rsp0 = 0;   /* not used until after switch */
  421         common_tss[cpu].tss_iobase = sizeof(struct amd64tss);
  422 
  423         gdt_segs[GPROC0_SEL].ssd_base = (long) &common_tss[cpu];
  424         ssdtosyssd(&gdt_segs[GPROC0_SEL],
  425            (struct system_segment_descriptor *)&gdt[GPROC0_SEL]);
  426 
  427         lgdt(&r_gdt);                   /* does magic intra-segment return */
  428 
  429         /* Get per-cpu data */
  430         pc = &__pcpu[cpu];
  431 
  432         /* prime data page for it to use */
  433         pcpu_init(pc, cpu, sizeof(struct pcpu));
  434         pc->pc_apic_id = cpu_apic_ids[cpu];
  435         pc->pc_prvspace = pc;
  436         pc->pc_curthread = 0;
  437         pc->pc_tssp = &common_tss[cpu];
  438         pc->pc_rsp0 = 0;
  439 
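              /*
               * Per-CPU data on amd64 is addressed through %gs.  GSBASE holds
               * the kernel's pcpu pointer while in the kernel; KGSBASE is the
               * value exchanged with it by swapgs on kernel entry and exit.
               */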
  440         wrmsr(MSR_FSBASE, 0);           /* User value */
  441         wrmsr(MSR_GSBASE, (u_int64_t)pc);
  442         wrmsr(MSR_KGSBASE, (u_int64_t)pc);      /* XXX User value while we're in the kernel */
  443 
  444         lidt(&r_idt);
  445 
  446         gsel_tss = GSEL(GPROC0_SEL, SEL_KPL);
  447         ltr(gsel_tss);
  448 
  449         /*
  450          * Set to a known state:
  451          * Set by mpboot.s: CR0_PG, CR0_PE
  452          * Set by cpu_setregs: CR0_NE, CR0_MP, CR0_TS, CR0_WP, CR0_AM
  453          */
  454         cr0 = rcr0();
  455         cr0 &= ~(CR0_CD | CR0_NW | CR0_EM);
  456         load_cr0(cr0);
  457 
  458         /* Set up the fast syscall stuff */
  459         msr = rdmsr(MSR_EFER) | EFER_SCE;
  460         wrmsr(MSR_EFER, msr);
  461         wrmsr(MSR_LSTAR, (u_int64_t)IDTVEC(fast_syscall));
  462         wrmsr(MSR_CSTAR, (u_int64_t)IDTVEC(fast_syscall32));
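              /*
               * STAR bits 47:32 give the kernel CS (and CS+8 = SS) loaded by
               * SYSCALL; bits 63:48 give the base selector from which SYSRET
               * rebuilds the user segments: 32-bit CS at +0, SS at +8 and
               * 64-bit CS at +16, matching the user segment layout in the GDT.
               */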
  463         msr = ((u_int64_t)GSEL(GCODE_SEL, SEL_KPL) << 32) |
  464               ((u_int64_t)GSEL(GUCODE32_SEL, SEL_UPL) << 48);
  465         wrmsr(MSR_STAR, msr);
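              /* Flags set in SF_MASK are cleared in %rflags on SYSCALL entry. */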
  466         wrmsr(MSR_SF_MASK, PSL_NT|PSL_T|PSL_I|PSL_C|PSL_D);
  467 
  468         /* Disable local apic just to be sure. */
  469         lapic_disable();
  470 
  471         /* signal our startup to the BSP. */
  472         mp_naps++;
  473 
  474         /* Spin until the BSP releases the APs. */
  475         while (!aps_ready)
  476                 ia32_pause();
  477 
  478         /* set up CPU registers and state */
  479         cpu_setregs();
  480 
  481         /* set up SSE/NX registers */
  482         initializecpu();
  483 
  484         /* set up FPU state on the AP */
  485         fpuinit();
  486 
  487         /* A quick check from sanity claus */
  488         if (PCPU_GET(apic_id) != lapic_id()) {
  489                 printf("SMP: cpuid = %d\n", PCPU_GET(cpuid));
  490                 printf("SMP: actual apic_id = %d\n", lapic_id());
  491                 printf("SMP: correct apic_id = %d\n", PCPU_GET(apic_id));
  492                 panic("cpuid mismatch! boom!!");
  493         }
  494 
  495         mtx_lock_spin(&ap_boot_mtx);
  496 
  497         /* Init the local APIC for interrupts */
  498         lapic_setup();
  499 
  500         /* Set memory range attributes for this CPU to match the BSP */
  501         mem_range_AP_init();
  502 
  503         smp_cpus++;
  504 
  505         CTR1(KTR_SMP, "SMP: AP CPU #%d Launched", PCPU_GET(cpuid));
  506         printf("SMP: AP CPU #%d Launched!\n", PCPU_GET(cpuid));
  507 
  508         /* Determine if we are a logical CPU. */
  509         if (logical_cpus > 1 && PCPU_GET(apic_id) % logical_cpus != 0)
  510                 logical_cpus_mask |= PCPU_GET(cpumask);
  511         
  512         /* Determine if we are a hyperthread. */
  513         if (hyperthreading_cpus > 1 &&
  514             PCPU_GET(apic_id) % hyperthreading_cpus != 0)
  515                 hyperthreading_cpus_mask |= PCPU_GET(cpumask);
  516 
  517         /* Build our map of 'other' CPUs. */
  518         PCPU_SET(other_cpus, all_cpus & ~PCPU_GET(cpumask));
  519 
  520         if (bootverbose)
  521                 lapic_dump("AP");
  522 
  523         if (smp_cpus == mp_ncpus) {
  524                 /* enable IPI's, tlb shootdown, freezes etc */
  525                 atomic_store_rel_int(&smp_started, 1);
  526                 smp_active = 1;  /* historic */
  527         }
  528 
  529         mtx_unlock_spin(&ap_boot_mtx);
  530 
  531         /* wait until all the APs are up */
  532         while (smp_started == 0)
  533                 ia32_pause();
  534 
  535         /* ok, now grab sched_lock and enter the scheduler */
  536         mtx_lock_spin(&sched_lock);
  537 
  538         binuptime(PCPU_PTR(switchtime));
  539         PCPU_SET(switchticks, ticks);
  540 
  541         cpu_throw(NULL, choosethread());        /* doesn't return */
  542 
  543         panic("scheduler returned us to %s", __func__);
  544         /* NOTREACHED */
  545 }
  546 
  547 /*******************************************************************
  548  * local functions and data
  549  */
  550 
  551 /*
  552  * Set the APIC logical IDs.
  553  *
  554  * We want to cluster logical CPUs within the same APIC ID cluster.
  555  * Since logical CPUs are aligned, simply filling in the clusters in
  556  * APIC ID order works fine.  Note that this does not try to balance
  557  * the number of CPUs in each cluster. (XXX?)
  558  */
  559 static void
  560 set_logical_apic_ids(void)
  561 {
  562         u_int apic_id, cluster, cluster_id;
  563 
  564         /* Force us to allocate cluster 0 at the start. */
  565         cluster = -1;
  566         cluster_id = APIC_MAX_INTRACLUSTER_ID;
  567         for (apic_id = 0; apic_id < MAXCPU; apic_id++) {
  568                 if (!cpu_info[apic_id].cpu_present)
  569                         continue;
  570                 if (cluster_id == APIC_MAX_INTRACLUSTER_ID) {
  571                         cluster = ioapic_next_logical_cluster();
  572                         cluster_id = 0;
  573                 } else
  574                         cluster_id++;
  575                 if (bootverbose)
  576                         printf("APIC ID: physical %u, logical %u:%u\n",
  577                             apic_id, cluster, cluster_id);
  578                 lapic_set_logical_id(apic_id, cluster, cluster_id);
  579         }
  580 }
  581 
  582 /*
  583  * start each AP in our list
  584  */
  585 static int
  586 start_all_aps(void)
  587 {
  588         u_char mpbiosreason;
  589         u_int32_t mpbioswarmvec;
  590         int apic_id, cpu, i;
  591         u_int64_t *pt4, *pt3, *pt2;
  592 
  593         mtx_init(&ap_boot_mtx, "ap boot", NULL, MTX_SPIN);
  594 
  595         /* install the AP 1st level boot code */
  596         pmap_kenter(boot_address + KERNBASE, boot_address);
  597         bcopy(mptramp_start, (void *)((uintptr_t)boot_address + KERNBASE), bootMP_size);
  598 
  599         /* Locate the page tables; they'll be below the trampoline */
  600         pt4 = (u_int64_t *)(uintptr_t)(mptramp_pagetables + KERNBASE);
  601         pt3 = pt4 + (PAGE_SIZE) / sizeof(u_int64_t);
  602         pt2 = pt3 + (PAGE_SIZE) / sizeof(u_int64_t);
  603 
  604         /* Create the initial 1GB replicated page tables */
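              /*
               * Every PML4 and PDP slot points at the same next-level page, so
               * any virtual address resolves into the low 1GB.  This keeps both
               * the trampoline's physical (identity) address and the kernel's
               * KERNBASE addresses mapped while the AP switches into long mode.
               */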
  605         for (i = 0; i < 512; i++) {
  606                 /* Each slot of the level 4 pages points to the same level 3 page */
  607                 pt4[i] = (u_int64_t)(uintptr_t)(mptramp_pagetables + PAGE_SIZE);
  608                 pt4[i] |= PG_V | PG_RW | PG_U;
  609 
  610                 /* Each slot of the level 3 pages points to the same level 2 page */
  611                 pt3[i] = (u_int64_t)(uintptr_t)(mptramp_pagetables + (2 * PAGE_SIZE));
  612                 pt3[i] |= PG_V | PG_RW | PG_U;
  613 
  614                 /* The level 2 page slots are mapped with 2MB pages for 1GB. */
  615                 pt2[i] = i * (2 * 1024 * 1024);
  616                 pt2[i] |= PG_V | PG_RW | PG_PS | PG_U;
  617         }
  618 
  619         /* save the current value of the warm-start vector */
  620         mpbioswarmvec = *((u_int32_t *) WARMBOOT_OFF);
  621         outb(CMOS_REG, BIOS_RESET);
  622         mpbiosreason = inb(CMOS_DATA);
  623 
  624         /* setup a vector to our boot code */
  625         *((volatile u_short *) WARMBOOT_OFF) = WARMBOOT_TARGET;
  626         *((volatile u_short *) WARMBOOT_SEG) = (boot_address >> 4);
  627         outb(CMOS_REG, BIOS_RESET);
  628         outb(CMOS_DATA, BIOS_WARM);     /* 'warm-start' */
  629 
  630         /* start each AP */
  631         cpu = 0;
  632         for (apic_id = 0; apic_id < MAXCPU; apic_id++) {
  633                 if (!cpu_info[apic_id].cpu_present ||
  634                     cpu_info[apic_id].cpu_bsp)
  635                         continue;
  636                 cpu++;
  637 
  638                 /* save APIC ID for this logical ID */
  639                 cpu_apic_ids[cpu] = apic_id;
  640 
  641                 /* allocate and set up an idle stack data page */
  642                 bootstacks[cpu] = (char *)kmem_alloc(kernel_map, KSTACK_PAGES * PAGE_SIZE);
  643 
  644                 bootSTK = (char *)bootstacks[cpu] + KSTACK_PAGES * PAGE_SIZE - 8;
  645                 bootAP = cpu;
  646 
  647                 /* attempt to start the Application Processor */
  648                 if (!start_ap(apic_id)) {
  649                         /* restore the warmstart vector */
  650                         *(u_int32_t *) WARMBOOT_OFF = mpbioswarmvec;
  651                         panic("AP #%d (PHY# %d) failed!", cpu, apic_id);
  652                 }
  653 
  654                 all_cpus |= (1 << cpu);         /* record AP in CPU map */
  655         }
  656 
  657         /* build our map of 'other' CPUs */
  658         PCPU_SET(other_cpus, all_cpus & ~PCPU_GET(cpumask));
  659 
  660         /* restore the warmstart vector */
  661         *(u_int32_t *) WARMBOOT_OFF = mpbioswarmvec;
  662 
  663         outb(CMOS_REG, BIOS_RESET);
  664         outb(CMOS_DATA, mpbiosreason);
  665 
  666         /* number of APs actually started */
  667         return mp_naps;
  668 }
  669 
  670 
  671 /*
  672  * This function starts the AP (application processor) identified
  673  * by the APIC ID 'apic_id'.  It does quite a "song and dance"
  674  * to accomplish this.  This is necessary because of the nuances
  675  * of the different hardware we might encounter.  It isn't pretty,
  676  * but it seems to work.
  677  */
  678 static int
  679 start_ap(int apic_id)
  680 {
  681         int vector, ms;
  682         int cpus;
  683 
  684         /* calculate the STARTUP IPI vector: the trampoline's physical page number */
  685         vector = (boot_address >> 12) & 0xff;
  686 
  687         /* used as a watchpoint to signal AP startup */
  688         cpus = mp_naps;
  689 
  690         /*
  691          * First we do an INIT/RESET IPI.  This INIT IPI might be run,
  692          * resetting and running the target CPU; or it might be latched
  693          * (P5 bug), leaving the CPU waiting for the STARTUP IPI; or it
  694          * might be ignored entirely.
  695          */
  696 
  697         /* do an INIT IPI: assert RESET */
  698         lapic_ipi_raw(APIC_DEST_DESTFLD | APIC_TRIGMOD_EDGE |
  699             APIC_LEVEL_ASSERT | APIC_DESTMODE_PHY | APIC_DELMODE_INIT, apic_id);
  700 
  701         /* wait for pending status end */
  702         lapic_ipi_wait(-1);
  703 
  704         /* do an INIT IPI: deassert RESET */
  705         lapic_ipi_raw(APIC_DEST_ALLESELF | APIC_TRIGMOD_LEVEL |
  706             APIC_LEVEL_DEASSERT | APIC_DESTMODE_PHY | APIC_DELMODE_INIT, 0);
  707 
  708         /* wait for pending status end */
  709         DELAY(10000);           /* wait ~10 ms */
  710         lapic_ipi_wait(-1);
  711 
  712         /*
  713          * Next we do a STARTUP IPI: the previous INIT IPI might still be
  714          * latched (P5 bug), in which case this first STARTUP IPI terminates
  715          * immediately and the previously started INIT continues.  Or the
  716          * previous INIT IPI has already run, and this STARTUP IPI will run.
  717          * Or the previous INIT IPI was ignored, and this STARTUP IPI will
  718          * run.
  719          */
  720 
  721         /* do a STARTUP IPI */
  722         lapic_ipi_raw(APIC_DEST_DESTFLD | APIC_TRIGMOD_EDGE |
  723             APIC_LEVEL_DEASSERT | APIC_DESTMODE_PHY | APIC_DELMODE_STARTUP |
  724             vector, apic_id);
  725         lapic_ipi_wait(-1);
  726         DELAY(200);             /* wait ~200 us */
  727 
  728         /*
  729          * Finally we do a 2nd STARTUP IPI: it should run if the previous
  730          * STARTUP IPI was cancelled by a latched INIT IPI.  Otherwise it
  731          * will be ignored, since only ONE STARTUP IPI is recognized after
  732          * a hardware RESET or an INIT IPI.
  733          */
  734 
  735         lapic_ipi_raw(APIC_DEST_DESTFLD | APIC_TRIGMOD_EDGE |
  736             APIC_LEVEL_DEASSERT | APIC_DESTMODE_PHY | APIC_DELMODE_STARTUP |
  737             vector, apic_id);
  738         lapic_ipi_wait(-1);
  739         DELAY(200);             /* wait ~200 us */
  740 
  741         /* Wait up to 5 seconds for it to start. */
  742         for (ms = 0; ms < 50; ms++) {
  743                 if (mp_naps > cpus)
  744                         return 1;       /* return SUCCESS */
  745                 DELAY(100000);
  746         }
  747         return 0;               /* return FAILURE */
  748 }
  749 
  750 /*
  751  * Flush the TLB on all other CPUs; the caller must hold smp_rv_mtx.
  752  */
  753 static void
  754 smp_tlb_shootdown(u_int vector, vm_offset_t addr1, vm_offset_t addr2)
  755 {
  756         u_int ncpu;
  757 
  758         ncpu = mp_ncpus - 1;    /* does not shoot down self */
  759         if (ncpu < 1)
  760                 return;         /* no other cpus */
  761         mtx_assert(&smp_rv_mtx, MA_OWNED);
  762         smp_tlb_addr1 = addr1;
  763         smp_tlb_addr2 = addr2;
  764         atomic_store_rel_int(&smp_tlb_wait, 0);
  765         ipi_all_but_self(vector);
  766         while (smp_tlb_wait < ncpu)
  767                 ia32_pause();
  768 }
  769 
  770 /*
  771  * This is about as magic as it gets.  fortune(1) has got similar code
  772  * for reversing bits in a word.  Who thinks up this stuff??
  773  *
  774  * Yes, it does appear to be consistently faster than:
  775  * while (i = ffs(m)) {
  776  *      m >>= i;
  777  *      bits++;
  778  * }
  779  * and
  780  * while (lsb = (m & -m)) {     // This is magic too
  781  *      m &= ~lsb;              // or: m ^= lsb
  782  *      bits++;
  783  * }
  784  * Both of these latter forms do some very strange things on gcc-3.1 with
  785  * -mcpu=pentiumpro and/or -march=pentiumpro and/or -O or -O2.
  786  * There is probably an SSE or MMX popcnt instruction.
  787  *
  788  * I wonder if this should be in libkern?
  789  *
  790  * XXX Stop the presses!  Another one:
  791  * static __inline u_int32_t
  792  * popcnt1(u_int32_t v)
  793  * {
  794  *      v -= ((v >> 1) & 0x55555555);
  795  *      v = (v & 0x33333333) + ((v >> 2) & 0x33333333);
  796  *      v = (v + (v >> 4)) & 0x0F0F0F0F;
  797  *      return (v * 0x01010101) >> 24;
  798  * }
  799  * The downside is that it has a multiply.  With a pentium3 with
  800  * -mcpu=pentiumpro and -march=pentiumpro then gcc-3.1 will use
  801  * an imull, and in that case it is faster.  In most other cases
  802  * it appears slightly slower.
  803  *
  804  * Another variant (also from fortune):
  805  * #define BITCOUNT(x) (((BX_(x)+(BX_(x)>>4)) & 0x0F0F0F0F) % 255)
  806  * #define  BX_(x)     ((x) - (((x)>>1)&0x77777777)            \
  807  *                          - (((x)>>2)&0x33333333)            \
  808  *                          - (((x)>>3)&0x11111111))
  809  */
  810 static __inline u_int32_t
  811 popcnt(u_int32_t m)
  812 {
  813 
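              /*
               * Sum adjacent bits, then the 2-bit sums, nibbles, bytes and
               * 16-bit halves; the final value of m is the number of set bits.
               */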
  814         m = (m & 0x55555555) + ((m & 0xaaaaaaaa) >> 1);
  815         m = (m & 0x33333333) + ((m & 0xcccccccc) >> 2);
  816         m = (m & 0x0f0f0f0f) + ((m & 0xf0f0f0f0) >> 4);
  817         m = (m & 0x00ff00ff) + ((m & 0xff00ff00) >> 8);
  818         m = (m & 0x0000ffff) + ((m & 0xffff0000) >> 16);
  819         return m;
  820 }
  821 
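      /*
       * Flush the TLB on a set of CPUs.  A mask of all ones means every CPU
       * except ourselves; otherwise our own bit is stripped from the mask
       * before the IPIs are sent.
       */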
  822 static void
  823 smp_targeted_tlb_shootdown(u_int mask, u_int vector, vm_offset_t addr1, vm_offset_t addr2)
  824 {
  825         int ncpu, othercpus;
  826 
  827         othercpus = mp_ncpus - 1;
  828         if (mask == (u_int)-1) {
  829                 ncpu = othercpus;
  830                 if (ncpu < 1)
  831                         return;
  832         } else {
  833                 mask &= ~PCPU_GET(cpumask);
  834                 if (mask == 0)
  835                         return;
  836                 ncpu = popcnt(mask);
  837                 if (ncpu > othercpus) {
  838                         /* XXX this should be a panic offence */
  839                         printf("SMP: tlb shootdown to %d other cpus (only have %d)\n",
  840                             ncpu, othercpus);
  841                         ncpu = othercpus;
  842                 }
  843                 /* XXX should be a panic, implied by mask == 0 above */
  844                 if (ncpu < 1)
  845                         return;
  846         }
  847         mtx_assert(&smp_rv_mtx, MA_OWNED);
  848         smp_tlb_addr1 = addr1;
  849         smp_tlb_addr2 = addr2;
  850         atomic_store_rel_int(&smp_tlb_wait, 0);
  851         if (mask == (u_int)-1)
  852                 ipi_all_but_self(vector);
  853         else
  854                 ipi_selected(mask, vector);
  855         while (smp_tlb_wait < ncpu)
  856                 ia32_pause();
  857 }
  858 
  859 void
  860 smp_invltlb(void)
  861 {
  862 
  863         if (smp_started)
  864                 smp_tlb_shootdown(IPI_INVLTLB, 0, 0);
  865 }
  866 
  867 void
  868 smp_invlpg(vm_offset_t addr)
  869 {
  870 
  871         if (smp_started)
  872                 smp_tlb_shootdown(IPI_INVLPG, addr, 0);
  873 }
  874 
  875 void
  876 smp_invlpg_range(vm_offset_t addr1, vm_offset_t addr2)
  877 {
  878 
  879         if (smp_started)
  880                 smp_tlb_shootdown(IPI_INVLRNG, addr1, addr2);
  881 }
  882 
  883 void
  884 smp_masked_invltlb(u_int mask)
  885 {
  886 
  887         if (smp_started)
  888                 smp_targeted_tlb_shootdown(mask, IPI_INVLTLB, 0, 0);
  889 }
  890 
  891 void
  892 smp_masked_invlpg(u_int mask, vm_offset_t addr)
  893 {
  894 
  895         if (smp_started)
  896                 smp_targeted_tlb_shootdown(mask, IPI_INVLPG, addr, 0);
  897 }
  898 
  899 void
  900 smp_masked_invlpg_range(u_int mask, vm_offset_t addr1, vm_offset_t addr2)
  901 {
  902 
  903         if (smp_started)
  904                 smp_targeted_tlb_shootdown(mask, IPI_INVLRNG, addr1, addr2);
  905 }
  906 
  907 
  908 /*
  909  * For statclock, we send an IPI to the other CPUs to have them call
  910  * this function.
  911  */
  912 void
  913 forwarded_statclock(struct clockframe frame)
  914 {
  915         struct thread *td;
  916 
  917         CTR0(KTR_SMP, "forwarded_statclock");
  918         td = curthread;
  919         td->td_intr_nesting_level++;
  920         if (profprocs != 0)
  921                 profclock(&frame);
  922         if (pscnt == psdiv)
  923                 statclock(&frame);
  924         td->td_intr_nesting_level--;
  925 }
  926 
  927 void
  928 forward_statclock(void)
  929 {
  930         int map;
  931 
  932         CTR0(KTR_SMP, "forward_statclock");
  933 
  934         if (!smp_started || cold || panicstr)
  935                 return;
  936 
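              /* Skip ourselves and any CPUs that are stopped or halted. */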
  937         map = PCPU_GET(other_cpus) & ~(stopped_cpus|hlt_cpus_mask);
  938         if (map != 0)
  939                 ipi_selected(map, IPI_STATCLOCK);
  940 }
  941 
  942 /*
  943  * For each hardclock(), we send an IPI to all other CPUs to have them
  944  * execute this function.  It would be nice to reduce contention on
  945  * sched_lock if we could simply peek at the CPU to determine the
  946  * user/kernel state, call hardclock_process() on the CPU receiving the
  947  * clock interrupt, and then just use a simple IPI to handle any ASTs.
  948  */
  949 void
  950 forwarded_hardclock(struct clockframe frame)
  951 {
  952         struct thread *td;
  953 
  954         CTR0(KTR_SMP, "forwarded_hardclock");
  955         td = curthread;
  956         td->td_intr_nesting_level++;
  957         hardclock_process(&frame);
  958         td->td_intr_nesting_level--;
  959 }
  960 
  961 void 
  962 forward_hardclock(void)
  963 {
  964         u_int map;
  965 
  966         CTR0(KTR_SMP, "forward_hardclock");
  967 
  968         if (!smp_started || cold || panicstr)
  969                 return;
  970 
  971         map = PCPU_GET(other_cpus) & ~(stopped_cpus|hlt_cpus_mask);
  972         if (map != 0)
  973                 ipi_selected(map, IPI_HARDCLOCK);
  974 }
  975 
  976 /*
  977  * send an IPI to a set of CPUs.
  978  */
  979 void
  980 ipi_selected(u_int32_t cpus, u_int ipi)
  981 {
  982         int cpu;
  983 
  984         CTR3(KTR_SMP, "%s: cpus: %x ipi: %x", __func__, cpus, ipi);
  985         while ((cpu = ffs(cpus)) != 0) {
  986                 cpu--;
  987                 KASSERT(cpu_apic_ids[cpu] != -1,
  988                     ("IPI to non-existent CPU %d", cpu));
  989                 lapic_ipi_vectored(ipi, cpu_apic_ids[cpu]);
  990                 cpus &= ~(1 << cpu);
  991         }
  992 }
  993 
  994 /*
  995  * send the interrupt 'ipi' to all CPUs, including myself.
  996  */
  997 void
  998 ipi_all(u_int ipi)
  999 {
 1000 
 1001         CTR2(KTR_SMP, "%s: ipi: %x", __func__, ipi);
 1002         lapic_ipi_vectored(ipi, APIC_IPI_DEST_ALL);
 1003 }
 1004 
 1005 /*
 1006  * send an IPI to all CPUs EXCEPT myself
 1007  */
 1008 void
 1009 ipi_all_but_self(u_int ipi)
 1010 {
 1011 
 1012         CTR2(KTR_SMP, "%s: ipi: %x", __func__, ipi);
 1013         lapic_ipi_vectored(ipi, APIC_IPI_DEST_OTHERS);
 1014 }
 1015 
 1016 /*
 1017  * send an IPI to myself
 1018  */
 1019 void
 1020 ipi_self(u_int ipi)
 1021 {
 1022 
 1023         CTR2(KTR_SMP, "%s: ipi: %x", __func__, ipi);
 1024         lapic_ipi_vectored(ipi, APIC_IPI_DEST_SELF);
 1025 }
 1026 
 1027 /*
 1028  * This is called once the rest of the system is up and running and we're
 1029  * ready to let the APs out of the pen.
 1030  */
 1031 static void
 1032 release_aps(void *dummy __unused)
 1033 {
 1034 
 1035         if (mp_ncpus == 1) 
 1036                 return;
 1037         mtx_lock_spin(&sched_lock);
 1038         atomic_store_rel_int(&aps_ready, 1);
 1039         while (smp_started == 0)
 1040                 ia32_pause();
 1041         mtx_unlock_spin(&sched_lock);
 1042 }
 1043 SYSINIT(start_aps, SI_SUB_SMP, SI_ORDER_FIRST, release_aps, NULL);
 1044 
 1045 static int
 1046 sysctl_hlt_cpus(SYSCTL_HANDLER_ARGS)
 1047 {
 1048         u_int mask;
 1049         int error;
 1050 
 1051         mask = hlt_cpus_mask;
 1052         error = sysctl_handle_int(oidp, &mask, 0, req);
 1053         if (error || !req->newptr)
 1054                 return (error);
 1055 
 1056         if (logical_cpus_mask != 0 &&
 1057             (mask & logical_cpus_mask) == logical_cpus_mask)
 1058                 hlt_logical_cpus = 1;
 1059         else
 1060                 hlt_logical_cpus = 0;
 1061 
 1062         if (! hyperthreading_allowed)
 1063                 mask |= hyperthreading_cpus_mask;
 1064 
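              /* Never halt every CPU; always leave CPU 0 (the BSP) running. */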
 1065         if ((mask & all_cpus) == all_cpus)
 1066                 mask &= ~(1<<0);
 1067         hlt_cpus_mask = mask;
 1068         return (error);
 1069 }
 1070 SYSCTL_PROC(_machdep, OID_AUTO, hlt_cpus, CTLTYPE_INT|CTLFLAG_RW,
 1071     0, 0, sysctl_hlt_cpus, "IU",
 1072     "Bitmap of CPUs to halt.  101 (binary) will halt CPUs 0 and 2.");
 1073 
 1074 static int
 1075 sysctl_hlt_logical_cpus(SYSCTL_HANDLER_ARGS)
 1076 {
 1077         int disable, error;
 1078 
 1079         disable = hlt_logical_cpus;
 1080         error = sysctl_handle_int(oidp, &disable, 0, req);
 1081         if (error || !req->newptr)
 1082                 return (error);
 1083 
 1084         if (disable)
 1085                 hlt_cpus_mask |= logical_cpus_mask;
 1086         else
 1087                 hlt_cpus_mask &= ~logical_cpus_mask;
 1088 
 1089         if (! hyperthreading_allowed)
 1090                 hlt_cpus_mask |= hyperthreading_cpus_mask;
 1091 
 1092         if ((hlt_cpus_mask & all_cpus) == all_cpus)
 1093                 hlt_cpus_mask &= ~(1<<0);
 1094 
 1095         hlt_logical_cpus = disable;
 1096         return (error);
 1097 }
 1098 
 1099 static int
 1100 sysctl_hyperthreading_allowed(SYSCTL_HANDLER_ARGS)
 1101 {
 1102         int allowed, error;
 1103 
 1104         allowed = hyperthreading_allowed;
 1105         error = sysctl_handle_int(oidp, &allowed, 0, req);
 1106         if (error || !req->newptr)
 1107                 return (error);
 1108 
 1109         if (allowed)
 1110                 hlt_cpus_mask &= ~hyperthreading_cpus_mask;
 1111         else
 1112                 hlt_cpus_mask |= hyperthreading_cpus_mask;
 1113 
 1114         if (logical_cpus_mask != 0 &&
 1115             (hlt_cpus_mask & logical_cpus_mask) == logical_cpus_mask)
 1116                 hlt_logical_cpus = 1;
 1117         else
 1118                 hlt_logical_cpus = 0;
 1119 
 1120         if ((hlt_cpus_mask & all_cpus) == all_cpus)
 1121                 hlt_cpus_mask &= ~(1<<0);
 1122 
 1123         hyperthreading_allowed = allowed;
 1124         return (error);
 1125 }
 1126 
 1127 static void
 1128 cpu_hlt_setup(void *dummy __unused)
 1129 {
 1130 
 1131         if (logical_cpus_mask != 0) {
 1132                 TUNABLE_INT_FETCH("machdep.hlt_logical_cpus",
 1133                     &hlt_logical_cpus);
 1134                 sysctl_ctx_init(&logical_cpu_clist);
 1135                 SYSCTL_ADD_PROC(&logical_cpu_clist,
 1136                     SYSCTL_STATIC_CHILDREN(_machdep), OID_AUTO,
 1137                     "hlt_logical_cpus", CTLTYPE_INT|CTLFLAG_RW, 0, 0,
 1138                     sysctl_hlt_logical_cpus, "IU", "");
 1139                 SYSCTL_ADD_UINT(&logical_cpu_clist,
 1140                     SYSCTL_STATIC_CHILDREN(_machdep), OID_AUTO,
 1141                     "logical_cpus_mask", CTLTYPE_INT|CTLFLAG_RD,
 1142                     &logical_cpus_mask, 0, "");
 1143 
 1144                 if (hlt_logical_cpus)
 1145                         hlt_cpus_mask |= logical_cpus_mask;
 1146 
 1147                 /*
 1148                  * If necessary for security purposes, force
 1149                  * hyperthreading off, regardless of the value
 1150                  * of hlt_logical_cpus.
 1151                  */
 1152                 if (hyperthreading_cpus_mask) {
 1153                         TUNABLE_INT_FETCH("machdep.hyperthreading_allowed",
 1154                             &hyperthreading_allowed);
 1155                         SYSCTL_ADD_PROC(&logical_cpu_clist,
 1156                             SYSCTL_STATIC_CHILDREN(_machdep), OID_AUTO,
 1157                             "hyperthreading_allowed", CTLTYPE_INT|CTLFLAG_RW,
 1158                             0, 0, sysctl_hyperthreading_allowed, "IU", "");
 1159                         if (! hyperthreading_allowed)
 1160                                 hlt_cpus_mask |= hyperthreading_cpus_mask;
 1161                 }
 1162         }
 1163 }
 1164 SYSINIT(cpu_hlt, SI_SUB_SMP, SI_ORDER_ANY, cpu_hlt_setup, NULL);
 1165 
 1166 int
 1167 mp_grab_cpu_hlt(void)
 1168 {
 1169         u_int mask = PCPU_GET(cpumask);
 1170         int retval;
 1171 
 1172         retval = mask & hlt_cpus_mask;
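              /*
               * "sti; hlt" halts before any interrupt can be taken (sti defers
               * interrupt recognition by one instruction), so a pending IPI or
               * other interrupt then wakes us to re-check hlt_cpus_mask.
               */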
 1173         while (mask & hlt_cpus_mask)
 1174                 __asm __volatile("sti; hlt" : : : "memory");
 1175         return (retval);
 1176 }
