FreeBSD/Linux Kernel Cross Reference
sys/x86/x86/local_apic.c


    1 /*-
    2  * Copyright (c) 2003 John Baldwin <jhb@FreeBSD.org>
    3  * Copyright (c) 1996, by Steve Passe
    4  * All rights reserved.
    5  *
    6  * Redistribution and use in source and binary forms, with or without
    7  * modification, are permitted provided that the following conditions
    8  * are met:
    9  * 1. Redistributions of source code must retain the above copyright
   10  *    notice, this list of conditions and the following disclaimer.
   11  * 2. The name of the developer may NOT be used to endorse or promote products
   12  *    derived from this software without specific prior written permission.
   13  * 3. Neither the name of the author nor the names of any co-contributors
   14  *    may be used to endorse or promote products derived from this software
   15  *    without specific prior written permission.
   16  *
   17  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
   18  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   19  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   20  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
   21  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   22  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
   23  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   24  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   25  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   26  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   27  * SUCH DAMAGE.
   28  */
   29 
   30 /*
   31  * Local APIC support on Pentium and later processors.
   32  */
   33 
   34 #include <sys/cdefs.h>
   35 __FBSDID("$FreeBSD$");
   36 
   37 #include "opt_hwpmc_hooks.h"
   38 #include "opt_kdtrace.h"
   39 
   40 #include "opt_ddb.h"
   41 
   42 #include <sys/param.h>
   43 #include <sys/systm.h>
   44 #include <sys/bus.h>
   45 #include <sys/kernel.h>
   46 #include <sys/lock.h>
   47 #include <sys/mutex.h>
   48 #include <sys/pcpu.h>
   49 #include <sys/proc.h>
   50 #include <sys/sched.h>
   51 #include <sys/smp.h>
   52 
   53 #include <vm/vm.h>
   54 #include <vm/pmap.h>
   55 
   56 #include <machine/apicreg.h>
   57 #include <machine/cpu.h>
   58 #include <machine/cputypes.h>
   59 #include <machine/frame.h>
   60 #include <machine/intr_machdep.h>
   61 #include <machine/apicvar.h>
   62 #include <machine/mca.h>
   63 #include <machine/md_var.h>
   64 #include <machine/smp.h>
   65 #include <machine/specialreg.h>
   66 
   67 #ifdef DDB
   68 #include <sys/interrupt.h>
   69 #include <ddb/ddb.h>
   70 #endif
   71 
   72 #ifdef __amd64__
   73 #define SDT_APIC        SDT_SYSIGT
   74 #define SDT_APICT       SDT_SYSIGT
   75 #define GSEL_APIC       0
   76 #else
   77 #define SDT_APIC        SDT_SYS386IGT
   78 #define SDT_APICT       SDT_SYS386TGT
   79 #define GSEL_APIC       GSEL(GCODE_SEL, SEL_KPL)
   80 #endif
   81 
   82 #ifdef KDTRACE_HOOKS
   83 #include <sys/dtrace_bsd.h>
   84 cyclic_clock_func_t     cyclic_clock_func[MAXCPU];
   85 #endif
   86 
   87 /* Sanity checks on IDT vectors. */
   88 CTASSERT(APIC_IO_INTS + APIC_NUM_IOINTS == APIC_TIMER_INT);
   89 CTASSERT(APIC_TIMER_INT < APIC_LOCAL_INTS);
   90 CTASSERT(APIC_LOCAL_INTS == 240);
   91 CTASSERT(IPI_STOP < APIC_SPURIOUS_INT);
   92 
   93 /* Magic IRQ values for the timer and syscalls. */
   94 #define IRQ_TIMER       (NUM_IO_INTS + 1)
   95 #define IRQ_SYSCALL     (NUM_IO_INTS + 2)
   96 #define IRQ_DTRACE_RET  (NUM_IO_INTS + 3)
   97 
   98 /*
   99  * Support for local APICs.  Local APICs manage interrupts on each
  100  * individual processor as opposed to I/O APICs which receive interrupts
  101  * from I/O devices and then forward them on to the local APICs.
  102  *
  103  * Local APICs can also send interrupts to each other thus providing the
  104  * mechanism for IPIs.
  105  */
  106 
  107 struct lvt {
  108         u_int lvt_edgetrigger:1;
  109         u_int lvt_activehi:1;
  110         u_int lvt_masked:1;
  111         u_int lvt_active:1;
  112         u_int lvt_mode:16;
  113         u_int lvt_vector:8;
  114 };
  115 
  116 struct lapic {
  117         struct lvt la_lvts[LVT_MAX + 1];
  118         u_int la_id:8;
  119         u_int la_cluster:4;
  120         u_int la_cluster_id:2;
  121         u_int la_present:1;
  122         u_long *la_timer_count;
  123         u_long la_hard_ticks;
  124         u_long la_stat_ticks;
  125         u_long la_prof_ticks;
  126         /* Include IDT_SYSCALL to make indexing easier. */
  127         int la_ioint_irqs[APIC_NUM_IOINTS + 1];
  128 } static lapics[MAX_APIC_ID + 1];
  129 
  130 /* Global defaults for local APIC LVT entries. */
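      /*
       * The positional initializers below follow the field order of struct
       * lvt: lvt_edgetrigger, lvt_activehi, lvt_masked, lvt_active,
       * lvt_mode, lvt_vector.  The LINT1 entry, for instance, describes an
       * edge-triggered, active-high, unmasked NMI with vector 0.
       */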
  131 static struct lvt lvts[LVT_MAX + 1] = {
  132         { 1, 1, 1, 1, APIC_LVT_DM_EXTINT, 0 },  /* LINT0: masked ExtINT */
  133         { 1, 1, 0, 1, APIC_LVT_DM_NMI, 0 },     /* LINT1: NMI */
  134         { 1, 1, 1, 1, APIC_LVT_DM_FIXED, APIC_TIMER_INT },      /* Timer */
  135         { 1, 1, 0, 1, APIC_LVT_DM_FIXED, APIC_ERROR_INT },      /* Error */
  136         { 1, 1, 1, 1, APIC_LVT_DM_NMI, 0 },     /* PMC */
  137         { 1, 1, 1, 1, APIC_LVT_DM_FIXED, APIC_THERMAL_INT },    /* Thermal */
  138         { 1, 1, 1, 1, APIC_LVT_DM_FIXED, APIC_CMC_INT },        /* CMCI */
  139 };
  140 
  141 static inthand_t *ioint_handlers[] = {
  142         NULL,                   /* 0 - 31 */
  143         IDTVEC(apic_isr1),      /* 32 - 63 */
  144         IDTVEC(apic_isr2),      /* 64 - 95 */
  145         IDTVEC(apic_isr3),      /* 96 - 127 */
  146         IDTVEC(apic_isr4),      /* 128 - 159 */
  147         IDTVEC(apic_isr5),      /* 160 - 191 */
  148         IDTVEC(apic_isr6),      /* 192 - 223 */
  149         IDTVEC(apic_isr7),      /* 224 - 255 */
  150 };
  151 
  152 
  153 static u_int32_t lapic_timer_divisors[] = {
  154         APIC_TDCR_1, APIC_TDCR_2, APIC_TDCR_4, APIC_TDCR_8, APIC_TDCR_16,
  155         APIC_TDCR_32, APIC_TDCR_64, APIC_TDCR_128
  156 };
  157 
  158 extern inthand_t IDTVEC(rsvd);
  159 
  160 volatile lapic_t *lapic;
  161 vm_paddr_t lapic_paddr;
  162 static u_long lapic_timer_divisor, lapic_timer_period, lapic_timer_hz;
  163 static enum lapic_clock clockcoverage;
  164 
  165 static void     lapic_enable(void);
  166 static void     lapic_resume(struct pic *pic);
  167 static void     lapic_timer_enable_intr(void);
  168 static void     lapic_timer_oneshot(u_int count);
  169 static void     lapic_timer_periodic(u_int count);
  170 static void     lapic_timer_set_divisor(u_int divisor);
  171 static uint32_t lvt_mode(struct lapic *la, u_int pin, uint32_t value);
  172 
  173 struct pic lapic_pic = { .pic_resume = lapic_resume };
  174 
  175 static uint32_t
  176 lvt_mode(struct lapic *la, u_int pin, uint32_t value)
  177 {
  178         struct lvt *lvt;
  179 
  180         KASSERT(pin <= LVT_MAX, ("%s: pin %u out of range", __func__, pin));
  181         if (la->la_lvts[pin].lvt_active)
  182                 lvt = &la->la_lvts[pin];
  183         else
  184                 lvt = &lvts[pin];
  185 
  186         value &= ~(APIC_LVT_M | APIC_LVT_TM | APIC_LVT_IIPP | APIC_LVT_DM |
  187             APIC_LVT_VECTOR);
  188         if (lvt->lvt_edgetrigger == 0)
  189                 value |= APIC_LVT_TM;
  190         if (lvt->lvt_activehi == 0)
  191                 value |= APIC_LVT_IIPP_INTALO;
  192         if (lvt->lvt_masked)
  193                 value |= APIC_LVT_M;
  194         value |= lvt->lvt_mode;
  195         switch (lvt->lvt_mode) {
  196         case APIC_LVT_DM_NMI:
  197         case APIC_LVT_DM_SMI:
  198         case APIC_LVT_DM_INIT:
  199         case APIC_LVT_DM_EXTINT:
  200                 if (!lvt->lvt_edgetrigger) {
  201                         printf("lapic%u: Forcing LINT%u to edge trigger\n",
  202                             la->la_id, pin);
  203                         value |= APIC_LVT_TM;
  204                 }
  205                 /* Use a vector of 0. */
  206                 break;
  207         case APIC_LVT_DM_FIXED:
  208                 value |= lvt->lvt_vector;
  209                 break;
  210         default:
  211                 panic("bad APIC LVT delivery mode: %#x\n", value);
  212         }
  213         return (value);
  214 }
  215 
  216 /*
  217  * Map the local APIC and setup necessary interrupt vectors.
  218  */
  219 void
  220 lapic_init(vm_paddr_t addr)
  221 {
  222 
  223         /* Map the local APIC and setup the spurious interrupt handler. */
  224         KASSERT(trunc_page(addr) == addr,
  225             ("local APIC not aligned on a page boundary"));
  226         lapic_paddr = addr;
  227         lapic = pmap_mapdev(addr, sizeof(lapic_t));
  228         setidt(APIC_SPURIOUS_INT, IDTVEC(spuriousint), SDT_APIC, SEL_KPL,
  229             GSEL_APIC);
  230 
  231         /* Perform basic initialization of the BSP's local APIC. */
  232         lapic_enable();
  233 
  234         /* Set BSP's per-CPU local APIC ID. */
  235         PCPU_SET(apic_id, lapic_id());
  236 
  237         /* Local APIC timer interrupt. */
  238         setidt(APIC_TIMER_INT, IDTVEC(timerint), SDT_APIC, SEL_KPL, GSEL_APIC);
  239 
  240         /* Local APIC error interrupt. */
  241         setidt(APIC_ERROR_INT, IDTVEC(errorint), SDT_APIC, SEL_KPL, GSEL_APIC);
  242 
  243         /* XXX: Thermal interrupt */
  244 
  245         /* Local APIC CMCI. */
  246         setidt(APIC_CMC_INT, IDTVEC(cmcint), SDT_APICT, SEL_KPL,
  247             GSEL(GCODE_SEL, SEL_KPL));
  248 }
  249 
  250 /*
  251  * Create a local APIC instance.
  252  */
  253 void
  254 lapic_create(u_int apic_id, int boot_cpu)
  255 {
  256         int i;
  257 
  258         if (apic_id > MAX_APIC_ID) {
  259                 printf("APIC: Ignoring local APIC with ID %d\n", apic_id);
  260                 if (boot_cpu)
  261                         panic("Can't ignore BSP");
  262                 return;
  263         }
  264         KASSERT(!lapics[apic_id].la_present, ("duplicate local APIC %u",
  265             apic_id));
  266 
  267         /*
  268          * Assume no local LVT overrides and a cluster of 0 and
  269          * intra-cluster ID of 0.
  270          */
  271         lapics[apic_id].la_present = 1;
  272         lapics[apic_id].la_id = apic_id;
  273         for (i = 0; i <= LVT_MAX; i++) {
  274                 lapics[apic_id].la_lvts[i] = lvts[i];
  275                 lapics[apic_id].la_lvts[i].lvt_active = 0;
  276         }
  277         for (i = 0; i <= APIC_NUM_IOINTS; i++)
  278             lapics[apic_id].la_ioint_irqs[i] = -1;
  279         lapics[apic_id].la_ioint_irqs[IDT_SYSCALL - APIC_IO_INTS] = IRQ_SYSCALL;
  280         lapics[apic_id].la_ioint_irqs[APIC_TIMER_INT - APIC_IO_INTS] =
  281             IRQ_TIMER;
  282 #ifdef KDTRACE_HOOKS
  283         lapics[apic_id].la_ioint_irqs[IDT_DTRACE_RET - APIC_IO_INTS] = IRQ_DTRACE_RET;
  284 #endif
  285 
  286 
  287 #ifdef SMP
  288         cpu_add(apic_id, boot_cpu);
  289 #endif
  290 }
  291 
  292 /*
  293  * Dump contents of local APIC registers
  294  */
  295 void
  296 lapic_dump(const char* str)
  297 {
  298         uint32_t maxlvt;
  299 
  300         maxlvt = (lapic->version & APIC_VER_MAXLVT) >> MAXLVTSHIFT;
  301         printf("cpu%d %s:\n", PCPU_GET(cpuid), str);
  302         printf("     ID: 0x%08x   VER: 0x%08x LDR: 0x%08x DFR: 0x%08x\n",
  303             lapic->id, lapic->version, lapic->ldr, lapic->dfr);
  304         printf("  lint0: 0x%08x lint1: 0x%08x TPR: 0x%08x SVR: 0x%08x\n",
  305             lapic->lvt_lint0, lapic->lvt_lint1, lapic->tpr, lapic->svr);
  306         printf("  timer: 0x%08x therm: 0x%08x err: 0x%08x",
  307             lapic->lvt_timer, lapic->lvt_thermal, lapic->lvt_error);
  308         if (maxlvt >= LVT_PMC)
  309                 printf(" pmc: 0x%08x", lapic->lvt_pcint);
  310         printf("\n");
  311         if (maxlvt >= LVT_CMCI)
  312                 printf("   cmci: 0x%08x\n", lapic->lvt_cmci);
  313 }
  314 
  315 void
  316 lapic_setup(int boot)
  317 {
  318         struct lapic *la;
  319         u_int32_t maxlvt;
  320         register_t saveintr;
  321         char buf[MAXCOMLEN + 1];
  322 
  323         la = &lapics[lapic_id()];
  324         KASSERT(la->la_present, ("missing APIC structure"));
  325         saveintr = intr_disable();
  326         maxlvt = (lapic->version & APIC_VER_MAXLVT) >> MAXLVTSHIFT;
  327 
  328         /* Initialize the TPR to allow all interrupts. */
  329         lapic_set_tpr(0);
  330 
  331         /* Setup spurious vector and enable the local APIC. */
  332         lapic_enable();
  333 
  334         /* Program LINT[01] LVT entries. */
  335         lapic->lvt_lint0 = lvt_mode(la, LVT_LINT0, lapic->lvt_lint0);
  336         lapic->lvt_lint1 = lvt_mode(la, LVT_LINT1, lapic->lvt_lint1);
  337 
  338         /* Program the PMC LVT entry if present. */
  339         if (maxlvt >= LVT_PMC)
  340                 lapic->lvt_pcint = lvt_mode(la, LVT_PMC, lapic->lvt_pcint);
  341 
  342         /* Program timer LVT and setup handler. */
  343         lapic->lvt_timer = lvt_mode(la, LVT_TIMER, lapic->lvt_timer);
  344         if (boot) {
  345                 snprintf(buf, sizeof(buf), "cpu%d: timer", PCPU_GET(cpuid));
  346                 intrcnt_add(buf, &la->la_timer_count);
  347         }
  348 
  349         /* We don't setup the timer during boot on the BSP until later. */
  350         if (!(boot && PCPU_GET(cpuid) == 0) && lapic_timer_hz != 0) {
  351                 KASSERT(lapic_timer_period != 0, ("lapic%u: zero divisor",
  352                     lapic_id()));
  353                 lapic_timer_set_divisor(lapic_timer_divisor);
  354                 lapic_timer_periodic(lapic_timer_period);
  355                 lapic_timer_enable_intr();
  356         }
  357 
  358         /* Program error LVT and clear any existing errors. */
  359         lapic->lvt_error = lvt_mode(la, LVT_ERROR, lapic->lvt_error);
  360         lapic->esr = 0;
  361 
  362         /* XXX: Thermal LVT */
  363 
  364         /* Program the CMCI LVT entry if present. */
  365         if (maxlvt >= LVT_CMCI)
  366                 lapic->lvt_cmci = lvt_mode(la, LVT_CMCI, lapic->lvt_cmci);
  367             
  368         intr_restore(saveintr);
  369 }
  370 
  371 void
  372 lapic_reenable_pmc(void)
  373 {
  374 #ifdef HWPMC_HOOKS
  375         uint32_t value;
  376 
  377         value =  lapic->lvt_pcint;
  378         value &= ~APIC_LVT_M;
  379         lapic->lvt_pcint = value;
  380 #endif
  381 }
  382 
  383 #ifdef HWPMC_HOOKS
  384 static void
  385 lapic_update_pmc(void *dummy)
  386 {
  387         struct lapic *la;
  388 
  389         la = &lapics[lapic_id()];
  390         lapic->lvt_pcint = lvt_mode(la, LVT_PMC, lapic->lvt_pcint);
  391 }
  392 #endif
  393 
  394 int
  395 lapic_enable_pmc(void)
  396 {
  397 #ifdef HWPMC_HOOKS
  398         u_int32_t maxlvt;
  399 
  400         /* Fail if the local APIC is not present. */
  401         if (lapic == NULL)
  402                 return (0);
  403 
  404         /* Fail if the PMC LVT is not present. */
  405         maxlvt = (lapic->version & APIC_VER_MAXLVT) >> MAXLVTSHIFT;
  406         if (maxlvt < LVT_PMC)
  407                 return (0);
  408 
  409         lvts[LVT_PMC].lvt_masked = 0;
  410 
  411 #ifdef SMP
  412         /*
  413          * If hwpmc was loaded at boot time then the APs may not be
  414          * started yet.  In that case, don't forward the request to
  415          * them as they will program the lvt when they start.
  416          */
  417         if (smp_started)
  418                 smp_rendezvous(NULL, lapic_update_pmc, NULL, NULL);
  419         else
  420 #endif
  421                 lapic_update_pmc(NULL);
  422         return (1);
  423 #else
  424         return (0);
  425 #endif
  426 }
  427 
  428 void
  429 lapic_disable_pmc(void)
  430 {
  431 #ifdef HWPMC_HOOKS
  432         u_int32_t maxlvt;
  433 
  434         /* Fail if the local APIC is not present. */
  435         if (lapic == NULL)
  436                 return;
  437 
  438         /* Fail if the PMC LVT is not present. */
  439         maxlvt = (lapic->version & APIC_VER_MAXLVT) >> MAXLVTSHIFT;
  440         if (maxlvt < LVT_PMC)
  441                 return;
  442 
  443         lvts[LVT_PMC].lvt_masked = 1;
  444 
  445 #ifdef SMP
  446         /* The APs should always be started when hwpmc is unloaded. */
  447         KASSERT(mp_ncpus == 1 || smp_started, ("hwpmc unloaded too early"));
  448 #endif
  449         smp_rendezvous(NULL, lapic_update_pmc, NULL, NULL);
  450 #endif
  451 }
  452 
  453 /*
  454  * Called by cpu_initclocks() on the BSP to setup the local APIC timer so
  455  * that it can drive hardclock, statclock, and profclock. 
  456  */
  457 enum lapic_clock
  458 lapic_setup_clock(enum lapic_clock srcsdes)
  459 {
  460         u_long value;
  461         int i;
  462 
  463         /* lapic_setup_clock() should not be called with LAPIC_CLOCK_NONE. */
  464         MPASS(srcsdes != LAPIC_CLOCK_NONE);
  465 
  466         /* Can't drive the timer without a local APIC. */
  467         if (lapic == NULL ||
  468             (resource_int_value("apic", 0, "clock", &i) == 0 && i == 0)) {
  469                 clockcoverage = LAPIC_CLOCK_NONE;
  470                 return (clockcoverage);
  471         }
  472 
  473         /* Start off with a divisor of 2 (power on reset default). */
  474         lapic_timer_divisor = 2;
  475 
  476         /* Try to calibrate the local APIC timer. */
  477         do {
  478                 lapic_timer_set_divisor(lapic_timer_divisor);
  479                 lapic_timer_oneshot(APIC_TIMER_MAX_COUNT);
  480                 DELAY(2000000);
  481                 value = APIC_TIMER_MAX_COUNT - lapic->ccr_timer;
  482                 if (value != APIC_TIMER_MAX_COUNT)
  483                         break;
  484                 lapic_timer_divisor <<= 1;
  485         } while (lapic_timer_divisor <= 128);
  486         if (lapic_timer_divisor > 128)
  487                 panic("lapic: Divisor too big");
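              /*
               * DELAY(2000000) above waited two million microseconds, i.e.
               * two seconds, so halving the measured count below yields the
               * timer frequency in counts per second at the chosen divisor.
               */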
  488         value /= 2;
  489         if (bootverbose)
  490                 printf("lapic: Divisor %lu, Frequency %lu Hz\n",
  491                     lapic_timer_divisor, value);
  492 
  493         /*
  494          * We want to run stathz in the neighborhood of 128hz.  We would
  495          * like profhz to run as often as possible, so we let it run on
  496          * each clock tick.  We try to honor the requested 'hz' value as
  497          * much as possible.
  498          *
  499          * If 'hz' is above 1500, then we just let the lapic timer
  500          * (and profhz) run at hz.  If 'hz' is below 1500 but above
  501          * 750, then we let the lapic timer run at 2 * 'hz'.  If 'hz'
  502          * is below 750 then we let the lapic timer run at 4 * 'hz'.
  503          *
  504          * Please note that stathz and profhz are set only if all the
  505          * clocks are handled through the local APIC.
  506          */
  507         if (srcsdes == LAPIC_CLOCK_ALL) {
  508                 if (hz >= 1500)
  509                         lapic_timer_hz = hz;
  510                 else if (hz >= 750)
  511                         lapic_timer_hz = hz * 2;
  512                 else
  513                         lapic_timer_hz = hz * 4;
  514         } else
  515                 lapic_timer_hz = hz;
  516         lapic_timer_period = value / lapic_timer_hz;
  517         if (srcsdes == LAPIC_CLOCK_ALL) {
  518                 if (lapic_timer_hz < 128)
  519                         stathz = lapic_timer_hz;
  520                 else
  521                         stathz = lapic_timer_hz / (lapic_timer_hz / 128);
  522                 profhz = lapic_timer_hz;
  523         }
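              /*
               * Worked example with hypothetical numbers: if the calibration
               * above measured 100000000 counts per second and hz is 1000,
               * then under LAPIC_CLOCK_ALL lapic_timer_hz becomes 2000
               * (750 <= hz < 1500), lapic_timer_period is 100000000 / 2000 =
               * 50000 counts, stathz is 2000 / (2000 / 128) = 133, and
               * profhz is 2000.
               */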
  524 
  525         /*
  526          * Start up the timer on the BSP.  The APs will kick off their
  527          * timer during lapic_setup().
  528          */
  529         lapic_timer_periodic(lapic_timer_period);
  530         lapic_timer_enable_intr();
  531         clockcoverage = srcsdes;
  532         return (srcsdes);
  533 }
  534 
  535 void
  536 lapic_disable(void)
  537 {
  538         uint32_t value;
  539 
  540         /* Software disable the local APIC. */
  541         value = lapic->svr;
  542         value &= ~APIC_SVR_SWEN;
  543         lapic->svr = value;
  544 }
  545 
  546 static void
  547 lapic_enable(void)
  548 {
  549         u_int32_t value;
  550 
  551         /* Program the spurious vector to enable the local APIC. */
  552         value = lapic->svr;
  553         value &= ~(APIC_SVR_VECTOR | APIC_SVR_FOCUS);
  554         value |= (APIC_SVR_FEN | APIC_SVR_SWEN | APIC_SPURIOUS_INT);
  555         lapic->svr = value;
  556 }
  557 
  558 /* Reset the local APIC on the BSP during resume. */
  559 static void
  560 lapic_resume(struct pic *pic)
  561 {
  562 
  563         lapic_setup(0);
  564 }
  565 
  566 int
  567 lapic_id(void)
  568 {
  569 
  570         KASSERT(lapic != NULL, ("local APIC is not mapped"));
  571         return (lapic->id >> APIC_ID_SHIFT);
  572 }
  573 
  574 int
  575 lapic_intr_pending(u_int vector)
  576 {
  577         volatile u_int32_t *irr;
  578 
  579         /*
  580          * The IRR registers are an array of 128-bit registers each of
  581          * which only describes 32 interrupts in the low 32 bits.  Thus,
  582          * we divide the vector by 32 to get the 128-bit index.  We then
  583          * multiply that index by 4 to get the equivalent index from
  584          * treating the IRR as an array of 32-bit registers.  Finally, we
  585          * take the vector modulo 32 to determine the individual bit to
  586          * test.
  587          */
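              /*
               * For example, with vector 69 (0x45): 69 / 32 = 2 selects the
               * third 128-bit IRR register, 2 * 4 = 8 gives irr[8] (the low
               * 32-bit word of that register), and 69 % 32 = 5 picks bit 5
               * (mask 0x20) within it.
               */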
  588         irr = &lapic->irr0;
  589         return (irr[(vector / 32) * 4] & 1 << (vector % 32));
  590 }
  591 
  592 void
  593 lapic_set_logical_id(u_int apic_id, u_int cluster, u_int cluster_id)
  594 {
  595         struct lapic *la;
  596 
  597         KASSERT(lapics[apic_id].la_present, ("%s: APIC %u doesn't exist",
  598             __func__, apic_id));
  599         KASSERT(cluster <= APIC_MAX_CLUSTER, ("%s: cluster %u too big",
  600             __func__, cluster));
  601         KASSERT(cluster_id <= APIC_MAX_INTRACLUSTER_ID,
  602             ("%s: intra cluster id %u too big", __func__, cluster_id));
  603         la = &lapics[apic_id];
  604         la->la_cluster = cluster;
  605         la->la_cluster_id = cluster_id;
  606 }
  607 
  608 int
  609 lapic_set_lvt_mask(u_int apic_id, u_int pin, u_char masked)
  610 {
  611 
  612         if (pin > LVT_MAX)
  613                 return (EINVAL);
  614         if (apic_id == APIC_ID_ALL) {
  615                 lvts[pin].lvt_masked = masked;
  616                 if (bootverbose)
  617                         printf("lapic:");
  618         } else {
  619                 KASSERT(lapics[apic_id].la_present,
  620                     ("%s: missing APIC %u", __func__, apic_id));
  621                 lapics[apic_id].la_lvts[pin].lvt_masked = masked;
  622                 lapics[apic_id].la_lvts[pin].lvt_active = 1;
  623                 if (bootverbose)
  624                         printf("lapic%u:", apic_id);
  625         }
  626         if (bootverbose)
  627                 printf(" LINT%u %s\n", pin, masked ? "masked" : "unmasked");
  628         return (0);
  629 }
  630 
  631 int
  632 lapic_set_lvt_mode(u_int apic_id, u_int pin, u_int32_t mode)
  633 {
  634         struct lvt *lvt;
  635 
  636         if (pin > LVT_MAX)
  637                 return (EINVAL);
  638         if (apic_id == APIC_ID_ALL) {
  639                 lvt = &lvts[pin];
  640                 if (bootverbose)
  641                         printf("lapic:");
  642         } else {
  643                 KASSERT(lapics[apic_id].la_present,
  644                     ("%s: missing APIC %u", __func__, apic_id));
  645                 lvt = &lapics[apic_id].la_lvts[pin];
  646                 lvt->lvt_active = 1;
  647                 if (bootverbose)
  648                         printf("lapic%u:", apic_id);
  649         }
  650         lvt->lvt_mode = mode;
  651         switch (mode) {
  652         case APIC_LVT_DM_NMI:
  653         case APIC_LVT_DM_SMI:
  654         case APIC_LVT_DM_INIT:
  655         case APIC_LVT_DM_EXTINT:
  656                 lvt->lvt_edgetrigger = 1;
  657                 lvt->lvt_activehi = 1;
  658                 if (mode == APIC_LVT_DM_EXTINT)
  659                         lvt->lvt_masked = 1;
  660                 else
  661                         lvt->lvt_masked = 0;
  662                 break;
  663         default:
  664                 panic("Unsupported delivery mode: 0x%x\n", mode);
  665         }
  666         if (bootverbose) {
  667                 printf(" Routing ");
  668                 switch (mode) {
  669                 case APIC_LVT_DM_NMI:
  670                         printf("NMI");
  671                         break;
  672                 case APIC_LVT_DM_SMI:
  673                         printf("SMI");
  674                         break;
  675                 case APIC_LVT_DM_INIT:
  676                         printf("INIT");
  677                         break;
  678                 case APIC_LVT_DM_EXTINT:
  679                         printf("ExtINT");
  680                         break;
  681                 }
  682                 printf(" -> LINT%u\n", pin);
  683         }
  684         return (0);
  685 }
  686 
  687 int
  688 lapic_set_lvt_polarity(u_int apic_id, u_int pin, enum intr_polarity pol)
  689 {
  690 
  691         if (pin > LVT_MAX || pol == INTR_POLARITY_CONFORM)
  692                 return (EINVAL);
  693         if (apic_id == APIC_ID_ALL) {
  694                 lvts[pin].lvt_activehi = (pol == INTR_POLARITY_HIGH);
  695                 if (bootverbose)
  696                         printf("lapic:");
  697         } else {
  698                 KASSERT(lapics[apic_id].la_present,
  699                     ("%s: missing APIC %u", __func__, apic_id));
  700                 lapics[apic_id].la_lvts[pin].lvt_active = 1;
  701                 lapics[apic_id].la_lvts[pin].lvt_activehi =
  702                     (pol == INTR_POLARITY_HIGH);
  703                 if (bootverbose)
  704                         printf("lapic%u:", apic_id);
  705         }
  706         if (bootverbose)
  707                 printf(" LINT%u polarity: %s\n", pin,
  708                     pol == INTR_POLARITY_HIGH ? "high" : "low");
  709         return (0);
  710 }
  711 
  712 int
  713 lapic_set_lvt_triggermode(u_int apic_id, u_int pin, enum intr_trigger trigger)
  714 {
  715 
  716         if (pin > LVT_MAX || trigger == INTR_TRIGGER_CONFORM)
  717                 return (EINVAL);
  718         if (apic_id == APIC_ID_ALL) {
  719                 lvts[pin].lvt_edgetrigger = (trigger == INTR_TRIGGER_EDGE);
  720                 if (bootverbose)
  721                         printf("lapic:");
  722         } else {
  723                 KASSERT(lapics[apic_id].la_present,
  724                     ("%s: missing APIC %u", __func__, apic_id));
  725                 lapics[apic_id].la_lvts[pin].lvt_edgetrigger =
  726                     (trigger == INTR_TRIGGER_EDGE);
  727                 lapics[apic_id].la_lvts[pin].lvt_active = 1;
  728                 if (bootverbose)
  729                         printf("lapic%u:", apic_id);
  730         }
  731         if (bootverbose)
  732                 printf(" LINT%u trigger: %s\n", pin,
  733                     trigger == INTR_TRIGGER_EDGE ? "edge" : "level");
  734         return (0);
  735 }
  736 
  737 /*
  738  * Adjust the TPR of the current CPU so that it blocks all interrupts below
  739  * the passed in vector.
  740  */
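      /*
       * The comparison is by 16-vector priority class, so writing 0x20 here,
       * for example, should inhibit delivery of fixed interrupts with
       * vectors 0x2f and below on this CPU.
       */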
  741 void
  742 lapic_set_tpr(u_int vector)
  743 {
  744 #ifdef CHEAP_TPR
  745         lapic->tpr = vector;
  746 #else
  747         u_int32_t tpr;
  748 
  749         tpr = lapic->tpr & ~APIC_TPR_PRIO;
  750         tpr |= vector;
  751         lapic->tpr = tpr;
  752 #endif
  753 }
  754 
  755 void
  756 lapic_eoi(void)
  757 {
  758 
  759         lapic->eoi = 0;
  760 }
  761 
  762 void
  763 lapic_handle_intr(int vector, struct trapframe *frame)
  764 {
  765         struct intsrc *isrc;
  766 
  767         if (vector == -1)
  768                 panic("Couldn't get vector from ISR!");
  769         isrc = intr_lookup_source(apic_idt_to_irq(PCPU_GET(apic_id),
  770             vector));
  771         intr_execute_handlers(isrc, frame);
  772 }
  773 
  774 void
  775 lapic_handle_timer(struct trapframe *frame)
  776 {
  777         struct lapic *la;
  778 
  779         /* Send EOI first thing. */
  780         lapic_eoi();
  781 
  782 #if defined(SMP) && !defined(SCHED_ULE)
  783         /*
  784          * Don't do any accounting for the disabled HTT cores, since it
  785          * will provide misleading numbers for the userland.
  786          *
  787          * No locking is necessary here, since even if we lose the race
  788          * when hlt_cpus_mask changes it is not a big deal, really.
  789          *
  790          * Don't do that for ULE, since ULE doesn't consider hlt_cpus_mask
  791          * and unlike other schedulers it actually schedules threads to
  792          * those CPUs.
  793          */
  794         if ((hlt_cpus_mask & (1 << PCPU_GET(cpuid))) != 0)
  795                 return;
  796 #endif
  797 
  798         /* Look up our local APIC structure for the tick counters. */
  799         la = &lapics[PCPU_GET(apic_id)];
  800         (*la->la_timer_count)++;
  801         critical_enter();
  802 
  803 #ifdef KDTRACE_HOOKS
  804         /*
  805          * If the DTrace hooks are configured and a callback function
  806          * has been registered, then call it to process the high speed
  807          * timers.
  808          */
  809         int cpu = PCPU_GET(cpuid);
  810         if (cyclic_clock_func[cpu] != NULL)
  811                 (*cyclic_clock_func[cpu])(frame);
  812 #endif
  813 
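              /*
               * Each accumulator below adds its clock's rate once per local
               * APIC timer tick and fires when it reaches lapic_timer_hz.
               * With lapic_timer_hz = 2000 and hz = 1000, for instance,
               * la_hard_ticks grows by 1000 per tick, so hardclock runs on
               * every second timer interrupt.
               */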
  814         /* Fire hardclock at hz. */
  815         la->la_hard_ticks += hz;
  816         if (la->la_hard_ticks >= lapic_timer_hz) {
  817                 la->la_hard_ticks -= lapic_timer_hz;
  818                 if (PCPU_GET(cpuid) == 0)
  819                         hardclock(TRAPF_USERMODE(frame), TRAPF_PC(frame));
  820                 else
  821                         hardclock_cpu(TRAPF_USERMODE(frame));
  822         }
  823         if (clockcoverage == LAPIC_CLOCK_ALL) {
  824 
  825                 /* Fire statclock at stathz. */
  826                 la->la_stat_ticks += stathz;
  827                 if (la->la_stat_ticks >= lapic_timer_hz) {
  828                         la->la_stat_ticks -= lapic_timer_hz;
  829                         statclock(TRAPF_USERMODE(frame));
  830                 }
  831 
  832                 /* Fire profclock at profhz, but only when needed. */
  833                 la->la_prof_ticks += profhz;
  834                 if (la->la_prof_ticks >= lapic_timer_hz) {
  835                         la->la_prof_ticks -= lapic_timer_hz;
  836                         if (profprocs != 0)
  837                                 profclock(TRAPF_USERMODE(frame),
  838                                     TRAPF_PC(frame));
  839                 }
  840         }
  841         critical_exit();
  842 }
  843 
  844 static void
  845 lapic_timer_set_divisor(u_int divisor)
  846 {
  847 
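              /*
               * ffs(divisor) - 1 maps a power-of-two divisor to its slot in
               * lapic_timer_divisors[]; a divisor of 8, for example, has
               * ffs(8) == 4 and selects lapic_timer_divisors[3], which is
               * APIC_TDCR_8.
               */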
  848         KASSERT(powerof2(divisor), ("lapic: invalid divisor %u", divisor));
  849         KASSERT(ffs(divisor) <= sizeof(lapic_timer_divisors) /
  850             sizeof(u_int32_t), ("lapic: invalid divisor %u", divisor));
  851         lapic->dcr_timer = lapic_timer_divisors[ffs(divisor) - 1];
  852 }
  853 
  854 static void
  855 lapic_timer_oneshot(u_int count)
  856 {
  857         u_int32_t value;
  858 
  859         value = lapic->lvt_timer;
  860         value &= ~APIC_LVTT_TM;
  861         value |= APIC_LVTT_TM_ONE_SHOT;
  862         lapic->lvt_timer = value;
  863         lapic->icr_timer = count;
  864 }
  865 
  866 static void
  867 lapic_timer_periodic(u_int count)
  868 {
  869         u_int32_t value;
  870 
  871         value = lapic->lvt_timer;
  872         value &= ~APIC_LVTT_TM;
  873         value |= APIC_LVTT_TM_PERIODIC;
  874         lapic->lvt_timer = value;
  875         lapic->icr_timer = count;
  876 }
  877 
  878 static void
  879 lapic_timer_enable_intr(void)
  880 {
  881         u_int32_t value;
  882 
  883         value = lapic->lvt_timer;
  884         value &= ~APIC_LVT_M;
  885         lapic->lvt_timer = value;
  886 }
  887 
  888 void
  889 lapic_handle_cmc(void)
  890 {
  891 
  892         lapic_eoi();
  893         cmc_intr();
  894 }
  895 
  896 /*
  897  * Called from mca_init() to activate the CMC interrupt if this CPU is
  898  * responsible for monitoring any MC banks for CMC events.  Since mca_init()
  899  * is called prior to lapic_setup() during boot, this just needs to unmask
  900  * this CPU's LVT_CMCI entry.
  901  */
  902 void
  903 lapic_enable_cmc(void)
  904 {
  905         u_int apic_id;
  906 
  907         apic_id = PCPU_GET(apic_id);
  908         KASSERT(lapics[apic_id].la_present,
  909             ("%s: missing APIC %u", __func__, apic_id));
  910         lapics[apic_id].la_lvts[LVT_CMCI].lvt_masked = 0;
  911         lapics[apic_id].la_lvts[LVT_CMCI].lvt_active = 1;
  912         if (bootverbose)
  913                 printf("lapic%u: CMCI unmasked\n", apic_id);
  914 }
  915 
  916 void
  917 lapic_handle_error(void)
  918 {
  919         u_int32_t esr;
  920 
  921         /*
  922          * Read the contents of the error status register.  Write to
  923          * the register first before reading from it to force the APIC
  924          * to update its value to indicate any errors that have
  925          * occurred since the previous write to the register.
  926          */
  927         lapic->esr = 0;
  928         esr = lapic->esr;
  929 
  930         printf("CPU%d: local APIC error 0x%x\n", PCPU_GET(cpuid), esr);
  931         lapic_eoi();
  932 }
  933 
  934 u_int
  935 apic_cpuid(u_int apic_id)
  936 {
  937 #ifdef SMP
  938         return apic_cpuids[apic_id];
  939 #else
  940         return 0;
  941 #endif
  942 }
  943 
  944 /* Request a free IDT vector to be used by the specified IRQ. */
  945 u_int
  946 apic_alloc_vector(u_int apic_id, u_int irq)
  947 {
  948         u_int vector;
  949 
  950         KASSERT(irq < NUM_IO_INTS, ("Invalid IRQ %u", irq));
  951 
  952         /*
  953          * Search for a free vector.  Currently we just use a very simple
  954          * algorithm to find the first free vector.
  955          */
  956         mtx_lock_spin(&icu_lock);
  957         for (vector = 0; vector < APIC_NUM_IOINTS; vector++) {
  958                 if (lapics[apic_id].la_ioint_irqs[vector] != -1)
  959                         continue;
  960                 lapics[apic_id].la_ioint_irqs[vector] = irq;
  961                 mtx_unlock_spin(&icu_lock);
  962                 return (vector + APIC_IO_INTS);
  963         }
  964         mtx_unlock_spin(&icu_lock);
  965         return (0);
  966 }
  967 
  968 /*
  969  * Request 'count' free contiguous IDT vectors to be used by 'count'
  970  * IRQs.  'count' must be a power of two and the vectors will be
  971  * aligned on a boundary of 'align'.  If the request cannot be
  972  * satisfied, 0 is returned.
  973  */
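      /*
       * A request with count = 4 and align = 4, for example, returns the
       * first run of four unused vectors whose offset into the I/O
       * interrupt range is a multiple of four, as multi-message MSI
       * allocations typically require.
       */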
  974 u_int
  975 apic_alloc_vectors(u_int apic_id, u_int *irqs, u_int count, u_int align)
  976 {
  977         u_int first, run, vector;
  978 
  979         KASSERT(powerof2(count), ("bad count"));
  980         KASSERT(powerof2(align), ("bad align"));
  981         KASSERT(align >= count, ("align < count"));
  982 #ifdef INVARIANTS
  983         for (run = 0; run < count; run++)
  984                 KASSERT(irqs[run] < NUM_IO_INTS, ("Invalid IRQ %u at index %u",
  985                     irqs[run], run));
  986 #endif
  987 
  988         /*
  989          * Search for 'count' free vectors.  As with apic_alloc_vector(),
  990          * this just uses a simple first fit algorithm.
  991          */
  992         run = 0;
  993         first = 0;
  994         mtx_lock_spin(&icu_lock);
  995         for (vector = 0; vector < APIC_NUM_IOINTS; vector++) {
  996 
  997                 /* Vector is in use, end run. */
  998                 if (lapics[apic_id].la_ioint_irqs[vector] != -1) {
  999                         run = 0;
 1000                         first = 0;
 1001                         continue;
 1002                 }
 1003 
 1004                 /* Start a new run if run == 0 and vector is aligned. */
 1005                 if (run == 0) {
 1006                         if ((vector & (align - 1)) != 0)
 1007                                 continue;
 1008                         first = vector;
 1009                 }
 1010                 run++;
 1011 
 1012                 /* Keep looping if the run isn't long enough yet. */
 1013                 if (run < count)
 1014                         continue;
 1015 
 1016                 /* Found a run, assign IRQs and return the first vector. */
 1017                 for (vector = 0; vector < count; vector++)
 1018                         lapics[apic_id].la_ioint_irqs[first + vector] =
 1019                             irqs[vector];
 1020                 mtx_unlock_spin(&icu_lock);
 1021                 return (first + APIC_IO_INTS);
 1022         }
 1023         mtx_unlock_spin(&icu_lock);
 1024         printf("APIC: Couldn't find APIC vectors for %u IRQs\n", count);
 1025         return (0);
 1026 }
 1027 
 1028 /*
 1029  * Enable a vector for a particular apic_id.  Since all lapics share idt
 1030  * entries and ioint_handlers this enables the vector on all lapics.  lapics
 1031  * which do not have the vector configured would report spurious interrupts
 1032  * should it fire.
 1033  */
 1034 void
 1035 apic_enable_vector(u_int apic_id, u_int vector)
 1036 {
 1037 
 1038         KASSERT(vector != IDT_SYSCALL, ("Attempt to overwrite syscall entry"));
 1039         KASSERT(ioint_handlers[vector / 32] != NULL,
 1040             ("No ISR handler for vector %u", vector));
 1041 #ifdef KDTRACE_HOOKS
 1042         KASSERT(vector != IDT_DTRACE_RET,
 1043             ("Attempt to overwrite DTrace entry"));
 1044 #endif
 1045         setidt(vector, ioint_handlers[vector / 32], SDT_APIC, SEL_KPL,
 1046             GSEL_APIC);
 1047 }
 1048 
 1049 void
 1050 apic_disable_vector(u_int apic_id, u_int vector)
 1051 {
 1052 
 1053         KASSERT(vector != IDT_SYSCALL, ("Attempt to overwrite syscall entry"));
 1054 #ifdef KDTRACE_HOOKS
 1055         KASSERT(vector != IDT_DTRACE_RET,
 1056             ("Attempt to overwrite DTrace entry"));
 1057 #endif
 1058         KASSERT(ioint_handlers[vector / 32] != NULL,
 1059             ("No ISR handler for vector %u", vector));
 1060 #ifdef notyet
 1061         /*
 1062          * We can not currently clear the idt entry because other cpus
 1063          * may have a valid vector at this offset.
 1064          */
 1065         setidt(vector, &IDTVEC(rsvd), SDT_APICT, SEL_KPL, GSEL_APIC);
 1066 #endif
 1067 }
 1068 
 1069 /* Release an APIC vector when it's no longer in use. */
 1070 void
 1071 apic_free_vector(u_int apic_id, u_int vector, u_int irq)
 1072 {
 1073         struct thread *td;
 1074 
 1075         KASSERT(vector >= APIC_IO_INTS && vector != IDT_SYSCALL &&
 1076             vector <= APIC_IO_INTS + APIC_NUM_IOINTS,
 1077             ("Vector %u does not map to an IRQ line", vector));
 1078         KASSERT(irq < NUM_IO_INTS, ("Invalid IRQ %u", irq));
 1079         KASSERT(lapics[apic_id].la_ioint_irqs[vector - APIC_IO_INTS] ==
 1080             irq, ("IRQ mismatch"));
 1081 #ifdef KDTRACE_HOOKS
 1082         KASSERT(vector != IDT_DTRACE_RET,
 1083             ("Attempt to overwrite DTrace entry"));
 1084 #endif
 1085 
 1086         /*
 1087          * Bind us to the cpu that owned the vector before freeing it so
 1088          * we don't lose an interrupt delivery race.
 1089          */
 1090         td = curthread;
 1091         if (!rebooting) {
 1092                 thread_lock(td);
 1093                 if (sched_is_bound(td))
 1094                         panic("apic_free_vector: Thread already bound.\n");
 1095                 sched_bind(td, apic_cpuid(apic_id));
 1096                 thread_unlock(td);
 1097         }
 1098         mtx_lock_spin(&icu_lock);
 1099         lapics[apic_id].la_ioint_irqs[vector - APIC_IO_INTS] = -1;
 1100         mtx_unlock_spin(&icu_lock);
 1101         if (!rebooting) {
 1102                 thread_lock(td);
 1103                 sched_unbind(td);
 1104                 thread_unlock(td);
 1105         }
 1106 }
 1107 
 1108 /* Map an IDT vector (APIC) to an IRQ (interrupt source). */
 1109 u_int
 1110 apic_idt_to_irq(u_int apic_id, u_int vector)
 1111 {
 1112         int irq;
 1113 
 1114         KASSERT(vector >= APIC_IO_INTS && vector != IDT_SYSCALL &&
 1115             vector <= APIC_IO_INTS + APIC_NUM_IOINTS,
 1116             ("Vector %u does not map to an IRQ line", vector));
 1117 #ifdef KDTRACE_HOOKS
 1118         KASSERT(vector != IDT_DTRACE_RET,
 1119             ("Attempt to overwrite DTrace entry"));
 1120 #endif
 1121         irq = lapics[apic_id].la_ioint_irqs[vector - APIC_IO_INTS];
 1122         if (irq < 0)
 1123                 irq = 0;
 1124         return (irq);
 1125 }
 1126 
 1127 #ifdef DDB
 1128 /*
 1129  * Dump data about APIC IDT vector mappings.
 1130  */
 1131 DB_SHOW_COMMAND(apic, db_show_apic)
 1132 {
 1133         struct intsrc *isrc;
 1134         int i, verbose;
 1135         u_int apic_id;
 1136         u_int irq;
 1137 
 1138         if (strcmp(modif, "vv") == 0)
 1139                 verbose = 2;
 1140         else if (strcmp(modif, "v") == 0)
 1141                 verbose = 1;
 1142         else
 1143                 verbose = 0;
 1144         for (apic_id = 0; apic_id <= MAX_APIC_ID; apic_id++) {
 1145                 if (lapics[apic_id].la_present == 0)
 1146                         continue;
 1147                 db_printf("Interrupts bound to lapic %u\n", apic_id);
 1148                 for (i = 0; i < APIC_NUM_IOINTS + 1 && !db_pager_quit; i++) {
 1149                         irq = lapics[apic_id].la_ioint_irqs[i];
 1150                         if (irq == -1 || irq == IRQ_SYSCALL)
 1151                                 continue;
 1152 #ifdef KDTRACE_HOOKS
 1153                         if (irq == IRQ_DTRACE_RET)
 1154                                 continue;
 1155 #endif
 1156                         db_printf("vec 0x%2x -> ", i + APIC_IO_INTS);
 1157                         if (irq == IRQ_TIMER)
 1158                                 db_printf("lapic timer\n");
 1159                         else if (irq < NUM_IO_INTS) {
 1160                                 isrc = intr_lookup_source(irq);
 1161                                 if (isrc == NULL || verbose == 0)
 1162                                         db_printf("IRQ %u\n", irq);
 1163                                 else
 1164                                         db_dump_intr_event(isrc->is_event,
 1165                                             verbose == 2);
 1166                         } else
 1167                                 db_printf("IRQ %u ???\n", irq);
 1168                 }
 1169         }
 1170 }
 1171 
 1172 static void
 1173 dump_mask(const char *prefix, uint32_t v, int base)
 1174 {
 1175         int i, first;
 1176 
 1177         first = 1;
 1178         for (i = 0; i < 32; i++)
 1179                 if (v & (1 << i)) {
 1180                         if (first) {
 1181                                 db_printf("%s:", prefix);
 1182                                 first = 0;
 1183                         }
 1184                         db_printf(" %02x", base + i);
 1185                 }
 1186         if (!first)
 1187                 db_printf("\n");
 1188 }
 1189 
 1190 /* Show info from the lapic regs for this CPU. */
 1191 DB_SHOW_COMMAND(lapic, db_show_lapic)
 1192 {
 1193         uint32_t v;
 1194 
 1195         db_printf("lapic ID = %d\n", lapic_id());
 1196         v = lapic->version;
 1197         db_printf("version  = %d.%d\n", (v & APIC_VER_VERSION) >> 4,
 1198             v & 0xf);
 1199         db_printf("max LVT  = %d\n", (v & APIC_VER_MAXLVT) >> MAXLVTSHIFT);
 1200         v = lapic->svr;
 1201         db_printf("SVR      = %02x (%s)\n", v & APIC_SVR_VECTOR,
 1202             v & APIC_SVR_ENABLE ? "enabled" : "disabled");
 1203         db_printf("TPR      = %02x\n", lapic->tpr);
 1204 
 1205 #define dump_field(prefix, index)                                       \
 1206         dump_mask(__XSTRING(prefix ## index), lapic->prefix ## index,   \
 1207             index * 32)
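              /* dump_field(irr, 0), for instance, expands to
               * dump_mask("irr0", lapic->irr0, 0 * 32). */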
 1208 
 1209         db_printf("In-service Interrupts:\n");
 1210         dump_field(isr, 0);
 1211         dump_field(isr, 1);
 1212         dump_field(isr, 2);
 1213         dump_field(isr, 3);
 1214         dump_field(isr, 4);
 1215         dump_field(isr, 5);
 1216         dump_field(isr, 6);
 1217         dump_field(isr, 7);
 1218 
 1219         db_printf("TMR Interrupts:\n");
 1220         dump_field(tmr, 0);
 1221         dump_field(tmr, 1);
 1222         dump_field(tmr, 2);
 1223         dump_field(tmr, 3);
 1224         dump_field(tmr, 4);
 1225         dump_field(tmr, 5);
 1226         dump_field(tmr, 6);
 1227         dump_field(tmr, 7);
 1228 
 1229         db_printf("IRR Interrupts:\n");
 1230         dump_field(irr, 0);
 1231         dump_field(irr, 1);
 1232         dump_field(irr, 2);
 1233         dump_field(irr, 3);
 1234         dump_field(irr, 4);
 1235         dump_field(irr, 5);
 1236         dump_field(irr, 6);
 1237         dump_field(irr, 7);
 1238 
 1239 #undef dump_field
 1240 }
 1241 #endif
 1242 
 1243 /*
 1244  * APIC probing support code.  This includes code to manage enumerators.
 1245  */
 1246 
 1247 static SLIST_HEAD(, apic_enumerator) enumerators =
 1248         SLIST_HEAD_INITIALIZER(enumerators);
 1249 static struct apic_enumerator *best_enum;
 1250 
 1251 void
 1252 apic_register_enumerator(struct apic_enumerator *enumerator)
 1253 {
 1254 #ifdef INVARIANTS
 1255         struct apic_enumerator *apic_enum;
 1256 
 1257         SLIST_FOREACH(apic_enum, &enumerators, apic_next) {
 1258                 if (apic_enum == enumerator)
 1259                         panic("%s: Duplicate register of %s", __func__,
 1260                             enumerator->apic_name);
 1261         }
 1262 #endif
 1263         SLIST_INSERT_HEAD(&enumerators, enumerator, apic_next);
 1264 }
 1265 
 1266 /*
 1267  * We have to look for CPUs very, very early because certain subsystems
 1268  * want to know how many CPUs we have extremely early on in the boot
 1269  * process.
 1270  */
 1271 static void
 1272 apic_init(void *dummy __unused)
 1273 {
 1274         struct apic_enumerator *enumerator;
 1275 #ifndef __amd64__
 1276         uint64_t apic_base;
 1277 #endif
 1278         int retval, best;
 1279 
 1280         /* We only support built in local APICs. */
 1281         if (!(cpu_feature & CPUID_APIC))
 1282                 return;
 1283 
 1284         /* Don't probe if APIC mode is disabled. */
 1285         if (resource_disabled("apic", 0))
 1286                 return;
 1287 
 1288         /* First, probe all the enumerators to find the best match. */
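              /*
               * A positive return from apic_probe() is treated as a failed
               * probe and skipped; among the remaining (non-positive)
               * values, the enumerator whose value is closest to zero wins.
               */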
 1289         best_enum = NULL;
 1290         best = 0;
 1291         SLIST_FOREACH(enumerator, &enumerators, apic_next) {
 1292                 retval = enumerator->apic_probe();
 1293                 if (retval > 0)
 1294                         continue;
 1295                 if (best_enum == NULL || best < retval) {
 1296                         best_enum = enumerator;
 1297                         best = retval;
 1298                 }
 1299         }
 1300         if (best_enum == NULL) {
 1301                 if (bootverbose)
 1302                         printf("APIC: Could not find any APICs.\n");
 1303                 return;
 1304         }
 1305 
 1306         if (bootverbose)
 1307                 printf("APIC: Using the %s enumerator.\n",
 1308                     best_enum->apic_name);
 1309 
 1310 #ifndef __amd64__
 1311         /*
 1312          * To work around an erratum, we disable the local APIC on some
 1313          * CPUs during early startup.  We need to turn the local APIC back
 1314          * on for such CPUs now.
 1315          */
 1316         if (cpu == CPU_686 && cpu_vendor_id == CPU_VENDOR_INTEL &&
 1317             (cpu_id & 0xff0) == 0x610) {
 1318                 apic_base = rdmsr(MSR_APICBASE);
 1319                 apic_base |= APICBASE_ENABLED;
 1320                 wrmsr(MSR_APICBASE, apic_base);
 1321         }
 1322 #endif
 1323 
 1324         /* Second, probe the CPUs in the system. */
 1325         retval = best_enum->apic_probe_cpus();
 1326         if (retval != 0)
 1327                 printf("%s: Failed to probe CPUs: returned %d\n",
 1328                     best_enum->apic_name, retval);
 1329 
 1330 #ifdef __amd64__
 1331 }
 1332 SYSINIT(apic_init, SI_SUB_TUNABLES - 1, SI_ORDER_SECOND, apic_init, NULL);
 1333 
 1334 /*
 1335  * Setup the local APIC.  We have to do this prior to starting up the APs
 1336  * in the SMP case.
 1337  */
 1338 static void
 1339 apic_setup_local(void *dummy __unused)
 1340 {
 1341         int retval;
 1342  
 1343         if (best_enum == NULL)
 1344                 return;
 1345 #endif
 1346         /* Third, initialize the local APIC. */
 1347         retval = best_enum->apic_setup_local();
 1348         if (retval != 0)
 1349                 printf("%s: Failed to setup the local APIC: returned %d\n",
 1350                     best_enum->apic_name, retval);
 1351 }
 1352 #ifdef __amd64__
 1353 SYSINIT(apic_setup_local, SI_SUB_CPU, SI_ORDER_SECOND, apic_setup_local,
 1354     NULL);
 1355 #else
 1356 SYSINIT(apic_init, SI_SUB_CPU, SI_ORDER_SECOND, apic_init, NULL);
 1357 #endif
 1358 
 1359 /*
 1360  * Setup the I/O APICs.
 1361  */
 1362 static void
 1363 apic_setup_io(void *dummy __unused)
 1364 {
 1365         int retval;
 1366 
 1367         if (best_enum == NULL)
 1368                 return;
 1369 
 1370         /*
 1371          * Local APIC must be registered before other PICs and pseudo PICs
 1372          * for proper suspend/resume order.
 1373          */
 1374 #ifndef XEN
 1375         intr_register_pic(&lapic_pic);
 1376 #endif
 1377 
 1378         retval = best_enum->apic_setup_io();
 1379         if (retval != 0)
 1380                 printf("%s: Failed to setup I/O APICs: returned %d\n",
 1381                     best_enum->apic_name, retval);
 1382 #ifdef XEN
 1383         return;
 1384 #endif
 1385         /*
 1386          * Finish setting up the local APIC on the BSP once we know how to
 1387          * properly program the LINT pins.
 1388          */
 1389         lapic_setup(1);
 1390         if (bootverbose)
 1391                 lapic_dump("BSP");
 1392 
 1393         /* Enable the MSI "pic". */
 1394         msi_init();
 1395 }
 1396 SYSINIT(apic_setup_io, SI_SUB_INTR, SI_ORDER_SECOND, apic_setup_io, NULL);
 1397 
 1398 #ifdef SMP
 1399 /*
 1400  * Inter Processor Interrupt functions.  The lapic_ipi_*() functions are
 1401  * private to the MD code.  The public interface for the rest of the
 1402  * kernel is defined in mp_machdep.c.
 1403  */
 1404 int
 1405 lapic_ipi_wait(int delay)
 1406 {
 1407         int x, incr;
 1408 
 1409         /*
 1410          * Wait delay loops for IPI to be sent.  This is highly bogus
 1411          * since this is sensitive to CPU clock speed.  If delay is
 1412          * -1, we wait forever.
 1413          */
 1414         if (delay == -1) {
 1415                 incr = 0;
 1416                 delay = 1;
 1417         } else
 1418                 incr = 1;
 1419         for (x = 0; x < delay; x += incr) {
 1420                 if ((lapic->icr_lo & APIC_DELSTAT_MASK) == APIC_DELSTAT_IDLE)
 1421                         return (1);
 1422                 ia32_pause();
 1423         }
 1424         return (0);
 1425 }
 1426 
 1427 void
 1428 lapic_ipi_raw(register_t icrlo, u_int dest)
 1429 {
 1430         register_t value, saveintr;
 1431 
 1432         /* XXX: Need more sanity checking of icrlo? */
 1433         KASSERT(lapic != NULL, ("%s called too early", __func__));
 1434         KASSERT((dest & ~(APIC_ID_MASK >> APIC_ID_SHIFT)) == 0,
 1435             ("%s: invalid dest field", __func__));
 1436         KASSERT((icrlo & APIC_ICRLO_RESV_MASK) == 0,
 1437             ("%s: reserved bits set in ICR LO register", __func__));
 1438 
 1439         /* Set destination in ICR HI register if it is being used. */
 1440         saveintr = intr_disable();
 1441         if ((icrlo & APIC_DEST_MASK) == APIC_DEST_DESTFLD) {
 1442                 value = lapic->icr_hi;
 1443                 value &= ~APIC_ID_MASK;
 1444                 value |= dest << APIC_ID_SHIFT;
 1445                 lapic->icr_hi = value;
 1446         }
 1447 
 1448         /* Program the contents of the IPI and dispatch it. */
 1449         value = lapic->icr_lo;
 1450         value &= APIC_ICRLO_RESV_MASK;
 1451         value |= icrlo;
 1452         lapic->icr_lo = value;
 1453         intr_restore(saveintr);
 1454 }
 1455 
 1456 #define BEFORE_SPIN     1000000
 1457 #ifdef DETECT_DEADLOCK
 1458 #define AFTER_SPIN      1000
 1459 #endif
 1460 
 1461 void
 1462 lapic_ipi_vectored(u_int vector, int dest)
 1463 {
 1464         register_t icrlo, destfield;
 1465 
 1466         KASSERT((vector & ~APIC_VECTOR_MASK) == 0,
 1467             ("%s: invalid vector %d", __func__, vector));
 1468 
 1469         icrlo = APIC_DESTMODE_PHY | APIC_TRIGMOD_EDGE;
 1470 
 1471         /*
 1472          * IPI_STOP_HARD is just a "fake" vector used to send an NMI.
 1473          * Use special rules regarding NMI if passed, otherwise specify
 1474          * the vector.
 1475          */
 1476         if (vector == IPI_STOP_HARD)
 1477                 icrlo |= APIC_DELMODE_NMI | APIC_LEVEL_ASSERT;
 1478         else
 1479                 icrlo |= vector | APIC_DELMODE_FIXED | APIC_LEVEL_DEASSERT;
 1480         destfield = 0;
 1481         switch (dest) {
 1482         case APIC_IPI_DEST_SELF:
 1483                 icrlo |= APIC_DEST_SELF;
 1484                 break;
 1485         case APIC_IPI_DEST_ALL:
 1486                 icrlo |= APIC_DEST_ALLISELF;
 1487                 break;
 1488         case APIC_IPI_DEST_OTHERS:
 1489                 icrlo |= APIC_DEST_ALLESELF;
 1490                 break;
 1491         default:
 1492                 KASSERT((dest & ~(APIC_ID_MASK >> APIC_ID_SHIFT)) == 0,
 1493                     ("%s: invalid destination 0x%x", __func__, dest));
 1494                 destfield = dest;
 1495         }
 1496 
 1497         /* Wait for an earlier IPI to finish. */
 1498         if (!lapic_ipi_wait(BEFORE_SPIN)) {
 1499                 if (panicstr != NULL)
 1500                         return;
 1501                 else
 1502                         panic("APIC: Previous IPI is stuck");
 1503         }
 1504 
 1505         lapic_ipi_raw(icrlo, destfield);
 1506 
 1507 #ifdef DETECT_DEADLOCK
 1508         /* Wait for IPI to be delivered. */
 1509         if (!lapic_ipi_wait(AFTER_SPIN)) {
 1510 #ifdef needsattention
 1511                 /*
 1512                  * XXX FIXME:
 1513                  *
 1514                  * The above function waits for the message to actually be
 1515                  * delivered.  It breaks out after an arbitrary timeout
 1516                  * since the message should eventually be delivered (at
 1517                  * least in theory) and that if it wasn't we would catch
 1518                  * the failure with the check above when the next IPI is
 1519                  * sent.
 1520                  *
 1521                  * We could skip this wait entirely, EXCEPT it probably
 1522                  * protects us from other routines that assume that the
 1523                  * message was delivered and acted upon when this function
 1524                  * returns.
 1525                  */
 1526                 printf("APIC: IPI might be stuck\n");
 1527 #else /* !needsattention */
 1528                 /* Wait until message is sent without a timeout. */
 1529                 while (lapic->icr_lo & APIC_DELSTAT_PEND)
 1530                         ia32_pause();
 1531 #endif /* needsattention */
 1532         }
 1533 #endif /* DETECT_DEADLOCK */
 1534 }
 1535 #endif /* SMP */
