1 /*-
2 * Copyright (c) 2003 John Baldwin <jhb@FreeBSD.org>
3 * Copyright (c) 1996, by Steve Passe
4 * All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. The name of the developer may NOT be used to endorse or promote products
12 * derived from this software without specific prior written permission.
13 * 3. Neither the name of the author nor the names of any co-contributors
14 * may be used to endorse or promote products derived from this software
15 * without specific prior written permission.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
21 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27 * SUCH DAMAGE.
28 */
29
30 /*
31 * Local APIC support on Pentium and later processors.
32 */
33
34 #include <sys/cdefs.h>
35 __FBSDID("$FreeBSD$");
36
37 #include <sys/param.h>
38 #include <sys/systm.h>
39 #include <sys/bus.h>
40 #include <sys/kernel.h>
41 #include <sys/pcpu.h>
42 #include <sys/proc.h>
43
44 #include <vm/vm.h>
45 #include <vm/pmap.h>
46
47 #include <machine/apicreg.h>
48 #include <machine/cputypes.h>
49 #include <machine/frame.h>
50 #include <machine/intr_machdep.h>
51 #include <machine/apicvar.h>
52 #include <machine/md_var.h>
53 #include <machine/smp.h>
54 #include <machine/specialreg.h>
55
56 /*
57 * We can handle up to 60 APICs via our logical cluster IDs, but currently
58 * the physical IDs on Intel processors up to the Pentium 4 are limited to
59 * 16.
60 */
61 #define MAX_APICID 16
62
63 /* Sanity checks on IDT vectors. */
64 CTASSERT(APIC_IO_INTS + APIC_NUM_IOINTS == APIC_TIMER_INT);
65 CTASSERT(APIC_TIMER_INT < APIC_LOCAL_INTS);
66 CTASSERT(APIC_LOCAL_INTS == 240);
67 CTASSERT(IPI_STOP < APIC_SPURIOUS_INT);
68
69 /*
70 * Support for local APICs. Local APICs manage interrupts on each
71 * individual processor as opposed to I/O APICs which receive interrupts
72 * from I/O devices and then forward them on to the local APICs.
73 *
74 * Local APICs can also send interrupts to each other thus providing the
75 * mechanism for IPIs.
76 */
77
/*
 * Software description of one local vector table (LVT) entry.
 * lvt_mode() folds these fields into the hardware register format.
 */
struct lvt {
	u_int lvt_edgetrigger:1;	/* 1 = edge trigger, 0 = level trigger */
	u_int lvt_activehi:1;		/* 1 = active high, 0 = active low */
	u_int lvt_masked:1;		/* 1 = entry is masked */
	u_int lvt_active:1;		/* per-APIC entry overrides the default */
	u_int lvt_mode:16;		/* APIC_LVT_DM_* delivery mode bits */
	u_int lvt_vector:8;		/* IDT vector, used for fixed delivery */
};
86
/* Per-CPU local APIC state, indexed by physical APIC ID. */
struct lapic {
	struct lvt la_lvts[LVT_MAX + 1];	/* per-APIC LVT overrides */
	u_int la_id:8;			/* physical APIC ID */
	u_int la_cluster:4;		/* logical cluster programmed into LDR */
	u_int la_cluster_id:2;		/* intra-cluster ID (bit index in LDR) */
	u_int la_present:1;		/* set once lapic_create() has run */
} static lapics[MAX_APICID];
94
/* XXX: should thermal be an NMI? */

/*
 * Global defaults for local APIC LVT entries, indexed by the LVT_*
 * pin constants.  A per-APIC entry in la_lvts[] takes precedence once
 * its lvt_active flag is set.
 */
static struct lvt lvts[LVT_MAX + 1] = {
	{ 1, 1, 1, 1, APIC_LVT_DM_EXTINT, 0 },	/* LINT0: masked ExtINT */
	{ 1, 1, 0, 1, APIC_LVT_DM_NMI, 0 },	/* LINT1: NMI */
	{ 1, 1, 1, 1, APIC_LVT_DM_FIXED, APIC_TIMER_INT },	/* Timer */
	{ 1, 1, 1, 1, APIC_LVT_DM_FIXED, APIC_ERROR_INT },	/* Error */
	{ 1, 1, 1, 1, APIC_LVT_DM_FIXED, 0 },	/* PMC */
	{ 1, 1, 1, 1, APIC_LVT_DM_FIXED, APIC_THERMAL_INT },	/* Thermal */
};
106
/*
 * Shared I/O interrupt service routines, one per bank of 32 IDT
 * vectors; entry i handles vectors [i * 32, i * 32 + 31].
 */
static inthand_t *ioint_handlers[] = {
	NULL,			/* 0 - 31 */
	IDTVEC(apic_isr1),	/* 32 - 63 */
	IDTVEC(apic_isr2),	/* 64 - 95 */
	IDTVEC(apic_isr3),	/* 96 - 127 */
	IDTVEC(apic_isr4),	/* 128 - 159 */
	IDTVEC(apic_isr5),	/* 160 - 191 */
	IDTVEC(apic_isr6),	/* 192 - 223 */
	IDTVEC(apic_isr7),	/* 224 - 255 */
};
117
118 volatile lapic_t *lapic;
119
120 static void lapic_enable(void);
121 static uint32_t lvt_mode(struct lapic *la, u_int pin, uint32_t value);
122
/*
 * Build the hardware LVT register value for the given pin, preserving
 * any bits of 'value' outside the mask/trigger/polarity/mode/vector
 * fields.  Uses the per-APIC override for the pin if one is active,
 * otherwise the global default from lvts[].
 */
static uint32_t
lvt_mode(struct lapic *la, u_int pin, uint32_t value)
{
	struct lvt *lvt;

	KASSERT(pin <= LVT_MAX, ("%s: pin %u out of range", __func__, pin));
	/* Prefer an active per-APIC override over the global default. */
	if (la->la_lvts[pin].lvt_active)
		lvt = &la->la_lvts[pin];
	else
		lvt = &lvts[pin];

	/* Clear every field this routine is responsible for setting. */
	value &= ~(APIC_LVT_M | APIC_LVT_TM | APIC_LVT_IIPP | APIC_LVT_DM |
	    APIC_LVT_VECTOR);
	if (lvt->lvt_edgetrigger == 0)
		value |= APIC_LVT_TM;
	if (lvt->lvt_activehi == 0)
		value |= APIC_LVT_IIPP_INTALO;
	if (lvt->lvt_masked)
		value |= APIC_LVT_M;
	value |= lvt->lvt_mode;
	switch (lvt->lvt_mode) {
	case APIC_LVT_DM_NMI:
	case APIC_LVT_DM_SMI:
	case APIC_LVT_DM_INIT:
	case APIC_LVT_DM_EXTINT:
		/* These delivery modes only support edge triggering. */
		if (!lvt->lvt_edgetrigger) {
			printf("lapic%u: Forcing LINT%u to edge trigger\n",
			    la->la_id, pin);
			value |= APIC_LVT_TM;
		}
		/* Use a vector of 0. */
		break;
	case APIC_LVT_DM_FIXED:
		value |= lvt->lvt_vector;
		break;
	default:
		panic("bad APIC LVT delivery mode: %#x\n", value);
	}
	return (value);
}
163
164 /*
165 * Map the local APIC and setup necessary interrupt vectors.
166 */
/*
 * Map the local APIC and setup necessary interrupt vectors.
 * 'addr' is the page-aligned physical address of the local APIC
 * register block.
 */
void
lapic_init(uintptr_t addr)
{

	/* Map the local APIC and setup the spurious interrupt handler. */
	KASSERT(trunc_page(addr) == addr,
	    ("local APIC not aligned on a page boundary"));
	lapic = (lapic_t *)pmap_mapdev(addr, sizeof(lapic_t));
	/* The spurious vector is the only IDT entry needed this early. */
	setidt(APIC_SPURIOUS_INT, IDTVEC(spuriousint), SDT_SYSIGT, SEL_KPL, 0);

	/* Perform basic initialization of the BSP's local APIC. */
	lapic_enable();

	/* Set BSP's per-CPU local APIC ID. */
	PCPU_SET(apic_id, lapic_id());

	/* XXX: timer/error/thermal interrupts */
}
185
186 /*
187 * Create a local APIC instance.
188 */
189 void
190 lapic_create(u_int apic_id, int boot_cpu)
191 {
192 int i;
193
194 if (apic_id >= MAX_APICID) {
195 printf("APIC: Ignoring local APIC with ID %d\n", apic_id);
196 if (boot_cpu)
197 panic("Can't ignore BSP");
198 return;
199 }
200 KASSERT(!lapics[apic_id].la_present, ("duplicate local APIC %u",
201 apic_id));
202
203 /*
204 * Assume no local LVT overrides and a cluster of 0 and
205 * intra-cluster ID of 0.
206 */
207 lapics[apic_id].la_present = 1;
208 lapics[apic_id].la_id = apic_id;
209 for (i = 0; i < LVT_MAX; i++) {
210 lapics[apic_id].la_lvts[i] = lvts[i];
211 lapics[apic_id].la_lvts[i].lvt_active = 0;
212 }
213
214 #ifdef SMP
215 cpu_add(apic_id, boot_cpu);
216 #endif
217 }
218
219 /*
220 * Dump contents of local APIC registers
221 */
/*
 * Dump contents of local APIC registers.  'str' tags the output so
 * dumps from different call sites can be told apart.
 */
void
lapic_dump(const char* str)
{

	printf("cpu%d %s:\n", PCPU_GET(cpuid), str);
	printf(" ID: 0x%08x VER: 0x%08x LDR: 0x%08x DFR: 0x%08x\n",
	    lapic->id, lapic->version, lapic->ldr, lapic->dfr);
	printf(" lint0: 0x%08x lint1: 0x%08x TPR: 0x%08x SVR: 0x%08x\n",
	    lapic->lvt_lint0, lapic->lvt_lint1, lapic->tpr, lapic->svr);
	printf(" timer: 0x%08x therm: 0x%08x err: 0x%08x pcm: 0x%08x\n",
	    lapic->lvt_timer, lapic->lvt_thermal, lapic->lvt_error,
	    lapic->lvt_pcint);
}
235
236 void
237 lapic_enable_intr(u_int irq)
238 {
239 u_int vector;
240
241 vector = apic_irq_to_idt(irq);
242 KASSERT(vector != IDT_SYSCALL, ("Attempt to overwrite syscall entry"));
243 KASSERT(ioint_handlers[vector / 32] != NULL,
244 ("No ISR handler for IRQ %u", irq));
245 setidt(vector, ioint_handlers[vector / 32], SDT_SYSIGT, SEL_KPL, 0);
246 }
247
/*
 * Program the current CPU's local APIC: open the TPR to all
 * interrupts, select the cluster model and this CPU's logical ID,
 * enable the APIC via the spurious vector register, and load the
 * LINT0/LINT1 LVT entries.  Interrupts are disabled while the
 * registers are programmed.
 */
void
lapic_setup(void)
{
	struct lapic *la;
	u_int32_t value, maxlvt;
	register_t eflags;

	la = &lapics[lapic_id()];
	KASSERT(la->la_present, ("missing APIC structure"));
	eflags = intr_disable();
	/* NOTE(review): maxlvt is computed but not yet used below. */
	maxlvt = (lapic->version & APIC_VER_MAXLVT) >> MAXLVTSHIFT;

	/* Initialize the TPR to allow all interrupts. */
	lapic_set_tpr(0);

	/* Use the cluster model for logical IDs. */
	value = lapic->dfr;
	value &= ~APIC_DFR_MODEL_MASK;
	value |= APIC_DFR_MODEL_CLUSTER;
	lapic->dfr = value;

	/* Set this APIC's logical ID. */
	value = lapic->ldr;
	value &= ~APIC_ID_MASK;
	value |= (la->la_cluster << APIC_ID_CLUSTER_SHIFT |
	    1 << la->la_cluster_id) << APIC_ID_SHIFT;
	lapic->ldr = value;

	/* Setup spurious vector and enable the local APIC. */
	lapic_enable();

	/* Program LINT[01] LVT entries. */
	lapic->lvt_lint0 = lvt_mode(la, LVT_LINT0, lapic->lvt_lint0);
	lapic->lvt_lint1 = lvt_mode(la, LVT_LINT1, lapic->lvt_lint1);

	/* XXX: more LVT entries */

	intr_restore(eflags);
}
287
288 void
289 lapic_disable(void)
290 {
291 uint32_t value;
292
293 /* Software disable the local APIC. */
294 value = lapic->svr;
295 value &= ~APIC_SVR_SWEN;
296 lapic->svr = value;
297 }
298
299 static void
300 lapic_enable(void)
301 {
302 u_int32_t value;
303
304 /* Program the spurious vector to enable the local APIC. */
305 value = lapic->svr;
306 value &= ~(APIC_SVR_VECTOR | APIC_SVR_FOCUS);
307 value |= (APIC_SVR_FEN | APIC_SVR_SWEN | APIC_SPURIOUS_INT);
308 lapic->svr = value;
309 }
310
/*
 * Return the physical APIC ID of the current CPU, read from the local
 * APIC's ID register.
 */
int
lapic_id(void)
{

	KASSERT(lapic != NULL, ("local APIC is not mapped"));
	return (lapic->id >> APIC_ID_SHIFT);
}
318
/*
 * Return non-zero if the given IDT vector is currently pending in the
 * local APIC's interrupt request register (IRR).
 */
int
lapic_intr_pending(u_int vector)
{
	volatile u_int32_t *irr;

	/*
	 * The IRR registers are an array of 128-bit registers each of
	 * which only describes 32 interrupts in the low 32 bits..  Thus,
	 * we divide the vector by 32 to get the 128-bit index.  We then
	 * multiply that index by 4 to get the equivalent index from
	 * treating the IRR as an array of 32-bit registers.  Finally, we
	 * modulus the vector by 32 to determine the individual bit to
	 * test.
	 */
	irr = &lapic->irr0;
	return (irr[(vector / 32) * 4] & 1 << (vector % 32));
}
336
337 void
338 lapic_set_logical_id(u_int apic_id, u_int cluster, u_int cluster_id)
339 {
340 struct lapic *la;
341
342 KASSERT(lapics[apic_id].la_present, ("%s: APIC %u doesn't exist",
343 __func__, apic_id));
344 KASSERT(cluster <= APIC_MAX_CLUSTER, ("%s: cluster %u too big",
345 __func__, cluster));
346 KASSERT(cluster_id <= APIC_MAX_INTRACLUSTER_ID,
347 ("%s: intra cluster id %u too big", __func__, cluster_id));
348 la = &lapics[apic_id];
349 la->la_cluster = cluster;
350 la->la_cluster_id = cluster_id;
351 }
352
353 int
354 lapic_set_lvt_mask(u_int apic_id, u_int pin, u_char masked)
355 {
356
357 if (pin > LVT_MAX)
358 return (EINVAL);
359 if (apic_id == APIC_ID_ALL) {
360 lvts[pin].lvt_masked = masked;
361 if (bootverbose)
362 printf("lapic:");
363 } else {
364 KASSERT(lapics[apic_id].la_present,
365 ("%s: missing APIC %u", __func__, apic_id));
366 lapics[apic_id].la_lvts[pin].lvt_masked = masked;
367 lapics[apic_id].la_lvts[pin].lvt_active = 1;
368 if (bootverbose)
369 printf("lapic%u:", apic_id);
370 }
371 if (bootverbose)
372 printf(" LINT%u %s\n", pin, masked ? "masked" : "unmasked");
373 return (0);
374 }
375
/*
 * Set the delivery mode of an LVT pin, either globally
 * (apic_id == APIC_ID_ALL) or as a per-APIC override.  Only the NMI,
 * SMI, INIT, and ExtINT modes are accepted; ExtINT entries start
 * masked, the others unmasked.  Returns EINVAL for an out of range
 * pin.
 */
int
lapic_set_lvt_mode(u_int apic_id, u_int pin, u_int32_t mode)
{
	struct lvt *lvt;

	if (pin > LVT_MAX)
		return (EINVAL);
	if (apic_id == APIC_ID_ALL) {
		/* Update the global default for this pin. */
		lvt = &lvts[pin];
		if (bootverbose)
			printf("lapic:");
	} else {
		KASSERT(lapics[apic_id].la_present,
		    ("%s: missing APIC %u", __func__, apic_id));
		/* Activate a per-APIC override for this pin. */
		lvt = &lapics[apic_id].la_lvts[pin];
		lvt->lvt_active = 1;
		if (bootverbose)
			printf("lapic%u:", apic_id);
	}
	lvt->lvt_mode = mode;
	switch (mode) {
	case APIC_LVT_DM_NMI:
	case APIC_LVT_DM_SMI:
	case APIC_LVT_DM_INIT:
	case APIC_LVT_DM_EXTINT:
		/* These modes are forced to edge trigger and active high. */
		lvt->lvt_edgetrigger = 1;
		lvt->lvt_activehi = 1;
		/* ExtINT starts masked; the other modes start unmasked. */
		if (mode == APIC_LVT_DM_EXTINT)
			lvt->lvt_masked = 1;
		else
			lvt->lvt_masked = 0;
		break;
	default:
		panic("Unsupported delivery mode: 0x%x\n", mode);
	}
	if (bootverbose) {
		printf(" Routing ");
		switch (mode) {
		case APIC_LVT_DM_NMI:
			printf("NMI");
			break;
		case APIC_LVT_DM_SMI:
			printf("SMI");
			break;
		case APIC_LVT_DM_INIT:
			printf("INIT");
			break;
		case APIC_LVT_DM_EXTINT:
			printf("ExtINT");
			break;
		}
		printf(" -> LINT%u\n", pin);
	}
	return (0);
}
431
432 int
433 lapic_set_lvt_polarity(u_int apic_id, u_int pin, enum intr_polarity pol)
434 {
435
436 if (pin > LVT_MAX || pol == INTR_POLARITY_CONFORM)
437 return (EINVAL);
438 if (apic_id == APIC_ID_ALL) {
439 lvts[pin].lvt_activehi = (pol == INTR_POLARITY_HIGH);
440 if (bootverbose)
441 printf("lapic:");
442 } else {
443 KASSERT(lapics[apic_id].la_present,
444 ("%s: missing APIC %u", __func__, apic_id));
445 lapics[apic_id].la_lvts[pin].lvt_active = 1;
446 lapics[apic_id].la_lvts[pin].lvt_activehi =
447 (pol == INTR_POLARITY_HIGH);
448 if (bootverbose)
449 printf("lapic%u:", apic_id);
450 }
451 if (bootverbose)
452 printf(" LINT%u polarity: %s\n", pin,
453 pol == INTR_POLARITY_HIGH ? "high" : "low");
454 return (0);
455 }
456
457 int
458 lapic_set_lvt_triggermode(u_int apic_id, u_int pin, enum intr_trigger trigger)
459 {
460
461 if (pin > LVT_MAX || trigger == INTR_TRIGGER_CONFORM)
462 return (EINVAL);
463 if (apic_id == APIC_ID_ALL) {
464 lvts[pin].lvt_edgetrigger = (trigger == INTR_TRIGGER_EDGE);
465 if (bootverbose)
466 printf("lapic:");
467 } else {
468 KASSERT(lapics[apic_id].la_present,
469 ("%s: missing APIC %u", __func__, apic_id));
470 lapics[apic_id].la_lvts[pin].lvt_edgetrigger =
471 (trigger == INTR_TRIGGER_EDGE);
472 lapics[apic_id].la_lvts[pin].lvt_active = 1;
473 if (bootverbose)
474 printf("lapic%u:", apic_id);
475 }
476 if (bootverbose)
477 printf(" LINT%u trigger: %s\n", pin,
478 trigger == INTR_TRIGGER_EDGE ? "edge" : "level");
479 return (0);
480 }
481
482 /*
483 * Adjust the TPR of the current CPU so that it blocks all interrupts below
484 * the passed in vector.
485 */
/*
 * Adjust the TPR of the current CPU so that it blocks all interrupts below
 * the passed in vector.
 */
void
lapic_set_tpr(u_int vector)
{
#ifdef CHEAP_TPR
	/* Write the vector directly without a read-modify-write cycle. */
	lapic->tpr = vector;
#else
	u_int32_t tpr;

	/* Preserve any register bits outside the priority field. */
	tpr = lapic->tpr & ~APIC_TPR_PRIO;
	tpr |= vector;
	lapic->tpr = tpr;
#endif
}
499
/*
 * Signal end-of-interrupt to the local APIC; the written value is
 * ignored by the EOI register, only the write itself matters.
 */
void
lapic_eoi(void)
{

	lapic->eoi = 0;
}
506
/*
 * C entry point for I/O APIC interrupts: translate the IDT vector
 * carried in 'cookie' to an IRQ and execute that source's handlers.
 */
void
lapic_handle_intr(void *cookie, struct intrframe frame)
{
	struct intsrc *isrc;
	int vec = (uintptr_t)cookie;

	/*
	 * NOTE(review): -1 presumably marks a cookie the ISR stub could
	 * not fill in — confirm against the assembly entry points.
	 */
	if (vec == -1)
		panic("Couldn't get vector from ISR!");
	isrc = intr_lookup_source(apic_idt_to_irq(vec));
	intr_execute_handlers(isrc, &frame);
}
518
519 /* Translate between IDT vectors and IRQ vectors. */
520 u_int
521 apic_irq_to_idt(u_int irq)
522 {
523 u_int vector;
524
525 KASSERT(irq < NUM_IO_INTS, ("Invalid IRQ %u", irq));
526 vector = irq + APIC_IO_INTS;
527 if (vector >= IDT_SYSCALL)
528 vector++;
529 return (vector);
530 }
531
532 u_int
533 apic_idt_to_irq(u_int vector)
534 {
535
536 KASSERT(vector >= APIC_IO_INTS && vector != IDT_SYSCALL &&
537 vector <= APIC_IO_INTS + NUM_IO_INTS,
538 ("Vector %u does not map to an IRQ line", vector));
539 if (vector > IDT_SYSCALL)
540 vector--;
541 return (vector - APIC_IO_INTS);
542 }
543
544 /*
545 * APIC probing support code. This includes code to manage enumerators.
546 */
547
548 static SLIST_HEAD(, apic_enumerator) enumerators =
549 SLIST_HEAD_INITIALIZER(enumerators);
550 static struct apic_enumerator *best_enum;
551
552 void
553 apic_register_enumerator(struct apic_enumerator *enumerator)
554 {
555 #ifdef INVARIANTS
556 struct apic_enumerator *apic_enum;
557
558 SLIST_FOREACH(apic_enum, &enumerators, apic_next) {
559 if (apic_enum == enumerator)
560 panic("%s: Duplicate register of %s", __func__,
561 enumerator->apic_name);
562 }
563 #endif
564 SLIST_INSERT_HEAD(&enumerators, enumerator, apic_next);
565 }
566
567 /*
568 * We have to look for CPU's very, very early because certain subsystems
569 * want to know how many CPU's we have extremely early on in the boot
570 * process.
571 */
/*
 * Probe the registered enumerators and remember the winner in
 * best_enum, then use it to enumerate the CPUs.  Per the selection
 * loop below, a positive probe return means "no match" and among the
 * remaining (<= 0) returns the value closest to zero wins.
 */
static void
apic_init(void *dummy __unused)
{
	struct apic_enumerator *enumerator;
	int retval, best;

	/* We only support built in local APICs. */
	if (!(cpu_feature & CPUID_APIC))
		return;

	/* Don't probe if APIC mode is disabled. */
	if (resource_disabled("apic", 0))
		return;

	/* First, probe all the enumerators to find the best match. */
	best_enum = NULL;
	best = 0;
	SLIST_FOREACH(enumerator, &enumerators, apic_next) {
		retval = enumerator->apic_probe();
		if (retval > 0)
			continue;
		if (best_enum == NULL || best < retval) {
			best_enum = enumerator;
			best = retval;
		}
	}
	if (best_enum == NULL) {
		if (bootverbose)
			printf("APIC: Could not find any APICs.\n");
		return;
	}

	if (bootverbose)
		printf("APIC: Using the %s enumerator.\n",
		    best_enum->apic_name);

	/* Second, probe the CPU's in the system. */
	retval = best_enum->apic_probe_cpus();
	if (retval != 0)
		printf("%s: Failed to probe CPUs: returned %d\n",
		    best_enum->apic_name, retval);
}
614 SYSINIT(apic_init, SI_SUB_TUNABLES - 1, SI_ORDER_SECOND, apic_init, NULL)
615
616 /*
617 * Setup the local APIC. We have to do this prior to starting up the APs
618 * in the SMP case.
619 */
620 static void
621 apic_setup_local(void *dummy __unused)
622 {
623 int retval;
624
625 if (best_enum == NULL)
626 return;
627 retval = best_enum->apic_setup_local();
628 if (retval != 0)
629 printf("%s: Failed to setup the local APIC: returned %d\n",
630 best_enum->apic_name, retval);
631 #ifdef SMP
632 /* Last, setup the cpu topology now that we have probed CPUs */
633 mp_topology();
634 #endif
635 }
636 SYSINIT(apic_setup_local, SI_SUB_CPU, SI_ORDER_FIRST, apic_setup_local, NULL)
637
638 /*
639 * Setup the I/O APICs.
640 */
641 static void
642 apic_setup_io(void *dummy __unused)
643 {
644 int retval;
645
646 if (best_enum == NULL)
647 return;
648 retval = best_enum->apic_setup_io();
649 if (retval != 0)
650 printf("%s: Failed to setup I/O APICs: returned %d\n",
651 best_enum->apic_name, retval);
652
653 /*
654 * Finish setting up the local APIC on the BSP once we know how to
655 * properly program the LINT pins.
656 */
657 lapic_setup();
658 if (bootverbose)
659 lapic_dump("BSP");
660 }
661 SYSINIT(apic_setup_io, SI_SUB_INTR, SI_ORDER_SECOND, apic_setup_io, NULL)
662
663 #ifdef SMP
664 /*
665 * Inter Processor Interrupt functions. The lapic_ipi_*() functions are
666 * private to the sys/amd64 code. The public interface for the rest of the
667 * kernel is defined in mp_machdep.c.
668 */
669 int
670 lapic_ipi_wait(int delay)
671 {
672 int x, incr;
673
674 /*
675 * Wait delay loops for IPI to be sent. This is highly bogus
676 * since this is sensitive to CPU clock speed. If delay is
677 * -1, we wait forever.
678 */
679 if (delay == -1) {
680 incr = 0;
681 delay = 1;
682 } else
683 incr = 1;
684 for (x = 0; x < delay; x += incr) {
685 if ((lapic->icr_lo & APIC_DELSTAT_MASK) == APIC_DELSTAT_IDLE)
686 return (1);
687 ia32_pause();
688 }
689 return (0);
690 }
691
/*
 * Low-level IPI dispatch: program the ICR HI destination field (when a
 * specific destination is used) and then merge 'icrlo' into the ICR LO
 * register, which triggers the send.  Interrupts are disabled so the
 * two register writes stay paired.
 */
void
lapic_ipi_raw(register_t icrlo, u_int dest)
{
	register_t value, eflags;

	/* XXX: Need more sanity checking of icrlo? */
	KASSERT(lapic != NULL, ("%s called too early", __func__));
	KASSERT((dest & ~(APIC_ID_MASK >> APIC_ID_SHIFT)) == 0,
	    ("%s: invalid dest field", __func__));
	KASSERT((icrlo & APIC_ICRLO_RESV_MASK) == 0,
	    ("%s: reserved bits set in ICR LO register", __func__));

	/* Set destination in ICR HI register if it is being used. */
	eflags = intr_disable();
	if ((icrlo & APIC_DEST_MASK) == APIC_DEST_DESTFLD) {
		value = lapic->icr_hi;
		value &= ~APIC_ID_MASK;
		value |= dest << APIC_ID_SHIFT;
		lapic->icr_hi = value;
	}

	/* Program the contents of the IPI and dispatch it. */
	value = lapic->icr_lo;
	value &= APIC_ICRLO_RESV_MASK;
	value |= icrlo;
	lapic->icr_lo = value;
	intr_restore(eflags);
}
720
721 #define BEFORE_SPIN 1000000
722 #ifdef DETECT_DEADLOCK
723 #define AFTER_SPIN 1000
724 #endif
725
/*
 * Send a fixed-delivery, edge-triggered IPI with the given vector to
 * 'dest', which is either a physical APIC ID or one of the special
 * APIC_IPI_DEST_* shorthands.  Panics if a previously sent IPI never
 * clears the delivery-status bit.
 */
void
lapic_ipi_vectored(u_int vector, int dest)
{
	register_t icrlo, destfield;

	KASSERT((vector & ~APIC_VECTOR_MASK) == 0,
	    ("%s: invalid vector %d", __func__, vector));

	icrlo = vector | APIC_DELMODE_FIXED | APIC_DESTMODE_PHY |
	    APIC_LEVEL_DEASSERT | APIC_TRIGMOD_EDGE;
	destfield = 0;
	switch (dest) {
	case APIC_IPI_DEST_SELF:
		icrlo |= APIC_DEST_SELF;
		break;
	case APIC_IPI_DEST_ALL:
		icrlo |= APIC_DEST_ALLISELF;
		break;
	case APIC_IPI_DEST_OTHERS:
		icrlo |= APIC_DEST_ALLESELF;
		break;
	default:
		/* A specific CPU: pass the ID via the ICR HI dest field. */
		KASSERT((dest & ~(APIC_ID_MASK >> APIC_ID_SHIFT)) == 0,
		    ("%s: invalid destination 0x%x", __func__, dest));
		destfield = dest;
	}

	/* Wait for an earlier IPI to finish. */
	if (!lapic_ipi_wait(BEFORE_SPIN))
		panic("APIC: Previous IPI is stuck");

	lapic_ipi_raw(icrlo, destfield);

#ifdef DETECT_DEADLOCK
	/* Wait for IPI to be delivered. */
	if (!lapic_ipi_wait(AFTER_SPIN)) {
#ifdef needsattention
		/*
		 * XXX FIXME:
		 *
		 * The above function waits for the message to actually be
		 * delivered.  It breaks out after an arbitrary timeout
		 * since the message should eventually be delivered (at
		 * least in theory) and that if it wasn't we would catch
		 * the failure with the check above when the next IPI is
		 * sent.
		 *
		 * We could skip this wait entirely, EXCEPT it probably
		 * protects us from other routines that assume that the
		 * message was delivered and acted upon when this function
		 * returns.
		 */
		printf("APIC: IPI might be stuck\n");
#else /* !needsattention */
		/* Wait until message is sent without a timeout. */
		while (lapic->icr_lo & APIC_DELSTAT_PEND)
			ia32_pause();
#endif /* needsattention */
	}
#endif /* DETECT_DEADLOCK */
}
787 #endif /* SMP */
Cache object: 1d340cc0e7189736c74c5f4744822443
|