1 /*-
2 * Copyright (c) 2003 John Baldwin <jhb@FreeBSD.org>
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 * 3. Neither the name of the author nor the names of any co-contributors
14 * may be used to endorse or promote products derived from this software
15 * without specific prior written permission.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
21 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27 * SUCH DAMAGE.
28 *
29 * $FreeBSD: releng/9.1/sys/x86/x86/intr_machdep.c 235260 2012-05-11 04:10:23Z attilio $
30 */
31
32 /*
33 * Machine dependent interrupt code for x86. For x86, we have to
34 * deal with different PICs. Thus, we use the passed in vector to lookup
35 * an interrupt source associated with that vector. The interrupt source
36 * describes which PIC the source belongs to and includes methods to handle
37 * that source.
38 */
39
40 #include "opt_atpic.h"
41 #include "opt_ddb.h"
42
43 #include <sys/param.h>
44 #include <sys/bus.h>
45 #include <sys/interrupt.h>
46 #include <sys/ktr.h>
47 #include <sys/kernel.h>
48 #include <sys/lock.h>
49 #include <sys/mutex.h>
50 #include <sys/proc.h>
51 #include <sys/smp.h>
52 #include <sys/syslog.h>
53 #include <sys/systm.h>
54 #include <machine/clock.h>
55 #include <machine/intr_machdep.h>
56 #include <machine/smp.h>
57 #ifdef DDB
58 #include <ddb/ddb.h>
59 #endif
60
61 #ifndef DEV_ATPIC
62 #include <machine/segments.h>
63 #include <machine/frame.h>
64 #include <dev/ic/i8259.h>
65 #include <x86/isa/icu.h>
66 #ifdef PC98
67 #include <pc98/cbus/cbus.h>
68 #else
69 #include <x86/isa/isa.h>
70 #endif
71 #endif
72
#define MAX_STRAY_LOG 5	/* Log at most this many strays per source. */

/* Generic signature used to pass PIC mask/unmask methods to intr_event. */
typedef void (*mask_fn)(void *);

static int intrcnt_index;	/* Next free slot in intrcnt[]/intrnames[]. */
static struct intsrc *interrupt_sources[NUM_IO_INTS];	/* Indexed by vector. */
static struct mtx intr_table_lock;	/* Protects interrupt_sources and pics. */
static struct mtx intrcnt_lock;	/* Spin lock for intrcnt slot allocation. */
static STAILQ_HEAD(, pic) pics;	/* All registered interrupt controllers. */

#ifdef SMP
static int assign_cpu;	/* Non-zero once APs are up and IRQs may be moved. */
#endif

/* Interrupt counter storage and matching names (fixed-width entries). */
u_long intrcnt[INTRCNT_COUNT];
char intrnames[INTRCNT_COUNT * (MAXCOMLEN + 1)];
size_t sintrcnt = sizeof(intrcnt);
size_t sintrnames = sizeof(intrnames);

static int intr_assign_cpu(void *arg, u_char cpu);
static void intr_disable_src(void *arg);
static void intr_init(void *__dummy);
static int intr_pic_registered(struct pic *pic);
static void intrcnt_setname(const char *name, int index);
static void intrcnt_updatename(struct intsrc *is);
static void intrcnt_register(struct intsrc *is);
99
100 static int
101 intr_pic_registered(struct pic *pic)
102 {
103 struct pic *p;
104
105 STAILQ_FOREACH(p, &pics, pics) {
106 if (p == pic)
107 return (1);
108 }
109 return (0);
110 }
111
112 /*
113 * Register a new interrupt controller (PIC). This is to support suspend
114 * and resume where we suspend/resume controllers rather than individual
115 * sources. This also allows controllers with no active sources (such as
116 * 8259As in a system using the APICs) to participate in suspend and resume.
117 */
118 int
119 intr_register_pic(struct pic *pic)
120 {
121 int error;
122
123 mtx_lock(&intr_table_lock);
124 if (intr_pic_registered(pic))
125 error = EBUSY;
126 else {
127 STAILQ_INSERT_TAIL(&pics, pic, pics);
128 error = 0;
129 }
130 mtx_unlock(&intr_table_lock);
131 return (error);
132 }
133
134 /*
135 * Register a new interrupt source with the global interrupt system.
136 * The global interrupts need to be disabled when this function is
137 * called.
138 */
int
intr_register_source(struct intsrc *isrc)
{
	int error, vector;

	KASSERT(intr_pic_registered(isrc->is_pic), ("unregistered PIC"));
	vector = isrc->is_pic->pic_vector(isrc);
	/* Cheap unlocked check first; the slot is rechecked under the lock. */
	if (interrupt_sources[vector] != NULL)
		return (EEXIST);
	error = intr_event_create(&isrc->is_event, isrc, 0, vector,
	    intr_disable_src, (mask_fn)isrc->is_pic->pic_enable_source,
	    (mask_fn)isrc->is_pic->pic_eoi_source, intr_assign_cpu, "irq%d:",
	    vector);
	if (error)
		return (error);
	mtx_lock(&intr_table_lock);
	/*
	 * Recheck now that we hold the table lock: a racing registration
	 * may have claimed the vector since the unlocked check above, in
	 * which case the event we just created must be torn down again.
	 */
	if (interrupt_sources[vector] != NULL) {
		mtx_unlock(&intr_table_lock);
		intr_event_destroy(isrc->is_event);
		return (EEXIST);
	}
	intrcnt_register(isrc);
	interrupt_sources[vector] = isrc;
	isrc->is_handlers = 0;
	mtx_unlock(&intr_table_lock);
	return (0);
}
166
167 struct intsrc *
168 intr_lookup_source(int vector)
169 {
170
171 return (interrupt_sources[vector]);
172 }
173
/*
 * Add a handler to the event for the given vector.  Returns EINVAL for
 * an unknown vector; otherwise propagates intr_event_add_handler()'s
 * result.  On success the handler count is bumped and, for the first
 * handler on a source, the interrupt is enabled at its PIC.
 */
int
intr_add_handler(const char *name, int vector, driver_filter_t filter,
    driver_intr_t handler, void *arg, enum intr_type flags, void **cookiep)
{
	struct intsrc *isrc;
	int error;

	isrc = intr_lookup_source(vector);
	if (isrc == NULL)
		return (EINVAL);
	error = intr_event_add_handler(isrc->is_event, name, filter, handler,
	    arg, intr_priority(flags), flags, cookiep);
	if (error == 0) {
		mtx_lock(&intr_table_lock);
		/* The event name changed; refresh the intrnames entry. */
		intrcnt_updatename(isrc);
		isrc->is_handlers++;
		if (isrc->is_handlers == 1) {
			/* First handler: enable and unmask at the PIC. */
			isrc->is_pic->pic_enable_intr(isrc);
			isrc->is_pic->pic_enable_source(isrc);
		}
		mtx_unlock(&intr_table_lock);
	}
	return (error);
}
198
/*
 * Remove a previously added handler by its cookie.  When the last
 * handler on a source goes away, the source is masked (without EOI)
 * and disabled at its PIC — the mirror image of intr_add_handler().
 */
int
intr_remove_handler(void *cookie)
{
	struct intsrc *isrc;
	int error;

	/* Resolve the owning source before the cookie is invalidated. */
	isrc = intr_handler_source(cookie);
	error = intr_event_remove_handler(cookie);
	if (error == 0) {
		mtx_lock(&intr_table_lock);
		isrc->is_handlers--;
		if (isrc->is_handlers == 0) {
			isrc->is_pic->pic_disable_source(isrc, PIC_NO_EOI);
			isrc->is_pic->pic_disable_intr(isrc);
		}
		intrcnt_updatename(isrc);
		mtx_unlock(&intr_table_lock);
	}
	return (error);
}
219
220 int
221 intr_config_intr(int vector, enum intr_trigger trig, enum intr_polarity pol)
222 {
223 struct intsrc *isrc;
224
225 isrc = intr_lookup_source(vector);
226 if (isrc == NULL)
227 return (EINVAL);
228 return (isrc->is_pic->pic_config_intr(isrc, trig, pol));
229 }
230
231 static void
232 intr_disable_src(void *arg)
233 {
234 struct intsrc *isrc;
235
236 isrc = arg;
237 isrc->is_pic->pic_disable_source(isrc, PIC_EOI);
238 }
239
/*
 * Main dispatch path: count the interrupt, then hand it to the generic
 * intr_event code.  Stray interrupts (no handler claimed them) are
 * masked, counted, and rate-limit logged.
 */
void
intr_execute_handlers(struct intsrc *isrc, struct trapframe *frame)
{
	struct intr_event *ie;
	int vector;

	/*
	 * We count software interrupts when we process them.  The
	 * code here follows previous practice, but there's an
	 * argument for counting hardware interrupts when they're
	 * processed too.
	 */
	(*isrc->is_count)++;
	PCPU_INC(cnt.v_intr);

	ie = isrc->is_event;

	/*
	 * XXX: We assume that IRQ 0 is only used for the ISA timer
	 * device (clk).
	 */
	vector = isrc->is_pic->pic_vector(isrc);
	if (vector == 0)
		clkintr_pending = 1;

	/*
	 * For stray interrupts, mask and EOI the source, bump the
	 * stray count, and log the condition.  Logging stops after
	 * MAX_STRAY_LOG occurrences to avoid flooding the console.
	 */
	if (intr_event_handle(ie, frame) != 0) {
		isrc->is_pic->pic_disable_source(isrc, PIC_EOI);
		(*isrc->is_straycount)++;
		if (*isrc->is_straycount < MAX_STRAY_LOG)
			log(LOG_ERR, "stray irq%d\n", vector);
		else if (*isrc->is_straycount == MAX_STRAY_LOG)
			log(LOG_CRIT,
			    "too many stray irq %d's: not logging anymore\n",
			    vector);
	}
}
280
281 void
282 intr_resume(void)
283 {
284 struct pic *pic;
285
286 #ifndef DEV_ATPIC
287 atpic_reset();
288 #endif
289 mtx_lock(&intr_table_lock);
290 STAILQ_FOREACH(pic, &pics, pics) {
291 if (pic->pic_resume != NULL)
292 pic->pic_resume(pic);
293 }
294 mtx_unlock(&intr_table_lock);
295 }
296
297 void
298 intr_suspend(void)
299 {
300 struct pic *pic;
301
302 mtx_lock(&intr_table_lock);
303 STAILQ_FOREACH(pic, &pics, pics) {
304 if (pic->pic_suspend != NULL)
305 pic->pic_suspend(pic);
306 }
307 mtx_unlock(&intr_table_lock);
308 }
309
/*
 * intr_event callback used to move an interrupt source to a given CPU.
 * On UP kernels this is unsupported; on SMP it is a no-op until the
 * APs have been started (assign_cpu) or when no CPU is requested.
 */
static int
intr_assign_cpu(void *arg, u_char cpu)
{
#ifdef SMP
	struct intsrc *isrc;
	int error;

	/*
	 * Don't do anything during early boot.  We will pick up the
	 * assignment once the APs are started.
	 */
	if (assign_cpu && cpu != NOCPU) {
		isrc = arg;
		mtx_lock(&intr_table_lock);
		/* Translate the logical CPU id to its local APIC id. */
		error = isrc->is_pic->pic_assign_cpu(isrc, cpu_apic_ids[cpu]);
		mtx_unlock(&intr_table_lock);
	} else
		error = 0;
	return (error);
#else
	return (EOPNOTSUPP);
#endif
}
333
334 static void
335 intrcnt_setname(const char *name, int index)
336 {
337
338 snprintf(intrnames + (MAXCOMLEN + 1) * index, MAXCOMLEN + 1, "%-*s",
339 MAXCOMLEN, name);
340 }
341
342 static void
343 intrcnt_updatename(struct intsrc *is)
344 {
345
346 intrcnt_setname(is->is_event->ie_fullname, is->is_index);
347 }
348
/*
 * Allocate two consecutive intrcnt[] slots for a newly registered
 * source — one for delivered interrupts and one for strays — and
 * install their names.
 */
static void
intrcnt_register(struct intsrc *is)
{
	char straystr[MAXCOMLEN + 1];

	KASSERT(is->is_event != NULL, ("%s: isrc with no event", __func__));
	mtx_lock_spin(&intrcnt_lock);
	is->is_index = intrcnt_index;
	intrcnt_index += 2;	/* one slot for the IRQ, one for strays */
	snprintf(straystr, MAXCOMLEN + 1, "stray irq%d",
	    is->is_pic->pic_vector(is));
	intrcnt_updatename(is);
	is->is_count = &intrcnt[is->is_index];
	intrcnt_setname(straystr, is->is_index + 1);
	is->is_straycount = &intrcnt[is->is_index + 1];
	mtx_unlock_spin(&intrcnt_lock);
}
366
367 void
368 intrcnt_add(const char *name, u_long **countp)
369 {
370
371 mtx_lock_spin(&intrcnt_lock);
372 *countp = &intrcnt[intrcnt_index];
373 intrcnt_setname(name, intrcnt_index);
374 intrcnt_index++;
375 mtx_unlock_spin(&intrcnt_lock);
376 }
377
378 static void
379 intr_init(void *dummy __unused)
380 {
381
382 intrcnt_setname("???", 0);
383 intrcnt_index = 1;
384 STAILQ_INIT(&pics);
385 mtx_init(&intr_table_lock, "intr sources", NULL, MTX_DEF);
386 mtx_init(&intrcnt_lock, "intrcnt", NULL, MTX_SPIN);
387 }
388 SYSINIT(intr_init, SI_SUB_INTR, SI_ORDER_FIRST, intr_init, NULL);
389
390 #ifndef DEV_ATPIC
391 /* Initialize the two 8259A's to a known-good shutdown state. */
void
atpic_reset(void)
{

	/* Master 8259A: ICW1 starts the init sequence (ICW4 to follow). */
	outb(IO_ICU1, ICW1_RESET | ICW1_IC4);
	/* ICW2: vector base for the master's IRQs. */
	outb(IO_ICU1 + ICU_IMR_OFFSET, IDT_IO_INTS);
	/* ICW3: which IRQ line has the slave cascaded on it. */
	outb(IO_ICU1 + ICU_IMR_OFFSET, IRQ_MASK(ICU_SLAVEID));
	/* ICW4: operating mode for the master. */
	outb(IO_ICU1 + ICU_IMR_OFFSET, MASTER_MODE);
	/* OCW1: mask every IRQ on the master. */
	outb(IO_ICU1 + ICU_IMR_OFFSET, 0xff);
	/* OCW3: select IRR for subsequent status reads. */
	outb(IO_ICU1, OCW3_SEL | OCW3_RR);

	/* Slave 8259A: same sequence with slave-side ICW3/ICW4 values. */
	outb(IO_ICU2, ICW1_RESET | ICW1_IC4);
	outb(IO_ICU2 + ICU_IMR_OFFSET, IDT_IO_INTS + 8);
	outb(IO_ICU2 + ICU_IMR_OFFSET, ICU_SLAVEID);
	outb(IO_ICU2 + ICU_IMR_OFFSET, SLAVE_MODE);
	outb(IO_ICU2 + ICU_IMR_OFFSET, 0xff);
	outb(IO_ICU2, OCW3_SEL | OCW3_RR);
}
410 #endif
411
412 /* Add a description to an active interrupt handler. */
413 int
414 intr_describe(u_int vector, void *ih, const char *descr)
415 {
416 struct intsrc *isrc;
417 int error;
418
419 isrc = intr_lookup_source(vector);
420 if (isrc == NULL)
421 return (EINVAL);
422 error = intr_event_describe_handler(isrc->is_event, ih, descr);
423 if (error)
424 return (error);
425 intrcnt_updatename(isrc);
426 return (0);
427 }
428
429 #ifdef DDB
430 /*
431 * Dump data about interrupt handlers
432 */
433 DB_SHOW_COMMAND(irqs, db_show_irqs)
434 {
435 struct intsrc **isrc;
436 int i, verbose;
437
438 if (strcmp(modif, "v") == 0)
439 verbose = 1;
440 else
441 verbose = 0;
442 isrc = interrupt_sources;
443 for (i = 0; i < NUM_IO_INTS && !db_pager_quit; i++, isrc++)
444 if (*isrc != NULL)
445 db_dump_intr_event((*isrc)->is_event, verbose);
446 }
447 #endif
448
449 #ifdef SMP
450 /*
451 * Support for balancing interrupt sources across CPUs. For now we just
452 * allocate CPUs round-robin.
453 */
454
static cpuset_t intr_cpus;	/* CPUs eligible as interrupt targets. */
static int current_cpu;		/* Next round-robin candidate CPU. */
457
458 /*
459 * Return the CPU that the next interrupt source should use. For now
460 * this just returns the next local APIC according to round-robin.
461 */
u_int
intr_next_cpu(void)
{
	u_int apic_id;

	/* Leave all interrupts on the BSP during boot. */
	if (!assign_cpu)
		return (PCPU_GET(apic_id));

	mtx_lock_spin(&icu_lock);
	/*
	 * Hand out the current candidate's APIC id, then advance the
	 * round-robin pointer to the next CPU present in intr_cpus.
	 */
	apic_id = cpu_apic_ids[current_cpu];
	do {
		current_cpu++;
		if (current_cpu > mp_maxid)
			current_cpu = 0;
	} while (!CPU_ISSET(current_cpu, &intr_cpus));
	mtx_unlock_spin(&icu_lock);
	return (apic_id);
}
481
482 /* Attempt to bind the specified IRQ to the specified CPU. */
483 int
484 intr_bind(u_int vector, u_char cpu)
485 {
486 struct intsrc *isrc;
487
488 isrc = intr_lookup_source(vector);
489 if (isrc == NULL)
490 return (EINVAL);
491 return (intr_event_bind(isrc->is_event, cpu));
492 }
493
494 /*
495 * Add a CPU to our mask of valid CPUs that can be destinations of
496 * interrupts.
497 */
498 void
499 intr_add_cpu(u_int cpu)
500 {
501
502 if (cpu >= MAXCPU)
503 panic("%s: Invalid CPU ID", __func__);
504 if (bootverbose)
505 printf("INTR: Adding local APIC %d as a target\n",
506 cpu_apic_ids[cpu]);
507
508 CPU_SET(cpu, &intr_cpus);
509 }
510
511 /*
512 * Distribute all the interrupt sources among the available CPUs once the
513 * AP's have been launched.
514 */
/*
 * Walk every registered source with at least one handler and give it a
 * CPU: a pre-existing binding wins, otherwise round-robin.  Setting
 * assign_cpu first makes later intr_assign_cpu() calls take effect.
 */
static void
intr_shuffle_irqs(void *arg __unused)
{
	struct intsrc *isrc;
	int i;

#ifdef XEN
	/*
	 * Doesn't work yet
	 */
	return;
#endif

	/* Don't bother on UP. */
	if (mp_ncpus == 1)
		return;

	/* Round-robin assign a CPU to each enabled source. */
	mtx_lock(&intr_table_lock);
	assign_cpu = 1;
	for (i = 0; i < NUM_IO_INTS; i++) {
		isrc = interrupt_sources[i];
		if (isrc != NULL && isrc->is_handlers > 0) {
			/*
			 * If this event is already bound to a CPU,
			 * then assign the source to that CPU instead
			 * of picking one via round-robin.  Note that
			 * this is careful to only advance the
			 * round-robin if the CPU assignment succeeds.
			 */
			if (isrc->is_event->ie_cpu != NOCPU)
				(void)isrc->is_pic->pic_assign_cpu(isrc,
				    cpu_apic_ids[isrc->is_event->ie_cpu]);
			else if (isrc->is_pic->pic_assign_cpu(isrc,
			    cpu_apic_ids[current_cpu]) == 0)
				(void)intr_next_cpu();

		}
	}
	mtx_unlock(&intr_table_lock);
}
556 SYSINIT(intr_shuffle_irqs, SI_SUB_SMP, SI_ORDER_SECOND, intr_shuffle_irqs,
557 NULL);
558 #else
559 /*
560 * Always route interrupts to the current processor in the UP case.
561 */
562 u_int
563 intr_next_cpu(void)
564 {
565
566 return (PCPU_GET(apic_id));
567 }
568
569 /* Use an empty stub for compatibility. */
void
intr_add_cpu(u_int cpu __unused)
{

	/* Nothing to track on UP; kept so MI callers link unchanged. */
}
575 #endif
/* Cache object: 129a17734288e235fc2cfce9448fc58c */