1 /*-
2 * Copyright (c) 2003 John Baldwin <jhb@FreeBSD.org>
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 * 3. Neither the name of the author nor the names of any co-contributors
14 * may be used to endorse or promote products derived from this software
15 * without specific prior written permission.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
21 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27 * SUCH DAMAGE.
28 *
29 * $FreeBSD$
30 */
31
32 /*
33 * Machine dependent interrupt code for i386. For the i386, we have to
34 * deal with different PICs. Thus, we use the passed in vector to lookup
35 * an interrupt source associated with that vector. The interrupt source
36 * describes which PIC the source belongs to and includes methods to handle
37 * that source.
38 */
39
40 #include "opt_ddb.h"
41
42 #include <sys/param.h>
43 #include <sys/bus.h>
44 #include <sys/interrupt.h>
45 #include <sys/ktr.h>
46 #include <sys/kernel.h>
47 #include <sys/lock.h>
48 #include <sys/mutex.h>
49 #include <sys/proc.h>
50 #include <sys/smp.h>
51 #include <sys/syslog.h>
52 #include <sys/systm.h>
53 #include <machine/clock.h>
54 #include <machine/intr_machdep.h>
55 #include <machine/smp.h>
56 #ifdef DDB
57 #include <ddb/ddb.h>
58 #endif
59
/* Stop logging a source's stray interrupts after this many occurrences. */
#define MAX_STRAY_LOG 5

/* Cast used to adapt PIC enable/EOI methods to intr_event callbacks. */
typedef void (*mask_fn)(void *);

/* Next free slot pair in the intrcnt[]/intrnames[] statistics arrays. */
static int intrcnt_index;
/* Table of registered sources, indexed by interrupt vector. */
static struct intsrc *interrupt_sources[NUM_IO_INTS];
/* Protects interrupt_sources[], the pics list, and handler counts. */
static struct mtx intr_table_lock;
/* Spin lock protecting intrcnt_index and the statistics arrays. */
static struct mtx intrcnt_lock;
/* List of all registered interrupt controllers, in registration order. */
static TAILQ_HEAD(pics_head, pic) pics;

#ifdef SMP
/* Nonzero once the APs are up and sources may be assigned to CPUs. */
static int assign_cpu;
#endif

static int intr_assign_cpu(void *arg, u_char cpu);
static void intr_disable_src(void *arg);
static void intr_init(void *__dummy);
static int intr_pic_registered(struct pic *pic);
static void intrcnt_setname(const char *name, int index);
static void intrcnt_updatename(struct intsrc *is);
static void intrcnt_register(struct intsrc *is);
81
82 static int
83 intr_pic_registered(struct pic *pic)
84 {
85 struct pic *p;
86
87 TAILQ_FOREACH(p, &pics, pics) {
88 if (p == pic)
89 return (1);
90 }
91 return (0);
92 }
93
94 /*
95 * Register a new interrupt controller (PIC). This is to support suspend
96 * and resume where we suspend/resume controllers rather than individual
97 * sources. This also allows controllers with no active sources (such as
98 * 8259As in a system using the APICs) to participate in suspend and resume.
99 */
100 int
101 intr_register_pic(struct pic *pic)
102 {
103 int error;
104
105 mtx_lock(&intr_table_lock);
106 if (intr_pic_registered(pic))
107 error = EBUSY;
108 else {
109 TAILQ_INSERT_TAIL(&pics, pic, pics);
110 error = 0;
111 }
112 mtx_unlock(&intr_table_lock);
113 return (error);
114 }
115
116 /*
117 * Register a new interrupt source with the global interrupt system.
118 * The global interrupts need to be disabled when this function is
119 * called.
120 */
121 int
122 intr_register_source(struct intsrc *isrc)
123 {
124 int error, vector;
125
126 KASSERT(intr_pic_registered(isrc->is_pic), ("unregistered PIC"));
127 vector = isrc->is_pic->pic_vector(isrc);
128 if (interrupt_sources[vector] != NULL)
129 return (EEXIST);
130 error = intr_event_create(&isrc->is_event, isrc, 0, vector,
131 intr_disable_src, (mask_fn)isrc->is_pic->pic_enable_source,
132 (mask_fn)isrc->is_pic->pic_eoi_source, intr_assign_cpu, "irq%d:",
133 vector);
134 if (error)
135 return (error);
136 mtx_lock(&intr_table_lock);
137 if (interrupt_sources[vector] != NULL) {
138 mtx_unlock(&intr_table_lock);
139 intr_event_destroy(isrc->is_event);
140 return (EEXIST);
141 }
142 intrcnt_register(isrc);
143 interrupt_sources[vector] = isrc;
144 isrc->is_handlers = 0;
145 mtx_unlock(&intr_table_lock);
146 return (0);
147 }
148
149 struct intsrc *
150 intr_lookup_source(int vector)
151 {
152
153 return (interrupt_sources[vector]);
154 }
155
156 int
157 intr_add_handler(const char *name, int vector, driver_filter_t filter,
158 driver_intr_t handler, void *arg, enum intr_type flags, void **cookiep)
159 {
160 struct intsrc *isrc;
161 int error;
162
163 isrc = intr_lookup_source(vector);
164 if (isrc == NULL)
165 return (EINVAL);
166 error = intr_event_add_handler(isrc->is_event, name, filter, handler,
167 arg, intr_priority(flags), flags, cookiep);
168 if (error == 0) {
169 mtx_lock(&intr_table_lock);
170 intrcnt_updatename(isrc);
171 isrc->is_handlers++;
172 if (isrc->is_handlers == 1) {
173 isrc->is_pic->pic_enable_intr(isrc);
174 isrc->is_pic->pic_enable_source(isrc);
175 }
176 mtx_unlock(&intr_table_lock);
177 }
178 return (error);
179 }
180
/*
 * Remove a previously added handler.  When the last handler for a
 * source is removed, mask and disable the source at the owning PIC.
 */
int
intr_remove_handler(void *cookie)
{
	struct intsrc *isrc;
	int error;

	/*
	 * Look up the source first: the cookie refers to the handler
	 * being removed, so the lookup must happen before the removal.
	 */
	isrc = intr_handler_source(cookie);
	error = intr_event_remove_handler(cookie);
	if (error == 0) {
		mtx_lock(&intr_table_lock);
		isrc->is_handlers--;
		/* Last handler gone: quiesce the source without an EOI. */
		if (isrc->is_handlers == 0) {
			isrc->is_pic->pic_disable_source(isrc, PIC_NO_EOI);
			isrc->is_pic->pic_disable_intr(isrc);
		}
		intrcnt_updatename(isrc);
		mtx_unlock(&intr_table_lock);
	}
	return (error);
}
201
202 int
203 intr_config_intr(int vector, enum intr_trigger trig, enum intr_polarity pol)
204 {
205 struct intsrc *isrc;
206
207 isrc = intr_lookup_source(vector);
208 if (isrc == NULL)
209 return (EINVAL);
210 return (isrc->is_pic->pic_config_intr(isrc, trig, pol));
211 }
212
213 static void
214 intr_disable_src(void *arg)
215 {
216 struct intsrc *isrc;
217
218 isrc = arg;
219 isrc->is_pic->pic_disable_source(isrc, PIC_EOI);
220 }
221
/*
 * Run the handlers for an interrupt source from interrupt context.
 * Bumps the per-source and per-CPU counters, then dispatches to the
 * source's intr_event; unhandled (stray) interrupts are masked, EOId,
 * counted, and rate-limited-logged.
 */
void
intr_execute_handlers(struct intsrc *isrc, struct trapframe *frame)
{
	struct intr_event *ie;
	int vector;

	/*
	 * We count software interrupts when we process them. The
	 * code here follows previous practice, but there's an
	 * argument for counting hardware interrupts when they're
	 * processed too.
	 */
	(*isrc->is_count)++;
	PCPU_INC(cnt.v_intr);

	ie = isrc->is_event;

	/*
	 * XXX: We assume that IRQ 0 is only used for the ISA timer
	 * device (clk).
	 */
	vector = isrc->is_pic->pic_vector(isrc);
	if (vector == 0)
		clkintr_pending = 1;

	/*
	 * For stray interrupts, mask and EOI the source, bump the
	 * stray count, and log the condition.
	 */
	if (intr_event_handle(ie, frame) != 0) {
		isrc->is_pic->pic_disable_source(isrc, PIC_EOI);
		(*isrc->is_straycount)++;
		/* Log up to MAX_STRAY_LOG strays, then announce silence. */
		if (*isrc->is_straycount < MAX_STRAY_LOG)
			log(LOG_ERR, "stray irq%d\n", vector);
		else if (*isrc->is_straycount == MAX_STRAY_LOG)
			log(LOG_CRIT,
			    "too many stray irq %d's: not logging anymore\n",
			    vector);
	}
}
262
263 void
264 intr_resume(void)
265 {
266 struct pic *pic;
267
268 mtx_lock(&intr_table_lock);
269 TAILQ_FOREACH(pic, &pics, pics) {
270 if (pic->pic_resume != NULL)
271 pic->pic_resume(pic);
272 }
273 mtx_unlock(&intr_table_lock);
274 }
275
276 void
277 intr_suspend(void)
278 {
279 struct pic *pic;
280
281 mtx_lock(&intr_table_lock);
282 TAILQ_FOREACH_REVERSE(pic, &pics, pics_head, pics) {
283 if (pic->pic_suspend != NULL)
284 pic->pic_suspend(pic);
285 }
286 mtx_unlock(&intr_table_lock);
287 }
288
/*
 * intr_event callback to move an interrupt source to a specific CPU.
 * On non-SMP kernels this always fails with EOPNOTSUPP.
 */
static int
intr_assign_cpu(void *arg, u_char cpu)
{
#ifdef SMP
	struct intsrc *isrc;
	int error;

	/*
	 * Don't do anything during early boot. We will pick up the
	 * assignment once the APs are started.
	 */
	if (assign_cpu && cpu != NOCPU) {
		isrc = arg;
		mtx_lock(&intr_table_lock);
		/* Translate the logical CPU ID to a local APIC ID. */
		error = isrc->is_pic->pic_assign_cpu(isrc, cpu_apic_ids[cpu]);
		mtx_unlock(&intr_table_lock);
	} else
		error = 0;
	return (error);
#else
	return (EOPNOTSUPP);
#endif
}
312
313 static void
314 intrcnt_setname(const char *name, int index)
315 {
316
317 snprintf(intrnames + (MAXCOMLEN + 1) * index, MAXCOMLEN + 1, "%-*s",
318 MAXCOMLEN, name);
319 }
320
321 static void
322 intrcnt_updatename(struct intsrc *is)
323 {
324
325 intrcnt_setname(is->is_event->ie_fullname, is->is_index);
326 }
327
/*
 * Allocate a pair of adjacent intrcnt[] slots for a new source: one
 * for normal interrupts and one for strays.  Caller holds the table
 * lock; intrcnt_lock serializes slot allocation itself.
 */
static void
intrcnt_register(struct intsrc *is)
{
	char straystr[MAXCOMLEN + 1];

	KASSERT(is->is_event != NULL, ("%s: isrc with no event", __func__));
	mtx_lock_spin(&intrcnt_lock);
	is->is_index = intrcnt_index;
	/* Two slots: is_index for the source, is_index + 1 for strays. */
	intrcnt_index += 2;
	snprintf(straystr, MAXCOMLEN + 1, "stray irq%d",
	    is->is_pic->pic_vector(is));
	intrcnt_updatename(is);
	is->is_count = &intrcnt[is->is_index];
	intrcnt_setname(straystr, is->is_index + 1);
	is->is_straycount = &intrcnt[is->is_index + 1];
	mtx_unlock_spin(&intrcnt_lock);
}
345
346 void
347 intrcnt_add(const char *name, u_long **countp)
348 {
349
350 mtx_lock_spin(&intrcnt_lock);
351 *countp = &intrcnt[intrcnt_index];
352 intrcnt_setname(name, intrcnt_index);
353 intrcnt_index++;
354 mtx_unlock_spin(&intrcnt_lock);
355 }
356
357 static void
358 intr_init(void *dummy __unused)
359 {
360
361 intrcnt_setname("???", 0);
362 intrcnt_index = 1;
363 TAILQ_INIT(&pics);
364 mtx_init(&intr_table_lock, "intr sources", NULL, MTX_DEF);
365 mtx_init(&intrcnt_lock, "intrcnt", NULL, MTX_SPIN);
366 }
367 SYSINIT(intr_init, SI_SUB_INTR, SI_ORDER_FIRST, intr_init, NULL);
368
369 /* Add a description to an active interrupt handler. */
370 int
371 intr_describe(u_int vector, void *ih, const char *descr)
372 {
373 struct intsrc *isrc;
374 int error;
375
376 isrc = intr_lookup_source(vector);
377 if (isrc == NULL)
378 return (EINVAL);
379 error = intr_event_describe_handler(isrc->is_event, ih, descr);
380 if (error)
381 return (error);
382 intrcnt_updatename(isrc);
383 return (0);
384 }
385
386 #ifdef DDB
387 /*
388 * Dump data about interrupt handlers
389 */
390 DB_SHOW_COMMAND(irqs, db_show_irqs)
391 {
392 struct intsrc **isrc;
393 int i, verbose;
394
395 if (strcmp(modif, "v") == 0)
396 verbose = 1;
397 else
398 verbose = 0;
399 isrc = interrupt_sources;
400 for (i = 0; i < NUM_IO_INTS && !db_pager_quit; i++, isrc++)
401 if (*isrc != NULL)
402 db_dump_intr_event((*isrc)->is_event, verbose);
403 }
404 #endif
405
406 #ifdef SMP
407 /*
408 * Support for balancing interrupt sources across CPUs. For now we just
409 * allocate CPUs round-robin.
410 */
411
412 /* The BSP is always a valid target. */
413 static cpumask_t intr_cpus = (1 << 0);
414 static int current_cpu;
415
416 /*
417 * Return the CPU that the next interrupt source should use. For now
418 * this just returns the next local APIC according to round-robin.
419 */
420 u_int
421 intr_next_cpu(void)
422 {
423 u_int apic_id;
424
425 /* Leave all interrupts on the BSP during boot. */
426 if (!assign_cpu)
427 return (PCPU_GET(apic_id));
428
429 mtx_lock_spin(&icu_lock);
430 apic_id = cpu_apic_ids[current_cpu];
431 do {
432 current_cpu++;
433 if (current_cpu > mp_maxid)
434 current_cpu = 0;
435 } while (!(intr_cpus & (1 << current_cpu)));
436 mtx_unlock_spin(&icu_lock);
437 return (apic_id);
438 }
439
440 /* Attempt to bind the specified IRQ to the specified CPU. */
441 int
442 intr_bind(u_int vector, u_char cpu)
443 {
444 struct intsrc *isrc;
445
446 isrc = intr_lookup_source(vector);
447 if (isrc == NULL)
448 return (EINVAL);
449 return (intr_event_bind(isrc->is_event, cpu));
450 }
451
452 /*
453 * Add a CPU to our mask of valid CPUs that can be destinations of
454 * interrupts.
455 */
456 void
457 intr_add_cpu(u_int cpu)
458 {
459
460 if (cpu >= MAXCPU)
461 panic("%s: Invalid CPU ID", __func__);
462 if (bootverbose)
463 printf("INTR: Adding local APIC %d as a target\n",
464 cpu_apic_ids[cpu]);
465
466 intr_cpus |= (1 << cpu);
467 }
468
469 /*
470 * Distribute all the interrupt sources among the available CPUs once the
471 * AP's have been launched.
472 */
473 static void
474 intr_shuffle_irqs(void *arg __unused)
475 {
476 struct intsrc *isrc;
477 int i;
478
479 #ifdef XEN
480 /*
481 * Doesn't work yet
482 */
483 return;
484 #endif
485
486 /* Don't bother on UP. */
487 if (mp_ncpus == 1)
488 return;
489
490 /* Round-robin assign a CPU to each enabled source. */
491 mtx_lock(&intr_table_lock);
492 assign_cpu = 1;
493 for (i = 0; i < NUM_IO_INTS; i++) {
494 isrc = interrupt_sources[i];
495 if (isrc != NULL && isrc->is_handlers > 0) {
496 /*
497 * If this event is already bound to a CPU,
498 * then assign the source to that CPU instead
499 * of picking one via round-robin. Note that
500 * this is careful to only advance the
501 * round-robin if the CPU assignment succeeds.
502 */
503 if (isrc->is_event->ie_cpu != NOCPU)
504 (void)isrc->is_pic->pic_assign_cpu(isrc,
505 cpu_apic_ids[isrc->is_event->ie_cpu]);
506 else if (isrc->is_pic->pic_assign_cpu(isrc,
507 cpu_apic_ids[current_cpu]) == 0)
508 (void)intr_next_cpu();
509
510 }
511 }
512 mtx_unlock(&intr_table_lock);
513 }
514 SYSINIT(intr_shuffle_irqs, SI_SUB_SMP, SI_ORDER_SECOND, intr_shuffle_irqs,
515 NULL);
516 #else
517 /*
518 * Always route interrupts to the current processor in the UP case.
519 */
520 u_int
521 intr_next_cpu(void)
522 {
523
524 return (PCPU_GET(apic_id));
525 }
526 #endif