1 /*-
2 * Copyright (c) 2003 John Baldwin <jhb@FreeBSD.org>
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 * 3. Neither the name of the author nor the names of any co-contributors
14 * may be used to endorse or promote products derived from this software
15 * without specific prior written permission.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
21 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27 * SUCH DAMAGE.
28 *
29 * $FreeBSD$
30 */
31
32 /*
33 * Machine dependent interrupt code for i386. For the i386, we have to
34 * deal with different PICs. Thus, we use the passed in vector to lookup
35 * an interrupt source associated with that vector. The interrupt source
36 * describes which PIC the source belongs to and includes methods to handle
37 * that source.
38 */
39
40 #include "opt_ddb.h"
41
42 #include <sys/param.h>
43 #include <sys/bus.h>
44 #include <sys/interrupt.h>
45 #include <sys/ktr.h>
46 #include <sys/kernel.h>
47 #include <sys/lock.h>
48 #include <sys/mutex.h>
49 #include <sys/proc.h>
50 #include <sys/syslog.h>
51 #include <sys/systm.h>
52 #include <sys/sx.h>
53 #include <machine/clock.h>
54 #include <machine/intr_machdep.h>
55 #include <machine/smp.h>
56 #ifdef DDB
57 #include <ddb/ddb.h>
58 #endif
59
60 #define MAX_STRAY_LOG 5
61
62 typedef void (*mask_fn)(void *);
63
64 static int intrcnt_index;
65 static struct intsrc *interrupt_sources[NUM_IO_INTS];
66 static struct sx intr_table_lock;
67 static struct mtx intrcnt_lock;
68 static STAILQ_HEAD(, pic) pics;
69
70 #ifdef INTR_FILTER
71 static void intr_eoi_src(void *arg);
72 static void intr_disab_eoi_src(void *arg);
73 static void intr_event_stray(void *cookie);
74 #endif
75
76 #ifdef SMP
77 static int assign_cpu;
78
79 static void intr_assign_next_cpu(struct intsrc *isrc);
80 #endif
81
82 static void intr_init(void *__dummy);
83 static int intr_pic_registered(struct pic *pic);
84 static void intrcnt_setname(const char *name, int index);
85 static void intrcnt_updatename(struct intsrc *is);
86 static void intrcnt_register(struct intsrc *is);
87
88 static int
89 intr_pic_registered(struct pic *pic)
90 {
91 struct pic *p;
92
93 STAILQ_FOREACH(p, &pics, pics) {
94 if (p == pic)
95 return (1);
96 }
97 return (0);
98 }
99
/*
 * Register a new interrupt controller (PIC).  This is to support suspend
 * and resume where we suspend/resume controllers rather than individual
 * sources.  This also allows controllers with no active sources (such as
 * 8259As in a system using the APICs) to participate in suspend and resume.
 *
 * Returns 0 on success or EBUSY if the PIC was already registered.
 */
int
intr_register_pic(struct pic *pic)
{
	int error;

	sx_xlock(&intr_table_lock);
	if (intr_pic_registered(pic))
		error = EBUSY;
	else {
		STAILQ_INSERT_TAIL(&pics, pic, pics);
		error = 0;
	}
	sx_xunlock(&intr_table_lock);
	return (error);
}
121
/*
 * Register a new interrupt source with the global interrupt system.
 * The global interrupts need to be disabled when this function is
 * called.
 *
 * Returns 0 on success, EEXIST if another source already owns the
 * source's vector, or an error from intr_event_create().
 */
int
intr_register_source(struct intsrc *isrc)
{
	int error, vector;

	/* The source's PIC must already be on the global PIC list. */
	KASSERT(intr_pic_registered(isrc->is_pic), ("unregistered PIC"));
	vector = isrc->is_pic->pic_vector(isrc);
	/* Cheap unlocked pre-check; re-checked under the lock below. */
	if (interrupt_sources[vector] != NULL)
		return (EEXIST);
#ifdef INTR_FILTER
	error = intr_event_create(&isrc->is_event, isrc, 0,
	    (mask_fn)isrc->is_pic->pic_enable_source,
	    intr_eoi_src, intr_disab_eoi_src, "irq%d:", vector);
#else
	error = intr_event_create(&isrc->is_event, isrc, 0,
	    (mask_fn)isrc->is_pic->pic_enable_source, "irq%d:", vector);
#endif
	if (error)
		return (error);
	sx_xlock(&intr_table_lock);
	/*
	 * Re-check under intr_table_lock: another thread may have
	 * claimed the vector between the unlocked check above and here.
	 * If so, tear down the event we just created.
	 */
	if (interrupt_sources[vector] != NULL) {
		sx_xunlock(&intr_table_lock);
		intr_event_destroy(isrc->is_event);
		return (EEXIST);
	}
	intrcnt_register(isrc);
	interrupt_sources[vector] = isrc;
	isrc->is_handlers = 0;
	sx_xunlock(&intr_table_lock);
	return (0);
}
158
159 struct intsrc *
160 intr_lookup_source(int vector)
161 {
162
163 return (interrupt_sources[vector]);
164 }
165
/*
 * Add a filter and/or threaded handler to the interrupt source for
 * the given vector.  When the first handler is added to a source,
 * the source is bound to a CPU (once SMP balancing has started) and
 * enabled/unmasked at its PIC.
 *
 * Returns EINVAL for an unknown vector, otherwise the result of
 * intr_event_add_handler().
 */
int
intr_add_handler(const char *name, int vector, driver_filter_t filter,
    driver_intr_t handler, void *arg, enum intr_type flags, void **cookiep)
{
	struct intsrc *isrc;
	int error;

	isrc = intr_lookup_source(vector);
	if (isrc == NULL)
		return (EINVAL);
	error = intr_event_add_handler(isrc->is_event, name, filter, handler,
	    arg, intr_priority(flags), flags, cookiep);
	if (error == 0) {
		sx_xlock(&intr_table_lock);
		/* Refresh the intrcnt name to reflect the new handler list. */
		intrcnt_updatename(isrc);
		isrc->is_handlers++;
		if (isrc->is_handlers == 1) {
#ifdef SMP
			/* Set once APs are up; see intr_shuffle_irqs(). */
			if (assign_cpu)
				intr_assign_next_cpu(isrc);
#endif
			/* First handler: enable and unmask the source. */
			isrc->is_pic->pic_enable_intr(isrc);
			isrc->is_pic->pic_enable_source(isrc);
		}
		sx_xunlock(&intr_table_lock);
	}
	return (error);
}
194
/*
 * Remove a handler previously installed via intr_add_handler().  When
 * the last handler on a source is removed, the source is masked and
 * disabled at its PIC.
 *
 * Returns the result of intr_event_remove_handler().
 */
int
intr_remove_handler(void *cookie)
{
	struct intsrc *isrc;
	int error;

	/*
	 * Resolve the source before removing the handler: the cookie
	 * identifies the handler, which is torn down by the removal
	 * call below.
	 */
	isrc = intr_handler_source(cookie);
	error = intr_event_remove_handler(cookie);
	if (error == 0) {
		sx_xlock(&intr_table_lock);
		isrc->is_handlers--;
		if (isrc->is_handlers == 0) {
			/* Last handler gone: mask (no EOI) and disable. */
			isrc->is_pic->pic_disable_source(isrc, PIC_NO_EOI);
			isrc->is_pic->pic_disable_intr(isrc);
		}
		intrcnt_updatename(isrc);
		sx_xunlock(&intr_table_lock);
	}
	return (error);
}
215
216 int
217 intr_config_intr(int vector, enum intr_trigger trig, enum intr_polarity pol)
218 {
219 struct intsrc *isrc;
220
221 isrc = intr_lookup_source(vector);
222 if (isrc == NULL)
223 return (EINVAL);
224 return (isrc->is_pic->pic_config_intr(isrc, trig, pol));
225 }
226
227 #ifdef INTR_FILTER
228 void
229 intr_execute_handlers(struct intsrc *isrc, struct trapframe *frame)
230 {
231 struct thread *td;
232 struct intr_event *ie;
233 int vector;
234
235 td = curthread;
236
237 /*
238 * We count software interrupts when we process them. The
239 * code here follows previous practice, but there's an
240 * argument for counting hardware interrupts when they're
241 * processed too.
242 */
243 (*isrc->is_count)++;
244 PCPU_INC(cnt.v_intr);
245
246 ie = isrc->is_event;
247
248 /*
249 * XXX: We assume that IRQ 0 is only used for the ISA timer
250 * device (clk).
251 */
252 vector = isrc->is_pic->pic_vector(isrc);
253 if (vector == 0)
254 clkintr_pending = 1;
255
256 if (intr_event_handle(ie, frame) != 0)
257 intr_event_stray(isrc);
258 }
259
260 static void
261 intr_event_stray(void *cookie)
262 {
263 struct intsrc *isrc;
264
265 isrc = cookie;
266 /*
267 * For stray interrupts, mask and EOI the source, bump the
268 * stray count, and log the condition.
269 */
270 isrc->is_pic->pic_disable_source(isrc, PIC_EOI);
271 (*isrc->is_straycount)++;
272 if (*isrc->is_straycount < MAX_STRAY_LOG)
273 log(LOG_ERR, "stray irq%d\n", isrc->is_pic->pic_vector(isrc));
274 else if (*isrc->is_straycount == MAX_STRAY_LOG)
275 log(LOG_CRIT,
276 "too many stray irq %d's: not logging anymore\n",
277 isrc->is_pic->pic_vector(isrc));
278 }
279
280 static void
281 intr_eoi_src(void *arg)
282 {
283 struct intsrc *isrc;
284
285 isrc = arg;
286 isrc->is_pic->pic_eoi_source(isrc);
287 }
288
289 static void
290 intr_disab_eoi_src(void *arg)
291 {
292 struct intsrc *isrc;
293
294 isrc = arg;
295 isrc->is_pic->pic_disable_source(isrc, PIC_EOI);
296 }
297 #else
/*
 * Main entry point for a hardware interrupt (non-INTR_FILTER build):
 * count the interrupt, handle strays, run filter handlers directly in
 * a critical section, then EOI the source (masking it first if an
 * ithread must run) and schedule the ithread as needed.
 */
void
intr_execute_handlers(struct intsrc *isrc, struct trapframe *frame)
{
	struct thread *td;
	struct intr_event *ie;
	struct intr_handler *ih;
	int error, vector, thread, ret;

	td = curthread;

	/*
	 * We count software interrupts when we process them.  The
	 * code here follows previous practice, but there's an
	 * argument for counting hardware interrupts when they're
	 * processed too.
	 */
	(*isrc->is_count)++;
	PCPU_INC(cnt.v_intr);

	ie = isrc->is_event;

	/*
	 * XXX: We assume that IRQ 0 is only used for the ISA timer
	 * device (clk).
	 */
	vector = isrc->is_pic->pic_vector(isrc);
	if (vector == 0)
		clkintr_pending = 1;

	/*
	 * For stray interrupts, mask and EOI the source, bump the
	 * stray count, and log the condition (quieting after
	 * MAX_STRAY_LOG messages).
	 */
	if (ie == NULL || TAILQ_EMPTY(&ie->ie_handlers)) {
		isrc->is_pic->pic_disable_source(isrc, PIC_EOI);
		(*isrc->is_straycount)++;
		if (*isrc->is_straycount < MAX_STRAY_LOG)
			log(LOG_ERR, "stray irq%d\n", vector);
		else if (*isrc->is_straycount == MAX_STRAY_LOG)
			log(LOG_CRIT,
			    "too many stray irq %d's: not logging anymore\n",
			    vector);
		return;
	}

	/*
	 * Execute fast interrupt handlers directly.
	 * To support clock handlers, if a handler registers
	 * with a NULL argument, then we pass it a pointer to
	 * a trapframe as its argument.
	 */
	td->td_intr_nesting_level++;
	ret = 0;
	thread = 0;
	critical_enter();
	TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next) {
		/* A handler with no filter needs the ithread to run. */
		if (ih->ih_filter == NULL) {
			thread = 1;
			continue;
		}
		CTR4(KTR_INTR, "%s: exec %p(%p) for %s", __func__,
		    ih->ih_filter, ih->ih_argument == NULL ? frame :
		    ih->ih_argument, ih->ih_name);
		if (ih->ih_argument == NULL)
			ret = ih->ih_filter(frame);
		else
			ret = ih->ih_filter(ih->ih_argument);
		/*
		 * Wrapper handler special handling:
		 *
		 * in some particular cases (like pccard and pccbb),
		 * the _real_ device handler is wrapped in a couple of
		 * functions - a filter wrapper and an ithread wrapper.
		 * In this case (and just in this case), the filter wrapper
		 * could ask the system to schedule the ithread and mask
		 * the interrupt source if the wrapped handler is composed
		 * of just an ithread handler.
		 *
		 * TODO: write a generic wrapper to avoid people rolling
		 * their own
		 */
		if (!thread) {
			if (ret == FILTER_SCHEDULE_THREAD)
				thread = 1;
		}
	}

	/*
	 * If there are any threaded handlers that need to run,
	 * mask the source as well as sending it an EOI.  Otherwise,
	 * just send it an EOI but leave it unmasked.
	 */
	if (thread)
		isrc->is_pic->pic_disable_source(isrc, PIC_EOI);
	else
		isrc->is_pic->pic_eoi_source(isrc);

	/* Schedule the ithread if needed. */
	if (thread) {
		error = intr_event_schedule_thread(ie);
		KASSERT(error == 0, ("bad stray interrupt"));
	}
	critical_exit();
	td->td_intr_nesting_level--;
}
403 #endif
404
405 void
406 intr_resume(void)
407 {
408 struct pic *pic;
409
410 sx_xlock(&intr_table_lock);
411 STAILQ_FOREACH(pic, &pics, pics) {
412 if (pic->pic_resume != NULL)
413 pic->pic_resume(pic);
414 }
415 sx_xunlock(&intr_table_lock);
416 }
417
418 void
419 intr_suspend(void)
420 {
421 struct pic *pic;
422
423 sx_xlock(&intr_table_lock);
424 STAILQ_FOREACH(pic, &pics, pics) {
425 if (pic->pic_suspend != NULL)
426 pic->pic_suspend(pic);
427 }
428 sx_xunlock(&intr_table_lock);
429 }
430
/*
 * Store a space-padded, fixed-width name for statistics slot 'index'
 * in the global intrnames[] table; each slot is MAXCOMLEN + 1 bytes.
 */
static void
intrcnt_setname(const char *name, int index)
{

	snprintf(intrnames + (MAXCOMLEN + 1) * index, MAXCOMLEN + 1, "%-*s",
	    MAXCOMLEN, name);
}
438
/*
 * Propagate an interrupt event's full name (which includes its
 * handler names) into the source's intrnames[] slot.
 */
static void
intrcnt_updatename(struct intsrc *is)
{

	intrcnt_setname(is->is_event->ie_fullname, is->is_index);
}
445
/*
 * Allocate two consecutive slots in the intrcnt/intrnames statistics
 * arrays for a newly registered source: one for normal interrupt
 * counts and one for strays.
 */
static void
intrcnt_register(struct intsrc *is)
{
	char straystr[MAXCOMLEN + 1];

	KASSERT(is->is_event != NULL, ("%s: isrc with no event", __func__));
	mtx_lock_spin(&intrcnt_lock);
	is->is_index = intrcnt_index;
	/* Two slots per source: counts at is_index, strays at is_index + 1. */
	intrcnt_index += 2;
	snprintf(straystr, MAXCOMLEN + 1, "stray irq%d",
	    is->is_pic->pic_vector(is));
	intrcnt_updatename(is);
	is->is_count = &intrcnt[is->is_index];
	intrcnt_setname(straystr, is->is_index + 1);
	is->is_straycount = &intrcnt[is->is_index + 1];
	mtx_unlock_spin(&intrcnt_lock);
}
463
/*
 * Reserve and name a single intrcnt slot for a caller-managed
 * counter; the slot's address is returned via *countp.
 */
void
intrcnt_add(const char *name, u_long **countp)
{

	mtx_lock_spin(&intrcnt_lock);
	*countp = &intrcnt[intrcnt_index];
	intrcnt_setname(name, intrcnt_index);
	intrcnt_index++;
	mtx_unlock_spin(&intrcnt_lock);
}
474
/*
 * One-time initialization of the MD interrupt code: reserve intrcnt
 * slot 0 for unrecognized interrupts ("???") and set up the PIC list
 * and the locks protecting the source table and statistics arrays.
 */
static void
intr_init(void *dummy __unused)
{

	intrcnt_setname("???", 0);
	intrcnt_index = 1;
	STAILQ_INIT(&pics);
	sx_init(&intr_table_lock, "intr sources");
	mtx_init(&intrcnt_lock, "intrcnt", NULL, MTX_SPIN);
}
SYSINIT(intr_init, SI_SUB_INTR, SI_ORDER_FIRST, intr_init, NULL)
486
487 #ifdef DDB
/*
 * Dump data about interrupt handlers for every registered source.
 * The "v" modifier ("show irqs/v") requests verbose output from
 * db_dump_intr_event().
 */
DB_SHOW_COMMAND(irqs, db_show_irqs)
{
	struct intsrc **isrc;
	int i, verbose;

	if (strcmp(modif, "v") == 0)
		verbose = 1;
	else
		verbose = 0;
	isrc = interrupt_sources;
	/* Stop early if the debugger pager was quit. */
	for (i = 0; i < NUM_IO_INTS && !db_pager_quit; i++, isrc++)
		if (*isrc != NULL)
			db_dump_intr_event((*isrc)->is_event, verbose);
}
505 #endif
506
507 #ifdef SMP
508 /*
509 * Support for balancing interrupt sources across CPUs. For now we just
510 * allocate CPUs round-robin.
511 */
512
513 /* The BSP is always a valid target. */
514 static cpumask_t intr_cpus = (1 << 0);
515 static int current_cpu, num_cpus = 1;
516
/*
 * Bind an interrupt source to the local APIC of the CPU currently at
 * the head of the round-robin rotation, then advance current_cpu to
 * the next CPU present in the intr_cpus mask.  Both visible callers
 * hold intr_table_lock.
 */
static void
intr_assign_next_cpu(struct intsrc *isrc)
{
	struct pic *pic;
	u_int apic_id;

	/*
	 * Assign this source to a local APIC in a round-robin fashion.
	 */
	pic = isrc->is_pic;
	apic_id = cpu_apic_ids[current_cpu];
	pic->pic_assign_cpu(isrc, apic_id);
	/* Advance, skipping CPUs that are not valid interrupt targets. */
	do {
		current_cpu++;
		if (current_cpu >= num_cpus)
			current_cpu = 0;
	} while (!(intr_cpus & (1 << current_cpu)));
}
535
/*
 * Add a CPU to our mask of valid CPUs that can be destinations of
 * interrupts.
 *
 * NOTE(review): there is no guard against adding the same CPU twice,
 * which would inflate num_cpus; presumably MD startup code calls this
 * at most once per CPU -- confirm against callers.
 */
void
intr_add_cpu(u_int cpu)
{

	if (cpu >= MAXCPU)
		panic("%s: Invalid CPU ID", __func__);
	if (bootverbose)
		printf("INTR: Adding local APIC %d as a target\n",
		    cpu_apic_ids[cpu]);

	intr_cpus |= (1 << cpu);
	num_cpus++;
}
553
/*
 * Distribute all the interrupt sources among the available CPUs once the
 * AP's have been launched.  Runs at SI_SUB_SMP; from this point on
 * assign_cpu is set, so intr_add_handler() also binds newly enabled
 * sources.
 */
static void
intr_shuffle_irqs(void *arg __unused)
{
	struct intsrc *isrc;
	int i;

	/* Don't bother on UP. */
	if (num_cpus <= 1)
		return;

	/* Round-robin assign a CPU to each enabled source. */
	sx_xlock(&intr_table_lock);
	assign_cpu = 1;
	for (i = 0; i < NUM_IO_INTS; i++) {
		isrc = interrupt_sources[i];
		/* Only sources with at least one handler are bound. */
		if (isrc != NULL && isrc->is_handlers > 0)
			intr_assign_next_cpu(isrc);
	}
	sx_xunlock(&intr_table_lock);
}
SYSINIT(intr_shuffle_irqs, SI_SUB_SMP, SI_ORDER_SECOND, intr_shuffle_irqs, NULL)
579 #endif
Cache object: 2c48f4c867be849b4cdd3e9b82465270
|