/*-
 * Copyright (c) 2003 John Baldwin <jhb@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

/*
 * Machine-dependent interrupt code for i386.  Because the i386 can have
 * several different PICs, we use the passed-in vector to look up the
 * interrupt source associated with that vector.  The interrupt source
 * describes which PIC the source belongs to and includes methods to
 * handle that source.
 */

#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/interrupt.h>
#include <sys/ktr.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/syslog.h>
#include <sys/systm.h>
#include <sys/sx.h>
#include <machine/clock.h>
#include <machine/intr_machdep.h>
#ifdef DDB
#include <ddb/ddb.h>
#endif

#define MAX_STRAY_LOG 5

typedef void (*mask_fn)(void *);

static int intrcnt_index;
static struct intsrc *interrupt_sources[NUM_IO_INTS];
static struct sx intr_table_lock;
static struct mtx intrcnt_lock;
static STAILQ_HEAD(, pic) pics;

#ifdef SMP
static int assign_cpu;

static void intr_assign_next_cpu(struct intsrc *isrc);
#endif

static void intr_init(void *__dummy);
static int intr_pic_registered(struct pic *pic);
static void intrcnt_setname(const char *name, int index);
static void intrcnt_updatename(struct intsrc *is);
static void intrcnt_register(struct intsrc *is);

static int
intr_pic_registered(struct pic *pic)
{
        struct pic *p;

        STAILQ_FOREACH(p, &pics, pics) {
                if (p == pic)
                        return (1);
        }
        return (0);
}

/*
 * Register a new interrupt controller (PIC).  This is to support suspend
 * and resume where we suspend/resume controllers rather than individual
 * sources.  This also allows controllers with no active sources (such as
 * 8259As in a system using the APICs) to participate in suspend and resume.
 */
int
intr_register_pic(struct pic *pic)
{
        int error;

        sx_xlock(&intr_table_lock);
        if (intr_pic_registered(pic))
                error = EBUSY;
        else {
                STAILQ_INSERT_TAIL(&pics, pic, pics);
                error = 0;
        }
        sx_xunlock(&intr_table_lock);
        return (error);
}

/*
 * Register a new interrupt source with the global interrupt system.
 * Interrupts must be globally disabled when this function is called.
 */
int
intr_register_source(struct intsrc *isrc)
{
        int error, vector;

        KASSERT(intr_pic_registered(isrc->is_pic), ("unregistered PIC"));
        vector = isrc->is_pic->pic_vector(isrc);
        if (interrupt_sources[vector] != NULL)
                return (EEXIST);
        error = intr_event_create(&isrc->is_event, isrc, 0,
            (mask_fn)isrc->is_pic->pic_enable_source, "irq%d:", vector);
        if (error)
                return (error);
        sx_xlock(&intr_table_lock);
        if (interrupt_sources[vector] != NULL) {
                sx_xunlock(&intr_table_lock);
                intr_event_destroy(isrc->is_event);
                return (EEXIST);
        }
        intrcnt_register(isrc);
        interrupt_sources[vector] = isrc;
        isrc->is_enabled = 0;
        sx_xunlock(&intr_table_lock);
        return (0);
}

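/*
 * Return the interrupt source registered for a vector, or NULL if the
 * vector has no source.
 */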
struct intsrc *
intr_lookup_source(int vector)
{

        return (interrupt_sources[vector]);
}

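/*
 * Add a handler to the interrupt event for a vector.  On success the
 * source is unmasked, and it is enabled in the PIC the first time a
 * handler is attached to it.
 */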
int
intr_add_handler(const char *name, int vector, driver_intr_t handler,
    void *arg, enum intr_type flags, void **cookiep)
{
        struct intsrc *isrc;
        int error;

        isrc = intr_lookup_source(vector);
        if (isrc == NULL)
                return (EINVAL);
        error = intr_event_add_handler(isrc->is_event, name, handler, arg,
            intr_priority(flags), flags, cookiep);
        if (error == 0) {
                sx_xlock(&intr_table_lock);
                intrcnt_updatename(isrc);
                if (!isrc->is_enabled) {
                        isrc->is_enabled = 1;
#ifdef SMP
                        if (assign_cpu)
                                intr_assign_next_cpu(isrc);
#endif
                        isrc->is_pic->pic_enable_intr(isrc);
                }
                isrc->is_pic->pic_enable_source(isrc);
                sx_xunlock(&intr_table_lock);
        }
        return (error);
}

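/*
 * Remove an interrupt handler using the cookie returned by
 * intr_add_handler().
 */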
int
intr_remove_handler(void *cookie)
{
        int error;

        error = intr_event_remove_handler(cookie);
#ifdef XXX
        if (error == 0)
                intrcnt_updatename(/* XXX */);
#endif
        return (error);
}

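/*
 * Hand a trigger mode and polarity change for a vector down to the PIC
 * that owns the corresponding source.
 */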
int
intr_config_intr(int vector, enum intr_trigger trig, enum intr_polarity pol)
{
        struct intsrc *isrc;

        isrc = intr_lookup_source(vector);
        if (isrc == NULL)
                return (EINVAL);
        return (isrc->is_pic->pic_config_intr(isrc, trig, pol));
}

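/*
 * Main interrupt dispatch routine.  Fast handlers run directly in
 * interrupt context; if any threaded handlers are present, the source
 * is masked and its ithread is scheduled.
 */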
void
intr_execute_handlers(struct intsrc *isrc, struct intrframe *iframe)
{
        struct thread *td;
        struct intr_event *ie;
        struct intr_handler *ih;
        int error, vector, thread;

        td = curthread;

        /*
         * We count software interrupts when we process them.  The
         * code here follows previous practice, but there's an
         * argument for counting hardware interrupts when they're
         * processed too.
         */
        (*isrc->is_count)++;
        PCPU_LAZY_INC(cnt.v_intr);

        ie = isrc->is_event;

        /*
         * XXX: We assume that IRQ 0 is only used for the ISA timer
         * device (clk).
         */
        vector = isrc->is_pic->pic_vector(isrc);
        if (vector == 0)
                clkintr_pending = 1;

        /*
         * For stray interrupts, mask and EOI the source, bump the
         * stray count, and log the condition.
         */
        if (ie == NULL || TAILQ_EMPTY(&ie->ie_handlers)) {
                isrc->is_pic->pic_disable_source(isrc, PIC_EOI);
                (*isrc->is_straycount)++;
                if (*isrc->is_straycount < MAX_STRAY_LOG)
                        log(LOG_ERR, "stray irq%d\n", vector);
                else if (*isrc->is_straycount == MAX_STRAY_LOG)
                        log(LOG_CRIT,
                            "too many stray irq %d's: not logging anymore\n",
                            vector);
                return;
        }

        /*
         * Execute fast interrupt handlers directly.  To support clock
         * handlers, if a handler registers with a NULL argument, then
         * we pass it a pointer to an intrframe as its argument.
         */
        td->td_intr_nesting_level++;
        thread = 0;
        critical_enter();
        TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next) {
                if (!(ih->ih_flags & IH_FAST)) {
                        thread = 1;
                        continue;
                }
                CTR4(KTR_INTR, "%s: exec %p(%p) for %s", __func__,
                    ih->ih_handler, ih->ih_argument == NULL ? iframe :
                    ih->ih_argument, ih->ih_name);
                if (ih->ih_argument == NULL)
                        ih->ih_handler(iframe);
                else
                        ih->ih_handler(ih->ih_argument);
        }

        /*
         * If there are any threaded handlers that need to run, mask
         * the source and send it an EOI.  Otherwise, just send it an
         * EOI and leave it unmasked.
         */
        if (thread)
                isrc->is_pic->pic_disable_source(isrc, PIC_EOI);
        else
                isrc->is_pic->pic_eoi_source(isrc);

        /* Schedule the ithread if needed. */
        if (thread) {
                error = intr_event_schedule_thread(ie);
                KASSERT(error == 0, ("bad stray interrupt"));
        }
        critical_exit();
        td->td_intr_nesting_level--;
}

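/*
 * Resume all registered interrupt controllers by invoking each PIC's
 * resume method, if it provides one.
 */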
void
intr_resume(void)
{
        struct pic *pic;

        sx_xlock(&intr_table_lock);
        STAILQ_FOREACH(pic, &pics, pics) {
                if (pic->pic_resume != NULL)
                        pic->pic_resume(pic);
        }
        sx_xunlock(&intr_table_lock);
}

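/*
 * Suspend all registered interrupt controllers by invoking each PIC's
 * suspend method, if it provides one.
 */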
void
intr_suspend(void)
{
        struct pic *pic;

        sx_xlock(&intr_table_lock);
        STAILQ_FOREACH(pic, &pics, pics) {
                if (pic->pic_suspend != NULL)
                        pic->pic_suspend(pic);
        }
        sx_xunlock(&intr_table_lock);
}

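/*
 * Store a fixed-width counter name at the given index in the
 * intrnames[] array.
 */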
static void
intrcnt_setname(const char *name, int index)
{

        snprintf(intrnames + (MAXCOMLEN + 1) * index, MAXCOMLEN + 1, "%-*s",
            MAXCOMLEN, name);
}

static void
intrcnt_updatename(struct intsrc *is)
{

        intrcnt_setname(is->is_event->ie_fullname, is->is_index);
}

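/*
 * Reserve two intrcnt[] slots for an interrupt source: one for its
 * interrupt count and one for its stray interrupt count.
 */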
static void
intrcnt_register(struct intsrc *is)
{
        char straystr[MAXCOMLEN + 1];

        KASSERT(is->is_event != NULL, ("%s: isrc with no event", __func__));
        mtx_lock_spin(&intrcnt_lock);
        is->is_index = intrcnt_index;
        intrcnt_index += 2;
        snprintf(straystr, MAXCOMLEN + 1, "stray irq%d",
            is->is_pic->pic_vector(is));
        intrcnt_updatename(is);
        is->is_count = &intrcnt[is->is_index];
        intrcnt_setname(straystr, is->is_index + 1);
        is->is_straycount = &intrcnt[is->is_index + 1];
        mtx_unlock_spin(&intrcnt_lock);
}

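/*
 * Reserve a single intrcnt[] slot for a counter managed by the caller.
 */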
void
intrcnt_add(const char *name, u_long **countp)
{

        mtx_lock_spin(&intrcnt_lock);
        *countp = &intrcnt[intrcnt_index];
        intrcnt_setname(name, intrcnt_index);
        intrcnt_index++;
        mtx_unlock_spin(&intrcnt_lock);
}

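/*
 * Initialize the PIC list, the interrupt table lock, and the interrupt
 * counter bookkeeping at boot.
 */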
static void
intr_init(void *dummy __unused)
{

        intrcnt_setname("???", 0);
        intrcnt_index = 1;
        STAILQ_INIT(&pics);
        sx_init(&intr_table_lock, "intr sources");
        mtx_init(&intrcnt_lock, "intrcnt", NULL, MTX_SPIN);
}
SYSINIT(intr_init, SI_SUB_INTR, SI_ORDER_FIRST, intr_init, NULL)

#ifdef DDB
/*
 * Dump data about interrupt handlers
 */
DB_SHOW_COMMAND(irqs, db_show_irqs)
{
        struct intsrc **isrc;
        int i, quit, verbose;

        quit = 0;
        if (strcmp(modif, "v") == 0)
                verbose = 1;
        else
                verbose = 0;
        isrc = interrupt_sources;
        db_setup_paging(db_simple_pager, &quit, db_lines_per_page);
        for (i = 0; i < NUM_IO_INTS && !quit; i++, isrc++)
                if (*isrc != NULL)
                        db_dump_intr_event((*isrc)->is_event, verbose);
}
#endif

#ifdef SMP
/*
 * Support for balancing interrupt sources across CPUs.  For now we just
 * allocate CPUs round-robin.
 */

static u_int cpu_apic_ids[MAXCPU];
static int current_cpu, num_cpus;

static void
intr_assign_next_cpu(struct intsrc *isrc)
{
        struct pic *pic;
        u_int apic_id;

        /*
         * Assign this source to a local APIC in a round-robin fashion.
         */
        pic = isrc->is_pic;
        apic_id = cpu_apic_ids[current_cpu];
        current_cpu++;
        if (current_cpu >= num_cpus)
                current_cpu = 0;
        pic->pic_assign_cpu(isrc, apic_id);
}

/*
 * Add a local APIC ID to our list of valid local APIC IDs that can
 * be destinations of interrupts.
 */
void
intr_add_cpu(u_int apic_id)
{

        if (bootverbose)
                printf("INTR: Adding local APIC %d as a target\n", apic_id);
        if (num_cpus >= MAXCPU)
                panic("WARNING: Local APIC IDs exhausted!");
        cpu_apic_ids[num_cpus] = apic_id;
        num_cpus++;
}

/*
 * Distribute all the interrupt sources among the available CPUs once the
 * APs have been launched.
 */
static void
intr_shuffle_irqs(void *arg __unused)
{
        struct intsrc *isrc;
        int i;

        /* Don't bother on UP. */
        if (num_cpus <= 1)
                return;

        /* Round-robin assign a CPU to each enabled source. */
        sx_xlock(&intr_table_lock);
        assign_cpu = 1;
        for (i = 0; i < NUM_IO_INTS; i++) {
                isrc = interrupt_sources[i];
                if (isrc != NULL && isrc->is_enabled)
                        intr_assign_next_cpu(isrc);
        }
        sx_xunlock(&intr_table_lock);
}
SYSINIT(intr_shuffle_irqs, SI_SUB_SMP, SI_ORDER_SECOND, intr_shuffle_irqs, NULL)
#endif