sys/kern/kern_intr.c
/*-
 * Copyright (c) 1997, Stefan Esser <se@freebsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: releng/9.0/sys/kern/kern_intr.c 224187 2011-07-18 15:19:40Z attilio $");

#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/cpuset.h>
#include <sys/rtprio.h>
#include <sys/systm.h>
#include <sys/interrupt.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/ktr.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/random.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>
#include <sys/unistd.h>
#include <sys/vmmeter.h>
#include <machine/atomic.h>
#include <machine/cpu.h>
#include <machine/md_var.h>
#include <machine/stdarg.h>
#ifdef DDB
#include <ddb/ddb.h>
#include <ddb/db_sym.h>
#endif

/*
 * Describe an interrupt thread.  There is one of these per interrupt event.
 */
struct intr_thread {
        struct intr_event *it_event;
        struct thread *it_thread;       /* Kernel thread. */
        int     it_flags;               /* (j) IT_* flags. */
        int     it_need;                /* Needs service. */
};

/* Interrupt thread flags kept in it_flags */
#define IT_DEAD         0x000001        /* Thread is waiting to exit. */
#define IT_WAIT         0x000002        /* Thread is waiting for completion. */

struct  intr_entropy {
        struct  thread *td;
        uintptr_t event;
};

struct  intr_event *clk_intr_event;
struct  intr_event *tty_intr_event;
void    *vm_ih;
struct proc *intrproc;

static MALLOC_DEFINE(M_ITHREAD, "ithread", "Interrupt Threads");

static int intr_storm_threshold = 1000;
TUNABLE_INT("hw.intr_storm_threshold", &intr_storm_threshold);
SYSCTL_INT(_hw, OID_AUTO, intr_storm_threshold, CTLFLAG_RW,
    &intr_storm_threshold, 0,
    "Number of consecutive interrupts before storm protection is enabled");
static TAILQ_HEAD(, intr_event) event_list =
    TAILQ_HEAD_INITIALIZER(event_list);
static struct mtx event_lock;
MTX_SYSINIT(intr_event_list, &event_lock, "intr event list", MTX_DEF);

static void     intr_event_update(struct intr_event *ie);
#ifdef INTR_FILTER
static int      intr_event_schedule_thread(struct intr_event *ie,
                    struct intr_thread *ithd);
static int      intr_filter_loop(struct intr_event *ie,
                    struct trapframe *frame, struct intr_thread **ithd);
static struct intr_thread *ithread_create(const char *name,
                    struct intr_handler *ih);
#else
static int      intr_event_schedule_thread(struct intr_event *ie);
static struct intr_thread *ithread_create(const char *name);
#endif
static void     ithread_destroy(struct intr_thread *ithread);
static void     ithread_execute_handlers(struct proc *p,
                    struct intr_event *ie);
#ifdef INTR_FILTER
static void     priv_ithread_execute_handler(struct proc *p,
                    struct intr_handler *ih);
#endif
static void     ithread_loop(void *);
static void     ithread_update(struct intr_thread *ithd);
static void     start_softintr(void *);

/* Map an interrupt type to an ithread priority. */
u_char
intr_priority(enum intr_type flags)
{
        u_char pri;

        flags &= (INTR_TYPE_TTY | INTR_TYPE_BIO | INTR_TYPE_NET |
            INTR_TYPE_CAM | INTR_TYPE_MISC | INTR_TYPE_CLK | INTR_TYPE_AV);
        switch (flags) {
        case INTR_TYPE_TTY:
                pri = PI_TTY;
                break;
        case INTR_TYPE_BIO:
                pri = PI_DISK;
                break;
        case INTR_TYPE_NET:
                pri = PI_NET;
                break;
        case INTR_TYPE_CAM:
                pri = PI_DISK;
                break;
        case INTR_TYPE_AV:
                pri = PI_AV;
                break;
        case INTR_TYPE_CLK:
                pri = PI_REALTIME;
                break;
        case INTR_TYPE_MISC:
                pri = PI_DULL;          /* don't care */
                break;
        default:
                /* We didn't specify an interrupt level. */
                panic("intr_priority: no interrupt type in flags");
        }

        return pri;
}

/*
 * Update an ithread based on the associated intr_event.
 */
static void
ithread_update(struct intr_thread *ithd)
{
        struct intr_event *ie;
        struct thread *td;
        u_char pri;

        ie = ithd->it_event;
        td = ithd->it_thread;

        /* Determine the overall priority of this event. */
        if (TAILQ_EMPTY(&ie->ie_handlers))
                pri = PRI_MAX_ITHD;
        else
                pri = TAILQ_FIRST(&ie->ie_handlers)->ih_pri;

        /* Update name and priority. */
        strlcpy(td->td_name, ie->ie_fullname, sizeof(td->td_name));
        thread_lock(td);
        sched_prio(td, pri);
        thread_unlock(td);
}

/*
 * Regenerate the full name of an interrupt event and update its priority.
 */
static void
intr_event_update(struct intr_event *ie)
{
        struct intr_handler *ih;
        char *last;
        int missed, space;

        /* Start off with no entropy and just the name of the event. */
        mtx_assert(&ie->ie_lock, MA_OWNED);
        strlcpy(ie->ie_fullname, ie->ie_name, sizeof(ie->ie_fullname));
        ie->ie_flags &= ~IE_ENTROPY;
        missed = 0;
        space = 1;

        /* Run through all the handlers updating values. */
        TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next) {
                if (strlen(ie->ie_fullname) + strlen(ih->ih_name) + 1 <
                    sizeof(ie->ie_fullname)) {
                        strcat(ie->ie_fullname, " ");
                        strcat(ie->ie_fullname, ih->ih_name);
                        space = 0;
                } else
                        missed++;
                if (ih->ih_flags & IH_ENTROPY)
                        ie->ie_flags |= IE_ENTROPY;
        }

        /*
         * If the handler names were too long, add +'s to indicate missing
         * names.  If we run out of room and still have +'s to add, change
         * the last character from a + to a *.
         */
        last = &ie->ie_fullname[sizeof(ie->ie_fullname) - 2];
        while (missed-- > 0) {
                if (strlen(ie->ie_fullname) + 1 == sizeof(ie->ie_fullname)) {
                        if (*last == '+') {
                                *last = '*';
                                break;
                        } else
                                *last = '+';
                } else if (space) {
                        strcat(ie->ie_fullname, " +");
                        space = 0;
                } else
                        strcat(ie->ie_fullname, "+");
        }

        /*
         * If this event has an ithread, update its priority and
         * name.
         */
        if (ie->ie_thread != NULL)
                ithread_update(ie->ie_thread);
        CTR2(KTR_INTR, "%s: updated %s", __func__, ie->ie_fullname);
}

int
intr_event_create(struct intr_event **event, void *source, int flags, int irq,
    void (*pre_ithread)(void *), void (*post_ithread)(void *),
    void (*post_filter)(void *), int (*assign_cpu)(void *, u_char),
    const char *fmt, ...)
{
        struct intr_event *ie;
        va_list ap;

        /* The only valid flag during creation is IE_SOFT. */
        if ((flags & ~IE_SOFT) != 0)
                return (EINVAL);
        ie = malloc(sizeof(struct intr_event), M_ITHREAD, M_WAITOK | M_ZERO);
        ie->ie_source = source;
        ie->ie_pre_ithread = pre_ithread;
        ie->ie_post_ithread = post_ithread;
        ie->ie_post_filter = post_filter;
        ie->ie_assign_cpu = assign_cpu;
        ie->ie_flags = flags;
        ie->ie_irq = irq;
        ie->ie_cpu = NOCPU;
        TAILQ_INIT(&ie->ie_handlers);
        mtx_init(&ie->ie_lock, "intr event", NULL, MTX_DEF);

        va_start(ap, fmt);
        vsnprintf(ie->ie_name, sizeof(ie->ie_name), fmt, ap);
        va_end(ap);
        strlcpy(ie->ie_fullname, ie->ie_name, sizeof(ie->ie_fullname));
        mtx_lock(&event_lock);
        TAILQ_INSERT_TAIL(&event_list, ie, ie_list);
        mtx_unlock(&event_lock);
        if (event != NULL)
                *event = ie;
        CTR2(KTR_INTR, "%s: created %s", __func__, ie->ie_name);
        return (0);
}
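
/*
 * Usage sketch (illustrative only, not part of this file):
 * machine-dependent interrupt code typically creates one event per IRQ
 * at attach time.  The "example_*" callbacks are hypothetical; real
 * callers pass their PIC's mask/unmask/EOI routines.
 */
#if 0
static void
example_setup_irq(void *isrc, int vector)
{
        struct intr_event *ie;
        int error;

        error = intr_event_create(&ie, isrc, 0, vector,
            example_mask,       /* pre_ithread: mask before the ithread runs */
            example_unmask,     /* post_ithread: unmask when handlers finish */
            example_eoi,        /* post_filter: EOI when a filter handled it */
            NULL,               /* assign_cpu: no CPU binding on this platform */
            "irq%d:", vector);
        if (error != 0)
                panic("failed to create event for irq %d: %d", vector, error);
}
#endif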

/*
 * Bind an interrupt event to the specified CPU.  Note that not all
 * platforms support binding an interrupt to a CPU.  For those
 * platforms this request will fail.  For supported platforms, any
 * associated ithreads as well as the primary interrupt context will
 * be bound to the specified CPU.  Using a cpu id of NOCPU unbinds
 * the interrupt event.
 */
int
intr_event_bind(struct intr_event *ie, u_char cpu)
{
        cpuset_t mask;
        lwpid_t id;
        int error;

        /* Need a CPU to bind to. */
        if (cpu != NOCPU && CPU_ABSENT(cpu))
                return (EINVAL);

        if (ie->ie_assign_cpu == NULL)
                return (EOPNOTSUPP);

        error = priv_check(curthread, PRIV_SCHED_CPUSET_INTR);
        if (error)
                return (error);

        /*
         * If we have any ithreads try to set their mask first to verify
         * permissions, etc.
         */
        mtx_lock(&ie->ie_lock);
        if (ie->ie_thread != NULL) {
                CPU_ZERO(&mask);
                if (cpu == NOCPU)
                        CPU_COPY(cpuset_root, &mask);
                else
                        CPU_SET(cpu, &mask);
                id = ie->ie_thread->it_thread->td_tid;
                mtx_unlock(&ie->ie_lock);
                error = cpuset_setthread(id, &mask);
                if (error)
                        return (error);
        } else
                mtx_unlock(&ie->ie_lock);
        error = ie->ie_assign_cpu(ie->ie_source, cpu);
        if (error) {
                mtx_lock(&ie->ie_lock);
                if (ie->ie_thread != NULL) {
                        CPU_ZERO(&mask);
                        if (ie->ie_cpu == NOCPU)
                                CPU_COPY(cpuset_root, &mask);
                        else
                                /*
                                 * Restore the previous binding, not the
                                 * one that just failed.
                                 */
                                CPU_SET(ie->ie_cpu, &mask);
                        id = ie->ie_thread->it_thread->td_tid;
                        mtx_unlock(&ie->ie_lock);
                        (void)cpuset_setthread(id, &mask);
                } else
                        mtx_unlock(&ie->ie_lock);
                return (error);
        }

        mtx_lock(&ie->ie_lock);
        ie->ie_cpu = cpu;
        mtx_unlock(&ie->ie_lock);

        return (error);
}
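
/*
 * Sketch (illustrative): binding an event to CPU 2, then undoing the
 * binding.  On platforms that did not supply an assign_cpu callback both
 * calls return EOPNOTSUPP.
 */
#if 0
static int
example_bind_unbind(struct intr_event *ie)
{
        int error;

        error = intr_event_bind(ie, 2);         /* ithread + source -> CPU 2 */
        if (error == 0)
                error = intr_event_bind(ie, NOCPU);     /* revert to unbound */
        return (error);
}
#endif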

static struct intr_event *
intr_lookup(int irq)
{
        struct intr_event *ie;

        mtx_lock(&event_lock);
        TAILQ_FOREACH(ie, &event_list, ie_list)
                if (ie->ie_irq == irq &&
                    (ie->ie_flags & IE_SOFT) == 0 &&
                    TAILQ_FIRST(&ie->ie_handlers) != NULL)
                        break;
        mtx_unlock(&event_lock);
        return (ie);
}

int
intr_setaffinity(int irq, void *m)
{
        struct intr_event *ie;
        cpuset_t *mask;
        u_char cpu;
        int n;

        mask = m;
        cpu = NOCPU;
        /*
         * If we're setting all cpus we can unbind.  Otherwise make sure
         * only one cpu is in the set.
         */
        if (CPU_CMP(cpuset_root, mask)) {
                for (n = 0; n < CPU_SETSIZE; n++) {
                        if (!CPU_ISSET(n, mask))
                                continue;
                        if (cpu != NOCPU)
                                return (EINVAL);
                        cpu = (u_char)n;
                }
        }
        ie = intr_lookup(irq);
        if (ie == NULL)
                return (ESRCH);
        return (intr_event_bind(ie, cpu));
}

int
intr_getaffinity(int irq, void *m)
{
        struct intr_event *ie;
        cpuset_t *mask;

        mask = m;
        ie = intr_lookup(irq);
        if (ie == NULL)
                return (ESRCH);
        CPU_ZERO(mask);
        mtx_lock(&ie->ie_lock);
        if (ie->ie_cpu == NOCPU)
                CPU_COPY(cpuset_root, mask);
        else
                CPU_SET(ie->ie_cpu, mask);
        mtx_unlock(&ie->ie_lock);
        return (0);
}
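
/*
 * Sketch (illustrative): the affinity wrappers above speak cpuset_t; a
 * single-CPU mask binds the IRQ, the root set unbinds it.  The function
 * name is hypothetical.
 */
#if 0
static int
example_pin_irq(int irq)
{
        cpuset_t mask;
        int error;

        CPU_ZERO(&mask);
        CPU_SET(2, &mask);
        error = intr_setaffinity(irq, &mask);   /* bind irq to CPU 2 */
        if (error == 0)
                error = intr_getaffinity(irq, &mask);   /* read it back */
        return (error);
}
#endif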

int
intr_event_destroy(struct intr_event *ie)
{

        mtx_lock(&event_lock);
        mtx_lock(&ie->ie_lock);
        if (!TAILQ_EMPTY(&ie->ie_handlers)) {
                mtx_unlock(&ie->ie_lock);
                mtx_unlock(&event_lock);
                return (EBUSY);
        }
        TAILQ_REMOVE(&event_list, ie, ie_list);
#ifndef notyet
        if (ie->ie_thread != NULL) {
                ithread_destroy(ie->ie_thread);
                ie->ie_thread = NULL;
        }
#endif
        mtx_unlock(&ie->ie_lock);
        mtx_unlock(&event_lock);
        mtx_destroy(&ie->ie_lock);
        free(ie, M_ITHREAD);
        return (0);
}

#ifndef INTR_FILTER
static struct intr_thread *
ithread_create(const char *name)
{
        struct intr_thread *ithd;
        struct thread *td;
        int error;

        ithd = malloc(sizeof(struct intr_thread), M_ITHREAD, M_WAITOK | M_ZERO);

        error = kproc_kthread_add(ithread_loop, ithd, &intrproc,
            &td, RFSTOPPED | RFHIGHPID,
            0, "intr", "%s", name);
        if (error)
                panic("kproc_kthread_add() failed with %d", error);
        thread_lock(td);
        sched_class(td, PRI_ITHD);
        TD_SET_IWAIT(td);
        thread_unlock(td);
        td->td_pflags |= TDP_ITHREAD;
        ithd->it_thread = td;
        CTR2(KTR_INTR, "%s: created %s", __func__, name);
        return (ithd);
}
#else
static struct intr_thread *
ithread_create(const char *name, struct intr_handler *ih)
{
        struct intr_thread *ithd;
        struct thread *td;
        int error;

        ithd = malloc(sizeof(struct intr_thread), M_ITHREAD, M_WAITOK | M_ZERO);

        error = kproc_kthread_add(ithread_loop, ih, &intrproc,
            &td, RFSTOPPED | RFHIGHPID,
            0, "intr", "%s", name);
        if (error)
                panic("kproc_kthread_add() failed with %d", error);
        thread_lock(td);
        sched_class(td, PRI_ITHD);
        TD_SET_IWAIT(td);
        thread_unlock(td);
        td->td_pflags |= TDP_ITHREAD;
        ithd->it_thread = td;
        CTR2(KTR_INTR, "%s: created %s", __func__, name);
        return (ithd);
}
#endif

static void
ithread_destroy(struct intr_thread *ithread)
{
        struct thread *td;

        CTR2(KTR_INTR, "%s: killing %s", __func__, ithread->it_event->ie_name);
        td = ithread->it_thread;
        thread_lock(td);
        ithread->it_flags |= IT_DEAD;
        if (TD_AWAITING_INTR(td)) {
                TD_CLR_IWAIT(td);
                sched_add(td, SRQ_INTR);
        }
        thread_unlock(td);
}

#ifndef INTR_FILTER
int
intr_event_add_handler(struct intr_event *ie, const char *name,
    driver_filter_t filter, driver_intr_t handler, void *arg, u_char pri,
    enum intr_type flags, void **cookiep)
{
        struct intr_handler *ih, *temp_ih;
        struct intr_thread *it;

        if (ie == NULL || name == NULL || (handler == NULL && filter == NULL))
                return (EINVAL);

        /* Allocate and populate an interrupt handler structure. */
        ih = malloc(sizeof(struct intr_handler), M_ITHREAD, M_WAITOK | M_ZERO);
        ih->ih_filter = filter;
        ih->ih_handler = handler;
        ih->ih_argument = arg;
        strlcpy(ih->ih_name, name, sizeof(ih->ih_name));
        ih->ih_event = ie;
        ih->ih_pri = pri;
        if (flags & INTR_EXCL)
                ih->ih_flags = IH_EXCLUSIVE;
        if (flags & INTR_MPSAFE)
                ih->ih_flags |= IH_MPSAFE;
        if (flags & INTR_ENTROPY)
                ih->ih_flags |= IH_ENTROPY;

        /* We can only have one exclusive handler in an event. */
        mtx_lock(&ie->ie_lock);
        if (!TAILQ_EMPTY(&ie->ie_handlers)) {
                if ((flags & INTR_EXCL) ||
                    (TAILQ_FIRST(&ie->ie_handlers)->ih_flags & IH_EXCLUSIVE)) {
                        mtx_unlock(&ie->ie_lock);
                        free(ih, M_ITHREAD);
                        return (EINVAL);
                }
        }

        /* Add the new handler to the event in priority order. */
        TAILQ_FOREACH(temp_ih, &ie->ie_handlers, ih_next) {
                if (temp_ih->ih_pri > ih->ih_pri)
                        break;
        }
        if (temp_ih == NULL)
                TAILQ_INSERT_TAIL(&ie->ie_handlers, ih, ih_next);
        else
                TAILQ_INSERT_BEFORE(temp_ih, ih, ih_next);
        intr_event_update(ie);

        /* Create a thread if we need one. */
        while (ie->ie_thread == NULL && handler != NULL) {
                if (ie->ie_flags & IE_ADDING_THREAD)
                        msleep(ie, &ie->ie_lock, 0, "ithread", 0);
                else {
                        ie->ie_flags |= IE_ADDING_THREAD;
                        mtx_unlock(&ie->ie_lock);
                        it = ithread_create("intr: newborn");
                        mtx_lock(&ie->ie_lock);
                        ie->ie_flags &= ~IE_ADDING_THREAD;
                        ie->ie_thread = it;
                        it->it_event = ie;
                        ithread_update(it);
                        wakeup(ie);
                }
        }
        CTR3(KTR_INTR, "%s: added %s to %s", __func__, ih->ih_name,
            ie->ie_name);
        mtx_unlock(&ie->ie_lock);

        if (cookiep != NULL)
                *cookiep = ih;
        return (0);
}
#else
int
intr_event_add_handler(struct intr_event *ie, const char *name,
    driver_filter_t filter, driver_intr_t handler, void *arg, u_char pri,
    enum intr_type flags, void **cookiep)
{
        struct intr_handler *ih, *temp_ih;
        struct intr_thread *it;

        if (ie == NULL || name == NULL || (handler == NULL && filter == NULL))
                return (EINVAL);

        /* Allocate and populate an interrupt handler structure. */
        ih = malloc(sizeof(struct intr_handler), M_ITHREAD, M_WAITOK | M_ZERO);
        ih->ih_filter = filter;
        ih->ih_handler = handler;
        ih->ih_argument = arg;
        strlcpy(ih->ih_name, name, sizeof(ih->ih_name));
        ih->ih_event = ie;
        ih->ih_pri = pri;
        if (flags & INTR_EXCL)
                ih->ih_flags = IH_EXCLUSIVE;
        if (flags & INTR_MPSAFE)
                ih->ih_flags |= IH_MPSAFE;
        if (flags & INTR_ENTROPY)
                ih->ih_flags |= IH_ENTROPY;

        /* We can only have one exclusive handler in an event. */
        mtx_lock(&ie->ie_lock);
        if (!TAILQ_EMPTY(&ie->ie_handlers)) {
                if ((flags & INTR_EXCL) ||
                    (TAILQ_FIRST(&ie->ie_handlers)->ih_flags & IH_EXCLUSIVE)) {
                        mtx_unlock(&ie->ie_lock);
                        free(ih, M_ITHREAD);
                        return (EINVAL);
                }
        }

        /* Add the new handler to the event in priority order. */
        TAILQ_FOREACH(temp_ih, &ie->ie_handlers, ih_next) {
                if (temp_ih->ih_pri > ih->ih_pri)
                        break;
        }
        if (temp_ih == NULL)
                TAILQ_INSERT_TAIL(&ie->ie_handlers, ih, ih_next);
        else
                TAILQ_INSERT_BEFORE(temp_ih, ih, ih_next);
        intr_event_update(ie);

        /* For filtered handlers, create a private ithread to run on. */
        if (filter != NULL && handler != NULL) {
                mtx_unlock(&ie->ie_lock);
                it = ithread_create("intr: newborn", ih);
                mtx_lock(&ie->ie_lock);
                it->it_event = ie;
                ih->ih_thread = it;
                ithread_update(it);     /* XXX - do we really need this?!?!? */
        } else { /* Create the global per-event thread if we need one. */
                while (ie->ie_thread == NULL && handler != NULL) {
                        if (ie->ie_flags & IE_ADDING_THREAD)
                                msleep(ie, &ie->ie_lock, 0, "ithread", 0);
                        else {
                                ie->ie_flags |= IE_ADDING_THREAD;
                                mtx_unlock(&ie->ie_lock);
                                it = ithread_create("intr: newborn", ih);
                                mtx_lock(&ie->ie_lock);
                                ie->ie_flags &= ~IE_ADDING_THREAD;
                                ie->ie_thread = it;
                                it->it_event = ie;
                                ithread_update(it);
                                wakeup(ie);
                        }
                }
        }
        CTR3(KTR_INTR, "%s: added %s to %s", __func__, ih->ih_name,
            ie->ie_name);
        mtx_unlock(&ie->ie_lock);

        if (cookiep != NULL)
                *cookiep = ih;
        return (0);
}
#endif
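
/*
 * Sketch of the usual way into intr_event_add_handler() (illustrative,
 * not part of this file): device drivers call bus_setup_intr(9), which
 * the nexus resolves to this function.  This is a fragment of a
 * hypothetical attach routine; "example_filter", "example_intr", and the
 * softc layout are assumptions.
 */
#if 0
        error = bus_setup_intr(dev, sc->irq_res,
            INTR_TYPE_NET | INTR_MPSAFE,
            example_filter,     /* driver_filter_t: runs in primary context */
            example_intr,       /* driver_intr_t: runs in the ithread */
            sc, &sc->intrhand);
#endif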

/*
 * Append a description preceded by a ':' to the name of the specified
 * interrupt handler.
 */
int
intr_event_describe_handler(struct intr_event *ie, void *cookie,
    const char *descr)
{
        struct intr_handler *ih;
        size_t space;
        char *start;

        mtx_lock(&ie->ie_lock);
#ifdef INVARIANTS
        TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next) {
                if (ih == cookie)
                        break;
        }
        if (ih == NULL) {
                mtx_unlock(&ie->ie_lock);
                panic("handler %p not found in interrupt event %p", cookie, ie);
        }
#endif
        ih = cookie;

        /*
         * Look for an existing description by checking for an
         * existing ":".  This assumes device names do not include
         * colons.  If one is found, prepare to insert the new
         * description at that point.  If one is not found, find the
         * end of the name to use as the insertion point.
         */
        start = index(ih->ih_name, ':');
        if (start == NULL)
                start = index(ih->ih_name, 0);

        /*
         * See if there is enough remaining room in the string for the
         * description + ":".  The "- 1" leaves room for the trailing
         * '\0'.  The "+ 1" accounts for the colon.
         */
        space = sizeof(ih->ih_name) - (start - ih->ih_name) - 1;
        if (strlen(descr) + 1 > space) {
                mtx_unlock(&ie->ie_lock);
                return (ENOSPC);
        }

        /* Append a colon followed by the description. */
        *start = ':';
        strcpy(start + 1, descr);
        intr_event_update(ie);
        mtx_unlock(&ie->ie_lock);
        return (0);
}
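
/*
 * Sketch (illustrative): drivers normally reach the function above via
 * bus_describe_intr(9), e.g. to tag per-queue MSI-X handlers.  This is a
 * fragment; the "qid" variable and softc fields are hypothetical.
 */
#if 0
        error = bus_describe_intr(dev, sc->irq_res, sc->intrhand,
            "rxq%d", qid);
#endif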

/*
 * Return the ie_source field from the intr_event an intr_handler is
 * associated with.
 */
void *
intr_handler_source(void *cookie)
{
        struct intr_handler *ih;
        struct intr_event *ie;

        ih = (struct intr_handler *)cookie;
        if (ih == NULL)
                return (NULL);
        ie = ih->ih_event;
        KASSERT(ie != NULL,
            ("interrupt handler \"%s\" has a NULL interrupt event",
            ih->ih_name));
        return (ie->ie_source);
}

/*
 * Sleep until an ithread finishes executing an interrupt handler.
 *
 * XXX Doesn't currently handle interrupt filters or fast interrupt
 * handlers.  This is intended for compatibility with Linux drivers
 * only.  Do not use in BSD code.
 */
void
_intr_drain(int irq)
{
        struct intr_event *ie;
        struct intr_thread *ithd;
        struct thread *td;

        ie = intr_lookup(irq);
        if (ie == NULL)
                return;
        if (ie->ie_thread == NULL)
                return;
        ithd = ie->ie_thread;
        td = ithd->it_thread;
        /*
         * We set the flag and wait for it to be cleared to avoid
         * long delays with potentially busy interrupt handlers
         * were we to only sample TD_AWAITING_INTR() every tick.
         */
        thread_lock(td);
        if (!TD_AWAITING_INTR(td)) {
                ithd->it_flags |= IT_WAIT;
                while (ithd->it_flags & IT_WAIT) {
                        thread_unlock(td);
                        pause("idrain", 1);
                        thread_lock(td);
                }
        }
        thread_unlock(td);
        return;
}


#ifndef INTR_FILTER
int
intr_event_remove_handler(void *cookie)
{
        struct intr_handler *handler = (struct intr_handler *)cookie;
        struct intr_event *ie;
#ifdef INVARIANTS
        struct intr_handler *ih;
#endif
#ifdef notyet
        int dead;
#endif

        if (handler == NULL)
                return (EINVAL);
        ie = handler->ih_event;
        KASSERT(ie != NULL,
            ("interrupt handler \"%s\" has a NULL interrupt event",
            handler->ih_name));
        mtx_lock(&ie->ie_lock);
        CTR3(KTR_INTR, "%s: removing %s from %s", __func__, handler->ih_name,
            ie->ie_name);
#ifdef INVARIANTS
        TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next)
                if (ih == handler)
                        goto ok;
        mtx_unlock(&ie->ie_lock);
        /* Use "handler", not "ih": ih is NULL when the loop falls through. */
        panic("interrupt handler \"%s\" not found in interrupt event \"%s\"",
            handler->ih_name, ie->ie_name);
ok:
#endif
        /*
         * If there is no ithread, then just remove the handler and return.
         * XXX: Note that an INTR_FAST handler might be running on another
         * CPU!
         */
        if (ie->ie_thread == NULL) {
                TAILQ_REMOVE(&ie->ie_handlers, handler, ih_next);
                mtx_unlock(&ie->ie_lock);
                free(handler, M_ITHREAD);
                return (0);
        }

        /*
         * If the interrupt thread is already running, then just mark this
         * handler as being dead and let the ithread do the actual removal.
         *
         * During a cold boot while cold is set, msleep() does not sleep,
         * so we have to remove the handler here rather than letting the
         * thread do it.
         */
        thread_lock(ie->ie_thread->it_thread);
        if (!TD_AWAITING_INTR(ie->ie_thread->it_thread) && !cold) {
                handler->ih_flags |= IH_DEAD;

                /*
                 * Ensure that the thread will process the handler list
                 * again and remove this handler if it has already passed
                 * it on the list.
                 */
                ie->ie_thread->it_need = 1;
        } else
                TAILQ_REMOVE(&ie->ie_handlers, handler, ih_next);
        thread_unlock(ie->ie_thread->it_thread);
        while (handler->ih_flags & IH_DEAD)
                msleep(handler, &ie->ie_lock, 0, "iev_rmh", 0);
        intr_event_update(ie);
#ifdef notyet
        /*
         * XXX: This could be bad in the case of ppbus(4).  Also, I think
         * this could lead to races of stale data when servicing an
         * interrupt.
         */
        dead = 1;
        TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next) {
                if (!(ih->ih_flags & IH_FAST)) {
                        dead = 0;
                        break;
                }
        }
        if (dead) {
                ithread_destroy(ie->ie_thread);
                ie->ie_thread = NULL;
        }
#endif
        mtx_unlock(&ie->ie_lock);
        free(handler, M_ITHREAD);
        return (0);
}

static int
intr_event_schedule_thread(struct intr_event *ie)
{
        struct intr_entropy entropy;
        struct intr_thread *it;
        struct thread *td;
        struct thread *ctd;
        struct proc *p;

        /*
         * If no ithread or no handlers, then we have a stray interrupt.
         */
        if (ie == NULL || TAILQ_EMPTY(&ie->ie_handlers) ||
            ie->ie_thread == NULL)
                return (EINVAL);

        ctd = curthread;
        it = ie->ie_thread;
        td = it->it_thread;
        p = td->td_proc;

        /*
         * If any of the handlers for this ithread claim to be good
         * sources of entropy, then gather some.
         */
        if (harvest.interrupt && ie->ie_flags & IE_ENTROPY) {
                CTR3(KTR_INTR, "%s: pid %d (%s) gathering entropy", __func__,
                    p->p_pid, td->td_name);
                entropy.event = (uintptr_t)ie;
                entropy.td = ctd;
                random_harvest(&entropy, sizeof(entropy), 2, 0,
                    RANDOM_INTERRUPT);
        }

        KASSERT(p != NULL, ("ithread %s has no process", ie->ie_name));

        /*
         * Set it_need to tell the thread to keep running if it is already
         * running.  Then, lock the thread and see if we actually need to
         * put it on the runqueue.
         */
        it->it_need = 1;
        thread_lock(td);
        if (TD_AWAITING_INTR(td)) {
                CTR3(KTR_INTR, "%s: schedule pid %d (%s)", __func__, p->p_pid,
                    td->td_name);
                TD_CLR_IWAIT(td);
                sched_add(td, SRQ_INTR);
        } else {
                CTR5(KTR_INTR, "%s: pid %d (%s): it_need %d, state %d",
                    __func__, p->p_pid, td->td_name, it->it_need, td->td_state);
        }
        thread_unlock(td);

        return (0);
}
#else
int
intr_event_remove_handler(void *cookie)
{
        struct intr_handler *handler = (struct intr_handler *)cookie;
        struct intr_event *ie;
        struct intr_thread *it;
#ifdef INVARIANTS
        struct intr_handler *ih;
#endif
#ifdef notyet
        int dead;
#endif

        if (handler == NULL)
                return (EINVAL);
        ie = handler->ih_event;
        KASSERT(ie != NULL,
            ("interrupt handler \"%s\" has a NULL interrupt event",
            handler->ih_name));
        mtx_lock(&ie->ie_lock);
        CTR3(KTR_INTR, "%s: removing %s from %s", __func__, handler->ih_name,
            ie->ie_name);
#ifdef INVARIANTS
        TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next)
                if (ih == handler)
                        goto ok;
        mtx_unlock(&ie->ie_lock);
        /* Use "handler", not "ih": ih is NULL when the loop falls through. */
        panic("interrupt handler \"%s\" not found in interrupt event \"%s\"",
            handler->ih_name, ie->ie_name);
ok:
#endif
        /*
         * If there are no ithreads (per event and per handler), then
         * just remove the handler and return.
         * XXX: Note that an INTR_FAST handler might be running on another CPU!
         */
        if (ie->ie_thread == NULL && handler->ih_thread == NULL) {
                TAILQ_REMOVE(&ie->ie_handlers, handler, ih_next);
                mtx_unlock(&ie->ie_lock);
                free(handler, M_ITHREAD);
                return (0);
        }

        /* Private or global ithread? */
        it = (handler->ih_thread) ? handler->ih_thread : ie->ie_thread;
        /*
         * If the interrupt thread is already running, then just mark this
         * handler as being dead and let the ithread do the actual removal.
         *
         * During a cold boot while cold is set, msleep() does not sleep,
         * so we have to remove the handler here rather than letting the
         * thread do it.
         */
        thread_lock(it->it_thread);
        if (!TD_AWAITING_INTR(it->it_thread) && !cold) {
                handler->ih_flags |= IH_DEAD;

                /*
                 * Ensure that the thread will process the handler list
                 * again and remove this handler if it has already passed
                 * it on the list.
                 */
                it->it_need = 1;
        } else
                TAILQ_REMOVE(&ie->ie_handlers, handler, ih_next);
        thread_unlock(it->it_thread);
        while (handler->ih_flags & IH_DEAD)
                msleep(handler, &ie->ie_lock, 0, "iev_rmh", 0);
        /*
         * At this point, the handler has been disconnected from the event,
         * so we can kill the private ithread if any.
         */
        if (handler->ih_thread) {
                ithread_destroy(handler->ih_thread);
                handler->ih_thread = NULL;
        }
        intr_event_update(ie);
#ifdef notyet
        /*
         * XXX: This could be bad in the case of ppbus(4).  Also, I think
         * this could lead to races of stale data when servicing an
         * interrupt.
         */
        dead = 1;
        TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next) {
                /* Test the list entry (ih), as the !INTR_FILTER copy does. */
                if (!(ih->ih_flags & IH_FAST)) {
                        dead = 0;
                        break;
                }
        }
        if (dead) {
                ithread_destroy(ie->ie_thread);
                ie->ie_thread = NULL;
        }
#endif
        mtx_unlock(&ie->ie_lock);
        free(handler, M_ITHREAD);
        return (0);
}

static int
intr_event_schedule_thread(struct intr_event *ie, struct intr_thread *it)
{
        struct intr_entropy entropy;
        struct thread *td;
        struct thread *ctd;
        struct proc *p;

        /*
         * If no ithread or no handlers, then we have a stray interrupt.
         */
        if (ie == NULL || TAILQ_EMPTY(&ie->ie_handlers) || it == NULL)
                return (EINVAL);

        ctd = curthread;
        td = it->it_thread;
        p = td->td_proc;

        /*
         * If any of the handlers for this ithread claim to be good
         * sources of entropy, then gather some.
         */
        if (harvest.interrupt && ie->ie_flags & IE_ENTROPY) {
                CTR3(KTR_INTR, "%s: pid %d (%s) gathering entropy", __func__,
                    p->p_pid, td->td_name);
                entropy.event = (uintptr_t)ie;
                entropy.td = ctd;
                random_harvest(&entropy, sizeof(entropy), 2, 0,
                    RANDOM_INTERRUPT);
        }

        KASSERT(p != NULL, ("ithread %s has no process", ie->ie_name));

        /*
         * Set it_need to tell the thread to keep running if it is already
         * running.  Then, lock the thread and see if we actually need to
         * put it on the runqueue.
         */
        it->it_need = 1;
        thread_lock(td);
        if (TD_AWAITING_INTR(td)) {
                CTR3(KTR_INTR, "%s: schedule pid %d (%s)", __func__, p->p_pid,
                    td->td_name);
                TD_CLR_IWAIT(td);
                sched_add(td, SRQ_INTR);
        } else {
                CTR5(KTR_INTR, "%s: pid %d (%s): it_need %d, state %d",
                    __func__, p->p_pid, td->td_name, it->it_need, td->td_state);
        }
        thread_unlock(td);

        return (0);
}
#endif

/*
 * Allow interrupt event binding for software interrupt handlers -- a no-op,
 * since interrupts are generated in software rather than being directed by
 * a PIC.
 */
static int
swi_assign_cpu(void *arg, u_char cpu)
{

        return (0);
}

/*
 * Add a software interrupt handler to a specified event.  If a given event
 * is not specified, then a new event is created.
 */
int
swi_add(struct intr_event **eventp, const char *name, driver_intr_t handler,
    void *arg, int pri, enum intr_type flags, void **cookiep)
{
        struct thread *td;
        struct intr_event *ie;
        int error;

        if (flags & INTR_ENTROPY)
                return (EINVAL);

        ie = (eventp != NULL) ? *eventp : NULL;

        if (ie != NULL) {
                if (!(ie->ie_flags & IE_SOFT))
                        return (EINVAL);
        } else {
                error = intr_event_create(&ie, NULL, IE_SOFT, 0,
                    NULL, NULL, NULL, swi_assign_cpu, "swi%d:", pri);
                if (error)
                        return (error);
                if (eventp != NULL)
                        *eventp = ie;
        }
        error = intr_event_add_handler(ie, name, NULL, handler, arg,
            PI_SWI(pri), flags, cookiep);
        if (error)
                return (error);
        if (pri == SWI_CLOCK) {
                td = ie->ie_thread->it_thread;
                thread_lock(td);
                td->td_flags |= TDF_NOLOAD;
                thread_unlock(td);
        }
        return (0);
}

/*
 * Schedule a software interrupt thread.
 */
void
swi_sched(void *cookie, int flags)
{
        struct intr_handler *ih = (struct intr_handler *)cookie;
        struct intr_event *ie = ih->ih_event;
        int error;

        CTR3(KTR_INTR, "swi_sched: %s %s need=%d", ie->ie_name, ih->ih_name,
            ih->ih_need);

        /*
         * Set ih_need for this handler so that if the ithread is already
         * running it will execute this handler on the next pass.  Otherwise,
         * it will execute it the next time it runs.
         */
        atomic_store_rel_int(&ih->ih_need, 1);

        if (!(flags & SWI_DELAY)) {
                PCPU_INC(cnt.v_soft);
#ifdef INTR_FILTER
                error = intr_event_schedule_thread(ie, ie->ie_thread);
#else
                error = intr_event_schedule_thread(ie);
#endif
                KASSERT(error == 0, ("stray software interrupt"));
        }
}
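
/*
 * Sketch (illustrative): registering and kicking a software interrupt,
 * mirroring the vm swi set up in start_softintr() below.  The
 * "example_*" names are hypothetical.
 */
#if 0
static void *example_ih;

static void
example_swi_handler(void *arg)
{
        /* Runs in a swi ithread at PI_SWI(SWI_TQ). */
}

static void
example_init(void *dummy)
{

        if (swi_add(NULL, "example", example_swi_handler, NULL, SWI_TQ,
            INTR_MPSAFE, &example_ih) != 0)
                panic("could not add example swi handler");
}

/* Later, from any context that wants the handler to run: */
/*      swi_sched(example_ih, 0);       */
#endif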

/*
 * Remove a software interrupt handler.  Currently this code does not
 * remove the associated interrupt event if it becomes empty.  Calling code
 * may do so manually via intr_event_destroy(), but that's not really
 * an optimal interface.
 */
int
swi_remove(void *cookie)
{

        return (intr_event_remove_handler(cookie));
}

#ifdef INTR_FILTER
static void
priv_ithread_execute_handler(struct proc *p, struct intr_handler *ih)
{
        struct intr_event *ie;

        ie = ih->ih_event;
        /*
         * If this handler is marked for death, remove it from
         * the list of handlers and wake up the sleeper.
         */
        if (ih->ih_flags & IH_DEAD) {
                mtx_lock(&ie->ie_lock);
                TAILQ_REMOVE(&ie->ie_handlers, ih, ih_next);
                ih->ih_flags &= ~IH_DEAD;
                wakeup(ih);
                mtx_unlock(&ie->ie_lock);
                return;
        }

        /* Execute this handler. */
        CTR6(KTR_INTR, "%s: pid %d exec %p(%p) for %s flg=%x",
            __func__, p->p_pid, (void *)ih->ih_handler, ih->ih_argument,
            ih->ih_name, ih->ih_flags);

        if (!(ih->ih_flags & IH_MPSAFE))
                mtx_lock(&Giant);
        ih->ih_handler(ih->ih_argument);
        if (!(ih->ih_flags & IH_MPSAFE))
                mtx_unlock(&Giant);
}
#endif

/*
 * This is a public function for use by drivers that mux interrupt
 * handlers for child devices from their interrupt handler.
 */
void
intr_event_execute_handlers(struct proc *p, struct intr_event *ie)
{
        struct intr_handler *ih, *ihn;

        TAILQ_FOREACH_SAFE(ih, &ie->ie_handlers, ih_next, ihn) {
                /*
                 * If this handler is marked for death, remove it from
                 * the list of handlers and wake up the sleeper.
                 */
                if (ih->ih_flags & IH_DEAD) {
                        mtx_lock(&ie->ie_lock);
                        TAILQ_REMOVE(&ie->ie_handlers, ih, ih_next);
                        ih->ih_flags &= ~IH_DEAD;
                        wakeup(ih);
                        mtx_unlock(&ie->ie_lock);
                        continue;
                }

                /* Skip filter only handlers */
                if (ih->ih_handler == NULL)
                        continue;

                /*
                 * For software interrupt threads, we only execute
                 * handlers that have their need flag set.  Hardware
                 * interrupt threads always invoke all of their handlers.
                 */
                if (ie->ie_flags & IE_SOFT) {
                        if (!ih->ih_need)
                                continue;
                        else
                                atomic_store_rel_int(&ih->ih_need, 0);
                }

                /* Execute this handler. */
                CTR6(KTR_INTR, "%s: pid %d exec %p(%p) for %s flg=%x",
                    __func__, p->p_pid, (void *)ih->ih_handler,
                    ih->ih_argument, ih->ih_name, ih->ih_flags);

                if (!(ih->ih_flags & IH_MPSAFE))
                        mtx_lock(&Giant);
                ih->ih_handler(ih->ih_argument);
                if (!(ih->ih_flags & IH_MPSAFE))
                        mtx_unlock(&Giant);
        }
}

static void
ithread_execute_handlers(struct proc *p, struct intr_event *ie)
{

        /* Interrupt handlers should not sleep. */
        if (!(ie->ie_flags & IE_SOFT))
                THREAD_NO_SLEEPING();
        intr_event_execute_handlers(p, ie);
        if (!(ie->ie_flags & IE_SOFT))
                THREAD_SLEEPING_OK();

        /*
         * Interrupt storm handling:
         *
         * If this interrupt source is currently storming, then throttle
         * it to only fire the handler once per clock tick.
         *
         * If this interrupt source is not currently storming, but the
         * number of back to back interrupts exceeds the storm threshold,
         * then enter storming mode.
         */
        if (intr_storm_threshold != 0 && ie->ie_count >= intr_storm_threshold &&
            !(ie->ie_flags & IE_SOFT)) {
                /* Report the message only once every second. */
                if (ppsratecheck(&ie->ie_warntm, &ie->ie_warncnt, 1)) {
                        printf(
        "interrupt storm detected on \"%s\"; throttling interrupt source\n",
                            ie->ie_name);
                }
                pause("istorm", 1);
        } else
                ie->ie_count++;

        /*
         * Now that all the handlers have had a chance to run, reenable
         * the interrupt source.
         */
        if (ie->ie_post_ithread != NULL)
                ie->ie_post_ithread(ie->ie_source);
}

#ifndef INTR_FILTER
/*
 * This is the main code for interrupt threads.
 */
static void
ithread_loop(void *arg)
{
        struct intr_thread *ithd;
        struct intr_event *ie;
        struct thread *td;
        struct proc *p;
        int wake;

        td = curthread;
        p = td->td_proc;
        ithd = (struct intr_thread *)arg;
        KASSERT(ithd->it_thread == td,
            ("%s: ithread and proc linkage out of sync", __func__));
        ie = ithd->it_event;
        ie->ie_count = 0;
        wake = 0;

        /*
         * As long as we have interrupts outstanding, go through the
         * list of handlers, giving each one a go at it.
         */
        for (;;) {
                /*
                 * If we are an orphaned thread, then just die.
                 */
                if (ithd->it_flags & IT_DEAD) {
                        CTR3(KTR_INTR, "%s: pid %d (%s) exiting", __func__,
                            p->p_pid, td->td_name);
                        free(ithd, M_ITHREAD);
                        kthread_exit();
                }

                /*
                 * Service interrupts.  If another interrupt arrives while
                 * we are running, it will set it_need to note that we
                 * should make another pass.
                 */
                while (ithd->it_need) {
                        /*
                         * This might need a full read and write barrier
                         * to make sure that this write posts before any
                         * of the memory or device accesses in the
                         * handlers.
                         */
                        atomic_store_rel_int(&ithd->it_need, 0);
                        ithread_execute_handlers(p, ie);
                }
                WITNESS_WARN(WARN_PANIC, NULL, "suspending ithread");
                mtx_assert(&Giant, MA_NOTOWNED);

                /*
                 * Processed all our interrupts.  Now get the sched
                 * lock.  This may take a while and it_need may get
                 * set again, so we have to check it again.
                 */
                thread_lock(td);
                if (!ithd->it_need && !(ithd->it_flags & (IT_DEAD | IT_WAIT))) {
                        TD_SET_IWAIT(td);
                        ie->ie_count = 0;
                        mi_switch(SW_VOL | SWT_IWAIT, NULL);
                }
                if (ithd->it_flags & IT_WAIT) {
                        wake = 1;
                        ithd->it_flags &= ~IT_WAIT;
                }
                thread_unlock(td);
                if (wake) {
                        wakeup(ithd);
                        wake = 0;
                }
        }
}

/*
 * Main interrupt handling body.
 *
 * Input:
 * o ie:                        the event connected to this interrupt.
 * o frame:                     some archs (e.g. i386) pass a frame to
 *                              some handlers as their main argument.
 * Return value:
 * o 0:                         everything ok.
 * o EINVAL:                    stray interrupt.
 */
int
intr_event_handle(struct intr_event *ie, struct trapframe *frame)
{
        struct intr_handler *ih;
        struct trapframe *oldframe;
        struct thread *td;
        int error, ret, thread;

        td = curthread;

        /* An interrupt with no event or handlers is a stray interrupt. */
        if (ie == NULL || TAILQ_EMPTY(&ie->ie_handlers))
                return (EINVAL);

        /*
         * Execute fast interrupt handlers directly.
         * To support clock handlers, if a handler registers
         * with a NULL argument, then we pass it a pointer to
         * a trapframe as its argument.
         */
        td->td_intr_nesting_level++;
        thread = 0;
        ret = 0;
        critical_enter();
        oldframe = td->td_intr_frame;
        td->td_intr_frame = frame;
        TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next) {
                if (ih->ih_filter == NULL) {
                        thread = 1;
                        continue;
                }
                CTR4(KTR_INTR, "%s: exec %p(%p) for %s", __func__,
                    ih->ih_filter, ih->ih_argument == NULL ? frame :
                    ih->ih_argument, ih->ih_name);
                if (ih->ih_argument == NULL)
                        ret = ih->ih_filter(frame);
                else
                        ret = ih->ih_filter(ih->ih_argument);
                KASSERT(ret == FILTER_STRAY ||
                    ((ret & (FILTER_SCHEDULE_THREAD | FILTER_HANDLED)) != 0 &&
                    (ret & ~(FILTER_SCHEDULE_THREAD | FILTER_HANDLED)) == 0),
                    ("%s: incorrect return value %#x from %s", __func__, ret,
                    ih->ih_name));

                /*
                 * Wrapper handler special handling:
                 *
                 * in some particular cases (like pccard and pccbb),
                 * the _real_ device handler is wrapped in a couple of
                 * functions - a filter wrapper and an ithread wrapper.
                 * In this case (and just in this case), the filter wrapper
                 * could ask the system to schedule the ithread and mask
                 * the interrupt source if the wrapped handler is composed
                 * of just an ithread handler.
                 *
                 * TODO: write a generic wrapper to avoid people rolling
                 * their own
                 */
                if (!thread) {
                        if (ret == FILTER_SCHEDULE_THREAD)
                                thread = 1;
                }
        }
        td->td_intr_frame = oldframe;

        if (thread) {
                if (ie->ie_pre_ithread != NULL)
                        ie->ie_pre_ithread(ie->ie_source);
        } else {
                if (ie->ie_post_filter != NULL)
                        ie->ie_post_filter(ie->ie_source);
        }

        /* Schedule the ithread if needed. */
        if (thread) {
                error = intr_event_schedule_thread(ie);
#ifndef XEN
                KASSERT(error == 0, ("bad stray interrupt"));
#else
                if (error != 0)
                        log(LOG_WARNING, "bad stray interrupt");
#endif
        }
        critical_exit();
        td->td_intr_nesting_level--;
        return (0);
}
#else
/*
 * This is the main code for interrupt threads.
 */
static void
ithread_loop(void *arg)
{
        struct intr_thread *ithd;
        struct intr_handler *ih;
        struct intr_event *ie;
        struct thread *td;
        struct proc *p;
        int priv;
        int wake;

        td = curthread;
        p = td->td_proc;
        ih = (struct intr_handler *)arg;
        priv = (ih->ih_thread != NULL) ? 1 : 0;
        ithd = (priv) ? ih->ih_thread : ih->ih_event->ie_thread;
        KASSERT(ithd->it_thread == td,
            ("%s: ithread and proc linkage out of sync", __func__));
        ie = ithd->it_event;
        ie->ie_count = 0;
        wake = 0;

        /*
         * As long as we have interrupts outstanding, go through the
         * list of handlers, giving each one a go at it.
         */
        for (;;) {
                /*
                 * If we are an orphaned thread, then just die.
                 */
                if (ithd->it_flags & IT_DEAD) {
                        CTR3(KTR_INTR, "%s: pid %d (%s) exiting", __func__,
                            p->p_pid, td->td_name);
                        free(ithd, M_ITHREAD);
                        kthread_exit();
                }

                /*
                 * Service interrupts.  If another interrupt arrives while
                 * we are running, it will set it_need to note that we
                 * should make another pass.
                 */
                while (ithd->it_need) {
                        /*
                         * This might need a full read and write barrier
                         * to make sure that this write posts before any
                         * of the memory or device accesses in the
                         * handlers.
                         */
                        atomic_store_rel_int(&ithd->it_need, 0);
                        if (priv)
                                priv_ithread_execute_handler(p, ih);
                        else
                                ithread_execute_handlers(p, ie);
                }
                WITNESS_WARN(WARN_PANIC, NULL, "suspending ithread");
                mtx_assert(&Giant, MA_NOTOWNED);

                /*
                 * Processed all our interrupts.  Now get the sched
                 * lock.  This may take a while and it_need may get
                 * set again, so we have to check it again.
                 */
                thread_lock(td);
                if (!ithd->it_need && !(ithd->it_flags & (IT_DEAD | IT_WAIT))) {
                        TD_SET_IWAIT(td);
                        ie->ie_count = 0;
                        mi_switch(SW_VOL | SWT_IWAIT, NULL);
                }
                if (ithd->it_flags & IT_WAIT) {
                        wake = 1;
                        ithd->it_flags &= ~IT_WAIT;
                }
                thread_unlock(td);
                if (wake) {
                        wakeup(ithd);
                        wake = 0;
                }
        }
}

/*
 * Main loop for interrupt filter.
 *
 * Some architectures (i386, amd64 and arm) require the optional frame
 * parameter, and use it as the main argument for fast handler execution
 * when ih_argument == NULL.
 *
 * Return value:
 * o FILTER_STRAY:              No filter recognized the event, and no
 *                              filter-less handler is registered on this
 *                              line.
 * o FILTER_HANDLED:            A filter claimed the event and served it.
 * o FILTER_SCHEDULE_THREAD:    No filter claimed the event, but there's at
 *                              least one filter-less handler on this line.
 * o FILTER_HANDLED |
 *   FILTER_SCHEDULE_THREAD:    A filter claimed the event, and asked for
 *                              scheduling the per-handler ithread.
 *
 * In case an ithread has to be scheduled, in *ithd there will be a
 * pointer to a struct intr_thread containing the thread to be
 * scheduled.
 */

static int
intr_filter_loop(struct intr_event *ie, struct trapframe *frame,
    struct intr_thread **ithd)
{
        struct intr_handler *ih;
        void *arg;
        int ret, thread_only;

        ret = 0;
        thread_only = 0;
        TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next) {
                /*
                 * Execute fast interrupt handlers directly.
                 * To support clock handlers, if a handler registers
                 * with a NULL argument, then we pass it a pointer to
                 * a trapframe as its argument.
                 */
                arg = ((ih->ih_argument == NULL) ? frame : ih->ih_argument);

                CTR5(KTR_INTR, "%s: exec %p/%p(%p) for %s", __func__,
                    ih->ih_filter, ih->ih_handler, arg, ih->ih_name);

                if (ih->ih_filter != NULL)
                        ret = ih->ih_filter(arg);
                else {
                        thread_only = 1;
                        continue;
                }
                KASSERT(ret == FILTER_STRAY ||
                    ((ret & (FILTER_SCHEDULE_THREAD | FILTER_HANDLED)) != 0 &&
                    (ret & ~(FILTER_SCHEDULE_THREAD | FILTER_HANDLED)) == 0),
                    ("%s: incorrect return value %#x from %s", __func__, ret,
                    ih->ih_name));
                if (ret & FILTER_STRAY)
                        continue;
                else {
                        *ithd = ih->ih_thread;
                        return (ret);
                }
        }

        /*
         * No filters handled the interrupt and we have at least
         * one handler without a filter.  In this case, we schedule
         * all of the filter-less handlers to run in the ithread.
         */
        if (thread_only) {
                *ithd = ie->ie_thread;
                return (FILTER_SCHEDULE_THREAD);
        }
        return (FILTER_STRAY);
}

/*
 * Main interrupt handling body.
 *
 * Input:
 * o ie:                        the event connected to this interrupt.
 * o frame:                     some archs (e.g. i386) pass a frame to
 *                              some handlers as their main argument.
 * Return value:
 * o 0:                         everything ok.
 * o EINVAL:                    stray interrupt.
 */
int
intr_event_handle(struct intr_event *ie, struct trapframe *frame)
{
        struct intr_thread *ithd;
        struct trapframe *oldframe;
        struct thread *td;
        int thread;

        ithd = NULL;
        td = curthread;

        if (ie == NULL || TAILQ_EMPTY(&ie->ie_handlers))
                return (EINVAL);

        td->td_intr_nesting_level++;
        thread = 0;
        critical_enter();
        oldframe = td->td_intr_frame;
        td->td_intr_frame = frame;
        thread = intr_filter_loop(ie, frame, &ithd);
        if (thread & FILTER_HANDLED) {
                if (ie->ie_post_filter != NULL)
                        ie->ie_post_filter(ie->ie_source);
        } else {
                if (ie->ie_pre_ithread != NULL)
                        ie->ie_pre_ithread(ie->ie_source);
        }
        td->td_intr_frame = oldframe;
        critical_exit();

        /* Interrupt storm logic */
        if (thread & FILTER_STRAY) {
                ie->ie_count++;
                if (ie->ie_count < intr_storm_threshold)
                        printf("Interrupt stray detection not present\n");
        }

        /* Schedule an ithread if needed. */
        if (thread & FILTER_SCHEDULE_THREAD) {
                if (intr_event_schedule_thread(ie, ithd) != 0)
                        panic("%s: impossible stray interrupt", __func__);
        }
        td->td_intr_nesting_level--;
        return (0);
}
#endif

#ifdef DDB
/*
 * Dump details about an interrupt handler
 */
static void
db_dump_intrhand(struct intr_handler *ih)
{
        int comma;

        db_printf("\t%-10s ", ih->ih_name);
        switch (ih->ih_pri) {
        case PI_REALTIME:
                db_printf("CLK ");
                break;
        case PI_AV:
                db_printf("AV  ");
                break;
        case PI_TTY:
                db_printf("TTY ");
                break;
        case PI_NET:
                db_printf("NET ");
                break;
        case PI_DISK:
                db_printf("DISK");
                break;
        case PI_DULL:
                db_printf("DULL");
                break;
        default:
                if (ih->ih_pri >= PI_SOFT)
                        db_printf("SWI ");
                else
                        db_printf("%4u", ih->ih_pri);
                break;
        }
        db_printf(" ");
        db_printsym((uintptr_t)ih->ih_handler, DB_STGY_PROC);
        db_printf("(%p)", ih->ih_argument);
        if (ih->ih_need ||
            (ih->ih_flags & (IH_EXCLUSIVE | IH_ENTROPY | IH_DEAD |
            IH_MPSAFE)) != 0) {
                db_printf(" {");
                comma = 0;
                if (ih->ih_flags & IH_EXCLUSIVE) {
                        if (comma)
                                db_printf(", ");
                        db_printf("EXCL");
                        comma = 1;
                }
                if (ih->ih_flags & IH_ENTROPY) {
                        if (comma)
                                db_printf(", ");
                        db_printf("ENTROPY");
                        comma = 1;
                }
                if (ih->ih_flags & IH_DEAD) {
                        if (comma)
                                db_printf(", ");
                        db_printf("DEAD");
                        comma = 1;
                }
                if (ih->ih_flags & IH_MPSAFE) {
                        if (comma)
                                db_printf(", ");
                        db_printf("MPSAFE");
                        comma = 1;
                }
                if (ih->ih_need) {
                        if (comma)
                                db_printf(", ");
                        db_printf("NEED");
                }
                db_printf("}");
        }
        db_printf("\n");
}

/*
 * Dump details about an event.
 */
void
db_dump_intr_event(struct intr_event *ie, int handlers)
{
        struct intr_handler *ih;
        struct intr_thread *it;
        int comma;

        db_printf("%s ", ie->ie_fullname);
        it = ie->ie_thread;
        if (it != NULL)
                db_printf("(pid %d)", it->it_thread->td_proc->p_pid);
        else
                db_printf("(no thread)");
        if ((ie->ie_flags & (IE_SOFT | IE_ENTROPY | IE_ADDING_THREAD)) != 0 ||
            (it != NULL && it->it_need)) {
                db_printf(" {");
                comma = 0;
                if (ie->ie_flags & IE_SOFT) {
                        db_printf("SOFT");
                        comma = 1;
                }
                if (ie->ie_flags & IE_ENTROPY) {
                        if (comma)
                                db_printf(", ");
                        db_printf("ENTROPY");
                        comma = 1;
                }
                if (ie->ie_flags & IE_ADDING_THREAD) {
                        if (comma)
                                db_printf(", ");
                        db_printf("ADDING_THREAD");
                        comma = 1;
                }
                if (it != NULL && it->it_need) {
                        if (comma)
                                db_printf(", ");
                        db_printf("NEED");
                }
                db_printf("}");
        }
        db_printf("\n");

        if (handlers)
                TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next)
                        db_dump_intrhand(ih);
}

/*
 * Dump data about interrupt handlers
 */
DB_SHOW_COMMAND(intr, db_show_intr)
{
        struct intr_event *ie;
        int all, verbose;

        verbose = index(modif, 'v') != NULL;
        all = index(modif, 'a') != NULL;
        TAILQ_FOREACH(ie, &event_list, ie_list) {
                if (!all && TAILQ_EMPTY(&ie->ie_handlers))
                        continue;
                db_dump_intr_event(ie, verbose);
                if (db_pager_quit)
                        break;
        }
}
#endif /* DDB */

/*
 * Start standard software interrupt threads
 */
static void
start_softintr(void *dummy)
{

        if (swi_add(NULL, "vm", swi_vm, NULL, SWI_VM, INTR_MPSAFE, &vm_ih))
                panic("died while creating vm swi ithread");
}
SYSINIT(start_softintr, SI_SUB_SOFTINTR, SI_ORDER_FIRST, start_softintr,
    NULL);

/*
 * Sysctls used by systat and others: hw.intrnames and hw.intrcnt.
 * The data for this is machine dependent, and the declarations are in
 * machine dependent code.  The layout of intrnames and intrcnt however is
 * machine independent.
 *
 * We do not know the length of intrcnt and intrnames at compile time, so
 * calculate things at run time.
 */
static int
sysctl_intrnames(SYSCTL_HANDLER_ARGS)
{
        return (sysctl_handle_opaque(oidp, intrnames, sintrnames, req));
}

SYSCTL_PROC(_hw, OID_AUTO, intrnames, CTLTYPE_OPAQUE | CTLFLAG_RD,
    NULL, 0, sysctl_intrnames, "", "Interrupt Names");

static int
sysctl_intrcnt(SYSCTL_HANDLER_ARGS)
{
        return (sysctl_handle_opaque(oidp, intrcnt, sintrcnt, req));
}

SYSCTL_PROC(_hw, OID_AUTO, intrcnt, CTLTYPE_OPAQUE | CTLFLAG_RD,
    NULL, 0, sysctl_intrcnt, "", "Interrupt Counts");

#ifdef DDB
/*
 * DDB command to dump the interrupt statistics.
 */
DB_SHOW_COMMAND(intrcnt, db_show_intrcnt)
{
        u_long *i;
        char *cp;
        u_int j;

        cp = intrnames;
        j = 0;
        for (i = intrcnt; j < (sintrcnt / sizeof(u_long)) && !db_pager_quit;
            i++, j++) {
                if (*cp == '\0')
                        break;
                if (*i != 0)
                        db_printf("%s\t%lu\n", cp, *i);
                cp += strlen(cp) + 1;
        }
}
#endif