FreeBSD/Linux Kernel Cross Reference
sys/kern/kern_intr.c
1 /*-
2 * Copyright (c) 1997, Stefan Esser <se@freebsd.org>
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice unmodified, this list of conditions, and the following
10 * disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 *
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
16 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
17 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
18 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
19 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
20 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
21 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
22 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
24 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25 */
26
27 #include <sys/cdefs.h>
28 __FBSDID("$FreeBSD: releng/6.1/sys/kern/kern_intr.c 157786 2006-04-15 20:08:33Z scottl $");
29
30 #include "opt_ddb.h"
31
32 #include <sys/param.h>
33 #include <sys/bus.h>
34 #include <sys/conf.h>
35 #include <sys/rtprio.h>
36 #include <sys/systm.h>
37 #include <sys/interrupt.h>
38 #include <sys/kernel.h>
39 #include <sys/kthread.h>
40 #include <sys/ktr.h>
41 #include <sys/limits.h>
42 #include <sys/lock.h>
43 #include <sys/malloc.h>
44 #include <sys/mutex.h>
45 #include <sys/proc.h>
46 #include <sys/random.h>
47 #include <sys/resourcevar.h>
48 #include <sys/sched.h>
49 #include <sys/sysctl.h>
50 #include <sys/unistd.h>
51 #include <sys/vmmeter.h>
52 #include <machine/atomic.h>
53 #include <machine/cpu.h>
54 #include <machine/md_var.h>
55 #include <machine/stdarg.h>
56 #ifdef DDB
57 #include <ddb/ddb.h>
58 #include <ddb/db_sym.h>
59 #endif
60
61 /*
62 * Describe an interrupt thread. There is one of these per interrupt event.
63 */
64 struct intr_thread {
65 struct intr_event *it_event;
66 struct thread *it_thread; /* Kernel thread. */
67 int it_flags; /* (j) IT_* flags. */
68 int it_need; /* Needs service. */
69 };
70
71 /* Interrupt thread flags kept in it_flags */
72 #define IT_DEAD 0x000001 /* Thread is waiting to exit. */
73
74 struct intr_entropy {
75 struct thread *td;
76 uintptr_t event;
77 };
78
79 struct intr_event *clk_intr_event;
80 struct intr_event *tty_intr_event;
81 void *softclock_ih;
82 void *vm_ih;
83
84 static MALLOC_DEFINE(M_ITHREAD, "ithread", "Interrupt Threads");
85
86 static int intr_storm_threshold = 500;
87 TUNABLE_INT("hw.intr_storm_threshold", &intr_storm_threshold);
88 SYSCTL_INT(_hw, OID_AUTO, intr_storm_threshold, CTLFLAG_RW,
89 &intr_storm_threshold, 0,
90 "Number of consecutive interrupts before storm protection is enabled");
91 static TAILQ_HEAD(, intr_event) event_list =
92 TAILQ_HEAD_INITIALIZER(event_list);
93
94 static void intr_event_update(struct intr_event *ie);
95 static struct intr_thread *ithread_create(const char *name);
96 static void ithread_destroy2(struct intr_thread *ithread);
97 static void ithread_execute_handlers(struct proc *p, struct intr_event *ie);
98 static void ithread_loop(void *);
99 static void ithread_update(struct intr_thread *ithd);
100 static void start_softintr(void *);
101
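/*
 * Map an interrupt type (the INTR_TYPE_* bit in the flags) to the base
 * scheduler priority used for its interrupt thread.  Exactly one type
 * bit must be set; otherwise we panic below.
 */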
102 u_char
103 intr_priority(enum intr_type flags)
104 {
105 u_char pri;
106
107 flags &= (INTR_TYPE_TTY | INTR_TYPE_BIO | INTR_TYPE_NET |
108 INTR_TYPE_CAM | INTR_TYPE_MISC | INTR_TYPE_CLK | INTR_TYPE_AV);
109 switch (flags) {
110 case INTR_TYPE_TTY:
111 pri = PI_TTYLOW;
112 break;
113 case INTR_TYPE_BIO:
114 /*
115 * XXX We need to refine this. BSD/OS distinguishes
116 * between tape and disk priorities.
117 */
118 pri = PI_DISK;
119 break;
120 case INTR_TYPE_NET:
121 pri = PI_NET;
122 break;
123 case INTR_TYPE_CAM:
124 pri = PI_DISK; /* XXX or PI_CAM? */
125 break;
126 case INTR_TYPE_AV: /* Audio/video */
127 pri = PI_AV;
128 break;
129 case INTR_TYPE_CLK:
130 pri = PI_REALTIME;
131 break;
132 case INTR_TYPE_MISC:
133 pri = PI_DULL; /* don't care */
134 break;
135 default:
136 /* We didn't specify an interrupt level. */
137 panic("intr_priority: no interrupt type in flags");
138 }
139
140 return pri;
141 }
142
143 /*
144 * Update an ithread based on the associated intr_event.
145 */
146 static void
147 ithread_update(struct intr_thread *ithd)
148 {
149 struct intr_event *ie;
150 struct thread *td;
151 u_char pri;
152
153 ie = ithd->it_event;
154 td = ithd->it_thread;
155
156 /* Determine the overall priority of this event. */
157 if (TAILQ_EMPTY(&ie->ie_handlers))
158 pri = PRI_MAX_ITHD;
159 else
160 pri = TAILQ_FIRST(&ie->ie_handlers)->ih_pri;
161
162 /* Update name and priority. */
163 strlcpy(td->td_proc->p_comm, ie->ie_fullname,
164 sizeof(td->td_proc->p_comm));
165 mtx_lock_spin(&sched_lock);
166 sched_prio(td, pri);
167 mtx_unlock_spin(&sched_lock);
168 }
169
170 /*
171 * Regenerate the full name of an interrupt event and update its priority.
172 */
173 static void
174 intr_event_update(struct intr_event *ie)
175 {
176 struct intr_handler *ih;
177 char *last;
178 int missed, space;
179
180 /* Start off with no entropy and just the name of the event. */
181 mtx_assert(&ie->ie_lock, MA_OWNED);
182 strlcpy(ie->ie_fullname, ie->ie_name, sizeof(ie->ie_fullname));
183 ie->ie_flags &= ~IE_ENTROPY;
184 missed = 0;
185 space = 1;
186
187 /* Run through all the handlers updating values. */
188 TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next) {
189 if (strlen(ie->ie_fullname) + strlen(ih->ih_name) + 1 <
190 sizeof(ie->ie_fullname)) {
191 strcat(ie->ie_fullname, " ");
192 strcat(ie->ie_fullname, ih->ih_name);
193 space = 0;
194 } else
195 missed++;
196 if (ih->ih_flags & IH_ENTROPY)
197 ie->ie_flags |= IE_ENTROPY;
198 }
199
200 /*
201 * If the handler names were too long, add +'s to indicate missing
202 * names. If we run out of room and still have +'s to add, change
203 * the last character from a + to a *.
204 */
205 last = &ie->ie_fullname[sizeof(ie->ie_fullname) - 2];
206 while (missed-- > 0) {
207 if (strlen(ie->ie_fullname) + 1 == sizeof(ie->ie_fullname)) {
208 if (*last == '+') {
209 *last = '*';
210 break;
211 } else
212 *last = '+';
213 } else if (space) {
214 strcat(ie->ie_fullname, " +");
215 space = 0;
216 } else
217 strcat(ie->ie_fullname, "+");
218 }
219
220 /*
221 	 * If this event has an ithread, update its priority and
222 * name.
223 */
224 if (ie->ie_thread != NULL)
225 ithread_update(ie->ie_thread);
226 CTR2(KTR_INTR, "%s: updated %s", __func__, ie->ie_fullname);
227 }
228
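/*
 * Create and initialize a new interrupt event.  The event is given a
 * printf-style base name, placed on the global event_list, and returned
 * through *event if the caller asked for it.  Handlers and the backing
 * ithread are added later via intr_event_add_handler().
 */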
229 int
230 intr_event_create(struct intr_event **event, void *source, int flags,
231 void (*enable)(void *), const char *fmt, ...)
232 {
233 struct intr_event *ie;
234 va_list ap;
235
236 /* The only valid flag during creation is IE_SOFT. */
237 if ((flags & ~IE_SOFT) != 0)
238 return (EINVAL);
239 ie = malloc(sizeof(struct intr_event), M_ITHREAD, M_WAITOK | M_ZERO);
240 ie->ie_source = source;
241 ie->ie_enable = enable;
242 ie->ie_flags = flags;
243 TAILQ_INIT(&ie->ie_handlers);
244 mtx_init(&ie->ie_lock, "intr event", NULL, MTX_DEF);
245
246 va_start(ap, fmt);
247 vsnprintf(ie->ie_name, sizeof(ie->ie_name), fmt, ap);
248 va_end(ap);
249 strlcpy(ie->ie_fullname, ie->ie_name, sizeof(ie->ie_fullname));
250 mtx_pool_lock(mtxpool_sleep, &event_list);
251 TAILQ_INSERT_TAIL(&event_list, ie, ie_list);
252 mtx_pool_unlock(mtxpool_sleep, &event_list);
253 if (event != NULL)
254 *event = ie;
255 CTR2(KTR_INTR, "%s: created %s", __func__, ie->ie_name);
256 return (0);
257 }
258
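/*
 * Tear down an interrupt event.  Fails with EBUSY while any handlers
 * remain attached; otherwise the event is removed from event_list, its
 * ithread (if any) is told to exit, and the storage is freed.
 */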
259 int
260 intr_event_destroy(struct intr_event *ie)
261 {
262
263 mtx_lock(&ie->ie_lock);
264 if (!TAILQ_EMPTY(&ie->ie_handlers)) {
265 mtx_unlock(&ie->ie_lock);
266 return (EBUSY);
267 }
268 mtx_pool_lock(mtxpool_sleep, &event_list);
269 TAILQ_REMOVE(&event_list, ie, ie_list);
270 mtx_pool_unlock(mtxpool_sleep, &event_list);
271 #ifndef notyet
272 if (ie->ie_thread != NULL) {
273 ithread_destroy2(ie->ie_thread);
274 ie->ie_thread = NULL;
275 }
276 #endif
277 mtx_unlock(&ie->ie_lock);
278 mtx_destroy(&ie->ie_lock);
279 free(ie, M_ITHREAD);
280 return (0);
281 }
282
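/*
 * Create the kernel thread that backs an interrupt event.  The thread is
 * created stopped (RFSTOPPED), placed in the PRI_ITHD priority class and
 * left in the IWAIT state until intr_event_schedule_thread() wakes it.
 */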
283 static struct intr_thread *
284 ithread_create(const char *name)
285 {
286 struct intr_thread *ithd;
287 struct thread *td;
288 struct proc *p;
289 int error;
290
291 ithd = malloc(sizeof(struct intr_thread), M_ITHREAD, M_WAITOK | M_ZERO);
292
293 error = kthread_create(ithread_loop, ithd, &p, RFSTOPPED | RFHIGHPID,
294 0, "%s", name);
295 if (error)
296 panic("kthread_create() failed with %d", error);
297 td = FIRST_THREAD_IN_PROC(p); /* XXXKSE */
298 mtx_lock_spin(&sched_lock);
299 td->td_ksegrp->kg_pri_class = PRI_ITHD;
300 TD_SET_IWAIT(td);
301 mtx_unlock_spin(&sched_lock);
302 td->td_pflags |= TDP_ITHREAD;
303 ithd->it_thread = td;
304 CTR2(KTR_INTR, "%s: created %s", __func__, name);
305 return (ithd);
306 }
307
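/*
 * Ask an interrupt thread to terminate.  We mark it IT_DEAD and, if it is
 * idle in IWAIT, put it on the run queue so that ithread_loop() can notice
 * the flag, free the intr_thread and exit.
 */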
308 static void
309 ithread_destroy2(struct intr_thread *ithread)
310 {
311 struct thread *td;
312
313 CTR2(KTR_INTR, "%s: killing %s", __func__, ithread->it_event->ie_name);
314 td = ithread->it_thread;
315 mtx_lock_spin(&sched_lock);
316 ithread->it_flags |= IT_DEAD;
317 if (TD_AWAITING_INTR(td)) {
318 TD_CLR_IWAIT(td);
319 setrunqueue(td, SRQ_INTR);
320 }
321 mtx_unlock_spin(&sched_lock);
322 }
323
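/*
 * Attach a new handler to an interrupt event.  Handlers are kept sorted by
 * priority; only one exclusive handler may be present on an event, and a
 * backing ithread is created on demand unless the handler is INTR_FAST.
 * Device drivers normally reach this routine indirectly via
 * bus_setup_intr(9).
 */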
324 int
325 intr_event_add_handler(struct intr_event *ie, const char *name,
326 driver_intr_t handler, void *arg, u_char pri, enum intr_type flags,
327 void **cookiep)
328 {
329 struct intr_handler *ih, *temp_ih;
330 struct intr_thread *it;
331
332 if (ie == NULL || name == NULL || handler == NULL)
333 return (EINVAL);
334
335 /* Allocate and populate an interrupt handler structure. */
336 ih = malloc(sizeof(struct intr_handler), M_ITHREAD, M_WAITOK | M_ZERO);
337 ih->ih_handler = handler;
338 ih->ih_argument = arg;
339 ih->ih_name = name;
340 ih->ih_event = ie;
341 ih->ih_pri = pri;
342 if (flags & INTR_FAST)
343 ih->ih_flags = IH_FAST;
344 else if (flags & INTR_EXCL)
345 ih->ih_flags = IH_EXCLUSIVE;
346 if (flags & INTR_MPSAFE)
347 ih->ih_flags |= IH_MPSAFE;
348 if (flags & INTR_ENTROPY)
349 ih->ih_flags |= IH_ENTROPY;
350
351 	/* We can only have one exclusive handler in an event. */
352 mtx_lock(&ie->ie_lock);
353 if (!TAILQ_EMPTY(&ie->ie_handlers)) {
354 if ((flags & INTR_EXCL) ||
355 (TAILQ_FIRST(&ie->ie_handlers)->ih_flags & IH_EXCLUSIVE)) {
356 mtx_unlock(&ie->ie_lock);
357 free(ih, M_ITHREAD);
358 return (EINVAL);
359 }
360 }
361
362 /* Add the new handler to the event in priority order. */
363 TAILQ_FOREACH(temp_ih, &ie->ie_handlers, ih_next) {
364 if (temp_ih->ih_pri > ih->ih_pri)
365 break;
366 }
367 if (temp_ih == NULL)
368 TAILQ_INSERT_TAIL(&ie->ie_handlers, ih, ih_next);
369 else
370 TAILQ_INSERT_BEFORE(temp_ih, ih, ih_next);
371 intr_event_update(ie);
372
373 /* Create a thread if we need one. */
374 while (ie->ie_thread == NULL && !(flags & INTR_FAST)) {
375 if (ie->ie_flags & IE_ADDING_THREAD)
376 msleep(ie, &ie->ie_lock, curthread->td_priority,
377 "ithread", 0);
378 else {
379 ie->ie_flags |= IE_ADDING_THREAD;
380 mtx_unlock(&ie->ie_lock);
381 it = ithread_create("intr: newborn");
382 mtx_lock(&ie->ie_lock);
383 ie->ie_flags &= ~IE_ADDING_THREAD;
384 ie->ie_thread = it;
385 it->it_event = ie;
386 ithread_update(it);
387 wakeup(ie);
388 }
389 }
390 CTR3(KTR_INTR, "%s: added %s to %s", __func__, ih->ih_name,
391 ie->ie_name);
392 mtx_unlock(&ie->ie_lock);
393
394 if (cookiep != NULL)
395 *cookiep = ih;
396 return (0);
397 }
398
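/*
 * Detach a handler using the cookie returned by intr_event_add_handler().
 * If the ithread is currently running (and we are past cold boot), the
 * handler is only marked IH_DEAD here and the ithread performs the actual
 * removal; we sleep until that has happened.
 */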
399 int
400 intr_event_remove_handler(void *cookie)
401 {
402 struct intr_handler *handler = (struct intr_handler *)cookie;
403 struct intr_event *ie;
404 #ifdef INVARIANTS
405 struct intr_handler *ih;
406 #endif
407 #ifdef notyet
408 int dead;
409 #endif
410
411 if (handler == NULL)
412 return (EINVAL);
413 ie = handler->ih_event;
414 KASSERT(ie != NULL,
415 ("interrupt handler \"%s\" has a NULL interrupt event",
416 handler->ih_name));
417 mtx_lock(&ie->ie_lock);
418 CTR3(KTR_INTR, "%s: removing %s from %s", __func__, handler->ih_name,
419 ie->ie_name);
420 #ifdef INVARIANTS
421 TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next)
422 if (ih == handler)
423 goto ok;
424 mtx_unlock(&ie->ie_lock);
425 panic("interrupt handler \"%s\" not found in interrupt event \"%s\"",
426 ih->ih_name, ie->ie_name);
427 ok:
428 #endif
429 /*
430 * If there is no ithread, then just remove the handler and return.
431 * XXX: Note that an INTR_FAST handler might be running on another
432 * CPU!
433 */
434 if (ie->ie_thread == NULL) {
435 TAILQ_REMOVE(&ie->ie_handlers, handler, ih_next);
436 mtx_unlock(&ie->ie_lock);
437 free(handler, M_ITHREAD);
438 return (0);
439 }
440
441 /*
442 * If the interrupt thread is already running, then just mark this
443 * handler as being dead and let the ithread do the actual removal.
444 *
445 * During a cold boot while cold is set, msleep() does not sleep,
446 * so we have to remove the handler here rather than letting the
447 * thread do it.
448 */
449 mtx_lock_spin(&sched_lock);
450 if (!TD_AWAITING_INTR(ie->ie_thread->it_thread) && !cold) {
451 handler->ih_flags |= IH_DEAD;
452
453 /*
454 * Ensure that the thread will process the handler list
455 * again and remove this handler if it has already passed
456 * it on the list.
457 */
458 ie->ie_thread->it_need = 1;
459 } else
460 TAILQ_REMOVE(&ie->ie_handlers, handler, ih_next);
461 mtx_unlock_spin(&sched_lock);
462 while (handler->ih_flags & IH_DEAD)
463 msleep(handler, &ie->ie_lock, curthread->td_priority, "iev_rmh",
464 0);
465 intr_event_update(ie);
466 #ifdef notyet
467 /*
468 * XXX: This could be bad in the case of ppbus(8). Also, I think
469 * this could lead to races of stale data when servicing an
470 * interrupt.
471 */
472 dead = 1;
473 TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next) {
474 if (!(ih->ih_flags & IH_FAST)) {
475 dead = 0;
476 break;
477 }
478 }
479 if (dead) {
480 ithread_destroy2(ie->ie_thread);
481 ie->ie_thread = NULL;
482 }
483 #endif
484 mtx_unlock(&ie->ie_lock);
485 free(handler, M_ITHREAD);
486 return (0);
487 }
488
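/*
 * Called from the MD interrupt dispatch code (and from swi_sched()) to run
 * an event's handlers in its interrupt thread.  We gather entropy if the
 * event asked for it, set it_need, and put the thread on the run queue if
 * it was idle in IWAIT; EINVAL is returned for a stray interrupt with no
 * handlers or no thread.
 */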
489 int
490 intr_event_schedule_thread(struct intr_event *ie)
491 {
492 struct intr_entropy entropy;
493 struct intr_thread *it;
494 struct thread *td;
495 struct thread *ctd;
496 struct proc *p;
497
498 /*
499 * If no ithread or no handlers, then we have a stray interrupt.
500 */
501 if (ie == NULL || TAILQ_EMPTY(&ie->ie_handlers) ||
502 ie->ie_thread == NULL)
503 return (EINVAL);
504
505 ctd = curthread;
506 it = ie->ie_thread;
507 td = it->it_thread;
508 p = td->td_proc;
509
510 /*
511 * If any of the handlers for this ithread claim to be good
512 * sources of entropy, then gather some.
513 */
514 if (harvest.interrupt && ie->ie_flags & IE_ENTROPY) {
515 CTR3(KTR_INTR, "%s: pid %d (%s) gathering entropy", __func__,
516 p->p_pid, p->p_comm);
517 entropy.event = (uintptr_t)ie;
518 entropy.td = ctd;
519 random_harvest(&entropy, sizeof(entropy), 2, 0,
520 RANDOM_INTERRUPT);
521 }
522
523 KASSERT(p != NULL, ("ithread %s has no process", ie->ie_name));
524
525 /*
526 * Set it_need to tell the thread to keep running if it is already
527 * running. Then, grab sched_lock and see if we actually need to
528 * put this thread on the runqueue.
529 */
530 it->it_need = 1;
531 mtx_lock_spin(&sched_lock);
532 if (TD_AWAITING_INTR(td)) {
533 CTR3(KTR_INTR, "%s: schedule pid %d (%s)", __func__, p->p_pid,
534 p->p_comm);
535 TD_CLR_IWAIT(td);
536 setrunqueue(td, SRQ_INTR);
537 } else {
538 CTR5(KTR_INTR, "%s: pid %d (%s): it_need %d, state %d",
539 __func__, p->p_pid, p->p_comm, it->it_need, td->td_state);
540 }
541 mtx_unlock_spin(&sched_lock);
542
543 return (0);
544 }
545
546 /*
547 * Add a software interrupt handler to a specified event. If a given event
548 * is not specified, then a new event is created.
549 */
550 int
551 swi_add(struct intr_event **eventp, const char *name, driver_intr_t handler,
552 void *arg, int pri, enum intr_type flags, void **cookiep)
553 {
554 struct intr_event *ie;
555 int error;
556
557 if (flags & (INTR_FAST | INTR_ENTROPY))
558 return (EINVAL);
559
560 ie = (eventp != NULL) ? *eventp : NULL;
561
562 if (ie != NULL) {
563 if (!(ie->ie_flags & IE_SOFT))
564 return (EINVAL);
565 } else {
566 error = intr_event_create(&ie, NULL, IE_SOFT, NULL,
567 "swi%d:", pri);
568 if (error)
569 return (error);
570 if (eventp != NULL)
571 *eventp = ie;
572 }
573 return (intr_event_add_handler(ie, name, handler, arg,
574 (pri * RQ_PPQ) + PI_SOFT, flags, cookiep));
575 /* XXKSE.. think of a better way to get separate queues */
576 }
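/*
 * Example (illustrative sketch, not part of the original file): a subsystem
 * would typically register a software interrupt handler once and then
 * schedule it from elsewhere in the kernel.  The foo_* names below are
 * hypothetical:
 *
 *	static void *foo_ih;			   hypothetical cookie
 *	static void foo_swi(void *arg);		   hypothetical handler
 *
 *	swi_add(NULL, "foo", foo_swi, NULL, SWI_TQ, INTR_MPSAFE, &foo_ih);
 *	...
 *	swi_sched(foo_ih, 0);			   queue a pass of foo_swi
 */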
577
578 /*
579 * Schedule a software interrupt thread.
580 */
581 void
582 swi_sched(void *cookie, int flags)
583 {
584 struct intr_handler *ih = (struct intr_handler *)cookie;
585 struct intr_event *ie = ih->ih_event;
586 int error;
587
588 PCPU_LAZY_INC(cnt.v_intr);
589
590 CTR3(KTR_INTR, "swi_sched: %s %s need=%d", ie->ie_name, ih->ih_name,
591 ih->ih_need);
592
593 /*
594 * Set ih_need for this handler so that if the ithread is already
595 * running it will execute this handler on the next pass. Otherwise,
596 * it will execute it the next time it runs.
597 */
598 atomic_store_rel_int(&ih->ih_need, 1);
599 if (!(flags & SWI_DELAY)) {
600 error = intr_event_schedule_thread(ie);
601 KASSERT(error == 0, ("stray software interrupt"));
602 }
603 }
604
605 /*
606 * Remove a software interrupt handler. Currently this code does not
607 * remove the associated interrupt event if it becomes empty. Calling code
608 * may do so manually via intr_event_destroy(), but that's not really
609 * an optimal interface.
610 */
611 int
612 swi_remove(void *cookie)
613 {
614
615 return (intr_event_remove_handler(cookie));
616 }
617
618 /* ABI compatibility shims. */
619 #undef ithread_remove_handler
620 #undef ithread_destroy
621 int ithread_remove_handler(void *);
622 int ithread_destroy(struct ithd *);
623
624 int
625 ithread_remove_handler(void *cookie)
626 {
627
628 return (intr_event_remove_handler(cookie));
629 }
630
631 int
632 ithread_destroy(struct ithd *ithread)
633 {
634
635 return (intr_event_destroy(ithread));
636 }
637
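/*
 * Run one pass over an event's handler list from its interrupt thread:
 * handlers marked IH_DEAD are unlinked and their removers woken, soft
 * handlers run only when their ih_need flag is set, fast handlers are
 * skipped (they already ran in primary interrupt context), and Giant is
 * taken around any handler that is not marked MPSAFE.  Afterwards we apply
 * the interrupt-storm throttle and re-enable the interrupt source.
 */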
638 static void
639 ithread_execute_handlers(struct proc *p, struct intr_event *ie)
640 {
641 struct intr_handler *ih, *ihn;
642
643 /* Interrupt handlers should not sleep. */
644 if (!(ie->ie_flags & IE_SOFT))
645 THREAD_NO_SLEEPING();
646 TAILQ_FOREACH_SAFE(ih, &ie->ie_handlers, ih_next, ihn) {
647
648 /*
649 * If this handler is marked for death, remove it from
650 * the list of handlers and wake up the sleeper.
651 */
652 if (ih->ih_flags & IH_DEAD) {
653 mtx_lock(&ie->ie_lock);
654 TAILQ_REMOVE(&ie->ie_handlers, ih, ih_next);
655 ih->ih_flags &= ~IH_DEAD;
656 wakeup(ih);
657 mtx_unlock(&ie->ie_lock);
658 continue;
659 }
660
661 /*
662 * For software interrupt threads, we only execute
663 * handlers that have their need flag set. Hardware
664 * interrupt threads always invoke all of their handlers.
665 */
666 if (ie->ie_flags & IE_SOFT) {
667 if (!ih->ih_need)
668 continue;
669 else
670 atomic_store_rel_int(&ih->ih_need, 0);
671 }
672
673 /* Fast handlers are handled in primary interrupt context. */
674 if (ih->ih_flags & IH_FAST)
675 continue;
676
677 /* Execute this handler. */
678 CTR6(KTR_INTR, "%s: pid %d exec %p(%p) for %s flg=%x",
679 __func__, p->p_pid, (void *)ih->ih_handler, ih->ih_argument,
680 ih->ih_name, ih->ih_flags);
681
682 if (!(ih->ih_flags & IH_MPSAFE))
683 mtx_lock(&Giant);
684 ih->ih_handler(ih->ih_argument);
685 if (!(ih->ih_flags & IH_MPSAFE))
686 mtx_unlock(&Giant);
687 }
688 if (!(ie->ie_flags & IE_SOFT))
689 THREAD_SLEEPING_OK();
690
691 /*
692 * Interrupt storm handling:
693 *
694 * If this interrupt source is currently storming, then throttle
695 * it to only fire the handler once per clock tick.
696 *
697 * If this interrupt source is not currently storming, but the
698 * number of back to back interrupts exceeds the storm threshold,
699 * then enter storming mode.
700 */
701 if (intr_storm_threshold != 0 && ie->ie_count >= intr_storm_threshold) {
702 if (ie->ie_warned == 0) {
703 printf(
704 "Interrupt storm detected on \"%s\"; throttling interrupt source\n",
705 ie->ie_name);
706 ie->ie_warned = 1;
707 }
708 tsleep(&ie->ie_count, curthread->td_priority, "istorm", 1);
709 } else
710 ie->ie_count++;
711
712 /*
713 * Now that all the handlers have had a chance to run, reenable
714 * the interrupt source.
715 */
716 if (ie->ie_enable != NULL)
717 ie->ie_enable(ie->ie_source);
718 }
719
720 /*
721 * This is the main code for interrupt threads.
722 */
723 static void
724 ithread_loop(void *arg)
725 {
726 struct intr_thread *ithd;
727 struct intr_event *ie;
728 struct thread *td;
729 struct proc *p;
730
731 td = curthread;
732 p = td->td_proc;
733 ithd = (struct intr_thread *)arg;
734 KASSERT(ithd->it_thread == td,
735 ("%s: ithread and proc linkage out of sync", __func__));
736 ie = ithd->it_event;
737 ie->ie_count = 0;
738
739 /*
740 * As long as we have interrupts outstanding, go through the
741 * list of handlers, giving each one a go at it.
742 */
743 for (;;) {
744 /*
745 * If we are an orphaned thread, then just die.
746 */
747 if (ithd->it_flags & IT_DEAD) {
748 CTR3(KTR_INTR, "%s: pid %d (%s) exiting", __func__,
749 p->p_pid, p->p_comm);
750 free(ithd, M_ITHREAD);
751 kthread_exit(0);
752 }
753
754 /*
755 * Service interrupts. If another interrupt arrives while
756 * we are running, it will set it_need to note that we
757 * should make another pass.
758 */
759 while (ithd->it_need) {
760 /*
761 * This might need a full read and write barrier
762 * to make sure that this write posts before any
763 * of the memory or device accesses in the
764 * handlers.
765 */
766 atomic_store_rel_int(&ithd->it_need, 0);
767 ithread_execute_handlers(p, ie);
768 }
769 WITNESS_WARN(WARN_PANIC, NULL, "suspending ithread");
770 mtx_assert(&Giant, MA_NOTOWNED);
771
772 /*
773 * Processed all our interrupts. Now get the sched
774 * lock. This may take a while and it_need may get
775 * set again, so we have to check it again.
776 */
777 mtx_lock_spin(&sched_lock);
778 if (!ithd->it_need && !(ithd->it_flags & IT_DEAD)) {
779 TD_SET_IWAIT(td);
780 ie->ie_count = 0;
781 mi_switch(SW_VOL, NULL);
782 }
783 mtx_unlock_spin(&sched_lock);
784 }
785 }
786
787 #ifdef DDB
788 /*
789 * Dump details about an interrupt handler
790 */
791 static void
792 db_dump_intrhand(struct intr_handler *ih)
793 {
794 int comma;
795
796 db_printf("\t%-10s ", ih->ih_name);
797 switch (ih->ih_pri) {
798 case PI_REALTIME:
799 db_printf("CLK ");
800 break;
801 case PI_AV:
802 db_printf("AV ");
803 break;
804 case PI_TTYHIGH:
805 case PI_TTYLOW:
806 db_printf("TTY ");
807 break;
808 case PI_TAPE:
809 db_printf("TAPE");
810 break;
811 case PI_NET:
812 db_printf("NET ");
813 break;
814 case PI_DISK:
815 case PI_DISKLOW:
816 db_printf("DISK");
817 break;
818 case PI_DULL:
819 db_printf("DULL");
820 break;
821 default:
822 if (ih->ih_pri >= PI_SOFT)
823 db_printf("SWI ");
824 else
825 db_printf("%4u", ih->ih_pri);
826 break;
827 }
828 db_printf(" ");
829 db_printsym((uintptr_t)ih->ih_handler, DB_STGY_PROC);
830 db_printf("(%p)", ih->ih_argument);
831 if (ih->ih_need ||
832 (ih->ih_flags & (IH_FAST | IH_EXCLUSIVE | IH_ENTROPY | IH_DEAD |
833 IH_MPSAFE)) != 0) {
834 db_printf(" {");
835 comma = 0;
836 if (ih->ih_flags & IH_FAST) {
837 db_printf("FAST");
838 comma = 1;
839 }
840 if (ih->ih_flags & IH_EXCLUSIVE) {
841 if (comma)
842 db_printf(", ");
843 db_printf("EXCL");
844 comma = 1;
845 }
846 if (ih->ih_flags & IH_ENTROPY) {
847 if (comma)
848 db_printf(", ");
849 db_printf("ENTROPY");
850 comma = 1;
851 }
852 if (ih->ih_flags & IH_DEAD) {
853 if (comma)
854 db_printf(", ");
855 db_printf("DEAD");
856 comma = 1;
857 }
858 if (ih->ih_flags & IH_MPSAFE) {
859 if (comma)
860 db_printf(", ");
861 db_printf("MPSAFE");
862 comma = 1;
863 }
864 if (ih->ih_need) {
865 if (comma)
866 db_printf(", ");
867 db_printf("NEED");
868 }
869 db_printf("}");
870 }
871 db_printf("\n");
872 }
873
874 /*
875  * Dump details about an event.
876 */
877 void
878 db_dump_intr_event(struct intr_event *ie, int handlers)
879 {
880 struct intr_handler *ih;
881 struct intr_thread *it;
882 int comma;
883
884 db_printf("%s ", ie->ie_fullname);
885 it = ie->ie_thread;
886 if (it != NULL)
887 db_printf("(pid %d)", it->it_thread->td_proc->p_pid);
888 else
889 db_printf("(no thread)");
890 if ((ie->ie_flags & (IE_SOFT | IE_ENTROPY | IE_ADDING_THREAD)) != 0 ||
891 (it != NULL && it->it_need)) {
892 db_printf(" {");
893 comma = 0;
894 if (ie->ie_flags & IE_SOFT) {
895 db_printf("SOFT");
896 comma = 1;
897 }
898 if (ie->ie_flags & IE_ENTROPY) {
899 if (comma)
900 db_printf(", ");
901 db_printf("ENTROPY");
902 comma = 1;
903 }
904 if (ie->ie_flags & IE_ADDING_THREAD) {
905 if (comma)
906 db_printf(", ");
907 db_printf("ADDING_THREAD");
908 comma = 1;
909 }
910 if (it != NULL && it->it_need) {
911 if (comma)
912 db_printf(", ");
913 db_printf("NEED");
914 }
915 db_printf("}");
916 }
917 db_printf("\n");
918
919 if (handlers)
920 TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next)
921 db_dump_intrhand(ih);
922 }
923
924 /*
925 * Dump data about interrupt handlers
926 */
927 DB_SHOW_COMMAND(intr, db_show_intr)
928 {
929 struct intr_event *ie;
930 int quit, all, verbose;
931
932 quit = 0;
933 verbose = index(modif, 'v') != NULL;
934 all = index(modif, 'a') != NULL;
935 db_setup_paging(db_simple_pager, &quit, db_lines_per_page);
936 TAILQ_FOREACH(ie, &event_list, ie_list) {
937 if (!all && TAILQ_EMPTY(&ie->ie_handlers))
938 continue;
939 db_dump_intr_event(ie, verbose);
940 }
941 }
942 #endif /* DDB */
943
944 /*
945 * Start standard software interrupt threads
946 */
947 static void
948 start_softintr(void *dummy)
949 {
950 struct proc *p;
951
952 if (swi_add(&clk_intr_event, "clock", softclock, NULL, SWI_CLOCK,
953 INTR_MPSAFE, &softclock_ih) ||
954 swi_add(NULL, "vm", swi_vm, NULL, SWI_VM, INTR_MPSAFE, &vm_ih))
955 panic("died while creating standard software ithreads");
956
957 p = clk_intr_event->ie_thread->it_thread->td_proc;
958 PROC_LOCK(p);
959 p->p_flag |= P_NOLOAD;
960 PROC_UNLOCK(p);
961 }
962 SYSINIT(start_softintr, SI_SUB_SOFTINTR, SI_ORDER_FIRST, start_softintr, NULL)
963
964 /*
965 * Sysctls used by systat and others: hw.intrnames and hw.intrcnt.
966  * The data for this is machine dependent, and the declarations are in machine
967 * dependent code. The layout of intrnames and intrcnt however is machine
968 * independent.
969 *
970 * We do not know the length of intrcnt and intrnames at compile time, so
971 * calculate things at run time.
972 */
973 static int
974 sysctl_intrnames(SYSCTL_HANDLER_ARGS)
975 {
976 return (sysctl_handle_opaque(oidp, intrnames, eintrnames - intrnames,
977 req));
978 }
979
980 SYSCTL_PROC(_hw, OID_AUTO, intrnames, CTLTYPE_OPAQUE | CTLFLAG_RD,
981 NULL, 0, sysctl_intrnames, "", "Interrupt Names");
982
983 static int
984 sysctl_intrcnt(SYSCTL_HANDLER_ARGS)
985 {
986 return (sysctl_handle_opaque(oidp, intrcnt,
987 (char *)eintrcnt - (char *)intrcnt, req));
988 }
989
990 SYSCTL_PROC(_hw, OID_AUTO, intrcnt, CTLTYPE_OPAQUE | CTLFLAG_RD,
991 NULL, 0, sysctl_intrcnt, "", "Interrupt Counts");
992
993 #ifdef DDB
994 /*
995 * DDB command to dump the interrupt statistics.
996 */
997 DB_SHOW_COMMAND(intrcnt, db_show_intrcnt)
998 {
999 u_long *i;
1000 char *cp;
1001 int quit;
1002
1003 cp = intrnames;
1004 db_setup_paging(db_simple_pager, &quit, db_lines_per_page);
1005 for (i = intrcnt, quit = 0; i != eintrcnt && !quit; i++) {
1006 if (*cp == '\0')
1007 break;
1008 if (*i != 0)
1009 db_printf("%s\t%lu\n", cp, *i);
1010 cp += strlen(cp) + 1;
1011 }
1012 }
1013 #endif