FreeBSD/Linux Kernel Cross Reference
sys/kern/kern_intr.c
1 /*-
2 * Copyright (c) 1997, Stefan Esser <se@freebsd.org>
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice unmodified, this list of conditions, and the following
10 * disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 *
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
16 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
17 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
18 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
19 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
20 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
21 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
22 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
24 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25 */
26
27 #include <sys/cdefs.h>
28 __FBSDID("$FreeBSD: releng/6.4/sys/kern/kern_intr.c 173156 2007-10-29 21:10:03Z emaste $");
29
30 #include "opt_ddb.h"
31
32 #include <sys/param.h>
33 #include <sys/bus.h>
34 #include <sys/conf.h>
35 #include <sys/rtprio.h>
36 #include <sys/systm.h>
37 #include <sys/interrupt.h>
38 #include <sys/kernel.h>
39 #include <sys/kthread.h>
40 #include <sys/ktr.h>
41 #include <sys/limits.h>
42 #include <sys/lock.h>
43 #include <sys/malloc.h>
44 #include <sys/mutex.h>
45 #include <sys/proc.h>
46 #include <sys/random.h>
47 #include <sys/resourcevar.h>
48 #include <sys/sched.h>
49 #include <sys/sysctl.h>
50 #include <sys/unistd.h>
51 #include <sys/vmmeter.h>
52 #include <machine/atomic.h>
53 #include <machine/cpu.h>
54 #include <machine/md_var.h>
55 #include <machine/stdarg.h>
56 #ifdef DDB
57 #include <ddb/ddb.h>
58 #include <ddb/db_sym.h>
59 #endif
60
61 /*
62 * Describe an interrupt thread. There is one of these per interrupt event.
63 */
64 struct intr_thread {
65 struct intr_event *it_event;
66 struct thread *it_thread; /* Kernel thread. */
67 int it_flags; /* (j) IT_* flags. */
68 int it_need; /* Needs service. */
69 };
70
71 /* Interrupt thread flags kept in it_flags */
72 #define IT_DEAD 0x000001 /* Thread is waiting to exit. */
73
74 struct intr_entropy {
75 struct thread *td;
76 uintptr_t event;
77 };
78
79 struct intr_event *clk_intr_event;
80 struct intr_event *tty_intr_event;
81 void *softclock_ih;
82 void *vm_ih;
83
84 static MALLOC_DEFINE(M_ITHREAD, "ithread", "Interrupt Threads");
85
86 static int intr_storm_threshold = 1000;
87 TUNABLE_INT("hw.intr_storm_threshold", &intr_storm_threshold);
88 SYSCTL_INT(_hw, OID_AUTO, intr_storm_threshold, CTLFLAG_RW,
89 &intr_storm_threshold, 0,
90 "Number of consecutive interrupts before storm protection is enabled");
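/*
 * Editor's note: as the storm check in ithread_execute_handlers() below
 * implies, setting hw.intr_storm_threshold to 0 (as a loader.conf
 * tunable or at runtime with sysctl(8)) disables storm protection
 * entirely.
 */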
91 static TAILQ_HEAD(, intr_event) event_list =
92 TAILQ_HEAD_INITIALIZER(event_list);
93
94 static void intr_event_update(struct intr_event *ie);
95 static struct intr_thread *ithread_create(const char *name);
96 static void ithread_destroy2(struct intr_thread *ithread);
97 static void ithread_execute_handlers(struct proc *p, struct intr_event *ie);
98 static void ithread_loop(void *);
99 static void ithread_update(struct intr_thread *ithd);
100 static void start_softintr(void *);
101
102 u_char
103 intr_priority(enum intr_type flags)
104 {
105 u_char pri;
106
107 flags &= (INTR_TYPE_TTY | INTR_TYPE_BIO | INTR_TYPE_NET |
108 INTR_TYPE_CAM | INTR_TYPE_MISC | INTR_TYPE_CLK | INTR_TYPE_AV);
109 switch (flags) {
110 case INTR_TYPE_TTY:
111 pri = PI_TTYLOW;
112 break;
113 case INTR_TYPE_BIO:
114 /*
115 * XXX We need to refine this. BSD/OS distinguishes
116 * between tape and disk priorities.
117 */
118 pri = PI_DISK;
119 break;
120 case INTR_TYPE_NET:
121 pri = PI_NET;
122 break;
123 case INTR_TYPE_CAM:
124 pri = PI_DISK; /* XXX or PI_CAM? */
125 break;
126 case INTR_TYPE_AV: /* Audio/video */
127 pri = PI_AV;
128 break;
129 case INTR_TYPE_CLK:
130 pri = PI_REALTIME;
131 break;
132 case INTR_TYPE_MISC:
133 pri = PI_DULL; /* don't care */
134 break;
135 default:
136 /* We didn't specify an interrupt level. */
137 panic("intr_priority: no interrupt type in flags");
138 }
139
140 return pri;
141 }
142
143 /*
144 * Update an ithread based on the associated intr_event.
145 */
146 static void
147 ithread_update(struct intr_thread *ithd)
148 {
149 struct intr_event *ie;
150 struct thread *td;
151 u_char pri;
152
153 ie = ithd->it_event;
154 td = ithd->it_thread;
155
156 /* Determine the overall priority of this event. */
157 if (TAILQ_EMPTY(&ie->ie_handlers))
158 pri = PRI_MAX_ITHD;
159 else
160 pri = TAILQ_FIRST(&ie->ie_handlers)->ih_pri;
161
162 /* Update name and priority. */
163 strlcpy(td->td_proc->p_comm, ie->ie_fullname,
164 sizeof(td->td_proc->p_comm));
165 mtx_lock_spin(&sched_lock);
166 sched_prio(td, pri);
167 mtx_unlock_spin(&sched_lock);
168 }
169
170 /*
171 * Regenerate the full name of an interrupt event and update its priority.
172 */
173 static void
174 intr_event_update(struct intr_event *ie)
175 {
176 struct intr_handler *ih;
177 char *last;
178 int missed, space;
179
180 /* Start off with no entropy and just the name of the event. */
181 mtx_assert(&ie->ie_lock, MA_OWNED);
182 strlcpy(ie->ie_fullname, ie->ie_name, sizeof(ie->ie_fullname));
183 ie->ie_flags &= ~IE_ENTROPY;
184 missed = 0;
185 space = 1;
186
187 /* Run through all the handlers updating values. */
188 TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next) {
189 if (strlen(ie->ie_fullname) + strlen(ih->ih_name) + 1 <
190 sizeof(ie->ie_fullname)) {
191 strcat(ie->ie_fullname, " ");
192 strcat(ie->ie_fullname, ih->ih_name);
193 space = 0;
194 } else
195 missed++;
196 if (ih->ih_flags & IH_ENTROPY)
197 ie->ie_flags |= IE_ENTROPY;
198 }
199
200 /*
201 * If the handler names were too long, add +'s to indicate missing
202 * names. If we run out of room and still have +'s to add, change
203 * the last character from a + to a *.
204 */
205 last = &ie->ie_fullname[sizeof(ie->ie_fullname) - 2];
206 while (missed-- > 0) {
207 if (strlen(ie->ie_fullname) + 1 == sizeof(ie->ie_fullname)) {
208 if (*last == '+') {
209 *last = '*';
210 break;
211 } else
212 *last = '+';
213 } else if (space) {
214 strcat(ie->ie_fullname, " +");
215 space = 0;
216 } else
217 strcat(ie->ie_fullname, "+");
218 }
219
220 /*
 221 	 * If this event has an ithread, update its priority and
222 * name.
223 */
224 if (ie->ie_thread != NULL)
225 ithread_update(ie->ie_thread);
226 CTR2(KTR_INTR, "%s: updated %s", __func__, ie->ie_fullname);
227 }
228
229 int
230 intr_event_create(struct intr_event **event, void *source, int flags,
231 void (*enable)(void *), const char *fmt, ...)
232 {
233 struct intr_event *ie;
234 va_list ap;
235
236 /* The only valid flag during creation is IE_SOFT. */
237 if ((flags & ~IE_SOFT) != 0)
238 return (EINVAL);
239 ie = malloc(sizeof(struct intr_event), M_ITHREAD, M_WAITOK | M_ZERO);
240 ie->ie_source = source;
241 ie->ie_enable = enable;
242 ie->ie_flags = flags;
243 TAILQ_INIT(&ie->ie_handlers);
244 mtx_init(&ie->ie_lock, "intr event", NULL, MTX_DEF);
245
246 va_start(ap, fmt);
247 vsnprintf(ie->ie_name, sizeof(ie->ie_name), fmt, ap);
248 va_end(ap);
249 strlcpy(ie->ie_fullname, ie->ie_name, sizeof(ie->ie_fullname));
250 mtx_pool_lock(mtxpool_sleep, &event_list);
251 TAILQ_INSERT_TAIL(&event_list, ie, ie_list);
252 mtx_pool_unlock(mtxpool_sleep, &event_list);
253 if (event != NULL)
254 *event = ie;
255 CTR2(KTR_INTR, "%s: created %s", __func__, ie->ie_name);
256 return (0);
257 }
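/*
 * Editor's sketch (hypothetical, not part of this file): machine-dependent
 * interrupt setup code would typically create one event per IRQ source,
 * roughly like:
 *
 *	struct intr_event *ie;
 *	int error;
 *
 *	error = intr_event_create(&ie, isrc, 0, my_irq_enable,
 *	    "irq%d:", vector);
 *	if (error != 0)
 *		return (error);
 *
 * where isrc, my_irq_enable and vector stand in for the MD interrupt
 * source, its enable routine and its vector number.
 */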
258
259 int
260 intr_event_destroy(struct intr_event *ie)
261 {
262
263 mtx_lock(&ie->ie_lock);
264 if (!TAILQ_EMPTY(&ie->ie_handlers)) {
265 mtx_unlock(&ie->ie_lock);
266 return (EBUSY);
267 }
268 mtx_pool_lock(mtxpool_sleep, &event_list);
269 TAILQ_REMOVE(&event_list, ie, ie_list);
270 mtx_pool_unlock(mtxpool_sleep, &event_list);
271 #ifndef notyet
272 if (ie->ie_thread != NULL) {
273 ithread_destroy2(ie->ie_thread);
274 ie->ie_thread = NULL;
275 }
276 #endif
277 mtx_unlock(&ie->ie_lock);
278 mtx_destroy(&ie->ie_lock);
279 free(ie, M_ITHREAD);
280 return (0);
281 }
282
283 static struct intr_thread *
284 ithread_create(const char *name)
285 {
286 struct intr_thread *ithd;
287 struct thread *td;
288 struct proc *p;
289 int error;
290
291 ithd = malloc(sizeof(struct intr_thread), M_ITHREAD, M_WAITOK | M_ZERO);
292
293 error = kthread_create(ithread_loop, ithd, &p, RFSTOPPED | RFHIGHPID,
294 0, "%s", name);
295 if (error)
296 panic("kthread_create() failed with %d", error);
297 td = FIRST_THREAD_IN_PROC(p); /* XXXKSE */
298 mtx_lock_spin(&sched_lock);
299 td->td_ksegrp->kg_pri_class = PRI_ITHD;
300 TD_SET_IWAIT(td);
301 mtx_unlock_spin(&sched_lock);
302 td->td_pflags |= TDP_ITHREAD;
303 ithd->it_thread = td;
304 CTR2(KTR_INTR, "%s: created %s", __func__, name);
305 return (ithd);
306 }
307
308 static void
309 ithread_destroy2(struct intr_thread *ithread)
310 {
311 struct thread *td;
312
313 CTR2(KTR_INTR, "%s: killing %s", __func__, ithread->it_event->ie_name);
314 td = ithread->it_thread;
315 mtx_lock_spin(&sched_lock);
316 ithread->it_flags |= IT_DEAD;
317 if (TD_AWAITING_INTR(td)) {
318 TD_CLR_IWAIT(td);
319 setrunqueue(td, SRQ_INTR);
320 }
321 mtx_unlock_spin(&sched_lock);
322 }
323
324 int
325 intr_event_add_handler(struct intr_event *ie, const char *name,
326 driver_intr_t handler, void *arg, u_char pri, enum intr_type flags,
327 void **cookiep)
328 {
329 struct intr_handler *ih, *temp_ih;
330 struct intr_thread *it;
331
332 if (ie == NULL || name == NULL || handler == NULL)
333 return (EINVAL);
334
335 /* Allocate and populate an interrupt handler structure. */
336 ih = malloc(sizeof(struct intr_handler), M_ITHREAD, M_WAITOK | M_ZERO);
337 ih->ih_handler = handler;
338 ih->ih_argument = arg;
339 ih->ih_name = name;
340 ih->ih_event = ie;
341 ih->ih_pri = pri;
342 if (flags & INTR_FAST)
343 ih->ih_flags = IH_FAST;
344 else if (flags & INTR_EXCL)
345 ih->ih_flags = IH_EXCLUSIVE;
346 if (flags & INTR_MPSAFE)
347 ih->ih_flags |= IH_MPSAFE;
348 if (flags & INTR_ENTROPY)
349 ih->ih_flags |= IH_ENTROPY;
350
 351 	/* We can only have one exclusive handler in an event. */
352 mtx_lock(&ie->ie_lock);
353 if (!TAILQ_EMPTY(&ie->ie_handlers)) {
354 if ((flags & INTR_EXCL) ||
355 (TAILQ_FIRST(&ie->ie_handlers)->ih_flags & IH_EXCLUSIVE)) {
356 mtx_unlock(&ie->ie_lock);
357 free(ih, M_ITHREAD);
358 return (EINVAL);
359 }
360 }
361
362 /* Add the new handler to the event in priority order. */
363 TAILQ_FOREACH(temp_ih, &ie->ie_handlers, ih_next) {
364 if (temp_ih->ih_pri > ih->ih_pri)
365 break;
366 }
367 if (temp_ih == NULL)
368 TAILQ_INSERT_TAIL(&ie->ie_handlers, ih, ih_next);
369 else
370 TAILQ_INSERT_BEFORE(temp_ih, ih, ih_next);
371 intr_event_update(ie);
372
373 /* Create a thread if we need one. */
374 while (ie->ie_thread == NULL && !(flags & INTR_FAST)) {
375 if (ie->ie_flags & IE_ADDING_THREAD)
376 msleep(ie, &ie->ie_lock, 0, "ithread", 0);
377 else {
378 ie->ie_flags |= IE_ADDING_THREAD;
379 mtx_unlock(&ie->ie_lock);
380 it = ithread_create("intr: newborn");
381 mtx_lock(&ie->ie_lock);
382 ie->ie_flags &= ~IE_ADDING_THREAD;
383 ie->ie_thread = it;
384 it->it_event = ie;
385 ithread_update(it);
386 wakeup(ie);
387 }
388 }
389 CTR3(KTR_INTR, "%s: added %s to %s", __func__, ih->ih_name,
390 ie->ie_name);
391 mtx_unlock(&ie->ie_lock);
392
393 if (cookiep != NULL)
394 *cookiep = ih;
395 return (0);
396 }
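/*
 * Editor's note: drivers do not usually call intr_event_add_handler()
 * directly; they call bus_setup_intr(9), which is routed through the MD
 * interrupt code and lands here.  A sketch of a 6.x-era attach routine
 * (mydev_intr, sc and sc->irq_res are hypothetical driver names):
 *
 *	error = bus_setup_intr(dev, sc->irq_res,
 *	    INTR_TYPE_NET | INTR_MPSAFE, mydev_intr, sc, &sc->intrhand);
 *
 * The INTR_TYPE_* flag determines the thread priority via
 * intr_priority() above, and INTR_MPSAFE becomes IH_MPSAFE, letting the
 * handler run without Giant.
 */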
397
398 int
399 intr_event_remove_handler(void *cookie)
400 {
401 struct intr_handler *handler = (struct intr_handler *)cookie;
402 struct intr_event *ie;
403 #ifdef INVARIANTS
404 struct intr_handler *ih;
405 #endif
406 #ifdef notyet
407 int dead;
408 #endif
409
410 if (handler == NULL)
411 return (EINVAL);
412 ie = handler->ih_event;
413 KASSERT(ie != NULL,
414 ("interrupt handler \"%s\" has a NULL interrupt event",
415 handler->ih_name));
416 mtx_lock(&ie->ie_lock);
417 CTR3(KTR_INTR, "%s: removing %s from %s", __func__, handler->ih_name,
418 ie->ie_name);
419 #ifdef INVARIANTS
420 TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next)
421 if (ih == handler)
422 goto ok;
423 mtx_unlock(&ie->ie_lock);
424 panic("interrupt handler \"%s\" not found in interrupt event \"%s\"",
425 ih->ih_name, ie->ie_name);
426 ok:
427 #endif
428 /*
429 * If there is no ithread, then just remove the handler and return.
430 * XXX: Note that an INTR_FAST handler might be running on another
431 * CPU!
432 */
433 if (ie->ie_thread == NULL) {
434 TAILQ_REMOVE(&ie->ie_handlers, handler, ih_next);
435 mtx_unlock(&ie->ie_lock);
436 free(handler, M_ITHREAD);
437 return (0);
438 }
439
440 /*
441 * If the interrupt thread is already running, then just mark this
442 * handler as being dead and let the ithread do the actual removal.
443 *
444 * During a cold boot while cold is set, msleep() does not sleep,
445 * so we have to remove the handler here rather than letting the
446 * thread do it.
447 */
448 mtx_lock_spin(&sched_lock);
449 if (!TD_AWAITING_INTR(ie->ie_thread->it_thread) && !cold) {
450 handler->ih_flags |= IH_DEAD;
451
452 /*
453 * Ensure that the thread will process the handler list
454 * again and remove this handler if it has already passed
455 * it on the list.
456 */
457 ie->ie_thread->it_need = 1;
458 } else
459 TAILQ_REMOVE(&ie->ie_handlers, handler, ih_next);
460 mtx_unlock_spin(&sched_lock);
461 while (handler->ih_flags & IH_DEAD)
462 msleep(handler, &ie->ie_lock, 0, "iev_rmh", 0);
463 intr_event_update(ie);
464 #ifdef notyet
465 /*
 466 	 * XXX: This could be bad in the case of ppbus(4). Also, I think
467 * this could lead to races of stale data when servicing an
468 * interrupt.
469 */
470 dead = 1;
471 TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next) {
472 if (!(ih->ih_flags & IH_FAST)) {
473 dead = 0;
474 break;
475 }
476 }
477 if (dead) {
478 ithread_destroy2(ie->ie_thread);
479 ie->ie_thread = NULL;
480 }
481 #endif
482 mtx_unlock(&ie->ie_lock);
483 free(handler, M_ITHREAD);
484 return (0);
485 }
486
487 int
488 intr_event_schedule_thread(struct intr_event *ie)
489 {
490 struct intr_entropy entropy;
491 struct intr_thread *it;
492 struct thread *td;
493 struct thread *ctd;
494 struct proc *p;
495
496 /*
497 * If no ithread or no handlers, then we have a stray interrupt.
498 */
499 if (ie == NULL || TAILQ_EMPTY(&ie->ie_handlers) ||
500 ie->ie_thread == NULL)
501 return (EINVAL);
502
503 ctd = curthread;
504 it = ie->ie_thread;
505 td = it->it_thread;
506 p = td->td_proc;
507
508 /*
509 * If any of the handlers for this ithread claim to be good
510 * sources of entropy, then gather some.
511 */
512 if (harvest.interrupt && ie->ie_flags & IE_ENTROPY) {
513 CTR3(KTR_INTR, "%s: pid %d (%s) gathering entropy", __func__,
514 p->p_pid, p->p_comm);
515 entropy.event = (uintptr_t)ie;
516 entropy.td = ctd;
517 random_harvest(&entropy, sizeof(entropy), 2, 0,
518 RANDOM_INTERRUPT);
519 }
520
521 KASSERT(p != NULL, ("ithread %s has no process", ie->ie_name));
522
523 /*
524 * Set it_need to tell the thread to keep running if it is already
525 * running. Then, grab sched_lock and see if we actually need to
526 * put this thread on the runqueue.
527 */
528 it->it_need = 1;
529 mtx_lock_spin(&sched_lock);
530 if (TD_AWAITING_INTR(td)) {
531 CTR3(KTR_INTR, "%s: schedule pid %d (%s)", __func__, p->p_pid,
532 p->p_comm);
533 TD_CLR_IWAIT(td);
534 setrunqueue(td, SRQ_INTR);
535 } else {
536 CTR5(KTR_INTR, "%s: pid %d (%s): it_need %d, state %d",
537 __func__, p->p_pid, p->p_comm, it->it_need, td->td_state);
538 }
539 mtx_unlock_spin(&sched_lock);
540
541 return (0);
542 }
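/*
 * Editor's note: the callers of intr_event_schedule_thread() are the
 * machine-dependent interrupt dispatch code (for hardware interrupts)
 * and swi_sched() below (for software interrupts); an EINVAL return
 * indicates a stray interrupt with no handlers to run.
 */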
543
544 /*
545 * Add a software interrupt handler to a specified event. If a given event
546 * is not specified, then a new event is created.
547 */
548 int
549 swi_add(struct intr_event **eventp, const char *name, driver_intr_t handler,
550 void *arg, int pri, enum intr_type flags, void **cookiep)
551 {
552 struct intr_event *ie;
553 int error;
554
555 if (flags & (INTR_FAST | INTR_ENTROPY))
556 return (EINVAL);
557
558 ie = (eventp != NULL) ? *eventp : NULL;
559
560 if (ie != NULL) {
561 if (!(ie->ie_flags & IE_SOFT))
562 return (EINVAL);
563 } else {
564 error = intr_event_create(&ie, NULL, IE_SOFT, NULL,
565 "swi%d:", pri);
566 if (error)
567 return (error);
568 if (eventp != NULL)
569 *eventp = ie;
570 }
571 return (intr_event_add_handler(ie, name, handler, arg,
572 (pri * RQ_PPQ) + PI_SOFT, flags, cookiep));
 573 	/* XXXKSE.. think of a better way to get separate queues */
574 }
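/*
 * Editor's sketch (hypothetical names): a subsystem would register a
 * software interrupt like this, and later kick it with
 * swi_sched(mydev_swi_cookie, 0):
 *
 *	static void *mydev_swi_cookie;
 *
 *	error = swi_add(NULL, "mydev", mydev_swi, NULL, SWI_TQ,
 *	    INTR_MPSAFE, &mydev_swi_cookie);
 *
 * Note that INTR_FAST and INTR_ENTROPY are rejected for SWIs above.
 */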
575
576 /*
577 * Schedule a software interrupt thread.
578 */
579 void
580 swi_sched(void *cookie, int flags)
581 {
582 struct intr_handler *ih = (struct intr_handler *)cookie;
583 struct intr_event *ie = ih->ih_event;
584 int error;
585
586 PCPU_LAZY_INC(cnt.v_intr);
587
588 CTR3(KTR_INTR, "swi_sched: %s %s need=%d", ie->ie_name, ih->ih_name,
589 ih->ih_need);
590
591 /*
592 * Set ih_need for this handler so that if the ithread is already
593 * running it will execute this handler on the next pass. Otherwise,
594 * it will execute it the next time it runs.
595 */
596 atomic_store_rel_int(&ih->ih_need, 1);
597 if (!(flags & SWI_DELAY)) {
598 error = intr_event_schedule_thread(ie);
599 KASSERT(error == 0, ("stray software interrupt"));
600 }
601 }
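/*
 * Editor's note: a representative in-tree caller is the clock code,
 * which wakes the softclock thread with swi_sched(softclock_ih, 0).
 * Passing SWI_DELAY sets ih_need but defers scheduling the thread.
 */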
602
603 /*
604 * Remove a software interrupt handler. Currently this code does not
605 * remove the associated interrupt event if it becomes empty. Calling code
606 * may do so manually via intr_event_destroy(), but that's not really
607 * an optimal interface.
608 */
609 int
610 swi_remove(void *cookie)
611 {
612
613 return (intr_event_remove_handler(cookie));
614 }
615
616 /* ABI compatibility shims. */
617 #undef ithread_remove_handler
618 #undef ithread_destroy
619 int ithread_remove_handler(void *);
620 int ithread_destroy(struct ithd *);
621
622 int
623 ithread_remove_handler(void *cookie)
624 {
625
626 return (intr_event_remove_handler(cookie));
627 }
628
629 int
630 ithread_destroy(struct ithd *ithread)
631 {
632
633 return (intr_event_destroy(ithread));
634 }
635
636 static void
637 ithread_execute_handlers(struct proc *p, struct intr_event *ie)
638 {
639 struct intr_handler *ih, *ihn;
640
641 /* Interrupt handlers should not sleep. */
642 if (!(ie->ie_flags & IE_SOFT))
643 THREAD_NO_SLEEPING();
644 TAILQ_FOREACH_SAFE(ih, &ie->ie_handlers, ih_next, ihn) {
645
646 /*
647 * If this handler is marked for death, remove it from
648 * the list of handlers and wake up the sleeper.
649 */
650 if (ih->ih_flags & IH_DEAD) {
651 mtx_lock(&ie->ie_lock);
652 TAILQ_REMOVE(&ie->ie_handlers, ih, ih_next);
653 ih->ih_flags &= ~IH_DEAD;
654 wakeup(ih);
655 mtx_unlock(&ie->ie_lock);
656 continue;
657 }
658
659 /*
660 * For software interrupt threads, we only execute
661 * handlers that have their need flag set. Hardware
662 * interrupt threads always invoke all of their handlers.
663 */
664 if (ie->ie_flags & IE_SOFT) {
665 if (!ih->ih_need)
666 continue;
667 else
668 atomic_store_rel_int(&ih->ih_need, 0);
669 }
670
671 /* Fast handlers are handled in primary interrupt context. */
672 if (ih->ih_flags & IH_FAST)
673 continue;
674
675 /* Execute this handler. */
676 CTR6(KTR_INTR, "%s: pid %d exec %p(%p) for %s flg=%x",
677 __func__, p->p_pid, (void *)ih->ih_handler, ih->ih_argument,
678 ih->ih_name, ih->ih_flags);
679
680 if (!(ih->ih_flags & IH_MPSAFE))
681 mtx_lock(&Giant);
682 ih->ih_handler(ih->ih_argument);
683 if (!(ih->ih_flags & IH_MPSAFE))
684 mtx_unlock(&Giant);
685 }
686 if (!(ie->ie_flags & IE_SOFT))
687 THREAD_SLEEPING_OK();
688
689 /*
690 * Interrupt storm handling:
691 *
692 * If this interrupt source is currently storming, then throttle
693 * it to only fire the handler once per clock tick.
694 *
695 * If this interrupt source is not currently storming, but the
696 * number of back to back interrupts exceeds the storm threshold,
697 * then enter storming mode.
698 */
699 if (intr_storm_threshold != 0 && ie->ie_count >= intr_storm_threshold &&
700 !(ie->ie_flags & IE_SOFT)) {
701 /* Report the message only once every second. */
702 if (ppsratecheck(&ie->ie_warntm, &ie->ie_warncnt, 1)) {
703 printf(
704 "interrupt storm detected on \"%s\"; throttling interrupt source\n",
705 ie->ie_name);
706 }
707 tsleep(&ie->ie_count, 0, "istorm", 1);
708 } else
709 ie->ie_count++;
710
711 /*
712 * Now that all the handlers have had a chance to run, reenable
713 * the interrupt source.
714 */
715 if (ie->ie_enable != NULL)
716 ie->ie_enable(ie->ie_source);
717 }
718
719 /*
720 * This is the main code for interrupt threads.
721 */
722 static void
723 ithread_loop(void *arg)
724 {
725 struct intr_thread *ithd;
726 struct intr_event *ie;
727 struct thread *td;
728 struct proc *p;
729
730 td = curthread;
731 p = td->td_proc;
732 ithd = (struct intr_thread *)arg;
733 KASSERT(ithd->it_thread == td,
734 ("%s: ithread and proc linkage out of sync", __func__));
735 ie = ithd->it_event;
736 ie->ie_count = 0;
737
738 /*
739 * As long as we have interrupts outstanding, go through the
740 * list of handlers, giving each one a go at it.
741 */
742 for (;;) {
743 /*
744 * If we are an orphaned thread, then just die.
745 */
746 if (ithd->it_flags & IT_DEAD) {
747 CTR3(KTR_INTR, "%s: pid %d (%s) exiting", __func__,
748 p->p_pid, p->p_comm);
749 free(ithd, M_ITHREAD);
750 kthread_exit(0);
751 }
752
753 /*
754 * Service interrupts. If another interrupt arrives while
755 * we are running, it will set it_need to note that we
756 * should make another pass.
757 */
758 while (ithd->it_need) {
759 /*
760 * This might need a full read and write barrier
761 * to make sure that this write posts before any
762 * of the memory or device accesses in the
763 * handlers.
764 */
765 atomic_store_rel_int(&ithd->it_need, 0);
766 ithread_execute_handlers(p, ie);
767 }
768 WITNESS_WARN(WARN_PANIC, NULL, "suspending ithread");
769 mtx_assert(&Giant, MA_NOTOWNED);
770
771 /*
772 * Processed all our interrupts. Now get the sched
773 * lock. This may take a while and it_need may get
774 * set again, so we have to check it again.
775 */
776 mtx_lock_spin(&sched_lock);
777 if (!ithd->it_need && !(ithd->it_flags & IT_DEAD)) {
778 TD_SET_IWAIT(td);
779 ie->ie_count = 0;
780 mi_switch(SW_VOL, NULL);
781 }
782 mtx_unlock_spin(&sched_lock);
783 }
784 }
785
786 #ifdef DDB
787 /*
788 * Dump details about an interrupt handler
789 */
790 static void
791 db_dump_intrhand(struct intr_handler *ih)
792 {
793 int comma;
794
795 db_printf("\t%-10s ", ih->ih_name);
796 switch (ih->ih_pri) {
797 case PI_REALTIME:
798 db_printf("CLK ");
799 break;
800 case PI_AV:
801 db_printf("AV ");
802 break;
803 case PI_TTYHIGH:
804 case PI_TTYLOW:
805 db_printf("TTY ");
806 break;
807 case PI_TAPE:
808 db_printf("TAPE");
809 break;
810 case PI_NET:
811 db_printf("NET ");
812 break;
813 case PI_DISK:
814 case PI_DISKLOW:
815 db_printf("DISK");
816 break;
817 case PI_DULL:
818 db_printf("DULL");
819 break;
820 default:
821 if (ih->ih_pri >= PI_SOFT)
822 db_printf("SWI ");
823 else
824 db_printf("%4u", ih->ih_pri);
825 break;
826 }
827 db_printf(" ");
828 db_printsym((uintptr_t)ih->ih_handler, DB_STGY_PROC);
829 db_printf("(%p)", ih->ih_argument);
830 if (ih->ih_need ||
831 (ih->ih_flags & (IH_FAST | IH_EXCLUSIVE | IH_ENTROPY | IH_DEAD |
832 IH_MPSAFE)) != 0) {
833 db_printf(" {");
834 comma = 0;
835 if (ih->ih_flags & IH_FAST) {
836 db_printf("FAST");
837 comma = 1;
838 }
839 if (ih->ih_flags & IH_EXCLUSIVE) {
840 if (comma)
841 db_printf(", ");
842 db_printf("EXCL");
843 comma = 1;
844 }
845 if (ih->ih_flags & IH_ENTROPY) {
846 if (comma)
847 db_printf(", ");
848 db_printf("ENTROPY");
849 comma = 1;
850 }
851 if (ih->ih_flags & IH_DEAD) {
852 if (comma)
853 db_printf(", ");
854 db_printf("DEAD");
855 comma = 1;
856 }
857 if (ih->ih_flags & IH_MPSAFE) {
858 if (comma)
859 db_printf(", ");
860 db_printf("MPSAFE");
861 comma = 1;
862 }
863 if (ih->ih_need) {
864 if (comma)
865 db_printf(", ");
866 db_printf("NEED");
867 }
868 db_printf("}");
869 }
870 db_printf("\n");
871 }
872
873 /*
 874  * Dump details about an event.
875 */
876 void
877 db_dump_intr_event(struct intr_event *ie, int handlers)
878 {
879 struct intr_handler *ih;
880 struct intr_thread *it;
881 int comma;
882
883 db_printf("%s ", ie->ie_fullname);
884 it = ie->ie_thread;
885 if (it != NULL)
886 db_printf("(pid %d)", it->it_thread->td_proc->p_pid);
887 else
888 db_printf("(no thread)");
889 if ((ie->ie_flags & (IE_SOFT | IE_ENTROPY | IE_ADDING_THREAD)) != 0 ||
890 (it != NULL && it->it_need)) {
891 db_printf(" {");
892 comma = 0;
893 if (ie->ie_flags & IE_SOFT) {
894 db_printf("SOFT");
895 comma = 1;
896 }
897 if (ie->ie_flags & IE_ENTROPY) {
898 if (comma)
899 db_printf(", ");
900 db_printf("ENTROPY");
901 comma = 1;
902 }
903 if (ie->ie_flags & IE_ADDING_THREAD) {
904 if (comma)
905 db_printf(", ");
906 db_printf("ADDING_THREAD");
907 comma = 1;
908 }
909 if (it != NULL && it->it_need) {
910 if (comma)
911 db_printf(", ");
912 db_printf("NEED");
913 }
914 db_printf("}");
915 }
916 db_printf("\n");
917
918 if (handlers)
919 TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next)
920 db_dump_intrhand(ih);
921 }
922
923 /*
924 * Dump data about interrupt handlers
925 */
926 DB_SHOW_COMMAND(intr, db_show_intr)
927 {
928 struct intr_event *ie;
929 int quit, all, verbose;
930
931 quit = 0;
932 verbose = index(modif, 'v') != NULL;
933 all = index(modif, 'a') != NULL;
934 db_setup_paging(db_simple_pager, &quit, db_lines_per_page);
935 TAILQ_FOREACH(ie, &event_list, ie_list) {
936 if (!all && TAILQ_EMPTY(&ie->ie_handlers))
937 continue;
938 db_dump_intr_event(ie, verbose);
939 }
940 }
941 #endif /* DDB */
942
943 /*
944 * Start standard software interrupt threads
945 */
946 static void
947 start_softintr(void *dummy)
948 {
949 struct proc *p;
950
951 if (swi_add(&clk_intr_event, "clock", softclock, NULL, SWI_CLOCK,
952 INTR_MPSAFE, &softclock_ih) ||
953 swi_add(NULL, "vm", swi_vm, NULL, SWI_VM, INTR_MPSAFE, &vm_ih))
954 panic("died while creating standard software ithreads");
955
956 p = clk_intr_event->ie_thread->it_thread->td_proc;
957 PROC_LOCK(p);
958 p->p_flag |= P_NOLOAD;
959 PROC_UNLOCK(p);
960 }
961 SYSINIT(start_softintr, SI_SUB_SOFTINTR, SI_ORDER_FIRST, start_softintr, NULL)
962
963 /*
964 * Sysctls used by systat and others: hw.intrnames and hw.intrcnt.
 965  * The data for this is machine dependent, and the declarations are in
 966  * machine dependent code.  The layout of intrnames and intrcnt, however,
 967  * is machine independent.
968 *
969 * We do not know the length of intrcnt and intrnames at compile time, so
970 * calculate things at run time.
971 */
972 static int
973 sysctl_intrnames(SYSCTL_HANDLER_ARGS)
974 {
975 return (sysctl_handle_opaque(oidp, intrnames, eintrnames - intrnames,
976 req));
977 }
978
979 SYSCTL_PROC(_hw, OID_AUTO, intrnames, CTLTYPE_OPAQUE | CTLFLAG_RD,
980 NULL, 0, sysctl_intrnames, "", "Interrupt Names");
981
982 static int
983 sysctl_intrcnt(SYSCTL_HANDLER_ARGS)
984 {
985 return (sysctl_handle_opaque(oidp, intrcnt,
986 (char *)eintrcnt - (char *)intrcnt, req));
987 }
988
989 SYSCTL_PROC(_hw, OID_AUTO, intrcnt, CTLTYPE_OPAQUE | CTLFLAG_RD,
990 NULL, 0, sysctl_intrcnt, "", "Interrupt Counts");
991
992 #ifdef DDB
993 /*
994 * DDB command to dump the interrupt statistics.
995 */
996 DB_SHOW_COMMAND(intrcnt, db_show_intrcnt)
997 {
998 u_long *i;
999 char *cp;
1000 int quit;
1001
1002 cp = intrnames;
1003 db_setup_paging(db_simple_pager, &quit, db_lines_per_page);
1004 for (i = intrcnt, quit = 0; i != eintrcnt && !quit; i++) {
1005 if (*cp == '\0')
1006 break;
1007 if (*i != 0)
1008 db_printf("%s\t%lu\n", cp, *i);
1009 cp += strlen(cp) + 1;
1010 }
1011 }
1012 #endif