FreeBSD/Linux Kernel Cross Reference
sys/kern/kern_intr.c
/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 1997, Stefan Esser <se@freebsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_ddb.h"
#include "opt_hwpmc_hooks.h"
#include "opt_kstack_usage_prof.h"

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/cpuset.h>
#include <sys/rtprio.h>
#include <sys/systm.h>
#include <sys/interrupt.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/ktr.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/epoch.h>
#include <sys/random.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>
#include <sys/unistd.h>
#include <sys/vmmeter.h>
#include <machine/atomic.h>
#include <machine/cpu.h>
#include <machine/md_var.h>
#include <machine/smp.h>
#include <machine/stdarg.h>
#ifdef DDB
#include <ddb/ddb.h>
#include <ddb/db_sym.h>
#endif

/*
 * Describe an interrupt thread. There is one of these per interrupt event.
 */
struct intr_thread {
        struct intr_event *it_event;
        struct thread *it_thread;       /* Kernel thread. */
        int     it_flags;               /* (j) IT_* flags. */
        int     it_need;                /* Needs service. */
        int     it_waiting;             /* Waiting in the runq. */
};

/* Interrupt thread flags kept in it_flags */
#define IT_DEAD         0x000001        /* Thread is waiting to exit. */
#define IT_WAIT         0x000002        /* Thread is waiting for completion. */

struct intr_entropy {
        struct thread *td;
        uintptr_t event;
};

struct intr_event *clk_intr_event;
struct proc *intrproc;

static MALLOC_DEFINE(M_ITHREAD, "ithread", "Interrupt Threads");

static int intr_storm_threshold = 0;
SYSCTL_INT(_hw, OID_AUTO, intr_storm_threshold, CTLFLAG_RWTUN,
    &intr_storm_threshold, 0,
    "Number of consecutive interrupts before storm protection is enabled");
static int intr_epoch_batch = 1000;
SYSCTL_INT(_hw, OID_AUTO, intr_epoch_batch, CTLFLAG_RWTUN, &intr_epoch_batch,
    0, "Maximum interrupt handler executions without re-entering epoch(9)");
#ifdef HWPMC_HOOKS
static int intr_hwpmc_waiting_report_threshold = 1;
SYSCTL_INT(_hw, OID_AUTO, intr_hwpmc_waiting_report_threshold, CTLFLAG_RWTUN,
    &intr_hwpmc_waiting_report_threshold, 1,
    "Threshold for reporting number of events in a workq");
#define PMC_HOOK_INSTALLED_ANY() __predict_false(pmc_hook != NULL)
#endif
static TAILQ_HEAD(, intr_event) event_list =
    TAILQ_HEAD_INITIALIZER(event_list);
static struct mtx event_lock;
MTX_SYSINIT(intr_event_list, &event_lock, "intr event list", MTX_DEF);

static void     intr_event_update(struct intr_event *ie);
static int      intr_event_schedule_thread(struct intr_event *ie,
                    struct trapframe *frame);
static struct intr_thread *ithread_create(const char *name);
static void     ithread_destroy(struct intr_thread *ithread);
static void     ithread_execute_handlers(struct proc *p,
                    struct intr_event *ie);
static void     ithread_loop(void *);
static void     ithread_update(struct intr_thread *ithd);
static void     start_softintr(void *);

#ifdef HWPMC_HOOKS
#include <sys/pmckern.h>
PMC_SOFT_DEFINE( , , intr, all);
PMC_SOFT_DEFINE( , , intr, ithread);
PMC_SOFT_DEFINE( , , intr, filter);
PMC_SOFT_DEFINE( , , intr, stray);
PMC_SOFT_DEFINE( , , intr, schedule);
PMC_SOFT_DEFINE( , , intr, waiting);

#define PMC_SOFT_CALL_INTR_HLPR(event, frame)                   \
do {                                                            \
        if (frame != NULL)                                      \
                PMC_SOFT_CALL_TF( , , intr, event, frame);      \
        else                                                    \
                PMC_SOFT_CALL( , , intr, event);                \
} while (0)
#endif

/* Map an interrupt type to an ithread priority. */
u_char
intr_priority(enum intr_type flags)
{
        u_char pri;

        flags &= (INTR_TYPE_TTY | INTR_TYPE_BIO | INTR_TYPE_NET |
            INTR_TYPE_CAM | INTR_TYPE_MISC | INTR_TYPE_CLK | INTR_TYPE_AV);
        switch (flags) {
        case INTR_TYPE_TTY:
                pri = PI_TTY;
                break;
        case INTR_TYPE_BIO:
                pri = PI_DISK;
                break;
        case INTR_TYPE_NET:
                pri = PI_NET;
                break;
        case INTR_TYPE_CAM:
                pri = PI_DISK;
                break;
        case INTR_TYPE_AV:
                pri = PI_AV;
                break;
        case INTR_TYPE_CLK:
                pri = PI_REALTIME;
                break;
        case INTR_TYPE_MISC:
                pri = PI_DULL;          /* don't care */
                break;
        default:
                /* We didn't specify an interrupt level. */
                panic("intr_priority: no interrupt type in flags");
        }

        return pri;
}
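
/*
 * Usage sketch (hypothetical driver names, not part of this file): the
 * INTR_TYPE_* flag a driver passes to bus_setup_intr(9) is what reaches
 * intr_priority(), so an INTR_TYPE_NET handler ends up on a PI_NET ithread.
 */
#if 0
static int
mydev_attach(device_t dev)
{
        struct mydev_softc *sc = device_get_softc(dev);

        /* INTR_TYPE_NET makes intr_priority() select PI_NET. */
        return (bus_setup_intr(dev, sc->irq_res, INTR_TYPE_NET | INTR_MPSAFE,
            NULL, mydev_intr, sc, &sc->intrhand));
}
#endif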

/*
 * Update an ithread based on the associated intr_event.
 */
static void
ithread_update(struct intr_thread *ithd)
{
        struct intr_event *ie;
        struct thread *td;
        u_char pri;

        ie = ithd->it_event;
        td = ithd->it_thread;
        mtx_assert(&ie->ie_lock, MA_OWNED);

        /* Determine the overall priority of this event. */
        if (CK_SLIST_EMPTY(&ie->ie_handlers))
                pri = PRI_MAX_ITHD;
        else
                pri = CK_SLIST_FIRST(&ie->ie_handlers)->ih_pri;

        /* Update name and priority. */
        strlcpy(td->td_name, ie->ie_fullname, sizeof(td->td_name));
#ifdef KTR
        sched_clear_tdname(td);
#endif
        thread_lock(td);
        sched_ithread_prio(td, pri);
        thread_unlock(td);
}

/*
 * Regenerate the full name of an interrupt event and update its priority.
 */
static void
intr_event_update(struct intr_event *ie)
{
        struct intr_handler *ih;
        char *last;
        int missed, space, flags;

        /* Start off with no entropy and just the name of the event. */
        mtx_assert(&ie->ie_lock, MA_OWNED);
        strlcpy(ie->ie_fullname, ie->ie_name, sizeof(ie->ie_fullname));
        flags = 0;
        missed = 0;
        space = 1;

        /* Run through all the handlers updating values. */
        CK_SLIST_FOREACH(ih, &ie->ie_handlers, ih_next) {
                if (strlen(ie->ie_fullname) + strlen(ih->ih_name) + 1 <
                    sizeof(ie->ie_fullname)) {
                        strcat(ie->ie_fullname, " ");
                        strcat(ie->ie_fullname, ih->ih_name);
                        space = 0;
                } else
                        missed++;
                flags |= ih->ih_flags;
        }
        ie->ie_hflags = flags;

        /*
         * If there is only one handler and its name is too long, just copy in
         * as much of the end of the name (includes the unit number) as will
         * fit.  Otherwise, we have multiple handlers and not all of the names
         * will fit.  Add +'s to indicate missing names.  If we run out of room
         * and still have +'s to add, change the last character from a + to a *.
         */
        if (missed == 1 && space == 1) {
                ih = CK_SLIST_FIRST(&ie->ie_handlers);
                missed = strlen(ie->ie_fullname) + strlen(ih->ih_name) + 2 -
                    sizeof(ie->ie_fullname);
                strcat(ie->ie_fullname, (missed == 0) ? " " : "-");
                strcat(ie->ie_fullname, &ih->ih_name[missed]);
                missed = 0;
        }
        last = &ie->ie_fullname[sizeof(ie->ie_fullname) - 2];
        while (missed-- > 0) {
                if (strlen(ie->ie_fullname) + 1 == sizeof(ie->ie_fullname)) {
                        if (*last == '+') {
                                *last = '*';
                                break;
                        } else
                                *last = '+';
                } else if (space) {
                        strcat(ie->ie_fullname, " +");
                        space = 0;
                } else
                        strcat(ie->ie_fullname, "+");
        }

        /*
         * If this event has an ithread, update its priority and
         * name.
         */
        if (ie->ie_thread != NULL)
                ithread_update(ie->ie_thread);
        CTR2(KTR_INTR, "%s: updated %s", __func__, ie->ie_fullname);
}

int
intr_event_create(struct intr_event **event, void *source, int flags, int irq,
    void (*pre_ithread)(void *), void (*post_ithread)(void *),
    void (*post_filter)(void *), int (*assign_cpu)(void *, int),
    const char *fmt, ...)
{
        struct intr_event *ie;
        va_list ap;

        /* The only valid flag during creation is IE_SOFT. */
        if ((flags & ~IE_SOFT) != 0)
                return (EINVAL);
        ie = malloc(sizeof(struct intr_event), M_ITHREAD, M_WAITOK | M_ZERO);
        ie->ie_source = source;
        ie->ie_pre_ithread = pre_ithread;
        ie->ie_post_ithread = post_ithread;
        ie->ie_post_filter = post_filter;
        ie->ie_assign_cpu = assign_cpu;
        ie->ie_flags = flags;
        ie->ie_irq = irq;
        ie->ie_cpu = NOCPU;
        CK_SLIST_INIT(&ie->ie_handlers);
        mtx_init(&ie->ie_lock, "intr event", NULL, MTX_DEF);

        va_start(ap, fmt);
        vsnprintf(ie->ie_name, sizeof(ie->ie_name), fmt, ap);
        va_end(ap);
        strlcpy(ie->ie_fullname, ie->ie_name, sizeof(ie->ie_fullname));
        mtx_lock(&event_lock);
        TAILQ_INSERT_TAIL(&event_list, ie, ie_list);
        mtx_unlock(&event_lock);
        if (event != NULL)
                *event = ie;
        CTR2(KTR_INTR, "%s: created %s", __func__, ie->ie_name);
        return (0);
}
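
/*
 * Usage sketch (the "foo" and "isrc" names are hypothetical): a
 * machine-dependent interrupt controller driver typically creates one event
 * per IRQ and supplies its mask/unmask/EOI callbacks through the hooks.
 */
#if 0
        error = intr_event_create(&isrc->isrc_event, isrc, 0, irq,
            foo_pre_ithread, foo_post_ithread, foo_post_filter,
            foo_assign_cpu, "irq%d:", irq);
#endif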

/*
 * Bind an interrupt event to the specified CPU.  Note that not all
 * platforms support binding an interrupt to a CPU.  For those
 * platforms this request will fail.  Using a cpu id of NOCPU unbinds
 * the interrupt event.
 */
static int
_intr_event_bind(struct intr_event *ie, int cpu, bool bindirq, bool bindithread)
{
        lwpid_t id;
        int error;

        /* Need a CPU to bind to. */
        if (cpu != NOCPU && CPU_ABSENT(cpu))
                return (EINVAL);

        if (ie->ie_assign_cpu == NULL)
                return (EOPNOTSUPP);

        error = priv_check(curthread, PRIV_SCHED_CPUSET_INTR);
        if (error)
                return (error);

        /*
         * If we have any ithreads try to set their mask first to verify
         * permissions, etc.
         */
        if (bindithread) {
                mtx_lock(&ie->ie_lock);
                if (ie->ie_thread != NULL) {
                        id = ie->ie_thread->it_thread->td_tid;
                        mtx_unlock(&ie->ie_lock);
                        error = cpuset_setithread(id, cpu);
                        if (error)
                                return (error);
                } else
                        mtx_unlock(&ie->ie_lock);
        }
        if (bindirq)
                error = ie->ie_assign_cpu(ie->ie_source, cpu);
        if (error) {
                if (bindithread) {
                        mtx_lock(&ie->ie_lock);
                        if (ie->ie_thread != NULL) {
                                cpu = ie->ie_cpu;
                                id = ie->ie_thread->it_thread->td_tid;
                                mtx_unlock(&ie->ie_lock);
                                (void)cpuset_setithread(id, cpu);
                        } else
                                mtx_unlock(&ie->ie_lock);
                }
                return (error);
        }

        if (bindirq) {
                mtx_lock(&ie->ie_lock);
                ie->ie_cpu = cpu;
                mtx_unlock(&ie->ie_lock);
        }

        return (error);
}

/*
 * Bind an interrupt event to the specified CPU.  For supported platforms, any
 * associated ithreads as well as the primary interrupt context will be bound
 * to the specified CPU.
 */
int
intr_event_bind(struct intr_event *ie, int cpu)
{

        return (_intr_event_bind(ie, cpu, true, true));
}

/*
 * Bind an interrupt event to the specified CPU, but do not bind associated
 * ithreads.
 */
int
intr_event_bind_irqonly(struct intr_event *ie, int cpu)
{

        return (_intr_event_bind(ie, cpu, true, false));
}

/*
 * Bind an interrupt event's ithread to the specified CPU.
 */
int
intr_event_bind_ithread(struct intr_event *ie, int cpu)
{

        return (_intr_event_bind(ie, cpu, false, true));
}

/*
 * Bind an interrupt event's ithread to the specified cpuset.
 */
int
intr_event_bind_ithread_cpuset(struct intr_event *ie, cpuset_t *cs)
{
        lwpid_t id;

        mtx_lock(&ie->ie_lock);
        if (ie->ie_thread != NULL) {
                id = ie->ie_thread->it_thread->td_tid;
                mtx_unlock(&ie->ie_lock);
                return (cpuset_setthread(id, cs));
        } else {
                mtx_unlock(&ie->ie_lock);
        }
        return (ENODEV);
}
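
/*
 * Usage sketch: pin both the hardware interrupt and its ithread for an
 * event "ie" to CPU 2, then restore the default binding with NOCPU.
 */
#if 0
        if (intr_event_bind(ie, 2) == 0)
                (void)intr_event_bind(ie, NOCPU);       /* unbind again */
#endif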

static struct intr_event *
intr_lookup(int irq)
{
        struct intr_event *ie;

        mtx_lock(&event_lock);
        TAILQ_FOREACH(ie, &event_list, ie_list)
                if (ie->ie_irq == irq &&
                    (ie->ie_flags & IE_SOFT) == 0 &&
                    CK_SLIST_FIRST(&ie->ie_handlers) != NULL)
                        break;
        mtx_unlock(&event_lock);
        return (ie);
}

int
intr_setaffinity(int irq, int mode, void *m)
{
        struct intr_event *ie;
        cpuset_t *mask;
        int cpu, n;

        mask = m;
        cpu = NOCPU;
        /*
         * If we're setting all cpus we can unbind.  Otherwise make sure
         * only one cpu is in the set.
         */
        if (CPU_CMP(cpuset_root, mask)) {
                for (n = 0; n < CPU_SETSIZE; n++) {
                        if (!CPU_ISSET(n, mask))
                                continue;
                        if (cpu != NOCPU)
                                return (EINVAL);
                        cpu = n;
                }
        }
        ie = intr_lookup(irq);
        if (ie == NULL)
                return (ESRCH);
        switch (mode) {
        case CPU_WHICH_IRQ:
                return (intr_event_bind(ie, cpu));
        case CPU_WHICH_INTRHANDLER:
                return (intr_event_bind_irqonly(ie, cpu));
        case CPU_WHICH_ITHREAD:
                return (intr_event_bind_ithread(ie, cpu));
        default:
                return (EINVAL);
        }
}
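
/*
 * Usage sketch: intr_setaffinity() is normally reached via cpuset(2) and
 * expects either the root set (which unbinds) or a mask holding exactly
 * one CPU.  The irq number below is hypothetical.
 */
#if 0
        cpuset_t mask;

        CPU_ZERO(&mask);
        CPU_SET(3, &mask);              /* exactly one CPU */
        error = intr_setaffinity(irq, CPU_WHICH_IRQ, &mask);
#endif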

int
intr_getaffinity(int irq, int mode, void *m)
{
        struct intr_event *ie;
        struct thread *td;
        struct proc *p;
        cpuset_t *mask;
        lwpid_t id;
        int error;

        mask = m;
        ie = intr_lookup(irq);
        if (ie == NULL)
                return (ESRCH);

        error = 0;
        CPU_ZERO(mask);
        switch (mode) {
        case CPU_WHICH_IRQ:
        case CPU_WHICH_INTRHANDLER:
                mtx_lock(&ie->ie_lock);
                if (ie->ie_cpu == NOCPU)
                        CPU_COPY(cpuset_root, mask);
                else
                        CPU_SET(ie->ie_cpu, mask);
                mtx_unlock(&ie->ie_lock);
                break;
        case CPU_WHICH_ITHREAD:
                mtx_lock(&ie->ie_lock);
                if (ie->ie_thread == NULL) {
                        mtx_unlock(&ie->ie_lock);
                        CPU_COPY(cpuset_root, mask);
                } else {
                        id = ie->ie_thread->it_thread->td_tid;
                        mtx_unlock(&ie->ie_lock);
                        error = cpuset_which(CPU_WHICH_TID, id, &p, &td, NULL);
                        if (error != 0)
                                return (error);
                        CPU_COPY(&td->td_cpuset->cs_mask, mask);
                        PROC_UNLOCK(p);
                }
                break;
        default:
                return (EINVAL);
        }
        return (0);
}

int
intr_event_destroy(struct intr_event *ie)
{

        if (ie == NULL)
                return (EINVAL);

        mtx_lock(&event_lock);
        mtx_lock(&ie->ie_lock);
        if (!CK_SLIST_EMPTY(&ie->ie_handlers)) {
                mtx_unlock(&ie->ie_lock);
                mtx_unlock(&event_lock);
                return (EBUSY);
        }
        TAILQ_REMOVE(&event_list, ie, ie_list);
#ifndef notyet
        if (ie->ie_thread != NULL) {
                ithread_destroy(ie->ie_thread);
                ie->ie_thread = NULL;
        }
#endif
        mtx_unlock(&ie->ie_lock);
        mtx_unlock(&event_lock);
        mtx_destroy(&ie->ie_lock);
        free(ie, M_ITHREAD);
        return (0);
}

static struct intr_thread *
ithread_create(const char *name)
{
        struct intr_thread *ithd;
        struct thread *td;
        int error;

        ithd = malloc(sizeof(struct intr_thread), M_ITHREAD, M_WAITOK | M_ZERO);

        error = kproc_kthread_add(ithread_loop, ithd, &intrproc,
            &td, RFSTOPPED | RFHIGHPID,
            0, "intr", "%s", name);
        if (error)
                panic("kproc_create() failed with %d", error);
        thread_lock(td);
        sched_class(td, PRI_ITHD);
        TD_SET_IWAIT(td);
        thread_unlock(td);
        td->td_pflags |= TDP_ITHREAD;
        ithd->it_thread = td;
        CTR2(KTR_INTR, "%s: created %s", __func__, name);
        return (ithd);
}

static void
ithread_destroy(struct intr_thread *ithread)
{
        struct thread *td;

        CTR2(KTR_INTR, "%s: killing %s", __func__, ithread->it_event->ie_name);
        td = ithread->it_thread;
        thread_lock(td);
        ithread->it_flags |= IT_DEAD;
        if (TD_AWAITING_INTR(td)) {
                TD_CLR_IWAIT(td);
                sched_wakeup(td, SRQ_INTR);
        } else
                thread_unlock(td);
}

int
intr_event_add_handler(struct intr_event *ie, const char *name,
    driver_filter_t filter, driver_intr_t handler, void *arg, u_char pri,
    enum intr_type flags, void **cookiep)
{
        struct intr_handler *ih, *temp_ih;
        struct intr_handler **prevptr;
        struct intr_thread *it;

        if (ie == NULL || name == NULL || (handler == NULL && filter == NULL))
                return (EINVAL);

        /* Allocate and populate an interrupt handler structure. */
        ih = malloc(sizeof(struct intr_handler), M_ITHREAD, M_WAITOK | M_ZERO);
        ih->ih_filter = filter;
        ih->ih_handler = handler;
        ih->ih_argument = arg;
        strlcpy(ih->ih_name, name, sizeof(ih->ih_name));
        ih->ih_event = ie;
        ih->ih_pri = pri;
        if (flags & INTR_EXCL)
                ih->ih_flags = IH_EXCLUSIVE;
        if (flags & INTR_MPSAFE)
                ih->ih_flags |= IH_MPSAFE;
        if (flags & INTR_ENTROPY)
                ih->ih_flags |= IH_ENTROPY;
        if (flags & INTR_TYPE_NET)
                ih->ih_flags |= IH_NET;

        /* We can only have one exclusive handler in an event. */
        mtx_lock(&ie->ie_lock);
        if (!CK_SLIST_EMPTY(&ie->ie_handlers)) {
                if ((flags & INTR_EXCL) ||
                    (CK_SLIST_FIRST(&ie->ie_handlers)->ih_flags & IH_EXCLUSIVE)) {
                        mtx_unlock(&ie->ie_lock);
                        free(ih, M_ITHREAD);
                        return (EINVAL);
                }
        }

        /* Create a thread if we need one. */
        while (ie->ie_thread == NULL && handler != NULL) {
                if (ie->ie_flags & IE_ADDING_THREAD)
                        msleep(ie, &ie->ie_lock, 0, "ithread", 0);
                else {
                        ie->ie_flags |= IE_ADDING_THREAD;
                        mtx_unlock(&ie->ie_lock);
                        it = ithread_create("intr: newborn");
                        mtx_lock(&ie->ie_lock);
                        ie->ie_flags &= ~IE_ADDING_THREAD;
                        ie->ie_thread = it;
                        it->it_event = ie;
                        ithread_update(it);
                        wakeup(ie);
                }
        }

        /* Add the new handler to the event in priority order. */
        CK_SLIST_FOREACH_PREVPTR(temp_ih, prevptr, &ie->ie_handlers, ih_next) {
                if (temp_ih->ih_pri > ih->ih_pri)
                        break;
        }
        CK_SLIST_INSERT_PREVPTR(prevptr, temp_ih, ih, ih_next);

        intr_event_update(ie);

        CTR3(KTR_INTR, "%s: added %s to %s", __func__, ih->ih_name,
            ie->ie_name);
        mtx_unlock(&ie->ie_lock);

        if (cookiep != NULL)
                *cookiep = ih;
        return (0);
}
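
/*
 * Usage sketch (hypothetical names; drivers normally get here indirectly
 * through bus_setup_intr(9)): register a filter plus an ithread handler.
 * The returned cookie is what intr_event_remove_handler() later takes.
 */
#if 0
        void *cookie;

        error = intr_event_add_handler(ie, device_get_nameunit(dev),
            mydev_filter, mydev_intr, sc, intr_priority(INTR_TYPE_NET),
            INTR_TYPE_NET | INTR_MPSAFE, &cookie);
#endif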

/*
 * Append a description preceded by a ':' to the name of the specified
 * interrupt handler.
 */
int
intr_event_describe_handler(struct intr_event *ie, void *cookie,
    const char *descr)
{
        struct intr_handler *ih;
        size_t space;
        char *start;

        mtx_lock(&ie->ie_lock);
#ifdef INVARIANTS
        CK_SLIST_FOREACH(ih, &ie->ie_handlers, ih_next) {
                if (ih == cookie)
                        break;
        }
        if (ih == NULL) {
                mtx_unlock(&ie->ie_lock);
                panic("handler %p not found in interrupt event %p", cookie, ie);
        }
#endif
        ih = cookie;

        /*
         * Look for an existing description by checking for an
         * existing ":".  This assumes device names do not include
         * colons.  If one is found, prepare to insert the new
         * description at that point.  If one is not found, find the
         * end of the name to use as the insertion point.
         */
        start = strchr(ih->ih_name, ':');
        if (start == NULL)
                start = strchr(ih->ih_name, 0);

        /*
         * See if there is enough remaining room in the string for the
         * description + ":".  The "- 1" leaves room for the trailing
         * '\0'.  The "+ 1" accounts for the colon.
         */
        space = sizeof(ih->ih_name) - (start - ih->ih_name) - 1;
        if (strlen(descr) + 1 > space) {
                mtx_unlock(&ie->ie_lock);
                return (ENOSPC);
        }

        /* Append a colon followed by the description. */
        *start = ':';
        strcpy(start + 1, descr);
        intr_event_update(ie);
        mtx_unlock(&ie->ie_lock);
        return (0);
}
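
/*
 * Usage sketch: with a handler named "em0", describing it as "rx" yields
 * the full name "em0:rx"; a later call with "tx" replaces rather than
 * appends, yielding "em0:tx", because the text after an existing ':' is
 * overwritten above.
 */
#if 0
        error = intr_event_describe_handler(ie, cookie, "rx");
#endif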

/*
 * Return the ie_source field from the intr_event an intr_handler is
 * associated with.
 */
void *
intr_handler_source(void *cookie)
{
        struct intr_handler *ih;
        struct intr_event *ie;

        ih = (struct intr_handler *)cookie;
        if (ih == NULL)
                return (NULL);
        ie = ih->ih_event;
        KASSERT(ie != NULL,
            ("interrupt handler \"%s\" has a NULL interrupt event",
            ih->ih_name));
        return (ie->ie_source);
}

/*
 * If intr_event_handle() is running in the ISR context at the time of
 * the call, then wait for it to complete.
 */
static void
intr_event_barrier(struct intr_event *ie)
{
        int phase;

        mtx_assert(&ie->ie_lock, MA_OWNED);
        phase = ie->ie_phase;

        /*
         * Switch phase to direct future interrupts to the other active
         * counter.  Make sure that any preceding stores are visible before
         * the switch.
         */
        KASSERT(ie->ie_active[!phase] == 0, ("idle phase has activity"));
        atomic_store_rel_int(&ie->ie_phase, !phase);

        /*
         * This code cooperates with wait-free iteration of ie_handlers
         * in intr_event_handle.
         * Make sure that the removal and the phase update are not reordered
         * with the active count check.
         * Note that no combination of acquire and release fences can provide
         * that guarantee as Store->Load sequences can always be reordered.
         */
        atomic_thread_fence_seq_cst();

        /*
         * Now wait on the inactive phase.
         * The acquire fence is needed so that all post-barrier accesses
         * are after the check.
         */
        while (ie->ie_active[phase] > 0)
                cpu_spinwait();
        atomic_thread_fence_acq();
}
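
/*
 * Protocol sketch for the two-phase barrier above, condensed from the code
 * (the ISR side lives in intr_event_handle()):
 *
 *   ISR:                              barrier:
 *     phase = ie_phase;
 *     ie_active[phase]++;               ie_phase = !phase;
 *     seq_cst fence;                    seq_cst fence;
 *     iterate ie_handlers;              while (ie_active[phase] > 0)
 *     ie_active[phase]--;                       cpu_spinwait();
 *
 * The seq_cst fences prevent the Store->Load reordering that acquire and
 * release alone cannot: either the ISR observes the new phase or the
 * barrier observes the ISR's increment, never neither.
 */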

static void
intr_handler_barrier(struct intr_handler *handler)
{
        struct intr_event *ie;

        ie = handler->ih_event;
        mtx_assert(&ie->ie_lock, MA_OWNED);
        KASSERT((handler->ih_flags & IH_DEAD) == 0,
            ("update for a removed handler"));

        if (ie->ie_thread == NULL) {
                intr_event_barrier(ie);
                return;
        }
        if ((handler->ih_flags & IH_CHANGED) == 0) {
                handler->ih_flags |= IH_CHANGED;
                intr_event_schedule_thread(ie, NULL);
        }
        while ((handler->ih_flags & IH_CHANGED) != 0)
                msleep(handler, &ie->ie_lock, 0, "ih_barr", 0);
}

/*
 * Sleep until an ithread finishes executing an interrupt handler.
 *
 * XXX Doesn't currently handle interrupt filters or fast interrupt
 * handlers.  This is intended for LinuxKPI drivers only.
 * Do not use in BSD code.
 */
void
_intr_drain(int irq)
{
        struct intr_event *ie;
        struct intr_thread *ithd;
        struct thread *td;

        ie = intr_lookup(irq);
        if (ie == NULL)
                return;
        if (ie->ie_thread == NULL)
                return;
        ithd = ie->ie_thread;
        td = ithd->it_thread;
        /*
         * We set the flag and wait for it to be cleared to avoid
         * long delays with potentially busy interrupt handlers
         * were we to only sample TD_AWAITING_INTR() every tick.
         */
        thread_lock(td);
        if (!TD_AWAITING_INTR(td)) {
                ithd->it_flags |= IT_WAIT;
                while (ithd->it_flags & IT_WAIT) {
                        thread_unlock(td);
                        pause("idrain", 1);
                        thread_lock(td);
                }
        }
        thread_unlock(td);
        return;
}

int
intr_event_remove_handler(void *cookie)
{
        struct intr_handler *handler = (struct intr_handler *)cookie;
        struct intr_event *ie;
        struct intr_handler *ih;
        struct intr_handler **prevptr;
#ifdef notyet
        int dead;
#endif

        if (handler == NULL)
                return (EINVAL);
        ie = handler->ih_event;
        KASSERT(ie != NULL,
            ("interrupt handler \"%s\" has a NULL interrupt event",
            handler->ih_name));

        mtx_lock(&ie->ie_lock);
        CTR3(KTR_INTR, "%s: removing %s from %s", __func__, handler->ih_name,
            ie->ie_name);
        CK_SLIST_FOREACH_PREVPTR(ih, prevptr, &ie->ie_handlers, ih_next) {
                if (ih == handler)
                        break;
        }
        if (ih == NULL) {
                panic("interrupt handler \"%s\" not found in "
                    "interrupt event \"%s\"", handler->ih_name, ie->ie_name);
        }

        /*
         * If there is no ithread, then directly remove the handler.  Note that
         * intr_event_handle() iterates ie_handlers in a lock-less fashion, so
         * care needs to be taken to keep ie_handlers consistent and to free
         * the removed handler only when ie_handlers is quiescent.
         */
        if (ie->ie_thread == NULL) {
                CK_SLIST_REMOVE_PREVPTR(prevptr, ih, ih_next);
                intr_event_barrier(ie);
                intr_event_update(ie);
                mtx_unlock(&ie->ie_lock);
                free(handler, M_ITHREAD);
                return (0);
        }

        /*
         * Let the interrupt thread do the job.
         * The interrupt source is disabled when the interrupt thread is
         * running, so it does not have to worry about interaction with
         * intr_event_handle().
         */
        KASSERT((handler->ih_flags & IH_DEAD) == 0,
            ("duplicate handle remove"));
        handler->ih_flags |= IH_DEAD;
        intr_event_schedule_thread(ie, NULL);
        while (handler->ih_flags & IH_DEAD)
                msleep(handler, &ie->ie_lock, 0, "iev_rmh", 0);
        intr_event_update(ie);

#ifdef notyet
        /*
         * XXX: This could be bad in the case of ppbus(8).  Also, I think
         * this could lead to races of stale data when servicing an
         * interrupt.
         */
        dead = 1;
        CK_SLIST_FOREACH(ih, &ie->ie_handlers, ih_next) {
                if (ih->ih_handler != NULL) {
                        dead = 0;
                        break;
                }
        }
        if (dead) {
                ithread_destroy(ie->ie_thread);
                ie->ie_thread = NULL;
        }
#endif
        mtx_unlock(&ie->ie_lock);
        free(handler, M_ITHREAD);
        return (0);
}

int
intr_event_suspend_handler(void *cookie)
{
        struct intr_handler *handler = (struct intr_handler *)cookie;
        struct intr_event *ie;

        if (handler == NULL)
                return (EINVAL);
        ie = handler->ih_event;
        KASSERT(ie != NULL,
            ("interrupt handler \"%s\" has a NULL interrupt event",
            handler->ih_name));
        mtx_lock(&ie->ie_lock);
        handler->ih_flags |= IH_SUSP;
        intr_handler_barrier(handler);
        mtx_unlock(&ie->ie_lock);
        return (0);
}

int
intr_event_resume_handler(void *cookie)
{
        struct intr_handler *handler = (struct intr_handler *)cookie;
        struct intr_event *ie;

        if (handler == NULL)
                return (EINVAL);
        ie = handler->ih_event;
        KASSERT(ie != NULL,
            ("interrupt handler \"%s\" has a NULL interrupt event",
            handler->ih_name));

        /*
         * intr_handler_barrier() acts not only as a barrier,
         * it also lets us check for any pending interrupts.
         */
        mtx_lock(&ie->ie_lock);
        handler->ih_flags &= ~IH_SUSP;
        intr_handler_barrier(handler);
        mtx_unlock(&ie->ie_lock);
        return (0);
}
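
/*
 * Usage sketch (hypothetical names): quiesce a handler around a device
 * reset so the handler cannot run concurrently with the reset.
 */
#if 0
        intr_event_suspend_handler(sc->intr_cookie);
        mydev_reset(sc);
        intr_event_resume_handler(sc->intr_cookie);
#endif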

static int
intr_event_schedule_thread(struct intr_event *ie, struct trapframe *frame)
{
        struct intr_entropy entropy;
        struct intr_thread *it;
        struct thread *td;
        struct thread *ctd;

        /*
         * If no ithread or no handlers, then we have a stray interrupt.
         */
        if (ie == NULL || CK_SLIST_EMPTY(&ie->ie_handlers) ||
            ie->ie_thread == NULL)
                return (EINVAL);

        ctd = curthread;
        it = ie->ie_thread;
        td = it->it_thread;

        /*
         * If any of the handlers for this ithread claim to be good
         * sources of entropy, then gather some.
         */
        if (ie->ie_hflags & IH_ENTROPY) {
                entropy.event = (uintptr_t)ie;
                entropy.td = ctd;
                random_harvest_queue(&entropy, sizeof(entropy),
                    RANDOM_INTERRUPT);
        }

        KASSERT(td->td_proc != NULL, ("ithread %s has no process",
            ie->ie_name));

        /*
         * Set it_need to tell the thread to keep running if it is already
         * running.  Then, lock the thread and see if we actually need to
         * put it on the runqueue.
         *
         * Use store_rel to arrange that the store to ih_need in
         * swi_sched() is before the store to it_need and prepare for
         * transfer of this order to loads in the ithread.
         */
        atomic_store_rel_int(&it->it_need, 1);
        thread_lock(td);
        if (TD_AWAITING_INTR(td)) {
#ifdef HWPMC_HOOKS
                it->it_waiting = 0;
                if (PMC_HOOK_INSTALLED_ANY())
                        PMC_SOFT_CALL_INTR_HLPR(schedule, frame);
#endif
                CTR3(KTR_INTR, "%s: schedule pid %d (%s)", __func__,
                    td->td_proc->p_pid, td->td_name);
                TD_CLR_IWAIT(td);
                sched_wakeup(td, SRQ_INTR);
        } else {
#ifdef HWPMC_HOOKS
                it->it_waiting++;
                if (PMC_HOOK_INSTALLED_ANY() &&
                    (it->it_waiting >= intr_hwpmc_waiting_report_threshold))
                        PMC_SOFT_CALL_INTR_HLPR(waiting, frame);
#endif
                CTR5(KTR_INTR, "%s: pid %d (%s): it_need %d, state %d",
                    __func__, td->td_proc->p_pid, td->td_name, it->it_need,
                    TD_GET_STATE(td));
                thread_unlock(td);
        }

        return (0);
}

/*
 * Allow interrupt event binding for software interrupt handlers -- a no-op,
 * since interrupts are generated in software rather than being directed by
 * a PIC.
 */
static int
swi_assign_cpu(void *arg, int cpu)
{

        return (0);
}

/*
 * Add a software interrupt handler to a specified event.  If a given event
 * is not specified, then a new event is created.
 */
int
swi_add(struct intr_event **eventp, const char *name, driver_intr_t handler,
    void *arg, int pri, enum intr_type flags, void **cookiep)
{
        struct intr_event *ie;
        int error = 0;

        if (flags & INTR_ENTROPY)
                return (EINVAL);

        ie = (eventp != NULL) ? *eventp : NULL;

        if (ie != NULL) {
                if (!(ie->ie_flags & IE_SOFT))
                        return (EINVAL);
        } else {
                error = intr_event_create(&ie, NULL, IE_SOFT, 0,
                    NULL, NULL, NULL, swi_assign_cpu, "swi%d:", pri);
                if (error)
                        return (error);
                if (eventp != NULL)
                        *eventp = ie;
        }
        if (handler != NULL) {
                error = intr_event_add_handler(ie, name, NULL, handler, arg,
                    PI_SWI(pri), flags, cookiep);
        }
        return (error);
}
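
/*
 * Usage sketch (hypothetical "mysoft" names; the priority is reused from
 * start_softintr() below purely for illustration): create a private
 * software interrupt and schedule it.
 */
#if 0
        static struct intr_event *mysoft_event;
        static void *mysoft_cookie;

        error = swi_add(&mysoft_event, "mysoft", mysoft_handler, NULL,
            SWI_CLOCK, INTR_MPSAFE, &mysoft_cookie);
        if (error == 0)
                swi_sched(mysoft_cookie, 0);
#endif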

/*
 * Schedule a software interrupt thread.
 */
void
swi_sched(void *cookie, int flags)
{
        struct intr_handler *ih = (struct intr_handler *)cookie;
        struct intr_event *ie = ih->ih_event;
        struct intr_entropy entropy;
        int error __unused;

        CTR3(KTR_INTR, "swi_sched: %s %s need=%d", ie->ie_name, ih->ih_name,
            ih->ih_need);

        if ((flags & SWI_FROMNMI) == 0) {
                entropy.event = (uintptr_t)ih;
                entropy.td = curthread;
                random_harvest_queue(&entropy, sizeof(entropy), RANDOM_SWI);
        }

        /*
         * Set ih_need for this handler so that if the ithread is already
         * running it will execute this handler on the next pass.  Otherwise,
         * it will execute it the next time it runs.
         */
        ih->ih_need = 1;

        if (flags & SWI_DELAY)
                return;

        if (flags & SWI_FROMNMI) {
#if defined(SMP) && (defined(__i386__) || defined(__amd64__))
                KASSERT(ie == clk_intr_event,
                    ("SWI_FROMNMI used not with clk_intr_event"));
                ipi_self_from_nmi(IPI_SWI);
#endif
        } else {
                VM_CNT_INC(v_soft);
                error = intr_event_schedule_thread(ie, NULL);
                KASSERT(error == 0, ("stray software interrupt"));
        }
}

/*
 * Remove a software interrupt handler.  Currently this code does not
 * remove the associated interrupt event if it becomes empty.  Calling code
 * may do so manually via intr_event_destroy(), but that's not really
 * an optimal interface.
 */
int
swi_remove(void *cookie)
{

        return (intr_event_remove_handler(cookie));
}

static void
intr_event_execute_handlers(struct proc *p, struct intr_event *ie)
{
        struct intr_handler *ih, *ihn, *ihp;

        ihp = NULL;
        CK_SLIST_FOREACH_SAFE(ih, &ie->ie_handlers, ih_next, ihn) {
                /*
                 * If this handler is marked for death, remove it from
                 * the list of handlers and wake up the sleeper.
                 */
                if (ih->ih_flags & IH_DEAD) {
                        mtx_lock(&ie->ie_lock);
                        if (ihp == NULL)
                                CK_SLIST_REMOVE_HEAD(&ie->ie_handlers,
                                    ih_next);
                        else
                                CK_SLIST_REMOVE_AFTER(ihp, ih_next);
                        ih->ih_flags &= ~IH_DEAD;
                        wakeup(ih);
                        mtx_unlock(&ie->ie_lock);
                        continue;
                }

                /*
                 * Now that we know that the current element won't be removed
                 * update the previous element.
                 */
                ihp = ih;

                if ((ih->ih_flags & IH_CHANGED) != 0) {
                        mtx_lock(&ie->ie_lock);
                        ih->ih_flags &= ~IH_CHANGED;
                        wakeup(ih);
                        mtx_unlock(&ie->ie_lock);
                }

                /* Skip filter only handlers */
                if (ih->ih_handler == NULL)
                        continue;

                /* Skip suspended handlers */
                if ((ih->ih_flags & IH_SUSP) != 0)
                        continue;

                /*
                 * For software interrupt threads, we only execute
                 * handlers that have their need flag set.  Hardware
                 * interrupt threads always invoke all of their handlers.
                 *
                 * ih_need can only be 0 or 1.  Failed cmpset below
                 * means that there is no request to execute handlers,
                 * so a retry of the cmpset is not needed.
                 */
                if ((ie->ie_flags & IE_SOFT) != 0 &&
                    atomic_cmpset_int(&ih->ih_need, 1, 0) == 0)
                        continue;

                /* Execute this handler. */
                CTR6(KTR_INTR, "%s: pid %d exec %p(%p) for %s flg=%x",
                    __func__, p->p_pid, (void *)ih->ih_handler,
                    ih->ih_argument, ih->ih_name, ih->ih_flags);

                if (!(ih->ih_flags & IH_MPSAFE))
                        mtx_lock(&Giant);
                ih->ih_handler(ih->ih_argument);
                if (!(ih->ih_flags & IH_MPSAFE))
                        mtx_unlock(&Giant);
        }
}

static void
ithread_execute_handlers(struct proc *p, struct intr_event *ie)
{

        /* Interrupt handlers should not sleep. */
        if (!(ie->ie_flags & IE_SOFT))
                THREAD_NO_SLEEPING();
        intr_event_execute_handlers(p, ie);
        if (!(ie->ie_flags & IE_SOFT))
                THREAD_SLEEPING_OK();

        /*
         * Interrupt storm handling:
         *
         * If this interrupt source is currently storming, then throttle
         * it to only fire the handler once per clock tick.
         *
         * If this interrupt source is not currently storming, but the
         * number of back to back interrupts exceeds the storm threshold,
         * then enter storming mode.
         */
        if (intr_storm_threshold != 0 && ie->ie_count >= intr_storm_threshold &&
            !(ie->ie_flags & IE_SOFT)) {
                /* Report the message only once every second. */
                if (ppsratecheck(&ie->ie_warntm, &ie->ie_warncnt, 1)) {
                        printf(
        "interrupt storm detected on \"%s\"; throttling interrupt source\n",
                            ie->ie_name);
                }
                pause("istorm", 1);
        } else
                ie->ie_count++;

        /*
         * Now that all the handlers have had a chance to run, reenable
         * the interrupt source.
         */
        if (ie->ie_post_ithread != NULL)
                ie->ie_post_ithread(ie->ie_source);
}
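
/*
 * Tuning note: storm protection is off by default (threshold 0).  Setting,
 * say, "sysctl hw.intr_storm_threshold=1000" makes a hardware ithread that
 * completes 1000 back-to-back passes without going idle throttle itself to
 * one pass per clock tick until the source calms down; ie_count is reset
 * when the thread goes back to sleep in ithread_loop() below.
 */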

/*
 * This is the main code for interrupt threads.
 */
static void
ithread_loop(void *arg)
{
        struct epoch_tracker et;
        struct intr_thread *ithd;
        struct intr_event *ie;
        struct thread *td;
        struct proc *p;
        int wake, epoch_count;
        bool needs_epoch;

        td = curthread;
        p = td->td_proc;
        ithd = (struct intr_thread *)arg;
        KASSERT(ithd->it_thread == td,
            ("%s: ithread and proc linkage out of sync", __func__));
        ie = ithd->it_event;
        ie->ie_count = 0;
        wake = 0;

        /*
         * As long as we have interrupts outstanding, go through the
         * list of handlers, giving each one a go at it.
         */
        for (;;) {
                /*
                 * If we are an orphaned thread, then just die.
                 */
                if (ithd->it_flags & IT_DEAD) {
                        CTR3(KTR_INTR, "%s: pid %d (%s) exiting", __func__,
                            p->p_pid, td->td_name);
                        free(ithd, M_ITHREAD);
                        kthread_exit();
                }

                /*
                 * Service interrupts.  If another interrupt arrives while
                 * we are running, it will set it_need to note that we
                 * should make another pass.
                 *
                 * The load_acq part of the following cmpset ensures
                 * that the load of ih_need in ithread_execute_handlers()
                 * is ordered after the load of it_need here.
                 */
                needs_epoch =
                    (atomic_load_int(&ie->ie_hflags) & IH_NET) != 0;
                if (needs_epoch) {
                        epoch_count = 0;
                        NET_EPOCH_ENTER(et);
                }
                while (atomic_cmpset_acq_int(&ithd->it_need, 1, 0) != 0) {
                        ithread_execute_handlers(p, ie);
                        if (needs_epoch &&
                            ++epoch_count >= intr_epoch_batch) {
                                NET_EPOCH_EXIT(et);
                                epoch_count = 0;
                                NET_EPOCH_ENTER(et);
                        }
                }
                if (needs_epoch)
                        NET_EPOCH_EXIT(et);
                WITNESS_WARN(WARN_PANIC, NULL, "suspending ithread");
                mtx_assert(&Giant, MA_NOTOWNED);

                /*
                 * Processed all our interrupts.  Now get the sched
                 * lock.  This may take a while and it_need may get
                 * set again, so we have to check it again.
                 */
                thread_lock(td);
                if (atomic_load_acq_int(&ithd->it_need) == 0 &&
                    (ithd->it_flags & (IT_DEAD | IT_WAIT)) == 0) {
                        TD_SET_IWAIT(td);
                        ie->ie_count = 0;
                        mi_switch(SW_VOL | SWT_IWAIT);
                } else {
                        if (ithd->it_flags & IT_WAIT) {
                                wake = 1;
                                ithd->it_flags &= ~IT_WAIT;
                        }
                        thread_unlock(td);
                }
                if (wake) {
                        wakeup(ithd);
                        wake = 0;
                }
        }
}

/*
 * Main interrupt handling body.
 *
 * Input:
 * o ie:    the event connected to this interrupt.
 * o frame: some archs (e.g. i386) pass a frame to some
 *          handlers as their main argument.
 * Return value:
 * o 0:      everything ok.
 * o EINVAL: stray interrupt.
 */
int
intr_event_handle(struct intr_event *ie, struct trapframe *frame)
{
        struct intr_handler *ih;
        struct trapframe *oldframe;
        struct thread *td;
        int phase;
        int ret;
        bool filter, thread;

        td = curthread;

#ifdef KSTACK_USAGE_PROF
        intr_prof_stack_use(td, frame);
#endif

        /* An interrupt with no event or handlers is a stray interrupt. */
        if (ie == NULL || CK_SLIST_EMPTY(&ie->ie_handlers))
                return (EINVAL);

        /*
         * Execute fast interrupt handlers directly.
         * To support clock handlers, if a handler registers
         * with a NULL argument, then we pass it a pointer to
         * a trapframe as its argument.
         */
        td->td_intr_nesting_level++;
        filter = false;
        thread = false;
        ret = 0;
        critical_enter();
        oldframe = td->td_intr_frame;
        td->td_intr_frame = frame;

        phase = ie->ie_phase;
        atomic_add_int(&ie->ie_active[phase], 1);

        /*
         * This fence is required to ensure that no later loads are
         * re-ordered before the ie_active store.
         */
        atomic_thread_fence_seq_cst();

        CK_SLIST_FOREACH(ih, &ie->ie_handlers, ih_next) {
                if ((ih->ih_flags & IH_SUSP) != 0)
                        continue;
                if ((ie->ie_flags & IE_SOFT) != 0 && ih->ih_need == 0)
                        continue;
                if (ih->ih_filter == NULL) {
                        thread = true;
                        continue;
                }
                CTR4(KTR_INTR, "%s: exec %p(%p) for %s", __func__,
                    ih->ih_filter, ih->ih_argument == NULL ? frame :
                    ih->ih_argument, ih->ih_name);
                if (ih->ih_argument == NULL)
                        ret = ih->ih_filter(frame);
                else
                        ret = ih->ih_filter(ih->ih_argument);
#ifdef HWPMC_HOOKS
                PMC_SOFT_CALL_TF( , , intr, all, frame);
#endif
                KASSERT(ret == FILTER_STRAY ||
                    ((ret & (FILTER_SCHEDULE_THREAD | FILTER_HANDLED)) != 0 &&
                    (ret & ~(FILTER_SCHEDULE_THREAD | FILTER_HANDLED)) == 0),
                    ("%s: incorrect return value %#x from %s", __func__, ret,
                    ih->ih_name));
                filter = filter || ret == FILTER_HANDLED;
#ifdef HWPMC_HOOKS
                if (ret & FILTER_SCHEDULE_THREAD)
                        PMC_SOFT_CALL_TF( , , intr, ithread, frame);
                else if (ret & FILTER_HANDLED)
                        PMC_SOFT_CALL_TF( , , intr, filter, frame);
                else if (ret == FILTER_STRAY)
                        PMC_SOFT_CALL_TF( , , intr, stray, frame);
#endif

                /*
                 * Wrapper handler special handling:
                 *
                 * In some particular cases (like pccard and pccbb),
                 * the _real_ device handler is wrapped in a couple of
                 * functions - a filter wrapper and an ithread wrapper.
                 * In this case (and just in this case), the filter wrapper
                 * could ask the system to schedule the ithread and mask
                 * the interrupt source if the wrapped handler is composed
                 * of just an ithread handler.
                 *
                 * TODO: write a generic wrapper to avoid people rolling
                 * their own.
                 */
                if (!thread) {
                        if (ret == FILTER_SCHEDULE_THREAD)
                                thread = true;
                }
        }
        atomic_add_rel_int(&ie->ie_active[phase], -1);

        td->td_intr_frame = oldframe;

        if (thread) {
                if (ie->ie_pre_ithread != NULL)
                        ie->ie_pre_ithread(ie->ie_source);
        } else {
                if (ie->ie_post_filter != NULL)
                        ie->ie_post_filter(ie->ie_source);
        }

        /* Schedule the ithread if needed. */
        if (thread) {
                int error __unused;

                error = intr_event_schedule_thread(ie, frame);
                KASSERT(error == 0, ("bad stray interrupt"));
        }
        critical_exit();
        td->td_intr_nesting_level--;
#ifdef notyet
        /* The interrupt is not acknowledged by any filter and has no ithread. */
        if (!thread && !filter)
                return (EINVAL);
#endif
        return (0);
}
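
/*
 * Filter contract sketch (hypothetical driver names): a filter conforming
 * to the KASSERT above returns FILTER_STRAY when the interrupt is not its
 * own, and otherwise FILTER_HANDLED, FILTER_SCHEDULE_THREAD, or both.
 */
#if 0
static int
mydev_filter(void *arg)
{
        struct mydev_softc *sc = arg;

        if (!mydev_intr_pending(sc))
                return (FILTER_STRAY);          /* not our device */
        mydev_mask_intr(sc);                    /* quiet the source */
        return (FILTER_SCHEDULE_THREAD);        /* run the ithread handler */
}
#endif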

#ifdef DDB
/*
 * Dump details about an interrupt handler
 */
static void
db_dump_intrhand(struct intr_handler *ih)
{
        int comma;

        db_printf("\t%-10s ", ih->ih_name);
        switch (ih->ih_pri) {
        case PI_REALTIME:
                db_printf("CLK ");
                break;
        case PI_INTR:
                db_printf("INTR");
                break;
        default:
                if (ih->ih_pri >= PI_SOFT)
                        db_printf("SWI ");
                else
                        db_printf("%4u", ih->ih_pri);
                break;
        }
        db_printf(" ");
        if (ih->ih_filter != NULL) {
                db_printf("[F]");
                db_printsym((uintptr_t)ih->ih_filter, DB_STGY_PROC);
        }
        if (ih->ih_handler != NULL) {
                if (ih->ih_filter != NULL)
                        db_printf(",");
                db_printf("[H]");
                db_printsym((uintptr_t)ih->ih_handler, DB_STGY_PROC);
        }
        db_printf("(%p)", ih->ih_argument);
        if (ih->ih_need ||
            (ih->ih_flags & (IH_EXCLUSIVE | IH_ENTROPY | IH_DEAD |
            IH_MPSAFE)) != 0) {
                db_printf(" {");
                comma = 0;
                if (ih->ih_flags & IH_EXCLUSIVE) {
                        if (comma)
                                db_printf(", ");
                        db_printf("EXCL");
                        comma = 1;
                }
                if (ih->ih_flags & IH_ENTROPY) {
                        if (comma)
                                db_printf(", ");
                        db_printf("ENTROPY");
                        comma = 1;
                }
                if (ih->ih_flags & IH_DEAD) {
                        if (comma)
                                db_printf(", ");
                        db_printf("DEAD");
                        comma = 1;
                }
                if (ih->ih_flags & IH_MPSAFE) {
                        if (comma)
                                db_printf(", ");
                        db_printf("MPSAFE");
                        comma = 1;
                }
                if (ih->ih_need) {
                        if (comma)
                                db_printf(", ");
                        db_printf("NEED");
                }
                db_printf("}");
        }
        db_printf("\n");
}

/*
 * Dump details about an event.
 */
void
db_dump_intr_event(struct intr_event *ie, int handlers)
{
        struct intr_handler *ih;
        struct intr_thread *it;
        int comma;

        db_printf("%s ", ie->ie_fullname);
        it = ie->ie_thread;
        if (it != NULL)
                db_printf("(pid %d)", it->it_thread->td_proc->p_pid);
        else
                db_printf("(no thread)");
        if ((ie->ie_flags & (IE_SOFT | IE_ADDING_THREAD)) != 0 ||
            (it != NULL && it->it_need)) {
                db_printf(" {");
                comma = 0;
                if (ie->ie_flags & IE_SOFT) {
                        db_printf("SOFT");
                        comma = 1;
                }
                if (ie->ie_flags & IE_ADDING_THREAD) {
                        if (comma)
                                db_printf(", ");
                        db_printf("ADDING_THREAD");
                        comma = 1;
                }
                if (it != NULL && it->it_need) {
                        if (comma)
                                db_printf(", ");
                        db_printf("NEED");
                }
                db_printf("}");
        }
        db_printf("\n");

        if (handlers)
                CK_SLIST_FOREACH(ih, &ie->ie_handlers, ih_next)
                        db_dump_intrhand(ih);
}

/*
 * Dump data about interrupt handlers
 */
DB_SHOW_COMMAND_FLAGS(intr, db_show_intr, DB_CMD_MEMSAFE)
{
        struct intr_event *ie;
        int all, verbose;

        verbose = strchr(modif, 'v') != NULL;
        all = strchr(modif, 'a') != NULL;
        TAILQ_FOREACH(ie, &event_list, ie_list) {
                if (!all && CK_SLIST_EMPTY(&ie->ie_handlers))
                        continue;
                db_dump_intr_event(ie, verbose);
                if (db_pager_quit)
                        break;
        }
}
#endif /* DDB */

/*
 * Start standard software interrupt threads
 */
static void
start_softintr(void *dummy)
{

        if (swi_add(&clk_intr_event, "clk", NULL, NULL, SWI_CLOCK,
            INTR_MPSAFE, NULL))
                panic("died while creating clk swi ithread");
}
SYSINIT(start_softintr, SI_SUB_SOFTINTR, SI_ORDER_FIRST, start_softintr,
    NULL);

/*
 * Sysctls used by systat and others: hw.intrnames and hw.intrcnt.
 * The data for this is machine dependent, and the declarations are in
 * machine dependent code.  The layout of intrnames and intrcnt however is
 * machine independent.
 *
 * We do not know the length of intrcnt and intrnames at compile time, so
 * calculate things at run time.
 */
static int
sysctl_intrnames(SYSCTL_HANDLER_ARGS)
{
        return (sysctl_handle_opaque(oidp, intrnames, sintrnames, req));
}

SYSCTL_PROC(_hw, OID_AUTO, intrnames,
    CTLTYPE_OPAQUE | CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, 0,
    sysctl_intrnames, "",
    "Interrupt Names");

static int
sysctl_intrcnt(SYSCTL_HANDLER_ARGS)
{
#ifdef SCTL_MASK32
        uint32_t *intrcnt32;
        unsigned i;
        int error;

        if (req->flags & SCTL_MASK32) {
                if (!req->oldptr)
                        return (sysctl_handle_opaque(oidp, NULL, sintrcnt / 2, req));
                intrcnt32 = malloc(sintrcnt / 2, M_TEMP, M_NOWAIT);
                if (intrcnt32 == NULL)
                        return (ENOMEM);
                for (i = 0; i < sintrcnt / sizeof(u_long); i++)
                        intrcnt32[i] = intrcnt[i];
                error = sysctl_handle_opaque(oidp, intrcnt32, sintrcnt / 2, req);
                free(intrcnt32, M_TEMP);
                return (error);
        }
#endif
        return (sysctl_handle_opaque(oidp, intrcnt, sintrcnt, req));
}

SYSCTL_PROC(_hw, OID_AUTO, intrcnt,
    CTLTYPE_OPAQUE | CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, 0,
    sysctl_intrcnt, "",
    "Interrupt Counts");

#ifdef DDB
/*
 * DDB command to dump the interrupt statistics.
 */
DB_SHOW_COMMAND_FLAGS(intrcnt, db_show_intrcnt, DB_CMD_MEMSAFE)
{
        u_long *i;
        char *cp;
        u_int j;

        cp = intrnames;
        j = 0;
        for (i = intrcnt; j < (sintrcnt / sizeof(u_long)) && !db_pager_quit;
            i++, j++) {
                if (*cp == '\0')
                        break;
                if (*i != 0)
                        db_printf("%s\t%lu\n", cp, *i);
                cp += strlen(cp) + 1;
        }
}
#endif