FreeBSD/Linux Kernel Cross Reference
sys/kern/kern_intr.c


    1 /*-
    2  * Copyright (c) 1997, Stefan Esser <se@freebsd.org>
    3  * All rights reserved.
    4  *
    5  * Redistribution and use in source and binary forms, with or without
    6  * modification, are permitted provided that the following conditions
    7  * are met:
    8  * 1. Redistributions of source code must retain the above copyright
    9  *    notice unmodified, this list of conditions, and the following
   10  *    disclaimer.
   11  * 2. Redistributions in binary form must reproduce the above copyright
   12  *    notice, this list of conditions and the following disclaimer in the
   13  *    documentation and/or other materials provided with the distribution.
   14  *
   15  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
   16  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
   17  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
   18  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
   19  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
   20  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
   21  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
   22  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
   23  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
   24  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
   25  */
   26 
   27 #include <sys/cdefs.h>
   28 __FBSDID("$FreeBSD: releng/11.2/sys/kern/kern_intr.c 333338 2018-05-07 21:42:22Z shurd $");
   29 
   30 #include "opt_ddb.h"
   31 #include "opt_kstack_usage_prof.h"
   32 
   33 #include <sys/param.h>
   34 #include <sys/bus.h>
   35 #include <sys/conf.h>
   36 #include <sys/cpuset.h>
   37 #include <sys/rtprio.h>
   38 #include <sys/systm.h>
   39 #include <sys/interrupt.h>
   40 #include <sys/kernel.h>
   41 #include <sys/kthread.h>
   42 #include <sys/ktr.h>
   43 #include <sys/limits.h>
   44 #include <sys/lock.h>
   45 #include <sys/malloc.h>
   46 #include <sys/mutex.h>
   47 #include <sys/priv.h>
   48 #include <sys/proc.h>
   49 #include <sys/random.h>
   50 #include <sys/resourcevar.h>
   51 #include <sys/sched.h>
   52 #include <sys/smp.h>
   53 #include <sys/sysctl.h>
   54 #include <sys/syslog.h>
   55 #include <sys/unistd.h>
   56 #include <sys/vmmeter.h>
   57 #include <machine/atomic.h>
   58 #include <machine/cpu.h>
   59 #include <machine/md_var.h>
   60 #include <machine/stdarg.h>
   61 #ifdef DDB
   62 #include <ddb/ddb.h>
   63 #include <ddb/db_sym.h>
   64 #endif
   65 
   66 /*
   67  * Describe an interrupt thread.  There is one of these per interrupt event.
   68  */
   69 struct intr_thread {
   70         struct intr_event *it_event;
   71         struct thread *it_thread;       /* Kernel thread. */
   72         int     it_flags;               /* (j) IT_* flags. */
   73         int     it_need;                /* Needs service. */
   74 };
   75 
   76 /* Interrupt thread flags kept in it_flags */
   77 #define IT_DEAD         0x000001        /* Thread is waiting to exit. */
   78 #define IT_WAIT         0x000002        /* Thread is waiting for completion. */
   79 
   80 struct  intr_entropy {
   81         struct  thread *td;
   82         uintptr_t event;
   83 };
   84 
   85 struct  intr_event *clk_intr_event;
   86 struct  intr_event *tty_intr_event;
   87 void    *vm_ih;
   88 struct proc *intrproc;
   89 
   90 static MALLOC_DEFINE(M_ITHREAD, "ithread", "Interrupt Threads");
   91 
   92 static int intr_storm_threshold = 1000;
   93 SYSCTL_INT(_hw, OID_AUTO, intr_storm_threshold, CTLFLAG_RWTUN,
   94     &intr_storm_threshold, 0,
   95     "Number of consecutive interrupts before storm protection is enabled");
   96 static TAILQ_HEAD(, intr_event) event_list =
   97     TAILQ_HEAD_INITIALIZER(event_list);
   98 static struct mtx event_lock;
   99 MTX_SYSINIT(intr_event_list, &event_lock, "intr event list", MTX_DEF);
  100 
  101 static void     intr_event_update(struct intr_event *ie);
  102 #ifdef INTR_FILTER
  103 static int      intr_event_schedule_thread(struct intr_event *ie,
  104                     struct intr_thread *ithd);
  105 static int      intr_filter_loop(struct intr_event *ie,
  106                     struct trapframe *frame, struct intr_thread **ithd);
  107 static struct intr_thread *ithread_create(const char *name,
  108                               struct intr_handler *ih);
  109 #else
  110 static int      intr_event_schedule_thread(struct intr_event *ie);
  111 static struct intr_thread *ithread_create(const char *name);
  112 #endif
  113 static void     ithread_destroy(struct intr_thread *ithread);
  114 static void     ithread_execute_handlers(struct proc *p, 
  115                     struct intr_event *ie);
  116 #ifdef INTR_FILTER
  117 static void     priv_ithread_execute_handler(struct proc *p, 
  118                     struct intr_handler *ih);
  119 #endif
  120 static void     ithread_loop(void *);
  121 static void     ithread_update(struct intr_thread *ithd);
  122 static void     start_softintr(void *);
  123 
  124 /* Map an interrupt type to an ithread priority. */
  125 u_char
  126 intr_priority(enum intr_type flags)
  127 {
  128         u_char pri;
  129 
  130         flags &= (INTR_TYPE_TTY | INTR_TYPE_BIO | INTR_TYPE_NET |
  131             INTR_TYPE_CAM | INTR_TYPE_MISC | INTR_TYPE_CLK | INTR_TYPE_AV);
  132         switch (flags) {
  133         case INTR_TYPE_TTY:
  134                 pri = PI_TTY;
  135                 break;
  136         case INTR_TYPE_BIO:
  137                 pri = PI_DISK;
  138                 break;
  139         case INTR_TYPE_NET:
  140                 pri = PI_NET;
  141                 break;
  142         case INTR_TYPE_CAM:
  143                 pri = PI_DISK;
  144                 break;
  145         case INTR_TYPE_AV:
  146                 pri = PI_AV;
  147                 break;
  148         case INTR_TYPE_CLK:
  149                 pri = PI_REALTIME;
  150                 break;
  151         case INTR_TYPE_MISC:
  152                 pri = PI_DULL;          /* don't care */
  153                 break;
  154         default:
  155                 /* We didn't specify an interrupt level. */
  156                 panic("intr_priority: no interrupt type in flags");
  157         }
  158 
  159         return pri;
  160 }
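
/*
 * Example (illustrative): a network driver that registers its handler
 * with INTR_TYPE_NET gets an ithread priority of PI_NET:
 *
 *	u_char pri = intr_priority(INTR_TYPE_NET);	-- yields PI_NET
 *
 * Passing zero or more than one INTR_TYPE_* bit falls through to the
 * default case and panics, so callers must supply exactly one type.
 */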
  161 
  162 /*
  163  * Update an ithread based on the associated intr_event.
  164  */
  165 static void
  166 ithread_update(struct intr_thread *ithd)
  167 {
  168         struct intr_event *ie;
  169         struct thread *td;
  170         u_char pri;
  171 
  172         ie = ithd->it_event;
  173         td = ithd->it_thread;
  174 
  175         /* Determine the overall priority of this event. */
  176         if (TAILQ_EMPTY(&ie->ie_handlers))
  177                 pri = PRI_MAX_ITHD;
  178         else
  179                 pri = TAILQ_FIRST(&ie->ie_handlers)->ih_pri;
  180 
  181         /* Update name and priority. */
  182         strlcpy(td->td_name, ie->ie_fullname, sizeof(td->td_name));
  183 #ifdef KTR
  184         sched_clear_tdname(td);
  185 #endif
  186         thread_lock(td);
  187         sched_prio(td, pri);
  188         thread_unlock(td);
  189 }
  190 
  191 /*
  192  * Regenerate the full name of an interrupt event and update its priority.
  193  */
  194 static void
  195 intr_event_update(struct intr_event *ie)
  196 {
  197         struct intr_handler *ih;
  198         char *last;
  199         int missed, space;
  200 
  201         /* Start off with no entropy and just the name of the event. */
  202         mtx_assert(&ie->ie_lock, MA_OWNED);
  203         strlcpy(ie->ie_fullname, ie->ie_name, sizeof(ie->ie_fullname));
  204         ie->ie_flags &= ~IE_ENTROPY;
  205         missed = 0;
  206         space = 1;
  207 
  208         /* Run through all the handlers updating values. */
  209         TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next) {
  210                 if (strlen(ie->ie_fullname) + strlen(ih->ih_name) + 1 <
  211                     sizeof(ie->ie_fullname)) {
  212                         strcat(ie->ie_fullname, " ");
  213                         strcat(ie->ie_fullname, ih->ih_name);
  214                         space = 0;
  215                 } else
  216                         missed++;
  217                 if (ih->ih_flags & IH_ENTROPY)
  218                         ie->ie_flags |= IE_ENTROPY;
  219         }
  220 
  221         /*
  222          * If the handler names were too long, add +'s to indicate missing
  223          * names. If we run out of room and still have +'s to add, change
  224          * the last character from a + to a *.
  225          */
  226         last = &ie->ie_fullname[sizeof(ie->ie_fullname) - 2];
  227         while (missed-- > 0) {
  228                 if (strlen(ie->ie_fullname) + 1 == sizeof(ie->ie_fullname)) {
  229                         if (*last == '+') {
  230                                 *last = '*';
  231                                 break;
  232                         } else
  233                                 *last = '+';
  234                 } else if (space) {
  235                         strcat(ie->ie_fullname, " +");
  236                         space = 0;
  237                 } else
  238                         strcat(ie->ie_fullname, "+");
  239         }
  240 
  241         /*
   242          * If this event has an ithread, update its priority and
  243          * name.
  244          */
  245         if (ie->ie_thread != NULL)
  246                 ithread_update(ie->ie_thread);
  247         CTR2(KTR_INTR, "%s: updated %s", __func__, ie->ie_fullname);
  248 }
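
/*
 * Example (illustrative): for an event named "irq10:" with handlers
 * "em0" and "uhci0", ie_fullname becomes "irq10: em0 uhci0".  Once a
 * handler name no longer fits, " +" is appended for the first missed
 * name and "+" for each further one; when the buffer fills, the final
 * character is overwritten with '+', and with still more names missing
 * it becomes '*'.
 */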
  249 
  250 int
  251 intr_event_create(struct intr_event **event, void *source, int flags, int irq,
  252     void (*pre_ithread)(void *), void (*post_ithread)(void *),
  253     void (*post_filter)(void *), int (*assign_cpu)(void *, int),
  254     const char *fmt, ...)
  255 {
  256         struct intr_event *ie;
  257         va_list ap;
  258 
  259         /* The only valid flag during creation is IE_SOFT. */
  260         if ((flags & ~IE_SOFT) != 0)
  261                 return (EINVAL);
  262         ie = malloc(sizeof(struct intr_event), M_ITHREAD, M_WAITOK | M_ZERO);
  263         ie->ie_source = source;
  264         ie->ie_pre_ithread = pre_ithread;
  265         ie->ie_post_ithread = post_ithread;
  266         ie->ie_post_filter = post_filter;
  267         ie->ie_assign_cpu = assign_cpu;
  268         ie->ie_flags = flags;
  269         ie->ie_irq = irq;
  270         ie->ie_cpu = NOCPU;
  271         TAILQ_INIT(&ie->ie_handlers);
  272         mtx_init(&ie->ie_lock, "intr event", NULL, MTX_DEF);
  273 
  274         va_start(ap, fmt);
  275         vsnprintf(ie->ie_name, sizeof(ie->ie_name), fmt, ap);
  276         va_end(ap);
  277         strlcpy(ie->ie_fullname, ie->ie_name, sizeof(ie->ie_fullname));
  278         mtx_lock(&event_lock);
  279         TAILQ_INSERT_TAIL(&event_list, ie, ie_list);
  280         mtx_unlock(&event_lock);
  281         if (event != NULL)
  282                 *event = ie;
  283         CTR2(KTR_INTR, "%s: created %s", __func__, ie->ie_name);
  284         return (0);
  285 }
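
/*
 * Sketch (illustrative, hypothetical callback names): an interrupt
 * controller driver typically creates one event per IRQ line:
 *
 *	struct intr_event *ie;
 *	error = intr_event_create(&ie, sc, 0, irq,
 *	    my_pre_ithread, my_post_ithread, my_post_filter,
 *	    my_assign_cpu, "irq%d:", irq);
 *
 * The format arguments become ie_name, which seeds ie_fullname until
 * handlers are added.
 */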
  286 
  287 /*
  288  * Bind an interrupt event to the specified CPU.  Note that not all
  289  * platforms support binding an interrupt to a CPU.  For those
  290  * platforms this request will fail.  Using a cpu id of NOCPU unbinds
  291  * the interrupt event.
  292  */
  293 static int
  294 _intr_event_bind(struct intr_event *ie, int cpu, bool bindirq, bool bindithread)
  295 {
  296         lwpid_t id;
  297         int error;
  298 
  299         /* Need a CPU to bind to. */
  300         if (cpu != NOCPU && CPU_ABSENT(cpu))
  301                 return (EINVAL);
  302 
  303         if (ie->ie_assign_cpu == NULL)
  304                 return (EOPNOTSUPP);
  305 
  306         error = priv_check(curthread, PRIV_SCHED_CPUSET_INTR);
  307         if (error)
  308                 return (error);
  309 
  310         /*
  311          * If we have any ithreads try to set their mask first to verify
  312          * permissions, etc.
  313          */
  314         if (bindithread) {
  315                 mtx_lock(&ie->ie_lock);
  316                 if (ie->ie_thread != NULL) {
  317                         id = ie->ie_thread->it_thread->td_tid;
  318                         mtx_unlock(&ie->ie_lock);
  319                         error = cpuset_setithread(id, cpu);
  320                         if (error)
  321                                 return (error);
  322                 } else
  323                         mtx_unlock(&ie->ie_lock);
  324         }
  325         if (bindirq)
  326                 error = ie->ie_assign_cpu(ie->ie_source, cpu);
  327         if (error) {
  328                 if (bindithread) {
  329                         mtx_lock(&ie->ie_lock);
  330                         if (ie->ie_thread != NULL) {
  331                                 cpu = ie->ie_cpu;
  332                                 id = ie->ie_thread->it_thread->td_tid;
  333                                 mtx_unlock(&ie->ie_lock);
  334                                 (void)cpuset_setithread(id, cpu);
  335                         } else
  336                                 mtx_unlock(&ie->ie_lock);
  337                 }
  338                 return (error);
  339         }
  340 
  341         if (bindirq) {
  342                 mtx_lock(&ie->ie_lock);
  343                 ie->ie_cpu = cpu;
  344                 mtx_unlock(&ie->ie_lock);
  345         }
  346 
  347         return (error);
  348 }
  349 
  350 /*
  351  * Bind an interrupt event to the specified CPU.  For supported platforms, any
  352  * associated ithreads as well as the primary interrupt context will be bound
   353  * to the specified CPU.
  354  */
  355 int
  356 intr_event_bind(struct intr_event *ie, int cpu)
  357 {
  358 
  359         return (_intr_event_bind(ie, cpu, true, true));
  360 }
  361 
  362 /*
  363  * Bind an interrupt event to the specified CPU, but do not bind associated
  364  * ithreads.
  365  */
  366 int
  367 intr_event_bind_irqonly(struct intr_event *ie, int cpu)
  368 {
  369 
  370         return (_intr_event_bind(ie, cpu, true, false));
  371 }
  372 
  373 /*
  374  * Bind an interrupt event's ithread to the specified CPU.
  375  */
  376 int
  377 intr_event_bind_ithread(struct intr_event *ie, int cpu)
  378 {
  379 
  380         return (_intr_event_bind(ie, cpu, false, true));
  381 }
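
/*
 * Example (illustrative): binding an event's interrupt and ithread to
 * CPU 2, then unbinding it again:
 *
 *	error = intr_event_bind(ie, 2);
 *	error = intr_event_bind(ie, NOCPU);
 *
 * On platforms whose events lack an ie_assign_cpu method, both calls
 * fail with EOPNOTSUPP.
 */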
  382 
  383 static struct intr_event *
  384 intr_lookup(int irq)
  385 {
  386         struct intr_event *ie;
  387 
  388         mtx_lock(&event_lock);
  389         TAILQ_FOREACH(ie, &event_list, ie_list)
  390                 if (ie->ie_irq == irq &&
  391                     (ie->ie_flags & IE_SOFT) == 0 &&
  392                     TAILQ_FIRST(&ie->ie_handlers) != NULL)
  393                         break;
  394         mtx_unlock(&event_lock);
  395         return (ie);
  396 }
  397 
  398 int
  399 intr_setaffinity(int irq, int mode, void *m)
  400 {
  401         struct intr_event *ie;
  402         cpuset_t *mask;
  403         int cpu, n;
  404 
  405         mask = m;
  406         cpu = NOCPU;
  407         /*
  408          * If we're setting all cpus we can unbind.  Otherwise make sure
  409          * only one cpu is in the set.
  410          */
  411         if (CPU_CMP(cpuset_root, mask)) {
  412                 for (n = 0; n < CPU_SETSIZE; n++) {
  413                         if (!CPU_ISSET(n, mask))
  414                                 continue;
  415                         if (cpu != NOCPU)
  416                                 return (EINVAL);
  417                         cpu = n;
  418                 }
  419         }
  420         ie = intr_lookup(irq);
  421         if (ie == NULL)
  422                 return (ESRCH);
  423         switch (mode) {
  424         case CPU_WHICH_IRQ:
  425                 return (intr_event_bind(ie, cpu));
  426         case CPU_WHICH_INTRHANDLER:
  427                 return (intr_event_bind_irqonly(ie, cpu));
  428         case CPU_WHICH_ITHREAD:
  429                 return (intr_event_bind_ithread(ie, cpu));
  430         default:
  431                 return (EINVAL);
  432         }
  433 }
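
/*
 * Example (illustrative): userland reaches intr_setaffinity() through
 * cpuset_setaffinity(2) with a single-CPU mask:
 *
 *	cpuset_t mask;
 *	CPU_ZERO(&mask);
 *	CPU_SET(2, &mask);
 *	cpuset_setaffinity(CPU_LEVEL_WHICH, CPU_WHICH_IRQ, irq,
 *	    sizeof(mask), &mask);
 *
 * A mask equal to cpuset_root unbinds; a mask with more than one CPU
 * set is rejected with EINVAL.
 */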
  434 
  435 int
  436 intr_getaffinity(int irq, int mode, void *m)
  437 {
  438         struct intr_event *ie;
  439         struct thread *td;
  440         struct proc *p;
  441         cpuset_t *mask;
  442         lwpid_t id;
  443         int error;
  444 
  445         mask = m;
  446         ie = intr_lookup(irq);
  447         if (ie == NULL)
  448                 return (ESRCH);
  449 
  450         error = 0;
  451         CPU_ZERO(mask);
  452         switch (mode) {
  453         case CPU_WHICH_IRQ:
  454         case CPU_WHICH_INTRHANDLER:
  455                 mtx_lock(&ie->ie_lock);
  456                 if (ie->ie_cpu == NOCPU)
  457                         CPU_COPY(cpuset_root, mask);
  458                 else
  459                         CPU_SET(ie->ie_cpu, mask);
  460                 mtx_unlock(&ie->ie_lock);
  461                 break;
  462         case CPU_WHICH_ITHREAD:
  463                 mtx_lock(&ie->ie_lock);
  464                 if (ie->ie_thread == NULL) {
  465                         mtx_unlock(&ie->ie_lock);
  466                         CPU_COPY(cpuset_root, mask);
  467                 } else {
  468                         id = ie->ie_thread->it_thread->td_tid;
  469                         mtx_unlock(&ie->ie_lock);
  470                         error = cpuset_which(CPU_WHICH_TID, id, &p, &td, NULL);
  471                         if (error != 0)
  472                                 return (error);
  473                         CPU_COPY(&td->td_cpuset->cs_mask, mask);
  474                         PROC_UNLOCK(p);
   475                 }
                       break;
   476         default:
  477                 return (EINVAL);
  478         }
  479         return (0);
  480 }
  481 
  482 int
  483 intr_event_destroy(struct intr_event *ie)
  484 {
  485 
  486         mtx_lock(&event_lock);
  487         mtx_lock(&ie->ie_lock);
  488         if (!TAILQ_EMPTY(&ie->ie_handlers)) {
  489                 mtx_unlock(&ie->ie_lock);
  490                 mtx_unlock(&event_lock);
  491                 return (EBUSY);
  492         }
  493         TAILQ_REMOVE(&event_list, ie, ie_list);
  494 #ifndef notyet
  495         if (ie->ie_thread != NULL) {
  496                 ithread_destroy(ie->ie_thread);
  497                 ie->ie_thread = NULL;
  498         }
  499 #endif
  500         mtx_unlock(&ie->ie_lock);
  501         mtx_unlock(&event_lock);
  502         mtx_destroy(&ie->ie_lock);
  503         free(ie, M_ITHREAD);
  504         return (0);
  505 }
  506 
  507 #ifndef INTR_FILTER
  508 static struct intr_thread *
  509 ithread_create(const char *name)
  510 {
  511         struct intr_thread *ithd;
  512         struct thread *td;
  513         int error;
  514 
  515         ithd = malloc(sizeof(struct intr_thread), M_ITHREAD, M_WAITOK | M_ZERO);
  516 
  517         error = kproc_kthread_add(ithread_loop, ithd, &intrproc,
  518                     &td, RFSTOPPED | RFHIGHPID,
  519                     0, "intr", "%s", name);
  520         if (error)
   521                 panic("kproc_kthread_add() failed with %d", error);
  522         thread_lock(td);
  523         sched_class(td, PRI_ITHD);
  524         TD_SET_IWAIT(td);
  525         thread_unlock(td);
  526         td->td_pflags |= TDP_ITHREAD;
  527         ithd->it_thread = td;
  528         CTR2(KTR_INTR, "%s: created %s", __func__, name);
  529         return (ithd);
  530 }
  531 #else
  532 static struct intr_thread *
  533 ithread_create(const char *name, struct intr_handler *ih)
  534 {
  535         struct intr_thread *ithd;
  536         struct thread *td;
  537         int error;
  538 
  539         ithd = malloc(sizeof(struct intr_thread), M_ITHREAD, M_WAITOK | M_ZERO);
  540 
  541         error = kproc_kthread_add(ithread_loop, ih, &intrproc,
  542                     &td, RFSTOPPED | RFHIGHPID,
  543                     0, "intr", "%s", name);
  544         if (error)
   545                 panic("kproc_kthread_add() failed with %d", error);
  546         thread_lock(td);
  547         sched_class(td, PRI_ITHD);
  548         TD_SET_IWAIT(td);
  549         thread_unlock(td);
  550         td->td_pflags |= TDP_ITHREAD;
  551         ithd->it_thread = td;
  552         CTR2(KTR_INTR, "%s: created %s", __func__, name);
  553         return (ithd);
  554 }
  555 #endif
  556 
  557 static void
  558 ithread_destroy(struct intr_thread *ithread)
  559 {
  560         struct thread *td;
  561 
  562         CTR2(KTR_INTR, "%s: killing %s", __func__, ithread->it_event->ie_name);
  563         td = ithread->it_thread;
  564         thread_lock(td);
  565         ithread->it_flags |= IT_DEAD;
  566         if (TD_AWAITING_INTR(td)) {
  567                 TD_CLR_IWAIT(td);
  568                 sched_add(td, SRQ_INTR);
  569         }
  570         thread_unlock(td);
  571 }
  572 
  573 #ifndef INTR_FILTER
  574 int
  575 intr_event_add_handler(struct intr_event *ie, const char *name,
  576     driver_filter_t filter, driver_intr_t handler, void *arg, u_char pri,
  577     enum intr_type flags, void **cookiep)
  578 {
  579         struct intr_handler *ih, *temp_ih;
  580         struct intr_thread *it;
  581 
  582         if (ie == NULL || name == NULL || (handler == NULL && filter == NULL))
  583                 return (EINVAL);
  584 
  585         /* Allocate and populate an interrupt handler structure. */
  586         ih = malloc(sizeof(struct intr_handler), M_ITHREAD, M_WAITOK | M_ZERO);
  587         ih->ih_filter = filter;
  588         ih->ih_handler = handler;
  589         ih->ih_argument = arg;
  590         strlcpy(ih->ih_name, name, sizeof(ih->ih_name));
  591         ih->ih_event = ie;
  592         ih->ih_pri = pri;
  593         if (flags & INTR_EXCL)
  594                 ih->ih_flags = IH_EXCLUSIVE;
  595         if (flags & INTR_MPSAFE)
  596                 ih->ih_flags |= IH_MPSAFE;
  597         if (flags & INTR_ENTROPY)
  598                 ih->ih_flags |= IH_ENTROPY;
  599 
   600         /* We can only have one exclusive handler in an event. */
  601         mtx_lock(&ie->ie_lock);
  602         if (!TAILQ_EMPTY(&ie->ie_handlers)) {
  603                 if ((flags & INTR_EXCL) ||
  604                     (TAILQ_FIRST(&ie->ie_handlers)->ih_flags & IH_EXCLUSIVE)) {
  605                         mtx_unlock(&ie->ie_lock);
  606                         free(ih, M_ITHREAD);
  607                         return (EINVAL);
  608                 }
  609         }
  610 
  611         /* Create a thread if we need one. */
  612         while (ie->ie_thread == NULL && handler != NULL) {
  613                 if (ie->ie_flags & IE_ADDING_THREAD)
  614                         msleep(ie, &ie->ie_lock, 0, "ithread", 0);
  615                 else {
  616                         ie->ie_flags |= IE_ADDING_THREAD;
  617                         mtx_unlock(&ie->ie_lock);
  618                         it = ithread_create("intr: newborn");
  619                         mtx_lock(&ie->ie_lock);
  620                         ie->ie_flags &= ~IE_ADDING_THREAD;
  621                         ie->ie_thread = it;
  622                         it->it_event = ie;
  623                         ithread_update(it);
  624                         wakeup(ie);
  625                 }
  626         }
  627 
  628         /* Add the new handler to the event in priority order. */
  629         TAILQ_FOREACH(temp_ih, &ie->ie_handlers, ih_next) {
  630                 if (temp_ih->ih_pri > ih->ih_pri)
  631                         break;
  632         }
  633         if (temp_ih == NULL)
  634                 TAILQ_INSERT_TAIL(&ie->ie_handlers, ih, ih_next);
  635         else
  636                 TAILQ_INSERT_BEFORE(temp_ih, ih, ih_next);
  637         intr_event_update(ie);
  638 
  639         CTR3(KTR_INTR, "%s: added %s to %s", __func__, ih->ih_name,
  640             ie->ie_name);
  641         mtx_unlock(&ie->ie_lock);
  642 
  643         if (cookiep != NULL)
  644                 *cookiep = ih;
  645         return (0);
  646 }
  647 #else
  648 int
  649 intr_event_add_handler(struct intr_event *ie, const char *name,
  650     driver_filter_t filter, driver_intr_t handler, void *arg, u_char pri,
  651     enum intr_type flags, void **cookiep)
  652 {
  653         struct intr_handler *ih, *temp_ih;
  654         struct intr_thread *it;
  655 
  656         if (ie == NULL || name == NULL || (handler == NULL && filter == NULL))
  657                 return (EINVAL);
  658 
  659         /* Allocate and populate an interrupt handler structure. */
  660         ih = malloc(sizeof(struct intr_handler), M_ITHREAD, M_WAITOK | M_ZERO);
  661         ih->ih_filter = filter;
  662         ih->ih_handler = handler;
  663         ih->ih_argument = arg;
  664         strlcpy(ih->ih_name, name, sizeof(ih->ih_name));
  665         ih->ih_event = ie;
  666         ih->ih_pri = pri;
  667         if (flags & INTR_EXCL)
  668                 ih->ih_flags = IH_EXCLUSIVE;
  669         if (flags & INTR_MPSAFE)
  670                 ih->ih_flags |= IH_MPSAFE;
  671         if (flags & INTR_ENTROPY)
  672                 ih->ih_flags |= IH_ENTROPY;
  673 
   674         /* We can only have one exclusive handler in an event. */
  675         mtx_lock(&ie->ie_lock);
  676         if (!TAILQ_EMPTY(&ie->ie_handlers)) {
  677                 if ((flags & INTR_EXCL) ||
  678                     (TAILQ_FIRST(&ie->ie_handlers)->ih_flags & IH_EXCLUSIVE)) {
  679                         mtx_unlock(&ie->ie_lock);
  680                         free(ih, M_ITHREAD);
  681                         return (EINVAL);
  682                 }
  683         }
  684 
  685         /* For filtered handlers, create a private ithread to run on. */
  686         if (filter != NULL && handler != NULL) {
  687                 mtx_unlock(&ie->ie_lock);
  688                 it = ithread_create("intr: newborn", ih);
  689                 mtx_lock(&ie->ie_lock);
  690                 it->it_event = ie;
  691                 ih->ih_thread = it;
  692                 ithread_update(it); /* XXX - do we really need this?!?!? */
  693         } else { /* Create the global per-event thread if we need one. */
  694                 while (ie->ie_thread == NULL && handler != NULL) {
  695                         if (ie->ie_flags & IE_ADDING_THREAD)
  696                                 msleep(ie, &ie->ie_lock, 0, "ithread", 0);
  697                         else {
  698                                 ie->ie_flags |= IE_ADDING_THREAD;
  699                                 mtx_unlock(&ie->ie_lock);
  700                                 it = ithread_create("intr: newborn", ih);
  701                                 mtx_lock(&ie->ie_lock);
  702                                 ie->ie_flags &= ~IE_ADDING_THREAD;
  703                                 ie->ie_thread = it;
  704                                 it->it_event = ie;
  705                                 ithread_update(it);
  706                                 wakeup(ie);
  707                         }
  708                 }
  709         }
  710 
  711         /* Add the new handler to the event in priority order. */
  712         TAILQ_FOREACH(temp_ih, &ie->ie_handlers, ih_next) {
  713                 if (temp_ih->ih_pri > ih->ih_pri)
  714                         break;
  715         }
  716         if (temp_ih == NULL)
  717                 TAILQ_INSERT_TAIL(&ie->ie_handlers, ih, ih_next);
  718         else
  719                 TAILQ_INSERT_BEFORE(temp_ih, ih, ih_next);
  720         intr_event_update(ie);
  721 
  722         CTR3(KTR_INTR, "%s: added %s to %s", __func__, ih->ih_name,
  723             ie->ie_name);
  724         mtx_unlock(&ie->ie_lock);
  725 
  726         if (cookiep != NULL)
  727                 *cookiep = ih;
  728         return (0);
  729 }
  730 #endif
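
/*
 * Sketch (illustrative, hypothetical driver names): a typical
 * registration of a threaded MPSAFE handler with no filter:
 *
 *	void *cookie;
 *	error = intr_event_add_handler(ie, "em0", NULL, em_intr, sc,
 *	    intr_priority(INTR_TYPE_NET), INTR_TYPE_NET | INTR_MPSAFE,
 *	    &cookie);
 *
 * The cookie is what later identifies the handler to
 * intr_event_remove_handler() and intr_event_describe_handler().
 */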
  731 
  732 /*
  733  * Append a description preceded by a ':' to the name of the specified
  734  * interrupt handler.
  735  */
  736 int
  737 intr_event_describe_handler(struct intr_event *ie, void *cookie,
  738     const char *descr)
  739 {
  740         struct intr_handler *ih;
  741         size_t space;
  742         char *start;
  743 
  744         mtx_lock(&ie->ie_lock);
  745 #ifdef INVARIANTS
  746         TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next) {
  747                 if (ih == cookie)
  748                         break;
  749         }
  750         if (ih == NULL) {
  751                 mtx_unlock(&ie->ie_lock);
  752                 panic("handler %p not found in interrupt event %p", cookie, ie);
  753         }
  754 #endif
  755         ih = cookie;
  756 
  757         /*
  758          * Look for an existing description by checking for an
  759          * existing ":".  This assumes device names do not include
  760          * colons.  If one is found, prepare to insert the new
  761          * description at that point.  If one is not found, find the
  762          * end of the name to use as the insertion point.
  763          */
  764         start = strchr(ih->ih_name, ':');
  765         if (start == NULL)
  766                 start = strchr(ih->ih_name, 0);
  767 
  768         /*
  769          * See if there is enough remaining room in the string for the
  770          * description + ":".  The "- 1" leaves room for the trailing
  771          * '\0'.  The "+ 1" accounts for the colon.
  772          */
  773         space = sizeof(ih->ih_name) - (start - ih->ih_name) - 1;
  774         if (strlen(descr) + 1 > space) {
  775                 mtx_unlock(&ie->ie_lock);
  776                 return (ENOSPC);
  777         }
  778 
  779         /* Append a colon followed by the description. */
  780         *start = ':';
  781         strcpy(start + 1, descr);
  782         intr_event_update(ie);
  783         mtx_unlock(&ie->ie_lock);
  784         return (0);
  785 }
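
/*
 * Example (illustrative): describing a handler named "em0" with "rx"
 * turns its name into "em0:rx"; a later call replaces everything after
 * the ':' rather than appending:
 *
 *	error = intr_event_describe_handler(ie, cookie, "rx");
 */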
  786 
  787 /*
  788  * Return the ie_source field from the intr_event an intr_handler is
  789  * associated with.
  790  */
  791 void *
  792 intr_handler_source(void *cookie)
  793 {
  794         struct intr_handler *ih;
  795         struct intr_event *ie;
  796 
  797         ih = (struct intr_handler *)cookie;
  798         if (ih == NULL)
  799                 return (NULL);
  800         ie = ih->ih_event;
  801         KASSERT(ie != NULL,
  802             ("interrupt handler \"%s\" has a NULL interrupt event",
  803             ih->ih_name));
  804         return (ie->ie_source);
  805 }
  806 
  807 /*
  808  * Sleep until an ithread finishes executing an interrupt handler.
  809  *
  810  * XXX Doesn't currently handle interrupt filters or fast interrupt
  811  * handlers.  This is intended for compatibility with linux drivers
  812  * only.  Do not use in BSD code.
  813  */
  814 void
  815 _intr_drain(int irq)
  816 {
  817         struct intr_event *ie;
  818         struct intr_thread *ithd;
  819         struct thread *td;
  820 
  821         ie = intr_lookup(irq);
  822         if (ie == NULL)
  823                 return;
  824         if (ie->ie_thread == NULL)
  825                 return;
  826         ithd = ie->ie_thread;
  827         td = ithd->it_thread;
  828         /*
  829          * We set the flag and wait for it to be cleared to avoid
  830          * long delays with potentially busy interrupt handlers
  831          * were we to only sample TD_AWAITING_INTR() every tick.
  832          */
  833         thread_lock(td);
  834         if (!TD_AWAITING_INTR(td)) {
  835                 ithd->it_flags |= IT_WAIT;
  836                 while (ithd->it_flags & IT_WAIT) {
  837                         thread_unlock(td);
  838                         pause("idrain", 1);
  839                         thread_lock(td);
  840                 }
  841         }
  842         thread_unlock(td);
  843         return;
  844 }
  845 
  846 
  847 #ifndef INTR_FILTER
  848 int
  849 intr_event_remove_handler(void *cookie)
  850 {
  851         struct intr_handler *handler = (struct intr_handler *)cookie;
  852         struct intr_event *ie;
  853 #ifdef INVARIANTS
  854         struct intr_handler *ih;
  855 #endif
  856 #ifdef notyet
  857         int dead;
  858 #endif
  859 
  860         if (handler == NULL)
  861                 return (EINVAL);
  862         ie = handler->ih_event;
  863         KASSERT(ie != NULL,
  864             ("interrupt handler \"%s\" has a NULL interrupt event",
  865             handler->ih_name));
  866         mtx_lock(&ie->ie_lock);
  867         CTR3(KTR_INTR, "%s: removing %s from %s", __func__, handler->ih_name,
  868             ie->ie_name);
  869 #ifdef INVARIANTS
  870         TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next)
  871                 if (ih == handler)
  872                         goto ok;
  873         mtx_unlock(&ie->ie_lock);
  874         panic("interrupt handler \"%s\" not found in interrupt event \"%s\"",
  875             ih->ih_name, ie->ie_name);
  876 ok:
  877 #endif
  878         /*
  879          * If there is no ithread, then just remove the handler and return.
  880          * XXX: Note that an INTR_FAST handler might be running on another
  881          * CPU!
  882          */
  883         if (ie->ie_thread == NULL) {
  884                 TAILQ_REMOVE(&ie->ie_handlers, handler, ih_next);
  885                 mtx_unlock(&ie->ie_lock);
  886                 free(handler, M_ITHREAD);
  887                 return (0);
  888         }
  889 
  890         /*
  891          * If the interrupt thread is already running, then just mark this
  892          * handler as being dead and let the ithread do the actual removal.
  893          *
  894          * During a cold boot while cold is set, msleep() does not sleep,
  895          * so we have to remove the handler here rather than letting the
  896          * thread do it.
  897          */
  898         thread_lock(ie->ie_thread->it_thread);
  899         if (!TD_AWAITING_INTR(ie->ie_thread->it_thread) && !cold) {
  900                 handler->ih_flags |= IH_DEAD;
  901 
  902                 /*
  903                  * Ensure that the thread will process the handler list
  904                  * again and remove this handler if it has already passed
  905                  * it on the list.
  906                  *
  907                  * The release part of the following store ensures
  908                  * that the update of ih_flags is ordered before the
  909                  * it_need setting.  See the comment before
  910                  * atomic_cmpset_acq(&ithd->it_need, ...) operation in
  911                  * the ithread_execute_handlers().
  912                  */
  913                 atomic_store_rel_int(&ie->ie_thread->it_need, 1);
  914         } else
  915                 TAILQ_REMOVE(&ie->ie_handlers, handler, ih_next);
  916         thread_unlock(ie->ie_thread->it_thread);
  917         while (handler->ih_flags & IH_DEAD)
  918                 msleep(handler, &ie->ie_lock, 0, "iev_rmh", 0);
  919         intr_event_update(ie);
  920 #ifdef notyet
  921         /*
   922          * XXX: This could be bad in the case of ppbus(4).  Also, I think
  923          * this could lead to races of stale data when servicing an
  924          * interrupt.
  925          */
  926         dead = 1;
  927         TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next) {
  928                 if (!(ih->ih_flags & IH_FAST)) {
  929                         dead = 0;
  930                         break;
  931                 }
  932         }
  933         if (dead) {
  934                 ithread_destroy(ie->ie_thread);
  935                 ie->ie_thread = NULL;
  936         }
  937 #endif
  938         mtx_unlock(&ie->ie_lock);
  939         free(handler, M_ITHREAD);
  940         return (0);
  941 }
  942 
  943 static int
  944 intr_event_schedule_thread(struct intr_event *ie)
  945 {
  946         struct intr_entropy entropy;
  947         struct intr_thread *it;
  948         struct thread *td;
  949         struct thread *ctd;
  950         struct proc *p;
  951 
  952         /*
  953          * If no ithread or no handlers, then we have a stray interrupt.
  954          */
  955         if (ie == NULL || TAILQ_EMPTY(&ie->ie_handlers) ||
  956             ie->ie_thread == NULL)
  957                 return (EINVAL);
  958 
  959         ctd = curthread;
  960         it = ie->ie_thread;
  961         td = it->it_thread;
  962         p = td->td_proc;
  963 
  964         /*
  965          * If any of the handlers for this ithread claim to be good
  966          * sources of entropy, then gather some.
  967          */
  968         if (ie->ie_flags & IE_ENTROPY) {
  969                 entropy.event = (uintptr_t)ie;
  970                 entropy.td = ctd;
  971                 random_harvest_queue(&entropy, sizeof(entropy), 2, RANDOM_INTERRUPT);
  972         }
  973 
  974         KASSERT(p != NULL, ("ithread %s has no process", ie->ie_name));
  975 
  976         /*
  977          * Set it_need to tell the thread to keep running if it is already
  978          * running.  Then, lock the thread and see if we actually need to
  979          * put it on the runqueue.
  980          *
  981          * Use store_rel to arrange that the store to ih_need in
  982          * swi_sched() is before the store to it_need and prepare for
  983          * transfer of this order to loads in the ithread.
  984          */
  985         atomic_store_rel_int(&it->it_need, 1);
  986         thread_lock(td);
  987         if (TD_AWAITING_INTR(td)) {
  988                 CTR3(KTR_INTR, "%s: schedule pid %d (%s)", __func__, p->p_pid,
  989                     td->td_name);
  990                 TD_CLR_IWAIT(td);
  991                 sched_add(td, SRQ_INTR);
  992         } else {
  993                 CTR5(KTR_INTR, "%s: pid %d (%s): it_need %d, state %d",
  994                     __func__, p->p_pid, td->td_name, it->it_need, td->td_state);
  995         }
  996         thread_unlock(td);
  997 
  998         return (0);
  999 }
 1000 #else
 1001 int
 1002 intr_event_remove_handler(void *cookie)
 1003 {
 1004         struct intr_handler *handler = (struct intr_handler *)cookie;
 1005         struct intr_event *ie;
 1006         struct intr_thread *it;
 1007 #ifdef INVARIANTS
 1008         struct intr_handler *ih;
 1009 #endif
 1010 #ifdef notyet
 1011         int dead;
 1012 #endif
 1013 
 1014         if (handler == NULL)
 1015                 return (EINVAL);
 1016         ie = handler->ih_event;
 1017         KASSERT(ie != NULL,
 1018             ("interrupt handler \"%s\" has a NULL interrupt event",
 1019             handler->ih_name));
 1020         mtx_lock(&ie->ie_lock);
 1021         CTR3(KTR_INTR, "%s: removing %s from %s", __func__, handler->ih_name,
 1022             ie->ie_name);
 1023 #ifdef INVARIANTS
 1024         TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next)
 1025                 if (ih == handler)
 1026                         goto ok;
 1027         mtx_unlock(&ie->ie_lock);
 1028         panic("interrupt handler \"%s\" not found in interrupt event \"%s\"",
 1029             ih->ih_name, ie->ie_name);
 1030 ok:
 1031 #endif
 1032         /*
 1033          * If there are no ithreads (per event and per handler), then
 1034          * just remove the handler and return.  
 1035          * XXX: Note that an INTR_FAST handler might be running on another CPU!
 1036          */
 1037         if (ie->ie_thread == NULL && handler->ih_thread == NULL) {
 1038                 TAILQ_REMOVE(&ie->ie_handlers, handler, ih_next);
 1039                 mtx_unlock(&ie->ie_lock);
 1040                 free(handler, M_ITHREAD);
 1041                 return (0);
 1042         }
 1043 
 1044         /* Private or global ithread? */
 1045         it = (handler->ih_thread) ? handler->ih_thread : ie->ie_thread;
 1046         /*
 1047          * If the interrupt thread is already running, then just mark this
 1048          * handler as being dead and let the ithread do the actual removal.
 1049          *
 1050          * During a cold boot while cold is set, msleep() does not sleep,
 1051          * so we have to remove the handler here rather than letting the
 1052          * thread do it.
 1053          */
 1054         thread_lock(it->it_thread);
 1055         if (!TD_AWAITING_INTR(it->it_thread) && !cold) {
 1056                 handler->ih_flags |= IH_DEAD;
 1057 
 1058                 /*
 1059                  * Ensure that the thread will process the handler list
 1060                  * again and remove this handler if it has already passed
 1061                  * it on the list.
 1062                  *
 1063                  * The release part of the following store ensures
 1064                  * that the update of ih_flags is ordered before the
 1065                  * it_need setting.  See the comment before
 1066                  * atomic_cmpset_acq(&ithd->it_need, ...) operation in
 1067                  * the ithread_execute_handlers().
 1068                  */
 1069                 atomic_store_rel_int(&it->it_need, 1);
 1070         } else
 1071                 TAILQ_REMOVE(&ie->ie_handlers, handler, ih_next);
 1072         thread_unlock(it->it_thread);
 1073         while (handler->ih_flags & IH_DEAD)
 1074                 msleep(handler, &ie->ie_lock, 0, "iev_rmh", 0);
 1075         /* 
 1076          * At this point, the handler has been disconnected from the event,
 1077          * so we can kill the private ithread if any.
 1078          */
 1079         if (handler->ih_thread) {
 1080                 ithread_destroy(handler->ih_thread);
 1081                 handler->ih_thread = NULL;
 1082         }
 1083         intr_event_update(ie);
 1084 #ifdef notyet
 1085         /*
  1086          * XXX: This could be bad in the case of ppbus(4).  Also, I think
 1087          * this could lead to races of stale data when servicing an
 1088          * interrupt.
 1089          */
 1090         dead = 1;
 1091         TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next) {
 1092                 if (handler != NULL) {
 1093                         dead = 0;
 1094                         break;
 1095                 }
 1096         }
 1097         if (dead) {
 1098                 ithread_destroy(ie->ie_thread);
 1099                 ie->ie_thread = NULL;
 1100         }
 1101 #endif
 1102         mtx_unlock(&ie->ie_lock);
 1103         free(handler, M_ITHREAD);
 1104         return (0);
 1105 }
 1106 
 1107 static int
 1108 intr_event_schedule_thread(struct intr_event *ie, struct intr_thread *it)
 1109 {
 1110         struct intr_entropy entropy;
 1111         struct thread *td;
 1112         struct thread *ctd;
 1113         struct proc *p;
 1114 
 1115         /*
 1116          * If no ithread or no handlers, then we have a stray interrupt.
 1117          */
 1118         if (ie == NULL || TAILQ_EMPTY(&ie->ie_handlers) || it == NULL)
 1119                 return (EINVAL);
 1120 
 1121         ctd = curthread;
 1122         td = it->it_thread;
 1123         p = td->td_proc;
 1124 
 1125         /*
 1126          * If any of the handlers for this ithread claim to be good
 1127          * sources of entropy, then gather some.
 1128          */
 1129         if (ie->ie_flags & IE_ENTROPY) {
 1130                 entropy.event = (uintptr_t)ie;
 1131                 entropy.td = ctd;
 1132                 random_harvest_queue(&entropy, sizeof(entropy), 2, RANDOM_INTERRUPT);
 1133         }
 1134 
 1135         KASSERT(p != NULL, ("ithread %s has no process", ie->ie_name));
 1136 
 1137         /*
 1138          * Set it_need to tell the thread to keep running if it is already
 1139          * running.  Then, lock the thread and see if we actually need to
 1140          * put it on the runqueue.
 1141          *
 1142          * Use store_rel to arrange that the store to ih_need in
 1143          * swi_sched() is before the store to it_need and prepare for
 1144          * transfer of this order to loads in the ithread.
 1145          */
 1146         atomic_store_rel_int(&it->it_need, 1);
 1147         thread_lock(td);
 1148         if (TD_AWAITING_INTR(td)) {
 1149                 CTR3(KTR_INTR, "%s: schedule pid %d (%s)", __func__, p->p_pid,
 1150                     td->td_name);
 1151                 TD_CLR_IWAIT(td);
 1152                 sched_add(td, SRQ_INTR);
 1153         } else {
 1154                 CTR5(KTR_INTR, "%s: pid %d (%s): it_need %d, state %d",
 1155                     __func__, p->p_pid, td->td_name, it->it_need, td->td_state);
 1156         }
 1157         thread_unlock(td);
 1158 
 1159         return (0);
 1160 }
 1161 #endif
 1162 
 1163 /*
 1164  * Allow interrupt event binding for software interrupt handlers -- a no-op,
 1165  * since interrupts are generated in software rather than being directed by
 1166  * a PIC.
 1167  */
 1168 static int
 1169 swi_assign_cpu(void *arg, int cpu)
 1170 {
 1171 
 1172         return (0);
 1173 }
 1174 
 1175 /*
 1176  * Add a software interrupt handler to a specified event.  If a given event
 1177  * is not specified, then a new event is created.
 1178  */
 1179 int
 1180 swi_add(struct intr_event **eventp, const char *name, driver_intr_t handler,
 1181             void *arg, int pri, enum intr_type flags, void **cookiep)
 1182 {
 1183         struct intr_event *ie;
 1184         int error;
 1185 
 1186         if (flags & INTR_ENTROPY)
 1187                 return (EINVAL);
 1188 
 1189         ie = (eventp != NULL) ? *eventp : NULL;
 1190 
 1191         if (ie != NULL) {
 1192                 if (!(ie->ie_flags & IE_SOFT))
 1193                         return (EINVAL);
 1194         } else {
 1195                 error = intr_event_create(&ie, NULL, IE_SOFT, 0,
 1196                     NULL, NULL, NULL, swi_assign_cpu, "swi%d:", pri);
 1197                 if (error)
 1198                         return (error);
 1199                 if (eventp != NULL)
 1200                         *eventp = ie;
 1201         }
 1202         error = intr_event_add_handler(ie, name, NULL, handler, arg,
 1203             PI_SWI(pri), flags, cookiep);
 1204         return (error);
 1205 }
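
/*
 * Example (illustrative): the kernel's own soft interrupt threads are
 * created this way; a "vm" SWI, for instance, might be set up as:
 *
 *	if (swi_add(NULL, "vm", swi_vm, NULL, SWI_VM, INTR_MPSAFE,
 *	    &vm_ih))
 *		panic("died while creating vm swi ithread");
 */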
 1206 
 1207 /*
 1208  * Schedule a software interrupt thread.
 1209  */
 1210 void
 1211 swi_sched(void *cookie, int flags)
 1212 {
 1213         struct intr_handler *ih = (struct intr_handler *)cookie;
 1214         struct intr_event *ie = ih->ih_event;
 1215         struct intr_entropy entropy;
 1216         int error;
 1217 
 1218         CTR3(KTR_INTR, "swi_sched: %s %s need=%d", ie->ie_name, ih->ih_name,
 1219             ih->ih_need);
 1220 
 1221         entropy.event = (uintptr_t)ih;
 1222         entropy.td = curthread;
 1223         random_harvest_queue(&entropy, sizeof(entropy), 1, RANDOM_SWI);
 1224 
 1225         /*
 1226          * Set ih_need for this handler so that if the ithread is already
 1227          * running it will execute this handler on the next pass.  Otherwise,
 1228          * it will execute it the next time it runs.
 1229          */
 1230         ih->ih_need = 1;
 1231 
 1232         if (!(flags & SWI_DELAY)) {
 1233                 PCPU_INC(cnt.v_soft);
 1234 #ifdef INTR_FILTER
 1235                 error = intr_event_schedule_thread(ie, ie->ie_thread);
 1236 #else
 1237                 error = intr_event_schedule_thread(ie);
 1238 #endif
 1239                 KASSERT(error == 0, ("stray software interrupt"));
 1240         }
 1241 }
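
/*
 * Example (illustrative): a SWI handler is kicked with the cookie
 * returned by swi_add(); SWI_DELAY only marks ih_need and skips the
 * immediate ithread wakeup:
 *
 *	swi_sched(vm_ih, 0);		-- run the handler soon
 *	swi_sched(vm_ih, SWI_DELAY);	-- just note that it is needed
 */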
 1242 
 1243 /*
 1244  * Remove a software interrupt handler.  Currently this code does not
 1245  * remove the associated interrupt event if it becomes empty.  Calling code
 1246  * may do so manually via intr_event_destroy(), but that's not really
 1247  * an optimal interface.
 1248  */
 1249 int
 1250 swi_remove(void *cookie)
 1251 {
 1252 
 1253         return (intr_event_remove_handler(cookie));
 1254 }
 1255 
 1256 #ifdef INTR_FILTER
 1257 static void
 1258 priv_ithread_execute_handler(struct proc *p, struct intr_handler *ih)
 1259 {
 1260         struct intr_event *ie;
 1261 
 1262         ie = ih->ih_event;
 1263         /*
 1264          * If this handler is marked for death, remove it from
 1265          * the list of handlers and wake up the sleeper.
 1266          */
 1267         if (ih->ih_flags & IH_DEAD) {
 1268                 mtx_lock(&ie->ie_lock);
 1269                 TAILQ_REMOVE(&ie->ie_handlers, ih, ih_next);
 1270                 ih->ih_flags &= ~IH_DEAD;
 1271                 wakeup(ih);
 1272                 mtx_unlock(&ie->ie_lock);
 1273                 return;
 1274         }
 1275         
 1276         /* Execute this handler. */
 1277         CTR6(KTR_INTR, "%s: pid %d exec %p(%p) for %s flg=%x",
 1278              __func__, p->p_pid, (void *)ih->ih_handler, ih->ih_argument,
 1279              ih->ih_name, ih->ih_flags);
 1280         
 1281         if (!(ih->ih_flags & IH_MPSAFE))
 1282                 mtx_lock(&Giant);
 1283         ih->ih_handler(ih->ih_argument);
 1284         if (!(ih->ih_flags & IH_MPSAFE))
 1285                 mtx_unlock(&Giant);
 1286 }
 1287 #endif
 1288 
 1289 /*
 1290  * This is a public function for use by drivers that mux interrupt
 1291  * handlers for child devices from their interrupt handler.
 1292  */
 1293 void
 1294 intr_event_execute_handlers(struct proc *p, struct intr_event *ie)
 1295 {
 1296         struct intr_handler *ih, *ihn;
 1297 
 1298         TAILQ_FOREACH_SAFE(ih, &ie->ie_handlers, ih_next, ihn) {
 1299                 /*
 1300                  * If this handler is marked for death, remove it from
 1301                  * the list of handlers and wake up the sleeper.
 1302                  */
 1303                 if (ih->ih_flags & IH_DEAD) {
 1304                         mtx_lock(&ie->ie_lock);
 1305                         TAILQ_REMOVE(&ie->ie_handlers, ih, ih_next);
 1306                         ih->ih_flags &= ~IH_DEAD;
 1307                         wakeup(ih);
 1308                         mtx_unlock(&ie->ie_lock);
 1309                         continue;
 1310                 }
 1311 
  1312                 /* Skip filter-only handlers. */
 1313                 if (ih->ih_handler == NULL)
 1314                         continue;
 1315 
 1316                 /*
 1317                  * For software interrupt threads, we only execute
 1318                  * handlers that have their need flag set.  Hardware
 1319                  * interrupt threads always invoke all of their handlers.
 1320                  *
 1321                  * ih_need can only be 0 or 1.  Failed cmpset below
 1322                  * means that there is no request to execute handlers,
 1323                  * so a retry of the cmpset is not needed.
 1324                  */
 1325                 if ((ie->ie_flags & IE_SOFT) != 0 &&
 1326                     atomic_cmpset_int(&ih->ih_need, 1, 0) == 0)
 1327                         continue;
 1328 
 1329                 /* Execute this handler. */
 1330                 CTR6(KTR_INTR, "%s: pid %d exec %p(%p) for %s flg=%x",
 1331                     __func__, p->p_pid, (void *)ih->ih_handler, 
 1332                     ih->ih_argument, ih->ih_name, ih->ih_flags);
 1333 
 1334                 if (!(ih->ih_flags & IH_MPSAFE))
 1335                         mtx_lock(&Giant);
 1336                 ih->ih_handler(ih->ih_argument);
 1337                 if (!(ih->ih_flags & IH_MPSAFE))
 1338                         mtx_unlock(&Giant);
 1339         }
 1340 }
 1341 
 1342 static void
 1343 ithread_execute_handlers(struct proc *p, struct intr_event *ie)
 1344 {
 1345 
 1346         /* Interrupt handlers should not sleep. */
 1347         if (!(ie->ie_flags & IE_SOFT))
 1348                 THREAD_NO_SLEEPING();
 1349         intr_event_execute_handlers(p, ie);
 1350         if (!(ie->ie_flags & IE_SOFT))
 1351                 THREAD_SLEEPING_OK();
 1352 
 1353         /*
 1354          * Interrupt storm handling:
 1355          *
 1356          * If this interrupt source is currently storming, then throttle
  1357          * it to only fire the handler once per clock tick.
 1358          *
 1359          * If this interrupt source is not currently storming, but the
 1360          * number of back to back interrupts exceeds the storm threshold,
 1361          * then enter storming mode.
 1362          */
 1363         if (intr_storm_threshold != 0 && ie->ie_count >= intr_storm_threshold &&
 1364             !(ie->ie_flags & IE_SOFT)) {
 1365                 /* Report the message only once every second. */
 1366                 if (ppsratecheck(&ie->ie_warntm, &ie->ie_warncnt, 1)) {
 1367                         printf(
 1368         "interrupt storm detected on \"%s\"; throttling interrupt source\n",
 1369                             ie->ie_name);
 1370                 }
 1371                 pause("istorm", 1);
 1372         } else
 1373                 ie->ie_count++;
 1374 
 1375         /*
 1376          * Now that all the handlers have had a chance to run, reenable
 1377          * the interrupt source.
 1378          */
 1379         if (ie->ie_post_ithread != NULL)
 1380                 ie->ie_post_ithread(ie->ie_source);
 1381 }
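
/*
 * Note (illustrative): the threshold used above is the
 * hw.intr_storm_threshold sysctl/tunable declared at the top of this
 * file; setting it to 0 disables storm detection entirely:
 *
 *	# sysctl hw.intr_storm_threshold=0
 */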
 1382 
 1383 #ifndef INTR_FILTER
 1384 /*
 1385  * This is the main code for interrupt threads.
 1386  */
 1387 static void
 1388 ithread_loop(void *arg)
 1389 {
 1390         struct intr_thread *ithd;
 1391         struct intr_event *ie;
 1392         struct thread *td;
 1393         struct proc *p;
 1394         int wake;
 1395 
 1396         td = curthread;
 1397         p = td->td_proc;
 1398         ithd = (struct intr_thread *)arg;
 1399         KASSERT(ithd->it_thread == td,
 1400             ("%s: ithread and proc linkage out of sync", __func__));
 1401         ie = ithd->it_event;
 1402         ie->ie_count = 0;
 1403         wake = 0;
 1404 
 1405         /*
 1406          * As long as we have interrupts outstanding, go through the
 1407          * list of handlers, giving each one a go at it.
 1408          */
 1409         for (;;) {
 1410                 /*
 1411                  * If we are an orphaned thread, then just die.
 1412                  */
 1413                 if (ithd->it_flags & IT_DEAD) {
 1414                         CTR3(KTR_INTR, "%s: pid %d (%s) exiting", __func__,
 1415                             p->p_pid, td->td_name);
 1416                         free(ithd, M_ITHREAD);
 1417                         kthread_exit();
 1418                 }
 1419 
 1420                 /*
 1421                  * Service interrupts.  If another interrupt arrives while
 1422                  * we are running, it will set it_need to note that we
 1423                  * should make another pass.
 1424                  *
 1425                  * The load_acq part of the following cmpset ensures
 1426                  * that the load of ih_need in ithread_execute_handlers()
 1427                  * is ordered after the load of it_need here.
 1428                  */
 1429                 while (atomic_cmpset_acq_int(&ithd->it_need, 1, 0) != 0)
 1430                         ithread_execute_handlers(p, ie);
 1431                 WITNESS_WARN(WARN_PANIC, NULL, "suspending ithread");
 1432                 mtx_assert(&Giant, MA_NOTOWNED);
 1433 
 1434                 /*
 1435                  * Processed all our interrupts.  Now get the sched
 1436                  * lock.  This may take a while and it_need may get
 1437                  * set again, so we have to check it again.
 1438                  */
 1439                 thread_lock(td);
 1440                 if (atomic_load_acq_int(&ithd->it_need) == 0 &&
 1441                     (ithd->it_flags & (IT_DEAD | IT_WAIT)) == 0) {
 1442                         TD_SET_IWAIT(td);
 1443                         ie->ie_count = 0;
 1444                         mi_switch(SW_VOL | SWT_IWAIT, NULL);
 1445                 }
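                      /*
                       * IT_WAIT is set by another thread that is waiting for
                       * this ithread to drain (it sleeps on the ithd
                       * pointer), so clear the flag here and issue the
                       * wakeup below, once outside the thread lock.
                       */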
 1446                 if (ithd->it_flags & IT_WAIT) {
 1447                         wake = 1;
 1448                         ithd->it_flags &= ~IT_WAIT;
 1449                 }
 1450                 thread_unlock(td);
 1451                 if (wake) {
 1452                         wakeup(ithd);
 1453                         wake = 0;
 1454                 }
 1455         }
 1456 }
 1457 
 1458 /*
 1459  * Main interrupt handling body.
 1460  *
 1461  * Input:
 1462  * o ie:                        the event connected to this interrupt.
 1463  * o frame:                     some archs (e.g. i386) pass a frame to some
 1464  *                              handlers as their main argument.
 1465  * Return value:
 1466  * o 0:                         everything ok.
 1467  * o EINVAL:                    stray interrupt.
 1468  */
 1469 int
 1470 intr_event_handle(struct intr_event *ie, struct trapframe *frame)
 1471 {
 1472         struct intr_handler *ih;
 1473         struct trapframe *oldframe;
 1474         struct thread *td;
 1475         int error, ret, thread;
 1476 
 1477         td = curthread;
 1478 
 1479 #ifdef KSTACK_USAGE_PROF
 1480         intr_prof_stack_use(td, frame);
 1481 #endif
 1482 
 1483         /* An interrupt with no event or handlers is a stray interrupt. */
 1484         if (ie == NULL || TAILQ_EMPTY(&ie->ie_handlers))
 1485                 return (EINVAL);
 1486 
 1487         /*
 1488          * Execute fast interrupt handlers directly.
 1489          * To support clock handlers, if a handler registers
 1490          * with a NULL argument, then we pass it a pointer to
 1491          * a trapframe as its argument.
 1492          */
 1493         td->td_intr_nesting_level++;
 1494         thread = 0;
 1495         ret = 0;
 1496         critical_enter();
 1497         oldframe = td->td_intr_frame;
 1498         td->td_intr_frame = frame;
 1499         TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next) {
 1500                 if (ih->ih_filter == NULL) {
 1501                         thread = 1;
 1502                         continue;
 1503                 }
 1504                 CTR4(KTR_INTR, "%s: exec %p(%p) for %s", __func__,
 1505                     ih->ih_filter, ih->ih_argument == NULL ? frame :
 1506                     ih->ih_argument, ih->ih_name);
 1507                 if (ih->ih_argument == NULL)
 1508                         ret = ih->ih_filter(frame);
 1509                 else
 1510                         ret = ih->ih_filter(ih->ih_argument);
 1511                 KASSERT(ret == FILTER_STRAY ||
 1512                     ((ret & (FILTER_SCHEDULE_THREAD | FILTER_HANDLED)) != 0 &&
 1513                     (ret & ~(FILTER_SCHEDULE_THREAD | FILTER_HANDLED)) == 0),
 1514                     ("%s: incorrect return value %#x from %s", __func__, ret,
 1515                     ih->ih_name));
 1516 
 1517         /*
 1518          * Wrapper handler special handling:
 1519          *
 1520          * In some particular cases (e.g. pccard and pccbb), the
 1521          * real device handler is wrapped in two functions: a
 1522          * filter wrapper and an ithread wrapper.
 1523          * In this case (and only in this case), the filter wrapper
 1524          * may ask the system to schedule the ithread and mask the
 1525          * interrupt source if the wrapped handler is composed
 1526          * of just an ithread handler.
 1527          *
 1528          * TODO: write a generic wrapper to keep people from rolling
 1529          * their own.
 1530          */
 1531                 if (!thread) {
 1532                         if (ret == FILTER_SCHEDULE_THREAD)
 1533                                 thread = 1;
 1534                 }
 1535         }
 1536         td->td_intr_frame = oldframe;
 1537 
 1538         if (thread) {
 1539                 if (ie->ie_pre_ithread != NULL)
 1540                         ie->ie_pre_ithread(ie->ie_source);
 1541         } else {
 1542                 if (ie->ie_post_filter != NULL)
 1543                         ie->ie_post_filter(ie->ie_source);
 1544         }
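              /*
               * Above, ie_pre_ithread typically masks (and acks) the
               * interrupt source so it cannot re-fire while the ithread
               * runs; the matching ie_post_ithread call in
               * ithread_execute_handlers() re-enables it afterwards.
               * ie_post_filter typically just acks the source, since the
               * filters have already serviced the interrupt.
               */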
 1545         
 1546         /* Schedule the ithread if needed. */
 1547         if (thread) {
 1548                 error = intr_event_schedule_thread(ie);
 1549                 KASSERT(error == 0, ("bad stray interrupt"));
 1550         }
 1551         critical_exit();
 1552         td->td_intr_nesting_level--;
 1553         return (0);
 1554 }
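      /*
       * A minimal usage sketch (hypothetical "foo" driver, not part of this
       * file): drivers normally reach intr_event_handle() indirectly by
       * registering a filter and/or an ithread handler via bus_setup_intr(9):
       *
       *      static int
       *      foo_filter(void *arg)
       *      {
       *              struct foo_softc *sc = arg;
       *
       *              if (!FOO_INTR_PENDING(sc))
       *                      return (FILTER_STRAY);
       *              FOO_DISABLE_INTR(sc);            (quiesce the device)
       *              return (FILTER_SCHEDULE_THREAD); (defer to the ithread)
       *      }
       *
       *      static void
       *      foo_intr(void *arg)
       *      {
       *              (heavy lifting; runs via ithread_execute_handlers())
       *      }
       *
       *      error = bus_setup_intr(dev, sc->irq_res,
       *          INTR_TYPE_NET | INTR_MPSAFE, foo_filter, foo_intr, sc,
       *          &sc->intr_cookie);
       *
       * FOO_INTR_PENDING() and FOO_DISABLE_INTR() are assumed,
       * device-specific helpers.
       */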
 1555 #else
 1556 /*
 1557  * This is the main code for interrupt threads.
 1558  */
 1559 static void
 1560 ithread_loop(void *arg)
 1561 {
 1562         struct intr_thread *ithd;
 1563         struct intr_handler *ih;
 1564         struct intr_event *ie;
 1565         struct thread *td;
 1566         struct proc *p;
 1567         int priv;
 1568         int wake;
 1569 
 1570         td = curthread;
 1571         p = td->td_proc;
 1572         ih = (struct intr_handler *)arg;
 1573         priv = (ih->ih_thread != NULL) ? 1 : 0;
 1574         ithd = (priv) ? ih->ih_thread : ih->ih_event->ie_thread;
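              /*
               * priv distinguishes a private, per-handler ithread
               * (ih_thread != NULL) from the shared per-event one: a
               * private thread services only its own handler via
               * priv_ithread_execute_handler(), while the shared thread
               * walks the whole handler list.
               */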
 1575         KASSERT(ithd->it_thread == td,
 1576             ("%s: ithread and proc linkage out of sync", __func__));
 1577         ie = ithd->it_event;
 1578         ie->ie_count = 0;
 1579         wake = 0;
 1580 
 1581         /*
 1582          * As long as we have interrupts outstanding, go through the
 1583          * list of handlers, giving each one a go at it.
 1584          */
 1585         for (;;) {
 1586                 /*
 1587                  * If we are an orphaned thread, then just die.
 1588                  */
 1589                 if (ithd->it_flags & IT_DEAD) {
 1590                         CTR3(KTR_INTR, "%s: pid %d (%s) exiting", __func__,
 1591                             p->p_pid, td->td_name);
 1592                         free(ithd, M_ITHREAD);
 1593                         kthread_exit();
 1594                 }
 1595 
 1596                 /*
 1597                  * Service interrupts.  If another interrupt arrives while
 1598                  * we are running, it will set it_need to note that we
 1599                  * should make another pass.
 1600                  *
 1601                  * The load_acq part of the following cmpset ensures
 1602                  * that the load of ih_need in ithread_execute_handlers()
 1603                  * is ordered after the load of it_need here.
 1604                  */
 1605                 while (atomic_cmpset_acq_int(&ithd->it_need, 1, 0) != 0) {
 1606                         if (priv)
 1607                                 priv_ithread_execute_handler(p, ih);
 1608                         else 
 1609                                 ithread_execute_handlers(p, ie);
 1610                 }
 1611                 WITNESS_WARN(WARN_PANIC, NULL, "suspending ithread");
 1612                 mtx_assert(&Giant, MA_NOTOWNED);
 1613 
 1614                 /*
 1615                  * Processed all our interrupts.  Now get the sched
 1616                  * lock.  This may take a while and it_need may get
 1617                  * set again, so we have to check it again.
 1618                  */
 1619                 thread_lock(td);
 1620                 if (atomic_load_acq_int(&ithd->it_need) == 0 &&
 1621                     (ithd->it_flags & (IT_DEAD | IT_WAIT)) == 0) {
 1622                         TD_SET_IWAIT(td);
 1623                         ie->ie_count = 0;
 1624                         mi_switch(SW_VOL | SWT_IWAIT, NULL);
 1625                 }
 1626                 if (ithd->it_flags & IT_WAIT) {
 1627                         wake = 1;
 1628                         ithd->it_flags &= ~IT_WAIT;
 1629                 }
 1630                 thread_unlock(td);
 1631                 if (wake) {
 1632                         wakeup(ithd);
 1633                         wake = 0;
 1634                 }
 1635         }
 1636 }
 1637 
 1638 /* 
 1639  * Main loop for interrupt filter.
 1640  *
 1641  * Some architectures (i386, amd64 and arm) require the optional frame 
 1642  * parameter, and use it as the main argument for fast handler execution
 1643  * when ih_argument == NULL.
 1644  *
 1645  * Return value:
 1646  * o FILTER_STRAY:              No filter recognized the event, and no
 1647  *                              filter-less handler is registered on this 
 1648  *                              line.
 1649  * o FILTER_HANDLED:            A filter claimed the event and served it.
 1650  * o FILTER_SCHEDULE_THREAD:    No filter claimed the event, but there's at
 1651  *                              least one filter-less handler on this line.
 1652  * o FILTER_HANDLED | 
 1653  *   FILTER_SCHEDULE_THREAD:    A filter claimed the event, and asked for
 1654  *                              scheduling the per-handler ithread.
 1655  *
 1656  * In case an ithread has to be scheduled, in *ithd there will be a 
 1657  * pointer to a struct intr_thread containing the thread to be
 1658  * scheduled.
 1659  */
 1660 
 1661 static int
 1662 intr_filter_loop(struct intr_event *ie, struct trapframe *frame, 
 1663                  struct intr_thread **ithd) 
 1664 {
 1665         struct intr_handler *ih;
 1666         void *arg;
 1667         int ret, thread_only;
 1668 
 1669         ret = 0;
 1670         thread_only = 0;
 1671         TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next) {
 1672                 /*
 1673                  * Execute fast interrupt handlers directly.
 1674                  * To support clock handlers, if a handler registers
 1675                  * with a NULL argument, then we pass it a pointer to
 1676                  * a trapframe as its argument.
 1677                  */
 1678                 arg = ((ih->ih_argument == NULL) ? frame : ih->ih_argument);
 1679                 
 1680                 CTR5(KTR_INTR, "%s: exec %p/%p(%p) for %s", __func__,
 1681                      ih->ih_filter, ih->ih_handler, arg, ih->ih_name);
 1682 
 1683                 if (ih->ih_filter != NULL)
 1684                         ret = ih->ih_filter(arg);
 1685                 else {
 1686                         thread_only = 1;
 1687                         continue;
 1688                 }
 1689                 KASSERT(ret == FILTER_STRAY ||
 1690                     ((ret & (FILTER_SCHEDULE_THREAD | FILTER_HANDLED)) != 0 &&
 1691                     (ret & ~(FILTER_SCHEDULE_THREAD | FILTER_HANDLED)) == 0),
 1692                     ("%s: incorrect return value %#x from %s", __func__, ret,
 1693                     ih->ih_name));
 1694                 if (ret & FILTER_STRAY)
 1695                         continue;
 1696                 else { 
 1697                         *ithd = ih->ih_thread;
 1698                         return (ret);
 1699                 }
 1700         }
 1701 
 1702         /*
 1703          * No filters handled the interrupt and we have at least
 1704          * one handler without a filter.  In this case, we schedule
 1705          * all of the filter-less handlers to run in the ithread.
 1706          */     
 1707         if (thread_only) {
 1708                 *ithd = ie->ie_thread;
 1709                 return (FILTER_SCHEDULE_THREAD);
 1710         }
 1711         return (FILTER_STRAY);
 1712 }
 1713 
 1714 /*
 1715  * Main interrupt handling body.
 1716  *
 1717  * Input:
 1718  * o ie:                        the event connected to this interrupt.
 1719  * o frame:                     some archs (e.g. i386) pass a frame to some
 1720  *                              handlers as their main argument.
 1721  * Return value:
 1722  * o 0:                         everything ok.
 1723  * o EINVAL:                    stray interrupt.
 1724  */
 1725 int
 1726 intr_event_handle(struct intr_event *ie, struct trapframe *frame)
 1727 {
 1728         struct intr_thread *ithd;
 1729         struct trapframe *oldframe;
 1730         struct thread *td;
 1731         int thread;
 1732 
 1733         ithd = NULL;
 1734         td = curthread;
 1735 
 1736         if (ie == NULL || TAILQ_EMPTY(&ie->ie_handlers))
 1737                 return (EINVAL);
 1738 
 1739         td->td_intr_nesting_level++;
 1740         thread = 0;
 1741         critical_enter();
 1742         oldframe = td->td_intr_frame;
 1743         td->td_intr_frame = frame;
 1744         thread = intr_filter_loop(ie, frame, &ithd);    
 1745         if (thread & FILTER_HANDLED) {
 1746                 if (ie->ie_post_filter != NULL)
 1747                         ie->ie_post_filter(ie->ie_source);
 1748         } else {
 1749                 if (ie->ie_pre_ithread != NULL)
 1750                         ie->ie_pre_ithread(ie->ie_source);
 1751         }
 1752         td->td_intr_frame = oldframe;
 1753         critical_exit();
 1754         
 1755         /* Stray interrupt bookkeeping. */
 1756         if (thread & FILTER_STRAY) {
 1757                 ie->ie_count++;
 1758                 if (ie->ie_count < intr_storm_threshold)
 1759                         printf("Interrupt stray detection not present\n");
 1760         }
 1761 
 1762         /* Schedule an ithread if needed. */
 1763         if (thread & FILTER_SCHEDULE_THREAD) {
 1764                 if (intr_event_schedule_thread(ie, ithd) != 0)
 1765                         panic("%s: impossible stray interrupt", __func__);
 1766         }
 1767         td->td_intr_nesting_level--;
 1768         return (0);
 1769 }
 1770 #endif
 1771 
 1772 #ifdef DDB
 1773 /*
 1774  * Dump details about an interrupt handler
 1775  */
 1776 static void
 1777 db_dump_intrhand(struct intr_handler *ih)
 1778 {
 1779         int comma;
 1780 
 1781         db_printf("\t%-10s ", ih->ih_name);
 1782         switch (ih->ih_pri) {
 1783         case PI_REALTIME:
 1784                 db_printf("CLK ");
 1785                 break;
 1786         case PI_AV:
 1787                 db_printf("AV  ");
 1788                 break;
 1789         case PI_TTY:
 1790                 db_printf("TTY ");
 1791                 break;
 1792         case PI_NET:
 1793                 db_printf("NET ");
 1794                 break;
 1795         case PI_DISK:
 1796                 db_printf("DISK");
 1797                 break;
 1798         case PI_DULL:
 1799                 db_printf("DULL");
 1800                 break;
 1801         default:
 1802                 if (ih->ih_pri >= PI_SOFT)
 1803                         db_printf("SWI ");
 1804                 else
 1805                         db_printf("%4u", ih->ih_pri);
 1806                 break;
 1807         }
 1808         db_printf(" ");
 1809         if (ih->ih_filter != NULL) {
 1810                 db_printf("[F]");
 1811                 db_printsym((uintptr_t)ih->ih_filter, DB_STGY_PROC);
 1812         }
 1813         if (ih->ih_handler != NULL) {
 1814                 if (ih->ih_filter != NULL)
 1815                         db_printf(",");
 1816                 db_printf("[H]");
 1817                 db_printsym((uintptr_t)ih->ih_handler, DB_STGY_PROC);
 1818         }
 1819         db_printf("(%p)", ih->ih_argument);
 1820         if (ih->ih_need ||
 1821             (ih->ih_flags & (IH_EXCLUSIVE | IH_ENTROPY | IH_DEAD |
 1822             IH_MPSAFE)) != 0) {
 1823                 db_printf(" {");
 1824                 comma = 0;
 1825                 if (ih->ih_flags & IH_EXCLUSIVE) {
 1826                         if (comma)
 1827                                 db_printf(", ");
 1828                         db_printf("EXCL");
 1829                         comma = 1;
 1830                 }
 1831                 if (ih->ih_flags & IH_ENTROPY) {
 1832                         if (comma)
 1833                                 db_printf(", ");
 1834                         db_printf("ENTROPY");
 1835                         comma = 1;
 1836                 }
 1837                 if (ih->ih_flags & IH_DEAD) {
 1838                         if (comma)
 1839                                 db_printf(", ");
 1840                         db_printf("DEAD");
 1841                         comma = 1;
 1842                 }
 1843                 if (ih->ih_flags & IH_MPSAFE) {
 1844                         if (comma)
 1845                                 db_printf(", ");
 1846                         db_printf("MPSAFE");
 1847                         comma = 1;
 1848                 }
 1849                 if (ih->ih_need) {
 1850                         if (comma)
 1851                                 db_printf(", ");
 1852                         db_printf("NEED");
 1853                 }
 1854                 db_printf("}");
 1855         }
 1856         db_printf("\n");
 1857 }
 1858 
 1859 /*
 1860  * Dump details about an event.
 1861  */
 1862 void
 1863 db_dump_intr_event(struct intr_event *ie, int handlers)
 1864 {
 1865         struct intr_handler *ih;
 1866         struct intr_thread *it;
 1867         int comma;
 1868 
 1869         db_printf("%s ", ie->ie_fullname);
 1870         it = ie->ie_thread;
 1871         if (it != NULL)
 1872                 db_printf("(pid %d)", it->it_thread->td_proc->p_pid);
 1873         else
 1874                 db_printf("(no thread)");
 1875         if ((ie->ie_flags & (IE_SOFT | IE_ENTROPY | IE_ADDING_THREAD)) != 0 ||
 1876             (it != NULL && it->it_need)) {
 1877                 db_printf(" {");
 1878                 comma = 0;
 1879                 if (ie->ie_flags & IE_SOFT) {
 1880                         db_printf("SOFT");
 1881                         comma = 1;
 1882                 }
 1883                 if (ie->ie_flags & IE_ENTROPY) {
 1884                         if (comma)
 1885                                 db_printf(", ");
 1886                         db_printf("ENTROPY");
 1887                         comma = 1;
 1888                 }
 1889                 if (ie->ie_flags & IE_ADDING_THREAD) {
 1890                         if (comma)
 1891                                 db_printf(", ");
 1892                         db_printf("ADDING_THREAD");
 1893                         comma = 1;
 1894                 }
 1895                 if (it != NULL && it->it_need) {
 1896                         if (comma)
 1897                                 db_printf(", ");
 1898                         db_printf("NEED");
 1899                 }
 1900                 db_printf("}");
 1901         }
 1902         db_printf("\n");
 1903 
 1904         if (handlers)
 1905                 TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next)
 1906                     db_dump_intrhand(ih);
 1907 }
 1908 
 1909 /*
 1910  * Dump data about interrupt handlers
 1911  */
 1912 DB_SHOW_COMMAND(intr, db_show_intr)
 1913 {
 1914         struct intr_event *ie;
 1915         int all, verbose;
 1916 
 1917         verbose = strchr(modif, 'v') != NULL;
 1918         all = strchr(modif, 'a') != NULL;
 1919         TAILQ_FOREACH(ie, &event_list, ie_list) {
 1920                 if (!all && TAILQ_EMPTY(&ie->ie_handlers))
 1921                         continue;
 1922                 db_dump_intr_event(ie, verbose);
 1923                 if (db_pager_quit)
 1924                         break;
 1925         }
 1926 }
 1927 #endif /* DDB */
 1928 
 1929 /*
 1930  * Start standard software interrupt threads
 1931  */
 1932 static void
 1933 start_softintr(void *dummy)
 1934 {
 1935 
 1936         if (swi_add(NULL, "vm", swi_vm, NULL, SWI_VM, INTR_MPSAFE, &vm_ih))
 1937                 panic("died while creating vm swi ithread");
 1938 }
 1939 SYSINIT(start_softintr, SI_SUB_SOFTINTR, SI_ORDER_FIRST, start_softintr,
 1940     NULL);
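      /*
       * A usage sketch (hedged): vm_ih is the cookie filled in above, and
       * code elsewhere triggers this software interrupt with
       *
       *      swi_sched(vm_ih, 0);
       *
       * which marks the handler as needing to run and schedules the SWI
       * ithread.
       */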
 1941 
 1942 /*
 1943  * Sysctls used by systat and others: hw.intrnames and hw.intrcnt.
 1944  * The data for this is machine dependent, and the declarations are in
 1945  * machine dependent code.  The layout of intrnames and intrcnt, however,
 1946  * is machine independent.
 1947  *
 1948  * We do not know the length of intrcnt and intrnames at compile time, so
 1949  * we calculate things at run time.
 1950  */
 1951 static int
 1952 sysctl_intrnames(SYSCTL_HANDLER_ARGS)
 1953 {
 1954         return (sysctl_handle_opaque(oidp, intrnames, sintrnames, req));
 1955 }
 1956 
 1957 SYSCTL_PROC(_hw, OID_AUTO, intrnames, CTLTYPE_OPAQUE | CTLFLAG_RD,
 1958     NULL, 0, sysctl_intrnames, "", "Interrupt Names");
 1959 
 1960 static int
 1961 sysctl_intrcnt(SYSCTL_HANDLER_ARGS)
 1962 {
 1963 #ifdef SCTL_MASK32
 1964         uint32_t *intrcnt32;
 1965         unsigned i;
 1966         int error;
 1967 
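              /*
               * A 32-bit consumer (SCTL_MASK32) expects 32-bit counters,
               * while intrcnt is an array of u_long, which is 64 bits wide
               * on the 64-bit platforms where this compat path exists.
               * Hence the reported size is halved (sintrcnt / 2) and each
               * counter is narrowed on copy, silently truncating values
               * above UINT32_MAX.
               */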
 1968         if (req->flags & SCTL_MASK32) {
 1969                 if (!req->oldptr)
 1970                         return (sysctl_handle_opaque(oidp, NULL, sintrcnt / 2, req));
 1971                 intrcnt32 = malloc(sintrcnt / 2, M_TEMP, M_NOWAIT);
 1972                 if (intrcnt32 == NULL)
 1973                         return (ENOMEM);
 1974                 for (i = 0; i < sintrcnt / sizeof (u_long); i++)
 1975                         intrcnt32[i] = intrcnt[i];
 1976                 error = sysctl_handle_opaque(oidp, intrcnt32, sintrcnt / 2, req);
 1977                 free(intrcnt32, M_TEMP);
 1978                 return (error);
 1979         }
 1980 #endif
 1981         return (sysctl_handle_opaque(oidp, intrcnt, sintrcnt, req));
 1982 }
 1983 
 1984 SYSCTL_PROC(_hw, OID_AUTO, intrcnt, CTLTYPE_OPAQUE | CTLFLAG_RD,
 1985     NULL, 0, sysctl_intrcnt, "", "Interrupt Counts");
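      /*
       * Example consumers: vmstat(8) -i and systat(1) read hw.intrnames and
       * hw.intrcnt via sysctl(3) and pair each interrupt name with its
       * counter.
       */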
 1986 
 1987 #ifdef DDB
 1988 /*
 1989  * DDB command to dump the interrupt statistics.
 1990  */
 1991 DB_SHOW_COMMAND(intrcnt, db_show_intrcnt)
 1992 {
 1993         u_long *i;
 1994         char *cp;
 1995         u_int j;
 1996 
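              /*
               * intrcnt and intrnames are parallel machine-dependent
               * tables: the j-th u_long counter corresponds to the j-th
               * NUL-terminated name in the packed intrnames buffer, which
               * is why cp advances by strlen(cp) + 1 per counter below.
               */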
 1997         cp = intrnames;
 1998         j = 0;
 1999         for (i = intrcnt; j < (sintrcnt / sizeof(u_long)) && !db_pager_quit;
 2000             i++, j++) {
 2001                 if (*cp == '\0')
 2002                         break;
 2003                 if (*i != 0)
 2004                         db_printf("%s\t%lu\n", cp, *i);
 2005                 cp += strlen(cp) + 1;
 2006         }
 2007 }
 2008 #endif
