FreeBSD/Linux Kernel Cross Reference
sys/kern/kern_intr.c


    1 /*-
    2  * Copyright (c) 1997, Stefan Esser <se@freebsd.org>
    3  * All rights reserved.
    4  *
    5  * Redistribution and use in source and binary forms, with or without
    6  * modification, are permitted provided that the following conditions
    7  * are met:
    8  * 1. Redistributions of source code must retain the above copyright
    9  *    notice unmodified, this list of conditions, and the following
   10  *    disclaimer.
   11  * 2. Redistributions in binary form must reproduce the above copyright
   12  *    notice, this list of conditions and the following disclaimer in the
   13  *    documentation and/or other materials provided with the distribution.
   14  *
   15  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
   16  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
   17  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
   18  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
   19  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
   20  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
   21  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
   22  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
   23  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
   24  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
   25  */
   26 
   27 #include <sys/cdefs.h>
   28 __FBSDID("$FreeBSD$");
   29 
   30 #include "opt_ddb.h"
   31 
   32 #include <sys/param.h>
   33 #include <sys/bus.h>
   34 #include <sys/conf.h>
   35 #include <sys/cpuset.h>
   36 #include <sys/rtprio.h>
   37 #include <sys/systm.h>
   38 #include <sys/interrupt.h>
   39 #include <sys/kernel.h>
   40 #include <sys/kthread.h>
   41 #include <sys/ktr.h>
   42 #include <sys/limits.h>
   43 #include <sys/lock.h>
   44 #include <sys/malloc.h>
   45 #include <sys/mutex.h>
   46 #include <sys/priv.h>
   47 #include <sys/proc.h>
   48 #include <sys/random.h>
   49 #include <sys/resourcevar.h>
   50 #include <sys/sched.h>
   51 #include <sys/smp.h>
   52 #include <sys/sysctl.h>
   53 #include <sys/syslog.h>
   54 #include <sys/unistd.h>
   55 #include <sys/vmmeter.h>
   56 #include <machine/atomic.h>
   57 #include <machine/cpu.h>
   58 #include <machine/md_var.h>
   59 #include <machine/stdarg.h>
   60 #ifdef DDB
   61 #include <ddb/ddb.h>
   62 #include <ddb/db_sym.h>
   63 #endif
   64 
   65 /*
   66  * Describe an interrupt thread.  There is one of these per interrupt event.
   67  */
   68 struct intr_thread {
   69         struct intr_event *it_event;
   70         struct thread *it_thread;       /* Kernel thread. */
   71         int     it_flags;               /* (j) IT_* flags. */
   72         int     it_need;                /* Needs service. */
   73 };
   74 
   75 /* Interrupt thread flags kept in it_flags */
   76 #define IT_DEAD         0x000001        /* Thread is waiting to exit. */
   77 #define IT_WAIT         0x000002        /* Thread is waiting for completion. */
   78 
   79 struct  intr_entropy {
   80         struct  thread *td;
   81         uintptr_t event;
   82 };
   83 
   84 struct  intr_event *clk_intr_event;
   85 struct  intr_event *tty_intr_event;
   86 void    *vm_ih;
   87 struct proc *intrproc;
   88 
   89 static MALLOC_DEFINE(M_ITHREAD, "ithread", "Interrupt Threads");
   90 
   91 static int intr_storm_threshold = 1000;
   92 TUNABLE_INT("hw.intr_storm_threshold", &intr_storm_threshold);
   93 SYSCTL_INT(_hw, OID_AUTO, intr_storm_threshold, CTLFLAG_RW,
   94     &intr_storm_threshold, 0,
   95     "Number of consecutive interrupts before storm protection is enabled");
   96 static TAILQ_HEAD(, intr_event) event_list =
   97     TAILQ_HEAD_INITIALIZER(event_list);
   98 static struct mtx event_lock;
   99 MTX_SYSINIT(intr_event_list, &event_lock, "intr event list", MTX_DEF);
  100 
  101 static void     intr_event_update(struct intr_event *ie);
  102 #ifdef INTR_FILTER
  103 static int      intr_event_schedule_thread(struct intr_event *ie,
  104                     struct intr_thread *ithd);
  105 static int      intr_filter_loop(struct intr_event *ie,
  106                     struct trapframe *frame, struct intr_thread **ithd);
  107 static struct intr_thread *ithread_create(const char *name,
  108                               struct intr_handler *ih);
  109 #else
  110 static int      intr_event_schedule_thread(struct intr_event *ie);
  111 static struct intr_thread *ithread_create(const char *name);
  112 #endif
  113 static void     ithread_destroy(struct intr_thread *ithread);
  114 static void     ithread_execute_handlers(struct proc *p, 
  115                     struct intr_event *ie);
  116 #ifdef INTR_FILTER
  117 static void     priv_ithread_execute_handler(struct proc *p, 
  118                     struct intr_handler *ih);
  119 #endif
  120 static void     ithread_loop(void *);
  121 static void     ithread_update(struct intr_thread *ithd);
  122 static void     start_softintr(void *);
  123 
  124 /* Map an interrupt type to an ithread priority. */
  125 u_char
  126 intr_priority(enum intr_type flags)
  127 {
  128         u_char pri;
  129 
  130         flags &= (INTR_TYPE_TTY | INTR_TYPE_BIO | INTR_TYPE_NET |
  131             INTR_TYPE_CAM | INTR_TYPE_MISC | INTR_TYPE_CLK | INTR_TYPE_AV);
  132         switch (flags) {
  133         case INTR_TYPE_TTY:
  134                 pri = PI_TTY;
  135                 break;
  136         case INTR_TYPE_BIO:
  137                 pri = PI_DISK;
  138                 break;
  139         case INTR_TYPE_NET:
  140                 pri = PI_NET;
  141                 break;
  142         case INTR_TYPE_CAM:
  143                 pri = PI_DISK;
  144                 break;
  145         case INTR_TYPE_AV:
  146                 pri = PI_AV;
  147                 break;
  148         case INTR_TYPE_CLK:
  149                 pri = PI_REALTIME;
  150                 break;
  151         case INTR_TYPE_MISC:
  152                 pri = PI_DULL;          /* don't care */
  153                 break;
  154         default:
  155                 /* We didn't specify an interrupt level. */
  156                 panic("intr_priority: no interrupt type in flags");
  157         }
  158 
  159         return pri;
  160 }
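A minimal worked example (a sketch, not part of the file): a handler registered with INTR_TYPE_NET gets PI_NET, while passing no type bit at all trips the panic above. The function name is hypothetical; the headers this file already includes suffice.

static void
example_setup_intr(void)
{
        u_char pri;

        pri = intr_priority(INTR_TYPE_NET);     /* yields PI_NET */
        (void)pri;
        /* intr_priority(0) would panic: no interrupt type bit in flags. */
}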
  161 
  162 /*
  163  * Update an ithread based on the associated intr_event.
  164  */
  165 static void
  166 ithread_update(struct intr_thread *ithd)
  167 {
  168         struct intr_event *ie;
  169         struct thread *td;
  170         u_char pri;
  171 
  172         ie = ithd->it_event;
  173         td = ithd->it_thread;
  174 
  175         /* Determine the overall priority of this event. */
  176         if (TAILQ_EMPTY(&ie->ie_handlers))
  177                 pri = PRI_MAX_ITHD;
  178         else
  179                 pri = TAILQ_FIRST(&ie->ie_handlers)->ih_pri;
  180 
  181         /* Update name and priority. */
  182         strlcpy(td->td_name, ie->ie_fullname, sizeof(td->td_name));
  183 #ifdef KTR
  184         sched_clear_tdname(td);
  185 #endif
  186         thread_lock(td);
  187         sched_prio(td, pri);
  188         thread_unlock(td);
  189 }
  190 
  191 /*
  192  * Regenerate the full name of an interrupt event and update its priority.
  193  */
  194 static void
  195 intr_event_update(struct intr_event *ie)
  196 {
  197         struct intr_handler *ih;
  198         char *last;
  199         int missed, space;
  200 
  201         /* Start off with no entropy and just the name of the event. */
  202         mtx_assert(&ie->ie_lock, MA_OWNED);
  203         strlcpy(ie->ie_fullname, ie->ie_name, sizeof(ie->ie_fullname));
  204         ie->ie_flags &= ~IE_ENTROPY;
  205         missed = 0;
  206         space = 1;
  207 
  208         /* Run through all the handlers updating values. */
  209         TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next) {
  210                 if (strlen(ie->ie_fullname) + strlen(ih->ih_name) + 1 <
  211                     sizeof(ie->ie_fullname)) {
  212                         strcat(ie->ie_fullname, " ");
  213                         strcat(ie->ie_fullname, ih->ih_name);
  214                         space = 0;
  215                 } else
  216                         missed++;
  217                 if (ih->ih_flags & IH_ENTROPY)
  218                         ie->ie_flags |= IE_ENTROPY;
  219         }
  220 
  221         /*
  222          * If the handler names were too long, add +'s to indicate missing
  223          * names. If we run out of room and still have +'s to add, change
  224          * the last character from a + to a *.
  225          */
  226         last = &ie->ie_fullname[sizeof(ie->ie_fullname) - 2];
  227         while (missed-- > 0) {
  228                 if (strlen(ie->ie_fullname) + 1 == sizeof(ie->ie_fullname)) {
  229                         if (*last == '+') {
  230                                 *last = '*';
  231                                 break;
  232                         } else
  233                                 *last = '+';
  234                 } else if (space) {
  235                         strcat(ie->ie_fullname, " +");
  236                         space = 0;
  237                 } else
  238                         strcat(ie->ie_fullname, "+");
  239         }
  240 
  241         /*
  242          * If this event has an ithread, update its priority and
  243          * name.
  244          */
  245         if (ie->ie_thread != NULL)
  246                 ithread_update(ie->ie_thread);
  247         CTR2(KTR_INTR, "%s: updated %s", __func__, ie->ie_fullname);
  248 }
  249 
  250 int
  251 intr_event_create(struct intr_event **event, void *source, int flags, int irq,
  252     void (*pre_ithread)(void *), void (*post_ithread)(void *),
  253     void (*post_filter)(void *), int (*assign_cpu)(void *, u_char),
  254     const char *fmt, ...)
  255 {
  256         struct intr_event *ie;
  257         va_list ap;
  258 
  259         /* The only valid flag during creation is IE_SOFT. */
  260         if ((flags & ~IE_SOFT) != 0)
  261                 return (EINVAL);
  262         ie = malloc(sizeof(struct intr_event), M_ITHREAD, M_WAITOK | M_ZERO);
  263         ie->ie_source = source;
  264         ie->ie_pre_ithread = pre_ithread;
  265         ie->ie_post_ithread = post_ithread;
  266         ie->ie_post_filter = post_filter;
  267         ie->ie_assign_cpu = assign_cpu;
  268         ie->ie_flags = flags;
  269         ie->ie_irq = irq;
  270         ie->ie_cpu = NOCPU;
  271         TAILQ_INIT(&ie->ie_handlers);
  272         mtx_init(&ie->ie_lock, "intr event", NULL, MTX_DEF);
  273 
  274         va_start(ap, fmt);
  275         vsnprintf(ie->ie_name, sizeof(ie->ie_name), fmt, ap);
  276         va_end(ap);
  277         strlcpy(ie->ie_fullname, ie->ie_name, sizeof(ie->ie_fullname));
  278         mtx_lock(&event_lock);
  279         TAILQ_INSERT_TAIL(&event_list, ie, ie_list);
  280         mtx_unlock(&event_lock);
  281         if (event != NULL)
  282                 *event = ie;
  283         CTR2(KTR_INTR, "%s: created %s", __func__, ie->ie_name);
  284         return (0);
  285 }
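A minimal sketch of how machine-dependent PIC code might use this; all example_* names and IRQ 10 are assumptions, and the three callbacks follow the pre_ithread/post_ithread/post_filter roles used throughout this file.

static struct intr_event *example_ie;

static void example_mask(void *arg)   { /* mask the source in hardware */ }
static void example_unmask(void *arg) { /* unmask once the ithread is done */ }
static void example_eoi(void *arg)    { /* EOI when only filters ran */ }

static void
example_register_irq(void *isrc)
{
        int error;

        error = intr_event_create(&example_ie, isrc, 0, 10,
            example_mask, example_unmask, example_eoi,
            NULL,               /* no assign_cpu: binding unsupported */
            "irq%d:", 10);
        if (error != 0)
                panic("intr_event_create failed: %d", error);
}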
  286 
  287 /*
  288  * Bind an interrupt event to the specified CPU.  Note that not all
  289  * platforms support binding an interrupt to a CPU.  For those
  290  * platforms this request will fail.  For supported platforms, any
  291  * associated ithreads as well as the primary interrupt context will
  292  * be bound to the specified CPU.  Using a cpu id of NOCPU unbinds
  293  * the interrupt event.
  294  */
  295 int
  296 intr_event_bind(struct intr_event *ie, u_char cpu)
  297 {
  298         cpuset_t mask;
  299         lwpid_t id;
  300         int error;
  301 
  302         /* Need a CPU to bind to. */
  303         if (cpu != NOCPU && CPU_ABSENT(cpu))
  304                 return (EINVAL);
  305 
  306         if (ie->ie_assign_cpu == NULL)
  307                 return (EOPNOTSUPP);
  308 
  309         error = priv_check(curthread, PRIV_SCHED_CPUSET_INTR);
  310         if (error)
  311                 return (error);
  312 
  313         /*
  314          * If we have any ithreads try to set their mask first to verify
  315          * permissions, etc.
  316          */
  317         mtx_lock(&ie->ie_lock);
  318         if (ie->ie_thread != NULL) {
  319                 CPU_ZERO(&mask);
  320                 if (cpu == NOCPU)
  321                         CPU_COPY(cpuset_root, &mask);
  322                 else
  323                         CPU_SET(cpu, &mask);
  324                 id = ie->ie_thread->it_thread->td_tid;
  325                 mtx_unlock(&ie->ie_lock);
  326                 error = cpuset_setthread(id, &mask);
  327                 if (error)
  328                         return (error);
  329         } else
  330                 mtx_unlock(&ie->ie_lock);
  331         error = ie->ie_assign_cpu(ie->ie_source, cpu);
  332         if (error) {
  333                 mtx_lock(&ie->ie_lock);
  334                 if (ie->ie_thread != NULL) {
  335                         CPU_ZERO(&mask);
  336                         if (ie->ie_cpu == NOCPU)
  337                                 CPU_COPY(cpuset_root, &mask);
  338                         else
  339                                 CPU_SET(cpu, &mask);
  340                         id = ie->ie_thread->it_thread->td_tid;
  341                         mtx_unlock(&ie->ie_lock);
  342                         (void)cpuset_setthread(id, &mask);
  343                 } else
  344                         mtx_unlock(&ie->ie_lock);
  345                 return (error);
  346         }
  347 
  348         mtx_lock(&ie->ie_lock);
  349         ie->ie_cpu = cpu;
  350         mtx_unlock(&ie->ie_lock);
  351 
  352         return (error);
  353 }
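Continuing the hypothetical sketch above: binding the event to CPU 1 and then unbinding it, with the EOPNOTSUPP case the comment mentions handled explicitly.

static void
example_bind(struct intr_event *ie)
{
        int error;

        error = intr_event_bind(ie, 1); /* ithread and source follow CPU 1 */
        if (error == EOPNOTSUPP)
                printf("interrupt binding not supported on this platform\n");
        (void)intr_event_bind(ie, NOCPU);       /* back to the root set */
}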
  354 
  355 static struct intr_event *
  356 intr_lookup(int irq)
  357 {
  358         struct intr_event *ie;
  359 
  360         mtx_lock(&event_lock);
  361         TAILQ_FOREACH(ie, &event_list, ie_list)
  362                 if (ie->ie_irq == irq &&
  363                     (ie->ie_flags & IE_SOFT) == 0 &&
  364                     TAILQ_FIRST(&ie->ie_handlers) != NULL)
  365                         break;
  366         mtx_unlock(&event_lock);
  367         return (ie);
  368 }
  369 
  370 int
  371 intr_setaffinity(int irq, void *m)
  372 {
  373         struct intr_event *ie;
  374         cpuset_t *mask;
  375         u_char cpu;
  376         int n;
  377 
  378         mask = m;
  379         cpu = NOCPU;
  380         /*
  381          * If we're setting all cpus we can unbind.  Otherwise make sure
  382          * only one cpu is in the set.
  383          */
  384         if (CPU_CMP(cpuset_root, mask)) {
  385                 for (n = 0; n < CPU_SETSIZE; n++) {
  386                         if (!CPU_ISSET(n, mask))
  387                                 continue;
  388                         if (cpu != NOCPU)
  389                                 return (EINVAL);
  390                         cpu = (u_char)n;
  391                 }
  392         }
  393         ie = intr_lookup(irq);
  394         if (ie == NULL)
  395                 return (ESRCH);
  396         return (intr_event_bind(ie, cpu));
  397 }
  398 
  399 int
  400 intr_getaffinity(int irq, void *m)
  401 {
  402         struct intr_event *ie;
  403         cpuset_t *mask;
  404 
  405         mask = m;
  406         ie = intr_lookup(irq);
  407         if (ie == NULL)
  408                 return (ESRCH);
  409         CPU_ZERO(mask);
  410         mtx_lock(&ie->ie_lock);
  411         if (ie->ie_cpu == NOCPU)
  412                 CPU_COPY(cpuset_root, mask);
  413         else
  414                 CPU_SET(ie->ie_cpu, mask);
  415         mtx_unlock(&ie->ie_lock);
  416         return (0);
  417 }
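A minimal sketch of the mask contract, with a hypothetical helper: intr_setaffinity() accepts either the full root set (which unbinds) or a mask with exactly one CPU set; anything else fails with EINVAL.

static int
example_pin_irq(int irq, int cpu)
{
        cpuset_t mask;

        CPU_ZERO(&mask);
        CPU_SET(cpu, &mask);            /* exactly one bit set */
        return (intr_setaffinity(irq, &mask));
}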
  418 
  419 int
  420 intr_event_destroy(struct intr_event *ie)
  421 {
  422 
  423         mtx_lock(&event_lock);
  424         mtx_lock(&ie->ie_lock);
  425         if (!TAILQ_EMPTY(&ie->ie_handlers)) {
  426                 mtx_unlock(&ie->ie_lock);
  427                 mtx_unlock(&event_lock);
  428                 return (EBUSY);
  429         }
  430         TAILQ_REMOVE(&event_list, ie, ie_list);
  431 #ifndef notyet
  432         if (ie->ie_thread != NULL) {
  433                 ithread_destroy(ie->ie_thread);
  434                 ie->ie_thread = NULL;
  435         }
  436 #endif
  437         mtx_unlock(&ie->ie_lock);
  438         mtx_unlock(&event_lock);
  439         mtx_destroy(&ie->ie_lock);
  440         free(ie, M_ITHREAD);
  441         return (0);
  442 }
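Teardown order matters: handlers come off first, because destroying an event that still has handlers returns EBUSY. A hedged sketch with hypothetical names:

static void
example_teardown(struct intr_event *ie, void *cookie)
{
        int error;

        error = intr_event_remove_handler(cookie);
        KASSERT(error == 0, ("remove handler: %d", error));
        error = intr_event_destroy(ie);         /* EBUSY if handlers remain */
        KASSERT(error == 0, ("destroy event: %d", error));
}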
  443 
  444 #ifndef INTR_FILTER
  445 static struct intr_thread *
  446 ithread_create(const char *name)
  447 {
  448         struct intr_thread *ithd;
  449         struct thread *td;
  450         int error;
  451 
  452         ithd = malloc(sizeof(struct intr_thread), M_ITHREAD, M_WAITOK | M_ZERO);
  453 
  454         error = kproc_kthread_add(ithread_loop, ithd, &intrproc,
  455                     &td, RFSTOPPED | RFHIGHPID,
  456                     0, "intr", "%s", name);
  457         if (error)
  458                 panic("kproc_kthread_add() failed with %d", error);
  459         thread_lock(td);
  460         sched_class(td, PRI_ITHD);
  461         TD_SET_IWAIT(td);
  462         thread_unlock(td);
  463         td->td_pflags |= TDP_ITHREAD;
  464         ithd->it_thread = td;
  465         CTR2(KTR_INTR, "%s: created %s", __func__, name);
  466         return (ithd);
  467 }
  468 #else
  469 static struct intr_thread *
  470 ithread_create(const char *name, struct intr_handler *ih)
  471 {
  472         struct intr_thread *ithd;
  473         struct thread *td;
  474         int error;
  475 
  476         ithd = malloc(sizeof(struct intr_thread), M_ITHREAD, M_WAITOK | M_ZERO);
  477 
  478         error = kproc_kthread_add(ithread_loop, ih, &intrproc,
  479                     &td, RFSTOPPED | RFHIGHPID,
  480                     0, "intr", "%s", name);
  481         if (error)
  482                 panic("kproc_kthread_add() failed with %d", error);
  483         thread_lock(td);
  484         sched_class(td, PRI_ITHD);
  485         TD_SET_IWAIT(td);
  486         thread_unlock(td);
  487         td->td_pflags |= TDP_ITHREAD;
  488         ithd->it_thread = td;
  489         CTR2(KTR_INTR, "%s: created %s", __func__, name);
  490         return (ithd);
  491 }
  492 #endif
  493 
  494 static void
  495 ithread_destroy(struct intr_thread *ithread)
  496 {
  497         struct thread *td;
  498 
  499         CTR2(KTR_INTR, "%s: killing %s", __func__, ithread->it_event->ie_name);
  500         td = ithread->it_thread;
  501         thread_lock(td);
  502         ithread->it_flags |= IT_DEAD;
  503         if (TD_AWAITING_INTR(td)) {
  504                 TD_CLR_IWAIT(td);
  505                 sched_add(td, SRQ_INTR);
  506         }
  507         thread_unlock(td);
  508 }
  509 
  510 #ifndef INTR_FILTER
  511 int
  512 intr_event_add_handler(struct intr_event *ie, const char *name,
  513     driver_filter_t filter, driver_intr_t handler, void *arg, u_char pri,
  514     enum intr_type flags, void **cookiep)
  515 {
  516         struct intr_handler *ih, *temp_ih;
  517         struct intr_thread *it;
  518 
  519         if (ie == NULL || name == NULL || (handler == NULL && filter == NULL))
  520                 return (EINVAL);
  521 
  522         /* Allocate and populate an interrupt handler structure. */
  523         ih = malloc(sizeof(struct intr_handler), M_ITHREAD, M_WAITOK | M_ZERO);
  524         ih->ih_filter = filter;
  525         ih->ih_handler = handler;
  526         ih->ih_argument = arg;
  527         strlcpy(ih->ih_name, name, sizeof(ih->ih_name));
  528         ih->ih_event = ie;
  529         ih->ih_pri = pri;
  530         if (flags & INTR_EXCL)
  531                 ih->ih_flags = IH_EXCLUSIVE;
  532         if (flags & INTR_MPSAFE)
  533                 ih->ih_flags |= IH_MPSAFE;
  534         if (flags & INTR_ENTROPY)
  535                 ih->ih_flags |= IH_ENTROPY;
  536 
  537         /* We can only have one exclusive handler in an event. */
  538         mtx_lock(&ie->ie_lock);
  539         if (!TAILQ_EMPTY(&ie->ie_handlers)) {
  540                 if ((flags & INTR_EXCL) ||
  541                     (TAILQ_FIRST(&ie->ie_handlers)->ih_flags & IH_EXCLUSIVE)) {
  542                         mtx_unlock(&ie->ie_lock);
  543                         free(ih, M_ITHREAD);
  544                         return (EINVAL);
  545                 }
  546         }
  547 
  548         /* Create a thread if we need one. */
  549         while (ie->ie_thread == NULL && handler != NULL) {
  550                 if (ie->ie_flags & IE_ADDING_THREAD)
  551                         msleep(ie, &ie->ie_lock, 0, "ithread", 0);
  552                 else {
  553                         ie->ie_flags |= IE_ADDING_THREAD;
  554                         mtx_unlock(&ie->ie_lock);
  555                         it = ithread_create("intr: newborn");
  556                         mtx_lock(&ie->ie_lock);
  557                         ie->ie_flags &= ~IE_ADDING_THREAD;
  558                         ie->ie_thread = it;
  559                         it->it_event = ie;
  560                         ithread_update(it);
  561                         wakeup(ie);
  562                 }
  563         }
  564 
  565         /* Add the new handler to the event in priority order. */
  566         TAILQ_FOREACH(temp_ih, &ie->ie_handlers, ih_next) {
  567                 if (temp_ih->ih_pri > ih->ih_pri)
  568                         break;
  569         }
  570         if (temp_ih == NULL)
  571                 TAILQ_INSERT_TAIL(&ie->ie_handlers, ih, ih_next);
  572         else
  573                 TAILQ_INSERT_BEFORE(temp_ih, ih, ih_next);
  574         intr_event_update(ie);
  575 
  576         CTR3(KTR_INTR, "%s: added %s to %s", __func__, ih->ih_name,
  577             ie->ie_name);
  578         mtx_unlock(&ie->ie_lock);
  579 
  580         if (cookiep != NULL)
  581                 *cookiep = ih;
  582         return (0);
  583 }
  584 #else
  585 int
  586 intr_event_add_handler(struct intr_event *ie, const char *name,
  587     driver_filter_t filter, driver_intr_t handler, void *arg, u_char pri,
  588     enum intr_type flags, void **cookiep)
  589 {
  590         struct intr_handler *ih, *temp_ih;
  591         struct intr_thread *it;
  592 
  593         if (ie == NULL || name == NULL || (handler == NULL && filter == NULL))
  594                 return (EINVAL);
  595 
  596         /* Allocate and populate an interrupt handler structure. */
  597         ih = malloc(sizeof(struct intr_handler), M_ITHREAD, M_WAITOK | M_ZERO);
  598         ih->ih_filter = filter;
  599         ih->ih_handler = handler;
  600         ih->ih_argument = arg;
  601         strlcpy(ih->ih_name, name, sizeof(ih->ih_name));
  602         ih->ih_event = ie;
  603         ih->ih_pri = pri;
  604         if (flags & INTR_EXCL)
  605                 ih->ih_flags = IH_EXCLUSIVE;
  606         if (flags & INTR_MPSAFE)
  607                 ih->ih_flags |= IH_MPSAFE;
  608         if (flags & INTR_ENTROPY)
  609                 ih->ih_flags |= IH_ENTROPY;
  610 
  611         /* We can only have one exclusive handler in an event. */
  612         mtx_lock(&ie->ie_lock);
  613         if (!TAILQ_EMPTY(&ie->ie_handlers)) {
  614                 if ((flags & INTR_EXCL) ||
  615                     (TAILQ_FIRST(&ie->ie_handlers)->ih_flags & IH_EXCLUSIVE)) {
  616                         mtx_unlock(&ie->ie_lock);
  617                         free(ih, M_ITHREAD);
  618                         return (EINVAL);
  619                 }
  620         }
  621 
  622         /* For filtered handlers, create a private ithread to run on. */
  623         if (filter != NULL && handler != NULL) {
  624                 mtx_unlock(&ie->ie_lock);
  625                 it = ithread_create("intr: newborn", ih);
  626                 mtx_lock(&ie->ie_lock);
  627                 it->it_event = ie;
  628                 ih->ih_thread = it;
  629                 ithread_update(it);     /* XXX - do we really need this?!?!? */
  630         } else { /* Create the global per-event thread if we need one. */
  631                 while (ie->ie_thread == NULL && handler != NULL) {
  632                         if (ie->ie_flags & IE_ADDING_THREAD)
  633                                 msleep(ie, &ie->ie_lock, 0, "ithread", 0);
  634                         else {
  635                                 ie->ie_flags |= IE_ADDING_THREAD;
  636                                 mtx_unlock(&ie->ie_lock);
  637                                 it = ithread_create("intr: newborn", ih);
  638                                 mtx_lock(&ie->ie_lock);
  639                                 ie->ie_flags &= ~IE_ADDING_THREAD;
  640                                 ie->ie_thread = it;
  641                                 it->it_event = ie;
  642                                 ithread_update(it);
  643                                 wakeup(ie);
  644                         }
  645                 }
  646         }
  647 
  648         /* Add the new handler to the event in priority order. */
  649         TAILQ_FOREACH(temp_ih, &ie->ie_handlers, ih_next) {
  650                 if (temp_ih->ih_pri > ih->ih_pri)
  651                         break;
  652         }
  653         if (temp_ih == NULL)
  654                 TAILQ_INSERT_TAIL(&ie->ie_handlers, ih, ih_next);
  655         else
  656                 TAILQ_INSERT_BEFORE(temp_ih, ih, ih_next);
  657         intr_event_update(ie);
  658 
  659         CTR3(KTR_INTR, "%s: added %s to %s", __func__, ih->ih_name,
  660             ie->ie_name);
  661         mtx_unlock(&ie->ie_lock);
  662 
  663         if (cookiep != NULL)
  664                 *cookiep = ih;
  665         return (0);
  666 }
  667 #endif
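A hedged, driver-style sketch of registration (all example_* names are assumptions): a cheap filter that defers real work to a threaded handler. With INTR_FILTER such a pair gets a private ithread; without it the handler shares the event's ithread.

static int
example_filter(void *arg)
{
        /* Primary interrupt context: no sleeping, keep it short. */
        return (FILTER_SCHEDULE_THREAD);        /* defer the real work */
}

static void
example_handler(void *arg)
{
        /* Runs in an ithread; sleep mutexes may be taken here. */
}

static void *example_cookie;

static void
example_add(struct intr_event *ie, void *softc)
{
        int error;

        error = intr_event_add_handler(ie, "example", example_filter,
            example_handler, softc, intr_priority(INTR_TYPE_MISC),
            INTR_MPSAFE, &example_cookie);
        if (error != 0)
                printf("intr_event_add_handler: %d\n", error);
}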
  668 
  669 /*
  670  * Append a description preceded by a ':' to the name of the specified
  671  * interrupt handler.
  672  */
  673 int
  674 intr_event_describe_handler(struct intr_event *ie, void *cookie,
  675     const char *descr)
  676 {
  677         struct intr_handler *ih;
  678         size_t space;
  679         char *start;
  680 
  681         mtx_lock(&ie->ie_lock);
  682 #ifdef INVARIANTS
  683         TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next) {
  684                 if (ih == cookie)
  685                         break;
  686         }
  687         if (ih == NULL) {
  688                 mtx_unlock(&ie->ie_lock);
  689                 panic("handler %p not found in interrupt event %p", cookie, ie);
  690         }
  691 #endif
  692         ih = cookie;
  693 
  694         /*
  695          * Look for an existing description by checking for an
  696          * existing ":".  This assumes device names do not include
  697          * colons.  If one is found, prepare to insert the new
  698          * description at that point.  If one is not found, find the
  699          * end of the name to use as the insertion point.
  700          */
  701         start = index(ih->ih_name, ':');
  702         if (start == NULL)
  703                 start = index(ih->ih_name, 0);
  704 
  705         /*
  706          * See if there is enough remaining room in the string for the
  707          * description + ":".  The "- 1" leaves room for the trailing
  708          * '\0'.  The "+ 1" accounts for the colon.
  709          */
  710         space = sizeof(ih->ih_name) - (start - ih->ih_name) - 1;
  711         if (strlen(descr) + 1 > space) {
  712                 mtx_unlock(&ie->ie_lock);
  713                 return (ENOSPC);
  714         }
  715 
  716         /* Append a colon followed by the description. */
  717         *start = ':';
  718         strcpy(start + 1, descr);
  719         intr_event_update(ie);
  720         mtx_unlock(&ie->ie_lock);
  721         return (0);
  722 }
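A worked example against the handler added above: if it was registered as "example", describing it as "rx" renames it to "example:rx", and a later call with "tx" overwrites that suffix rather than appending a second one.

static void
example_describe(struct intr_event *ie)
{
        if (intr_event_describe_handler(ie, example_cookie, "rx") == ENOSPC)
                printf("description does not fit in ih_name\n");
}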
  723 
  724 /*
  725  * Return the ie_source field from the intr_event an intr_handler is
  726  * associated with.
  727  */
  728 void *
  729 intr_handler_source(void *cookie)
  730 {
  731         struct intr_handler *ih;
  732         struct intr_event *ie;
  733 
  734         ih = (struct intr_handler *)cookie;
  735         if (ih == NULL)
  736                 return (NULL);
  737         ie = ih->ih_event;
  738         KASSERT(ie != NULL,
  739             ("interrupt handler \"%s\" has a NULL interrupt event",
  740             ih->ih_name));
  741         return (ie->ie_source);
  742 }
  743 
  744 /*
  745  * Sleep until an ithread finishes executing an interrupt handler.
  746  *
  747  * XXX Doesn't currently handle interrupt filters or fast interrupt
  748  * handlers.  This is intended for compatibility with linux drivers
  749  * only.  Do not use in BSD code.
  750  */
  751 void
  752 _intr_drain(int irq)
  753 {
  754         struct intr_event *ie;
  755         struct intr_thread *ithd;
  756         struct thread *td;
  757 
  758         ie = intr_lookup(irq);
  759         if (ie == NULL)
  760                 return;
  761         if (ie->ie_thread == NULL)
  762                 return;
  763         ithd = ie->ie_thread;
  764         td = ithd->it_thread;
  765         /*
  766          * We set the flag and wait for it to be cleared to avoid
  767          * long delays with potentially busy interrupt handlers
  768          * were we to only sample TD_AWAITING_INTR() every tick.
  769          */
  770         thread_lock(td);
  771         if (!TD_AWAITING_INTR(td)) {
  772                 ithd->it_flags |= IT_WAIT;
  773                 while (ithd->it_flags & IT_WAIT) {
  774                         thread_unlock(td);
  775                         pause("idrain", 1);
  776                         thread_lock(td);
  777                 }
  778         }
  779         thread_unlock(td);
  780         return;
  781 }
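A sketch of the intended consumer, a Linux-compatibility synchronize_irq()-style shim; the wrapper name is an assumption.

static void
example_synchronize_irq(int irq)
{
        /* Returns once any in-flight pass of the irq's ithread is done. */
        _intr_drain(irq);
}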
  782 
  783 
  784 #ifndef INTR_FILTER
  785 int
  786 intr_event_remove_handler(void *cookie)
  787 {
  788         struct intr_handler *handler = (struct intr_handler *)cookie;
  789         struct intr_event *ie;
  790 #ifdef INVARIANTS
  791         struct intr_handler *ih;
  792 #endif
  793 #ifdef notyet
  794         int dead;
  795 #endif
  796 
  797         if (handler == NULL)
  798                 return (EINVAL);
  799         ie = handler->ih_event;
  800         KASSERT(ie != NULL,
  801             ("interrupt handler \"%s\" has a NULL interrupt event",
  802             handler->ih_name));
  803         mtx_lock(&ie->ie_lock);
  804         CTR3(KTR_INTR, "%s: removing %s from %s", __func__, handler->ih_name,
  805             ie->ie_name);
  806 #ifdef INVARIANTS
  807         TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next)
  808                 if (ih == handler)
  809                         goto ok;
  810         mtx_unlock(&ie->ie_lock);
  811         panic("interrupt handler \"%s\" not found in interrupt event \"%s\"",
  812             ih->ih_name, ie->ie_name);
  813 ok:
  814 #endif
  815         /*
  816          * If there is no ithread, then just remove the handler and return.
  817          * XXX: Note that an INTR_FAST handler might be running on another
  818          * CPU!
  819          */
  820         if (ie->ie_thread == NULL) {
  821                 TAILQ_REMOVE(&ie->ie_handlers, handler, ih_next);
  822                 mtx_unlock(&ie->ie_lock);
  823                 free(handler, M_ITHREAD);
  824                 return (0);
  825         }
  826 
  827         /*
  828          * If the interrupt thread is already running, then just mark this
  829          * handler as being dead and let the ithread do the actual removal.
  830          *
  831          * During a cold boot while cold is set, msleep() does not sleep,
  832          * so we have to remove the handler here rather than letting the
  833          * thread do it.
  834          */
  835         thread_lock(ie->ie_thread->it_thread);
  836         if (!TD_AWAITING_INTR(ie->ie_thread->it_thread) && !cold) {
  837                 handler->ih_flags |= IH_DEAD;
  838 
  839                 /*
  840                  * Ensure that the thread will process the handler list
  841                  * again and remove this handler if it has already passed
  842                  * it on the list.
  843                  */
  844                 atomic_store_rel_int(&ie->ie_thread->it_need, 1);
  845         } else
  846                 TAILQ_REMOVE(&ie->ie_handlers, handler, ih_next);
  847         thread_unlock(ie->ie_thread->it_thread);
  848         while (handler->ih_flags & IH_DEAD)
  849                 msleep(handler, &ie->ie_lock, 0, "iev_rmh", 0);
  850         intr_event_update(ie);
  851 #ifdef notyet
  852         /*
  853          * XXX: This could be bad in the case of ppbus(4).  Also, I think
  854          * this could lead to races of stale data when servicing an
  855          * interrupt.
  856          */
  857         dead = 1;
  858         TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next) {
  859                 if (!(ih->ih_flags & IH_FAST)) {
  860                         dead = 0;
  861                         break;
  862                 }
  863         }
  864         if (dead) {
  865                 ithread_destroy(ie->ie_thread);
  866                 ie->ie_thread = NULL;
  867         }
  868 #endif
  869         mtx_unlock(&ie->ie_lock);
  870         free(handler, M_ITHREAD);
  871         return (0);
  872 }
  873 
  874 static int
  875 intr_event_schedule_thread(struct intr_event *ie)
  876 {
  877         struct intr_entropy entropy;
  878         struct intr_thread *it;
  879         struct thread *td;
  880         struct thread *ctd;
  881         struct proc *p;
  882 
  883         /*
  884          * If no ithread or no handlers, then we have a stray interrupt.
  885          */
  886         if (ie == NULL || TAILQ_EMPTY(&ie->ie_handlers) ||
  887             ie->ie_thread == NULL)
  888                 return (EINVAL);
  889 
  890         ctd = curthread;
  891         it = ie->ie_thread;
  892         td = it->it_thread;
  893         p = td->td_proc;
  894 
  895         /*
  896          * If any of the handlers for this ithread claim to be good
  897          * sources of entropy, then gather some.
  898          */
  899         if (harvest.interrupt && ie->ie_flags & IE_ENTROPY) {
  900                 CTR3(KTR_INTR, "%s: pid %d (%s) gathering entropy", __func__,
  901                     p->p_pid, td->td_name);
  902                 entropy.event = (uintptr_t)ie;
  903                 entropy.td = ctd;
  904                 random_harvest(&entropy, sizeof(entropy), 2, 0,
  905                     RANDOM_INTERRUPT);
  906         }
  907 
  908         KASSERT(p != NULL, ("ithread %s has no process", ie->ie_name));
  909 
  910         /*
  911          * Set it_need to tell the thread to keep running if it is already
  912          * running.  Then, lock the thread and see if we actually need to
  913          * put it on the runqueue.
  914          */
  915         atomic_store_rel_int(&it->it_need, 1);
  916         thread_lock(td);
  917         if (TD_AWAITING_INTR(td)) {
  918                 CTR3(KTR_INTR, "%s: schedule pid %d (%s)", __func__, p->p_pid,
  919                     td->td_name);
  920                 TD_CLR_IWAIT(td);
  921                 sched_add(td, SRQ_INTR);
  922         } else {
  923                 CTR5(KTR_INTR, "%s: pid %d (%s): it_need %d, state %d",
  924                     __func__, p->p_pid, td->td_name, it->it_need, td->td_state);
  925         }
  926         thread_unlock(td);
  927 
  928         return (0);
  929 }
  930 #else
  931 int
  932 intr_event_remove_handler(void *cookie)
  933 {
  934         struct intr_handler *handler = (struct intr_handler *)cookie;
  935         struct intr_event *ie;
  936         struct intr_thread *it;
  937 #ifdef INVARIANTS
  938         struct intr_handler *ih;
  939 #endif
  940 #ifdef notyet
  941         int dead;
  942 #endif
  943 
  944         if (handler == NULL)
  945                 return (EINVAL);
  946         ie = handler->ih_event;
  947         KASSERT(ie != NULL,
  948             ("interrupt handler \"%s\" has a NULL interrupt event",
  949             handler->ih_name));
  950         mtx_lock(&ie->ie_lock);
  951         CTR3(KTR_INTR, "%s: removing %s from %s", __func__, handler->ih_name,
  952             ie->ie_name);
  953 #ifdef INVARIANTS
  954         TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next)
  955                 if (ih == handler)
  956                         goto ok;
  957         mtx_unlock(&ie->ie_lock);
  958         panic("interrupt handler \"%s\" not found in interrupt event \"%s\"",
  959             ih->ih_name, ie->ie_name);
  960 ok:
  961 #endif
  962         /*
  963          * If there are no ithreads (per event and per handler), then
  964          * just remove the handler and return.  
  965          * XXX: Note that an INTR_FAST handler might be running on another CPU!
  966          */
  967         if (ie->ie_thread == NULL && handler->ih_thread == NULL) {
  968                 TAILQ_REMOVE(&ie->ie_handlers, handler, ih_next);
  969                 mtx_unlock(&ie->ie_lock);
  970                 free(handler, M_ITHREAD);
  971                 return (0);
  972         }
  973 
  974         /* Private or global ithread? */
  975         it = (handler->ih_thread) ? handler->ih_thread : ie->ie_thread;
  976         /*
  977          * If the interrupt thread is already running, then just mark this
  978          * handler as being dead and let the ithread do the actual removal.
  979          *
  980          * During a cold boot while cold is set, msleep() does not sleep,
  981          * so we have to remove the handler here rather than letting the
  982          * thread do it.
  983          */
  984         thread_lock(it->it_thread);
  985         if (!TD_AWAITING_INTR(it->it_thread) && !cold) {
  986                 handler->ih_flags |= IH_DEAD;
  987 
  988                 /*
  989                  * Ensure that the thread will process the handler list
  990                  * again and remove this handler if it has already passed
  991                  * it on the list.
  992                  */
  993                 atomic_store_rel_int(&it->it_need, 1);
  994         } else
  995                 TAILQ_REMOVE(&ie->ie_handlers, handler, ih_next);
  996         thread_unlock(it->it_thread);
  997         while (handler->ih_flags & IH_DEAD)
  998                 msleep(handler, &ie->ie_lock, 0, "iev_rmh", 0);
  999         /* 
 1000          * At this point, the handler has been disconnected from the event,
 1001          * so we can kill the private ithread if any.
 1002          */
 1003         if (handler->ih_thread) {
 1004                 ithread_destroy(handler->ih_thread);
 1005                 handler->ih_thread = NULL;
 1006         }
 1007         intr_event_update(ie);
 1008 #ifdef notyet
 1009         /*
 1010          * XXX: This could be bad in the case of ppbus(4).  Also, I think
 1011          * this could lead to races of stale data when servicing an
 1012          * interrupt.
 1013          */
 1014         dead = 1;
 1015         TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next) {
 1016                 if (ih->ih_handler != NULL) {
 1017                         dead = 0;
 1018                         break;
 1019                 }
 1020         }
 1021         if (dead) {
 1022                 ithread_destroy(ie->ie_thread);
 1023                 ie->ie_thread = NULL;
 1024         }
 1025 #endif
 1026         mtx_unlock(&ie->ie_lock);
 1027         free(handler, M_ITHREAD);
 1028         return (0);
 1029 }
 1030 
 1031 static int
 1032 intr_event_schedule_thread(struct intr_event *ie, struct intr_thread *it)
 1033 {
 1034         struct intr_entropy entropy;
 1035         struct thread *td;
 1036         struct thread *ctd;
 1037         struct proc *p;
 1038 
 1039         /*
 1040          * If no ithread or no handlers, then we have a stray interrupt.
 1041          */
 1042         if (ie == NULL || TAILQ_EMPTY(&ie->ie_handlers) || it == NULL)
 1043                 return (EINVAL);
 1044 
 1045         ctd = curthread;
 1046         td = it->it_thread;
 1047         p = td->td_proc;
 1048 
 1049         /*
 1050          * If any of the handlers for this ithread claim to be good
 1051          * sources of entropy, then gather some.
 1052          */
 1053         if (harvest.interrupt && ie->ie_flags & IE_ENTROPY) {
 1054                 CTR3(KTR_INTR, "%s: pid %d (%s) gathering entropy", __func__,
 1055                     p->p_pid, td->td_name);
 1056                 entropy.event = (uintptr_t)ie;
 1057                 entropy.td = ctd;
 1058                 random_harvest(&entropy, sizeof(entropy), 2, 0,
 1059                     RANDOM_INTERRUPT);
 1060         }
 1061 
 1062         KASSERT(p != NULL, ("ithread %s has no process", ie->ie_name));
 1063 
 1064         /*
 1065          * Set it_need to tell the thread to keep running if it is already
 1066          * running.  Then, lock the thread and see if we actually need to
 1067          * put it on the runqueue.
 1068          */
 1069         atomic_store_rel_int(&it->it_need, 1);
 1070         thread_lock(td);
 1071         if (TD_AWAITING_INTR(td)) {
 1072                 CTR3(KTR_INTR, "%s: schedule pid %d (%s)", __func__, p->p_pid,
 1073                     td->td_name);
 1074                 TD_CLR_IWAIT(td);
 1075                 sched_add(td, SRQ_INTR);
 1076         } else {
 1077                 CTR5(KTR_INTR, "%s: pid %d (%s): it_need %d, state %d",
 1078                     __func__, p->p_pid, td->td_name, it->it_need, td->td_state);
 1079         }
 1080         thread_unlock(td);
 1081 
 1082         return (0);
 1083 }
 1084 #endif
 1085 
 1086 /*
 1087  * Allow interrupt event binding for software interrupt handlers -- a no-op,
 1088  * since interrupts are generated in software rather than being directed by
 1089  * a PIC.
 1090  */
 1091 static int
 1092 swi_assign_cpu(void *arg, u_char cpu)
 1093 {
 1094 
 1095         return (0);
 1096 }
 1097 
 1098 /*
 1099  * Add a software interrupt handler to a specified event.  If a given event
 1100  * is not specified, then a new event is created.
 1101  */
 1102 int
 1103 swi_add(struct intr_event **eventp, const char *name, driver_intr_t handler,
 1104             void *arg, int pri, enum intr_type flags, void **cookiep)
 1105 {
 1106         struct thread *td;
 1107         struct intr_event *ie;
 1108         int error;
 1109 
 1110         if (flags & INTR_ENTROPY)
 1111                 return (EINVAL);
 1112 
 1113         ie = (eventp != NULL) ? *eventp : NULL;
 1114 
 1115         if (ie != NULL) {
 1116                 if (!(ie->ie_flags & IE_SOFT))
 1117                         return (EINVAL);
 1118         } else {
 1119                 error = intr_event_create(&ie, NULL, IE_SOFT, 0,
 1120                     NULL, NULL, NULL, swi_assign_cpu, "swi%d:", pri);
 1121                 if (error)
 1122                         return (error);
 1123                 if (eventp != NULL)
 1124                         *eventp = ie;
 1125         }
 1126         error = intr_event_add_handler(ie, name, NULL, handler, arg,
 1127             PI_SWI(pri), flags, cookiep);
 1128         if (error)
 1129                 return (error);
 1130         if (pri == SWI_CLOCK) {
 1131                 td = ie->ie_thread->it_thread;
 1132                 thread_lock(td);
 1133                 td->td_flags |= TDF_NOLOAD;
 1134                 thread_unlock(td);
 1135         }
 1136         return (0);
 1137 }
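A minimal sketch of the usual swi_add()/swi_sched() pairing, in the same style this file uses for vm_ih; the example_* names are hypothetical.

static void *example_swi_cookie;

static void
example_swi_handler(void *arg)
{
        /* Runs in a swi ithread at PI_SWI(SWI_VM). */
}

static void
example_swi_init(void)
{
        if (swi_add(NULL, "example", example_swi_handler, NULL, SWI_VM,
            INTR_MPSAFE, &example_swi_cookie) != 0)
                panic("could not register example swi");
}

static void
example_swi_raise(void)
{
        /* Safe from contexts that must not sleep. */
        swi_sched(example_swi_cookie, 0);
}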
 1138 
 1139 /*
 1140  * Schedule a software interrupt thread.
 1141  */
 1142 void
 1143 swi_sched(void *cookie, int flags)
 1144 {
 1145         struct intr_handler *ih = (struct intr_handler *)cookie;
 1146         struct intr_event *ie = ih->ih_event;
 1147         struct intr_entropy entropy;
 1148         int error;
 1149 
 1150         CTR3(KTR_INTR, "swi_sched: %s %s need=%d", ie->ie_name, ih->ih_name,
 1151             ih->ih_need);
 1152 
 1153         if (harvest.swi) {
 1154                 CTR2(KTR_INTR, "swi_sched: pid %d (%s) gathering entropy",
 1155                     curproc->p_pid, curthread->td_name);
 1156                 entropy.event = (uintptr_t)ih;
 1157                 entropy.td = curthread;
 1158                 random_harvest(&entropy, sizeof(entropy), 1, 0,
 1159                     RANDOM_INTERRUPT);
 1160         }
 1161 
 1162         /*
 1163          * Set ih_need for this handler so that if the ithread is already
 1164          * running it will execute this handler on the next pass.  Otherwise,
 1165          * it will execute it the next time it runs.
 1166          */
 1167         atomic_store_rel_int(&ih->ih_need, 1);
 1168 
 1169         if (!(flags & SWI_DELAY)) {
 1170                 PCPU_INC(cnt.v_soft);
 1171 #ifdef INTR_FILTER
 1172                 error = intr_event_schedule_thread(ie, ie->ie_thread);
 1173 #else
 1174                 error = intr_event_schedule_thread(ie);
 1175 #endif
 1176                 KASSERT(error == 0, ("stray software interrupt"));
 1177         }
 1178 }
 1179 
 1180 /*
 1181  * Remove a software interrupt handler.  Currently this code does not
 1182  * remove the associated interrupt event if it becomes empty.  Calling code
 1183  * may do so manually via intr_event_destroy(), but that's not really
 1184  * an optimal interface.
 1185  */
 1186 int
 1187 swi_remove(void *cookie)
 1188 {
 1189 
 1190         return (intr_event_remove_handler(cookie));
 1191 }
 1192 
 1193 #ifdef INTR_FILTER
 1194 static void
 1195 priv_ithread_execute_handler(struct proc *p, struct intr_handler *ih)
 1196 {
 1197         struct intr_event *ie;
 1198 
 1199         ie = ih->ih_event;
 1200         /*
 1201          * If this handler is marked for death, remove it from
 1202          * the list of handlers and wake up the sleeper.
 1203          */
 1204         if (ih->ih_flags & IH_DEAD) {
 1205                 mtx_lock(&ie->ie_lock);
 1206                 TAILQ_REMOVE(&ie->ie_handlers, ih, ih_next);
 1207                 ih->ih_flags &= ~IH_DEAD;
 1208                 wakeup(ih);
 1209                 mtx_unlock(&ie->ie_lock);
 1210                 return;
 1211         }
 1212         
 1213         /* Execute this handler. */
 1214         CTR6(KTR_INTR, "%s: pid %d exec %p(%p) for %s flg=%x",
 1215              __func__, p->p_pid, (void *)ih->ih_handler, ih->ih_argument,
 1216              ih->ih_name, ih->ih_flags);
 1217         
 1218         if (!(ih->ih_flags & IH_MPSAFE))
 1219                 mtx_lock(&Giant);
 1220         ih->ih_handler(ih->ih_argument);
 1221         if (!(ih->ih_flags & IH_MPSAFE))
 1222                 mtx_unlock(&Giant);
 1223 }
 1224 #endif
 1225 
 1226 /*
 1227  * This is a public function for use by drivers that mux interrupt
 1228  * handlers for child devices from their interrupt handler.
 1229  */
 1230 void
 1231 intr_event_execute_handlers(struct proc *p, struct intr_event *ie)
 1232 {
 1233         struct intr_handler *ih, *ihn;
 1234 
 1235         TAILQ_FOREACH_SAFE(ih, &ie->ie_handlers, ih_next, ihn) {
 1236                 /*
 1237                  * If this handler is marked for death, remove it from
 1238                  * the list of handlers and wake up the sleeper.
 1239                  */
 1240                 if (ih->ih_flags & IH_DEAD) {
 1241                         mtx_lock(&ie->ie_lock);
 1242                         TAILQ_REMOVE(&ie->ie_handlers, ih, ih_next);
 1243                         ih->ih_flags &= ~IH_DEAD;
 1244                         wakeup(ih);
 1245                         mtx_unlock(&ie->ie_lock);
 1246                         continue;
 1247                 }
 1248 
 1249                 /* Skip filter-only handlers. */
 1250                 if (ih->ih_handler == NULL)
 1251                         continue;
 1252 
 1253                 /*
 1254                  * For software interrupt threads, we only execute
 1255                  * handlers that have their need flag set.  Hardware
 1256                  * interrupt threads always invoke all of their handlers.
 1257                  */
 1258                 if (ie->ie_flags & IE_SOFT) {
 1259                         if (atomic_load_acq_int(&ih->ih_need) == 0)
 1260                                 continue;
 1261                         else
 1262                                 atomic_store_rel_int(&ih->ih_need, 0);
 1263                 }
 1264 
 1265                 /* Execute this handler. */
 1266                 CTR6(KTR_INTR, "%s: pid %d exec %p(%p) for %s flg=%x",
 1267                     __func__, p->p_pid, (void *)ih->ih_handler, 
 1268                     ih->ih_argument, ih->ih_name, ih->ih_flags);
 1269 
 1270                 if (!(ih->ih_flags & IH_MPSAFE))
 1271                         mtx_lock(&Giant);
 1272                 ih->ih_handler(ih->ih_argument);
 1273                 if (!(ih->ih_flags & IH_MPSAFE))
 1274                         mtx_unlock(&Giant);
 1275         }
 1276 }
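A hedged sketch of the muxing pattern described above: a bridge driver keeps a child intr_event and dispatches it from its own threaded handler. struct example_softc and its field are assumptions.

struct example_softc {
        struct intr_event *sc_child_ie;         /* child device handlers */
};

static void
example_parent_intr(void *arg)
{
        struct example_softc *sc = arg;

        /* Run every live child handler; IH_DEAD ones get reaped here. */
        intr_event_execute_handlers(curproc, sc->sc_child_ie);
}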
 1277 
 1278 static void
 1279 ithread_execute_handlers(struct proc *p, struct intr_event *ie)
 1280 {
 1281 
 1282         /* Interrupt handlers should not sleep. */
 1283         if (!(ie->ie_flags & IE_SOFT))
 1284                 THREAD_NO_SLEEPING();
 1285         intr_event_execute_handlers(p, ie);
 1286         if (!(ie->ie_flags & IE_SOFT))
 1287                 THREAD_SLEEPING_OK();
 1288 
 1289         /*
 1290          * Interrupt storm handling:
 1291          *
 1292          * If this interrupt source is currently storming, then throttle
 1293          * it to only fire the handler once per clock tick.
 1294          *
 1295          * If this interrupt source is not currently storming, but the
 1296          * number of back to back interrupts exceeds the storm threshold,
 1297          * then enter storming mode.
 1298          */
 1299         if (intr_storm_threshold != 0 && ie->ie_count >= intr_storm_threshold &&
 1300             !(ie->ie_flags & IE_SOFT)) {
 1301                 /* Report the message only once every second. */
 1302                 if (ppsratecheck(&ie->ie_warntm, &ie->ie_warncnt, 1)) {
 1303                         printf(
 1304         "interrupt storm detected on \"%s\"; throttling interrupt source\n",
 1305                             ie->ie_name);
 1306                 }
 1307                 pause("istorm", 1);
 1308         } else
 1309                 ie->ie_count++;
 1310 
 1311         /*
 1312          * Now that all the handlers have had a chance to run, reenable
 1313          * the interrupt source.
 1314          */
 1315         if (ie->ie_post_ithread != NULL)
 1316                 ie->ie_post_ithread(ie->ie_source);
 1317 }
 1318 
 1319 #ifndef INTR_FILTER
 1320 /*
 1321  * This is the main code for interrupt threads.
 1322  */
 1323 static void
 1324 ithread_loop(void *arg)
 1325 {
 1326         struct intr_thread *ithd;
 1327         struct intr_event *ie;
 1328         struct thread *td;
 1329         struct proc *p;
 1330         int wake;
 1331 
 1332         td = curthread;
 1333         p = td->td_proc;
 1334         ithd = (struct intr_thread *)arg;
 1335         KASSERT(ithd->it_thread == td,
 1336             ("%s: ithread and proc linkage out of sync", __func__));
 1337         ie = ithd->it_event;
 1338         ie->ie_count = 0;
 1339         wake = 0;
 1340 
 1341         /*
 1342          * As long as we have interrupts outstanding, go through the
 1343          * list of handlers, giving each one a go at it.
 1344          */
 1345         for (;;) {
 1346                 /*
 1347                  * If we are an orphaned thread, then just die.
 1348                  */
 1349                 if (ithd->it_flags & IT_DEAD) {
 1350                         CTR3(KTR_INTR, "%s: pid %d (%s) exiting", __func__,
 1351                             p->p_pid, td->td_name);
 1352                         free(ithd, M_ITHREAD);
 1353                         kthread_exit();
 1354                 }
 1355 
 1356                 /*
 1357                  * Service interrupts.  If another interrupt arrives while
 1358                  * we are running, it will set it_need to note that we
 1359                  * should make another pass.
 1360                  */
 1361                 while (atomic_load_acq_int(&ithd->it_need) != 0) {
 1362                         /*
 1363                          * This might need a full read and write barrier
 1364                          * to make sure that this write posts before any
 1365                          * of the memory or device accesses in the
 1366                          * handlers.
 1367                          */
 1368                         atomic_store_rel_int(&ithd->it_need, 0);
 1369                         ithread_execute_handlers(p, ie);
 1370                 }
 1371                 WITNESS_WARN(WARN_PANIC, NULL, "suspending ithread");
 1372                 mtx_assert(&Giant, MA_NOTOWNED);
 1373 
 1374                 /*
 1375                  * Processed all our interrupts.  Now get the sched
 1376                  * lock.  This may take a while and it_need may get
 1377                  * set again, so we have to check it again.
 1378                  */
 1379                 thread_lock(td);
 1380                 if ((atomic_load_acq_int(&ithd->it_need) == 0) &&
 1381                     !(ithd->it_flags & (IT_DEAD | IT_WAIT))) {
 1382                         TD_SET_IWAIT(td);
 1383                         ie->ie_count = 0;
 1384                         mi_switch(SW_VOL | SWT_IWAIT, NULL);
 1385                 }
 1386                 if (ithd->it_flags & IT_WAIT) {
 1387                         wake = 1;
 1388                         ithd->it_flags &= ~IT_WAIT;
 1389                 }
 1390                 thread_unlock(td);
 1391                 if (wake) {
 1392                         wakeup(ithd);
 1393                         wake = 0;
 1394                 }
 1395         }
 1396 }
 1397 
 1398 /*
 1399  * Main interrupt handling body.
 1400  *
 1401  * Input:
 1402  * o ie:                        the event connected to this interrupt.
 1403  * o frame:                     some archs (e.g., i386) pass a frame to some
 1404  *                              handlers as their main argument.
 1405  * Return value:
 1406  * o 0:                         everything ok.
 1407  * o EINVAL:                    stray interrupt.
 1408  */
 1409 int
 1410 intr_event_handle(struct intr_event *ie, struct trapframe *frame)
 1411 {
 1412         struct intr_handler *ih;
 1413         struct trapframe *oldframe;
 1414         struct thread *td;
 1415         int error, ret, thread;
 1416 
 1417         td = curthread;
 1418 
 1419         /* An interrupt with no event or handlers is a stray interrupt. */
 1420         if (ie == NULL || TAILQ_EMPTY(&ie->ie_handlers))
 1421                 return (EINVAL);
 1422 
 1423         /*
 1424          * Execute fast interrupt handlers directly.
 1425          * To support clock handlers, if a handler registers
 1426          * with a NULL argument, then we pass it a pointer to
 1427          * a trapframe as its argument.
 1428          */
 1429         td->td_intr_nesting_level++;
 1430         thread = 0;
 1431         ret = 0;
 1432         critical_enter();
 1433         oldframe = td->td_intr_frame;
 1434         td->td_intr_frame = frame;
 1435         TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next) {
 1436                 if (ih->ih_filter == NULL) {
 1437                         thread = 1;
 1438                         continue;
 1439                 }
 1440                 CTR4(KTR_INTR, "%s: exec %p(%p) for %s", __func__,
 1441                     ih->ih_filter, ih->ih_argument == NULL ? frame :
 1442                     ih->ih_argument, ih->ih_name);
 1443                 if (ih->ih_argument == NULL)
 1444                         ret = ih->ih_filter(frame);
 1445                 else
 1446                         ret = ih->ih_filter(ih->ih_argument);
 1447                 KASSERT(ret == FILTER_STRAY ||
 1448                     ((ret & (FILTER_SCHEDULE_THREAD | FILTER_HANDLED)) != 0 &&
 1449                     (ret & ~(FILTER_SCHEDULE_THREAD | FILTER_HANDLED)) == 0),
 1450                     ("%s: incorrect return value %#x from %s", __func__, ret,
 1451                     ih->ih_name));
 1452 
 1453                 /*
 1454                  * Wrapper handler special handling:
 1455                  *
 1456                  * In some particular cases (e.g., pccard and pccbb),
 1457                  * the _real_ device handler is wrapped in a couple of
 1458                  * functions - a filter wrapper and an ithread wrapper.
 1459                  * In this case (and just in this case), the filter wrapper
 1460                  * may ask the system to schedule the ithread and mask
 1461                  * the interrupt source if the wrapped handler is composed
 1462                  * of just an ithread handler.
 1463                  *
 1464                  * TODO: write a generic wrapper to avoid people rolling
 1465                  * their own.
 1466                  */
 1467                 if (!thread) {
 1468                         if (ret == FILTER_SCHEDULE_THREAD)
 1469                                 thread = 1;
 1470                 }
 1471         }
 1472         td->td_intr_frame = oldframe;
 1473 
 1474         if (thread) {
 1475                 if (ie->ie_pre_ithread != NULL)
 1476                         ie->ie_pre_ithread(ie->ie_source);
 1477         } else {
 1478                 if (ie->ie_post_filter != NULL)
 1479                         ie->ie_post_filter(ie->ie_source);
 1480         }
 1481         
 1482         /* Schedule the ithread if needed. */
 1483         if (thread) {
 1484                 error = intr_event_schedule_thread(ie);
 1485 #ifndef XEN             
 1486                 KASSERT(error == 0, ("bad stray interrupt"));
 1487 #else
 1488                 if (error != 0)
 1489                         log(LOG_WARNING, "bad stray interrupt");
 1490 #endif          
 1491         }
 1492         critical_exit();
 1493         td->td_intr_nesting_level--;
 1494         return (0);
 1495 }
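/*
 * Illustrative sketch only: a driver filter that satisfies the return
 * value contract asserted by the KASSERT above.  The foo_* names and
 * macros are hypothetical, not part of any real driver.
 */
#if 0
static int
foo_intr_filter(void *arg)
{
        struct foo_softc *sc = arg;

        if (!FOO_INTR_PENDING(sc))
                return (FILTER_STRAY);          /* not our interrupt */
        FOO_MASK_INTR(sc);                      /* keep the line quiet */
        return (FILTER_SCHEDULE_THREAD);        /* defer work to the ithread */
}
#endif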
 1496 #else
 1497 /*
 1498  * This is the main code for interrupt threads.
 1499  */
 1500 static void
 1501 ithread_loop(void *arg)
 1502 {
 1503         struct intr_thread *ithd;
 1504         struct intr_handler *ih;
 1505         struct intr_event *ie;
 1506         struct thread *td;
 1507         struct proc *p;
 1508         int priv;
 1509         int wake;
 1510 
 1511         td = curthread;
 1512         p = td->td_proc;
 1513         ih = (struct intr_handler *)arg;
 1514         priv = (ih->ih_thread != NULL) ? 1 : 0;
 1515         ithd = (priv) ? ih->ih_thread : ih->ih_event->ie_thread;
 1516         KASSERT(ithd->it_thread == td,
 1517             ("%s: ithread and proc linkage out of sync", __func__));
 1518         ie = ithd->it_event;
 1519         ie->ie_count = 0;
 1520         wake = 0;
 1521 
 1522         /*
 1523          * As long as we have interrupts outstanding, go through the
 1524          * list of handlers, giving each one a go at it.
 1525          */
 1526         for (;;) {
 1527                 /*
 1528                  * If we are an orphaned thread, then just die.
 1529                  */
 1530                 if (ithd->it_flags & IT_DEAD) {
 1531                         CTR3(KTR_INTR, "%s: pid %d (%s) exiting", __func__,
 1532                             p->p_pid, td->td_name);
 1533                         free(ithd, M_ITHREAD);
 1534                         kthread_exit();
 1535                 }
 1536 
 1537                 /*
 1538                  * Service interrupts.  If another interrupt arrives while
 1539                  * we are running, it will set it_need to note that we
 1540                  * should make another pass.
 1541                  */
 1542                 while (atomic_load_acq_int(&ithd->it_need) != 0) {
 1543                         /*
 1544                          * This might need a full read and write barrier
 1545                          * to make sure that this write posts before any
 1546                          * of the memory or device accesses in the
 1547                          * handlers.
 1548                          */
 1549                         atomic_store_rel_int(&ithd->it_need, 0);
 1550                         if (priv)
 1551                                 priv_ithread_execute_handler(p, ih);
 1552                         else 
 1553                                 ithread_execute_handlers(p, ie);
 1554                 }
 1555                 WITNESS_WARN(WARN_PANIC, NULL, "suspending ithread");
 1556                 mtx_assert(&Giant, MA_NOTOWNED);
 1557 
 1558                 /*
 1559                  * Processed all our interrupts.  Now get the sched
 1560                  * lock.  This may take a while and it_need may get
 1561                  * set again, so we have to check it again.
 1562                  */
 1563                 thread_lock(td);
 1564                 if ((atomic_load_acq_int(&ithd->it_need) == 0) &&
 1565                     !(ithd->it_flags & (IT_DEAD | IT_WAIT))) {
 1566                         TD_SET_IWAIT(td);
 1567                         ie->ie_count = 0;
 1568                         mi_switch(SW_VOL | SWT_IWAIT, NULL);
 1569                 }
 1570                 if (ithd->it_flags & IT_WAIT) {
 1571                         wake = 1;
 1572                         ithd->it_flags &= ~IT_WAIT;
 1573                 }
 1574                 thread_unlock(td);
 1575                 if (wake) {
 1576                         wakeup(ithd);
 1577                         wake = 0;
 1578                 }
 1579         }
 1580 }
 1581 
 1582 /* 
 1583  * Main loop for interrupt filter.
 1584  *
 1585  * Some architectures (i386, amd64 and arm) require the optional frame 
 1586  * parameter, and use it as the main argument for fast handler execution
 1587  * when ih_argument == NULL.
 1588  *
 1589  * Return value:
 1590  * o FILTER_STRAY:              No filter recognized the event, and no
 1591  *                              filter-less handler is registered on this 
 1592  *                              line.
 1593  * o FILTER_HANDLED:            A filter claimed the event and served it.
 1594  * o FILTER_SCHEDULE_THREAD:    No filter claimed the event, but there's at
 1595  *                              least one filter-less handler on this line.
 1596  * o FILTER_HANDLED | 
 1597  *   FILTER_SCHEDULE_THREAD:    A filter claimed the event, and asked for
 1598  *                              scheduling the per-handler ithread.
 1599  *
 1600  * If an ithread has to be scheduled, *ithd will hold a pointer to
 1601  * the struct intr_thread describing the thread that should be
 1602  * scheduled.
 1603  */
 1604 
 1605 static int
 1606 intr_filter_loop(struct intr_event *ie, struct trapframe *frame, 
 1607                  struct intr_thread **ithd) 
 1608 {
 1609         struct intr_handler *ih;
 1610         void *arg;
 1611         int ret, thread_only;
 1612 
 1613         ret = 0;
 1614         thread_only = 0;
 1615         TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next) {
 1616                 /*
 1617                  * Execute fast interrupt handlers directly.
 1618                  * To support clock handlers, if a handler registers
 1619                  * with a NULL argument, then we pass it a pointer to
 1620                  * a trapframe as its argument.
 1621                  */
 1622                 arg = ((ih->ih_argument == NULL) ? frame : ih->ih_argument);
 1623                 
 1624                 CTR5(KTR_INTR, "%s: exec %p/%p(%p) for %s", __func__,
 1625                      ih->ih_filter, ih->ih_handler, arg, ih->ih_name);
 1626 
 1627                 if (ih->ih_filter != NULL)
 1628                         ret = ih->ih_filter(arg);
 1629                 else {
 1630                         thread_only = 1;
 1631                         continue;
 1632                 }
 1633                 KASSERT(ret == FILTER_STRAY ||
 1634                     ((ret & (FILTER_SCHEDULE_THREAD | FILTER_HANDLED)) != 0 &&
 1635                     (ret & ~(FILTER_SCHEDULE_THREAD | FILTER_HANDLED)) == 0),
 1636                     ("%s: incorrect return value %#x from %s", __func__, ret,
 1637                     ih->ih_name));
 1638                 if (ret & FILTER_STRAY)
 1639                         continue;
 1640                 else { 
 1641                         *ithd = ih->ih_thread;
 1642                         return (ret);
 1643                 }
 1644         }
 1645 
 1646         /*
 1647          * No filters handled the interrupt and we have at least
 1648          * one handler without a filter.  In this case, we schedule
 1649          * all of the filter-less handlers to run in the ithread.
 1650          */     
 1651         if (thread_only) {
 1652                 *ithd = ie->ie_thread;
 1653                 return (FILTER_SCHEDULE_THREAD);
 1654         }
 1655         return (FILTER_STRAY);
 1656 }
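/*
 * Illustrative sketch only: the filter/handler pairs walked by the loop
 * above are registered by drivers through bus_setup_intr(9), roughly as
 * follows.  The foo_* names are hypothetical.
 */
#if 0
        error = bus_setup_intr(dev, sc->irq_res,
            INTR_TYPE_NET | INTR_MPSAFE, foo_intr_filter,
            foo_intr_ithread, sc, &sc->intr_cookie);
#endif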
 1657 
 1658 /*
 1659  * Main interrupt handling body.
 1660  *
 1661  * Input:
 1662  * o ie:                        the event connected to this interrupt.
 1663  * o frame:                     some archs (e.g., i386) pass a frame to some
 1664  *                              handlers as their main argument.
 1665  * Return value:
 1666  * o 0:                         everything ok.
 1667  * o EINVAL:                    stray interrupt.
 1668  */
 1669 int
 1670 intr_event_handle(struct intr_event *ie, struct trapframe *frame)
 1671 {
 1672         struct intr_thread *ithd;
 1673         struct trapframe *oldframe;
 1674         struct thread *td;
 1675         int thread;
 1676 
 1677         ithd = NULL;
 1678         td = curthread;
 1679 
 1680         if (ie == NULL || TAILQ_EMPTY(&ie->ie_handlers))
 1681                 return (EINVAL);
 1682 
 1683         td->td_intr_nesting_level++;
 1684         thread = 0;
 1685         critical_enter();
 1686         oldframe = td->td_intr_frame;
 1687         td->td_intr_frame = frame;
 1688         thread = intr_filter_loop(ie, frame, &ithd);    
 1689         if (thread & FILTER_HANDLED) {
 1690                 if (ie->ie_post_filter != NULL)
 1691                         ie->ie_post_filter(ie->ie_source);
 1692         } else {
 1693                 if (ie->ie_pre_ithread != NULL)
 1694                         ie->ie_pre_ithread(ie->ie_source);
 1695         }
 1696         td->td_intr_frame = oldframe;
 1697         critical_exit();
 1698         
 1699         /* Interrupt storm logic: count stray interrupts on this line. */
 1700         if (thread & FILTER_STRAY) {
 1701                 ie->ie_count++;
 1702                 if (ie->ie_count < intr_storm_threshold)
 1703                         printf("Interrupt stray detection not present\n");
 1704         }
 1705 
 1706         /* Schedule an ithread if needed. */
 1707         if (thread & FILTER_SCHEDULE_THREAD) {
 1708                 if (intr_event_schedule_thread(ie, ithd) != 0)
 1709                         panic("%s: impossible stray interrupt", __func__);
 1710         }
 1711         td->td_intr_nesting_level--;
 1712         return (0);
 1713 }
 1714 #endif
 1715 
 1716 #ifdef DDB
 1717 /*
 1718  * Dump details about an interrupt handler
 1719  */
 1720 static void
 1721 db_dump_intrhand(struct intr_handler *ih)
 1722 {
 1723         int comma;
 1724 
 1725         db_printf("\t%-10s ", ih->ih_name);
 1726         switch (ih->ih_pri) {
 1727         case PI_REALTIME:
 1728                 db_printf("CLK ");
 1729                 break;
 1730         case PI_AV:
 1731                 db_printf("AV  ");
 1732                 break;
 1733         case PI_TTY:
 1734                 db_printf("TTY ");
 1735                 break;
 1736         case PI_NET:
 1737                 db_printf("NET ");
 1738                 break;
 1739         case PI_DISK:
 1740                 db_printf("DISK");
 1741                 break;
 1742         case PI_DULL:
 1743                 db_printf("DULL");
 1744                 break;
 1745         default:
 1746                 if (ih->ih_pri >= PI_SOFT)
 1747                         db_printf("SWI ");
 1748                 else
 1749                         db_printf("%4u", ih->ih_pri);
 1750                 break;
 1751         }
 1752         db_printf(" ");
 1753         if (ih->ih_filter != NULL) {
 1754                 db_printf("[F]");
 1755                 db_printsym((uintptr_t)ih->ih_filter, DB_STGY_PROC);
 1756         }
 1757         if (ih->ih_handler != NULL) {
 1758                 if (ih->ih_filter != NULL)
 1759                         db_printf(",");
 1760                 db_printf("[H]");
 1761                 db_printsym((uintptr_t)ih->ih_handler, DB_STGY_PROC);
 1762         }
 1763         db_printf("(%p)", ih->ih_argument);
 1764         if (ih->ih_need ||
 1765             (ih->ih_flags & (IH_EXCLUSIVE | IH_ENTROPY | IH_DEAD |
 1766             IH_MPSAFE)) != 0) {
 1767                 db_printf(" {");
 1768                 comma = 0;
 1769                 if (ih->ih_flags & IH_EXCLUSIVE) {
 1770                         if (comma)
 1771                                 db_printf(", ");
 1772                         db_printf("EXCL");
 1773                         comma = 1;
 1774                 }
 1775                 if (ih->ih_flags & IH_ENTROPY) {
 1776                         if (comma)
 1777                                 db_printf(", ");
 1778                         db_printf("ENTROPY");
 1779                         comma = 1;
 1780                 }
 1781                 if (ih->ih_flags & IH_DEAD) {
 1782                         if (comma)
 1783                                 db_printf(", ");
 1784                         db_printf("DEAD");
 1785                         comma = 1;
 1786                 }
 1787                 if (ih->ih_flags & IH_MPSAFE) {
 1788                         if (comma)
 1789                                 db_printf(", ");
 1790                         db_printf("MPSAFE");
 1791                         comma = 1;
 1792                 }
 1793                 if (ih->ih_need) {
 1794                         if (comma)
 1795                                 db_printf(", ");
 1796                         db_printf("NEED");
 1797                 }
 1798                 db_printf("}");
 1799         }
 1800         db_printf("\n");
 1801 }
 1802 
 1803 /*
 1804  * Dump details about an event.
 1805  */
 1806 void
 1807 db_dump_intr_event(struct intr_event *ie, int handlers)
 1808 {
 1809         struct intr_handler *ih;
 1810         struct intr_thread *it;
 1811         int comma;
 1812 
 1813         db_printf("%s ", ie->ie_fullname);
 1814         it = ie->ie_thread;
 1815         if (it != NULL)
 1816                 db_printf("(pid %d)", it->it_thread->td_proc->p_pid);
 1817         else
 1818                 db_printf("(no thread)");
 1819         if ((ie->ie_flags & (IE_SOFT | IE_ENTROPY | IE_ADDING_THREAD)) != 0 ||
 1820             (it != NULL && it->it_need)) {
 1821                 db_printf(" {");
 1822                 comma = 0;
 1823                 if (ie->ie_flags & IE_SOFT) {
 1824                         db_printf("SOFT");
 1825                         comma = 1;
 1826                 }
 1827                 if (ie->ie_flags & IE_ENTROPY) {
 1828                         if (comma)
 1829                                 db_printf(", ");
 1830                         db_printf("ENTROPY");
 1831                         comma = 1;
 1832                 }
 1833                 if (ie->ie_flags & IE_ADDING_THREAD) {
 1834                         if (comma)
 1835                                 db_printf(", ");
 1836                         db_printf("ADDING_THREAD");
 1837                         comma = 1;
 1838                 }
 1839                 if (it != NULL && it->it_need) {
 1840                         if (comma)
 1841                                 db_printf(", ");
 1842                         db_printf("NEED");
 1843                 }
 1844                 db_printf("}");
 1845         }
 1846         db_printf("\n");
 1847 
 1848         if (handlers)
 1849                 TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next)
 1850                     db_dump_intrhand(ih);
 1851 }
 1852 
 1853 /*
 1854  * Dump data about interrupt handlers
 1855  */
 1856 DB_SHOW_COMMAND(intr, db_show_intr)
 1857 {
 1858         struct intr_event *ie;
 1859         int all, verbose;
 1860 
 1861         verbose = index(modif, 'v') != NULL;
 1862         all = index(modif, 'a') != NULL;
 1863         TAILQ_FOREACH(ie, &event_list, ie_list) {
 1864                 if (!all && TAILQ_EMPTY(&ie->ie_handlers))
 1865                         continue;
 1866                 db_dump_intr_event(ie, verbose);
 1867                 if (db_pager_quit)
 1868                         break;
 1869         }
 1870 }
 1871 #endif /* DDB */
 1872 
 1873 /*
 1874  * Start standard software interrupt threads
 1875  */
 1876 static void
 1877 start_softintr(void *dummy)
 1878 {
 1879 
 1880         if (swi_add(NULL, "vm", swi_vm, NULL, SWI_VM, INTR_MPSAFE, &vm_ih))
 1881                 panic("died while creating vm swi ithread");
 1882 }
 1883 SYSINIT(start_softintr, SI_SUB_SOFTINTR, SI_ORDER_FIRST, start_softintr,
 1884     NULL);
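/*
 * Illustrative sketch only: other subsystems register and trigger
 * software interrupts the same way.  The foo_* names are hypothetical.
 */
#if 0
        static void *foo_cookie;

        if (swi_add(NULL, "foo", foo_swi_handler, NULL, SWI_VM,
            INTR_MPSAFE, &foo_cookie))
                panic("died while creating foo swi ithread");
        /* Later, from a context that cannot sleep: */
        swi_sched(foo_cookie, 0);
#endif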
 1885 
 1886 /*
 1887  * Sysctls used by systat and others: hw.intrnames and hw.intrcnt.
 1888  * The data for this is machine dependent, and the declarations are in machine
 1889  * dependent code.  The layout of intrnames and intrcnt however is machine
 1890  * independent.
 1891  *
 1892  * We do not know the length of intrcnt and intrnames at compile time, so
 1893  * calculate things at run time.
 1894  */
 1895 static int
 1896 sysctl_intrnames(SYSCTL_HANDLER_ARGS)
 1897 {
 1898         return (sysctl_handle_opaque(oidp, intrnames, sintrnames, req));
 1899 }
 1900 
 1901 SYSCTL_PROC(_hw, OID_AUTO, intrnames, CTLTYPE_OPAQUE | CTLFLAG_RD,
 1902     NULL, 0, sysctl_intrnames, "", "Interrupt Names");
 1903 
 1904 static int
 1905 sysctl_intrcnt(SYSCTL_HANDLER_ARGS)
 1906 {
 1907         return (sysctl_handle_opaque(oidp, intrcnt, sintrcnt, req));
 1908 }
 1909 
 1910 SYSCTL_PROC(_hw, OID_AUTO, intrcnt, CTLTYPE_OPAQUE | CTLFLAG_RD,
 1911     NULL, 0, sysctl_intrcnt, "", "Interrupt Counts");
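/*
 * Illustrative userland sketch only (not kernel code): consumers such as
 * systat(1) size the buffer first, since the length is only known at run
 * time, then fetch the counters with sysctlbyname(3).
 */
#if 0
        size_t len;
        u_long *counts;

        if (sysctlbyname("hw.intrcnt", NULL, &len, NULL, 0) == 0 &&
            (counts = malloc(len)) != NULL &&
            sysctlbyname("hw.intrcnt", counts, &len, NULL, 0) == 0) {
                /* len / sizeof(u_long) counters now sit in counts[]. */
        }
#endif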
 1912 
 1913 #ifdef DDB
 1914 /*
 1915  * DDB command to dump the interrupt statistics.
 1916  */
 1917 DB_SHOW_COMMAND(intrcnt, db_show_intrcnt)
 1918 {
 1919         u_long *i;
 1920         char *cp;
 1921         u_int j;
 1922 
 1923         cp = intrnames;
 1924         j = 0;
 1925         for (i = intrcnt; j < (sintrcnt / sizeof(u_long)) && !db_pager_quit;
 1926             i++, j++) {
 1927                 if (*cp == '\0')
 1928                         break;
 1929                 if (*i != 0)
 1930                         db_printf("%s\t%lu\n", cp, *i);
 1931                 cp += strlen(cp) + 1;
 1932         }
 1933 }
 1934 #endif
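/*
 * Illustrative DDB usage (output elided); the 'v' and 'a' modifiers are
 * the ones parsed by db_show_intr above:
 *
 *      db> show intr/v
 *      db> show intrcnt
 */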
