FreeBSD/Linux Kernel Cross Reference
sys/kern/kern_intr.c

/*-
 * Copyright (c) 1997, Stefan Esser <se@freebsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: releng/8.3/sys/kern/kern_intr.c 218352 2011-02-05 21:50:23Z kib $");

#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/cpuset.h>
#include <sys/rtprio.h>
#include <sys/systm.h>
#include <sys/interrupt.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/ktr.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/random.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>
#include <sys/unistd.h>
#include <sys/vmmeter.h>
#include <machine/atomic.h>
#include <machine/cpu.h>
#include <machine/md_var.h>
#include <machine/stdarg.h>
#ifdef DDB
#include <ddb/ddb.h>
#include <ddb/db_sym.h>
#endif

/*
 * Describe an interrupt thread.  There is one of these per interrupt event.
 */
struct intr_thread {
        struct intr_event *it_event;
        struct thread *it_thread;       /* Kernel thread. */
        int     it_flags;               /* (j) IT_* flags. */
        int     it_need;                /* Needs service. */
};

/* Interrupt thread flags kept in it_flags */
#define IT_DEAD         0x000001        /* Thread is waiting to exit. */

struct  intr_entropy {
        struct  thread *td;
        uintptr_t event;
};

struct  intr_event *clk_intr_event;
struct  intr_event *tty_intr_event;
void    *vm_ih;
struct proc *intrproc;

static MALLOC_DEFINE(M_ITHREAD, "ithread", "Interrupt Threads");

static int intr_storm_threshold = 1000;
TUNABLE_INT("hw.intr_storm_threshold", &intr_storm_threshold);
SYSCTL_INT(_hw, OID_AUTO, intr_storm_threshold, CTLFLAG_RW,
    &intr_storm_threshold, 0,
    "Number of consecutive interrupts before storm protection is enabled");
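
/*
 * Usage sketch: the storm threshold above can be set as a loader tunable
 * or adjusted at runtime via sysctl; 0 disables storm protection.  The
 * value shown is only an example.
 *
 *   # /boot/loader.conf
 *   hw.intr_storm_threshold="2000"
 *
 *   # at runtime
 *   sysctl hw.intr_storm_threshold=0
 */
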
static TAILQ_HEAD(, intr_event) event_list =
    TAILQ_HEAD_INITIALIZER(event_list);
static struct mtx event_lock;
MTX_SYSINIT(intr_event_list, &event_lock, "intr event list", MTX_DEF);

static void     intr_event_update(struct intr_event *ie);
#ifdef INTR_FILTER
static int      intr_event_schedule_thread(struct intr_event *ie,
                    struct intr_thread *ithd);
static int      intr_filter_loop(struct intr_event *ie,
                    struct trapframe *frame, struct intr_thread **ithd);
static struct intr_thread *ithread_create(const char *name,
                              struct intr_handler *ih);
#else
static int      intr_event_schedule_thread(struct intr_event *ie);
static struct intr_thread *ithread_create(const char *name);
#endif
static void     ithread_destroy(struct intr_thread *ithread);
static void     ithread_execute_handlers(struct proc *p,
                    struct intr_event *ie);
#ifdef INTR_FILTER
static void     priv_ithread_execute_handler(struct proc *p,
                    struct intr_handler *ih);
#endif
static void     ithread_loop(void *);
static void     ithread_update(struct intr_thread *ithd);
static void     start_softintr(void *);

/* Map an interrupt type to an ithread priority. */
u_char
intr_priority(enum intr_type flags)
{
        u_char pri;

        flags &= (INTR_TYPE_TTY | INTR_TYPE_BIO | INTR_TYPE_NET |
            INTR_TYPE_CAM | INTR_TYPE_MISC | INTR_TYPE_CLK | INTR_TYPE_AV);
        switch (flags) {
        case INTR_TYPE_TTY:
                pri = PI_TTY;
                break;
        case INTR_TYPE_BIO:
                pri = PI_DISK;
                break;
        case INTR_TYPE_NET:
                pri = PI_NET;
                break;
        case INTR_TYPE_CAM:
                pri = PI_DISK;
                break;
        case INTR_TYPE_AV:
                pri = PI_AV;
                break;
        case INTR_TYPE_CLK:
                pri = PI_REALTIME;
                break;
        case INTR_TYPE_MISC:
                pri = PI_DULL;          /* don't care */
                break;
        default:
                /* We didn't specify an interrupt level. */
                panic("intr_priority: no interrupt type in flags");
        }

        return pri;
}
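
/*
 * Usage sketch (hypothetical driver names): the interrupt type normally
 * arrives here from the flags argument of bus_setup_intr(9); for example,
 * a network driver registering with INTR_TYPE_NET gets an ithread running
 * at PI_NET:
 *
 *   error = bus_setup_intr(dev, sc->irq_res, INTR_TYPE_NET | INTR_MPSAFE,
 *       NULL, mydev_intr, sc, &sc->intr_cookie);
 */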

/*
 * Update an ithread based on the associated intr_event.
 */
static void
ithread_update(struct intr_thread *ithd)
{
        struct intr_event *ie;
        struct thread *td;
        u_char pri;

        ie = ithd->it_event;
        td = ithd->it_thread;

        /* Determine the overall priority of this event. */
        if (TAILQ_EMPTY(&ie->ie_handlers))
                pri = PRI_MAX_ITHD;
        else
                pri = TAILQ_FIRST(&ie->ie_handlers)->ih_pri;

        /* Update name and priority. */
        strlcpy(td->td_name, ie->ie_fullname, sizeof(td->td_name));
        thread_lock(td);
        sched_prio(td, pri);
        thread_unlock(td);
}

/*
 * Regenerate the full name of an interrupt event and update its priority.
 */
static void
intr_event_update(struct intr_event *ie)
{
        struct intr_handler *ih;
        char *last;
        int missed, space;

        /* Start off with no entropy and just the name of the event. */
        mtx_assert(&ie->ie_lock, MA_OWNED);
        strlcpy(ie->ie_fullname, ie->ie_name, sizeof(ie->ie_fullname));
        ie->ie_flags &= ~IE_ENTROPY;
        missed = 0;
        space = 1;

        /* Run through all the handlers updating values. */
        TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next) {
                if (strlen(ie->ie_fullname) + strlen(ih->ih_name) + 1 <
                    sizeof(ie->ie_fullname)) {
                        strcat(ie->ie_fullname, " ");
                        strcat(ie->ie_fullname, ih->ih_name);
                        space = 0;
                } else
                        missed++;
                if (ih->ih_flags & IH_ENTROPY)
                        ie->ie_flags |= IE_ENTROPY;
        }

        /*
         * If the handler names were too long, add +'s to indicate missing
         * names. If we run out of room and still have +'s to add, change
         * the last character from a + to a *.
         */
        last = &ie->ie_fullname[sizeof(ie->ie_fullname) - 2];
        while (missed-- > 0) {
                if (strlen(ie->ie_fullname) + 1 == sizeof(ie->ie_fullname)) {
                        if (*last == '+') {
                                *last = '*';
                                break;
                        } else
                                *last = '+';
                } else if (space) {
                        strcat(ie->ie_fullname, " +");
                        space = 0;
                } else
                        strcat(ie->ie_fullname, "+");
        }

        /*
         * If this event has an ithread, update its priority and
         * name.
         */
        if (ie->ie_thread != NULL)
                ithread_update(ie->ie_thread);
        CTR2(KTR_INTR, "%s: updated %s", __func__, ie->ie_fullname);
}

int
intr_event_create(struct intr_event **event, void *source, int flags, int irq,
    void (*pre_ithread)(void *), void (*post_ithread)(void *),
    void (*post_filter)(void *), int (*assign_cpu)(void *, u_char),
    const char *fmt, ...)
{
        struct intr_event *ie;
        va_list ap;

        /* The only valid flag during creation is IE_SOFT. */
        if ((flags & ~IE_SOFT) != 0)
                return (EINVAL);
        ie = malloc(sizeof(struct intr_event), M_ITHREAD, M_WAITOK | M_ZERO);
        ie->ie_source = source;
        ie->ie_pre_ithread = pre_ithread;
        ie->ie_post_ithread = post_ithread;
        ie->ie_post_filter = post_filter;
        ie->ie_assign_cpu = assign_cpu;
        ie->ie_flags = flags;
        ie->ie_irq = irq;
        ie->ie_cpu = NOCPU;
        TAILQ_INIT(&ie->ie_handlers);
        mtx_init(&ie->ie_lock, "intr event", NULL, MTX_DEF);

        va_start(ap, fmt);
        vsnprintf(ie->ie_name, sizeof(ie->ie_name), fmt, ap);
        va_end(ap);
        strlcpy(ie->ie_fullname, ie->ie_name, sizeof(ie->ie_fullname));
        mtx_lock(&event_lock);
        TAILQ_INSERT_TAIL(&event_list, ie, ie_list);
        mtx_unlock(&event_lock);
        if (event != NULL)
                *event = ie;
        CTR2(KTR_INTR, "%s: created %s", __func__, ie->ie_name);
        return (0);
}
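
/*
 * Usage sketch (hypothetical names): machine-dependent interrupt code
 * typically creates one event per IRQ at startup, supplying the PIC
 * masking callbacks:
 *
 *   struct intr_event *ie;
 *
 *   error = intr_event_create(&ie, isrc, 0, irq,
 *       mypic_pre_ithread, mypic_post_ithread, mypic_post_filter,
 *       mypic_assign_cpu, "irq%d:", irq);
 */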

/*
 * Bind an interrupt event to the specified CPU.  Note that not all
 * platforms support binding an interrupt to a CPU.  For those
 * platforms this request will fail.  For supported platforms, any
 * associated ithreads as well as the primary interrupt context will
 * be bound to the specified CPU.  Using a cpu id of NOCPU unbinds
 * the interrupt event.
 */
int
intr_event_bind(struct intr_event *ie, u_char cpu)
{
        cpuset_t mask;
        lwpid_t id;
        int error;

        /* Need a CPU to bind to. */
        if (cpu != NOCPU && CPU_ABSENT(cpu))
                return (EINVAL);

        if (ie->ie_assign_cpu == NULL)
                return (EOPNOTSUPP);

        error = priv_check(curthread, PRIV_SCHED_CPUSET_INTR);
        if (error)
                return (error);

        /*
         * If we have any ithreads try to set their mask first to verify
         * permissions, etc.
         */
        mtx_lock(&ie->ie_lock);
        if (ie->ie_thread != NULL) {
                CPU_ZERO(&mask);
                if (cpu == NOCPU)
                        CPU_COPY(cpuset_root, &mask);
                else
                        CPU_SET(cpu, &mask);
                id = ie->ie_thread->it_thread->td_tid;
                mtx_unlock(&ie->ie_lock);
                error = cpuset_setthread(id, &mask);
                if (error)
                        return (error);
        } else
                mtx_unlock(&ie->ie_lock);
        error = ie->ie_assign_cpu(ie->ie_source, cpu);
        if (error) {
                mtx_lock(&ie->ie_lock);
                if (ie->ie_thread != NULL) {
                        CPU_ZERO(&mask);
                        if (ie->ie_cpu == NOCPU)
                                CPU_COPY(cpuset_root, &mask);
                        else
                                CPU_SET(ie->ie_cpu, &mask);     /* restore previous binding */
                        id = ie->ie_thread->it_thread->td_tid;
                        mtx_unlock(&ie->ie_lock);
                        (void)cpuset_setthread(id, &mask);
                } else
                        mtx_unlock(&ie->ie_lock);
                return (error);
        }

        mtx_lock(&ie->ie_lock);
        ie->ie_cpu = cpu;
        mtx_unlock(&ie->ie_lock);

        return (error);
}
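
/*
 * Usage sketch: bind the event for IRQ 16 to CPU 2, or pass NOCPU to
 * remove the binding; the IRQ and CPU numbers are only examples.
 *
 *   struct intr_event *ie;
 *
 *   ie = intr_lookup(16);
 *   if (ie != NULL)
 *           error = intr_event_bind(ie, 2);
 */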

static struct intr_event *
intr_lookup(int irq)
{
        struct intr_event *ie;

        mtx_lock(&event_lock);
        TAILQ_FOREACH(ie, &event_list, ie_list)
                if (ie->ie_irq == irq &&
                    (ie->ie_flags & IE_SOFT) == 0 &&
                    TAILQ_FIRST(&ie->ie_handlers) != NULL)
                        break;
        mtx_unlock(&event_lock);
        return (ie);
}

int
intr_setaffinity(int irq, void *m)
{
        struct intr_event *ie;
        cpuset_t *mask;
        u_char cpu;
        int n;

        mask = m;
        cpu = NOCPU;
        /*
         * If we're setting all cpus we can unbind.  Otherwise make sure
         * only one cpu is in the set.
         */
        if (CPU_CMP(cpuset_root, mask)) {
                for (n = 0; n < CPU_SETSIZE; n++) {
                        if (!CPU_ISSET(n, mask))
                                continue;
                        if (cpu != NOCPU)
                                return (EINVAL);
                        cpu = (u_char)n;
                }
        }
        ie = intr_lookup(irq);
        if (ie == NULL)
                return (ESRCH);
        return (intr_event_bind(ie, cpu));
}

int
intr_getaffinity(int irq, void *m)
{
        struct intr_event *ie;
        cpuset_t *mask;

        mask = m;
        ie = intr_lookup(irq);
        if (ie == NULL)
                return (ESRCH);
        CPU_ZERO(mask);
        mtx_lock(&ie->ie_lock);
        if (ie->ie_cpu == NOCPU)
                CPU_COPY(cpuset_root, mask);
        else
                CPU_SET(ie->ie_cpu, mask);
        mtx_unlock(&ie->ie_lock);
        return (0);
}
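
/*
 * Usage sketch: route IRQ 16 to CPU 2 through the helpers above; a mask
 * naming more than one CPU (other than the root set) is rejected with
 * EINVAL.  Numbers are only examples.
 *
 *   cpuset_t mask;
 *
 *   CPU_ZERO(&mask);
 *   CPU_SET(2, &mask);
 *   error = intr_setaffinity(16, &mask);
 */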

int
intr_event_destroy(struct intr_event *ie)
{

        mtx_lock(&event_lock);
        mtx_lock(&ie->ie_lock);
        if (!TAILQ_EMPTY(&ie->ie_handlers)) {
                mtx_unlock(&ie->ie_lock);
                mtx_unlock(&event_lock);
                return (EBUSY);
        }
        TAILQ_REMOVE(&event_list, ie, ie_list);
#ifndef notyet
        if (ie->ie_thread != NULL) {
                ithread_destroy(ie->ie_thread);
                ie->ie_thread = NULL;
        }
#endif
        mtx_unlock(&ie->ie_lock);
        mtx_unlock(&event_lock);
        mtx_destroy(&ie->ie_lock);
        free(ie, M_ITHREAD);
        return (0);
}

#ifndef INTR_FILTER
static struct intr_thread *
ithread_create(const char *name)
{
        struct intr_thread *ithd;
        struct thread *td;
        int error;

        ithd = malloc(sizeof(struct intr_thread), M_ITHREAD, M_WAITOK | M_ZERO);

        error = kproc_kthread_add(ithread_loop, ithd, &intrproc,
                    &td, RFSTOPPED | RFHIGHPID,
                    0, "intr", "%s", name);
        if (error)
                panic("kproc_create() failed with %d", error);
        thread_lock(td);
        sched_class(td, PRI_ITHD);
        TD_SET_IWAIT(td);
        thread_unlock(td);
        td->td_pflags |= TDP_ITHREAD;
        ithd->it_thread = td;
        CTR2(KTR_INTR, "%s: created %s", __func__, name);
        return (ithd);
}
#else
static struct intr_thread *
ithread_create(const char *name, struct intr_handler *ih)
{
        struct intr_thread *ithd;
        struct thread *td;
        int error;

        ithd = malloc(sizeof(struct intr_thread), M_ITHREAD, M_WAITOK | M_ZERO);

        error = kproc_kthread_add(ithread_loop, ih, &intrproc,
                    &td, RFSTOPPED | RFHIGHPID,
                    0, "intr", "%s", name);
        if (error)
                panic("kproc_create() failed with %d", error);
        thread_lock(td);
        sched_class(td, PRI_ITHD);
        TD_SET_IWAIT(td);
        thread_unlock(td);
        td->td_pflags |= TDP_ITHREAD;
        ithd->it_thread = td;
        CTR2(KTR_INTR, "%s: created %s", __func__, name);
        return (ithd);
}
#endif

static void
ithread_destroy(struct intr_thread *ithread)
{
        struct thread *td;

        CTR2(KTR_INTR, "%s: killing %s", __func__, ithread->it_event->ie_name);
        td = ithread->it_thread;
        thread_lock(td);
        ithread->it_flags |= IT_DEAD;
        if (TD_AWAITING_INTR(td)) {
                TD_CLR_IWAIT(td);
                sched_add(td, SRQ_INTR);
        }
        thread_unlock(td);
}

#ifndef INTR_FILTER
int
intr_event_add_handler(struct intr_event *ie, const char *name,
    driver_filter_t filter, driver_intr_t handler, void *arg, u_char pri,
    enum intr_type flags, void **cookiep)
{
        struct intr_handler *ih, *temp_ih;
        struct intr_thread *it;

        if (ie == NULL || name == NULL || (handler == NULL && filter == NULL))
                return (EINVAL);

        /* Allocate and populate an interrupt handler structure. */
        ih = malloc(sizeof(struct intr_handler), M_ITHREAD, M_WAITOK | M_ZERO);
        ih->ih_filter = filter;
        ih->ih_handler = handler;
        ih->ih_argument = arg;
        strlcpy(ih->ih_name, name, sizeof(ih->ih_name));
        ih->ih_event = ie;
        ih->ih_pri = pri;
        if (flags & INTR_EXCL)
                ih->ih_flags = IH_EXCLUSIVE;
        if (flags & INTR_MPSAFE)
                ih->ih_flags |= IH_MPSAFE;
        if (flags & INTR_ENTROPY)
                ih->ih_flags |= IH_ENTROPY;

        /* We can only have one exclusive handler in an event. */
        mtx_lock(&ie->ie_lock);
        if (!TAILQ_EMPTY(&ie->ie_handlers)) {
                if ((flags & INTR_EXCL) ||
                    (TAILQ_FIRST(&ie->ie_handlers)->ih_flags & IH_EXCLUSIVE)) {
                        mtx_unlock(&ie->ie_lock);
                        free(ih, M_ITHREAD);
                        return (EINVAL);
                }
        }

        /* Add the new handler to the event in priority order. */
        TAILQ_FOREACH(temp_ih, &ie->ie_handlers, ih_next) {
                if (temp_ih->ih_pri > ih->ih_pri)
                        break;
        }
        if (temp_ih == NULL)
                TAILQ_INSERT_TAIL(&ie->ie_handlers, ih, ih_next);
        else
                TAILQ_INSERT_BEFORE(temp_ih, ih, ih_next);
        intr_event_update(ie);

        /* Create a thread if we need one. */
        while (ie->ie_thread == NULL && handler != NULL) {
                if (ie->ie_flags & IE_ADDING_THREAD)
                        msleep(ie, &ie->ie_lock, 0, "ithread", 0);
                else {
                        ie->ie_flags |= IE_ADDING_THREAD;
                        mtx_unlock(&ie->ie_lock);
                        it = ithread_create("intr: newborn");
                        mtx_lock(&ie->ie_lock);
                        ie->ie_flags &= ~IE_ADDING_THREAD;
                        ie->ie_thread = it;
                        it->it_event = ie;
                        ithread_update(it);
                        wakeup(ie);
                }
        }
        CTR3(KTR_INTR, "%s: added %s to %s", __func__, ih->ih_name,
            ie->ie_name);
        mtx_unlock(&ie->ie_lock);

        if (cookiep != NULL)
                *cookiep = ih;
        return (0);
}
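
/*
 * Usage sketch (hypothetical names): register a handler with both a
 * filter and an ithread portion.  A filter alone avoids creating an
 * ithread; supplying a handler causes one to be created on demand.
 *
 *   void *cookie;
 *
 *   error = intr_event_add_handler(ie, "mydev",
 *       mydev_filter, mydev_ithread, sc,
 *       intr_priority(INTR_TYPE_NET), INTR_TYPE_NET | INTR_MPSAFE,
 *       &cookie);
 */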
#else
int
intr_event_add_handler(struct intr_event *ie, const char *name,
    driver_filter_t filter, driver_intr_t handler, void *arg, u_char pri,
    enum intr_type flags, void **cookiep)
{
        struct intr_handler *ih, *temp_ih;
        struct intr_thread *it;

        if (ie == NULL || name == NULL || (handler == NULL && filter == NULL))
                return (EINVAL);

        /* Allocate and populate an interrupt handler structure. */
        ih = malloc(sizeof(struct intr_handler), M_ITHREAD, M_WAITOK | M_ZERO);
        ih->ih_filter = filter;
        ih->ih_handler = handler;
        ih->ih_argument = arg;
        strlcpy(ih->ih_name, name, sizeof(ih->ih_name));
        ih->ih_event = ie;
        ih->ih_pri = pri;
        if (flags & INTR_EXCL)
                ih->ih_flags = IH_EXCLUSIVE;
        if (flags & INTR_MPSAFE)
                ih->ih_flags |= IH_MPSAFE;
        if (flags & INTR_ENTROPY)
                ih->ih_flags |= IH_ENTROPY;

        /* We can only have one exclusive handler in an event. */
        mtx_lock(&ie->ie_lock);
        if (!TAILQ_EMPTY(&ie->ie_handlers)) {
                if ((flags & INTR_EXCL) ||
                    (TAILQ_FIRST(&ie->ie_handlers)->ih_flags & IH_EXCLUSIVE)) {
                        mtx_unlock(&ie->ie_lock);
                        free(ih, M_ITHREAD);
                        return (EINVAL);
                }
        }

        /* Add the new handler to the event in priority order. */
        TAILQ_FOREACH(temp_ih, &ie->ie_handlers, ih_next) {
                if (temp_ih->ih_pri > ih->ih_pri)
                        break;
        }
        if (temp_ih == NULL)
                TAILQ_INSERT_TAIL(&ie->ie_handlers, ih, ih_next);
        else
                TAILQ_INSERT_BEFORE(temp_ih, ih, ih_next);
        intr_event_update(ie);

        /* For filtered handlers, create a private ithread to run on. */
        if (filter != NULL && handler != NULL) {
                mtx_unlock(&ie->ie_lock);
                it = ithread_create("intr: newborn", ih);
                mtx_lock(&ie->ie_lock);
                it->it_event = ie;
                ih->ih_thread = it;
                ithread_update(it); /* XXX - do we really need this?!?!? */
        } else { /* Create the global per-event thread if we need one. */
                while (ie->ie_thread == NULL && handler != NULL) {
                        if (ie->ie_flags & IE_ADDING_THREAD)
                                msleep(ie, &ie->ie_lock, 0, "ithread", 0);
                        else {
                                ie->ie_flags |= IE_ADDING_THREAD;
                                mtx_unlock(&ie->ie_lock);
                                it = ithread_create("intr: newborn", ih);
                                mtx_lock(&ie->ie_lock);
                                ie->ie_flags &= ~IE_ADDING_THREAD;
                                ie->ie_thread = it;
                                it->it_event = ie;
                                ithread_update(it);
                                wakeup(ie);
                        }
                }
        }
        CTR3(KTR_INTR, "%s: added %s to %s", __func__, ih->ih_name,
            ie->ie_name);
        mtx_unlock(&ie->ie_lock);

        if (cookiep != NULL)
                *cookiep = ih;
        return (0);
}
#endif

/*
 * Append a description preceded by a ':' to the name of the specified
 * interrupt handler.
 */
int
intr_event_describe_handler(struct intr_event *ie, void *cookie,
    const char *descr)
{
        struct intr_handler *ih;
        size_t space;
        char *start;

        mtx_lock(&ie->ie_lock);
#ifdef INVARIANTS
        TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next) {
                if (ih == cookie)
                        break;
        }
        if (ih == NULL) {
                mtx_unlock(&ie->ie_lock);
                panic("handler %p not found in interrupt event %p", cookie, ie);
        }
#endif
        ih = cookie;

        /*
         * Look for an existing description by checking for an
         * existing ":".  This assumes device names do not include
         * colons.  If one is found, prepare to insert the new
         * description at that point.  If one is not found, find the
         * end of the name to use as the insertion point.
         */
        start = index(ih->ih_name, ':');
        if (start == NULL)
                start = index(ih->ih_name, 0);

        /*
         * See if there is enough remaining room in the string for the
         * description + ":".  The "- 1" leaves room for the trailing
         * '\0'.  The "+ 1" accounts for the colon.
         */
        space = sizeof(ih->ih_name) - (start - ih->ih_name) - 1;
        if (strlen(descr) + 1 > space) {
                mtx_unlock(&ie->ie_lock);
                return (ENOSPC);
        }

        /* Append a colon followed by the description. */
        *start = ':';
        strcpy(start + 1, descr);
        intr_event_update(ie);
        mtx_unlock(&ie->ie_lock);
        return (0);
}
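
/*
 * Usage sketch (hypothetical names): drivers normally reach this through
 * bus_describe_intr(9), e.g. to tag a per-queue handler so that it shows
 * up as "mydev:rx0" in top(1) and ps(1):
 *
 *   error = bus_describe_intr(dev, sc->irq_res, sc->intr_cookie,
 *       "rx%d", 0);
 */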

/*
 * Return the ie_source field from the intr_event an intr_handler is
 * associated with.
 */
void *
intr_handler_source(void *cookie)
{
        struct intr_handler *ih;
        struct intr_event *ie;

        ih = (struct intr_handler *)cookie;
        if (ih == NULL)
                return (NULL);
        ie = ih->ih_event;
        KASSERT(ie != NULL,
            ("interrupt handler \"%s\" has a NULL interrupt event",
            ih->ih_name));
        return (ie->ie_source);
}

#ifndef INTR_FILTER
int
intr_event_remove_handler(void *cookie)
{
        struct intr_handler *handler = (struct intr_handler *)cookie;
        struct intr_event *ie;
#ifdef INVARIANTS
        struct intr_handler *ih;
#endif
#ifdef notyet
        int dead;
#endif

        if (handler == NULL)
                return (EINVAL);
        ie = handler->ih_event;
        KASSERT(ie != NULL,
            ("interrupt handler \"%s\" has a NULL interrupt event",
            handler->ih_name));
        mtx_lock(&ie->ie_lock);
        CTR3(KTR_INTR, "%s: removing %s from %s", __func__, handler->ih_name,
            ie->ie_name);
#ifdef INVARIANTS
        TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next)
                if (ih == handler)
                        goto ok;
        mtx_unlock(&ie->ie_lock);
        panic("interrupt handler \"%s\" not found in interrupt event \"%s\"",
            handler->ih_name, ie->ie_name);
ok:
#endif
        /*
         * If there is no ithread, then just remove the handler and return.
         * XXX: Note that an INTR_FAST handler might be running on another
         * CPU!
         */
        if (ie->ie_thread == NULL) {
                TAILQ_REMOVE(&ie->ie_handlers, handler, ih_next);
                mtx_unlock(&ie->ie_lock);
                free(handler, M_ITHREAD);
                return (0);
        }

        /*
         * If the interrupt thread is already running, then just mark this
         * handler as being dead and let the ithread do the actual removal.
         *
         * During a cold boot while cold is set, msleep() does not sleep,
         * so we have to remove the handler here rather than letting the
         * thread do it.
         */
        thread_lock(ie->ie_thread->it_thread);
        if (!TD_AWAITING_INTR(ie->ie_thread->it_thread) && !cold) {
                handler->ih_flags |= IH_DEAD;

                /*
                 * Ensure that the thread will process the handler list
                 * again and remove this handler if it has already passed
                 * it on the list.
                 */
                ie->ie_thread->it_need = 1;
        } else
                TAILQ_REMOVE(&ie->ie_handlers, handler, ih_next);
        thread_unlock(ie->ie_thread->it_thread);
        while (handler->ih_flags & IH_DEAD)
                msleep(handler, &ie->ie_lock, 0, "iev_rmh", 0);
        intr_event_update(ie);
#ifdef notyet
        /*
         * XXX: This could be bad in the case of ppbus(4).  Also, I think
         * this could lead to races of stale data when servicing an
         * interrupt.
         */
        dead = 1;
        TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next) {
                if (!(ih->ih_flags & IH_FAST)) {
                        dead = 0;
                        break;
                }
        }
        if (dead) {
                ithread_destroy(ie->ie_thread);
                ie->ie_thread = NULL;
        }
#endif
        mtx_unlock(&ie->ie_lock);
        free(handler, M_ITHREAD);
        return (0);
}

static int
intr_event_schedule_thread(struct intr_event *ie)
{
        struct intr_entropy entropy;
        struct intr_thread *it;
        struct thread *td;
        struct thread *ctd;
        struct proc *p;

        /*
         * If no ithread or no handlers, then we have a stray interrupt.
         */
        if (ie == NULL || TAILQ_EMPTY(&ie->ie_handlers) ||
            ie->ie_thread == NULL)
                return (EINVAL);

        ctd = curthread;
        it = ie->ie_thread;
        td = it->it_thread;
        p = td->td_proc;

        /*
         * If any of the handlers for this ithread claim to be good
         * sources of entropy, then gather some.
         */
        if (harvest.interrupt && ie->ie_flags & IE_ENTROPY) {
                CTR3(KTR_INTR, "%s: pid %d (%s) gathering entropy", __func__,
                    p->p_pid, td->td_name);
                entropy.event = (uintptr_t)ie;
                entropy.td = ctd;
                random_harvest(&entropy, sizeof(entropy), 2, 0,
                    RANDOM_INTERRUPT);
        }

        KASSERT(p != NULL, ("ithread %s has no process", ie->ie_name));

        /*
         * Set it_need to tell the thread to keep running if it is already
         * running.  Then, lock the thread and see if we actually need to
         * put it on the runqueue.
         */
        it->it_need = 1;
        thread_lock(td);
        if (TD_AWAITING_INTR(td)) {
                CTR3(KTR_INTR, "%s: schedule pid %d (%s)", __func__, p->p_pid,
                    td->td_name);
                TD_CLR_IWAIT(td);
                sched_add(td, SRQ_INTR);
        } else {
                CTR5(KTR_INTR, "%s: pid %d (%s): it_need %d, state %d",
                    __func__, p->p_pid, td->td_name, it->it_need, td->td_state);
        }
        thread_unlock(td);

        return (0);
}
#else
int
intr_event_remove_handler(void *cookie)
{
        struct intr_handler *handler = (struct intr_handler *)cookie;
        struct intr_event *ie;
        struct intr_thread *it;
#ifdef INVARIANTS
        struct intr_handler *ih;
#endif
#ifdef notyet
        int dead;
#endif

        if (handler == NULL)
                return (EINVAL);
        ie = handler->ih_event;
        KASSERT(ie != NULL,
            ("interrupt handler \"%s\" has a NULL interrupt event",
            handler->ih_name));
        mtx_lock(&ie->ie_lock);
        CTR3(KTR_INTR, "%s: removing %s from %s", __func__, handler->ih_name,
            ie->ie_name);
#ifdef INVARIANTS
        TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next)
                if (ih == handler)
                        goto ok;
        mtx_unlock(&ie->ie_lock);
        panic("interrupt handler \"%s\" not found in interrupt event \"%s\"",
            handler->ih_name, ie->ie_name);
ok:
#endif
        /*
         * If there are no ithreads (per event and per handler), then
         * just remove the handler and return.
         * XXX: Note that an INTR_FAST handler might be running on another CPU!
         */
        if (ie->ie_thread == NULL && handler->ih_thread == NULL) {
                TAILQ_REMOVE(&ie->ie_handlers, handler, ih_next);
                mtx_unlock(&ie->ie_lock);
                free(handler, M_ITHREAD);
                return (0);
        }

        /* Private or global ithread? */
        it = (handler->ih_thread) ? handler->ih_thread : ie->ie_thread;
        /*
         * If the interrupt thread is already running, then just mark this
         * handler as being dead and let the ithread do the actual removal.
         *
         * During a cold boot while cold is set, msleep() does not sleep,
         * so we have to remove the handler here rather than letting the
         * thread do it.
         */
        thread_lock(it->it_thread);
        if (!TD_AWAITING_INTR(it->it_thread) && !cold) {
                handler->ih_flags |= IH_DEAD;

                /*
                 * Ensure that the thread will process the handler list
                 * again and remove this handler if it has already passed
                 * it on the list.
                 */
                it->it_need = 1;
        } else
                TAILQ_REMOVE(&ie->ie_handlers, handler, ih_next);
        thread_unlock(it->it_thread);
        while (handler->ih_flags & IH_DEAD)
                msleep(handler, &ie->ie_lock, 0, "iev_rmh", 0);
        /*
         * At this point, the handler has been disconnected from the event,
         * so we can kill the private ithread if any.
         */
        if (handler->ih_thread) {
                ithread_destroy(handler->ih_thread);
                handler->ih_thread = NULL;
        }
        intr_event_update(ie);
#ifdef notyet
        /*
         * XXX: This could be bad in the case of ppbus(4).  Also, I think
         * this could lead to races of stale data when servicing an
         * interrupt.
         */
        dead = 1;
        TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next) {
                /*
                 * A handler with an ithread body but no private thread
                 * still needs the shared event thread.
                 */
                if (ih->ih_handler != NULL && ih->ih_thread == NULL) {
                        dead = 0;
                        break;
                }
        }
        if (dead) {
                ithread_destroy(ie->ie_thread);
                ie->ie_thread = NULL;
        }
#endif
        mtx_unlock(&ie->ie_lock);
        free(handler, M_ITHREAD);
        return (0);
}

static int
intr_event_schedule_thread(struct intr_event *ie, struct intr_thread *it)
{
        struct intr_entropy entropy;
        struct thread *td;
        struct thread *ctd;
        struct proc *p;

        /*
         * If no ithread or no handlers, then we have a stray interrupt.
         */
        if (ie == NULL || TAILQ_EMPTY(&ie->ie_handlers) || it == NULL)
                return (EINVAL);

        ctd = curthread;
        td = it->it_thread;
        p = td->td_proc;

        /*
         * If any of the handlers for this ithread claim to be good
         * sources of entropy, then gather some.
         */
        if (harvest.interrupt && ie->ie_flags & IE_ENTROPY) {
                CTR3(KTR_INTR, "%s: pid %d (%s) gathering entropy", __func__,
                    p->p_pid, td->td_name);
                entropy.event = (uintptr_t)ie;
                entropy.td = ctd;
                random_harvest(&entropy, sizeof(entropy), 2, 0,
                    RANDOM_INTERRUPT);
        }

        KASSERT(p != NULL, ("ithread %s has no process", ie->ie_name));

        /*
         * Set it_need to tell the thread to keep running if it is already
         * running.  Then, lock the thread and see if we actually need to
         * put it on the runqueue.
         */
        it->it_need = 1;
        thread_lock(td);
        if (TD_AWAITING_INTR(td)) {
                CTR3(KTR_INTR, "%s: schedule pid %d (%s)", __func__, p->p_pid,
                    td->td_name);
                TD_CLR_IWAIT(td);
                sched_add(td, SRQ_INTR);
        } else {
                CTR5(KTR_INTR, "%s: pid %d (%s): it_need %d, state %d",
                    __func__, p->p_pid, td->td_name, it->it_need, td->td_state);
        }
        thread_unlock(td);

        return (0);
}
#endif

/*
 * Allow interrupt event binding for software interrupt handlers -- a no-op,
 * since interrupts are generated in software rather than being directed by
 * a PIC.
 */
static int
swi_assign_cpu(void *arg, u_char cpu)
{

        return (0);
}

/*
 * Add a software interrupt handler to a specified event.  If a given event
 * is not specified, then a new event is created.
 */
int
swi_add(struct intr_event **eventp, const char *name, driver_intr_t handler,
            void *arg, int pri, enum intr_type flags, void **cookiep)
{
        struct thread *td;
        struct intr_event *ie;
        int error;

        if (flags & INTR_ENTROPY)
                return (EINVAL);

        ie = (eventp != NULL) ? *eventp : NULL;

        if (ie != NULL) {
                if (!(ie->ie_flags & IE_SOFT))
                        return (EINVAL);
        } else {
                error = intr_event_create(&ie, NULL, IE_SOFT, 0,
                    NULL, NULL, NULL, swi_assign_cpu, "swi%d:", pri);
                if (error)
                        return (error);
                if (eventp != NULL)
                        *eventp = ie;
        }
        error = intr_event_add_handler(ie, name, NULL, handler, arg,
            PI_SWI(pri), flags, cookiep);
        if (error)
                return (error);
        if (pri == SWI_CLOCK) {
                td = ie->ie_thread->it_thread;
                thread_lock(td);
                td->td_flags |= TDF_NOLOAD;
                thread_unlock(td);
        }
        return (0);
}
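
/*
 * Usage sketch (hypothetical names): register a software interrupt
 * handler and later raise it; SWI_DELAY sets the need flag without
 * scheduling the thread immediately.
 *
 *   static void *mydev_si_cookie;
 *
 *   error = swi_add(NULL, "mydev", mydev_softintr, sc, SWI_NET,
 *       INTR_MPSAFE, &mydev_si_cookie);
 *   ...
 *   swi_sched(mydev_si_cookie, 0);
 */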

/*
 * Schedule a software interrupt thread.
 */
void
swi_sched(void *cookie, int flags)
{
        struct intr_handler *ih = (struct intr_handler *)cookie;
        struct intr_event *ie = ih->ih_event;
        int error;

        CTR3(KTR_INTR, "swi_sched: %s %s need=%d", ie->ie_name, ih->ih_name,
            ih->ih_need);

        /*
         * Set ih_need for this handler so that if the ithread is already
         * running it will execute this handler on the next pass.  Otherwise,
         * it will execute it the next time it runs.
         */
        atomic_store_rel_int(&ih->ih_need, 1);

        if (!(flags & SWI_DELAY)) {
                PCPU_INC(cnt.v_soft);
#ifdef INTR_FILTER
                error = intr_event_schedule_thread(ie, ie->ie_thread);
#else
                error = intr_event_schedule_thread(ie);
#endif
                KASSERT(error == 0, ("stray software interrupt"));
        }
}

/*
 * Remove a software interrupt handler.  Currently this code does not
 * remove the associated interrupt event if it becomes empty.  Calling code
 * may do so manually via intr_event_destroy(), but that's not really
 * an optimal interface.
 */
int
swi_remove(void *cookie)
{

        return (intr_event_remove_handler(cookie));
}

#ifdef INTR_FILTER
static void
priv_ithread_execute_handler(struct proc *p, struct intr_handler *ih)
{
        struct intr_event *ie;

        ie = ih->ih_event;
        /*
         * If this handler is marked for death, remove it from
         * the list of handlers and wake up the sleeper.
         */
        if (ih->ih_flags & IH_DEAD) {
                mtx_lock(&ie->ie_lock);
                TAILQ_REMOVE(&ie->ie_handlers, ih, ih_next);
                ih->ih_flags &= ~IH_DEAD;
                wakeup(ih);
                mtx_unlock(&ie->ie_lock);
                return;
        }

        /* Execute this handler. */
        CTR6(KTR_INTR, "%s: pid %d exec %p(%p) for %s flg=%x",
             __func__, p->p_pid, (void *)ih->ih_handler, ih->ih_argument,
             ih->ih_name, ih->ih_flags);

        if (!(ih->ih_flags & IH_MPSAFE))
                mtx_lock(&Giant);
        ih->ih_handler(ih->ih_argument);
        if (!(ih->ih_flags & IH_MPSAFE))
                mtx_unlock(&Giant);
}
#endif

/*
 * This is a public function for use by drivers that mux interrupt
 * handlers for child devices from their interrupt handler.
 */
void
intr_event_execute_handlers(struct proc *p, struct intr_event *ie)
{
        struct intr_handler *ih, *ihn;

        TAILQ_FOREACH_SAFE(ih, &ie->ie_handlers, ih_next, ihn) {
                /*
                 * If this handler is marked for death, remove it from
                 * the list of handlers and wake up the sleeper.
                 */
                if (ih->ih_flags & IH_DEAD) {
                        mtx_lock(&ie->ie_lock);
                        TAILQ_REMOVE(&ie->ie_handlers, ih, ih_next);
                        ih->ih_flags &= ~IH_DEAD;
                        wakeup(ih);
                        mtx_unlock(&ie->ie_lock);
                        continue;
                }

                /* Skip filter only handlers */
                if (ih->ih_handler == NULL)
                        continue;

                /*
                 * For software interrupt threads, we only execute
                 * handlers that have their need flag set.  Hardware
                 * interrupt threads always invoke all of their handlers.
                 */
                if (ie->ie_flags & IE_SOFT) {
                        if (!ih->ih_need)
                                continue;
                        else
                                atomic_store_rel_int(&ih->ih_need, 0);
                }

                /* Execute this handler. */
                CTR6(KTR_INTR, "%s: pid %d exec %p(%p) for %s flg=%x",
                    __func__, p->p_pid, (void *)ih->ih_handler,
                    ih->ih_argument, ih->ih_name, ih->ih_flags);

                if (!(ih->ih_flags & IH_MPSAFE))
                        mtx_lock(&Giant);
                ih->ih_handler(ih->ih_argument);
                if (!(ih->ih_flags & IH_MPSAFE))
                        mtx_unlock(&Giant);
        }
}

static void
ithread_execute_handlers(struct proc *p, struct intr_event *ie)
{

        /* Interrupt handlers should not sleep. */
        if (!(ie->ie_flags & IE_SOFT))
                THREAD_NO_SLEEPING();
        intr_event_execute_handlers(p, ie);
        if (!(ie->ie_flags & IE_SOFT))
                THREAD_SLEEPING_OK();

        /*
         * Interrupt storm handling:
         *
         * If this interrupt source is currently storming, then throttle
         * it to only fire the handler once per clock tick.
         *
         * If this interrupt source is not currently storming, but the
         * number of back to back interrupts exceeds the storm threshold,
         * then enter storming mode.
         */
        if (intr_storm_threshold != 0 && ie->ie_count >= intr_storm_threshold &&
            !(ie->ie_flags & IE_SOFT)) {
                /* Report the message only once every second. */
                if (ppsratecheck(&ie->ie_warntm, &ie->ie_warncnt, 1)) {
                        printf(
        "interrupt storm detected on \"%s\"; throttling interrupt source\n",
                            ie->ie_name);
                }
                pause("istorm", 1);
        } else
                ie->ie_count++;

        /*
         * Now that all the handlers have had a chance to run, reenable
         * the interrupt source.
         */
        if (ie->ie_post_ithread != NULL)
                ie->ie_post_ithread(ie->ie_source);
}

#ifndef INTR_FILTER
/*
 * This is the main code for interrupt threads.
 */
static void
ithread_loop(void *arg)
{
        struct intr_thread *ithd;
        struct intr_event *ie;
        struct thread *td;
        struct proc *p;

        td = curthread;
        p = td->td_proc;
        ithd = (struct intr_thread *)arg;
        KASSERT(ithd->it_thread == td,
            ("%s: ithread and proc linkage out of sync", __func__));
        ie = ithd->it_event;
        ie->ie_count = 0;

        /*
         * As long as we have interrupts outstanding, go through the
         * list of handlers, giving each one a go at it.
         */
        for (;;) {
                /*
                 * If we are an orphaned thread, then just die.
                 */
                if (ithd->it_flags & IT_DEAD) {
                        CTR3(KTR_INTR, "%s: pid %d (%s) exiting", __func__,
                            p->p_pid, td->td_name);
                        free(ithd, M_ITHREAD);
                        kthread_exit();
                }

                /*
                 * Service interrupts.  If another interrupt arrives while
                 * we are running, it will set it_need to note that we
                 * should make another pass.
                 */
                while (ithd->it_need) {
                        /*
                         * This might need a full read and write barrier
                         * to make sure that this write posts before any
                         * of the memory or device accesses in the
                         * handlers.
                         */
                        atomic_store_rel_int(&ithd->it_need, 0);
                        ithread_execute_handlers(p, ie);
                }
                WITNESS_WARN(WARN_PANIC, NULL, "suspending ithread");
                mtx_assert(&Giant, MA_NOTOWNED);

                /*
                 * Processed all our interrupts.  Now get the sched
                 * lock.  This may take a while and it_need may get
                 * set again, so we have to check it again.
                 */
                thread_lock(td);
                if (!ithd->it_need && !(ithd->it_flags & IT_DEAD)) {
                        TD_SET_IWAIT(td);
                        ie->ie_count = 0;
                        mi_switch(SW_VOL | SWT_IWAIT, NULL);
                }
                thread_unlock(td);
        }
}

/*
 * Main interrupt handling body.
 *
 * Input:
 * o ie:                        the event connected to this interrupt.
 * o frame:                     some archs (e.g., i386) pass a frame to some
 *                              handlers as their main argument.
 * Return value:
 * o 0:                         everything ok.
 * o EINVAL:                    stray interrupt.
 */
 1342 int
 1343 intr_event_handle(struct intr_event *ie, struct trapframe *frame)
 1344 {
 1345         struct intr_handler *ih;
 1346         struct thread *td;
 1347         int error, ret, thread;
 1348 
 1349         td = curthread;
 1350 
 1351         /* An interrupt with no event or handlers is a stray interrupt. */
 1352         if (ie == NULL || TAILQ_EMPTY(&ie->ie_handlers))
 1353                 return (EINVAL);
 1354 
 1355         /*
 1356          * Execute fast interrupt handlers directly.
 1357          * To support clock handlers, if a handler registers
 1358          * with a NULL argument, then we pass it a pointer to
 1359          * a trapframe as its argument.
 1360          */
 1361         td->td_intr_nesting_level++;
 1362         thread = 0;
 1363         ret = 0;
 1364         critical_enter();
 1365         TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next) {
 1366                 if (ih->ih_filter == NULL) {
 1367                         thread = 1;
 1368                         continue;
 1369                 }
 1370                 CTR4(KTR_INTR, "%s: exec %p(%p) for %s", __func__,
 1371                     ih->ih_filter, ih->ih_argument == NULL ? frame :
 1372                     ih->ih_argument, ih->ih_name);
 1373                 if (ih->ih_argument == NULL)
 1374                         ret = ih->ih_filter(frame);
 1375                 else
 1376                         ret = ih->ih_filter(ih->ih_argument);
 1377                 KASSERT(ret == FILTER_STRAY ||
 1378                     ((ret & (FILTER_SCHEDULE_THREAD | FILTER_HANDLED)) != 0 &&
 1379                     (ret & ~(FILTER_SCHEDULE_THREAD | FILTER_HANDLED)) == 0),
 1380                     ("%s: incorrect return value %#x from %s", __func__, ret,
 1381                     ih->ih_name));
 1382 
 1383                 /* 
 1384                  * Wrapper handler special handling:
 1385                  *
 1386                  * in some particular cases (like pccard and pccbb), 
 1387                  * the _real_ device handler is wrapped in a couple of
 1388                  * functions - a filter wrapper and an ithread wrapper.
 1389                  * In this case (and just in this case), the filter wrapper 
 1390                  * could ask the system to schedule the ithread and mask
 1391                  * the interrupt source if the wrapped handler is composed
 1392                  * of just an ithread handler.
 1393                  *
 1394                  * TODO: write a generic wrapper to avoid people rolling 
 1395                  * their own
 1396                  */
 1397                 if (!thread) {
 1398                         if (ret == FILTER_SCHEDULE_THREAD)
 1399                                 thread = 1;
 1400                 }
 1401         }
 1402 
 1403         if (thread) {
 1404                 if (ie->ie_pre_ithread != NULL)
 1405                         ie->ie_pre_ithread(ie->ie_source);
 1406         } else {
 1407                 if (ie->ie_post_filter != NULL)
 1408                         ie->ie_post_filter(ie->ie_source);
 1409         }
 1410         
 1411         /* Schedule the ithread if needed. */
 1412         if (thread) {
 1413                 error = intr_event_schedule_thread(ie);
 1414 #ifndef XEN             
 1415                 KASSERT(error == 0, ("bad stray interrupt"));
 1416 #else
 1417                 if (error != 0)
 1418                         log(LOG_WARNING, "bad stray interrupt");
 1419 #endif          
 1420         }
 1421         critical_exit();
 1422         td->td_intr_nesting_level--;
 1423         return (0);
 1424 }
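
/*
 * Editorial sketch, not part of kern_intr.c: a driver-side filter
 * illustrating the return-value contract that the KASSERT in
 * intr_event_handle() above enforces.  The softc layout and the
 * MYDEV_ISR/MYDEV_PENDING register names are hypothetical.
 */
#if 0
struct mydev_softc {
        struct resource *sc_mem;        /* memory-mapped register window */
};

static int
mydev_filter(void *arg)
{
        struct mydev_softc *sc = arg;
        uint32_t isr;

        isr = bus_read_4(sc->sc_mem, MYDEV_ISR);
        if ((isr & MYDEV_PENDING) == 0)
                return (FILTER_STRAY);          /* not our device */
        /* Ack in hardware, then ask for the ithread half to run. */
        bus_write_4(sc->sc_mem, MYDEV_ISR, isr);
        return (FILTER_SCHEDULE_THREAD);
}
#endif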
 1425 #else
 1426 /*
 1427  * This is the main code for interrupt threads.
 1428  */
 1429 static void
 1430 ithread_loop(void *arg)
 1431 {
 1432         struct intr_thread *ithd;
 1433         struct intr_handler *ih;
 1434         struct intr_event *ie;
 1435         struct thread *td;
 1436         struct proc *p;
 1437         int priv;
 1438 
 1439         td = curthread;
 1440         p = td->td_proc;
 1441         ih = (struct intr_handler *)arg;
 1442         priv = (ih->ih_thread != NULL) ? 1 : 0;
 1443         ithd = (priv) ? ih->ih_thread : ih->ih_event->ie_thread;
 1444         KASSERT(ithd->it_thread == td,
 1445             ("%s: ithread and proc linkage out of sync", __func__));
 1446         ie = ithd->it_event;
 1447         ie->ie_count = 0;
 1448 
 1449         /*
 1450          * As long as we have interrupts outstanding, go through the
 1451          * list of handlers, giving each one a go at it.
 1452          */
 1453         for (;;) {
 1454                 /*
 1455                  * If we are an orphaned thread, then just die.
 1456                  */
 1457                 if (ithd->it_flags & IT_DEAD) {
 1458                         CTR3(KTR_INTR, "%s: pid %d (%s) exiting", __func__,
 1459                             p->p_pid, td->td_name);
 1460                         free(ithd, M_ITHREAD);
 1461                         kthread_exit();
 1462                 }
 1463 
 1464                 /*
 1465                  * Service interrupts.  If another interrupt arrives while
 1466                  * we are running, it will set it_need to note that we
 1467                  * should make another pass.
 1468                  */
 1469                 while (ithd->it_need) {
 1470                         /*
 1471                          * This might need a full read and write barrier
 1472                          * to make sure that this write posts before any
 1473                          * of the memory or device accesses in the
 1474                          * handlers.
 1475                          */
 1476                         atomic_store_rel_int(&ithd->it_need, 0);
 1477                         if (priv)
 1478                                 priv_ithread_execute_handler(p, ih);
 1479                         else 
 1480                                 ithread_execute_handlers(p, ie);
 1481                 }
 1482                 WITNESS_WARN(WARN_PANIC, NULL, "suspending ithread");
 1483                 mtx_assert(&Giant, MA_NOTOWNED);
 1484 
 1485                 /*
 1486          * We have processed all our interrupts.  Now acquire the
 1487          * thread lock.  This may take a while and it_need may be
 1488          * set again, so we have to check it again.
 1489                  */
 1490                 thread_lock(td);
 1491                 if (!ithd->it_need && !(ithd->it_flags & IT_DEAD)) {
 1492                         TD_SET_IWAIT(td);
 1493                         ie->ie_count = 0;
 1494                         mi_switch(SW_VOL | SWT_IWAIT, NULL);
 1495                 }
 1496                 thread_unlock(td);
 1497         }
 1498 }
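
/*
 * Editorial sketch, not part of kern_intr.c: the producer side of the
 * it_need handshake consumed by ithread_loop() above, reduced to its
 * core.  The real work (including the cases where the ithread is already
 * running) is done by intr_event_schedule_thread().
 */
#if 0
static void
example_schedule(struct intr_thread *it)
{
        struct thread *td = it->it_thread;

        it->it_need = 1;                        /* request another pass */
        thread_lock(td);
        if (TD_AWAITING_INTR(td)) {
                TD_CLR_IWAIT(td);
                sched_add(td, SRQ_INTR);        /* make the ithread runnable */
        }
        thread_unlock(td);
}
#endif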
 1499 
 1500 /* 
 1501  * Main loop for interrupt filter.
 1502  *
 1503  * Some architectures (i386, amd64 and arm) require the optional frame 
 1504  * parameter, and use it as the main argument for fast handler execution
 1505  * when ih_argument == NULL.
 1506  *
 1507  * Return value:
 1508  * o FILTER_STRAY:              No filter recognized the event, and no
 1509  *                              filter-less handler is registered on this 
 1510  *                              line.
 1511  * o FILTER_HANDLED:            A filter claimed the event and served it.
 1512  * o FILTER_SCHEDULE_THREAD:    No filter claimed the event, but there's at
 1513  *                              least one filter-less handler on this line.
 1514  * o FILTER_HANDLED | 
 1515  *   FILTER_SCHEDULE_THREAD:    A filter claimed the event, and asked for
 1516  *                              scheduling the per-handler ithread.
 1517  *
 1518  * If an ithread has to be scheduled, *ithd will point to the
 1519  * struct intr_thread whose associated kernel thread is to be
 1520  * scheduled.
 1521  */
 1522 
 1523 static int
 1524 intr_filter_loop(struct intr_event *ie, struct trapframe *frame, 
 1525                  struct intr_thread **ithd) 
 1526 {
 1527         struct intr_handler *ih;
 1528         void *arg;
 1529         int ret, thread_only;
 1530 
 1531         ret = 0;
 1532         thread_only = 0;
 1533         TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next) {
 1534                 /*
 1535                  * Execute fast interrupt handlers directly.
 1536                  * To support clock handlers, if a handler registers
 1537                  * with a NULL argument, then we pass it a pointer to
 1538                  * a trapframe as its argument.
 1539                  */
 1540                 arg = ((ih->ih_argument == NULL) ? frame : ih->ih_argument);
 1541                 
 1542                 CTR5(KTR_INTR, "%s: exec %p/%p(%p) for %s", __func__,
 1543                      ih->ih_filter, ih->ih_handler, arg, ih->ih_name);
 1544 
 1545                 if (ih->ih_filter != NULL)
 1546                         ret = ih->ih_filter(arg);
 1547                 else {
 1548                         thread_only = 1;
 1549                         continue;
 1550                 }
 1551                 KASSERT(ret == FILTER_STRAY ||
 1552                     ((ret & (FILTER_SCHEDULE_THREAD | FILTER_HANDLED)) != 0 &&
 1553                     (ret & ~(FILTER_SCHEDULE_THREAD | FILTER_HANDLED)) == 0),
 1554                     ("%s: incorrect return value %#x from %s", __func__, ret,
 1555                     ih->ih_name));
 1556                 if (ret & FILTER_STRAY)
 1557                         continue;
 1558                 else { 
 1559                         *ithd = ih->ih_thread;
 1560                         return (ret);
 1561                 }
 1562         }
 1563 
 1564         /*
 1565          * No filters handled the interrupt and we have at least
 1566          * one handler without a filter.  In this case, we schedule
 1567          * all of the filter-less handlers to run in the ithread.
 1568          */     
 1569         if (thread_only) {
 1570                 *ithd = ie->ie_thread;
 1571                 return (FILTER_SCHEDULE_THREAD);
 1572         }
 1573         return (FILTER_STRAY);
 1574 }
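
/*
 * Editorial sketch, not part of kern_intr.c: a pccbb-style wrapper filter
 * of the kind the contract above allows, claiming the event and also
 * requesting its private ithread (FILTER_HANDLED | FILTER_SCHEDULE_THREAD).
 * mydev_mask_intr() and the softc are hypothetical.
 */
#if 0
static int
mydev_wrapper_filter(void *arg)
{
        struct mydev_softc *sc = arg;

        /* Mask the source; the ithread half will unmask when done. */
        mydev_mask_intr(sc);
        return (FILTER_HANDLED | FILTER_SCHEDULE_THREAD);
}
#endif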
 1575 
 1576 /*
 1577  * Main interrupt handling body.
 1578  *
 1579  * Input:
 1580  * o ie:                        the event connected to this interrupt.
 1581  * o frame:                     some archs (e.g., i386) pass a frame to some
 1582  *                              handlers as their main argument.
 1583  * Return value:
 1584  * o 0:                         everything ok.
 1585  * o EINVAL:                    stray interrupt.
 1586  */
 1587 int
 1588 intr_event_handle(struct intr_event *ie, struct trapframe *frame)
 1589 {
 1590         struct intr_thread *ithd;
 1591         struct thread *td;
 1592         int thread;
 1593 
 1594         ithd = NULL;
 1595         td = curthread;
 1596 
 1597         if (ie == NULL || TAILQ_EMPTY(&ie->ie_handlers))
 1598                 return (EINVAL);
 1599 
 1600         td->td_intr_nesting_level++;
 1601         thread = 0;
 1602         critical_enter();
 1603         thread = intr_filter_loop(ie, frame, &ithd);    
 1604         if (thread & FILTER_HANDLED) {
 1605                 if (ie->ie_post_filter != NULL)
 1606                         ie->ie_post_filter(ie->ie_source);
 1607         } else {
 1608                 if (ie->ie_pre_ithread != NULL)
 1609                         ie->ie_pre_ithread(ie->ie_source);
 1610         }
 1611         critical_exit();
 1612         
 1613         /* Interrupt storm logic: count strays; the path below only logs. */
 1614         if (thread & FILTER_STRAY) {
 1615                 ie->ie_count++;
 1616                 if (ie->ie_count < intr_storm_threshold)
 1617                         printf("Interrupt stray detection not present\n");
 1618         }
 1619 
 1620         /* Schedule an ithread if needed. */
 1621         if (thread & FILTER_SCHEDULE_THREAD) {
 1622                 if (intr_event_schedule_thread(ie, ithd) != 0)
 1623                         panic("%s: impossible stray interrupt", __func__);
 1624         }
 1625         td->td_intr_nesting_level--;
 1626         return (0);
 1627 }
 1628 #endif
 1629 
 1630 #ifdef DDB
 1631 /*
 1632  * Dump details about an interrupt handler
 1633  */
 1634 static void
 1635 db_dump_intrhand(struct intr_handler *ih)
 1636 {
 1637         int comma;
 1638 
 1639         db_printf("\t%-10s ", ih->ih_name);
 1640         switch (ih->ih_pri) {
 1641         case PI_REALTIME:
 1642                 db_printf("CLK ");
 1643                 break;
 1644         case PI_AV:
 1645                 db_printf("AV  ");
 1646                 break;
 1647         case PI_TTY:
 1648                 db_printf("TTY ");
 1649                 break;
 1650         case PI_NET:
 1651                 db_printf("NET ");
 1652                 break;
 1653         case PI_DISK:
 1654                 db_printf("DISK");
 1655                 break;
 1656         case PI_DULL:
 1657                 db_printf("DULL");
 1658                 break;
 1659         default:
 1660                 if (ih->ih_pri >= PI_SOFT)
 1661                         db_printf("SWI ");
 1662                 else
 1663                         db_printf("%4u", ih->ih_pri);
 1664                 break;
 1665         }
 1666         db_printf(" ");
 1667         db_printsym((uintptr_t)ih->ih_handler, DB_STGY_PROC);
 1668         db_printf("(%p)", ih->ih_argument);
 1669         if (ih->ih_need ||
 1670             (ih->ih_flags & (IH_EXCLUSIVE | IH_ENTROPY | IH_DEAD |
 1671             IH_MPSAFE)) != 0) {
 1672                 db_printf(" {");
 1673                 comma = 0;
 1674                 if (ih->ih_flags & IH_EXCLUSIVE) {
 1675                         if (comma)
 1676                                 db_printf(", ");
 1677                         db_printf("EXCL");
 1678                         comma = 1;
 1679                 }
 1680                 if (ih->ih_flags & IH_ENTROPY) {
 1681                         if (comma)
 1682                                 db_printf(", ");
 1683                         db_printf("ENTROPY");
 1684                         comma = 1;
 1685                 }
 1686                 if (ih->ih_flags & IH_DEAD) {
 1687                         if (comma)
 1688                                 db_printf(", ");
 1689                         db_printf("DEAD");
 1690                         comma = 1;
 1691                 }
 1692                 if (ih->ih_flags & IH_MPSAFE) {
 1693                         if (comma)
 1694                                 db_printf(", ");
 1695                         db_printf("MPSAFE");
 1696                         comma = 1;
 1697                 }
 1698                 if (ih->ih_need) {
 1699                         if (comma)
 1700                                 db_printf(", ");
 1701                         db_printf("NEED");
 1702                 }
 1703                 db_printf("}");
 1704         }
 1705         db_printf("\n");
 1706 }
 1707 
 1708 /*
 1709  * Dump details about an event.
 1710  */
 1711 void
 1712 db_dump_intr_event(struct intr_event *ie, int handlers)
 1713 {
 1714         struct intr_handler *ih;
 1715         struct intr_thread *it;
 1716         int comma;
 1717 
 1718         db_printf("%s ", ie->ie_fullname);
 1719         it = ie->ie_thread;
 1720         if (it != NULL)
 1721                 db_printf("(pid %d)", it->it_thread->td_proc->p_pid);
 1722         else
 1723                 db_printf("(no thread)");
 1724         if ((ie->ie_flags & (IE_SOFT | IE_ENTROPY | IE_ADDING_THREAD)) != 0 ||
 1725             (it != NULL && it->it_need)) {
 1726                 db_printf(" {");
 1727                 comma = 0;
 1728                 if (ie->ie_flags & IE_SOFT) {
 1729                         db_printf("SOFT");
 1730                         comma = 1;
 1731                 }
 1732                 if (ie->ie_flags & IE_ENTROPY) {
 1733                         if (comma)
 1734                                 db_printf(", ");
 1735                         db_printf("ENTROPY");
 1736                         comma = 1;
 1737                 }
 1738                 if (ie->ie_flags & IE_ADDING_THREAD) {
 1739                         if (comma)
 1740                                 db_printf(", ");
 1741                         db_printf("ADDING_THREAD");
 1742                         comma = 1;
 1743                 }
 1744                 if (it != NULL && it->it_need) {
 1745                         if (comma)
 1746                                 db_printf(", ");
 1747                         db_printf("NEED");
 1748                 }
 1749                 db_printf("}");
 1750         }
 1751         db_printf("\n");
 1752 
 1753         if (handlers)
 1754                 TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next)
 1755                     db_dump_intrhand(ih);
 1756 }
 1757 
 1758 /*
 1759  * Dump data about interrupt handlers
 1760  */
 1761 DB_SHOW_COMMAND(intr, db_show_intr)
 1762 {
 1763         struct intr_event *ie;
 1764         int all, verbose;
 1765 
 1766         verbose = index(modif, 'v') != NULL;
 1767         all = index(modif, 'a') != NULL;
 1768         TAILQ_FOREACH(ie, &event_list, ie_list) {
 1769                 if (!all && TAILQ_EMPTY(&ie->ie_handlers))
 1770                         continue;
 1771                 db_dump_intr_event(ie, verbose);
 1772                 if (db_pager_quit)
 1773                         break;
 1774         }
 1775 }
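
/*
 * Editorial note, not part of kern_intr.c: from the ddb(4) prompt the
 * command above is invoked as, for example:
 *
 *      db> show intr           (events that have handlers)
 *      db> show intr/a         (all events, including handler-less ones)
 *      db> show intr/v         (verbose: per-handler detail)
 */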
 1776 #endif /* DDB */
 1777 
 1778 /*
 1779  * Start standard software interrupt threads
 1780  */
 1781 static void
 1782 start_softintr(void *dummy)
 1783 {
 1784 
 1785         if (swi_add(NULL, "vm", swi_vm, NULL, SWI_VM, INTR_MPSAFE, &vm_ih))
 1786                 panic("died while creating vm swi ithread");
 1787 }
 1788 SYSINIT(start_softintr, SI_SUB_SOFTINTR, SI_ORDER_FIRST, start_softintr,
 1789     NULL);
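
/*
 * Editorial sketch, not part of kern_intr.c: registering and raising a
 * private software interrupt with the same KPI used by start_softintr()
 * above; the "example" name and handlers are hypothetical.
 */
#if 0
static void *example_ih;

static void
example_swi_handler(void *arg)
{
        /* Runs in the swi's ithread context. */
}

static void
example_raise(void)
{
        swi_sched(example_ih, 0);
}

static void
example_init(void *dummy)
{
        if (swi_add(NULL, "example", example_swi_handler, NULL, SWI_VM,
            INTR_MPSAFE, &example_ih))
                panic("could not create example swi");
}
SYSINIT(example_swi, SI_SUB_SOFTINTR, SI_ORDER_ANY, example_init, NULL);
#endif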
 1790 
 1791 /*
 1792  * Sysctls used by systat and others: hw.intrnames and hw.intrcnt.
 1793  * The data for this is machine dependent, and the declarations are in
 1794  * machine-dependent code.  The layout of intrnames and intrcnt, however,
 1795  * is machine independent.
 1796  *
 1797  * We do not know the length of intrcnt and intrnames at compile time, so
 1798  * calculate things at run time.
 1799  */
 1800 static int
 1801 sysctl_intrnames(SYSCTL_HANDLER_ARGS)
 1802 {
 1803         return (sysctl_handle_opaque(oidp, intrnames, eintrnames - intrnames,
 1804            req));
 1805 }
 1806 
 1807 SYSCTL_PROC(_hw, OID_AUTO, intrnames, CTLTYPE_OPAQUE | CTLFLAG_RD,
 1808     NULL, 0, sysctl_intrnames, "", "Interrupt Names");
 1809 
 1810 static int
 1811 sysctl_intrcnt(SYSCTL_HANDLER_ARGS)
 1812 {
 1813         return (sysctl_handle_opaque(oidp, intrcnt,
 1814             (char *)eintrcnt - (char *)intrcnt, req));
 1815 }
 1816 
 1817 SYSCTL_PROC(_hw, OID_AUTO, intrcnt, CTLTYPE_OPAQUE | CTLFLAG_RD,
 1818     NULL, 0, sysctl_intrcnt, "", "Interrupt Counts");
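
/*
 * Editorial sketch, not part of kern_intr.c: a userland consumer of the
 * hw.intrnames/hw.intrcnt sysctls exported above, in the spirit of
 * vmstat -i.  Error handling is omitted for brevity.
 */
#if 0
#include <sys/types.h>
#include <sys/sysctl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int
main(void)
{
        size_t namesz, cntsz, i;
        u_long *cnt;
        char *names, *cp;

        /* First pass sizes the buffers; second pass fills them. */
        sysctlbyname("hw.intrnames", NULL, &namesz, NULL, 0);
        sysctlbyname("hw.intrcnt", NULL, &cntsz, NULL, 0);
        names = malloc(namesz);
        cnt = malloc(cntsz);
        sysctlbyname("hw.intrnames", names, &namesz, NULL, 0);
        sysctlbyname("hw.intrcnt", cnt, &cntsz, NULL, 0);
        cp = names;
        for (i = 0; i < cntsz / sizeof(u_long); i++) {
                if (*cp == '\0')
                        break;
                if (cnt[i] != 0)
                        printf("%s\t%lu\n", cp, cnt[i]);
                cp += strlen(cp) + 1;
        }
        free(names);
        free(cnt);
        return (0);
}
#endif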
 1819 
 1820 #ifdef DDB
 1821 /*
 1822  * DDB command to dump the interrupt statistics.
 1823  */
 1824 DB_SHOW_COMMAND(intrcnt, db_show_intrcnt)
 1825 {
 1826         u_long *i;
 1827         char *cp;
 1828 
 1829         cp = intrnames;
 1830         for (i = intrcnt; i != eintrcnt && !db_pager_quit; i++) {
 1831                 if (*cp == '\0')
 1832                         break;
 1833                 if (*i != 0)
 1834                         db_printf("%s\t%lu\n", cp, *i);
 1835                 cp += strlen(cp) + 1;
 1836         }
 1837 }
 1838 #endif

This page is part of the FreeBSD/Linux Kernel Cross-Reference, and was automatically generated using a modified version of the LXR engine.