FreeBSD/Linux Kernel Cross Reference
sys/kern/kern_intr.c


    1 /*-
    2  * Copyright (c) 1997, Stefan Esser <se@freebsd.org>
    3  * All rights reserved.
    4  *
    5  * Redistribution and use in source and binary forms, with or without
    6  * modification, are permitted provided that the following conditions
    7  * are met:
    8  * 1. Redistributions of source code must retain the above copyright
    9  *    notice unmodified, this list of conditions, and the following
   10  *    disclaimer.
   11  * 2. Redistributions in binary form must reproduce the above copyright
   12  *    notice, this list of conditions and the following disclaimer in the
   13  *    documentation and/or other materials provided with the distribution.
   14  *
   15  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
   16  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
   17  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
   18  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
   19  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
   20  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
   21  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
   22  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
   23  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
   24  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
   25  */
   26 
   27 #include <sys/cdefs.h>
   28 __FBSDID("$FreeBSD: releng/11.1/sys/kern/kern_intr.c 285751 2015-07-21 14:39:34Z kib $");
   29 
   30 #include "opt_ddb.h"
   31 #include "opt_kstack_usage_prof.h"
   32 
   33 #include <sys/param.h>
   34 #include <sys/bus.h>
   35 #include <sys/conf.h>
   36 #include <sys/cpuset.h>
   37 #include <sys/rtprio.h>
   38 #include <sys/systm.h>
   39 #include <sys/interrupt.h>
   40 #include <sys/kernel.h>
   41 #include <sys/kthread.h>
   42 #include <sys/ktr.h>
   43 #include <sys/limits.h>
   44 #include <sys/lock.h>
   45 #include <sys/malloc.h>
   46 #include <sys/mutex.h>
   47 #include <sys/priv.h>
   48 #include <sys/proc.h>
   49 #include <sys/random.h>
   50 #include <sys/resourcevar.h>
   51 #include <sys/sched.h>
   52 #include <sys/smp.h>
   53 #include <sys/sysctl.h>
   54 #include <sys/syslog.h>
   55 #include <sys/unistd.h>
   56 #include <sys/vmmeter.h>
   57 #include <machine/atomic.h>
   58 #include <machine/cpu.h>
   59 #include <machine/md_var.h>
   60 #include <machine/stdarg.h>
   61 #ifdef DDB
   62 #include <ddb/ddb.h>
   63 #include <ddb/db_sym.h>
   64 #endif
   65 
   66 /*
   67  * Describe an interrupt thread.  There is one of these per interrupt event.
   68  */
   69 struct intr_thread {
   70         struct intr_event *it_event;
   71         struct thread *it_thread;       /* Kernel thread. */
   72         int     it_flags;               /* (j) IT_* flags. */
   73         int     it_need;                /* Needs service. */
   74 };
   75 
   76 /* Interrupt thread flags kept in it_flags */
   77 #define IT_DEAD         0x000001        /* Thread is waiting to exit. */
   78 #define IT_WAIT         0x000002        /* Thread is waiting for completion. */
   79 
   80 struct  intr_entropy {
   81         struct  thread *td;
   82         uintptr_t event;
   83 };
   84 
   85 struct  intr_event *clk_intr_event;
   86 struct  intr_event *tty_intr_event;
   87 void    *vm_ih;
   88 struct proc *intrproc;
   89 
   90 static MALLOC_DEFINE(M_ITHREAD, "ithread", "Interrupt Threads");
   91 
   92 static int intr_storm_threshold = 1000;
   93 SYSCTL_INT(_hw, OID_AUTO, intr_storm_threshold, CTLFLAG_RWTUN,
   94     &intr_storm_threshold, 0,
   95     "Number of consecutive interrupts before storm protection is enabled");
   96 static TAILQ_HEAD(, intr_event) event_list =
   97     TAILQ_HEAD_INITIALIZER(event_list);
   98 static struct mtx event_lock;
   99 MTX_SYSINIT(intr_event_list, &event_lock, "intr event list", MTX_DEF);
  100 
  101 static void     intr_event_update(struct intr_event *ie);
  102 #ifdef INTR_FILTER
  103 static int      intr_event_schedule_thread(struct intr_event *ie,
  104                     struct intr_thread *ithd);
  105 static int      intr_filter_loop(struct intr_event *ie,
  106                     struct trapframe *frame, struct intr_thread **ithd);
  107 static struct intr_thread *ithread_create(const char *name,
  108                               struct intr_handler *ih);
  109 #else
  110 static int      intr_event_schedule_thread(struct intr_event *ie);
  111 static struct intr_thread *ithread_create(const char *name);
  112 #endif
  113 static void     ithread_destroy(struct intr_thread *ithread);
  114 static void     ithread_execute_handlers(struct proc *p, 
  115                     struct intr_event *ie);
  116 #ifdef INTR_FILTER
  117 static void     priv_ithread_execute_handler(struct proc *p, 
  118                     struct intr_handler *ih);
  119 #endif
  120 static void     ithread_loop(void *);
  121 static void     ithread_update(struct intr_thread *ithd);
  122 static void     start_softintr(void *);
  123 
  124 /* Map an interrupt type to an ithread priority. */
  125 u_char
  126 intr_priority(enum intr_type flags)
  127 {
  128         u_char pri;
  129 
  130         flags &= (INTR_TYPE_TTY | INTR_TYPE_BIO | INTR_TYPE_NET |
  131             INTR_TYPE_CAM | INTR_TYPE_MISC | INTR_TYPE_CLK | INTR_TYPE_AV);
  132         switch (flags) {
  133         case INTR_TYPE_TTY:
  134                 pri = PI_TTY;
  135                 break;
  136         case INTR_TYPE_BIO:
  137                 pri = PI_DISK;
  138                 break;
  139         case INTR_TYPE_NET:
  140                 pri = PI_NET;
  141                 break;
  142         case INTR_TYPE_CAM:
  143                 pri = PI_DISK;
  144                 break;
  145         case INTR_TYPE_AV:
  146                 pri = PI_AV;
  147                 break;
  148         case INTR_TYPE_CLK:
  149                 pri = PI_REALTIME;
  150                 break;
  151         case INTR_TYPE_MISC:
  152                 pri = PI_DULL;          /* don't care */
  153                 break;
  154         default:
  155                 /* We didn't specify an interrupt level. */
  156                 panic("intr_priority: no interrupt type in flags");
  157         }
  158 
  159         return pri;
  160 }
  161 
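As a brief illustration of the mapping above (the local variable is hypothetical): exactly one INTR_TYPE_* flag must be set, since any OR'd combination survives the mask, matches no case, and falls through to the panic.

        u_char pri;

        pri = intr_priority(INTR_TYPE_NET);     /* pri == PI_NET */
        /* intr_priority(INTR_TYPE_NET | INTR_TYPE_TTY) would panic:
         * the masked value matches no case in the switch. */
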
  162 /*
  163  * Update an ithread based on the associated intr_event.
  164  */
  165 static void
  166 ithread_update(struct intr_thread *ithd)
  167 {
  168         struct intr_event *ie;
  169         struct thread *td;
  170         u_char pri;
  171 
  172         ie = ithd->it_event;
  173         td = ithd->it_thread;
  174 
  175         /* Determine the overall priority of this event. */
  176         if (TAILQ_EMPTY(&ie->ie_handlers))
  177                 pri = PRI_MAX_ITHD;
  178         else
  179                 pri = TAILQ_FIRST(&ie->ie_handlers)->ih_pri;
  180 
  181         /* Update name and priority. */
  182         strlcpy(td->td_name, ie->ie_fullname, sizeof(td->td_name));
  183 #ifdef KTR
  184         sched_clear_tdname(td);
  185 #endif
  186         thread_lock(td);
  187         sched_prio(td, pri);
  188         thread_unlock(td);
  189 }
  190 
  191 /*
  192  * Regenerate the full name of an interrupt event and update its priority.
  193  */
  194 static void
  195 intr_event_update(struct intr_event *ie)
  196 {
  197         struct intr_handler *ih;
  198         char *last;
  199         int missed, space;
  200 
  201         /* Start off with no entropy and just the name of the event. */
  202         mtx_assert(&ie->ie_lock, MA_OWNED);
  203         strlcpy(ie->ie_fullname, ie->ie_name, sizeof(ie->ie_fullname));
  204         ie->ie_flags &= ~IE_ENTROPY;
  205         missed = 0;
  206         space = 1;
  207 
  208         /* Run through all the handlers updating values. */
  209         TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next) {
  210                 if (strlen(ie->ie_fullname) + strlen(ih->ih_name) + 1 <
  211                     sizeof(ie->ie_fullname)) {
  212                         strcat(ie->ie_fullname, " ");
  213                         strcat(ie->ie_fullname, ih->ih_name);
  214                         space = 0;
  215                 } else
  216                         missed++;
  217                 if (ih->ih_flags & IH_ENTROPY)
  218                         ie->ie_flags |= IE_ENTROPY;
  219         }
  220 
  221         /*
  222          * If the handler names were too long, add +'s to indicate missing
  223          * names. If we run out of room and still have +'s to add, change
  224          * the last character from a + to a *.
  225          */
  226         last = &ie->ie_fullname[sizeof(ie->ie_fullname) - 2];
  227         while (missed-- > 0) {
  228                 if (strlen(ie->ie_fullname) + 1 == sizeof(ie->ie_fullname)) {
  229                         if (*last == '+') {
  230                                 *last = '*';
  231                                 break;
  232                         } else
  233                                 *last = '+';
  234                 } else if (space) {
  235                         strcat(ie->ie_fullname, " +");
  236                         space = 0;
  237                 } else
  238                         strcat(ie->ie_fullname, "+");
  239         }
  240 
  241         /*
   242          * If this event has an ithread, update its priority and
  243          * name.
  244          */
  245         if (ie->ie_thread != NULL)
  246                 ithread_update(ie->ie_thread);
  247         CTR2(KTR_INTR, "%s: updated %s", __func__, ie->ie_fullname);
  248 }
  249 
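A worked illustration of the truncation scheme above (event and handler names hypothetical): suppose ie_name is "irq10" and only the first two handler names fit in ie_fullname.

        handlers:    em0, em1, em2, em3    (em2 and em3 do not fit)
        ie_fullname: "irq10 em0 em1"       after the TAILQ_FOREACH, missed == 2
                     "irq10 em0 em1+"      first missed name appends '+'
                     "irq10 em0 em1++"     second missed name appends '+'

If the buffer fills while '+' signs remain, the final '+' is rewritten as '*'; and if no handler name fit at all (space is still 1), the first marker is appended as " +" to separate it from ie_name.
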
  250 int
  251 intr_event_create(struct intr_event **event, void *source, int flags, int irq,
  252     void (*pre_ithread)(void *), void (*post_ithread)(void *),
  253     void (*post_filter)(void *), int (*assign_cpu)(void *, int),
  254     const char *fmt, ...)
  255 {
  256         struct intr_event *ie;
  257         va_list ap;
  258 
  259         /* The only valid flag during creation is IE_SOFT. */
  260         if ((flags & ~IE_SOFT) != 0)
  261                 return (EINVAL);
  262         ie = malloc(sizeof(struct intr_event), M_ITHREAD, M_WAITOK | M_ZERO);
  263         ie->ie_source = source;
  264         ie->ie_pre_ithread = pre_ithread;
  265         ie->ie_post_ithread = post_ithread;
  266         ie->ie_post_filter = post_filter;
  267         ie->ie_assign_cpu = assign_cpu;
  268         ie->ie_flags = flags;
  269         ie->ie_irq = irq;
  270         ie->ie_cpu = NOCPU;
  271         TAILQ_INIT(&ie->ie_handlers);
  272         mtx_init(&ie->ie_lock, "intr event", NULL, MTX_DEF);
  273 
  274         va_start(ap, fmt);
  275         vsnprintf(ie->ie_name, sizeof(ie->ie_name), fmt, ap);
  276         va_end(ap);
  277         strlcpy(ie->ie_fullname, ie->ie_name, sizeof(ie->ie_fullname));
  278         mtx_lock(&event_lock);
  279         TAILQ_INSERT_TAIL(&event_list, ie, ie_list);
  280         mtx_unlock(&event_lock);
  281         if (event != NULL)
  282                 *event = ie;
  283         CTR2(KTR_INTR, "%s: created %s", __func__, ie->ie_name);
  284         return (0);
  285 }
  286 
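A condensed sketch of a typical call from machine-dependent interrupt code (the isrc and vector variables and the my_* callbacks are hypothetical; since only IE_SOFT is a valid creation flag, hardware events pass 0):

        struct intr_event *ie;
        int error;

        error = intr_event_create(&ie, isrc, 0, vector,
            my_pre_ithread, my_post_ithread, my_post_filter,
            my_assign_cpu, "irq%d:", vector);
        if (error)
                return (error);

The varargs tail formats ie_name; the callbacks let the PIC mask and unmask the source around filter and ithread execution.
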
  287 /*
  288  * Bind an interrupt event to the specified CPU.  Note that not all
  289  * platforms support binding an interrupt to a CPU.  For those
  290  * platforms this request will fail.  For supported platforms, any
  291  * associated ithreads as well as the primary interrupt context will
   292  * be bound to the specified CPU.  Using a cpu id of NOCPU unbinds
  293  * the interrupt event.
  294  */
  295 int
  296 intr_event_bind(struct intr_event *ie, int cpu)
  297 {
  298         lwpid_t id;
  299         int error;
  300 
  301         /* Need a CPU to bind to. */
  302         if (cpu != NOCPU && CPU_ABSENT(cpu))
  303                 return (EINVAL);
  304 
  305         if (ie->ie_assign_cpu == NULL)
  306                 return (EOPNOTSUPP);
  307 
  308         error = priv_check(curthread, PRIV_SCHED_CPUSET_INTR);
  309         if (error)
  310                 return (error);
  311 
  312         /*
  313          * If we have any ithreads try to set their mask first to verify
  314          * permissions, etc.
  315          */
  316         mtx_lock(&ie->ie_lock);
  317         if (ie->ie_thread != NULL) {
  318                 id = ie->ie_thread->it_thread->td_tid;
  319                 mtx_unlock(&ie->ie_lock);
  320                 error = cpuset_setithread(id, cpu);
  321                 if (error)
  322                         return (error);
  323         } else
  324                 mtx_unlock(&ie->ie_lock);
  325         error = ie->ie_assign_cpu(ie->ie_source, cpu);
  326         if (error) {
  327                 mtx_lock(&ie->ie_lock);
  328                 if (ie->ie_thread != NULL) {
  329                         cpu = ie->ie_cpu;
  330                         id = ie->ie_thread->it_thread->td_tid;
  331                         mtx_unlock(&ie->ie_lock);
  332                         (void)cpuset_setithread(id, cpu);
  333                 } else
  334                         mtx_unlock(&ie->ie_lock);
  335                 return (error);
  336         }
  337 
  338         mtx_lock(&ie->ie_lock);
  339         ie->ie_cpu = cpu;
  340         mtx_unlock(&ie->ie_lock);
  341 
  342         return (error);
  343 }
  344 
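A minimal usage sketch (ie and error as in the creation sketch above): binding is all-or-nothing per event, and NOCPU reverts to the default "any CPU" state. Platforms that leave ie_assign_cpu NULL fail with EOPNOTSUPP.

        error = intr_event_bind(ie, 2);         /* ithread + source to CPU 2 */
        ...
        error = intr_event_bind(ie, NOCPU);     /* unbind */
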
  345 static struct intr_event *
  346 intr_lookup(int irq)
  347 {
  348         struct intr_event *ie;
  349 
  350         mtx_lock(&event_lock);
  351         TAILQ_FOREACH(ie, &event_list, ie_list)
  352                 if (ie->ie_irq == irq &&
  353                     (ie->ie_flags & IE_SOFT) == 0 &&
  354                     TAILQ_FIRST(&ie->ie_handlers) != NULL)
  355                         break;
  356         mtx_unlock(&event_lock);
  357         return (ie);
  358 }
  359 
  360 int
  361 intr_setaffinity(int irq, void *m)
  362 {
  363         struct intr_event *ie;
  364         cpuset_t *mask;
  365         int cpu, n;
  366 
  367         mask = m;
  368         cpu = NOCPU;
  369         /*
  370          * If we're setting all cpus we can unbind.  Otherwise make sure
  371          * only one cpu is in the set.
  372          */
  373         if (CPU_CMP(cpuset_root, mask)) {
  374                 for (n = 0; n < CPU_SETSIZE; n++) {
  375                         if (!CPU_ISSET(n, mask))
  376                                 continue;
  377                         if (cpu != NOCPU)
  378                                 return (EINVAL);
  379                         cpu = n;
  380                 }
  381         }
  382         ie = intr_lookup(irq);
  383         if (ie == NULL)
  384                 return (ESRCH);
  385         return (intr_event_bind(ie, cpu));
  386 }
  387 
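For the mask contract above, a caller sketch (irq and error are hypothetical locals): the set must contain exactly one CPU, unless it equals cpuset_root, which unbinds.

        cpuset_t mask;

        CPU_ZERO(&mask);
        CPU_SET(2, &mask);                      /* exactly one CPU set */
        error = intr_setaffinity(irq, &mask);   /* binds to CPU 2 */
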
  388 int
  389 intr_getaffinity(int irq, void *m)
  390 {
  391         struct intr_event *ie;
  392         cpuset_t *mask;
  393 
  394         mask = m;
  395         ie = intr_lookup(irq);
  396         if (ie == NULL)
  397                 return (ESRCH);
  398         CPU_ZERO(mask);
  399         mtx_lock(&ie->ie_lock);
  400         if (ie->ie_cpu == NOCPU)
  401                 CPU_COPY(cpuset_root, mask);
  402         else
  403                 CPU_SET(ie->ie_cpu, mask);
  404         mtx_unlock(&ie->ie_lock);
  405         return (0);
  406 }
  407 
  408 int
  409 intr_event_destroy(struct intr_event *ie)
  410 {
  411 
  412         mtx_lock(&event_lock);
  413         mtx_lock(&ie->ie_lock);
  414         if (!TAILQ_EMPTY(&ie->ie_handlers)) {
  415                 mtx_unlock(&ie->ie_lock);
  416                 mtx_unlock(&event_lock);
  417                 return (EBUSY);
  418         }
  419         TAILQ_REMOVE(&event_list, ie, ie_list);
  420 #ifndef notyet
  421         if (ie->ie_thread != NULL) {
  422                 ithread_destroy(ie->ie_thread);
  423                 ie->ie_thread = NULL;
  424         }
  425 #endif
  426         mtx_unlock(&ie->ie_lock);
  427         mtx_unlock(&event_lock);
  428         mtx_destroy(&ie->ie_lock);
  429         free(ie, M_ITHREAD);
  430         return (0);
  431 }
  432 
  433 #ifndef INTR_FILTER
  434 static struct intr_thread *
  435 ithread_create(const char *name)
  436 {
  437         struct intr_thread *ithd;
  438         struct thread *td;
  439         int error;
  440 
  441         ithd = malloc(sizeof(struct intr_thread), M_ITHREAD, M_WAITOK | M_ZERO);
  442 
  443         error = kproc_kthread_add(ithread_loop, ithd, &intrproc,
  444                     &td, RFSTOPPED | RFHIGHPID,
  445                     0, "intr", "%s", name);
  446         if (error)
   447                 panic("kproc_kthread_add() failed with %d", error);
  448         thread_lock(td);
  449         sched_class(td, PRI_ITHD);
  450         TD_SET_IWAIT(td);
  451         thread_unlock(td);
  452         td->td_pflags |= TDP_ITHREAD;
  453         ithd->it_thread = td;
  454         CTR2(KTR_INTR, "%s: created %s", __func__, name);
  455         return (ithd);
  456 }
  457 #else
  458 static struct intr_thread *
  459 ithread_create(const char *name, struct intr_handler *ih)
  460 {
  461         struct intr_thread *ithd;
  462         struct thread *td;
  463         int error;
  464 
  465         ithd = malloc(sizeof(struct intr_thread), M_ITHREAD, M_WAITOK | M_ZERO);
  466 
  467         error = kproc_kthread_add(ithread_loop, ih, &intrproc,
  468                     &td, RFSTOPPED | RFHIGHPID,
  469                     0, "intr", "%s", name);
  470         if (error)
   471                 panic("kproc_kthread_add() failed with %d", error);
  472         thread_lock(td);
  473         sched_class(td, PRI_ITHD);
  474         TD_SET_IWAIT(td);
  475         thread_unlock(td);
  476         td->td_pflags |= TDP_ITHREAD;
  477         ithd->it_thread = td;
  478         CTR2(KTR_INTR, "%s: created %s", __func__, name);
  479         return (ithd);
  480 }
  481 #endif
  482 
  483 static void
  484 ithread_destroy(struct intr_thread *ithread)
  485 {
  486         struct thread *td;
  487 
  488         CTR2(KTR_INTR, "%s: killing %s", __func__, ithread->it_event->ie_name);
  489         td = ithread->it_thread;
  490         thread_lock(td);
  491         ithread->it_flags |= IT_DEAD;
  492         if (TD_AWAITING_INTR(td)) {
  493                 TD_CLR_IWAIT(td);
  494                 sched_add(td, SRQ_INTR);
  495         }
  496         thread_unlock(td);
  497 }
  498 
  499 #ifndef INTR_FILTER
  500 int
  501 intr_event_add_handler(struct intr_event *ie, const char *name,
  502     driver_filter_t filter, driver_intr_t handler, void *arg, u_char pri,
  503     enum intr_type flags, void **cookiep)
  504 {
  505         struct intr_handler *ih, *temp_ih;
  506         struct intr_thread *it;
  507 
  508         if (ie == NULL || name == NULL || (handler == NULL && filter == NULL))
  509                 return (EINVAL);
  510 
  511         /* Allocate and populate an interrupt handler structure. */
  512         ih = malloc(sizeof(struct intr_handler), M_ITHREAD, M_WAITOK | M_ZERO);
  513         ih->ih_filter = filter;
  514         ih->ih_handler = handler;
  515         ih->ih_argument = arg;
  516         strlcpy(ih->ih_name, name, sizeof(ih->ih_name));
  517         ih->ih_event = ie;
  518         ih->ih_pri = pri;
  519         if (flags & INTR_EXCL)
  520                 ih->ih_flags = IH_EXCLUSIVE;
  521         if (flags & INTR_MPSAFE)
  522                 ih->ih_flags |= IH_MPSAFE;
  523         if (flags & INTR_ENTROPY)
  524                 ih->ih_flags |= IH_ENTROPY;
  525 
   526         /* We can only have one exclusive handler in an event. */
  527         mtx_lock(&ie->ie_lock);
  528         if (!TAILQ_EMPTY(&ie->ie_handlers)) {
  529                 if ((flags & INTR_EXCL) ||
  530                     (TAILQ_FIRST(&ie->ie_handlers)->ih_flags & IH_EXCLUSIVE)) {
  531                         mtx_unlock(&ie->ie_lock);
  532                         free(ih, M_ITHREAD);
  533                         return (EINVAL);
  534                 }
  535         }
  536 
  537         /* Create a thread if we need one. */
  538         while (ie->ie_thread == NULL && handler != NULL) {
  539                 if (ie->ie_flags & IE_ADDING_THREAD)
  540                         msleep(ie, &ie->ie_lock, 0, "ithread", 0);
  541                 else {
  542                         ie->ie_flags |= IE_ADDING_THREAD;
  543                         mtx_unlock(&ie->ie_lock);
  544                         it = ithread_create("intr: newborn");
  545                         mtx_lock(&ie->ie_lock);
  546                         ie->ie_flags &= ~IE_ADDING_THREAD;
  547                         ie->ie_thread = it;
  548                         it->it_event = ie;
  549                         ithread_update(it);
  550                         wakeup(ie);
  551                 }
  552         }
  553 
  554         /* Add the new handler to the event in priority order. */
  555         TAILQ_FOREACH(temp_ih, &ie->ie_handlers, ih_next) {
  556                 if (temp_ih->ih_pri > ih->ih_pri)
  557                         break;
  558         }
  559         if (temp_ih == NULL)
  560                 TAILQ_INSERT_TAIL(&ie->ie_handlers, ih, ih_next);
  561         else
  562                 TAILQ_INSERT_BEFORE(temp_ih, ih, ih_next);
  563         intr_event_update(ie);
  564 
  565         CTR3(KTR_INTR, "%s: added %s to %s", __func__, ih->ih_name,
  566             ie->ie_name);
  567         mtx_unlock(&ie->ie_lock);
  568 
  569         if (cookiep != NULL)
  570                 *cookiep = ih;
  571         return (0);
  572 }
  573 #else
  574 int
  575 intr_event_add_handler(struct intr_event *ie, const char *name,
  576     driver_filter_t filter, driver_intr_t handler, void *arg, u_char pri,
  577     enum intr_type flags, void **cookiep)
  578 {
  579         struct intr_handler *ih, *temp_ih;
  580         struct intr_thread *it;
  581 
  582         if (ie == NULL || name == NULL || (handler == NULL && filter == NULL))
  583                 return (EINVAL);
  584 
  585         /* Allocate and populate an interrupt handler structure. */
  586         ih = malloc(sizeof(struct intr_handler), M_ITHREAD, M_WAITOK | M_ZERO);
  587         ih->ih_filter = filter;
  588         ih->ih_handler = handler;
  589         ih->ih_argument = arg;
  590         strlcpy(ih->ih_name, name, sizeof(ih->ih_name));
  591         ih->ih_event = ie;
  592         ih->ih_pri = pri;
  593         if (flags & INTR_EXCL)
  594                 ih->ih_flags = IH_EXCLUSIVE;
  595         if (flags & INTR_MPSAFE)
  596                 ih->ih_flags |= IH_MPSAFE;
  597         if (flags & INTR_ENTROPY)
  598                 ih->ih_flags |= IH_ENTROPY;
  599 
   600         /* We can only have one exclusive handler in an event. */
  601         mtx_lock(&ie->ie_lock);
  602         if (!TAILQ_EMPTY(&ie->ie_handlers)) {
  603                 if ((flags & INTR_EXCL) ||
  604                     (TAILQ_FIRST(&ie->ie_handlers)->ih_flags & IH_EXCLUSIVE)) {
  605                         mtx_unlock(&ie->ie_lock);
  606                         free(ih, M_ITHREAD);
  607                         return (EINVAL);
  608                 }
  609         }
  610 
  611         /* For filtered handlers, create a private ithread to run on. */
  612         if (filter != NULL && handler != NULL) {
  613                 mtx_unlock(&ie->ie_lock);
  614                 it = ithread_create("intr: newborn", ih);
  615                 mtx_lock(&ie->ie_lock);
  616                 it->it_event = ie;
  617                 ih->ih_thread = it;
  618                 ithread_update(it); /* XXX - do we really need this?!?!? */
  619         } else { /* Create the global per-event thread if we need one. */
  620                 while (ie->ie_thread == NULL && handler != NULL) {
  621                         if (ie->ie_flags & IE_ADDING_THREAD)
  622                                 msleep(ie, &ie->ie_lock, 0, "ithread", 0);
  623                         else {
  624                                 ie->ie_flags |= IE_ADDING_THREAD;
  625                                 mtx_unlock(&ie->ie_lock);
  626                                 it = ithread_create("intr: newborn", ih);
  627                                 mtx_lock(&ie->ie_lock);
  628                                 ie->ie_flags &= ~IE_ADDING_THREAD;
  629                                 ie->ie_thread = it;
  630                                 it->it_event = ie;
  631                                 ithread_update(it);
  632                                 wakeup(ie);
  633                         }
  634                 }
  635         }
  636 
  637         /* Add the new handler to the event in priority order. */
  638         TAILQ_FOREACH(temp_ih, &ie->ie_handlers, ih_next) {
  639                 if (temp_ih->ih_pri > ih->ih_pri)
  640                         break;
  641         }
  642         if (temp_ih == NULL)
  643                 TAILQ_INSERT_TAIL(&ie->ie_handlers, ih, ih_next);
  644         else
  645                 TAILQ_INSERT_BEFORE(temp_ih, ih, ih_next);
  646         intr_event_update(ie);
  647 
  648         CTR3(KTR_INTR, "%s: added %s to %s", __func__, ih->ih_name,
  649             ie->ie_name);
  650         mtx_unlock(&ie->ie_lock);
  651 
  652         if (cookiep != NULL)
  653                 *cookiep = ih;
  654         return (0);
  655 }
  656 #endif
  657 
  658 /*
  659  * Append a description preceded by a ':' to the name of the specified
  660  * interrupt handler.
  661  */
  662 int
  663 intr_event_describe_handler(struct intr_event *ie, void *cookie,
  664     const char *descr)
  665 {
  666         struct intr_handler *ih;
  667         size_t space;
  668         char *start;
  669 
  670         mtx_lock(&ie->ie_lock);
  671 #ifdef INVARIANTS
  672         TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next) {
  673                 if (ih == cookie)
  674                         break;
  675         }
  676         if (ih == NULL) {
  677                 mtx_unlock(&ie->ie_lock);
  678                 panic("handler %p not found in interrupt event %p", cookie, ie);
  679         }
  680 #endif
  681         ih = cookie;
  682 
  683         /*
  684          * Look for an existing description by checking for an
  685          * existing ":".  This assumes device names do not include
  686          * colons.  If one is found, prepare to insert the new
  687          * description at that point.  If one is not found, find the
  688          * end of the name to use as the insertion point.
  689          */
  690         start = strchr(ih->ih_name, ':');
  691         if (start == NULL)
  692                 start = strchr(ih->ih_name, 0);
  693 
  694         /*
  695          * See if there is enough remaining room in the string for the
  696          * description + ":".  The "- 1" leaves room for the trailing
  697          * '\0'.  The "+ 1" accounts for the colon.
  698          */
  699         space = sizeof(ih->ih_name) - (start - ih->ih_name) - 1;
  700         if (strlen(descr) + 1 > space) {
  701                 mtx_unlock(&ie->ie_lock);
  702                 return (ENOSPC);
  703         }
  704 
  705         /* Append a colon followed by the description. */
  706         *start = ':';
  707         strcpy(start + 1, descr);
  708         intr_event_update(ie);
  709         mtx_unlock(&ie->ie_lock);
  710         return (0);
  711 }
  712 
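A worked sketch (names hypothetical): because the search starts at any existing ':', a second description replaces the first rather than nesting.

        /* ih_name starts as "em0" */
        intr_event_describe_handler(ie, cookie, "rxq0"); /* -> "em0:rxq0" */
        intr_event_describe_handler(ie, cookie, "txq0"); /* -> "em0:txq0" */
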
  713 /*
  714  * Return the ie_source field from the intr_event an intr_handler is
  715  * associated with.
  716  */
  717 void *
  718 intr_handler_source(void *cookie)
  719 {
  720         struct intr_handler *ih;
  721         struct intr_event *ie;
  722 
  723         ih = (struct intr_handler *)cookie;
  724         if (ih == NULL)
  725                 return (NULL);
  726         ie = ih->ih_event;
  727         KASSERT(ie != NULL,
  728             ("interrupt handler \"%s\" has a NULL interrupt event",
  729             ih->ih_name));
  730         return (ie->ie_source);
  731 }
  732 
  733 /*
  734  * Sleep until an ithread finishes executing an interrupt handler.
  735  *
  736  * XXX Doesn't currently handle interrupt filters or fast interrupt
   737  * handlers.  This is intended for compatibility with Linux drivers
  738  * only.  Do not use in BSD code.
  739  */
  740 void
  741 _intr_drain(int irq)
  742 {
  743         struct intr_event *ie;
  744         struct intr_thread *ithd;
  745         struct thread *td;
  746 
  747         ie = intr_lookup(irq);
  748         if (ie == NULL)
  749                 return;
  750         if (ie->ie_thread == NULL)
  751                 return;
  752         ithd = ie->ie_thread;
  753         td = ithd->it_thread;
  754         /*
  755          * We set the flag and wait for it to be cleared to avoid
  756          * long delays with potentially busy interrupt handlers
  757          * were we to only sample TD_AWAITING_INTR() every tick.
  758          */
  759         thread_lock(td);
  760         if (!TD_AWAITING_INTR(td)) {
  761                 ithd->it_flags |= IT_WAIT;
  762                 while (ithd->it_flags & IT_WAIT) {
  763                         thread_unlock(td);
  764                         pause("idrain", 1);
  765                         thread_lock(td);
  766                 }
  767         }
  768         thread_unlock(td);
  769         return;
  770 }
  771 
  772 
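A teardown sketch of the compatibility pattern this serves (the softc fields and the interrupt-disable step are hypothetical): quiesce the source first, then drain, then tear down.

        MYDEV_DISABLE_INTR(sc);         /* hypothetical: stop new interrupts */
        _intr_drain(sc->sc_irq);        /* wait out a running handler */
        bus_teardown_intr(sc->sc_dev, sc->sc_irq_res, sc->sc_ih);
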
  773 #ifndef INTR_FILTER
  774 int
  775 intr_event_remove_handler(void *cookie)
  776 {
  777         struct intr_handler *handler = (struct intr_handler *)cookie;
  778         struct intr_event *ie;
  779 #ifdef INVARIANTS
  780         struct intr_handler *ih;
  781 #endif
  782 #ifdef notyet
  783         int dead;
  784 #endif
  785 
  786         if (handler == NULL)
  787                 return (EINVAL);
  788         ie = handler->ih_event;
  789         KASSERT(ie != NULL,
  790             ("interrupt handler \"%s\" has a NULL interrupt event",
  791             handler->ih_name));
  792         mtx_lock(&ie->ie_lock);
  793         CTR3(KTR_INTR, "%s: removing %s from %s", __func__, handler->ih_name,
  794             ie->ie_name);
  795 #ifdef INVARIANTS
  796         TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next)
  797                 if (ih == handler)
  798                         goto ok;
  799         mtx_unlock(&ie->ie_lock);
  800         panic("interrupt handler \"%s\" not found in interrupt event \"%s\"",
  801             ih->ih_name, ie->ie_name);
  802 ok:
  803 #endif
  804         /*
  805          * If there is no ithread, then just remove the handler and return.
  806          * XXX: Note that an INTR_FAST handler might be running on another
  807          * CPU!
  808          */
  809         if (ie->ie_thread == NULL) {
  810                 TAILQ_REMOVE(&ie->ie_handlers, handler, ih_next);
  811                 mtx_unlock(&ie->ie_lock);
  812                 free(handler, M_ITHREAD);
  813                 return (0);
  814         }
  815 
  816         /*
  817          * If the interrupt thread is already running, then just mark this
  818          * handler as being dead and let the ithread do the actual removal.
  819          *
  820          * During a cold boot while cold is set, msleep() does not sleep,
  821          * so we have to remove the handler here rather than letting the
  822          * thread do it.
  823          */
  824         thread_lock(ie->ie_thread->it_thread);
  825         if (!TD_AWAITING_INTR(ie->ie_thread->it_thread) && !cold) {
  826                 handler->ih_flags |= IH_DEAD;
  827 
  828                 /*
  829                  * Ensure that the thread will process the handler list
  830                  * again and remove this handler if it has already passed
  831                  * it on the list.
  832                  *
  833                  * The release part of the following store ensures
  834                  * that the update of ih_flags is ordered before the
  835                  * it_need setting.  See the comment before
  836                  * atomic_cmpset_acq(&ithd->it_need, ...) operation in
  837                  * the ithread_execute_handlers().
  838                  */
  839                 atomic_store_rel_int(&ie->ie_thread->it_need, 1);
  840         } else
  841                 TAILQ_REMOVE(&ie->ie_handlers, handler, ih_next);
  842         thread_unlock(ie->ie_thread->it_thread);
  843         while (handler->ih_flags & IH_DEAD)
  844                 msleep(handler, &ie->ie_lock, 0, "iev_rmh", 0);
  845         intr_event_update(ie);
  846 #ifdef notyet
  847         /*
   848          * XXX: This could be bad in the case of ppbus(4).  Also, I think
  849          * this could lead to races of stale data when servicing an
  850          * interrupt.
  851          */
  852         dead = 1;
  853         TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next) {
  854                 if (!(ih->ih_flags & IH_FAST)) {
  855                         dead = 0;
  856                         break;
  857                 }
  858         }
  859         if (dead) {
  860                 ithread_destroy(ie->ie_thread);
  861                 ie->ie_thread = NULL;
  862         }
  863 #endif
  864         mtx_unlock(&ie->ie_lock);
  865         free(handler, M_ITHREAD);
  866         return (0);
  867 }
  868 
  869 static int
  870 intr_event_schedule_thread(struct intr_event *ie)
  871 {
  872         struct intr_entropy entropy;
  873         struct intr_thread *it;
  874         struct thread *td;
  875         struct thread *ctd;
  876         struct proc *p;
  877 
  878         /*
  879          * If no ithread or no handlers, then we have a stray interrupt.
  880          */
  881         if (ie == NULL || TAILQ_EMPTY(&ie->ie_handlers) ||
  882             ie->ie_thread == NULL)
  883                 return (EINVAL);
  884 
  885         ctd = curthread;
  886         it = ie->ie_thread;
  887         td = it->it_thread;
  888         p = td->td_proc;
  889 
  890         /*
  891          * If any of the handlers for this ithread claim to be good
  892          * sources of entropy, then gather some.
  893          */
  894         if (ie->ie_flags & IE_ENTROPY) {
  895                 entropy.event = (uintptr_t)ie;
  896                 entropy.td = ctd;
  897                 random_harvest_queue(&entropy, sizeof(entropy), 2, RANDOM_INTERRUPT);
  898         }
  899 
  900         KASSERT(p != NULL, ("ithread %s has no process", ie->ie_name));
  901 
  902         /*
  903          * Set it_need to tell the thread to keep running if it is already
  904          * running.  Then, lock the thread and see if we actually need to
  905          * put it on the runqueue.
  906          *
  907          * Use store_rel to arrange that the store to ih_need in
  908          * swi_sched() is before the store to it_need and prepare for
  909          * transfer of this order to loads in the ithread.
  910          */
  911         atomic_store_rel_int(&it->it_need, 1);
  912         thread_lock(td);
  913         if (TD_AWAITING_INTR(td)) {
  914                 CTR3(KTR_INTR, "%s: schedule pid %d (%s)", __func__, p->p_pid,
  915                     td->td_name);
  916                 TD_CLR_IWAIT(td);
  917                 sched_add(td, SRQ_INTR);
  918         } else {
  919                 CTR5(KTR_INTR, "%s: pid %d (%s): it_need %d, state %d",
  920                     __func__, p->p_pid, td->td_name, it->it_need, td->td_state);
  921         }
  922         thread_unlock(td);
  923 
  924         return (0);
  925 }
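To make the pairing described above concrete, a schematic of the release/acquire handshake between a scheduling CPU and the ithread (this restates the file's own ordering comments rather than adding new rules):

        scheduling CPU                       ithread CPU
        --------------                       -----------
        ih->ih_need = 1                      atomic_cmpset_acq_int(&it_need, 1, 0)
        atomic_store_rel_int(&it_need, 1)    ... then loads ih_need in
                                             intr_event_execute_handlers()

The release store orders the ih_need write before it_need becomes visible; the acquire cmpset orders the subsequent ih_need load after it_need was observed, so a set ih_need is never missed.
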
  926 #else
  927 int
  928 intr_event_remove_handler(void *cookie)
  929 {
  930         struct intr_handler *handler = (struct intr_handler *)cookie;
  931         struct intr_event *ie;
  932         struct intr_thread *it;
  933 #ifdef INVARIANTS
  934         struct intr_handler *ih;
  935 #endif
  936 #ifdef notyet
  937         int dead;
  938 #endif
  939 
  940         if (handler == NULL)
  941                 return (EINVAL);
  942         ie = handler->ih_event;
  943         KASSERT(ie != NULL,
  944             ("interrupt handler \"%s\" has a NULL interrupt event",
  945             handler->ih_name));
  946         mtx_lock(&ie->ie_lock);
  947         CTR3(KTR_INTR, "%s: removing %s from %s", __func__, handler->ih_name,
  948             ie->ie_name);
  949 #ifdef INVARIANTS
  950         TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next)
  951                 if (ih == handler)
  952                         goto ok;
  953         mtx_unlock(&ie->ie_lock);
  954         panic("interrupt handler \"%s\" not found in interrupt event \"%s\"",
  955             ih->ih_name, ie->ie_name);
  956 ok:
  957 #endif
  958         /*
  959          * If there are no ithreads (per event and per handler), then
  960          * just remove the handler and return.  
  961          * XXX: Note that an INTR_FAST handler might be running on another CPU!
  962          */
  963         if (ie->ie_thread == NULL && handler->ih_thread == NULL) {
  964                 TAILQ_REMOVE(&ie->ie_handlers, handler, ih_next);
  965                 mtx_unlock(&ie->ie_lock);
  966                 free(handler, M_ITHREAD);
  967                 return (0);
  968         }
  969 
  970         /* Private or global ithread? */
  971         it = (handler->ih_thread) ? handler->ih_thread : ie->ie_thread;
  972         /*
  973          * If the interrupt thread is already running, then just mark this
  974          * handler as being dead and let the ithread do the actual removal.
  975          *
  976          * During a cold boot while cold is set, msleep() does not sleep,
  977          * so we have to remove the handler here rather than letting the
  978          * thread do it.
  979          */
  980         thread_lock(it->it_thread);
  981         if (!TD_AWAITING_INTR(it->it_thread) && !cold) {
  982                 handler->ih_flags |= IH_DEAD;
  983 
  984                 /*
  985                  * Ensure that the thread will process the handler list
  986                  * again and remove this handler if it has already passed
  987                  * it on the list.
  988                  *
  989                  * The release part of the following store ensures
  990                  * that the update of ih_flags is ordered before the
  991                  * it_need setting.  See the comment before
  992                  * atomic_cmpset_acq(&ithd->it_need, ...) operation in
  993                  * the ithread_execute_handlers().
  994                  */
  995                 atomic_store_rel_int(&it->it_need, 1);
  996         } else
  997                 TAILQ_REMOVE(&ie->ie_handlers, handler, ih_next);
  998         thread_unlock(it->it_thread);
  999         while (handler->ih_flags & IH_DEAD)
 1000                 msleep(handler, &ie->ie_lock, 0, "iev_rmh", 0);
 1001         /* 
 1002          * At this point, the handler has been disconnected from the event,
 1003          * so we can kill the private ithread if any.
 1004          */
 1005         if (handler->ih_thread) {
 1006                 ithread_destroy(handler->ih_thread);
 1007                 handler->ih_thread = NULL;
 1008         }
 1009         intr_event_update(ie);
 1010 #ifdef notyet
 1011         /*
  1012          * XXX: This could be bad in the case of ppbus(4).  Also, I think
 1013          * this could lead to races of stale data when servicing an
 1014          * interrupt.
 1015          */
 1016         dead = 1;
 1017         TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next) {
  1018                 if (ih->ih_handler != NULL && ih->ih_thread == NULL) {
 1019                         dead = 0;
 1020                         break;
 1021                 }
 1022         }
 1023         if (dead) {
 1024                 ithread_destroy(ie->ie_thread);
 1025                 ie->ie_thread = NULL;
 1026         }
 1027 #endif
 1028         mtx_unlock(&ie->ie_lock);
 1029         free(handler, M_ITHREAD);
 1030         return (0);
 1031 }
 1032 
 1033 static int
 1034 intr_event_schedule_thread(struct intr_event *ie, struct intr_thread *it)
 1035 {
 1036         struct intr_entropy entropy;
 1037         struct thread *td;
 1038         struct thread *ctd;
 1039         struct proc *p;
 1040 
 1041         /*
 1042          * If no ithread or no handlers, then we have a stray interrupt.
 1043          */
 1044         if (ie == NULL || TAILQ_EMPTY(&ie->ie_handlers) || it == NULL)
 1045                 return (EINVAL);
 1046 
 1047         ctd = curthread;
 1048         td = it->it_thread;
 1049         p = td->td_proc;
 1050 
 1051         /*
 1052          * If any of the handlers for this ithread claim to be good
 1053          * sources of entropy, then gather some.
 1054          */
 1055         if (ie->ie_flags & IE_ENTROPY) {
 1056                 entropy.event = (uintptr_t)ie;
 1057                 entropy.td = ctd;
 1058                 random_harvest_queue(&entropy, sizeof(entropy), 2, RANDOM_INTERRUPT);
 1059         }
 1060 
 1061         KASSERT(p != NULL, ("ithread %s has no process", ie->ie_name));
 1062 
 1063         /*
 1064          * Set it_need to tell the thread to keep running if it is already
 1065          * running.  Then, lock the thread and see if we actually need to
 1066          * put it on the runqueue.
 1067          *
 1068          * Use store_rel to arrange that the store to ih_need in
 1069          * swi_sched() is before the store to it_need and prepare for
 1070          * transfer of this order to loads in the ithread.
 1071          */
 1072         atomic_store_rel_int(&it->it_need, 1);
 1073         thread_lock(td);
 1074         if (TD_AWAITING_INTR(td)) {
 1075                 CTR3(KTR_INTR, "%s: schedule pid %d (%s)", __func__, p->p_pid,
 1076                     td->td_name);
 1077                 TD_CLR_IWAIT(td);
 1078                 sched_add(td, SRQ_INTR);
 1079         } else {
 1080                 CTR5(KTR_INTR, "%s: pid %d (%s): it_need %d, state %d",
 1081                     __func__, p->p_pid, td->td_name, it->it_need, td->td_state);
 1082         }
 1083         thread_unlock(td);
 1084 
 1085         return (0);
 1086 }
 1087 #endif
 1088 
 1089 /*
 1090  * Allow interrupt event binding for software interrupt handlers -- a no-op,
 1091  * since interrupts are generated in software rather than being directed by
 1092  * a PIC.
 1093  */
 1094 static int
 1095 swi_assign_cpu(void *arg, int cpu)
 1096 {
 1097 
 1098         return (0);
 1099 }
 1100 
 1101 /*
 1102  * Add a software interrupt handler to a specified event.  If a given event
 1103  * is not specified, then a new event is created.
 1104  */
 1105 int
 1106 swi_add(struct intr_event **eventp, const char *name, driver_intr_t handler,
 1107             void *arg, int pri, enum intr_type flags, void **cookiep)
 1108 {
 1109         struct intr_event *ie;
 1110         int error;
 1111 
 1112         if (flags & INTR_ENTROPY)
 1113                 return (EINVAL);
 1114 
 1115         ie = (eventp != NULL) ? *eventp : NULL;
 1116 
 1117         if (ie != NULL) {
 1118                 if (!(ie->ie_flags & IE_SOFT))
 1119                         return (EINVAL);
 1120         } else {
 1121                 error = intr_event_create(&ie, NULL, IE_SOFT, 0,
 1122                     NULL, NULL, NULL, swi_assign_cpu, "swi%d:", pri);
 1123                 if (error)
 1124                         return (error);
 1125                 if (eventp != NULL)
 1126                         *eventp = ie;
 1127         }
 1128         error = intr_event_add_handler(ie, name, NULL, handler, arg,
 1129             PI_SWI(pri), flags, cookiep);
 1130         return (error);
 1131 }
 1132 
 1133 /*
 1134  * Schedule a software interrupt thread.
 1135  */
 1136 void
 1137 swi_sched(void *cookie, int flags)
 1138 {
 1139         struct intr_handler *ih = (struct intr_handler *)cookie;
 1140         struct intr_event *ie = ih->ih_event;
 1141         struct intr_entropy entropy;
 1142         int error;
 1143 
 1144         CTR3(KTR_INTR, "swi_sched: %s %s need=%d", ie->ie_name, ih->ih_name,
 1145             ih->ih_need);
 1146 
 1147         entropy.event = (uintptr_t)ih;
 1148         entropy.td = curthread;
 1149         random_harvest_queue(&entropy, sizeof(entropy), 1, RANDOM_SWI);
 1150 
 1151         /*
 1152          * Set ih_need for this handler so that if the ithread is already
 1153          * running it will execute this handler on the next pass.  Otherwise,
 1154          * it will execute it the next time it runs.
 1155          */
 1156         ih->ih_need = 1;
 1157 
 1158         if (!(flags & SWI_DELAY)) {
 1159                 PCPU_INC(cnt.v_soft);
 1160 #ifdef INTR_FILTER
 1161                 error = intr_event_schedule_thread(ie, ie->ie_thread);
 1162 #else
 1163                 error = intr_event_schedule_thread(ie);
 1164 #endif
 1165                 KASSERT(error == 0, ("stray software interrupt"));
 1166         }
 1167 }
 1168 
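A condensed sketch tying swi_add() and swi_sched() together (the my_* names are hypothetical; SWI_CLOCK is one of the standard PI_SWI levels, and INTR_ENTROPY is rejected for soft handlers):

        static struct intr_event *my_swi_ie;
        static void *my_swi_cookie;

        static void
        my_swi_handler(void *arg)
        {
                /* Runs in the swi ithread; arg is the cookie's ih_argument. */
        }

        /* Registration, e.g. from a SYSINIT: */
        error = swi_add(&my_swi_ie, "my_swi", my_swi_handler, NULL,
            SWI_CLOCK, 0, &my_swi_cookie);

        /* From interrupt or top-half context: */
        swi_sched(my_swi_cookie, 0);    /* SWI_DELAY would just set ih_need */
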
 1169 /*
 1170  * Remove a software interrupt handler.  Currently this code does not
 1171  * remove the associated interrupt event if it becomes empty.  Calling code
 1172  * may do so manually via intr_event_destroy(), but that's not really
 1173  * an optimal interface.
 1174  */
 1175 int
 1176 swi_remove(void *cookie)
 1177 {
 1178 
 1179         return (intr_event_remove_handler(cookie));
 1180 }
 1181 
 1182 #ifdef INTR_FILTER
 1183 static void
 1184 priv_ithread_execute_handler(struct proc *p, struct intr_handler *ih)
 1185 {
 1186         struct intr_event *ie;
 1187 
 1188         ie = ih->ih_event;
 1189         /*
 1190          * If this handler is marked for death, remove it from
 1191          * the list of handlers and wake up the sleeper.
 1192          */
 1193         if (ih->ih_flags & IH_DEAD) {
 1194                 mtx_lock(&ie->ie_lock);
 1195                 TAILQ_REMOVE(&ie->ie_handlers, ih, ih_next);
 1196                 ih->ih_flags &= ~IH_DEAD;
 1197                 wakeup(ih);
 1198                 mtx_unlock(&ie->ie_lock);
 1199                 return;
 1200         }
 1201         
 1202         /* Execute this handler. */
 1203         CTR6(KTR_INTR, "%s: pid %d exec %p(%p) for %s flg=%x",
 1204              __func__, p->p_pid, (void *)ih->ih_handler, ih->ih_argument,
 1205              ih->ih_name, ih->ih_flags);
 1206         
 1207         if (!(ih->ih_flags & IH_MPSAFE))
 1208                 mtx_lock(&Giant);
 1209         ih->ih_handler(ih->ih_argument);
 1210         if (!(ih->ih_flags & IH_MPSAFE))
 1211                 mtx_unlock(&Giant);
 1212 }
 1213 #endif
 1214 
 1215 /*
 1216  * This is a public function for use by drivers that mux interrupt
 1217  * handlers for child devices from their interrupt handler.
 1218  */
 1219 void
 1220 intr_event_execute_handlers(struct proc *p, struct intr_event *ie)
 1221 {
 1222         struct intr_handler *ih, *ihn;
 1223 
 1224         TAILQ_FOREACH_SAFE(ih, &ie->ie_handlers, ih_next, ihn) {
 1225                 /*
 1226                  * If this handler is marked for death, remove it from
 1227                  * the list of handlers and wake up the sleeper.
 1228                  */
 1229                 if (ih->ih_flags & IH_DEAD) {
 1230                         mtx_lock(&ie->ie_lock);
 1231                         TAILQ_REMOVE(&ie->ie_handlers, ih, ih_next);
 1232                         ih->ih_flags &= ~IH_DEAD;
 1233                         wakeup(ih);
 1234                         mtx_unlock(&ie->ie_lock);
 1235                         continue;
 1236                 }
 1237 
 1238                 /* Skip filter only handlers */
 1239                 if (ih->ih_handler == NULL)
 1240                         continue;
 1241 
 1242                 /*
 1243                  * For software interrupt threads, we only execute
 1244                  * handlers that have their need flag set.  Hardware
 1245                  * interrupt threads always invoke all of their handlers.
 1246                  *
 1247                  * ih_need can only be 0 or 1.  Failed cmpset below
 1248                  * means that there is no request to execute handlers,
 1249                  * so a retry of the cmpset is not needed.
 1250                  */
 1251                 if ((ie->ie_flags & IE_SOFT) != 0 &&
 1252                     atomic_cmpset_int(&ih->ih_need, 1, 0) == 0)
 1253                         continue;
 1254 
 1255                 /* Execute this handler. */
 1256                 CTR6(KTR_INTR, "%s: pid %d exec %p(%p) for %s flg=%x",
 1257                     __func__, p->p_pid, (void *)ih->ih_handler, 
 1258                     ih->ih_argument, ih->ih_name, ih->ih_flags);
 1259 
 1260                 if (!(ih->ih_flags & IH_MPSAFE))
 1261                         mtx_lock(&Giant);
 1262                 ih->ih_handler(ih->ih_argument);
 1263                 if (!(ih->ih_flags & IH_MPSAFE))
 1264                         mtx_unlock(&Giant);
 1265         }
 1266 }
 1267 
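A sketch of the muxing pattern mentioned above (driver names hypothetical): a parent device's ithread handler fans out to child handlers registered on a private event.

        static void
        mybridge_intr(void *arg)
        {
                struct mybridge_softc *sc = arg;        /* hypothetical */

                /* Run all child handlers hung off our private intr_event. */
                intr_event_execute_handlers(curproc, sc->sc_child_ie);
        }
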
 1268 static void
 1269 ithread_execute_handlers(struct proc *p, struct intr_event *ie)
 1270 {
 1271 
 1272         /* Interrupt handlers should not sleep. */
 1273         if (!(ie->ie_flags & IE_SOFT))
 1274                 THREAD_NO_SLEEPING();
 1275         intr_event_execute_handlers(p, ie);
 1276         if (!(ie->ie_flags & IE_SOFT))
 1277                 THREAD_SLEEPING_OK();
 1278 
 1279         /*
 1280          * Interrupt storm handling:
 1281          *
 1282          * If this interrupt source is currently storming, then throttle
  1283          * it to only fire the handler once per clock tick.
 1284          *
 1285          * If this interrupt source is not currently storming, but the
 1286          * number of back to back interrupts exceeds the storm threshold,
 1287          * then enter storming mode.
 1288          */
 1289         if (intr_storm_threshold != 0 && ie->ie_count >= intr_storm_threshold &&
 1290             !(ie->ie_flags & IE_SOFT)) {
 1291                 /* Report the message only once every second. */
 1292                 if (ppsratecheck(&ie->ie_warntm, &ie->ie_warncnt, 1)) {
 1293                         printf(
 1294         "interrupt storm detected on \"%s\"; throttling interrupt source\n",
 1295                             ie->ie_name);
 1296                 }
 1297                 pause("istorm", 1);
 1298         } else
 1299                 ie->ie_count++;
 1300 
 1301         /*
 1302          * Now that all the handlers have had a chance to run, reenable
 1303          * the interrupt source.
 1304          */
 1305         if (ie->ie_post_ithread != NULL)
 1306                 ie->ie_post_ithread(ie->ie_source);
 1307 }
 1308 
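The threshold checked above is the hw.intr_storm_threshold tunable declared near the top of this file; a userland sketch of disabling the protection (setting it to 0 skips the throttle entirely, per the test above):

        #include <sys/types.h>
        #include <sys/sysctl.h>

        int zero = 0;
        sysctlbyname("hw.intr_storm_threshold", NULL, NULL,
            &zero, sizeof(zero));
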
 1309 #ifndef INTR_FILTER
 1310 /*
 1311  * This is the main code for interrupt threads.
 1312  */
 1313 static void
 1314 ithread_loop(void *arg)
 1315 {
 1316         struct intr_thread *ithd;
 1317         struct intr_event *ie;
 1318         struct thread *td;
 1319         struct proc *p;
 1320         int wake;
 1321 
 1322         td = curthread;
 1323         p = td->td_proc;
 1324         ithd = (struct intr_thread *)arg;
 1325         KASSERT(ithd->it_thread == td,
 1326             ("%s: ithread and proc linkage out of sync", __func__));
 1327         ie = ithd->it_event;
 1328         ie->ie_count = 0;
 1329         wake = 0;
 1330 
 1331         /*
 1332          * As long as we have interrupts outstanding, go through the
 1333          * list of handlers, giving each one a go at it.
 1334          */
 1335         for (;;) {
 1336                 /*
 1337                  * If we are an orphaned thread, then just die.
 1338                  */
 1339                 if (ithd->it_flags & IT_DEAD) {
 1340                         CTR3(KTR_INTR, "%s: pid %d (%s) exiting", __func__,
 1341                             p->p_pid, td->td_name);
 1342                         free(ithd, M_ITHREAD);
 1343                         kthread_exit();
 1344                 }
 1345 
 1346                 /*
 1347                  * Service interrupts.  If another interrupt arrives while
 1348                  * we are running, it will set it_need to note that we
 1349                  * should make another pass.
 1350                  *
 1351                  * The load_acq part of the following cmpset ensures
 1352                  * that the load of ih_need in ithread_execute_handlers()
 1353                  * is ordered after the load of it_need here.
 1354                  */
 1355                 while (atomic_cmpset_acq_int(&ithd->it_need, 1, 0) != 0)
 1356                         ithread_execute_handlers(p, ie);
 1357                 WITNESS_WARN(WARN_PANIC, NULL, "suspending ithread");
 1358                 mtx_assert(&Giant, MA_NOTOWNED);
 1359 
 1360                 /*
 1361                  * Processed all our interrupts.  Now get the sched
 1362                  * lock.  This may take a while and it_need may get
 1363                  * set again, so we have to check it again.
 1364                  */
 1365                 thread_lock(td);
 1366                 if (atomic_load_acq_int(&ithd->it_need) == 0 &&
 1367                     (ithd->it_flags & (IT_DEAD | IT_WAIT)) == 0) {
 1368                         TD_SET_IWAIT(td);
 1369                         ie->ie_count = 0;
 1370                         mi_switch(SW_VOL | SWT_IWAIT, NULL);
 1371                 }
 1372                 if (ithd->it_flags & IT_WAIT) {
 1373                         wake = 1;
 1374                         ithd->it_flags &= ~IT_WAIT;
 1375                 }
 1376                 thread_unlock(td);
 1377                 if (wake) {
 1378                         wakeup(ithd);
 1379                         wake = 0;
 1380                 }
 1381         }
 1382 }
 1383 
 1384 /*
 1385  * Main interrupt handling body.
 1386  *
 1387  * Input:
 1388  * o ie:                        the event connected to this interrupt.
  1389  * o frame:                     some archs (e.g., i386) pass a frame to some
 1390  *                              handlers as their main argument.
 1391  * Return value:
 1392  * o 0:                         everything ok.
 1393  * o EINVAL:                    stray interrupt.
 1394  */
 1395 int
 1396 intr_event_handle(struct intr_event *ie, struct trapframe *frame)
 1397 {
 1398         struct intr_handler *ih;
 1399         struct trapframe *oldframe;
 1400         struct thread *td;
 1401         int error, ret, thread;
 1402 
 1403         td = curthread;
 1404 
 1405 #ifdef KSTACK_USAGE_PROF
 1406         intr_prof_stack_use(td, frame);
 1407 #endif
 1408 
 1409         /* An interrupt with no event or handlers is a stray interrupt. */
 1410         if (ie == NULL || TAILQ_EMPTY(&ie->ie_handlers))
 1411                 return (EINVAL);
 1412 
 1413         /*
 1414          * Execute fast interrupt handlers directly.
 1415          * To support clock handlers, if a handler registers
 1416          * with a NULL argument, then we pass it a pointer to
 1417          * a trapframe as its argument.
 1418          */
 1419         td->td_intr_nesting_level++;
 1420         thread = 0;
 1421         ret = 0;
 1422         critical_enter();
 1423         oldframe = td->td_intr_frame;
 1424         td->td_intr_frame = frame;
 1425         TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next) {
 1426                 if (ih->ih_filter == NULL) {
 1427                         thread = 1;
 1428                         continue;
 1429                 }
 1430                 CTR4(KTR_INTR, "%s: exec %p(%p) for %s", __func__,
 1431                     ih->ih_filter, ih->ih_argument == NULL ? frame :
 1432                     ih->ih_argument, ih->ih_name);
 1433                 if (ih->ih_argument == NULL)
 1434                         ret = ih->ih_filter(frame);
 1435                 else
 1436                         ret = ih->ih_filter(ih->ih_argument);
 1437                 KASSERT(ret == FILTER_STRAY ||
 1438                     ((ret & (FILTER_SCHEDULE_THREAD | FILTER_HANDLED)) != 0 &&
 1439                     (ret & ~(FILTER_SCHEDULE_THREAD | FILTER_HANDLED)) == 0),
 1440                     ("%s: incorrect return value %#x from %s", __func__, ret,
 1441                     ih->ih_name));
 1442 
 1443                 /*
 1444                  * Special handling for wrapper handlers:
 1445                  *
 1446                  * In some cases (e.g., pccard and pccbb), the real
 1447                  * device handler is wrapped in two functions: a filter
 1448                  * wrapper and an ithread wrapper.  In that case (and
 1449                  * only in that case), the filter wrapper may ask the
 1450                  * system to schedule the ithread and mask the interrupt
 1451                  * source if the wrapped handler consists of just an
 1452                  * ithread handler.
 1453                  *
 1454                  * TODO: write a generic wrapper to keep people from
 1455                  * rolling their own.
 1456                  */
 1457                 if (!thread) {
 1458                         if (ret == FILTER_SCHEDULE_THREAD)
 1459                                 thread = 1;
 1460                 }
 1461         }
 1462         td->td_intr_frame = oldframe;
 1463 
 1464         if (thread) {
 1465                 if (ie->ie_pre_ithread != NULL)
 1466                         ie->ie_pre_ithread(ie->ie_source);
 1467         } else {
 1468                 if (ie->ie_post_filter != NULL)
 1469                         ie->ie_post_filter(ie->ie_source);
 1470         }
 1471         
 1472         /* Schedule the ithread if needed. */
 1473         if (thread) {
 1474                 error = intr_event_schedule_thread(ie);
 1475                 KASSERT(error == 0, ("bad stray interrupt"));
 1476         }
 1477         critical_exit();
 1478         td->td_intr_nesting_level--;
 1479         return (0);
 1480 }
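
/*
 * Editor's note: an illustrative sketch (not part of this file) of how a
 * driver typically reaches intr_event_handle().  bus_setup_intr(9)
 * registers a filter and/or an ithread handler on the event; every
 * "mydev" identifier below is hypothetical.
 */
static int
mydev_filter(void *arg)
{
	struct mydev_softc *sc = arg;		/* hypothetical softc */

	if (!mydev_intr_pending(sc))		/* hypothetical register read */
		return (FILTER_STRAY);
	mydev_disable_intr(sc);			/* quiesce until ithread runs */
	return (FILTER_SCHEDULE_THREAD);
}

static void
mydev_ithread(void *arg)
{
	struct mydev_softc *sc = arg;

	mydev_process_events(sc);		/* sleepable work happens here */
	mydev_enable_intr(sc);
}

/* In the driver's attach routine: */
	error = bus_setup_intr(dev, sc->irq_res, INTR_TYPE_NET | INTR_MPSAFE,
	    mydev_filter, mydev_ithread, sc, &sc->intrhand);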
 1481 #else
 1482 /*
 1483  * This is the main code for interrupt threads.
 1484  */
 1485 static void
 1486 ithread_loop(void *arg)
 1487 {
 1488         struct intr_thread *ithd;
 1489         struct intr_handler *ih;
 1490         struct intr_event *ie;
 1491         struct thread *td;
 1492         struct proc *p;
 1493         int priv;
 1494         int wake;
 1495 
 1496         td = curthread;
 1497         p = td->td_proc;
 1498         ih = (struct intr_handler *)arg;
 1499         priv = (ih->ih_thread != NULL) ? 1 : 0;
 1500         ithd = (priv) ? ih->ih_thread : ih->ih_event->ie_thread;
 1501         KASSERT(ithd->it_thread == td,
 1502             ("%s: ithread and proc linkage out of sync", __func__));
 1503         ie = ithd->it_event;
 1504         ie->ie_count = 0;
 1505         wake = 0;
 1506 
 1507         /*
 1508          * As long as we have interrupts outstanding, go through the
 1509          * list of handlers, giving each one a go at it.
 1510          */
 1511         for (;;) {
 1512                 /*
 1513                  * If we are an orphaned thread, then just die.
 1514                  */
 1515                 if (ithd->it_flags & IT_DEAD) {
 1516                         CTR3(KTR_INTR, "%s: pid %d (%s) exiting", __func__,
 1517                             p->p_pid, td->td_name);
 1518                         free(ithd, M_ITHREAD);
 1519                         kthread_exit();
 1520                 }
 1521 
 1522                 /*
 1523                  * Service interrupts.  If another interrupt arrives while
 1524                  * we are running, it will set it_need to note that we
 1525                  * should make another pass.
 1526                  *
 1527                  * The load_acq part of the following cmpset ensures
 1528                  * that the load of ih_need in ithread_execute_handlers()
 1529                  * is ordered after the load of it_need here.
 1530                  */
 1531                 while (atomic_cmpset_acq_int(&ithd->it_need, 1, 0) != 0) {
 1532                         if (priv)
 1533                                 priv_ithread_execute_handler(p, ih);
 1534                         else 
 1535                                 ithread_execute_handlers(p, ie);
 1536                 }
 1537                 WITNESS_WARN(WARN_PANIC, NULL, "suspending ithread");
 1538                 mtx_assert(&Giant, MA_NOTOWNED);
 1539 
 1540                 /*
 1541                  * Processed all our interrupts.  Now get the sched
 1542                  * lock.  This may take a while and it_need may get
 1543                  * set again, so we have to check it again.
 1544                  */
 1545                 thread_lock(td);
 1546                 if (atomic_load_acq_int(&ithd->it_need) == 0 &&
 1547                     (ithd->it_flags & (IT_DEAD | IT_WAIT)) == 0) {
 1548                         TD_SET_IWAIT(td);
 1549                         ie->ie_count = 0;
 1550                         mi_switch(SW_VOL | SWT_IWAIT, NULL);
 1551                 }
 1552                 if (ithd->it_flags & IT_WAIT) {
 1553                         wake = 1;
 1554                         ithd->it_flags &= ~IT_WAIT;
 1555                 }
 1556                 thread_unlock(td);
 1557                 if (wake) {
 1558                         wakeup(ithd);
 1559                         wake = 0;
 1560                 }
 1561         }
 1562 }
 1563 
 1564 /* 
 1565  * Main loop for interrupt filter.
 1566  *
 1567  * Some architectures (i386, amd64 and arm) require the optional frame 
 1568  * parameter, and use it as the main argument for fast handler execution
 1569  * when ih_argument == NULL.
 1570  *
 1571  * Return value:
 1572  * o FILTER_STRAY:              No filter recognized the event, and no
 1573  *                              filter-less handler is registered on this 
 1574  *                              line.
 1575  * o FILTER_HANDLED:            A filter claimed the event and served it.
 1576  * o FILTER_SCHEDULE_THREAD:    No filter claimed the event, but there's at
 1577  *                              least one filter-less handler on this line.
 1578  * o FILTER_HANDLED | 
 1579  *   FILTER_SCHEDULE_THREAD:    A filter claimed the event, and asked for
 1580  *                              scheduling the per-handler ithread.
 1581  *
 1582  * If an ithread has to be scheduled, *ithd is set to point to
 1583  * the struct intr_thread describing the thread to be
 1584  * scheduled.
 1585  */
 1586 
 1587 static int
 1588 intr_filter_loop(struct intr_event *ie, struct trapframe *frame, 
 1589                  struct intr_thread **ithd) 
 1590 {
 1591         struct intr_handler *ih;
 1592         void *arg;
 1593         int ret, thread_only;
 1594 
 1595         ret = 0;
 1596         thread_only = 0;
 1597         TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next) {
 1598                 /*
 1599                  * Execute fast interrupt handlers directly.
 1600                  * To support clock handlers, if a handler registers
 1601                  * with a NULL argument, then we pass it a pointer to
 1602                  * a trapframe as its argument.
 1603                  */
 1604                 arg = ((ih->ih_argument == NULL) ? frame : ih->ih_argument);
 1605                 
 1606                 CTR5(KTR_INTR, "%s: exec %p/%p(%p) for %s", __func__,
 1607                      ih->ih_filter, ih->ih_handler, arg, ih->ih_name);
 1608 
 1609                 if (ih->ih_filter != NULL)
 1610                         ret = ih->ih_filter(arg);
 1611                 else {
 1612                         thread_only = 1;
 1613                         continue;
 1614                 }
 1615                 KASSERT(ret == FILTER_STRAY ||
 1616                     ((ret & (FILTER_SCHEDULE_THREAD | FILTER_HANDLED)) != 0 &&
 1617                     (ret & ~(FILTER_SCHEDULE_THREAD | FILTER_HANDLED)) == 0),
 1618                     ("%s: incorrect return value %#x from %s", __func__, ret,
 1619                     ih->ih_name));
 1620                 if (ret & FILTER_STRAY)
 1621                         continue;
 1622                 else { 
 1623                         *ithd = ih->ih_thread;
 1624                         return (ret);
 1625                 }
 1626         }
 1627 
 1628         /*
 1629          * No filters handled the interrupt and we have at least
 1630          * one handler without a filter.  In this case, we schedule
 1631          * all of the filter-less handlers to run in the ithread.
 1632          */     
 1633         if (thread_only) {
 1634                 *ithd = ie->ie_thread;
 1635                 return (FILTER_SCHEDULE_THREAD);
 1636         }
 1637         return (FILTER_STRAY);
 1638 }
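
/*
 * Editor's note: an illustrative sketch (not part of this file) of a
 * filter that exercises the combined return value documented above: it
 * claims the event and also asks for its per-handler ithread, the
 * pattern the pccard/pccbb wrappers use.  All "wrap" names are
 * hypothetical.
 */
static int
wrap_filter(void *arg)
{
	struct wrap_softc *sc = arg;		/* hypothetical softc */

	if (!wrap_intr_active(sc))		/* hypothetical status check */
		return (FILTER_STRAY);
	wrap_mask_source(sc);			/* mask until the ithread re-enables */
	return (FILTER_HANDLED | FILTER_SCHEDULE_THREAD);
}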
 1639 
 1640 /*
 1641  * Main interrupt handling body.
 1642  *
 1643  * Input:
 1644  * o ie:                        the event connected to this interrupt.
 1645  * o frame:                     some archs (e.g., i386) pass a frame to some
 1646  *                              handlers as their main argument.
 1647  * Return value:
 1648  * o 0:                         everything ok.
 1649  * o EINVAL:                    stray interrupt.
 1650  */
 1651 int
 1652 intr_event_handle(struct intr_event *ie, struct trapframe *frame)
 1653 {
 1654         struct intr_thread *ithd;
 1655         struct trapframe *oldframe;
 1656         struct thread *td;
 1657         int thread;
 1658 
 1659         ithd = NULL;
 1660         td = curthread;
 1661 
 1662         if (ie == NULL || TAILQ_EMPTY(&ie->ie_handlers))
 1663                 return (EINVAL);
 1664 
 1665         td->td_intr_nesting_level++;
 1666         thread = 0;
 1667         critical_enter();
 1668         oldframe = td->td_intr_frame;
 1669         td->td_intr_frame = frame;
 1670         thread = intr_filter_loop(ie, frame, &ithd);    
 1671         if (thread & FILTER_HANDLED) {
 1672                 if (ie->ie_post_filter != NULL)
 1673                         ie->ie_post_filter(ie->ie_source);
 1674         } else {
 1675                 if (ie->ie_pre_ithread != NULL)
 1676                         ie->ie_pre_ithread(ie->ie_source);
 1677         }
 1678         td->td_intr_frame = oldframe;
 1679         critical_exit();
 1680         
 1681         /* Stray interrupt logic: log strays until the threshold is hit. */
 1682         if (thread & FILTER_STRAY) {
 1683                 ie->ie_count++;
 1684                 if (ie->ie_count < intr_storm_threshold)
 1685                         printf("stray interrupt: %s\n", ie->ie_fullname);
 1686         }
 1687 
 1688         /* Schedule an ithread if needed. */
 1689         if (thread & FILTER_SCHEDULE_THREAD) {
 1690                 if (intr_event_schedule_thread(ie, ithd) != 0)
 1691                         panic("%s: impossible stray interrupt", __func__);
 1692         }
 1693         td->td_intr_nesting_level--;
 1694         return (0);
 1695 }
 1696 #endif
 1697 
 1698 #ifdef DDB
 1699 /*
 1700  * Dump details about an interrupt handler
 1701  */
 1702 static void
 1703 db_dump_intrhand(struct intr_handler *ih)
 1704 {
 1705         int comma;
 1706 
 1707         db_printf("\t%-10s ", ih->ih_name);
 1708         switch (ih->ih_pri) {
 1709         case PI_REALTIME:
 1710                 db_printf("CLK ");
 1711                 break;
 1712         case PI_AV:
 1713                 db_printf("AV  ");
 1714                 break;
 1715         case PI_TTY:
 1716                 db_printf("TTY ");
 1717                 break;
 1718         case PI_NET:
 1719                 db_printf("NET ");
 1720                 break;
 1721         case PI_DISK:
 1722                 db_printf("DISK");
 1723                 break;
 1724         case PI_DULL:
 1725                 db_printf("DULL");
 1726                 break;
 1727         default:
 1728                 if (ih->ih_pri >= PI_SOFT)
 1729                         db_printf("SWI ");
 1730                 else
 1731                         db_printf("%4u", ih->ih_pri);
 1732                 break;
 1733         }
 1734         db_printf(" ");
 1735         if (ih->ih_filter != NULL) {
 1736                 db_printf("[F]");
 1737                 db_printsym((uintptr_t)ih->ih_filter, DB_STGY_PROC);
 1738         }
 1739         if (ih->ih_handler != NULL) {
 1740                 if (ih->ih_filter != NULL)
 1741                         db_printf(",");
 1742                 db_printf("[H]");
 1743                 db_printsym((uintptr_t)ih->ih_handler, DB_STGY_PROC);
 1744         }
 1745         db_printf("(%p)", ih->ih_argument);
 1746         if (ih->ih_need ||
 1747             (ih->ih_flags & (IH_EXCLUSIVE | IH_ENTROPY | IH_DEAD |
 1748             IH_MPSAFE)) != 0) {
 1749                 db_printf(" {");
 1750                 comma = 0;
 1751                 if (ih->ih_flags & IH_EXCLUSIVE) {
 1752                         if (comma)
 1753                                 db_printf(", ");
 1754                         db_printf("EXCL");
 1755                         comma = 1;
 1756                 }
 1757                 if (ih->ih_flags & IH_ENTROPY) {
 1758                         if (comma)
 1759                                 db_printf(", ");
 1760                         db_printf("ENTROPY");
 1761                         comma = 1;
 1762                 }
 1763                 if (ih->ih_flags & IH_DEAD) {
 1764                         if (comma)
 1765                                 db_printf(", ");
 1766                         db_printf("DEAD");
 1767                         comma = 1;
 1768                 }
 1769                 if (ih->ih_flags & IH_MPSAFE) {
 1770                         if (comma)
 1771                                 db_printf(", ");
 1772                         db_printf("MPSAFE");
 1773                         comma = 1;
 1774                 }
 1775                 if (ih->ih_need) {
 1776                         if (comma)
 1777                                 db_printf(", ");
 1778                         db_printf("NEED");
 1779                 }
 1780                 db_printf("}");
 1781         }
 1782         db_printf("\n");
 1783 }
 1784 
 1785 /*
 1786  * Dump details about an event.
 1787  */
 1788 void
 1789 db_dump_intr_event(struct intr_event *ie, int handlers)
 1790 {
 1791         struct intr_handler *ih;
 1792         struct intr_thread *it;
 1793         int comma;
 1794 
 1795         db_printf("%s ", ie->ie_fullname);
 1796         it = ie->ie_thread;
 1797         if (it != NULL)
 1798                 db_printf("(pid %d)", it->it_thread->td_proc->p_pid);
 1799         else
 1800                 db_printf("(no thread)");
 1801         if ((ie->ie_flags & (IE_SOFT | IE_ENTROPY | IE_ADDING_THREAD)) != 0 ||
 1802             (it != NULL && it->it_need)) {
 1803                 db_printf(" {");
 1804                 comma = 0;
 1805                 if (ie->ie_flags & IE_SOFT) {
 1806                         db_printf("SOFT");
 1807                         comma = 1;
 1808                 }
 1809                 if (ie->ie_flags & IE_ENTROPY) {
 1810                         if (comma)
 1811                                 db_printf(", ");
 1812                         db_printf("ENTROPY");
 1813                         comma = 1;
 1814                 }
 1815                 if (ie->ie_flags & IE_ADDING_THREAD) {
 1816                         if (comma)
 1817                                 db_printf(", ");
 1818                         db_printf("ADDING_THREAD");
 1819                         comma = 1;
 1820                 }
 1821                 if (it != NULL && it->it_need) {
 1822                         if (comma)
 1823                                 db_printf(", ");
 1824                         db_printf("NEED");
 1825                 }
 1826                 db_printf("}");
 1827         }
 1828         db_printf("\n");
 1829 
 1830         if (handlers)
 1831                 TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next)
 1832                     db_dump_intrhand(ih);
 1833 }
 1834 
 1835 /*
 1836  * Dump data about interrupt handlers
 1837  */
 1838 DB_SHOW_COMMAND(intr, db_show_intr)
 1839 {
 1840         struct intr_event *ie;
 1841         int all, verbose;
 1842 
 1843         verbose = strchr(modif, 'v') != NULL;
 1844         all = strchr(modif, 'a') != NULL;
 1845         TAILQ_FOREACH(ie, &event_list, ie_list) {
 1846                 if (!all && TAILQ_EMPTY(&ie->ie_handlers))
 1847                         continue;
 1848                 db_dump_intr_event(ie, verbose);
 1849                 if (db_pager_quit)
 1850                         break;
 1851         }
 1852 }
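
/*
 * Editor's note (usage, assuming standard ddb(4) modifier syntax): from
 * the DDB prompt the command above is invoked as "show intr"; "show
 * intr/v" also dumps each event's handlers, and "show intr/a" includes
 * events with no handlers attached.
 */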
 1853 #endif /* DDB */
 1854 
 1855 /*
 1856  * Start standard software interrupt threads
 1857  */
 1858 static void
 1859 start_softintr(void *dummy)
 1860 {
 1861 
 1862         if (swi_add(NULL, "vm", swi_vm, NULL, SWI_VM, INTR_MPSAFE, &vm_ih))
 1863                 panic("died while creating vm swi ithread");
 1864 }
 1865 SYSINIT(start_softintr, SI_SUB_SOFTINTR, SI_ORDER_FIRST, start_softintr,
 1866     NULL);
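
/*
 * Editor's note: an illustrative sketch (not part of this file) showing
 * how additional software interrupts are created and triggered with
 * swi_add(9) and swi_sched(9).  The "mymod" names are hypothetical; the
 * SWI_VM priority level is reused here purely for illustration.
 */
static void *mymod_cookie;

static void
mymod_swi(void *arg)
{
	/* Deferred work runs here in a software-interrupt thread. */
}

static void
mymod_init(void *dummy)
{

	if (swi_add(NULL, "mymod", mymod_swi, NULL, SWI_VM, INTR_MPSAFE,
	    &mymod_cookie))
		panic("died while creating mymod swi ithread");
}
SYSINIT(mymod_swi, SI_SUB_SOFTINTR, SI_ORDER_ANY, mymod_init, NULL);

/* Later, typically from interrupt context: */
	swi_sched(mymod_cookie, 0);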
 1867 
 1868 /*
 1869  * Sysctls used by systat and others: hw.intrnames and hw.intrcnt.
 1870  * The data for these is machine dependent, and the declarations are in
 1871  * machine-dependent code.  The layout of intrnames and intrcnt, however,
 1872  * is machine independent.
 1873  *
 1874  * We do not know the length of intrcnt and intrnames at compile time, so
 1875  * calculate things at run time.
 1876  */
 1877 static int
 1878 sysctl_intrnames(SYSCTL_HANDLER_ARGS)
 1879 {
 1880         return (sysctl_handle_opaque(oidp, intrnames, sintrnames, req));
 1881 }
 1882 
 1883 SYSCTL_PROC(_hw, OID_AUTO, intrnames, CTLTYPE_OPAQUE | CTLFLAG_RD,
 1884     NULL, 0, sysctl_intrnames, "", "Interrupt Names");
 1885 
 1886 static int
 1887 sysctl_intrcnt(SYSCTL_HANDLER_ARGS)
 1888 {
 1889 #ifdef SCTL_MASK32
 1890         uint32_t *intrcnt32;
 1891         unsigned i;
 1892         int error;
 1893 
 1894         if (req->flags & SCTL_MASK32) {
 1895                 if (!req->oldptr)
 1896                         return (sysctl_handle_opaque(oidp, NULL, sintrcnt / 2, req));
 1897                 intrcnt32 = malloc(sintrcnt / 2, M_TEMP, M_NOWAIT);
 1898                 if (intrcnt32 == NULL)
 1899                         return (ENOMEM);
 1900                 for (i = 0; i < sintrcnt / sizeof (u_long); i++)
 1901                         intrcnt32[i] = intrcnt[i];
 1902                 error = sysctl_handle_opaque(oidp, intrcnt32, sintrcnt / 2, req);
 1903                 free(intrcnt32, M_TEMP);
 1904                 return (error);
 1905         }
 1906 #endif
 1907         return (sysctl_handle_opaque(oidp, intrcnt, sintrcnt, req));
 1908 }
 1909 
 1910 SYSCTL_PROC(_hw, OID_AUTO, intrcnt, CTLTYPE_OPAQUE | CTLFLAG_RD,
 1911     NULL, 0, sysctl_intrcnt, "", "Interrupt Counts");
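
/*
 * Editor's note: an illustrative userland sketch (not part of this file)
 * that reads the two sysctls above the way systat(1) and vmstat(8) do.
 * hw.intrnames is a packed sequence of NUL-terminated strings and
 * hw.intrcnt a parallel array of u_long counters.
 */
#include <sys/types.h>
#include <sys/sysctl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int
main(void)
{
	u_long *cnt;
	char *names, *cp;
	size_t scnt, snames, i;

	/* A NULL buffer makes sysctlbyname(3) report the required size. */
	if (sysctlbyname("hw.intrcnt", NULL, &scnt, NULL, 0) == -1 ||
	    sysctlbyname("hw.intrnames", NULL, &snames, NULL, 0) == -1)
		return (1);
	cnt = malloc(scnt);
	names = malloc(snames);
	if (cnt == NULL || names == NULL ||
	    sysctlbyname("hw.intrcnt", cnt, &scnt, NULL, 0) == -1 ||
	    sysctlbyname("hw.intrnames", names, &snames, NULL, 0) == -1)
		return (1);
	cp = names;
	for (i = 0; i < scnt / sizeof(u_long) && *cp != '\0'; i++) {
		if (cnt[i] != 0)
			printf("%s\t%lu\n", cp, cnt[i]);
		cp += strlen(cp) + 1;
	}
	free(cnt);
	free(names);
	return (0);
}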
 1912 
 1913 #ifdef DDB
 1914 /*
 1915  * DDB command to dump the interrupt statistics.
 1916  */
 1917 DB_SHOW_COMMAND(intrcnt, db_show_intrcnt)
 1918 {
 1919         u_long *i;
 1920         char *cp;
 1921         u_int j;
 1922 
 1923         cp = intrnames;
 1924         j = 0;
 1925         for (i = intrcnt; j < (sintrcnt / sizeof(u_long)) && !db_pager_quit;
 1926             i++, j++) {
 1927                 if (*cp == '\0')
 1928                         break;
 1929                 if (*i != 0)
 1930                         db_printf("%s\t%lu\n", cp, *i);
 1931                 cp += strlen(cp) + 1;
 1932         }
 1933 }
 1934 #endif
