FreeBSD/Linux Kernel Cross Reference
sys/kern/kern_intr.c


    1 /*-
    2  * Copyright (c) 1997, Stefan Esser <se@freebsd.org>
    3  * All rights reserved.
    4  *
    5  * Redistribution and use in source and binary forms, with or without
    6  * modification, are permitted provided that the following conditions
    7  * are met:
    8  * 1. Redistributions of source code must retain the above copyright
    9  *    notice unmodified, this list of conditions, and the following
   10  *    disclaimer.
   11  * 2. Redistributions in binary form must reproduce the above copyright
   12  *    notice, this list of conditions and the following disclaimer in the
   13  *    documentation and/or other materials provided with the distribution.
   14  *
   15  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
   16  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
   17  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
   18  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
   19  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
   20  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
   21  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
   22  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
   23  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
   24  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
   25  */
   26 
   27 #include <sys/cdefs.h>
   28 __FBSDID("$FreeBSD$");
   29 
   30 #include "opt_ddb.h"
   31 #include "opt_kstack_usage_prof.h"
   32 
   33 #include <sys/param.h>
   34 #include <sys/bus.h>
   35 #include <sys/conf.h>
   36 #include <sys/cpuset.h>
   37 #include <sys/rtprio.h>
   38 #include <sys/systm.h>
   39 #include <sys/interrupt.h>
   40 #include <sys/kernel.h>
   41 #include <sys/kthread.h>
   42 #include <sys/ktr.h>
   43 #include <sys/limits.h>
   44 #include <sys/lock.h>
   45 #include <sys/malloc.h>
   46 #include <sys/mutex.h>
   47 #include <sys/priv.h>
   48 #include <sys/proc.h>
   49 #include <sys/random.h>
   50 #include <sys/resourcevar.h>
   51 #include <sys/sched.h>
   52 #include <sys/smp.h>
   53 #include <sys/sysctl.h>
   54 #include <sys/syslog.h>
   55 #include <sys/unistd.h>
   56 #include <sys/vmmeter.h>
   57 #include <machine/atomic.h>
   58 #include <machine/cpu.h>
   59 #include <machine/md_var.h>
   60 #include <machine/stdarg.h>
   61 #ifdef DDB
   62 #include <ddb/ddb.h>
   63 #include <ddb/db_sym.h>
   64 #endif
   65 
   66 /*
   67  * Describe an interrupt thread.  There is one of these per interrupt event.
   68  */
   69 struct intr_thread {
   70         struct intr_event *it_event;
   71         struct thread *it_thread;       /* Kernel thread. */
   72         int     it_flags;               /* (j) IT_* flags. */
   73         int     it_need;                /* Needs service. */
   74 };
   75 
   76 /* Interrupt thread flags kept in it_flags */
   77 #define IT_DEAD         0x000001        /* Thread is waiting to exit. */
   78 #define IT_WAIT         0x000002        /* Thread is waiting for completion. */
   79 
   80 struct  intr_entropy {
   81         struct  thread *td;
   82         uintptr_t event;
   83 };
   84 
   85 struct  intr_event *clk_intr_event;
   86 struct  intr_event *tty_intr_event;
   87 void    *vm_ih;
   88 struct proc *intrproc;
   89 
   90 static MALLOC_DEFINE(M_ITHREAD, "ithread", "Interrupt Threads");
   91 
   92 static int intr_storm_threshold = 1000;
   93 SYSCTL_INT(_hw, OID_AUTO, intr_storm_threshold, CTLFLAG_RWTUN,
   94     &intr_storm_threshold, 0,
   95     "Number of consecutive interrupts before storm protection is enabled");
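       /*
        * Sketch of how this knob is used (illustrative, not part of this
        * file): CTLFLAG_RWTUN makes it both a loader tunable and a
        * writable sysctl, so storm protection can be retuned or disabled
        * at runtime, e.g.:
        *
        *      sysctl hw.intr_storm_threshold=0    (0 disables detection)
        *
        * ithread_execute_handlers() below only throttles a source when
        * this threshold is non-zero.
        */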
   96 static TAILQ_HEAD(, intr_event) event_list =
   97     TAILQ_HEAD_INITIALIZER(event_list);
   98 static struct mtx event_lock;
   99 MTX_SYSINIT(intr_event_list, &event_lock, "intr event list", MTX_DEF);
  100 
  101 static void     intr_event_update(struct intr_event *ie);
  102 #ifdef INTR_FILTER
  103 static int      intr_event_schedule_thread(struct intr_event *ie,
  104                     struct intr_thread *ithd);
  105 static int      intr_filter_loop(struct intr_event *ie,
  106                     struct trapframe *frame, struct intr_thread **ithd);
  107 static struct intr_thread *ithread_create(const char *name,
  108                               struct intr_handler *ih);
  109 #else
  110 static int      intr_event_schedule_thread(struct intr_event *ie);
  111 static struct intr_thread *ithread_create(const char *name);
  112 #endif
  113 static void     ithread_destroy(struct intr_thread *ithread);
  114 static void     ithread_execute_handlers(struct proc *p, 
  115                     struct intr_event *ie);
  116 #ifdef INTR_FILTER
  117 static void     priv_ithread_execute_handler(struct proc *p, 
  118                     struct intr_handler *ih);
  119 #endif
  120 static void     ithread_loop(void *);
  121 static void     ithread_update(struct intr_thread *ithd);
  122 static void     start_softintr(void *);
  123 
  124 /* Map an interrupt type to an ithread priority. */
  125 u_char
  126 intr_priority(enum intr_type flags)
  127 {
  128         u_char pri;
  129 
  130         flags &= (INTR_TYPE_TTY | INTR_TYPE_BIO | INTR_TYPE_NET |
  131             INTR_TYPE_CAM | INTR_TYPE_MISC | INTR_TYPE_CLK | INTR_TYPE_AV);
  132         switch (flags) {
  133         case INTR_TYPE_TTY:
  134                 pri = PI_TTY;
  135                 break;
  136         case INTR_TYPE_BIO:
  137                 pri = PI_DISK;
  138                 break;
  139         case INTR_TYPE_NET:
  140                 pri = PI_NET;
  141                 break;
  142         case INTR_TYPE_CAM:
  143                 pri = PI_DISK;
  144                 break;
  145         case INTR_TYPE_AV:
  146                 pri = PI_AV;
  147                 break;
  148         case INTR_TYPE_CLK:
  149                 pri = PI_REALTIME;
  150                 break;
  151         case INTR_TYPE_MISC:
  152                 pri = PI_DULL;          /* don't care */
  153                 break;
  154         default:
  155                 /* We didn't specify an interrupt level. */
  156                 panic("intr_priority: no interrupt type in flags");
  157         }
  158 
  159         return pri;
  160 }
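       /*
        * Example (illustrative): a network driver that passes
        * INTR_TYPE_NET to bus_setup_intr(9) ends up with an ithread
        * running at PI_NET.  Exactly one of the type bits masked above
        * must be set; zero or multiple bits fall into the default case
        * and panic.
        */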
  161 
  162 /*
  163  * Update an ithread based on the associated intr_event.
  164  */
  165 static void
  166 ithread_update(struct intr_thread *ithd)
  167 {
  168         struct intr_event *ie;
  169         struct thread *td;
  170         u_char pri;
  171 
  172         ie = ithd->it_event;
  173         td = ithd->it_thread;
  174 
  175         /* Determine the overall priority of this event. */
  176         if (TAILQ_EMPTY(&ie->ie_handlers))
  177                 pri = PRI_MAX_ITHD;
  178         else
  179                 pri = TAILQ_FIRST(&ie->ie_handlers)->ih_pri;
  180 
  181         /* Update name and priority. */
  182         strlcpy(td->td_name, ie->ie_fullname, sizeof(td->td_name));
  183 #ifdef KTR
  184         sched_clear_tdname(td);
  185 #endif
  186         thread_lock(td);
  187         sched_prio(td, pri);
  188         thread_unlock(td);
  189 }
  190 
  191 /*
  192  * Regenerate the full name of an interrupt event and update its priority.
  193  */
  194 static void
  195 intr_event_update(struct intr_event *ie)
  196 {
  197         struct intr_handler *ih;
  198         char *last;
  199         int missed, space;
  200 
  201         /* Start off with no entropy and just the name of the event. */
  202         mtx_assert(&ie->ie_lock, MA_OWNED);
  203         strlcpy(ie->ie_fullname, ie->ie_name, sizeof(ie->ie_fullname));
  204         ie->ie_flags &= ~IE_ENTROPY;
  205         missed = 0;
  206         space = 1;
  207 
  208         /* Run through all the handlers updating values. */
  209         TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next) {
  210                 if (strlen(ie->ie_fullname) + strlen(ih->ih_name) + 1 <
  211                     sizeof(ie->ie_fullname)) {
  212                         strcat(ie->ie_fullname, " ");
  213                         strcat(ie->ie_fullname, ih->ih_name);
  214                         space = 0;
  215                 } else
  216                         missed++;
  217                 if (ih->ih_flags & IH_ENTROPY)
  218                         ie->ie_flags |= IE_ENTROPY;
  219         }
  220 
  221         /*
  222          * If there is only one handler and its name is too long, just copy in
  223          * as much of the end of the name (includes the unit number) as will
  224          * fit.  Otherwise, we have multiple handlers and not all of the names
  225          * will fit.  Add +'s to indicate missing names.  If we run out of room
  226          * and still have +'s to add, change the last character from a + to a *.
  227          */
  228         if (missed == 1 && space == 1) {
  229                 ih = TAILQ_FIRST(&ie->ie_handlers);
  230                 missed = strlen(ie->ie_fullname) + strlen(ih->ih_name) + 2 -
  231                     sizeof(ie->ie_fullname);
  232                 strcat(ie->ie_fullname, (missed == 0) ? " " : "-");
  233                 strcat(ie->ie_fullname, &ih->ih_name[missed]);
  234                 missed = 0;
  235         }
  236         last = &ie->ie_fullname[sizeof(ie->ie_fullname) - 2];
  237         while (missed-- > 0) {
  238                 if (strlen(ie->ie_fullname) + 1 == sizeof(ie->ie_fullname)) {
  239                         if (*last == '+') {
  240                                 *last = '*';
  241                                 break;
  242                         } else
  243                                 *last = '+';
  244                 } else if (space) {
  245                         strcat(ie->ie_fullname, " +");
  246                         space = 0;
  247                 } else
  248                         strcat(ie->ie_fullname, "+");
  249         }
  250 
  251         /*
   252          * If this event has an ithread, update its priority and
  253          * name.
  254          */
  255         if (ie->ie_thread != NULL)
  256                 ithread_update(ie->ie_thread);
  257         CTR2(KTR_INTR, "%s: updated %s", __func__, ie->ie_fullname);
  258 }
  259 
  260 int
  261 intr_event_create(struct intr_event **event, void *source, int flags, int irq,
  262     void (*pre_ithread)(void *), void (*post_ithread)(void *),
  263     void (*post_filter)(void *), int (*assign_cpu)(void *, int),
  264     const char *fmt, ...)
  265 {
  266         struct intr_event *ie;
  267         va_list ap;
  268 
  269         /* The only valid flag during creation is IE_SOFT. */
  270         if ((flags & ~IE_SOFT) != 0)
  271                 return (EINVAL);
  272         ie = malloc(sizeof(struct intr_event), M_ITHREAD, M_WAITOK | M_ZERO);
  273         ie->ie_source = source;
  274         ie->ie_pre_ithread = pre_ithread;
  275         ie->ie_post_ithread = post_ithread;
  276         ie->ie_post_filter = post_filter;
  277         ie->ie_assign_cpu = assign_cpu;
  278         ie->ie_flags = flags;
  279         ie->ie_irq = irq;
  280         ie->ie_cpu = NOCPU;
  281         TAILQ_INIT(&ie->ie_handlers);
  282         mtx_init(&ie->ie_lock, "intr event", NULL, MTX_DEF);
  283 
  284         va_start(ap, fmt);
  285         vsnprintf(ie->ie_name, sizeof(ie->ie_name), fmt, ap);
  286         va_end(ap);
  287         strlcpy(ie->ie_fullname, ie->ie_name, sizeof(ie->ie_fullname));
  288         mtx_lock(&event_lock);
  289         TAILQ_INSERT_TAIL(&event_list, ie, ie_list);
  290         mtx_unlock(&event_lock);
  291         if (event != NULL)
  292                 *event = ie;
  293         CTR2(KTR_INTR, "%s: created %s", __func__, ie->ie_name);
  294         return (0);
  295 }
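       /*
        * Illustrative caller (hypothetical argument names): the
        * machine-dependent interrupt code, not drivers, normally creates
        * events, along the lines of
        *
        *      error = intr_event_create(&ie, isrc, 0, vector,
        *          my_pre_ithread, my_post_ithread, my_post_filter,
        *          my_assign_cpu, "irq%d:", vector);
        *
        * The trailing format arguments build ie_name ("irq17:"), to which
        * intr_event_update() appends the handler names.
        */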
  296 
  297 /*
  298  * Bind an interrupt event to the specified CPU.  Note that not all
  299  * platforms support binding an interrupt to a CPU.  For those
  300  * platforms this request will fail.  Using a cpu id of NOCPU unbinds
  301  * the interrupt event.
  302  */
  303 static int
  304 _intr_event_bind(struct intr_event *ie, int cpu, bool bindirq, bool bindithread)
  305 {
  306         lwpid_t id;
  307         int error;
  308 
  309         /* Need a CPU to bind to. */
  310         if (cpu != NOCPU && CPU_ABSENT(cpu))
  311                 return (EINVAL);
  312 
  313         if (ie->ie_assign_cpu == NULL)
  314                 return (EOPNOTSUPP);
  315 
  316         error = priv_check(curthread, PRIV_SCHED_CPUSET_INTR);
  317         if (error)
  318                 return (error);
  319 
  320         /*
  321          * If we have any ithreads try to set their mask first to verify
  322          * permissions, etc.
  323          */
  324         if (bindithread) {
  325                 mtx_lock(&ie->ie_lock);
  326                 if (ie->ie_thread != NULL) {
  327                         id = ie->ie_thread->it_thread->td_tid;
  328                         mtx_unlock(&ie->ie_lock);
  329                         error = cpuset_setithread(id, cpu);
  330                         if (error)
  331                                 return (error);
  332                 } else
  333                         mtx_unlock(&ie->ie_lock);
  334         }
  335         if (bindirq)
  336                 error = ie->ie_assign_cpu(ie->ie_source, cpu);
  337         if (error) {
  338                 if (bindithread) {
  339                         mtx_lock(&ie->ie_lock);
  340                         if (ie->ie_thread != NULL) {
  341                                 cpu = ie->ie_cpu;
  342                                 id = ie->ie_thread->it_thread->td_tid;
  343                                 mtx_unlock(&ie->ie_lock);
  344                                 (void)cpuset_setithread(id, cpu);
  345                         } else
  346                                 mtx_unlock(&ie->ie_lock);
  347                 }
  348                 return (error);
  349         }
  350 
  351         if (bindirq) {
  352                 mtx_lock(&ie->ie_lock);
  353                 ie->ie_cpu = cpu;
  354                 mtx_unlock(&ie->ie_lock);
  355         }
  356 
  357         return (error);
  358 }
  359 
  360 /*
  361  * Bind an interrupt event to the specified CPU.  For supported platforms, any
  362  * associated ithreads as well as the primary interrupt context will be bound
   363  * to the specified CPU.
  364  */
  365 int
  366 intr_event_bind(struct intr_event *ie, int cpu)
  367 {
  368 
  369         return (_intr_event_bind(ie, cpu, true, true));
  370 }
  371 
  372 /*
  373  * Bind an interrupt event to the specified CPU, but do not bind associated
  374  * ithreads.
  375  */
  376 int
  377 intr_event_bind_irqonly(struct intr_event *ie, int cpu)
  378 {
  379 
  380         return (_intr_event_bind(ie, cpu, true, false));
  381 }
  382 
  383 /*
  384  * Bind an interrupt event's ithread to the specified CPU.
  385  */
  386 int
  387 intr_event_bind_ithread(struct intr_event *ie, int cpu)
  388 {
  389 
  390         return (_intr_event_bind(ie, cpu, false, true));
  391 }
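       /*
        * Illustrative only: drivers normally reach these bindings through
        * bus_bind_intr(9), e.g.
        *
        *      error = bus_bind_intr(dev, irq_res, cpu);
        *
        * which typically resolves to intr_event_bind() on the event
        * backing the allocated interrupt resource.
        */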
  392 
  393 static struct intr_event *
  394 intr_lookup(int irq)
  395 {
  396         struct intr_event *ie;
  397 
  398         mtx_lock(&event_lock);
  399         TAILQ_FOREACH(ie, &event_list, ie_list)
  400                 if (ie->ie_irq == irq &&
  401                     (ie->ie_flags & IE_SOFT) == 0 &&
  402                     TAILQ_FIRST(&ie->ie_handlers) != NULL)
  403                         break;
  404         mtx_unlock(&event_lock);
  405         return (ie);
  406 }
  407 
  408 int
  409 intr_setaffinity(int irq, int mode, void *m)
  410 {
  411         struct intr_event *ie;
  412         cpuset_t *mask;
  413         int cpu, n;
  414 
  415         mask = m;
  416         cpu = NOCPU;
  417         /*
  418          * If we're setting all cpus we can unbind.  Otherwise make sure
  419          * only one cpu is in the set.
  420          */
  421         if (CPU_CMP(cpuset_root, mask)) {
  422                 for (n = 0; n < CPU_SETSIZE; n++) {
  423                         if (!CPU_ISSET(n, mask))
  424                                 continue;
  425                         if (cpu != NOCPU)
  426                                 return (EINVAL);
  427                         cpu = n;
  428                 }
  429         }
  430         ie = intr_lookup(irq);
  431         if (ie == NULL)
  432                 return (ESRCH);
  433         switch (mode) {
  434         case CPU_WHICH_IRQ:
  435                 return (intr_event_bind(ie, cpu));
  436         case CPU_WHICH_INTRHANDLER:
  437                 return (intr_event_bind_irqonly(ie, cpu));
  438         case CPU_WHICH_ITHREAD:
  439                 return (intr_event_bind_ithread(ie, cpu));
  440         default:
  441                 return (EINVAL);
  442         }
  443 }
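       /*
        * Assumed usage path (for illustration): cpuset(1) reaches this
        * function via cpuset_setaffinity(2) with CPU_WHICH_IRQ, e.g.
        *
        *      cpuset -l 2 -x 30       (bind IRQ 30 to CPU 2)
        *
        * As coded above, a mask naming more than one CPU is rejected with
        * EINVAL unless it names all CPUs, which unbinds the interrupt.
        */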
  444 
  445 int
  446 intr_getaffinity(int irq, int mode, void *m)
  447 {
  448         struct intr_event *ie;
  449         struct thread *td;
  450         struct proc *p;
  451         cpuset_t *mask;
  452         lwpid_t id;
  453         int error;
  454 
  455         mask = m;
  456         ie = intr_lookup(irq);
  457         if (ie == NULL)
  458                 return (ESRCH);
  459 
  460         error = 0;
  461         CPU_ZERO(mask);
  462         switch (mode) {
  463         case CPU_WHICH_IRQ:
  464         case CPU_WHICH_INTRHANDLER:
  465                 mtx_lock(&ie->ie_lock);
  466                 if (ie->ie_cpu == NOCPU)
  467                         CPU_COPY(cpuset_root, mask);
  468                 else
  469                         CPU_SET(ie->ie_cpu, mask);
  470                 mtx_unlock(&ie->ie_lock);
  471                 break;
  472         case CPU_WHICH_ITHREAD:
  473                 mtx_lock(&ie->ie_lock);
  474                 if (ie->ie_thread == NULL) {
  475                         mtx_unlock(&ie->ie_lock);
  476                         CPU_COPY(cpuset_root, mask);
  477                 } else {
  478                         id = ie->ie_thread->it_thread->td_tid;
  479                         mtx_unlock(&ie->ie_lock);
  480                         error = cpuset_which(CPU_WHICH_TID, id, &p, &td, NULL);
  481                         if (error != 0)
  482                                 return (error);
  483                         CPU_COPY(&td->td_cpuset->cs_mask, mask);
  484                         PROC_UNLOCK(p);
   485                 }
                       break;
   486         default:
  487                 return (EINVAL);
  488         }
  489         return (0);
  490 }
  491 
  492 int
  493 intr_event_destroy(struct intr_event *ie)
  494 {
  495 
  496         mtx_lock(&event_lock);
  497         mtx_lock(&ie->ie_lock);
  498         if (!TAILQ_EMPTY(&ie->ie_handlers)) {
  499                 mtx_unlock(&ie->ie_lock);
  500                 mtx_unlock(&event_lock);
  501                 return (EBUSY);
  502         }
  503         TAILQ_REMOVE(&event_list, ie, ie_list);
  504 #ifndef notyet
  505         if (ie->ie_thread != NULL) {
  506                 ithread_destroy(ie->ie_thread);
  507                 ie->ie_thread = NULL;
  508         }
  509 #endif
  510         mtx_unlock(&ie->ie_lock);
  511         mtx_unlock(&event_lock);
  512         mtx_destroy(&ie->ie_lock);
  513         free(ie, M_ITHREAD);
  514         return (0);
  515 }
  516 
  517 #ifndef INTR_FILTER
  518 static struct intr_thread *
  519 ithread_create(const char *name)
  520 {
  521         struct intr_thread *ithd;
  522         struct thread *td;
  523         int error;
  524 
  525         ithd = malloc(sizeof(struct intr_thread), M_ITHREAD, M_WAITOK | M_ZERO);
  526 
  527         error = kproc_kthread_add(ithread_loop, ithd, &intrproc,
  528                     &td, RFSTOPPED | RFHIGHPID,
  529                     0, "intr", "%s", name);
  530         if (error)
   531                 panic("kproc_kthread_add() failed with %d", error);
  532         thread_lock(td);
  533         sched_class(td, PRI_ITHD);
  534         TD_SET_IWAIT(td);
  535         thread_unlock(td);
  536         td->td_pflags |= TDP_ITHREAD;
  537         ithd->it_thread = td;
  538         CTR2(KTR_INTR, "%s: created %s", __func__, name);
  539         return (ithd);
  540 }
  541 #else
  542 static struct intr_thread *
  543 ithread_create(const char *name, struct intr_handler *ih)
  544 {
  545         struct intr_thread *ithd;
  546         struct thread *td;
  547         int error;
  548 
  549         ithd = malloc(sizeof(struct intr_thread), M_ITHREAD, M_WAITOK | M_ZERO);
  550 
  551         error = kproc_kthread_add(ithread_loop, ih, &intrproc,
  552                     &td, RFSTOPPED | RFHIGHPID,
  553                     0, "intr", "%s", name);
  554         if (error)
   555                 panic("kproc_kthread_add() failed with %d", error);
  556         thread_lock(td);
  557         sched_class(td, PRI_ITHD);
  558         TD_SET_IWAIT(td);
  559         thread_unlock(td);
  560         td->td_pflags |= TDP_ITHREAD;
  561         ithd->it_thread = td;
  562         CTR2(KTR_INTR, "%s: created %s", __func__, name);
  563         return (ithd);
  564 }
  565 #endif
  566 
  567 static void
  568 ithread_destroy(struct intr_thread *ithread)
  569 {
  570         struct thread *td;
  571 
  572         CTR2(KTR_INTR, "%s: killing %s", __func__, ithread->it_event->ie_name);
  573         td = ithread->it_thread;
  574         thread_lock(td);
  575         ithread->it_flags |= IT_DEAD;
  576         if (TD_AWAITING_INTR(td)) {
  577                 TD_CLR_IWAIT(td);
  578                 sched_add(td, SRQ_INTR);
  579         }
  580         thread_unlock(td);
  581 }
  582 
  583 #ifndef INTR_FILTER
  584 int
  585 intr_event_add_handler(struct intr_event *ie, const char *name,
  586     driver_filter_t filter, driver_intr_t handler, void *arg, u_char pri,
  587     enum intr_type flags, void **cookiep)
  588 {
  589         struct intr_handler *ih, *temp_ih;
  590         struct intr_thread *it;
  591 
  592         if (ie == NULL || name == NULL || (handler == NULL && filter == NULL))
  593                 return (EINVAL);
  594 
  595         /* Allocate and populate an interrupt handler structure. */
  596         ih = malloc(sizeof(struct intr_handler), M_ITHREAD, M_WAITOK | M_ZERO);
  597         ih->ih_filter = filter;
  598         ih->ih_handler = handler;
  599         ih->ih_argument = arg;
  600         strlcpy(ih->ih_name, name, sizeof(ih->ih_name));
  601         ih->ih_event = ie;
  602         ih->ih_pri = pri;
  603         if (flags & INTR_EXCL)
  604                 ih->ih_flags = IH_EXCLUSIVE;
  605         if (flags & INTR_MPSAFE)
  606                 ih->ih_flags |= IH_MPSAFE;
  607         if (flags & INTR_ENTROPY)
  608                 ih->ih_flags |= IH_ENTROPY;
  609 
   610         /* We can only have one exclusive handler in an event. */
  611         mtx_lock(&ie->ie_lock);
  612         if (!TAILQ_EMPTY(&ie->ie_handlers)) {
  613                 if ((flags & INTR_EXCL) ||
  614                     (TAILQ_FIRST(&ie->ie_handlers)->ih_flags & IH_EXCLUSIVE)) {
  615                         mtx_unlock(&ie->ie_lock);
  616                         free(ih, M_ITHREAD);
  617                         return (EINVAL);
  618                 }
  619         }
  620 
  621         /* Create a thread if we need one. */
  622         while (ie->ie_thread == NULL && handler != NULL) {
  623                 if (ie->ie_flags & IE_ADDING_THREAD)
  624                         msleep(ie, &ie->ie_lock, 0, "ithread", 0);
  625                 else {
  626                         ie->ie_flags |= IE_ADDING_THREAD;
  627                         mtx_unlock(&ie->ie_lock);
  628                         it = ithread_create("intr: newborn");
  629                         mtx_lock(&ie->ie_lock);
  630                         ie->ie_flags &= ~IE_ADDING_THREAD;
  631                         ie->ie_thread = it;
  632                         it->it_event = ie;
  633                         ithread_update(it);
  634                         wakeup(ie);
  635                 }
  636         }
  637 
  638         /* Add the new handler to the event in priority order. */
  639         TAILQ_FOREACH(temp_ih, &ie->ie_handlers, ih_next) {
  640                 if (temp_ih->ih_pri > ih->ih_pri)
  641                         break;
  642         }
  643         if (temp_ih == NULL)
  644                 TAILQ_INSERT_TAIL(&ie->ie_handlers, ih, ih_next);
  645         else
  646                 TAILQ_INSERT_BEFORE(temp_ih, ih, ih_next);
  647         intr_event_update(ie);
  648 
  649         CTR3(KTR_INTR, "%s: added %s to %s", __func__, ih->ih_name,
  650             ie->ie_name);
  651         mtx_unlock(&ie->ie_lock);
  652 
  653         if (cookiep != NULL)
  654                 *cookiep = ih;
  655         return (0);
  656 }
  657 #else
  658 int
  659 intr_event_add_handler(struct intr_event *ie, const char *name,
  660     driver_filter_t filter, driver_intr_t handler, void *arg, u_char pri,
  661     enum intr_type flags, void **cookiep)
  662 {
  663         struct intr_handler *ih, *temp_ih;
  664         struct intr_thread *it;
  665 
  666         if (ie == NULL || name == NULL || (handler == NULL && filter == NULL))
  667                 return (EINVAL);
  668 
  669         /* Allocate and populate an interrupt handler structure. */
  670         ih = malloc(sizeof(struct intr_handler), M_ITHREAD, M_WAITOK | M_ZERO);
  671         ih->ih_filter = filter;
  672         ih->ih_handler = handler;
  673         ih->ih_argument = arg;
  674         strlcpy(ih->ih_name, name, sizeof(ih->ih_name));
  675         ih->ih_event = ie;
  676         ih->ih_pri = pri;
  677         if (flags & INTR_EXCL)
  678                 ih->ih_flags = IH_EXCLUSIVE;
  679         if (flags & INTR_MPSAFE)
  680                 ih->ih_flags |= IH_MPSAFE;
  681         if (flags & INTR_ENTROPY)
  682                 ih->ih_flags |= IH_ENTROPY;
  683 
   684         /* We can only have one exclusive handler in an event. */
  685         mtx_lock(&ie->ie_lock);
  686         if (!TAILQ_EMPTY(&ie->ie_handlers)) {
  687                 if ((flags & INTR_EXCL) ||
  688                     (TAILQ_FIRST(&ie->ie_handlers)->ih_flags & IH_EXCLUSIVE)) {
  689                         mtx_unlock(&ie->ie_lock);
  690                         free(ih, M_ITHREAD);
  691                         return (EINVAL);
  692                 }
  693         }
  694 
  695         /* For filtered handlers, create a private ithread to run on. */
  696         if (filter != NULL && handler != NULL) {
  697                 mtx_unlock(&ie->ie_lock);
  698                 it = ithread_create("intr: newborn", ih);
  699                 mtx_lock(&ie->ie_lock);
  700                 it->it_event = ie;
  701                 ih->ih_thread = it;
  702                 ithread_update(it); /* XXX - do we really need this?!?!? */
  703         } else { /* Create the global per-event thread if we need one. */
  704                 while (ie->ie_thread == NULL && handler != NULL) {
  705                         if (ie->ie_flags & IE_ADDING_THREAD)
  706                                 msleep(ie, &ie->ie_lock, 0, "ithread", 0);
  707                         else {
  708                                 ie->ie_flags |= IE_ADDING_THREAD;
  709                                 mtx_unlock(&ie->ie_lock);
  710                                 it = ithread_create("intr: newborn", ih);
  711                                 mtx_lock(&ie->ie_lock);
  712                                 ie->ie_flags &= ~IE_ADDING_THREAD;
  713                                 ie->ie_thread = it;
  714                                 it->it_event = ie;
  715                                 ithread_update(it);
  716                                 wakeup(ie);
  717                         }
  718                 }
  719         }
  720 
  721         /* Add the new handler to the event in priority order. */
  722         TAILQ_FOREACH(temp_ih, &ie->ie_handlers, ih_next) {
  723                 if (temp_ih->ih_pri > ih->ih_pri)
  724                         break;
  725         }
  726         if (temp_ih == NULL)
  727                 TAILQ_INSERT_TAIL(&ie->ie_handlers, ih, ih_next);
  728         else
  729                 TAILQ_INSERT_BEFORE(temp_ih, ih, ih_next);
  730         intr_event_update(ie);
  731 
  732         CTR3(KTR_INTR, "%s: added %s to %s", __func__, ih->ih_name,
  733             ie->ie_name);
  734         mtx_unlock(&ie->ie_lock);
  735 
  736         if (cookiep != NULL)
  737                 *cookiep = ih;
  738         return (0);
  739 }
  740 #endif
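       /*
        * Sketch of the usual entry point (hypothetical names): a driver's
        * bus_setup_intr(9) call, e.g.
        *
        *      error = bus_setup_intr(dev, res, INTR_TYPE_NET | INTR_MPSAFE,
        *          my_filter, my_handler, sc, &sc->intr_cookie);
        *
        * funnels down to intr_event_add_handler(), with the opaque
        * struct intr_handler pointer handed back through cookiep.
        */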
  741 
  742 /*
  743  * Append a description preceded by a ':' to the name of the specified
  744  * interrupt handler.
  745  */
  746 int
  747 intr_event_describe_handler(struct intr_event *ie, void *cookie,
  748     const char *descr)
  749 {
  750         struct intr_handler *ih;
  751         size_t space;
  752         char *start;
  753 
  754         mtx_lock(&ie->ie_lock);
  755 #ifdef INVARIANTS
  756         TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next) {
  757                 if (ih == cookie)
  758                         break;
  759         }
  760         if (ih == NULL) {
  761                 mtx_unlock(&ie->ie_lock);
  762                 panic("handler %p not found in interrupt event %p", cookie, ie);
  763         }
  764 #endif
  765         ih = cookie;
  766 
  767         /*
  768          * Look for an existing description by checking for an
  769          * existing ":".  This assumes device names do not include
  770          * colons.  If one is found, prepare to insert the new
  771          * description at that point.  If one is not found, find the
  772          * end of the name to use as the insertion point.
  773          */
  774         start = strchr(ih->ih_name, ':');
  775         if (start == NULL)
  776                 start = strchr(ih->ih_name, 0);
  777 
  778         /*
  779          * See if there is enough remaining room in the string for the
  780          * description + ":".  The "- 1" leaves room for the trailing
  781          * '\0'.  The "+ 1" accounts for the colon.
  782          */
  783         space = sizeof(ih->ih_name) - (start - ih->ih_name) - 1;
  784         if (strlen(descr) + 1 > space) {
  785                 mtx_unlock(&ie->ie_lock);
  786                 return (ENOSPC);
  787         }
  788 
  789         /* Append a colon followed by the description. */
  790         *start = ':';
  791         strcpy(start + 1, descr);
  792         intr_event_update(ie);
  793         mtx_unlock(&ie->ie_lock);
  794         return (0);
  795 }
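       /*
        * Illustrative caller (assumed): bus_describe_intr(9) forwards
        * here, e.g. for a per-queue MSI-X vector:
        *
        *      bus_describe_intr(dev, res, sc->intr_cookie, "rxq%d", qid);
        *
        * giving "driver0:rxq0"-style names in vmstat -i output and in the
        * ithread names shown by top.
        */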
  796 
  797 /*
  798  * Return the ie_source field from the intr_event an intr_handler is
  799  * associated with.
  800  */
  801 void *
  802 intr_handler_source(void *cookie)
  803 {
  804         struct intr_handler *ih;
  805         struct intr_event *ie;
  806 
  807         ih = (struct intr_handler *)cookie;
  808         if (ih == NULL)
  809                 return (NULL);
  810         ie = ih->ih_event;
  811         KASSERT(ie != NULL,
  812             ("interrupt handler \"%s\" has a NULL interrupt event",
  813             ih->ih_name));
  814         return (ie->ie_source);
  815 }
  816 
  817 /*
  818  * Sleep until an ithread finishes executing an interrupt handler.
  819  *
  820  * XXX Doesn't currently handle interrupt filters or fast interrupt
  821  * handlers.  This is intended for compatibility with linux drivers
  822  * only.  Do not use in BSD code.
  823  */
  824 void
  825 _intr_drain(int irq)
  826 {
  827         struct intr_event *ie;
  828         struct intr_thread *ithd;
  829         struct thread *td;
  830 
  831         ie = intr_lookup(irq);
  832         if (ie == NULL)
  833                 return;
  834         if (ie->ie_thread == NULL)
  835                 return;
  836         ithd = ie->ie_thread;
  837         td = ithd->it_thread;
  838         /*
  839          * We set the flag and wait for it to be cleared to avoid
  840          * long delays with potentially busy interrupt handlers
  841          * were we to only sample TD_AWAITING_INTR() every tick.
  842          */
  843         thread_lock(td);
  844         if (!TD_AWAITING_INTR(td)) {
  845                 ithd->it_flags |= IT_WAIT;
  846                 while (ithd->it_flags & IT_WAIT) {
  847                         thread_unlock(td);
  848                         pause("idrain", 1);
  849                         thread_lock(td);
  850                 }
  851         }
  852         thread_unlock(td);
  853         return;
  854 }
  855 
  856 
  857 #ifndef INTR_FILTER
  858 int
  859 intr_event_remove_handler(void *cookie)
  860 {
  861         struct intr_handler *handler = (struct intr_handler *)cookie;
  862         struct intr_event *ie;
  863 #ifdef INVARIANTS
  864         struct intr_handler *ih;
  865 #endif
  866 #ifdef notyet
  867         int dead;
  868 #endif
  869 
  870         if (handler == NULL)
  871                 return (EINVAL);
  872         ie = handler->ih_event;
  873         KASSERT(ie != NULL,
  874             ("interrupt handler \"%s\" has a NULL interrupt event",
  875             handler->ih_name));
  876         mtx_lock(&ie->ie_lock);
  877         CTR3(KTR_INTR, "%s: removing %s from %s", __func__, handler->ih_name,
  878             ie->ie_name);
  879 #ifdef INVARIANTS
  880         TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next)
  881                 if (ih == handler)
  882                         goto ok;
  883         mtx_unlock(&ie->ie_lock);
  884         panic("interrupt handler \"%s\" not found in interrupt event \"%s\"",
   885             handler->ih_name, ie->ie_name);
  886 ok:
  887 #endif
  888         /*
  889          * If there is no ithread, then just remove the handler and return.
  890          * XXX: Note that an INTR_FAST handler might be running on another
  891          * CPU!
  892          */
  893         if (ie->ie_thread == NULL) {
  894                 TAILQ_REMOVE(&ie->ie_handlers, handler, ih_next);
  895                 mtx_unlock(&ie->ie_lock);
  896                 free(handler, M_ITHREAD);
  897                 return (0);
  898         }
  899 
  900         /*
  901          * If the interrupt thread is already running, then just mark this
  902          * handler as being dead and let the ithread do the actual removal.
  903          *
  904          * During a cold boot while cold is set, msleep() does not sleep,
  905          * so we have to remove the handler here rather than letting the
  906          * thread do it.
  907          */
  908         thread_lock(ie->ie_thread->it_thread);
  909         if (!TD_AWAITING_INTR(ie->ie_thread->it_thread) && !cold) {
  910                 handler->ih_flags |= IH_DEAD;
  911 
  912                 /*
  913                  * Ensure that the thread will process the handler list
  914                  * again and remove this handler if it has already passed
  915                  * it on the list.
  916                  *
  917                  * The release part of the following store ensures
  918                  * that the update of ih_flags is ordered before the
  919                  * it_need setting.  See the comment before
  920                  * atomic_cmpset_acq(&ithd->it_need, ...) operation in
  921                  * the ithread_execute_handlers().
  922                  */
  923                 atomic_store_rel_int(&ie->ie_thread->it_need, 1);
  924         } else
  925                 TAILQ_REMOVE(&ie->ie_handlers, handler, ih_next);
  926         thread_unlock(ie->ie_thread->it_thread);
  927         while (handler->ih_flags & IH_DEAD)
  928                 msleep(handler, &ie->ie_lock, 0, "iev_rmh", 0);
  929         intr_event_update(ie);
  930 #ifdef notyet
  931         /*
   932          * XXX: This could be bad in the case of ppbus(4).  Also, I think
  933          * this could lead to races of stale data when servicing an
  934          * interrupt.
  935          */
  936         dead = 1;
  937         TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next) {
  938                 if (!(ih->ih_flags & IH_FAST)) {
  939                         dead = 0;
  940                         break;
  941                 }
  942         }
  943         if (dead) {
  944                 ithread_destroy(ie->ie_thread);
  945                 ie->ie_thread = NULL;
  946         }
  947 #endif
  948         mtx_unlock(&ie->ie_lock);
  949         free(handler, M_ITHREAD);
  950         return (0);
  951 }
  952 
  953 static int
  954 intr_event_schedule_thread(struct intr_event *ie)
  955 {
  956         struct intr_entropy entropy;
  957         struct intr_thread *it;
  958         struct thread *td;
  959         struct thread *ctd;
  960         struct proc *p;
  961 
  962         /*
  963          * If no ithread or no handlers, then we have a stray interrupt.
  964          */
  965         if (ie == NULL || TAILQ_EMPTY(&ie->ie_handlers) ||
  966             ie->ie_thread == NULL)
  967                 return (EINVAL);
  968 
  969         ctd = curthread;
  970         it = ie->ie_thread;
  971         td = it->it_thread;
  972         p = td->td_proc;
  973 
  974         /*
  975          * If any of the handlers for this ithread claim to be good
  976          * sources of entropy, then gather some.
  977          */
  978         if (ie->ie_flags & IE_ENTROPY) {
  979                 entropy.event = (uintptr_t)ie;
  980                 entropy.td = ctd;
  981                 random_harvest_queue(&entropy, sizeof(entropy), 2, RANDOM_INTERRUPT);
  982         }
  983 
  984         KASSERT(p != NULL, ("ithread %s has no process", ie->ie_name));
  985 
  986         /*
  987          * Set it_need to tell the thread to keep running if it is already
  988          * running.  Then, lock the thread and see if we actually need to
  989          * put it on the runqueue.
  990          *
  991          * Use store_rel to arrange that the store to ih_need in
  992          * swi_sched() is before the store to it_need and prepare for
  993          * transfer of this order to loads in the ithread.
  994          */
  995         atomic_store_rel_int(&it->it_need, 1);
  996         thread_lock(td);
  997         if (TD_AWAITING_INTR(td)) {
  998                 CTR3(KTR_INTR, "%s: schedule pid %d (%s)", __func__, p->p_pid,
  999                     td->td_name);
 1000                 TD_CLR_IWAIT(td);
 1001                 sched_add(td, SRQ_INTR);
 1002         } else {
 1003                 CTR5(KTR_INTR, "%s: pid %d (%s): it_need %d, state %d",
 1004                     __func__, p->p_pid, td->td_name, it->it_need, td->td_state);
 1005         }
 1006         thread_unlock(td);
 1007 
 1008         return (0);
 1009 }
 1010 #else
 1011 int
 1012 intr_event_remove_handler(void *cookie)
 1013 {
 1014         struct intr_handler *handler = (struct intr_handler *)cookie;
 1015         struct intr_event *ie;
 1016         struct intr_thread *it;
 1017 #ifdef INVARIANTS
 1018         struct intr_handler *ih;
 1019 #endif
 1020 #ifdef notyet
 1021         int dead;
 1022 #endif
 1023 
 1024         if (handler == NULL)
 1025                 return (EINVAL);
 1026         ie = handler->ih_event;
 1027         KASSERT(ie != NULL,
 1028             ("interrupt handler \"%s\" has a NULL interrupt event",
 1029             handler->ih_name));
 1030         mtx_lock(&ie->ie_lock);
 1031         CTR3(KTR_INTR, "%s: removing %s from %s", __func__, handler->ih_name,
 1032             ie->ie_name);
 1033 #ifdef INVARIANTS
 1034         TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next)
 1035                 if (ih == handler)
 1036                         goto ok;
 1037         mtx_unlock(&ie->ie_lock);
 1038         panic("interrupt handler \"%s\" not found in interrupt event \"%s\"",
  1039             handler->ih_name, ie->ie_name);
 1040 ok:
 1041 #endif
 1042         /*
 1043          * If there are no ithreads (per event and per handler), then
 1044          * just remove the handler and return.  
 1045          * XXX: Note that an INTR_FAST handler might be running on another CPU!
 1046          */
 1047         if (ie->ie_thread == NULL && handler->ih_thread == NULL) {
 1048                 TAILQ_REMOVE(&ie->ie_handlers, handler, ih_next);
 1049                 mtx_unlock(&ie->ie_lock);
 1050                 free(handler, M_ITHREAD);
 1051                 return (0);
 1052         }
 1053 
 1054         /* Private or global ithread? */
 1055         it = (handler->ih_thread) ? handler->ih_thread : ie->ie_thread;
 1056         /*
 1057          * If the interrupt thread is already running, then just mark this
 1058          * handler as being dead and let the ithread do the actual removal.
 1059          *
 1060          * During a cold boot while cold is set, msleep() does not sleep,
 1061          * so we have to remove the handler here rather than letting the
 1062          * thread do it.
 1063          */
 1064         thread_lock(it->it_thread);
 1065         if (!TD_AWAITING_INTR(it->it_thread) && !cold) {
 1066                 handler->ih_flags |= IH_DEAD;
 1067 
 1068                 /*
 1069                  * Ensure that the thread will process the handler list
 1070                  * again and remove this handler if it has already passed
 1071                  * it on the list.
 1072                  *
 1073                  * The release part of the following store ensures
 1074                  * that the update of ih_flags is ordered before the
 1075                  * it_need setting.  See the comment before
 1076                  * atomic_cmpset_acq(&ithd->it_need, ...) operation in
 1077                  * the ithread_execute_handlers().
 1078                  */
 1079                 atomic_store_rel_int(&it->it_need, 1);
 1080         } else
 1081                 TAILQ_REMOVE(&ie->ie_handlers, handler, ih_next);
 1082         thread_unlock(it->it_thread);
 1083         while (handler->ih_flags & IH_DEAD)
 1084                 msleep(handler, &ie->ie_lock, 0, "iev_rmh", 0);
 1085         /* 
 1086          * At this point, the handler has been disconnected from the event,
 1087          * so we can kill the private ithread if any.
 1088          */
 1089         if (handler->ih_thread) {
 1090                 ithread_destroy(handler->ih_thread);
 1091                 handler->ih_thread = NULL;
 1092         }
 1093         intr_event_update(ie);
 1094 #ifdef notyet
 1095         /*
  1096          * XXX: This could be bad in the case of ppbus(4).  Also, I think
 1097          * this could lead to races of stale data when servicing an
 1098          * interrupt.
 1099          */
 1100         dead = 1;
 1101         TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next) {
  1102                 if (!(ih->ih_flags & IH_FAST)) {
 1103                         dead = 0;
 1104                         break;
 1105                 }
 1106         }
 1107         if (dead) {
 1108                 ithread_destroy(ie->ie_thread);
 1109                 ie->ie_thread = NULL;
 1110         }
 1111 #endif
 1112         mtx_unlock(&ie->ie_lock);
 1113         free(handler, M_ITHREAD);
 1114         return (0);
 1115 }
 1116 
 1117 static int
 1118 intr_event_schedule_thread(struct intr_event *ie, struct intr_thread *it)
 1119 {
 1120         struct intr_entropy entropy;
 1121         struct thread *td;
 1122         struct thread *ctd;
 1123         struct proc *p;
 1124 
 1125         /*
 1126          * If no ithread or no handlers, then we have a stray interrupt.
 1127          */
 1128         if (ie == NULL || TAILQ_EMPTY(&ie->ie_handlers) || it == NULL)
 1129                 return (EINVAL);
 1130 
 1131         ctd = curthread;
 1132         td = it->it_thread;
 1133         p = td->td_proc;
 1134 
 1135         /*
 1136          * If any of the handlers for this ithread claim to be good
 1137          * sources of entropy, then gather some.
 1138          */
 1139         if (ie->ie_flags & IE_ENTROPY) {
 1140                 entropy.event = (uintptr_t)ie;
 1141                 entropy.td = ctd;
 1142                 random_harvest_queue(&entropy, sizeof(entropy), 2, RANDOM_INTERRUPT);
 1143         }
 1144 
 1145         KASSERT(p != NULL, ("ithread %s has no process", ie->ie_name));
 1146 
 1147         /*
 1148          * Set it_need to tell the thread to keep running if it is already
 1149          * running.  Then, lock the thread and see if we actually need to
 1150          * put it on the runqueue.
 1151          *
 1152          * Use store_rel to arrange that the store to ih_need in
 1153          * swi_sched() is before the store to it_need and prepare for
 1154          * transfer of this order to loads in the ithread.
 1155          */
 1156         atomic_store_rel_int(&it->it_need, 1);
 1157         thread_lock(td);
 1158         if (TD_AWAITING_INTR(td)) {
 1159                 CTR3(KTR_INTR, "%s: schedule pid %d (%s)", __func__, p->p_pid,
 1160                     td->td_name);
 1161                 TD_CLR_IWAIT(td);
 1162                 sched_add(td, SRQ_INTR);
 1163         } else {
 1164                 CTR5(KTR_INTR, "%s: pid %d (%s): it_need %d, state %d",
 1165                     __func__, p->p_pid, td->td_name, it->it_need, td->td_state);
 1166         }
 1167         thread_unlock(td);
 1168 
 1169         return (0);
 1170 }
 1171 #endif
 1172 
 1173 /*
 1174  * Allow interrupt event binding for software interrupt handlers -- a no-op,
 1175  * since interrupts are generated in software rather than being directed by
 1176  * a PIC.
 1177  */
 1178 static int
 1179 swi_assign_cpu(void *arg, int cpu)
 1180 {
 1181 
 1182         return (0);
 1183 }
 1184 
 1185 /*
 1186  * Add a software interrupt handler to a specified event.  If a given event
 1187  * is not specified, then a new event is created.
 1188  */
 1189 int
 1190 swi_add(struct intr_event **eventp, const char *name, driver_intr_t handler,
 1191             void *arg, int pri, enum intr_type flags, void **cookiep)
 1192 {
 1193         struct intr_event *ie;
 1194         int error;
 1195 
 1196         if (flags & INTR_ENTROPY)
 1197                 return (EINVAL);
 1198 
 1199         ie = (eventp != NULL) ? *eventp : NULL;
 1200 
 1201         if (ie != NULL) {
 1202                 if (!(ie->ie_flags & IE_SOFT))
 1203                         return (EINVAL);
 1204         } else {
 1205                 error = intr_event_create(&ie, NULL, IE_SOFT, 0,
 1206                     NULL, NULL, NULL, swi_assign_cpu, "swi%d:", pri);
 1207                 if (error)
 1208                         return (error);
 1209                 if (eventp != NULL)
 1210                         *eventp = ie;
 1211         }
 1212         error = intr_event_add_handler(ie, name, NULL, handler, arg,
 1213             PI_SWI(pri), flags, cookiep);
 1214         return (error);
 1215 }
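       /*
        * Minimal sketch (hypothetical names): registering a software
        * interrupt handler with a freshly created event:
        *
        *      static void *my_swi_cookie;
        *
        *      swi_add(NULL, "myswi", my_swi_handler, NULL, SWI_CLOCK, 0,
        *          &my_swi_cookie);
        *
        * The new event is named "swi<pri>:" and intr_event_update()
        * appends "myswi" to it.
        */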
 1216 
 1217 /*
 1218  * Schedule a software interrupt thread.
 1219  */
 1220 void
 1221 swi_sched(void *cookie, int flags)
 1222 {
 1223         struct intr_handler *ih = (struct intr_handler *)cookie;
 1224         struct intr_event *ie = ih->ih_event;
 1225         struct intr_entropy entropy;
 1226         int error;
 1227 
 1228         CTR3(KTR_INTR, "swi_sched: %s %s need=%d", ie->ie_name, ih->ih_name,
 1229             ih->ih_need);
 1230 
 1231         entropy.event = (uintptr_t)ih;
 1232         entropy.td = curthread;
 1233         random_harvest_queue(&entropy, sizeof(entropy), 1, RANDOM_SWI);
 1234 
 1235         /*
 1236          * Set ih_need for this handler so that if the ithread is already
 1237          * running it will execute this handler on the next pass.  Otherwise,
 1238          * it will execute it the next time it runs.
 1239          */
 1240         ih->ih_need = 1;
 1241 
 1242         if (!(flags & SWI_DELAY)) {
 1243                 PCPU_INC(cnt.v_soft);
 1244 #ifdef INTR_FILTER
 1245                 error = intr_event_schedule_thread(ie, ie->ie_thread);
 1246 #else
 1247                 error = intr_event_schedule_thread(ie);
 1248 #endif
 1249                 KASSERT(error == 0, ("stray software interrupt"));
 1250         }
 1251 }
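       /*
        * Continuing the sketch above swi_add(): once registered, the
        * handler is kicked from any context with
        *
        *      swi_sched(my_swi_cookie, 0);
        *
        * Passing SWI_DELAY instead only marks ih_need, deferring the
        * handler until the ithread runs for some other reason.
        */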
 1252 
 1253 /*
 1254  * Remove a software interrupt handler.  Currently this code does not
 1255  * remove the associated interrupt event if it becomes empty.  Calling code
 1256  * may do so manually via intr_event_destroy(), but that's not really
 1257  * an optimal interface.
 1258  */
 1259 int
 1260 swi_remove(void *cookie)
 1261 {
 1262 
 1263         return (intr_event_remove_handler(cookie));
 1264 }
 1265 
 1266 #ifdef INTR_FILTER
 1267 static void
 1268 priv_ithread_execute_handler(struct proc *p, struct intr_handler *ih)
 1269 {
 1270         struct intr_event *ie;
 1271 
 1272         ie = ih->ih_event;
 1273         /*
 1274          * If this handler is marked for death, remove it from
 1275          * the list of handlers and wake up the sleeper.
 1276          */
 1277         if (ih->ih_flags & IH_DEAD) {
 1278                 mtx_lock(&ie->ie_lock);
 1279                 TAILQ_REMOVE(&ie->ie_handlers, ih, ih_next);
 1280                 ih->ih_flags &= ~IH_DEAD;
 1281                 wakeup(ih);
 1282                 mtx_unlock(&ie->ie_lock);
 1283                 return;
 1284         }
 1285         
 1286         /* Execute this handler. */
 1287         CTR6(KTR_INTR, "%s: pid %d exec %p(%p) for %s flg=%x",
 1288              __func__, p->p_pid, (void *)ih->ih_handler, ih->ih_argument,
 1289              ih->ih_name, ih->ih_flags);
 1290         
 1291         if (!(ih->ih_flags & IH_MPSAFE))
 1292                 mtx_lock(&Giant);
 1293         ih->ih_handler(ih->ih_argument);
 1294         if (!(ih->ih_flags & IH_MPSAFE))
 1295                 mtx_unlock(&Giant);
 1296 }
 1297 #endif
 1298 
 1299 /*
 1300  * This is a public function for use by drivers that mux interrupt
 1301  * handlers for child devices from their interrupt handler.
 1302  */
 1303 void
 1304 intr_event_execute_handlers(struct proc *p, struct intr_event *ie)
 1305 {
 1306         struct intr_handler *ih, *ihn;
 1307 
 1308         TAILQ_FOREACH_SAFE(ih, &ie->ie_handlers, ih_next, ihn) {
 1309                 /*
 1310                  * If this handler is marked for death, remove it from
 1311                  * the list of handlers and wake up the sleeper.
 1312                  */
 1313                 if (ih->ih_flags & IH_DEAD) {
 1314                         mtx_lock(&ie->ie_lock);
 1315                         TAILQ_REMOVE(&ie->ie_handlers, ih, ih_next);
 1316                         ih->ih_flags &= ~IH_DEAD;
 1317                         wakeup(ih);
 1318                         mtx_unlock(&ie->ie_lock);
 1319                         continue;
 1320                 }
 1321 
 1322                 /* Skip filter only handlers */
 1323                 if (ih->ih_handler == NULL)
 1324                         continue;
 1325 
 1326                 /*
 1327                  * For software interrupt threads, we only execute
 1328                  * handlers that have their need flag set.  Hardware
 1329                  * interrupt threads always invoke all of their handlers.
 1330                  *
 1331                  * ih_need can only be 0 or 1.  Failed cmpset below
 1332                  * means that there is no request to execute handlers,
 1333                  * so a retry of the cmpset is not needed.
 1334                  */
 1335                 if ((ie->ie_flags & IE_SOFT) != 0 &&
 1336                     atomic_cmpset_int(&ih->ih_need, 1, 0) == 0)
 1337                         continue;
 1338 
 1339                 /* Execute this handler. */
 1340                 CTR6(KTR_INTR, "%s: pid %d exec %p(%p) for %s flg=%x",
 1341                     __func__, p->p_pid, (void *)ih->ih_handler, 
 1342                     ih->ih_argument, ih->ih_name, ih->ih_flags);
 1343 
 1344                 if (!(ih->ih_flags & IH_MPSAFE))
 1345                         mtx_lock(&Giant);
 1346                 ih->ih_handler(ih->ih_argument);
 1347                 if (!(ih->ih_flags & IH_MPSAFE))
 1348                         mtx_unlock(&Giant);
 1349         }
 1350 }
 1351 
 1352 static void
 1353 ithread_execute_handlers(struct proc *p, struct intr_event *ie)
 1354 {
 1355 
 1356         /* Interrupt handlers should not sleep. */
 1357         if (!(ie->ie_flags & IE_SOFT))
 1358                 THREAD_NO_SLEEPING();
 1359         intr_event_execute_handlers(p, ie);
 1360         if (!(ie->ie_flags & IE_SOFT))
 1361                 THREAD_SLEEPING_OK();
 1362 
 1363         /*
 1364          * Interrupt storm handling:
 1365          *
 1366          * If this interrupt source is currently storming, then throttle
  1367          * it to only fire the handler once per clock tick.
 1368          *
 1369          * If this interrupt source is not currently storming, but the
 1370          * number of back to back interrupts exceeds the storm threshold,
 1371          * then enter storming mode.
 1372          */
 1373         if (intr_storm_threshold != 0 && ie->ie_count >= intr_storm_threshold &&
 1374             !(ie->ie_flags & IE_SOFT)) {
 1375                 /* Report the message only once every second. */
 1376                 if (ppsratecheck(&ie->ie_warntm, &ie->ie_warncnt, 1)) {
 1377                         printf(
 1378         "interrupt storm detected on \"%s\"; throttling interrupt source\n",
 1379                             ie->ie_name);
 1380                 }
 1381                 pause("istorm", 1);
 1382         } else
 1383                 ie->ie_count++;
 1384 
 1385         /*
 1386          * Now that all the handlers have had a chance to run, reenable
 1387          * the interrupt source.
 1388          */
 1389         if (ie->ie_post_ithread != NULL)
 1390                 ie->ie_post_ithread(ie->ie_source);
 1391 }
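       /*
        * Note on the throttle above: pause("istorm", 1) sleeps for one
        * clock tick, so a storming source is serviced at most hz times
        * per second.  ie_count is reset when the ithread goes idle in
        * ithread_loop(), ending storm mode once the source quiets down.
        */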
 1392 
 1393 #ifndef INTR_FILTER
 1394 /*
 1395  * This is the main code for interrupt threads.
 1396  */
 1397 static void
 1398 ithread_loop(void *arg)
 1399 {
 1400         struct intr_thread *ithd;
 1401         struct intr_event *ie;
 1402         struct thread *td;
 1403         struct proc *p;
 1404         int wake;
 1405 
 1406         td = curthread;
 1407         p = td->td_proc;
 1408         ithd = (struct intr_thread *)arg;
 1409         KASSERT(ithd->it_thread == td,
 1410             ("%s: ithread and proc linkage out of sync", __func__));
 1411         ie = ithd->it_event;
 1412         ie->ie_count = 0;
 1413         wake = 0;
 1414 
 1415         /*
 1416          * As long as we have interrupts outstanding, go through the
 1417          * list of handlers, giving each one a go at it.
 1418          */
 1419         for (;;) {
 1420                 /*
 1421                  * If we are an orphaned thread, then just die.
 1422                  */
 1423                 if (ithd->it_flags & IT_DEAD) {
 1424                         CTR3(KTR_INTR, "%s: pid %d (%s) exiting", __func__,
 1425                             p->p_pid, td->td_name);
 1426                         free(ithd, M_ITHREAD);
 1427                         kthread_exit();
 1428                 }
 1429 
 1430                 /*
 1431                  * Service interrupts.  If another interrupt arrives while
 1432                  * we are running, it will set it_need to note that we
 1433                  * should make another pass.
 1434                  *
 1435                  * The load_acq part of the following cmpset ensures
 1436                  * that the load of ih_need in ithread_execute_handlers()
 1437                  * is ordered after the load of it_need here.
 1438                  */
 1439                 while (atomic_cmpset_acq_int(&ithd->it_need, 1, 0) != 0)
 1440                         ithread_execute_handlers(p, ie);
 1441                 WITNESS_WARN(WARN_PANIC, NULL, "suspending ithread");
 1442                 mtx_assert(&Giant, MA_NOTOWNED);
 1443 
 1444                 /*
 1445                  * Processed all our interrupts.  Now get the sched
 1446                  * lock.  This may take a while and it_need may get
 1447                  * set again, so we have to check it again.
 1448                  */
 1449                 thread_lock(td);
 1450                 if (atomic_load_acq_int(&ithd->it_need) == 0 &&
 1451                     (ithd->it_flags & (IT_DEAD | IT_WAIT)) == 0) {
 1452                         TD_SET_IWAIT(td);
 1453                         ie->ie_count = 0;
 1454                         mi_switch(SW_VOL | SWT_IWAIT, NULL);
 1455                 }
 1456                 if (ithd->it_flags & IT_WAIT) {
 1457                         wake = 1;
 1458                         ithd->it_flags &= ~IT_WAIT;
 1459                 }
 1460                 thread_unlock(td);
 1461                 if (wake) {
 1462                         wakeup(ithd);
 1463                         wake = 0;
 1464                 }
 1465         }
 1466 }
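/*
 * Illustrative aside (not part of the original file): the acquire
 * cmpset in the loop above pairs with a release store on the producer
 * side.  intr_event_schedule_thread() (earlier in this file) posts new
 * work with roughly this shape, given here as a hedged sketch:
 *
 *      atomic_store_rel_int(&it->it_need, 1);
 *      thread_lock(td);
 *      if (TD_AWAITING_INTR(td)) {
 *              // Take the ithread out of IWAIT and enqueue it.
 *              TD_CLR_IWAIT(td);
 *              sched_add(td, SRQ_INTR);
 *      }
 *      thread_unlock(td);
 *
 * The release store makes every write performed before posting (such
 * as setting ih_need on individual handlers) visible to the ithread
 * once its acquire cmpset observes it_need == 1.
 */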
 1467 
 1468 /*
 1469  * Main interrupt handling body.
 1470  *
 1471  * Input:
 1472  * o ie:                        the event connected to this interrupt.
 1473  * o frame:                     some archs (e.g., i386) pass a frame to some
 1474  *                              handlers as their main argument.
 1475  * Return value:
 1476  * o 0:                         everything ok.
 1477  * o EINVAL:                    stray interrupt.
 1478  */
 1479 int
 1480 intr_event_handle(struct intr_event *ie, struct trapframe *frame)
 1481 {
 1482         struct intr_handler *ih;
 1483         struct trapframe *oldframe;
 1484         struct thread *td;
 1485         int error, ret, thread;
 1486 
 1487         td = curthread;
 1488 
 1489 #ifdef KSTACK_USAGE_PROF
 1490         intr_prof_stack_use(td, frame);
 1491 #endif
 1492 
 1493         /* An interrupt with no event or handlers is a stray interrupt. */
 1494         if (ie == NULL || TAILQ_EMPTY(&ie->ie_handlers))
 1495                 return (EINVAL);
 1496 
 1497         /*
 1498          * Execute fast interrupt handlers directly.
 1499          * To support clock handlers, if a handler registers
 1500          * with a NULL argument, then we pass it a pointer to
 1501          * a trapframe as its argument.
 1502          */
 1503         td->td_intr_nesting_level++;
 1504         thread = 0;
 1505         ret = 0;
 1506         critical_enter();
 1507         oldframe = td->td_intr_frame;
 1508         td->td_intr_frame = frame;
 1509         TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next) {
 1510                 if (ih->ih_filter == NULL) {
 1511                         thread = 1;
 1512                         continue;
 1513                 }
 1514                 CTR4(KTR_INTR, "%s: exec %p(%p) for %s", __func__,
 1515                     ih->ih_filter, ih->ih_argument == NULL ? frame :
 1516                     ih->ih_argument, ih->ih_name);
 1517                 if (ih->ih_argument == NULL)
 1518                         ret = ih->ih_filter(frame);
 1519                 else
 1520                         ret = ih->ih_filter(ih->ih_argument);
 1521                 KASSERT(ret == FILTER_STRAY ||
 1522                     ((ret & (FILTER_SCHEDULE_THREAD | FILTER_HANDLED)) != 0 &&
 1523                     (ret & ~(FILTER_SCHEDULE_THREAD | FILTER_HANDLED)) == 0),
 1524                     ("%s: incorrect return value %#x from %s", __func__, ret,
 1525                     ih->ih_name));
 1526 
 1527                 /*
 1528                  * Wrapper handler special handling:
 1529                  *
 1530                  * In some particular cases (such as pccard and pccbb),
 1531                  * the _real_ device handler is wrapped in a pair of
 1532                  * functions: a filter wrapper and an ithread wrapper.
 1533                  * In this case (and only in this case), the filter wrapper
 1534                  * may ask the system to schedule the ithread and mask the
 1535                  * interrupt source if the wrapped handler is composed of
 1536                  * just an ithread handler; a sketch follows this function.
 1537                  *
 1538                  * TODO: write a generic wrapper to avoid people rolling
 1539                  * their own.
 1540                  */
 1541                 if (!thread) {
 1542                         if (ret == FILTER_SCHEDULE_THREAD)
 1543                                 thread = 1;
 1544                 }
 1545         }
 1546         td->td_intr_frame = oldframe;
 1547 
 1548         if (thread) {
 1549                 if (ie->ie_pre_ithread != NULL)
 1550                         ie->ie_pre_ithread(ie->ie_source);
 1551         } else {
 1552                 if (ie->ie_post_filter != NULL)
 1553                         ie->ie_post_filter(ie->ie_source);
 1554         }
 1555         
 1556         /* Schedule the ithread if needed. */
 1557         if (thread) {
 1558                 error = intr_event_schedule_thread(ie);
 1559                 KASSERT(error == 0, ("bad stray interrupt"));
 1560         }
 1561         critical_exit();
 1562         td->td_intr_nesting_level--;
 1563         return (0);
 1564 }
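/*
 * Illustrative aside (not part of the original file): the wrapper case
 * described in the loop above amounts to a filter that claims the line
 * and explicitly requests the ithread.  A hedged sketch; every name
 * below is invented for illustration:
 *
 *      static int
 *      bridge_filter_wrapper(void *arg)
 *      {
 *              struct bridge_softc *bsc = arg;
 *
 *              if (!bridge_intr_pending(bsc))
 *                      return (FILTER_STRAY);
 *              // The wrapped child handler has no filter half, so mask
 *              // the source and hand the rest to the ithread.
 *              bridge_mask_intr(bsc);
 *              return (FILTER_SCHEDULE_THREAD);
 *      }
 *
 * Returning FILTER_SCHEDULE_THREAD from the filter is exactly what the
 * if (!thread) check above folds into the thread flag.
 */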
 1565 #else
 1566 /*
 1567  * This is the main code for interrupt threads.
 1568  */
 1569 static void
 1570 ithread_loop(void *arg)
 1571 {
 1572         struct intr_thread *ithd;
 1573         struct intr_handler *ih;
 1574         struct intr_event *ie;
 1575         struct thread *td;
 1576         struct proc *p;
 1577         int priv;
 1578         int wake;
 1579 
 1580         td = curthread;
 1581         p = td->td_proc;
 1582         ih = (struct intr_handler *)arg;
 1583         priv = (ih->ih_thread != NULL) ? 1 : 0;
 1584         ithd = (priv) ? ih->ih_thread : ih->ih_event->ie_thread;
 1585         KASSERT(ithd->it_thread == td,
 1586             ("%s: ithread and proc linkage out of sync", __func__));
 1587         ie = ithd->it_event;
 1588         ie->ie_count = 0;
 1589         wake = 0;
 1590 
 1591         /*
 1592          * As long as we have interrupts outstanding, go through the
 1593          * list of handlers, giving each one a go at it.
 1594          */
 1595         for (;;) {
 1596                 /*
 1597                  * If we are an orphaned thread, then just die.
 1598                  */
 1599                 if (ithd->it_flags & IT_DEAD) {
 1600                         CTR3(KTR_INTR, "%s: pid %d (%s) exiting", __func__,
 1601                             p->p_pid, td->td_name);
 1602                         free(ithd, M_ITHREAD);
 1603                         kthread_exit();
 1604                 }
 1605 
 1606                 /*
 1607                  * Service interrupts.  If another interrupt arrives while
 1608                  * we are running, it will set it_need to note that we
 1609                  * should make another pass.
 1610                  *
 1611                  * The load_acq part of the following cmpset ensures
 1612                  * that the load of ih_need in ithread_execute_handlers()
 1613                  * is ordered after the load of it_need here.
 1614                  */
 1615                 while (atomic_cmpset_acq_int(&ithd->it_need, 1, 0) != 0) {
 1616                         if (priv)
 1617                                 priv_ithread_execute_handler(p, ih);
 1618                         else 
 1619                                 ithread_execute_handlers(p, ie);
 1620                 }
 1621                 WITNESS_WARN(WARN_PANIC, NULL, "suspending ithread");
 1622                 mtx_assert(&Giant, MA_NOTOWNED);
 1623 
 1624                 /*
 1625                  * Processed all our interrupts.  Now get the sched
 1626                  * lock.  This may take a while and it_need may get
 1627                  * set again, so we have to check it again.
 1628                  */
 1629                 thread_lock(td);
 1630                 if (atomic_load_acq_int(&ithd->it_need) == 0 &&
 1631                     (ithd->it_flags & (IT_DEAD | IT_WAIT)) == 0) {
 1632                         TD_SET_IWAIT(td);
 1633                         ie->ie_count = 0;
 1634                         mi_switch(SW_VOL | SWT_IWAIT, NULL);
 1635                 }
 1636                 if (ithd->it_flags & IT_WAIT) {
 1637                         wake = 1;
 1638                         ithd->it_flags &= ~IT_WAIT;
 1639                 }
 1640                 thread_unlock(td);
 1641                 if (wake) {
 1642                         wakeup(ithd);
 1643                         wake = 0;
 1644                 }
 1645         }
 1646 }
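/*
 * Illustrative aside (not part of the original file): the priv case
 * above is what distinguishes this INTR_FILTER build.  A handler that
 * registered both a filter and an ithread body owns a private
 * intr_thread (ih->ih_thread), so this loop dispatches just that one
 * handler via priv_ithread_execute_handler() instead of walking the
 * whole event's handler list with ithread_execute_handlers().
 */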
 1647 
 1648 /* 
 1649  * Main loop for interrupt filter.
 1650  *
 1651  * Some architectures (i386, amd64 and arm) require the optional frame 
 1652  * parameter, and use it as the main argument for fast handler execution
 1653  * when ih_argument == NULL.
 1654  *
 1655  * Return value:
 1656  * o FILTER_STRAY:              No filter recognized the event, and no
 1657  *                              filter-less handler is registered on this 
 1658  *                              line.
 1659  * o FILTER_HANDLED:            A filter claimed the event and served it.
 1660  * o FILTER_SCHEDULE_THREAD:    No filter claimed the event, but there's at
 1661  *                              least one filter-less handler on this line.
 1662  * o FILTER_HANDLED | 
 1663  *   FILTER_SCHEDULE_THREAD:    A filter claimed the event and asked to
 1664  *                              schedule the per-handler ithread.
 1665  *
 1666  * If an ithread has to be scheduled, *ithd will be set to a
 1667  * pointer to the struct intr_thread describing the thread to be
 1668  * scheduled.
 1669  */
 1670 
 1671 static int
 1672 intr_filter_loop(struct intr_event *ie, struct trapframe *frame, 
 1673                  struct intr_thread **ithd) 
 1674 {
 1675         struct intr_handler *ih;
 1676         void *arg;
 1677         int ret, thread_only;
 1678 
 1679         ret = 0;
 1680         thread_only = 0;
 1681         TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next) {
 1682                 /*
 1683                  * Execute fast interrupt handlers directly.
 1684                  * To support clock handlers, if a handler registers
 1685                  * with a NULL argument, then we pass it a pointer to
 1686                  * a trapframe as its argument.
 1687                  */
 1688                 arg = ((ih->ih_argument == NULL) ? frame : ih->ih_argument);
 1689                 
 1690                 CTR5(KTR_INTR, "%s: exec %p/%p(%p) for %s", __func__,
 1691                      ih->ih_filter, ih->ih_handler, arg, ih->ih_name);
 1692 
 1693                 if (ih->ih_filter != NULL)
 1694                         ret = ih->ih_filter(arg);
 1695                 else {
 1696                         thread_only = 1;
 1697                         continue;
 1698                 }
 1699                 KASSERT(ret == FILTER_STRAY ||
 1700                     ((ret & (FILTER_SCHEDULE_THREAD | FILTER_HANDLED)) != 0 &&
 1701                     (ret & ~(FILTER_SCHEDULE_THREAD | FILTER_HANDLED)) == 0),
 1702                     ("%s: incorrect return value %#x from %s", __func__, ret,
 1703                     ih->ih_name));
 1704                 if (ret & FILTER_STRAY)
 1705                         continue;
 1706                 else { 
 1707                         *ithd = ih->ih_thread;
 1708                         return (ret);
 1709                 }
 1710         }
 1711 
 1712         /*
 1713          * No filters handled the interrupt and we have at least
 1714          * one handler without a filter.  In this case, we schedule
 1715          * all of the filter-less handlers to run in the ithread.
 1716          */     
 1717         if (thread_only) {
 1718                 *ithd = ie->ie_thread;
 1719                 return (FILTER_SCHEDULE_THREAD);
 1720         }
 1721         return (FILTER_STRAY);
 1722 }
 1723 
 1724 /*
 1725  * Main interrupt handling body.
 1726  *
 1727  * Input:
 1728  * o ie:                        the event connected to this interrupt.
 1729  * o frame:                     some archs (e.g., i386) pass a frame to some
 1730  *                              handlers as their main argument.
 1731  * Return value:
 1732  * o 0:                         everything ok.
 1733  * o EINVAL:                    stray interrupt.
 1734  */
 1735 int
 1736 intr_event_handle(struct intr_event *ie, struct trapframe *frame)
 1737 {
 1738         struct intr_thread *ithd;
 1739         struct trapframe *oldframe;
 1740         struct thread *td;
 1741         int thread;
 1742 
 1743         ithd = NULL;
 1744         td = curthread;
 1745 
 1746         if (ie == NULL || TAILQ_EMPTY(&ie->ie_handlers))
 1747                 return (EINVAL);
 1748 
 1749         td->td_intr_nesting_level++;
 1750         thread = 0;
 1751         critical_enter();
 1752         oldframe = td->td_intr_frame;
 1753         td->td_intr_frame = frame;
 1754         thread = intr_filter_loop(ie, frame, &ithd);    
 1755         if (thread & FILTER_HANDLED) {
 1756                 if (ie->ie_post_filter != NULL)
 1757                         ie->ie_post_filter(ie->ie_source);
 1758         } else {
 1759                 if (ie->ie_pre_ithread != NULL)
 1760                         ie->ie_pre_ithread(ie->ie_source);
 1761         }
 1762         td->td_intr_frame = oldframe;
 1763         critical_exit();
 1764         
 1765         /* Stray interrupt bookkeeping. */
 1766         if (thread & FILTER_STRAY) {
 1767                 ie->ie_count++;
 1768                 if (ie->ie_count < intr_storm_threshold)
 1769                         printf("Interrupt stray detection not present\n");
 1770         }
 1771 
 1772         /* Schedule an ithread if needed. */
 1773         if (thread & FILTER_SCHEDULE_THREAD) {
 1774                 if (intr_event_schedule_thread(ie, ithd) != 0)
 1775                         panic("%s: impossible stray interrupt", __func__);
 1776         }
 1777         td->td_intr_nesting_level--;
 1778         return (0);
 1779 }
 1780 #endif
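/*
 * Illustrative aside (not part of the original file): regardless of
 * which intr_event_handle() variant is compiled in, drivers reach it
 * by registering through bus_setup_intr(9).  A minimal hedged sketch
 * for a hypothetical foo(4) driver; the softc fields and FOO_*
 * accessors are invented:
 *
 *      static int
 *      foo_filter(void *arg)
 *      {
 *              struct foo_softc *sc = arg;
 *
 *              // Primary interrupt context: no sleeping, keep it short.
 *              if (!FOO_INTR_PENDING(sc))
 *                      return (FILTER_STRAY);
 *              FOO_MASK_INTR(sc);
 *              return (FILTER_SCHEDULE_THREAD);
 *      }
 *
 *      static void
 *      foo_intr(void *arg)
 *      {
 *              struct foo_softc *sc = arg;
 *
 *              // Ithread context: may take locks and do the real work.
 *              foo_process_completions(sc);
 *              FOO_UNMASK_INTR(sc);
 *      }
 *
 *      error = bus_setup_intr(dev, sc->sc_irq_res,
 *          INTR_TYPE_MISC | INTR_MPSAFE, foo_filter, foo_intr, sc,
 *          &sc->sc_intrhand);
 *      if (error != 0)
 *              device_printf(dev, "could not set up interrupt\n");
 */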
 1781 
 1782 #ifdef DDB
 1783 /*
 1784  * Dump details about an interrupt handler
 1785  */
 1786 static void
 1787 db_dump_intrhand(struct intr_handler *ih)
 1788 {
 1789         int comma;
 1790 
 1791         db_printf("\t%-10s ", ih->ih_name);
 1792         switch (ih->ih_pri) {
 1793         case PI_REALTIME:
 1794                 db_printf("CLK ");
 1795                 break;
 1796         case PI_AV:
 1797                 db_printf("AV  ");
 1798                 break;
 1799         case PI_TTY:
 1800                 db_printf("TTY ");
 1801                 break;
 1802         case PI_NET:
 1803                 db_printf("NET ");
 1804                 break;
 1805         case PI_DISK:
 1806                 db_printf("DISK");
 1807                 break;
 1808         case PI_DULL:
 1809                 db_printf("DULL");
 1810                 break;
 1811         default:
 1812                 if (ih->ih_pri >= PI_SOFT)
 1813                         db_printf("SWI ");
 1814                 else
 1815                         db_printf("%4u", ih->ih_pri);
 1816                 break;
 1817         }
 1818         db_printf(" ");
 1819         if (ih->ih_filter != NULL) {
 1820                 db_printf("[F]");
 1821                 db_printsym((uintptr_t)ih->ih_filter, DB_STGY_PROC);
 1822         }
 1823         if (ih->ih_handler != NULL) {
 1824                 if (ih->ih_filter != NULL)
 1825                         db_printf(",");
 1826                 db_printf("[H]");
 1827                 db_printsym((uintptr_t)ih->ih_handler, DB_STGY_PROC);
 1828         }
 1829         db_printf("(%p)", ih->ih_argument);
 1830         if (ih->ih_need ||
 1831             (ih->ih_flags & (IH_EXCLUSIVE | IH_ENTROPY | IH_DEAD |
 1832             IH_MPSAFE)) != 0) {
 1833                 db_printf(" {");
 1834                 comma = 0;
 1835                 if (ih->ih_flags & IH_EXCLUSIVE) {
 1836                         if (comma)
 1837                                 db_printf(", ");
 1838                         db_printf("EXCL");
 1839                         comma = 1;
 1840                 }
 1841                 if (ih->ih_flags & IH_ENTROPY) {
 1842                         if (comma)
 1843                                 db_printf(", ");
 1844                         db_printf("ENTROPY");
 1845                         comma = 1;
 1846                 }
 1847                 if (ih->ih_flags & IH_DEAD) {
 1848                         if (comma)
 1849                                 db_printf(", ");
 1850                         db_printf("DEAD");
 1851                         comma = 1;
 1852                 }
 1853                 if (ih->ih_flags & IH_MPSAFE) {
 1854                         if (comma)
 1855                                 db_printf(", ");
 1856                         db_printf("MPSAFE");
 1857                         comma = 1;
 1858                 }
 1859                 if (ih->ih_need) {
 1860                         if (comma)
 1861                                 db_printf(", ");
 1862                         db_printf("NEED");
 1863                 }
 1864                 db_printf("}");
 1865         }
 1866         db_printf("\n");
 1867 }
 1868 
 1869 /*
 1870  * Dump details about an event.
 1871  */
 1872 void
 1873 db_dump_intr_event(struct intr_event *ie, int handlers)
 1874 {
 1875         struct intr_handler *ih;
 1876         struct intr_thread *it;
 1877         int comma;
 1878 
 1879         db_printf("%s ", ie->ie_fullname);
 1880         it = ie->ie_thread;
 1881         if (it != NULL)
 1882                 db_printf("(pid %d)", it->it_thread->td_proc->p_pid);
 1883         else
 1884                 db_printf("(no thread)");
 1885         if ((ie->ie_flags & (IE_SOFT | IE_ENTROPY | IE_ADDING_THREAD)) != 0 ||
 1886             (it != NULL && it->it_need)) {
 1887                 db_printf(" {");
 1888                 comma = 0;
 1889                 if (ie->ie_flags & IE_SOFT) {
 1890                         db_printf("SOFT");
 1891                         comma = 1;
 1892                 }
 1893                 if (ie->ie_flags & IE_ENTROPY) {
 1894                         if (comma)
 1895                                 db_printf(", ");
 1896                         db_printf("ENTROPY");
 1897                         comma = 1;
 1898                 }
 1899                 if (ie->ie_flags & IE_ADDING_THREAD) {
 1900                         if (comma)
 1901                                 db_printf(", ");
 1902                         db_printf("ADDING_THREAD");
 1903                         comma = 1;
 1904                 }
 1905                 if (it != NULL && it->it_need) {
 1906                         if (comma)
 1907                                 db_printf(", ");
 1908                         db_printf("NEED");
 1909                 }
 1910                 db_printf("}");
 1911         }
 1912         db_printf("\n");
 1913 
 1914         if (handlers)
 1915                 TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next)
 1916                     db_dump_intrhand(ih);
 1917 }
 1918 
 1919 /*
 1920  * Dump data about interrupt events and, optionally, their handlers.
 1921  */
 1922 DB_SHOW_COMMAND(intr, db_show_intr)
 1923 {
 1924         struct intr_event *ie;
 1925         int all, verbose;
 1926 
 1927         verbose = strchr(modif, 'v') != NULL;
 1928         all = strchr(modif, 'a') != NULL;
 1929         TAILQ_FOREACH(ie, &event_list, ie_list) {
 1930                 if (!all && TAILQ_EMPTY(&ie->ie_handlers))
 1931                         continue;
 1932                 db_dump_intr_event(ie, verbose);
 1933                 if (db_pager_quit)
 1934                         break;
 1935         }
 1936 }
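/*
 * Illustrative aside (not part of the original file): from the DDB
 * prompt the command above is invoked with the optional modifiers that
 * the strchr() calls parse:
 *
 *      db> show intr           events that have at least one handler
 *      db> show intr/v         verbose: also dump each handler line
 *      db> show intr/a         all: include events with no handlers
 */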
 1937 #endif /* DDB */
 1938 
 1939 /*
 1940  * Start standard software interrupt threads
 1941  */
 1942 static void
 1943 start_softintr(void *dummy)
 1944 {
 1945 
 1946         if (swi_add(NULL, "vm", swi_vm, NULL, SWI_VM, INTR_MPSAFE, &vm_ih))
 1947                 panic("died while creating vm swi ithread");
 1948 }
 1949 SYSINIT(start_softintr, SI_SUB_SOFTINTR, SI_ORDER_FIRST, start_softintr,
 1950     NULL);
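/*
 * Illustrative aside (not part of the original file): other subsystems
 * create and trigger software interrupt threads the same way.  A
 * hedged sketch with an invented "bar" subsystem:
 *
 *      static struct intr_event *bar_event;
 *      static void *bar_cookie;
 *
 *      static void
 *      bar_swi(void *arg)
 *      {
 *              // Deferred work runs here, in swi (ithread) context.
 *      }
 *
 *      if (swi_add(&bar_event, "bar", bar_swi, NULL, SWI_TQ,
 *          INTR_MPSAFE, &bar_cookie) != 0)
 *              panic("died while creating bar swi ithread");
 *
 *      // Later, typically from a primary interrupt handler:
 *      swi_sched(bar_cookie, 0);
 */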
 1951 
 1952 /*
 1953  * Sysctls used by systat and others: hw.intrnames and hw.intrcnt.
 1954  * The data for this is machine-dependent, and the declarations are in
 1955  * machine-dependent code.  The layout of intrnames and intrcnt, however,
 1956  * is machine-independent.
 1957  *
 1958  * We do not know the length of intrcnt and intrnames at compile time, so
 1959  * calculate things at run time.
 1960  */
 1961 static int
 1962 sysctl_intrnames(SYSCTL_HANDLER_ARGS)
 1963 {
 1964         return (sysctl_handle_opaque(oidp, intrnames, sintrnames, req));
 1965 }
 1966 
 1967 SYSCTL_PROC(_hw, OID_AUTO, intrnames, CTLTYPE_OPAQUE | CTLFLAG_RD,
 1968     NULL, 0, sysctl_intrnames, "", "Interrupt Names");
 1969 
 1970 static int
 1971 sysctl_intrcnt(SYSCTL_HANDLER_ARGS)
 1972 {
 1973 #ifdef SCTL_MASK32
 1974         uint32_t *intrcnt32;
 1975         unsigned i;
 1976         int error;
 1977 
 1978         if (req->flags & SCTL_MASK32) {
 1979                 if (!req->oldptr)
 1980                         return (sysctl_handle_opaque(oidp, NULL, sintrcnt / 2, req));
 1981                 intrcnt32 = malloc(sintrcnt / 2, M_TEMP, M_NOWAIT);
 1982                 if (intrcnt32 == NULL)
 1983                         return (ENOMEM);
 1984                 for (i = 0; i < sintrcnt / sizeof (u_long); i++)
 1985                         intrcnt32[i] = intrcnt[i];
 1986                 error = sysctl_handle_opaque(oidp, intrcnt32, sintrcnt / 2, req);
 1987                 free(intrcnt32, M_TEMP);
 1988                 return (error);
 1989         }
 1990 #endif
 1991         return (sysctl_handle_opaque(oidp, intrcnt, sintrcnt, req));
 1992 }
 1993 
 1994 SYSCTL_PROC(_hw, OID_AUTO, intrcnt, CTLTYPE_OPAQUE | CTLFLAG_RD,
 1995     NULL, 0, sysctl_intrcnt, "", "Interrupt Counts");
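/*
 * Illustrative aside (not part of the original file): userland tools
 * such as systat and vmstat -i consume the two sysctls above in
 * tandem; intrnames is a packed sequence of NUL-terminated strings and
 * intrcnt a parallel u_long array.  A hedged userland sketch, without
 * error handling:
 *
 *      #include <sys/types.h>
 *      #include <sys/sysctl.h>
 *      #include <stdio.h>
 *      #include <stdlib.h>
 *      #include <string.h>
 *
 *      size_t namesz = 0, cntsz = 0;
 *
 *      sysctlbyname("hw.intrnames", NULL, &namesz, NULL, 0);
 *      sysctlbyname("hw.intrcnt", NULL, &cntsz, NULL, 0);
 *      char *names = malloc(namesz);
 *      u_long *cnt = malloc(cntsz);
 *      sysctlbyname("hw.intrnames", names, &namesz, NULL, 0);
 *      sysctlbyname("hw.intrcnt", cnt, &cntsz, NULL, 0);
 *      char *cp = names;
 *      for (u_int i = 0; i < cntsz / sizeof(u_long); i++) {
 *              if (cnt[i] != 0)
 *                      printf("%-20s %lu\n", cp, cnt[i]);
 *              cp += strlen(cp) + 1;
 *      }
 */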
 1996 
 1997 #ifdef DDB
 1998 /*
 1999  * DDB command to dump the interrupt statistics.
 2000  */
 2001 DB_SHOW_COMMAND(intrcnt, db_show_intrcnt)
 2002 {
 2003         u_long *i;
 2004         char *cp;
 2005         u_int j;
 2006 
 2007         cp = intrnames;
 2008         j = 0;
 2009         for (i = intrcnt; j < (sintrcnt / sizeof(u_long)) && !db_pager_quit;
 2010             i++, j++) {
 2011                 if (*cp == '\0')
 2012                         break;
 2013                 if (*i != 0)
 2014                         db_printf("%s\t%lu\n", cp, *i);
 2015                 cp += strlen(cp) + 1;
 2016         }
 2017 }
 2018 #endif
