FreeBSD/Linux Kernel Cross Reference
sys/kern/kern_intr.c

/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 1997, Stefan Esser <se@freebsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: releng/12.0/sys/kern/kern_intr.c 338324 2018-08-26 12:51:46Z markm $");

#include "opt_ddb.h"
#include "opt_kstack_usage_prof.h"

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/cpuset.h>
#include <sys/rtprio.h>
#include <sys/systm.h>
#include <sys/interrupt.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/ktr.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/random.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>
#include <sys/unistd.h>
#include <sys/vmmeter.h>
#include <machine/atomic.h>
#include <machine/cpu.h>
#include <machine/md_var.h>
#include <machine/stdarg.h>
#ifdef DDB
#include <ddb/ddb.h>
#include <ddb/db_sym.h>
#endif

/*
 * Describe an interrupt thread.  There is one of these per interrupt event.
 */
struct intr_thread {
        struct intr_event *it_event;
        struct thread *it_thread;       /* Kernel thread. */
        int     it_flags;               /* (j) IT_* flags. */
        int     it_need;                /* Needs service. */
};

/* Interrupt thread flags kept in it_flags */
#define IT_DEAD         0x000001        /* Thread is waiting to exit. */
#define IT_WAIT         0x000002        /* Thread is waiting for completion. */

struct  intr_entropy {
        struct  thread *td;
        uintptr_t event;
};

struct  intr_event *clk_intr_event;
struct  intr_event *tty_intr_event;
void    *vm_ih;
struct proc *intrproc;

static MALLOC_DEFINE(M_ITHREAD, "ithread", "Interrupt Threads");

static int intr_storm_threshold = 1000;
SYSCTL_INT(_hw, OID_AUTO, intr_storm_threshold, CTLFLAG_RWTUN,
    &intr_storm_threshold, 0,
    "Number of consecutive interrupts before storm protection is enabled");
static TAILQ_HEAD(, intr_event) event_list =
    TAILQ_HEAD_INITIALIZER(event_list);
static struct mtx event_lock;
MTX_SYSINIT(intr_event_list, &event_lock, "intr event list", MTX_DEF);

static void     intr_event_update(struct intr_event *ie);
static int      intr_event_schedule_thread(struct intr_event *ie);
static struct intr_thread *ithread_create(const char *name);
static void     ithread_destroy(struct intr_thread *ithread);
static void     ithread_execute_handlers(struct proc *p,
                    struct intr_event *ie);
static void     ithread_loop(void *);
static void     ithread_update(struct intr_thread *ithd);
static void     start_softintr(void *);

/* Map an interrupt type to an ithread priority. */
u_char
intr_priority(enum intr_type flags)
{
        u_char pri;

        flags &= (INTR_TYPE_TTY | INTR_TYPE_BIO | INTR_TYPE_NET |
            INTR_TYPE_CAM | INTR_TYPE_MISC | INTR_TYPE_CLK | INTR_TYPE_AV);
        switch (flags) {
        case INTR_TYPE_TTY:
                pri = PI_TTY;
                break;
        case INTR_TYPE_BIO:
                pri = PI_DISK;
                break;
        case INTR_TYPE_NET:
                pri = PI_NET;
                break;
        case INTR_TYPE_CAM:
                pri = PI_DISK;
                break;
        case INTR_TYPE_AV:
                pri = PI_AV;
                break;
        case INTR_TYPE_CLK:
                pri = PI_REALTIME;
                break;
        case INTR_TYPE_MISC:
                pri = PI_DULL;          /* don't care */
                break;
        default:
                /* We didn't specify an interrupt level. */
                panic("intr_priority: no interrupt type in flags");
        }

        return pri;
}
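
/*
 * Usage sketch (illustrative, not compiled): callers pass exactly one
 * INTR_TYPE_* class in flags; passing none panics, per the default case
 * above.
 */
#if 0
        u_char pri;

        pri = intr_priority(INTR_TYPE_NET);     /* PI_NET */
        pri = intr_priority(INTR_TYPE_CLK);     /* PI_REALTIME */
#endif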

/*
 * Update an ithread based on the associated intr_event.
 */
static void
ithread_update(struct intr_thread *ithd)
{
        struct intr_event *ie;
        struct thread *td;
        u_char pri;

        ie = ithd->it_event;
        td = ithd->it_thread;
        mtx_assert(&ie->ie_lock, MA_OWNED);

        /* Determine the overall priority of this event. */
        if (CK_SLIST_EMPTY(&ie->ie_handlers))
                pri = PRI_MAX_ITHD;
        else
                pri = CK_SLIST_FIRST(&ie->ie_handlers)->ih_pri;

        /* Update name and priority. */
        strlcpy(td->td_name, ie->ie_fullname, sizeof(td->td_name));
#ifdef KTR
        sched_clear_tdname(td);
#endif
        thread_lock(td);
        sched_prio(td, pri);
        thread_unlock(td);
}

/*
 * Regenerate the full name of an interrupt event and update its priority.
 */
static void
intr_event_update(struct intr_event *ie)
{
        struct intr_handler *ih;
        char *last;
        int missed, space;

        /* Start off with no entropy and just the name of the event. */
        mtx_assert(&ie->ie_lock, MA_OWNED);
        strlcpy(ie->ie_fullname, ie->ie_name, sizeof(ie->ie_fullname));
        ie->ie_flags &= ~IE_ENTROPY;
        missed = 0;
        space = 1;

        /* Run through all the handlers updating values. */
        CK_SLIST_FOREACH(ih, &ie->ie_handlers, ih_next) {
                if (strlen(ie->ie_fullname) + strlen(ih->ih_name) + 1 <
                    sizeof(ie->ie_fullname)) {
                        strcat(ie->ie_fullname, " ");
                        strcat(ie->ie_fullname, ih->ih_name);
                        space = 0;
                } else
                        missed++;
                if (ih->ih_flags & IH_ENTROPY)
                        ie->ie_flags |= IE_ENTROPY;
        }

        /*
         * If the handler names were too long, add +'s to indicate missing
         * names. If we run out of room and still have +'s to add, change
         * the last character from a + to a *.
         */
        last = &ie->ie_fullname[sizeof(ie->ie_fullname) - 2];
        while (missed-- > 0) {
                if (strlen(ie->ie_fullname) + 1 == sizeof(ie->ie_fullname)) {
                        if (*last == '+') {
                                *last = '*';
                                break;
                        } else
                                *last = '+';
                } else if (space) {
                        strcat(ie->ie_fullname, " +");
                        space = 0;
                } else
                        strcat(ie->ie_fullname, "+");
        }

        /*
         * If this event has an ithread, update its priority and
         * name.
         */
        if (ie->ie_thread != NULL)
                ithread_update(ie->ie_thread);
        CTR2(KTR_INTR, "%s: updated %s", __func__, ie->ie_fullname);
}

int
intr_event_create(struct intr_event **event, void *source, int flags, int irq,
    void (*pre_ithread)(void *), void (*post_ithread)(void *),
    void (*post_filter)(void *), int (*assign_cpu)(void *, int),
    const char *fmt, ...)
{
        struct intr_event *ie;
        va_list ap;

        /* The only valid flag during creation is IE_SOFT. */
        if ((flags & ~IE_SOFT) != 0)
                return (EINVAL);
        ie = malloc(sizeof(struct intr_event), M_ITHREAD, M_WAITOK | M_ZERO);
        ie->ie_source = source;
        ie->ie_pre_ithread = pre_ithread;
        ie->ie_post_ithread = post_ithread;
        ie->ie_post_filter = post_filter;
        ie->ie_assign_cpu = assign_cpu;
        ie->ie_flags = flags;
        ie->ie_irq = irq;
        ie->ie_cpu = NOCPU;
        CK_SLIST_INIT(&ie->ie_handlers);
        mtx_init(&ie->ie_lock, "intr event", NULL, MTX_DEF);

        va_start(ap, fmt);
        vsnprintf(ie->ie_name, sizeof(ie->ie_name), fmt, ap);
        va_end(ap);
        strlcpy(ie->ie_fullname, ie->ie_name, sizeof(ie->ie_fullname));
        mtx_lock(&event_lock);
        TAILQ_INSERT_TAIL(&event_list, ie, ie_list);
        mtx_unlock(&event_lock);
        if (event != NULL)
                *event = ie;
        CTR2(KTR_INTR, "%s: created %s", __func__, ie->ie_name);
        return (0);
}
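
/*
 * Usage sketch (illustrative, not compiled): how an interrupt controller
 * driver might create an event for a hardware IRQ.  The xpic_* callbacks
 * and the softc are hypothetical names invented for this example.
 */
#if 0
        struct intr_event *ie;
        int error;

        error = intr_event_create(&ie, sc, 0, irq,
            xpic_pre_ithread, xpic_post_ithread, xpic_post_filter,
            xpic_assign_cpu, "irq%d:", irq);
        if (error != 0)
                return (error);
#endif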

/*
 * Bind an interrupt event to the specified CPU.  Note that not all
 * platforms support binding an interrupt to a CPU.  For those
 * platforms this request will fail.  Using a cpu id of NOCPU unbinds
 * the interrupt event.
 */
static int
_intr_event_bind(struct intr_event *ie, int cpu, bool bindirq, bool bindithread)
{
        lwpid_t id;
        int error;

        /* Need a CPU to bind to. */
        if (cpu != NOCPU && CPU_ABSENT(cpu))
                return (EINVAL);

        if (ie->ie_assign_cpu == NULL)
                return (EOPNOTSUPP);

        error = priv_check(curthread, PRIV_SCHED_CPUSET_INTR);
        if (error)
                return (error);

        /*
         * If we have any ithreads try to set their mask first to verify
         * permissions, etc.
         */
        if (bindithread) {
                mtx_lock(&ie->ie_lock);
                if (ie->ie_thread != NULL) {
                        id = ie->ie_thread->it_thread->td_tid;
                        mtx_unlock(&ie->ie_lock);
                        error = cpuset_setithread(id, cpu);
                        if (error)
                                return (error);
                } else
                        mtx_unlock(&ie->ie_lock);
        }
        if (bindirq)
                error = ie->ie_assign_cpu(ie->ie_source, cpu);
        if (error) {
                if (bindithread) {
                        mtx_lock(&ie->ie_lock);
                        if (ie->ie_thread != NULL) {
                                cpu = ie->ie_cpu;
                                id = ie->ie_thread->it_thread->td_tid;
                                mtx_unlock(&ie->ie_lock);
                                (void)cpuset_setithread(id, cpu);
                        } else
                                mtx_unlock(&ie->ie_lock);
                }
                return (error);
        }

        if (bindirq) {
                mtx_lock(&ie->ie_lock);
                ie->ie_cpu = cpu;
                mtx_unlock(&ie->ie_lock);
        }

        return (error);
}

/*
 * Bind an interrupt event to the specified CPU.  For supported platforms, any
 * associated ithreads as well as the primary interrupt context will be bound
 * to the specified CPU.
 */
int
intr_event_bind(struct intr_event *ie, int cpu)
{

        return (_intr_event_bind(ie, cpu, true, true));
}

/*
 * Bind an interrupt event to the specified CPU, but do not bind associated
 * ithreads.
 */
int
intr_event_bind_irqonly(struct intr_event *ie, int cpu)
{

        return (_intr_event_bind(ie, cpu, true, false));
}

/*
 * Bind an interrupt event's ithread to the specified CPU.
 */
int
intr_event_bind_ithread(struct intr_event *ie, int cpu)
{

        return (_intr_event_bind(ie, cpu, false, true));
}
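
/*
 * Usage sketch (illustrative, not compiled): pin both the primary interrupt
 * context and the ithread to CPU 2, then unbind again with NOCPU.  On
 * platforms that supply no ie_assign_cpu method this returns EOPNOTSUPP.
 */
#if 0
        error = intr_event_bind(ie, 2);
        if (error == 0)
                error = intr_event_bind(ie, NOCPU);     /* unbind */
#endif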

static struct intr_event *
intr_lookup(int irq)
{
        struct intr_event *ie;

        mtx_lock(&event_lock);
        TAILQ_FOREACH(ie, &event_list, ie_list)
                if (ie->ie_irq == irq &&
                    (ie->ie_flags & IE_SOFT) == 0 &&
                    CK_SLIST_FIRST(&ie->ie_handlers) != NULL)
                        break;
        mtx_unlock(&event_lock);
        return (ie);
}

int
intr_setaffinity(int irq, int mode, void *m)
{
        struct intr_event *ie;
        cpuset_t *mask;
        int cpu, n;

        mask = m;
        cpu = NOCPU;
        /*
         * If we're setting all cpus we can unbind.  Otherwise make sure
         * only one cpu is in the set.
         */
        if (CPU_CMP(cpuset_root, mask)) {
                for (n = 0; n < CPU_SETSIZE; n++) {
                        if (!CPU_ISSET(n, mask))
                                continue;
                        if (cpu != NOCPU)
                                return (EINVAL);
                        cpu = n;
                }
        }
        ie = intr_lookup(irq);
        if (ie == NULL)
                return (ESRCH);
        switch (mode) {
        case CPU_WHICH_IRQ:
                return (intr_event_bind(ie, cpu));
        case CPU_WHICH_INTRHANDLER:
                return (intr_event_bind_irqonly(ie, cpu));
        case CPU_WHICH_ITHREAD:
                return (intr_event_bind_ithread(ie, cpu));
        default:
                return (EINVAL);
        }
}

int
intr_getaffinity(int irq, int mode, void *m)
{
        struct intr_event *ie;
        struct thread *td;
        struct proc *p;
        cpuset_t *mask;
        lwpid_t id;
        int error;

        mask = m;
        ie = intr_lookup(irq);
        if (ie == NULL)
                return (ESRCH);

        error = 0;
        CPU_ZERO(mask);
        switch (mode) {
        case CPU_WHICH_IRQ:
        case CPU_WHICH_INTRHANDLER:
                mtx_lock(&ie->ie_lock);
                if (ie->ie_cpu == NOCPU)
                        CPU_COPY(cpuset_root, mask);
                else
                        CPU_SET(ie->ie_cpu, mask);
                mtx_unlock(&ie->ie_lock);
                break;
        case CPU_WHICH_ITHREAD:
                mtx_lock(&ie->ie_lock);
                if (ie->ie_thread == NULL) {
                        mtx_unlock(&ie->ie_lock);
                        CPU_COPY(cpuset_root, mask);
                } else {
                        id = ie->ie_thread->it_thread->td_tid;
                        mtx_unlock(&ie->ie_lock);
                        error = cpuset_which(CPU_WHICH_TID, id, &p, &td, NULL);
                        if (error != 0)
                                return (error);
                        CPU_COPY(&td->td_cpuset->cs_mask, mask);
                        PROC_UNLOCK(p);
                }
                break;
        default:
                return (EINVAL);
        }
        return (0);
}
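
/*
 * Usage sketch (illustrative, not compiled): the pattern used by the
 * cpuset(2) machinery.  A mask with exactly one bit set binds; a mask equal
 * to the root set maps to NOCPU above and unbinds.
 */
#if 0
        cpuset_t mask;
        int error;

        CPU_ZERO(&mask);
        CPU_SET(2, &mask);
        error = intr_setaffinity(irq, CPU_WHICH_IRQ, &mask);
        if (error == 0)
                error = intr_getaffinity(irq, CPU_WHICH_ITHREAD, &mask);
#endif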

int
intr_event_destroy(struct intr_event *ie)
{

        mtx_lock(&event_lock);
        mtx_lock(&ie->ie_lock);
        if (!CK_SLIST_EMPTY(&ie->ie_handlers)) {
                mtx_unlock(&ie->ie_lock);
                mtx_unlock(&event_lock);
                return (EBUSY);
        }
        TAILQ_REMOVE(&event_list, ie, ie_list);
#ifndef notyet
        if (ie->ie_thread != NULL) {
                ithread_destroy(ie->ie_thread);
                ie->ie_thread = NULL;
        }
#endif
        mtx_unlock(&ie->ie_lock);
        mtx_unlock(&event_lock);
        mtx_destroy(&ie->ie_lock);
        free(ie, M_ITHREAD);
        return (0);
}

static struct intr_thread *
ithread_create(const char *name)
{
        struct intr_thread *ithd;
        struct thread *td;
        int error;

        ithd = malloc(sizeof(struct intr_thread), M_ITHREAD, M_WAITOK | M_ZERO);

        error = kproc_kthread_add(ithread_loop, ithd, &intrproc,
                    &td, RFSTOPPED | RFHIGHPID,
                    0, "intr", "%s", name);
        if (error)
                panic("kproc_create() failed with %d", error);
        thread_lock(td);
        sched_class(td, PRI_ITHD);
        TD_SET_IWAIT(td);
        thread_unlock(td);
        td->td_pflags |= TDP_ITHREAD;
        ithd->it_thread = td;
        CTR2(KTR_INTR, "%s: created %s", __func__, name);
        return (ithd);
}

static void
ithread_destroy(struct intr_thread *ithread)
{
        struct thread *td;

        CTR2(KTR_INTR, "%s: killing %s", __func__, ithread->it_event->ie_name);
        td = ithread->it_thread;
        thread_lock(td);
        ithread->it_flags |= IT_DEAD;
        if (TD_AWAITING_INTR(td)) {
                TD_CLR_IWAIT(td);
                sched_add(td, SRQ_INTR);
        }
        thread_unlock(td);
}

int
intr_event_add_handler(struct intr_event *ie, const char *name,
    driver_filter_t filter, driver_intr_t handler, void *arg, u_char pri,
    enum intr_type flags, void **cookiep)
{
        struct intr_handler *ih, *temp_ih;
        struct intr_handler **prevptr;
        struct intr_thread *it;

        if (ie == NULL || name == NULL || (handler == NULL && filter == NULL))
                return (EINVAL);

        /* Allocate and populate an interrupt handler structure. */
        ih = malloc(sizeof(struct intr_handler), M_ITHREAD, M_WAITOK | M_ZERO);
        ih->ih_filter = filter;
        ih->ih_handler = handler;
        ih->ih_argument = arg;
        strlcpy(ih->ih_name, name, sizeof(ih->ih_name));
        ih->ih_event = ie;
        ih->ih_pri = pri;
        if (flags & INTR_EXCL)
                ih->ih_flags = IH_EXCLUSIVE;
        if (flags & INTR_MPSAFE)
                ih->ih_flags |= IH_MPSAFE;
        if (flags & INTR_ENTROPY)
                ih->ih_flags |= IH_ENTROPY;

        /* We can only have one exclusive handler in an event. */
        mtx_lock(&ie->ie_lock);
        if (!CK_SLIST_EMPTY(&ie->ie_handlers)) {
                if ((flags & INTR_EXCL) ||
                    (CK_SLIST_FIRST(&ie->ie_handlers)->ih_flags & IH_EXCLUSIVE)) {
                        mtx_unlock(&ie->ie_lock);
                        free(ih, M_ITHREAD);
                        return (EINVAL);
                }
        }

        /* Create a thread if we need one. */
        while (ie->ie_thread == NULL && handler != NULL) {
                if (ie->ie_flags & IE_ADDING_THREAD)
                        msleep(ie, &ie->ie_lock, 0, "ithread", 0);
                else {
                        ie->ie_flags |= IE_ADDING_THREAD;
                        mtx_unlock(&ie->ie_lock);
                        it = ithread_create("intr: newborn");
                        mtx_lock(&ie->ie_lock);
                        ie->ie_flags &= ~IE_ADDING_THREAD;
                        ie->ie_thread = it;
                        it->it_event = ie;
                        ithread_update(it);
                        wakeup(ie);
                }
        }

        /* Add the new handler to the event in priority order. */
        CK_SLIST_FOREACH_PREVPTR(temp_ih, prevptr, &ie->ie_handlers, ih_next) {
                if (temp_ih->ih_pri > ih->ih_pri)
                        break;
        }
        CK_SLIST_INSERT_PREVPTR(prevptr, temp_ih, ih, ih_next);

        intr_event_update(ie);

        CTR3(KTR_INTR, "%s: added %s to %s", __func__, ih->ih_name,
            ie->ie_name);
        mtx_unlock(&ie->ie_lock);

        if (cookiep != NULL)
                *cookiep = ih;
        return (0);
}
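
/*
 * Usage sketch (illustrative, not compiled): a hypothetical driver "foo"
 * registering a filter plus an ithread handler.  All foo_* names are
 * invented for this example; real drivers normally reach this function
 * through bus_setup_intr(9) rather than calling it directly.
 */
#if 0
static int
foo_filter(void *arg)
{
        struct foo_softc *sc = arg;

        if (!foo_intr_pending(sc))
                return (FILTER_STRAY);
        foo_mask_intr(sc);                      /* quiesce the source */
        return (FILTER_SCHEDULE_THREAD);        /* run foo_handler in the ithread */
}

        error = intr_event_add_handler(ie, "foo0", foo_filter, foo_handler,
            sc, intr_priority(INTR_TYPE_NET), INTR_MPSAFE, &sc->sc_cookie);
        /* Later, on detach: */
        error = intr_event_remove_handler(sc->sc_cookie);
#endif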

/*
 * Append a description preceded by a ':' to the name of the specified
 * interrupt handler.
 */
int
intr_event_describe_handler(struct intr_event *ie, void *cookie,
    const char *descr)
{
        struct intr_handler *ih;
        size_t space;
        char *start;

        mtx_lock(&ie->ie_lock);
#ifdef INVARIANTS
        CK_SLIST_FOREACH(ih, &ie->ie_handlers, ih_next) {
                if (ih == cookie)
                        break;
        }
        if (ih == NULL) {
                mtx_unlock(&ie->ie_lock);
                panic("handler %p not found in interrupt event %p", cookie, ie);
        }
#endif
        ih = cookie;

        /*
         * Look for an existing description by checking for an
         * existing ":".  This assumes device names do not include
         * colons.  If one is found, prepare to insert the new
         * description at that point.  If one is not found, find the
         * end of the name to use as the insertion point.
         */
        start = strchr(ih->ih_name, ':');
        if (start == NULL)
                start = strchr(ih->ih_name, 0);

        /*
         * See if there is enough remaining room in the string for the
         * description + ":".  The "- 1" leaves room for the trailing
         * '\0'.  The "+ 1" accounts for the colon.
         */
        space = sizeof(ih->ih_name) - (start - ih->ih_name) - 1;
        if (strlen(descr) + 1 > space) {
                mtx_unlock(&ie->ie_lock);
                return (ENOSPC);
        }

        /* Append a colon followed by the description. */
        *start = ':';
        strcpy(start + 1, descr);
        intr_event_update(ie);
        mtx_unlock(&ie->ie_lock);
        return (0);
}
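
/*
 * Usage sketch (illustrative, not compiled): attach a description to a
 * handler, as bus_describe_intr(9) does; a handler named "foo0" would then
 * appear as "foo0:rx" in the event's full name.
 */
#if 0
        error = intr_event_describe_handler(ie, sc->sc_cookie, "rx");
#endif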

/*
 * Return the ie_source field from the intr_event an intr_handler is
 * associated with.
 */
void *
intr_handler_source(void *cookie)
{
        struct intr_handler *ih;
        struct intr_event *ie;

        ih = (struct intr_handler *)cookie;
        if (ih == NULL)
                return (NULL);
        ie = ih->ih_event;
        KASSERT(ie != NULL,
            ("interrupt handler \"%s\" has a NULL interrupt event",
            ih->ih_name));
        return (ie->ie_source);
}

/*
 * If intr_event_handle() is running in the ISR context at the time of the call,
 * then wait for it to complete.
 */
static void
intr_event_barrier(struct intr_event *ie)
{
        int phase;

        mtx_assert(&ie->ie_lock, MA_OWNED);
        phase = ie->ie_phase;

        /*
         * Switch phase to direct future interrupts to the other active counter.
         * Make sure that any preceding stores are visible before the switch.
         */
        KASSERT(ie->ie_active[!phase] == 0, ("idle phase has activity"));
        atomic_store_rel_int(&ie->ie_phase, !phase);

        /*
         * This code cooperates with wait-free iteration of ie_handlers
         * in intr_event_handle.
         * Make sure that the removal and the phase update are not reordered
         * with the active count check.
         * Note that no combination of acquire and release fences can provide
         * that guarantee as Store->Load sequences can always be reordered.
         */
        atomic_thread_fence_seq_cst();

        /*
         * Now wait on the inactive phase.
         * The acquire fence is needed so that all post-barrier accesses
         * are after the check.
         */
        while (ie->ie_active[phase] > 0)
                cpu_spinwait();
        atomic_thread_fence_acq();
}
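
/*
 * Illustrative analogue (not compiled): the same two-phase scheme expressed
 * with C11 atomics, as a userspace sketch of the technique rather than the
 * kernel implementation.  Readers bump active[phase] around the list walk;
 * the writer flips the phase, issues a full fence, and drains the old phase.
 */
#if 0
#include <stdatomic.h>

static _Atomic int phase;
static _Atomic int active[2];

static void
reader(void)
{
        int p = atomic_load(&phase);

        atomic_fetch_add(&active[p], 1);
        atomic_thread_fence(memory_order_seq_cst);
        /* ... wait-free walk of the shared list ... */
        atomic_fetch_sub_explicit(&active[p], 1, memory_order_release);
}

static void
barrier(void)
{
        int p = atomic_load(&phase);

        atomic_store_explicit(&phase, !p, memory_order_release);
        /* Full fence: order the phase store before the active[] load. */
        atomic_thread_fence(memory_order_seq_cst);
        while (atomic_load(&active[p]) > 0)
                ;                       /* spin until the old phase drains */
        atomic_thread_fence(memory_order_acquire);
}
#endif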

/*
 * Sleep until an ithread finishes executing an interrupt handler.
 *
 * XXX Doesn't currently handle interrupt filters or fast interrupt
 * handlers.  This is intended for compatibility with linux drivers
 * only.  Do not use in BSD code.
 */
void
_intr_drain(int irq)
{
        struct intr_event *ie;
        struct intr_thread *ithd;
        struct thread *td;

        ie = intr_lookup(irq);
        if (ie == NULL)
                return;
        if (ie->ie_thread == NULL)
                return;
        ithd = ie->ie_thread;
        td = ithd->it_thread;
        /*
         * We set the flag and wait for it to be cleared to avoid
         * long delays with potentially busy interrupt handlers
         * were we to only sample TD_AWAITING_INTR() every tick.
         */
        thread_lock(td);
        if (!TD_AWAITING_INTR(td)) {
                ithd->it_flags |= IT_WAIT;
                while (ithd->it_flags & IT_WAIT) {
                        thread_unlock(td);
                        pause("idrain", 1);
                        thread_lock(td);
                }
        }
        thread_unlock(td);
        return;
}

int
intr_event_remove_handler(void *cookie)
{
        struct intr_handler *handler = (struct intr_handler *)cookie;
        struct intr_event *ie;
        struct intr_handler *ih;
        struct intr_handler **prevptr;
#ifdef notyet
        int dead;
#endif

        if (handler == NULL)
                return (EINVAL);
        ie = handler->ih_event;
        KASSERT(ie != NULL,
            ("interrupt handler \"%s\" has a NULL interrupt event",
            handler->ih_name));

        mtx_lock(&ie->ie_lock);
        CTR3(KTR_INTR, "%s: removing %s from %s", __func__, handler->ih_name,
            ie->ie_name);
        CK_SLIST_FOREACH_PREVPTR(ih, prevptr, &ie->ie_handlers, ih_next) {
                if (ih == handler)
                        break;
        }
        if (ih == NULL) {
                panic("interrupt handler \"%s\" not found in "
                    "interrupt event \"%s\"", handler->ih_name, ie->ie_name);
        }

        /*
         * If there is no ithread, then directly remove the handler.  Note that
         * intr_event_handle() iterates ie_handlers in a lock-less fashion, so
         * care needs to be taken to keep ie_handlers consistent and to free
         * the removed handler only when ie_handlers is quiescent.
         */
        if (ie->ie_thread == NULL) {
                CK_SLIST_REMOVE_PREVPTR(prevptr, ih, ih_next);
                intr_event_barrier(ie);
                intr_event_update(ie);
                mtx_unlock(&ie->ie_lock);
                free(handler, M_ITHREAD);
                return (0);
        }

        /*
         * Let the interrupt thread do the job.
         * The interrupt source is disabled when the interrupt thread is
         * running, so it does not have to worry about interaction with
         * intr_event_handle().
         */
        KASSERT((handler->ih_flags & IH_DEAD) == 0,
            ("duplicate handle remove"));
        handler->ih_flags |= IH_DEAD;
        intr_event_schedule_thread(ie);
        while (handler->ih_flags & IH_DEAD)
                msleep(handler, &ie->ie_lock, 0, "iev_rmh", 0);
        intr_event_update(ie);

#ifdef notyet
        /*
         * XXX: This could be bad in the case of ppbus(8).  Also, I think
         * this could lead to races of stale data when servicing an
         * interrupt.
         */
        dead = 1;
        CK_SLIST_FOREACH(ih, &ie->ie_handlers, ih_next) {
                if (ih->ih_handler != NULL) {
                        dead = 0;
                        break;
                }
        }
        if (dead) {
                ithread_destroy(ie->ie_thread);
                ie->ie_thread = NULL;
        }
#endif
        mtx_unlock(&ie->ie_lock);
        free(handler, M_ITHREAD);
        return (0);
}

static int
intr_event_schedule_thread(struct intr_event *ie)
{
        struct intr_entropy entropy;
        struct intr_thread *it;
        struct thread *td;
        struct thread *ctd;

        /*
         * If no ithread or no handlers, then we have a stray interrupt.
         */
        if (ie == NULL || CK_SLIST_EMPTY(&ie->ie_handlers) ||
            ie->ie_thread == NULL)
                return (EINVAL);

        ctd = curthread;
        it = ie->ie_thread;
        td = it->it_thread;

        /*
         * If any of the handlers for this ithread claim to be good
         * sources of entropy, then gather some.
         */
        if (ie->ie_flags & IE_ENTROPY) {
                entropy.event = (uintptr_t)ie;
                entropy.td = ctd;
                random_harvest_queue(&entropy, sizeof(entropy), RANDOM_INTERRUPT);
        }

        KASSERT(td->td_proc != NULL, ("ithread %s has no process", ie->ie_name));

        /*
         * Set it_need to tell the thread to keep running if it is already
         * running.  Then, lock the thread and see if we actually need to
         * put it on the runqueue.
         *
         * Use store_rel to arrange that the store to ih_need in
         * swi_sched() is before the store to it_need and prepare for
         * transfer of this order to loads in the ithread.
         */
        atomic_store_rel_int(&it->it_need, 1);
        thread_lock(td);
        if (TD_AWAITING_INTR(td)) {
                CTR3(KTR_INTR, "%s: schedule pid %d (%s)", __func__, td->td_proc->p_pid,
                    td->td_name);
                TD_CLR_IWAIT(td);
                sched_add(td, SRQ_INTR);
        } else {
                CTR5(KTR_INTR, "%s: pid %d (%s): it_need %d, state %d",
                    __func__, td->td_proc->p_pid, td->td_name, it->it_need, td->td_state);
        }
        thread_unlock(td);

        return (0);
}

/*
 * Allow interrupt event binding for software interrupt handlers -- a no-op,
 * since interrupts are generated in software rather than being directed by
 * a PIC.
 */
static int
swi_assign_cpu(void *arg, int cpu)
{

        return (0);
}

/*
 * Add a software interrupt handler to a specified event.  If a given event
 * is not specified, then a new event is created.
 */
int
swi_add(struct intr_event **eventp, const char *name, driver_intr_t handler,
            void *arg, int pri, enum intr_type flags, void **cookiep)
{
        struct intr_event *ie;
        int error;

        if (flags & INTR_ENTROPY)
                return (EINVAL);

        ie = (eventp != NULL) ? *eventp : NULL;

        if (ie != NULL) {
                if (!(ie->ie_flags & IE_SOFT))
                        return (EINVAL);
        } else {
                error = intr_event_create(&ie, NULL, IE_SOFT, 0,
                    NULL, NULL, NULL, swi_assign_cpu, "swi%d:", pri);
                if (error)
                        return (error);
                if (eventp != NULL)
                        *eventp = ie;
        }
        error = intr_event_add_handler(ie, name, NULL, handler, arg,
            PI_SWI(pri), flags, cookiep);
        return (error);
}
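
/*
 * Usage sketch (illustrative, not compiled): create a software interrupt
 * and request a run of it; compare the real registration in
 * start_softintr() below.  foo_swi_handler and the cookie are hypothetical.
 */
#if 0
        static void *foo_swi_cookie;

        error = swi_add(NULL, "foo", foo_swi_handler, NULL, SWI_CLOCK,
            INTR_MPSAFE, &foo_swi_cookie);
        /* From elsewhere, mark the handler as needed and schedule it: */
        swi_sched(foo_swi_cookie, 0);
#endif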

/*
 * Schedule a software interrupt thread.
 */
void
swi_sched(void *cookie, int flags)
{
        struct intr_handler *ih = (struct intr_handler *)cookie;
        struct intr_event *ie = ih->ih_event;
        struct intr_entropy entropy;
        int error __unused;

        CTR3(KTR_INTR, "swi_sched: %s %s need=%d", ie->ie_name, ih->ih_name,
            ih->ih_need);

        entropy.event = (uintptr_t)ih;
        entropy.td = curthread;
        random_harvest_queue(&entropy, sizeof(entropy), RANDOM_SWI);

        /*
         * Set ih_need for this handler so that if the ithread is already
         * running it will execute this handler on the next pass.  Otherwise,
         * it will execute it the next time it runs.
         */
        ih->ih_need = 1;

        if (!(flags & SWI_DELAY)) {
                VM_CNT_INC(v_soft);
                error = intr_event_schedule_thread(ie);
                KASSERT(error == 0, ("stray software interrupt"));
        }
}

/*
 * Remove a software interrupt handler.  Currently this code does not
 * remove the associated interrupt event if it becomes empty.  Calling code
 * may do so manually via intr_event_destroy(), but that's not really
 * an optimal interface.
 */
int
swi_remove(void *cookie)
{

        return (intr_event_remove_handler(cookie));
}

static void
intr_event_execute_handlers(struct proc *p, struct intr_event *ie)
{
        struct intr_handler *ih, *ihn, *ihp;

        ihp = NULL;
        CK_SLIST_FOREACH_SAFE(ih, &ie->ie_handlers, ih_next, ihn) {
                /*
                 * If this handler is marked for death, remove it from
                 * the list of handlers and wake up the sleeper.
                 */
                if (ih->ih_flags & IH_DEAD) {
                        mtx_lock(&ie->ie_lock);
                        if (ihp == NULL)
                                CK_SLIST_REMOVE_HEAD(&ie->ie_handlers, ih_next);
                        else
                                CK_SLIST_REMOVE_AFTER(ihp, ih_next);
                        ih->ih_flags &= ~IH_DEAD;
                        wakeup(ih);
                        mtx_unlock(&ie->ie_lock);
                        continue;
                }

                /*
                 * Now that we know that the current element won't be removed
                 * update the previous element.
                 */
                ihp = ih;

                /* Skip filter only handlers */
                if (ih->ih_handler == NULL)
                        continue;

                /*
                 * For software interrupt threads, we only execute
                 * handlers that have their need flag set.  Hardware
                 * interrupt threads always invoke all of their handlers.
                 *
                 * ih_need can only be 0 or 1.  Failed cmpset below
                 * means that there is no request to execute handlers,
                 * so a retry of the cmpset is not needed.
                 */
                if ((ie->ie_flags & IE_SOFT) != 0 &&
                    atomic_cmpset_int(&ih->ih_need, 1, 0) == 0)
                        continue;

                /* Execute this handler. */
                CTR6(KTR_INTR, "%s: pid %d exec %p(%p) for %s flg=%x",
                    __func__, p->p_pid, (void *)ih->ih_handler,
                    ih->ih_argument, ih->ih_name, ih->ih_flags);

                if (!(ih->ih_flags & IH_MPSAFE))
                        mtx_lock(&Giant);
                ih->ih_handler(ih->ih_argument);
                if (!(ih->ih_flags & IH_MPSAFE))
                        mtx_unlock(&Giant);
        }
}

static void
ithread_execute_handlers(struct proc *p, struct intr_event *ie)
{

        /* Interrupt handlers should not sleep. */
        if (!(ie->ie_flags & IE_SOFT))
                THREAD_NO_SLEEPING();
        intr_event_execute_handlers(p, ie);
        if (!(ie->ie_flags & IE_SOFT))
                THREAD_SLEEPING_OK();

        /*
         * Interrupt storm handling:
         *
         * If this interrupt source is currently storming, then throttle
         * it to only fire the handler once per clock tick.
         *
         * If this interrupt source is not currently storming, but the
         * number of back to back interrupts exceeds the storm threshold,
         * then enter storming mode.
         */
        if (intr_storm_threshold != 0 && ie->ie_count >= intr_storm_threshold &&
            !(ie->ie_flags & IE_SOFT)) {
                /* Report the message only once every second. */
                if (ppsratecheck(&ie->ie_warntm, &ie->ie_warncnt, 1)) {
                        printf(
        "interrupt storm detected on \"%s\"; throttling interrupt source\n",
                            ie->ie_name);
                }
                pause("istorm", 1);
        } else
                ie->ie_count++;

        /*
         * Now that all the handlers have had a chance to run, reenable
         * the interrupt source.
         */
        if (ie->ie_post_ithread != NULL)
                ie->ie_post_ithread(ie->ie_source);
}
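
/*
 * Usage sketch (illustrative, not compiled): the storm threshold above is
 * the hw.intr_storm_threshold tunable; a userland tool could raise it, or
 * set it to 0 to disable storm detection, via sysctlbyname(3).
 */
#if 0
#include <sys/types.h>
#include <sys/sysctl.h>
#include <err.h>

        int newthr = 2000;

        if (sysctlbyname("hw.intr_storm_threshold", NULL, NULL,
            &newthr, sizeof(newthr)) != 0)
                err(1, "sysctlbyname");
#endif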

/*
 * This is the main code for interrupt threads.
 */
static void
ithread_loop(void *arg)
{
        struct intr_thread *ithd;
        struct intr_event *ie;
        struct thread *td;
        struct proc *p;
        int wake;

        td = curthread;
        p = td->td_proc;
        ithd = (struct intr_thread *)arg;
        KASSERT(ithd->it_thread == td,
            ("%s: ithread and proc linkage out of sync", __func__));
        ie = ithd->it_event;
        ie->ie_count = 0;
        wake = 0;

        /*
         * As long as we have interrupts outstanding, go through the
         * list of handlers, giving each one a go at it.
         */
        for (;;) {
                /*
                 * If we are an orphaned thread, then just die.
                 */
                if (ithd->it_flags & IT_DEAD) {
                        CTR3(KTR_INTR, "%s: pid %d (%s) exiting", __func__,
                            p->p_pid, td->td_name);
                        free(ithd, M_ITHREAD);
                        kthread_exit();
                }

                /*
                 * Service interrupts.  If another interrupt arrives while
                 * we are running, it will set it_need to note that we
                 * should make another pass.
                 *
                 * The load_acq part of the following cmpset ensures
                 * that the load of ih_need in ithread_execute_handlers()
                 * is ordered after the load of it_need here.
                 */
                while (atomic_cmpset_acq_int(&ithd->it_need, 1, 0) != 0)
                        ithread_execute_handlers(p, ie);
                WITNESS_WARN(WARN_PANIC, NULL, "suspending ithread");
                mtx_assert(&Giant, MA_NOTOWNED);

                /*
                 * Processed all our interrupts.  Now get the sched
                 * lock.  This may take a while and it_need may get
                 * set again, so we have to check it again.
                 */
                thread_lock(td);
                if (atomic_load_acq_int(&ithd->it_need) == 0 &&
                    (ithd->it_flags & (IT_DEAD | IT_WAIT)) == 0) {
                        TD_SET_IWAIT(td);
                        ie->ie_count = 0;
                        mi_switch(SW_VOL | SWT_IWAIT, NULL);
                }
                if (ithd->it_flags & IT_WAIT) {
                        wake = 1;
                        ithd->it_flags &= ~IT_WAIT;
                }
                thread_unlock(td);
                if (wake) {
                        wakeup(ithd);
                        wake = 0;
                }
        }
}

/*
 * Main interrupt handling body.
 *
 * Input:
 * o ie:                        the event connected to this interrupt.
 * o frame:                     some archs (e.g., i386) pass a frame to some
 *                              handlers as their main argument.
 * Return value:
 * o 0:                         everything ok.
 * o EINVAL:                    stray interrupt.
 */
int
intr_event_handle(struct intr_event *ie, struct trapframe *frame)
{
        struct intr_handler *ih;
        struct trapframe *oldframe;
        struct thread *td;
        int ret, thread;
        int phase;

        td = curthread;

#ifdef KSTACK_USAGE_PROF
        intr_prof_stack_use(td, frame);
#endif

        /* An interrupt with no event or handlers is a stray interrupt. */
        if (ie == NULL || CK_SLIST_EMPTY(&ie->ie_handlers))
                return (EINVAL);

        /*
         * Execute fast interrupt handlers directly.
         * To support clock handlers, if a handler registers
         * with a NULL argument, then we pass it a pointer to
         * a trapframe as its argument.
         */
        td->td_intr_nesting_level++;
        thread = 0;
        ret = 0;
        critical_enter();
        oldframe = td->td_intr_frame;
        td->td_intr_frame = frame;

        phase = ie->ie_phase;
        atomic_add_int(&ie->ie_active[phase], 1);

        /*
         * This fence is required to ensure that no later loads are
         * re-ordered before the ie_active store.
         */
        atomic_thread_fence_seq_cst();

        CK_SLIST_FOREACH(ih, &ie->ie_handlers, ih_next) {
                if (ih->ih_filter == NULL) {
                        thread = 1;
                        continue;
                }
                CTR4(KTR_INTR, "%s: exec %p(%p) for %s", __func__,
                    ih->ih_filter, ih->ih_argument == NULL ? frame :
                    ih->ih_argument, ih->ih_name);
                if (ih->ih_argument == NULL)
                        ret = ih->ih_filter(frame);
                else
                        ret = ih->ih_filter(ih->ih_argument);
                KASSERT(ret == FILTER_STRAY ||
                    ((ret & (FILTER_SCHEDULE_THREAD | FILTER_HANDLED)) != 0 &&
                    (ret & ~(FILTER_SCHEDULE_THREAD | FILTER_HANDLED)) == 0),
                    ("%s: incorrect return value %#x from %s", __func__, ret,
                    ih->ih_name));

                /*
                 * Wrapper handler special handling:
                 *
                 * in some particular cases (like pccard and pccbb),
                 * the _real_ device handler is wrapped in a couple of
                 * functions - a filter wrapper and an ithread wrapper.
                 * In this case (and just in this case), the filter wrapper
                 * could ask the system to schedule the ithread and mask
                 * the interrupt source if the wrapped handler is composed
                 * of just an ithread handler.
                 *
                 * TODO: write a generic wrapper to avoid people rolling
                 * their own
                 */
                if (!thread) {
                        if (ret == FILTER_SCHEDULE_THREAD)
                                thread = 1;
                }
        }
        atomic_add_rel_int(&ie->ie_active[phase], -1);

        td->td_intr_frame = oldframe;

        if (thread) {
                if (ie->ie_pre_ithread != NULL)
                        ie->ie_pre_ithread(ie->ie_source);
        } else {
                if (ie->ie_post_filter != NULL)
                        ie->ie_post_filter(ie->ie_source);
        }

        /* Schedule the ithread if needed. */
        if (thread) {
                int error __unused;

                error = intr_event_schedule_thread(ie);
                KASSERT(error == 0, ("bad stray interrupt"));
        }
        critical_exit();
        td->td_intr_nesting_level--;
        return (0);
}

#ifdef DDB
/*
 * Dump details about an interrupt handler
 */
static void
db_dump_intrhand(struct intr_handler *ih)
{
        int comma;

        db_printf("\t%-10s ", ih->ih_name);
        switch (ih->ih_pri) {
        case PI_REALTIME:
                db_printf("CLK ");
                break;
        case PI_AV:
                db_printf("AV  ");
                break;
        case PI_TTY:
                db_printf("TTY ");
                break;
        case PI_NET:
                db_printf("NET ");
                break;
        case PI_DISK:
                db_printf("DISK");
                break;
        case PI_DULL:
                db_printf("DULL");
                break;
        default:
                if (ih->ih_pri >= PI_SOFT)
                        db_printf("SWI ");
                else
                        db_printf("%4u", ih->ih_pri);
                break;
        }
        db_printf(" ");
        if (ih->ih_filter != NULL) {
                db_printf("[F]");
                db_printsym((uintptr_t)ih->ih_filter, DB_STGY_PROC);
        }
        if (ih->ih_handler != NULL) {
                if (ih->ih_filter != NULL)
                        db_printf(",");
                db_printf("[H]");
                db_printsym((uintptr_t)ih->ih_handler, DB_STGY_PROC);
        }
        db_printf("(%p)", ih->ih_argument);
        if (ih->ih_need ||
            (ih->ih_flags & (IH_EXCLUSIVE | IH_ENTROPY | IH_DEAD |
            IH_MPSAFE)) != 0) {
                db_printf(" {");
                comma = 0;
                if (ih->ih_flags & IH_EXCLUSIVE) {
                        if (comma)
                                db_printf(", ");
                        db_printf("EXCL");
                        comma = 1;
                }
                if (ih->ih_flags & IH_ENTROPY) {
                        if (comma)
                                db_printf(", ");
                        db_printf("ENTROPY");
                        comma = 1;
                }
                if (ih->ih_flags & IH_DEAD) {
                        if (comma)
                                db_printf(", ");
                        db_printf("DEAD");
                        comma = 1;
                }
                if (ih->ih_flags & IH_MPSAFE) {
                        if (comma)
                                db_printf(", ");
                        db_printf("MPSAFE");
                        comma = 1;
                }
                if (ih->ih_need) {
                        if (comma)
                                db_printf(", ");
                        db_printf("NEED");
                }
                db_printf("}");
        }
        db_printf("\n");
}

/*
 * Dump details about an event.
 */
void
db_dump_intr_event(struct intr_event *ie, int handlers)
{
        struct intr_handler *ih;
        struct intr_thread *it;
        int comma;

        db_printf("%s ", ie->ie_fullname);
        it = ie->ie_thread;
        if (it != NULL)
                db_printf("(pid %d)", it->it_thread->td_proc->p_pid);
        else
                db_printf("(no thread)");
        if ((ie->ie_flags & (IE_SOFT | IE_ENTROPY | IE_ADDING_THREAD)) != 0 ||
            (it != NULL && it->it_need)) {
                db_printf(" {");
                comma = 0;
                if (ie->ie_flags & IE_SOFT) {
                        db_printf("SOFT");
                        comma = 1;
                }
                if (ie->ie_flags & IE_ENTROPY) {
                        if (comma)
                                db_printf(", ");
                        db_printf("ENTROPY");
                        comma = 1;
                }
                if (ie->ie_flags & IE_ADDING_THREAD) {
                        if (comma)
                                db_printf(", ");
                        db_printf("ADDING_THREAD");
                        comma = 1;
                }
                if (it != NULL && it->it_need) {
                        if (comma)
                                db_printf(", ");
                        db_printf("NEED");
                }
                db_printf("}");
        }
        db_printf("\n");

        if (handlers)
                CK_SLIST_FOREACH(ih, &ie->ie_handlers, ih_next)
                    db_dump_intrhand(ih);
}

/*
 * Dump data about interrupt handlers
 */
DB_SHOW_COMMAND(intr, db_show_intr)
{
        struct intr_event *ie;
        int all, verbose;

        verbose = strchr(modif, 'v') != NULL;
        all = strchr(modif, 'a') != NULL;
        TAILQ_FOREACH(ie, &event_list, ie_list) {
                if (!all && CK_SLIST_EMPTY(&ie->ie_handlers))
                        continue;
                db_dump_intr_event(ie, verbose);
                if (db_pager_quit)
                        break;
        }
}
#endif /* DDB */

/*
 * Start standard software interrupt threads
 */
static void
start_softintr(void *dummy)
{

        if (swi_add(NULL, "vm", swi_vm, NULL, SWI_VM, INTR_MPSAFE, &vm_ih))
                panic("died while creating vm swi ithread");
}
SYSINIT(start_softintr, SI_SUB_SOFTINTR, SI_ORDER_FIRST, start_softintr,
    NULL);

/*
 * Sysctls used by systat and others: hw.intrnames and hw.intrcnt.
 * The data for this is machine dependent, and the declarations are in machine
 * dependent code.  The layout of intrnames and intrcnt, however, is machine
 * independent.
 *
 * We do not know the length of intrcnt and intrnames at compile time, so
 * calculate things at run time.
 */
static int
sysctl_intrnames(SYSCTL_HANDLER_ARGS)
{
        return (sysctl_handle_opaque(oidp, intrnames, sintrnames, req));
}

SYSCTL_PROC(_hw, OID_AUTO, intrnames, CTLTYPE_OPAQUE | CTLFLAG_RD,
    NULL, 0, sysctl_intrnames, "", "Interrupt Names");

static int
sysctl_intrcnt(SYSCTL_HANDLER_ARGS)
{
#ifdef SCTL_MASK32
        uint32_t *intrcnt32;
        unsigned i;
        int error;

        if (req->flags & SCTL_MASK32) {
                if (!req->oldptr)
                        return (sysctl_handle_opaque(oidp, NULL, sintrcnt / 2, req));
                intrcnt32 = malloc(sintrcnt / 2, M_TEMP, M_NOWAIT);
                if (intrcnt32 == NULL)
                        return (ENOMEM);
                for (i = 0; i < sintrcnt / sizeof (u_long); i++)
                        intrcnt32[i] = intrcnt[i];
                error = sysctl_handle_opaque(oidp, intrcnt32, sintrcnt / 2, req);
                free(intrcnt32, M_TEMP);
                return (error);
        }
#endif
        return (sysctl_handle_opaque(oidp, intrcnt, sintrcnt, req));
}

SYSCTL_PROC(_hw, OID_AUTO, intrcnt, CTLTYPE_OPAQUE | CTLFLAG_RD,
    NULL, 0, sysctl_intrcnt, "", "Interrupt Counts");
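
/*
 * Usage sketch (illustrative, not compiled): how a userland consumer such
 * as vmstat(8) might read these nodes.  hw.intrnames is a packed sequence
 * of NUL-terminated strings and hw.intrcnt a parallel u_long array; the
 * two-call pattern fetches the sizes first.
 */
#if 0
#include <sys/types.h>
#include <sys/sysctl.h>
#include <err.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int
main(void)
{
        size_t namesz, cntsz;
        char *names, *cp;
        u_long *cnt;
        u_int i;

        if (sysctlbyname("hw.intrnames", NULL, &namesz, NULL, 0) != 0 ||
            sysctlbyname("hw.intrcnt", NULL, &cntsz, NULL, 0) != 0)
                err(1, "size query");
        names = malloc(namesz);
        cnt = malloc(cntsz);
        if (names == NULL || cnt == NULL)
                err(1, "malloc");
        if (sysctlbyname("hw.intrnames", names, &namesz, NULL, 0) != 0 ||
            sysctlbyname("hw.intrcnt", cnt, &cntsz, NULL, 0) != 0)
                err(1, "data query");
        cp = names;
        for (i = 0; i < cntsz / sizeof(u_long); i++) {
                printf("%s\t%lu\n", cp, cnt[i]);
                cp += strlen(cp) + 1;
        }
        return (0);
}
#endif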

#ifdef DDB
/*
 * DDB command to dump the interrupt statistics.
 */
DB_SHOW_COMMAND(intrcnt, db_show_intrcnt)
{
        u_long *i;
        char *cp;
        u_int j;

        cp = intrnames;
        j = 0;
        for (i = intrcnt; j < (sintrcnt / sizeof(u_long)) && !db_pager_quit;
            i++, j++) {
                if (*cp == '\0')
                        break;
                if (*i != 0)
                        db_printf("%s\t%lu\n", cp, *i);
                cp += strlen(cp) + 1;
        }
}
#endif
