FreeBSD/Linux Kernel Cross Reference
sys/kern/kern_intr.c

    1 /*-
    2  * Copyright (c) 1997, Stefan Esser <se@freebsd.org>
    3  * All rights reserved.
    4  *
    5  * Redistribution and use in source and binary forms, with or without
    6  * modification, are permitted provided that the following conditions
    7  * are met:
    8  * 1. Redistributions of source code must retain the above copyright
    9  *    notice unmodified, this list of conditions, and the following
   10  *    disclaimer.
   11  * 2. Redistributions in binary form must reproduce the above copyright
   12  *    notice, this list of conditions and the following disclaimer in the
   13  *    documentation and/or other materials provided with the distribution.
   14  *
   15  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
   16  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
   17  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
   18  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
   19  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
   20  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
   21  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
   22  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
   23  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
   24  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
   25  */
   26 
   27 #include <sys/cdefs.h>
   28 __FBSDID("$FreeBSD$");
   29 
   30 #include "opt_ddb.h"
   31 
   32 #include <sys/param.h>
   33 #include <sys/bus.h>
   34 #include <sys/conf.h>
   35 #include <sys/rtprio.h>
   36 #include <sys/systm.h>
   37 #include <sys/interrupt.h>
   38 #include <sys/kernel.h>
   39 #include <sys/kthread.h>
   40 #include <sys/ktr.h>
   41 #include <sys/limits.h>
   42 #include <sys/lock.h>
   43 #include <sys/malloc.h>
   44 #include <sys/mutex.h>
   45 #include <sys/proc.h>
   46 #include <sys/random.h>
   47 #include <sys/resourcevar.h>
   48 #include <sys/sched.h>
   49 #include <sys/smp.h>
   50 #include <sys/sysctl.h>
   51 #include <sys/unistd.h>
   52 #include <sys/vmmeter.h>
   53 #include <machine/atomic.h>
   54 #include <machine/cpu.h>
   55 #include <machine/md_var.h>
   56 #include <machine/stdarg.h>
   57 #ifdef DDB
   58 #include <ddb/ddb.h>
   59 #include <ddb/db_sym.h>
   60 #endif
   61 
   62 /*
   63  * Describe an interrupt thread.  There is one of these per interrupt event.
   64  */
   65 struct intr_thread {
   66         struct intr_event *it_event;
   67         struct thread *it_thread;       /* Kernel thread. */
   68         int     it_flags;               /* (j) IT_* flags. */
   69         int     it_need;                /* Needs service. */
   70 };
   71 
   72 /* Interrupt thread flags kept in it_flags */
   73 #define IT_DEAD         0x000001        /* Thread is waiting to exit. */
   74 
   75 struct  intr_entropy {
   76         struct  thread *td;
   77         uintptr_t event;
   78 };
   79 
   80 struct  intr_event *clk_intr_event;
   81 struct  intr_event *tty_intr_event;
   82 void    *softclock_ih;
   83 void    *vm_ih;
   84 
   85 static MALLOC_DEFINE(M_ITHREAD, "ithread", "Interrupt Threads");
   86 
   87 static int intr_storm_threshold = 1000;
   88 TUNABLE_INT("hw.intr_storm_threshold", &intr_storm_threshold);
   89 SYSCTL_INT(_hw, OID_AUTO, intr_storm_threshold, CTLFLAG_RW,
   90     &intr_storm_threshold, 0,
   91     "Number of consecutive interrupts before storm protection is enabled");
   92 static TAILQ_HEAD(, intr_event) event_list =
   93     TAILQ_HEAD_INITIALIZER(event_list);
   94 
   95 static void     intr_event_update(struct intr_event *ie);
   96 #ifdef INTR_FILTER
   97 static struct intr_thread *ithread_create(const char *name,
   98                               struct intr_handler *ih);
   99 #else
  100 static struct intr_thread *ithread_create(const char *name);
  101 #endif
  102 static void     ithread_destroy(struct intr_thread *ithread);
  103 static void     ithread_execute_handlers(struct proc *p, 
  104                     struct intr_event *ie);
  105 #ifdef INTR_FILTER
  106 static void     priv_ithread_execute_handler(struct proc *p, 
  107                     struct intr_handler *ih);
  108 #endif
  109 static void     ithread_loop(void *);
  110 static void     ithread_update(struct intr_thread *ithd);
  111 static void     start_softintr(void *);
  112 
  113 /* Map an interrupt type to an ithread priority. */
  114 u_char
  115 intr_priority(enum intr_type flags)
  116 {
  117         u_char pri;
  118 
  119         flags &= (INTR_TYPE_TTY | INTR_TYPE_BIO | INTR_TYPE_NET |
  120             INTR_TYPE_CAM | INTR_TYPE_MISC | INTR_TYPE_CLK | INTR_TYPE_AV);
  121         switch (flags) {
  122         case INTR_TYPE_TTY:
  123                 pri = PI_TTYLOW;
  124                 break;
  125         case INTR_TYPE_BIO:
  126                 /*
  127                  * XXX We need to refine this.  BSD/OS distinguishes
  128                  * between tape and disk priorities.
  129                  */
  130                 pri = PI_DISK;
  131                 break;
  132         case INTR_TYPE_NET:
  133                 pri = PI_NET;
  134                 break;
  135         case INTR_TYPE_CAM:
  136                 pri = PI_DISK;          /* XXX or PI_CAM? */
  137                 break;
  138         case INTR_TYPE_AV:              /* Audio/video */
  139                 pri = PI_AV;
  140                 break;
  141         case INTR_TYPE_CLK:
  142                 pri = PI_REALTIME;
  143                 break;
  144         case INTR_TYPE_MISC:
  145                 pri = PI_DULL;          /* don't care */
  146                 break;
  147         default:
  148                 /* We didn't specify an interrupt level. */
  149                 panic("intr_priority: no interrupt type in flags");
  150         }
  151 
  152         return pri;
  153 }
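/*
 * Illustrative sketch (not part of this file): the flags fed to
 * intr_priority() normally originate in a driver's bus_setup_intr()
 * call; the softc fields and handler below are hypothetical:
 *
 *	error = bus_setup_intr(dev, sc->irq_res,
 *	    INTR_TYPE_NET | INTR_MPSAFE, NULL, my_nic_intr, sc,
 *	    &sc->intr_cookie);
 *
 * The INTR_TYPE_NET flag is what maps the resulting ithread to PI_NET
 * in the switch above.
 */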
  154 
  155 /*
  156  * Update an ithread based on the associated intr_event.
  157  */
  158 static void
  159 ithread_update(struct intr_thread *ithd)
  160 {
  161         struct intr_event *ie;
  162         struct thread *td;
  163         u_char pri;
  164 
  165         ie = ithd->it_event;
  166         td = ithd->it_thread;
  167 
  168         /* Determine the overall priority of this event. */
  169         if (TAILQ_EMPTY(&ie->ie_handlers))
  170                 pri = PRI_MAX_ITHD;
  171         else
  172                 pri = TAILQ_FIRST(&ie->ie_handlers)->ih_pri;
  173 
  174         /* Update name and priority. */
  175         strlcpy(td->td_proc->p_comm, ie->ie_fullname,
  176             sizeof(td->td_proc->p_comm));
  177         thread_lock(td);
  178         sched_prio(td, pri);
  179         thread_unlock(td);
  180 }
  181 
  182 /*
  183  * Regenerate the full name of an interrupt event and update its priority.
  184  */
  185 static void
  186 intr_event_update(struct intr_event *ie)
  187 {
  188         struct intr_handler *ih;
  189         char *last;
  190         int missed, space;
  191 
  192         /* Start off with no entropy and just the name of the event. */
  193         mtx_assert(&ie->ie_lock, MA_OWNED);
  194         strlcpy(ie->ie_fullname, ie->ie_name, sizeof(ie->ie_fullname));
  195         ie->ie_flags &= ~IE_ENTROPY;
  196         missed = 0;
  197         space = 1;
  198 
  199         /* Run through all the handlers updating values. */
  200         TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next) {
  201                 if (strlen(ie->ie_fullname) + strlen(ih->ih_name) + 1 <
  202                     sizeof(ie->ie_fullname)) {
  203                         strcat(ie->ie_fullname, " ");
  204                         strcat(ie->ie_fullname, ih->ih_name);
  205                         space = 0;
  206                 } else
  207                         missed++;
  208                 if (ih->ih_flags & IH_ENTROPY)
  209                         ie->ie_flags |= IE_ENTROPY;
  210         }
  211 
  212         /*
  213          * If the handler names were too long, add +'s to indicate missing
  214          * names. If we run out of room and still have +'s to add, change
  215          * the last character from a + to a *.
  216          */
  217         last = &ie->ie_fullname[sizeof(ie->ie_fullname) - 2];
  218         while (missed-- > 0) {
  219                 if (strlen(ie->ie_fullname) + 1 == sizeof(ie->ie_fullname)) {
  220                         if (*last == '+') {
  221                                 *last = '*';
  222                                 break;
  223                         } else
  224                                 *last = '+';
  225                 } else if (space) {
  226                         strcat(ie->ie_fullname, " +");
  227                         space = 0;
  228                 } else
  229                         strcat(ie->ie_fullname, "+");
  230         }
  231 
  232         /*
   233          * If this event has an ithread, update its priority and
  234          * name.
  235          */
  236         if (ie->ie_thread != NULL)
  237                 ithread_update(ie->ie_thread);
  238         CTR2(KTR_INTR, "%s: updated %s", __func__, ie->ie_fullname);
  239 }
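/*
 * Worked example (illustrative; exact results depend on the size of
 * ie_fullname): for an event named "irq10" whose handlers em0, fxp0 and
 * uhci0 all fit, the full name becomes "irq10 em0 fxp0 uhci0".  If only
 * em0 fits, each missed handler appends a '+' ("irq10 em0++"), and once
 * the buffer is completely full the final '+' is rewritten as '*'.
 */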
  240 
  241 #ifndef INTR_FILTER
  242 int
  243 intr_event_create(struct intr_event **event, void *source, int flags,
  244     void (*enable)(void *), int (*assign_cpu)(void *, u_char), const char *fmt,
  245     ...)
  246 {
  247         struct intr_event *ie;
  248         va_list ap;
  249 
  250         /* The only valid flag during creation is IE_SOFT. */
  251         if ((flags & ~IE_SOFT) != 0)
  252                 return (EINVAL);
  253         ie = malloc(sizeof(struct intr_event), M_ITHREAD, M_WAITOK | M_ZERO);
  254         ie->ie_source = source;
  255         ie->ie_enable = enable;
  256         ie->ie_assign_cpu = assign_cpu;
  257         ie->ie_flags = flags;
  258         ie->ie_cpu = NOCPU;
  259         TAILQ_INIT(&ie->ie_handlers);
  260         mtx_init(&ie->ie_lock, "intr event", NULL, MTX_DEF);
  261 
  262         va_start(ap, fmt);
  263         vsnprintf(ie->ie_name, sizeof(ie->ie_name), fmt, ap);
  264         va_end(ap);
  265         strlcpy(ie->ie_fullname, ie->ie_name, sizeof(ie->ie_fullname));
  266         mtx_pool_lock(mtxpool_sleep, &event_list);
  267         TAILQ_INSERT_TAIL(&event_list, ie, ie_list);
  268         mtx_pool_unlock(mtxpool_sleep, &event_list);
  269         if (event != NULL)
  270                 *event = ie;
  271         CTR2(KTR_INTR, "%s: created %s", __func__, ie->ie_name);
  272         return (0);
  273 }
  274 #else
  275 int
  276 intr_event_create(struct intr_event **event, void *source, int flags,
  277     void (*enable)(void *), void (*eoi)(void *), void (*disab)(void *), 
  278     int (*assign_cpu)(void *, u_char), const char *fmt, ...)
  279 {
  280         struct intr_event *ie;
  281         va_list ap;
  282 
  283         /* The only valid flag during creation is IE_SOFT. */
  284         if ((flags & ~IE_SOFT) != 0)
  285                 return (EINVAL);
  286         ie = malloc(sizeof(struct intr_event), M_ITHREAD, M_WAITOK | M_ZERO);
  287         ie->ie_source = source;
  288         ie->ie_enable = enable;
  289         ie->ie_assign_cpu = assign_cpu;
  290         ie->ie_eoi = eoi;
  291         ie->ie_disab = disab;
  292         ie->ie_flags = flags;
  293         ie->ie_cpu = NOCPU;
  294         TAILQ_INIT(&ie->ie_handlers);
  295         mtx_init(&ie->ie_lock, "intr event", NULL, MTX_DEF);
  296 
  297         va_start(ap, fmt);
  298         vsnprintf(ie->ie_name, sizeof(ie->ie_name), fmt, ap);
  299         va_end(ap);
  300         strlcpy(ie->ie_fullname, ie->ie_name, sizeof(ie->ie_fullname));
  301         mtx_pool_lock(mtxpool_sleep, &event_list);
  302         TAILQ_INSERT_TAIL(&event_list, ie, ie_list);
  303         mtx_pool_unlock(mtxpool_sleep, &event_list);
  304         if (event != NULL)
  305                 *event = ie;
  306         CTR2(KTR_INTR, "%s: created %s", __func__, ie->ie_name);
  307         return (0);
  308 }
  309 #endif
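/*
 * Usage sketch (illustrative, not from this file): machine-dependent
 * code typically creates one event per interrupt line at boot, with
 * hypothetical PIC callbacks; the INTR_FILTER variant additionally
 * takes the EOI and disable routines:
 *
 *	struct intr_event *ie;
 *	int error;
 *
 *	error = intr_event_create(&ie, isrc, 0, my_pic_enable,
 *	#ifdef INTR_FILTER
 *	    my_pic_eoi, my_pic_disable,
 *	#endif
 *	    my_pic_assign_cpu, "irq%d:", irq);
 *	if (error)
 *		panic("failed to create interrupt event");
 */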
  310 
  311 /*
  312  * Bind an interrupt event to the specified CPU.  Note that not all
  313  * platforms support binding an interrupt to a CPU.  For those
  314  * platforms this request will fail.  For supported platforms, any
  315  * associated ithreads as well as the primary interrupt context will
   316  * be bound to the specified CPU.  Using a cpu id of NOCPU unbinds
  317  * the interrupt event.
  318  */
  319 int
  320 intr_event_bind(struct intr_event *ie, u_char cpu)
  321 {
  322         struct thread *td;
  323         int error;
  324 
  325         /* Need a CPU to bind to. */
  326         if (cpu != NOCPU && CPU_ABSENT(cpu))
  327                 return (EINVAL);
  328 
  329         if (ie->ie_assign_cpu == NULL)
  330                 return (EOPNOTSUPP);
  331 
  332         /* Don't allow a bind request if the interrupt is already bound. */
  333         mtx_lock(&ie->ie_lock);
  334         if (ie->ie_cpu != NOCPU && cpu != NOCPU) {
  335                 mtx_unlock(&ie->ie_lock);
  336                 return (EBUSY);
  337         }
  338         mtx_unlock(&ie->ie_lock);
  339 
  340         error = ie->ie_assign_cpu(ie->ie_source, cpu);
  341         if (error)
  342                 return (error);
  343         mtx_lock(&ie->ie_lock);
  344         if (ie->ie_thread != NULL)
  345                 td = ie->ie_thread->it_thread;
  346         else
  347                 td = NULL;
  348         if (td != NULL)
  349                 thread_lock(td);
  350         ie->ie_cpu = cpu;
  351         if (td != NULL)
  352                 thread_unlock(td);
  353         mtx_unlock(&ie->ie_lock);
  354         return (0);
  355 }
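/*
 * Usage sketch (illustrative): pin an event's ithread and primary
 * interrupt context to CPU 2, then release the binding later:
 *
 *	error = intr_event_bind(ie, 2);		(bind)
 *	...
 *	error = intr_event_bind(ie, NOCPU);	(unbind)
 */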
  356 
  357 int
  358 intr_event_destroy(struct intr_event *ie)
  359 {
  360 
  361         mtx_lock(&ie->ie_lock);
  362         if (!TAILQ_EMPTY(&ie->ie_handlers)) {
  363                 mtx_unlock(&ie->ie_lock);
  364                 return (EBUSY);
  365         }
  366         mtx_pool_lock(mtxpool_sleep, &event_list);
  367         TAILQ_REMOVE(&event_list, ie, ie_list);
  368         mtx_pool_unlock(mtxpool_sleep, &event_list);
  369 #ifndef notyet
  370         if (ie->ie_thread != NULL) {
  371                 ithread_destroy(ie->ie_thread);
  372                 ie->ie_thread = NULL;
  373         }
  374 #endif
  375         mtx_unlock(&ie->ie_lock);
  376         mtx_destroy(&ie->ie_lock);
  377         free(ie, M_ITHREAD);
  378         return (0);
  379 }
  380 
  381 #ifndef INTR_FILTER
  382 static struct intr_thread *
  383 ithread_create(const char *name)
  384 {
  385         struct intr_thread *ithd;
  386         struct thread *td;
  387         struct proc *p;
  388         int error;
  389 
  390         ithd = malloc(sizeof(struct intr_thread), M_ITHREAD, M_WAITOK | M_ZERO);
  391 
  392         error = kthread_create(ithread_loop, ithd, &p, RFSTOPPED | RFHIGHPID,
  393             0, "%s", name);
  394         if (error)
  395                 panic("kthread_create() failed with %d", error);
  396         td = FIRST_THREAD_IN_PROC(p);   /* XXXKSE */
  397         thread_lock(td);
  398         sched_class(td, PRI_ITHD);
  399         TD_SET_IWAIT(td);
  400         thread_unlock(td);
  401         td->td_pflags |= TDP_ITHREAD;
  402         ithd->it_thread = td;
  403         CTR2(KTR_INTR, "%s: created %s", __func__, name);
  404         return (ithd);
  405 }
  406 #else
  407 static struct intr_thread *
  408 ithread_create(const char *name, struct intr_handler *ih)
  409 {
  410         struct intr_thread *ithd;
  411         struct thread *td;
  412         struct proc *p;
  413         int error;
  414 
  415         ithd = malloc(sizeof(struct intr_thread), M_ITHREAD, M_WAITOK | M_ZERO);
  416 
  417         error = kthread_create(ithread_loop, ih, &p, RFSTOPPED | RFHIGHPID,
  418             0, "%s", name);
  419         if (error)
  420                 panic("kthread_create() failed with %d", error);
  421         td = FIRST_THREAD_IN_PROC(p);   /* XXXKSE */
  422         thread_lock(td);
  423         sched_class(td, PRI_ITHD);
  424         TD_SET_IWAIT(td);
  425         thread_unlock(td);
  426         td->td_pflags |= TDP_ITHREAD;
  427         ithd->it_thread = td;
  428         CTR2(KTR_INTR, "%s: created %s", __func__, name);
  429         return (ithd);
  430 }
  431 #endif
  432 
  433 static void
  434 ithread_destroy(struct intr_thread *ithread)
  435 {
  436         struct thread *td;
  437 
  438         CTR2(KTR_INTR, "%s: killing %s", __func__, ithread->it_event->ie_name);
  439         td = ithread->it_thread;
  440         thread_lock(td);
  441         ithread->it_flags |= IT_DEAD;
  442         if (TD_AWAITING_INTR(td)) {
  443                 TD_CLR_IWAIT(td);
  444                 sched_add(td, SRQ_INTR);
  445         }
  446         thread_unlock(td);
  447 }
  448 
  449 #ifndef INTR_FILTER
  450 int
  451 intr_event_add_handler(struct intr_event *ie, const char *name,
  452     driver_filter_t filter, driver_intr_t handler, void *arg, u_char pri,
  453     enum intr_type flags, void **cookiep)
  454 {
  455         struct intr_handler *ih, *temp_ih;
  456         struct intr_thread *it;
  457 
  458         if (ie == NULL || name == NULL || (handler == NULL && filter == NULL))
  459                 return (EINVAL);
  460 
  461         /* Allocate and populate an interrupt handler structure. */
  462         ih = malloc(sizeof(struct intr_handler), M_ITHREAD, M_WAITOK | M_ZERO);
  463         ih->ih_filter = filter;
  464         ih->ih_handler = handler;
  465         ih->ih_argument = arg;
  466         ih->ih_name = name;
  467         ih->ih_event = ie;
  468         ih->ih_pri = pri;
  469         if (flags & INTR_EXCL)
  470                 ih->ih_flags = IH_EXCLUSIVE;
  471         if (flags & INTR_MPSAFE)
  472                 ih->ih_flags |= IH_MPSAFE;
  473         if (flags & INTR_ENTROPY)
  474                 ih->ih_flags |= IH_ENTROPY;
  475 
   476         /* We can only have one exclusive handler in an event. */
  477         mtx_lock(&ie->ie_lock);
  478         if (!TAILQ_EMPTY(&ie->ie_handlers)) {
  479                 if ((flags & INTR_EXCL) ||
  480                     (TAILQ_FIRST(&ie->ie_handlers)->ih_flags & IH_EXCLUSIVE)) {
  481                         mtx_unlock(&ie->ie_lock);
  482                         free(ih, M_ITHREAD);
  483                         return (EINVAL);
  484                 }
  485         }
  486 
  487         /* Add the new handler to the event in priority order. */
  488         TAILQ_FOREACH(temp_ih, &ie->ie_handlers, ih_next) {
  489                 if (temp_ih->ih_pri > ih->ih_pri)
  490                         break;
  491         }
  492         if (temp_ih == NULL)
  493                 TAILQ_INSERT_TAIL(&ie->ie_handlers, ih, ih_next);
  494         else
  495                 TAILQ_INSERT_BEFORE(temp_ih, ih, ih_next);
  496         intr_event_update(ie);
  497 
  498         /* Create a thread if we need one. */
  499         while (ie->ie_thread == NULL && handler != NULL) {
  500                 if (ie->ie_flags & IE_ADDING_THREAD)
  501                         msleep(ie, &ie->ie_lock, 0, "ithread", 0);
  502                 else {
  503                         ie->ie_flags |= IE_ADDING_THREAD;
  504                         mtx_unlock(&ie->ie_lock);
  505                         it = ithread_create("intr: newborn");
  506                         mtx_lock(&ie->ie_lock);
  507                         ie->ie_flags &= ~IE_ADDING_THREAD;
  508                         ie->ie_thread = it;
  509                         it->it_event = ie;
  510                         ithread_update(it);
  511                         wakeup(ie);
  512                 }
  513         }
  514         CTR3(KTR_INTR, "%s: added %s to %s", __func__, ih->ih_name,
  515             ie->ie_name);
  516         mtx_unlock(&ie->ie_lock);
  517 
  518         if (cookiep != NULL)
  519                 *cookiep = ih;
  520         return (0);
  521 }
  522 #else
  523 int
  524 intr_event_add_handler(struct intr_event *ie, const char *name,
  525     driver_filter_t filter, driver_intr_t handler, void *arg, u_char pri,
  526     enum intr_type flags, void **cookiep)
  527 {
  528         struct intr_handler *ih, *temp_ih;
  529         struct intr_thread *it;
  530 
  531         if (ie == NULL || name == NULL || (handler == NULL && filter == NULL))
  532                 return (EINVAL);
  533 
  534         /* Allocate and populate an interrupt handler structure. */
  535         ih = malloc(sizeof(struct intr_handler), M_ITHREAD, M_WAITOK | M_ZERO);
  536         ih->ih_filter = filter;
  537         ih->ih_handler = handler;
  538         ih->ih_argument = arg;
  539         ih->ih_name = name;
  540         ih->ih_event = ie;
  541         ih->ih_pri = pri;
  542         if (flags & INTR_EXCL)
  543                 ih->ih_flags = IH_EXCLUSIVE;
  544         if (flags & INTR_MPSAFE)
  545                 ih->ih_flags |= IH_MPSAFE;
  546         if (flags & INTR_ENTROPY)
  547                 ih->ih_flags |= IH_ENTROPY;
  548 
   549         /* We can only have one exclusive handler in an event. */
  550         mtx_lock(&ie->ie_lock);
  551         if (!TAILQ_EMPTY(&ie->ie_handlers)) {
  552                 if ((flags & INTR_EXCL) ||
  553                     (TAILQ_FIRST(&ie->ie_handlers)->ih_flags & IH_EXCLUSIVE)) {
  554                         mtx_unlock(&ie->ie_lock);
  555                         free(ih, M_ITHREAD);
  556                         return (EINVAL);
  557                 }
  558         }
  559 
  560         /* Add the new handler to the event in priority order. */
  561         TAILQ_FOREACH(temp_ih, &ie->ie_handlers, ih_next) {
  562                 if (temp_ih->ih_pri > ih->ih_pri)
  563                         break;
  564         }
  565         if (temp_ih == NULL)
  566                 TAILQ_INSERT_TAIL(&ie->ie_handlers, ih, ih_next);
  567         else
  568                 TAILQ_INSERT_BEFORE(temp_ih, ih, ih_next);
  569         intr_event_update(ie);
  570 
  571         /* For filtered handlers, create a private ithread to run on. */
  572         if (filter != NULL && handler != NULL) { 
  573                 mtx_unlock(&ie->ie_lock);
  574                 it = ithread_create("intr: newborn", ih);               
  575                 mtx_lock(&ie->ie_lock);
  576                 it->it_event = ie; 
  577                 ih->ih_thread = it;
   578                 ithread_update(it);     /* XXX - do we really need this?!?!? */
  579         } else { /* Create the global per-event thread if we need one. */
  580                 while (ie->ie_thread == NULL && handler != NULL) {
  581                         if (ie->ie_flags & IE_ADDING_THREAD)
  582                                 msleep(ie, &ie->ie_lock, 0, "ithread", 0);
  583                         else {
  584                                 ie->ie_flags |= IE_ADDING_THREAD;
  585                                 mtx_unlock(&ie->ie_lock);
  586                                 it = ithread_create("intr: newborn", ih);
  587                                 mtx_lock(&ie->ie_lock);
  588                                 ie->ie_flags &= ~IE_ADDING_THREAD;
  589                                 ie->ie_thread = it;
  590                                 it->it_event = ie;
  591                                 ithread_update(it);
  592                                 wakeup(ie);
  593                         }
  594                 }
  595         }
  596         CTR3(KTR_INTR, "%s: added %s to %s", __func__, ih->ih_name,
  597             ie->ie_name);
  598         mtx_unlock(&ie->ie_lock);
  599 
  600         if (cookiep != NULL)
  601                 *cookiep = ih;
  602         return (0);
  603 }
  604 #endif
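/*
 * Illustrative sketch (not from this file): machine-dependent code
 * normally funnels bus_setup_intr() into this function, deriving the
 * ithread priority from the driver's flags; isrc is hypothetical here:
 *
 *	error = intr_event_add_handler(isrc->is_event,
 *	    device_get_nameunit(dev), filter, handler, arg,
 *	    intr_priority(flags), flags, cookiep);
 *
 * The cookie returned through cookiep is what a driver later hands to
 * intr_event_remove_handler() (via bus_teardown_intr()) to detach.
 */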
  605 
  606 /*
  607  * Return the ie_source field from the intr_event an intr_handler is
  608  * associated with.
  609  */
  610 void *
  611 intr_handler_source(void *cookie)
  612 {
  613         struct intr_handler *ih;
  614         struct intr_event *ie;
  615 
  616         ih = (struct intr_handler *)cookie;
  617         if (ih == NULL)
  618                 return (NULL);
  619         ie = ih->ih_event;
  620         KASSERT(ie != NULL,
  621             ("interrupt handler \"%s\" has a NULL interrupt event",
  622             ih->ih_name));
  623         return (ie->ie_source);
  624 }
  625 
  626 #ifndef INTR_FILTER
  627 int
  628 intr_event_remove_handler(void *cookie)
  629 {
  630         struct intr_handler *handler = (struct intr_handler *)cookie;
  631         struct intr_event *ie;
  632 #ifdef INVARIANTS
  633         struct intr_handler *ih;
  634 #endif
  635 #ifdef notyet
  636         int dead;
  637 #endif
  638 
  639         if (handler == NULL)
  640                 return (EINVAL);
  641         ie = handler->ih_event;
  642         KASSERT(ie != NULL,
  643             ("interrupt handler \"%s\" has a NULL interrupt event",
  644             handler->ih_name));
  645         mtx_lock(&ie->ie_lock);
  646         CTR3(KTR_INTR, "%s: removing %s from %s", __func__, handler->ih_name,
  647             ie->ie_name);
  648 #ifdef INVARIANTS
  649         TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next)
  650                 if (ih == handler)
  651                         goto ok;
  652         mtx_unlock(&ie->ie_lock);
  653         panic("interrupt handler \"%s\" not found in interrupt event \"%s\"",
  654             ih->ih_name, ie->ie_name);
  655 ok:
  656 #endif
  657         /*
  658          * If there is no ithread, then just remove the handler and return.
  659          * XXX: Note that an INTR_FAST handler might be running on another
  660          * CPU!
  661          */
  662         if (ie->ie_thread == NULL) {
  663                 TAILQ_REMOVE(&ie->ie_handlers, handler, ih_next);
  664                 mtx_unlock(&ie->ie_lock);
  665                 free(handler, M_ITHREAD);
  666                 return (0);
  667         }
  668 
  669         /*
  670          * If the interrupt thread is already running, then just mark this
  671          * handler as being dead and let the ithread do the actual removal.
  672          *
  673          * During a cold boot while cold is set, msleep() does not sleep,
  674          * so we have to remove the handler here rather than letting the
  675          * thread do it.
  676          */
  677         thread_lock(ie->ie_thread->it_thread);
  678         if (!TD_AWAITING_INTR(ie->ie_thread->it_thread) && !cold) {
  679                 handler->ih_flags |= IH_DEAD;
  680 
  681                 /*
  682                  * Ensure that the thread will process the handler list
  683                  * again and remove this handler if it has already passed
  684                  * it on the list.
  685                  */
  686                 ie->ie_thread->it_need = 1;
  687         } else
  688                 TAILQ_REMOVE(&ie->ie_handlers, handler, ih_next);
  689         thread_unlock(ie->ie_thread->it_thread);
  690         while (handler->ih_flags & IH_DEAD)
  691                 msleep(handler, &ie->ie_lock, 0, "iev_rmh", 0);
  692         intr_event_update(ie);
  693 #ifdef notyet
  694         /*
   695          * XXX: This could be bad in the case of ppbus(4).  Also, I think
  696          * this could lead to races of stale data when servicing an
  697          * interrupt.
  698          */
  699         dead = 1;
  700         TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next) {
  701                 if (!(ih->ih_flags & IH_FAST)) {
  702                         dead = 0;
  703                         break;
  704                 }
  705         }
  706         if (dead) {
  707                 ithread_destroy(ie->ie_thread);
  708                 ie->ie_thread = NULL;
  709         }
  710 #endif
  711         mtx_unlock(&ie->ie_lock);
  712         free(handler, M_ITHREAD);
  713         return (0);
  714 }
  715 
  716 int
  717 intr_event_schedule_thread(struct intr_event *ie)
  718 {
  719         struct intr_entropy entropy;
  720         struct intr_thread *it;
  721         struct thread *td;
  722         struct thread *ctd;
  723         struct proc *p;
  724 
  725         /*
  726          * If no ithread or no handlers, then we have a stray interrupt.
  727          */
  728         if (ie == NULL || TAILQ_EMPTY(&ie->ie_handlers) ||
  729             ie->ie_thread == NULL)
  730                 return (EINVAL);
  731 
  732         ctd = curthread;
  733         it = ie->ie_thread;
  734         td = it->it_thread;
  735         p = td->td_proc;
  736 
  737         /*
  738          * If any of the handlers for this ithread claim to be good
  739          * sources of entropy, then gather some.
  740          */
  741         if (harvest.interrupt && ie->ie_flags & IE_ENTROPY) {
  742                 CTR3(KTR_INTR, "%s: pid %d (%s) gathering entropy", __func__,
  743                     p->p_pid, p->p_comm);
  744                 entropy.event = (uintptr_t)ie;
  745                 entropy.td = ctd;
  746                 random_harvest(&entropy, sizeof(entropy), 2, 0,
  747                     RANDOM_INTERRUPT);
  748         }
  749 
  750         KASSERT(p != NULL, ("ithread %s has no process", ie->ie_name));
  751 
  752         /*
  753          * Set it_need to tell the thread to keep running if it is already
  754          * running.  Then, lock the thread and see if we actually need to
  755          * put it on the runqueue.
  756          */
  757         it->it_need = 1;
  758         thread_lock(td);
  759         if (TD_AWAITING_INTR(td)) {
  760                 CTR3(KTR_INTR, "%s: schedule pid %d (%s)", __func__, p->p_pid,
  761                     p->p_comm);
  762                 TD_CLR_IWAIT(td);
  763                 sched_add(td, SRQ_INTR);
  764         } else {
  765                 CTR5(KTR_INTR, "%s: pid %d (%s): it_need %d, state %d",
  766                     __func__, p->p_pid, p->p_comm, it->it_need, td->td_state);
  767         }
  768         thread_unlock(td);
  769 
  770         return (0);
  771 }
  772 #else
  773 int
  774 intr_event_remove_handler(void *cookie)
  775 {
  776         struct intr_handler *handler = (struct intr_handler *)cookie;
  777         struct intr_event *ie;
  778         struct intr_thread *it;
  779 #ifdef INVARIANTS
  780         struct intr_handler *ih;
  781 #endif
  782 #ifdef notyet
  783         int dead;
  784 #endif
  785 
  786         if (handler == NULL)
  787                 return (EINVAL);
  788         ie = handler->ih_event;
  789         KASSERT(ie != NULL,
  790             ("interrupt handler \"%s\" has a NULL interrupt event",
  791             handler->ih_name));
  792         mtx_lock(&ie->ie_lock);
  793         CTR3(KTR_INTR, "%s: removing %s from %s", __func__, handler->ih_name,
  794             ie->ie_name);
  795 #ifdef INVARIANTS
  796         TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next)
  797                 if (ih == handler)
  798                         goto ok;
  799         mtx_unlock(&ie->ie_lock);
  800         panic("interrupt handler \"%s\" not found in interrupt event \"%s\"",
  801             ih->ih_name, ie->ie_name);
  802 ok:
  803 #endif
  804         /*
  805          * If there are no ithreads (per event and per handler), then
  806          * just remove the handler and return.  
  807          * XXX: Note that an INTR_FAST handler might be running on another CPU!
  808          */
  809         if (ie->ie_thread == NULL && handler->ih_thread == NULL) {
  810                 TAILQ_REMOVE(&ie->ie_handlers, handler, ih_next);
  811                 mtx_unlock(&ie->ie_lock);
  812                 free(handler, M_ITHREAD);
  813                 return (0);
  814         }
  815 
  816         /* Private or global ithread? */
  817         it = (handler->ih_thread) ? handler->ih_thread : ie->ie_thread;
  818         /*
  819          * If the interrupt thread is already running, then just mark this
  820          * handler as being dead and let the ithread do the actual removal.
  821          *
  822          * During a cold boot while cold is set, msleep() does not sleep,
  823          * so we have to remove the handler here rather than letting the
  824          * thread do it.
  825          */
  826         thread_lock(it->it_thread);
  827         if (!TD_AWAITING_INTR(it->it_thread) && !cold) {
  828                 handler->ih_flags |= IH_DEAD;
  829 
  830                 /*
  831                  * Ensure that the thread will process the handler list
  832                  * again and remove this handler if it has already passed
  833                  * it on the list.
  834                  */
  835                 it->it_need = 1;
  836         } else
  837                 TAILQ_REMOVE(&ie->ie_handlers, handler, ih_next);
  838         thread_unlock(it->it_thread);
  839         while (handler->ih_flags & IH_DEAD)
  840                 msleep(handler, &ie->ie_lock, 0, "iev_rmh", 0);
  841         /* 
  842          * At this point, the handler has been disconnected from the event,
  843          * so we can kill the private ithread if any.
  844          */
  845         if (handler->ih_thread) {
  846                 ithread_destroy(handler->ih_thread);
  847                 handler->ih_thread = NULL;
  848         }
  849         intr_event_update(ie);
  850 #ifdef notyet
  851         /*
   852          * XXX: This could be bad in the case of ppbus(4).  Also, I think
  853          * this could lead to races of stale data when servicing an
  854          * interrupt.
  855          */
  856         dead = 1;
  857         TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next) {
   858                 if (ih->ih_handler != NULL) {
  859                         dead = 0;
  860                         break;
  861                 }
  862         }
  863         if (dead) {
  864                 ithread_destroy(ie->ie_thread);
  865                 ie->ie_thread = NULL;
  866         }
  867 #endif
  868         mtx_unlock(&ie->ie_lock);
  869         free(handler, M_ITHREAD);
  870         return (0);
  871 }
  872 
  873 int
  874 intr_event_schedule_thread(struct intr_event *ie, struct intr_thread *it)
  875 {
  876         struct intr_entropy entropy;
  877         struct thread *td;
  878         struct thread *ctd;
  879         struct proc *p;
  880 
  881         /*
  882          * If no ithread or no handlers, then we have a stray interrupt.
  883          */
  884         if (ie == NULL || TAILQ_EMPTY(&ie->ie_handlers) || it == NULL)
  885                 return (EINVAL);
  886 
  887         ctd = curthread;
  888         td = it->it_thread;
  889         p = td->td_proc;
  890 
  891         /*
  892          * If any of the handlers for this ithread claim to be good
  893          * sources of entropy, then gather some.
  894          */
  895         if (harvest.interrupt && ie->ie_flags & IE_ENTROPY) {
  896                 CTR3(KTR_INTR, "%s: pid %d (%s) gathering entropy", __func__,
  897                     p->p_pid, p->p_comm);
  898                 entropy.event = (uintptr_t)ie;
  899                 entropy.td = ctd;
  900                 random_harvest(&entropy, sizeof(entropy), 2, 0,
  901                     RANDOM_INTERRUPT);
  902         }
  903 
  904         KASSERT(p != NULL, ("ithread %s has no process", ie->ie_name));
  905 
  906         /*
  907          * Set it_need to tell the thread to keep running if it is already
  908          * running.  Then, lock the thread and see if we actually need to
  909          * put it on the runqueue.
  910          */
  911         it->it_need = 1;
  912         thread_lock(td);
  913         if (TD_AWAITING_INTR(td)) {
  914                 CTR3(KTR_INTR, "%s: schedule pid %d (%s)", __func__, p->p_pid,
  915                     p->p_comm);
  916                 TD_CLR_IWAIT(td);
  917                 sched_add(td, SRQ_INTR);
  918         } else {
  919                 CTR5(KTR_INTR, "%s: pid %d (%s): it_need %d, state %d",
  920                     __func__, p->p_pid, p->p_comm, it->it_need, td->td_state);
  921         }
  922         thread_unlock(td);
  923 
  924         return (0);
  925 }
  926 #endif
  927 
  928 /*
  929  * Add a software interrupt handler to a specified event.  If a given event
  930  * is not specified, then a new event is created.
  931  */
  932 int
  933 swi_add(struct intr_event **eventp, const char *name, driver_intr_t handler,
  934             void *arg, int pri, enum intr_type flags, void **cookiep)
  935 {
  936         struct intr_event *ie;
  937         int error;
  938 
  939         if (flags & INTR_ENTROPY)
  940                 return (EINVAL);
  941 
  942         ie = (eventp != NULL) ? *eventp : NULL;
  943 
  944         if (ie != NULL) {
  945                 if (!(ie->ie_flags & IE_SOFT))
  946                         return (EINVAL);
  947         } else {
  948 #ifdef INTR_FILTER
  949                 error = intr_event_create(&ie, NULL, IE_SOFT,
  950                     NULL, NULL, NULL, NULL, "swi%d:", pri);
  951 #else
  952                 error = intr_event_create(&ie, NULL, IE_SOFT,
  953                     NULL, NULL, "swi%d:", pri);
  954 #endif
  955                 if (error)
  956                         return (error);
  957                 if (eventp != NULL)
  958                         *eventp = ie;
  959         }
  960         return (intr_event_add_handler(ie, name, NULL, handler, arg,
  961                     (pri * RQ_PPQ) + PI_SOFT, flags, cookiep));
   962                     /* XXXKSE: think of a better way to get separate queues */
  963 }
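/*
 * Example (hedged sketch): the standard software interrupt threads are
 * registered through swi_add() roughly as below, saving the cookie that
 * swi_sched() takes later:
 *
 *	if (swi_add(&clk_intr_event, "clock", softclock, NULL, SWI_CLOCK,
 *	    INTR_MPSAFE, &softclock_ih) != 0)
 *		panic("died while creating clock swi ithread");
 */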
  964 
  965 /*
  966  * Schedule a software interrupt thread.
  967  */
  968 void
  969 swi_sched(void *cookie, int flags)
  970 {
  971         struct intr_handler *ih = (struct intr_handler *)cookie;
  972         struct intr_event *ie = ih->ih_event;
  973         int error;
  974 
  975         CTR3(KTR_INTR, "swi_sched: %s %s need=%d", ie->ie_name, ih->ih_name,
  976             ih->ih_need);
  977 
  978         /*
  979          * Set ih_need for this handler so that if the ithread is already
  980          * running it will execute this handler on the next pass.  Otherwise,
  981          * it will execute it the next time it runs.
  982          */
  983         atomic_store_rel_int(&ih->ih_need, 1);
  984 
  985         if (!(flags & SWI_DELAY)) {
  986                 PCPU_INC(cnt.v_soft);
  987 #ifdef INTR_FILTER
  988                 error = intr_event_schedule_thread(ie, ie->ie_thread);
  989 #else
  990                 error = intr_event_schedule_thread(ie);
  991 #endif
  992                 KASSERT(error == 0, ("stray software interrupt"));
  993         }
  994 }
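/*
 * Usage sketch (illustrative): hardclock() kicks the soft clock thread
 * along these lines when callouts need service:
 *
 *	swi_sched(softclock_ih, 0);
 *
 * Passing SWI_DELAY instead only marks the handler as needing service
 * (ih_need) without scheduling the ithread, as the code above shows.
 */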
  995 
  996 /*
  997  * Remove a software interrupt handler.  Currently this code does not
  998  * remove the associated interrupt event if it becomes empty.  Calling code
  999  * may do so manually via intr_event_destroy(), but that's not really
 1000  * an optimal interface.
 1001  */
 1002 int
 1003 swi_remove(void *cookie)
 1004 {
 1005 
 1006         return (intr_event_remove_handler(cookie));
 1007 }
 1008 
 1009 #ifdef INTR_FILTER
 1010 static void
 1011 priv_ithread_execute_handler(struct proc *p, struct intr_handler *ih)
 1012 {
 1013         struct intr_event *ie;
 1014 
 1015         ie = ih->ih_event;
 1016         /*
 1017          * If this handler is marked for death, remove it from
 1018          * the list of handlers and wake up the sleeper.
 1019          */
 1020         if (ih->ih_flags & IH_DEAD) {
 1021                 mtx_lock(&ie->ie_lock);
 1022                 TAILQ_REMOVE(&ie->ie_handlers, ih, ih_next);
 1023                 ih->ih_flags &= ~IH_DEAD;
 1024                 wakeup(ih);
 1025                 mtx_unlock(&ie->ie_lock);
 1026                 return;
 1027         }
 1028         
 1029         /* Execute this handler. */
 1030         CTR6(KTR_INTR, "%s: pid %d exec %p(%p) for %s flg=%x",
 1031              __func__, p->p_pid, (void *)ih->ih_handler, ih->ih_argument,
 1032              ih->ih_name, ih->ih_flags);
 1033         
 1034         if (!(ih->ih_flags & IH_MPSAFE))
 1035                 mtx_lock(&Giant);
 1036         ih->ih_handler(ih->ih_argument);
 1037         if (!(ih->ih_flags & IH_MPSAFE))
 1038                 mtx_unlock(&Giant);
 1039 }
 1040 #endif
 1041 
 1042 static void
 1043 ithread_execute_handlers(struct proc *p, struct intr_event *ie)
 1044 {
 1045         struct intr_handler *ih, *ihn;
 1046 
 1047         /* Interrupt handlers should not sleep. */
 1048         if (!(ie->ie_flags & IE_SOFT))
 1049                 THREAD_NO_SLEEPING();
 1050         TAILQ_FOREACH_SAFE(ih, &ie->ie_handlers, ih_next, ihn) {
 1051 
 1052                 /*
 1053                  * If this handler is marked for death, remove it from
 1054                  * the list of handlers and wake up the sleeper.
 1055                  */
 1056                 if (ih->ih_flags & IH_DEAD) {
 1057                         mtx_lock(&ie->ie_lock);
 1058                         TAILQ_REMOVE(&ie->ie_handlers, ih, ih_next);
 1059                         ih->ih_flags &= ~IH_DEAD;
 1060                         wakeup(ih);
 1061                         mtx_unlock(&ie->ie_lock);
 1062                         continue;
 1063                 }
 1064 
  1065                 /* Skip filter-only handlers. */
 1066                 if (ih->ih_handler == NULL)
 1067                         continue;
 1068 
 1069                 /*
 1070                  * For software interrupt threads, we only execute
 1071                  * handlers that have their need flag set.  Hardware
 1072                  * interrupt threads always invoke all of their handlers.
 1073                  */
 1074                 if (ie->ie_flags & IE_SOFT) {
 1075                         if (!ih->ih_need)
 1076                                 continue;
 1077                         else
 1078                                 atomic_store_rel_int(&ih->ih_need, 0);
 1079                 }
 1080 
 1081                 /* Execute this handler. */
 1082                 CTR6(KTR_INTR, "%s: pid %d exec %p(%p) for %s flg=%x",
 1083                     __func__, p->p_pid, (void *)ih->ih_handler, 
 1084                     ih->ih_argument, ih->ih_name, ih->ih_flags);
 1085 
 1086                 if (!(ih->ih_flags & IH_MPSAFE))
 1087                         mtx_lock(&Giant);
 1088                 ih->ih_handler(ih->ih_argument);
 1089                 if (!(ih->ih_flags & IH_MPSAFE))
 1090                         mtx_unlock(&Giant);
 1091         }
 1092         if (!(ie->ie_flags & IE_SOFT))
 1093                 THREAD_SLEEPING_OK();
 1094 
 1095         /*
 1096          * Interrupt storm handling:
 1097          *
 1098          * If this interrupt source is currently storming, then throttle
  1099          * it to only fire the handler once per clock tick.
 1100          *
 1101          * If this interrupt source is not currently storming, but the
 1102          * number of back to back interrupts exceeds the storm threshold,
 1103          * then enter storming mode.
 1104          */
 1105         if (intr_storm_threshold != 0 && ie->ie_count >= intr_storm_threshold &&
 1106             !(ie->ie_flags & IE_SOFT)) {
 1107                 /* Report the message only once every second. */
 1108                 if (ppsratecheck(&ie->ie_warntm, &ie->ie_warncnt, 1)) {
 1109                         printf(
 1110         "interrupt storm detected on \"%s\"; throttling interrupt source\n",
 1111                             ie->ie_name);
 1112                 }
 1113                 pause("istorm", 1);
 1114         } else
 1115                 ie->ie_count++;
 1116 
 1117         /*
 1118          * Now that all the handlers have had a chance to run, reenable
 1119          * the interrupt source.
 1120          */
 1121         if (ie->ie_enable != NULL)
 1122                 ie->ie_enable(ie->ie_source);
 1123 }
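/*
 * Tuning note (derived from the sysctl above): the storm threshold can
 * be changed at runtime, e.g. "sysctl hw.intr_storm_threshold=500", or
 * preset as a loader tunable.  Setting it to 0 disables storm
 * protection entirely, since the check above is then skipped.
 */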
 1124 
 1125 #ifndef INTR_FILTER
 1126 /*
 1127  * This is the main code for interrupt threads.
 1128  */
 1129 static void
 1130 ithread_loop(void *arg)
 1131 {
 1132         struct intr_thread *ithd;
 1133         struct intr_event *ie;
 1134         struct thread *td;
 1135         struct proc *p;
 1136         u_char cpu;
 1137 
 1138         td = curthread;
 1139         p = td->td_proc;
 1140         ithd = (struct intr_thread *)arg;
 1141         KASSERT(ithd->it_thread == td,
 1142             ("%s: ithread and proc linkage out of sync", __func__));
 1143         ie = ithd->it_event;
 1144         ie->ie_count = 0;
 1145         cpu = NOCPU;
 1146 
 1147         /*
 1148          * As long as we have interrupts outstanding, go through the
 1149          * list of handlers, giving each one a go at it.
 1150          */
 1151         for (;;) {
 1152                 /*
 1153                  * If we are an orphaned thread, then just die.
 1154                  */
 1155                 if (ithd->it_flags & IT_DEAD) {
 1156                         CTR3(KTR_INTR, "%s: pid %d (%s) exiting", __func__,
 1157                             p->p_pid, p->p_comm);
 1158                         free(ithd, M_ITHREAD);
 1159                         kthread_exit(0);
 1160                 }
 1161 
 1162                 /*
 1163                  * Service interrupts.  If another interrupt arrives while
 1164                  * we are running, it will set it_need to note that we
 1165                  * should make another pass.
 1166                  */
 1167                 while (ithd->it_need) {
 1168                         /*
 1169                          * This might need a full read and write barrier
 1170                          * to make sure that this write posts before any
 1171                          * of the memory or device accesses in the
 1172                          * handlers.
 1173                          */
 1174                         atomic_store_rel_int(&ithd->it_need, 0);
 1175                         ithread_execute_handlers(p, ie);
 1176                 }
 1177                 WITNESS_WARN(WARN_PANIC, NULL, "suspending ithread");
 1178                 mtx_assert(&Giant, MA_NOTOWNED);
 1179 
 1180                 /*
 1181                  * Processed all our interrupts.  Now get the sched
 1182                  * lock.  This may take a while and it_need may get
 1183                  * set again, so we have to check it again.
 1184                  */
 1185                 thread_lock(td);
 1186                 if (!ithd->it_need && !(ithd->it_flags & IT_DEAD)) {
 1187                         TD_SET_IWAIT(td);
 1188                         ie->ie_count = 0;
 1189                         mi_switch(SW_VOL, NULL);
 1190                 }
 1191 
 1192 #ifdef SMP
 1193                 /*
 1194                  * Ensure we are bound to the correct CPU.  We can't
  1195                  * move ithreads until SMP is running, however, so just
  1196                  * leave interrupts on the boot CPU during boot.
 1197                  */
 1198                 if (ie->ie_cpu != cpu && smp_started) {
 1199                         cpu = ie->ie_cpu;
 1200                         if (cpu == NOCPU)
 1201                                 sched_unbind(td);
 1202                         else
 1203                                 sched_bind(td, cpu);
 1204                 }
 1205 #endif
 1206                 thread_unlock(td);
 1207         }
 1208 }
 1209 #else
 1210 /*
 1211  * This is the main code for interrupt threads.
 1212  */
 1213 static void
 1214 ithread_loop(void *arg)
 1215 {
 1216         struct intr_thread *ithd;
 1217         struct intr_handler *ih;
 1218         struct intr_event *ie;
 1219         struct thread *td;
 1220         struct proc *p;
 1221         int priv;
 1222         u_char cpu;
 1223 
 1224         td = curthread;
 1225         p = td->td_proc;
 1226         ih = (struct intr_handler *)arg;
 1227         priv = (ih->ih_thread != NULL) ? 1 : 0;
 1228         ithd = (priv) ? ih->ih_thread : ih->ih_event->ie_thread;
 1229         KASSERT(ithd->it_thread == td,
 1230             ("%s: ithread and proc linkage out of sync", __func__));
 1231         ie = ithd->it_event;
 1232         ie->ie_count = 0;
 1233         cpu = NOCPU;
 1234 
 1235         /*
 1236          * As long as we have interrupts outstanding, go through the
 1237          * list of handlers, giving each one a go at it.
 1238          */
 1239         for (;;) {
 1240                 /*
 1241                  * If we are an orphaned thread, then just die.
 1242                  */
 1243                 if (ithd->it_flags & IT_DEAD) {
 1244                         CTR3(KTR_INTR, "%s: pid %d (%s) exiting", __func__,
 1245                             p->p_pid, p->p_comm);
 1246                         free(ithd, M_ITHREAD);
 1247                         kthread_exit(0);
 1248                 }
 1249 
 1250                 /*
 1251                  * Service interrupts.  If another interrupt arrives while
 1252                  * we are running, it will set it_need to note that we
 1253                  * should make another pass.
 1254                  */
 1255                 while (ithd->it_need) {
 1256                         /*
 1257                          * This might need a full read and write barrier
 1258                          * to make sure that this write posts before any
 1259                          * of the memory or device accesses in the
 1260                          * handlers.
 1261                          */
 1262                         atomic_store_rel_int(&ithd->it_need, 0);
 1263                         if (priv)
 1264                                 priv_ithread_execute_handler(p, ih);
 1265                         else 
 1266                                 ithread_execute_handlers(p, ie);
 1267                 }
 1268                 WITNESS_WARN(WARN_PANIC, NULL, "suspending ithread");
 1269                 mtx_assert(&Giant, MA_NOTOWNED);
 1270 
 1271                 /*
 1272                  * Processed all our interrupts.  Now get the sched
 1273                  * lock.  This may take a while and it_need may get
 1274                  * set again, so we have to check it again.
 1275                  */
 1276                 thread_lock(td);
 1277                 if (!ithd->it_need && !(ithd->it_flags & IT_DEAD)) {
 1278                         TD_SET_IWAIT(td);
 1279                         ie->ie_count = 0;
 1280                         mi_switch(SW_VOL, NULL);
 1281                 }
 1282 
 1283 #ifdef SMP
 1284                 /*
 1285                  * Ensure we are bound to the correct CPU.  We can't
  1286                  * move ithreads until SMP is running, however, so just
  1287                  * leave interrupts on the boot CPU during boot.
 1288                  */
 1289                 if (!priv && ie->ie_cpu != cpu && smp_started) {
 1290                         cpu = ie->ie_cpu;
 1291                         if (cpu == NOCPU)
 1292                                 sched_unbind(td);
 1293                         else
 1294                                 sched_bind(td, cpu);
 1295                 }
 1296 #endif
 1297                 thread_unlock(td);
 1298         }
 1299 }
 1300 
 1301 /* 
 1302  * Main loop for interrupt filter.
 1303  *
 1304  * Some architectures (i386, amd64 and arm) require the optional frame 
 1305  * parameter, and use it as the main argument for fast handler execution
 1306  * when ih_argument == NULL.
 1307  *
 1308  * Return value:
 1309  * o FILTER_STRAY:              No filter recognized the event, and no
 1310  *                              filter-less handler is registered on this 
 1311  *                              line.
 1312  * o FILTER_HANDLED:            A filter claimed the event and served it.
 1313  * o FILTER_SCHEDULE_THREAD:    No filter claimed the event, but there's at
 1314  *                              least one filter-less handler on this line.
 1315  * o FILTER_HANDLED | 
 1316  *   FILTER_SCHEDULE_THREAD:    A filter claimed the event, and asked for
 1317  *                              scheduling the per-handler ithread.
 1318  *
 1319  * In case an ithread has to be scheduled, in *ithd there will be a 
 1320  * pointer to a struct intr_thread containing the thread to be
 1321  * scheduled.
 1322  */
 1323 
 1324 int
 1325 intr_filter_loop(struct intr_event *ie, struct trapframe *frame, 
 1326                  struct intr_thread **ithd) 
 1327 {
 1328         struct intr_handler *ih;
 1329         void *arg;
 1330         int ret, thread_only;
 1331 
 1332         ret = 0;
 1333         thread_only = 0;
 1334         TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next) {
 1335                 /*
 1336                  * Execute fast interrupt handlers directly.
 1337                  * To support clock handlers, if a handler registers
 1338                  * with a NULL argument, then we pass it a pointer to
 1339                  * a trapframe as its argument.
 1340                  */
 1341                 arg = ((ih->ih_argument == NULL) ? frame : ih->ih_argument);
 1342                 
 1343                 CTR5(KTR_INTR, "%s: exec %p/%p(%p) for %s", __func__,
 1344                      ih->ih_filter, ih->ih_handler, arg, ih->ih_name);
 1345 
 1346                 if (ih->ih_filter != NULL)
 1347                         ret = ih->ih_filter(arg);
 1348                 else {
 1349                         thread_only = 1;
 1350                         continue;
 1351                 }
 1352 
 1353                 if (ret & FILTER_STRAY)
 1354                         continue;
 1355                 else { 
 1356                         *ithd = ih->ih_thread;
 1357                         return (ret);
 1358                 }
 1359         }
 1360 
 1361         /*
 1362          * No filters handled the interrupt and we have at least
 1363          * one handler without a filter.  In this case, we schedule
 1364          * all of the filter-less handlers to run in the ithread.
 1365          */     
 1366         if (thread_only) {
 1367                 *ithd = ie->ie_thread;
 1368                 return (FILTER_SCHEDULE_THREAD);
 1369         }
 1370         return (FILTER_STRAY);
 1371 }
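#if 0
/*
 * Illustrative driver-side filter (not part of this file): acknowledge
 * the device from filter (interrupt) context and defer the heavy work
 * to the handler's private ithread.  MY_DEV_ASSERTED()/MY_DEV_ACK()
 * and struct my_softc are hypothetical.
 */
static int
my_filter(void *arg)
{
	struct my_softc *sc = arg;

	if (!MY_DEV_ASSERTED(sc))
		return (FILTER_STRAY);
	MY_DEV_ACK(sc);
	return (FILTER_HANDLED | FILTER_SCHEDULE_THREAD);
}
#endif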
 1372 
 1373 /*
 1374  * Main interrupt handling body.
 1375  *
 1376  * Input:
 1377  * o ie:                        the event connected to this interrupt.
  1378  * o frame:                     some archs (e.g. i386) pass a frame to some
  1379  *                              handlers as their main argument.
 1380  * Return value:
 1381  * o 0:                         everything ok.
 1382  * o EINVAL:                    stray interrupt.
 1383  */
 1384 int
 1385 intr_event_handle(struct intr_event *ie, struct trapframe *frame)
 1386 {
 1387         struct intr_thread *ithd;
 1388         struct thread *td;
 1389         int thread;
 1390 
 1391         ithd = NULL;
 1392         td = curthread;
 1393 
 1394         if (ie == NULL || TAILQ_EMPTY(&ie->ie_handlers))
 1395                 return (EINVAL);
 1396 
 1397         td->td_intr_nesting_level++;
 1398         thread = 0;
 1399         critical_enter();
 1400         thread = intr_filter_loop(ie, frame, &ithd);
 1401         
 1402         /*
 1403          * If the interrupt was fully served, send it an EOI but leave
 1404          * it unmasked. Otherwise, mask the source as well as sending
 1405          * it an EOI.
 1406          */
 1407         if (thread & FILTER_HANDLED) {
 1408                 if (ie->ie_eoi != NULL)
 1409                         ie->ie_eoi(ie->ie_source);
 1410         } else {
 1411                 if (ie->ie_disab != NULL)
 1412                         ie->ie_disab(ie->ie_source);
 1413         }
 1414         critical_exit();
 1415         
 1416         /* Interrupt storm logic */
 1417         if (thread & FILTER_STRAY) {
 1418                 ie->ie_count++;
 1419                 if (ie->ie_count < intr_storm_threshold)
 1420                         printf("Interrupt stray detection not present\n");
 1421         }
 1422 
 1423         /* Schedule an ithread if needed. */
 1424         if (thread & FILTER_SCHEDULE_THREAD) {
 1425                 if (intr_event_schedule_thread(ie, ithd) != 0)
 1426                         panic("%s: impossible stray interrupt", __func__);
 1427         }
 1428         td->td_intr_nesting_level--;
 1429         return (0);
 1430 }
 1431 #endif
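
/*
 * Editor's aside -- not part of kern_intr.c.  A rough sketch of how a
 * machine-dependent dispatcher might hand a vectored interrupt to
 * intr_event_handle() above.  The vector table, its size, and the
 * stray-logging helper are invented for illustration; real MD code
 * (e.g. x86's intr_machdep.c) keeps its own per-vector bookkeeping.
 */
#define MD_NUM_VECTORS  256                     /* hypothetical */
static struct intr_event *md_events[MD_NUM_VECTORS];

void
md_dispatch_interrupt(u_int vector, struct trapframe *frame)
{
        struct intr_event *ie = md_events[vector];

        /*
         * intr_event_handle() returns EINVAL when the event is missing
         * or has no handlers; treat that as a stray interrupt.
         */
        if (intr_event_handle(ie, frame) != 0)
                md_log_stray(vector);           /* hypothetical */
}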
 1432 
 1433 #ifdef DDB
 1434 /*
 1435  * Dump details about an interrupt handler
 1436  */
 1437 static void
 1438 db_dump_intrhand(struct intr_handler *ih)
 1439 {
 1440         int comma;
 1441 
 1442         db_printf("\t%-10s ", ih->ih_name);
 1443         switch (ih->ih_pri) {
 1444         case PI_REALTIME:
 1445                 db_printf("CLK ");
 1446                 break;
 1447         case PI_AV:
 1448                 db_printf("AV  ");
 1449                 break;
 1450         case PI_TTYHIGH:
 1451         case PI_TTYLOW:
 1452                 db_printf("TTY ");
 1453                 break;
 1454         case PI_TAPE:
 1455                 db_printf("TAPE");
 1456                 break;
 1457         case PI_NET:
 1458                 db_printf("NET ");
 1459                 break;
 1460         case PI_DISK:
 1461         case PI_DISKLOW:
 1462                 db_printf("DISK");
 1463                 break;
 1464         case PI_DULL:
 1465                 db_printf("DULL");
 1466                 break;
 1467         default:
 1468                 if (ih->ih_pri >= PI_SOFT)
 1469                         db_printf("SWI ");
 1470                 else
 1471                         db_printf("%4u", ih->ih_pri);
 1472                 break;
 1473         }
 1474         db_printf(" ");
 1475         db_printsym((uintptr_t)ih->ih_handler, DB_STGY_PROC);
 1476         db_printf("(%p)", ih->ih_argument);
 1477         if (ih->ih_need ||
 1478             (ih->ih_flags & (IH_EXCLUSIVE | IH_ENTROPY | IH_DEAD |
 1479             IH_MPSAFE)) != 0) {
 1480                 db_printf(" {");
 1481                 comma = 0;
 1482                 if (ih->ih_flags & IH_EXCLUSIVE) {
 1483                         if (comma)
 1484                                 db_printf(", ");
 1485                         db_printf("EXCL");
 1486                         comma = 1;
 1487                 }
 1488                 if (ih->ih_flags & IH_ENTROPY) {
 1489                         if (comma)
 1490                                 db_printf(", ");
 1491                         db_printf("ENTROPY");
 1492                         comma = 1;
 1493                 }
 1494                 if (ih->ih_flags & IH_DEAD) {
 1495                         if (comma)
 1496                                 db_printf(", ");
 1497                         db_printf("DEAD");
 1498                         comma = 1;
 1499                 }
 1500                 if (ih->ih_flags & IH_MPSAFE) {
 1501                         if (comma)
 1502                                 db_printf(", ");
 1503                         db_printf("MPSAFE");
 1504                         comma = 1;
 1505                 }
 1506                 if (ih->ih_need) {
 1507                         if (comma)
 1508                                 db_printf(", ");
 1509                         db_printf("NEED");
 1510                 }
 1511                 db_printf("}");
 1512         }
 1513         db_printf("\n");
 1514 }
 1515 
 1516 /*
 1517  * Dump details about an interrupt event.
 1518  */
 1519 void
 1520 db_dump_intr_event(struct intr_event *ie, int handlers)
 1521 {
 1522         struct intr_handler *ih;
 1523         struct intr_thread *it;
 1524         int comma;
 1525 
 1526         db_printf("%s ", ie->ie_fullname);
 1527         it = ie->ie_thread;
 1528         if (it != NULL)
 1529                 db_printf("(pid %d)", it->it_thread->td_proc->p_pid);
 1530         else
 1531                 db_printf("(no thread)");
 1532         if (ie->ie_cpu != NOCPU)
 1533                 db_printf(" (CPU %d)", ie->ie_cpu);
 1534         if ((ie->ie_flags & (IE_SOFT | IE_ENTROPY | IE_ADDING_THREAD)) != 0 ||
 1535             (it != NULL && it->it_need)) {
 1536                 db_printf(" {");
 1537                 comma = 0;
 1538                 if (ie->ie_flags & IE_SOFT) {
 1539                         db_printf("SOFT");
 1540                         comma = 1;
 1541                 }
 1542                 if (ie->ie_flags & IE_ENTROPY) {
 1543                         if (comma)
 1544                                 db_printf(", ");
 1545                         db_printf("ENTROPY");
 1546                         comma = 1;
 1547                 }
 1548                 if (ie->ie_flags & IE_ADDING_THREAD) {
 1549                         if (comma)
 1550                                 db_printf(", ");
 1551                         db_printf("ADDING_THREAD");
 1552                         comma = 1;
 1553                 }
 1554                 if (it != NULL && it->it_need) {
 1555                         if (comma)
 1556                                 db_printf(", ");
 1557                         db_printf("NEED");
 1558                 }
 1559                 db_printf("}");
 1560         }
 1561         db_printf("\n");
 1562 
 1563         if (handlers)
 1564                 TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next)
 1565                     db_dump_intrhand(ih);
 1566 }
 1567 
 1568 /*
 1569  * Dump data about interrupt handlers
 1570  */
 1571 DB_SHOW_COMMAND(intr, db_show_intr)
 1572 {
 1573         struct intr_event *ie;
 1574         int all, verbose;
 1575 
 1576         verbose = index(modif, 'v') != NULL;
 1577         all = index(modif, 'a') != NULL;
 1578         TAILQ_FOREACH(ie, &event_list, ie_list) {
 1579                 if (!all && TAILQ_EMPTY(&ie->ie_handlers))
 1580                         continue;
 1581                 db_dump_intr_event(ie, verbose);
 1582                 if (db_pager_quit)
 1583                         break;
 1584         }
 1585 }
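
/*
 * Editor's aside: from the ddb(4) prompt this command is invoked as
 * "show intr"; the /v modifier additionally runs db_dump_intrhand() on
 * each handler, and /a includes events whose handler lists are empty.
 * The output below is illustrative only, not a captured session:
 *
 *      db> show intr/v
 *      irq14: ata0 (pid 12)
 *              ata0       DISK ata_intr(0xc1234000) {MPSAFE}
 */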
 1586 #endif /* DDB */
 1587 
 1588 /*
 1589  * Start standard software interrupt threads
 1590  */
 1591 static void
 1592 start_softintr(void *dummy)
 1593 {
 1594         struct proc *p;
 1595 
 1596         if (swi_add(&clk_intr_event, "clock", softclock, NULL, SWI_CLOCK,
 1597                 INTR_MPSAFE, &softclock_ih) ||
 1598             swi_add(NULL, "vm", swi_vm, NULL, SWI_VM, INTR_MPSAFE, &vm_ih))
 1599                 panic("died while creating standard software ithreads");
 1600 
 1601         p = clk_intr_event->ie_thread->it_thread->td_proc;
 1602         PROC_LOCK(p);
 1603         p->p_flag |= P_NOLOAD;
 1604         PROC_UNLOCK(p);
 1605 }
 1606 SYSINIT(start_softintr, SI_SUB_SOFTINTR, SI_ORDER_FIRST, start_softintr,
 1607     NULL);
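
/*
 * Editor's aside -- not part of kern_intr.c.  A sketch of how another
 * subsystem could create its own software interrupt thread with
 * swi_add() and later kick it with swi_sched(), mirroring the calls in
 * start_softintr() above.  All "mysubsys" names are invented.
 */
static void *mysubsys_ih;                       /* cookie from swi_add() */

static void
mysubsys_swi(void *arg)
{
        /* Deferred work runs here, in the SWI ithread. */
}

static void
mysubsys_init(void *dummy)
{
        if (swi_add(NULL, "mysubsys", mysubsys_swi, NULL, SWI_TQ,
            INTR_MPSAFE, &mysubsys_ih) != 0)
                panic("mysubsys: could not create SWI thread");
}
SYSINIT(mysubsys_swi, SI_SUB_SOFTINTR, SI_ORDER_ANY, mysubsys_init, NULL);

/*
 * A producer then schedules the handler from almost any context with:
 *
 *      swi_sched(mysubsys_ih, 0);
 */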
 1608 
 1609 /*
 1610  * Sysctls used by systat and others: hw.intrnames and hw.intrcnt.
 1611  * The data for this is machine dependent, and the declarations are in
 1612  * machine-dependent code.  The layout of intrnames and intrcnt, however,
 1613  * is machine independent.
 1614  *
 1615  * We do not know the length of intrcnt and intrnames at compile time, so
 1616  * calculate things at run time.
 1617  */
 1618 static int
 1619 sysctl_intrnames(SYSCTL_HANDLER_ARGS)
 1620 {
 1621         return (sysctl_handle_opaque(oidp, intrnames, eintrnames - intrnames,
 1622            req));
 1623 }
 1624 
 1625 SYSCTL_PROC(_hw, OID_AUTO, intrnames, CTLTYPE_OPAQUE | CTLFLAG_RD,
 1626     NULL, 0, sysctl_intrnames, "", "Interrupt Names");
 1627 
 1628 static int
 1629 sysctl_intrcnt(SYSCTL_HANDLER_ARGS)
 1630 {
 1631         return (sysctl_handle_opaque(oidp, intrcnt,
 1632             (char *)eintrcnt - (char *)intrcnt, req));
 1633 }
 1634 
 1635 SYSCTL_PROC(_hw, OID_AUTO, intrcnt, CTLTYPE_OPAQUE | CTLFLAG_RD,
 1636     NULL, 0, sysctl_intrcnt, "", "Interrupt Counts");
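
/*
 * Editor's aside -- not part of kern_intr.c.  A minimal userland reader
 * for the hw.intrnames and hw.intrcnt sysctls exported above, in the
 * spirit of vmstat -i.  hw.intrnames is a packed sequence of
 * NUL-terminated strings, one per slot of the u_long hw.intrcnt array;
 * error handling is abbreviated for brevity.
 */
#if 0   /* userland example, not kernel code */
#include <sys/types.h>
#include <sys/sysctl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int
main(void)
{
        size_t nameslen = 0, cntlen = 0, i, n, off;
        char *names;
        unsigned long *cnt;

        /* A NULL buffer makes sysctlbyname() report the size it needs. */
        sysctlbyname("hw.intrnames", NULL, &nameslen, NULL, 0);
        sysctlbyname("hw.intrcnt", NULL, &cntlen, NULL, 0);
        names = malloc(nameslen);
        cnt = malloc(cntlen);
        sysctlbyname("hw.intrnames", names, &nameslen, NULL, 0);
        sysctlbyname("hw.intrcnt", cnt, &cntlen, NULL, 0);

        /* Walk the counters, pairing each with its packed name. */
        n = cntlen / sizeof(unsigned long);
        for (i = 0, off = 0; i < n && off < nameslen; i++) {
                if (cnt[i] != 0)
                        printf("%s\t%lu\n", &names[off], cnt[i]);
                off += strlen(&names[off]) + 1;
        }
        free(names);
        free(cnt);
        return (0);
}
#endif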
 1637 
 1638 #ifdef DDB
 1639 /*
 1640  * DDB command to dump the interrupt statistics.
 1641  */
 1642 DB_SHOW_COMMAND(intrcnt, db_show_intrcnt)
 1643 {
 1644         u_long *i;
 1645         char *cp;
 1646 
 1647         cp = intrnames;
 1648         for (i = intrcnt; i != eintrcnt && !db_pager_quit; i++) {
 1649                 if (*cp == '\0')
 1650                         break;
 1651                 if (*i != 0)
 1652                         db_printf("%s\t%lu\n", cp, *i);
 1653                 cp += strlen(cp) + 1;
 1654         }
 1655 }
 1656 #endif
