FreeBSD/Linux Kernel Cross Reference
sys/kern/kern_intr.c

    1 /*-
    2  * Copyright (c) 1997, Stefan Esser <se@freebsd.org>
    3  * All rights reserved.
    4  *
    5  * Redistribution and use in source and binary forms, with or without
    6  * modification, are permitted provided that the following conditions
    7  * are met:
    8  * 1. Redistributions of source code must retain the above copyright
    9  *    notice unmodified, this list of conditions, and the following
   10  *    disclaimer.
   11  * 2. Redistributions in binary form must reproduce the above copyright
   12  *    notice, this list of conditions and the following disclaimer in the
   13  *    documentation and/or other materials provided with the distribution.
   14  *
   15  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
   16  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
   17  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
   18  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
   19  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
   20  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
   21  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
   22  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
   23  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
   24  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
   25  */
   26 
   27 #include <sys/cdefs.h>
   28 __FBSDID("$FreeBSD: releng/6.0/sys/kern/kern_intr.c 150591 2005-09-26 19:49:12Z jhb $");
   29 
   30 #include "opt_ddb.h"
   31 
   32 #include <sys/param.h>
   33 #include <sys/bus.h>
   34 #include <sys/conf.h>
   35 #include <sys/rtprio.h>
   36 #include <sys/systm.h>
   37 #include <sys/interrupt.h>
   38 #include <sys/kernel.h>
   39 #include <sys/kthread.h>
   40 #include <sys/ktr.h>
   41 #include <sys/limits.h>
   42 #include <sys/lock.h>
   43 #include <sys/malloc.h>
   44 #include <sys/mutex.h>
   45 #include <sys/proc.h>
   46 #include <sys/random.h>
   47 #include <sys/resourcevar.h>
   48 #include <sys/sched.h>
   49 #include <sys/sysctl.h>
   50 #include <sys/unistd.h>
   51 #include <sys/vmmeter.h>
   52 #include <machine/atomic.h>
   53 #include <machine/cpu.h>
   54 #include <machine/md_var.h>
   55 #include <machine/stdarg.h>
   56 #ifdef DDB
   57 #include <ddb/ddb.h>
   58 #include <ddb/db_sym.h>
   59 #endif
   60 
   61 struct  int_entropy {
   62         struct  proc *proc;
   63         uintptr_t vector;
   64 };
   65 
   66 struct  ithd *clk_ithd;
   67 struct  ithd *tty_ithd;
   68 void    *softclock_ih;
   69 void    *vm_ih;
   70 
   71 static MALLOC_DEFINE(M_ITHREAD, "ithread", "Interrupt Threads");
   72 
   73 static int intr_storm_threshold = 500;
   74 TUNABLE_INT("hw.intr_storm_threshold", &intr_storm_threshold);
   75 SYSCTL_INT(_hw, OID_AUTO, intr_storm_threshold, CTLFLAG_RW,
   76     &intr_storm_threshold, 0,
   77     "Number of consecutive interrupts before storm protection is enabled");
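
/*
 * Tuning sketch (not part of the original file): because the threshold is
 * both a loader tunable and a read/write sysctl, it can be changed without
 * recompiling, e.g.
 *
 *	hw.intr_storm_threshold="1000"		(in /boot/loader.conf)
 *	sysctl hw.intr_storm_threshold=1000	(at run time)
 *
 * Setting it to 0 disables storm protection entirely; see the check in
 * ithread_loop() below.
 */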
   78 
   79 static void     ithread_loop(void *);
   80 static void     ithread_update(struct ithd *);
   81 static void     start_softintr(void *);
   82 
   83 u_char
   84 ithread_priority(enum intr_type flags)
   85 {
   86         u_char pri;
   87 
   88         flags &= (INTR_TYPE_TTY | INTR_TYPE_BIO | INTR_TYPE_NET |
   89             INTR_TYPE_CAM | INTR_TYPE_MISC | INTR_TYPE_CLK | INTR_TYPE_AV);
   90         switch (flags) {
   91         case INTR_TYPE_TTY:
   92                 pri = PI_TTYLOW;
   93                 break;
   94         case INTR_TYPE_BIO:
   95                 /*
   96                  * XXX We need to refine this.  BSD/OS distinguishes
   97                  * between tape and disk priorities.
   98                  */
   99                 pri = PI_DISK;
  100                 break;
  101         case INTR_TYPE_NET:
  102                 pri = PI_NET;
  103                 break;
  104         case INTR_TYPE_CAM:
  105                 pri = PI_DISK;          /* XXX or PI_CAM? */
  106                 break;
  107         case INTR_TYPE_AV:              /* Audio/video */
  108                 pri = PI_AV;
  109                 break;
  110         case INTR_TYPE_CLK:
  111                 pri = PI_REALTIME;
  112                 break;
  113         case INTR_TYPE_MISC:
  114                 pri = PI_DULL;          /* don't care */
  115                 break;
  116         default:
  117                 /* We didn't specify an interrupt level. */
  118                 panic("ithread_priority: no interrupt type in flags");
  119         }
  120 
  121         return pri;
  122 }
  123 
  124 /*
  125  * Regenerate the name (p_comm) and priority of an interrupt thread.
  126  */
  127 static void
  128 ithread_update(struct ithd *ithd)
  129 {
  130         struct intrhand *ih;
  131         struct thread *td;
  132         struct proc *p;
  133         int missed;
  134 
  135         mtx_assert(&ithd->it_lock, MA_OWNED);
  136         td = ithd->it_td;
  137         if (td == NULL)
  138                 return;
  139         p = td->td_proc;
  140 
  141         strlcpy(p->p_comm, ithd->it_name, sizeof(p->p_comm));
  142         ithd->it_flags &= ~IT_ENTROPY;
  143 
  144         ih = TAILQ_FIRST(&ithd->it_handlers);
  145         if (ih == NULL) {
  146                 mtx_lock_spin(&sched_lock);
  147                 sched_prio(td, PRI_MAX_ITHD);
  148                 mtx_unlock_spin(&sched_lock);
  149                 return;
  150         }
  151         mtx_lock_spin(&sched_lock);
  152         sched_prio(td, ih->ih_pri);
  153         mtx_unlock_spin(&sched_lock);
  154         missed = 0;
  155         TAILQ_FOREACH(ih, &ithd->it_handlers, ih_next) {
  156                 if (strlen(p->p_comm) + strlen(ih->ih_name) + 1 <
  157                     sizeof(p->p_comm)) {
  158                         strcat(p->p_comm, " ");
  159                         strcat(p->p_comm, ih->ih_name);
  160                 } else
  161                         missed++;
  162                 if (ih->ih_flags & IH_ENTROPY)
  163                         ithd->it_flags |= IT_ENTROPY;
  164         }
  165         while (missed-- > 0) {
  166                 if (strlen(p->p_comm) + 1 == sizeof(p->p_comm)) {
  167                         if (p->p_comm[sizeof(p->p_comm) - 2] == '+')
  168                                 p->p_comm[sizeof(p->p_comm) - 2] = '*';
  169                         else
  170                                 p->p_comm[sizeof(p->p_comm) - 2] = '+';
  171                 } else
  172                         strcat(p->p_comm, "+");
  173         }
  174         CTR2(KTR_INTR, "%s: updated %s", __func__, p->p_comm);
  175 }
  176 
  177 int
  178 ithread_create(struct ithd **ithread, uintptr_t vector, int flags,
  179     void (*disable)(uintptr_t), void (*enable)(uintptr_t), const char *fmt, ...)
  180 {
  181         struct ithd *ithd;
  182         struct thread *td;
  183         struct proc *p;
  184         int error;
  185         va_list ap;
  186 
  187         /* The only valid flag during creation is IT_SOFT. */
  188         if ((flags & ~IT_SOFT) != 0)
  189                 return (EINVAL);
  190 
  191         ithd = malloc(sizeof(struct ithd), M_ITHREAD, M_WAITOK | M_ZERO);
  192         ithd->it_vector = vector;
  193         ithd->it_disable = disable;
  194         ithd->it_enable = enable;
  195         ithd->it_flags = flags;
  196         TAILQ_INIT(&ithd->it_handlers);
  197         mtx_init(&ithd->it_lock, "ithread", NULL, MTX_DEF);
  198 
  199         va_start(ap, fmt);
  200         vsnprintf(ithd->it_name, sizeof(ithd->it_name), fmt, ap);
  201         va_end(ap);
  202 
  203         error = kthread_create(ithread_loop, ithd, &p, RFSTOPPED | RFHIGHPID,
  204             0, "%s", ithd->it_name);
  205         if (error) {
  206                 mtx_destroy(&ithd->it_lock);
  207                 free(ithd, M_ITHREAD);
  208                 return (error);
  209         }
  210         td = FIRST_THREAD_IN_PROC(p);   /* XXXKSE */
  211         mtx_lock_spin(&sched_lock);
  212         td->td_ksegrp->kg_pri_class = PRI_ITHD;
  213         td->td_priority = PRI_MAX_ITHD;
  214         TD_SET_IWAIT(td);
  215         mtx_unlock_spin(&sched_lock);
  216         ithd->it_td = td;
  217         td->td_ithd = ithd;
  218         if (ithread != NULL)
  219                 *ithread = ithd;
  220         CTR2(KTR_INTR, "%s: created %s", __func__, ithd->it_name);
  221         return (0);
  222 }
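
/*
 * Note (sketch, not something the file states): drivers normally do not call
 * ithread_create() directly; the machine dependent interrupt code creates
 * one ithread per interrupt source, and driver handlers are then attached
 * to it with ithread_add_handler() below.
 */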
  223 
  224 int
  225 ithread_destroy(struct ithd *ithread)
  226 {
  227 
  228         struct thread *td;
  229         if (ithread == NULL)
  230                 return (EINVAL);
  231 
  232         td = ithread->it_td;
  233         mtx_lock(&ithread->it_lock);
  234         if (!TAILQ_EMPTY(&ithread->it_handlers)) {
  235                 mtx_unlock(&ithread->it_lock);
  236                 return (EINVAL);
  237         }
  238         ithread->it_flags |= IT_DEAD;
  239         mtx_lock_spin(&sched_lock);
  240         if (TD_AWAITING_INTR(td)) {
  241                 TD_CLR_IWAIT(td);
  242                 setrunqueue(td, SRQ_INTR);
  243         }
  244         mtx_unlock_spin(&sched_lock);
  245         mtx_unlock(&ithread->it_lock);
  246         CTR2(KTR_INTR, "%s: killing %s", __func__, ithread->it_name);
  247         return (0);
  248 }
  249 
  250 int
  251 ithread_add_handler(struct ithd* ithread, const char *name,
  252     driver_intr_t handler, void *arg, u_char pri, enum intr_type flags,
  253     void **cookiep)
  254 {
  255         struct intrhand *ih, *temp_ih;
  256 
  257         if (ithread == NULL || name == NULL || handler == NULL)
  258                 return (EINVAL);
  259 
  260         ih = malloc(sizeof(struct intrhand), M_ITHREAD, M_WAITOK | M_ZERO);
  261         ih->ih_handler = handler;
  262         ih->ih_argument = arg;
  263         ih->ih_name = name;
  264         ih->ih_ithread = ithread;
  265         ih->ih_pri = pri;
  266         if (flags & INTR_FAST)
  267                 ih->ih_flags = IH_FAST;
  268         else if (flags & INTR_EXCL)
  269                 ih->ih_flags = IH_EXCLUSIVE;
  270         if (flags & INTR_MPSAFE)
  271                 ih->ih_flags |= IH_MPSAFE;
  272         if (flags & INTR_ENTROPY)
  273                 ih->ih_flags |= IH_ENTROPY;
  274 
  275         mtx_lock(&ithread->it_lock);
  276         if ((flags & INTR_EXCL) != 0 && !TAILQ_EMPTY(&ithread->it_handlers))
  277                 goto fail;
  278         if (!TAILQ_EMPTY(&ithread->it_handlers)) {
  279                 temp_ih = TAILQ_FIRST(&ithread->it_handlers);
  280                 if (temp_ih->ih_flags & IH_EXCLUSIVE)
  281                         goto fail;
  282                 if ((ih->ih_flags & IH_FAST) && !(temp_ih->ih_flags & IH_FAST))
  283                         goto fail;
  284                 if (!(ih->ih_flags & IH_FAST) && (temp_ih->ih_flags & IH_FAST))
  285                         goto fail;
  286         }
  287 
  288         TAILQ_FOREACH(temp_ih, &ithread->it_handlers, ih_next)
  289             if (temp_ih->ih_pri > ih->ih_pri)
  290                     break;
  291         if (temp_ih == NULL)
  292                 TAILQ_INSERT_TAIL(&ithread->it_handlers, ih, ih_next);
  293         else
  294                 TAILQ_INSERT_BEFORE(temp_ih, ih, ih_next);
  295         ithread_update(ithread);
  296         mtx_unlock(&ithread->it_lock);
  297 
  298         if (cookiep != NULL)
  299                 *cookiep = ih;
  300         CTR3(KTR_INTR, "%s: added %s to %s", __func__, ih->ih_name,
  301             ithread->it_name);
  302         return (0);
  303 
  304 fail:
  305         mtx_unlock(&ithread->it_lock);
  306         free(ih, M_ITHREAD);
  307         return (EINVAL);
  308 }
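
/*
 * Usage sketch (hypothetical driver, not part of the original file): most
 * consumers reach ithread_add_handler() indirectly through bus_setup_intr(9),
 * which in this era of the tree takes a handler plus flags such as
 * INTR_TYPE_* and INTR_MPSAFE, e.g.
 *
 *	error = bus_setup_intr(dev, sc->irq_res,
 *	    INTR_TYPE_NET | INTR_MPSAFE, foo_intr, sc, &sc->intr_cookie);
 *
 * foo_intr(), sc and intr_cookie are made-up names.  INTR_MPSAFE maps to
 * IH_MPSAFE above, so ithread_loop() runs the handler without Giant; the
 * cookie returned here is what ithread_remove_handler() (reached via
 * bus_teardown_intr(9)) later takes to detach the handler.
 */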
  309 
  310 int
  311 ithread_remove_handler(void *cookie)
  312 {
  313         struct intrhand *handler = (struct intrhand *)cookie;
  314         struct ithd *ithread;
  315 #ifdef INVARIANTS
  316         struct intrhand *ih;
  317 #endif
  318 
  319         if (handler == NULL)
  320                 return (EINVAL);
  321         ithread = handler->ih_ithread;
  322         KASSERT(ithread != NULL,
  323             ("interrupt handler \"%s\" has a NULL interrupt thread",
  324                 handler->ih_name));
  325         CTR3(KTR_INTR, "%s: removing %s from %s", __func__, handler->ih_name,
  326             ithread->it_name);
  327         mtx_lock(&ithread->it_lock);
  328 #ifdef INVARIANTS
  329         TAILQ_FOREACH(ih, &ithread->it_handlers, ih_next)
  330                 if (ih == handler)
  331                         goto ok;
  332         mtx_unlock(&ithread->it_lock);
  333         panic("interrupt handler \"%s\" not found in interrupt thread \"%s\"",
  334             ih->ih_name, ithread->it_name);
  335 ok:
  336 #endif
  337         /*
  338          * If the interrupt thread is already running, then just mark this
  339          * handler as being dead and let the ithread do the actual removal.
  340          *
  341          * During a cold boot while cold is set, msleep() does not sleep,
  342          * so we have to remove the handler here rather than letting the
  343          * thread do it.
  344          */
  345         mtx_lock_spin(&sched_lock);
  346         if (!TD_AWAITING_INTR(ithread->it_td) && !cold) {
  347                 handler->ih_flags |= IH_DEAD;
  348 
  349                 /*
  350                  * Ensure that the thread will process the handler list
  351                  * again and remove this handler if it has already passed
  352                  * it on the list.
  353                  */
  354                 ithread->it_need = 1;
  355         } else 
  356                 TAILQ_REMOVE(&ithread->it_handlers, handler, ih_next);
  357         mtx_unlock_spin(&sched_lock);
  358         if ((handler->ih_flags & IH_DEAD) != 0)
  359                 msleep(handler, &ithread->it_lock, PUSER, "itrmh", 0);
  360         ithread_update(ithread);
  361         mtx_unlock(&ithread->it_lock);
  362         free(handler, M_ITHREAD);
  363         return (0);
  364 }
  365 
  366 int
  367 ithread_schedule(struct ithd *ithread)
  368 {
  369         struct int_entropy entropy;
  370         struct thread *td;
  371         struct thread *ctd;
  372         struct proc *p;
  373 
  374         /*
  375          * If no ithread or no handlers, then we have a stray interrupt.
  376          */
  377         if ((ithread == NULL) || TAILQ_EMPTY(&ithread->it_handlers))
  378                 return (EINVAL);
  379 
  380         ctd = curthread;
  381         td = ithread->it_td;
  382         p = td->td_proc;
  383         /*
  384          * If any of the handlers for this ithread claim to be good
  385          * sources of entropy, then gather some.
  386          */
  387         if (harvest.interrupt && ithread->it_flags & IT_ENTROPY) {
  388                 CTR3(KTR_INTR, "%s: pid %d (%s) gathering entropy", __func__,
  389                     p->p_pid, p->p_comm);
  390                 entropy.vector = ithread->it_vector;
  391                 entropy.proc = ctd->td_proc;
  392                 random_harvest(&entropy, sizeof(entropy), 2, 0,
  393                     RANDOM_INTERRUPT);
  394         }
  395 
  396         KASSERT(p != NULL, ("ithread %s has no process", ithread->it_name));
  397         CTR4(KTR_INTR, "%s: pid %d: (%s) need = %d",
  398             __func__, p->p_pid, p->p_comm, ithread->it_need);
  399 
  400         /*
  401          * Set it_need to tell the thread to keep running if it is already
  402          * running.  Then, grab sched_lock and see if we actually need to
  403          * put this thread on the runqueue.
  404          */
  405         ithread->it_need = 1;
  406         mtx_lock_spin(&sched_lock);
  407         if (TD_AWAITING_INTR(td)) {
  408                 CTR2(KTR_INTR, "%s: setrunqueue %d", __func__, p->p_pid);
  409                 TD_CLR_IWAIT(td);
  410                 setrunqueue(td, SRQ_INTR);
  411         } else {
  412                 CTR4(KTR_INTR, "%s: pid %d: it_need %d, state %d",
  413                     __func__, p->p_pid, ithread->it_need, td->td_state);
  414         }
  415         mtx_unlock_spin(&sched_lock);
  416 
  417         return (0);
  418 }
  419 
  420 int
  421 swi_add(struct ithd **ithdp, const char *name, driver_intr_t handler, 
  422             void *arg, int pri, enum intr_type flags, void **cookiep)
  423 {
  424         struct ithd *ithd;
  425         int error;
  426 
  427         if (flags & (INTR_FAST | INTR_ENTROPY))
  428                 return (EINVAL);
  429 
  430         ithd = (ithdp != NULL) ? *ithdp : NULL;
  431 
  432         if (ithd != NULL) {
  433                 if ((ithd->it_flags & IT_SOFT) == 0)
  434                         return(EINVAL);
  435         } else {
  436                 error = ithread_create(&ithd, pri, IT_SOFT, NULL, NULL,
  437                     "swi%d:", pri);
  438                 if (error)
  439                         return (error);
  440 
  441                 if (ithdp != NULL)
  442                         *ithdp = ithd;
  443         }
  444         return (ithread_add_handler(ithd, name, handler, arg,
  445                     (pri * RQ_PPQ) + PI_SOFT, flags, cookiep));
   446                     /* XXXKSE: think of a better way to get separate queues */
  447 }
  448 
  449 
  450 /*
  451  * Schedule a heavyweight software interrupt process. 
  452  */
  453 void
  454 swi_sched(void *cookie, int flags)
  455 {
  456         struct intrhand *ih = (struct intrhand *)cookie;
  457         struct ithd *it = ih->ih_ithread;
  458         int error;
  459 
  460         PCPU_LAZY_INC(cnt.v_intr);
  461                 
  462         CTR3(KTR_INTR, "swi_sched pid %d(%s) need=%d",
  463                 it->it_td->td_proc->p_pid, it->it_td->td_proc->p_comm, it->it_need);
  464 
  465         /*
  466          * Set ih_need for this handler so that if the ithread is already
  467          * running it will execute this handler on the next pass.  Otherwise,
  468          * it will execute it the next time it runs.
  469          */
  470         atomic_store_rel_int(&ih->ih_need, 1);
  471         if (!(flags & SWI_DELAY)) {
  472                 error = ithread_schedule(it);
  473                 KASSERT(error == 0, ("stray software interrupt"));
  474         }
  475 }
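
/*
 * Usage sketch (hypothetical names, not part of the original file): a
 * subsystem typically creates its software interrupt once and schedules it
 * whenever work is queued, e.g.
 *
 *	static void *foo_si_cookie;
 *
 *	swi_add(NULL, "foo", foo_si_handler, NULL, SWI_TQ, INTR_MPSAFE,
 *	    &foo_si_cookie);
 *	...
 *	swi_sched(foo_si_cookie, 0);
 *
 * SWI_TQ is one of the SWI_* priorities from <sys/interrupt.h>.  Passing
 * SWI_DELAY instead of 0 only marks the handler as needing service and
 * leaves the actual scheduling to a later caller (see the SWI_DELAY check
 * above).
 */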
  476 
  477 /*
  478  * This is the main code for interrupt threads.
  479  */
  480 static void
  481 ithread_loop(void *arg)
  482 {
  483         struct ithd *ithd;              /* our thread context */
  484         struct intrhand *ih;            /* and our interrupt handler chain */
  485         struct thread *td;
  486         struct proc *p;
  487         int count, warned;
  488         
  489         td = curthread;
  490         p = td->td_proc;
  491         ithd = (struct ithd *)arg;      /* point to myself */
  492         KASSERT(ithd->it_td == td && td->td_ithd == ithd,
  493             ("%s: ithread and proc linkage out of sync", __func__));
  494         count = 0;
  495         warned = 0;
  496 
  497         /*
  498          * As long as we have interrupts outstanding, go through the
  499          * list of handlers, giving each one a go at it.
  500          */
  501         for (;;) {
  502                 /*
  503                  * If we are an orphaned thread, then just die.
  504                  */
  505                 if (ithd->it_flags & IT_DEAD) {
  506                         CTR3(KTR_INTR, "%s: pid %d: (%s) exiting", __func__,
  507                             p->p_pid, p->p_comm);
  508                         td->td_ithd = NULL;
  509                         mtx_destroy(&ithd->it_lock);
  510                         free(ithd, M_ITHREAD);
  511                         kthread_exit(0);
  512                 }
  513 
  514                 CTR4(KTR_INTR, "%s: pid %d: (%s) need=%d", __func__,
  515                      p->p_pid, p->p_comm, ithd->it_need);
  516                 while (ithd->it_need) {
  517                         /*
  518                          * Service interrupts.  If another interrupt
  519                          * arrives while we are running, they will set
  520                          * it_need to denote that we should make
  521                          * another pass.
  522                          */
  523                         atomic_store_rel_int(&ithd->it_need, 0);
  524                         if (!(ithd->it_flags & IT_SOFT))
  525                                 THREAD_NO_SLEEPING();
  526 restart:
  527                         TAILQ_FOREACH(ih, &ithd->it_handlers, ih_next) {
  528                                 if (ithd->it_flags & IT_SOFT && !ih->ih_need)
  529                                         continue;
  530                                 atomic_store_rel_int(&ih->ih_need, 0);
  531                                 CTR6(KTR_INTR,
  532                                     "%s: pid %d ih=%p: %p(%p) flg=%x", __func__,
  533                                     p->p_pid, (void *)ih,
  534                                     (void *)ih->ih_handler, ih->ih_argument,
  535                                     ih->ih_flags);
  536 
  537                                 if ((ih->ih_flags & IH_DEAD) != 0) {
  538                                         mtx_lock(&ithd->it_lock);
  539                                         TAILQ_REMOVE(&ithd->it_handlers, ih,
  540                                             ih_next);
  541                                         wakeup(ih);
  542                                         mtx_unlock(&ithd->it_lock);
  543                                         goto restart;
  544                                 }
  545                                 if ((ih->ih_flags & IH_MPSAFE) == 0)
  546                                         mtx_lock(&Giant);
  547                                 ih->ih_handler(ih->ih_argument);
  548                                 if ((ih->ih_flags & IH_MPSAFE) == 0)
  549                                         mtx_unlock(&Giant);
  550                         }
  551                         if (!(ithd->it_flags & IT_SOFT))
  552                                 THREAD_SLEEPING_OK();
  553 
  554                         /*
  555                          * Interrupt storm handling:
  556                          *
  557                          * If this interrupt source is currently storming,
  558                          * then throttle it to only fire the handler once
  559                          * per clock tick.
  560                          *
  561                          * If this interrupt source is not currently
  562                          * storming, but the number of back to back
  563                          * interrupts exceeds the storm threshold, then
  564                          * enter storming mode.
  565                          */
  566                         if (intr_storm_threshold != 0 &&
  567                             count >= intr_storm_threshold) {
  568                                 if (!warned) {
  569                                         printf(
  570         "Interrupt storm detected on \"%s\"; throttling interrupt source\n",
  571                                             p->p_comm);
  572                                         warned = 1;
  573                                 }
  574                                 tsleep(&count, td->td_priority, "istorm", 1);
  575                         } else
  576                                 count++;
  577 
  578                         if (ithd->it_enable != NULL)
  579                                 ithd->it_enable(ithd->it_vector);
  580                 }
  581                 WITNESS_WARN(WARN_PANIC, NULL, "suspending ithread");
  582                 mtx_assert(&Giant, MA_NOTOWNED);
  583 
  584                 /*
  585                  * Processed all our interrupts.  Now get the sched
  586                  * lock.  This may take a while and it_need may get
  587                  * set again, so we have to check it again.
  588                  */
  589                 mtx_lock_spin(&sched_lock);
  590                 if (!ithd->it_need) {
  591                         TD_SET_IWAIT(td);
  592                         count = 0;
  593                         CTR2(KTR_INTR, "%s: pid %d: done", __func__, p->p_pid);
  594                         mi_switch(SW_VOL, NULL);
  595                         CTR2(KTR_INTR, "%s: pid %d: resumed", __func__, p->p_pid);
  596                 }
  597                 mtx_unlock_spin(&sched_lock);
  598         }
  599 }
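
/*
 * Storm throttling in practice: with the default intr_storm_threshold of
 * 500, once 500 back-to-back passes have been made without the thread ever
 * going idle, each further pass sleeps for one tick before the source is
 * re-enabled, so a stuck interrupt line is serviced at most about hz times
 * per second instead of continuously.  The count is reset whenever the
 * thread goes back to IWAIT above.
 */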
  600 
  601 #ifdef DDB
  602 /*
  603  * Dump details about an interrupt handler
  604  */
  605 static void
  606 db_dump_intrhand(struct intrhand *ih)
  607 {
  608         int comma;
  609 
  610         db_printf("\t%-10s ", ih->ih_name);
  611         switch (ih->ih_pri) {
  612         case PI_REALTIME:
  613                 db_printf("CLK ");
  614                 break;
  615         case PI_AV:
  616                 db_printf("AV  ");
  617                 break;
  618         case PI_TTYHIGH:
  619         case PI_TTYLOW:
  620                 db_printf("TTY ");
  621                 break;
  622         case PI_TAPE:
  623                 db_printf("TAPE");
  624                 break;
  625         case PI_NET:
  626                 db_printf("NET ");
  627                 break;
  628         case PI_DISK:
  629         case PI_DISKLOW:
  630                 db_printf("DISK");
  631                 break;
  632         case PI_DULL:
  633                 db_printf("DULL");
  634                 break;
  635         default:
  636                 if (ih->ih_pri >= PI_SOFT)
  637                         db_printf("SWI ");
  638                 else
  639                         db_printf("%4u", ih->ih_pri);
  640                 break;
  641         }
  642         db_printf(" ");
  643         db_printsym((uintptr_t)ih->ih_handler, DB_STGY_PROC);
  644         db_printf("(%p)", ih->ih_argument);
  645         if (ih->ih_need ||
  646             (ih->ih_flags & (IH_FAST | IH_EXCLUSIVE | IH_ENTROPY | IH_DEAD |
  647             IH_MPSAFE)) != 0) {
  648                 db_printf(" {");
  649                 comma = 0;
  650                 if (ih->ih_flags & IH_FAST) {
  651                         db_printf("FAST");
  652                         comma = 1;
  653                 }
  654                 if (ih->ih_flags & IH_EXCLUSIVE) {
  655                         if (comma)
  656                                 db_printf(", ");
  657                         db_printf("EXCL");
  658                         comma = 1;
  659                 }
  660                 if (ih->ih_flags & IH_ENTROPY) {
  661                         if (comma)
  662                                 db_printf(", ");
  663                         db_printf("ENTROPY");
  664                         comma = 1;
  665                 }
  666                 if (ih->ih_flags & IH_DEAD) {
  667                         if (comma)
  668                                 db_printf(", ");
  669                         db_printf("DEAD");
  670                         comma = 1;
  671                 }
  672                 if (ih->ih_flags & IH_MPSAFE) {
  673                         if (comma)
  674                                 db_printf(", ");
  675                         db_printf("MPSAFE");
  676                         comma = 1;
  677                 }
  678                 if (ih->ih_need) {
  679                         if (comma)
  680                                 db_printf(", ");
  681                         db_printf("NEED");
  682                 }
  683                 db_printf("}");
  684         }
  685         db_printf("\n");
  686 }
  687 
  688 /*
  689  * Dump details about an ithread
  690  */
  691 void
  692 db_dump_ithread(struct ithd *ithd, int handlers)
  693 {
  694         struct proc *p;
  695         struct intrhand *ih;
  696         int comma;
  697 
  698         if (ithd->it_td != NULL) {
  699                 p = ithd->it_td->td_proc;
  700                 db_printf("%s (pid %d)", p->p_comm, p->p_pid);
  701         } else
  702                 db_printf("%s: (no thread)", ithd->it_name);
  703         if ((ithd->it_flags & (IT_SOFT | IT_ENTROPY | IT_DEAD)) != 0 ||
  704             ithd->it_need) {
  705                 db_printf(" {");
  706                 comma = 0;
  707                 if (ithd->it_flags & IT_SOFT) {
  708                         db_printf("SOFT");
  709                         comma = 1;
  710                 }
  711                 if (ithd->it_flags & IT_ENTROPY) {
  712                         if (comma)
  713                                 db_printf(", ");
  714                         db_printf("ENTROPY");
  715                         comma = 1;
  716                 }
  717                 if (ithd->it_flags & IT_DEAD) {
  718                         if (comma)
  719                                 db_printf(", ");
  720                         db_printf("DEAD");
  721                         comma = 1;
  722                 }
  723                 if (ithd->it_need) {
  724                         if (comma)
  725                                 db_printf(", ");
  726                         db_printf("NEED");
  727                 }
  728                 db_printf("}");
  729         }
  730         db_printf("\n");
  731 
  732         if (handlers)
  733                 TAILQ_FOREACH(ih, &ithd->it_handlers, ih_next)
  734                     db_dump_intrhand(ih);
  735 }
  736 #endif /* DDB */
  737 
  738 /*
  739  * Start standard software interrupt threads
  740  */
  741 static void
  742 start_softintr(void *dummy)
  743 {
  744         struct proc *p;
  745 
  746         if (swi_add(&clk_ithd, "clock", softclock, NULL, SWI_CLOCK,
  747                 INTR_MPSAFE, &softclock_ih) ||
  748             swi_add(NULL, "vm", swi_vm, NULL, SWI_VM, INTR_MPSAFE, &vm_ih))
  749                 panic("died while creating standard software ithreads");
  750 
  751         p = clk_ithd->it_td->td_proc;
  752         PROC_LOCK(p);
  753         p->p_flag |= P_NOLOAD;
  754         PROC_UNLOCK(p);
  755 }
  756 SYSINIT(start_softintr, SI_SUB_SOFTINTR, SI_ORDER_FIRST, start_softintr, NULL)
  757 
  758 /* 
  759  * Sysctls used by systat and others: hw.intrnames and hw.intrcnt.
   760  * The data for this is machine dependent, and the declarations are in
   761  * machine dependent code.  The layout of intrnames and intrcnt, however,
   762  * is machine independent.
  763  *
  764  * We do not know the length of intrcnt and intrnames at compile time, so
  765  * calculate things at run time.
  766  */
  767 static int
  768 sysctl_intrnames(SYSCTL_HANDLER_ARGS)
  769 {
  770         return (sysctl_handle_opaque(oidp, intrnames, eintrnames - intrnames, 
  771            req));
  772 }
  773 
  774 SYSCTL_PROC(_hw, OID_AUTO, intrnames, CTLTYPE_OPAQUE | CTLFLAG_RD,
  775     NULL, 0, sysctl_intrnames, "", "Interrupt Names");
  776 
  777 static int
  778 sysctl_intrcnt(SYSCTL_HANDLER_ARGS)
  779 {
  780         return (sysctl_handle_opaque(oidp, intrcnt, 
  781             (char *)eintrcnt - (char *)intrcnt, req));
  782 }
  783 
  784 SYSCTL_PROC(_hw, OID_AUTO, intrcnt, CTLTYPE_OPAQUE | CTLFLAG_RD,
  785     NULL, 0, sysctl_intrcnt, "", "Interrupt Counts");
  786 
  787 #ifdef DDB
  788 /*
  789  * DDB command to dump the interrupt statistics.
  790  */
  791 DB_SHOW_COMMAND(intrcnt, db_show_intrcnt)
  792 {
  793         u_long *i;
  794         char *cp;
  795         int quit;
  796 
  797         cp = intrnames;
  798         db_setup_paging(db_simple_pager, &quit, db_lines_per_page);
  799         for (i = intrcnt, quit = 0; i != eintrcnt && !quit; i++) {
  800                 if (*cp == '\0')
  801                         break;
  802                 if (*i != 0)
  803                         db_printf("%s\t%lu\n", cp, *i);
  804                 cp += strlen(cp) + 1;
  805         }
  806 }
  807 #endif
