FreeBSD/Linux Kernel Cross Reference
sys/i386/i386/intr_machdep.c


    1 /*-
    2  * Copyright (c) 2003 John Baldwin <jhb@FreeBSD.org>
    3  * All rights reserved.
    4  *
    5  * Redistribution and use in source and binary forms, with or without
    6  * modification, are permitted provided that the following conditions
    7  * are met:
    8  * 1. Redistributions of source code must retain the above copyright
    9  *    notice, this list of conditions and the following disclaimer.
   10  * 2. Redistributions in binary form must reproduce the above copyright
   11  *    notice, this list of conditions and the following disclaimer in the
   12  *    documentation and/or other materials provided with the distribution.
   13  * 3. Neither the name of the author nor the names of any co-contributors
   14  *    may be used to endorse or promote products derived from this software
   15  *    without specific prior written permission.
   16  *
   17  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
   18  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   19  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   20  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
   21  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   22  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
   23  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   24  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   25  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   26  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   27  * SUCH DAMAGE.
   28  *
   29  * $FreeBSD$
   30  */
   31 
   32 /*
   33  * Machine dependent interrupt code for i386.  For the i386, we have to
    34  * deal with different PICs.  Thus, we use the passed-in vector to look up
   35  * an interrupt source associated with that vector.  The interrupt source
   36  * describes which PIC the source belongs to and includes methods to handle
   37  * that source.
   38  */
   39 
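A rough sketch of how control reaches this code (illustrative only; the real
entry points live in the PIC drivers such as the 8259A and local APIC code):
the driver maps a hardware vector to an IRQ number, looks up the matching
interrupt source, and hands it to intr_execute_handlers().

        /* Hypothetical PIC driver entry point; the foo names are made up. */
        void
        foo_handle_intr(int vector, struct trapframe *frame)
        {
                struct intsrc *isrc;

                isrc = intr_lookup_source(vector);
                if (isrc != NULL)
                        intr_execute_handlers(isrc, frame);
        }
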
   40 #include "opt_ddb.h"
   41 
   42 #include <sys/param.h>
   43 #include <sys/bus.h>
   44 #include <sys/interrupt.h>
   45 #include <sys/ktr.h>
   46 #include <sys/kernel.h>
   47 #include <sys/lock.h>
   48 #include <sys/mutex.h>
   49 #include <sys/proc.h>
   50 #include <sys/smp.h>
   51 #include <sys/syslog.h>
   52 #include <sys/systm.h>
   53 #include <sys/sx.h>
   54 #include <machine/clock.h>
   55 #include <machine/intr_machdep.h>
   56 #include <machine/smp.h>
   57 #ifdef DDB
   58 #include <ddb/ddb.h>
   59 #endif
   60 
   61 #define MAX_STRAY_LOG   5
   62 
   63 typedef void (*mask_fn)(void *);
   64 
   65 static int intrcnt_index;
   66 static struct intsrc *interrupt_sources[NUM_IO_INTS];
   67 static struct sx intr_table_lock;
   68 static struct mtx intrcnt_lock;
   69 static STAILQ_HEAD(, pic) pics;
   70 
   71 #ifdef INTR_FILTER
   72 static void intr_eoi_src(void *arg);
   73 static void intr_disab_eoi_src(void *arg);
   74 static void intr_event_stray(void *cookie);
   75 #endif
   76 
   77 #ifdef SMP
   78 static int assign_cpu;
   79 
   80 static void     intr_assign_next_cpu(struct intsrc *isrc);
   81 #endif
   82 
   83 static int      intr_assign_cpu(void *arg, u_char cpu);
   84 static void     intr_init(void *__dummy);
   85 static int      intr_pic_registered(struct pic *pic);
   86 static void     intrcnt_setname(const char *name, int index);
   87 static void     intrcnt_updatename(struct intsrc *is);
   88 static void     intrcnt_register(struct intsrc *is);
   89 
   90 static int
   91 intr_pic_registered(struct pic *pic)
   92 {
   93         struct pic *p;
   94 
   95         STAILQ_FOREACH(p, &pics, pics) {
   96                 if (p == pic)
   97                         return (1);
   98         }
   99         return (0);
  100 }
  101 
  102 /*
  103  * Register a new interrupt controller (PIC).  This is to support suspend
  104  * and resume where we suspend/resume controllers rather than individual
  105  * sources.  This also allows controllers with no active sources (such as
  106  * 8259As in a system using the APICs) to participate in suspend and resume.
  107  */
  108 int
  109 intr_register_pic(struct pic *pic)
  110 {
  111         int error;
  112 
  113         sx_xlock(&intr_table_lock);
  114         if (intr_pic_registered(pic))
  115                 error = EBUSY;
  116         else {
  117                 STAILQ_INSERT_TAIL(&pics, pic, pics);
  118                 error = 0;
  119         }
  120         sx_xunlock(&intr_table_lock);
  121         return (error);
  122 }
  123 
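A minimal sketch of the registration side (not from the tree; the foo_* names
are hypothetical, though the struct pic members match those used in this file):

        static struct pic foo_pic = {
                .pic_enable_source = foo_enable_source,
                .pic_disable_source = foo_disable_source,
                .pic_eoi_source = foo_eoi_source,
                .pic_enable_intr = foo_enable_intr,
                .pic_disable_intr = foo_disable_intr,
                .pic_vector = foo_vector,
                .pic_config_intr = foo_config_intr,
                .pic_assign_cpu = foo_assign_cpu,
                .pic_suspend = foo_suspend,
                .pic_resume = foo_resume,
        };

        /* Typically called once from the PIC driver's init path. */
        if (intr_register_pic(&foo_pic) != 0)
                printf("foo_pic already registered\n");
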
  124 /*
  125  * Register a new interrupt source with the global interrupt system.
  126  * The global interrupts need to be disabled when this function is
  127  * called.
  128  */
  129 int
  130 intr_register_source(struct intsrc *isrc)
  131 {
  132         int error, vector;
  133 
  134         KASSERT(intr_pic_registered(isrc->is_pic), ("unregistered PIC"));
  135         vector = isrc->is_pic->pic_vector(isrc);
  136         if (interrupt_sources[vector] != NULL)
  137                 return (EEXIST);
  138 #ifdef INTR_FILTER
  139         error = intr_event_create(&isrc->is_event, isrc, 0,
  140             (mask_fn)isrc->is_pic->pic_enable_source,
  141             intr_eoi_src, intr_disab_eoi_src, intr_assign_cpu, "irq%d:",
  142             vector);
  143 #else
  144         error = intr_event_create(&isrc->is_event, isrc, 0,
  145             (mask_fn)isrc->is_pic->pic_enable_source, intr_assign_cpu, "irq%d:",
  146             vector);
  147 #endif
  148         if (error)
  149                 return (error);
  150         sx_xlock(&intr_table_lock);
  151         if (interrupt_sources[vector] != NULL) {
  152                 sx_xunlock(&intr_table_lock);
  153                 intr_event_destroy(isrc->is_event);
  154                 return (EEXIST);
  155         }
  156         intrcnt_register(isrc);
  157         interrupt_sources[vector] = isrc;
  158         isrc->is_handlers = 0;
  159         sx_xunlock(&intr_table_lock);
  160         return (0);
  161 }
  162 
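For context, a sketch of how a PIC driver typically provides sources: it embeds
a struct intsrc at the head of its per-pin structure (so the PIC methods can
cast back to the driver type) and registers each pin once.  The foo names are
hypothetical:

        struct foo_intsrc {
                struct intsrc fi_intsrc;        /* must be first */
                int fi_pin;                     /* driver-private state */
        };

        static struct foo_intsrc foo_pins[16];

        static void
        foo_register_sources(void)
        {
                int i;

                for (i = 0; i < 16; i++) {
                        /* foo_pic is the struct pic sketched above. */
                        foo_pins[i].fi_intsrc.is_pic = &foo_pic;
                        foo_pins[i].fi_pin = i;
                        intr_register_source(&foo_pins[i].fi_intsrc);
                }
        }
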
  163 struct intsrc *
  164 intr_lookup_source(int vector)
  165 {
  166 
  167         return (interrupt_sources[vector]);
  168 }
  169 
  170 int
  171 intr_add_handler(const char *name, int vector, driver_filter_t filter,
  172     driver_intr_t handler, void *arg, enum intr_type flags, void **cookiep)
  173 {
  174         struct intsrc *isrc;
  175         int error;
  176 
  177         isrc = intr_lookup_source(vector);
  178         if (isrc == NULL)
  179                 return (EINVAL);
  180         error = intr_event_add_handler(isrc->is_event, name, filter, handler,
  181             arg, intr_priority(flags), flags, cookiep);
  182         if (error == 0) {
  183                 sx_xlock(&intr_table_lock);
  184                 intrcnt_updatename(isrc);
  185                 isrc->is_handlers++;
  186                 if (isrc->is_handlers == 1) {
  187 #ifdef SMP
  188                         if (assign_cpu)
  189                                 intr_assign_next_cpu(isrc);
  190 #endif
  191                         isrc->is_pic->pic_enable_intr(isrc);
  192                         isrc->is_pic->pic_enable_source(isrc);
  193                 }
  194                 sx_xunlock(&intr_table_lock);
  195         }
  196         return (error);
  197 }
  198 
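Drivers normally reach intr_add_handler() through bus_setup_intr(9) and the
nexus driver rather than calling it directly.  A hedged sketch of a direct
call, assuming a hypothetical driver whose softc is sc, whose ithread handler
is foo_intr, and which owns IRQ 10:

        void *cookie;
        int error;

        /* A NULL filter with a non-NULL handler registers an ithread handler only. */
        error = intr_add_handler("foo0", 10, NULL, foo_intr, sc,
            INTR_TYPE_MISC | INTR_MPSAFE, &cookie);
        if (error != 0)
                printf("foo0: cannot add handler for irq10: %d\n", error);

The cookie returned through the last argument is what intr_remove_handler()
below takes to undo the registration.
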
  199 int
  200 intr_remove_handler(void *cookie)
  201 {
  202         struct intsrc *isrc;
  203         int error;
  204 
  205         isrc = intr_handler_source(cookie);
  206         error = intr_event_remove_handler(cookie);
  207         if (error == 0) {
  208                 sx_xlock(&intr_table_lock);
  209                 isrc->is_handlers--;
  210                 if (isrc->is_handlers == 0) {
  211                         isrc->is_pic->pic_disable_source(isrc, PIC_NO_EOI);
  212                         isrc->is_pic->pic_disable_intr(isrc);
  213                 }
  214                 intrcnt_updatename(isrc);
  215                 sx_xunlock(&intr_table_lock);
  216         }
  217         return (error);
  218 }
  219 
  220 int
  221 intr_config_intr(int vector, enum intr_trigger trig, enum intr_polarity pol)
  222 {
  223         struct intsrc *isrc;
  224 
  225         isrc = intr_lookup_source(vector);
  226         if (isrc == NULL)
  227                 return (EINVAL);
  228         return (isrc->is_pic->pic_config_intr(isrc, trig, pol));
  229 }
  230 
  231 #ifdef INTR_FILTER
  232 void
  233 intr_execute_handlers(struct intsrc *isrc, struct trapframe *frame)
  234 {
  235         struct thread *td;
  236         struct intr_event *ie;
  237         int vector;
  238 
  239         td = curthread;
  240 
  241         /*
  242          * We count software interrupts when we process them.  The
  243          * code here follows previous practice, but there's an
  244          * argument for counting hardware interrupts when they're
  245          * processed too.
  246          */
  247         (*isrc->is_count)++;
  248         PCPU_INC(cnt.v_intr);
  249 
  250         ie = isrc->is_event;
  251 
  252         /*
  253          * XXX: We assume that IRQ 0 is only used for the ISA timer
  254          * device (clk).
  255          */
  256         vector = isrc->is_pic->pic_vector(isrc);
  257         if (vector == 0)
  258                 clkintr_pending = 1;
  259 
  260         if (intr_event_handle(ie, frame) != 0)
  261                 intr_event_stray(isrc);         
  262 }
  263 
  264 static void
  265 intr_event_stray(void *cookie)
  266 {
  267         struct intsrc *isrc;
  268 
  269         isrc = cookie;
  270         /*
  271          * For stray interrupts, mask and EOI the source, bump the
  272          * stray count, and log the condition.
  273          */
  274         isrc->is_pic->pic_disable_source(isrc, PIC_EOI);
  275         (*isrc->is_straycount)++;
  276         if (*isrc->is_straycount < MAX_STRAY_LOG)
  277                 log(LOG_ERR, "stray irq%d\n", isrc->is_pic->pic_vector(isrc));
  278         else if (*isrc->is_straycount == MAX_STRAY_LOG)
  279                 log(LOG_CRIT,
  280                     "too many stray irq %d's: not logging anymore\n",
  281                     isrc->is_pic->pic_vector(isrc));
  282 }
  283 
  284 static void
  285 intr_eoi_src(void *arg)
  286 {
  287         struct intsrc *isrc;
  288 
  289         isrc = arg;
  290         isrc->is_pic->pic_eoi_source(isrc);
  291 }
  292 
  293 static void
  294 intr_disab_eoi_src(void *arg)
  295 {
  296         struct intsrc *isrc;
  297 
  298         isrc = arg;
  299         isrc->is_pic->pic_disable_source(isrc, PIC_EOI);
  300 }
  301 #else
  302 void
  303 intr_execute_handlers(struct intsrc *isrc, struct trapframe *frame)
  304 {
  305         struct thread *td;
  306         struct intr_event *ie;
  307         struct intr_handler *ih;
  308         int error, vector, thread, ret;
  309 
  310         td = curthread;
  311 
  312         /*
  313          * We count software interrupts when we process them.  The
  314          * code here follows previous practice, but there's an
  315          * argument for counting hardware interrupts when they're
  316          * processed too.
  317          */
  318         (*isrc->is_count)++;
  319         PCPU_INC(cnt.v_intr);
  320 
  321         ie = isrc->is_event;
  322 
  323         /*
  324          * XXX: We assume that IRQ 0 is only used for the ISA timer
  325          * device (clk).
  326          */
  327         vector = isrc->is_pic->pic_vector(isrc);
  328         if (vector == 0)
  329                 clkintr_pending = 1;
  330 
  331         /*
  332          * For stray interrupts, mask and EOI the source, bump the
  333          * stray count, and log the condition.
  334          */
  335         if (ie == NULL || TAILQ_EMPTY(&ie->ie_handlers)) {
  336                 isrc->is_pic->pic_disable_source(isrc, PIC_EOI);
  337                 (*isrc->is_straycount)++;
  338                 if (*isrc->is_straycount < MAX_STRAY_LOG)
  339                         log(LOG_ERR, "stray irq%d\n", vector);
  340                 else if (*isrc->is_straycount == MAX_STRAY_LOG)
  341                         log(LOG_CRIT,
  342                             "too many stray irq %d's: not logging anymore\n",
  343                             vector);
  344                 return;
  345         }
  346 
  347         /*
  348          * Execute fast interrupt handlers directly.
  349          * To support clock handlers, if a handler registers
  350          * with a NULL argument, then we pass it a pointer to
  351          * a trapframe as its argument.
  352          */
  353         td->td_intr_nesting_level++;
  354         ret = 0;
  355         thread = 0;
  356         critical_enter();
  357         TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next) {
  358                 if (ih->ih_filter == NULL) {
  359                         thread = 1;
  360                         continue;
  361                 }
  362                 CTR4(KTR_INTR, "%s: exec %p(%p) for %s", __func__,
  363                     ih->ih_filter, ih->ih_argument == NULL ? frame :
  364                     ih->ih_argument, ih->ih_name);
  365                 if (ih->ih_argument == NULL)
  366                         ret = ih->ih_filter(frame);
  367                 else
  368                         ret = ih->ih_filter(ih->ih_argument);
  369                 /* 
  370                  * Wrapper handler special handling:
  371                  *
  372                  * In some particular cases (like pccard and pccbb),
  373                  * the _real_ device handler is wrapped in a couple of
  374                  * functions - a filter wrapper and an ithread wrapper.
  375                  * In this case (and just in this case), the filter wrapper 
  376                  * could ask the system to schedule the ithread and mask
  377                  * the interrupt source if the wrapped handler is composed
  378                  * of just an ithread handler.
  379                  *
  380                  * TODO: write a generic wrapper to avoid people rolling 
  381                  * their own
  382                  */
  383                 if (!thread) {
  384                         if (ret == FILTER_SCHEDULE_THREAD)
  385                                 thread = 1;
  386                 }
  387         }
  388 
  389         /*
  390          * If there are any threaded handlers that need to run,
  391          * mask the source as well as sending it an EOI.  Otherwise,
  392          * just send it an EOI but leave it unmasked.
  393          */
  394         if (thread)
  395                 isrc->is_pic->pic_disable_source(isrc, PIC_EOI);
  396         else
  397                 isrc->is_pic->pic_eoi_source(isrc);
  398 
  399         /* Schedule the ithread if needed. */
  400         if (thread) {
  401                 error = intr_event_schedule_thread(ie);
  402                 KASSERT(error == 0, ("bad stray interrupt"));
  403         }
  404         critical_exit();
  405         td->td_intr_nesting_level--;
  406 }
  407 #endif
  408 
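The filter loop above runs in interrupt context and only schedules the ithread
when a filter asks for it.  A sketch of what such a driver filter might look
like (the foo names and register accesses are hypothetical; the FILTER_*
return values come from sys/bus.h):

        static int
        foo_filter(void *arg)
        {
                struct foo_softc *sc = arg;

                if (!foo_intr_pending(sc))      /* not ours */
                        return (FILTER_STRAY);
                foo_mask_device_intr(sc);       /* quiet it until the ithread runs */
                return (FILTER_SCHEDULE_THREAD);
        }
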
  409 void
  410 intr_resume(void)
  411 {
  412         struct pic *pic;
  413 
  414         sx_xlock(&intr_table_lock);
  415         STAILQ_FOREACH(pic, &pics, pics) {
  416                 if (pic->pic_resume != NULL)
  417                         pic->pic_resume(pic);
  418         }
  419         sx_xunlock(&intr_table_lock);
  420 }
  421 
  422 void
  423 intr_suspend(void)
  424 {
  425         struct pic *pic;
  426 
  427         sx_xlock(&intr_table_lock);
  428         STAILQ_FOREACH(pic, &pics, pics) {
  429                 if (pic->pic_suspend != NULL)
  430                         pic->pic_suspend(pic);
  431         }
  432         sx_xunlock(&intr_table_lock);
  433 }
  434 
  435 static int
  436 intr_assign_cpu(void *arg, u_char cpu)
  437 {
  438 #ifdef SMP
  439         struct intsrc *isrc;    
  440 
  441         /*
  442          * Don't do anything during early boot.  We will pick up the
  443          * assignment once the APs are started.
  444          */
  445         if (assign_cpu && cpu != NOCPU) {
  446                 isrc = arg;
  447                 sx_xlock(&intr_table_lock);
  448                 isrc->is_pic->pic_assign_cpu(isrc, cpu_apic_ids[cpu]);
  449                 sx_xunlock(&intr_table_lock);
  450         }
  451         return (0);
  452 #else
  453         return (EOPNOTSUPP);
  454 #endif
  455 }
  456 
  457 static void
  458 intrcnt_setname(const char *name, int index)
  459 {
  460 
  461         snprintf(intrnames + (MAXCOMLEN + 1) * index, MAXCOMLEN + 1, "%-*s",
  462             MAXCOMLEN, name);
  463 }
  464 
  465 static void
  466 intrcnt_updatename(struct intsrc *is)
  467 {
  468 
  469         intrcnt_setname(is->is_event->ie_fullname, is->is_index);
  470 }
  471 
  472 static void
  473 intrcnt_register(struct intsrc *is)
  474 {
  475         char straystr[MAXCOMLEN + 1];
  476 
  477         KASSERT(is->is_event != NULL, ("%s: isrc with no event", __func__));
  478         mtx_lock_spin(&intrcnt_lock);
  479         is->is_index = intrcnt_index;
  480         intrcnt_index += 2;
  481         snprintf(straystr, MAXCOMLEN + 1, "stray irq%d",
  482             is->is_pic->pic_vector(is));
  483         intrcnt_updatename(is);
  484         is->is_count = &intrcnt[is->is_index];
  485         intrcnt_setname(straystr, is->is_index + 1);
  486         is->is_straycount = &intrcnt[is->is_index + 1];
  487         mtx_unlock_spin(&intrcnt_lock);
  488 }
  489 
  490 void
  491 intrcnt_add(const char *name, u_long **countp)
  492 {
  493 
  494         mtx_lock_spin(&intrcnt_lock);
  495         *countp = &intrcnt[intrcnt_index];
  496         intrcnt_setname(name, intrcnt_index);
  497         intrcnt_index++;
  498         mtx_unlock_spin(&intrcnt_lock);
  499 }
  500 
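A short sketch of how other machine-dependent code is expected to use
intrcnt_add(); the counter name is illustrative:

        static u_long *foo_ipi_count;

        /* At setup time, claim a named slot in the intrcnt[] array. */
        intrcnt_add("foo: ipi", &foo_ipi_count);

        /* In the handler, bump the count; the name appears in vmstat -i. */
        (*foo_ipi_count)++;
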
  501 static void
  502 intr_init(void *dummy __unused)
  503 {
  504 
  505         intrcnt_setname("???", 0);
  506         intrcnt_index = 1;
  507         STAILQ_INIT(&pics);
  508         sx_init(&intr_table_lock, "intr sources");
  509         mtx_init(&intrcnt_lock, "intrcnt", NULL, MTX_SPIN);
  510 }
  511 SYSINIT(intr_init, SI_SUB_INTR, SI_ORDER_FIRST, intr_init, NULL);
  512 
  513 #ifdef DDB
  514 /*
  515  * Dump data about interrupt handlers
  516  */
  517 DB_SHOW_COMMAND(irqs, db_show_irqs)
  518 {
  519         struct intsrc **isrc;
  520         int i, verbose;
  521 
  522         if (strcmp(modif, "v") == 0)
  523                 verbose = 1;
  524         else
  525                 verbose = 0;
  526         isrc = interrupt_sources;
  527         for (i = 0; i < NUM_IO_INTS && !db_pager_quit; i++, isrc++)
  528                 if (*isrc != NULL)
  529                         db_dump_intr_event((*isrc)->is_event, verbose);
  530 }
  531 #endif
  532 
  533 #ifdef SMP
  534 /*
  535  * Support for balancing interrupt sources across CPUs.  For now we just
  536  * allocate CPUs round-robin.
  537  */
  538 
  539 /* The BSP is always a valid target. */
  540 static cpumask_t intr_cpus = (1 << 0);
  541 static int current_cpu;
  542 
  543 static void
  544 intr_assign_next_cpu(struct intsrc *isrc)
  545 {
  546 
  547         /*
  548          * Assign this source to a local APIC in a round-robin fashion.
  549          */
  550         isrc->is_pic->pic_assign_cpu(isrc, cpu_apic_ids[current_cpu]);
  551         do {
  552                 current_cpu++;
  553                 if (current_cpu > mp_maxid)
  554                         current_cpu = 0;
  555         } while (!(intr_cpus & (1 << current_cpu)));
  556 }
  557 
  558 /* Attempt to bind the specified IRQ to the specified CPU. */
  559 int
  560 intr_bind(u_int vector, u_char cpu)
  561 {
  562         struct intsrc *isrc;
  563 
  564         isrc = intr_lookup_source(vector);
  565         if (isrc == NULL)
  566                 return (EINVAL);
  567         return (intr_event_bind(isrc->is_event, cpu));
  568 }
  569 
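A sketch of the consumer side of intr_bind(); a driver would normally get here
through bus_bind_intr(9), but the effect is the same (the vector and CPU below
are arbitrary):

        /* Ask for IRQ 16 to be delivered to CPU 1. */
        error = intr_bind(16, 1);
        if (error != 0)
                printf("cannot bind irq16 to cpu1: %d\n", error);
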
  570 /*
  571  * Add a CPU to our mask of valid CPUs that can be destinations of
  572  * interrupts.
  573  */
  574 void
  575 intr_add_cpu(u_int cpu)
  576 {
  577 
  578         if (cpu >= MAXCPU)
  579                 panic("%s: Invalid CPU ID", __func__);
  580         if (bootverbose)
  581                 printf("INTR: Adding local APIC %d as a target\n",
  582                     cpu_apic_ids[cpu]);
  583 
  584         intr_cpus |= (1 << cpu);
  585 }
  586 
  587 /*
  588  * Distribute all the interrupt sources among the available CPUs once the
  589  * APs have been launched.
  590  */
  591 static void
  592 intr_shuffle_irqs(void *arg __unused)
  593 {
  594         struct intsrc *isrc;
  595         int i;
  596 
  597         /* Don't bother on UP. */
  598         if (mp_ncpus == 1)
  599                 return;
  600 
  601         /* Round-robin assign a CPU to each enabled source. */
  602         sx_xlock(&intr_table_lock);
  603         assign_cpu = 1;
  604         for (i = 0; i < NUM_IO_INTS; i++) {
  605                 isrc = interrupt_sources[i];
  606                 if (isrc != NULL && isrc->is_handlers > 0) {
  607                         /*
  608                          * If this event is already bound to a CPU,
  609                          * then assign the source to that CPU instead
  610                          * of picking one via round-robin.
  611                          */
  612                         if (isrc->is_event->ie_cpu != NOCPU)
  613                                 isrc->is_pic->pic_assign_cpu(isrc,
  614                                     cpu_apic_ids[isrc->is_event->ie_cpu]);
  615                         else
  616                                 intr_assign_next_cpu(isrc);
  617                 }
  618         }
  619         sx_xunlock(&intr_table_lock);
  620 }
  621 SYSINIT(intr_shuffle_irqs, SI_SUB_SMP, SI_ORDER_SECOND, intr_shuffle_irqs,
  622     NULL);
  623 #endif

This page is part of the FreeBSD/Linux Kernel Cross-Reference and was automatically generated using a modified version of the LXR engine.