FreeBSD/Linux Kernel Cross Reference
sys/amd64/amd64/intr_machdep.c

    1 /*-
    2  * Copyright (c) 2003 John Baldwin <jhb@FreeBSD.org>
    3  * All rights reserved.
    4  *
    5  * Redistribution and use in source and binary forms, with or without
    6  * modification, are permitted provided that the following conditions
    7  * are met:
    8  * 1. Redistributions of source code must retain the above copyright
    9  *    notice, this list of conditions and the following disclaimer.
   10  * 2. Redistributions in binary form must reproduce the above copyright
   11  *    notice, this list of conditions and the following disclaimer in the
   12  *    documentation and/or other materials provided with the distribution.
   13  * 3. Neither the name of the author nor the names of any co-contributors
   14  *    may be used to endorse or promote products derived from this software
   15  *    without specific prior written permission.
   16  *
   17  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
   18  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   19  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   20  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
   21  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   22  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
   23  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   24  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   25  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   26  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   27  * SUCH DAMAGE.
   28  *
   29  * $FreeBSD$
   30  */
   31 
   32 /*
   33  * Machine dependent interrupt code for amd64.  For amd64, we have to
   34  * deal with different PICs.  Thus, we use the passed in vector to lookup
   35  * an interrupt source associated with that vector.  The interrupt source
   36  * describes which PIC the source belongs to and includes methods to handle
   37  * that source.
   38  */
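/*
 * Summary of the interfaces this file relies on (paraphrased from the
 * calls below; machine/intr_machdep.h is the authoritative definition):
 *
 *	struct pic	per-controller method table.  The methods used
 *			here are pic_enable_source, pic_disable_source,
 *			pic_eoi_source, pic_enable_intr, pic_disable_intr,
 *			pic_vector, pic_config_intr, pic_assign_cpu,
 *			pic_suspend, and pic_resume.
 *	struct intsrc	one interrupt source.  It points at its PIC
 *			(is_pic) and its interrupt event (is_event) and
 *			carries a handler count (is_handlers) plus the
 *			statistics fields is_count, is_straycount, and
 *			is_index.
 */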
   39 
   40 #include "opt_atpic.h"
   41 #include "opt_ddb.h"
   42 
   43 #include <sys/param.h>
   44 #include <sys/bus.h>
   45 #include <sys/interrupt.h>
   46 #include <sys/ktr.h>
   47 #include <sys/kernel.h>
   48 #include <sys/lock.h>
   49 #include <sys/mutex.h>
   50 #include <sys/proc.h>
   51 #include <sys/smp.h>
   52 #include <sys/syslog.h>
   53 #include <sys/systm.h>
   54 #include <sys/sx.h>
   55 #include <machine/clock.h>
   56 #include <machine/intr_machdep.h>
   57 #include <machine/smp.h>
   58 #ifdef DDB
   59 #include <ddb/ddb.h>
   60 #endif
   61 
   62 #ifndef DEV_ATPIC
   63 #include <machine/segments.h>
   64 #include <machine/frame.h>
   65 #include <dev/ic/i8259.h>
   66 #include <amd64/isa/icu.h>
   67 #include <amd64/isa/isa.h>
   68 #endif
   69 
   70 #define MAX_STRAY_LOG   5
   71 
   72 typedef void (*mask_fn)(void *);
   73 
   74 static int intrcnt_index;
   75 static struct intsrc *interrupt_sources[NUM_IO_INTS];
   76 static struct sx intr_table_lock;
   77 static struct mtx intrcnt_lock;
   78 static STAILQ_HEAD(, pic) pics;
   79 
   80 #ifdef INTR_FILTER
   81 static void intr_eoi_src(void *arg);
   82 static void intr_disab_eoi_src(void *arg);
   83 static void intr_event_stray(void *cookie);
   84 #endif
   85 
   86 #ifdef SMP
   87 static int assign_cpu;
   88 
   89 static void     intr_assign_next_cpu(struct intsrc *isrc);
   90 #endif
   91 
   92 static int      intr_assign_cpu(void *arg, u_char cpu);
   93 static void     intr_init(void *__dummy);
   94 static int      intr_pic_registered(struct pic *pic);
   95 static void     intrcnt_setname(const char *name, int index);
   96 static void     intrcnt_updatename(struct intsrc *is);
   97 static void     intrcnt_register(struct intsrc *is);
   98 
   99 static int
  100 intr_pic_registered(struct pic *pic)
  101 {
  102         struct pic *p;
  103 
  104         STAILQ_FOREACH(p, &pics, pics) {
  105                 if (p == pic)
  106                         return (1);
  107         }
  108         return (0);
  109 }
  110 
  111 /*
  112  * Register a new interrupt controller (PIC).  This is to support suspend
  113  * and resume where we suspend/resume controllers rather than individual
  114  * sources.  This also allows controllers with no active sources (such as
  115  * 8259As in a system using the APICs) to participate in suspend and resume.
  116  */
  117 int
  118 intr_register_pic(struct pic *pic)
  119 {
  120         int error;
  121 
  122         sx_xlock(&intr_table_lock);
  123         if (intr_pic_registered(pic))
  124                 error = EBUSY;
  125         else {
  126                 STAILQ_INSERT_TAIL(&pics, pic, pics);
  127                 error = 0;
  128         }
  129         sx_xunlock(&intr_table_lock);
  130         return (error);
  131 }
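/*
 * Illustrative use (hypothetical controller driver, not part of this
 * file): a PIC driver builds a struct pic holding its method pointers
 * and registers it once, before registering any of its sources.
 *
 *	static struct pic mypic = { ... method pointers ... };
 *
 *	if (intr_register_pic(&mypic) != 0)
 *		panic("mypic: already registered");
 */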
  132 
  133 /*
  134  * Register a new interrupt source with the global interrupt system.
  135  * The global interrupts need to be disabled when this function is
  136  * called.
  137  */
  138 int
  139 intr_register_source(struct intsrc *isrc)
  140 {
  141         int error, vector;
  142 
  143         KASSERT(intr_pic_registered(isrc->is_pic), ("unregistered PIC"));
  144         vector = isrc->is_pic->pic_vector(isrc);
  145         if (interrupt_sources[vector] != NULL)
  146                 return (EEXIST);
  147 #ifdef INTR_FILTER
  148         error = intr_event_create(&isrc->is_event, isrc, 0,
  149             (mask_fn)isrc->is_pic->pic_enable_source,
  150             intr_eoi_src, intr_disab_eoi_src, intr_assign_cpu, "irq%d:",
  151             vector);
  152 #else
  153         error = intr_event_create(&isrc->is_event, isrc, 0,
  154             (mask_fn)isrc->is_pic->pic_enable_source, intr_assign_cpu, "irq%d:",
  155             vector);
  156 #endif
  157         if (error)
  158                 return (error);
  159         sx_xlock(&intr_table_lock);
  160         if (interrupt_sources[vector] != NULL) {
  161                 sx_xunlock(&intr_table_lock);
  162                 intr_event_destroy(isrc->is_event);
  163                 return (EEXIST);
  164         }
  165         intrcnt_register(isrc);
  166         interrupt_sources[vector] = isrc;
  167         isrc->is_handlers = 0;
  168         sx_xunlock(&intr_table_lock);
  169         return (0);
  170 }
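/*
 * Note on the registration above: the vector slot is tested once without
 * the lock so an already-claimed vector fails before an interrupt event
 * is allocated, and then tested again with intr_table_lock held, since a
 * competing registration for the same vector may have completed in the
 * meantime.
 */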
  171 
  172 struct intsrc *
  173 intr_lookup_source(int vector)
  174 {
  175 
  176         return (interrupt_sources[vector]);
  177 }
  178 
  179 int
  180 intr_add_handler(const char *name, int vector, driver_filter_t filter, 
  181     driver_intr_t handler, void *arg, enum intr_type flags, void **cookiep)    
  182 {
  183         struct intsrc *isrc;
  184         int error;
  185 
  186         isrc = intr_lookup_source(vector);
  187         if (isrc == NULL)
  188                 return (EINVAL);
  189         error = intr_event_add_handler(isrc->is_event, name, filter, handler,
  190             arg, intr_priority(flags), flags, cookiep);
  191         if (error == 0) {
  192                 sx_xlock(&intr_table_lock);
  193                 intrcnt_updatename(isrc);
  194                 isrc->is_handlers++;
  195                 if (isrc->is_handlers == 1) {
  196 #ifdef SMP
  197                         if (assign_cpu)
  198                                 intr_assign_next_cpu(isrc);
  199 #endif
  200                         isrc->is_pic->pic_enable_intr(isrc);
  201                         isrc->is_pic->pic_enable_source(isrc);
  202                 }
  203                 sx_xunlock(&intr_table_lock);
  204         }
  205         return (error);
  206 }
  207 
  208 int
  209 intr_remove_handler(void *cookie)
  210 {
  211         struct intsrc *isrc;
  212         int error;
  213 
  214         isrc = intr_handler_source(cookie);
  215         error = intr_event_remove_handler(cookie);
  216         if (error == 0) {
  217                 sx_xlock(&intr_table_lock);
  218                 isrc->is_handlers--;
  219                 if (isrc->is_handlers == 0) {
  220                         isrc->is_pic->pic_disable_source(isrc, PIC_NO_EOI);
  221                         isrc->is_pic->pic_disable_intr(isrc);
  222                 }
  223                 intrcnt_updatename(isrc);
  224                 sx_xunlock(&intr_table_lock);
  225         }
  226         return (error);
  227 }
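/*
 * Illustrative use of the two routines above (hypothetical driver, not
 * part of this file): "mydev_intr", "sc", and "cookie" are placeholders,
 * and INTR_TYPE_MISC | INTR_MPSAFE is an ordinary set of intr_type flags.
 * Passing a NULL filter attaches a threaded handler only.
 *
 *	error = intr_add_handler("mydev", vector, NULL, mydev_intr, sc,
 *	    INTR_TYPE_MISC | INTR_MPSAFE, &cookie);
 *	...
 *	error = intr_remove_handler(cookie);
 */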
  228 
  229 int
  230 intr_config_intr(int vector, enum intr_trigger trig, enum intr_polarity pol)
  231 {
  232         struct intsrc *isrc;
  233 
  234         isrc = intr_lookup_source(vector);
  235         if (isrc == NULL)
  236                 return (EINVAL);
  237         return (isrc->is_pic->pic_config_intr(isrc, trig, pol));
  238 }
  239 
  240 #ifdef INTR_FILTER
  241 void
  242 intr_execute_handlers(struct intsrc *isrc, struct trapframe *frame)
  243 {
  244         struct thread *td;
  245         struct intr_event *ie;
  246         int vector;
  247 
  248         td = curthread;
  249 
  250         /*
  251          * We count software interrupts when we process them.  The
  252          * code here follows previous practice, but there's an
  253          * argument for counting hardware interrupts when they're
  254          * processed too.
  255          */
  256         (*isrc->is_count)++;
  257         PCPU_INC(cnt.v_intr);
  258 
  259         ie = isrc->is_event;
  260 
  261         /*
  262          * XXX: We assume that IRQ 0 is only used for the ISA timer
  263          * device (clk).
  264          */
  265         vector = isrc->is_pic->pic_vector(isrc);
  266         if (vector == 0)
  267                 clkintr_pending = 1;
  268 
  269         if (intr_event_handle(ie, frame) != 0)
  270                 intr_event_stray(isrc);
  271 }
  272 
  273 static void
  274 intr_event_stray(void *cookie)
  275 {
  276         struct intsrc *isrc;
  277 
  278         isrc = cookie;
  279         /*
  280          * For stray interrupts, mask and EOI the source, bump the
  281          * stray count, and log the condition.
  282          */
  283         isrc->is_pic->pic_disable_source(isrc, PIC_EOI);
  284         (*isrc->is_straycount)++;
  285         if (*isrc->is_straycount < MAX_STRAY_LOG)
  286                 log(LOG_ERR, "stray irq%d\n", isrc->is_pic->pic_vector(isrc));
  287         else if (*isrc->is_straycount == MAX_STRAY_LOG)
  288                 log(LOG_CRIT,
  289                     "too many stray irq %d's: not logging anymore\n",
  290                     isrc->is_pic->pic_vector(isrc));
  291 }
  292 
  293 static void
  294 intr_eoi_src(void *arg)
  295 {
  296         struct intsrc *isrc;
  297 
  298         isrc = arg;
  299         isrc->is_pic->pic_eoi_source(isrc);
  300 }
  301 
  302 static void
  303 intr_disab_eoi_src(void *arg)
  304 {
  305         struct intsrc *isrc;
  306 
  307         isrc = arg;
  308         isrc->is_pic->pic_disable_source(isrc, PIC_EOI);
  309 }
  310 #else
  311 void
  312 intr_execute_handlers(struct intsrc *isrc, struct trapframe *frame)
  313 {
  314         struct thread *td;
  315         struct intr_event *ie;
  316         struct intr_handler *ih;
  317         int error, vector, thread, ret;
  318 
  319         td = curthread;
  320 
  321         /*
  322          * We count software interrupts when we process them.  The
  323          * code here follows previous practice, but there's an
  324          * argument for counting hardware interrupts when they're
  325          * processed too.
  326          */
  327         (*isrc->is_count)++;
  328         PCPU_INC(cnt.v_intr);
  329 
  330         ie = isrc->is_event;
  331 
  332         /*
  333          * XXX: We assume that IRQ 0 is only used for the ISA timer
  334          * device (clk).
  335          */
  336         vector = isrc->is_pic->pic_vector(isrc);
  337         if (vector == 0)
  338                 clkintr_pending = 1;
  339 
  340         /*
  341          * For stray interrupts, mask and EOI the source, bump the
  342          * stray count, and log the condition.
  343          */
  344         if (ie == NULL || TAILQ_EMPTY(&ie->ie_handlers)) {
  345                 isrc->is_pic->pic_disable_source(isrc, PIC_EOI);
  346                 (*isrc->is_straycount)++;
  347                 if (*isrc->is_straycount < MAX_STRAY_LOG)
  348                         log(LOG_ERR, "stray irq%d\n", vector);
  349                 else if (*isrc->is_straycount == MAX_STRAY_LOG)
  350                         log(LOG_CRIT,
  351                             "too many stray irq %d's: not logging anymore\n",
  352                             vector);
  353                 return;
  354         }
  355 
  356         /*
  357          * Execute fast interrupt handlers directly.
  358          * To support clock handlers, if a handler registers
  359          * with a NULL argument, then we pass it a pointer to
  360          * a trapframe as its argument.
  361          */
  362         td->td_intr_nesting_level++;
  363         ret = 0;
  364         thread = 0;
  365         critical_enter();
  366         TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next) {
  367                 if (ih->ih_filter == NULL) {
  368                         thread = 1;
  369                         continue;
  370                 }
  371                 CTR4(KTR_INTR, "%s: exec %p(%p) for %s", __func__,
  372                     ih->ih_filter, ih->ih_argument == NULL ? frame :
  373                     ih->ih_argument, ih->ih_name);
  374                 if (ih->ih_argument == NULL)
  375                         ret = ih->ih_filter(frame);
  376                 else
  377                         ret = ih->ih_filter(ih->ih_argument);
  378                 /*
  379                  * Wrapper handler special case: see
  380                  * i386/intr_machdep.c::intr_execute_handlers()
  381                  */
  382                 if (!thread) {
  383                         if (ret == FILTER_SCHEDULE_THREAD)
  384                                 thread = 1;
  385                 }
  386         }
  387 
  388         /*
  389          * If there are any threaded handlers that need to run,
  390          * mask the source as well as sending it an EOI.  Otherwise,
  391          * just send it an EOI but leave it unmasked.
  392          */
  393         if (thread)
  394                 isrc->is_pic->pic_disable_source(isrc, PIC_EOI);
  395         else
  396                 isrc->is_pic->pic_eoi_source(isrc);
  397 
  398         /* Schedule the ithread if needed. */
  399         if (thread) {
  400                 error = intr_event_schedule_thread(ie);
  401                 KASSERT(error == 0, ("bad stray interrupt"));
  402         }
  403         critical_exit();
  404         td->td_intr_nesting_level--;
  405 }
  406 #endif
  407 
  408 void
  409 intr_resume(void)
  410 {
  411         struct pic *pic;
  412 
  413 #ifndef DEV_ATPIC
  414         atpic_reset();
  415 #endif
  416         sx_xlock(&intr_table_lock);
  417         STAILQ_FOREACH(pic, &pics, pics) {
  418                 if (pic->pic_resume != NULL)
  419                         pic->pic_resume(pic);
  420         }
  421         sx_xunlock(&intr_table_lock);
  422 }
  423 
  424 void
  425 intr_suspend(void)
  426 {
  427         struct pic *pic;
  428 
  429         sx_xlock(&intr_table_lock);
  430         STAILQ_FOREACH(pic, &pics, pics) {
  431                 if (pic->pic_suspend != NULL)
  432                         pic->pic_suspend(pic);
  433         }
  434         sx_xunlock(&intr_table_lock);
  435 }
  436 
  437 static int
  438 intr_assign_cpu(void *arg, u_char cpu)
  439 {
  440 #ifdef SMP
  441         struct intsrc *isrc;    
  442 
  443         /*
  444          * Don't do anything during early boot.  We will pick up the
  445          * assignment once the APs are started.
  446          */
  447         if (assign_cpu && cpu != NOCPU) {
  448                 isrc = arg;
  449                 sx_xlock(&intr_table_lock);
  450                 isrc->is_pic->pic_assign_cpu(isrc, cpu_apic_ids[cpu]);
  451                 sx_xunlock(&intr_table_lock);
  452         }
  453         return (0);
  454 #else
  455         return (EOPNOTSUPP);
  456 #endif
  457 }
  458 
  459 static void
  460 intrcnt_setname(const char *name, int index)
  461 {
  462 
  463         snprintf(intrnames + (MAXCOMLEN + 1) * index, MAXCOMLEN + 1, "%-*s",
  464             MAXCOMLEN, name);
  465 }
  466 
  467 static void
  468 intrcnt_updatename(struct intsrc *is)
  469 {
  470 
  471         intrcnt_setname(is->is_event->ie_fullname, is->is_index);
  472 }
  473 
  474 static void
  475 intrcnt_register(struct intsrc *is)
  476 {
  477         char straystr[MAXCOMLEN + 1];
  478 
  479         KASSERT(is->is_event != NULL, ("%s: isrc with no event", __func__));
  480         mtx_lock_spin(&intrcnt_lock);
  481         is->is_index = intrcnt_index;
  482         intrcnt_index += 2;
  483         snprintf(straystr, MAXCOMLEN + 1, "stray irq%d",
  484             is->is_pic->pic_vector(is));
  485         intrcnt_updatename(is);
  486         is->is_count = &intrcnt[is->is_index];
  487         intrcnt_setname(straystr, is->is_index + 1);
  488         is->is_straycount = &intrcnt[is->is_index + 1];
  489         mtx_unlock_spin(&intrcnt_lock);
  490 }
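/*
 * Note on the layout above: each registered source consumes two adjacent
 * intrcnt[] slots, one for normal interrupts (is_count) and one for
 * strays (is_straycount), and intrcnt_setname() packs the matching names
 * into intrnames[] at a fixed stride of MAXCOMLEN + 1 bytes per slot.
 */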
  491 
  492 void
  493 intrcnt_add(const char *name, u_long **countp)
  494 {
  495 
  496         mtx_lock_spin(&intrcnt_lock);
  497         *countp = &intrcnt[intrcnt_index];
  498         intrcnt_setname(name, intrcnt_index);
  499         intrcnt_index++;
  500         mtx_unlock_spin(&intrcnt_lock);
  501 }
  502 
  503 static void
  504 intr_init(void *dummy __unused)
  505 {
  506 
  507         intrcnt_setname("???", 0);
  508         intrcnt_index = 1;
  509         STAILQ_INIT(&pics);
  510         sx_init(&intr_table_lock, "intr sources");
  511         mtx_init(&intrcnt_lock, "intrcnt", NULL, MTX_SPIN);
  512 }
  513 SYSINIT(intr_init, SI_SUB_INTR, SI_ORDER_FIRST, intr_init, NULL);
  514 
  515 #ifndef DEV_ATPIC
  516 /* Initialize the two 8259A's to a known-good shutdown state. */
  517 void
  518 atpic_reset(void)
  519 {
  520 
  521         outb(IO_ICU1, ICW1_RESET | ICW1_IC4);
  522         outb(IO_ICU1 + ICU_IMR_OFFSET, IDT_IO_INTS);
  523         outb(IO_ICU1 + ICU_IMR_OFFSET, 1 << 2);
  524         outb(IO_ICU1 + ICU_IMR_OFFSET, ICW4_8086);
  525         outb(IO_ICU1 + ICU_IMR_OFFSET, 0xff);
  526         outb(IO_ICU1, OCW3_SEL | OCW3_RR);
  527 
  528         outb(IO_ICU2, ICW1_RESET | ICW1_IC4);
  529         outb(IO_ICU2 + ICU_IMR_OFFSET, IDT_IO_INTS + 8);
  530         outb(IO_ICU2 + ICU_IMR_OFFSET, 2);
  531         outb(IO_ICU2 + ICU_IMR_OFFSET, ICW4_8086);
  532         outb(IO_ICU2 + ICU_IMR_OFFSET, 0xff);
  533         outb(IO_ICU2, OCW3_SEL | OCW3_RR);
  534 }
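/*
 * Note on the sequence above: each 8259A is walked through its standard
 * initialization words: ICW1 (reset, ICW4 required), ICW2 (vector base:
 * IDT_IO_INTS for the master, IDT_IO_INTS + 8 for the slave), ICW3
 * (cascade wiring: the master has a slave on IR2, the slave has cascade
 * identity 2), and ICW4 (8086 mode).  All eight IRQ lines are then
 * masked with OCW1 = 0xff and OCW3 selects IRR reads, leaving both PICs
 * quiet while the APICs deliver interrupts.
 */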
  535 #endif
  536 
  537 #ifdef DDB
  538 /*
  539  * Dump data about interrupt handlers
  540  */
  541 DB_SHOW_COMMAND(irqs, db_show_irqs)
  542 {
  543         struct intsrc **isrc;
  544         int i, verbose;
  545 
  546         if (strcmp(modif, "v") == 0)
  547                 verbose = 1;
  548         else
  549                 verbose = 0;
  550         isrc = interrupt_sources;
  551         for (i = 0; i < NUM_IO_INTS && !db_pager_quit; i++, isrc++)
  552                 if (*isrc != NULL)
  553                         db_dump_intr_event((*isrc)->is_event, verbose);
  554 }
  555 #endif
  556 
  557 #ifdef SMP
  558 /*
  559  * Support for balancing interrupt sources across CPUs.  For now we just
  560  * allocate CPUs round-robin.
  561  */
  562 
  563 /* The BSP is always a valid target. */
  564 static cpumask_t intr_cpus = (1 << 0);
  565 static int current_cpu;
  566 
  567 static void
  568 intr_assign_next_cpu(struct intsrc *isrc)
  569 {
  570 
  571         /*
  572          * Assign this source to a local APIC in a round-robin fashion.
  573          */
  574         isrc->is_pic->pic_assign_cpu(isrc, cpu_apic_ids[current_cpu]);
  575         do {
  576                 current_cpu++;
  577                 if (current_cpu > mp_maxid)
  578                         current_cpu = 0;
  579         } while (!(intr_cpus & (1 << current_cpu)));
  580 }
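/*
 * Note on the loop above: current_cpu walks round-robin over all
 * possible CPU IDs but only settles on CPUs present in intr_cpus, which
 * initially contains just the BSP and grows through intr_add_cpu() as
 * each AP is brought up.
 */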
  581 
  582 /* Attempt to bind the specified IRQ to the specified CPU. */
  583 int
  584 intr_bind(u_int vector, u_char cpu)
  585 {
  586         struct intsrc *isrc;
  587 
  588         isrc = intr_lookup_source(vector);
  589         if (isrc == NULL)
  590                 return (EINVAL);
  591         return (intr_event_bind(isrc->is_event, cpu));
  592 }
  593 
  594 /*
  595  * Add a CPU to our mask of valid CPUs that can be destinations of
  596  * interrupts.
  597  */
  598 void
  599 intr_add_cpu(u_int cpu)
  600 {
  601 
  602         if (cpu >= MAXCPU)
  603                 panic("%s: Invalid CPU ID", __func__);
  604         if (bootverbose)
  605                 printf("INTR: Adding local APIC %d as a target\n",
  606                     cpu_apic_ids[cpu]);
  607 
  608         intr_cpus |= (1 << cpu);
  609 }
  610 
  611 /*
  612  * Distribute all the interrupt sources among the available CPUs once the
   613  * APs have been launched.
  614  */
  615 static void
  616 intr_shuffle_irqs(void *arg __unused)
  617 {
  618         struct intsrc *isrc;
  619         int i;
  620 
  621         /* Don't bother on UP. */
  622         if (mp_ncpus == 1)
  623                 return;
  624 
  625         /* Round-robin assign a CPU to each enabled source. */
  626         sx_xlock(&intr_table_lock);
  627         assign_cpu = 1;
  628         for (i = 0; i < NUM_IO_INTS; i++) {
  629                 isrc = interrupt_sources[i];
  630                 if (isrc != NULL && isrc->is_handlers > 0) {
  631                         /*
  632                          * If this event is already bound to a CPU,
  633                          * then assign the source to that CPU instead
  634                          * of picking one via round-robin.
  635                          */
  636                         if (isrc->is_event->ie_cpu != NOCPU)
  637                                 isrc->is_pic->pic_assign_cpu(isrc,
  638                                     cpu_apic_ids[isrc->is_event->ie_cpu]);
  639                         else
  640                                 intr_assign_next_cpu(isrc);
  641                 }
  642         }
  643         sx_xunlock(&intr_table_lock);
  644 }
  645 SYSINIT(intr_shuffle_irqs, SI_SUB_SMP, SI_ORDER_SECOND, intr_shuffle_irqs,
  646     NULL);
  647 #endif

This page is part of the FreeBSD/Linux Kernel Cross-Reference and was automatically generated using a modified version of the LXR engine.