FreeBSD/Linux Kernel Cross Reference
sys/xen/evtchn/evtchn.c


/******************************************************************************
 * evtchn.c
 *
 * Communication via Xen event channels.
 *
 * Copyright (c) 2002-2005, K A Fraser
 * Copyright (c) 2005-2006 Kip Macy
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: releng/8.1/sys/xen/evtchn/evtchn.c 195806 2009-07-21 16:54:11Z alc $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/limits.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/interrupt.h>
#include <sys/pcpu.h>
#include <sys/smp.h>

#include <machine/cpufunc.h>
#include <machine/intr_machdep.h>

#include <machine/xen/xen-os.h>
#include <machine/xen/xenvar.h>
#include <xen/xen_intr.h>
#include <machine/xen/synch_bitops.h>
#include <xen/evtchn.h>
#include <xen/hypervisor.h>

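/* Find the index of the least-significant set bit in a nonzero word. */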
static inline unsigned long __ffs(unsigned long word)
{
        __asm__("bsfl %1,%0"
                :"=r" (word)
                :"rm" (word));
        return (word);
}

static struct mtx irq_mapping_update_lock;
static struct xenpic *xp;
struct xenpic_intsrc {
        struct intsrc     xp_intsrc;
        void              *xp_cookie;
        uint8_t           xp_vector;
        boolean_t         xp_masked;
};

struct xenpic {
        struct pic           *xp_dynirq_pic;
        struct pic           *xp_pirq_pic;
        uint16_t             xp_numintr;
        struct xenpic_intsrc xp_pins[0];
};

#define TODO            printf("%s: not implemented!\n", __func__)

/* IRQ <-> event-channel mappings. */
static int evtchn_to_irq[NR_EVENT_CHANNELS];

/* Packed IRQ information: binding type, sub-type index, and event channel. */
static uint32_t irq_info[NR_IRQS];

/* Binding types. */
enum {
        IRQT_UNBOUND,
        IRQT_PIRQ,
        IRQT_VIRQ,
        IRQT_IPI,
        IRQT_LOCAL_PORT,
        IRQT_CALLER_PORT,
        _IRQT_COUNT
};

#define _IRQT_BITS 4
#define _EVTCHN_BITS 12
#define _INDEX_BITS (32 - _IRQT_BITS - _EVTCHN_BITS)

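/*
 * Layout of a packed irq_info word, given the constants above:
 *
 *  31        28 27                      12 11                0
 * +------------+---------------------------+------------------+
 * | type (4 b) |      index (16 bits)      | evtchn (12 bits) |
 * +------------+---------------------------+------------------+
 */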
/* Constructor for packed IRQ information. */
static inline uint32_t
mk_irq_info(uint32_t type, uint32_t index, uint32_t evtchn)
{

        return ((type << (32 - _IRQT_BITS)) | (index << _EVTCHN_BITS) | evtchn);
}

/* Convenient shorthand for packed representation of an unbound IRQ. */
#define IRQ_UNBOUND     mk_irq_info(IRQT_UNBOUND, 0, 0)

/*
 * Accessors for packed IRQ information.
 */

static inline unsigned int evtchn_from_irq(int irq)
{
        return (irq_info[irq] & ((1U << _EVTCHN_BITS) - 1));
}

static inline unsigned int index_from_irq(int irq)
{
        return ((irq_info[irq] >> _EVTCHN_BITS) & ((1U << _INDEX_BITS) - 1));
}

static inline unsigned int type_from_irq(int irq)
{
        return (irq_info[irq] >> (32 - _IRQT_BITS));
}

/* IRQ <-> VIRQ mapping. */

/* IRQ <-> IPI mapping. */
#ifndef NR_IPIS
#ifdef SMP
#error "NR_IPIS not defined"
#endif
#define NR_IPIS 1
#endif

/* Bitmap indicating which PIRQs require Xen to be notified on unmask. */
static unsigned long pirq_needs_unmask_notify[howmany(NR_PIRQS, LONG_BIT)];

/* Reference counts for bindings to IRQs. */
static int irq_bindcount[NR_IRQS];

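/* Port 0 is never a valid event channel; it doubles as "no channel". */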
#define VALID_EVTCHN(_chn) ((_chn) != 0)

#ifdef SMP

static uint8_t cpu_evtchn[NR_EVENT_CHANNELS];
static unsigned long cpu_evtchn_mask[MAX_VIRT_CPUS][NR_EVENT_CHANNELS/LONG_BIT];

#define active_evtchns(cpu,sh,idx)              \
        ((sh)->evtchn_pending[idx] &            \
         cpu_evtchn_mask[cpu][idx] &            \
         ~(sh)->evtchn_mask[idx])

static void bind_evtchn_to_cpu(unsigned int chn, unsigned int cpu)
{
        clear_bit(chn, (unsigned long *)cpu_evtchn_mask[cpu_evtchn[chn]]);
        set_bit(chn, (unsigned long *)cpu_evtchn_mask[cpu]);
        cpu_evtchn[chn] = cpu;
}

static void init_evtchn_cpu_bindings(void)
{
        /* By default all event channels notify CPU#0. */
        memset(cpu_evtchn, 0, sizeof(cpu_evtchn));
        memset(cpu_evtchn_mask[0], ~0, sizeof(cpu_evtchn_mask[0]));
}

#define cpu_from_evtchn(evtchn)         (cpu_evtchn[evtchn])

#else

#define active_evtchns(cpu,sh,idx)              \
        ((sh)->evtchn_pending[idx] &            \
         ~(sh)->evtchn_mask[idx])
#define bind_evtchn_to_cpu(chn,cpu)     ((void)0)
#define init_evtchn_cpu_bindings()      ((void)0)
#define cpu_from_evtchn(evtchn)         (0)

#endif

/*
 * Force a proper event-channel callback from Xen after clearing the
 * callback mask. We do this in a very simple manner, by making a call
 * down into Xen. The pending flag will be checked by Xen on return.
 */
void force_evtchn_callback(void)
{
        (void)HYPERVISOR_xen_version(0, NULL);
}

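/*
 * Upcall dispatcher.  Xen posts events through a two-level hierarchy in
 * the shared info page: evtchn_pending_sel selects which word of the
 * evtchn_pending[] bitmap may contain set bits, and each word covers
 * LONG_BIT ports.  active_evtchns() further filters by this CPU's
 * binding and the global event-channel mask.
 */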
void
evtchn_do_upcall(struct trapframe *frame)
{
        unsigned long  l1, l2;
        unsigned int   l1i, l2i, port;
        int            irq, cpu;
        shared_info_t *s;
        vcpu_info_t   *vcpu_info;

        cpu = PCPU_GET(cpuid);
        s = HYPERVISOR_shared_info;
        vcpu_info = &s->vcpu_info[cpu];

        vcpu_info->evtchn_upcall_pending = 0;

        /* NB. No need for a barrier here -- XCHG is a barrier on x86. */
        l1 = xen_xchg(&vcpu_info->evtchn_pending_sel, 0);

        while (l1 != 0) {
                l1i = __ffs(l1);
                l1 &= ~(1UL << l1i);    /* 1UL: l1i may exceed 31 on LP64. */

                while ((l2 = active_evtchns(cpu, s, l1i)) != 0) {
                        l2i = __ffs(l2);

                        port = (l1i * LONG_BIT) + l2i;
                        if ((irq = evtchn_to_irq[port]) != -1) {
                                struct intsrc *isrc = intr_lookup_source(irq);
                                /* Acknowledge: mask and clear the channel. */
                                mask_evtchn(port);
                                clear_evtchn(port);

                                intr_execute_handlers(isrc, frame);
                        } else {
                                evtchn_device_upcall(port);
                        }
                }
        }
}

/*
 * Send an IPI from the current CPU to the destination CPU.
 */
void
ipi_pcpu(unsigned int cpu, int vector)
{
        int irq;

        irq = pcpu_find(cpu)->pc_ipi_to_irq[vector];

        notify_remote_via_irq(irq);
}

static int
find_unbound_irq(void)
{
        int dynirq, irq;

        for (dynirq = 0; dynirq < NR_DYNIRQS; dynirq++) {
                irq = dynirq_to_irq(dynirq);
                if (irq_bindcount[irq] == 0)
                        break;
        }

        if (dynirq == NR_DYNIRQS)
                panic("No available IRQ to bind to: increase NR_DYNIRQS!\n");

        return (irq);
}

static int
bind_caller_port_to_irq(unsigned int caller_port)
{
        int irq;

        mtx_lock_spin(&irq_mapping_update_lock);

        if ((irq = evtchn_to_irq[caller_port]) == -1) {
                if ((irq = find_unbound_irq()) < 0)
                        goto out;

                evtchn_to_irq[caller_port] = irq;
                irq_info[irq] = mk_irq_info(IRQT_CALLER_PORT, 0, caller_port);
        }

        irq_bindcount[irq]++;
        unmask_evtchn(caller_port);

 out:
        mtx_unlock_spin(&irq_mapping_update_lock);
        return (irq);
}

static int
bind_local_port_to_irq(unsigned int local_port)
{
        int irq;

        mtx_lock_spin(&irq_mapping_update_lock);

        KASSERT(evtchn_to_irq[local_port] == -1,
            ("evtchn_to_irq inconsistent"));

        if ((irq = find_unbound_irq()) < 0) {
                struct evtchn_close close = { .port = local_port };
                HYPERVISOR_event_channel_op(EVTCHNOP_close, &close);

                goto out;
        }

        evtchn_to_irq[local_port] = irq;
        irq_info[irq] = mk_irq_info(IRQT_LOCAL_PORT, 0, local_port);
        irq_bindcount[irq]++;
        unmask_evtchn(local_port);

 out:
        mtx_unlock_spin(&irq_mapping_update_lock);
        return (irq);
}

static int
bind_listening_port_to_irq(unsigned int remote_domain)
{
        struct evtchn_alloc_unbound alloc_unbound;
        int err;

        alloc_unbound.dom        = DOMID_SELF;
        alloc_unbound.remote_dom = remote_domain;

        err = HYPERVISOR_event_channel_op(EVTCHNOP_alloc_unbound,
                                          &alloc_unbound);

        return (err != 0 ? err : bind_local_port_to_irq(alloc_unbound.port));
}

static int
bind_interdomain_evtchn_to_irq(unsigned int remote_domain,
    unsigned int remote_port)
{
        struct evtchn_bind_interdomain bind_interdomain;
        int err;

        bind_interdomain.remote_dom  = remote_domain;
        bind_interdomain.remote_port = remote_port;

        err = HYPERVISOR_event_channel_op(EVTCHNOP_bind_interdomain,
                                          &bind_interdomain);

        return (err != 0 ? err :
            bind_local_port_to_irq(bind_interdomain.local_port));
}

static int
bind_virq_to_irq(unsigned int virq, unsigned int cpu)
{
        struct evtchn_bind_virq bind_virq;
        int evtchn = 0, irq;

        mtx_lock_spin(&irq_mapping_update_lock);

        if ((irq = pcpu_find(cpu)->pc_virq_to_irq[virq]) == -1) {
                if ((irq = find_unbound_irq()) < 0)
                        goto out;

                bind_virq.virq = virq;
                bind_virq.vcpu = cpu;
                HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq, &bind_virq);

                evtchn = bind_virq.port;

                evtchn_to_irq[evtchn] = irq;
                irq_info[irq] = mk_irq_info(IRQT_VIRQ, virq, evtchn);

                pcpu_find(cpu)->pc_virq_to_irq[virq] = irq;

                bind_evtchn_to_cpu(evtchn, cpu);
        }

        irq_bindcount[irq]++;
        unmask_evtchn(evtchn);
out:
        mtx_unlock_spin(&irq_mapping_update_lock);

        return (irq);
}

extern int bind_ipi_to_irq(unsigned int ipi, unsigned int cpu);

int
bind_ipi_to_irq(unsigned int ipi, unsigned int cpu)
{
        struct evtchn_bind_ipi bind_ipi;
        int irq;
        int evtchn = 0;

        mtx_lock_spin(&irq_mapping_update_lock);

        if ((irq = pcpu_find(cpu)->pc_ipi_to_irq[ipi]) == -1) {
                if ((irq = find_unbound_irq()) < 0)
                        goto out;

                bind_ipi.vcpu = cpu;
                HYPERVISOR_event_channel_op(EVTCHNOP_bind_ipi, &bind_ipi);
                evtchn = bind_ipi.port;

                evtchn_to_irq[evtchn] = irq;
                irq_info[irq] = mk_irq_info(IRQT_IPI, ipi, evtchn);

                pcpu_find(cpu)->pc_ipi_to_irq[ipi] = irq;

                bind_evtchn_to_cpu(evtchn, cpu);
        }
        irq_bindcount[irq]++;
        unmask_evtchn(evtchn);
out:
        mtx_unlock_spin(&irq_mapping_update_lock);

        return (irq);
}

static void
unbind_from_irq(int irq)
{
        struct evtchn_close close;
        int evtchn = evtchn_from_irq(irq);
        int cpu;

        mtx_lock_spin(&irq_mapping_update_lock);

        if ((--irq_bindcount[irq] == 0) && VALID_EVTCHN(evtchn)) {
                close.port = evtchn;
                HYPERVISOR_event_channel_op(EVTCHNOP_close, &close);

                switch (type_from_irq(irq)) {
                case IRQT_VIRQ:
                        cpu = cpu_from_evtchn(evtchn);
                        pcpu_find(cpu)->pc_virq_to_irq[index_from_irq(irq)] = -1;
                        break;
                case IRQT_IPI:
                        cpu = cpu_from_evtchn(evtchn);
                        pcpu_find(cpu)->pc_ipi_to_irq[index_from_irq(irq)] = -1;
                        break;
                default:
                        break;
                }

                /* Closed ports are implicitly re-bound to VCPU0. */
                bind_evtchn_to_cpu(evtchn, 0);

                evtchn_to_irq[evtchn] = -1;
                irq_info[irq] = IRQ_UNBOUND;
        }

        mtx_unlock_spin(&irq_mapping_update_lock);
}

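/*
 * The bind_*_to_irqhandler() wrappers below pair an event-channel
 * binding with registration of a FreeBSD interrupt source and handler;
 * if intr_add_handler() fails, the binding is rolled back via
 * unbind_from_irq().
 */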
int
bind_caller_port_to_irqhandler(unsigned int caller_port,
    const char *devname, driver_intr_t handler, void *arg,
    unsigned long irqflags, unsigned int *irqp)
{
        unsigned int irq;
        int error;

        irq = bind_caller_port_to_irq(caller_port);
        intr_register_source(&xp->xp_pins[irq].xp_intsrc);
        error = intr_add_handler(devname, irq, NULL, handler, arg, irqflags,
            &xp->xp_pins[irq].xp_cookie);

        if (error) {
                unbind_from_irq(irq);
                return (error);
        }

        if (irqp)
                *irqp = irq;

        return (0);
}

int
bind_listening_port_to_irqhandler(unsigned int remote_domain,
    const char *devname, driver_intr_t handler, void *arg,
    unsigned long irqflags, unsigned int *irqp)
{
        unsigned int irq;
        int error;

        irq = bind_listening_port_to_irq(remote_domain);
        intr_register_source(&xp->xp_pins[irq].xp_intsrc);
        error = intr_add_handler(devname, irq, NULL, handler, arg, irqflags,
            &xp->xp_pins[irq].xp_cookie);
        if (error) {
                unbind_from_irq(irq);
                return (error);
        }
        if (irqp)
                *irqp = irq;

        return (0);
}

int
bind_interdomain_evtchn_to_irqhandler(unsigned int remote_domain,
    unsigned int remote_port, const char *devname,
    driver_filter_t filter, driver_intr_t handler,
    unsigned long irqflags, unsigned int *irqp)
{
        unsigned int irq;
        int error;

        irq = bind_interdomain_evtchn_to_irq(remote_domain, remote_port);
        intr_register_source(&xp->xp_pins[irq].xp_intsrc);
        error = intr_add_handler(devname, irq, filter, handler, NULL,
            irqflags, &xp->xp_pins[irq].xp_cookie);
        if (error) {
                unbind_from_irq(irq);
                return (error);
        }

        if (irqp)
                *irqp = irq;
        return (0);
}

int
bind_virq_to_irqhandler(unsigned int virq, unsigned int cpu,
    const char *devname, driver_filter_t filter, driver_intr_t handler,
    void *arg, unsigned long irqflags, unsigned int *irqp)
{
        unsigned int irq;
        int error;

        irq = bind_virq_to_irq(virq, cpu);
        intr_register_source(&xp->xp_pins[irq].xp_intsrc);
        error = intr_add_handler(devname, irq, filter, handler,
            arg, irqflags, &xp->xp_pins[irq].xp_cookie);
        if (error) {
                unbind_from_irq(irq);
                return (error);
        }

        if (irqp)
                *irqp = irq;
        return (0);
}

int
bind_ipi_to_irqhandler(unsigned int ipi, unsigned int cpu,
    const char *devname, driver_filter_t filter,
    unsigned long irqflags, unsigned int *irqp)
{
        unsigned int irq;
        int error;

        irq = bind_ipi_to_irq(ipi, cpu);
        intr_register_source(&xp->xp_pins[irq].xp_intsrc);
        error = intr_add_handler(devname, irq, filter, NULL,
            NULL, irqflags, &xp->xp_pins[irq].xp_cookie);
        if (error) {
                unbind_from_irq(irq);
                return (error);
        }

        if (irqp)
                *irqp = irq;
        return (0);
}

void
unbind_from_irqhandler(unsigned int irq)
{
        intr_remove_handler(xp->xp_pins[irq].xp_cookie);
        unbind_from_irq(irq);
}
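
/*
 * Usage sketch (hypothetical driver code, not part of this file): bind
 * a handler for the Xen timer VIRQ on CPU 0 and tear it down again.
 * "my_timer_filter" and "sc" are illustrative names only.
 *
 *      unsigned int irq;
 *      int error;
 *
 *      error = bind_virq_to_irqhandler(VIRQ_TIMER, 0, "vtimer",
 *          my_timer_filter, NULL, sc, INTR_TYPE_CLK, &irq);
 *      if (error)
 *              return (error);
 *      ...
 *      unbind_from_irqhandler(irq);
 */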

#if 0
/* Rebind an evtchn so that it gets delivered to a specific cpu */
static void
rebind_irq_to_cpu(unsigned irq, unsigned tcpu)
{
        struct evtchn_bind_vcpu bind_vcpu;
        int evtchn;

        mtx_lock_spin(&irq_mapping_update_lock);

        evtchn = evtchn_from_irq(irq);
        if (!VALID_EVTCHN(evtchn)) {
                mtx_unlock_spin(&irq_mapping_update_lock);
                return;
        }

        /* Send future instances of this interrupt to other vcpu. */
        bind_vcpu.port = evtchn;
        bind_vcpu.vcpu = tcpu;

        /*
         * If this fails, it usually just indicates that we're dealing with a
         * virq or IPI channel, which don't actually need to be rebound. Ignore
         * it, but don't do the xenlinux-level rebind in that case.
         */
        if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_vcpu, &bind_vcpu) >= 0)
                bind_evtchn_to_cpu(evtchn, tcpu);

        mtx_unlock_spin(&irq_mapping_update_lock);
}

static void set_affinity_irq(unsigned irq, cpumask_t dest)
{
        unsigned tcpu = ffs(dest) - 1;
        rebind_irq_to_cpu(irq, tcpu);
}
#endif

/*
 * Interface to generic handling in intr_machdep.c
 */

/*------------ interrupt handling --------------------------------------*/

static void     xenpic_dynirq_enable_source(struct intsrc *isrc);
static void     xenpic_dynirq_disable_source(struct intsrc *isrc, int);
static void     xenpic_dynirq_eoi_source(struct intsrc *isrc);
static void     xenpic_dynirq_enable_intr(struct intsrc *isrc);

static void     xenpic_pirq_enable_source(struct intsrc *isrc);
static void     xenpic_pirq_disable_source(struct intsrc *isrc, int);
static void     xenpic_pirq_eoi_source(struct intsrc *isrc);
static void     xenpic_pirq_enable_intr(struct intsrc *isrc);

static int      xenpic_vector(struct intsrc *isrc);
static int      xenpic_source_pending(struct intsrc *isrc);
static void     xenpic_suspend(struct pic* pic);
static void     xenpic_resume(struct pic* pic);
static int      xenpic_assign_cpu(struct intsrc *, u_int apic_id);

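/*
 * Two PIC templates: one for dynamic (event-channel) IRQs and one for
 * physical IRQs; only the latter supports CPU assignment.
 */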
struct pic xenpic_dynirq_template = {
        .pic_enable_source      =       xenpic_dynirq_enable_source,
        .pic_disable_source     =       xenpic_dynirq_disable_source,
        .pic_eoi_source         =       xenpic_dynirq_eoi_source,
        .pic_enable_intr        =       xenpic_dynirq_enable_intr,
        .pic_vector             =       xenpic_vector,
        .pic_source_pending     =       xenpic_source_pending,
        .pic_suspend            =       xenpic_suspend,
        .pic_resume             =       xenpic_resume
};

struct pic xenpic_pirq_template = {
        .pic_enable_source      =       xenpic_pirq_enable_source,
        .pic_disable_source     =       xenpic_pirq_disable_source,
        .pic_eoi_source         =       xenpic_pirq_eoi_source,
        .pic_enable_intr        =       xenpic_pirq_enable_intr,
        .pic_vector             =       xenpic_vector,
        .pic_source_pending     =       xenpic_source_pending,
        .pic_suspend            =       xenpic_suspend,
        .pic_resume             =       xenpic_resume,
        .pic_assign_cpu         =       xenpic_assign_cpu
};

static void
xenpic_dynirq_enable_source(struct intsrc *isrc)
{
        unsigned int irq;
        struct xenpic_intsrc *xp;

        xp = (struct xenpic_intsrc *)isrc;

        mtx_lock_spin(&irq_mapping_update_lock);
        if (xp->xp_masked) {
                irq = xenpic_vector(isrc);
                unmask_evtchn(evtchn_from_irq(irq));
                xp->xp_masked = FALSE;
        }
        mtx_unlock_spin(&irq_mapping_update_lock);
}

static void
xenpic_dynirq_disable_source(struct intsrc *isrc, int foo)
{
        unsigned int irq;
        struct xenpic_intsrc *xp;

        xp = (struct xenpic_intsrc *)isrc;

        mtx_lock_spin(&irq_mapping_update_lock);
        if (!xp->xp_masked) {
                irq = xenpic_vector(isrc);
                mask_evtchn(evtchn_from_irq(irq));
                xp->xp_masked = TRUE;
        }
        mtx_unlock_spin(&irq_mapping_update_lock);
}

static void
xenpic_dynirq_enable_intr(struct intsrc *isrc)
{
        unsigned int irq;
        struct xenpic_intsrc *xp;

        xp = (struct xenpic_intsrc *)isrc;
        mtx_lock_spin(&irq_mapping_update_lock);
        xp->xp_masked = 0;
        irq = xenpic_vector(isrc);
        unmask_evtchn(evtchn_from_irq(irq));
        mtx_unlock_spin(&irq_mapping_update_lock);
}

static void
xenpic_dynirq_eoi_source(struct intsrc *isrc)
{
        unsigned int irq;
        struct xenpic_intsrc *xp;

        xp = (struct xenpic_intsrc *)isrc;
        mtx_lock_spin(&irq_mapping_update_lock);
        xp->xp_masked = 0;
        irq = xenpic_vector(isrc);
        unmask_evtchn(evtchn_from_irq(irq));
        mtx_unlock_spin(&irq_mapping_update_lock);
}

static int
xenpic_vector(struct intsrc *isrc)
{
        struct xenpic_intsrc *pin;

        pin = (struct xenpic_intsrc *)isrc;
        /* printf("xenpic_vector(): isrc=%p,vector=%u\n", pin, pin->xp_vector); */

        return (pin->xp_vector);
}

static int
xenpic_source_pending(struct intsrc *isrc)
{
        struct xenpic_intsrc *pin = (struct xenpic_intsrc *)isrc;

        /* XXXEN: TODO */
        printf("xenpic_source_pending(): vector=%x,masked=%x\n",
            pin->xp_vector, pin->xp_masked);

        /* notify_remote_via_evtchn(pin->xp_vector); XXX RS: Is this correct? */
        return (0);
}

static void
xenpic_suspend(struct pic* pic)
{
        TODO;
}

static void
xenpic_resume(struct pic* pic)
{
        TODO;
}

static int
xenpic_assign_cpu(struct intsrc *isrc, u_int apic_id)
{
        TODO;
        return (EOPNOTSUPP);
}

void
notify_remote_via_irq(int irq)
{
        int evtchn = evtchn_from_irq(irq);

        if (VALID_EVTCHN(evtchn))
                notify_remote_via_evtchn(evtchn);
        else
                panic("invalid event channel for irq %d", irq);
}

/* required for support of physical devices */
static inline void
pirq_unmask_notify(int pirq)
{
        struct physdev_eoi eoi = { .irq = pirq };

        if (unlikely(test_bit(pirq, &pirq_needs_unmask_notify[0]))) {
                (void)HYPERVISOR_physdev_op(PHYSDEVOP_eoi, &eoi);
        }
}

static inline void
pirq_query_unmask(int pirq)
{
        struct physdev_irq_status_query irq_status_query;

        irq_status_query.irq = pirq;
        (void)HYPERVISOR_physdev_op(PHYSDEVOP_IRQ_STATUS_QUERY,
            &irq_status_query);
        clear_bit(pirq, &pirq_needs_unmask_notify[0]);
        if (irq_status_query.flags & PHYSDEVOP_IRQ_NEEDS_UNMASK_NOTIFY)
                set_bit(pirq, &pirq_needs_unmask_notify[0]);
}
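
/*
 * Some physical IRQs require an explicit PHYSDEVOP_eoi notification to
 * Xen when unmasked; pirq_query_unmask() records that requirement in
 * pirq_needs_unmask_notify[] and pirq_unmask_notify() acts on it.
 */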

/*
 * On startup, if there is no action associated with the IRQ then we are
 * probing. In this case we should not share with others as it will confuse us.
 */
#define probing_irq(_irq) (intr_lookup_source(_irq) == NULL)

static void
xenpic_pirq_enable_intr(struct intsrc *isrc)
{
        struct evtchn_bind_pirq bind_pirq;
        int evtchn;
        unsigned int irq;

        mtx_lock_spin(&irq_mapping_update_lock);
        irq = xenpic_vector(isrc);
        evtchn = evtchn_from_irq(irq);

        if (VALID_EVTCHN(evtchn))
                goto out;

        bind_pirq.pirq  = irq;
        /* NB. We are happy to share unless we are probing. */
        bind_pirq.flags = probing_irq(irq) ? 0 : BIND_PIRQ__WILL_SHARE;

        if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_pirq, &bind_pirq) != 0) {
#ifndef XEN_PRIVILEGED_GUEST
                panic("unexpected pirq call");
#endif
                /* Some failures are expected when probing. */
                if (!probing_irq(irq))
                        printf("Failed to obtain physical IRQ %d\n", irq);
                mtx_unlock_spin(&irq_mapping_update_lock);
                return;
        }
        evtchn = bind_pirq.port;

        pirq_query_unmask(irq_to_pirq(irq));

        bind_evtchn_to_cpu(evtchn, 0);
        evtchn_to_irq[evtchn] = irq;
        irq_info[irq] = mk_irq_info(IRQT_PIRQ, irq, evtchn);

 out:
        unmask_evtchn(evtchn);
        pirq_unmask_notify(irq_to_pirq(irq));
        mtx_unlock_spin(&irq_mapping_update_lock);
}

static void
xenpic_pirq_enable_source(struct intsrc *isrc)
{
        int evtchn;
        unsigned int irq;

        mtx_lock_spin(&irq_mapping_update_lock);
        irq = xenpic_vector(isrc);
        evtchn = evtchn_from_irq(irq);

        if (!VALID_EVTCHN(evtchn))
                goto done;

        unmask_evtchn(evtchn);
        pirq_unmask_notify(irq_to_pirq(irq));
 done:
        mtx_unlock_spin(&irq_mapping_update_lock);
}

static void
xenpic_pirq_disable_source(struct intsrc *isrc, int eoi)
{
        int evtchn;
        unsigned int irq;

        mtx_lock_spin(&irq_mapping_update_lock);
        irq = xenpic_vector(isrc);
        evtchn = evtchn_from_irq(irq);

        if (!VALID_EVTCHN(evtchn))
                goto done;

        mask_evtchn(evtchn);
 done:
        mtx_unlock_spin(&irq_mapping_update_lock);
}

static void
xenpic_pirq_eoi_source(struct intsrc *isrc)
{
        int evtchn;
        unsigned int irq;

        mtx_lock_spin(&irq_mapping_update_lock);
        irq = xenpic_vector(isrc);
        evtchn = evtchn_from_irq(irq);

        if (!VALID_EVTCHN(evtchn))
                goto done;

        unmask_evtchn(evtchn);
        pirq_unmask_notify(irq_to_pirq(irq));
 done:
        mtx_unlock_spin(&irq_mapping_update_lock);
}

int
irq_to_evtchn_port(int irq)
{
        return (evtchn_from_irq(irq));
}

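/*
 * mask_evtchn() and unmask_evtchn() operate on the global evtchn_mask
 * bitmap in the shared info page using atomic synch_* bit operations.
 */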
void
mask_evtchn(int port)
{
        shared_info_t *s = HYPERVISOR_shared_info;

        synch_set_bit(port, &s->evtchn_mask[0]);
}

void
unmask_evtchn(int port)
{
        shared_info_t *s = HYPERVISOR_shared_info;
        unsigned int cpu = PCPU_GET(cpuid);
        vcpu_info_t *vcpu_info = &s->vcpu_info[cpu];

        /* Slow path (hypercall) if this is a non-local port. */
        if (unlikely(cpu != cpu_from_evtchn(port))) {
                struct evtchn_unmask unmask = { .port = port };
                (void)HYPERVISOR_event_channel_op(EVTCHNOP_unmask, &unmask);
                return;
        }

        synch_clear_bit(port, &s->evtchn_mask[0]);

        /*
         * The following is basically the equivalent of 'hw_resend_irq'. Just
         * like a real IO-APIC we 'lose the interrupt edge' if the channel is
         * masked.
         */
        if (synch_test_bit(port, &s->evtchn_pending[0]) &&
            !synch_test_and_set_bit(port / LONG_BIT,
                                    &vcpu_info->evtchn_pending_sel)) {
                vcpu_info->evtchn_upcall_pending = 1;
                if (!vcpu_info->evtchn_upcall_mask)
                        force_evtchn_callback();
        }
}

void
irq_resume(void)
{
        int cpu, pirq, virq, ipi, irq, evtchn;
        struct evtchn_bind_virq bind_virq;
        struct evtchn_bind_ipi bind_ipi;

        init_evtchn_cpu_bindings();

        /* New event-channel space is not 'live' yet. */
        for (evtchn = 0; evtchn < NR_EVENT_CHANNELS; evtchn++)
                mask_evtchn(evtchn);

        /* Check that no PIRQs are still bound. */
        for (pirq = 0; pirq < NR_PIRQS; pirq++) {
                KASSERT(irq_info[pirq_to_irq(pirq)] == IRQ_UNBOUND,
                    ("pirq_to_irq inconsistent"));
        }

        /* Secondary CPUs must have no VIRQ or IPI bindings. */
        for (cpu = 1; cpu < MAX_VIRT_CPUS; cpu++) {
                for (virq = 0; virq < NR_VIRQS; virq++) {
                        KASSERT(pcpu_find(cpu)->pc_virq_to_irq[virq] == -1,
                            ("virq_to_irq inconsistent"));
                }
                for (ipi = 0; ipi < NR_IPIS; ipi++) {
                        KASSERT(pcpu_find(cpu)->pc_ipi_to_irq[ipi] == -1,
                            ("ipi_to_irq inconsistent"));
                }
        }

        /* No IRQ <-> event-channel mappings. */
        for (irq = 0; irq < NR_IRQS; irq++) {
                /* Zap the event-channel binding (low _EVTCHN_BITS bits). */
                irq_info[irq] &= ~((1U << _EVTCHN_BITS) - 1);
        }
        for (evtchn = 0; evtchn < NR_EVENT_CHANNELS; evtchn++)
                evtchn_to_irq[evtchn] = -1;

        /* Primary CPU: rebind VIRQs automatically. */
        for (virq = 0; virq < NR_VIRQS; virq++) {
                if ((irq = pcpu_find(0)->pc_virq_to_irq[virq]) == -1)
                        continue;

                KASSERT(irq_info[irq] == mk_irq_info(IRQT_VIRQ, virq, 0),
                    ("irq_info inconsistent"));

                /* Get a new binding from Xen. */
                bind_virq.virq = virq;
                bind_virq.vcpu = 0;
                HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq, &bind_virq);
                evtchn = bind_virq.port;

                /* Record the new mapping. */
                evtchn_to_irq[evtchn] = irq;
                irq_info[irq] = mk_irq_info(IRQT_VIRQ, virq, evtchn);

                /* Ready for use. */
                unmask_evtchn(evtchn);
        }

        /* Primary CPU: rebind IPIs automatically. */
        for (ipi = 0; ipi < NR_IPIS; ipi++) {
                if ((irq = pcpu_find(0)->pc_ipi_to_irq[ipi]) == -1)
                        continue;

                KASSERT(irq_info[irq] == mk_irq_info(IRQT_IPI, ipi, 0),
                    ("irq_info inconsistent"));

                /* Get a new binding from Xen. */
                memset(&bind_ipi, 0, sizeof(bind_ipi));
                bind_ipi.vcpu = 0;
                HYPERVISOR_event_channel_op(EVTCHNOP_bind_ipi, &bind_ipi);
                evtchn = bind_ipi.port;

                /* Record the new mapping. */
                evtchn_to_irq[evtchn] = irq;
                irq_info[irq] = mk_irq_info(IRQT_IPI, ipi, evtchn);

                /* Ready for use. */
                unmask_evtchn(evtchn);
        }
}

static void
evtchn_init(void *dummy __unused)
{
        int i, cpu;
        struct xenpic_intsrc *pin, *tpin;

        init_evtchn_cpu_bindings();

        /* No VIRQ or IPI bindings. */
        for (cpu = 0; cpu < mp_ncpus; cpu++) {
                for (i = 0; i < NR_VIRQS; i++)
                        pcpu_find(cpu)->pc_virq_to_irq[i] = -1;
                for (i = 0; i < NR_IPIS; i++)
                        pcpu_find(cpu)->pc_ipi_to_irq[i] = -1;
        }

        /* No event-channel -> IRQ mappings. */
        for (i = 0; i < NR_EVENT_CHANNELS; i++) {
                evtchn_to_irq[i] = -1;
                mask_evtchn(i); /* No event channels are 'live' right now. */
        }

        /* No IRQ -> event-channel mappings. */
        for (i = 0; i < NR_IRQS; i++)
                irq_info[i] = IRQ_UNBOUND;

        xp = malloc(sizeof(struct xenpic) + NR_IRQS*sizeof(struct xenpic_intsrc),
                    M_DEVBUF, M_WAITOK);

        xp->xp_dynirq_pic = &xenpic_dynirq_template;
        xp->xp_pirq_pic = &xenpic_pirq_template;
        xp->xp_numintr = NR_IRQS;
        bzero(xp->xp_pins, sizeof(struct xenpic_intsrc) * NR_IRQS);

        /* We need to register our PICs beforehand. */
        if (intr_register_pic(&xenpic_pirq_template))
                panic("XEN: intr_register_pic() failure");
        if (intr_register_pic(&xenpic_dynirq_template))
                panic("XEN: intr_register_pic() failure");

        /*
         * Initialize the dynamic IRQs - we initialize the structures, but
         * we do not bind them (the bind_*_to_irqhandler() routines do this).
         */
        pin = xp->xp_pins;
        for (i = 0; i < NR_DYNIRQS; i++) {
                /* Dynamic IRQ space is currently unbound. Zero the refcnts. */
                irq_bindcount[dynirq_to_irq(i)] = 0;

                tpin = &pin[dynirq_to_irq(i)];
                tpin->xp_intsrc.is_pic = xp->xp_dynirq_pic;
                tpin->xp_vector = dynirq_to_irq(i);
        }

        /*
         * Now, we go ahead and claim every PIRQ there is.
         */
        pin = xp->xp_pins;
        for (i = 0; i < NR_PIRQS; i++) {
                /* Physical IRQ space is currently unbound. Zero the refcnts. */
                irq_bindcount[pirq_to_irq(i)] = 0;

#ifdef RTC_IRQ
                /* If not domain 0, force our RTC driver to fail its probe. */
                if ((i == RTC_IRQ) &&
                    !(xen_start_info->flags & SIF_INITDOMAIN))
                        continue;
#endif
                tpin = &pin[pirq_to_irq(i)];
                tpin->xp_intsrc.is_pic = xp->xp_pirq_pic;
                tpin->xp_vector = pirq_to_irq(i);
        }
}

SYSINIT(evtchn_init, SI_SUB_INTR, SI_ORDER_MIDDLE, evtchn_init, NULL);

/*
 * irq_mapping_update_lock: in order to allow an interrupt to occur in a
 * critical section, to set pcpu->ipending (etc...) properly, we must be
 * able to get the icu lock, so it can't be under witness.
 */
MTX_SYSINIT(irq_mapping_update_lock, &irq_mapping_update_lock, "xp", MTX_SPIN);
