FreeBSD/Linux Kernel Cross Reference
sys/x86/xen/xen_intr.c
/******************************************************************************
 * xen_intr.c
 *
 * Xen event and interrupt services for x86 HVM guests.
 *
 * Copyright (c) 2002-2005, K A Fraser
 * Copyright (c) 2005, Intel Corporation <xiaofeng.ling@intel.com>
 * Copyright (c) 2012, Spectra Logic Corporation
 *
 * This file may be distributed separately from the Linux kernel, or
 * incorporated into other software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/interrupt.h>
#include <sys/pcpu.h>
#include <sys/smp.h>
#include <sys/refcount.h>

#include <vm/vm.h>
#include <vm/pmap.h>

#include <machine/intr_machdep.h>
#include <x86/apicvar.h>
#include <x86/apicreg.h>
#include <machine/smp.h>
#include <machine/stdarg.h>

#include <machine/xen/synch_bitops.h>

#include <xen/xen-os.h>
#include <xen/hvm.h>
#include <xen/hypervisor.h>
#include <xen/xen_intr.h>
#include <xen/evtchn/evtchnvar.h>

#include <dev/xen/xenpci/xenpcivar.h>
#include <dev/pci/pcivar.h>

#ifdef DDB
#include <ddb/ddb.h>
#endif

static MALLOC_DEFINE(M_XENINTR, "xen_intr", "Xen Interrupt Services");

static u_int first_evtchn_irq;

/**
 * Per-cpu event channel processing state.
 */
struct xen_intr_pcpu_data {
        /**
         * The last event channel bitmap section (level one bit) processed.
         * This is used to ensure we scan all ports before
         * servicing an already serviced port again.
         */
        u_int last_processed_l1i;

        /**
         * The last event channel processed within the event channel
         * bitmap being scanned.
         */
        u_int last_processed_l2i;

        /** Pointer to this CPU's interrupt statistic counter. */
        u_long *evtchn_intrcnt;

        /**
         * A bitmap of ports that can be serviced from this CPU.
         * A set bit means interrupt handling is enabled.
         */
        u_long evtchn_enabled[sizeof(u_long) * 8];
};

/*
 * Start the scan at port 0 by initializing the last scanned
 * location as the highest numbered event channel port.
 */
DPCPU_DEFINE_STATIC(struct xen_intr_pcpu_data, xen_intr_pcpu) = {
        .last_processed_l1i = LONG_BIT - 1,
        .last_processed_l2i = LONG_BIT - 1
};

DPCPU_DECLARE(struct vcpu_info *, vcpu_info);

#define XEN_INVALID_EVTCHN      0 /* Invalid event channel */

#define is_valid_evtchn(x)      ((x) != XEN_INVALID_EVTCHN)

struct xenisrc {
        struct intsrc   xi_intsrc;
        enum evtchn_type xi_type;
        int             xi_cpu;         /* VCPU for delivery. */
        int             xi_vector;      /* Global isrc vector number. */
        evtchn_port_t   xi_port;
        int             xi_virq;
        void            *xi_cookie;
        u_int           xi_close:1;     /* close on unbind? */
        u_int           xi_masked:1;
        volatile u_int  xi_refcount;
};

static void     xen_intr_suspend(struct pic *);
static void     xen_intr_resume(struct pic *, bool suspend_cancelled);
static void     xen_intr_enable_source(struct intsrc *isrc);
static void     xen_intr_disable_source(struct intsrc *isrc, int eoi);
static void     xen_intr_eoi_source(struct intsrc *isrc);
static void     xen_intr_enable_intr(struct intsrc *isrc);
static void     xen_intr_disable_intr(struct intsrc *isrc);
static int      xen_intr_vector(struct intsrc *isrc);
static int      xen_intr_source_pending(struct intsrc *isrc);
static int      xen_intr_config_intr(struct intsrc *isrc,
                    enum intr_trigger trig, enum intr_polarity pol);
static int      xen_intr_assign_cpu(struct intsrc *isrc, u_int apic_id);

/**
 * PIC interface for all event channel port types except physical IRQs.
 */
struct pic xen_intr_pic = {
        .pic_enable_source = xen_intr_enable_source,
        .pic_disable_source = xen_intr_disable_source,
        .pic_eoi_source = xen_intr_eoi_source,
        .pic_enable_intr = xen_intr_enable_intr,
        .pic_disable_intr = xen_intr_disable_intr,
        .pic_vector = xen_intr_vector,
        .pic_source_pending = xen_intr_source_pending,
        .pic_suspend = xen_intr_suspend,
        .pic_resume = xen_intr_resume,
        .pic_config_intr = xen_intr_config_intr,
        .pic_assign_cpu = xen_intr_assign_cpu
};

static struct mtx xen_intr_isrc_lock;
static u_int xen_intr_auto_vector_count;
static struct xenisrc *xen_intr_port_to_isrc[NR_EVENT_CHANNELS];

/*------------------------- Private Functions --------------------------------*/

/**
 * Retrieve a handle for a Xen interrupt source.
 *
 * \param isrc  A valid Xen interrupt source structure.
 *
 * \returns  A handle suitable for use with xen_intr_isrc_from_handle()
 *           to retrieve the original Xen interrupt source structure.
 */
static inline xen_intr_handle_t
xen_intr_handle_from_isrc(struct xenisrc *isrc)
{
        return (isrc);
}

/**
 * Lookup a Xen interrupt source object given an interrupt binding handle.
 *
 * \param handle  A handle initialized by a previous call to
 *                xen_intr_bind_isrc().
 *
 * \returns  A pointer to the Xen interrupt source object associated
 *           with the given interrupt handle.  NULL if no association
 *           currently exists.
 */
static inline struct xenisrc *
xen_intr_isrc_from_handle(xen_intr_handle_t handle)
{
        return ((struct xenisrc *)handle);
}

/**
 * Disable signal delivery for an event channel port on the
 * specified CPU.
 *
 * \param cpu   The CPU on which to mask delivery of events for port.
 * \param port  The event channel port to mask.
 *
 * This API is used to manage the port<=>CPU binding of event
 * channel handlers.
 *
 * \note  This operation does not preclude reception of an event
 *        for this event channel on another CPU.  To mask the
 *        event channel globally, use evtchn_mask().
 */
static inline void
evtchn_cpu_mask_port(u_int cpu, evtchn_port_t port)
{
        struct xen_intr_pcpu_data *pcpu;

        pcpu = DPCPU_ID_PTR(cpu, xen_intr_pcpu);
        xen_clear_bit(port, pcpu->evtchn_enabled);
}

/**
 * Enable signal delivery for an event channel port on the
 * specified CPU.
 *
 * \param cpu   The CPU on which to unmask delivery of events for port.
 * \param port  The event channel port to unmask.
 *
 * This API is used to manage the port<=>CPU binding of event
 * channel handlers.
 *
 * \note  This operation does not guarantee that event delivery
 *        is enabled for this event channel port.  The port must
 *        also be globally enabled.  See evtchn_unmask().
 */
static inline void
evtchn_cpu_unmask_port(u_int cpu, evtchn_port_t port)
{
        struct xen_intr_pcpu_data *pcpu;

        pcpu = DPCPU_ID_PTR(cpu, xen_intr_pcpu);
        xen_set_bit(port, pcpu->evtchn_enabled);
}

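/*
 * Illustrative sketch (shared_pending()/shared_masked() are pseudo-helpers
 * for the shared-info bits, not part of this file): the per-CPU bitmap
 * maintained by the two functions above is the third input to event
 * delivery.  An event on a port reaches a given CPU only when
 *
 *      shared_pending(port) && !shared_masked(port) &&
 *          xen_test_bit(port, pcpu->evtchn_enabled)
 *
 * which is exactly the word-at-a-time predicate evaluated by
 * xen_intr_active_ports() below.
 */
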
/**
 * Allocate and register a per-cpu Xen upcall interrupt counter.
 *
 * \param cpu  The cpu for which to register this interrupt count.
 */
static void
xen_intr_intrcnt_add(u_int cpu)
{
        char buf[MAXCOMLEN + 1];
        struct xen_intr_pcpu_data *pcpu;

        pcpu = DPCPU_ID_PTR(cpu, xen_intr_pcpu);
        if (pcpu->evtchn_intrcnt != NULL)
                return;

        snprintf(buf, sizeof(buf), "cpu%d:xen", cpu);
        intrcnt_add(buf, &pcpu->evtchn_intrcnt);
}

/**
 * Search for an already allocated but currently unused Xen interrupt
 * source object.
 *
 * \param type  Restrict the search to interrupt sources of the given
 *              type.
 *
 * \return  A pointer to a free Xen interrupt source object or NULL.
 */
static struct xenisrc *
xen_intr_find_unused_isrc(enum evtchn_type type)
{
        int isrc_idx;

        KASSERT(mtx_owned(&xen_intr_isrc_lock), ("Evtchn isrc lock not held"));

        for (isrc_idx = 0; isrc_idx < xen_intr_auto_vector_count; isrc_idx++) {
                struct xenisrc *isrc;
                u_int vector;

                vector = first_evtchn_irq + isrc_idx;
                isrc = (struct xenisrc *)intr_lookup_source(vector);
                if (isrc != NULL
                 && isrc->xi_type == EVTCHN_TYPE_UNBOUND) {
                        KASSERT(isrc->xi_intsrc.is_handlers == 0,
                            ("Free evtchn still has handlers"));
                        isrc->xi_type = type;
                        return (isrc);
                }
        }
        return (NULL);
}

/**
 * Allocate a Xen interrupt source object.
 *
 * \param type  The type of interrupt source to create.
 *
 * \return  A pointer to a newly allocated Xen interrupt source
 *          object or NULL.
 */
static struct xenisrc *
xen_intr_alloc_isrc(enum evtchn_type type)
{
        static int warned;
        struct xenisrc *isrc;
        unsigned int vector;

        KASSERT(mtx_owned(&xen_intr_isrc_lock), ("Evtchn alloc lock not held"));

        /* Stay within the vector range reserved by xen_intr_alloc_irqs(). */
        if (xen_intr_auto_vector_count >= NR_EVENT_CHANNELS) {
                if (!warned) {
                        warned = 1;
                        printf("%s: Event channels exhausted.\n", __func__);
                }
                return (NULL);
        }

        vector = first_evtchn_irq + xen_intr_auto_vector_count;
        xen_intr_auto_vector_count++;

        KASSERT((intr_lookup_source(vector) == NULL),
            ("Trying to use an already allocated vector"));

        mtx_unlock(&xen_intr_isrc_lock);
        isrc = malloc(sizeof(*isrc), M_XENINTR, M_WAITOK | M_ZERO);
        isrc->xi_intsrc.is_pic = &xen_intr_pic;
        isrc->xi_vector = vector;
        isrc->xi_type = type;
        intr_register_source(&isrc->xi_intsrc);
        mtx_lock(&xen_intr_isrc_lock);

        return (isrc);
}

/**
 * Attempt to free an active Xen interrupt source object.
 *
 * \param isrc  The interrupt source object to release.
 *
 * \returns  EBUSY if the source is still in use, otherwise 0.
 */
static int
xen_intr_release_isrc(struct xenisrc *isrc)
{

        mtx_lock(&xen_intr_isrc_lock);
        KASSERT(isrc->xi_intsrc.is_handlers == 0,
            ("Release called, but xenisrc still in use"));
        evtchn_mask_port(isrc->xi_port);
        evtchn_clear_port(isrc->xi_port);

        /* Rebind port to CPU 0. */
        evtchn_cpu_mask_port(isrc->xi_cpu, isrc->xi_port);
        evtchn_cpu_unmask_port(0, isrc->xi_port);

        if (isrc->xi_close != 0 && is_valid_evtchn(isrc->xi_port)) {
                struct evtchn_close close = { .port = isrc->xi_port };

                if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close))
                        panic("EVTCHNOP_close failed");
        }

        xen_intr_port_to_isrc[isrc->xi_port] = NULL;
        isrc->xi_cpu = 0;
        isrc->xi_type = EVTCHN_TYPE_UNBOUND;
        isrc->xi_port = 0;
        isrc->xi_cookie = NULL;
        mtx_unlock(&xen_intr_isrc_lock);
        return (0);
}

/**
 * Associate an interrupt handler with an already allocated local Xen
 * event channel port.
 *
 * \param isrcp         The returned Xen interrupt object associated with
 *                      the specified local port.
 * \param local_port    The event channel to bind.
 * \param type          The event channel type of local_port.
 * \param intr_owner    The device making this bind request.
 * \param filter        An interrupt filter handler.  Specify NULL
 *                      to always dispatch to the ithread handler.
 * \param handler       An interrupt ithread handler.  Optional (can
 *                      specify NULL) if all necessary event actions
 *                      are performed by filter.
 * \param arg           Argument to present to both filter and handler.
 * \param flags         Interrupt handler flags.  See sys/bus.h.
 * \param port_handlep  Pointer to an opaque handle used to manage this
 *                      registration.
 *
 * \returns  0 on success, otherwise an errno.
 */
static int
xen_intr_bind_isrc(struct xenisrc **isrcp, evtchn_port_t local_port,
    enum evtchn_type type, const char *intr_owner, driver_filter_t filter,
    driver_intr_t handler, void *arg, enum intr_type flags,
    xen_intr_handle_t *port_handlep)
{
        struct xenisrc *isrc;
        int error;

        *isrcp = NULL;
        if (port_handlep == NULL) {
                printf("%s: %s: Bad event handle\n", intr_owner, __func__);
                return (EINVAL);
        }

        mtx_lock(&xen_intr_isrc_lock);
        isrc = xen_intr_find_unused_isrc(type);
        if (isrc == NULL) {
                isrc = xen_intr_alloc_isrc(type);
                if (isrc == NULL) {
                        mtx_unlock(&xen_intr_isrc_lock);
                        return (ENOSPC);
                }
        }
        isrc->xi_port = local_port;
        xen_intr_port_to_isrc[local_port] = isrc;
        refcount_init(&isrc->xi_refcount, 1);
        mtx_unlock(&xen_intr_isrc_lock);

        /* Assign the opaque handler. */
        *port_handlep = xen_intr_handle_from_isrc(isrc);

#ifdef SMP
        if (type == EVTCHN_TYPE_PORT) {
                /*
                 * By default all interrupts are assigned to vCPU#0
                 * unless specified otherwise, so shuffle them to balance
                 * the interrupt load.
                 */
                xen_intr_assign_cpu(&isrc->xi_intsrc, intr_next_cpu(0));
        }
#endif

        if (filter == NULL && handler == NULL) {
                /*
                 * No filter/handler provided, leave the event channel
                 * masked and without a valid handler, the caller is
                 * in charge of setting that up.
                 */
                *isrcp = isrc;
                return (0);
        }

        error = xen_intr_add_handler(intr_owner, filter, handler, arg, flags,
            *port_handlep);
        if (error != 0) {
                xen_intr_release_isrc(isrc);
                return (error);
        }
        *isrcp = isrc;
        return (0);
}

/**
 * Determine the event channel ports at the given section of the
 * event port bitmap which have pending events for the given cpu.
 *
 * \param pcpu  The Xen interrupt pcpu data for the cpu being queried.
 * \param sh    The Xen shared info area.
 * \param idx   The index of the section of the event channel bitmap to
 *              inspect.
 *
 * \returns  A u_long with bits set for every event channel with pending
 *           events.
 */
static inline u_long
xen_intr_active_ports(struct xen_intr_pcpu_data *pcpu, shared_info_t *sh,
    u_int idx)
{

        CTASSERT(sizeof(sh->evtchn_mask[0]) == sizeof(sh->evtchn_pending[0]));
        CTASSERT(sizeof(sh->evtchn_mask[0]) == sizeof(pcpu->evtchn_enabled[0]));
        CTASSERT(sizeof(sh->evtchn_mask) == sizeof(sh->evtchn_pending));
        CTASSERT(sizeof(sh->evtchn_mask) == sizeof(pcpu->evtchn_enabled));
        return (sh->evtchn_pending[idx]
              & ~sh->evtchn_mask[idx]
              & pcpu->evtchn_enabled[idx]);
}

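/*
 * Worked example (LONG_BIT == 64 assumed for illustration): the upcall
 * scan below recovers a port number from its two bitmap coordinates as
 *
 *      port = (l1i * LONG_BIT) + l2i;
 *
 * so port 131 corresponds to selector bit l1i = 131 / 64 = 2 and section
 * bit l2i = 131 % 64 = 3.  Resuming each scan just past
 * last_processed_l1i/l2i keeps one busy low-numbered port from starving
 * higher-numbered ones.
 */
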
/**
 * Interrupt handler for processing all Xen event channel events.
 *
 * \param trap_frame  The trap frame context for the current interrupt.
 */
void
xen_intr_handle_upcall(struct trapframe *trap_frame)
{
        u_int l1i, l2i, port, cpu __diagused;
        u_long masked_l1, masked_l2;
        struct xenisrc *isrc;
        shared_info_t *s;
        vcpu_info_t *v;
        struct xen_intr_pcpu_data *pc;
        u_long l1, l2;

        /*
         * Disable preemption in order to always check and fire events
         * on the right vCPU.
         */
        critical_enter();

        cpu = PCPU_GET(cpuid);
        pc = DPCPU_PTR(xen_intr_pcpu);
        s = HYPERVISOR_shared_info;
        v = DPCPU_GET(vcpu_info);

        if (!xen_has_percpu_evtchn()) {
                KASSERT((cpu == 0), ("Fired PCI event callback on wrong CPU"));
        }

        v->evtchn_upcall_pending = 0;

#if 0
#ifndef CONFIG_X86 /* No need for a barrier -- XCHG is a barrier on x86. */
        /* Clear master flag /before/ clearing selector flag. */
        wmb();
#endif
#endif

        l1 = atomic_readandclear_long(&v->evtchn_pending_sel);

        l1i = pc->last_processed_l1i;
        l2i = pc->last_processed_l2i;
        (*pc->evtchn_intrcnt)++;

        while (l1 != 0) {
                l1i = (l1i + 1) % LONG_BIT;
                masked_l1 = l1 & ((~0UL) << l1i);

                if (masked_l1 == 0) {
                        /*
                         * If we masked out all events, wrap around
                         * to the beginning.
                         */
                        l1i = LONG_BIT - 1;
                        l2i = LONG_BIT - 1;
                        continue;
                }
                l1i = ffsl(masked_l1) - 1;

                do {
                        l2 = xen_intr_active_ports(pc, s, l1i);

                        l2i = (l2i + 1) % LONG_BIT;
                        masked_l2 = l2 & ((~0UL) << l2i);

                        if (masked_l2 == 0) {
                                /* If we masked out all events, move on. */
                                l2i = LONG_BIT - 1;
                                break;
                        }
                        l2i = ffsl(masked_l2) - 1;

                        /* Process port. */
                        port = (l1i * LONG_BIT) + l2i;
                        synch_clear_bit(port, &s->evtchn_pending[0]);

                        isrc = xen_intr_port_to_isrc[port];
                        if (__predict_false(isrc == NULL))
                                continue;

                        /* Make sure we are firing on the right vCPU. */
                        KASSERT((isrc->xi_cpu == PCPU_GET(cpuid)),
                            ("Received unexpected event on vCPU#%d, event bound to vCPU#%d",
                            PCPU_GET(cpuid), isrc->xi_cpu));

                        intr_execute_handlers(&isrc->xi_intsrc, trap_frame);

                        /*
                         * If this is the final port processed,
                         * we'll pick up here+1 next time.
                         */
                        pc->last_processed_l1i = l1i;
                        pc->last_processed_l2i = l2i;

                } while (l2i != LONG_BIT - 1);

                l2 = xen_intr_active_ports(pc, s, l1i);
                if (l2 == 0) {
                        /*
                         * We handled all ports, so we can clear the
                         * selector bit.
                         */
                        l1 &= ~(1UL << l1i);
                }
        }

        if (xen_evtchn_needs_ack)
                lapic_eoi();

        critical_exit();
}

static int
xen_intr_init(void *dummy __unused)
{
        shared_info_t *s = HYPERVISOR_shared_info;
        struct xen_intr_pcpu_data *pcpu;
        int i;

        if (!xen_domain())
                return (0);

        mtx_init(&xen_intr_isrc_lock, "xen-irq-lock", NULL, MTX_DEF);

        /*
         * Set the per-cpu mask of CPU#0 to enable all, since by default all
         * event channels are bound to CPU#0.
         */
        CPU_FOREACH(i) {
                pcpu = DPCPU_ID_PTR(i, xen_intr_pcpu);
                memset(pcpu->evtchn_enabled, i == 0 ? ~0 : 0,
                    sizeof(pcpu->evtchn_enabled));
        }

        for (i = 0; i < nitems(s->evtchn_mask); i++)
                atomic_store_rel_long(&s->evtchn_mask[i], ~0);

        intr_register_pic(&xen_intr_pic);

        if (bootverbose)
                printf("Xen interrupt system initialized\n");

        return (0);
}
SYSINIT(xen_intr_init, SI_SUB_INTR, SI_ORDER_SECOND, xen_intr_init, NULL);

static void
xen_intrcnt_init(void *dummy __unused)
{
        unsigned int i;

        if (!xen_domain())
                return;

        /*
         * Register interrupt count manually as we aren't guaranteed to see a
         * call to xen_intr_assign_cpu() before our first interrupt.
         */
        CPU_FOREACH(i)
                xen_intr_intrcnt_add(i);
}
SYSINIT(xen_intrcnt_init, SI_SUB_INTR, SI_ORDER_MIDDLE, xen_intrcnt_init, NULL);

void
xen_intr_alloc_irqs(void)
{

        if (num_io_irqs > UINT_MAX - NR_EVENT_CHANNELS)
                panic("IRQ allocation overflow (num_msi_irqs too high?)");
        first_evtchn_irq = num_io_irqs;
        num_io_irqs += NR_EVENT_CHANNELS;
}

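/*
 * Illustrative arithmetic (the starting value of num_io_irqs is an
 * assumption): if num_io_irqs were 24 when this runs, event channels
 * would own the contiguous vector range [24, 24 + NR_EVENT_CHANNELS),
 * and isrc index N would map to vector first_evtchn_irq + N, the inverse
 * of the lookups done in xen_intr_find_unused_isrc() and
 * xen_intr_resume().
 */
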
/*--------------------------- Common PIC Functions ---------------------------*/
/**
 * Prepare this PIC for system suspension.
 */
static void
xen_intr_suspend(struct pic *unused)
{
}

static void
xen_rebind_ipi(struct xenisrc *isrc)
{
#ifdef SMP
        int cpu = isrc->xi_cpu;
        int vcpu_id = pcpu_find(cpu)->pc_vcpu_id;
        int error;
        struct evtchn_bind_ipi bind_ipi = { .vcpu = vcpu_id };

        error = HYPERVISOR_event_channel_op(EVTCHNOP_bind_ipi,
            &bind_ipi);
        if (error != 0)
                panic("unable to rebind xen IPI: %d", error);

        isrc->xi_port = bind_ipi.port;
        isrc->xi_cpu = 0;
        xen_intr_port_to_isrc[bind_ipi.port] = isrc;

        error = xen_intr_assign_cpu(&isrc->xi_intsrc,
            cpu_apic_ids[cpu]);
        if (error)
                panic("unable to bind xen IPI to CPU#%d: %d",
                    cpu, error);

        evtchn_unmask_port(bind_ipi.port);
#else
        panic("Resume IPI event channel on UP");
#endif
}

static void
xen_rebind_virq(struct xenisrc *isrc)
{
        int cpu = isrc->xi_cpu;
        int vcpu_id = pcpu_find(cpu)->pc_vcpu_id;
        int error;
        struct evtchn_bind_virq bind_virq = { .virq = isrc->xi_virq,
            .vcpu = vcpu_id };

        error = HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq,
            &bind_virq);
        if (error != 0)
                panic("unable to rebind xen VIRQ#%d: %d", isrc->xi_virq, error);

        isrc->xi_port = bind_virq.port;
        isrc->xi_cpu = 0;
        xen_intr_port_to_isrc[bind_virq.port] = isrc;

#ifdef SMP
        error = xen_intr_assign_cpu(&isrc->xi_intsrc,
            cpu_apic_ids[cpu]);
        if (error)
                panic("unable to bind xen VIRQ#%d to CPU#%d: %d",
                    isrc->xi_virq, cpu, error);
#endif

        evtchn_unmask_port(bind_virq.port);
}

/**
 * Return this PIC to service after being suspended.
 */
static void
xen_intr_resume(struct pic *unused, bool suspend_cancelled)
{
        shared_info_t *s = HYPERVISOR_shared_info;
        struct xenisrc *isrc;
        u_int isrc_idx;
        int i;

        if (suspend_cancelled)
                return;

        /* Reset the per-CPU masks. */
        CPU_FOREACH(i) {
                struct xen_intr_pcpu_data *pcpu;

                pcpu = DPCPU_ID_PTR(i, xen_intr_pcpu);
                memset(pcpu->evtchn_enabled, i == 0 ? ~0 : 0,
                    sizeof(pcpu->evtchn_enabled));
        }

        /* Mask all event channels. */
        for (i = 0; i < nitems(s->evtchn_mask); i++)
                atomic_store_rel_long(&s->evtchn_mask[i], ~0);

        /* Remove port -> isrc mappings. */
        memset(xen_intr_port_to_isrc, 0, sizeof(xen_intr_port_to_isrc));

        /* Free unused isrcs and rebind VIRQs and IPIs. */
        for (isrc_idx = 0; isrc_idx < xen_intr_auto_vector_count; isrc_idx++) {
                u_int vector;

                vector = first_evtchn_irq + isrc_idx;
                isrc = (struct xenisrc *)intr_lookup_source(vector);
                if (isrc != NULL) {
                        isrc->xi_port = 0;
                        switch (isrc->xi_type) {
                        case EVTCHN_TYPE_IPI:
                                xen_rebind_ipi(isrc);
                                break;
                        case EVTCHN_TYPE_VIRQ:
                                xen_rebind_virq(isrc);
                                break;
                        default:
                                break;
                        }
                }
        }
}

/**
 * Disable a Xen interrupt source.
 *
 * \param isrc  The interrupt source to disable.
 */
static void
xen_intr_disable_intr(struct intsrc *base_isrc)
{
        struct xenisrc *isrc = (struct xenisrc *)base_isrc;

        evtchn_mask_port(isrc->xi_port);
}

/**
 * Determine the global interrupt vector number for
 * a Xen interrupt source.
 *
 * \param isrc  The interrupt source to query.
 *
 * \return  The vector number corresponding to the given interrupt source.
 */
static int
xen_intr_vector(struct intsrc *base_isrc)
{
        struct xenisrc *isrc = (struct xenisrc *)base_isrc;

        return (isrc->xi_vector);
}

/**
 * Determine whether or not interrupt events are pending on the
 * given interrupt source.
 *
 * \param isrc  The interrupt source to query.
 *
 * \returns  0 if no events are pending, otherwise non-zero.
 */
static int
xen_intr_source_pending(struct intsrc *isrc)
{
        /*
         * Event channels are edge triggered and never masked.
         * There can be no pending events.
         */
        return (0);
}

/**
 * Perform configuration of an interrupt source.
 *
 * \param isrc  The interrupt source to configure.
 * \param trig  Edge or level.
 * \param pol   Active high or low.
 *
 * \returns  0 on success, otherwise an errno (always ENODEV here; event
 *           channels cannot be reconfigured through this interface).
 */
static int
xen_intr_config_intr(struct intsrc *isrc, enum intr_trigger trig,
    enum intr_polarity pol)
{
        /* Configuration is only possible via the evtchn apis. */
        return (ENODEV);
}

/**
 * Configure CPU affinity for interrupt source event delivery.
 *
 * \param isrc     The interrupt source to configure.
 * \param apic_id  The apic id of the CPU for handling future events.
 *
 * \returns  0 if successful, otherwise an errno.
 */
static int
xen_intr_assign_cpu(struct intsrc *base_isrc, u_int apic_id)
{
#ifdef SMP
        struct evtchn_bind_vcpu bind_vcpu;
        struct xenisrc *isrc;
        u_int to_cpu, vcpu_id;
        int error, masked;

        if (!xen_has_percpu_evtchn())
                return (EOPNOTSUPP);

        to_cpu = apic_cpuid(apic_id);
        vcpu_id = pcpu_find(to_cpu)->pc_vcpu_id;

        mtx_lock(&xen_intr_isrc_lock);
        isrc = (struct xenisrc *)base_isrc;
        if (!is_valid_evtchn(isrc->xi_port)) {
                mtx_unlock(&xen_intr_isrc_lock);
                return (EINVAL);
        }

        /*
         * Mask the event channel while binding it to prevent interrupt
         * delivery with an inconsistent state in isrc->xi_cpu.
         */
        masked = evtchn_test_and_set_mask(isrc->xi_port);
        if ((isrc->xi_type == EVTCHN_TYPE_VIRQ) ||
            (isrc->xi_type == EVTCHN_TYPE_IPI)) {
                /*
                 * Virtual IRQs are associated with a cpu by
                 * the Hypervisor at evtchn_bind_virq time, so
                 * all we need to do is update the per-CPU masks.
                 */
                evtchn_cpu_mask_port(isrc->xi_cpu, isrc->xi_port);
                isrc->xi_cpu = to_cpu;
                evtchn_cpu_unmask_port(isrc->xi_cpu, isrc->xi_port);
                goto out;
        }

        bind_vcpu.port = isrc->xi_port;
        bind_vcpu.vcpu = vcpu_id;

        error = HYPERVISOR_event_channel_op(EVTCHNOP_bind_vcpu, &bind_vcpu);
        if (isrc->xi_cpu != to_cpu) {
                if (error == 0) {
                        /* Commit to new binding by removing the old one. */
                        evtchn_cpu_mask_port(isrc->xi_cpu, isrc->xi_port);
                        isrc->xi_cpu = to_cpu;
                        evtchn_cpu_unmask_port(isrc->xi_cpu, isrc->xi_port);
                }
        }

out:
        if (masked == 0)
                evtchn_unmask_port(isrc->xi_port);
        mtx_unlock(&xen_intr_isrc_lock);
        return (0);
#else
        return (EOPNOTSUPP);
#endif
}

/*------------------- Virtual Interrupt Source PIC Functions -----------------*/
/*
 * Mask a level triggered interrupt source.
 *
 * \param isrc  The interrupt source to mask (if necessary).
 * \param eoi   If non-zero, perform any necessary end-of-interrupt
 *              acknowledgements.
 */
static void
xen_intr_disable_source(struct intsrc *base_isrc, int eoi)
{
        struct xenisrc *isrc;

        isrc = (struct xenisrc *)base_isrc;

        /*
         * NB: checking if the event channel is already masked is
         * needed because the event channel user-space device
         * masks event channels on its filter as part of its
         * normal operation, and those shouldn't be automatically
         * unmasked by the generic interrupt code.  The event channel
         * device will unmask them when needed.
         */
        isrc->xi_masked = !!evtchn_test_and_set_mask(isrc->xi_port);
}

/*
 * Unmask a level triggered interrupt source.
 *
 * \param isrc  The interrupt source to unmask (if necessary).
 */
static void
xen_intr_enable_source(struct intsrc *base_isrc)
{
        struct xenisrc *isrc;

        isrc = (struct xenisrc *)base_isrc;

        if (isrc->xi_masked == 0)
                evtchn_unmask_port(isrc->xi_port);
}

/*
 * Perform any necessary end-of-interrupt acknowledgements.
 *
 * \param isrc  The interrupt source to EOI.
 */
static void
xen_intr_eoi_source(struct intsrc *base_isrc)
{
}

/*
 * Enable and unmask the interrupt source.
 *
 * \param isrc  The interrupt source to enable.
 */
static void
xen_intr_enable_intr(struct intsrc *base_isrc)
{
        struct xenisrc *isrc = (struct xenisrc *)base_isrc;

        evtchn_unmask_port(isrc->xi_port);
}

/*--------------------------- Public Functions -------------------------------*/
/*------ API comments for these methods can be found in xen/xen_intr.h -------*/
int
xen_intr_bind_local_port(device_t dev, evtchn_port_t local_port,
    driver_filter_t filter, driver_intr_t handler, void *arg,
    enum intr_type flags, xen_intr_handle_t *port_handlep)
{
        struct xenisrc *isrc;
        int error;

        error = xen_intr_bind_isrc(&isrc, local_port, EVTCHN_TYPE_PORT,
            device_get_nameunit(dev), filter, handler, arg, flags,
            port_handlep);
        if (error != 0)
                return (error);

        /*
         * The Event Channel API didn't open this port, so it is not
         * responsible for closing it automatically on unbind.
         */
        isrc->xi_close = 0;
        return (0);
}

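/*
 * Usage sketch (the "xfoo" driver, its softc layout, and the origin of
 * the port number are assumptions for illustration, not part of this
 * file's API):
 *
 *      static int
 *      xfoo_filter(void *arg)
 *      {
 *              struct xfoo_softc *sc = arg;
 *
 *              xfoo_ack_events(sc);    (hypothetical helper)
 *              return (FILTER_HANDLED);
 *      }
 *
 *      error = xen_intr_bind_local_port(sc->xf_dev, sc->xf_port,
 *          xfoo_filter, NULL, sc, INTR_TYPE_BIO, &sc->xf_handle);
 *
 * Since xi_close is left at 0 here, xen_intr_unbind(&sc->xf_handle)
 * detaches the handler but leaves the port itself open.
 */
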
int
xen_intr_alloc_and_bind_local_port(device_t dev, u_int remote_domain,
    driver_filter_t filter, driver_intr_t handler, void *arg,
    enum intr_type flags, xen_intr_handle_t *port_handlep)
{
        struct xenisrc *isrc;
        struct evtchn_alloc_unbound alloc_unbound;
        int error;

        alloc_unbound.dom = DOMID_SELF;
        alloc_unbound.remote_dom = remote_domain;
        error = HYPERVISOR_event_channel_op(EVTCHNOP_alloc_unbound,
            &alloc_unbound);
        if (error != 0) {
                /*
                 * XXX Trap Hypercall error code Linuxisms in
                 * the HYPERCALL layer.
                 */
                return (-error);
        }

        error = xen_intr_bind_isrc(&isrc, alloc_unbound.port, EVTCHN_TYPE_PORT,
            device_get_nameunit(dev), filter, handler, arg, flags,
            port_handlep);
        if (error != 0) {
                evtchn_close_t close = { .port = alloc_unbound.port };

                if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close))
                        panic("EVTCHNOP_close failed");
                return (error);
        }

        isrc->xi_close = 1;
        return (0);
}

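/*
 * Usage sketch (backend-style handshake; all names are assumptions): a
 * backend allocates an unbound port for its peer and advertises the port
 * number, typically via XenStore, so the remote domain can connect to it
 * from its end:
 *
 *      error = xen_intr_alloc_and_bind_local_port(sc->xf_dev,
 *          sc->xf_peer_domid, xfoo_filter, NULL, sc, INTR_TYPE_NET,
 *          &sc->xf_handle);
 *      if (error == 0)
 *              sc->xf_local_port = xen_intr_port(sc->xf_handle);
 *
 * Because xi_close is set to 1, xen_intr_unbind() also closes the port
 * via EVTCHNOP_close.
 */
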
int
xen_intr_bind_remote_port(device_t dev, u_int remote_domain,
    u_int remote_port, driver_filter_t filter, driver_intr_t handler,
    void *arg, enum intr_type flags, xen_intr_handle_t *port_handlep)
{
        struct xenisrc *isrc;
        struct evtchn_bind_interdomain bind_interdomain;
        int error;

        bind_interdomain.remote_dom = remote_domain;
        bind_interdomain.remote_port = remote_port;
        error = HYPERVISOR_event_channel_op(EVTCHNOP_bind_interdomain,
            &bind_interdomain);
        if (error != 0) {
                /*
                 * XXX Trap Hypercall error code Linuxisms in
                 * the HYPERCALL layer.
                 */
                return (-error);
        }

        error = xen_intr_bind_isrc(&isrc, bind_interdomain.local_port,
            EVTCHN_TYPE_PORT, device_get_nameunit(dev), filter, handler, arg,
            flags, port_handlep);
        if (error != 0) {
                evtchn_close_t close = { .port = bind_interdomain.local_port };

                if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close))
                        panic("EVTCHNOP_close failed");
                return (error);
        }

        /*
         * The Event Channel API opened this port, so it is
         * responsible for closing it automatically on unbind.
         */
        isrc->xi_close = 1;
        return (0);
}

int
xen_intr_bind_virq(device_t dev, u_int virq, u_int cpu,
    driver_filter_t filter, driver_intr_t handler, void *arg,
    enum intr_type flags, xen_intr_handle_t *port_handlep)
{
        int vcpu_id = pcpu_find(cpu)->pc_vcpu_id;
        struct xenisrc *isrc;
        struct evtchn_bind_virq bind_virq = { .virq = virq, .vcpu = vcpu_id };
        int error;

        isrc = NULL;
        error = HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq, &bind_virq);
        if (error != 0) {
                /*
                 * XXX Trap Hypercall error code Linuxisms in
                 * the HYPERCALL layer.
                 */
                return (-error);
        }

        error = xen_intr_bind_isrc(&isrc, bind_virq.port, EVTCHN_TYPE_VIRQ,
            device_get_nameunit(dev), filter, handler, arg, flags,
            port_handlep);

#ifdef SMP
        if (error == 0)
                error = intr_event_bind(isrc->xi_intsrc.is_event, cpu);
#endif

        if (error != 0) {
                evtchn_close_t close = { .port = bind_virq.port };

                xen_intr_unbind(port_handlep);
                if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close))
                        panic("EVTCHNOP_close failed");
                return (error);
        }

#ifdef SMP
        if (isrc->xi_cpu != cpu) {
                /*
                 * Too early in the boot process for the generic interrupt
                 * code to perform the binding.  Update our event channel
                 * masks manually so events can't fire on the wrong cpu
                 * during AP startup.
                 */
                xen_intr_assign_cpu(&isrc->xi_intsrc, cpu_apic_ids[cpu]);
        }
#endif

        /*
         * The Event Channel API opened this port, so it is
         * responsible for closing it automatically on unbind.
         */
        isrc->xi_close = 1;
        isrc->xi_virq = virq;

        return (0);
}

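/*
 * Usage sketch (VIRQ_DEBUG is a standard Xen virtual IRQ; the filter and
 * softc names are assumptions):
 *
 *      error = xen_intr_bind_virq(sc->xf_dev, VIRQ_DEBUG, 0,
 *          xfoo_debug_filter, NULL, sc, INTR_TYPE_MISC, &sc->xf_handle);
 *
 * VIRQs are bound to a specific vCPU at EVTCHNOP_bind_virq time, which is
 * why the cpu argument is part of the call; later migration via
 * xen_intr_assign_cpu() only shuffles the per-CPU masks for this type.
 */
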
int
xen_intr_alloc_and_bind_ipi(u_int cpu, driver_filter_t filter,
    enum intr_type flags, xen_intr_handle_t *port_handlep)
{
#ifdef SMP
        int vcpu_id = pcpu_find(cpu)->pc_vcpu_id;
        struct xenisrc *isrc;
        struct evtchn_bind_ipi bind_ipi = { .vcpu = vcpu_id };
        /* Same size as the one used by intr_handler->ih_name. */
        char name[MAXCOMLEN + 1];
        int error;

        isrc = NULL;
        error = HYPERVISOR_event_channel_op(EVTCHNOP_bind_ipi, &bind_ipi);
        if (error != 0) {
                /*
                 * XXX Trap Hypercall error code Linuxisms in
                 * the HYPERCALL layer.
                 */
                return (-error);
        }

        snprintf(name, sizeof(name), "cpu%u", cpu);

        error = xen_intr_bind_isrc(&isrc, bind_ipi.port, EVTCHN_TYPE_IPI,
            name, filter, NULL, NULL, flags, port_handlep);
        if (error != 0) {
                evtchn_close_t close = { .port = bind_ipi.port };

                xen_intr_unbind(port_handlep);
                if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close))
                        panic("EVTCHNOP_close failed");
                return (error);
        }

        if (isrc->xi_cpu != cpu) {
                /*
                 * Too early in the boot process for the generic interrupt
                 * code to perform the binding.  Update our event channel
                 * masks manually so events can't fire on the wrong cpu
                 * during AP startup.
                 */
                xen_intr_assign_cpu(&isrc->xi_intsrc, cpu_apic_ids[cpu]);
        }

        /*
         * The Event Channel API opened this port, so it is
         * responsible for closing it automatically on unbind.
         */
        isrc->xi_close = 1;
        return (0);
#else
        return (EOPNOTSUPP);
#endif
}

int
xen_intr_describe(xen_intr_handle_t port_handle, const char *fmt, ...)
{
        char descr[MAXCOMLEN + 1];
        struct xenisrc *isrc;
        va_list ap;

        isrc = xen_intr_isrc_from_handle(port_handle);
        if (isrc == NULL)
                return (EINVAL);

        va_start(ap, fmt);
        vsnprintf(descr, sizeof(descr), fmt, ap);
        va_end(ap);
        return (intr_describe(isrc->xi_vector, isrc->xi_cookie, descr));
}

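/*
 * Usage sketch (the format arguments are an assumption): name the bound
 * handler so it shows up usefully in vmstat -i:
 *
 *      xen_intr_describe(sc->xf_handle, "xfoo%d",
 *          device_get_unit(sc->xf_dev));
 *
 * The description is truncated to the MAXCOMLEN-sized buffer above.
 */
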
void
xen_intr_unbind(xen_intr_handle_t *port_handlep)
{
        struct xenisrc *isrc;

        KASSERT(port_handlep != NULL,
            ("NULL xen_intr_handle_t passed to %s", __func__));

        isrc = xen_intr_isrc_from_handle(*port_handlep);
        *port_handlep = NULL;
        if (isrc == NULL)
                return;

        mtx_lock(&xen_intr_isrc_lock);
        if (refcount_release(&isrc->xi_refcount) == 0) {
                mtx_unlock(&xen_intr_isrc_lock);
                return;
        }
        mtx_unlock(&xen_intr_isrc_lock);

        if (isrc->xi_cookie != NULL)
                intr_remove_handler(isrc->xi_cookie);
        xen_intr_release_isrc(isrc);
}

void
xen_intr_signal(xen_intr_handle_t handle)
{
        struct xenisrc *isrc;

        isrc = xen_intr_isrc_from_handle(handle);
        if (isrc != NULL) {
                struct evtchn_send send = { .port = isrc->xi_port };

                KASSERT(isrc->xi_type == EVTCHN_TYPE_PORT ||
                    isrc->xi_type == EVTCHN_TYPE_IPI,
                    ("evtchn_signal on something other than a local port"));
                (void)HYPERVISOR_event_channel_op(EVTCHNOP_send, &send);
        }
}

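/*
 * Usage sketch (ring and softc names are assumptions): the usual
 * notification pattern is to publish work in a shared ring and then kick
 * the peer:
 *
 *      RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&sc->xf_ring, notify);
 *      if (notify)
 *              xen_intr_signal(sc->xf_handle);
 *
 * Signaling is fire-and-forget, which is why the hypercall result is
 * deliberately discarded above.
 */
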
evtchn_port_t
xen_intr_port(xen_intr_handle_t handle)
{
        struct xenisrc *isrc;

        isrc = xen_intr_isrc_from_handle(handle);
        if (isrc == NULL)
                return (0);

        return (isrc->xi_port);
}

int
xen_intr_add_handler(const char *name, driver_filter_t filter,
    driver_intr_t handler, void *arg, enum intr_type flags,
    xen_intr_handle_t handle)
{
        struct xenisrc *isrc;
        int error;

        isrc = xen_intr_isrc_from_handle(handle);
        if (isrc == NULL || isrc->xi_cookie != NULL)
                return (EINVAL);

        error = intr_add_handler(name, isrc->xi_vector, filter, handler, arg,
            flags | INTR_EXCL, &isrc->xi_cookie, 0);
        if (error != 0)
                printf("%s: %s: add handler failed: %d\n", name, __func__,
                    error);

        return (error);
}

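/*
 * Usage sketch (two-phase setup; names are assumptions): as noted in
 * xen_intr_bind_isrc(), a caller may bind with NULL filter and handler,
 * finish its setup while the channel stays masked, and attach the
 * handler afterwards:
 *
 *      error = xen_intr_bind_local_port(sc->xf_dev, sc->xf_port, NULL,
 *          NULL, NULL, 0, &sc->xf_handle);
 *      ... map rings, negotiate state with the peer ...
 *      error = xen_intr_add_handler(device_get_nameunit(sc->xf_dev),
 *          xfoo_filter, NULL, sc, INTR_TYPE_BIO, sc->xf_handle);
 *
 * INTR_EXCL is forced above, so each event channel takes exactly one
 * handler, matching the single xi_cookie slot per source.
 */
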
int
xen_intr_get_evtchn_from_port(evtchn_port_t port, xen_intr_handle_t *handlep)
{

        if (!is_valid_evtchn(port) || port >= NR_EVENT_CHANNELS)
                return (EINVAL);

        if (handlep == NULL)
                return (EINVAL);

        mtx_lock(&xen_intr_isrc_lock);
        if (xen_intr_port_to_isrc[port] == NULL) {
                mtx_unlock(&xen_intr_isrc_lock);
                return (EINVAL);
        }
        refcount_acquire(&xen_intr_port_to_isrc[port]->xi_refcount);
        mtx_unlock(&xen_intr_isrc_lock);

        /* Assign the opaque handler. */
        *handlep = xen_intr_handle_from_isrc(xen_intr_port_to_isrc[port]);

        return (0);
}

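/*
 * Usage sketch: a successful lookup takes a reference on the underlying
 * source, so each call must be paired with xen_intr_unbind() to release
 * it:
 *
 *      xen_intr_handle_t handle;
 *
 *      if (xen_intr_get_evtchn_from_port(port, &handle) == 0) {
 *              xen_intr_signal(handle);
 *              xen_intr_unbind(&handle);
 *      }
 */
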
#ifdef DDB
static const char *
xen_intr_print_type(enum evtchn_type type)
{
        static const char *evtchn_type_to_string[EVTCHN_TYPE_COUNT] = {
                [EVTCHN_TYPE_UNBOUND] = "UNBOUND",
                [EVTCHN_TYPE_VIRQ] = "VIRQ",
                [EVTCHN_TYPE_IPI] = "IPI",
                [EVTCHN_TYPE_PORT] = "PORT",
        };

        if (type >= EVTCHN_TYPE_COUNT)
                return ("UNKNOWN");

        return (evtchn_type_to_string[type]);
}

static void
xen_intr_dump_port(struct xenisrc *isrc)
{
        struct xen_intr_pcpu_data *pcpu;
        shared_info_t *s = HYPERVISOR_shared_info;
        int i;

        db_printf("Port %d Type: %s\n",
            isrc->xi_port, xen_intr_print_type(isrc->xi_type));
        if (isrc->xi_type == EVTCHN_TYPE_VIRQ)
                db_printf("\tVirq: %d\n", isrc->xi_virq);

        db_printf("\tMasked: %d Pending: %d\n",
            !!xen_test_bit(isrc->xi_port, &s->evtchn_mask[0]),
            !!xen_test_bit(isrc->xi_port, &s->evtchn_pending[0]));

        db_printf("\tPer-CPU Masks: ");
        CPU_FOREACH(i) {
                pcpu = DPCPU_ID_PTR(i, xen_intr_pcpu);
                db_printf("cpu#%d: %d ", i,
                    !!xen_test_bit(isrc->xi_port, pcpu->evtchn_enabled));
        }
        db_printf("\n");
}

DB_SHOW_COMMAND(xen_evtchn, db_show_xen_evtchn)
{
        int i;

        if (!xen_domain()) {
                db_printf("Only available on Xen guests\n");
                return;
        }

        for (i = 0; i < NR_EVENT_CHANNELS; i++) {
                struct xenisrc *isrc;

                isrc = xen_intr_port_to_isrc[i];
                if (isrc == NULL)
                        continue;

                xen_intr_dump_port(isrc);
        }
}
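
/*
 * The command registered above is run from the debugger prompt as
 * "show xen_evtchn" and prints one block per bound port in the format
 * produced by xen_intr_dump_port().
 */
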
#endif /* DDB */