sys/x86/xen/xen_intr.c
/******************************************************************************
 * xen_intr.c
 *
 * Xen event and interrupt services for x86 HVM guests.
 *
 * Copyright (c) 2002-2005, K A Fraser
 * Copyright (c) 2005, Intel Corporation <xiaofeng.ling@intel.com>
 * Copyright (c) 2012, Spectra Logic Corporation
 *
 * This file may be distributed separately from the Linux kernel, or
 * incorporated into other software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: releng/11.2/sys/x86/xen/xen_intr.c 318347 2017-05-16 09:39:20Z royger $");

#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/interrupt.h>
#include <sys/pcpu.h>
#include <sys/smp.h>

#include <vm/vm.h>
#include <vm/pmap.h>

#include <machine/intr_machdep.h>
#include <x86/apicvar.h>
#include <x86/apicreg.h>
#include <machine/smp.h>
#include <machine/stdarg.h>

#include <machine/xen/synch_bitops.h>
#include <machine/xen/xen-os.h>

#include <xen/hypervisor.h>
#include <xen/xen_intr.h>
#include <xen/evtchn/evtchnvar.h>

#include <dev/xen/xenpci/xenpcivar.h>
#include <dev/pci/pcivar.h>

#ifdef DDB
#include <ddb/ddb.h>
#endif

static MALLOC_DEFINE(M_XENINTR, "xen_intr", "Xen Interrupt Services");

/**
 * Per-cpu event channel processing state.
 */
struct xen_intr_pcpu_data {
        /**
         * The last event channel bitmap section (level one bit) processed.
         * This is used to ensure we scan all ports before
         * servicing an already serviced port again.
         */
        u_int last_processed_l1i;

        /**
         * The last event channel processed within the event channel
         * bitmap being scanned.
         */
        u_int last_processed_l2i;

        /** Pointer to this CPU's interrupt statistic counter. */
        u_long *evtchn_intrcnt;

        /**
         * A bitmap of ports that can be serviced from this CPU.
         * A set bit means interrupt handling is enabled.
         */
        u_long evtchn_enabled[sizeof(u_long) * 8];
};
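
/*
 * The evtchn_enabled bitmap above holds sizeof(u_long) * 8 words of
 * LONG_BIT bits each, i.e. LONG_BIT * LONG_BIT bits in total, matching
 * the NR_EVENT_CHANNELS ports of the 2-level event channel ABI
 * (4096 ports on 64-bit, 1024 on 32-bit).
 */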

/*
 * Start the scan at port 0 by initializing the last scanned
 * location as the highest numbered event channel port.
 */
static DPCPU_DEFINE(struct xen_intr_pcpu_data, xen_intr_pcpu) = {
        .last_processed_l1i = LONG_BIT - 1,
        .last_processed_l2i = LONG_BIT - 1
};

DPCPU_DECLARE(struct vcpu_info *, vcpu_info);

#define XEN_EEXIST              17 /* Xen "already exists" error */
#define XEN_ALLOCATE_VECTOR     0  /* Allocate a vector for this event channel */
#define XEN_INVALID_EVTCHN      0  /* Invalid event channel */

#define is_valid_evtchn(x)      ((x) != XEN_INVALID_EVTCHN)

struct xenisrc {
        struct intsrc   xi_intsrc;
        enum evtchn_type xi_type;
        int             xi_cpu;         /* VCPU for delivery. */
        int             xi_vector;      /* Global isrc vector number. */
        evtchn_port_t   xi_port;
        int             xi_pirq;
        int             xi_virq;
        void            *xi_cookie;
        u_int           xi_close:1;     /* close on unbind? */
        u_int           xi_activehi:1;
        u_int           xi_edgetrigger:1;
        u_int           xi_masked:1;
};

static void     xen_intr_suspend(struct pic *);
static void     xen_intr_resume(struct pic *, bool suspend_cancelled);
static void     xen_intr_enable_source(struct intsrc *isrc);
static void     xen_intr_disable_source(struct intsrc *isrc, int eoi);
static void     xen_intr_eoi_source(struct intsrc *isrc);
static void     xen_intr_enable_intr(struct intsrc *isrc);
static void     xen_intr_disable_intr(struct intsrc *isrc);
static int      xen_intr_vector(struct intsrc *isrc);
static int      xen_intr_source_pending(struct intsrc *isrc);
static int      xen_intr_config_intr(struct intsrc *isrc,
    enum intr_trigger trig, enum intr_polarity pol);
static int      xen_intr_assign_cpu(struct intsrc *isrc, u_int apic_id);

static void     xen_intr_pirq_enable_source(struct intsrc *isrc);
static void     xen_intr_pirq_disable_source(struct intsrc *isrc, int eoi);
static void     xen_intr_pirq_eoi_source(struct intsrc *isrc);
static void     xen_intr_pirq_enable_intr(struct intsrc *isrc);
static void     xen_intr_pirq_disable_intr(struct intsrc *isrc);
static int      xen_intr_pirq_config_intr(struct intsrc *isrc,
    enum intr_trigger trig, enum intr_polarity pol);

/**
 * PIC interface for all event channel port types except physical IRQs.
 */
struct pic xen_intr_pic = {
        .pic_enable_source  = xen_intr_enable_source,
        .pic_disable_source = xen_intr_disable_source,
        .pic_eoi_source     = xen_intr_eoi_source,
        .pic_enable_intr    = xen_intr_enable_intr,
        .pic_disable_intr   = xen_intr_disable_intr,
        .pic_vector         = xen_intr_vector,
        .pic_source_pending = xen_intr_source_pending,
        .pic_suspend        = xen_intr_suspend,
        .pic_resume         = xen_intr_resume,
        .pic_config_intr    = xen_intr_config_intr,
        .pic_assign_cpu     = xen_intr_assign_cpu
};

/**
 * PIC interface for all event channels representing
 * physical interrupt sources.
 */
struct pic xen_intr_pirq_pic = {
        .pic_enable_source  = xen_intr_pirq_enable_source,
        .pic_disable_source = xen_intr_pirq_disable_source,
        .pic_eoi_source     = xen_intr_pirq_eoi_source,
        .pic_enable_intr    = xen_intr_pirq_enable_intr,
        .pic_disable_intr   = xen_intr_pirq_disable_intr,
        .pic_vector         = xen_intr_vector,
        .pic_source_pending = xen_intr_source_pending,
        .pic_config_intr    = xen_intr_pirq_config_intr,
        .pic_assign_cpu     = xen_intr_assign_cpu
};

static struct mtx        xen_intr_isrc_lock;
static int               xen_intr_auto_vector_count;
static struct xenisrc   *xen_intr_port_to_isrc[NR_EVENT_CHANNELS];
static u_long           *xen_intr_pirq_eoi_map;
static boolean_t         xen_intr_pirq_eoi_map_enabled;

/*------------------------- Private Functions --------------------------------*/
/**
 * Disable signal delivery for an event channel port on the
 * specified CPU.
 *
 * \param cpu   The CPU on which to mask the port.
 * \param port  The event channel port to mask.
 *
 * This API is used to manage the port<=>CPU binding of event
 * channel handlers.
 *
 * \note  This operation does not preclude reception of an event
 *        for this event channel on another CPU.  To mask the
 *        event channel globally, use evtchn_mask().
 */
static inline void
evtchn_cpu_mask_port(u_int cpu, evtchn_port_t port)
{
        struct xen_intr_pcpu_data *pcpu;

        pcpu = DPCPU_ID_PTR(cpu, xen_intr_pcpu);
        xen_clear_bit(port, pcpu->evtchn_enabled);
}

/**
 * Enable signal delivery for an event channel port on the
 * specified CPU.
 *
 * \param cpu   The CPU on which to unmask the port.
 * \param port  The event channel port to unmask.
 *
 * This API is used to manage the port<=>CPU binding of event
 * channel handlers.
 *
 * \note  This operation does not guarantee that event delivery
 *        is enabled for this event channel port.  The port must
 *        also be globally enabled.  See evtchn_unmask().
 */
static inline void
evtchn_cpu_unmask_port(u_int cpu, evtchn_port_t port)
{
        struct xen_intr_pcpu_data *pcpu;

        pcpu = DPCPU_ID_PTR(cpu, xen_intr_pcpu);
        xen_set_bit(port, pcpu->evtchn_enabled);
}

/**
 * Allocate and register a per-cpu Xen upcall interrupt counter.
 *
 * \param cpu  The cpu for which to register this interrupt count.
 */
static void
xen_intr_intrcnt_add(u_int cpu)
{
        char buf[MAXCOMLEN + 1];
        struct xen_intr_pcpu_data *pcpu;

        pcpu = DPCPU_ID_PTR(cpu, xen_intr_pcpu);
        if (pcpu->evtchn_intrcnt != NULL)
                return;

        snprintf(buf, sizeof(buf), "cpu%d:xen", cpu);
        intrcnt_add(buf, &pcpu->evtchn_intrcnt);
}

/**
 * Search for an already allocated but currently unused Xen interrupt
 * source object.
 *
 * \param type  Restrict the search to interrupt sources of the given
 *              type.
 *
 * \return  A pointer to a free Xen interrupt source object or NULL.
 */
static struct xenisrc *
xen_intr_find_unused_isrc(enum evtchn_type type)
{
        int isrc_idx;

        KASSERT(mtx_owned(&xen_intr_isrc_lock), ("Evtchn isrc lock not held"));

        for (isrc_idx = 0; isrc_idx < xen_intr_auto_vector_count; isrc_idx++) {
                struct xenisrc *isrc;
                u_int vector;

                vector = FIRST_EVTCHN_INT + isrc_idx;
                isrc = (struct xenisrc *)intr_lookup_source(vector);
                if (isrc != NULL
                 && isrc->xi_type == EVTCHN_TYPE_UNBOUND) {
                        KASSERT(isrc->xi_intsrc.is_handlers == 0,
                            ("Free evtchn still has handlers"));
                        isrc->xi_type = type;
                        return (isrc);
                }
        }
        return (NULL);
}

/**
 * Allocate a Xen interrupt source object.
 *
 * \param type  The type of interrupt source to create.
 *
 * \return  A pointer to a newly allocated Xen interrupt source
 *          object or NULL.
 */
static struct xenisrc *
xen_intr_alloc_isrc(enum evtchn_type type, int vector)
{
        static int warned;
        struct xenisrc *isrc;

        KASSERT(mtx_owned(&xen_intr_isrc_lock), ("Evtchn alloc lock not held"));

        if (xen_intr_auto_vector_count > NR_EVENT_CHANNELS) {
                if (!warned) {
                        warned = 1;
                        printf("xen_intr_alloc: Event channels exhausted.\n");
                }
                return (NULL);
        }

        if (type != EVTCHN_TYPE_PIRQ) {
                vector = FIRST_EVTCHN_INT + xen_intr_auto_vector_count;
                xen_intr_auto_vector_count++;
        }

        KASSERT((intr_lookup_source(vector) == NULL),
            ("Trying to use an already allocated vector"));

        mtx_unlock(&xen_intr_isrc_lock);
        isrc = malloc(sizeof(*isrc), M_XENINTR, M_WAITOK | M_ZERO);
        isrc->xi_intsrc.is_pic =
            (type == EVTCHN_TYPE_PIRQ) ? &xen_intr_pirq_pic : &xen_intr_pic;
        isrc->xi_vector = vector;
        isrc->xi_type = type;
        intr_register_source(&isrc->xi_intsrc);
        mtx_lock(&xen_intr_isrc_lock);

        return (isrc);
}

/**
 * Attempt to free an active Xen interrupt source object.
 *
 * \param isrc  The interrupt source object to release.
 *
 * \returns  EBUSY if the source is still in use, otherwise 0.
 */
static int
xen_intr_release_isrc(struct xenisrc *isrc)
{

        mtx_lock(&xen_intr_isrc_lock);
        if (isrc->xi_intsrc.is_handlers != 0) {
                mtx_unlock(&xen_intr_isrc_lock);
                return (EBUSY);
        }
        evtchn_mask_port(isrc->xi_port);
        evtchn_clear_port(isrc->xi_port);

        /* Rebind port to CPU 0. */
        evtchn_cpu_mask_port(isrc->xi_cpu, isrc->xi_port);
        evtchn_cpu_unmask_port(0, isrc->xi_port);

        if (isrc->xi_close != 0 && is_valid_evtchn(isrc->xi_port)) {
                struct evtchn_close close = { .port = isrc->xi_port };
                if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close))
                        panic("EVTCHNOP_close failed");
        }

        xen_intr_port_to_isrc[isrc->xi_port] = NULL;
        isrc->xi_cpu = 0;
        isrc->xi_type = EVTCHN_TYPE_UNBOUND;
        isrc->xi_port = 0;
        isrc->xi_cookie = NULL;
        mtx_unlock(&xen_intr_isrc_lock);
        return (0);
}

/**
 * Associate an interrupt handler with an already allocated local Xen
 * event channel port.
 *
 * \param isrcp       The returned Xen interrupt object associated with
 *                    the specified local port.
 * \param local_port  The event channel to bind.
 * \param type        The event channel type of local_port.
 * \param intr_owner  The device making this bind request.
 * \param filter      An interrupt filter handler.  Specify NULL
 *                    to always dispatch to the ithread handler.
 * \param handler     An interrupt ithread handler.  Optional (can
 *                    specify NULL) if all necessary event actions
 *                    are performed by filter.
 * \param arg         Argument to present to both filter and handler.
 * \param flags       Interrupt handler flags.  See sys/bus.h.
 * \param port_handlep  Pointer to an opaque handle used to manage this
 *                      registration.
 *
 * \returns  0 on success, otherwise an errno.
 */
static int
xen_intr_bind_isrc(struct xenisrc **isrcp, evtchn_port_t local_port,
    enum evtchn_type type, const char *intr_owner, driver_filter_t filter,
    driver_intr_t handler, void *arg, enum intr_type flags,
    xen_intr_handle_t *port_handlep)
{
        struct xenisrc *isrc;
        int error;

        *isrcp = NULL;
        if (port_handlep == NULL) {
                printf("%s: xen_intr_bind_isrc: Bad event handle\n",
                    intr_owner);
                return (EINVAL);
        }

        mtx_lock(&xen_intr_isrc_lock);
        isrc = xen_intr_find_unused_isrc(type);
        if (isrc == NULL) {
                isrc = xen_intr_alloc_isrc(type, XEN_ALLOCATE_VECTOR);
                if (isrc == NULL) {
                        mtx_unlock(&xen_intr_isrc_lock);
                        return (ENOSPC);
                }
        }
        isrc->xi_port = local_port;
        xen_intr_port_to_isrc[local_port] = isrc;
        mtx_unlock(&xen_intr_isrc_lock);

        /* Assign the opaque handle: a pointer to the isrc's vector number. */
        *port_handlep = &isrc->xi_vector;

#ifdef SMP
        if (type == EVTCHN_TYPE_PORT) {
                /*
                 * By default all interrupts are assigned to vCPU#0
                 * unless specified otherwise, so shuffle them to balance
                 * the interrupt load.
                 */
                xen_intr_assign_cpu(&isrc->xi_intsrc, intr_next_cpu());
        }
#endif

        if (filter == NULL && handler == NULL) {
                /*
                 * No filter/handler provided, leave the event channel
                 * masked and without a valid handler; the caller is
                 * in charge of setting that up.
                 */
                *isrcp = isrc;
                return (0);
        }

        error = xen_intr_add_handler(intr_owner, filter, handler, arg, flags,
            *port_handlep);
        if (error != 0) {
                xen_intr_release_isrc(isrc);
                return (error);
        }
        *isrcp = isrc;
        return (0);
}

/**
 * Lookup a Xen interrupt source object given an interrupt binding handle.
 *
 * \param handle  A handle initialized by a previous call to
 *                xen_intr_bind_isrc().
 *
 * \returns  A pointer to the Xen interrupt source object associated
 *           with the given interrupt handle.  NULL if no association
 *           currently exists.
 */
static struct xenisrc *
xen_intr_isrc(xen_intr_handle_t handle)
{
        int vector;

        if (handle == NULL)
                return (NULL);

        vector = *(int *)handle;
        KASSERT(vector >= FIRST_EVTCHN_INT &&
            vector < (FIRST_EVTCHN_INT + xen_intr_auto_vector_count),
            ("Xen interrupt vector is out of range"));

        return ((struct xenisrc *)intr_lookup_source(vector));
}

/**
 * Determine the event channel ports at the given section of the
 * event port bitmap which have pending events for the given cpu.
 *
 * \param pcpu  The Xen interrupt pcpu data for the cpu being queried.
 * \param sh    The Xen shared info area.
 * \param idx   The index of the section of the event channel bitmap to
 *              inspect.
 *
 * \returns  A u_long with bits set for every event channel with pending
 *           events.
 */
static inline u_long
xen_intr_active_ports(struct xen_intr_pcpu_data *pcpu, shared_info_t *sh,
    u_int idx)
{

        CTASSERT(sizeof(sh->evtchn_mask[0]) == sizeof(sh->evtchn_pending[0]));
        CTASSERT(sizeof(sh->evtchn_mask[0]) == sizeof(pcpu->evtchn_enabled[0]));
        CTASSERT(sizeof(sh->evtchn_mask) == sizeof(sh->evtchn_pending));
        CTASSERT(sizeof(sh->evtchn_mask) == sizeof(pcpu->evtchn_enabled));
        return (sh->evtchn_pending[idx]
              & ~sh->evtchn_mask[idx]
              & pcpu->evtchn_enabled[idx]);
}

/**
 * Interrupt handler for processing all Xen event channel events.
 *
 * \param trap_frame  The trap frame context for the current interrupt.
 */
void
xen_intr_handle_upcall(struct trapframe *trap_frame)
{
        u_int l1i, l2i, port, cpu;
        u_long masked_l1, masked_l2;
        struct xenisrc *isrc;
        shared_info_t *s;
        vcpu_info_t *v;
        struct xen_intr_pcpu_data *pc;
        u_long l1, l2;

        /*
         * Disable preemption in order to always check and fire events
         * on the right vCPU
         */
        critical_enter();

        cpu = PCPU_GET(cpuid);
        pc = DPCPU_PTR(xen_intr_pcpu);
        s = HYPERVISOR_shared_info;
        v = DPCPU_GET(vcpu_info);

        if (xen_hvm_domain() && !xen_vector_callback_enabled) {
                KASSERT((cpu == 0), ("Fired PCI event callback on wrong CPU"));
        }

        v->evtchn_upcall_pending = 0;

#if 0
#ifndef CONFIG_X86 /* No need for a barrier -- XCHG is a barrier on x86. */
        /* Clear master flag /before/ clearing selector flag. */
        wmb();
#endif
#endif

        l1 = atomic_readandclear_long(&v->evtchn_pending_sel);

        l1i = pc->last_processed_l1i;
        l2i = pc->last_processed_l2i;
        (*pc->evtchn_intrcnt)++;

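        /*
         * Scan the two-level pending bitmap: l1 is the per-vCPU selector
         * word (one bit per LONG_BIT-sized section of the port bitmap),
         * and each section is filtered through xen_intr_active_ports()
         * (pending & ~masked & enabled-on-this-CPU).  The scan resumes
         * just after the last processed (l1i, l2i) position so that a
         * busy low-numbered port cannot starve higher-numbered ones.
         */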
        while (l1 != 0) {

                l1i = (l1i + 1) % LONG_BIT;
                masked_l1 = l1 & ((~0UL) << l1i);

                if (masked_l1 == 0) {
                        /*
                         * if we masked out all events, wrap around
                         * to the beginning.
                         */
                        l1i = LONG_BIT - 1;
                        l2i = LONG_BIT - 1;
                        continue;
                }
                l1i = ffsl(masked_l1) - 1;

                do {
                        l2 = xen_intr_active_ports(pc, s, l1i);

                        l2i = (l2i + 1) % LONG_BIT;
                        masked_l2 = l2 & ((~0UL) << l2i);

                        if (masked_l2 == 0) {
                                /* if we masked out all events, move on */
                                l2i = LONG_BIT - 1;
                                break;
                        }
                        l2i = ffsl(masked_l2) - 1;

                        /* process port */
                        port = (l1i * LONG_BIT) + l2i;
                        synch_clear_bit(port, &s->evtchn_pending[0]);

                        isrc = xen_intr_port_to_isrc[port];
                        if (__predict_false(isrc == NULL))
                                continue;

                        /* Make sure we are firing on the right vCPU */
                        KASSERT((isrc->xi_cpu == PCPU_GET(cpuid)),
                            ("Received unexpected event on vCPU#%d, event bound to vCPU#%d",
                            PCPU_GET(cpuid), isrc->xi_cpu));

                        intr_execute_handlers(&isrc->xi_intsrc, trap_frame);

                        /*
                         * If this is the final port processed,
                         * we'll pick up here+1 next time.
                         */
                        pc->last_processed_l1i = l1i;
                        pc->last_processed_l2i = l2i;

                } while (l2i != LONG_BIT - 1);

                l2 = xen_intr_active_ports(pc, s, l1i);
                if (l2 == 0) {
                        /*
                         * We handled all ports, so we can clear the
                         * selector bit.
                         */
                        l1 &= ~(1UL << l1i);
                }
        }
        critical_exit();
}

static int
xen_intr_init(void *dummy __unused)
{
        shared_info_t *s = HYPERVISOR_shared_info;
        struct xen_intr_pcpu_data *pcpu;
        struct physdev_pirq_eoi_gmfn eoi_gmfn;
        int i, rc;

        if (!xen_domain())
                return (0);

        mtx_init(&xen_intr_isrc_lock, "xen-irq-lock", NULL, MTX_DEF);

        /*
         * Register interrupt count manually as we aren't
         * guaranteed to see a call to xen_intr_assign_cpu()
         * before our first interrupt.  Also set the per-cpu
         * mask of CPU#0 to enable all, since by default
         * all event channels are bound to CPU#0.
         */
        CPU_FOREACH(i) {
                pcpu = DPCPU_ID_PTR(i, xen_intr_pcpu);
                memset(pcpu->evtchn_enabled, i == 0 ? ~0 : 0,
                    sizeof(pcpu->evtchn_enabled));
                xen_intr_intrcnt_add(i);
        }

        for (i = 0; i < nitems(s->evtchn_mask); i++)
                atomic_store_rel_long(&s->evtchn_mask[i], ~0);

        /* Try to register PIRQ EOI map */
        xen_intr_pirq_eoi_map = malloc(PAGE_SIZE, M_XENINTR, M_WAITOK | M_ZERO);
        eoi_gmfn.gmfn = atop(vtophys(xen_intr_pirq_eoi_map));
        rc = HYPERVISOR_physdev_op(PHYSDEVOP_pirq_eoi_gmfn_v2, &eoi_gmfn);
        if (rc != 0) {
                /*
                 * Only mark the map enabled when registration succeeded;
                 * the original "rc != 0 && bootverbose" test wrongly
                 * enabled the map on failure when bootverbose was off.
                 */
                if (bootverbose)
                        printf("Xen interrupts: unable to register PIRQ EOI map\n");
        } else
                xen_intr_pirq_eoi_map_enabled = true;

        intr_register_pic(&xen_intr_pic);
        intr_register_pic(&xen_intr_pirq_pic);

        if (bootverbose)
                printf("Xen interrupt system initialized\n");

        return (0);
}
SYSINIT(xen_intr_init, SI_SUB_INTR, SI_ORDER_SECOND, xen_intr_init, NULL);

/*--------------------------- Common PIC Functions ---------------------------*/
/**
 * Prepare this PIC for system suspension.
 */
static void
xen_intr_suspend(struct pic *unused)
{
}

static void
xen_rebind_ipi(struct xenisrc *isrc)
{
#ifdef SMP
        int cpu = isrc->xi_cpu;
        int vcpu_id = pcpu_find(cpu)->pc_vcpu_id;
        int error;
        struct evtchn_bind_ipi bind_ipi = { .vcpu = vcpu_id };

        error = HYPERVISOR_event_channel_op(EVTCHNOP_bind_ipi,
            &bind_ipi);
        if (error != 0)
                panic("unable to rebind xen IPI: %d", error);

        isrc->xi_port = bind_ipi.port;
        isrc->xi_cpu = 0;
        xen_intr_port_to_isrc[bind_ipi.port] = isrc;

        error = xen_intr_assign_cpu(&isrc->xi_intsrc,
            cpu_apic_ids[cpu]);
        if (error)
                panic("unable to bind xen IPI to CPU#%d: %d",
                    cpu, error);

        evtchn_unmask_port(bind_ipi.port);
#else
        panic("Resume IPI event channel on UP");
#endif
}

static void
xen_rebind_virq(struct xenisrc *isrc)
{
        int cpu = isrc->xi_cpu;
        int vcpu_id = pcpu_find(cpu)->pc_vcpu_id;
        int error;
        struct evtchn_bind_virq bind_virq = { .virq = isrc->xi_virq,
                                              .vcpu = vcpu_id };

        error = HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq,
            &bind_virq);
        if (error != 0)
                panic("unable to rebind xen VIRQ#%d: %d", isrc->xi_virq, error);

        isrc->xi_port = bind_virq.port;
        isrc->xi_cpu = 0;
        xen_intr_port_to_isrc[bind_virq.port] = isrc;

#ifdef SMP
        error = xen_intr_assign_cpu(&isrc->xi_intsrc,
            cpu_apic_ids[cpu]);
        if (error)
                panic("unable to bind xen VIRQ#%d to CPU#%d: %d",
                    isrc->xi_virq, cpu, error);
#endif

        evtchn_unmask_port(bind_virq.port);
}

/**
 * Return this PIC to service after being suspended.
 */
static void
xen_intr_resume(struct pic *unused, bool suspend_cancelled)
{
        shared_info_t *s = HYPERVISOR_shared_info;
        struct xenisrc *isrc;
        u_int isrc_idx;
        int i;

        if (suspend_cancelled)
                return;

        /* Reset the per-CPU masks */
        CPU_FOREACH(i) {
                struct xen_intr_pcpu_data *pcpu;

                pcpu = DPCPU_ID_PTR(i, xen_intr_pcpu);
                memset(pcpu->evtchn_enabled, i == 0 ? ~0 : 0,
                    sizeof(pcpu->evtchn_enabled));
        }

        /* Mask all event channels. */
        for (i = 0; i < nitems(s->evtchn_mask); i++)
                atomic_store_rel_long(&s->evtchn_mask[i], ~0);

        /* Remove port -> isrc mappings */
        memset(xen_intr_port_to_isrc, 0, sizeof(xen_intr_port_to_isrc));

        /* Free unused isrcs and rebind VIRQs and IPIs */
        for (isrc_idx = 0; isrc_idx < xen_intr_auto_vector_count; isrc_idx++) {
                u_int vector;

                vector = FIRST_EVTCHN_INT + isrc_idx;
                isrc = (struct xenisrc *)intr_lookup_source(vector);
                if (isrc != NULL) {
                        isrc->xi_port = 0;
                        switch (isrc->xi_type) {
                        case EVTCHN_TYPE_IPI:
                                xen_rebind_ipi(isrc);
                                break;
                        case EVTCHN_TYPE_VIRQ:
                                xen_rebind_virq(isrc);
                                break;
                        default:
                                break;
                        }
                }
        }
}

/**
 * Disable a Xen interrupt source.
 *
 * \param isrc  The interrupt source to disable.
 */
static void
xen_intr_disable_intr(struct intsrc *base_isrc)
{
        struct xenisrc *isrc = (struct xenisrc *)base_isrc;

        evtchn_mask_port(isrc->xi_port);
}

/**
 * Determine the global interrupt vector number for
 * a Xen interrupt source.
 *
 * \param isrc  The interrupt source to query.
 *
 * \return  The vector number corresponding to the given interrupt source.
 */
static int
xen_intr_vector(struct intsrc *base_isrc)
{
        struct xenisrc *isrc = (struct xenisrc *)base_isrc;

        return (isrc->xi_vector);
}

/**
 * Determine whether or not interrupt events are pending on the
 * given interrupt source.
 *
 * \param isrc  The interrupt source to query.
 *
 * \returns  0 if no events are pending, otherwise non-zero.
 */
static int
xen_intr_source_pending(struct intsrc *isrc)
{
        /*
         * Event channels are edge triggered and never masked.
         * There can be no pending events.
         */
        return (0);
}

/**
 * Perform configuration of an interrupt source.
 *
 * \param isrc  The interrupt source to configure.
 * \param trig  Edge or level.
 * \param pol   Active high or low.
 *
 * \returns  0 on success, otherwise an errno.
 */
static int
xen_intr_config_intr(struct intsrc *isrc, enum intr_trigger trig,
    enum intr_polarity pol)
{
        /* Configuration is only possible via the evtchn APIs. */
        return (ENODEV);
}

/**
 * Configure CPU affinity for interrupt source event delivery.
 *
 * \param isrc     The interrupt source to configure.
 * \param apic_id  The apic id of the CPU for handling future events.
 *
 * \returns  0 if successful, otherwise an errno.
 */
static int
xen_intr_assign_cpu(struct intsrc *base_isrc, u_int apic_id)
{
#ifdef SMP
        struct evtchn_bind_vcpu bind_vcpu;
        struct xenisrc *isrc;
        u_int to_cpu, vcpu_id;
        int error, masked;

        if (xen_vector_callback_enabled == 0)
                return (EOPNOTSUPP);

        to_cpu = apic_cpuid(apic_id);
        vcpu_id = pcpu_find(to_cpu)->pc_vcpu_id;
        xen_intr_intrcnt_add(to_cpu);

        mtx_lock(&xen_intr_isrc_lock);
        isrc = (struct xenisrc *)base_isrc;
        if (!is_valid_evtchn(isrc->xi_port)) {
                mtx_unlock(&xen_intr_isrc_lock);
                return (EINVAL);
        }

        /*
         * Mask the event channel while binding it to prevent interrupt
         * delivery with an inconsistent state in isrc->xi_cpu.
         */
        masked = evtchn_test_and_set_mask(isrc->xi_port);
        if ((isrc->xi_type == EVTCHN_TYPE_VIRQ) ||
            (isrc->xi_type == EVTCHN_TYPE_IPI)) {
                /*
                 * Virtual IRQs are associated with a cpu by
                 * the Hypervisor at evtchn_bind_virq time, so
                 * all we need to do is update the per-CPU masks.
                 */
                evtchn_cpu_mask_port(isrc->xi_cpu, isrc->xi_port);
                isrc->xi_cpu = to_cpu;
                evtchn_cpu_unmask_port(isrc->xi_cpu, isrc->xi_port);
                goto out;
        }

        bind_vcpu.port = isrc->xi_port;
        bind_vcpu.vcpu = vcpu_id;

        error = HYPERVISOR_event_channel_op(EVTCHNOP_bind_vcpu, &bind_vcpu);
        if (isrc->xi_cpu != to_cpu) {
                if (error == 0) {
                        /* Commit to new binding by removing the old one. */
                        evtchn_cpu_mask_port(isrc->xi_cpu, isrc->xi_port);
                        isrc->xi_cpu = to_cpu;
                        evtchn_cpu_unmask_port(isrc->xi_cpu, isrc->xi_port);
                }
        }

out:
        if (masked == 0)
                evtchn_unmask_port(isrc->xi_port);
        mtx_unlock(&xen_intr_isrc_lock);
        return (0);
#else
        return (EOPNOTSUPP);
#endif
}

/*------------------- Virtual Interrupt Source PIC Functions -----------------*/
/*
 * Mask a level triggered interrupt source.
 *
 * \param isrc  The interrupt source to mask (if necessary).
 * \param eoi   If non-zero, perform any necessary end-of-interrupt
 *              acknowledgements.
 */
static void
xen_intr_disable_source(struct intsrc *base_isrc, int eoi)
{
        struct xenisrc *isrc;

        isrc = (struct xenisrc *)base_isrc;

        /*
         * NB: checking whether the event channel is already masked is
         * needed because the event channel user-space device
         * masks event channels in its filter as part of its
         * normal operation, and those shouldn't be automatically
         * unmasked by the generic interrupt code.  The event channel
         * device will unmask them when needed.
         */
        isrc->xi_masked = !!evtchn_test_and_set_mask(isrc->xi_port);
}

/*
 * Unmask a level triggered interrupt source.
 *
 * \param isrc  The interrupt source to unmask (if necessary).
 */
static void
xen_intr_enable_source(struct intsrc *base_isrc)
{
        struct xenisrc *isrc;

        isrc = (struct xenisrc *)base_isrc;

        if (isrc->xi_masked == 0)
                evtchn_unmask_port(isrc->xi_port);
}

/*
 * Perform any necessary end-of-interrupt acknowledgements.
 *
 * \param isrc  The interrupt source to EOI.
 */
static void
xen_intr_eoi_source(struct intsrc *base_isrc)
{
}

/*
 * Enable and unmask the interrupt source.
 *
 * \param isrc  The interrupt source to enable.
 */
static void
xen_intr_enable_intr(struct intsrc *base_isrc)
{
        struct xenisrc *isrc = (struct xenisrc *)base_isrc;

        evtchn_unmask_port(isrc->xi_port);
}

/*------------------ Physical Interrupt Source PIC Functions -----------------*/
/*
 * Mask a level triggered interrupt source.
 *
 * \param isrc  The interrupt source to mask (if necessary).
 * \param eoi   If non-zero, perform any necessary end-of-interrupt
 *              acknowledgements.
 */
static void
xen_intr_pirq_disable_source(struct intsrc *base_isrc, int eoi)
{
        struct xenisrc *isrc;

        isrc = (struct xenisrc *)base_isrc;

        if (isrc->xi_edgetrigger == 0)
                evtchn_mask_port(isrc->xi_port);
        if (eoi == PIC_EOI)
                xen_intr_pirq_eoi_source(base_isrc);
}

/*
 * Unmask a level triggered interrupt source.
 *
 * \param isrc  The interrupt source to unmask (if necessary).
 */
static void
xen_intr_pirq_enable_source(struct intsrc *base_isrc)
{
        struct xenisrc *isrc;

        isrc = (struct xenisrc *)base_isrc;

        if (isrc->xi_edgetrigger == 0)
                evtchn_unmask_port(isrc->xi_port);
}

/*
 * Perform any necessary end-of-interrupt acknowledgements.
 *
 * \param isrc  The interrupt source to EOI.
 */
static void
xen_intr_pirq_eoi_source(struct intsrc *base_isrc)
{
        struct xenisrc *isrc;
        int error;

        isrc = (struct xenisrc *)base_isrc;

        if (xen_test_bit(isrc->xi_pirq, xen_intr_pirq_eoi_map)) {
                struct physdev_eoi eoi = { .irq = isrc->xi_pirq };

                error = HYPERVISOR_physdev_op(PHYSDEVOP_eoi, &eoi);
                if (error != 0)
                        panic("Unable to EOI PIRQ#%d: %d\n",
                            isrc->xi_pirq, error);
        }
}

/*
 * Enable and unmask the interrupt source.
 *
 * \param isrc  The interrupt source to enable.
 */
static void
xen_intr_pirq_enable_intr(struct intsrc *base_isrc)
{
        struct xenisrc *isrc;
        struct evtchn_bind_pirq bind_pirq;
        struct physdev_irq_status_query irq_status;
        int error;

        isrc = (struct xenisrc *)base_isrc;

        if (!xen_intr_pirq_eoi_map_enabled) {
                irq_status.irq = isrc->xi_pirq;
                error = HYPERVISOR_physdev_op(PHYSDEVOP_irq_status_query,
                    &irq_status);
                if (error)
                        panic("unable to get status of IRQ#%d", isrc->xi_pirq);

                if (irq_status.flags & XENIRQSTAT_needs_eoi) {
                        /*
                         * Since the dynamic PIRQ EOI map is not available,
                         * mark the PIRQ as needing EOI unconditionally.
                         */
                        xen_set_bit(isrc->xi_pirq, xen_intr_pirq_eoi_map);
                }
        }

        bind_pirq.pirq = isrc->xi_pirq;
        bind_pirq.flags = isrc->xi_edgetrigger ? 0 : BIND_PIRQ__WILL_SHARE;
        error = HYPERVISOR_event_channel_op(EVTCHNOP_bind_pirq, &bind_pirq);
        if (error)
                panic("unable to bind IRQ#%d", isrc->xi_pirq);

        isrc->xi_port = bind_pirq.port;

        mtx_lock(&xen_intr_isrc_lock);
        KASSERT((xen_intr_port_to_isrc[bind_pirq.port] == NULL),
            ("trying to override an already setup event channel port"));
        xen_intr_port_to_isrc[bind_pirq.port] = isrc;
        mtx_unlock(&xen_intr_isrc_lock);

        evtchn_unmask_port(isrc->xi_port);
}

/*
 * Disable an interrupt source.
 *
 * \param isrc  The interrupt source to disable.
 */
static void
xen_intr_pirq_disable_intr(struct intsrc *base_isrc)
{
        struct xenisrc *isrc;
        struct evtchn_close close;
        int error;

        isrc = (struct xenisrc *)base_isrc;

        evtchn_mask_port(isrc->xi_port);

        close.port = isrc->xi_port;
        error = HYPERVISOR_event_channel_op(EVTCHNOP_close, &close);
        if (error)
                panic("unable to close event channel %d IRQ#%d",
                    isrc->xi_port, isrc->xi_pirq);

        mtx_lock(&xen_intr_isrc_lock);
        xen_intr_port_to_isrc[isrc->xi_port] = NULL;
        mtx_unlock(&xen_intr_isrc_lock);

        isrc->xi_port = 0;
}

/**
 * Perform configuration of an interrupt source.
 *
 * \param isrc  The interrupt source to configure.
 * \param trig  Edge or level.
 * \param pol   Active high or low.
 *
 * \returns  0 on success, otherwise an errno.
 */
static int
xen_intr_pirq_config_intr(struct intsrc *base_isrc, enum intr_trigger trig,
    enum intr_polarity pol)
{
        struct xenisrc *isrc = (struct xenisrc *)base_isrc;
        struct physdev_setup_gsi setup_gsi;
        int error;

        KASSERT(!(trig == INTR_TRIGGER_CONFORM || pol == INTR_POLARITY_CONFORM),
            ("%s: Conforming trigger or polarity\n", __func__));

        setup_gsi.gsi = isrc->xi_pirq;
        setup_gsi.triggering = trig == INTR_TRIGGER_EDGE ? 0 : 1;
        setup_gsi.polarity = pol == INTR_POLARITY_HIGH ? 0 : 1;

        error = HYPERVISOR_physdev_op(PHYSDEVOP_setup_gsi, &setup_gsi);
        if (error == -XEN_EEXIST) {
                if ((isrc->xi_edgetrigger && (trig != INTR_TRIGGER_EDGE)) ||
                    (isrc->xi_activehi && (pol != INTR_POLARITY_HIGH)))
                        panic("unable to reconfigure interrupt IRQ#%d",
                            isrc->xi_pirq);
                error = 0;
        }
        if (error)
                panic("unable to configure IRQ#%d\n", isrc->xi_pirq);

        isrc->xi_activehi = pol == INTR_POLARITY_HIGH ? 1 : 0;
        isrc->xi_edgetrigger = trig == INTR_TRIGGER_EDGE ? 1 : 0;

        return (0);
}

/*--------------------------- Public Functions -------------------------------*/
/*------- API comments for these methods can be found in xen/xenintr.h -------*/
int
xen_intr_bind_local_port(device_t dev, evtchn_port_t local_port,
    driver_filter_t filter, driver_intr_t handler, void *arg,
    enum intr_type flags, xen_intr_handle_t *port_handlep)
{
        struct xenisrc *isrc;
        int error;

        error = xen_intr_bind_isrc(&isrc, local_port, EVTCHN_TYPE_PORT,
            device_get_nameunit(dev), filter, handler, arg, flags,
            port_handlep);
        if (error != 0)
                return (error);

        /*
         * The Event Channel API didn't open this port, so it is not
         * responsible for closing it automatically on unbind.
         */
        isrc->xi_close = 0;
        return (0);
}

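/*
 * Example use of xen_intr_bind_local_port() (hypothetical driver code,
 * illustrative only; the device, handler, and softc names are
 * placeholders):
 *
 *	xen_intr_handle_t handle;
 *	int error;
 *
 *	error = xen_intr_bind_local_port(dev, port, NULL, my_intr_handler,
 *	    sc, INTR_TYPE_MISC | INTR_MPSAFE, &handle);
 *	...
 *	xen_intr_unbind(&handle);
 */
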
int
xen_intr_alloc_and_bind_local_port(device_t dev, u_int remote_domain,
    driver_filter_t filter, driver_intr_t handler, void *arg,
    enum intr_type flags, xen_intr_handle_t *port_handlep)
{
        struct xenisrc *isrc;
        struct evtchn_alloc_unbound alloc_unbound;
        int error;

        alloc_unbound.dom = DOMID_SELF;
        alloc_unbound.remote_dom = remote_domain;
        error = HYPERVISOR_event_channel_op(EVTCHNOP_alloc_unbound,
            &alloc_unbound);
        if (error != 0) {
                /*
                 * XXX Trap Hypercall error code Linuxisms in
                 * the HYPERCALL layer.
                 */
                return (-error);
        }

        error = xen_intr_bind_isrc(&isrc, alloc_unbound.port, EVTCHN_TYPE_PORT,
            device_get_nameunit(dev), filter, handler, arg, flags,
            port_handlep);
        if (error != 0) {
                evtchn_close_t close = { .port = alloc_unbound.port };
                if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close))
                        panic("EVTCHNOP_close failed");
                return (error);
        }

        isrc->xi_close = 1;
        return (0);
}

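/*
 * Typical interdomain setup (sketch, not from this file): one domain
 * calls xen_intr_alloc_and_bind_local_port() to obtain an unbound port,
 * advertises the port number (see xen_intr_port()) to the peer domain,
 * e.g. via XenStore, and the peer then connects to that port with
 * xen_intr_bind_remote_port() below.
 */
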
int
xen_intr_bind_remote_port(device_t dev, u_int remote_domain,
    u_int remote_port, driver_filter_t filter, driver_intr_t handler,
    void *arg, enum intr_type flags, xen_intr_handle_t *port_handlep)
{
        struct xenisrc *isrc;
        struct evtchn_bind_interdomain bind_interdomain;
        int error;

        bind_interdomain.remote_dom = remote_domain;
        bind_interdomain.remote_port = remote_port;
        error = HYPERVISOR_event_channel_op(EVTCHNOP_bind_interdomain,
            &bind_interdomain);
        if (error != 0) {
                /*
                 * XXX Trap Hypercall error code Linuxisms in
                 * the HYPERCALL layer.
                 */
                return (-error);
        }

        error = xen_intr_bind_isrc(&isrc, bind_interdomain.local_port,
            EVTCHN_TYPE_PORT, device_get_nameunit(dev), filter, handler, arg,
            flags, port_handlep);
        if (error) {
                evtchn_close_t close = { .port = bind_interdomain.local_port };
                if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close))
                        panic("EVTCHNOP_close failed");
                return (error);
        }

        /*
         * The Event Channel API opened this port, so it is
         * responsible for closing it automatically on unbind.
         */
        isrc->xi_close = 1;
        return (0);
}

int
xen_intr_bind_virq(device_t dev, u_int virq, u_int cpu,
    driver_filter_t filter, driver_intr_t handler, void *arg,
    enum intr_type flags, xen_intr_handle_t *port_handlep)
{
        int vcpu_id = pcpu_find(cpu)->pc_vcpu_id;
        struct xenisrc *isrc;
        struct evtchn_bind_virq bind_virq = { .virq = virq, .vcpu = vcpu_id };
        int error;

        /* Ensure the target CPU is ready to handle evtchn interrupts. */
        xen_intr_intrcnt_add(cpu);

        isrc = NULL;
        error = HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq, &bind_virq);
        if (error != 0) {
                /*
                 * XXX Trap Hypercall error code Linuxisms in
                 * the HYPERCALL layer.
                 */
                return (-error);
        }

        error = xen_intr_bind_isrc(&isrc, bind_virq.port, EVTCHN_TYPE_VIRQ,
            device_get_nameunit(dev), filter, handler, arg, flags,
            port_handlep);

#ifdef SMP
        if (error == 0)
                error = intr_event_bind(isrc->xi_intsrc.is_event, cpu);
#endif

        if (error != 0) {
                evtchn_close_t close = { .port = bind_virq.port };

                /*
                 * xen_intr_unbind() takes a pointer to the handle; the
                 * original code passed the dereferenced handle itself.
                 */
                xen_intr_unbind(port_handlep);
                if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close))
                        panic("EVTCHNOP_close failed");
                return (error);
        }

#ifdef SMP
        if (isrc->xi_cpu != cpu) {
                /*
                 * Too early in the boot process for the generic interrupt
                 * code to perform the binding.  Update our event channel
                 * masks manually so events can't fire on the wrong cpu
                 * during AP startup.
                 */
                xen_intr_assign_cpu(&isrc->xi_intsrc, cpu_apic_ids[cpu]);
        }
#endif

        /*
         * The Event Channel API opened this port, so it is
         * responsible for closing it automatically on unbind.
         */
        isrc->xi_close = 1;
        isrc->xi_virq = virq;

        return (0);
}

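/*
 * Example use of xen_intr_bind_virq() (hypothetical, illustrative only):
 * binding VIRQ_DEBUG on CPU 0 with a filter routine; the handler name
 * and softc are placeholders.
 *
 *	xen_intr_handle_t handle;
 *	int error;
 *
 *	error = xen_intr_bind_virq(dev, VIRQ_DEBUG, 0, my_debug_filter,
 *	    NULL, sc, INTR_TYPE_MISC, &handle);
 */
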
int
xen_intr_alloc_and_bind_ipi(u_int cpu, driver_filter_t filter,
    enum intr_type flags, xen_intr_handle_t *port_handlep)
{
#ifdef SMP
        int vcpu_id = pcpu_find(cpu)->pc_vcpu_id;
        struct xenisrc *isrc;
        struct evtchn_bind_ipi bind_ipi = { .vcpu = vcpu_id };
        /* Same size as the one used by intr_handler->ih_name. */
        char name[MAXCOMLEN + 1];
        int error;

        /* Ensure the target CPU is ready to handle evtchn interrupts. */
        xen_intr_intrcnt_add(cpu);

        isrc = NULL;
        error = HYPERVISOR_event_channel_op(EVTCHNOP_bind_ipi, &bind_ipi);
        if (error != 0) {
                /*
                 * XXX Trap Hypercall error code Linuxisms in
                 * the HYPERCALL layer.
                 */
                return (-error);
        }

        snprintf(name, sizeof(name), "cpu%u", cpu);

        error = xen_intr_bind_isrc(&isrc, bind_ipi.port, EVTCHN_TYPE_IPI,
            name, filter, NULL, NULL, flags, port_handlep);
        if (error != 0) {
                evtchn_close_t close = { .port = bind_ipi.port };

                /*
                 * xen_intr_unbind() takes a pointer to the handle; the
                 * original code passed the dereferenced handle itself.
                 */
                xen_intr_unbind(port_handlep);
                if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close))
                        panic("EVTCHNOP_close failed");
                return (error);
        }

        if (isrc->xi_cpu != cpu) {
                /*
                 * Too early in the boot process for the generic interrupt
                 * code to perform the binding.  Update our event channel
                 * masks manually so events can't fire on the wrong cpu
                 * during AP startup.
                 */
                xen_intr_assign_cpu(&isrc->xi_intsrc, cpu_apic_ids[cpu]);
        }

        /*
         * The Event Channel API opened this port, so it is
         * responsible for closing it automatically on unbind.
         */
        isrc->xi_close = 1;
        return (0);
#else
        return (EOPNOTSUPP);
#endif
}

int
xen_register_pirq(int vector, enum intr_trigger trig, enum intr_polarity pol)
{
        struct physdev_map_pirq map_pirq;
        struct xenisrc *isrc;
        int error;

        if (vector == 0)
                return (EINVAL);

        if (bootverbose)
                printf("xen: register IRQ#%d\n", vector);

        map_pirq.domid = DOMID_SELF;
        map_pirq.type = MAP_PIRQ_TYPE_GSI;
        map_pirq.index = vector;
        map_pirq.pirq = vector;

        error = HYPERVISOR_physdev_op(PHYSDEVOP_map_pirq, &map_pirq);
        if (error) {
                printf("xen: unable to map IRQ#%d\n", vector);
                return (error);
        }

        mtx_lock(&xen_intr_isrc_lock);
        isrc = xen_intr_alloc_isrc(EVTCHN_TYPE_PIRQ, vector);
        mtx_unlock(&xen_intr_isrc_lock);
        KASSERT((isrc != NULL), ("xen: unable to allocate isrc for interrupt"));
        isrc->xi_pirq = vector;
        isrc->xi_activehi = pol == INTR_POLARITY_HIGH ? 1 : 0;
        isrc->xi_edgetrigger = trig == INTR_TRIGGER_EDGE ? 1 : 0;

        return (0);
}

int
xen_register_msi(device_t dev, int vector, int count)
{
        struct physdev_map_pirq msi_irq;
        struct xenisrc *isrc;
        int ret;

        memset(&msi_irq, 0, sizeof(msi_irq));
        msi_irq.domid = DOMID_SELF;
        msi_irq.type = count == 1 ?
            MAP_PIRQ_TYPE_MSI_SEG : MAP_PIRQ_TYPE_MULTI_MSI;
        msi_irq.index = -1;
        msi_irq.pirq = -1;
        msi_irq.bus = pci_get_bus(dev) | (pci_get_domain(dev) << 16);
        msi_irq.devfn = (pci_get_slot(dev) << 3) | pci_get_function(dev);
        msi_irq.entry_nr = count;

        ret = HYPERVISOR_physdev_op(PHYSDEVOP_map_pirq, &msi_irq);
        if (ret != 0)
                return (ret);
        if (count != msi_irq.entry_nr) {
                panic("unable to setup all requested MSI vectors "
                    "(expected %d got %d)", count, msi_irq.entry_nr);
        }

        mtx_lock(&xen_intr_isrc_lock);
        for (int i = 0; i < count; i++) {
                isrc = xen_intr_alloc_isrc(EVTCHN_TYPE_PIRQ, vector + i);
                KASSERT(isrc != NULL,
                    ("xen: unable to allocate isrc for interrupt"));
                isrc->xi_pirq = msi_irq.pirq + i;
                /* MSI interrupts are always edge triggered */
                isrc->xi_edgetrigger = 1;
        }
        mtx_unlock(&xen_intr_isrc_lock);

        return (0);
}

int
xen_release_msi(int vector)
{
        struct physdev_unmap_pirq unmap;
        struct xenisrc *isrc;
        int ret;

        isrc = (struct xenisrc *)intr_lookup_source(vector);
        if (isrc == NULL)
                return (ENXIO);

        unmap.pirq = isrc->xi_pirq;
        ret = HYPERVISOR_physdev_op(PHYSDEVOP_unmap_pirq, &unmap);
        if (ret != 0)
                return (ret);

        xen_intr_release_isrc(isrc);

        return (0);
}

int
xen_intr_describe(xen_intr_handle_t port_handle, const char *fmt, ...)
{
        char descr[MAXCOMLEN + 1];
        struct xenisrc *isrc;
        va_list ap;

        isrc = xen_intr_isrc(port_handle);
        if (isrc == NULL)
                return (EINVAL);

        va_start(ap, fmt);
        vsnprintf(descr, sizeof(descr), fmt, ap);
        va_end(ap);
        return (intr_describe(isrc->xi_vector, isrc->xi_cookie, descr));
}

void
xen_intr_unbind(xen_intr_handle_t *port_handlep)
{
        struct xenisrc *isrc;

        KASSERT(port_handlep != NULL,
            ("NULL xen_intr_handle_t passed to xen_intr_unbind"));

        isrc = xen_intr_isrc(*port_handlep);
        *port_handlep = NULL;
        if (isrc == NULL)
                return;

        if (isrc->xi_cookie != NULL)
                intr_remove_handler(isrc->xi_cookie);
        xen_intr_release_isrc(isrc);
}

void
xen_intr_signal(xen_intr_handle_t handle)
{
        struct xenisrc *isrc;

        isrc = xen_intr_isrc(handle);
        if (isrc != NULL) {
                KASSERT(isrc->xi_type == EVTCHN_TYPE_PORT ||
                    isrc->xi_type == EVTCHN_TYPE_IPI,
                    ("evtchn_signal on something other than a local port"));
                struct evtchn_send send = { .port = isrc->xi_port };
                (void)HYPERVISOR_event_channel_op(EVTCHNOP_send, &send);
        }
}

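/*
 * Note: xen_intr_signal() is typically used by ring producers to kick
 * the remote end of an interdomain event channel after queuing work.
 * It silently does nothing for an unbound handle.
 */
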
evtchn_port_t
xen_intr_port(xen_intr_handle_t handle)
{
        struct xenisrc *isrc;

        isrc = xen_intr_isrc(handle);
        if (isrc == NULL)
                return (0);

        return (isrc->xi_port);
}

int
xen_intr_add_handler(const char *name, driver_filter_t filter,
    driver_intr_t handler, void *arg, enum intr_type flags,
    xen_intr_handle_t handle)
{
        struct xenisrc *isrc;
        int error;

        isrc = xen_intr_isrc(handle);
        if (isrc == NULL || isrc->xi_cookie != NULL)
                return (EINVAL);

        error = intr_add_handler(name, isrc->xi_vector, filter, handler, arg,
            flags | INTR_EXCL, &isrc->xi_cookie);
        if (error != 0) {
                printf(
                    "%s: xen_intr_add_handler: intr_add_handler failed: %d\n",
                    name, error);
        }

        return (error);
}

#ifdef DDB
static const char *
xen_intr_print_type(enum evtchn_type type)
{
        static const char *evtchn_type_to_string[EVTCHN_TYPE_COUNT] = {
                [EVTCHN_TYPE_UNBOUND]   = "UNBOUND",
                [EVTCHN_TYPE_PIRQ]      = "PIRQ",
                [EVTCHN_TYPE_VIRQ]      = "VIRQ",
                [EVTCHN_TYPE_IPI]       = "IPI",
                [EVTCHN_TYPE_PORT]      = "PORT",
        };

        if (type >= EVTCHN_TYPE_COUNT)
                return ("UNKNOWN");

        return (evtchn_type_to_string[type]);
}

static void
xen_intr_dump_port(struct xenisrc *isrc)
{
        struct xen_intr_pcpu_data *pcpu;
        shared_info_t *s = HYPERVISOR_shared_info;
        int i;

        db_printf("Port %d Type: %s\n",
            isrc->xi_port, xen_intr_print_type(isrc->xi_type));
        if (isrc->xi_type == EVTCHN_TYPE_PIRQ) {
                db_printf("\tPirq: %d ActiveHi: %d EdgeTrigger: %d "
                    "NeedsEOI: %d\n",
                    isrc->xi_pirq, isrc->xi_activehi, isrc->xi_edgetrigger,
                    !!xen_test_bit(isrc->xi_pirq, xen_intr_pirq_eoi_map));
        }
        if (isrc->xi_type == EVTCHN_TYPE_VIRQ)
                db_printf("\tVirq: %d\n", isrc->xi_virq);

        db_printf("\tMasked: %d Pending: %d\n",
            !!xen_test_bit(isrc->xi_port, &s->evtchn_mask[0]),
            !!xen_test_bit(isrc->xi_port, &s->evtchn_pending[0]));

        db_printf("\tPer-CPU Masks: ");
        CPU_FOREACH(i) {
                pcpu = DPCPU_ID_PTR(i, xen_intr_pcpu);
                db_printf("cpu#%d: %d ", i,
                    !!xen_test_bit(isrc->xi_port, pcpu->evtchn_enabled));
        }
        db_printf("\n");
}

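/*
 * DDB command, invoked from the debugger prompt as "show xen_evtchn":
 * walks the port -> isrc table and dumps the state of every bound port.
 */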
DB_SHOW_COMMAND(xen_evtchn, db_show_xen_evtchn)
{
        int i;

        if (!xen_domain()) {
                db_printf("Only available on Xen guests\n");
                return;
        }

        for (i = 0; i < NR_EVENT_CHANNELS; i++) {
                struct xenisrc *isrc;

                isrc = xen_intr_port_to_isrc[i];
                if (isrc == NULL)
                        continue;

                xen_intr_dump_port(isrc);
        }
}
#endif /* DDB */