FreeBSD/Linux Kernel Cross Reference
sys/kern/subr_intr.c
1 /*-
2 * Copyright (c) 2015-2016 Svatopluk Kraus
3 * Copyright (c) 2015-2016 Michal Meloun
4 * All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 *
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25 * SUCH DAMAGE.
26 */
27
28 #include <sys/cdefs.h>
29 __FBSDID("$FreeBSD: releng/12.0/sys/kern/subr_intr.c 322580 2017-08-16 16:51:55Z ian $");
30
31 /*
32 * New-style Interrupt Framework
33 *
34 * TODO: - add support for disconnected PICs.
35 * - support IPI (PPI) enabling on other CPUs if already started.
36 * - complete support for removable PICs.
37 */
38
39 #include "opt_ddb.h"
40 #include "opt_hwpmc_hooks.h"
41
42 #include <sys/param.h>
43 #include <sys/systm.h>
44 #include <sys/kernel.h>
45 #include <sys/syslog.h>
46 #include <sys/malloc.h>
47 #include <sys/proc.h>
48 #include <sys/queue.h>
49 #include <sys/bus.h>
50 #include <sys/interrupt.h>
51 #include <sys/conf.h>
52 #include <sys/cpuset.h>
53 #include <sys/rman.h>
54 #include <sys/sched.h>
55 #include <sys/smp.h>
56 #include <sys/vmmeter.h>
57 #ifdef HWPMC_HOOKS
58 #include <sys/pmckern.h>
59 #endif
60
61 #include <machine/atomic.h>
62 #include <machine/intr.h>
63 #include <machine/cpu.h>
64 #include <machine/smp.h>
65 #include <machine/stdarg.h>
66
67 #ifdef DDB
68 #include <ddb/ddb.h>
69 #endif
70
71 #include "pic_if.h"
72 #include "msi_if.h"
73
74 #define INTRNAME_LEN (2*MAXCOMLEN + 1)
75
76 #ifdef DEBUG
77 #define debugf(fmt, args...) do { printf("%s(): ", __func__); \
78 printf(fmt,##args); } while (0)
79 #else
80 #define debugf(fmt, args...)
81 #endif
82
83 MALLOC_DECLARE(M_INTRNG);
84 MALLOC_DEFINE(M_INTRNG, "intr", "intr interrupt handling");
85
86 /* Main interrupt handler called from assembler; thus 'hidden' from other C code. */
87 void intr_irq_handler(struct trapframe *tf);
88
89 /* Root interrupt controller stuff. */
90 device_t intr_irq_root_dev;
91 static intr_irq_filter_t *irq_root_filter;
92 static void *irq_root_arg;
93 static u_int irq_root_ipicount;
94
95 struct intr_pic_child {
96 SLIST_ENTRY(intr_pic_child) pc_next;
97 struct intr_pic *pc_pic;
98 intr_child_irq_filter_t *pc_filter;
99 void *pc_filter_arg;
100 uintptr_t pc_start;
101 uintptr_t pc_length;
102 };
103
104 /* Interrupt controller definition. */
105 struct intr_pic {
106 SLIST_ENTRY(intr_pic) pic_next;
107 intptr_t pic_xref; /* hardware identification */
108 device_t pic_dev;
109 /* Only one of FLAG_PIC or FLAG_MSI may be set */
110 #define FLAG_PIC (1 << 0)
111 #define FLAG_MSI (1 << 1)
112 #define FLAG_TYPE_MASK (FLAG_PIC | FLAG_MSI)
113 u_int pic_flags;
114 struct mtx pic_child_lock;
115 SLIST_HEAD(, intr_pic_child) pic_children;
116 };
117
118 static struct mtx pic_list_lock;
119 static SLIST_HEAD(, intr_pic) pic_list;
120
121 static struct intr_pic *pic_lookup(device_t dev, intptr_t xref, int flags);
122
123 /* Interrupt source definition. */
124 static struct mtx isrc_table_lock;
125 static struct intr_irqsrc *irq_sources[NIRQ];
126 u_int irq_next_free;
127
128 #ifdef SMP
129 static boolean_t irq_assign_cpu = FALSE;
130 #endif
131
132 /*
133 * - 2 counters for each I/O interrupt.
134 * - MAXCPU counters for each IPI counter on SMP.
135 */
136 #ifdef SMP
137 #define INTRCNT_COUNT (NIRQ * 2 + INTR_IPI_COUNT * MAXCPU)
138 #else
139 #define INTRCNT_COUNT (NIRQ * 2)
140 #endif
141
142 /* Data for MI statistics reporting. */
143 u_long intrcnt[INTRCNT_COUNT];
144 char intrnames[INTRCNT_COUNT * INTRNAME_LEN];
145 size_t sintrcnt = sizeof(intrcnt);
146 size_t sintrnames = sizeof(intrnames);
147 static u_int intrcnt_index;
148
149 static struct intr_irqsrc *intr_map_get_isrc(u_int res_id);
150 static void intr_map_set_isrc(u_int res_id, struct intr_irqsrc *isrc);
151 static struct intr_map_data * intr_map_get_map_data(u_int res_id);
152 static void intr_map_copy_map_data(u_int res_id, device_t *dev, intptr_t *xref,
153 struct intr_map_data **data);
154
155 /*
156 * Interrupt framework initialization routine.
157 */
158 static void
159 intr_irq_init(void *dummy __unused)
160 {
161
162 SLIST_INIT(&pic_list);
163 mtx_init(&pic_list_lock, "intr pic list", NULL, MTX_DEF);
164
165 mtx_init(&isrc_table_lock, "intr isrc table", NULL, MTX_DEF);
166 }
167 SYSINIT(intr_irq_init, SI_SUB_INTR, SI_ORDER_FIRST, intr_irq_init, NULL);
168
169 static void
170 intrcnt_setname(const char *name, int index)
171 {
172
173 snprintf(intrnames + INTRNAME_LEN * index, INTRNAME_LEN, "%-*s",
174 INTRNAME_LEN - 1, name);
175 }
176
177 /*
178 * Update name for interrupt source with interrupt event.
179 */
180 static void
181 intrcnt_updatename(struct intr_irqsrc *isrc)
182 {
183
184 /* QQQ: What about stray counter name? */
185 mtx_assert(&isrc_table_lock, MA_OWNED);
186 intrcnt_setname(isrc->isrc_event->ie_fullname, isrc->isrc_index);
187 }
188
189 /*
190 * Virtualization for interrupt source interrupt counter increment.
191 */
192 static inline void
193 isrc_increment_count(struct intr_irqsrc *isrc)
194 {
195
196 if (isrc->isrc_flags & INTR_ISRCF_PPI)
197 atomic_add_long(&isrc->isrc_count[0], 1);
198 else
199 isrc->isrc_count[0]++;
200 }
201
202 /*
203 * Virtualization for interrupt source interrupt stray counter increment.
204 */
205 static inline void
206 isrc_increment_straycount(struct intr_irqsrc *isrc)
207 {
208
209 isrc->isrc_count[1]++;
210 }
211
212 /*
213 * Virtualization for interrupt source interrupt name update.
214 */
215 static void
216 isrc_update_name(struct intr_irqsrc *isrc, const char *name)
217 {
218 char str[INTRNAME_LEN];
219
220 mtx_assert(&isrc_table_lock, MA_OWNED);
221
222 if (name != NULL) {
223 snprintf(str, INTRNAME_LEN, "%s: %s", isrc->isrc_name, name);
224 intrcnt_setname(str, isrc->isrc_index);
225 snprintf(str, INTRNAME_LEN, "stray %s: %s", isrc->isrc_name,
226 name);
227 intrcnt_setname(str, isrc->isrc_index + 1);
228 } else {
229 snprintf(str, INTRNAME_LEN, "%s:", isrc->isrc_name);
230 intrcnt_setname(str, isrc->isrc_index);
231 snprintf(str, INTRNAME_LEN, "stray %s:", isrc->isrc_name);
232 intrcnt_setname(str, isrc->isrc_index + 1);
233 }
234 }
235
236 /*
237 * Virtualization for interrupt source interrupt counters setup.
238 */
239 static void
240 isrc_setup_counters(struct intr_irqsrc *isrc)
241 {
242 u_int index;
243
244 /*
245 * XXX - it does not work well with removable controllers and
246 * interrupt sources !!!
247 */
248 index = atomic_fetchadd_int(&intrcnt_index, 2);
249 isrc->isrc_index = index;
250 isrc->isrc_count = &intrcnt[index];
251 isrc_update_name(isrc, NULL);
252 }
253
254 /*
255 * Virtualization for interrupt source interrupt counters release.
256 */
257 static void
258 isrc_release_counters(struct intr_irqsrc *isrc)
259 {
260
261 panic("%s: not implemented", __func__);
262 }
263
264 #ifdef SMP
265 /*
266 * Virtualization for interrupt source IPI counters setup.
267 */
268 u_long *
269 intr_ipi_setup_counters(const char *name)
270 {
271 u_int index, i;
272 char str[INTRNAME_LEN];
273
274 index = atomic_fetchadd_int(&intrcnt_index, MAXCPU);
275 for (i = 0; i < MAXCPU; i++) {
276 snprintf(str, INTRNAME_LEN, "cpu%d:%s", i, name);
277 intrcnt_setname(str, index + i);
278 }
279 return (&intrcnt[index]);
280 }
281 #endif
282
283 /*
284 * Main interrupt dispatch handler. It's called straight
285 * from the assembler, where the CPU interrupt is taken.
286 */
287 void
288 intr_irq_handler(struct trapframe *tf)
289 {
290 struct trapframe * oldframe;
291 struct thread * td;
292
293 KASSERT(irq_root_filter != NULL, ("%s: no filter", __func__));
294
295 VM_CNT_INC(v_intr);
296 critical_enter();
297 td = curthread;
298 oldframe = td->td_intr_frame;
299 td->td_intr_frame = tf;
300 irq_root_filter(irq_root_arg);
301 td->td_intr_frame = oldframe;
302 critical_exit();
303 #ifdef HWPMC_HOOKS
304 if (pmc_hook && TRAPF_USERMODE(tf) &&
305 (PCPU_GET(curthread)->td_pflags & TDP_CALLCHAIN))
306 pmc_hook(PCPU_GET(curthread), PMC_FN_USER_CALLCHAIN, tf);
307 #endif
308 }
309
310 int
311 intr_child_irq_handler(struct intr_pic *parent, uintptr_t irq)
312 {
313 struct intr_pic_child *child;
314 bool found;
315
316 found = false;
317 mtx_lock_spin(&parent->pic_child_lock);
318 SLIST_FOREACH(child, &parent->pic_children, pc_next) {
319 if (child->pc_start <= irq &&
320 irq < (child->pc_start + child->pc_length)) {
321 found = true;
322 break;
323 }
324 }
325 mtx_unlock_spin(&parent->pic_child_lock);
326
327 if (found)
328 return (child->pc_filter(child->pc_filter_arg, irq));
329
330 return (FILTER_STRAY);
331 }
332
333 /*
334 * Interrupt controller dispatch function. It should be called straight
335 * from the interrupt controller driver once the associated interrupt
336 * source has been identified.
337 */
338 int
339 intr_isrc_dispatch(struct intr_irqsrc *isrc, struct trapframe *tf)
340 {
341
342 KASSERT(isrc != NULL, ("%s: no source", __func__));
343
344 isrc_increment_count(isrc);
345
346 #ifdef INTR_SOLO
347 if (isrc->isrc_filter != NULL) {
348 int error;
349 error = isrc->isrc_filter(isrc->isrc_arg, tf);
350 PIC_POST_FILTER(isrc->isrc_dev, isrc);
351 if (error == FILTER_HANDLED)
352 return (0);
353 } else
354 #endif
355 if (isrc->isrc_event != NULL) {
356 if (intr_event_handle(isrc->isrc_event, tf) == 0)
357 return (0);
358 }
359
360 isrc_increment_straycount(isrc);
361 return (EINVAL);
362 }
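/*
 * Editorial sketch (not part of the original file): the usual way a PIC
 * driver's root filter feeds pending interrupts into intr_isrc_dispatch().
 * The mypic_* softc layout and the MYPIC_* register accessors are
 * hypothetical.
 */
#if 0
static int
mypic_intr(void *arg)
{
	struct mypic_softc *sc = arg;
	struct trapframe *tf;
	u_int irq;

	/* intr_irq_handler() has stashed the frame in curthread. */
	tf = curthread->td_intr_frame;
	while ((irq = MYPIC_READ_PENDING(sc)) != MYPIC_NO_PENDING) {
		if (intr_isrc_dispatch(&sc->mypic_isrcs[irq].isrc, tf) != 0) {
			/* Mask stray interrupts so they cannot storm. */
			MYPIC_MASK_IRQ(sc, irq);
			device_printf(sc->mypic_dev,
			    "stray irq %u disabled\n", irq);
		}
	}
	return (FILTER_HANDLED);
}
#endif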
363
364 /*
365 * Alloc unique interrupt number (resource handle) for interrupt source.
366 *
367 * There are various possible strategies for allocating a free interrupt
368 * number (resource handle) for a new interrupt source.
369 *
370 * 1. Handles are always allocated forward, so handles are not recycled
371 * immediately. However, if only one free handle is left, it gets reused
372 * constantly...
373 */
374 static inline int
375 isrc_alloc_irq(struct intr_irqsrc *isrc)
376 {
377 u_int maxirqs, irq;
378
379 mtx_assert(&isrc_table_lock, MA_OWNED);
380
381 maxirqs = nitems(irq_sources);
382 if (irq_next_free >= maxirqs)
383 return (ENOSPC);
384
385 for (irq = irq_next_free; irq < maxirqs; irq++) {
386 if (irq_sources[irq] == NULL)
387 goto found;
388 }
389 for (irq = 0; irq < irq_next_free; irq++) {
390 if (irq_sources[irq] == NULL)
391 goto found;
392 }
393
394 irq_next_free = maxirqs;
395 return (ENOSPC);
396
397 found:
398 isrc->isrc_irq = irq;
399 irq_sources[irq] = isrc;
400
401 irq_next_free = irq + 1;
402 if (irq_next_free >= maxirqs)
403 irq_next_free = 0;
404 return (0);
405 }
406
407 /*
408 * Free unique interrupt number (resource handle) from interrupt source.
409 */
410 static inline int
411 isrc_free_irq(struct intr_irqsrc *isrc)
412 {
413
414 mtx_assert(&isrc_table_lock, MA_OWNED);
415
416 if (isrc->isrc_irq >= nitems(irq_sources))
417 return (EINVAL);
418 if (irq_sources[isrc->isrc_irq] != isrc)
419 return (EINVAL);
420
421 irq_sources[isrc->isrc_irq] = NULL;
422 isrc->isrc_irq = INTR_IRQ_INVALID; /* just to be safe */
423 return (0);
424 }
425
426 /*
427 * Initialize interrupt source and register it into global interrupt table.
428 */
429 int
430 intr_isrc_register(struct intr_irqsrc *isrc, device_t dev, u_int flags,
431 const char *fmt, ...)
432 {
433 int error;
434 va_list ap;
435
436 bzero(isrc, sizeof(struct intr_irqsrc));
437 isrc->isrc_dev = dev;
438 isrc->isrc_irq = INTR_IRQ_INVALID; /* just to be safe */
439 isrc->isrc_flags = flags;
440
441 va_start(ap, fmt);
442 vsnprintf(isrc->isrc_name, INTR_ISRC_NAMELEN, fmt, ap);
443 va_end(ap);
444
445 mtx_lock(&isrc_table_lock);
446 error = isrc_alloc_irq(isrc);
447 if (error != 0) {
448 mtx_unlock(&isrc_table_lock);
449 return (error);
450 }
451 /*
452 * Set up interrupt counters, but not for IPI sources. Those are set up
453 * later, and only for the ones actually used (up to INTR_IPI_COUNT),
454 * so as not to exhaust our counter pool.
455 */
456 if ((isrc->isrc_flags & INTR_ISRCF_IPI) == 0)
457 isrc_setup_counters(isrc);
458 mtx_unlock(&isrc_table_lock);
459 return (0);
460 }
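/*
 * Editorial sketch (not part of the original file): a PIC driver typically
 * registers one interrupt source per hardware line during attach.  The
 * mypic_* softc layout and MYPIC_NIRQ are hypothetical.
 */
#if 0
static int
mypic_register_isrcs(struct mypic_softc *sc)
{
	const char *name;
	u_int irq;
	int error;

	name = device_get_nameunit(sc->mypic_dev);
	for (irq = 0; irq < MYPIC_NIRQ; irq++) {
		sc->mypic_isrcs[irq].irq = irq;
		error = intr_isrc_register(&sc->mypic_isrcs[irq].isrc,
		    sc->mypic_dev, 0, "%s,%u", name, irq);
		if (error != 0)
			return (error);
	}
	return (0);
}
#endif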
461
462 /*
463 * Deregister interrupt source from global interrupt table.
464 */
465 int
466 intr_isrc_deregister(struct intr_irqsrc *isrc)
467 {
468 int error;
469
470 mtx_lock(&isrc_table_lock);
471 if ((isrc->isrc_flags & INTR_ISRCF_IPI) == 0)
472 isrc_release_counters(isrc);
473 error = isrc_free_irq(isrc);
474 mtx_unlock(&isrc_table_lock);
475 return (error);
476 }
477
478 #ifdef SMP
479 /*
480 * A support function for a PIC to decide whether the provided ISRC should be
481 * initialized on a given CPU. The logic of the INTR_ISRCF_BOUND flag and the
482 * isrc_cpu member of struct intr_irqsrc is the following:
483 *
484 * If INTR_ISRCF_BOUND is set, the ISRC should be initialized only on CPUs
485 * set in isrc_cpu. If not, the ISRC should be initialized on every CPU and
486 * isrc_cpu is kept consistent with it. Thus isrc_cpu is always correct.
487 */
488 bool
489 intr_isrc_init_on_cpu(struct intr_irqsrc *isrc, u_int cpu)
490 {
491
492 if (isrc->isrc_handlers == 0)
493 return (false);
494 if ((isrc->isrc_flags & (INTR_ISRCF_PPI | INTR_ISRCF_IPI)) == 0)
495 return (false);
496 if (isrc->isrc_flags & INTR_ISRCF_BOUND)
497 return (CPU_ISSET(cpu, &isrc->isrc_cpu));
498
499 CPU_SET(cpu, &isrc->isrc_cpu);
500 return (true);
501 }
502 #endif
503
504 #ifdef INTR_SOLO
505 /*
506 * Set up a filter for an interrupt source.
507 */
508 static int
509 iscr_setup_filter(struct intr_irqsrc *isrc, const char *name,
510 intr_irq_filter_t *filter, void *arg, void **cookiep)
511 {
512
513 if (filter == NULL)
514 return (EINVAL);
515
516 mtx_lock(&isrc_table_lock);
517 /*
518 * Make sure that we do not mix the two ways
519 * in which we handle interrupt sources.
520 */
521 if (isrc->isrc_filter != NULL || isrc->isrc_event != NULL) {
522 mtx_unlock(&isrc_table_lock);
523 return (EBUSY);
524 }
525 isrc->isrc_filter = filter;
526 isrc->isrc_arg = arg;
527 isrc_update_name(isrc, name);
528 mtx_unlock(&isrc_table_lock);
529
530 *cookiep = isrc;
531 return (0);
532 }
533 #endif
534
535 /*
536 * Interrupt source pre_ithread method for MI interrupt framework.
537 */
538 static void
539 intr_isrc_pre_ithread(void *arg)
540 {
541 struct intr_irqsrc *isrc = arg;
542
543 PIC_PRE_ITHREAD(isrc->isrc_dev, isrc);
544 }
545
546 /*
547 * Interrupt source post_ithread method for MI interrupt framework.
548 */
549 static void
550 intr_isrc_post_ithread(void *arg)
551 {
552 struct intr_irqsrc *isrc = arg;
553
554 PIC_POST_ITHREAD(isrc->isrc_dev, isrc);
555 }
556
557 /*
558 * Interrupt source post_filter method for MI interrupt framework.
559 */
560 static void
561 intr_isrc_post_filter(void *arg)
562 {
563 struct intr_irqsrc *isrc = arg;
564
565 PIC_POST_FILTER(isrc->isrc_dev, isrc);
566 }
567
568 /*
569 * Interrupt source assign_cpu method for MI interrupt framework.
570 */
571 static int
572 intr_isrc_assign_cpu(void *arg, int cpu)
573 {
574 #ifdef SMP
575 struct intr_irqsrc *isrc = arg;
576 int error;
577
578 if (isrc->isrc_dev != intr_irq_root_dev)
579 return (EINVAL);
580
581 mtx_lock(&isrc_table_lock);
582 if (cpu == NOCPU) {
583 CPU_ZERO(&isrc->isrc_cpu);
584 isrc->isrc_flags &= ~INTR_ISRCF_BOUND;
585 } else {
586 CPU_SETOF(cpu, &isrc->isrc_cpu);
587 isrc->isrc_flags |= INTR_ISRCF_BOUND;
588 }
589
590 /*
591 * In the NOCPU case, it's up to the PIC to either leave the ISRC on the same
592 * CPU, re-balance it to another CPU, or enable it on more CPUs. However, the
593 * PIC is expected to change isrc_cpu appropriately to keep us well informed
594 * if the call is successful.
595 */
596 if (irq_assign_cpu) {
597 error = PIC_BIND_INTR(isrc->isrc_dev, isrc);
598 if (error) {
599 CPU_ZERO(&isrc->isrc_cpu);
600 mtx_unlock(&isrc_table_lock);
601 return (error);
602 }
603 }
604 mtx_unlock(&isrc_table_lock);
605 return (0);
606 #else
607 return (EOPNOTSUPP);
608 #endif
609 }
610
611 /*
612 * Create interrupt event for interrupt source.
613 */
614 static int
615 isrc_event_create(struct intr_irqsrc *isrc)
616 {
617 struct intr_event *ie;
618 int error;
619
620 error = intr_event_create(&ie, isrc, 0, isrc->isrc_irq,
621 intr_isrc_pre_ithread, intr_isrc_post_ithread, intr_isrc_post_filter,
622 intr_isrc_assign_cpu, "%s:", isrc->isrc_name);
623 if (error)
624 return (error);
625
626 mtx_lock(&isrc_table_lock);
627 /*
628 * Make sure that we do not mix the two ways in which we handle
629 * interrupt sources. Let the contested event win.
630 */
631 #ifdef INTR_SOLO
632 if (isrc->isrc_filter != NULL || isrc->isrc_event != NULL) {
633 #else
634 if (isrc->isrc_event != NULL) {
635 #endif
636 mtx_unlock(&isrc_table_lock);
637 intr_event_destroy(ie);
638 return (isrc->isrc_event != NULL ? EBUSY : 0);
639 }
640 isrc->isrc_event = ie;
641 mtx_unlock(&isrc_table_lock);
642
643 return (0);
644 }
645 #ifdef notyet
646 /*
647 * Destroy interrupt event for interrupt source.
648 */
649 static void
650 isrc_event_destroy(struct intr_irqsrc *isrc)
651 {
652 struct intr_event *ie;
653
654 mtx_lock(&isrc_table_lock);
655 ie = isrc->isrc_event;
656 isrc->isrc_event = NULL;
657 mtx_unlock(&isrc_table_lock);
658
659 if (ie != NULL)
660 intr_event_destroy(ie);
661 }
662 #endif
663 /*
664 * Add handler to interrupt source.
665 */
666 static int
667 isrc_add_handler(struct intr_irqsrc *isrc, const char *name,
668 driver_filter_t filter, driver_intr_t handler, void *arg,
669 enum intr_type flags, void **cookiep)
670 {
671 int error;
672
673 if (isrc->isrc_event == NULL) {
674 error = isrc_event_create(isrc);
675 if (error)
676 return (error);
677 }
678
679 error = intr_event_add_handler(isrc->isrc_event, name, filter, handler,
680 arg, intr_priority(flags), flags, cookiep);
681 if (error == 0) {
682 mtx_lock(&isrc_table_lock);
683 intrcnt_updatename(isrc);
684 mtx_unlock(&isrc_table_lock);
685 }
686
687 return (error);
688 }
689
690 /*
691 * Lookup interrupt controller locked.
692 */
693 static inline struct intr_pic *
694 pic_lookup_locked(device_t dev, intptr_t xref, int flags)
695 {
696 struct intr_pic *pic;
697
698 mtx_assert(&pic_list_lock, MA_OWNED);
699
700 if (dev == NULL && xref == 0)
701 return (NULL);
702
703 /* Note that pic->pic_dev is never NULL on registered PIC. */
704 SLIST_FOREACH(pic, &pic_list, pic_next) {
705 if ((pic->pic_flags & FLAG_TYPE_MASK) !=
706 (flags & FLAG_TYPE_MASK))
707 continue;
708
709 if (dev == NULL) {
710 if (xref == pic->pic_xref)
711 return (pic);
712 } else if (xref == 0 || pic->pic_xref == 0) {
713 if (dev == pic->pic_dev)
714 return (pic);
715 } else if (xref == pic->pic_xref && dev == pic->pic_dev)
716 return (pic);
717 }
718 return (NULL);
719 }
720
721 /*
722 * Lookup interrupt controller.
723 */
724 static struct intr_pic *
725 pic_lookup(device_t dev, intptr_t xref, int flags)
726 {
727 struct intr_pic *pic;
728
729 mtx_lock(&pic_list_lock);
730 pic = pic_lookup_locked(dev, xref, flags);
731 mtx_unlock(&pic_list_lock);
732 return (pic);
733 }
734
735 /*
736 * Create interrupt controller.
737 */
738 static struct intr_pic *
739 pic_create(device_t dev, intptr_t xref, int flags)
740 {
741 struct intr_pic *pic;
742
743 mtx_lock(&pic_list_lock);
744 pic = pic_lookup_locked(dev, xref, flags);
745 if (pic != NULL) {
746 mtx_unlock(&pic_list_lock);
747 return (pic);
748 }
749 pic = malloc(sizeof(*pic), M_INTRNG, M_NOWAIT | M_ZERO);
750 if (pic == NULL) {
751 mtx_unlock(&pic_list_lock);
752 return (NULL);
753 }
754 pic->pic_xref = xref;
755 pic->pic_dev = dev;
756 pic->pic_flags = flags;
757 mtx_init(&pic->pic_child_lock, "pic child lock", NULL, MTX_SPIN);
758 SLIST_INSERT_HEAD(&pic_list, pic, pic_next);
759 mtx_unlock(&pic_list_lock);
760
761 return (pic);
762 }
763 #ifdef notyet
764 /*
765 * Destroy interrupt controller.
766 */
767 static void
768 pic_destroy(device_t dev, intptr_t xref, int flags)
769 {
770 struct intr_pic *pic;
771
772 mtx_lock(&pic_list_lock);
773 pic = pic_lookup_locked(dev, xref, flags);
774 if (pic == NULL) {
775 mtx_unlock(&pic_list_lock);
776 return;
777 }
778 SLIST_REMOVE(&pic_list, pic, intr_pic, pic_next);
779 mtx_unlock(&pic_list_lock);
780
781 free(pic, M_INTRNG);
782 }
783 #endif
784 /*
785 * Register interrupt controller.
786 */
787 struct intr_pic *
788 intr_pic_register(device_t dev, intptr_t xref)
789 {
790 struct intr_pic *pic;
791
792 if (dev == NULL)
793 return (NULL);
794 pic = pic_create(dev, xref, FLAG_PIC);
795 if (pic == NULL)
796 return (NULL);
797
798 debugf("PIC %p registered for %s <dev %p, xref %jx>\n", pic,
799 device_get_nameunit(dev), dev, (uintmax_t)xref);
800 return (pic);
801 }
802
803 /*
804 * Unregister interrupt controller.
805 */
806 int
807 intr_pic_deregister(device_t dev, intptr_t xref)
808 {
809
810 panic("%s: not implemented", __func__);
811 }
812
813 /*
814 * Mark interrupt controller (itself) as a root one.
815 *
816 * Note that only an interrupt controller can really know its position
817 * in the interrupt controller tree. So the root PIC must claim itself as root.
818 *
819 * In FDT case, according to ePAPR approved version 1.1 from 08 April 2011,
820 * page 30:
821 * "The root of the interrupt tree is determined when traversal
822 * of the interrupt tree reaches an interrupt controller node without
823 * an interrupts property and thus no explicit interrupt parent."
824 */
825 int
826 intr_pic_claim_root(device_t dev, intptr_t xref, intr_irq_filter_t *filter,
827 void *arg, u_int ipicount)
828 {
829 struct intr_pic *pic;
830
831 pic = pic_lookup(dev, xref, FLAG_PIC);
832 if (pic == NULL) {
833 device_printf(dev, "not registered\n");
834 return (EINVAL);
835 }
836
837 KASSERT((pic->pic_flags & FLAG_TYPE_MASK) == FLAG_PIC,
838 ("%s: Found a non-PIC controller: %s", __func__,
839 device_get_name(pic->pic_dev)));
840
841 if (filter == NULL) {
842 device_printf(dev, "filter missing\n");
843 return (EINVAL);
844 }
845
846 /*
847 * Only one interrupt controller can be the root for now.
848 * Note that we further assume that there is no threaded interrupt
849 * routine (handler) on the root. See intr_irq_handler().
850 */
851 if (intr_irq_root_dev != NULL) {
852 device_printf(dev, "another root already set\n");
853 return (EBUSY);
854 }
855
856 intr_irq_root_dev = dev;
857 irq_root_filter = filter;
858 irq_root_arg = arg;
859 irq_root_ipicount = ipicount;
860
861 debugf("irq root set to %s\n", device_get_nameunit(dev));
862 return (0);
863 }
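/*
 * Editorial sketch (not part of the original file): a root PIC driver first
 * registers itself and then claims the root, passing the filter that
 * intr_irq_handler() will invoke.  mypic_attach(), mypic_intr() and
 * MYPIC_NIPIS are hypothetical; on FDT systems the xref is typically the
 * controller node's phandle.
 */
#if 0
static int
mypic_attach(device_t dev)
{
	struct mypic_softc *sc = device_get_softc(dev);
	intptr_t xref = OF_xref_from_node(ofw_bus_get_node(dev));

	/* ... map registers and register interrupt sources here ... */

	if (intr_pic_register(dev, xref) == NULL)
		return (ENXIO);
	return (intr_pic_claim_root(dev, xref, mypic_intr, sc, MYPIC_NIPIS));
}
#endif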
864
865 /*
866 * Add a handler to manage a sub-range of a parent's interrupts.
867 */
868 struct intr_pic *
869 intr_pic_add_handler(device_t parent, struct intr_pic *pic,
870 intr_child_irq_filter_t *filter, void *arg, uintptr_t start,
871 uintptr_t length)
872 {
873 struct intr_pic *parent_pic;
874 struct intr_pic_child *newchild;
875 #ifdef INVARIANTS
876 struct intr_pic_child *child;
877 #endif
878
879 /* Find the parent PIC */
880 parent_pic = pic_lookup(parent, 0, FLAG_PIC);
881 if (parent_pic == NULL)
882 return (NULL);
883
884 newchild = malloc(sizeof(*newchild), M_INTRNG, M_WAITOK | M_ZERO);
885 newchild->pc_pic = pic;
886 newchild->pc_filter = filter;
887 newchild->pc_filter_arg = arg;
888 newchild->pc_start = start;
889 newchild->pc_length = length;
890
891 mtx_lock_spin(&parent_pic->pic_child_lock);
892 #ifdef INVARIANTS
893 SLIST_FOREACH(child, &parent_pic->pic_children, pc_next) {
894 KASSERT(child->pc_pic != pic, ("%s: Adding a child PIC twice",
895 __func__));
896 }
897 #endif
898 SLIST_INSERT_HEAD(&parent_pic->pic_children, newchild, pc_next);
899 mtx_unlock_spin(&parent_pic->pic_child_lock);
900
901 return (pic);
902 }
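/*
 * Editorial sketch (not part of the original file): a cascaded controller
 * that owns a contiguous range of its parent's interrupt numbers can hook
 * that range with intr_pic_add_handler(); the parent then routes those
 * interrupts through the child's filter via intr_child_irq_handler().
 * All mychild_* names are hypothetical.
 */
#if 0
static int
mychild_attach(device_t dev)
{
	struct mychild_softc *sc = device_get_softc(dev);
	intptr_t xref = OF_xref_from_node(ofw_bus_get_node(dev));
	struct intr_pic *pic;

	pic = intr_pic_register(dev, xref);
	if (pic == NULL)
		return (ENXIO);
	if (intr_pic_add_handler(sc->mychild_parent_dev, pic,
	    mychild_filter, sc, MYCHILD_IRQ_BASE, MYCHILD_NIRQ) == NULL)
		return (ENXIO);
	return (0);
}
#endif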
903
904 static int
905 intr_resolve_irq(device_t dev, intptr_t xref, struct intr_map_data *data,
906 struct intr_irqsrc **isrc)
907 {
908 struct intr_pic *pic;
909 struct intr_map_data_msi *msi;
910
911 if (data == NULL)
912 return (EINVAL);
913
914 pic = pic_lookup(dev, xref,
915 (data->type == INTR_MAP_DATA_MSI) ? FLAG_MSI : FLAG_PIC);
916 if (pic == NULL)
917 return (ESRCH);
918
919 switch (data->type) {
920 case INTR_MAP_DATA_MSI:
921 KASSERT((pic->pic_flags & FLAG_TYPE_MASK) == FLAG_MSI,
922 ("%s: Found a non-MSI controller: %s", __func__,
923 device_get_name(pic->pic_dev)));
924 msi = (struct intr_map_data_msi *)data;
925 *isrc = msi->isrc;
926 return (0);
927
928 default:
929 KASSERT((pic->pic_flags & FLAG_TYPE_MASK) == FLAG_PIC,
930 ("%s: Found a non-PIC controller: %s", __func__,
931 device_get_name(pic->pic_dev)));
932 return (PIC_MAP_INTR(pic->pic_dev, data, isrc));
933
934 }
935 }
936
937 int
938 intr_activate_irq(device_t dev, struct resource *res)
939 {
940 device_t map_dev;
941 intptr_t map_xref;
942 struct intr_map_data *data;
943 struct intr_irqsrc *isrc;
944 u_int res_id;
945 int error;
946
947 KASSERT(rman_get_start(res) == rman_get_end(res),
948 ("%s: more interrupts in resource", __func__));
949
950 res_id = (u_int)rman_get_start(res);
951 if (intr_map_get_isrc(res_id) != NULL)
952 panic("Attempt to double activation of resource id: %u\n",
953 res_id);
954 intr_map_copy_map_data(res_id, &map_dev, &map_xref, &data);
955 error = intr_resolve_irq(map_dev, map_xref, data, &isrc);
956 if (error != 0) {
957 free(data, M_INTRNG);
958 /* XXX TODO DISCONNECTED PICs */
959 /* if (error == EINVAL) return(0); */
960 return (error);
961 }
962 intr_map_set_isrc(res_id, isrc);
963 rman_set_virtual(res, data);
964 return (PIC_ACTIVATE_INTR(isrc->isrc_dev, isrc, res, data));
965 }
966
967 int
968 intr_deactivate_irq(device_t dev, struct resource *res)
969 {
970 struct intr_map_data *data;
971 struct intr_irqsrc *isrc;
972 u_int res_id;
973 int error;
974
975 KASSERT(rman_get_start(res) == rman_get_end(res),
976 ("%s: more interrupts in resource", __func__));
977
978 res_id = (u_int)rman_get_start(res);
979 isrc = intr_map_get_isrc(res_id);
980 if (isrc == NULL)
981 panic("Attempt to deactivate non-active resource id: %u\n",
982 res_id);
983
984 data = rman_get_virtual(res);
985 error = PIC_DEACTIVATE_INTR(isrc->isrc_dev, isrc, res, data);
986 intr_map_set_isrc(res_id, NULL);
987 rman_set_virtual(res, NULL);
988 free(data, M_INTRNG);
989 return (error);
990 }
991
992 int
993 intr_setup_irq(device_t dev, struct resource *res, driver_filter_t filt,
994 driver_intr_t hand, void *arg, int flags, void **cookiep)
995 {
996 int error;
997 struct intr_map_data *data;
998 struct intr_irqsrc *isrc;
999 const char *name;
1000 u_int res_id;
1001
1002 KASSERT(rman_get_start(res) == rman_get_end(res),
1003 ("%s: more interrupts in resource", __func__));
1004
1005 res_id = (u_int)rman_get_start(res);
1006 isrc = intr_map_get_isrc(res_id);
1007 if (isrc == NULL) {
1008 /* XXX TODO DISCONNECTED PICs */
1009 return (EINVAL);
1010 }
1011
1012 data = rman_get_virtual(res);
1013 name = device_get_nameunit(dev);
1014
1015 #ifdef INTR_SOLO
1016 /*
1017 * Standard handling is done through the MI interrupt framework. However,
1018 * some interrupts may request their own special handling. This
1019 * non-standard handling can be used for interrupt controllers without a
1020 * handler (filter only), so when interrupt controllers are chained, the
1021 * MI interrupt framework is called only in the leaf controller.
1022 *
1023 * Note that the root interrupt controller routine is served as well,
1024 * but in intr_irq_handler(), i.e. the main system dispatch routine.
1025 */
1026 if (flags & INTR_SOLO && hand != NULL) {
1027 debugf("irq %u cannot solo on %s\n", res_id, name);
1028 return (EINVAL);
1029 }
1030
1031 if (flags & INTR_SOLO) {
1032 error = iscr_setup_filter(isrc, name, (intr_irq_filter_t *)filt,
1033 arg, cookiep);
1034 debugf("irq %u setup filter error %d on %s\n", isrc->isrc_irq, error,
1035 name);
1036 } else
1037 #endif
1038 {
1039 error = isrc_add_handler(isrc, name, filt, hand, arg, flags,
1040 cookiep);
1041 debugf("irq %u add handler error %d on %s\n", isrc->isrc_irq, error, name);
1042 }
1043 if (error != 0)
1044 return (error);
1045
1046 mtx_lock(&isrc_table_lock);
1047 error = PIC_SETUP_INTR(isrc->isrc_dev, isrc, res, data);
1048 if (error == 0) {
1049 isrc->isrc_handlers++;
1050 if (isrc->isrc_handlers == 1)
1051 PIC_ENABLE_INTR(isrc->isrc_dev, isrc);
1052 }
1053 mtx_unlock(&isrc_table_lock);
1054 if (error != 0)
1055 intr_event_remove_handler(*cookiep);
1056 return (error);
1057 }
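/*
 * Editorial sketch (not part of the original file): a leaf device driver
 * does not call intr_setup_irq() directly; it uses the standard bus
 * methods, which on INTRNG platforms end up here through BUS_SETUP_INTR().
 * mydev_intr() and the mydev_* softc layout are hypothetical.
 */
#if 0
static int
mydev_attach(device_t dev)
{
	struct mydev_softc *sc = device_get_softc(dev);
	int rid = 0;

	sc->mydev_irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
	    RF_ACTIVE);
	if (sc->mydev_irq_res == NULL)
		return (ENXIO);
	if (bus_setup_intr(dev, sc->mydev_irq_res,
	    INTR_TYPE_MISC | INTR_MPSAFE, NULL, mydev_intr, sc,
	    &sc->mydev_irq_cookie) != 0) {
		bus_release_resource(dev, SYS_RES_IRQ, rid, sc->mydev_irq_res);
		return (ENXIO);
	}
	return (0);
}
#endif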
1058
1059 int
1060 intr_teardown_irq(device_t dev, struct resource *res, void *cookie)
1061 {
1062 int error;
1063 struct intr_map_data *data;
1064 struct intr_irqsrc *isrc;
1065 u_int res_id;
1066
1067 KASSERT(rman_get_start(res) == rman_get_end(res),
1068 ("%s: more interrupts in resource", __func__));
1069
1070 res_id = (u_int)rman_get_start(res);
1071 isrc = intr_map_get_isrc(res_id);
1072 if (isrc == NULL || isrc->isrc_handlers == 0)
1073 return (EINVAL);
1074
1075 data = rman_get_virtual(res);
1076
1077 #ifdef INTR_SOLO
1078 if (isrc->isrc_filter != NULL) {
1079 if (isrc != cookie)
1080 return (EINVAL);
1081
1082 mtx_lock(&isrc_table_lock);
1083 isrc->isrc_filter = NULL;
1084 isrc->isrc_arg = NULL;
1085 isrc->isrc_handlers = 0;
1086 PIC_DISABLE_INTR(isrc->isrc_dev, isrc);
1087 PIC_TEARDOWN_INTR(isrc->isrc_dev, isrc, res, data);
1088 isrc_update_name(isrc, NULL);
1089 mtx_unlock(&isrc_table_lock);
1090 return (0);
1091 }
1092 #endif
1093 if (isrc != intr_handler_source(cookie))
1094 return (EINVAL);
1095
1096 error = intr_event_remove_handler(cookie);
1097 if (error == 0) {
1098 mtx_lock(&isrc_table_lock);
1099 isrc->isrc_handlers--;
1100 if (isrc->isrc_handlers == 0)
1101 PIC_DISABLE_INTR(isrc->isrc_dev, isrc);
1102 PIC_TEARDOWN_INTR(isrc->isrc_dev, isrc, res, data);
1103 intrcnt_updatename(isrc);
1104 mtx_unlock(&isrc_table_lock);
1105 }
1106 return (error);
1107 }
1108
1109 int
1110 intr_describe_irq(device_t dev, struct resource *res, void *cookie,
1111 const char *descr)
1112 {
1113 int error;
1114 struct intr_irqsrc *isrc;
1115 u_int res_id;
1116
1117 KASSERT(rman_get_start(res) == rman_get_end(res),
1118 ("%s: more interrupts in resource", __func__));
1119
1120 res_id = (u_int)rman_get_start(res);
1121 isrc = intr_map_get_isrc(res_id);
1122 if (isrc == NULL || isrc->isrc_handlers == 0)
1123 return (EINVAL);
1124 #ifdef INTR_SOLO
1125 if (isrc->isrc_filter != NULL) {
1126 if (isrc != cookie)
1127 return (EINVAL);
1128
1129 mtx_lock(&isrc_table_lock);
1130 isrc_update_name(isrc, descr);
1131 mtx_unlock(&isrc_table_lock);
1132 return (0);
1133 }
1134 #endif
1135 error = intr_event_describe_handler(isrc->isrc_event, cookie, descr);
1136 if (error == 0) {
1137 mtx_lock(&isrc_table_lock);
1138 intrcnt_updatename(isrc);
1139 mtx_unlock(&isrc_table_lock);
1140 }
1141 return (error);
1142 }
1143
1144 #ifdef SMP
1145 int
1146 intr_bind_irq(device_t dev, struct resource *res, int cpu)
1147 {
1148 struct intr_irqsrc *isrc;
1149 u_int res_id;
1150
1151 KASSERT(rman_get_start(res) == rman_get_end(res),
1152 ("%s: more interrupts in resource", __func__));
1153
1154 res_id = (u_int)rman_get_start(res);
1155 isrc = intr_map_get_isrc(res_id);
1156 if (isrc == NULL || isrc->isrc_handlers == 0)
1157 return (EINVAL);
1158 #ifdef INTR_SOLO
1159 if (isrc->isrc_filter != NULL)
1160 return (intr_isrc_assign_cpu(isrc, cpu));
1161 #endif
1162 return (intr_event_bind(isrc->isrc_event, cpu));
1163 }
1164
1165 /*
1166 * Return the CPU that the next interrupt source should use.
1167 * For now just returns the next CPU according to round-robin.
1168 */
1169 u_int
1170 intr_irq_next_cpu(u_int last_cpu, cpuset_t *cpumask)
1171 {
1172 u_int cpu;
1173
1174 KASSERT(!CPU_EMPTY(cpumask), ("%s: Empty CPU mask", __func__));
1175 if (!irq_assign_cpu || mp_ncpus == 1) {
1176 cpu = PCPU_GET(cpuid);
1177
1178 if (CPU_ISSET(cpu, cpumask))
1179 return (curcpu);
1180
1181 return (CPU_FFS(cpumask) - 1);
1182 }
1183
1184 do {
1185 last_cpu++;
1186 if (last_cpu > mp_maxid)
1187 last_cpu = 0;
1188 } while (!CPU_ISSET(last_cpu, cpumask));
1189 return (last_cpu);
1190 }
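/*
 * Editorial sketch (not part of the original file): a PIC driver's bind
 * method commonly uses intr_irq_next_cpu() to pick a target CPU in
 * round-robin fashion when the source is not explicitly bound.  The static
 * rotor and MYPIC_ROUTE() are hypothetical.
 */
#if 0
static int
mypic_bind_intr(device_t dev, struct intr_irqsrc *isrc)
{
	static u_int rotor;
	u_int cpu;

	if (CPU_EMPTY(&isrc->isrc_cpu)) {
		rotor = intr_irq_next_cpu(rotor, &all_cpus);
		CPU_SETOF(rotor, &isrc->isrc_cpu);
	}
	cpu = CPU_FFS(&isrc->isrc_cpu) - 1;
	return (MYPIC_ROUTE(dev, isrc, cpu));
}
#endif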
1191
1192 /*
1193 * Distribute all the interrupt sources among the available
1194 * CPUs once the AP's have been launched.
1195 */
1196 static void
1197 intr_irq_shuffle(void *arg __unused)
1198 {
1199 struct intr_irqsrc *isrc;
1200 u_int i;
1201
1202 if (mp_ncpus == 1)
1203 return;
1204
1205 mtx_lock(&isrc_table_lock);
1206 irq_assign_cpu = TRUE;
1207 for (i = 0; i < NIRQ; i++) {
1208 isrc = irq_sources[i];
1209 if (isrc == NULL || isrc->isrc_handlers == 0 ||
1210 isrc->isrc_flags & (INTR_ISRCF_PPI | INTR_ISRCF_IPI))
1211 continue;
1212
1213 if (isrc->isrc_event != NULL &&
1214 isrc->isrc_flags & INTR_ISRCF_BOUND &&
1215 isrc->isrc_event->ie_cpu != CPU_FFS(&isrc->isrc_cpu) - 1)
1216 panic("%s: CPU inconsistency", __func__);
1217
1218 if ((isrc->isrc_flags & INTR_ISRCF_BOUND) == 0)
1219 CPU_ZERO(&isrc->isrc_cpu); /* start again */
1220
1221 /*
1222 * We are in wicked position here if the following call fails
1223 * for bound ISRC. The best thing we can do is to clear
1224 * isrc_cpu so inconsistency with ie_cpu will be detectable.
1225 */
1226 if (PIC_BIND_INTR(isrc->isrc_dev, isrc) != 0)
1227 CPU_ZERO(&isrc->isrc_cpu);
1228 }
1229 mtx_unlock(&isrc_table_lock);
1230 }
1231 SYSINIT(intr_irq_shuffle, SI_SUB_SMP, SI_ORDER_SECOND, intr_irq_shuffle, NULL);
1232
1233 #else
1234 u_int
1235 intr_irq_next_cpu(u_int current_cpu, cpuset_t *cpumask)
1236 {
1237
1238 return (PCPU_GET(cpuid));
1239 }
1240 #endif
1241
1242 /*
1243 * Allocate memory for new intr_map_data structure.
1244 * Initialize common fields.
1245 */
1246 struct intr_map_data *
1247 intr_alloc_map_data(enum intr_map_data_type type, size_t len, int flags)
1248 {
1249 struct intr_map_data *data;
1250
1251 data = malloc(len, M_INTRNG, flags);
1252 data->type = type;
1253 data->len = len;
1254 return (data);
1255 }
1256
1257 void intr_free_intr_map_data(struct intr_map_data *data)
1258 {
1259
1260 free(data, M_INTRNG);
1261 }
1262
1263
1264 /*
1265 * Register a MSI/MSI-X interrupt controller
1266 */
1267 int
1268 intr_msi_register(device_t dev, intptr_t xref)
1269 {
1270 struct intr_pic *pic;
1271
1272 if (dev == NULL)
1273 return (EINVAL);
1274 pic = pic_create(dev, xref, FLAG_MSI);
1275 if (pic == NULL)
1276 return (ENOMEM);
1277
1278 debugf("PIC %p registered for %s <dev %p, xref %jx>\n", pic,
1279 device_get_nameunit(dev), dev, (uintmax_t)xref);
1280 return (0);
1281 }
1282
1283 int
1284 intr_alloc_msi(device_t pci, device_t child, intptr_t xref, int count,
1285 int maxcount, int *irqs)
1286 {
1287 struct intr_irqsrc **isrc;
1288 struct intr_pic *pic;
1289 device_t pdev;
1290 struct intr_map_data_msi *msi;
1291 int err, i;
1292
1293 pic = pic_lookup(NULL, xref, FLAG_MSI);
1294 if (pic == NULL)
1295 return (ESRCH);
1296
1297 KASSERT((pic->pic_flags & FLAG_TYPE_MASK) == FLAG_MSI,
1298 ("%s: Found a non-MSI controller: %s", __func__,
1299 device_get_name(pic->pic_dev)));
1300
1301 isrc = malloc(sizeof(*isrc) * count, M_INTRNG, M_WAITOK);
1302 err = MSI_ALLOC_MSI(pic->pic_dev, child, count, maxcount, &pdev, isrc);
1303 if (err != 0) {
1304 free(isrc, M_INTRNG);
1305 return (err);
1306 }
1307
1308 for (i = 0; i < count; i++) {
1309 msi = (struct intr_map_data_msi *)intr_alloc_map_data(
1310 INTR_MAP_DATA_MSI, sizeof(*msi), M_WAITOK | M_ZERO);
1311 msi->isrc = isrc[i];
1312 irqs[i] = intr_map_irq(pic->pic_dev, xref,
1313 (struct intr_map_data *)msi);
1314
1315 }
1316 free(isrc, M_INTRNG);
1317
1318 return (err);
1319 }
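/*
 * Editorial sketch (not part of the original file): a PCI host bridge
 * driver typically implements its pcib_alloc_msi method by forwarding
 * here with the xref of its MSI controller (e.g. taken from an FDT
 * "msi-parent"/"msi-map" property).  mypcib_get_msi_xref() is hypothetical.
 */
#if 0
static int
mypcib_alloc_msi(device_t pcib, device_t child, int count, int maxcount,
    int *irqs)
{

	return (intr_alloc_msi(pcib, child, mypcib_get_msi_xref(pcib),
	    count, maxcount, irqs));
}
#endif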
1320
1321 int
1322 intr_release_msi(device_t pci, device_t child, intptr_t xref, int count,
1323 int *irqs)
1324 {
1325 struct intr_irqsrc **isrc;
1326 struct intr_pic *pic;
1327 struct intr_map_data_msi *msi;
1328 int i, err;
1329
1330 pic = pic_lookup(NULL, xref, FLAG_MSI);
1331 if (pic == NULL)
1332 return (ESRCH);
1333
1334 KASSERT((pic->pic_flags & FLAG_TYPE_MASK) == FLAG_MSI,
1335 ("%s: Found a non-MSI controller: %s", __func__,
1336 device_get_name(pic->pic_dev)));
1337
1338 isrc = malloc(sizeof(*isrc) * count, M_INTRNG, M_WAITOK);
1339
1340 for (i = 0; i < count; i++) {
1341 msi = (struct intr_map_data_msi *)
1342 intr_map_get_map_data(irqs[i]);
1343 KASSERT(msi->hdr.type == INTR_MAP_DATA_MSI,
1344 ("%s: irq %d map data is not MSI", __func__,
1345 irqs[i]));
1346 isrc[i] = msi->isrc;
1347 }
1348
1349 err = MSI_RELEASE_MSI(pic->pic_dev, child, count, isrc);
1350
1351 for (i = 0; i < count; i++) {
1352 if (isrc[i] != NULL)
1353 intr_unmap_irq(irqs[i]);
1354 }
1355
1356 free(isrc, M_INTRNG);
1357 return (err);
1358 }
1359
1360 int
1361 intr_alloc_msix(device_t pci, device_t child, intptr_t xref, int *irq)
1362 {
1363 struct intr_irqsrc *isrc;
1364 struct intr_pic *pic;
1365 device_t pdev;
1366 struct intr_map_data_msi *msi;
1367 int err;
1368
1369 pic = pic_lookup(NULL, xref, FLAG_MSI);
1370 if (pic == NULL)
1371 return (ESRCH);
1372
1373 KASSERT((pic->pic_flags & FLAG_TYPE_MASK) == FLAG_MSI,
1374 ("%s: Found a non-MSI controller: %s", __func__,
1375 device_get_name(pic->pic_dev)));
1376
1377
1378 err = MSI_ALLOC_MSIX(pic->pic_dev, child, &pdev, &isrc);
1379 if (err != 0)
1380 return (err);
1381
1382 msi = (struct intr_map_data_msi *)intr_alloc_map_data(
1383 INTR_MAP_DATA_MSI, sizeof(*msi), M_WAITOK | M_ZERO);
1384 msi->isrc = isrc;
1385 *irq = intr_map_irq(pic->pic_dev, xref, (struct intr_map_data *)msi);
1386 return (0);
1387 }
1388
1389 int
1390 intr_release_msix(device_t pci, device_t child, intptr_t xref, int irq)
1391 {
1392 struct intr_irqsrc *isrc;
1393 struct intr_pic *pic;
1394 struct intr_map_data_msi *msi;
1395 int err;
1396
1397 pic = pic_lookup(NULL, xref, FLAG_MSI);
1398 if (pic == NULL)
1399 return (ESRCH);
1400
1401 KASSERT((pic->pic_flags & FLAG_TYPE_MASK) == FLAG_MSI,
1402 ("%s: Found a non-MSI controller: %s", __func__,
1403 device_get_name(pic->pic_dev)));
1404
1405 msi = (struct intr_map_data_msi *)
1406 intr_map_get_map_data(irq);
1407 KASSERT(msi->hdr.type == INTR_MAP_DATA_MSI,
1408 ("%s: irq %d map data is not MSI", __func__,
1409 irq));
1410 isrc = msi->isrc;
1411 if (isrc == NULL) {
1412 intr_unmap_irq(irq);
1413 return (EINVAL);
1414 }
1415
1416 err = MSI_RELEASE_MSIX(pic->pic_dev, child, isrc);
1417 intr_unmap_irq(irq);
1418
1419 return (err);
1420 }
1421
1422 int
1423 intr_map_msi(device_t pci, device_t child, intptr_t xref, int irq,
1424 uint64_t *addr, uint32_t *data)
1425 {
1426 struct intr_irqsrc *isrc;
1427 struct intr_pic *pic;
1428 int err;
1429
1430 pic = pic_lookup(NULL, xref, FLAG_MSI);
1431 if (pic == NULL)
1432 return (ESRCH);
1433
1434 KASSERT((pic->pic_flags & FLAG_TYPE_MASK) == FLAG_MSI,
1435 ("%s: Found a non-MSI controller: %s", __func__,
1436 device_get_name(pic->pic_dev)));
1437
1438 isrc = intr_map_get_isrc(irq);
1439 if (isrc == NULL)
1440 return (EINVAL);
1441
1442 err = MSI_MAP_MSI(pic->pic_dev, child, isrc, addr, data);
1443 return (err);
1444 }
1445
1446
1447 void dosoftints(void);
1448 void
1449 dosoftints(void)
1450 {
1451 }
1452
1453 #ifdef SMP
1454 /*
1455 * Init interrupt controller on another CPU.
1456 */
1457 void
1458 intr_pic_init_secondary(void)
1459 {
1460
1461 /*
1462 * QQQ: Only root PIC is aware of other CPUs ???
1463 */
1464 KASSERT(intr_irq_root_dev != NULL, ("%s: no root attached", __func__));
1465
1466 //mtx_lock(&isrc_table_lock);
1467 PIC_INIT_SECONDARY(intr_irq_root_dev);
1468 //mtx_unlock(&isrc_table_lock);
1469 }
1470 #endif
1471
1472 #ifdef DDB
1473 DB_SHOW_COMMAND(irqs, db_show_irqs)
1474 {
1475 u_int i, irqsum;
1476 u_long num;
1477 struct intr_irqsrc *isrc;
1478
1479 for (irqsum = 0, i = 0; i < NIRQ; i++) {
1480 isrc = irq_sources[i];
1481 if (isrc == NULL)
1482 continue;
1483
1484 num = isrc->isrc_count != NULL ? isrc->isrc_count[0] : 0;
1485 db_printf("irq%-3u <%s>: cpu %02lx%s cnt %lu\n", i,
1486 isrc->isrc_name, isrc->isrc_cpu.__bits[0],
1487 isrc->isrc_flags & INTR_ISRCF_BOUND ? " (bound)" : "", num);
1488 irqsum += num;
1489 }
1490 db_printf("irq total %u\n", irqsum);
1491 }
1492 #endif
1493
1494 /*
1495 * Interrupt mapping table functions.
1496 *
1497 * Please keep this part separate; it can be transformed into an
1498 * extension of standard resources.
1499 */
1500 struct intr_map_entry
1501 {
1502 device_t dev;
1503 intptr_t xref;
1504 struct intr_map_data *map_data;
1505 struct intr_irqsrc *isrc;
1506 /* XXX TODO DISCONNECTED PICs */
1507 /*int flags */
1508 };
1509
1510 /* XXX Convert irq_map[] to a dynamically expandable one. */
1511 static struct intr_map_entry *irq_map[2 * NIRQ];
1512 static int irq_map_count = nitems(irq_map);
1513 static int irq_map_first_free_idx;
1514 static struct mtx irq_map_lock;
1515
1516 static struct intr_irqsrc *
1517 intr_map_get_isrc(u_int res_id)
1518 {
1519 struct intr_irqsrc *isrc;
1520
1521 mtx_lock(&irq_map_lock);
1522 if ((res_id >= irq_map_count) || (irq_map[res_id] == NULL)) {
1523 mtx_unlock(&irq_map_lock);
1524 return (NULL);
1525 }
1526 isrc = irq_map[res_id]->isrc;
1527 mtx_unlock(&irq_map_lock);
1528 return (isrc);
1529 }
1530
1531 static void
1532 intr_map_set_isrc(u_int res_id, struct intr_irqsrc *isrc)
1533 {
1534
1535 mtx_lock(&irq_map_lock);
1536 if ((res_id >= irq_map_count) || (irq_map[res_id] == NULL)) {
1537 mtx_unlock(&irq_map_lock);
1538 return;
1539 }
1540 irq_map[res_id]->isrc = isrc;
1541 mtx_unlock(&irq_map_lock);
1542 }
1543
1544 /*
1545 * Get the map data pointer of an intr_map_entry.
1546 */
1547 static struct intr_map_data *
1548 intr_map_get_map_data(u_int res_id)
1549 {
1550 struct intr_map_data *data;
1551
1552 data = NULL;
1553 mtx_lock(&irq_map_lock);
1554 if (res_id >= irq_map_count || irq_map[res_id] == NULL)
1555 panic("Attempt to copy invalid resource id: %u\n", res_id);
1556 data = irq_map[res_id]->map_data;
1557 mtx_unlock(&irq_map_lock);
1558
1559 return (data);
1560 }
1561
1562 /*
1563 * Get a copy of intr_map_entry data
1564 */
1565 static void
1566 intr_map_copy_map_data(u_int res_id, device_t *map_dev, intptr_t *map_xref,
1567 struct intr_map_data **data)
1568 {
1569 size_t len;
1570
1571 len = 0;
1572 mtx_lock(&irq_map_lock);
1573 if (res_id >= irq_map_count || irq_map[res_id] == NULL)
1574 panic("Attempt to copy invalid resource id: %u\n", res_id);
1575 if (irq_map[res_id]->map_data != NULL)
1576 len = irq_map[res_id]->map_data->len;
1577 mtx_unlock(&irq_map_lock);
1578
1579 if (len == 0)
1580 *data = NULL;
1581 else
1582 *data = malloc(len, M_INTRNG, M_WAITOK | M_ZERO);
1583 mtx_lock(&irq_map_lock);
1584 if (irq_map[res_id] == NULL)
1585 panic("Attempt to copy invalid resource id: %u\n", res_id);
1586 if (len != 0) {
1587 if (len != irq_map[res_id]->map_data->len)
1588 panic("Resource id: %u has changed.\n", res_id);
1589 memcpy(*data, irq_map[res_id]->map_data, len);
1590 }
1591 *map_dev = irq_map[res_id]->dev;
1592 *map_xref = irq_map[res_id]->xref;
1593 mtx_unlock(&irq_map_lock);
1594 }
1595
1596
1597 /*
1598 * Allocate and fill new entry in irq_map table.
1599 */
1600 u_int
1601 intr_map_irq(device_t dev, intptr_t xref, struct intr_map_data *data)
1602 {
1603 u_int i;
1604 struct intr_map_entry *entry;
1605
1606 /* Prepare new entry first. */
1607 entry = malloc(sizeof(*entry), M_INTRNG, M_WAITOK | M_ZERO);
1608
1609 entry->dev = dev;
1610 entry->xref = xref;
1611 entry->map_data = data;
1612 entry->isrc = NULL;
1613
1614 mtx_lock(&irq_map_lock);
1615 for (i = irq_map_first_free_idx; i < irq_map_count; i++) {
1616 if (irq_map[i] == NULL) {
1617 irq_map[i] = entry;
1618 irq_map_first_free_idx = i + 1;
1619 mtx_unlock(&irq_map_lock);
1620 return (i);
1621 }
1622 }
1623 mtx_unlock(&irq_map_lock);
1624
1625 /* XXX Expand irq_map table */
1626 panic("IRQ mapping table is full.");
1627 }
1628
1629 /*
1630 * Remove and free mapping entry.
1631 */
1632 void
1633 intr_unmap_irq(u_int res_id)
1634 {
1635 struct intr_map_entry *entry;
1636
1637 mtx_lock(&irq_map_lock);
1638 if ((res_id >= irq_map_count) || (irq_map[res_id] == NULL))
1639 panic("Attempt to unmap invalid resource id: %u\n", res_id);
1640 entry = irq_map[res_id];
1641 irq_map[res_id] = NULL;
1642 irq_map_first_free_idx = res_id;
1643 mtx_unlock(&irq_map_lock);
1644 intr_free_intr_map_data(entry->map_data);
1645 free(entry, M_INTRNG);
1646 }
1647
1648 /*
1649 * Clone mapping entry.
1650 */
1651 u_int
1652 intr_map_clone_irq(u_int old_res_id)
1653 {
1654 device_t map_dev;
1655 intptr_t map_xref;
1656 struct intr_map_data *data;
1657
1658 intr_map_copy_map_data(old_res_id, &map_dev, &map_xref, &data);
1659 return (intr_map_irq(map_dev, map_xref, data));
1660 }
1661
1662 static void
1663 intr_map_init(void *dummy __unused)
1664 {
1665
1666 mtx_init(&irq_map_lock, "intr map table", NULL, MTX_DEF);
1667 }
1668 SYSINIT(intr_map_init, SI_SUB_INTR, SI_ORDER_FIRST, intr_map_init, NULL);