1 /*-
2 * Copyright (c) 2015-2016 Svatopluk Kraus
3 * Copyright (c) 2015-2016 Michal Meloun
4 * All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 *
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25 * SUCH DAMAGE.
26 */
27
28 #include "opt_platform.h"
29
30 #include <sys/cdefs.h>
31 __FBSDID("$FreeBSD$");
32
33 #include <sys/param.h>
34 #include <sys/systm.h>
35 #include <sys/syslog.h>
36 #include <sys/kernel.h>
37 #include <sys/malloc.h>
38 #include <sys/proc.h>
39 #include <sys/bus.h>
40 #include <sys/interrupt.h>
41 #include <sys/conf.h>
42 #include <sys/pmc.h>
43 #include <sys/pmckern.h>
44 #include <sys/smp.h>
45
46 #include <machine/atomic.h>
47 #include <machine/bus.h>
48 #include <machine/intr.h>
49 #include <machine/cpu.h>
50 #include <machine/smp.h>
51
52 #include "pic_if.h"
53
54 #ifdef SMP
#define INTR_IPI_NAMELEN	(MAXCOMLEN + 1)

/*
 * Per-IPI bookkeeping: the registered handler, the PIC-specific send
 * method, and the statistics counters for one inter-processor interrupt.
 */
struct intr_ipi {
	intr_ipi_handler_t *	ii_handler;	/* dispatch callback */
	void *			ii_handler_arg;	/* handler arg; trapframe is passed instead when NULL */
	intr_ipi_send_t *	ii_send;	/* controller-specific send method */
	void *			ii_send_arg;	/* argument for ii_send */
	char			ii_name[INTR_IPI_NAMELEN];	/* name used for the counters */
	u_long *		ii_count;	/* per-CPU counters; NULL means slot not set up */
};

/* Table of all supported IPIs, indexed by IPI number. */
static struct intr_ipi ipi_sources[INTR_IPI_COUNT];
67 #endif
68
69 /*
70 * arm_irq_memory_barrier()
71 *
72 * Ensure all writes to device memory have reached devices before proceeding.
73 *
74 * This is intended to be called from the post-filter and post-thread routines
75 * of an interrupt controller implementation. A peripheral device driver should
76 * use bus_space_barrier() if it needs to ensure a write has reached the
77 * hardware for some reason other than clearing interrupt conditions.
78 *
79 * The need for this function arises from the ARM weak memory ordering model.
80 * Writes to locations mapped with the Device attribute bypass any caches, but
81 * are buffered. Multiple writes to the same device will be observed by that
82 * device in the order issued by the cpu. Writes to different devices may
83 * appear at those devices in a different order than issued by the cpu. That
84 * is, if the cpu writes to device A then device B, the write to device B could
85 * complete before the write to device A.
86 *
87 * Consider a typical device interrupt handler which services the interrupt and
88 * writes to a device status-acknowledge register to clear the interrupt before
89 * returning. That write is posted to the L2 controller which "immediately"
90 * places it in a store buffer and automatically drains that buffer. This can
91 * be less immediate than you'd think... There may be no free slots in the store
92 * buffers, so an existing buffer has to be drained first to make room. The
93 * target bus may be busy with other traffic (such as DMA for various devices),
94 * delaying the drain of the store buffer for some indeterminate time. While
95 * all this delay is happening, execution proceeds on the CPU, unwinding its way
96 * out of the interrupt call stack to the point where the interrupt driver code
97 * is ready to EOI and unmask the interrupt. The interrupt controller may be
98 * accessed via a faster bus than the hardware whose handler just ran; the write
99 * to unmask and EOI the interrupt may complete quickly while the device write
100 * to ack and clear the interrupt source is still lingering in a store buffer
101 * waiting for access to a slower bus. With the interrupt unmasked at the
102 * interrupt controller but still active at the device, as soon as interrupts
103 * are enabled on the core the device re-interrupts immediately: now you've got
104 * a spurious interrupt on your hands.
105 *
106 * The right way to fix this problem is for every device driver to use the
107 * proper bus_space_barrier() calls in its interrupt handler. For ARM a single
108 * barrier call at the end of the handler would work. This would have to be
109 * done to every driver in the system, not just arm-specific drivers.
110 *
111 * Another potential fix is to map all device memory as Strongly-Ordered rather
112 * than Device memory, which takes the store buffers out of the picture. This
113 * has a pretty big impact on overall system performance, because each strongly
114 * ordered memory access causes all L2 store buffers to be drained.
115 *
116 * A compromise solution is to have the interrupt controller implementation call
117 * this function to establish a barrier between writes to the interrupt-source
118 * device and writes to the interrupt controller device.
119 *
120 * This takes the interrupt number as an argument, and currently doesn't use it.
121 * The plan is that maybe some day there is a way to flag certain interrupts as
122 * "memory barrier safe" and we can avoid this overhead with them.
123 */
void
arm_irq_memory_barrier(uintptr_t irq)
{
	/* 'irq' is intentionally unused for now; see the comment above. */

	dsb();				/* order prior device writes before the drain */
	cpu_l2cache_drain_writebuf();	/* flush pending L2 store-buffer writes */
}
131
132 #ifdef SMP
133 static inline struct intr_ipi *
134 intr_ipi_lookup(u_int ipi)
135 {
136
137 if (ipi >= INTR_IPI_COUNT)
138 panic("%s: no such IPI %u", __func__, ipi);
139
140 return (&ipi_sources[ipi]);
141 }
142
143 void
144 intr_ipi_dispatch(u_int ipi, struct trapframe *tf)
145 {
146 void *arg;
147 struct intr_ipi *ii;
148
149 ii = intr_ipi_lookup(ipi);
150 if (ii->ii_count == NULL)
151 panic("%s: not setup IPI %u", __func__, ipi);
152
153 intr_ipi_increment_count(ii->ii_count, PCPU_GET(cpuid));
154
155 /*
156 * Supply ipi filter with trapframe argument
157 * if none is registered.
158 */
159 arg = ii->ii_handler_arg != NULL ? ii->ii_handler_arg : tf;
160 ii->ii_handler(arg);
161 }
162
163 void
164 intr_ipi_send(cpuset_t cpus, u_int ipi)
165 {
166 struct intr_ipi *ii;
167
168 ii = intr_ipi_lookup(ipi);
169 if (ii->ii_count == NULL)
170 panic("%s: not setup IPI %u", __func__, ipi);
171
172 ii->ii_send(ii->ii_send_arg, cpus, ipi);
173 }
174
/*
 * Register handler and sender for an IPI slot.
 *
 * A slot may be set up only once: ii_count doubles as the "configured"
 * marker checked by intr_ipi_dispatch()/intr_ipi_send(), so it is
 * assigned last, after all other fields are valid.
 */
void
intr_ipi_setup(u_int ipi, const char *name, intr_ipi_handler_t *hand,
    void *h_arg, intr_ipi_send_t *send, void *s_arg)
{
	struct intr_ipi *ii;

	ii = intr_ipi_lookup(ipi);

	KASSERT(hand != NULL, ("%s: ipi %u no handler", __func__, ipi));
	KASSERT(send != NULL, ("%s: ipi %u no sender", __func__, ipi));
	KASSERT(ii->ii_count == NULL, ("%s: ipi %u reused", __func__, ipi));

	ii->ii_handler = hand;
	ii->ii_handler_arg = h_arg;
	ii->ii_send = send;
	ii->ii_send_arg = s_arg;
	strlcpy(ii->ii_name, name, INTR_IPI_NAMELEN);
	/* Mark the slot live last; counters are allocated under 'name'. */
	ii->ii_count = intr_ipi_setup_counters(name);
}
194
195 /*
196 * Send IPI thru interrupt controller.
197 */
/*
 * Send IPI thru interrupt controller.
 *
 * 'arg' is the interrupt source handed back by PIC_IPI_SETUP(); the
 * root interrupt controller must already be attached.
 */
static void
pic_ipi_send(void *arg, cpuset_t cpus, u_int ipi)
{

	KASSERT(intr_irq_root_dev != NULL, ("%s: no root attached", __func__));
	PIC_IPI_SEND(intr_irq_root_dev, arg, cpus, ipi);
}
205
206 /*
207 * Setup IPI handler on interrupt controller.
208 *
209 * Not SMP coherent.
210 */
211 int
212 intr_pic_ipi_setup(u_int ipi, const char *name, intr_ipi_handler_t *hand,
213 void *arg)
214 {
215 int error;
216 struct intr_irqsrc *isrc;
217
218 KASSERT(intr_irq_root_dev != NULL, ("%s: no root attached", __func__));
219
220 error = PIC_IPI_SETUP(intr_irq_root_dev, ipi, &isrc);
221 if (error != 0)
222 return (error);
223
224 isrc->isrc_handlers++;
225 intr_ipi_setup(ipi, name, hand, arg, pic_ipi_send, isrc);
226 return (0);
227 }
228 #endif
Cache object: 617b147f925f816a79bae2d40a0acee7
|