sys/kern/subr_ipi.c
/*	$NetBSD: subr_ipi.c,v 1.10 2022/04/09 23:51:22 riastradh Exp $	*/

/*-
 * Copyright (c) 2014 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Mindaugas Rasiukevicius.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Inter-processor interrupt (IPI) interface: asynchronous IPIs to
 * invoke functions with a constant argument, and synchronous IPIs
 * with cross-call support.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: subr_ipi.c,v 1.10 2022/04/09 23:51:22 riastradh Exp $");

#include <sys/param.h>
#include <sys/types.h>

#include <sys/atomic.h>
#include <sys/evcnt.h>
#include <sys/cpu.h>
#include <sys/ipi.h>
#include <sys/intr.h>
#include <sys/kcpuset.h>
#include <sys/kmem.h>
#include <sys/lock.h>
#include <sys/mutex.h>

/*
 * An array of the IPI handlers used for asynchronous invocation.
 * The lock protects the slot allocation.
 */

typedef struct {
	ipi_func_t	func;
	void *		arg;
} ipi_intr_t;

static kmutex_t		ipi_mngmt_lock;
static ipi_intr_t	ipi_intrs[IPI_MAXREG]	__cacheline_aligned;

/*
 * Per-CPU mailbox for IPI messages: it is a single cache line storing
 * up to IPI_MSG_MAX messages.  This interface is built on top of the
 * synchronous IPIs.
 */

#define	IPI_MSG_SLOTS	(CACHE_LINE_SIZE / sizeof(ipi_msg_t *))
#define	IPI_MSG_MAX	IPI_MSG_SLOTS

typedef struct {
	ipi_msg_t *	msg[IPI_MSG_SLOTS];
} ipi_mbox_t;
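
/*
 * For example, assuming a 64-byte CACHE_LINE_SIZE and 8-byte pointers
 * (illustrative values; both are machine-dependent), the mailbox holds
 * IPI_MSG_SLOTS = 64 / 8 = 8 message pointers, so up to eight
 * synchronous IPI messages can be pending per CPU before senders must
 * spin-wait for a free slot in put_msg().
 */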


/* Mailboxes for the synchronous IPIs. */
static ipi_mbox_t *	ipi_mboxes	__read_mostly;
static struct evcnt	ipi_mboxfull_ev	__cacheline_aligned;
static void		ipi_msg_cpu_handler(void *);

/* ID of the handler for the synchronous IPIs - it must be zero. */
#define	IPI_SYNCH_ID	0

#ifndef MULTIPROCESSOR
#define	cpu_ipi(ci)	KASSERT(ci == NULL)
#endif

void
ipi_sysinit(void)
{

	mutex_init(&ipi_mngmt_lock, MUTEX_DEFAULT, IPL_NONE);
	memset(ipi_intrs, 0, sizeof(ipi_intrs));

	/*
	 * Register the handler for synchronous IPIs.  This mechanism
	 * is built on top of the asynchronous interface.  Slot zero is
	 * reserved permanently; it is also handy to use zero as a failure
	 * value for other registrations (as it is potentially less
	 * error-prone).
	 */
	ipi_intrs[IPI_SYNCH_ID].func = ipi_msg_cpu_handler;

	evcnt_attach_dynamic(&ipi_mboxfull_ev, EVCNT_TYPE_MISC, NULL,
	    "ipi", "full");
}

void
ipi_percpu_init(void)
{
	const size_t len = ncpu * sizeof(ipi_mbox_t);

	/* Initialise the per-CPU bit fields. */
	for (u_int i = 0; i < ncpu; i++) {
		struct cpu_info *ci = cpu_lookup(i);
		memset(&ci->ci_ipipend, 0, sizeof(ci->ci_ipipend));
	}

	/* Allocate per-CPU IPI mailboxes. */
	ipi_mboxes = kmem_zalloc(len, KM_SLEEP);
	KASSERT(ipi_mboxes != NULL);
}

/*
 * ipi_register: register an asynchronous IPI handler.
 *
 * => Returns the IPI ID, which is greater than zero; zero on failure.
 */
u_int
ipi_register(ipi_func_t func, void *arg)
{
	mutex_enter(&ipi_mngmt_lock);
	for (u_int i = 0; i < IPI_MAXREG; i++) {
		if (ipi_intrs[i].func == NULL) {
			/* Register the function. */
			ipi_intrs[i].func = func;
			ipi_intrs[i].arg = arg;
			mutex_exit(&ipi_mngmt_lock);

			KASSERT(i != IPI_SYNCH_ID);
			return i;
		}
	}
	mutex_exit(&ipi_mngmt_lock);
	printf("WARNING: ipi_register: table full, increase IPI_MAXREG\n");
	return 0;
}
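
/*
 * Illustrative sketch (not part of this file): a subsystem typically
 * registers its handler once at attach time and later triggers it on a
 * remote CPU with preemption disabled.  The names example_handler,
 * example_softc, sc and remote_ci below are hypothetical.
 *
 *	static void
 *	example_handler(void *arg)
 *	{
 *		struct example_softc *sc = arg;
 *
 *		... runs at IPI level on the interrupted CPU ...
 *	}
 *
 *	u_int ipi_id = ipi_register(example_handler, sc);
 *	KASSERT(ipi_id != 0);
 *	...
 *	kpreempt_disable();
 *	ipi_trigger(ipi_id, remote_ci);	... remote_ci != curcpu() ...
 *	kpreempt_enable();
 */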

/*
 * ipi_unregister: release the IPI handler given the ID.
 */
void
ipi_unregister(u_int ipi_id)
{
	ipi_msg_t ipimsg = { .func = __FPTRCAST(ipi_func_t, nullop) };

	KASSERT(ipi_id != IPI_SYNCH_ID);
	KASSERT(ipi_id < IPI_MAXREG);

	/* Release the slot. */
	mutex_enter(&ipi_mngmt_lock);
	KASSERT(ipi_intrs[ipi_id].func != NULL);
	ipi_intrs[ipi_id].func = NULL;

	/* Ensure that there are no IPIs in flight. */
	kpreempt_disable();
	ipi_broadcast(&ipimsg, false);
	ipi_wait(&ipimsg);
	kpreempt_enable();
	mutex_exit(&ipi_mngmt_lock);
}

/*
 * ipi_mark_pending: internal routine to mark an IPI pending on the
 * specified CPU (which might be curcpu()).
 */
static bool
ipi_mark_pending(u_int ipi_id, struct cpu_info *ci)
{
	const u_int i = ipi_id >> IPI_BITW_SHIFT;
	const uint32_t bitm = 1U << (ipi_id & IPI_BITW_MASK);

	KASSERT(ipi_id < IPI_MAXREG);
	KASSERT(kpreempt_disabled());

	/* Mark as pending and return true if not previously marked. */
	if ((atomic_load_acquire(&ci->ci_ipipend[i]) & bitm) == 0) {
#ifndef __HAVE_ATOMIC_AS_MEMBAR
		membar_release();
#endif
		atomic_or_32(&ci->ci_ipipend[i], bitm);
		return true;
	}
	return false;
}
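
/*
 * For example, assuming 32-bit pending words, i.e. an IPI_BITW_SHIFT
 * of 5 and an IPI_BITW_MASK of 31 (illustrative values; the real
 * constants come from the ipi(9) headers), ipi_id 37 maps to word
 * 37 >> 5 = 1 and bit 37 & 31 = 5, so the atomic_or_32() above is
 * equivalent to ci->ci_ipipend[1] |= (1U << 5).
 */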

/*
 * ipi_trigger: asynchronously send an IPI to the specified CPU.
 */
void
ipi_trigger(u_int ipi_id, struct cpu_info *ci)
{

	KASSERT(curcpu() != ci);
	if (ipi_mark_pending(ipi_id, ci)) {
		cpu_ipi(ci);
	}
}

/*
 * ipi_trigger_multi_internal: the guts of ipi_trigger_multi() and
 * ipi_trigger_broadcast().
 */
static void
ipi_trigger_multi_internal(u_int ipi_id, const kcpuset_t *target,
    bool skip_self)
{
	const cpuid_t selfid = cpu_index(curcpu());
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci;

	KASSERT(kpreempt_disabled());
	KASSERT(target != NULL);

	for (CPU_INFO_FOREACH(cii, ci)) {
		const cpuid_t cpuid = cpu_index(ci);

		if (!kcpuset_isset(target, cpuid) || cpuid == selfid) {
			continue;
		}
		ipi_trigger(ipi_id, ci);
	}
	if (!skip_self && kcpuset_isset(target, selfid)) {
		ipi_mark_pending(ipi_id, curcpu());
		int s = splhigh();
		ipi_cpu_handler();
		splx(s);
	}
}

/*
 * ipi_trigger_multi: same as ipi_trigger() but sends to multiple CPUs
 * given the target CPU set.
 */
void
ipi_trigger_multi(u_int ipi_id, const kcpuset_t *target)
{
	ipi_trigger_multi_internal(ipi_id, target, false);
}

/*
 * ipi_trigger_broadcast: same as ipi_trigger_multi() with the target
 * set of kcpuset_attached (i.e. all attached CPUs), optionally
 * skipping the sending CPU.
 */
void
ipi_trigger_broadcast(u_int ipi_id, bool skip_self)
{
	ipi_trigger_multi_internal(ipi_id, kcpuset_attached, skip_self);
}

/*
 * put_msg: insert message into the mailbox.
 *
 * Caller is responsible for issuing membar_release first.
 */
static inline void
put_msg(ipi_mbox_t *mbox, ipi_msg_t *msg)
{
	int count = SPINLOCK_BACKOFF_MIN;
again:
	for (u_int i = 0; i < IPI_MSG_MAX; i++) {
		if (atomic_cas_ptr(&mbox->msg[i], NULL, msg) == NULL) {
			return;
		}
	}

	/* All slots are full: we have to spin-wait. */
	ipi_mboxfull_ev.ev_count++;
	SPINLOCK_BACKOFF(count);
	goto again;
}
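
/*
 * Design note: put_msg() claims a slot lock-free with a
 * compare-and-swap, so concurrent senders do not block each other
 * while free slots remain; only a completely full mailbox forces the
 * exponential spin-backoff, and each such occurrence is counted in
 * ipi_mboxfull_ev (visible via "vmstat -e").
 */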

/*
 * ipi_cpu_handler: the IPI handler.
 */
void
ipi_cpu_handler(void)
{
	struct cpu_info * const ci = curcpu();

	/*
	 * Handle asynchronous IPIs: inspect per-CPU bit field, extract
	 * IPI ID numbers and execute functions in those slots.
	 */
	for (u_int i = 0; i < IPI_BITWORDS; i++) {
		uint32_t pending, bit;

		if (atomic_load_relaxed(&ci->ci_ipipend[i]) == 0) {
			continue;
		}
		pending = atomic_swap_32(&ci->ci_ipipend[i], 0);
#ifndef __HAVE_ATOMIC_AS_MEMBAR
		membar_acquire();
#endif
		while ((bit = ffs(pending)) != 0) {
			const u_int ipi_id = (i << IPI_BITW_SHIFT) | --bit;
			ipi_intr_t *ipi_hdl = &ipi_intrs[ipi_id];

			pending &= ~(1U << bit);
			KASSERT(ipi_hdl->func != NULL);
			ipi_hdl->func(ipi_hdl->arg);
		}
	}
}
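
/*
 * Worked example, continuing the illustrative 32-bit encoding above:
 * if ci_ipipend[1] is swapped out as pending = 0x22 (bits 1 and 5
 * set), ffs() first returns 2, so --bit yields 1 and ipi_id =
 * (1 << 5) | 1 = 33; after clearing that bit, ffs() returns 6, giving
 * ipi_id 37.  The handlers therefore run in ascending slot order.
 */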

/*
 * ipi_msg_cpu_handler: handle synchronous IPIs - iterate mailbox,
 * execute the passed functions and acknowledge the messages.
 */
static void
ipi_msg_cpu_handler(void *arg __unused)
{
	const struct cpu_info * const ci = curcpu();
	ipi_mbox_t *mbox = &ipi_mboxes[cpu_index(ci)];

	for (u_int i = 0; i < IPI_MSG_MAX; i++) {
		ipi_msg_t *msg;

		/* Get the message. */
		if ((msg = atomic_load_acquire(&mbox->msg[i])) == NULL) {
			continue;
		}
		atomic_store_relaxed(&mbox->msg[i], NULL);

		/* Execute the handler. */
		KASSERT(msg->func);
		msg->func(msg->arg);

		/* Ack the request. */
#ifndef __HAVE_ATOMIC_AS_MEMBAR
		membar_release();
#endif
		atomic_dec_uint(&msg->_pending);
	}
}

/*
 * ipi_unicast: send an IPI to a single CPU.
 *
 * => The target CPU must be remote, i.e. it must not be the local CPU.
 * => The caller must ipi_wait() on the message for completion.
 */
void
ipi_unicast(ipi_msg_t *msg, struct cpu_info *ci)
{
	const cpuid_t id = cpu_index(ci);

	KASSERT(msg->func != NULL);
	KASSERT(kpreempt_disabled());
	KASSERT(curcpu() != ci);

	msg->_pending = 1;
#ifndef __HAVE_ATOMIC_AS_MEMBAR
	membar_release();
#endif

	put_msg(&ipi_mboxes[id], msg);
	ipi_trigger(IPI_SYNCH_ID, ci);
}
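
/*
 * Illustrative sketch: a synchronous cross-call to one remote CPU.
 * The message may live on the stack because ipi_wait() does not return
 * until the handler has acknowledged it; example_func, example_arg and
 * remote_ci are hypothetical.
 *
 *	ipi_msg_t msg = { .func = example_func, .arg = example_arg };
 *
 *	kpreempt_disable();
 *	ipi_unicast(&msg, remote_ci);
 *	ipi_wait(&msg);
 *	kpreempt_enable();
 */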

/*
 * ipi_multicast: send an IPI to each CPU in the specified set.
 *
 * => The caller must ipi_wait() on the message for completion.
 */
void
ipi_multicast(ipi_msg_t *msg, const kcpuset_t *target)
{
	const struct cpu_info * const self = curcpu();
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci;
	u_int local;

	KASSERT(msg->func != NULL);
	KASSERT(kpreempt_disabled());

	local = !!kcpuset_isset(target, cpu_index(self));
	msg->_pending = kcpuset_countset(target) - local;
#ifndef __HAVE_ATOMIC_AS_MEMBAR
	membar_release();
#endif

	for (CPU_INFO_FOREACH(cii, ci)) {
		cpuid_t id;

		if (__predict_false(ci == self)) {
			continue;
		}
		id = cpu_index(ci);
		if (!kcpuset_isset(target, id)) {
			continue;
		}
		put_msg(&ipi_mboxes[id], msg);
		ipi_trigger(IPI_SYNCH_ID, ci);
	}
	if (local) {
		msg->func(msg->arg);
	}
}

/*
 * ipi_broadcast: send an IPI to all CPUs.
 *
 * => The caller must ipi_wait() on the message for completion.
 */
void
ipi_broadcast(ipi_msg_t *msg, bool skip_self)
{
	const struct cpu_info * const self = curcpu();
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci;

	KASSERT(msg->func != NULL);
	KASSERT(kpreempt_disabled());

	msg->_pending = ncpu - 1;
#ifndef __HAVE_ATOMIC_AS_MEMBAR
	membar_release();
#endif

	/* Broadcast IPIs for remote CPUs. */
	for (CPU_INFO_FOREACH(cii, ci)) {
		cpuid_t id;

		if (__predict_false(ci == self)) {
			continue;
		}
		id = cpu_index(ci);
		put_msg(&ipi_mboxes[id], msg);
		ipi_trigger(IPI_SYNCH_ID, ci);
	}

	if (!skip_self) {
		/* Finally, execute locally. */
		msg->func(msg->arg);
	}
}

/*
 * ipi_wait: spin-wait until the message is processed.
 */
void
ipi_wait(ipi_msg_t *msg)
{
	int count = SPINLOCK_BACKOFF_MIN;

	while (atomic_load_acquire(&msg->_pending)) {
		KASSERT(atomic_load_relaxed(&msg->_pending) < ncpu);
		SPINLOCK_BACKOFF(count);
	}
}
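
/*
 * Illustrative sketch: broadcasting a synchronous cross-call and
 * waiting for every CPU to process it, as ipi_unregister() does above
 * with a no-op message.  example_flush is a hypothetical handler.
 *
 *	ipi_msg_t msg = { .func = example_flush, .arg = NULL };
 *
 *	kpreempt_disable();
 *	ipi_broadcast(&msg, false);
 *	ipi_wait(&msg);
 *	kpreempt_enable();
 */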