FreeBSD/Linux Kernel Cross Reference
sys/kern/subr_smp.c
1 /*-
2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
3 *
4 * Copyright (c) 2001, John Baldwin <jhb@FreeBSD.org>.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 *
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25 * SUCH DAMAGE.
26 */
27
28 /*
29 * This module holds the global variables and machine independent functions
30 * used for the kernel SMP support.
31 */
32
33 #include <sys/cdefs.h>
34 __FBSDID("$FreeBSD$");
35
36 #include <sys/param.h>
37 #include <sys/systm.h>
38 #include <sys/kernel.h>
39 #include <sys/ktr.h>
40 #include <sys/proc.h>
41 #include <sys/bus.h>
42 #include <sys/lock.h>
43 #include <sys/malloc.h>
44 #include <sys/mutex.h>
45 #include <sys/pcpu.h>
46 #include <sys/sched.h>
47 #include <sys/smp.h>
48 #include <sys/sysctl.h>
49
50 #include <machine/cpu.h>
51 #include <machine/smp.h>
52
53 #include "opt_sched.h"
54
55 #ifdef SMP
56 MALLOC_DEFINE(M_TOPO, "toponodes", "SMP topology data");
57
58 volatile cpuset_t stopped_cpus;
59 volatile cpuset_t started_cpus;
60 volatile cpuset_t suspended_cpus;
61 cpuset_t hlt_cpus_mask;
62 cpuset_t logical_cpus_mask;
63
64 void (*cpustop_restartfunc)(void);
65 #endif
66
67 static int sysctl_kern_smp_active(SYSCTL_HANDLER_ARGS);
68
69 /* This is used in modules that need to work in both SMP and UP. */
70 cpuset_t all_cpus;
71
72 int mp_ncpus;
73 /* export this for libkvm consumers. */
74 int mp_maxcpus = MAXCPU;
75
76 volatile int smp_started;
77 u_int mp_maxid;
78
79 static SYSCTL_NODE(_kern, OID_AUTO, smp,
80 CTLFLAG_RD | CTLFLAG_CAPRD | CTLFLAG_MPSAFE, NULL,
81 "Kernel SMP");
82
83 SYSCTL_INT(_kern_smp, OID_AUTO, maxid, CTLFLAG_RD|CTLFLAG_CAPRD, &mp_maxid, 0,
84 "Max CPU ID.");
85
86 SYSCTL_INT(_kern_smp, OID_AUTO, maxcpus, CTLFLAG_RD|CTLFLAG_CAPRD, &mp_maxcpus,
87 0, "Max number of CPUs that the system was compiled for.");
88
89 SYSCTL_PROC(_kern_smp, OID_AUTO, active, CTLFLAG_RD|CTLTYPE_INT|CTLFLAG_MPSAFE,
90 NULL, 0, sysctl_kern_smp_active, "I",
91 "Indicates system is running in SMP mode");
92
93 int smp_disabled = 0; /* has smp been disabled? */
94 SYSCTL_INT(_kern_smp, OID_AUTO, disabled, CTLFLAG_RDTUN|CTLFLAG_CAPRD,
95 &smp_disabled, 0, "SMP has been disabled from the loader");
96
97 int smp_cpus = 1; /* how many cpu's running */
98 SYSCTL_INT(_kern_smp, OID_AUTO, cpus, CTLFLAG_RD|CTLFLAG_CAPRD, &smp_cpus, 0,
99 "Number of CPUs online");
100
101 int smp_threads_per_core = 1; /* how many SMT threads are running per core */
102 SYSCTL_INT(_kern_smp, OID_AUTO, threads_per_core, CTLFLAG_RD|CTLFLAG_CAPRD,
103 &smp_threads_per_core, 0, "Number of SMT threads online per core");
104
105 int mp_ncores = -1; /* how many physical cores running */
106 SYSCTL_INT(_kern_smp, OID_AUTO, cores, CTLFLAG_RD|CTLFLAG_CAPRD, &mp_ncores, 0,
107 "Number of physical cores online");
108
109 int smp_topology = 0; /* Which topology we're using. */
110 SYSCTL_INT(_kern_smp, OID_AUTO, topology, CTLFLAG_RDTUN, &smp_topology, 0,
111 "Topology override setting; 0 is default provided by hardware.");
112
113 #ifdef SMP
114 /* Enable forwarding of a signal to a process running on a different CPU */
115 static int forward_signal_enabled = 1;
116 SYSCTL_INT(_kern_smp, OID_AUTO, forward_signal_enabled, CTLFLAG_RW,
117 &forward_signal_enabled, 0,
118 "Forwarding of a signal to a process on a different CPU");
119
120 /* Variables needed for SMP rendezvous. */
121 static volatile int smp_rv_ncpus;
122 static void (*volatile smp_rv_setup_func)(void *arg);
123 static void (*volatile smp_rv_action_func)(void *arg);
124 static void (*volatile smp_rv_teardown_func)(void *arg);
125 static void *volatile smp_rv_func_arg;
126 static volatile int smp_rv_waiters[4];
127
128 /*
129 * Shared mutex to restrict busywaits between smp_rendezvous() and
130 * smp(_targeted)_tlb_shootdown(). A deadlock occurs if both of these
131 * functions trigger at once and cause multiple CPUs to busywait with
132 * interrupts disabled.
133 */
134 struct mtx smp_ipi_mtx;
135
136 /*
137 * Let the MD SMP code initialize mp_maxid very early if it can.
138 */
139 static void
140 mp_setmaxid(void *dummy)
141 {
142
143 cpu_mp_setmaxid();
144
145 KASSERT(mp_ncpus >= 1, ("%s: CPU count < 1", __func__));
146 KASSERT(mp_ncpus > 1 || mp_maxid == 0,
147 ("%s: one CPU but mp_maxid is not zero", __func__));
148 KASSERT(mp_maxid >= mp_ncpus - 1,
149 ("%s: counters out of sync: max %d, count %d", __func__,
150 mp_maxid, mp_ncpus));
151 }
152 SYSINIT(cpu_mp_setmaxid, SI_SUB_TUNABLES, SI_ORDER_FIRST, mp_setmaxid, NULL);
153
154 /*
155 * Call the MD SMP initialization code.
156 */
157 static void
158 mp_start(void *dummy)
159 {
160
161 mtx_init(&smp_ipi_mtx, "smp rendezvous", NULL, MTX_SPIN);
162
163 /* Probe for MP hardware. */
164 if (smp_disabled != 0 || cpu_mp_probe() == 0) {
165 mp_ncores = 1;
166 mp_ncpus = 1;
167 CPU_SETOF(PCPU_GET(cpuid), &all_cpus);
168 return;
169 }
170
171 cpu_mp_start();
172 printf("FreeBSD/SMP: Multiprocessor System Detected: %d CPUs\n",
173 mp_ncpus);
174
175 /* Provide a default for most architectures that don't have SMT/HTT. */
176 if (mp_ncores < 0)
177 mp_ncores = mp_ncpus;
178
179 cpu_mp_announce();
180 }
181 SYSINIT(cpu_mp, SI_SUB_CPU, SI_ORDER_THIRD, mp_start, NULL);
182
183 void
184 forward_signal(struct thread *td)
185 {
186 int id;
187
188 /*
189 * signotify() has already set TDF_ASTPENDING and TDF_NEEDSIGCHECK on
190 * this thread, so all we need to do is poke it if it is currently
191 * executing so that it executes ast().
192 */
193 THREAD_LOCK_ASSERT(td, MA_OWNED);
194 KASSERT(TD_IS_RUNNING(td),
195 ("forward_signal: thread is not TDS_RUNNING"));
196
197 CTR1(KTR_SMP, "forward_signal(%p)", td->td_proc);
198
199 if (!smp_started || cold || KERNEL_PANICKED())
200 return;
201 if (!forward_signal_enabled)
202 return;
203
204 /* No need to IPI ourself. */
205 if (td == curthread)
206 return;
207
208 id = td->td_oncpu;
209 if (id == NOCPU)
210 return;
211 ipi_cpu(id, IPI_AST);
212 }
213
214 /*
215 * When called, the executing CPU will send an IPI to all other CPUs
216 * requesting that they halt execution.
217 *
218 * Usually (but not necessarily) called with 'other_cpus' as its arg.
219 *
220 * - Signals all CPUs in map to stop.
221 * - Waits for each to stop.
222 *
223 * Returns:
224 * -1: error (not currently returned)
225 * 0: SMP not yet started; nothing was done
226 * 1: ok
227 *
228 */
229 #if defined(__amd64__) || defined(__i386__)
230 #define X86 1
231 #else
232 #define X86 0
233 #endif
234 static int
235 generic_stop_cpus(cpuset_t map, u_int type)
236 {
237 #ifdef KTR
238 char cpusetbuf[CPUSETBUFSIZ];
239 #endif
240 static volatile u_int stopping_cpu = NOCPU;
241 int i;
242 volatile cpuset_t *cpus;
243
244 KASSERT(
245 type == IPI_STOP || type == IPI_STOP_HARD
246 #if X86
247 || type == IPI_SUSPEND
248 #endif
249 , ("%s: invalid stop type", __func__));
250
251 if (!smp_started)
252 return (0);
253
254 CTR2(KTR_SMP, "stop_cpus(%s) with %u type",
255 cpusetobj_strprint(cpusetbuf, &map), type);
256
257 #if X86
258 /*
259 * When suspending, ensure there are no IPIs in progress.
260 * IPIs that have been issued, but not yet delivered (e.g.
261 * not pending on a vCPU when running under virtualization)
262 * will be lost, violating FreeBSD's assumption of reliable
263 * IPI delivery.
264 */
265 if (type == IPI_SUSPEND)
266 mtx_lock_spin(&smp_ipi_mtx);
267 #endif
268
269 #if X86
270 if (!nmi_is_broadcast || nmi_kdb_lock == 0) {
271 #endif
272 if (stopping_cpu != PCPU_GET(cpuid))
273 while (atomic_cmpset_int(&stopping_cpu, NOCPU,
274 PCPU_GET(cpuid)) == 0)
275 while (stopping_cpu != NOCPU)
276 cpu_spinwait(); /* spin */
277
278 /* send the stop IPI to all CPUs in map */
279 ipi_selected(map, type);
280 #if X86
281 }
282 #endif
283
284 #if X86
285 if (type == IPI_SUSPEND)
286 cpus = &suspended_cpus;
287 else
288 #endif
289 cpus = &stopped_cpus;
290
291 i = 0;
292 while (!CPU_SUBSET(cpus, &map)) {
293 /* spin */
294 cpu_spinwait();
295 i++;
296 if (i == 100000000) {
297 printf("timeout stopping cpus\n");
298 break;
299 }
300 }
301
302 #if X86
303 if (type == IPI_SUSPEND)
304 mtx_unlock_spin(&smp_ipi_mtx);
305 #endif
306
307 stopping_cpu = NOCPU;
308 return (1);
309 }
310
311 int
312 stop_cpus(cpuset_t map)
313 {
314
315 return (generic_stop_cpus(map, IPI_STOP));
316 }
317
318 int
319 stop_cpus_hard(cpuset_t map)
320 {
321
322 return (generic_stop_cpus(map, IPI_STOP_HARD));
323 }
324
325 #if X86
326 int
327 suspend_cpus(cpuset_t map)
328 {
329
330 return (generic_stop_cpus(map, IPI_SUSPEND));
331 }
332 #endif
333
334 /*
335 * Called by a CPU to restart stopped CPUs.
336 *
337 * Usually (but not necessarily) called with 'stopped_cpus' as its arg.
338 *
339 * - Signals all CPUs in map to restart.
340 * - Waits for each to restart.
341 *
342 * Returns:
343 * -1: error (not currently returned)
344 * 0: SMP not yet started; nothing was done
345 * 1: ok
346 */
347 static int
348 generic_restart_cpus(cpuset_t map, u_int type)
349 {
350 #ifdef KTR
351 char cpusetbuf[CPUSETBUFSIZ];
352 #endif
353 volatile cpuset_t *cpus;
354
355 #if X86
356 KASSERT(type == IPI_STOP || type == IPI_STOP_HARD
357 || type == IPI_SUSPEND, ("%s: invalid stop type", __func__));
358
359 if (!smp_started)
360 return (0);
361
362 CTR1(KTR_SMP, "restart_cpus(%s)", cpusetobj_strprint(cpusetbuf, &map));
363
364 if (type == IPI_SUSPEND)
365 cpus = &resuming_cpus;
366 else
367 cpus = &stopped_cpus;
368
369 /* signal other cpus to restart */
370 if (type == IPI_SUSPEND)
371 CPU_COPY_STORE_REL(&map, &toresume_cpus);
372 else
373 CPU_COPY_STORE_REL(&map, &started_cpus);
374
375 /*
376 * Wake up any CPUs stopped with MWAIT. From MI code we can't tell if
377 * MONITOR/MWAIT is enabled, but the potentially redundant writes are
378 * relatively inexpensive.
379 */
380 if (type == IPI_STOP) {
381 struct monitorbuf *mb;
382 u_int id;
383
384 CPU_FOREACH(id) {
385 if (!CPU_ISSET(id, &map))
386 continue;
387
388 mb = &pcpu_find(id)->pc_monitorbuf;
389 atomic_store_int(&mb->stop_state,
390 MONITOR_STOPSTATE_RUNNING);
391 }
392 }
393
394 if (!nmi_is_broadcast || nmi_kdb_lock == 0) {
395 /* wait for each to clear its bit */
396 while (CPU_OVERLAP(cpus, &map))
397 cpu_spinwait();
398 }
399 #else /* !X86 */
400 KASSERT(type == IPI_STOP || type == IPI_STOP_HARD,
401 ("%s: invalid stop type", __func__));
402
403 if (!smp_started)
404 return (0);
405
406 CTR1(KTR_SMP, "restart_cpus(%s)", cpusetobj_strprint(cpusetbuf, &map));
407
408 cpus = &stopped_cpus;
409
410 /* signal other cpus to restart */
411 CPU_COPY_STORE_REL(&map, &started_cpus);
412
413 /* wait for each to clear its bit */
414 while (CPU_OVERLAP(cpus, &map))
415 cpu_spinwait();
416 #endif
417 return (1);
418 }
419
420 int
421 restart_cpus(cpuset_t map)
422 {
423
424 return (generic_restart_cpus(map, IPI_STOP));
425 }
426
427 #if X86
428 int
429 resume_cpus(cpuset_t map)
430 {
431
432 return (generic_restart_cpus(map, IPI_SUSPEND));
433 }
434 #endif
435 #undef X86
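/*
 * Example (illustrative sketch; the example_ function name is hypothetical):
 * a debugger-style caller might park every other CPU around a delicate
 * operation and then release them, pairing stop_cpus() with restart_cpus()
 * on 'stopped_cpus' as the comments above describe.
 */
#if 0
static void
example_freeze_world(void)
{
	cpuset_t map;

	/* Stop every CPU except ourselves. */
	map = all_cpus;
	CPU_CLR(PCPU_GET(cpuid), &map);
	stop_cpus(map);

	/* ... inspect or modify global state while the others spin ... */

	/* Release the stopped CPUs. */
	restart_cpus(stopped_cpus);
}
#endif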
436
437 /*
438 * All-CPU rendezvous. CPUs are signalled, all execute the setup function
439 * (if specified), rendezvous, execute the action function (if specified),
440 * rendezvous again, execute the teardown function (if specified), and then
441 * resume.
442 *
443 * Note that the supplied external functions _must_ be reentrant and aware
444 * that they are running in parallel and in an unknown lock context.
445 */
446 void
447 smp_rendezvous_action(void)
448 {
449 struct thread *td;
450 void *local_func_arg;
451 void (*local_setup_func)(void*);
452 void (*local_action_func)(void*);
453 void (*local_teardown_func)(void*);
454 #ifdef INVARIANTS
455 int owepreempt;
456 #endif
457
458 /* Ensure we have up-to-date values. */
459 atomic_add_acq_int(&smp_rv_waiters[0], 1);
460 while (smp_rv_waiters[0] < smp_rv_ncpus)
461 cpu_spinwait();
462
463 /* Fetch rendezvous parameters after acquire barrier. */
464 local_func_arg = smp_rv_func_arg;
465 local_setup_func = smp_rv_setup_func;
466 local_action_func = smp_rv_action_func;
467 local_teardown_func = smp_rv_teardown_func;
468
469 /*
470 * Use a nested critical section to prevent any preemptions
471 * from occurring during a rendezvous action routine.
472 * Specifically, if a rendezvous handler is invoked via an IPI
473 * and the interrupted thread was in the critical_exit()
474 * function after setting td_critnest to 0 but before
475 * performing a deferred preemption, this routine can be
476 * invoked with td_critnest set to 0 and td_owepreempt true.
477 * In that case, a critical_exit() during the rendezvous
478 * action would trigger a preemption which is not permitted in
479 * a rendezvous action. To fix this, wrap all of the
480 * rendezvous action handlers in a critical section. We
481 * cannot use a regular critical section however as having
482 * critical_exit() preempt from this routine would also be
483 * problematic (the preemption must not occur before the IPI
484 * has been acknowledged via an EOI). Instead, we
485 * intentionally ignore td_owepreempt when leaving the
486 * critical section. This should be harmless because we do
487 * not permit rendezvous action routines to schedule threads,
488 * and thus td_owepreempt should never transition from 0 to 1
489 * during this routine.
490 */
491 td = curthread;
492 td->td_critnest++;
493 #ifdef INVARIANTS
494 owepreempt = td->td_owepreempt;
495 #endif
496
497 /*
498 * If requested, run a setup function before the main action
499 * function. Ensure all CPUs have completed the setup
500 * function before moving on to the action function.
501 */
502 if (local_setup_func != smp_no_rendezvous_barrier) {
503 if (local_setup_func != NULL)
504 local_setup_func(local_func_arg);
505 atomic_add_int(&smp_rv_waiters[1], 1);
506 while (smp_rv_waiters[1] < smp_rv_ncpus)
507 cpu_spinwait();
508 }
509
510 if (local_action_func != NULL)
511 local_action_func(local_func_arg);
512
513 if (local_teardown_func != smp_no_rendezvous_barrier) {
514 /*
515 * Signal that the main action has been completed. If a
516 * full exit rendezvous is requested, then all CPUs will
517 * wait here until all CPUs have finished the main action.
518 */
519 atomic_add_int(&smp_rv_waiters[2], 1);
520 while (smp_rv_waiters[2] < smp_rv_ncpus)
521 cpu_spinwait();
522
523 if (local_teardown_func != NULL)
524 local_teardown_func(local_func_arg);
525 }
526
527 /*
528 * Signal that the rendezvous is fully completed by this CPU.
529 * This means that no member of smp_rv_* pseudo-structure will be
530 * accessed by this target CPU after this point; in particular,
531 * memory pointed by smp_rv_func_arg.
532 *
533 * The release semantic ensures that all accesses performed by
534 * the current CPU are visible when smp_rendezvous_cpus()
535 * returns, by synchronizing with the
536 * atomic_load_acq_int(&smp_rv_waiters[3]).
537 */
538 atomic_add_rel_int(&smp_rv_waiters[3], 1);
539
540 td->td_critnest--;
541 KASSERT(owepreempt == td->td_owepreempt,
542 ("rendezvous action changed td_owepreempt"));
543 }
544
545 void
546 smp_rendezvous_cpus(cpuset_t map,
547 void (* setup_func)(void *),
548 void (* action_func)(void *),
549 void (* teardown_func)(void *),
550 void *arg)
551 {
552 int curcpumap, i, ncpus = 0;
553
554 /* See comments in the !SMP case. */
555 if (!smp_started) {
556 spinlock_enter();
557 if (setup_func != NULL)
558 setup_func(arg);
559 if (action_func != NULL)
560 action_func(arg);
561 if (teardown_func != NULL)
562 teardown_func(arg);
563 spinlock_exit();
564 return;
565 }
566
567 /*
568 * Make sure we come here with interrupts enabled. Otherwise we
569 * livelock if smp_ipi_mtx is owned by a thread which sent us an IPI.
570 */
571 MPASS(curthread->td_md.md_spinlock_count == 0);
572
573 CPU_FOREACH(i) {
574 if (CPU_ISSET(i, &map))
575 ncpus++;
576 }
577 if (ncpus == 0)
578 panic("ncpus is 0 with non-zero map");
579
580 mtx_lock_spin(&smp_ipi_mtx);
581
582 /* Pass rendezvous parameters via global variables. */
583 smp_rv_ncpus = ncpus;
584 smp_rv_setup_func = setup_func;
585 smp_rv_action_func = action_func;
586 smp_rv_teardown_func = teardown_func;
587 smp_rv_func_arg = arg;
588 smp_rv_waiters[1] = 0;
589 smp_rv_waiters[2] = 0;
590 smp_rv_waiters[3] = 0;
591 atomic_store_rel_int(&smp_rv_waiters[0], 0);
592
593 /*
594 * Signal other processors, which will enter the IPI with
595 * interrupts off.
596 */
597 curcpumap = CPU_ISSET(curcpu, &map);
598 CPU_CLR(curcpu, &map);
599 ipi_selected(map, IPI_RENDEZVOUS);
600
601 /* Check if the current CPU is in the map */
602 if (curcpumap != 0)
603 smp_rendezvous_action();
604
605 /*
606 * Ensure that the master CPU waits for all the other
607 * CPUs to finish the rendezvous, so that smp_rv_*
608 * pseudo-structure and the arg are guaranteed to not
609 * be in use.
610 *
611 * Load acquire synchronizes with the release add in
612 * smp_rendezvous_action(), which ensures that our caller sees
613 * all memory actions done by the called functions on other
614 * CPUs.
615 */
616 while (atomic_load_acq_int(&smp_rv_waiters[3]) < ncpus)
617 cpu_spinwait();
618
619 mtx_unlock_spin(&smp_ipi_mtx);
620 }
621
622 void
623 smp_rendezvous(void (* setup_func)(void *),
624 void (* action_func)(void *),
625 void (* teardown_func)(void *),
626 void *arg)
627 {
628 smp_rendezvous_cpus(all_cpus, setup_func, action_func, teardown_func, arg);
629 }
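/*
 * Example (illustrative sketch; the example_ names are hypothetical):
 * running an action on every CPU with no setup or teardown barrier.  The
 * action executes in IPI context with interrupts disabled, so it must not
 * sleep or acquire non-spin locks.
 */
#if 0
static void
example_flush_action(void *arg __unused)
{

	/* Runs once on each CPU, in parallel. */
	/* ... flush this CPU's private state here ... */
}

static void
example_flush_all_cpus(void)
{

	smp_rendezvous(smp_no_rendezvous_barrier, example_flush_action,
	    smp_no_rendezvous_barrier, NULL);
}
#endif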
630
631 static struct cpu_group group[MAXCPU * MAX_CACHE_LEVELS + 1];
632
633 struct cpu_group *
634 smp_topo(void)
635 {
636 char cpusetbuf[CPUSETBUFSIZ], cpusetbuf2[CPUSETBUFSIZ];
637 struct cpu_group *top;
638
639 /*
640 * Check for a fake topology request for debugging purposes.
641 */
642 switch (smp_topology) {
643 case 1:
644 /* Dual core with no sharing. */
645 top = smp_topo_1level(CG_SHARE_NONE, 2, 0);
646 break;
647 case 2:
648 /* No topology, all cpus are equal. */
649 top = smp_topo_none();
650 break;
651 case 3:
652 /* Dual core with shared L2. */
653 top = smp_topo_1level(CG_SHARE_L2, 2, 0);
654 break;
655 case 4:
656 /* Quad core, shared L3 among each package, private L2. */
657 top = smp_topo_1level(CG_SHARE_L3, 4, 0);
658 break;
659 case 5:
660 /* Quad core, 2 dual-core parts on each package share L2. */
661 top = smp_topo_2level(CG_SHARE_NONE, 2, CG_SHARE_L2, 2, 0);
662 break;
663 case 6:
664 /* Single-core 2xHTT */
665 top = smp_topo_1level(CG_SHARE_L1, 2, CG_FLAG_HTT);
666 break;
667 case 7:
668 /* Quad core with a shared L3, 8 threads sharing L2. */
669 top = smp_topo_2level(CG_SHARE_L3, 4, CG_SHARE_L2, 8,
670 CG_FLAG_SMT);
671 break;
672 default:
673 /* Default, ask the system what it wants. */
674 top = cpu_topo();
675 break;
676 }
677 /*
678 * Verify the returned topology.
679 */
680 if (top->cg_count != mp_ncpus)
681 panic("Built bad topology at %p. CPU count %d != %d",
682 top, top->cg_count, mp_ncpus);
683 if (CPU_CMP(&top->cg_mask, &all_cpus))
684 panic("Built bad topology at %p. CPU mask (%s) != (%s)",
685 top, cpusetobj_strprint(cpusetbuf, &top->cg_mask),
686 cpusetobj_strprint(cpusetbuf2, &all_cpus));
687
688 /*
689 * Collapse nonsense levels that may be created out of convenience by
690 * the MD layers. They cause extra work in the search functions.
691 */
692 while (top->cg_children == 1) {
693 top = &top->cg_child[0];
694 top->cg_parent = NULL;
695 }
696 return (top);
697 }
698
699 struct cpu_group *
700 smp_topo_alloc(u_int count)
701 {
702 static u_int index;
703 u_int curr;
704
705 curr = index;
706 index += count;
707 return (&group[curr]);
708 }
709
710 struct cpu_group *
711 smp_topo_none(void)
712 {
713 struct cpu_group *top;
714
715 top = &group[0];
716 top->cg_parent = NULL;
717 top->cg_child = NULL;
718 top->cg_mask = all_cpus;
719 top->cg_count = mp_ncpus;
720 top->cg_children = 0;
721 top->cg_level = CG_SHARE_NONE;
722 top->cg_flags = 0;
723
724 return (top);
725 }
726
727 static int
728 smp_topo_addleaf(struct cpu_group *parent, struct cpu_group *child, int share,
729 int count, int flags, int start)
730 {
731 char cpusetbuf[CPUSETBUFSIZ], cpusetbuf2[CPUSETBUFSIZ];
732 cpuset_t mask;
733 int i;
734
735 CPU_ZERO(&mask);
736 for (i = 0; i < count; i++, start++)
737 CPU_SET(start, &mask);
738 child->cg_parent = parent;
739 child->cg_child = NULL;
740 child->cg_children = 0;
741 child->cg_level = share;
742 child->cg_count = count;
743 child->cg_flags = flags;
744 child->cg_mask = mask;
745 parent->cg_children++;
746 for (; parent != NULL; parent = parent->cg_parent) {
747 if (CPU_OVERLAP(&parent->cg_mask, &child->cg_mask))
748 panic("Duplicate children in %p. mask (%s) child (%s)",
749 parent,
750 cpusetobj_strprint(cpusetbuf, &parent->cg_mask),
751 cpusetobj_strprint(cpusetbuf2, &child->cg_mask));
752 CPU_OR(&parent->cg_mask, &child->cg_mask);
753 parent->cg_count += child->cg_count;
754 }
755
756 return (start);
757 }
758
759 struct cpu_group *
760 smp_topo_1level(int share, int count, int flags)
761 {
762 struct cpu_group *child;
763 struct cpu_group *top;
764 int packages;
765 int cpu;
766 int i;
767
768 cpu = 0;
769 top = &group[0];
770 packages = mp_ncpus / count;
771 top->cg_child = child = &group[1];
772 top->cg_level = CG_SHARE_NONE;
773 for (i = 0; i < packages; i++, child++)
774 cpu = smp_topo_addleaf(top, child, share, count, flags, cpu);
775 return (top);
776 }
777
778 struct cpu_group *
779 smp_topo_2level(int l2share, int l2count, int l1share, int l1count,
780 int l1flags)
781 {
782 struct cpu_group *top;
783 struct cpu_group *l1g;
784 struct cpu_group *l2g;
785 int cpu;
786 int i;
787 int j;
788
789 cpu = 0;
790 top = &group[0];
791 l2g = &group[1];
792 top->cg_child = l2g;
793 top->cg_level = CG_SHARE_NONE;
794 top->cg_children = mp_ncpus / (l2count * l1count);
795 l1g = l2g + top->cg_children;
796 for (i = 0; i < top->cg_children; i++, l2g++) {
797 l2g->cg_parent = top;
798 l2g->cg_child = l1g;
799 l2g->cg_level = l2share;
800 for (j = 0; j < l2count; j++, l1g++)
801 cpu = smp_topo_addleaf(l2g, l1g, l1share, l1count,
802 l1flags, cpu);
803 }
804 return (top);
805 }
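/*
 * Example (illustrative): on a 16-CPU system,
 *
 *	top = smp_topo_2level(CG_SHARE_L3, 2, CG_SHARE_L2, 4, 0);
 *
 * builds a root group of 16 CPUs (CG_SHARE_NONE) with two children of
 * 8 CPUs each sharing an L3, and each of those with two leaves of 4 CPUs
 * sharing an L2: 2 packages x 2 L2 groups x 4 CPUs.
 */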
806
807 struct cpu_group *
808 smp_topo_find(struct cpu_group *top, int cpu)
809 {
810 struct cpu_group *cg;
811 cpuset_t mask;
812 int children;
813 int i;
814
815 CPU_SETOF(cpu, &mask);
816 cg = top;
817 for (;;) {
818 if (!CPU_OVERLAP(&cg->cg_mask, &mask))
819 return (NULL);
820 if (cg->cg_children == 0)
821 return (cg);
822 children = cg->cg_children;
823 for (i = 0, cg = cg->cg_child; i < children; cg++, i++)
824 if (CPU_OVERLAP(&cg->cg_mask, &mask))
825 break;
826 }
827 return (NULL);
828 }
829 #else /* !SMP */
830
831 void
832 smp_rendezvous_cpus(cpuset_t map,
833 void (*setup_func)(void *),
834 void (*action_func)(void *),
835 void (*teardown_func)(void *),
836 void *arg)
837 {
838 /*
839 * In the !SMP case we just need to ensure the same initial conditions
840 * as the SMP case.
841 */
842 spinlock_enter();
843 if (setup_func != NULL)
844 setup_func(arg);
845 if (action_func != NULL)
846 action_func(arg);
847 if (teardown_func != NULL)
848 teardown_func(arg);
849 spinlock_exit();
850 }
851
852 void
853 smp_rendezvous(void (*setup_func)(void *),
854 void (*action_func)(void *),
855 void (*teardown_func)(void *),
856 void *arg)
857 {
858
859 smp_rendezvous_cpus(all_cpus, setup_func, action_func, teardown_func,
860 arg);
861 }
862
863 /*
864 * Provide dummy SMP support for UP kernels. Modules that need to use SMP
865 * APIs will still work using this dummy support.
866 */
867 static void
868 mp_setvariables_for_up(void *dummy)
869 {
870 mp_ncpus = 1;
871 mp_ncores = 1;
872 mp_maxid = PCPU_GET(cpuid);
873 CPU_SETOF(mp_maxid, &all_cpus);
874 KASSERT(PCPU_GET(cpuid) == 0, ("UP must have a CPU ID of zero"));
875 }
876 SYSINIT(cpu_mp_setvariables, SI_SUB_TUNABLES, SI_ORDER_FIRST,
877 mp_setvariables_for_up, NULL);
878 #endif /* SMP */
879
880 void
881 smp_no_rendezvous_barrier(void *dummy)
882 {
883 #ifdef SMP
884 KASSERT((!smp_started),("smp_no_rendezvous_barrier called and smp is started"));
885 #endif
886 }
887
888 void
889 smp_rendezvous_cpus_retry(cpuset_t map,
890 void (* setup_func)(void *),
891 void (* action_func)(void *),
892 void (* teardown_func)(void *),
893 void (* wait_func)(void *, int),
894 struct smp_rendezvous_cpus_retry_arg *arg)
895 {
896 int cpu;
897
898 /*
899 * Only one CPU to execute on.
900 */
901 if (!smp_started) {
902 spinlock_enter();
903 if (setup_func != NULL)
904 setup_func(arg);
905 if (action_func != NULL)
906 action_func(arg);
907 if (teardown_func != NULL)
908 teardown_func(arg);
909 spinlock_exit();
910 return;
911 }
912
913 /*
914 * Execute an action on all specified CPUs while retrying until they
915 * all acknowledge completion.
916 */
917 CPU_COPY(&map, &arg->cpus);
918 for (;;) {
919 smp_rendezvous_cpus(
920 arg->cpus,
921 setup_func,
922 action_func,
923 teardown_func,
924 arg);
925
926 if (CPU_EMPTY(&arg->cpus))
927 break;
928
929 CPU_FOREACH(cpu) {
930 if (!CPU_ISSET(cpu, &arg->cpus))
931 continue;
932 wait_func(arg, cpu);
933 }
934 }
935 }
936
937 void
938 smp_rendezvous_cpus_done(struct smp_rendezvous_cpus_retry_arg *arg)
939 {
940
941 CPU_CLR_ATOMIC(curcpu, &arg->cpus);
942 }
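/*
 * Example (illustrative sketch; the example_ names, including the
 * example_try_work() helper, are hypothetical): using the retry variant.
 * Each CPU that completes the work calls smp_rendezvous_cpus_done(); CPUs
 * that leave their bit set are waited on via wait_func and the rendezvous
 * is then reissued for them.
 */
#if 0
struct example_arg {
	struct smp_rendezvous_cpus_retry_arg srcra;
	int generation;
};

static void
example_action(void *xarg)
{
	struct example_arg *arg;

	arg = __containerof(xarg, struct example_arg, srcra);
	if (example_try_work(arg->generation))	/* hypothetical helper */
		smp_rendezvous_cpus_done(&arg->srcra);
	/* Otherwise leave our bit set so the initiator retries. */
}

static void
example_wait(void *xarg __unused, int cpu __unused)
{

	cpu_spinwait();
}

static void
example_run(void)
{
	struct example_arg arg;

	arg.generation = 1;
	smp_rendezvous_cpus_retry(all_cpus, smp_no_rendezvous_barrier,
	    example_action, smp_no_rendezvous_barrier, example_wait,
	    &arg.srcra);
}
#endif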
943
944 /*
945 * Wait for specified idle threads to switch once. This ensures that even
946 * preempted threads have cycled through the switch function once,
947 * exiting their codepaths. This allows us to change global pointers
948 * with no other synchronization.
949 */
950 int
951 quiesce_cpus(cpuset_t map, const char *wmesg, int prio)
952 {
953 struct pcpu *pcpu;
954 u_int gen[MAXCPU];
955 int error;
956 int cpu;
957
958 error = 0;
959 for (cpu = 0; cpu <= mp_maxid; cpu++) {
960 if (!CPU_ISSET(cpu, &map) || CPU_ABSENT(cpu))
961 continue;
962 pcpu = pcpu_find(cpu);
963 gen[cpu] = pcpu->pc_idlethread->td_generation;
964 }
965 for (cpu = 0; cpu <= mp_maxid; cpu++) {
966 if (!CPU_ISSET(cpu, &map) || CPU_ABSENT(cpu))
967 continue;
968 pcpu = pcpu_find(cpu);
969 thread_lock(curthread);
970 sched_bind(curthread, cpu);
971 thread_unlock(curthread);
972 while (gen[cpu] == pcpu->pc_idlethread->td_generation) {
973 error = tsleep(quiesce_cpus, prio, wmesg, 1);
974 if (error != EWOULDBLOCK)
975 goto out;
976 error = 0;
977 }
978 }
979 out:
980 thread_lock(curthread);
981 sched_unbind(curthread);
982 thread_unlock(curthread);
983
984 return (error);
985 }
986
987 int
988 quiesce_all_cpus(const char *wmesg, int prio)
989 {
990
991 return quiesce_cpus(all_cpus, wmesg, prio);
992 }
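/*
 * Example (illustrative sketch; the example_ names are hypothetical):
 * retiring an old table after a pointer swap.  Once every idle thread has
 * switched, no preempted code path can still hold the old pointer loaded
 * before the store, so it is safe to free.
 */
#if 0
struct example_table;
static struct example_table *example_active_table;

static void
example_replace_table(struct example_table *newtbl)
{
	struct example_table *oldtbl;

	oldtbl = example_active_table;
	atomic_store_rel_ptr((volatile uintptr_t *)&example_active_table,
	    (uintptr_t)newtbl);
	quiesce_all_cpus("extbl", 0);
	free(oldtbl, M_TEMP);
}
#endif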
993
994 /*
995 * Wait until every CPU has been observed outside of a critical section.
996 * We are not in one ourselves, so the check for the current CPU is safe.
997 * If the thread observed on a CPU changes to another thread, we know that
998 * the original thread's critical section was exited as well.
999 */
1000 void
1001 quiesce_all_critical(void)
1002 {
1003 struct thread *td, *newtd;
1004 struct pcpu *pcpu;
1005 int cpu;
1006
1007 MPASS(curthread->td_critnest == 0);
1008
1009 CPU_FOREACH(cpu) {
1010 pcpu = cpuid_to_pcpu[cpu];
1011 td = pcpu->pc_curthread;
1012 for (;;) {
1013 if (td->td_critnest == 0)
1014 break;
1015 cpu_spinwait();
1016 newtd = (struct thread *)
1017 atomic_load_acq_ptr((void *)&pcpu->pc_curthread);
1018 if (td != newtd)
1019 break;
1020 }
1021 }
1022 }
1023
1024 static void
1025 cpus_fence_seq_cst_issue(void *arg __unused)
1026 {
1027
1028 atomic_thread_fence_seq_cst();
1029 }
1030
1031 /*
1032 * Send an IPI forcing a sequentially consistent fence.
1033 *
1034 * Allows replacement of an explicit fence with a compiler barrier.
1035 * Trades a speedup during normal execution for a significant slowdown
1036 * when the barrier is needed.
1037 */
1038 void
1039 cpus_fence_seq_cst(void)
1040 {
1041
1042 #ifdef SMP
1043 smp_rendezvous(
1044 smp_no_rendezvous_barrier,
1045 cpus_fence_seq_cst_issue,
1046 smp_no_rendezvous_barrier,
1047 NULL
1048 );
1049 #else
1050 cpus_fence_seq_cst_issue(NULL);
1051 #endif
1052 }
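/*
 * Example (illustrative sketch; the example_ names are hypothetical): the
 * asymmetric barrier pattern this enables.  The hot path demotes its fence
 * to a compiler barrier; the rare path forces the ordering for everyone
 * with cpus_fence_seq_cst().
 */
#if 0
static int example_fastpath_enabled = 1;

static void
example_fast_path(void)
{

	/* Would otherwise be atomic_thread_fence_seq_cst(). */
	__compiler_membar();
	if (example_fastpath_enabled) {
		/* ... lightweight per-CPU work ... */
	}
}

static void
example_disable_fast_path(void)
{

	example_fastpath_enabled = 0;
	cpus_fence_seq_cst();
	/* Every CPU now observes the flag as clear. */
}
#endif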
1053
1054 /* Extra care is taken with this sysctl because the data type is volatile */
1055 static int
1056 sysctl_kern_smp_active(SYSCTL_HANDLER_ARGS)
1057 {
1058 int error, active;
1059
1060 active = smp_started;
1061 error = SYSCTL_OUT(req, &active, sizeof(active));
1062 return (error);
1063 }
1064
1065 #ifdef SMP
1066 void
1067 topo_init_node(struct topo_node *node)
1068 {
1069
1070 bzero(node, sizeof(*node));
1071 TAILQ_INIT(&node->children);
1072 }
1073
1074 void
1075 topo_init_root(struct topo_node *root)
1076 {
1077
1078 topo_init_node(root);
1079 root->type = TOPO_TYPE_SYSTEM;
1080 }
1081
1082 /*
1083 * Add a child node with the given ID under the given parent.
1084 * If a matching child already exists, return it instead of adding one.
1085 */
1086 struct topo_node *
1087 topo_add_node_by_hwid(struct topo_node *parent, int hwid,
1088 topo_node_type type, uintptr_t subtype)
1089 {
1090 struct topo_node *node;
1091
1092 TAILQ_FOREACH_REVERSE(node, &parent->children,
1093 topo_children, siblings) {
1094 if (node->hwid == hwid
1095 && node->type == type && node->subtype == subtype) {
1096 return (node);
1097 }
1098 }
1099
1100 node = malloc(sizeof(*node), M_TOPO, M_WAITOK);
1101 topo_init_node(node);
1102 node->parent = parent;
1103 node->hwid = hwid;
1104 node->type = type;
1105 node->subtype = subtype;
1106 TAILQ_INSERT_TAIL(&parent->children, node, siblings);
1107 parent->nchildren++;
1108
1109 return (node);
1110 }
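/*
 * Example (illustrative sketch; the example_ names are hypothetical):
 * hand-building a one-package, two-core, no-SMT topology the way an MD
 * enumerator might.
 */
#if 0
static struct topo_node example_root;

static void
example_build_topology(void)
{
	struct topo_node *pkg, *core;
	int i;

	topo_init_root(&example_root);
	pkg = topo_add_node_by_hwid(&example_root, 0, TOPO_TYPE_PKG, 0);
	for (i = 0; i < 2; i++) {
		core = topo_add_node_by_hwid(pkg, i, TOPO_TYPE_CORE, 0);
		topo_set_pu_id(
		    topo_add_node_by_hwid(core, 0, TOPO_TYPE_PU, 0), i);
	}
}
#endif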
1111
1112 /*
1113 * Find a child node with the given ID under the given parent.
1114 */
1115 struct topo_node *
1116 topo_find_node_by_hwid(struct topo_node *parent, int hwid,
1117 topo_node_type type, uintptr_t subtype)
1118 {
1119
1120 struct topo_node *node;
1121
1122 TAILQ_FOREACH(node, &parent->children, siblings) {
1123 if (node->hwid == hwid
1124 && node->type == type && node->subtype == subtype) {
1125 return (node);
1126 }
1127 }
1128
1129 return (NULL);
1130 }
1131
1132 /*
1133 * Given a node change the order of its parent's child nodes such
1134 * that the node becomes the first child while preserving the cyclic
1135 * order of the children. In other words, the given node is promoted
1136 * by rotation.
1137 */
1138 void
1139 topo_promote_child(struct topo_node *child)
1140 {
1141 struct topo_node *next;
1142 struct topo_node *node;
1143 struct topo_node *parent;
1144
1145 parent = child->parent;
1146 next = TAILQ_NEXT(child, siblings);
1147 TAILQ_REMOVE(&parent->children, child, siblings);
1148 TAILQ_INSERT_HEAD(&parent->children, child, siblings);
1149
1150 while (next != NULL) {
1151 node = next;
1152 next = TAILQ_NEXT(node, siblings);
1153 TAILQ_REMOVE(&parent->children, node, siblings);
1154 TAILQ_INSERT_AFTER(&parent->children, child, node, siblings);
1155 child = node;
1156 }
1157 }
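/*
 * Example (illustrative): with children A -> B -> C -> D, promoting C
 * yields C -> D -> A -> B; the cyclic order is preserved while C moves
 * to the front.
 */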
1158
1159 /*
1160 * Iterate to the next node in the depth-first search (traversal) of
1161 * the topology tree.
1162 */
1163 struct topo_node *
1164 topo_next_node(struct topo_node *top, struct topo_node *node)
1165 {
1166 struct topo_node *next;
1167
1168 if ((next = TAILQ_FIRST(&node->children)) != NULL)
1169 return (next);
1170
1171 if ((next = TAILQ_NEXT(node, siblings)) != NULL)
1172 return (next);
1173
1174 while (node != top && (node = node->parent) != top)
1175 if ((next = TAILQ_NEXT(node, siblings)) != NULL)
1176 return (next);
1177
1178 return (NULL);
1179 }
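/*
 * Example (illustrative sketch; the example_ name is hypothetical):
 * visiting every node in a subtree with the iterator above.
 */
#if 0
static void
example_visit_all(struct topo_node *root)
{
	struct topo_node *node;

	for (node = root; node != NULL; node = topo_next_node(root, node)) {
		/* ... examine node->type, node->cpuset, etc. ... */
	}
}
#endif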
1180
1181 /*
1182 * Iterate to the next node in the depth-first search of the topology tree,
1183 * but without descending below the current node.
1184 */
1185 struct topo_node *
1186 topo_next_nonchild_node(struct topo_node *top, struct topo_node *node)
1187 {
1188 struct topo_node *next;
1189
1190 if ((next = TAILQ_NEXT(node, siblings)) != NULL)
1191 return (next);
1192
1193 while (node != top && (node = node->parent) != top)
1194 if ((next = TAILQ_NEXT(node, siblings)) != NULL)
1195 return (next);
1196
1197 return (NULL);
1198 }
1199
1200 /*
1201 * Assign the given ID to the given topology node that represents a logical
1202 * processor.
1203 */
1204 void
1205 topo_set_pu_id(struct topo_node *node, cpuid_t id)
1206 {
1207
1208 KASSERT(node->type == TOPO_TYPE_PU,
1209 ("topo_set_pu_id: wrong node type: %u", node->type));
1210 KASSERT(CPU_EMPTY(&node->cpuset) && node->cpu_count == 0,
1211 ("topo_set_pu_id: cpuset already not empty"));
1212 node->id = id;
1213 CPU_SET(id, &node->cpuset);
1214 node->cpu_count = 1;
1215 node->subtype = 1;
1216
1217 while ((node = node->parent) != NULL) {
1218 KASSERT(!CPU_ISSET(id, &node->cpuset),
1219 ("logical ID %u is already set in node %p", id, node));
1220 CPU_SET(id, &node->cpuset);
1221 node->cpu_count++;
1222 }
1223 }
1224
1225 static struct topology_spec {
1226 topo_node_type type;
1227 bool match_subtype;
1228 uintptr_t subtype;
1229 } topology_level_table[TOPO_LEVEL_COUNT] = {
1230 [TOPO_LEVEL_PKG] = { .type = TOPO_TYPE_PKG, },
1231 [TOPO_LEVEL_GROUP] = { .type = TOPO_TYPE_GROUP, },
1232 [TOPO_LEVEL_CACHEGROUP] = {
1233 .type = TOPO_TYPE_CACHE,
1234 .match_subtype = true,
1235 .subtype = CG_SHARE_L3,
1236 },
1237 [TOPO_LEVEL_CORE] = { .type = TOPO_TYPE_CORE, },
1238 [TOPO_LEVEL_THREAD] = { .type = TOPO_TYPE_PU, },
1239 };
1240
1241 static bool
1242 topo_analyze_table(struct topo_node *root, int all, enum topo_level level,
1243 struct topo_analysis *results)
1244 {
1245 struct topology_spec *spec;
1246 struct topo_node *node;
1247 int count;
1248
1249 if (level >= TOPO_LEVEL_COUNT)
1250 return (true);
1251
1252 spec = &topology_level_table[level];
1253 count = 0;
1254 node = topo_next_node(root, root);
1255
1256 while (node != NULL) {
1257 if (node->type != spec->type ||
1258 (spec->match_subtype && node->subtype != spec->subtype)) {
1259 node = topo_next_node(root, node);
1260 continue;
1261 }
1262 if (!all && CPU_EMPTY(&node->cpuset)) {
1263 node = topo_next_nonchild_node(root, node);
1264 continue;
1265 }
1266
1267 count++;
1268
1269 if (!topo_analyze_table(node, all, level + 1, results))
1270 return (false);
1271
1272 node = topo_next_nonchild_node(root, node);
1273 }
1274
1275 /* No explicit subgroups amounts to a single subgroup. */
1276 if (count == 0) {
1277 count = 1;
1278
1279 if (!topo_analyze_table(root, all, level + 1, results))
1280 return (false);
1281 }
1282
1283 if (results->entities[level] == -1)
1284 results->entities[level] = count;
1285 else if (results->entities[level] != count)
1286 return (false);
1287
1288 return (true);
1289 }
1290
1291 /*
1292 * Check if the topology is uniform, that is, each package has the same number
1293 * of cores in it and each core has the same number of threads (logical
1294 * processors) in it. If so, calculate the number of packages, the number of
1295 * groups per package, the number of cachegroups per group, and the number of
1296 * logical processors per cachegroup. The 'all' parameter tells whether to
1297 * include administratively disabled logical processors in the analysis.
1298 */
1299 int
1300 topo_analyze(struct topo_node *topo_root, int all,
1301 struct topo_analysis *results)
1302 {
1303
1304 results->entities[TOPO_LEVEL_PKG] = -1;
1305 results->entities[TOPO_LEVEL_CORE] = -1;
1306 results->entities[TOPO_LEVEL_THREAD] = -1;
1307 results->entities[TOPO_LEVEL_GROUP] = -1;
1308 results->entities[TOPO_LEVEL_CACHEGROUP] = -1;
1309
1310 if (!topo_analyze_table(topo_root, all, TOPO_LEVEL_PKG, results))
1311 return (0);
1312
1313 KASSERT(results->entities[TOPO_LEVEL_PKG] > 0,
1314 ("bug in topology or analysis"));
1315
1316 return (1);
1317 }
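/*
 * Example (illustrative sketch; the example_ name is hypothetical):
 * consuming the analysis.  Cores per package is the product of the
 * per-level counts between package and core.
 */
#if 0
static void
example_report_topology(struct topo_node *root)
{
	struct topo_analysis results;

	if (!topo_analyze(root, 1, &results)) {
		printf("topology is not uniform\n");
		return;
	}
	printf("%d package(s), %d core(s)/package, %d thread(s)/core\n",
	    results.entities[TOPO_LEVEL_PKG],
	    results.entities[TOPO_LEVEL_GROUP] *
	    results.entities[TOPO_LEVEL_CACHEGROUP] *
	    results.entities[TOPO_LEVEL_CORE],
	    results.entities[TOPO_LEVEL_THREAD]);
}
#endif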
1318
1319 #endif /* SMP */