FreeBSD/Linux Kernel Cross Reference
sys/kern/subr_smp.c
1 /*-
2 * Copyright (c) 2001, John Baldwin <jhb@FreeBSD.org>.
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 *
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24 * SUCH DAMAGE.
25 */
26
27 /*
28 * This module holds the global variables and machine independent functions
29 * used for the kernel SMP support.
30 */
31
32 #include <sys/cdefs.h>
33 __FBSDID("$FreeBSD: releng/11.2/sys/kern/subr_smp.c 331909 2018-04-03 07:31:22Z avg $");
34
35 #include <sys/param.h>
36 #include <sys/systm.h>
37 #include <sys/kernel.h>
38 #include <sys/ktr.h>
39 #include <sys/proc.h>
40 #include <sys/bus.h>
41 #include <sys/lock.h>
42 #include <sys/malloc.h>
43 #include <sys/mutex.h>
44 #include <sys/pcpu.h>
45 #include <sys/sched.h>
46 #include <sys/smp.h>
47 #include <sys/sysctl.h>
48
49 #include <machine/cpu.h>
50 #include <machine/smp.h>
51
52 #include "opt_sched.h"
53
54 #ifdef SMP
55 MALLOC_DEFINE(M_TOPO, "toponodes", "SMP topology data");
56
57 volatile cpuset_t stopped_cpus;
58 volatile cpuset_t started_cpus;
59 volatile cpuset_t suspended_cpus;
60 cpuset_t hlt_cpus_mask;
61 cpuset_t logical_cpus_mask;
62
63 void (*cpustop_restartfunc)(void);
64 #endif
65
66 static int sysctl_kern_smp_active(SYSCTL_HANDLER_ARGS);
67
68 /* This is used in modules that need to work in both SMP and UP. */
69 cpuset_t all_cpus;
70
71 int mp_ncpus;
72 /* export this for libkvm consumers. */
73 int mp_maxcpus = MAXCPU;
74
75 volatile int smp_started;
76 u_int mp_maxid;
77
78 static SYSCTL_NODE(_kern, OID_AUTO, smp, CTLFLAG_RD|CTLFLAG_CAPRD, NULL,
79 "Kernel SMP");
80
81 SYSCTL_INT(_kern_smp, OID_AUTO, maxid, CTLFLAG_RD|CTLFLAG_CAPRD, &mp_maxid, 0,
82 "Max CPU ID.");
83
84 SYSCTL_INT(_kern_smp, OID_AUTO, maxcpus, CTLFLAG_RD|CTLFLAG_CAPRD, &mp_maxcpus,
85 0, "Max number of CPUs that the system was compiled for.");
86
87 SYSCTL_PROC(_kern_smp, OID_AUTO, active, CTLFLAG_RD | CTLTYPE_INT, NULL, 0,
88 sysctl_kern_smp_active, "I", "Indicates system is running in SMP mode");
89
90 int smp_disabled = 0; /* has smp been disabled? */
91 SYSCTL_INT(_kern_smp, OID_AUTO, disabled, CTLFLAG_RDTUN|CTLFLAG_CAPRD,
92 &smp_disabled, 0, "SMP has been disabled from the loader");
93
94 int smp_cpus = 1;	/* how many CPUs are running */
95 SYSCTL_INT(_kern_smp, OID_AUTO, cpus, CTLFLAG_RD|CTLFLAG_CAPRD, &smp_cpus, 0,
96 "Number of CPUs online");
97
98 int smp_topology = 0; /* Which topology we're using. */
99 SYSCTL_INT(_kern_smp, OID_AUTO, topology, CTLFLAG_RDTUN, &smp_topology, 0,
100 "Topology override setting; 0 is default provided by hardware.");
101
102 #ifdef SMP
103 /* Enable forwarding of a signal to a process running on a different CPU */
104 static int forward_signal_enabled = 1;
105 SYSCTL_INT(_kern_smp, OID_AUTO, forward_signal_enabled, CTLFLAG_RW,
106 &forward_signal_enabled, 0,
107 "Forwarding of a signal to a process on a different CPU");
108
109 /* Variables needed for SMP rendezvous. */
110 static volatile int smp_rv_ncpus;
111 static void (*volatile smp_rv_setup_func)(void *arg);
112 static void (*volatile smp_rv_action_func)(void *arg);
113 static void (*volatile smp_rv_teardown_func)(void *arg);
114 static void *volatile smp_rv_func_arg;
115 static volatile int smp_rv_waiters[4];
116
117 /*
118 * Shared mutex to restrict busywaits between smp_rendezvous() and
119 * smp(_targeted)_tlb_shootdown(). A deadlock occurs if both of these
120 * functions trigger at once and cause multiple CPUs to busywait with
121 * interrupts disabled.
122 */
123 struct mtx smp_ipi_mtx;
124
125 /*
126 * Let the MD SMP code initialize mp_maxid very early if it can.
127 */
128 static void
129 mp_setmaxid(void *dummy)
130 {
131
132 cpu_mp_setmaxid();
133
134 KASSERT(mp_ncpus >= 1, ("%s: CPU count < 1", __func__));
135 KASSERT(mp_ncpus > 1 || mp_maxid == 0,
136 ("%s: one CPU but mp_maxid is not zero", __func__));
137 KASSERT(mp_maxid >= mp_ncpus - 1,
138 ("%s: counters out of sync: max %d, count %d", __func__,
139 mp_maxid, mp_ncpus));
140 }
141 SYSINIT(cpu_mp_setmaxid, SI_SUB_TUNABLES, SI_ORDER_FIRST, mp_setmaxid, NULL);
142
143 /*
144 * Call the MD SMP initialization code.
145 */
146 static void
147 mp_start(void *dummy)
148 {
149
150 mtx_init(&smp_ipi_mtx, "smp rendezvous", NULL, MTX_SPIN);
151
152 /* Probe for MP hardware. */
153 if (smp_disabled != 0 || cpu_mp_probe() == 0) {
154 mp_ncpus = 1;
155 CPU_SETOF(PCPU_GET(cpuid), &all_cpus);
156 return;
157 }
158
159 cpu_mp_start();
160 printf("FreeBSD/SMP: Multiprocessor System Detected: %d CPUs\n",
161 mp_ncpus);
162 cpu_mp_announce();
163 }
164 SYSINIT(cpu_mp, SI_SUB_CPU, SI_ORDER_THIRD, mp_start, NULL);
165
166 void
167 forward_signal(struct thread *td)
168 {
169 int id;
170
171 /*
172 * signotify() has already set TDF_ASTPENDING and TDF_NEEDSIGCHECK on
173 * this thread, so all we need to do is poke it if it is currently
174 * executing so that it executes ast().
175 */
176 THREAD_LOCK_ASSERT(td, MA_OWNED);
177 KASSERT(TD_IS_RUNNING(td),
178 ("forward_signal: thread is not TDS_RUNNING"));
179
180 CTR1(KTR_SMP, "forward_signal(%p)", td->td_proc);
181
182 if (!smp_started || cold || panicstr)
183 return;
184 if (!forward_signal_enabled)
185 return;
186
187 /* No need to IPI ourself. */
188 if (td == curthread)
189 return;
190
191 id = td->td_oncpu;
192 if (id == NOCPU)
193 return;
194 ipi_cpu(id, IPI_AST);
195 }
196
197 /*
198  * When called, the executing CPU sends an IPI to the other CPUs in the map,
199  * requesting that they halt execution.
200 *
201 * Usually (but not necessarily) called with 'other_cpus' as its arg.
202 *
203 * - Signals all CPUs in map to stop.
204 * - Waits for each to stop.
205 *
206 * Returns:
207 * -1: error
208 * 0: NA
209 * 1: ok
210 *
211 */
212 #if defined(__amd64__) || defined(__i386__)
213 #define X86 1
214 #else
215 #define X86 0
216 #endif
217 static int
218 generic_stop_cpus(cpuset_t map, u_int type)
219 {
220 #ifdef KTR
221 char cpusetbuf[CPUSETBUFSIZ];
222 #endif
223 static volatile u_int stopping_cpu = NOCPU;
224 int i;
225 volatile cpuset_t *cpus;
226
227 KASSERT(
228 type == IPI_STOP || type == IPI_STOP_HARD
229 #if X86
230 || type == IPI_SUSPEND
231 #endif
232 , ("%s: invalid stop type", __func__));
233
234 if (!smp_started)
235 return (0);
236
237 CTR2(KTR_SMP, "stop_cpus(%s) with %u type",
238 cpusetobj_strprint(cpusetbuf, &map), type);
239
240 #if X86
241 /*
242  * When suspending, ensure there are no IPIs in progress.
243 * IPIs that have been issued, but not yet delivered (e.g.
244 * not pending on a vCPU when running under virtualization)
245 * will be lost, violating FreeBSD's assumption of reliable
246 * IPI delivery.
247 */
248 if (type == IPI_SUSPEND)
249 mtx_lock_spin(&smp_ipi_mtx);
250 #endif
251
252 #if X86
253 if (!nmi_is_broadcast || nmi_kdb_lock == 0) {
254 #endif
255 if (stopping_cpu != PCPU_GET(cpuid))
256 while (atomic_cmpset_int(&stopping_cpu, NOCPU,
257 PCPU_GET(cpuid)) == 0)
258 while (stopping_cpu != NOCPU)
259 cpu_spinwait(); /* spin */
260
261 /* send the stop IPI to all CPUs in map */
262 ipi_selected(map, type);
263 #if X86
264 }
265 #endif
266
267 #if X86
268 if (type == IPI_SUSPEND)
269 cpus = &suspended_cpus;
270 else
271 #endif
272 cpus = &stopped_cpus;
273
274 i = 0;
275 while (!CPU_SUBSET(cpus, &map)) {
276 /* spin */
277 cpu_spinwait();
278 i++;
279 if (i == 100000000) {
280 printf("timeout stopping cpus\n");
281 break;
282 }
283 }
284
285 #if X86
286 if (type == IPI_SUSPEND)
287 mtx_unlock_spin(&smp_ipi_mtx);
288 #endif
289
290 stopping_cpu = NOCPU;
291 return (1);
292 }
293
294 int
295 stop_cpus(cpuset_t map)
296 {
297
298 return (generic_stop_cpus(map, IPI_STOP));
299 }
300
301 int
302 stop_cpus_hard(cpuset_t map)
303 {
304
305 return (generic_stop_cpus(map, IPI_STOP_HARD));
306 }
307
308 #if X86
309 int
310 suspend_cpus(cpuset_t map)
311 {
312
313 return (generic_stop_cpus(map, IPI_SUSPEND));
314 }
315 #endif
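/*
 * Illustrative sketch of the stop/restart pattern described above: a CPU
 * that needs all other CPUs quiet stops them, does its work, and releases
 * them.  do_exclusive_work() is a hypothetical placeholder for that work.
 */
#if 0
static void
run_exclusively(void)
{
	cpuset_t other_cpus;

	other_cpus = all_cpus;
	CPU_CLR(PCPU_GET(cpuid), &other_cpus);

	stop_cpus_hard(other_cpus);	/* NMI-hard stop of the other CPUs */
	do_exclusive_work();		/* hypothetical exclusive work */
	restart_cpus(stopped_cpus);	/* release the stopped CPUs */
}
#endif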
316
317 /*
318 * Called by a CPU to restart stopped CPUs.
319 *
320 * Usually (but not necessarily) called with 'stopped_cpus' as its arg.
321 *
322 * - Signals all CPUs in map to restart.
323 * - Waits for each to restart.
324 *
325 * Returns:
326 * -1: error
327 * 0: NA
328 * 1: ok
329 */
330 static int
331 generic_restart_cpus(cpuset_t map, u_int type)
332 {
333 #ifdef KTR
334 char cpusetbuf[CPUSETBUFSIZ];
335 #endif
336 volatile cpuset_t *cpus;
337
338 KASSERT(type == IPI_STOP || type == IPI_STOP_HARD
339 #if X86
340 || type == IPI_SUSPEND
341 #endif
342 , ("%s: invalid stop type", __func__));
343
344 if (!smp_started)
345 return (0);
346
347 CTR1(KTR_SMP, "restart_cpus(%s)", cpusetobj_strprint(cpusetbuf, &map));
348
349 #if X86
350 if (type == IPI_SUSPEND)
351 cpus = &resuming_cpus;
352 else
353 #endif
354 cpus = &stopped_cpus;
355
356 /* signal other cpus to restart */
357 #if X86
358 if (type == IPI_SUSPEND)
359 CPU_COPY_STORE_REL(&map, &toresume_cpus);
360 else
361 #endif
362 CPU_COPY_STORE_REL(&map, &started_cpus);
363
364 #if X86
365 if (!nmi_is_broadcast || nmi_kdb_lock == 0) {
366 #endif
367 /* wait for each to clear its bit */
368 while (CPU_OVERLAP(cpus, &map))
369 cpu_spinwait();
370 #if X86
371 }
372 #endif
373
374 return (1);
375 }
376
377 int
378 restart_cpus(cpuset_t map)
379 {
380
381 return (generic_restart_cpus(map, IPI_STOP));
382 }
383
384 #if X86
385 int
386 resume_cpus(cpuset_t map)
387 {
388
389 return (generic_restart_cpus(map, IPI_SUSPEND));
390 }
391 #endif
392 #undef X86
393
394 /*
395 * All-CPU rendezvous. CPUs are signalled, all execute the setup function
396 * (if specified), rendezvous, execute the action function (if specified),
397 * rendezvous again, execute the teardown function (if specified), and then
398 * resume.
399 *
400 * Note that the supplied external functions _must_ be reentrant and aware
401 * that they are running in parallel and in an unknown lock context.
402 */
403 void
404 smp_rendezvous_action(void)
405 {
406 struct thread *td;
407 void *local_func_arg;
408 void (*local_setup_func)(void*);
409 void (*local_action_func)(void*);
410 void (*local_teardown_func)(void*);
411 #ifdef INVARIANTS
412 int owepreempt;
413 #endif
414
415 /* Ensure we have up-to-date values. */
416 atomic_add_acq_int(&smp_rv_waiters[0], 1);
417 while (smp_rv_waiters[0] < smp_rv_ncpus)
418 cpu_spinwait();
419
420 /* Fetch rendezvous parameters after acquire barrier. */
421 local_func_arg = smp_rv_func_arg;
422 local_setup_func = smp_rv_setup_func;
423 local_action_func = smp_rv_action_func;
424 local_teardown_func = smp_rv_teardown_func;
425
426 /*
427 * Use a nested critical section to prevent any preemptions
428 * from occurring during a rendezvous action routine.
429 * Specifically, if a rendezvous handler is invoked via an IPI
430 * and the interrupted thread was in the critical_exit()
431 * function after setting td_critnest to 0 but before
432 * performing a deferred preemption, this routine can be
433 * invoked with td_critnest set to 0 and td_owepreempt true.
434 * In that case, a critical_exit() during the rendezvous
435 * action would trigger a preemption which is not permitted in
436 * a rendezvous action. To fix this, wrap all of the
437 * rendezvous action handlers in a critical section. We
438 * cannot use a regular critical section however as having
439 * critical_exit() preempt from this routine would also be
440 * problematic (the preemption must not occur before the IPI
441 * has been acknowledged via an EOI). Instead, we
442 * intentionally ignore td_owepreempt when leaving the
443 * critical section. This should be harmless because we do
444 * not permit rendezvous action routines to schedule threads,
445 * and thus td_owepreempt should never transition from 0 to 1
446 * during this routine.
447 */
448 td = curthread;
449 td->td_critnest++;
450 #ifdef INVARIANTS
451 owepreempt = td->td_owepreempt;
452 #endif
453
454 /*
455 * If requested, run a setup function before the main action
456 * function. Ensure all CPUs have completed the setup
457 * function before moving on to the action function.
458 */
459 if (local_setup_func != smp_no_rendezvous_barrier) {
460 if (smp_rv_setup_func != NULL)
461 smp_rv_setup_func(smp_rv_func_arg);
462 atomic_add_int(&smp_rv_waiters[1], 1);
463 while (smp_rv_waiters[1] < smp_rv_ncpus)
464 cpu_spinwait();
465 }
466
467 if (local_action_func != NULL)
468 local_action_func(local_func_arg);
469
470 if (local_teardown_func != smp_no_rendezvous_barrier) {
471 /*
472 * Signal that the main action has been completed. If a
473 * full exit rendezvous is requested, then all CPUs will
474 * wait here until all CPUs have finished the main action.
475 */
476 atomic_add_int(&smp_rv_waiters[2], 1);
477 while (smp_rv_waiters[2] < smp_rv_ncpus)
478 cpu_spinwait();
479
480 if (local_teardown_func != NULL)
481 local_teardown_func(local_func_arg);
482 }
483
484 /*
485 * Signal that the rendezvous is fully completed by this CPU.
486 * This means that no member of smp_rv_* pseudo-structure will be
487 * accessed by this target CPU after this point; in particular,
488 * memory pointed by smp_rv_func_arg.
489 *
490 * The release semantic ensures that all accesses performed by
491 * the current CPU are visible when smp_rendezvous_cpus()
492 * returns, by synchronizing with the
493 * atomic_load_acq_int(&smp_rv_waiters[3]).
494 */
495 atomic_add_rel_int(&smp_rv_waiters[3], 1);
496
497 td->td_critnest--;
498 KASSERT(owepreempt == td->td_owepreempt,
499 ("rendezvous action changed td_owepreempt"));
500 }
501
502 void
503 smp_rendezvous_cpus(cpuset_t map,
504 void (* setup_func)(void *),
505 void (* action_func)(void *),
506 void (* teardown_func)(void *),
507 void *arg)
508 {
509 int curcpumap, i, ncpus = 0;
510
511 	/* See the comments in the !SMP case below. */
512 if (!smp_started) {
513 spinlock_enter();
514 if (setup_func != NULL)
515 setup_func(arg);
516 if (action_func != NULL)
517 action_func(arg);
518 if (teardown_func != NULL)
519 teardown_func(arg);
520 spinlock_exit();
521 return;
522 }
523
524 CPU_FOREACH(i) {
525 if (CPU_ISSET(i, &map))
526 ncpus++;
527 }
528 if (ncpus == 0)
529 panic("ncpus is 0 with non-zero map");
530
531 mtx_lock_spin(&smp_ipi_mtx);
532
533 /* Pass rendezvous parameters via global variables. */
534 smp_rv_ncpus = ncpus;
535 smp_rv_setup_func = setup_func;
536 smp_rv_action_func = action_func;
537 smp_rv_teardown_func = teardown_func;
538 smp_rv_func_arg = arg;
539 smp_rv_waiters[1] = 0;
540 smp_rv_waiters[2] = 0;
541 smp_rv_waiters[3] = 0;
542 atomic_store_rel_int(&smp_rv_waiters[0], 0);
543
544 /*
545 * Signal other processors, which will enter the IPI with
546 * interrupts off.
547 */
548 curcpumap = CPU_ISSET(curcpu, &map);
549 CPU_CLR(curcpu, &map);
550 ipi_selected(map, IPI_RENDEZVOUS);
551
552 /* Check if the current CPU is in the map */
553 if (curcpumap != 0)
554 smp_rendezvous_action();
555
556 /*
557 * Ensure that the master CPU waits for all the other
558 * CPUs to finish the rendezvous, so that smp_rv_*
559 * pseudo-structure and the arg are guaranteed to not
560 * be in use.
561 *
562 * Load acquire synchronizes with the release add in
563 * smp_rendezvous_action(), which ensures that our caller sees
564 * all memory actions done by the called functions on other
565 * CPUs.
566 */
567 while (atomic_load_acq_int(&smp_rv_waiters[3]) < ncpus)
568 cpu_spinwait();
569
570 mtx_unlock_spin(&smp_ipi_mtx);
571 }
572
573 void
574 smp_rendezvous(void (* setup_func)(void *),
575 void (* action_func)(void *),
576 void (* teardown_func)(void *),
577 void *arg)
578 {
579 smp_rendezvous_cpus(all_cpus, setup_func, action_func, teardown_func, arg);
580 }
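/*
 * Illustrative sketch of a smp_rendezvous() caller: run a short action on
 * every CPU with interrupts disabled, skipping the setup and teardown
 * barriers by passing smp_no_rendezvous_barrier.  The per-CPU cache being
 * flushed here is hypothetical.
 */
#if 0
static void
flush_local_cache_action(void *arg __unused)
{
	/* Runs on every CPU, in IPI context with interrupts disabled. */
	/* ... invalidate this CPU's private (hypothetical) cache ... */
}

static void
flush_all_local_caches(void)
{
	smp_rendezvous(smp_no_rendezvous_barrier, flush_local_cache_action,
	    smp_no_rendezvous_barrier, NULL);
}
#endif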
581
582 static struct cpu_group group[MAXCPU * MAX_CACHE_LEVELS + 1];
583
584 struct cpu_group *
585 smp_topo(void)
586 {
587 char cpusetbuf[CPUSETBUFSIZ], cpusetbuf2[CPUSETBUFSIZ];
588 struct cpu_group *top;
589
590 /*
591 * Check for a fake topology request for debugging purposes.
592 */
593 switch (smp_topology) {
594 case 1:
595 /* Dual core with no sharing. */
596 top = smp_topo_1level(CG_SHARE_NONE, 2, 0);
597 break;
598 case 2:
599 /* No topology, all cpus are equal. */
600 top = smp_topo_none();
601 break;
602 case 3:
603 /* Dual core with shared L2. */
604 top = smp_topo_1level(CG_SHARE_L2, 2, 0);
605 break;
606 case 4:
607 /* quad core, shared l3 among each package, private l2. */
608 top = smp_topo_1level(CG_SHARE_L3, 4, 0);
609 break;
610 case 5:
611 /* quad core, 2 dualcore parts on each package share l2. */
612 top = smp_topo_2level(CG_SHARE_NONE, 2, CG_SHARE_L2, 2, 0);
613 break;
614 case 6:
615 /* Single-core 2xHTT */
616 top = smp_topo_1level(CG_SHARE_L1, 2, CG_FLAG_HTT);
617 break;
618 case 7:
619 /* quad core with a shared l3, 8 threads sharing L2. */
620 top = smp_topo_2level(CG_SHARE_L3, 4, CG_SHARE_L2, 8,
621 CG_FLAG_SMT);
622 break;
623 default:
624 /* Default, ask the system what it wants. */
625 top = cpu_topo();
626 break;
627 }
628 /*
629 * Verify the returned topology.
630 */
631 if (top->cg_count != mp_ncpus)
632 panic("Built bad topology at %p. CPU count %d != %d",
633 top, top->cg_count, mp_ncpus);
634 if (CPU_CMP(&top->cg_mask, &all_cpus))
635 panic("Built bad topology at %p. CPU mask (%s) != (%s)",
636 top, cpusetobj_strprint(cpusetbuf, &top->cg_mask),
637 cpusetobj_strprint(cpusetbuf2, &all_cpus));
638 return (top);
639 }
640
641 struct cpu_group *
642 smp_topo_alloc(u_int count)
643 {
644 static u_int index;
645 u_int curr;
646
647 curr = index;
648 index += count;
649 return (&group[curr]);
650 }
651
652 struct cpu_group *
653 smp_topo_none(void)
654 {
655 struct cpu_group *top;
656
657 top = &group[0];
658 top->cg_parent = NULL;
659 top->cg_child = NULL;
660 top->cg_mask = all_cpus;
661 top->cg_count = mp_ncpus;
662 top->cg_children = 0;
663 top->cg_level = CG_SHARE_NONE;
664 top->cg_flags = 0;
665
666 return (top);
667 }
668
669 static int
670 smp_topo_addleaf(struct cpu_group *parent, struct cpu_group *child, int share,
671 int count, int flags, int start)
672 {
673 char cpusetbuf[CPUSETBUFSIZ], cpusetbuf2[CPUSETBUFSIZ];
674 cpuset_t mask;
675 int i;
676
677 CPU_ZERO(&mask);
678 for (i = 0; i < count; i++, start++)
679 CPU_SET(start, &mask);
680 child->cg_parent = parent;
681 child->cg_child = NULL;
682 child->cg_children = 0;
683 child->cg_level = share;
684 child->cg_count = count;
685 child->cg_flags = flags;
686 child->cg_mask = mask;
687 parent->cg_children++;
688 for (; parent != NULL; parent = parent->cg_parent) {
689 if (CPU_OVERLAP(&parent->cg_mask, &child->cg_mask))
690 panic("Duplicate children in %p. mask (%s) child (%s)",
691 parent,
692 cpusetobj_strprint(cpusetbuf, &parent->cg_mask),
693 cpusetobj_strprint(cpusetbuf2, &child->cg_mask));
694 CPU_OR(&parent->cg_mask, &child->cg_mask);
695 parent->cg_count += child->cg_count;
696 }
697
698 return (start);
699 }
700
701 struct cpu_group *
702 smp_topo_1level(int share, int count, int flags)
703 {
704 struct cpu_group *child;
705 struct cpu_group *top;
706 int packages;
707 int cpu;
708 int i;
709
710 cpu = 0;
711 top = &group[0];
712 packages = mp_ncpus / count;
713 top->cg_child = child = &group[1];
714 top->cg_level = CG_SHARE_NONE;
715 for (i = 0; i < packages; i++, child++)
716 cpu = smp_topo_addleaf(top, child, share, count, flags, cpu);
717 return (top);
718 }
719
720 struct cpu_group *
721 smp_topo_2level(int l2share, int l2count, int l1share, int l1count,
722 int l1flags)
723 {
724 struct cpu_group *top;
725 struct cpu_group *l1g;
726 struct cpu_group *l2g;
727 int cpu;
728 int i;
729 int j;
730
731 cpu = 0;
732 top = &group[0];
733 l2g = &group[1];
734 top->cg_child = l2g;
735 top->cg_level = CG_SHARE_NONE;
736 top->cg_children = mp_ncpus / (l2count * l1count);
737 l1g = l2g + top->cg_children;
738 for (i = 0; i < top->cg_children; i++, l2g++) {
739 l2g->cg_parent = top;
740 l2g->cg_child = l1g;
741 l2g->cg_level = l2share;
742 for (j = 0; j < l2count; j++, l1g++)
743 cpu = smp_topo_addleaf(l2g, l1g, l1share, l1count,
744 l1flags, cpu);
745 }
746 return (top);
747 }
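/*
 * Illustrative sketch: a machine-dependent cpu_topo() for a hypothetical
 * machine whose packages contain two cores sharing an L2 cache, each core
 * running two SMT threads, could be built from the helpers above.
 */
#if 0
struct cpu_group *
cpu_topo(void)
{
	/* 2 cores per package share L2; 2 SMT threads per core share L1. */
	return (smp_topo_2level(CG_SHARE_L2, 2, CG_SHARE_L1, 2, CG_FLAG_SMT));
}
#endif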
748
749
750 struct cpu_group *
751 smp_topo_find(struct cpu_group *top, int cpu)
752 {
753 struct cpu_group *cg;
754 cpuset_t mask;
755 int children;
756 int i;
757
758 CPU_SETOF(cpu, &mask);
759 cg = top;
760 for (;;) {
761 if (!CPU_OVERLAP(&cg->cg_mask, &mask))
762 return (NULL);
763 if (cg->cg_children == 0)
764 return (cg);
765 children = cg->cg_children;
766 for (i = 0, cg = cg->cg_child; i < children; cg++, i++)
767 if (CPU_OVERLAP(&cg->cg_mask, &mask))
768 break;
769 }
770 return (NULL);
771 }
772 #else /* !SMP */
773
774 void
775 smp_rendezvous_cpus(cpuset_t map,
776 void (*setup_func)(void *),
777 void (*action_func)(void *),
778 void (*teardown_func)(void *),
779 void *arg)
780 {
781 /*
782 * In the !SMP case we just need to ensure the same initial conditions
783 * as the SMP case.
784 */
785 spinlock_enter();
786 if (setup_func != NULL)
787 setup_func(arg);
788 if (action_func != NULL)
789 action_func(arg);
790 if (teardown_func != NULL)
791 teardown_func(arg);
792 spinlock_exit();
793 }
794
795 void
796 smp_rendezvous(void (*setup_func)(void *),
797 void (*action_func)(void *),
798 void (*teardown_func)(void *),
799 void *arg)
800 {
801
802 	/* See the comments in smp_rendezvous_cpus() above. */
803 spinlock_enter();
804 if (setup_func != NULL)
805 setup_func(arg);
806 if (action_func != NULL)
807 action_func(arg);
808 if (teardown_func != NULL)
809 teardown_func(arg);
810 spinlock_exit();
811 }
812
813 /*
814 * Provide dummy SMP support for UP kernels. Modules that need to use SMP
815 * APIs will still work using this dummy support.
816 */
817 static void
818 mp_setvariables_for_up(void *dummy)
819 {
820 mp_ncpus = 1;
821 mp_maxid = PCPU_GET(cpuid);
822 CPU_SETOF(mp_maxid, &all_cpus);
823 KASSERT(PCPU_GET(cpuid) == 0, ("UP must have a CPU ID of zero"));
824 }
825 SYSINIT(cpu_mp_setvariables, SI_SUB_TUNABLES, SI_ORDER_FIRST,
826 mp_setvariables_for_up, NULL);
827 #endif /* SMP */
828
829 /*
830 * smp_no_rendevous_barrier was renamed to smp_no_rendezvous_barrier
831 * in __FreeBSD_version 1101508, with the old name remaining in 11.x
832 * as an alias for compatibility. The old name will be gone in 12.0
833 * (__FreeBSD_version >= 1200028).
834 */
835 __strong_reference(smp_no_rendezvous_barrier, smp_no_rendevous_barrier);
836 void
837 smp_no_rendezvous_barrier(void *dummy)
838 {
839 #ifdef SMP
840 KASSERT((!smp_started),("smp_no_rendezvous called and smp is started"));
841 #endif
842 }
843
844 /*
845  * Wait for the specified idle threads to switch once.  This ensures that even
846 * preempted threads have cycled through the switch function once,
847 * exiting their codepaths. This allows us to change global pointers
848 * with no other synchronization.
849 */
850 int
851 quiesce_cpus(cpuset_t map, const char *wmesg, int prio)
852 {
853 struct pcpu *pcpu;
854 u_int gen[MAXCPU];
855 int error;
856 int cpu;
857
858 error = 0;
859 for (cpu = 0; cpu <= mp_maxid; cpu++) {
860 if (!CPU_ISSET(cpu, &map) || CPU_ABSENT(cpu))
861 continue;
862 pcpu = pcpu_find(cpu);
863 gen[cpu] = pcpu->pc_idlethread->td_generation;
864 }
865 for (cpu = 0; cpu <= mp_maxid; cpu++) {
866 if (!CPU_ISSET(cpu, &map) || CPU_ABSENT(cpu))
867 continue;
868 pcpu = pcpu_find(cpu);
869 thread_lock(curthread);
870 sched_bind(curthread, cpu);
871 thread_unlock(curthread);
872 while (gen[cpu] == pcpu->pc_idlethread->td_generation) {
873 error = tsleep(quiesce_cpus, prio, wmesg, 1);
874 if (error != EWOULDBLOCK)
875 goto out;
876 error = 0;
877 }
878 }
879 out:
880 thread_lock(curthread);
881 sched_unbind(curthread);
882 thread_unlock(curthread);
883
884 return (error);
885 }
886
887 int
888 quiesce_all_cpus(const char *wmesg, int prio)
889 {
890
891 return quiesce_cpus(all_cpus, wmesg, prio);
892 }
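/*
 * Illustrative sketch of the pattern described in the comment above
 * quiesce_cpus(): swap a global pointer, then wait for every CPU to pass
 * through a context switch so the old value can no longer be in use on a
 * preempted thread.  The handler variable and "hndlr" wmesg are hypothetical.
 */
#if 0
static void (*handler)(void);	/* hypothetical global hook */

static void
replace_handler(void (*new_handler)(void))
{
	handler = new_handler;
	(void)quiesce_all_cpus("hndlr", 0);
}
#endif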
893
894 /* Extra care is taken with this sysctl because the data type is volatile */
895 static int
896 sysctl_kern_smp_active(SYSCTL_HANDLER_ARGS)
897 {
898 int error, active;
899
900 active = smp_started;
901 error = SYSCTL_OUT(req, &active, sizeof(active));
902 return (error);
903 }
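/*
 * Illustrative sketch (userland, shown for reference only): the handler above
 * backs the read-only kern.smp.active sysctl, which a program can query with
 * sysctlbyname(3).
 */
#if 0
#include <sys/types.h>
#include <sys/sysctl.h>

int
smp_is_active(void)
{
	int active;
	size_t len = sizeof(active);

	if (sysctlbyname("kern.smp.active", &active, &len, NULL, 0) == -1)
		return (-1);
	return (active);
}
#endif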
904
905
906 #ifdef SMP
907 void
908 topo_init_node(struct topo_node *node)
909 {
910
911 bzero(node, sizeof(*node));
912 TAILQ_INIT(&node->children);
913 }
914
915 void
916 topo_init_root(struct topo_node *root)
917 {
918
919 topo_init_node(root);
920 root->type = TOPO_TYPE_SYSTEM;
921 }
922
923 /*
924 * Add a child node with the given ID under the given parent.
925 * Do nothing if there is already a child with that ID.
926 */
927 struct topo_node *
928 topo_add_node_by_hwid(struct topo_node *parent, int hwid,
929 topo_node_type type, uintptr_t subtype)
930 {
931 struct topo_node *node;
932
933 TAILQ_FOREACH_REVERSE(node, &parent->children,
934 topo_children, siblings) {
935 if (node->hwid == hwid
936 && node->type == type && node->subtype == subtype) {
937 return (node);
938 }
939 }
940
941 node = malloc(sizeof(*node), M_TOPO, M_WAITOK);
942 topo_init_node(node);
943 node->parent = parent;
944 node->hwid = hwid;
945 node->type = type;
946 node->subtype = subtype;
947 TAILQ_INSERT_TAIL(&parent->children, node, siblings);
948 parent->nchildren++;
949
950 return (node);
951 }
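/*
 * Illustrative sketch: machine-dependent enumeration code typically builds
 * the topology tree by walking firmware tables and calling
 * topo_add_node_by_hwid() once per discovered package, core and thread.
 * The hardware IDs below are made up.
 */
#if 0
static struct topo_node example_root;

static void
example_enumerate(void)
{
	struct topo_node *pkg, *core;

	topo_init_root(&example_root);
	pkg = topo_add_node_by_hwid(&example_root, 0, TOPO_TYPE_PKG, 0);
	core = topo_add_node_by_hwid(pkg, 0, TOPO_TYPE_CORE, 0);
	(void)topo_add_node_by_hwid(core, 0, TOPO_TYPE_PU, 0);
	(void)topo_add_node_by_hwid(core, 1, TOPO_TYPE_PU, 0);
}
#endif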
952
953 /*
954 * Find a child node with the given ID under the given parent.
955 */
956 struct topo_node *
957 topo_find_node_by_hwid(struct topo_node *parent, int hwid,
958 topo_node_type type, uintptr_t subtype)
959 {
960
961 struct topo_node *node;
962
963 TAILQ_FOREACH(node, &parent->children, siblings) {
964 if (node->hwid == hwid
965 && node->type == type && node->subtype == subtype) {
966 return (node);
967 }
968 }
969
970 return (NULL);
971 }
972
973 /*
974 * Given a node change the order of its parent's child nodes such
975  * that the node becomes the first child while preserving the cyclic
976 * order of the children. In other words, the given node is promoted
977 * by rotation.
978 */
979 void
980 topo_promote_child(struct topo_node *child)
981 {
982 struct topo_node *next;
983 struct topo_node *node;
984 struct topo_node *parent;
985
986 parent = child->parent;
987 next = TAILQ_NEXT(child, siblings);
988 TAILQ_REMOVE(&parent->children, child, siblings);
989 TAILQ_INSERT_HEAD(&parent->children, child, siblings);
990
991 while (next != NULL) {
992 node = next;
993 next = TAILQ_NEXT(node, siblings);
994 TAILQ_REMOVE(&parent->children, node, siblings);
995 TAILQ_INSERT_AFTER(&parent->children, child, node, siblings);
996 child = node;
997 }
998 }
999
1000 /*
1001 * Iterate to the next node in the depth-first search (traversal) of
1002 * the topology tree.
1003 */
1004 struct topo_node *
1005 topo_next_node(struct topo_node *top, struct topo_node *node)
1006 {
1007 struct topo_node *next;
1008
1009 if ((next = TAILQ_FIRST(&node->children)) != NULL)
1010 return (next);
1011
1012 if ((next = TAILQ_NEXT(node, siblings)) != NULL)
1013 return (next);
1014
1015 while ((node = node->parent) != top)
1016 if ((next = TAILQ_NEXT(node, siblings)) != NULL)
1017 return (next);
1018
1019 return (NULL);
1020 }
1021
1022 /*
1023 * Iterate to the next node in the depth-first search of the topology tree,
1024 * but without descending below the current node.
1025 */
1026 struct topo_node *
1027 topo_next_nonchild_node(struct topo_node *top, struct topo_node *node)
1028 {
1029 struct topo_node *next;
1030
1031 if ((next = TAILQ_NEXT(node, siblings)) != NULL)
1032 return (next);
1033
1034 while ((node = node->parent) != top)
1035 if ((next = TAILQ_NEXT(node, siblings)) != NULL)
1036 return (next);
1037
1038 return (NULL);
1039 }
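/*
 * Illustrative sketch: the two iterators above implement a depth-first walk
 * of the tree.  Counting the logical processors under a root, for example,
 * follows the same pattern used by topo_analyze() below.
 */
#if 0
static int
example_count_pus(struct topo_node *root)
{
	struct topo_node *node;
	int count;

	count = 0;
	node = root;
	while (node != NULL) {
		if (node->type == TOPO_TYPE_PU)
			count++;
		node = topo_next_node(root, node);
	}
	return (count);
}
#endif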
1040
1041 /*
1042 * Assign the given ID to the given topology node that represents a logical
1043 * processor.
1044 */
1045 void
1046 topo_set_pu_id(struct topo_node *node, cpuid_t id)
1047 {
1048
1049 KASSERT(node->type == TOPO_TYPE_PU,
1050 ("topo_set_pu_id: wrong node type: %u", node->type));
1051 KASSERT(CPU_EMPTY(&node->cpuset) && node->cpu_count == 0,
1052 ("topo_set_pu_id: cpuset already not empty"));
1053 node->id = id;
1054 CPU_SET(id, &node->cpuset);
1055 node->cpu_count = 1;
1056 node->subtype = 1;
1057
1058 while ((node = node->parent) != NULL) {
1059 KASSERT(!CPU_ISSET(id, &node->cpuset),
1060 ("logical ID %u is already set in node %p", id, node));
1061 CPU_SET(id, &node->cpuset);
1062 node->cpu_count++;
1063 }
1064 }
1065
1066 /*
1067 * Check if the topology is uniform, that is, each package has the same number
1068 * of cores in it and each core has the same number of threads (logical
1069  * processors) in it.  If so, calculate the number of packages, the number of
1070  * cores per package and the number of logical processors per core.
1071  * The 'all' parameter tells whether to include administratively disabled
1072  * logical processors in the analysis.
1073 */
1074 int
1075 topo_analyze(struct topo_node *topo_root, int all,
1076 int *pkg_count, int *cores_per_pkg, int *thrs_per_core)
1077 {
1078 struct topo_node *pkg_node;
1079 struct topo_node *core_node;
1080 struct topo_node *pu_node;
1081 int thrs_per_pkg;
1082 int cpp_counter;
1083 int tpc_counter;
1084 int tpp_counter;
1085
1086 *pkg_count = 0;
1087 *cores_per_pkg = -1;
1088 *thrs_per_core = -1;
1089 thrs_per_pkg = -1;
1090 pkg_node = topo_root;
1091 while (pkg_node != NULL) {
1092 if (pkg_node->type != TOPO_TYPE_PKG) {
1093 pkg_node = topo_next_node(topo_root, pkg_node);
1094 continue;
1095 }
1096 if (!all && CPU_EMPTY(&pkg_node->cpuset)) {
1097 pkg_node = topo_next_nonchild_node(topo_root, pkg_node);
1098 continue;
1099 }
1100
1101 (*pkg_count)++;
1102
1103 cpp_counter = 0;
1104 tpp_counter = 0;
1105 core_node = pkg_node;
1106 while (core_node != NULL) {
1107 if (core_node->type == TOPO_TYPE_CORE) {
1108 if (!all && CPU_EMPTY(&core_node->cpuset)) {
1109 core_node =
1110 topo_next_nonchild_node(pkg_node,
1111 core_node);
1112 continue;
1113 }
1114
1115 cpp_counter++;
1116
1117 tpc_counter = 0;
1118 pu_node = core_node;
1119 while (pu_node != NULL) {
1120 if (pu_node->type == TOPO_TYPE_PU &&
1121 (all || !CPU_EMPTY(&pu_node->cpuset)))
1122 tpc_counter++;
1123 pu_node = topo_next_node(core_node,
1124 pu_node);
1125 }
1126
1127 if (*thrs_per_core == -1)
1128 *thrs_per_core = tpc_counter;
1129 else if (*thrs_per_core != tpc_counter)
1130 return (0);
1131
1132 core_node = topo_next_nonchild_node(pkg_node,
1133 core_node);
1134 } else {
1135 /* PU node directly under PKG. */
1136 if (core_node->type == TOPO_TYPE_PU &&
1137 (all || !CPU_EMPTY(&core_node->cpuset)))
1138 tpp_counter++;
1139 core_node = topo_next_node(pkg_node,
1140 core_node);
1141 }
1142 }
1143
1144 if (*cores_per_pkg == -1)
1145 *cores_per_pkg = cpp_counter;
1146 else if (*cores_per_pkg != cpp_counter)
1147 return (0);
1148 if (thrs_per_pkg == -1)
1149 thrs_per_pkg = tpp_counter;
1150 else if (thrs_per_pkg != tpp_counter)
1151 return (0);
1152
1153 pkg_node = topo_next_nonchild_node(topo_root, pkg_node);
1154 }
1155
1156 KASSERT(*pkg_count > 0,
1157 ("bug in topology or analysis"));
1158 if (*cores_per_pkg == 0) {
1159 KASSERT(*thrs_per_core == -1 && thrs_per_pkg > 0,
1160 ("bug in topology or analysis"));
1161 *thrs_per_core = thrs_per_pkg;
1162 }
1163
1164 return (1);
1165 }
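/*
 * Illustrative sketch: machine-dependent code can use the analysis above to
 * report the detected layout; for a uniform topology the CPU count factors
 * as packages x cores-per-package x threads-per-core.
 */
#if 0
static void
example_report_topology(struct topo_node *root)
{
	int pkgs, cores, thrs;

	if (topo_analyze(root, 1, &pkgs, &cores, &thrs))
		printf("%d package(s) x %d core(s) x %d hardware thread(s)\n",
		    pkgs, cores, thrs);
	else
		printf("non-uniform topology\n");
}
#endif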
1166 #endif /* SMP */
1167