/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2008 Marcel Moolenaar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/bus.h>
#include <sys/cpuset.h>
#include <sys/domainset.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/pcpu.h>
#include <sys/proc.h>
#include <sys/sched.h>
#include <sys/smp.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>

#include <machine/bus.h>
#include <machine/cpu.h>
#include <machine/intr_machdep.h>
#include <machine/pcb.h>
#include <machine/platform.h>
#include <machine/md_var.h>
#include <machine/setjmp.h>
#include <machine/smp.h>

#include "pic_if.h"

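/*
 * State shared between the BSP and the APs during bootstrap: the BSP sets
 * ap_awake to 1 for itself and each AP increments it as it comes up,
 * ap_letgo releases the APs from their initial spin loop, and ap_timebase
 * is the timebase value the APs synchronize to.  stoppcbs[] holds the
 * context saved by each CPU when it is stopped via IPI_STOP.
 */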
volatile static int ap_awake;
volatile static u_int ap_letgo;
volatile static u_quad_t ap_timebase;
static struct mtx ap_boot_mtx;
struct pcb stoppcbs[MAXCPU];

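/*
 * C entry point for an AP once the platform's low-level startup code has
 * set up its pcpu area and stack: spin until the BSP releases us, sync the
 * timebase, report in, and enter the scheduler.
 */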
void
machdep_ap_bootstrap(void)
{

	PCPU_SET(awake, 1);
	__asm __volatile("msync; isync");

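	/*
	 * Wait for the BSP to release us, spinning at very low SMT thread
	 * priority while we wait, then return to medium priority.
	 */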
	while (ap_letgo == 0)
		nop_prio_vlow();
	nop_prio_medium();

	/*
	 * Set timebase as soon as possible to meet an implicit rendezvous
	 * from cpu_mp_unleash(), which sets ap_letgo and then immediately
	 * sets timebase.
	 *
	 * Note that this is intrinsically racy and is only relevant on
	 * platforms that do not support better mechanisms.
	 */
	platform_smp_timebase_sync(ap_timebase, 1);

	/* Give platform code a chance to do anything else necessary */
	platform_smp_ap_init();

	/* Initialize decrementer */
	decr_ap_init();

	/* Serialize console output and AP count increment */
	mtx_lock_spin(&ap_boot_mtx);
	ap_awake++;
	if (bootverbose)
		printf("SMP: AP CPU #%d launched\n", PCPU_GET(cpuid));
	else
		printf("%s%d%s", ap_awake == 2 ? "Launching APs: " : "",
		    PCPU_GET(cpuid), ap_awake == mp_ncpus ? "\n" : " ");
	mtx_unlock_spin(&ap_boot_mtx);

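	/* Spin until the BSP declares SMP fully started. */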
	while (smp_started == 0)
		;

	/* Start per-CPU event timers. */
	cpu_initclocks_ap();

	/* Announce ourselves awake, and enter the scheduler */
	sched_ap_entry();
}

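/*
 * Count the CPUs reported by the platform and record the highest CPU id in
 * mp_maxid; fall back to a single CPU if enumeration fails.
 */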
void
cpu_mp_setmaxid(void)
{
	struct cpuref cpuref;
	int error;

	mp_ncpus = 0;
	mp_maxid = 0;
	error = platform_smp_first_cpu(&cpuref);
	while (!error) {
		mp_ncpus++;
		mp_maxid = max(cpuref.cr_cpuid, mp_maxid);
		error = platform_smp_next_cpu(&cpuref);
	}
	/* Sanity. */
	if (mp_ncpus == 0)
		mp_ncpus = 1;
}

int
cpu_mp_probe(void)
{

	/*
	 * We're not going to enable SMP if there's only 1 processor.
	 */
	return (mp_ncpus > 1);
}

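/*
 * Set up the static and dynamic per-CPU data for every usable CPU and add
 * each one to all_cpus; the BSP's pcpu area already exists and is only
 * annotated here.
 */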
void
cpu_mp_start(void)
{
	struct cpuref bsp, cpu;
	struct pcpu *pc;
	int domain, error;

	error = platform_smp_get_bsp(&bsp);
	KASSERT(error == 0, ("Don't know BSP"));

	error = platform_smp_first_cpu(&cpu);
	while (!error) {
		if (cpu.cr_cpuid >= MAXCPU) {
			printf("SMP: cpu%d: skipped -- ID out of range\n",
			    cpu.cr_cpuid);
			goto next;
		}
		if (CPU_ISSET(cpu.cr_cpuid, &all_cpus)) {
			printf("SMP: cpu%d: skipped -- duplicate ID\n",
			    cpu.cr_cpuid);
			goto next;
		}

		if (vm_ndomains > 1)
			domain = cpu.cr_domain;
		else
			domain = 0;

		if (cpu.cr_cpuid != bsp.cr_cpuid) {
			void *dpcpu;

			pc = &__pcpu[cpu.cr_cpuid];
			dpcpu = kmem_malloc_domainset(DOMAINSET_PREF(domain),
			    DPCPU_SIZE, M_WAITOK | M_ZERO);
			pcpu_init(pc, cpu.cr_cpuid, sizeof(*pc));
			dpcpu_init(dpcpu, cpu.cr_cpuid);
		} else {
			pc = pcpup;
			pc->pc_cpuid = bsp.cr_cpuid;
			pc->pc_bsp = 1;
		}
		pc->pc_domain = domain;
		pc->pc_hwref = cpu.cr_hwref;

		CPU_SET(pc->pc_cpuid, &cpuset_domain[pc->pc_domain]);
		KASSERT(pc->pc_domain < MAXMEMDOM, ("bad domain value %d\n",
		    pc->pc_domain));
		CPU_SET(pc->pc_cpuid, &all_cpus);
next:
		error = platform_smp_next_cpu(&cpu);
	}

#ifdef SMP
	platform_smp_probe_threads();
#endif
}

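/* When booting verbosely, print a summary line for each CPU. */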
void
cpu_mp_announce(void)
{
	struct pcpu *pc;
	int i;

	if (!bootverbose)
		return;

	CPU_FOREACH(i) {
		pc = pcpu_find(i);
		if (pc == NULL)
			continue;
		printf("cpu%d: dev=%x domain=%d ", i, (int)pc->pc_hwref,
		    pc->pc_domain);
		if (pc->pc_bsp)
			printf(" (BSP)");
		printf("\n");
	}
}

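/*
 * Wake the APs and wait for them to check in.  Runs once at SI_SUB_SMP
 * from the SYSINIT below.
 */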
static void
cpu_mp_unleash(void *dummy)
{
	struct pcpu *pc;
	int cpus, timeout;
	int ret;

	if (mp_ncpus <= 1)
		return;

	mtx_init(&ap_boot_mtx, "ap boot", NULL, MTX_SPIN);

	cpus = 0;
	smp_cpus = 0;
#ifdef BOOKE
	tlb1_ap_prep();
#endif
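	/*
	 * Ask the platform to start each AP and give it up to two seconds
	 * to report itself awake; CPUs that never wake up are marked as
	 * stopped.
	 */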
	STAILQ_FOREACH(pc, &cpuhead, pc_allcpu) {
		cpus++;
		if (!pc->pc_bsp) {
			if (bootverbose)
				printf("Waking up CPU %d (dev=%x)\n",
				    pc->pc_cpuid, (int)pc->pc_hwref);

			pc->pc_flags = PCPU_GET(flags); /* Copy cached CPU flags */
			ret = platform_smp_start_cpu(pc);
			if (ret == 0) {
				timeout = 2000;	/* wait 2sec for the AP */
				while (!pc->pc_awake && --timeout > 0)
					DELAY(1000);
			}
		} else {
			pc->pc_awake = 1;
		}
		if (pc->pc_awake) {
			if (bootverbose)
				printf("Adding CPU %d, hwref=%jx, awake=%x\n",
				    pc->pc_cpuid, (uintmax_t)pc->pc_hwref,
				    pc->pc_awake);
			smp_cpus++;
		} else
			CPU_SET(pc->pc_cpuid, &stopped_cpus);
	}

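	/* Account for the BSP; each AP increments this as it comes up. */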
	ap_awake = 1;

	/* Provide our current TB value for the APs to synchronize to */
	ap_timebase = mftb() + 10;
	__asm __volatile("msync; isync");

	/* Let APs continue */
	atomic_store_rel_int(&ap_letgo, 1);

	platform_smp_timebase_sync(ap_timebase, 0);

	while (ap_awake < smp_cpus)
		;

	if (smp_cpus != cpus || cpus != mp_ncpus) {
		printf("SMP: %d CPUs found; %d CPUs usable; %d CPUs woken\n",
		    mp_ncpus, cpus, smp_cpus);
	}

	if (smp_cpus > 1)
		atomic_store_rel_int(&smp_started, 1);

	/* Let the APs get into the scheduler */
	DELAY(10000);
}

SYSINIT(start_aps, SI_SUB_SMP, SI_ORDER_FIRST, cpu_mp_unleash, NULL);

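/*
 * Interrupt filter for incoming IPIs: atomically fetch and clear this
 * CPU's pending IPI mask and handle each request that was posted.
 */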
int
powerpc_ipi_handler(void *arg)
{
	u_int cpuid;
	uint32_t ipimask;

	CTR2(KTR_SMP, "%s: MSR 0x%08x", __func__, mfmsr());

	ipimask = atomic_readandclear_32(&(pcpup->pc_ipimask));
	if (ipimask == 0)
		return (FILTER_STRAY);
	if (ipimask & (1 << IPI_AST)) {
		CTR1(KTR_SMP, "%s: IPI_AST", __func__);
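		/* Nothing to do here; pending ASTs are checked on return
		 * to user mode. */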
	}
	if (ipimask & (1 << IPI_PREEMPT)) {
		CTR1(KTR_SMP, "%s: IPI_PREEMPT", __func__);
		sched_preempt(curthread);
	}
	if (ipimask & (1 << IPI_RENDEZVOUS)) {
		CTR1(KTR_SMP, "%s: IPI_RENDEZVOUS", __func__);
		smp_rendezvous_action();
	}
	if (ipimask & (1 << IPI_STOP)) {
		/*
		 * IPI_STOP_HARD is mapped to IPI_STOP, so a separate
		 * case is not needed here.
		 */
		CTR1(KTR_SMP, "%s: IPI_STOP or IPI_STOP_HARD (stop)",
		    __func__);
		cpuid = PCPU_GET(cpuid);
		savectx(&stoppcbs[cpuid]);
		CPU_SET_ATOMIC(cpuid, &stopped_cpus);
		while (!CPU_ISSET(cpuid, &started_cpus))
			cpu_spinwait();
		CPU_CLR_ATOMIC(cpuid, &stopped_cpus);
		CPU_CLR_ATOMIC(cpuid, &started_cpus);
		CTR1(KTR_SMP, "%s: IPI_STOP (restart)", __func__);
	}
	if (ipimask & (1 << IPI_HARDCLOCK)) {
		CTR1(KTR_SMP, "%s: IPI_HARDCLOCK", __func__);
		hardclockintr();
	}

	return (FILTER_HANDLED);
}

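/*
 * Deliver a single IPI: set the request bit in the target CPU's mask,
 * make it globally visible, and trigger an interrupt via the root PIC.
 */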
static void
ipi_send(struct pcpu *pc, int ipi)
{

	CTR4(KTR_SMP, "%s: pc=%p, targetcpu=%d, IPI=%d", __func__,
	    pc, pc->pc_cpuid, ipi);

	atomic_set_32(&pc->pc_ipimask, (1 << ipi));
	powerpc_sync();
	PIC_IPI(root_pic, pc->pc_cpuid);

	CTR1(KTR_SMP, "%s: sent", __func__);
}

/* Send an IPI to a set of cpus. */
void
ipi_selected(cpuset_t cpus, int ipi)
{
	struct pcpu *pc;

	STAILQ_FOREACH(pc, &cpuhead, pc_allcpu) {
		if (CPU_ISSET(pc->pc_cpuid, &cpus))
			ipi_send(pc, ipi);
	}
}

/* Send an IPI to a specific CPU. */
void
ipi_cpu(int cpu, u_int ipi)
{

	ipi_send(cpuid_to_pcpu[cpu], ipi);
}

/* Send an IPI to all CPUs EXCEPT myself. */
void
ipi_all_but_self(int ipi)
{
	struct pcpu *pc;

	STAILQ_FOREACH(pc, &cpuhead, pc_allcpu) {
		if (pc != pcpup)
			ipi_send(pc, ipi);
	}
}