1 /*-
2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
3 *
4 * Copyright (c) 2008 Marcel Moolenaar
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 *
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 */
28
29 #include <sys/cdefs.h>
30 __FBSDID("$FreeBSD$");
31
32 #include <sys/param.h>
33 #include <sys/systm.h>
34 #include <sys/kernel.h>
35 #include <sys/ktr.h>
36 #include <sys/bus.h>
37 #include <sys/cpuset.h>
38 #include <sys/lock.h>
39 #include <sys/malloc.h>
40 #include <sys/mutex.h>
41 #include <sys/pcpu.h>
42 #include <sys/proc.h>
43 #include <sys/sched.h>
44 #include <sys/smp.h>
45
46 #include <vm/vm.h>
47 #include <vm/vm_param.h>
48 #include <vm/pmap.h>
49 #include <vm/vm_map.h>
50 #include <vm/vm_extern.h>
51 #include <vm/vm_kern.h>
52
53 #include <machine/bus.h>
54 #include <machine/cpu.h>
55 #include <machine/intr_machdep.h>
56 #include <machine/pcb.h>
57 #include <machine/platform.h>
58 #include <machine/md_var.h>
59 #include <machine/setjmp.h>
60 #include <machine/smp.h>
61
62 #include "pic_if.h"
63
64 volatile static int ap_awake;
65 volatile static u_int ap_letgo;
66 volatile static u_quad_t ap_timebase;
67 static u_int ipi_msg_cnt[32];
68 static struct mtx ap_boot_mtx;
69 struct pcb stoppcbs[MAXCPU];
70
/*
 * Entry point for application processors (APs) once they reach C code:
 * spin until the BSP releases us, synchronize the timebase, announce
 * ourselves, then enter the scheduler.  Does not return.
 */
void
machdep_ap_bootstrap(void)
{

	/* Tell the BSP this AP is alive and executing C code. */
	PCPU_SET(awake, 1);
	__asm __volatile("msync; isync");

	/*
	 * Spin until cpu_mp_unleash() sets ap_letgo.  The "or Rx,Rx,Rx"
	 * no-ops are PowerPC SMT thread-priority hints: lower priority
	 * while busy-waiting, then bump it back up once released
	 * (NOTE(review): exact priority levels encoded by 27/6 should be
	 * confirmed against the ISA documentation).
	 */
	while (ap_letgo == 0)
		__asm __volatile("or 27,27,27");
	__asm __volatile("or 6,6,6");

	/*
	 * Set timebase as soon as possible to meet an implicit rendezvous
	 * from cpu_mp_unleash(), which sets ap_letgo and then immediately
	 * sets timebase.
	 *
	 * Note that this is intrinsically racy and is only relevant on
	 * platforms that do not support better mechanisms.
	 */
	platform_smp_timebase_sync(ap_timebase, 1);

	/* Give platform code a chance to do anything else necessary */
	platform_smp_ap_init();

	/* Initialize decrementer */
	decr_ap_init();

	/* Serialize console output and AP count increment */
	mtx_lock_spin(&ap_boot_mtx);
	ap_awake++;
	printf("SMP: AP CPU #%d launched\n", PCPU_GET(cpuid));
	mtx_unlock_spin(&ap_boot_mtx);

	/* Busy-wait until the BSP declares SMP fully started. */
	while(smp_started == 0)
		;

	/* Start per-CPU event timers. */
	cpu_initclocks_ap();

	/* Announce ourselves awake, and enter the scheduler */
	sched_throw(NULL);
}
113
114 void
115 cpu_mp_setmaxid(void)
116 {
117 struct cpuref cpuref;
118 int error;
119
120 mp_ncpus = 0;
121 mp_maxid = 0;
122 error = platform_smp_first_cpu(&cpuref);
123 while (!error) {
124 mp_ncpus++;
125 mp_maxid = max(cpuref.cr_cpuid, mp_maxid);
126 error = platform_smp_next_cpu(&cpuref);
127 }
128 /* Sanity. */
129 if (mp_ncpus == 0)
130 mp_ncpus = 1;
131 }
132
133 int
134 cpu_mp_probe(void)
135 {
136
137 /*
138 * We're not going to enable SMP if there's only 1 processor.
139 */
140 return (mp_ncpus > 1);
141 }
142
/*
 * Register every usable processor with the kernel: allocate and
 * initialize a struct pcpu plus dynamic per-CPU storage for each AP,
 * reuse the BSP's statically initialized pcpu, and record each valid
 * CPU ID in all_cpus.
 */
void
cpu_mp_start(void)
{
	struct cpuref bsp, cpu;
	struct pcpu *pc;
	int error;

	error = platform_smp_get_bsp(&bsp);
	KASSERT(error == 0, ("Don't know BSP"));

	error = platform_smp_first_cpu(&cpu);
	while (!error) {
		/* IDs past MAXCPU cannot be represented in cpuset_t. */
		if (cpu.cr_cpuid >= MAXCPU) {
			printf("SMP: cpu%d: skipped -- ID out of range\n",
			    cpu.cr_cpuid);
			goto next;
		}
		/* Guard against the platform reporting the same ID twice. */
		if (CPU_ISSET(cpu.cr_cpuid, &all_cpus)) {
			printf("SMP: cpu%d: skipped - duplicate ID\n",
			    cpu.cr_cpuid);
			goto next;
		}
		if (cpu.cr_cpuid != bsp.cr_cpuid) {
			void *dpcpu;

			/* AP: set up its pcpu and dynamic per-CPU area. */
			pc = &__pcpu[cpu.cr_cpuid];
			dpcpu = (void *)kmem_malloc(DPCPU_SIZE, M_WAITOK |
			    M_ZERO);
			pcpu_init(pc, cpu.cr_cpuid, sizeof(*pc));
			dpcpu_init(dpcpu, cpu.cr_cpuid);
		} else {
			/* BSP: pcpu was initialized early in boot. */
			pc = pcpup;
			pc->pc_cpuid = bsp.cr_cpuid;
			pc->pc_bsp = 1;
		}
		/* Remember the platform handle used later to wake this CPU. */
		pc->pc_hwref = cpu.cr_hwref;
		CPU_SET(pc->pc_cpuid, &all_cpus);
next:
		error = platform_smp_next_cpu(&cpu);
	}

#ifdef SMP
	platform_smp_probe_threads();
#endif
}
188
189 void
190 cpu_mp_announce(void)
191 {
192 struct pcpu *pc;
193 int i;
194
195 if (!bootverbose)
196 return;
197
198 CPU_FOREACH(i) {
199 pc = pcpu_find(i);
200 if (pc == NULL)
201 continue;
202 printf("cpu%d: dev=%x", i, (int)pc->pc_hwref);
203 if (pc->pc_bsp)
204 printf(" (BSP)");
205 printf("\n");
206 }
207 }
208
/*
 * SI_SUB_SMP sysinit, run once on the BSP: wake each AP, wait for it to
 * report in, synchronize timebases, and declare SMP started.
 */
static void
cpu_mp_unleash(void *dummy)
{
	struct pcpu *pc;
	int cpus, timeout;
	int ret;

	/* Nothing to unleash on a uniprocessor system. */
	if (mp_ncpus <= 1)
		return;

	mtx_init(&ap_boot_mtx, "ap boot", NULL, MTX_SPIN);

	cpus = 0;
	smp_cpus = 0;
#ifdef BOOKE
	tlb1_ap_prep();
#endif
	STAILQ_FOREACH(pc, &cpuhead, pc_allcpu) {
		cpus++;
		if (!pc->pc_bsp) {
			if (bootverbose)
				printf("Waking up CPU %d (dev=%x)\n",
				    pc->pc_cpuid, (int)pc->pc_hwref);

			ret = platform_smp_start_cpu(pc);
			if (ret == 0) {
				/* Poll pc_awake in 1ms steps, 2s total. */
				timeout = 2000;	/* wait 2sec for the AP */
				while (!pc->pc_awake && --timeout > 0)
					DELAY(1000);
			}
		} else {
			/* The BSP is trivially awake. */
			pc->pc_awake = 1;
		}
		if (pc->pc_awake) {
			if (bootverbose)
				printf("Adding CPU %d, hwref=%jx, awake=%x\n",
				    pc->pc_cpuid, (uintmax_t)pc->pc_hwref,
				    pc->pc_awake);
			smp_cpus++;
		} else
			/* A CPU that failed to come up counts as stopped. */
			CPU_SET(pc->pc_cpuid, &stopped_cpus);
	}

	/* Count the BSP itself in the awake tally. */
	ap_awake = 1;

	/* Provide our current DEC and TB values for APs */
	ap_timebase = mftb() + 10;
	__asm __volatile("msync; isync");

	/* Let APs continue */
	atomic_store_rel_int(&ap_letgo, 1);

	/*
	 * Implicit rendezvous with machdep_ap_bootstrap(): setting
	 * ap_letgo then immediately syncing the timebase.
	 */
	platform_smp_timebase_sync(ap_timebase, 0);

	/* Busy-wait until every woken AP has bumped ap_awake. */
	while (ap_awake < smp_cpus)
		;

	if (smp_cpus != cpus || cpus != mp_ncpus) {
		printf("SMP: %d CPUs found; %d CPUs usable; %d CPUs woken\n",
		    mp_ncpus, cpus, smp_cpus);
	}

	if (smp_cpus > 1)
		atomic_store_rel_int(&smp_started, 1);

	/* Let the APs get into the scheduler */
	DELAY(10000);

}
278
/* Wake the APs during the SI_SUB_SMP stage of boot. */
SYSINIT(start_aps, SI_SUB_SMP, SI_ORDER_FIRST, cpu_mp_unleash, NULL);
280
/*
 * Interrupt filter for incoming IPIs.  Atomically fetches and clears
 * this CPU's pending-IPI bitmask and dispatches each set message.
 * Returns FILTER_STRAY when nothing was pending, FILTER_HANDLED
 * otherwise.
 */
int
powerpc_ipi_handler(void *arg)
{
	u_int cpuid;
	uint32_t ipimask;
	int msg;

	CTR2(KTR_SMP, "%s: MSR 0x%08x", __func__, mfmsr());

	ipimask = atomic_readandclear_32(&(pcpup->pc_ipimask));
	if (ipimask == 0)
		return (FILTER_STRAY);
	/* Dispatch pending messages lowest bit first. */
	while ((msg = ffs(ipimask) - 1) != -1) {
		ipimask &= ~(1u << msg);
		ipi_msg_cnt[msg]++;	/* per-message statistics */
		switch (msg) {
		case IPI_AST:
			/* Nothing to do; returning from the interrupt
			 * delivers the AST. */
			CTR1(KTR_SMP, "%s: IPI_AST", __func__);
			break;
		case IPI_PREEMPT:
			CTR1(KTR_SMP, "%s: IPI_PREEMPT", __func__);
			sched_preempt(curthread);
			break;
		case IPI_RENDEZVOUS:
			CTR1(KTR_SMP, "%s: IPI_RENDEZVOUS", __func__);
			smp_rendezvous_action();
			break;
		case IPI_STOP:

			/*
			 * IPI_STOP_HARD is mapped to IPI_STOP so it is not
			 * necessary to add such case in the switch.
			 */
			CTR1(KTR_SMP, "%s: IPI_STOP or IPI_STOP_HARD (stop)",
			    __func__);
			cpuid = PCPU_GET(cpuid);
			/*
			 * NOTE(review): the context is saved twice, into
			 * stoppcbs[] and into curpcb — confirm both copies
			 * are required (e.g. debugger vs. resume paths).
			 */
			savectx(&stoppcbs[cpuid]);
			savectx(PCPU_GET(curpcb));
			CPU_SET_ATOMIC(cpuid, &stopped_cpus);
			/* Spin until another CPU tells us to restart. */
			while (!CPU_ISSET(cpuid, &started_cpus))
				cpu_spinwait();
			CPU_CLR_ATOMIC(cpuid, &stopped_cpus);
			CPU_CLR_ATOMIC(cpuid, &started_cpus);
			CTR1(KTR_SMP, "%s: IPI_STOP (restart)", __func__);
			break;
		case IPI_HARDCLOCK:
			CTR1(KTR_SMP, "%s: IPI_HARDCLOCK", __func__);
			hardclockintr();
			break;
		}
	}

	return (FILTER_HANDLED);
}
335
/*
 * Post one IPI message to a CPU: set the message bit in the target's
 * pc_ipimask, then raise the interrupt through the root PIC.  The
 * powerpc_sync() in between orders the mask update before the
 * interrupt so the handler is guaranteed to observe the bit.
 */
static void
ipi_send(struct pcpu *pc, int ipi)
{

	CTR4(KTR_SMP, "%s: pc=%p, targetcpu=%d, IPI=%d", __func__,
	    pc, pc->pc_cpuid, ipi);

	atomic_set_32(&pc->pc_ipimask, (1 << ipi));
	powerpc_sync();
	PIC_IPI(root_pic, pc->pc_cpuid);

	CTR1(KTR_SMP, "%s: sent", __func__);
}
349
350 /* Send an IPI to a set of cpus. */
351 void
352 ipi_selected(cpuset_t cpus, int ipi)
353 {
354 struct pcpu *pc;
355
356 STAILQ_FOREACH(pc, &cpuhead, pc_allcpu) {
357 if (CPU_ISSET(pc->pc_cpuid, &cpus))
358 ipi_send(pc, ipi);
359 }
360 }
361
362 /* Send an IPI to a specific CPU. */
363 void
364 ipi_cpu(int cpu, u_int ipi)
365 {
366
367 ipi_send(cpuid_to_pcpu[cpu], ipi);
368 }
369
370 /* Send an IPI to all CPUs EXCEPT myself. */
371 void
372 ipi_all_but_self(int ipi)
373 {
374 struct pcpu *pc;
375
376 STAILQ_FOREACH(pc, &cpuhead, pc_allcpu) {
377 if (pc != pcpup)
378 ipi_send(pc, ipi);
379 }
380 }