/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2011 Semihalf.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include "opt_ddb.h"

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/pcpu.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/ktr.h>
#include <sys/malloc.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>
#include <vm/pmap.h>

#include <machine/armreg.h>
#include <machine/cpu.h>
#include <machine/cpufunc.h>
#include <machine/debug_monitor.h>
#include <machine/smp.h>
#include <machine/pcb.h>
#include <machine/intr.h>
#include <machine/vmparam.h>
#ifdef VFP
#include <machine/vfp.h>
#endif
#ifdef CPU_MV_PJ4B
#include <arm/mv/mvwin.h>
#endif

/* Used to hold the APs until we are ready to release them. */
struct mtx ap_boot_mtx;
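/* Per-CPU register state saved by ipi_stop(), used by debugger and dump code. */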
struct pcb stoppcbs[MAXCPU];

/* # of application processors */
volatile int mp_naps;

/* Set to 1 once we're ready to let the APs out of the pen. */
volatile int aps_ready = 0;

void set_stackptrs(int cpu);

/* Temporary variables for init_secondary() */
void *dpcpu[MAXCPU - 1];

/* Determine if we are running on an MP machine. */
int
cpu_mp_probe(void)
{

	KASSERT(mp_ncpus != 0, ("cpu_mp_probe: mp_ncpus is unset"));

	CPU_SETOF(0, &all_cpus);

	return (mp_ncpus > 1);
}

/* Wait up to ~2 seconds for the APs started by the platform code to check in. */
static int
check_ap(void)
{
	uint32_t ms;

	for (ms = 0; ms < 2000; ++ms) {
		if ((mp_naps + 1) == mp_ncpus)
			return (0);	/* success */
		else
			DELAY(1000);
	}

	return (-2);
}

/* Initialize and fire up non-boot processors. */
void
cpu_mp_start(void)
{
	int error, i;

	mtx_init(&ap_boot_mtx, "ap boot", NULL, MTX_SPIN);

	/* Reserve memory for application processors */
	for (i = 0; i < (mp_ncpus - 1); i++)
		dpcpu[i] = (void *)kmem_malloc(DPCPU_SIZE, M_WAITOK | M_ZERO);

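	/* Write back all dirty data so the APs, which start with caches off, see it. */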
	dcache_wbinv_poc_all();

	/* Initialize boot code and start up processors */
	platform_mp_start_ap();

	/* Check if the APs started properly */
	error = check_ap();
	if (error)
		printf("WARNING: Some APs failed to start\n");
	else
		for (i = 1; i < mp_ncpus; i++)
			CPU_SET(i, &all_cpus);
}

/* Introduce the rest of the cores to the world. */
void
cpu_mp_announce(void)
{

}

void
init_secondary(int cpu)
{
	struct pcpu *pc;
	uint32_t loop_counter;

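	/*
	 * Bring up this core's MMU: program the memory attribute (TEX
	 * remap) registers, switch onto the kernel translation table and
	 * apply the per-CPU control register setup.
	 */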
	pmap_set_tex();
	cpuinfo_reinit_mmu(pmap_kern_ttb);
	cpu_setup();

	/* Provide stack pointers for other processor modes. */
	set_stackptrs(cpu);

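	/* Unmask asynchronous (imprecise) data aborts on this core. */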
	enable_interrupts(PSR_A);
	pc = &__pcpu[cpu];

	/*
	 * pcpu_init() updates the pcpu queue, so it must not be executed
	 * in parallel on several cores.
	 */
	while (mp_naps < (cpu - 1))
		;

	pcpu_init(pc, cpu, sizeof(struct pcpu));
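	/* Record the MPIDR affinity fields (Aff2:Aff1:Aff0) for this core. */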
	pc->pc_mpidr = cp15_mpidr_get() & 0xFFFFFF;
	dpcpu_init(dpcpu[cpu - 1], cpu);
#if defined(DDB)
	dbg_monitor_init_secondary();
#endif
	/* Signal our startup to the BSP */
	atomic_add_rel_32(&mp_naps, 1);

	/* Spin until the BSP releases the APs; on ARMv7, sleep in WFE. */
	while (!atomic_load_acq_int(&aps_ready)) {
#if __ARM_ARCH >= 7
		__asm __volatile("wfe");
#endif
	}

	/* Initialize curthread */
	KASSERT(PCPU_GET(idlethread) != NULL, ("no idle thread"));
	pc->pc_curthread = pc->pc_idlethread;
	pc->pc_curpcb = pc->pc_idlethread->td_pcb;
	set_curthread(pc->pc_idlethread);
#ifdef VFP
	vfp_init();
#endif

	/* Configure the interrupt controller */
	intr_pic_init_secondary();

	/* Apply possible BP hardening */
	cpuinfo_init_bp_hardening();

	mtx_lock_spin(&ap_boot_mtx);

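	/* Count ourself among the running CPUs; the last AP marks SMP started. */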
	atomic_add_rel_32(&smp_cpus, 1);

	if (smp_cpus == mp_ncpus) {
		/* Enable IPIs, TLB shootdowns, freezes, etc. */
		atomic_store_rel_int(&smp_started, 1);
	}

	mtx_unlock_spin(&ap_boot_mtx);

	loop_counter = 0;
	while (smp_started == 0) {
		DELAY(100);
		loop_counter++;
		if (loop_counter == 1000)
			CTR0(KTR_SMP, "AP still waiting for smp_started");
	}
	/* Start per-CPU event timers. */
	cpu_initclocks_ap();

	CTR0(KTR_SMP, "go into scheduler");

	/* Enter the scheduler */
	sched_throw(NULL);

	panic("scheduler returned us to %s", __func__);
	/* NOTREACHED */
}

static void
ipi_rendezvous(void *dummy __unused)
{

	CTR0(KTR_SMP, "IPI_RENDEZVOUS");
	smp_rendezvous_action();
}

static void
ipi_ast(void *dummy __unused)
{

	CTR0(KTR_SMP, "IPI_AST");
}

static void
ipi_stop(void *dummy __unused)
{
	u_int cpu;

	/*
	 * IPI_STOP_HARD is mapped to IPI_STOP.
	 */
	CTR0(KTR_SMP, "IPI_STOP or IPI_STOP_HARD");

	cpu = PCPU_GET(cpuid);
	savectx(&stoppcbs[cpu]);

	/*
	 * CPUs are stopped when entering the debugger and at
	 * system shutdown, both of which can precede a
	 * panic dump. For the dump to be correct, all caches
	 * must be flushed and invalidated, but on ARM there's
	 * no way to broadcast a wbinv_all to other cores.
	 * Instead, we have each core do the local wbinv_all as
	 * part of stopping the core. The core requesting the
	 * stop will do the l2 cache flush after all other cores
	 * have done their l1 flushes and stopped.
	 */
	dcache_wbinv_poc_all();

	/* Indicate we are stopped */
	CPU_SET_ATOMIC(cpu, &stopped_cpus);

	/* Wait for restart */
	while (!CPU_ISSET(cpu, &started_cpus))
		cpu_spinwait();

	CPU_CLR_ATOMIC(cpu, &started_cpus);
	CPU_CLR_ATOMIC(cpu, &stopped_cpus);
#ifdef DDB
	dbg_resume_dbreg();
#endif
	CTR0(KTR_SMP, "IPI_STOP (restart)");
}

static void
ipi_preempt(void *arg)
{
	struct trapframe *oldframe;
	struct thread *td;

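	/*
	 * Temporarily install the interrupting trapframe as td_intr_frame
	 * so the preemption path can see the interrupted context.
	 */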
	critical_enter();
	td = curthread;
	td->td_intr_nesting_level++;
	oldframe = td->td_intr_frame;
	td->td_intr_frame = (struct trapframe *)arg;

	CTR1(KTR_SMP, "%s: IPI_PREEMPT", __func__);
	sched_preempt(td);

	td->td_intr_frame = oldframe;
	td->td_intr_nesting_level--;
	critical_exit();
}

static void
ipi_hardclock(void *arg)
{
	struct trapframe *oldframe;
	struct thread *td;

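	/*
	 * As in ipi_preempt(), expose the interrupting trapframe while
	 * the per-CPU clock handler runs.
	 */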
	critical_enter();
	td = curthread;
	td->td_intr_nesting_level++;
	oldframe = td->td_intr_frame;
	td->td_intr_frame = (struct trapframe *)arg;

	CTR1(KTR_SMP, "%s: IPI_HARDCLOCK", __func__);
	hardclockintr();

	td->td_intr_frame = oldframe;
	td->td_intr_nesting_level--;
	critical_exit();
}

static void
release_aps(void *dummy __unused)
{
	uint32_t loop_counter;

	if (mp_ncpus == 1)
		return;

	intr_pic_ipi_setup(IPI_RENDEZVOUS, "rendezvous", ipi_rendezvous, NULL);
	intr_pic_ipi_setup(IPI_AST, "ast", ipi_ast, NULL);
	intr_pic_ipi_setup(IPI_STOP, "stop", ipi_stop, NULL);
	intr_pic_ipi_setup(IPI_PREEMPT, "preempt", ipi_preempt, NULL);
	intr_pic_ipi_setup(IPI_HARDCLOCK, "hardclock", ipi_hardclock, NULL);

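	/* Let the APs out of their aps_ready spin loop. */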
	atomic_store_rel_int(&aps_ready, 1);
	/* Wake up any APs sleeping in WFE */
	dsb();
	sev();

	printf("Release APs\n");

	for (loop_counter = 0; loop_counter < 2000; loop_counter++) {
		if (smp_started)
			return;
		DELAY(1000);
	}
	printf("APs not started\n");
}

SYSINIT(start_aps, SI_SUB_SMP, SI_ORDER_FIRST, release_aps, NULL);

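/*
 * Describe the CPU topology to the scheduler: a single level in which
 * all cores are assumed to share the L2 cache.
 */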
struct cpu_group *
cpu_topo(void)
{

	return (smp_topo_1level(CG_SHARE_L2, mp_ncpus, 0));
}

void
cpu_mp_setmaxid(void)
{

	platform_mp_setmaxid();
}

/* Send an IPI to every CPU except the caller. */
void
ipi_all_but_self(u_int ipi)
{
	cpuset_t other_cpus;

	other_cpus = all_cpus;
	CPU_CLR(PCPU_GET(cpuid), &other_cpus);
	CTR2(KTR_SMP, "%s: ipi: %x", __func__, ipi);
	intr_ipi_send(other_cpus, ipi);
}

void
ipi_cpu(int cpu, u_int ipi)
{
	cpuset_t cpus;

	CPU_ZERO(&cpus);
	CPU_SET(cpu, &cpus);

	CTR3(KTR_SMP, "%s: cpu: %d, ipi: %x", __func__, cpu, ipi);
	intr_ipi_send(cpus, ipi);
}

void
ipi_selected(cpuset_t cpus, u_int ipi)
{

	CTR2(KTR_SMP, "%s: ipi: %x", __func__, ipi);
	intr_ipi_send(cpus, ipi);
}