/*-
 * Copyright (c) 2008 Marcel Moolenaar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/bus.h>
#include <sys/pcpu.h>
#include <sys/proc.h>
#include <sys/sched.h>
#include <sys/smp.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>

#include <machine/bus.h>
#include <machine/cpu.h>
#include <machine/intr_machdep.h>
#include <machine/platform.h>
#include <machine/md_var.h>
#include <machine/smp.h>

#include "pic_if.h"

extern struct pcpu __pcpu[MAXCPU];

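/*
 * Startup handshake between the BSP and the APs: the BSP publishes its
 * decrementer and timebase values in ap_decr and ap_timebase and then
 * releases the APs by setting ap_letgo; each AP bumps ap_awake once it
 * has synchronized.  ipi_msg_cnt[] counts received IPIs per message type.
 */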
volatile static int ap_awake;
volatile static u_int ap_letgo;
volatile static uint32_t ap_decr;
volatile static u_quad_t ap_timebase;
static u_int ipi_msg_cnt[32];

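/*
 * Machine-dependent bootstrap run by each AP shortly after it starts
 * executing kernel code; it synchronizes DEC/TB with the BSP and then
 * enters the scheduler.
 */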
void
machdep_ap_bootstrap(void)
{
	/* Set up important bits on the CPU (HID registers, etc.) */
	cpudep_ap_setup();

	/* Set PIR */
	PCPU_SET(pir, mfspr(SPR_PIR));
	PCPU_SET(awake, 1);
	__asm __volatile("msync; isync");

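	/* Spin until the BSP releases the APs. */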
	while (ap_letgo == 0)
		;

	/* Initialize DEC and TB, sync with the BSP values */
	decr_ap_init();
	mttb(ap_timebase);
	__asm __volatile("mtdec %0" :: "r"(ap_decr));

	atomic_add_int(&ap_awake, 1);
	printf("SMP: AP CPU #%d launched\n", PCPU_GET(cpuid));

	/* Initialize curthread */
	PCPU_SET(curthread, PCPU_GET(idlethread));
	PCPU_SET(curpcb, curthread->td_pcb);

	/* Let the DEC and external interrupts go */
	mtmsr(mfmsr() | PSL_EE);

	/* Announce ourselves awake, and enter the scheduler */
	sched_throw(NULL);
}

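/*
 * No cache or package topology is reported; all CPUs are scheduled as a
 * single flat group.
 */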
struct cpu_group *
cpu_topo(void)
{

	return (smp_topo_none());
}

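/*
 * Count the CPUs the platform reports and derive mp_ncpus and mp_maxid
 * from that count.
 */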
void
cpu_mp_setmaxid(void)
{
	struct cpuref cpuref;
	int error;

	mp_ncpus = 0;
	error = platform_smp_first_cpu(&cpuref);
	while (!error) {
		mp_ncpus++;
		error = platform_smp_next_cpu(&cpuref);
	}
	/* Sanity. */
	if (mp_ncpus == 0)
		mp_ncpus = 1;

	/*
	 * Set the largest cpuid we're going to use. This is necessary
	 * for VM initialization.
	 */
	mp_maxid = min(mp_ncpus, MAXCPU) - 1;
}

int
cpu_mp_probe(void)
{

	/*
	 * We're not going to enable SMP if there's only 1 processor.
	 */
	return (mp_ncpus > 1);
}

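/*
 * Enumerate every CPU the platform knows about and set up per-CPU data
 * for each usable AP; the BSP's existing pcpu area is simply flagged.
 */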
void
cpu_mp_start(void)
{
	struct cpuref bsp, cpu;
	struct pcpu *pc;
	int error;

	error = platform_smp_get_bsp(&bsp);
	KASSERT(error == 0, ("Don't know BSP"));
	KASSERT(bsp.cr_cpuid == 0, ("%s: cpuid != 0", __func__));

	error = platform_smp_first_cpu(&cpu);
	while (!error) {
		if (cpu.cr_cpuid >= MAXCPU) {
			printf("SMP: cpu%d: skipped -- ID out of range\n",
			    cpu.cr_cpuid);
			goto next;
		}
		if (all_cpus & (1 << cpu.cr_cpuid)) {
			printf("SMP: cpu%d: skipped - duplicate ID\n",
			    cpu.cr_cpuid);
			goto next;
		}
		if (cpu.cr_cpuid != bsp.cr_cpuid) {
			void *dpcpu;

			pc = &__pcpu[cpu.cr_cpuid];
			dpcpu = (void *)kmem_alloc(kernel_map, DPCPU_SIZE);
			pcpu_init(pc, cpu.cr_cpuid, sizeof(*pc));
			dpcpu_init(dpcpu, cpu.cr_cpuid);
		} else {
			pc = pcpup;
			pc->pc_cpuid = bsp.cr_cpuid;
			pc->pc_bsp = 1;
		}
		pc->pc_cpumask = 1 << pc->pc_cpuid;
		pc->pc_hwref = cpu.cr_hwref;
		all_cpus |= pc->pc_cpumask;
next:
		error = platform_smp_next_cpu(&cpu);
	}
}

void
cpu_mp_announce(void)
{
	struct pcpu *pc;
	int i;

	for (i = 0; i <= mp_maxid; i++) {
		pc = pcpu_find(i);
		if (pc == NULL)
			continue;
		printf("cpu%d: dev=%x", i, pc->pc_hwref);
		if (pc->pc_bsp)
			printf(" (BSP)");
		printf("\n");
	}
}

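/*
 * SI_SUB_SMP SYSINIT: wake every non-BSP CPU, hand out the BSP's
 * decrementer and timebase values, and wait for the APs to report in
 * before declaring SMP started.
 */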
static void
cpu_mp_unleash(void *dummy)
{
	struct pcpu *pc;
	int cpus, timeout;

	if (mp_ncpus <= 1)
		return;

	cpus = 0;
	smp_cpus = 0;
	SLIST_FOREACH(pc, &cpuhead, pc_allcpu) {
		cpus++;
		pc->pc_other_cpus = all_cpus & ~pc->pc_cpumask;
		if (!pc->pc_bsp) {
			if (bootverbose)
				printf("Waking up CPU %d (dev=%x)\n",
				    pc->pc_cpuid, pc->pc_hwref);

			platform_smp_start_cpu(pc);

			timeout = 2000;		/* wait 2sec for the AP */
			while (!pc->pc_awake && --timeout > 0)
				DELAY(1000);

		} else {
			PCPU_SET(pir, mfspr(SPR_PIR));
			pc->pc_awake = 1;
		}
		if (pc->pc_awake) {
			if (bootverbose)
				printf("Adding CPU %d, pir=%x, awake=%x\n",
				    pc->pc_cpuid, pc->pc_pir, pc->pc_awake);
			smp_cpus++;
		} else
			stopped_cpus |= (1 << pc->pc_cpuid);
	}

	ap_awake = 1;

	/* Provide our current DEC and TB values for APs */
	__asm __volatile("mfdec %0" : "=r"(ap_decr));
	ap_timebase = mftb() + 10;
	__asm __volatile("msync; isync");

	/* Let APs continue */
	atomic_store_rel_int(&ap_letgo, 1);

	mttb(ap_timebase);

	while (ap_awake < smp_cpus)
		;

	if (smp_cpus != cpus || cpus != mp_ncpus) {
		printf("SMP: %d CPUs found; %d CPUs usable; %d CPUs woken\n",
		    mp_ncpus, cpus, smp_cpus);
	}

	/* Let the APs get into the scheduler */
	DELAY(10000);

	smp_active = 1;
	smp_started = 1;
}

SYSINIT(start_aps, SI_SUB_SMP, SI_ORDER_FIRST, cpu_mp_unleash, NULL);

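/*
 * Interrupt filter for incoming IPIs: drain this CPU's pending IPI
 * bitmask and act on each message.
 */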
int
powerpc_ipi_handler(void *arg)
{
	cpumask_t self;
	uint32_t ipimask;
	int msg;

	CTR2(KTR_SMP, "%s: MSR 0x%08x", __func__, mfmsr());

	ipimask = atomic_readandclear_32(&(pcpup->pc_ipimask));
	if (ipimask == 0)
		return (FILTER_STRAY);
	while ((msg = ffs(ipimask) - 1) != -1) {
		ipimask &= ~(1u << msg);
		ipi_msg_cnt[msg]++;
		switch (msg) {
		case IPI_AST:
			CTR1(KTR_SMP, "%s: IPI_AST", __func__);
			break;
		case IPI_PREEMPT:
			CTR1(KTR_SMP, "%s: IPI_PREEMPT", __func__);
			sched_preempt(curthread);
			break;
		case IPI_RENDEZVOUS:
			CTR1(KTR_SMP, "%s: IPI_RENDEZVOUS", __func__);
			smp_rendezvous_action();
			break;
		case IPI_STOP:

			/*
			 * IPI_STOP_HARD is mapped to IPI_STOP, so there is
			 * no need for a separate case in this switch.
			 */
			CTR1(KTR_SMP, "%s: IPI_STOP or IPI_STOP_HARD (stop)",
			    __func__);
			self = PCPU_GET(cpumask);
			savectx(PCPU_GET(curpcb));
			atomic_set_int(&stopped_cpus, self);
			while ((started_cpus & self) == 0)
				cpu_spinwait();
			atomic_clear_int(&started_cpus, self);
			atomic_clear_int(&stopped_cpus, self);
			CTR1(KTR_SMP, "%s: IPI_STOP (restart)", __func__);
			break;
		}
	}

	return (FILTER_HANDLED);
}

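/*
 * Mark the IPI message as pending in the target CPU's mask and kick the
 * target through the interrupt controller.
 */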
static void
ipi_send(struct pcpu *pc, int ipi)
{

	CTR4(KTR_SMP, "%s: pc=%p, targetcpu=%d, IPI=%d", __func__,
	    pc, pc->pc_cpuid, ipi);

	atomic_set_32(&pc->pc_ipimask, (1 << ipi));
	PIC_IPI(pic, pc->pc_cpuid);

	CTR1(KTR_SMP, "%s: sent", __func__);
}

/* Send an IPI to a set of cpus. */
void
ipi_selected(cpumask_t cpus, int ipi)
{
	struct pcpu *pc;

	SLIST_FOREACH(pc, &cpuhead, pc_allcpu) {
		if (cpus & pc->pc_cpumask)
			ipi_send(pc, ipi);
	}
}

/* Send an IPI to a specific CPU. */
void
ipi_cpu(int cpu, u_int ipi)
{

	ipi_send(cpuid_to_pcpu[cpu], ipi);
}

/* Send an IPI to all CPUs EXCEPT myself. */
void
ipi_all_but_self(int ipi)
{
	struct pcpu *pc;

	SLIST_FOREACH(pc, &cpuhead, pc_allcpu) {
		if (pc != pcpup)
			ipi_send(pc, ipi);
	}
}