/*-
 * Copyright (c) 2008 Marcel Moolenaar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: releng/9.0/sys/powerpc/powerpc/mp_machdep.c 223758 2011-07-04 12:04:52Z attilio $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/bus.h>
#include <sys/cpuset.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/pcpu.h>
#include <sys/proc.h>
#include <sys/sched.h>
#include <sys/smp.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>

#include <machine/bus.h>
#include <machine/cpu.h>
#include <machine/intr_machdep.h>
#include <machine/pcb.h>
#include <machine/platform.h>
#include <machine/md_var.h>
#include <machine/smp.h>

#include "pic_if.h"

extern struct pcpu __pcpu[MAXCPU];

volatile static int ap_awake;		/* Number of CPUs that have come up */
volatile static u_int ap_letgo;		/* Set by the BSP to release the APs */
volatile static u_quad_t ap_timebase;	/* Timebase value published by the BSP */
static u_int ipi_msg_cnt[32];		/* Per-message IPI receive counters */
static struct mtx ap_boot_mtx;		/* Serializes AP startup */
struct pcb stoppcbs[MAXCPU];		/* Per-CPU contexts saved on IPI_STOP */

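/*
 * AP bootstrap entry point.  Each AP marks itself awake, spins until
 * the BSP sets ap_letgo, synchronizes its timebase and decrementer with
 * the values published by the BSP, and then enters the scheduler.
 */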
void
machdep_ap_bootstrap(void)
{
	/* Set up important bits on the CPU (HID registers, etc.) */
	cpudep_ap_setup();

	/* Set PIR */
	PCPU_SET(pir, mfspr(SPR_PIR));
	PCPU_SET(awake, 1);
	__asm __volatile("msync; isync");

	/* Spin until the BSP releases the APs. */
	while (ap_letgo == 0)
		;

	/* Initialize DEC and TB, sync with the BSP values */
#ifdef __powerpc64__
	/* Writing to the time base register is hypervisor-privileged */
	if (mfmsr() & PSL_HV)
		mttb(ap_timebase);
#else
	mttb(ap_timebase);
#endif
	decr_ap_init();

	/* Serialize console output and AP count increment */
	mtx_lock_spin(&ap_boot_mtx);
	ap_awake++;
	printf("SMP: AP CPU #%d launched\n", PCPU_GET(cpuid));
	mtx_unlock_spin(&ap_boot_mtx);

	/* Start per-CPU event timers. */
	cpu_initclocks_ap();

	/* Announce ourselves awake, and enter the scheduler */
	sched_throw(NULL);
}

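/*
 * Count the CPUs enumerated by the platform code and derive mp_ncpus
 * and mp_maxid from that count.
 */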
void
cpu_mp_setmaxid(void)
{
	struct cpuref cpuref;
	int error;

	mp_ncpus = 0;
	error = platform_smp_first_cpu(&cpuref);
	while (!error) {
		mp_ncpus++;
		error = platform_smp_next_cpu(&cpuref);
	}
	/* Sanity. */
	if (mp_ncpus == 0)
		mp_ncpus = 1;

	/*
	 * Set the largest cpuid we're going to use. This is necessary
	 * for VM initialization.
	 */
	mp_maxid = min(mp_ncpus, MAXCPU) - 1;
}

int
cpu_mp_probe(void)
{

	/*
	 * We're not going to enable SMP if there's only 1 processor.
	 */
	return (mp_ncpus > 1);
}

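/*
 * Set up per-CPU and dynamic per-CPU (DPCPU) data for every CPU the
 * platform reports, skipping IDs that are out of range or duplicated,
 * and record each usable CPU in all_cpus.
 */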
void
cpu_mp_start(void)
{
	struct cpuref bsp, cpu;
	struct pcpu *pc;
	int error;

	error = platform_smp_get_bsp(&bsp);
	KASSERT(error == 0, ("Don't know BSP"));
	KASSERT(bsp.cr_cpuid == 0, ("%s: cpuid != 0", __func__));

	error = platform_smp_first_cpu(&cpu);
	while (!error) {
		if (cpu.cr_cpuid >= MAXCPU) {
			printf("SMP: cpu%d: skipped -- ID out of range\n",
			    cpu.cr_cpuid);
			goto next;
		}
		if (CPU_ISSET(cpu.cr_cpuid, &all_cpus)) {
			printf("SMP: cpu%d: skipped - duplicate ID\n",
			    cpu.cr_cpuid);
			goto next;
		}
		if (cpu.cr_cpuid != bsp.cr_cpuid) {
			void *dpcpu;

			pc = &__pcpu[cpu.cr_cpuid];
			dpcpu = (void *)kmem_alloc(kernel_map, DPCPU_SIZE);
			pcpu_init(pc, cpu.cr_cpuid, sizeof(*pc));
			dpcpu_init(dpcpu, cpu.cr_cpuid);
		} else {
			pc = pcpup;
			pc->pc_cpuid = bsp.cr_cpuid;
			pc->pc_bsp = 1;
		}
		pc->pc_hwref = cpu.cr_hwref;
		CPU_SET(pc->pc_cpuid, &all_cpus);
next:
		error = platform_smp_next_cpu(&cpu);
	}
}

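/* Print a summary line for each CPU that was set up, marking the BSP. */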
void
cpu_mp_announce(void)
{
	struct pcpu *pc;
	int i;

	for (i = 0; i <= mp_maxid; i++) {
		pc = pcpu_find(i);
		if (pc == NULL)
			continue;
		printf("cpu%d: dev=%x", i, (int)pc->pc_hwref);
		if (pc->pc_bsp)
			printf(" (BSP)");
		printf("\n");
	}
}

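/*
 * Wake the APs: start each non-BSP CPU via the platform code, wait for
 * it to report itself awake, publish the BSP's timebase and release the
 * APs through ap_letgo, then wait for them before declaring SMP started.
 */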
static void
cpu_mp_unleash(void *dummy)
{
	struct pcpu *pc;
	int cpus, timeout;

	if (mp_ncpus <= 1)
		return;

	mtx_init(&ap_boot_mtx, "ap boot", NULL, MTX_SPIN);

	cpus = 0;
	smp_cpus = 0;
	STAILQ_FOREACH(pc, &cpuhead, pc_allcpu) {
		cpus++;
		if (!pc->pc_bsp) {
			if (bootverbose)
				printf("Waking up CPU %d (dev=%x)\n",
				    pc->pc_cpuid, (int)pc->pc_hwref);

			platform_smp_start_cpu(pc);

			timeout = 2000;	/* wait 2sec for the AP */
			while (!pc->pc_awake && --timeout > 0)
				DELAY(1000);

		} else {
			PCPU_SET(pir, mfspr(SPR_PIR));
			pc->pc_awake = 1;
		}
		if (pc->pc_awake) {
			if (bootverbose)
				printf("Adding CPU %d, pir=%x, awake=%x\n",
				    pc->pc_cpuid, pc->pc_pir, pc->pc_awake);
			smp_cpus++;
		} else
			CPU_SET(pc->pc_cpuid, &stopped_cpus);
	}

	ap_awake = 1;

	/* Provide our current DEC and TB values for APs */
	ap_timebase = mftb() + 10;
	__asm __volatile("msync; isync");

	/* Let APs continue */
	atomic_store_rel_int(&ap_letgo, 1);

#ifdef __powerpc64__
	/* Writing to the time base register is hypervisor-privileged */
	if (mfmsr() & PSL_HV)
		mttb(ap_timebase);
#else
	mttb(ap_timebase);
#endif

	while (ap_awake < smp_cpus)
		;

	if (smp_cpus != cpus || cpus != mp_ncpus) {
		printf("SMP: %d CPUs found; %d CPUs usable; %d CPUs woken\n",
		    mp_ncpus, cpus, smp_cpus);
	}

	/* Let the APs get into the scheduler */
	DELAY(10000);

	smp_active = 1;
	smp_started = 1;
}

SYSINIT(start_aps, SI_SUB_SMP, SI_ORDER_FIRST, cpu_mp_unleash, NULL);

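/*
 * Interrupt filter for incoming IPIs: atomically fetch and clear this
 * CPU's pending IPI mask and dispatch each pending message.
 */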
int
powerpc_ipi_handler(void *arg)
{
	u_int cpuid;
	uint32_t ipimask;
	int msg;

	CTR2(KTR_SMP, "%s: MSR 0x%08x", __func__, mfmsr());

	ipimask = atomic_readandclear_32(&(pcpup->pc_ipimask));
	if (ipimask == 0)
		return (FILTER_STRAY);
	while ((msg = ffs(ipimask) - 1) != -1) {
		ipimask &= ~(1u << msg);
		ipi_msg_cnt[msg]++;
		switch (msg) {
		case IPI_AST:
			CTR1(KTR_SMP, "%s: IPI_AST", __func__);
			break;
		case IPI_PREEMPT:
			CTR1(KTR_SMP, "%s: IPI_PREEMPT", __func__);
			sched_preempt(curthread);
			break;
		case IPI_RENDEZVOUS:
			CTR1(KTR_SMP, "%s: IPI_RENDEZVOUS", __func__);
			smp_rendezvous_action();
			break;
		case IPI_STOP:

			/*
			 * IPI_STOP_HARD is mapped to IPI_STOP, so there is
			 * no need for a separate case in this switch.
			 */
			CTR1(KTR_SMP, "%s: IPI_STOP or IPI_STOP_HARD (stop)",
			    __func__);
			cpuid = PCPU_GET(cpuid);
			savectx(&stoppcbs[cpuid]);
			savectx(PCPU_GET(curpcb));
			CPU_SET_ATOMIC(cpuid, &stopped_cpus);
			while (!CPU_ISSET(cpuid, &started_cpus))
				cpu_spinwait();
			CPU_CLR_ATOMIC(cpuid, &stopped_cpus);
			CPU_CLR_ATOMIC(cpuid, &started_cpus);
			CTR1(KTR_SMP, "%s: IPI_STOP (restart)", __func__);
			break;
		case IPI_HARDCLOCK:
			CTR1(KTR_SMP, "%s: IPI_HARDCLOCK", __func__);
			hardclockintr();
			break;
		}
	}

	return (FILTER_HANDLED);
}

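/* Latch the IPI in the target CPU's mask and interrupt it via the root PIC. */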
static void
ipi_send(struct pcpu *pc, int ipi)
{

	CTR4(KTR_SMP, "%s: pc=%p, targetcpu=%d, IPI=%d", __func__,
	    pc, pc->pc_cpuid, ipi);

	atomic_set_32(&pc->pc_ipimask, (1 << ipi));
	PIC_IPI(root_pic, pc->pc_cpuid);

	CTR1(KTR_SMP, "%s: sent", __func__);
}

/* Send an IPI to a set of cpus. */
void
ipi_selected(cpuset_t cpus, int ipi)
{
	struct pcpu *pc;

	STAILQ_FOREACH(pc, &cpuhead, pc_allcpu) {
		if (CPU_ISSET(pc->pc_cpuid, &cpus))
			ipi_send(pc, ipi);
	}
}

/* Send an IPI to a specific CPU. */
void
ipi_cpu(int cpu, u_int ipi)
{

	ipi_send(cpuid_to_pcpu[cpu], ipi);
}

/* Send an IPI to all CPUs EXCEPT myself. */
void
ipi_all_but_self(int ipi)
{
	struct pcpu *pc;

	STAILQ_FOREACH(pc, &cpuhead, pc_allcpu) {
		if (pc != pcpup)
			ipi_send(pc, ipi);
	}
}