1 /*-
2 * Copyright (c) 2009 Neelkanth Natu
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 *
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24 * SUCH DAMAGE.
25 */
26
27 #include <sys/cdefs.h>
28 __FBSDID("$FreeBSD: releng/9.2/sys/mips/mips/mp_machdep.c 223758 2011-07-04 12:04:52Z attilio $");
29
30 #include <sys/param.h>
31 #include <sys/systm.h>
32 #include <sys/cpuset.h>
33 #include <sys/ktr.h>
34 #include <sys/proc.h>
35 #include <sys/lock.h>
36 #include <sys/mutex.h>
37 #include <sys/kernel.h>
38 #include <sys/pcpu.h>
39 #include <sys/smp.h>
40 #include <sys/sched.h>
41 #include <sys/bus.h>
42
43 #include <vm/vm.h>
44 #include <vm/pmap.h>
45 #include <vm/vm_extern.h>
46 #include <vm/vm_kern.h>
47
48 #include <machine/clock.h>
49 #include <machine/smp.h>
50 #include <machine/hwfunc.h>
51 #include <machine/intr_machdep.h>
52 #include <machine/cache.h>
53 #include <machine/tlb.h>
54
/* Saved context of each stopped CPU, indexed by cpuid (see IPI_STOP). */
struct pcb stoppcbs[MAXCPU];

/* Dynamic per-CPU area handed from start_ap() to the AP it is booting. */
static void *dpcpu;
/* Spin mutex serializing AP launch announcements in smp_init_secondary(). */
static struct mtx ap_boot_mtx;

/* Set to 1 by the BSP in release_aps() to let spinning APs proceed. */
static volatile int aps_ready;
/* Number of APs that have initialized; bumped in smp_init_secondary(). */
static volatile int mp_naps;
62
63 static void
64 ipi_send(struct pcpu *pc, int ipi)
65 {
66
67 CTR3(KTR_SMP, "%s: cpu=%d, ipi=%x", __func__, pc->pc_cpuid, ipi);
68
69 atomic_set_32(&pc->pc_pending_ipis, ipi);
70 platform_ipi_send(pc->pc_cpuid);
71
72 CTR1(KTR_SMP, "%s: sent", __func__);
73 }
74
75 void
76 ipi_all_but_self(int ipi)
77 {
78 cpuset_t other_cpus;
79
80 other_cpus = all_cpus;
81 CPU_CLR(PCPU_GET(cpuid), &other_cpus);
82 ipi_selected(other_cpus, ipi);
83 }
84
85 /* Send an IPI to a set of cpus. */
86 void
87 ipi_selected(cpuset_t cpus, int ipi)
88 {
89 struct pcpu *pc;
90
91 STAILQ_FOREACH(pc, &cpuhead, pc_allcpu) {
92 if (CPU_ISSET(pc->pc_cpuid, &cpus)) {
93 CTR3(KTR_SMP, "%s: pc: %p, ipi: %x\n", __func__, pc,
94 ipi);
95 ipi_send(pc, ipi);
96 }
97 }
98 }
99
100 /* Send an IPI to a specific CPU. */
101 void
102 ipi_cpu(int cpu, u_int ipi)
103 {
104
105 CTR3(KTR_SMP, "%s: cpu: %d, ipi: %x\n", __func__, cpu, ipi);
106 ipi_send(cpuid_to_pcpu[cpu], ipi);
107 }
108
/*
 * Handle an IPI sent to this processor.
 *
 * Interrupt filter: atomically claims this CPU's pending-IPI bitmap and
 * services every bit that was set.  Returns FILTER_STRAY when no IPI was
 * actually pending (spurious interrupt), FILTER_HANDLED otherwise.
 */
static int
mips_ipi_handler(void *arg)
{
	u_int cpu, ipi, ipi_bitmap;
	int bit;

	cpu = PCPU_GET(cpuid);

	platform_ipi_clear();	/* quiesce the pending ipi interrupt */

	/* Claim every IPI posted by ipi_send() in one atomic swap. */
	ipi_bitmap = atomic_readandclear_int(PCPU_PTR(pending_ipis));
	if (ipi_bitmap == 0)
		return (FILTER_STRAY);

	CTR1(KTR_SMP, "smp_handle_ipi(), ipi_bitmap=%x", ipi_bitmap);

	/* Service the pending IPIs lowest bit first. */
	while ((bit = ffs(ipi_bitmap))) {
		bit = bit - 1;	/* ffs() is 1-based */
		ipi = 1 << bit;
		ipi_bitmap &= ~ipi;
		switch (ipi) {
		case IPI_RENDEZVOUS:
			CTR0(KTR_SMP, "IPI_RENDEZVOUS");
			smp_rendezvous_action();
			break;

		case IPI_AST:
			/* No explicit action here; taking the interrupt is
			 * sufficient. */
			CTR0(KTR_SMP, "IPI_AST");
			break;

		case IPI_STOP:
			/*
			 * IPI_STOP_HARD is mapped to IPI_STOP so it is not
			 * necessary to add it in the switch.
			 */
			CTR0(KTR_SMP, "IPI_STOP or IPI_STOP_HARD");

			/* Save register and TLB state before parking. */
			savectx(&stoppcbs[cpu]);
			tlb_save();

			/* Indicate we are stopped */
			CPU_SET_ATOMIC(cpu, &stopped_cpus);

			/* Wait for restart */
			while (!CPU_ISSET(cpu, &started_cpus))
				cpu_spinwait();

			CPU_CLR_ATOMIC(cpu, &started_cpus);
			CPU_CLR_ATOMIC(cpu, &stopped_cpus);
			CTR0(KTR_SMP, "IPI_STOP (restart)");
			break;
		case IPI_PREEMPT:
			CTR1(KTR_SMP, "%s: IPI_PREEMPT", __func__);
			sched_preempt(curthread);
			break;
		case IPI_HARDCLOCK:
			CTR1(KTR_SMP, "%s: IPI_HARDCLOCK", __func__);
			hardclockintr();
			break;
		default:
			panic("Unknown IPI 0x%0x on cpu %d", ipi, curcpu);
		}
	}

	return (FILTER_HANDLED);
}
178
179 static int
180 start_ap(int cpuid)
181 {
182 int cpus, ms;
183
184 cpus = mp_naps;
185 dpcpu = (void *)kmem_alloc(kernel_map, DPCPU_SIZE);
186
187 mips_sync();
188
189 if (platform_start_ap(cpuid) != 0)
190 return (-1); /* could not start AP */
191
192 for (ms = 0; ms < 5000; ++ms) {
193 if (mp_naps > cpus)
194 return (0); /* success */
195 else
196 DELAY(1000);
197 }
198
199 return (-2); /* timeout initializing AP */
200 }
201
/*
 * Derive mp_ncpus and mp_maxid from the platform's CPU mask.
 *
 * Counts the CPUs reported by platform_cpu_mask() and tracks the highest
 * CPU id seen so mp_maxid can be computed from it.
 */
void
cpu_mp_setmaxid(void)
{
	cpuset_t cpumask;
	int cpu, last;

	platform_cpu_mask(&cpumask);
	mp_ncpus = 0;
	last = 1;
	/* cpusetobj_ffs() returns a 1-based bit index, 0 when empty. */
	while ((cpu = cpusetobj_ffs(&cpumask)) != 0) {
		last = cpu;	/* highest (1-based) CPU id seen so far */
		cpu--;
		CPU_CLR(cpu, &cpumask);
		mp_ncpus++;
	}
	if (mp_ncpus <= 0)
		mp_ncpus = 1;	/* always at least the BSP */

	/* Convert the 1-based highest id to a 0-based maximum, clamped. */
	mp_maxid = min(last, MAXCPU) - 1;
}
222
/*
 * MI hook to print SMP information at boot.  This port prints nothing
 * here; per-AP launch messages are emitted elsewhere under bootverbose.
 */
void
cpu_mp_announce(void)
{
	/* NOTHING */
}
228
/*
 * Return the CPU topology as described by the platform code.
 */
struct cpu_group *
cpu_topo(void)
{
	struct cpu_group *topo;

	topo = platform_smp_topo();
	return (topo);
}
234
235 int
236 cpu_mp_probe(void)
237 {
238
239 return (mp_ncpus > 1);
240 }
241
/*
 * Start all application processors reported by the platform and build
 * the all_cpus set.  CPUs with an id >= MAXCPU are ignored with a
 * console message; APs that fail to start are skipped.
 */
void
cpu_mp_start(void)
{
	int error, cpuid;
	cpuset_t cpumask;

	mtx_init(&ap_boot_mtx, "ap boot", NULL, MTX_SPIN);

	CPU_ZERO(&all_cpus);
	platform_cpu_mask(&cpumask);

	/* Walk the mask, lowest CPU id first. */
	while (!CPU_EMPTY(&cpumask)) {
		cpuid = cpusetobj_ffs(&cpumask) - 1;	/* ffs is 1-based */
		CPU_CLR(cpuid, &cpumask);

		if (cpuid >= MAXCPU) {
			printf("cpu_mp_start: ignoring AP #%d.\n", cpuid);
			continue;
		}

		/* The BSP is already running; only start the other CPUs. */
		if (cpuid != platform_processor_id()) {
			if ((error = start_ap(cpuid)) != 0) {
				printf("AP #%d failed to start: %d\n", cpuid, error);
				continue;
			}
			if (bootverbose)
				printf("AP #%d started!\n", cpuid);
		}
		CPU_SET(cpuid, &all_cpus);
	}
}
273
/*
 * First C code executed on an application processor.
 *
 * Sets up the AP's TLB, caches and per-CPU data, announces itself to the
 * BSP by bumping mp_naps, spins until released by release_aps(), then
 * enters the scheduler.  Never returns.
 */
void
smp_init_secondary(u_int32_t cpuid)
{

	/* TLB: invalidate everything, then re-reserve the wired entries. */
	mips_wr_wired(0);
	tlb_invalidate_all();
	mips_wr_wired(VMWIRED_ENTRIES);

	/*
	 * We assume that the L1 cache on the APs is identical to the one
	 * on the BSP.
	 */
	mips_dcache_wbinv_all();
	mips_icache_sync_all();

	mips_sync();

	mips_wr_entryhi(0);

	/* Install this CPU's pcpu and the dpcpu area start_ap() allocated. */
	pcpu_init(PCPU_ADDR(cpuid), cpuid, sizeof(struct pcpu));
	dpcpu_init(dpcpu, cpuid);

	/* The AP has initialized successfully - allow the BSP to proceed */
	++mp_naps;

	/* Spin until the BSP is ready to release the APs */
	while (!aps_ready)
		;

	/* Initialize curthread. */
	KASSERT(PCPU_GET(idlethread) != NULL, ("no idle thread"));
	PCPU_SET(curthread, PCPU_GET(idlethread));

	mtx_lock_spin(&ap_boot_mtx);

	smp_cpus++;

	CTR1(KTR_SMP, "SMP: AP CPU #%d launched", PCPU_GET(cpuid));

	if (bootverbose)
		printf("SMP: AP CPU #%d launched.\n", PCPU_GET(cpuid));

	/* The last AP to arrive marks SMP as fully started. */
	if (smp_cpus == mp_ncpus) {
		atomic_store_rel_int(&smp_started, 1);
		smp_active = 1;
	}

	mtx_unlock_spin(&ap_boot_mtx);

	/* Wait for all APs before starting clocks and scheduling. */
	while (smp_started == 0)
		; /* nothing */

	/* Start per-CPU event timers. */
	cpu_initclocks_ap();

	/* enter the scheduler */
	sched_throw(NULL);

	panic("scheduler returned us to %s", __func__);
	/* NOTREACHED */
}
336
337 static void
338 release_aps(void *dummy __unused)
339 {
340 int ipi_irq;
341
342 if (mp_ncpus == 1)
343 return;
344
345 /*
346 * IPI handler
347 */
348 ipi_irq = platform_ipi_intrnum();
349 cpu_establish_hardintr("ipi", mips_ipi_handler, NULL, NULL, ipi_irq,
350 INTR_TYPE_MISC | INTR_EXCL, NULL);
351
352 atomic_store_rel_int(&aps_ready, 1);
353
354 while (smp_started == 0)
355 ; /* nothing */
356 }
357
/* Run release_aps() once the kernel reaches the SMP stage of boot. */
SYSINIT(start_aps, SI_SUB_SMP, SI_ORDER_FIRST, release_aps, NULL);