#include <sys/cdefs.h>
__FBSDID("$FreeBSD: releng/8.1/sys/mips/mips/mp_machdep.c 196198 2009-08-13 17:54:11Z attilio $");

#include "opt_kstack_pages.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/ktr.h>
#include <sys/proc.h>
#include <sys/cons.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/kernel.h>
#include <sys/pcpu.h>
#include <sys/smp.h>
#include <sys/sysctl.h>
#include <sys/bus.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>

#include <machine/atomic.h>
#include <machine/clock.h>
#include <machine/md_var.h>
#include <machine/pcb.h>
#include <machine/pmap.h>
#include <machine/smp.h>

static struct mtx ap_boot_mtx;
extern struct pcpu __pcpu[];
extern int num_tlbentries;
void mips_start_timer(void);
static volatile int aps_ready = 0;

u_int32_t boot_cpu_id;

void
cpu_mp_announce(void)
{
}
/*
 * To implement IPIs on MIPS CPUs, we use Interrupt Line 2 (bit 4 of the
 * cause register) and a per-CPU bitmap to avoid sending redundant IPI
 * interrupts. To interrupt a set of CPUs, the sender routine loops over
 * the specified CPUs, sending an interrupt to each one. A single mutex
 * (smp_ipi_mtx) is used for all IPIs that spin-wait for delivery. This
 * includes the following IPIs:
 *	IPI_RENDEZVOUS
 *	IPI_INVLPG
 *	IPI_INVLTLB
 *	IPI_INVLRNG
 */
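
/*
 * Illustrative example (a sketch, not part of the original comments): a
 * cross-CPU TLB shootdown initiated on one CPU reaches all the others
 * roughly as
 *
 *	ipi_all_but_self(IPI_INVLTLB);
 *
 * which ORs IPI_INVLTLB into each target's pc_pending_ipis bitmap and
 * raises the IPI interrupt line; each target then dispatches the pending
 * bit from smp_handle_ipi() below.
 */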

/*
 * Send an IPI to a set of CPUs.
 */
void
ipi_selected(u_int32_t cpus, u_int ipi)
{
	struct pcpu *pcpu;
	u_int cpuid, new_pending, old_pending;

	CTR3(KTR_SMP, "%s: cpus: %x, ipi: %x\n", __func__, cpus, ipi);

	while ((cpuid = ffs(cpus)) != 0) {
		cpuid--;
		cpus &= ~(1 << cpuid);
		pcpu = pcpu_find(cpuid);

		if (pcpu) {
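			/*
			 * Atomically OR the IPI bit into the target CPU's
			 * pending bitmap, retrying if a concurrent sender
			 * updates the bitmap first.
			 */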
			do {
				old_pending = pcpu->pc_pending_ipis;
				new_pending = old_pending | ipi;
			} while (!atomic_cmpset_int(&pcpu->pc_pending_ipis,
			    old_pending, new_pending));

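			/*
			 * If other IPI bits were already pending, a hardware
			 * interrupt has already been posted to the target and
			 * its handler will pick up the new bit as well, so
			 * skip the redundant send.
			 */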
			if (old_pending)
				continue;

			mips_ipi_send(cpuid);
		}
	}
}

/*
 * Send an IPI to all CPUs EXCEPT myself.
 */
void
ipi_all_but_self(u_int ipi)
{

	ipi_selected(PCPU_GET(other_cpus), ipi);
}

/*
 * Handle an IPI sent to this processor.
 */
intrmask_t
smp_handle_ipi(struct trapframe *frame)
{
	cpumask_t cpumask;		/* This cpu mask */
	u_int ipi, ipi_bitmap;

	ipi_bitmap = atomic_readandclear_int(PCPU_PTR(pending_ipis));
	cpumask = PCPU_GET(cpumask);

	CTR1(KTR_SMP, "smp_handle_ipi(), ipi_bitmap=%x", ipi_bitmap);
	while (ipi_bitmap) {
		/*
		 * Isolate the lowest set bit: x & ~(x - 1) clears every
		 * bit of x except the least significant one that is set
		 * (e.g. for x == 0x6 this yields 0x2).
		 */
		ipi = ipi_bitmap & ~(ipi_bitmap - 1);
		ipi_bitmap &= ~ipi;
		switch (ipi) {
		case IPI_INVLTLB:
			CTR0(KTR_SMP, "IPI_INVLTLB");
			break;

		case IPI_RENDEZVOUS:
			CTR0(KTR_SMP, "IPI_RENDEZVOUS");
			smp_rendezvous_action();
			break;

		case IPI_AST:
			CTR0(KTR_SMP, "IPI_AST");
			break;

		case IPI_STOP:
			/*
			 * IPI_STOP_HARD is mapped to IPI_STOP so it is not
			 * necessary to add it in the switch.
			 */
			CTR0(KTR_SMP, "IPI_STOP or IPI_STOP_HARD");
			atomic_set_int(&stopped_cpus, cpumask);

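			/*
			 * Spin until some other CPU restarts us by setting
			 * our bit in started_cpus, then acknowledge the
			 * restart by clearing both bits.
			 */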
			while ((started_cpus & cpumask) == 0)
				;
			atomic_clear_int(&started_cpus, cpumask);
			atomic_clear_int(&stopped_cpus, cpumask);
			break;
		}
	}
	return (CR_INT_IPI);
}

void
cpu_mp_setmaxid(void)
{

	mp_maxid = MAXCPU - 1;
}

void
smp_init_secondary(u_int32_t cpuid)
{

	if (cpuid >= MAXCPU)
		panic("cpu id exceeds MAXCPU");

	/*
	 * Initialize the TLB: clear the wired-entry count, flush every
	 * entry, re-reserve the wired entries the kernel needs, and start
	 * out in ASID 0.
	 */
	R4K_SetWIRED(0);
	R4K_TLBFlush(num_tlbentries);
	R4K_SetWIRED(VMWIRED_ENTRIES);
	MachSetPID(0);

	Mips_SyncCache();

	/* Disable interrupts until this CPU is ready to take IPIs. */
	mips_cp0_status_write(0);
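
	/*
	 * Spin until the boot processor releases the APs; aps_ready is
	 * set with store-release semantics in release_aps() below.
	 */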
	while (!aps_ready)
		;

	mips_sync();
	mips_sync();

	/* Initialize curthread. */
	KASSERT(PCPU_GET(idlethread) != NULL, ("no idle thread"));
	PCPU_SET(curthread, PCPU_GET(idlethread));

	mtx_lock_spin(&ap_boot_mtx);

	smp_cpus++;

	CTR1(KTR_SMP, "SMP: AP CPU #%d Launched", PCPU_GET(cpuid));

	/* Build our map of 'other' CPUs. */
	PCPU_SET(other_cpus, all_cpus & ~PCPU_GET(cpumask));

	printf("SMP: AP CPU #%d Launched!\n", PCPU_GET(cpuid));

	if (smp_cpus == mp_ncpus) {
		smp_started = 1;
		smp_active = 1;
	}

	mtx_unlock_spin(&ap_boot_mtx);

	while (smp_started == 0)
		; /* nothing */

	/* Enable interrupts. */
	mips_cp0_status_write(SR_INT_ENAB);

	/* OK, now grab sched_lock and enter the scheduler. */
	mtx_lock_spin(&sched_lock);

	/*
	 * Correct spinlock nesting. The idle thread context that we are
	 * borrowing was created so that it would start out with a single
	 * spin lock (sched_lock) held in fork_trampoline(). Since we've
	 * explicitly acquired locks in this function, the nesting count
	 * is now 2 rather than 1. Since we are nested, calling
	 * spinlock_exit() will simply adjust the counts without allowing
	 * spin-lock-using code to interrupt us.
	 */
	spinlock_exit();
	KASSERT(curthread->td_md.md_spinlock_count == 1, ("invalid count"));

	binuptime(PCPU_PTR(switchtime));
	PCPU_SET(switchticks, ticks);

	/* Kick off the clock on this CPU. */
	mips_start_timer();
	cpu_throw(NULL, choosethread());	/* doesn't return */

	panic("scheduler returned us to %s", __func__);
}

static int
smp_start_secondary(int cpuid)
{
	void *dpcpu;

	if (bootverbose)
		printf("smp_start_secondary: starting cpu %d\n", cpuid);

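	/*
	 * Allocate the dynamic per-CPU data area for this CPU and set up
	 * its pcpu structure before the CPU is brought up.
	 */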
	dpcpu = (void *)kmem_alloc(kernel_map, DPCPU_SIZE);
	pcpu_init(&__pcpu[cpuid], cpuid, sizeof(struct pcpu));
	dpcpu_init(dpcpu, cpuid);

	if (bootverbose)
		printf("smp_start_secondary: cpu %d started\n", cpuid);

	return (1);
}

int
cpu_mp_probe(void)
{
	int i, cpus;

	/* XXX: Need to check for valid platforms here. */

	boot_cpu_id = PCPU_GET(cpuid);
	KASSERT(boot_cpu_id == 0, ("cpu_mp_probe() called on non-primary CPU"));
	all_cpus = PCPU_GET(cpumask);
	mp_ncpus = 1;

	/*
	 * XXX: No real probing of secondary CPUs is done here; the loop
	 * unconditionally counts MAXCPU candidates, so this always
	 * returns a nonzero (MP-capable) result.
	 */
	cpus = 0;
	for (i = 0; i < MAXCPU; i++)
		cpus++;
	return (cpus);
}

void
cpu_mp_start(void)
{
	int i, cpuid;

	mtx_init(&ap_boot_mtx, "ap boot", NULL, MTX_SPIN);

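	/*
	 * Try to start each CPU other than the boot CPU; every CPU that
	 * comes up is assigned the next logical CPU ID, starting at 1.
	 */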
	cpuid = 1;
	for (i = 0; i < MAXCPU; i++) {
		if (i == boot_cpu_id)
			continue;
		if (smp_start_secondary(i)) {
			all_cpus |= (1 << cpuid);
			mp_ncpus++;
			cpuid++;
		}
	}
	idle_mask |= CR_INT_IPI;
	PCPU_SET(other_cpus, all_cpus & ~PCPU_GET(cpumask));
}

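/*
 * Release the APs: let them out of the aps_ready spin loop in
 * smp_init_secondary() above, then wait until the last one has set
 * smp_started.
 */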
static void
release_aps(void *dummy __unused)
{
	if (bootverbose && mp_ncpus > 1)
		printf("%s: releasing secondary CPUs\n", __func__);
	atomic_store_rel_int(&aps_ready, 1);

	while (mp_ncpus > 1 && smp_started == 0)
		; /* nothing */
}

SYSINIT(start_aps, SI_SUB_SMP, SI_ORDER_FIRST, release_aps, NULL);