1 /*-
2 * Copyright (c) 2001 Jake Burkholder.
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 *
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24 * SUCH DAMAGE.
25 *
26 * $FreeBSD: releng/6.4/sys/sparc64/include/smp.h 182116 2008-08-24 19:19:49Z marius $
27 */
28
29 #ifndef _MACHINE_SMP_H_
30 #define _MACHINE_SMP_H_
31
/*
 * Handshake states advanced through csa_state in struct cpu_start_args
 * while a secondary CPU is being started (order: clock sync, init,
 * bootstrap).
 */
#define CPU_CLKSYNC 1
#define CPU_INIT 2
#define CPU_BOOTSTRAP 3
35
36 #ifndef LOCORE
37
38 #include <machine/intr_machdep.h>
39 #include <machine/tte.h>
40
/*
 * Interrupt dispatch status bits -- presumably the UltraSPARC interrupt
 * dispatch status register "busy" and "nack" bits; confirm against the
 * CPU manual.
 */
#define IDR_BUSY (1<<0)
#define IDR_NACK (1<<1)

/* Shift of the target (interrupt) module ID in the dispatch control value. */
#define IDC_ITID_SHIFT 14

/* IPIs are delivered at the PIL of the corresponding interrupt level. */
#define IPI_AST PIL_AST
#define IPI_RENDEZVOUS PIL_RENDEZVOUS
#define IPI_STOP PIL_STOP

/* Number of dispatch attempts before giving up on an IPI. */
#define IPI_RETRIES 5000
51
/*
 * Argument area used to hand off state to a secondary CPU while it is
 * being started.
 */
struct cpu_start_args {
	u_int csa_count;	/* startup counter; exact protocol lives in mp_machdep.c -- confirm there */
	u_int csa_mid;		/* module (CPU) ID of the target */
	u_int csa_state;	/* handshake state (CPU_CLKSYNC/CPU_INIT/CPU_BOOTSTRAP) */
	vm_offset_t csa_pcpu;	/* per-CPU area for the new CPU */
	u_long csa_tick;	/* tick counter value (clock sync) */
	u_long csa_ver;		/* version register of the target -- presumably; verify */
	struct tte csa_ttes[PCPU_PAGES];	/* TTEs mapping the per-CPU pages */
};
61
/*
 * Argument block for the cache-invalidation IPI handlers.  A single shared
 * instance (ipi_cache_args) exists; it is protected by ipi_mtx, which the
 * sender holds from dispatch until ipi_wait() completes.
 */
struct ipi_cache_args {
	u_int ica_mask;		/* CPUs which have yet to acknowledge */
	vm_paddr_t ica_pa;	/* physical address of the page to invalidate */
};
66
/*
 * Argument block for the TLB-demap IPI handlers.  Like ipi_cache_args,
 * a single shared instance (ipi_tlb_args) is protected by ipi_mtx.
 */
struct ipi_tlb_args {
	u_int ita_mask;		/* CPUs which have yet to acknowledge */
	struct pmap *ita_pmap;	/* address space to demap in */
	u_long ita_start;	/* start VA (or the single VA, see ita_va) */
	u_long ita_end;		/* end VA for range demaps */
};
/* Single-page demaps reuse the start field as the page's VA. */
#define ita_va ita_start
74
struct pcpu;

/* MP bootstrap/shutdown entry points (implemented in mp_machdep.c). */
void cpu_mp_bootstrap(struct pcpu *pc);
void cpu_mp_shutdown(void);

/* Low-level IPI dispatch: send data words d0-d2 to the CPUs in "cpus". */
void cpu_ipi_selected(u_int cpus, u_long d0, u_long d1, u_long d2);

/* Higher-level IPI senders keyed by IPI_* level. */
void ipi_selected(u_int cpus, u_int ipi);
void ipi_all(u_int ipi);
void ipi_all_but_self(u_int ipi);

/* Allocate memory for the MP trampoline. */
vm_offset_t mp_tramp_alloc(void);

/* Shared IPI state; ipi_mtx protects the two argument areas below. */
extern struct mtx ipi_mtx;
extern struct ipi_cache_args ipi_cache_args;
extern struct ipi_tlb_args ipi_tlb_args;

/* MP trampoline code/state used to start secondary CPUs. */
extern vm_offset_t mp_tramp;
extern char *mp_tramp_code;
extern u_long mp_tramp_code_len;
extern u_long mp_tramp_tlb_slots;
extern u_long mp_tramp_func;

extern void mp_startup(void);

/* Trap-level IPI handlers (assembler entry points) for cache invalidation. */
extern char tl_ipi_cheetah_dcache_page_inval[];
extern char tl_ipi_spitfire_dcache_page_inval[];
extern char tl_ipi_spitfire_icache_page_inval[];

/* Trap-level IPI handlers for interrupt levels and TLB demaps. */
extern char tl_ipi_level[];
extern char tl_ipi_tlb_context_demap[];
extern char tl_ipi_tlb_page_demap[];
extern char tl_ipi_tlb_range_demap[];
108
109 #ifdef SMP
110
111 #if defined(_MACHINE_PMAP_H_) && defined(_SYS_MUTEX_H_)
112
113 static __inline void *
114 ipi_dcache_page_inval(void *func, vm_paddr_t pa)
115 {
116 struct ipi_cache_args *ica;
117
118 if (smp_cpus == 1)
119 return (NULL);
120 ica = &ipi_cache_args;
121 mtx_lock_spin(&ipi_mtx);
122 ica->ica_mask = all_cpus;
123 ica->ica_pa = pa;
124 cpu_ipi_selected(PCPU_GET(other_cpus), 0, (u_long)func, (u_long)ica);
125 return (&ica->ica_mask);
126 }
127
128 static __inline void *
129 ipi_icache_page_inval(void *func, vm_paddr_t pa)
130 {
131 struct ipi_cache_args *ica;
132
133 if (smp_cpus == 1)
134 return (NULL);
135 ica = &ipi_cache_args;
136 mtx_lock_spin(&ipi_mtx);
137 ica->ica_mask = all_cpus;
138 ica->ica_pa = pa;
139 cpu_ipi_selected(PCPU_GET(other_cpus), 0, (u_long)func, (u_long)ica);
140 return (&ica->ica_mask);
141 }
142
/*
 * Demap the whole context of pmap "pm" on every other CPU that has it
 * active.  Returns NULL when no IPI is needed; otherwise returns a cookie
 * (a pointer to the shared acknowledgement mask) for ipi_wait() and leaves
 * ipi_mtx held -- the caller must complete the operation with ipi_wait().
 */
static __inline void *
ipi_tlb_context_demap(struct pmap *pm)
{
	struct ipi_tlb_args *ita;
	u_int cpus;

	if (smp_cpus == 1)
		return (NULL);
	/* Only CPUs that currently have this pmap active need to demap. */
	if ((cpus = (pm->pm_active & PCPU_GET(other_cpus))) == 0)
		return (NULL);
	ita = &ipi_tlb_args;
	/* ipi_mtx serializes use of the shared argument area. */
	mtx_lock_spin(&ipi_mtx);
	/* Include our own bit; ipi_wait() clears it before spinning. */
	ita->ita_mask = cpus | PCPU_GET(cpumask);
	ita->ita_pmap = pm;
	cpu_ipi_selected(cpus, 0, (u_long)tl_ipi_tlb_context_demap,
	    (u_long)ita);
	return (&ita->ita_mask);
}
161
162 static __inline void *
163 ipi_tlb_page_demap(struct pmap *pm, vm_offset_t va)
164 {
165 struct ipi_tlb_args *ita;
166 u_int cpus;
167
168 if (smp_cpus == 1)
169 return (NULL);
170 if ((cpus = (pm->pm_active & PCPU_GET(other_cpus))) == 0)
171 return (NULL);
172 ita = &ipi_tlb_args;
173 mtx_lock_spin(&ipi_mtx);
174 ita->ita_mask = cpus | PCPU_GET(cpumask);
175 ita->ita_pmap = pm;
176 ita->ita_va = va;
177 cpu_ipi_selected(cpus, 0, (u_long)tl_ipi_tlb_page_demap, (u_long)ita);
178 return (&ita->ita_mask);
179 }
180
/*
 * Demap the VA range [start, end) in pmap "pm" on every other CPU that has
 * the pmap active.  Returns NULL when no IPI is needed; otherwise returns
 * a cookie for ipi_wait() and leaves ipi_mtx held -- the caller must
 * complete the operation with ipi_wait().
 */
static __inline void *
ipi_tlb_range_demap(struct pmap *pm, vm_offset_t start, vm_offset_t end)
{
	struct ipi_tlb_args *ita;
	u_int cpus;

	if (smp_cpus == 1)
		return (NULL);
	/* Only CPUs that currently have this pmap active need to demap. */
	if ((cpus = (pm->pm_active & PCPU_GET(other_cpus))) == 0)
		return (NULL);
	ita = &ipi_tlb_args;
	/* ipi_mtx serializes use of the shared argument area. */
	mtx_lock_spin(&ipi_mtx);
	/* Include our own bit; ipi_wait() clears it before spinning. */
	ita->ita_mask = cpus | PCPU_GET(cpumask);
	ita->ita_pmap = pm;
	ita->ita_start = start;
	ita->ita_end = end;
	cpu_ipi_selected(cpus, 0, (u_long)tl_ipi_tlb_range_demap, (u_long)ita);
	return (&ita->ita_mask);
}
200
/*
 * Wait for the IPI described by "cookie" (the acknowledgement mask returned
 * by one of the ipi_*() senders above) to complete: clear our own CPU's bit,
 * spin until every target CPU has cleared its bit, then release the ipi_mtx
 * acquired by the sender.  A NULL cookie means no IPI was sent; do nothing.
 * Note the clear/spin/unlock order is essential -- the shared argument area
 * must not be reused until the mask drains to zero.
 */
static __inline void
ipi_wait(void *cookie)
{
	volatile u_int *mask;

	if ((mask = cookie) != NULL) {
		atomic_clear_int(mask, PCPU_GET(cpumask));
		while (*mask != 0)
			;
		mtx_unlock_spin(&ipi_mtx);
	}
}
213
214 #endif /* _MACHINE_PMAP_H_ && _SYS_MUTEX_H_ */
215
216 #else
217
218 static __inline void *
219 ipi_dcache_page_inval(void *func, vm_paddr_t pa)
220 {
221
222 return (NULL);
223 }
224
225 static __inline void *
226 ipi_icache_page_inval(void *func, vm_paddr_t pa)
227 {
228
229 return (NULL);
230 }
231
232 static __inline void *
233 ipi_tlb_context_demap(struct pmap *pm)
234 {
235
236 return (NULL);
237 }
238
239 static __inline void *
240 ipi_tlb_page_demap(struct pmap *pm, vm_offset_t va)
241 {
242
243 return (NULL);
244 }
245
246 static __inline void *
247 ipi_tlb_range_demap(struct pmap *pm, vm_offset_t start, vm_offset_t end)
248 {
249
250 return (NULL);
251 }
252
/* UP stub: no IPI is ever outstanding, so there is nothing to wait for. */
static __inline void
ipi_wait(void *cookie)
{

	(void)cookie;
}
258
259 #endif /* SMP */
260
261 #endif /* !LOCORE */
262
263 #endif /* !_MACHINE_SMP_H_ */
/* Cache object: 08bb2efc9a418b0a754243a0c67ac3bc */