1 /*-
2 * Copyright (c) 2001 Jake Burkholder.
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 *
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24 * SUCH DAMAGE.
25 *
26 * $FreeBSD$
27 */
28
29 #ifndef _MACHINE_SMP_H_
30 #define _MACHINE_SMP_H_
31
/*
 * Secondary-CPU startup handshake states; presumably stored in
 * cpu_start_args.csa_state as the boot CPU and the starting CPU step
 * through clock sync, init and bootstrap -- confirm against mp_machdep.c.
 */
#define	CPU_CLKSYNC	1
#define	CPU_INIT	2
#define	CPU_BOOTSTRAP	3
35
36 #ifndef LOCORE
37
38 #include <machine/intr_machdep.h>
39 #include <machine/tte.h>
40
/*
 * Interrupt dispatch status bits: dispatch still in progress / target
 * NACKed the dispatch.  NOTE(review): presumably the UltraSPARC interrupt
 * dispatch status register layout -- confirm against the CPU manual.
 */
#define	IDR_BUSY	(1<<0)
#define	IDR_NACK	(1<<1)

/* IPIs are delivered at the corresponding processor interrupt levels. */
#define	IPI_AST		PIL_AST
#define	IPI_RENDEZVOUS	PIL_RENDEZVOUS
#define	IPI_STOP	PIL_STOP
#define	IPI_PREEMPT	PIL_PREEMPT


/* NOTE(review): presumably bounds IPI dispatch retries -- confirm use. */
#define	IPI_RETRIES	5000
51
/*
 * Argument area shared with a secondary CPU while it is being started.
 */
struct cpu_start_args {
	u_int		csa_count;	/* startup rendezvous counter -- confirm exact use */
	u_int		csa_state;	/* CPU_CLKSYNC/CPU_INIT/CPU_BOOTSTRAP */
	vm_offset_t	csa_pcpu;	/* per-CPU data area for the new CPU */
	u_int		csa_cpuid;	/* id of the CPU being started */
};
58
/* Arguments for the cache page-invalidation IPIs. */
struct ipi_cache_args {
	u_int		ica_mask;	/* CPUs yet to ack; see ipi_wait() */
	vm_paddr_t	ica_pa;		/* physical address of the page */
};
63
/* Arguments for the TLB demap IPIs. */
struct ipi_tlb_args {
	u_int		ita_mask;	/* CPUs yet to ack; see ipi_wait() */
	struct pmap	*ita_pmap;	/* pmap the demap applies to */
	u_long		ita_start;	/* start VA (single VA via ita_va) */
	u_long		ita_end;	/* end VA for range demaps */
};
/* Single-page demaps reuse ita_start as the sole VA. */
#define	ita_va	ita_start
71
struct pcpu;

/* MP startup/shutdown entry points. */
void	cpu_mp_bootstrap(struct pcpu *pc);
void	cpu_mp_shutdown(void);

/*
 * Low-level IPI dispatch: send data words d0-d2 to a list of CPUs or to a
 * single module id.  NOTE(review): the disabled (#if 0) inline functions
 * below still call a 4-argument form of cpu_ipi_selected() that does not
 * match this 6-argument prototype.
 */
void	cpu_ipi_selected(int cpus, uint16_t *cpulist, u_long d0, u_long d1, u_long d2, uint64_t *ackmask);
void	cpu_ipi_send(u_int mid, u_long d0, u_long d1, u_long d2);

/* Per-IPI trap handlers. */
void	cpu_ipi_ast(struct trapframe *tf);
void	cpu_ipi_stop(struct trapframe *tf);
void	cpu_ipi_preempt(struct trapframe *tf);

/* MI IPI interface; "ipi" is one of the IPI_* levels above. */
void	ipi_selected(u_int cpus, u_int ipi);
void	ipi_all(u_int ipi);
void	ipi_all_but_self(u_int ipi);

/* MP trampoline setup (TSB descriptor / nucleus mappings). */
vm_offset_t mp_tramp_alloc(void);
void	mp_set_tsb_desc_ra(vm_paddr_t);
void	mp_add_nucleus_mapping(vm_offset_t, uint64_t);

/* Shared IPI argument areas; written under ipi_mtx (see ipi_* inlines). */
extern	struct mtx ipi_mtx;
extern	struct ipi_cache_args ipi_cache_args;
extern	struct ipi_tlb_args ipi_tlb_args;

/* MP trampoline code and parameters, presumably filled by mp_tramp_alloc(). */
extern	vm_offset_t mp_tramp;
extern	char *mp_tramp_code;
extern	u_long mp_tramp_code_len;
extern	u_long mp_tramp_tte_slots;
extern	u_long mp_tramp_tsb_desc_ra;
extern	u_long mp_tramp_func;

extern	void mp_startup(void);

/* Trap-level IPI handler entry labels -- presumably assembly; confirm. */
extern	char tl_ipi_level[];
extern	char tl_invltlb[];
extern	char tl_invlctx[];
extern	char tl_invlpg[];
extern	char tl_invlrng[];
extern	char tl_tsbupdate[];
extern	char tl_ttehashupdate[];
111
112 #ifdef SMP
113
114 #if defined(_MACHINE_PMAP_H_) && defined(_SYS_MUTEX_H_)
115 #if 0
/*
 * DISABLED (#if 0): IPI all other CPUs to invalidate the dcache page at
 * physical address "pa".  Returns an ipi_wait() cookie, or NULL on UP.
 * Leaves ipi_mtx held; ipi_wait() releases it.  NOTE(review): the
 * 4-argument cpu_ipi_selected() call predates the current 6-argument
 * prototype and must be updated before this code is re-enabled.
 */
static __inline void *
ipi_dcache_page_inval(void *func, vm_paddr_t pa)
{
	struct ipi_cache_args *ica;

	if (smp_cpus == 1)
		return (NULL);
	ica = &ipi_cache_args;
	mtx_lock_spin(&ipi_mtx);
	ica->ica_mask = all_cpus;
	ica->ica_pa = pa;
	cpu_ipi_selected(PCPU_GET(other_cpus), 0, (u_long)func, (u_long)ica);
	return (&ica->ica_mask);
}
130
/*
 * DISABLED (#if 0): IPI all other CPUs to invalidate the icache page at
 * physical address "pa".  Same contract and caveats as
 * ipi_dcache_page_inval() above: NULL on UP, otherwise returns an
 * ipi_wait() cookie with ipi_mtx held, and the cpu_ipi_selected() call
 * no longer matches the live prototype.
 */
static __inline void *
ipi_icache_page_inval(void *func, vm_paddr_t pa)
{
	struct ipi_cache_args *ica;

	if (smp_cpus == 1)
		return (NULL);
	ica = &ipi_cache_args;
	mtx_lock_spin(&ipi_mtx);
	ica->ica_mask = all_cpus;
	ica->ica_pa = pa;
	cpu_ipi_selected(PCPU_GET(other_cpus), 0, (u_long)func, (u_long)ica);
	return (&ica->ica_mask);
}
145
/*
 * DISABLED (#if 0): IPI the CPUs on which pmap "pm" is active to demap
 * its entire TLB context.  Returns NULL when no IPI is needed (UP, or no
 * other active CPUs); otherwise returns an ipi_wait() cookie with ipi_mtx
 * held.  The sender's own cpumask is set in ita_mask so ipi_wait() can
 * clear it.  NOTE(review): stale 4-argument cpu_ipi_selected() call;
 * tl_ipi_tlb_context_demap is also not declared in this file.
 */
static __inline void *
ipi_tlb_context_demap(struct pmap *pm)
{
	struct ipi_tlb_args *ita;
	u_int cpus;

	if (smp_cpus == 1)
		return (NULL);
	if ((cpus = (pm->pm_active & PCPU_GET(other_cpus))) == 0)
		return (NULL);
	ita = &ipi_tlb_args;
	mtx_lock_spin(&ipi_mtx);
	ita->ita_mask = cpus | PCPU_GET(cpumask);
	ita->ita_pmap = pm;
	cpu_ipi_selected(cpus, 0, (u_long)tl_ipi_tlb_context_demap,
	    (u_long)ita);
	return (&ita->ita_mask);
}
164
/*
 * DISABLED (#if 0): IPI the CPUs on which pmap "pm" is active to demap
 * the single page at "va".  Same contract as ipi_tlb_context_demap()
 * above (NULL when no IPI needed, cookie + ipi_mtx held otherwise), and
 * the same stale-prototype caveat applies.
 */
static __inline void *
ipi_tlb_page_demap(struct pmap *pm, vm_offset_t va)
{
	struct ipi_tlb_args *ita;
	u_int cpus;

	if (smp_cpus == 1)
		return (NULL);
	if ((cpus = (pm->pm_active & PCPU_GET(other_cpus))) == 0)
		return (NULL);
	ita = &ipi_tlb_args;
	mtx_lock_spin(&ipi_mtx);
	ita->ita_mask = cpus | PCPU_GET(cpumask);
	ita->ita_pmap = pm;
	ita->ita_va = va;
	cpu_ipi_selected(cpus, 0, (u_long)tl_ipi_tlb_page_demap, (u_long)ita);
	return (&ita->ita_mask);
}
183
/*
 * DISABLED (#if 0): IPI the CPUs on which pmap "pm" is active to demap
 * the virtual address range [start, end).  Same contract as
 * ipi_tlb_context_demap() above (NULL when no IPI needed, cookie +
 * ipi_mtx held otherwise), and the same stale-prototype caveat applies.
 */
static __inline void *
ipi_tlb_range_demap(struct pmap *pm, vm_offset_t start, vm_offset_t end)
{
	struct ipi_tlb_args *ita;
	u_int cpus;

	if (smp_cpus == 1)
		return (NULL);
	if ((cpus = (pm->pm_active & PCPU_GET(other_cpus))) == 0)
		return (NULL);
	ita = &ipi_tlb_args;
	mtx_lock_spin(&ipi_mtx);
	ita->ita_mask = cpus | PCPU_GET(cpumask);
	ita->ita_pmap = pm;
	ita->ita_start = start;
	ita->ita_end = end;
	cpu_ipi_selected(cpus, 0, (u_long)tl_ipi_tlb_range_demap, (u_long)ita);
	return (&ita->ita_mask);
}
203 #endif
204 static __inline void
205 ipi_wait(void *cookie)
206 {
207 volatile u_int *mask;
208
209 if ((mask = cookie) != NULL) {
210 atomic_clear_int(mask, PCPU_GET(cpumask));
211 while (*mask != 0)
212 ;
213 mtx_unlock_spin(&ipi_mtx);
214 }
215 }
216
217 #endif /* _MACHINE_PMAP_H_ && _SYS_MUTEX_H_ */
218
219 #else
220
/* UP stub: no other CPUs to notify, so no ipi_wait() cookie. */
static __inline void *
ipi_dcache_page_inval(void *func, vm_paddr_t pa)
{
	return (NULL);
}
226
/* UP stub: no other CPUs to notify, so no ipi_wait() cookie. */
static __inline void *
ipi_icache_page_inval(void *func, vm_paddr_t pa)
{
	return (NULL);
}
232
/* UP stub: no remote TLBs to demap, so no ipi_wait() cookie. */
static __inline void *
ipi_tlb_context_demap(struct pmap *pm)
{
	return (NULL);
}
238
/* UP stub: no remote TLBs to demap, so no ipi_wait() cookie. */
static __inline void *
ipi_tlb_page_demap(struct pmap *pm, vm_offset_t va)
{
	return (NULL);
}
244
/* UP stub: no remote TLBs to demap, so no ipi_wait() cookie. */
static __inline void *
ipi_tlb_range_demap(struct pmap *pm, vm_offset_t start, vm_offset_t end)
{
	return (NULL);
}
250
/* UP stub: the ipi_* initiators above always return NULL; nothing to wait on. */
static __inline void
ipi_wait(void *cookie)
{
}
255
256 #endif /* SMP */
257
258 #endif /* !LOCORE */
259
260 #endif /* !_MACHINE_SMP_H_ */