1 /*
2 * Copyright (c) 1997, by Steve Passe
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. The name of the developer may NOT be used to endorse or promote products
11 * derived from this software without specific prior written permission.
12 *
13 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
14 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
16 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
17 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
18 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
19 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
20 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
21 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
22 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
23 * SUCH DAMAGE.
24 *
25 * $FreeBSD$
26 */
27
28
29 #ifndef _MACHINE_LOCK_H_
30 #define _MACHINE_LOCK_H_
31
32
33 #ifdef LOCORE
34
35 #ifdef SMP
36
37 #define MPLOCKED lock ;
38
39 /*
40 * Some handy macros to allow logical organization and
41 * convenient reassignment of various locks.
42 */
43
44 #define FPU_LOCK call _get_fpu_lock
45 #define ALIGN_LOCK call _get_align_lock
46 #define SYSCALL_LOCK call _get_syscall_lock
47 #define ALTSYSCALL_LOCK call _get_altsyscall_lock
48
49 /*
50 * Protects INTR() ISRs.
51 */
52 #define ISR_TRYLOCK \
53 pushl $_mp_lock ; /* GIANT_LOCK */ \
54 call _MPtrylock ; /* try to get lock */ \
55 add $4, %esp
56
57 #define ISR_RELLOCK \
58 pushl $_mp_lock ; /* GIANT_LOCK */ \
59 call _MPrellock ; \
60 add $4, %esp
61
62 /*
63 * Protects the IO APIC and apic_imen as a critical region.
64 */
65 #define IMASK_LOCK \
66 pushl $_imen_lock ; /* address of lock */ \
67 call _s_lock ; /* MP-safe */ \
68 addl $4, %esp
69
70 #define IMASK_UNLOCK \
71 pushl $_imen_lock ; /* address of lock */ \
72 call _s_unlock ; /* MP-safe */ \
73 addl $4, %esp
74
75 /*
76 * Variations of CPL_LOCK protect spl updates as a critical region.
77 * Items within this 'region' include:
78 * cpl
79 * cml
80 * cil
81 * ipending
82 */
83
84 /*
85 * Botom half routines, ie. those already protected from INTs.
86 *
87 * Used in:
88 * sys/i386/isa/ipl.s: _doreti
89 * sys/i386/isa/apic_vector.s: _Xintr0, ..., _Xintr23
90 */
91 #define CPL_LOCK \
92 pushl $_cpl_lock ; /* address of lock */ \
93 call _s_lock ; /* MP-safe */ \
94 addl $4, %esp
95
96 #define CPL_UNLOCK \
97 pushl $_cpl_lock ; /* address of lock */ \
98 call _s_unlock ; /* MP-safe */ \
99 addl $4, %esp
100
101 /*
102 * INT safe version for top half of kernel.
103 *
104 * Used in:
105 * sys/i386/i386/exception.s: _Xfpu, _Xalign, _Xsyscall, _Xint0x80_syscall
106 * sys/i386/isa/apic_ipl.s: splz()
107 */
108 #define SCPL_LOCK \
109 pushl $_cpl_lock ; \
110 call _ss_lock ; \
111 addl $4, %esp
112
113 #define SCPL_UNLOCK \
114 pushl $_cpl_lock ; \
115 call _ss_unlock ; \
116 addl $4, %esp
117
118 #else /* SMP */
119
120 #define MPLOCKED /* NOP */
121
122 #define FPU_LOCK /* NOP */
123 #define ALIGN_LOCK /* NOP */
124 #define SYSCALL_LOCK /* NOP */
125 #define ALTSYSCALL_LOCK /* NOP */
126
127 #endif /* SMP */
128
129 #else /* LOCORE */
130
131 #ifdef SMP
132
133 #include <machine/smptests.h> /** xxx_LOCK */
134
135 /*
136 * Locks regions protected in UP kernel via cli/sti.
137 */
138 #ifdef USE_MPINTRLOCK
139 #define MPINTR_LOCK() s_lock(&mpintr_lock)
140 #define MPINTR_UNLOCK() s_unlock(&mpintr_lock)
141 #else
142 #define MPINTR_LOCK()
143 #define MPINTR_UNLOCK()
144 #endif /* USE_MPINTRLOCK */
145
146 /*
147 * Protects cpl/cml/cil/ipending data as a critical region.
148 *
149 * Used in:
150 * sys/i386/isa/ipl_funcs.c: DO_SETBITS, softclockpending(), GENSPL,
151 * spl0(), splx(), splq()
152 */
153 #define CPL_LOCK() s_lock(&cpl_lock) /* Bottom end */
154 #define CPL_UNLOCK() s_unlock(&cpl_lock)
155 #define SCPL_LOCK() ss_lock(&cpl_lock) /* INT safe: top end */
156 #define SCPL_UNLOCK() ss_unlock(&cpl_lock)
157
158 /*
159 * sio/cy lock.
160 * XXX should rc (RISCom/8) use this?
161 */
162 #ifdef USE_COMLOCK
163 #define COM_LOCK() s_lock(&com_lock)
164 #define COM_UNLOCK() s_unlock(&com_lock)
165 #define COM_DISABLE_INTR() \
166 { __asm __volatile("cli" : : : "memory"); COM_LOCK(); }
167 #define COM_ENABLE_INTR() \
168 { COM_UNLOCK(); __asm __volatile("sti"); }
169 #else
170 #define COM_LOCK()
171 #define COM_UNLOCK()
172 #define COM_DISABLE_INTR() disable_intr()
173 #define COM_ENABLE_INTR() enable_intr()
174 #endif /* USE_COMLOCK */
175
176 /*
177 * Clock hardware/struct lock.
178 * XXX pcaudio and friends still need this lock installed.
179 */
180 #ifdef USE_CLOCKLOCK
181 #define CLOCK_LOCK() s_lock(&clock_lock)
182 #define CLOCK_UNLOCK() s_unlock(&clock_lock)
183 #define CLOCK_DISABLE_INTR() \
184 { __asm __volatile("cli" : : : "memory"); CLOCK_LOCK(); }
185 #define CLOCK_ENABLE_INTR() \
186 { CLOCK_UNLOCK(); __asm __volatile("sti"); }
187 #else
188 #define CLOCK_LOCK()
189 #define CLOCK_UNLOCK()
190 #define CLOCK_DISABLE_INTR() disable_intr()
191 #define CLOCK_ENABLE_INTR() enable_intr()
192 #endif /* USE_CLOCKLOCK */
193
194 #else /* SMP */
195
196 #define MPINTR_LOCK()
197 #define MPINTR_UNLOCK()
198
199 #define CPL_LOCK()
200 #define CPL_UNLOCK()
201 #define SCPL_LOCK()
202 #define SCPL_UNLOCK()
203
204 #define COM_LOCK()
205 #define COM_UNLOCK()
206 #define CLOCK_LOCK()
207 #define CLOCK_UNLOCK()
208
209 #endif /* SMP */
210
211 /*
212 * Simple spin lock.
213 * It is an error to hold one of these locks while a process is sleeping.
214 */
215 struct simplelock {
216 volatile int lock_data;
217 };
218
219 /* functions in simplelock.s */
220 void s_lock_init __P((struct simplelock *));
221 void s_lock __P((struct simplelock *));
222 int s_lock_try __P((struct simplelock *));
223 void s_unlock __P((struct simplelock *));
224 void ss_lock __P((struct simplelock *));
225 void ss_unlock __P((struct simplelock *));
226 void s_lock_np __P((struct simplelock *));
227 void s_unlock_np __P((struct simplelock *));
228
229 /* global data in mp_machdep.c */
230 extern struct simplelock imen_lock;
231 extern struct simplelock cpl_lock;
232 extern struct simplelock fast_intr_lock;
233 extern struct simplelock intr_lock;
234 extern struct simplelock clock_lock;
235 extern struct simplelock com_lock;
236 extern struct simplelock mpintr_lock;
237 extern struct simplelock mcount_lock;
238
239 #if !defined(SIMPLELOCK_DEBUG) && NCPUS > 1
240 /*
241 * This set of defines turns on the real functions in i386/isa/apic_ipl.s.
242 */
243 #define simple_lock_init(alp) s_lock_init(alp)
244 #define simple_lock(alp) s_lock(alp)
245 #define simple_lock_try(alp) s_lock_try(alp)
246 #define simple_unlock(alp) s_unlock(alp)
247
248 #endif /* !SIMPLELOCK_DEBUG && NCPUS > 1 */
249
250 #endif /* LOCORE */
251
252 #endif /* !_MACHINE_LOCK_H_ */
Cache object: c459525b81b258b57a3f9dad87affd92
|