FreeBSD/Linux Kernel Cross Reference
sys/sys/spinlock2.h
/*
 * Copyright (c) 2005 Jeffrey M. Hsu.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Jeffrey M. Hsu.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#ifndef _SYS_SPINLOCK2_H_
#define _SYS_SPINLOCK2_H_

#ifndef _KERNEL

#error "This file should not be included by userland programs."

#else

#ifndef _SYS_SYSTM_H_
#include <sys/systm.h>
#endif
#ifndef _SYS_THREAD2_H_
#include <sys/thread2.h>
#endif
#ifndef _SYS_GLOBALDATA_H_
#include <sys/globaldata.h>
#endif
#include <machine/atomic.h>
#include <machine/cpufunc.h>

extern struct spinlock pmap_spin;

int spin_trylock_contested(struct spinlock *spin);
void _spin_lock_contested(struct spinlock *spin, const char *ident);
void _spin_lock_shared_contested(struct spinlock *spin, const char *ident);
void _spin_pool_lock(void *chan, const char *ident);
void _spin_pool_unlock(void *chan);

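/*
 * The lock entry points are macros so that __func__ expands to the
 * caller's function name, which the contested paths receive as the
 * ident for contention reporting and debugging.
 */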
#define spin_lock(spin)			_spin_lock(spin, __func__)
#define spin_lock_quick(spin)		_spin_lock_quick(spin, __func__)
#define spin_lock_shared(spin)		_spin_lock_shared(spin, __func__)
#define spin_lock_shared_quick(spin)	_spin_lock_shared_quick(spin, __func__)
#define spin_pool_lock(chan)		_spin_pool_lock(chan, __func__)

/*
 * Attempt to obtain an exclusive spinlock.  Returns FALSE on failure,
 * TRUE on success.
 */
static __inline boolean_t
spin_trylock(struct spinlock *spin)
{
	globaldata_t gd = mycpu;

	++gd->gd_curthread->td_critcount;
	cpu_ccfence();
	++gd->gd_spinlocks;
	if (atomic_cmpset_int(&spin->counta, 0, 1) == 0)
		return (spin_trylock_contested(spin));
#ifdef DEBUG_LOCKS
	int i;
	for (i = 0; i < SPINLOCK_DEBUG_ARRAY_SIZE; i++) {
		if (gd->gd_curthread->td_spinlock_stack_id[i] == 0) {
			gd->gd_curthread->td_spinlock_stack_id[i] = 1;
			gd->gd_curthread->td_spinlock_stack[i] = spin;
			gd->gd_curthread->td_spinlock_caller_pc[i] =
				__builtin_return_address(0);
			break;
		}
	}
#endif
	return (TRUE);
}
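
/*
 * Typical try-lock pattern (sketch only; 'ms' and its fields are
 * hypothetical, not part of this API):
 *
 *	if (spin_trylock(&ms->lk)) {
 *		++ms->count;		(short, non-blocking section)
 *		spin_unlock(&ms->lk);
 *	} else {
 *		(lock busy: defer the work or use spin_lock())
 *	}
 */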

/*
 * Return TRUE if the spinlock is held (we can't tell by whom, though).
 */
static __inline int
spin_held(struct spinlock *spin)
{
	return (spin->counta != 0);
}

/*
 * Obtain an exclusive spinlock and return.
 */
static __inline void
_spin_lock_quick(globaldata_t gd, struct spinlock *spin, const char *ident)
{
	++gd->gd_curthread->td_critcount;
	cpu_ccfence();
	++gd->gd_spinlocks;
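	/*
	 * Unconditionally bump the count.  If the result is anything
	 * other than 1 the lock was already held or is being contended,
	 * so drop into the contested path.
	 */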
	atomic_add_int(&spin->counta, 1);
	if (spin->counta != 1)
		_spin_lock_contested(spin, ident);
#ifdef DEBUG_LOCKS
	int i;
	for (i = 0; i < SPINLOCK_DEBUG_ARRAY_SIZE; i++) {
		if (gd->gd_curthread->td_spinlock_stack_id[i] == 0) {
			gd->gd_curthread->td_spinlock_stack_id[i] = 1;
			gd->gd_curthread->td_spinlock_stack[i] = spin;
			gd->gd_curthread->td_spinlock_caller_pc[i] =
				__builtin_return_address(0);
			break;
		}
	}
#endif
}

static __inline void
_spin_lock(struct spinlock *spin, const char *ident)
{
	_spin_lock_quick(mycpu, spin, ident);
}

/*
 * Release an exclusive spinlock.  We can just do this passively, only
 * ensuring that our spinlock count is left intact until the lock is
 * cleared.
 */
static __inline void
spin_unlock_quick(globaldata_t gd, struct spinlock *spin)
{
#ifdef DEBUG_LOCKS
	int i;
	for (i = 0; i < SPINLOCK_DEBUG_ARRAY_SIZE; i++) {
		if ((gd->gd_curthread->td_spinlock_stack_id[i] == 1) &&
		    (gd->gd_curthread->td_spinlock_stack[i] == spin)) {
			gd->gd_curthread->td_spinlock_stack_id[i] = 0;
			gd->gd_curthread->td_spinlock_stack[i] = NULL;
			gd->gd_curthread->td_spinlock_caller_pc[i] = NULL;
			break;
		}
	}
#endif
	/*
	 * Release the lock.  The store fence ensures that all writes
	 * made inside the critical section are globally visible before
	 * the atomic decrement makes the lock available again.
	 */
#ifdef DEBUG_LOCKS
	KKASSERT(spin->counta != 0);
#endif
	cpu_sfence();
	atomic_add_int(&spin->counta, -1);
	cpu_sfence();
#ifdef DEBUG_LOCKS
	KKASSERT(gd->gd_spinlocks > 0);
#endif
	--gd->gd_spinlocks;
	cpu_ccfence();
	--gd->gd_curthread->td_critcount;
}

static __inline void
spin_unlock(struct spinlock *spin)
{
	spin_unlock_quick(mycpu, spin);
}
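
/*
 * Typical exclusive lock sequence (sketch; 'struct mystate' and its
 * fields are hypothetical).  A spinlock holder is inside a critical
 * section, so the protected region must be short and must not block:
 *
 *	struct mystate {
 *		struct spinlock	lk;
 *		int		count;
 *	};
 *
 *	spin_lock(&ms->lk);
 *	++ms->count;
 *	spin_unlock(&ms->lk);
 */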

/*
 * Shared spinlocks
 */
static __inline void
_spin_lock_shared_quick(globaldata_t gd, struct spinlock *spin,
			const char *ident)
{
	++gd->gd_curthread->td_critcount;
	cpu_ccfence();
	++gd->gd_spinlocks;
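	/*
	 * Fast path: atomically convert an unheld lock (0) into a
	 * shared lock with one holder (SPINLOCK_SHARED | 1).  If the
	 * lock is already held, shared or exclusive, the cmpset fails
	 * and the contested path takes over (it also handles stacking
	 * additional shared holders).
	 */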
	if (atomic_cmpset_int(&spin->counta, 0, SPINLOCK_SHARED | 1) == 0)
		_spin_lock_shared_contested(spin, ident);
#ifdef DEBUG_LOCKS
	int i;
	for (i = 0; i < SPINLOCK_DEBUG_ARRAY_SIZE; i++) {
		if (gd->gd_curthread->td_spinlock_stack_id[i] == 0) {
			gd->gd_curthread->td_spinlock_stack_id[i] = 1;
			gd->gd_curthread->td_spinlock_stack[i] = spin;
			gd->gd_curthread->td_spinlock_caller_pc[i] =
				__builtin_return_address(0);
			break;
		}
	}
#endif
}

static __inline void
spin_unlock_shared_quick(globaldata_t gd, struct spinlock *spin)
{
#ifdef DEBUG_LOCKS
	int i;
	for (i = 0; i < SPINLOCK_DEBUG_ARRAY_SIZE; i++) {
		if ((gd->gd_curthread->td_spinlock_stack_id[i] == 1) &&
		    (gd->gd_curthread->td_spinlock_stack[i] == spin)) {
			gd->gd_curthread->td_spinlock_stack_id[i] = 0;
			gd->gd_curthread->td_spinlock_stack[i] = NULL;
			gd->gd_curthread->td_spinlock_caller_pc[i] = NULL;
			break;
		}
	}
	KKASSERT(spin->counta != 0);
#endif
	cpu_sfence();
	atomic_add_int(&spin->counta, -1);

	/*
	 * If we were the last shared holder, counta is now exactly
	 * SPINLOCK_SHARED (shared bit set, zero holders); reset it to 0
	 * so a later exclusive fast path can succeed.  If another cpu
	 * acquires the lock first, counta changes and the loop simply
	 * falls through.
	 */
	while (spin->counta == SPINLOCK_SHARED) {
		if (atomic_cmpset_int(&spin->counta, SPINLOCK_SHARED, 0))
			break;
	}
	cpu_sfence();
#ifdef DEBUG_LOCKS
	KKASSERT(gd->gd_spinlocks > 0);
#endif
	--gd->gd_spinlocks;
	cpu_ccfence();
	--gd->gd_curthread->td_critcount;
}

static __inline void
_spin_lock_shared(struct spinlock *spin, const char *ident)
{
	_spin_lock_shared_quick(mycpu, spin, ident);
}

static __inline void
spin_unlock_shared(struct spinlock *spin)
{
	spin_unlock_shared_quick(mycpu, spin);
}
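
/*
 * Typical read-mostly path using the shared forms (sketch; 'ms' is
 * the hypothetical structure from the exclusive example above).
 * Multiple cpus may hold the lock shared simultaneously, but a shared
 * hold still excludes exclusive holders:
 *
 *	spin_lock_shared(&ms->lk);
 *	v = ms->count;			(read-only access)
 *	spin_unlock_shared(&ms->lk);
 */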

static __inline void
spin_pool_unlock(void *chan)
{
	_spin_pool_unlock(chan);
}
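
/*
 * Pool spinlocks hash the channel address to one of a set of global
 * spinlocks, so an object can be locked without embedding a spinlock
 * of its own (sketch; 'obj' is a hypothetical pointer used only as
 * the hash key):
 *
 *	spin_pool_lock(obj);
 *	(short critical section covering obj)
 *	spin_pool_unlock(obj);
 */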

static __inline void
spin_init(struct spinlock *spin)
{
	spin->counta = 0;
	spin->countb = 0;
}
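
/*
 * A spinlock is normally embedded in the structure it protects and
 * must be zeroed via spin_init() before first use (sketch; 'mystate'
 * is the hypothetical structure from the examples above):
 *
 *	struct mystate ms;
 *
 *	spin_init(&ms.lk);
 */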

static __inline void
spin_uninit(struct spinlock *spin)
{
	/* unused */
}

#endif	/* _KERNEL */
#endif	/* _SYS_SPINLOCK2_H_ */