FreeBSD/Linux Kernel Cross Reference
sys/sys/sx.h
/*-
 * Copyright (c) 2007 Attilio Rao <attilio@freebsd.org>
 * Copyright (c) 2001 Jason Evans <jasone@freebsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice(s), this list of conditions and the following disclaimer as
 *    the first lines of this file unmodified other than the possible
 *    addition of one or more copyright notices.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice(s), this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 *
 * $FreeBSD$
 */

#ifndef _SYS_SX_H_
#define _SYS_SX_H_

#include <sys/_lock.h>
#include <sys/_sx.h>

#ifdef _KERNEL
#include <sys/pcpu.h>
#include <sys/lock_profile.h>
#include <sys/lockstat.h>
#include <machine/atomic.h>
#endif

/*
 * In general, the sx locks and rwlocks use very similar algorithms.
 * The main difference in the implementations is how threads are
 * blocked when a lock is unavailable.  For this, sx locks use sleep
 * queues which do not support priority propagation, and rwlocks use
 * turnstiles which do.
 *
 * The sx_lock field consists of several fields.  The low bit
 * indicates if the lock is locked with a shared or exclusive lock.  A
 * value of 0 indicates an exclusive lock, and a value of 1 indicates
 * a shared lock.  Bit 1 is a boolean indicating if there are any
 * threads waiting for a shared lock.  Bit 2 is a boolean indicating
 * if there are any threads waiting for an exclusive lock.  Bit 3 is a
 * boolean indicating if an exclusive lock is recursively held.  The
 * rest of the variable's definition is dependent on the value of the
 * first bit.  For an exclusive lock, it is a pointer to the thread
 * holding the lock, similar to the mtx_lock field of mutexes.  For
 * shared locks, it is a count of read locks that are held.
 *
 * When the lock is not locked by any thread, it is encoded as a
 * shared lock with zero waiters.
 * (A small sketch decoding an example lock word follows the flag
 * macros below.)
 */

#define SX_LOCK_SHARED                  0x01
#define SX_LOCK_SHARED_WAITERS          0x02
#define SX_LOCK_EXCLUSIVE_WAITERS       0x04
#define SX_LOCK_RECURSED                0x08
#define SX_LOCK_FLAGMASK \
        (SX_LOCK_SHARED | SX_LOCK_SHARED_WAITERS | \
        SX_LOCK_EXCLUSIVE_WAITERS | SX_LOCK_RECURSED)

#define SX_OWNER(x)                     ((x) & ~SX_LOCK_FLAGMASK)
#define SX_SHARERS_SHIFT                4
#define SX_SHARERS(x)                   (SX_OWNER(x) >> SX_SHARERS_SHIFT)
#define SX_SHARERS_LOCK(x) \
        ((x) << SX_SHARERS_SHIFT | SX_LOCK_SHARED)
#define SX_ONE_SHARER                   (1 << SX_SHARERS_SHIFT)

#define SX_LOCK_UNLOCKED                SX_SHARERS_LOCK(0)
#define SX_LOCK_DESTROYED \
        (SX_LOCK_SHARED_WAITERS | SX_LOCK_EXCLUSIVE_WAITERS)

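/*
 * Illustrative sketch, not part of the original header: the encoding
 * macros above are plain bit operations, so they can be exercised in
 * userland.  A lock word for three shared holders with an exclusive
 * waiter queued (0x35) is built and decoded as follows.
 */
#if 0
#include <stdint.h>
#include <stdio.h>

int
main(void)
{
        /* Three shared holders, exclusive waiter(s) queued: 0x35. */
        uintptr_t x = SX_SHARERS_LOCK(3) | SX_LOCK_EXCLUSIVE_WAITERS;

        printf("shared: %d\n", (int)(x & SX_LOCK_SHARED));      /* 1 */
        printf("sharers: %ju\n", (uintmax_t)SX_SHARERS(x));     /* 3 */
        printf("xwaiters: %d\n",
            (x & SX_LOCK_EXCLUSIVE_WAITERS) != 0);              /* 1 */
        return (0);
}
#endif
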
#ifdef _KERNEL

/*
 * Function prototypes.  Routines that start with an underscore are not part
 * of the public interface and are wrapped in a macro.
 */
void    sx_sysinit(void *arg);
#define sx_init(sx, desc)       sx_init_flags((sx), (desc), 0)
void    sx_init_flags(struct sx *sx, const char *description, int opts);
void    sx_destroy(struct sx *sx);
int     _sx_slock(struct sx *sx, int opts, const char *file, int line);
int     _sx_xlock(struct sx *sx, int opts, const char *file, int line);
int     _sx_try_slock(struct sx *sx, const char *file, int line);
int     _sx_try_xlock(struct sx *sx, const char *file, int line);
void    _sx_sunlock(struct sx *sx, const char *file, int line);
void    _sx_xunlock(struct sx *sx, const char *file, int line);
int     _sx_try_upgrade(struct sx *sx, const char *file, int line);
void    _sx_downgrade(struct sx *sx, const char *file, int line);
int     _sx_xlock_hard(struct sx *sx, uintptr_t tid, int opts,
            const char *file, int line);
int     _sx_slock_hard(struct sx *sx, int opts, const char *file, int line);
void    _sx_xunlock_hard(struct sx *sx, uintptr_t tid, const char *file,
            int line);
void    _sx_sunlock_hard(struct sx *sx, const char *file, int line);
#if defined(INVARIANTS) || defined(INVARIANT_SUPPORT)
void    _sx_assert(struct sx *sx, int what, const char *file, int line);
#endif
#ifdef DDB
int     sx_chain(struct thread *td, struct thread **ownerp);
#endif

struct sx_args {
        struct sx       *sa_sx;
        const char      *sa_desc;
};

#define SX_SYSINIT(name, sxa, desc) \
        static struct sx_args name##_args = { \
                (sxa), \
                (desc) \
        }; \
        SYSINIT(name##_sx_sysinit, SI_SUB_LOCK, SI_ORDER_MIDDLE, \
            sx_sysinit, &name##_args); \
        SYSUNINIT(name##_sx_sysuninit, SI_SUB_LOCK, SI_ORDER_MIDDLE, \
            sx_destroy, (sxa))

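/*
 * Illustrative sketch, not part of the original header: SX_SYSINIT
 * registers sx_sysinit() and sx_destroy() to run at the SI_SUB_LOCK
 * stage of boot and shutdown, so a file-scope lock needs no explicit
 * initialization call.  The name "foo_lock" is hypothetical.
 */
#if 0
static struct sx foo_lock;
SX_SYSINIT(foo, &foo_lock, "foo lock");
#endif
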
/*
 * Full lock operations that are suitable to be inlined in non-debug kernels.
 * If the lock can't be acquired or released trivially then the work is
 * deferred to 'tougher' functions.
 */

/* Acquire an exclusive lock. */
static __inline int
__sx_xlock(struct sx *sx, struct thread *td, int opts, const char *file,
    int line)
{
        uintptr_t tid = (uintptr_t)td;
        int error = 0;

        if (!atomic_cmpset_acq_ptr(&sx->sx_lock, SX_LOCK_UNLOCKED, tid))
                error = _sx_xlock_hard(sx, tid, opts, file, line);
        else
                LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(LS_SX_XLOCK_ACQUIRE,
                    sx, 0, 0, file, line);

        return (error);
}

/* Release an exclusive lock. */
static __inline void
__sx_xunlock(struct sx *sx, struct thread *td, const char *file, int line)
{
        uintptr_t tid = (uintptr_t)td;

        if (!atomic_cmpset_rel_ptr(&sx->sx_lock, tid, SX_LOCK_UNLOCKED))
                _sx_xunlock_hard(sx, tid, file, line);
}

/* Acquire a shared lock. */
static __inline int
__sx_slock(struct sx *sx, int opts, const char *file, int line)
{
        uintptr_t x = sx->sx_lock;
        int error = 0;

        if (!(x & SX_LOCK_SHARED) ||
            !atomic_cmpset_acq_ptr(&sx->sx_lock, x, x + SX_ONE_SHARER))
                error = _sx_slock_hard(sx, opts, file, line);
        else
                LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(LS_SX_SLOCK_ACQUIRE, sx,
                    0, 0, file, line);

        return (error);
}

/*
 * Release a shared lock.  We can just drop a single shared lock so
 * long as we aren't trying to drop the last shared lock when other
 * threads are waiting for an exclusive lock.  This takes advantage of
 * the fact that an unlocked lock is encoded as a shared lock with a
 * count of 0.
 */
static __inline void
__sx_sunlock(struct sx *sx, const char *file, int line)
{
        uintptr_t x = sx->sx_lock;

        if (x == (SX_SHARERS_LOCK(1) | SX_LOCK_EXCLUSIVE_WAITERS) ||
            !atomic_cmpset_rel_ptr(&sx->sx_lock, x, x - SX_ONE_SHARER))
                _sx_sunlock_hard(sx, file, line);
}

/*
 * Public interface for lock operations.
 */
#ifndef LOCK_DEBUG
#error  "LOCK_DEBUG not defined, include <sys/lock.h> before <sys/sx.h>"
#endif
#if (LOCK_DEBUG > 0) || defined(SX_NOINLINE)
#define sx_xlock(sx)            (void)_sx_xlock((sx), 0, LOCK_FILE, LOCK_LINE)
#define sx_xlock_sig(sx) \
        _sx_xlock((sx), SX_INTERRUPTIBLE, LOCK_FILE, LOCK_LINE)
#define sx_xunlock(sx)          _sx_xunlock((sx), LOCK_FILE, LOCK_LINE)
#define sx_slock(sx)            (void)_sx_slock((sx), 0, LOCK_FILE, LOCK_LINE)
#define sx_slock_sig(sx) \
        _sx_slock((sx), SX_INTERRUPTIBLE, LOCK_FILE, LOCK_LINE)
#define sx_sunlock(sx)          _sx_sunlock((sx), LOCK_FILE, LOCK_LINE)
#else
#define sx_xlock(sx) \
        (void)__sx_xlock((sx), curthread, 0, LOCK_FILE, LOCK_LINE)
#define sx_xlock_sig(sx) \
        __sx_xlock((sx), curthread, SX_INTERRUPTIBLE, LOCK_FILE, LOCK_LINE)
#define sx_xunlock(sx) \
        __sx_xunlock((sx), curthread, LOCK_FILE, LOCK_LINE)
#define sx_slock(sx)            (void)__sx_slock((sx), 0, LOCK_FILE, LOCK_LINE)
#define sx_slock_sig(sx) \
        __sx_slock((sx), SX_INTERRUPTIBLE, LOCK_FILE, LOCK_LINE)
#define sx_sunlock(sx)          __sx_sunlock((sx), LOCK_FILE, LOCK_LINE)
#endif  /* LOCK_DEBUG > 0 || SX_NOINLINE */
#define sx_try_slock(sx)        _sx_try_slock((sx), LOCK_FILE, LOCK_LINE)
#define sx_try_xlock(sx)        _sx_try_xlock((sx), LOCK_FILE, LOCK_LINE)
#define sx_try_upgrade(sx)      _sx_try_upgrade((sx), LOCK_FILE, LOCK_LINE)
#define sx_downgrade(sx)        _sx_downgrade((sx), LOCK_FILE, LOCK_LINE)

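/*
 * Illustrative usage sketch, not part of the original header.  The
 * lock and data names are hypothetical, and the error returns of the
 * *_sig variants are not shown.
 */
#if 0
static struct sx foo_lock;

static void
foo_example(void)
{
        sx_init(&foo_lock, "foo data lock");

        sx_slock(&foo_lock);            /* shared: readers may overlap */
        /* ... read data protected by foo_lock ... */
        if (sx_try_upgrade(&foo_lock))
                sx_xunlock(&foo_lock);  /* upgrade succeeded: now exclusive */
        else
                sx_sunlock(&foo_lock);  /* still shared */

        sx_xlock(&foo_lock);            /* exclusive: sole holder */
        /* ... modify data protected by foo_lock ... */
        sx_downgrade(&foo_lock);        /* exclusive -> shared */
        sx_sunlock(&foo_lock);

        sx_destroy(&foo_lock);
}
#endif
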
/*
 * Return a pointer to the owning thread if the lock is exclusively
 * locked.
 */
#define sx_xholder(sx) \
        ((sx)->sx_lock & SX_LOCK_SHARED ? NULL : \
        (struct thread *)SX_OWNER((sx)->sx_lock))

#define sx_xlocked(sx) \
        (((sx)->sx_lock & ~(SX_LOCK_FLAGMASK & ~SX_LOCK_SHARED)) == \
        (uintptr_t)curthread)

#define sx_unlock(sx) do { \
        if (sx_xlocked(sx)) \
                sx_xunlock(sx); \
        else \
                sx_sunlock(sx); \
} while (0)

#define sx_sleep(chan, sx, pri, wmesg, timo) \
        _sleep((chan), &(sx)->lock_object, (pri), (wmesg), (timo))

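/*
 * Illustrative sketch, not part of the original header: sx_sleep()
 * passes the lock to _sleep() as the interlock, so the lock is
 * dropped while the thread sleeps and (unless PDROP is set in pri)
 * reacquired before sx_sleep() returns.  "foo_lock" and "foo_ready"
 * are hypothetical.
 */
#if 0
        sx_xlock(&foo_lock);
        while (!foo_ready)
                sx_sleep(&foo_ready, &foo_lock, 0, "foordy", 0);
        /* foo_ready is set and foo_lock is held again here. */
        sx_xunlock(&foo_lock);
#endif
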
/*
 * Options passed to sx_init_flags().
 */
#define SX_DUPOK                0x01
#define SX_NOPROFILE            0x02
#define SX_NOWITNESS            0x04
#define SX_QUIET                0x08
#define SX_NOADAPTIVE           0x10
#define SX_RECURSE              0x20

/*
 * Options passed to sx_*lock_hard().
 */
#define SX_INTERRUPTIBLE        0x40

#if defined(INVARIANTS) || defined(INVARIANT_SUPPORT)
#define SA_LOCKED               LA_LOCKED
#define SA_SLOCKED              LA_SLOCKED
#define SA_XLOCKED              LA_XLOCKED
#define SA_UNLOCKED             LA_UNLOCKED
#define SA_RECURSED             LA_RECURSED
#define SA_NOTRECURSED          LA_NOTRECURSED

/* Backwards compatibility. */
#define SX_LOCKED               LA_LOCKED
#define SX_SLOCKED              LA_SLOCKED
#define SX_XLOCKED              LA_XLOCKED
#define SX_UNLOCKED             LA_UNLOCKED
#define SX_RECURSED             LA_RECURSED
#define SX_NOTRECURSED          LA_NOTRECURSED
#endif

#ifdef INVARIANTS
#define sx_assert(sx, what)     _sx_assert((sx), (what), LOCK_FILE, LOCK_LINE)
#else
#define sx_assert(sx, what)     (void)0
#endif
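
/*
 * Illustrative sketch, not part of the original header: a routine that
 * requires its caller to hold the (hypothetical) foo_lock exclusively
 * can document and enforce that with sx_assert(), which compiles away
 * unless INVARIANTS is enabled.
 */
#if 0
static void
foo_modify_locked(void)
{
        sx_assert(&foo_lock, SA_XLOCKED);
        /* ... safe to modify data protected by foo_lock ... */
}
#endif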

#endif  /* _KERNEL */

#endif  /* !_SYS_SX_H_ */