FreeBSD/Linux Kernel Cross Reference
sys/sys/lock.h
1 /* $NetBSD: lock.h,v 1.58.2.1 2007/08/26 18:44:17 bouyer Exp $ */
2
3 /*-
4 * Copyright (c) 1999, 2000 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
9 * NASA Ames Research Center.
10 *
11 * This code is derived from software contributed to The NetBSD Foundation
12 * by Ross Harvey.
13 *
14 * Redistribution and use in source and binary forms, with or without
15 * modification, are permitted provided that the following conditions
16 * are met:
17 * 1. Redistributions of source code must retain the above copyright
18 * notice, this list of conditions and the following disclaimer.
19 * 2. Redistributions in binary form must reproduce the above copyright
20 * notice, this list of conditions and the following disclaimer in the
21 * documentation and/or other materials provided with the distribution.
22 * 3. All advertising materials mentioning features or use of this software
23 * must display the following acknowledgement:
24 * This product includes software developed by the NetBSD
25 * Foundation, Inc. and its contributors.
26 * 4. Neither the name of The NetBSD Foundation nor the names of its
27 * contributors may be used to endorse or promote products derived
28 * from this software without specific prior written permission.
29 *
30 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
31 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
32 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
33 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
34 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
35 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
36 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
37 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
38 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
39 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
40 * POSSIBILITY OF SUCH DAMAGE.
41 */
42
43 /*
44 * Copyright (c) 1995
45 * The Regents of the University of California. All rights reserved.
46 *
47 * This code contains ideas from software contributed to Berkeley by
48 * Avadis Tevanian, Jr., Michael Wayne Young, and the Mach Operating
49 * System project at Carnegie-Mellon University.
50 *
51 * Redistribution and use in source and binary forms, with or without
52 * modification, are permitted provided that the following conditions
53 * are met:
54 * 1. Redistributions of source code must retain the above copyright
55 * notice, this list of conditions and the following disclaimer.
56 * 2. Redistributions in binary form must reproduce the above copyright
57 * notice, this list of conditions and the following disclaimer in the
58 * documentation and/or other materials provided with the distribution.
59 * 3. Neither the name of the University nor the names of its contributors
60 * may be used to endorse or promote products derived from this software
61 * without specific prior written permission.
62 *
63 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
64 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
65 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
66 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
67 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
68 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
69 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
70 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
71 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
72 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
73 * SUCH DAMAGE.
74 *
75 * @(#)lock.h 8.12 (Berkeley) 5/19/95
76 */
77
78 #ifndef _SYS_LOCK_H_
79 #define _SYS_LOCK_H_
80
81 #if defined(_KERNEL_OPT)
82 #include "opt_lockdebug.h"
83 #include "opt_multiprocessor.h"
84 #endif
85
86 #include <sys/queue.h>
87 #include <machine/lock.h>
88
89 /*
90 * The simple lock. Provides a simple spinning mutex. Note the
91 * member which is used in atomic operations must be aligned in
92 * order for it to work on the widest range of processor types.
93 */
94 struct simplelock {
95 __cpu_simple_lock_t lock_data;	/* MD atomic lock word; must be aligned for atomic ops */
96 #ifdef LOCKDEBUG
97 const char *lock_file;	/* __FILE__ of last acquisition */
98 const char *unlock_file;	/* __FILE__ of last release */
99 short lock_line;	/* __LINE__ of last acquisition */
100 short unlock_line;	/* __LINE__ of last release */
101 TAILQ_ENTRY(simplelock) list;	/* debug list linkage (LOCKDEBUG only) */
102 cpuid_t lock_holder; /* CPU ID */
103 #endif
104 };
105
/*
 * Static initializer for a struct simplelock: unlocked, with (under
 * LOCKDEBUG) all debugging fields cleared and no holding CPU (LK_NOCPU).
 * Initializer order must match the member order of struct simplelock.
 */
106 #ifdef LOCKDEBUG
107 #define SIMPLELOCK_INITIALIZER { __SIMPLELOCK_UNLOCKED, NULL, NULL, 0, \
108 0, { NULL, NULL }, LK_NOCPU }
109 #else
110 #define SIMPLELOCK_INITIALIZER { __SIMPLELOCK_UNLOCKED }
111 #endif
112
113 /*
114 * The general lock structure. Provides for multiple shared locks,
115 * upgrading from shared to exclusive, and sleeping/spinning until the
116 * lock can be gained.
117 */
118 struct lock {
119 struct simplelock lk_interlock; /* lock on remaining fields */
120 u_int lk_flags; /* see below */
121 int lk_sharecount; /* # of accepted shared locks */
122 short lk_exclusivecount; /* # of recursive exclusive locks */
123 short lk_recurselevel; /* lvl above which recursion ok */
124 int lk_waitcount; /* # of sleepers/spinners */
125
126 /*
127 * This is the sleep message for sleep locks, and a simple name
128 * for spin locks.
129 */
130 const char *lk_wmesg;
131
/* Sleep locks and spin locks track their exclusive holder differently. */
132 union {
133 struct {
134 /* pid of exclusive lock holder */
135 pid_t lk_sleep_lockholder;
136 lwpid_t lk_sleep_locklwp;	/* LWP id of exclusive lock holder */
137
138 /* priority at which to sleep */
139 int lk_sleep_prio;
140
141 /* maximum sleep time (for tsleep) */
142 int lk_sleep_timo;
143
144 /* lock taking over this lock */
145 struct lock *lk_newlock;
146 } lk_un_sleep;
147 struct {
148 /* CPU ID of exclusive lock holder */
149 cpuid_t lk_spin_cpu;
150 #if defined(LOCKDEBUG)
151 TAILQ_ENTRY(lock) lk_spin_list;
152 #endif
153 } lk_un_spin;
154 } lk_un;
155
/* Shorthand accessors for the union members above. */
156 #define lk_lockholder lk_un.lk_un_sleep.lk_sleep_lockholder
157 #define lk_locklwp lk_un.lk_un_sleep.lk_sleep_locklwp
158 #define lk_prio lk_un.lk_un_sleep.lk_sleep_prio
159 #define lk_timo lk_un.lk_un_sleep.lk_sleep_timo
160 #define lk_newlock lk_un.lk_un_sleep.lk_newlock
161
162 #define lk_cpu lk_un.lk_un_spin.lk_spin_cpu
163 #if defined(LOCKDEBUG)
164 #define lk_list lk_un.lk_un_spin.lk_spin_list
165 #endif
166
167 #if defined(LOCKDEBUG)
/* Call sites of the last lock/unlock, recorded via the lockmgr() macro. */
168 const char *lk_lock_file;
169 const char *lk_unlock_file;
170 int lk_lock_line;
171 int lk_unlock_line;
172 #endif
173 };
174
/*
 * Static initializer for a sleep lock; initializer order must match
 * the member order of struct lock (debug fields, if any, are zeroed).
 */
175 #define LOCK_INITIALIZER(prio, wmesg, timo, flags) \
176 { SIMPLELOCK_INITIALIZER, /* lk_interlock */ \
177 (flags), /* lk_flags */ \
178 0, /* lk_sharecount */ \
179 0, /* lk_exclusivecount */ \
180 0, /* lk_recurselevel */ \
181 0, /* lk_waitcount */ \
182 (wmesg), /* lk_wmesg */ \
183 { .lk_un_sleep = { 0, 0, (prio), (timo) } } /* no holder; sleep prio/timeout */ \
184 }
185
186 /*
187 * Lock request types:
188 * LK_SHARED - get one of many possible shared locks. If a process
189 * holding an exclusive lock requests a shared lock, the exclusive
190 * lock(s) will be downgraded to shared locks.
191 * LK_EXCLUSIVE - stop further shared locks, when they are cleared,
192 * grant a pending upgrade if it exists, then grant an exclusive
193 * lock. Only one exclusive lock may exist at a time, except that
194 * a process holding an exclusive lock may get additional exclusive
195 * locks if it explicitly sets the LK_CANRECURSE flag in the lock
196 * request, or if the LK_CANRECURSE flag was set when the lock was
197 * initialized.
198 * LK_UPGRADE - the process must hold a shared lock that it wants to
199 * have upgraded to an exclusive lock. Other processes may get
200 * exclusive access to the resource between the time that the upgrade
201 * is requested and the time that it is granted.
202 * LK_EXCLUPGRADE - the process must hold a shared lock that it wants to
203 * have upgraded to an exclusive lock. If the request succeeds, no
204 * other processes will have gotten exclusive access to the resource
205 * between the time that the upgrade is requested and the time that
206 * it is granted. However, if another process has already requested
207 * an upgrade, the request will fail (see error returns below).
208 * LK_DOWNGRADE - the process must hold an exclusive lock that it wants
209 * to have downgraded to a shared lock. If the process holds multiple
210 * (recursive) exclusive locks, they will all be downgraded to shared
211 * locks.
212 * LK_RELEASE - release one instance of a lock.
213 * LK_DRAIN - wait for all activity on the lock to end, then mark it
214 * decommissioned. This feature is used before freeing a lock that
215 * is part of a piece of memory that is about to be freed.
216 *
217 * These are flags that are passed to the lockmgr routine.
218 */
219 #define LK_TYPE_MASK 0x0000000f /* type of lock sought */
220 #define LK_SHARED 0x00000001 /* shared lock */
221 #define LK_EXCLUSIVE 0x00000002 /* exclusive lock */
222 #define LK_UPGRADE 0x00000003 /* shared-to-exclusive upgrade */
223 #define LK_EXCLUPGRADE 0x00000004 /* first shared-to-exclusive upgrade */
224 #define LK_DOWNGRADE 0x00000005 /* exclusive-to-shared downgrade */
225 #define LK_RELEASE 0x00000006 /* release any type of lock */
226 #define LK_DRAIN 0x00000007 /* wait for all lock activity to end */
227 #define LK_EXCLOTHER 0x00000008 /* other process holds lock */
228 /*
229 * External lock flags.
230 *
231 * The first three flags may be set in lock_init to set their mode permanently,
232 * or passed in as arguments to the lock manager. The LK_REENABLE flag may be
233 * set only at the release of a lock obtained by drain.
234 */
235 #define LK_EXTFLG_MASK 0x00f00070 /* mask of external flags */
236 #define LK_NOWAIT 0x00000010 /* do not sleep to await lock */
237 #define LK_SLEEPFAIL 0x00000020 /* sleep, then return failure */
238 #define LK_CANRECURSE 0x00000040 /* this may be recursive lock attempt */
239 #define LK_REENABLE 0x00000080 /* lock is to be reenabled after drain */
240 #define LK_SETRECURSE 0x00100000 /* other locks while we have it OK */
241 #define LK_RECURSEFAIL 0x00200000 /* attempt at recursive lock fails */
242 #define LK_SPIN 0x00400000 /* lock spins instead of sleeps */
243 #define LK_RESURRECT 0x00800000 /* immediately reenable drained lock */
244 /*
245 * Internal lock flags.
246 *
247 * These flags are used internally to the lock manager.
248 */
249 #define LK_WANT_UPGRADE 0x00000100 /* waiting for share-to-excl upgrade */
250 #define LK_WANT_EXCL 0x00000200 /* exclusive lock sought */
251 #define LK_HAVE_EXCL 0x00000400 /* exclusive lock obtained */
252 #define LK_WAITDRAIN 0x00000800 /* process waiting for lock to drain */
253 #define LK_DRAINING 0x00004000 /* lock is being drained */
254 #define LK_DRAINED 0x00008000 /* lock has been decommissioned */
255 /*
256 * Internal state flags corresponding to lk_sharecount, and lk_waitcount
257 */
258 #define LK_SHARE_NONZERO 0x00040000 /* lk_sharecount != 0 */
259 #define LK_WAIT_NONZERO 0x00080000 /* lk_waitcount != 0 */
260 /*
261 * Control flags
262 *
263 * Non-persistent external flags.
264 */
265 #define LK_INTERLOCK 0x00010000 /* unlock passed simple lock after
266 getting lk_interlock */
267 #define LK_RETRY 0x00020000 /* vn_lock: retry until locked */
268
269 /*
270 * Lock return status.
271 *
272 * Successfully obtained locks return 0. Locks will always succeed
273 * unless one of the following is true:
274 * LK_EXCLUPGRADE is requested and some other process has already
275 * requested a lock upgrade (returns EBUSY).
276 * LK_NOWAIT is set and a sleep would be required (returns EBUSY).
277 * LK_SLEEPFAIL is set and a sleep was done (returns ENOLCK).
278 * PCATCH is set in lock priority and a signal arrives (returns
279 * either EINTR or ERESTART if the system call is to be restarted).
280 * Non-null lock timeout and timeout expires (returns EWOULDBLOCK).
281 * A failed lock attempt always returns a non-zero error value. No lock
282 * is held after an error return (in particular, a failed LK_UPGRADE
283 * or LK_EXCLUPGRADE will have released its shared access lock).
284 */
285
286 /*
287 * Indicator that no process/cpu holds exclusive lock
288 */
289 #define LK_KERNPROC ((pid_t) -2)
290 #define LK_NOPROC ((pid_t) -1)
291 #define LK_NOCPU ((cpuid_t) -1)
292
293 #ifdef _KERNEL
294
295 struct proc;
296
/* Parameters are (lock, prio, wmesg, timo, flags); see LOCK_INITIALIZER. */
297 void lockinit(struct lock *, int, const char *, int, int);
298 #if defined(LOCKDEBUG)
/* LOCKDEBUG variant records the caller's file/line for diagnostics. */
299 int _lockmgr(__volatile struct lock *, u_int, struct simplelock *,
300 const char *, int);
301 #define lockmgr(l, f, i) _lockmgr((l), (f), (i), __FILE__, __LINE__)
302 #else
303 int lockmgr(__volatile struct lock *, u_int flags, struct simplelock *);
304 #endif /* LOCKDEBUG */
305 void transferlockers(struct lock *, struct lock *);
306 int lockstatus(struct lock *);
307 void lockmgr_printinfo(__volatile struct lock *);
308
309 #if defined(LOCKDEBUG) || defined(DIAGNOSTIC)
310 void spinlock_switchcheck(void);
311 #endif
312
/* Spin-lock wrappers: force LK_SPIN; no-ops on uniprocessor non-debug kernels. */
313 #if defined(MULTIPROCESSOR) || defined(LOCKDEBUG)
314 #define spinlockinit(lkp, name, flags) \
315 lockinit((lkp), 0, (name), 0, (flags) | LK_SPIN)
316 #define spinlockmgr(lkp, flags, intrlk) \
317 lockmgr((lkp), (flags) | LK_SPIN, (intrlk))
318 #else
319 #define spinlockinit(lkp, name, flags) (void)(lkp)
320 #define spinlockmgr(lkp, flags, intrlk) (0)
321 #endif
322
323 #if defined(LOCKDEBUG)
324 int _spinlock_release_all(__volatile struct lock *, const char *, int);
325 void _spinlock_acquire_count(__volatile struct lock *, int, const char *,
326 int);
327
328 #define spinlock_release_all(l) _spinlock_release_all((l), __FILE__, __LINE__)
329 #define spinlock_acquire_count(l, c) _spinlock_acquire_count((l), (c), \
330 __FILE__, __LINE__)
331 #else
332 int spinlock_release_all(__volatile struct lock *);
333 void spinlock_acquire_count(__volatile struct lock *, int);
334 #endif
335
/* simple_lock interface: debug functions, MP atomics, or no-ops, by config. */
336 #if defined(LOCKDEBUG)
337 void _simple_lock(__volatile struct simplelock *, const char *, int);
338 int _simple_lock_try(__volatile struct simplelock *, const char *, int);
339 void _simple_unlock(__volatile struct simplelock *, const char *, int);
340 int _simple_lock_held(__volatile struct simplelock *);
341 void simple_lock_only_held(__volatile struct simplelock *, const char *);
342
343 #define simple_lock(alp) _simple_lock((alp), __FILE__, __LINE__)
344 #define simple_lock_try(alp) _simple_lock_try((alp), __FILE__, __LINE__)
345 #define simple_unlock(alp) _simple_unlock((alp), __FILE__, __LINE__)
346 #define simple_lock_held(alp) _simple_lock_held((alp))
347
348 #define LOCK_ASSERT(x) KASSERT(x)
349
350 void simple_lock_init(struct simplelock *);
351 void simple_lock_dump(void);
352 void simple_lock_freecheck(void *, void *);
353 void simple_lock_switchcheck(void);
354 #elif defined(MULTIPROCESSOR) /* NOTE(review): simple_lock_held() is undefined in this branch — confirm no !LOCKDEBUG MP callers */
355 #define simple_lock_init(alp) __cpu_simple_lock_init(&(alp)->lock_data)
356 #define simple_lock(alp) __cpu_simple_lock(&(alp)->lock_data)
357 #define simple_lock_try(alp) __cpu_simple_lock_try(&(alp)->lock_data)
358 #define simple_unlock(alp) __cpu_simple_unlock(&(alp)->lock_data)
359 #define LOCK_ASSERT(x) /* nothing */
360 #define simple_lock_only_held(x,y) /* nothing */
361 #else
362 #define simple_lock_try(alp) (1)
363 #ifndef __lint__ /* NOTE(review): simple_lock_only_held is defined only in the __lint__ branch below — confirm intentional */
364 #define simple_lock_init(alp) (void)(alp)
365 #define simple_lock(alp) (void)(alp)
366 #define simple_unlock(alp) (void)(alp)
367 #else /* __lint__ */
368 #define simple_lock_init(alp) /* nothing */
369 #define simple_lock(alp) /* nothing */
370 #define simple_unlock(alp) /* nothing */
371 #define simple_lock_only_held(x,y) /* nothing */
372 #endif /* __lint__ */
373 #define LOCK_ASSERT(x) /* nothing */
374 #endif
375
376 #endif /* _KERNEL */
377
378 #endif /* _SYS_LOCK_H_ */
Cache object: 2e13b092a7adc2fc37cff2ac4401d10c
|