FreeBSD/Linux Kernel Cross Reference
sys/sys/lock.h
1 /* $NetBSD: lock.h,v 1.66.6.1 2007/08/01 14:45:45 liamjfoy Exp $ */
2
3 /*-
4 * Copyright (c) 1999, 2000 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
9 * NASA Ames Research Center.
10 *
11 * This code is derived from software contributed to The NetBSD Foundation
12 * by Ross Harvey.
13 *
14 * Redistribution and use in source and binary forms, with or without
15 * modification, are permitted provided that the following conditions
16 * are met:
17 * 1. Redistributions of source code must retain the above copyright
18 * notice, this list of conditions and the following disclaimer.
19 * 2. Redistributions in binary form must reproduce the above copyright
20 * notice, this list of conditions and the following disclaimer in the
21 * documentation and/or other materials provided with the distribution.
22 * 3. All advertising materials mentioning features or use of this software
23 * must display the following acknowledgement:
24 * This product includes software developed by the NetBSD
25 * Foundation, Inc. and its contributors.
26 * 4. Neither the name of The NetBSD Foundation nor the names of its
27 * contributors may be used to endorse or promote products derived
28 * from this software without specific prior written permission.
29 *
30 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
31 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
32 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
33 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
34 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
35 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
36 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
37 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
38 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
39 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
40 * POSSIBILITY OF SUCH DAMAGE.
41 */
42
43 /*
44 * Copyright (c) 1995
45 * The Regents of the University of California. All rights reserved.
46 *
47 * This code contains ideas from software contributed to Berkeley by
48 * Avadis Tevanian, Jr., Michael Wayne Young, and the Mach Operating
49 * System project at Carnegie-Mellon University.
50 *
51 * Redistribution and use in source and binary forms, with or without
52 * modification, are permitted provided that the following conditions
53 * are met:
54 * 1. Redistributions of source code must retain the above copyright
55 * notice, this list of conditions and the following disclaimer.
56 * 2. Redistributions in binary form must reproduce the above copyright
57 * notice, this list of conditions and the following disclaimer in the
58 * documentation and/or other materials provided with the distribution.
59 * 3. Neither the name of the University nor the names of its contributors
60 * may be used to endorse or promote products derived from this software
61 * without specific prior written permission.
62 *
63 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
64 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
65 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
66 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
67 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
68 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
69 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
70 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
71 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
72 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
73 * SUCH DAMAGE.
74 *
75 * @(#)lock.h 8.12 (Berkeley) 5/19/95
76 */
77
78 #ifndef _SYS_LOCK_H_
79 #define _SYS_LOCK_H_
80
81 #if defined(_KERNEL_OPT)
82 #include "opt_lockdebug.h"
83 #include "opt_multiprocessor.h"
84 #endif
85
86 #include <sys/queue.h>
87 #include <machine/lock.h>
88
89 /*
90 * The simple lock. Provides a simple spinning mutex. Note the
91 * member which is used in atomic operations must be aligned in
92 * order for it to work on the widest range of processor types.
93 */
94 struct simplelock {
95 __cpu_simple_lock_t lock_data; /* MD atomic word; must stay aligned (see note above) */
96 #ifdef LOCKDEBUG
/* Debug bookkeeping: file/line of the most recent lock and unlock. */
97 const char *lock_file;
98 const char *unlock_file;
99 short lock_line;
100 short unlock_line;
/*
 * Linkage on a debug list of all simplelocks -- presumably walked by
 * simple_lock_dump()/simple_lock_freecheck(); verify against kern_lock.c.
 */
101 _TAILQ_ENTRY(struct simplelock, volatile) list;
102 cpuid_t lock_holder; /* CPU ID */
103 #endif
104 };
105
106 #ifdef LOCKDEBUG
/*
 * Static initializer; initializer order must match the member order of
 * struct simplelock above ({ NULL, NULL } fills the two-pointer
 * _TAILQ_ENTRY, LK_NOCPU marks "no holder").
 */
107 #define SIMPLELOCK_INITIALIZER { __SIMPLELOCK_UNLOCKED, NULL, NULL, 0, \
108 0, { NULL, NULL }, LK_NOCPU }
109 #else
110 #define SIMPLELOCK_INITIALIZER { __SIMPLELOCK_UNLOCKED }
111 #endif
112
113 /*
114 * The general lock structure. Provides for multiple shared locks,
115 * upgrading from shared to exclusive, and sleeping/spinning until the
116 * lock can be gained.
117 */
118 struct lock {
119 struct simplelock lk_interlock; /* lock on remaining fields */
120 u_int lk_flags; /* see below */
121 int lk_sharecount; /* # of accepted shared locks */
122 short lk_exclusivecount; /* # of recursive exclusive locks */
123 short lk_recurselevel; /* lvl above which recursion ok */
124 int lk_waitcount; /* # of sleepers/spinners */
125
126 /*
127 * This is the sleep message for sleep locks, and a simple name
128 * for spin locks.
129 */
130 const char *lk_wmesg;
131
/*
 * Holder bookkeeping differs between sleep locks (pid/lwp) and spin
 * locks (CPU id); which variant is live is selected by LK_SPIN in
 * lk_flags.
 */
132 union {
133 struct {
134 /* pid of exclusive lock holder */
135 pid_t lk_sleep_lockholder;
136 lwpid_t lk_sleep_locklwp;
137
138 /* priority at which to sleep */
139 int lk_sleep_prio;
140
141 /* maximum sleep time (for tsleep) */
142 int lk_sleep_timo;
143
144 /* lock taking over this lock */
145 struct lock *lk_newlock;
146 } lk_un_sleep;
147 struct {
148 /* CPU ID of exclusive lock holder */
149 cpuid_t lk_spin_cpu;
150 #if defined(LOCKDEBUG)
151 _TAILQ_ENTRY(struct lock, volatile) lk_spin_list;
152 #endif
153 } lk_un_spin;
154 } lk_un;
155
/* Shorthand accessors for the union members above. */
156 #define lk_lockholder lk_un.lk_un_sleep.lk_sleep_lockholder
157 #define lk_locklwp lk_un.lk_un_sleep.lk_sleep_locklwp
158 #define lk_prio lk_un.lk_un_sleep.lk_sleep_prio
159 #define lk_timo lk_un.lk_un_sleep.lk_sleep_timo
160 #define lk_newlock lk_un.lk_un_sleep.lk_newlock
161
162 #define lk_cpu lk_un.lk_un_spin.lk_spin_cpu
163 #if defined(LOCKDEBUG)
164 #define lk_list lk_un.lk_un_spin.lk_spin_list
165 #endif
166
167 #if defined(LOCKDEBUG)
/* Debug bookkeeping: file/line of the most recent lock and unlock. */
168 const char *lk_lock_file;
169 const char *lk_unlock_file;
170 int lk_lock_line;
171 int lk_unlock_line;
172 #endif
173 };
174
175 #ifndef LOCKDEBUG
/*
 * Static initializer for a (sleep-variant) struct lock; initializer
 * order must match the member order of struct lock above. The
 * LOCKDEBUG variant additionally zeroes the lk_lock_file/lk_lock_line
 * debug members appended to the structure.
 */
176 #define LOCK_INITIALIZER(prio, wmesg, timo, flags) \
177 { SIMPLELOCK_INITIALIZER, /* interlock */ \
178 (flags), /* flags */ \
179 0, /* sharecount */ \
180 0, /* exclusivecount */ \
181 0, /* recurselevel */ \
182 0, /* waitcount */ \
183 (wmesg), /* waitmesg */ \
184 { .lk_un_sleep = { 0, 0, (prio), (timo), NULL } }, \
185 }
186 #else
187 #define LOCK_INITIALIZER(prio, wmesg, timo, flags) \
188 { SIMPLELOCK_INITIALIZER, /* interlock */ \
189 (flags), /* flags */ \
190 0, /* sharecount */ \
191 0, /* exclusivecount */ \
192 0, /* recurselevel */ \
193 0, /* waitcount */ \
194 (wmesg), /* waitmesg */ \
195 { .lk_un_sleep = { 0, 0, (prio), (timo), NULL } }, \
196 NULL, /* lk_lock_file */ \
197 NULL, /* lk_unlock_file */ \
198 0, /* lk_lock_line */ \
199 0, /* lk_unlock_line */ \
200 }
201 #endif
202
203
204
205 /*
206 * Lock request types:
207 * LK_SHARED - get one of many possible shared locks. If a process
208 * holding an exclusive lock requests a shared lock, the exclusive
209 * lock(s) will be downgraded to shared locks.
210 * LK_EXCLUSIVE - stop further shared locks, when they are cleared,
211 * grant a pending upgrade if it exists, then grant an exclusive
212 * lock. Only one exclusive lock may exist at a time, except that
213 * a process holding an exclusive lock may get additional exclusive
214 * locks if it explicitly sets the LK_CANRECURSE flag in the lock
215 * request, or if the LK_CANRECURSE flag was set when the lock was
216 * initialized.
217 * LK_UPGRADE - the process must hold a shared lock that it wants to
218 * have upgraded to an exclusive lock. Other processes may get
219 * exclusive access to the resource between the time that the upgrade
220 * is requested and the time that it is granted.
221 * LK_EXCLUPGRADE - the process must hold a shared lock that it wants to
222 * have upgraded to an exclusive lock. If the request succeeds, no
223 * other processes will have gotten exclusive access to the resource
224 * between the time that the upgrade is requested and the time that
225 * it is granted. However, if another process has already requested
226 * an upgrade, the request will fail (see error returns below).
227 * LK_DOWNGRADE - the process must hold an exclusive lock that it wants
228 * to have downgraded to a shared lock. If the process holds multiple
229 * (recursive) exclusive locks, they will all be downgraded to shared
230 * locks.
231 * LK_RELEASE - release one instance of a lock.
232 * LK_DRAIN - wait for all activity on the lock to end, then mark it
233 * decommissioned. This feature is used before freeing a lock that
234 * is part of a piece of memory that is about to be freed.
235 *
236 * These are flags that are passed to the lockmgr routine.
237 */
/*
 * Request types are enumerated values, not bit flags: exactly one of
 * them (masked by LK_TYPE_MASK) is passed per lockmgr() call.
 */
238 #define LK_TYPE_MASK 0x0000000f /* type of lock sought */
239 #define LK_SHARED 0x00000001 /* shared lock */
240 #define LK_EXCLUSIVE 0x00000002 /* exclusive lock */
241 #define LK_UPGRADE 0x00000003 /* shared-to-exclusive upgrade */
242 #define LK_EXCLUPGRADE 0x00000004 /* first shared-to-exclusive upgrade */
243 #define LK_DOWNGRADE 0x00000005 /* exclusive-to-shared downgrade */
244 #define LK_RELEASE 0x00000006 /* release any type of lock */
245 #define LK_DRAIN 0x00000007 /* wait for all lock activity to end */
246 #define LK_EXCLOTHER 0x00000008 /* other process holds lock */
247 /*
248 * External lock flags.
249 *
250 * The first three flags may be set in lock_init to set their mode permanently,
251 * or passed in as arguments to the lock manager. The LK_REENABLE flag may be
252 * set only at the release of a lock obtained by drain.
253 */
/*
 * LK_EXTFLG_MASK must cover every external flag defined below: the low
 * group 0x000000f0 (LK_NOWAIT, LK_SLEEPFAIL, LK_CANRECURSE,
 * LK_REENABLE) and the high group 0x00f00000 (LK_SETRECURSE,
 * LK_RECURSEFAIL, LK_SPIN, LK_RESURRECT).  The previous value
 * 0x00f00070 dropped the LK_REENABLE bit (0x80), so masking with it
 * would silently discard a caller's LK_REENABLE request at
 * release-after-drain time; 0x00f000f0 restores full coverage.
 */
#define	LK_EXTFLG_MASK	0x00f000f0	/* mask of external flags */
#define	LK_NOWAIT	0x00000010	/* do not sleep to await lock */
#define	LK_SLEEPFAIL	0x00000020	/* sleep, then return failure */
#define	LK_CANRECURSE	0x00000040	/* this may be recursive lock attempt */
#define	LK_REENABLE	0x00000080	/* lock is to be reenabled after drain */
#define	LK_SETRECURSE	0x00100000	/* other locks while we have it OK */
#define	LK_RECURSEFAIL	0x00200000	/* attempt at recursive lock fails */
#define	LK_SPIN		0x00400000	/* lock spins instead of sleeps */
#define	LK_RESURRECT	0x00800000	/* immediately reenable drained lock */
263 /*
264 * Internal lock flags.
265 *
266 * These flags are used internally to the lock manager.
267 */
268 #define LK_WANT_UPGRADE 0x00000100 /* waiting for share-to-excl upgrade */
269 #define LK_WANT_EXCL 0x00000200 /* exclusive lock sought */
270 #define LK_HAVE_EXCL 0x00000400 /* exclusive lock obtained */
271 #define LK_WAITDRAIN 0x00000800 /* process waiting for lock to drain */
272 #define LK_DRAINING 0x00004000 /* lock is being drained */
273 #define LK_DRAINED 0x00008000 /* lock has been decommissioned */
274 /*
275 * Internal state flags mirroring lk_sharecount and lk_waitcount.
276 */
277 #define LK_SHARE_NONZERO 0x00040000 /* lk_sharecount != 0 */
278 #define LK_WAIT_NONZERO 0x00080000 /* lk_waitcount != 0 */
279 /*
280 * Control flags
281 *
282 * Non-persistent external flags.
283 */
284 #define LK_INTERLOCK 0x00010000 /* unlock passed simple lock after
285 getting lk_interlock */
286 #define LK_RETRY 0x00020000 /* vn_lock: retry until locked */
287
288 /*
289 * Lock return status.
290 *
291 * Successfully obtained locks return 0. Locks will always succeed
292 * unless one of the following is true:
293 * LK_FORCEUPGRADE is requested and some other process has already
294 * requested a lock upgrade (returns EBUSY).
295 * LK_NOWAIT is set and a sleep would be required (returns EBUSY).
296 * LK_SLEEPFAIL is set and a sleep was done (returns ENOLCK).
297 * PCATCH is set in lock priority and a signal arrives (returns
298 * either EINTR or ERESTART if the system call is to be restarted).
299 * Non-null lock timeout and timeout expires (returns EWOULDBLOCK).
300 * A failed lock attempt always returns a non-zero error value. No lock
301 * is held after an error return (in particular, a failed LK_UPGRADE
302 * or LK_FORCEUPGRADE will have released its shared access lock).
303 */
304
305 /*
306 * Indicator that no process/cpu holds exclusive lock
307 */
308 #define LK_KERNPROC ((pid_t) -2) /* presumably: held on behalf of the kernel, not a pid -- verify vs. lockmgr() */
309 #define LK_NOPROC ((pid_t) -1) /* no process holds the (sleep) lock */
310 #define LK_NOCPU ((cpuid_t) -1) /* no CPU holds the (spin) lock */
311
312 #ifdef _KERNEL
313
314 struct proc;
315
/*
 * lockinit(lkp, prio, wmesg, timo, flags) -- argument order inferred
 * from the spinlockinit() wrapper below; confirm against kern_lock.c.
 */
316 void lockinit(struct lock *, int, const char *, int, int);
317 #if defined(LOCKDEBUG)
/* Under LOCKDEBUG, lockmgr() also records the caller's file/line. */
318 int _lockmgr(volatile struct lock *, u_int, struct simplelock *,
319 const char *, int);
320 #define lockmgr(l, f, i) _lockmgr((l), (f), (i), __FILE__, __LINE__)
321 #else
322 int lockmgr(volatile struct lock *, u_int flags, struct simplelock *);
323 #endif /* LOCKDEBUG */
324 void transferlockers(struct lock *, struct lock *);
325 int lockstatus(struct lock *);
326 void lockmgr_printinfo(volatile struct lock *);
327
328 #if defined(LOCKDEBUG)
329 void spinlock_switchcheck(void);
330 #endif
331
/* Spin-lock wrappers: force LK_SPIN into the flags passed to lockmgr(). */
332 #if defined(MULTIPROCESSOR) || defined(LOCKDEBUG)
333 #define spinlockinit(lkp, name, flags) \
334 lockinit((lkp), 0, (name), 0, (flags) | LK_SPIN)
335 #define spinlockmgr(lkp, flags, intrlk) \
336 lockmgr((lkp), (flags) | LK_SPIN, (intrlk))
337 #else
/*
 * Uniprocessor, no debugging: spin locks compile away entirely;
 * spinlockmgr() always evaluates to 0 (success).
 */
338 #define spinlockinit(lkp, name, flags) (void)(lkp)
339 #define spinlockmgr(lkp, flags, intrlk) (0)
340 #endif
341
342 #if defined(LOCKDEBUG)
343 int _spinlock_release_all(volatile struct lock *, const char *, int);
344 void _spinlock_acquire_count(volatile struct lock *, int, const char *,
345 int);
346
347 #define spinlock_release_all(l) _spinlock_release_all((l), __FILE__, __LINE__)
348 #define spinlock_acquire_count(l, c) _spinlock_acquire_count((l), (c), \
349 __FILE__, __LINE__)
350
351 #else
352 int spinlock_release_all(volatile struct lock *);
353 void spinlock_acquire_count(volatile struct lock *, int);
354 #endif
355
/*
 * simple_lock interface, in three configurations:
 *   LOCKDEBUG          -> checked functions recording caller file/line;
 *   MULTIPROCESSOR     -> raw MD __cpu_simple_lock operations;
 *   otherwise (UP)     -> no-ops (interrupts, not locks, provide
 *                         mutual exclusion on a uniprocessor).
 */
356 #if defined(LOCKDEBUG)
357
358 void _simple_lock(volatile struct simplelock *, const char *, int);
359 int _simple_lock_try(volatile struct simplelock *, const char *, int);
360 void _simple_unlock(volatile struct simplelock *, const char *, int);
361 int _simple_lock_held(volatile struct simplelock *);
362 void simple_lock_only_held(volatile struct simplelock *, const char *);
363 void _simple_lock_assert_locked(volatile struct simplelock *, const char *,
364 const char *, int l);
365 void _simple_lock_assert_unlocked(volatile struct simplelock *, const char *,
366 const char *, int l);
367
368 #define simple_lock(alp) _simple_lock((alp), __FILE__, __LINE__)
369 #define simple_lock_try(alp) _simple_lock_try((alp), __FILE__, __LINE__)
370 #define simple_unlock(alp) _simple_unlock((alp), __FILE__, __LINE__)
371 #define simple_lock_held(alp) _simple_lock_held((alp))
372 #define simple_lock_assert_locked(alp,lockname) \
373 _simple_lock_assert_locked((alp),(lockname), __FILE__, __LINE__)
374 #define simple_lock_assert_unlocked(alp,lockname) \
375 _simple_lock_assert_unlocked((alp),(lockname), __FILE__, __LINE__)
376
377 #define LOCK_ASSERT(x) KASSERT(x)
378
379 void simple_lock_init(volatile struct simplelock *);
380 void simple_lock_dump(void);
381 void simple_lock_freecheck(void *, void *);
382 void simple_lock_switchcheck(void);
383 #elif defined(MULTIPROCESSOR)
/* NOTE(review): simple_lock_held has no definition in this branch or the
   UP branch below -- callers of it appear to require LOCKDEBUG; confirm. */
384 #define simple_lock_init(alp) __cpu_simple_lock_init(&(alp)->lock_data)
385 #define simple_lock(alp) __cpu_simple_lock(&(alp)->lock_data)
386 #define simple_lock_try(alp) __cpu_simple_lock_try(&(alp)->lock_data)
387 #define simple_unlock(alp) __cpu_simple_unlock(&(alp)->lock_data)
388 #define LOCK_ASSERT(x) /* nothing */
389 #define simple_lock_only_held(x,y) /* nothing */
390 #define simple_lock_assert_locked(alp,lockname) /* nothing */
391 #define simple_lock_assert_unlocked(alp,lockname) /* nothing */
392 #else
/* simple_lock_try always "succeeds" on UP: there is nothing to contend with. */
393 #define simple_lock_try(alp) (1)
394 #ifndef __lint__
/* NOTE(review): unlike the __lint__ branch below, this branch does not
   define simple_lock_only_held -- looks like an omission; confirm. */
395 #define simple_lock_init(alp) (void)(alp)
396 #define simple_lock(alp) (void)(alp)
397 #define simple_unlock(alp) (void)(alp)
398 #define simple_lock_assert_locked(alp,lockname) (void)(alp)
399 #define simple_lock_assert_unlocked(alp,lockname) (void)(alp)
400 #else /* __lint__ */
401 #define simple_lock_init(alp) /* nothing */
402 #define simple_lock(alp) /* nothing */
403 #define simple_unlock(alp) /* nothing */
404 #define simple_lock_only_held(x,y) /* nothing */
405 #define simple_lock_assert_locked(alp,lockname) /* nothing */
406 #define simple_lock_assert_unlocked(alp,lockname) /* nothing */
407 #endif /* __lint__ */
408 #define LOCK_ASSERT(x) /* nothing */
409 #endif
410
411 int lock_owner_onproc(uintptr_t);
412
#ifndef SPINLOCK_SPIN_HOOK		/* from <machine/lock.h> */
#define	SPINLOCK_SPIN_HOOK		/* nothing */
#endif

/* Initial and maximum iteration counts for the backoff loop below. */
#ifndef SPINLOCK_BACKOFF_MIN
#define	SPINLOCK_BACKOFF_MIN	32
#endif
#ifndef SPINLOCK_BACKOFF_MAX
#define	SPINLOCK_BACKOFF_MAX	1024
#endif

/*
 * Spin for "count" iterations, then double "count" for the next
 * attempt (exponential backoff), never exceeding SPINLOCK_BACKOFF_MAX.
 *
 * Fixes two defects in the previous version:
 *  - after the loop __i == (count); the old code tested the doubled
 *    value but then assigned SPINLOCK_BACKOFF_MAX outright, jumping to
 *    the maximum on the first call instead of backing off gradually.
 *    Assigning the doubled value (__i) gives the intended progression
 *    and self-caps at SPINLOCK_BACKOFF_MAX.
 *  - the trailing semicolon after "while (0)" broke the do/while(0)
 *    idiom (a following "else" would not parse).
 *
 * nullop(NULL) is a call into the kernel proper -- presumably to keep
 * the compiler from optimizing the delay loop away; confirm against
 * kern_subr.c.
 */
#define	SPINLOCK_BACKOFF(count)					\
do {								\
	int __i;						\
	for (__i = 0; __i < (count); __i++) {			\
		SPINLOCK_SPIN_HOOK;				\
		nullop(NULL);					\
	}							\
	if ((__i <<= 1) <= SPINLOCK_BACKOFF_MAX)		\
		(count) = __i;					\
} while (/* CONSTCOND */ 0)
433
434 #endif /* _KERNEL */
435
436 #endif /* _SYS_LOCK_H_ */
Cache object: 81bf6fdce9cd94380bcb80aeaefdc19f
|