sys/sys/mutex.h
/*	$NetBSD: mutex.h,v 1.16 2008/04/28 20:24:11 martin Exp $	*/

/*-
 * Copyright (c) 2002, 2006, 2007, 2008 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe and Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef _SYS_MUTEX_H_
#define	_SYS_MUTEX_H_

/*
 * There are 2 types of mutexes:
 *
 *	* Adaptive -- If the lock is already held, the thread attempting
 *	  to acquire the lock determines if the thread that holds it is
 *	  currently running.  If so, it spins, else it sleeps.
 *
 *	* Spin -- If the lock is already held, the thread attempting to
 *	  acquire the lock spins.  The IPL will be raised on entry.
 *
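 *	As an illustrative sketch only (the real slow path lives in
 *	kern_mutex.c), an adaptive acquisition behaves roughly like:
 *
 *		while (lock is held) {
 *			if (owner is running on another CPU)
 *				spin and retry;
 *			else
 *				mark "has waiters" and sleep until
 *				woken by the release path;
 *		}
 *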
 * Machine dependent code must provide the following:
 *
 *	struct mutex
 *		The actual mutex structure.  This structure is mostly
 *		opaque to machine-independent code; most accesses are
 *		done through macros.  However, machine-independent code
 *		must be able to access the following members:
 *
 *		uintptr_t		mtx_owner
 *		ipl_cookie_t		mtx_ipl
 *		__cpu_simple_lock_t	mtx_lock
 *
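 *	For illustration, a hypothetical port might lay the structure
 *	out as follows (a sketch, not any particular port's definition):
 *
 *		struct kmutex {
 *			volatile uintptr_t	mtx_owner;
 *			ipl_cookie_t		mtx_ipl;
 *			__cpu_simple_lock_t	mtx_lock;
 *		};
 *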
 * If an architecture can be considered 'simple' (no interlock required in
 * the MP case, or no MP) it need only define __HAVE_SIMPLE_MUTEXES and
 * provide the following:
 *
 *	struct mutex
 *
 *		[additionally:]
 *		volatile integer	mtx_id
 *
 *	MUTEX_RECEIVE(mtx)
 *		Post a load fence after acquiring the mutex, if necessary.
 *
 *	MUTEX_GIVE(mtx)
 *		Post a load/store fence after releasing the mutex, if
 *		necessary.
 *
 *	MUTEX_CAS(ptr, old, new)
 *		Perform an atomic "compare and swap" operation, evaluating
 *		to true if it succeeds and false otherwise.
 *
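 *	A minimal sketch for a hypothetical 'simple' port, assuming the
 *	machine-independent atomic and memory-barrier primitives from
 *	<sys/atomic.h> are usable:
 *
 *		#define	__HAVE_SIMPLE_MUTEXES
 *		#define	MUTEX_RECEIVE(mtx)	membar_enter()
 *		#define	MUTEX_GIVE(mtx)		membar_exit()
 *		#define	MUTEX_CAS(p, o, n)				   \
 *			(atomic_cas_ulong((volatile unsigned long *)(p),   \
 *			    (unsigned long)(o), (unsigned long)(n)) ==	   \
 *			    (unsigned long)(o))
 *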
 * Otherwise, the following must be defined:
 *
 *	MUTEX_INITIALIZE_SPIN(mtx, dodebug, minipl)
 *		Initialize a spin mutex.
 *
 *	MUTEX_INITIALIZE_ADAPTIVE(mtx, dodebug)
 *		Initialize an adaptive mutex.
 *
 *	MUTEX_DESTROY(mtx)
 *		Tear down a mutex.
 *
 *	MUTEX_ADAPTIVE_P(mtx)
 *		Evaluates to true if the mutex is an adaptive mutex.
 *
 *	MUTEX_SPIN_P(mtx)
 *		Evaluates to true if the mutex is a spin mutex.
 *
 *	MUTEX_OWNER(owner)
 *		Returns the owner of the adaptive mutex (LWP address).
 *
 *	MUTEX_OWNED(owner)
 *		Returns non-zero if an adaptive mutex is currently
 *		held by an LWP.
 *
 *	MUTEX_HAS_WAITERS(mtx)
 *		Returns true if the mutex has waiters.
 *
 *	MUTEX_SET_WAITERS(mtx)
 *		Mark the mutex as having waiters.
 *
 *	MUTEX_ACQUIRE(mtx, owner)
 *		Try to acquire an adaptive mutex such that:
 *			if (lock held OR waiters)
 *				return 0;
 *			else
 *				return 1;
 *		Must be MP/interrupt atomic.
 *
 *	MUTEX_RELEASE(mtx)
 *		Release the lock and clear the "has waiters" indication.
 *		Must be interrupt atomic, need not be MP safe.
 *
 *	MUTEX_DEBUG_P(mtx)
 *		Evaluates to true if the mutex is initialized with
 *		dodebug==true.  Only used in the LOCKDEBUG case.
 *
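 *	On a __HAVE_SIMPLE_MUTEXES port several of these reduce to
 *	operations on mtx_owner; for example (a sketch, not the
 *	authoritative definitions):
 *
 *		#define	MUTEX_ACQUIRE(mtx, owner)			\
 *			MUTEX_CAS(&(mtx)->mtx_owner, 0, (owner))
 *		#define	MUTEX_OWNED(owner)	((owner) != 0)
 *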
 * Machine dependent code may optionally provide stubs for the following
 * functions to implement the easy (unlocked / no waiters) cases.  If
 * these stubs are provided, __HAVE_MUTEX_STUBS should be defined.
 *
 *	mutex_enter()
 *	mutex_exit()
 *
 * Two additional stubs may be implemented that handle only the spinlock
 * case, primarily for the scheduler.  These should not be documented for
 * or used by device drivers.  __HAVE_SPIN_MUTEX_STUBS should be defined
 * if these are provided:
 *
 *	mutex_spin_enter()
 *	mutex_spin_exit()
 */

#if defined(_KERNEL_OPT)
#include "opt_lockdebug.h"
#endif

#if !defined(_KERNEL)
#include <sys/types.h>
#include <sys/inttypes.h>
#endif

typedef enum kmutex_type_t {
	MUTEX_SPIN = 0,		/* To get a spin mutex at IPL_NONE */
	MUTEX_ADAPTIVE = 1,	/* For porting code written for Solaris */
	MUTEX_DEFAULT = 2,	/* The only native, endorsed type */
	MUTEX_DRIVER = 3,	/* For porting code written for Solaris */
	MUTEX_NODEBUG = 4	/* Disables LOCKDEBUG; use with care */
} kmutex_type_t;
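
/*
 * The usual choice is MUTEX_DEFAULT, with the IPL argument to
 * mutex_init() selecting the behaviour: IPL_NONE gives an adaptive
 * mutex, a higher IPL a spin mutex.  A sketch (sc_lock is a
 * hypothetical example):
 *
 *	kmutex_t sc_lock;
 *
 *	mutex_init(&sc_lock, MUTEX_DEFAULT, IPL_NONE);
 *	mutex_enter(&sc_lock);
 *	... critical section ...
 *	mutex_exit(&sc_lock);
 *	mutex_destroy(&sc_lock);
 */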

typedef struct kmutex kmutex_t;

#if defined(__MUTEX_PRIVATE)

#define	MUTEX_THREAD		((uintptr_t)-16L)

#define	MUTEX_BIT_SPIN		0x01
#define	MUTEX_BIT_WAITERS	0x02
#define	MUTEX_BIT_DEBUG		0x04
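
/*
 * On __HAVE_SIMPLE_MUTEXES ports these flag bits are packed into the
 * low bits of mtx_owner (LWP structures are sufficiently aligned), and
 * MUTEX_THREAD masks them off again, e.g. (a sketch):
 *
 *	#define	MUTEX_OWNER(owner)	((owner) & MUTEX_THREAD)
 */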

#define	MUTEX_SPIN_IPL(mtx)	((mtx)->mtx_ipl)
#define	MUTEX_SPIN_OLDSPL(ci)	((ci)->ci_mtx_oldspl)

void	mutex_vector_enter(kmutex_t *);
void	mutex_vector_exit(kmutex_t *);
void	mutex_spin_retry(kmutex_t *);
void	mutex_wakeup(kmutex_t *);

#endif	/* __MUTEX_PRIVATE */

#ifdef _KERNEL
#include <sys/intr.h>
#endif

#include <machine/mutex.h>

/*
 * Return true if no spin mutexes are held by the current CPU.
 */
#ifndef MUTEX_NO_SPIN_ACTIVE_P
#define	MUTEX_NO_SPIN_ACTIVE_P(ci)	((ci)->ci_mtx_count == 0)
#endif

#ifdef _KERNEL

void	mutex_init(kmutex_t *, kmutex_type_t, int);
void	mutex_destroy(kmutex_t *);

void	mutex_enter(kmutex_t *);
void	mutex_exit(kmutex_t *);

void	mutex_spin_enter(kmutex_t *);
void	mutex_spin_exit(kmutex_t *);

int	mutex_tryenter(kmutex_t *);

int	mutex_owned(kmutex_t *);
lwp_t	*mutex_owner(kmutex_t *);

void	mutex_obj_init(void);
kmutex_t *mutex_obj_alloc(kmutex_type_t, int);
void	mutex_obj_hold(kmutex_t *);
bool	mutex_obj_free(kmutex_t *);
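
/*
 * mutex_obj_alloc() returns a reference-counted mutex: mutex_obj_hold()
 * gains another reference, and mutex_obj_free() releases one, returning
 * true when the last reference goes away and the mutex is freed.
 * A sketch:
 *
 *	kmutex_t *lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NONE);
 *	...share "lock" among several objects via mutex_obj_hold()...
 *	if (mutex_obj_free(lock))
 *		... the mutex has now been destroyed and freed ...
 */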

#endif	/* _KERNEL */

#endif	/* _SYS_MUTEX_H_ */