FreeBSD/Linux Kernel Cross Reference
sys/sys/mutex.h
1 /* $OpenBSD: mutex.h,v 1.18 2019/04/23 13:35:12 visa Exp $ */
2
3 /*
4 * Copyright (c) 2004 Artur Grabowski <art@openbsd.org>
5 *
6 * Permission to use, copy, modify, and distribute this software for any
7 * purpose with or without fee is hereby granted, provided that the above
8 * copyright notice and this permission notice appear in all copies.
9 *
10 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
13 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
15 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
16 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17 */
18
19 #ifndef _SYS_MUTEX_H_
20 #define _SYS_MUTEX_H_
21
22 /*
23 * A mutex is:
24 * - owned by a cpu.
25 * - non-recursive.
26 * - spinning.
27 * - not providing mutual exclusion between processes, only cpus.
28 * - providing interrupt blocking when necessary.
29 *
30 * Different mutexes can be nested, but not interleaved. This is ok:
31 * "mtx_enter(foo); mtx_enter(bar); mtx_leave(bar); mtx_leave(foo);"
32 * This is _not_ ok:
33 * "mtx_enter(foo); mtx_enter(bar); mtx_leave(foo); mtx_leave(bar);"
34 */
35
36 #include <machine/mutex.h>
37
38 #ifdef __USE_MI_MUTEX
39
40 #include <sys/_lock.h>
41
/*
 * MI mutex. Ownership is per-CPU (the assertion macros below compare
 * mtx_owner against curcpu()); the two IPL members carry the level to
 * raise to while held and the level in effect before entry.
 */
struct mutex {
	volatile void *mtx_owner;	/* owning CPU (curcpu()), NULL when free */
	int mtx_wantipl;		/* IPL to run at while held (pre-clamped by __MUTEX_IPL()) */
	int mtx_oldipl;			/* IPL before entry; exposed via MUTEX_OLDIPL() */
#ifdef WITNESS
	struct lock_object mtx_lock_obj;	/* WITNESS lock-order bookkeeping */
#endif
};
50
51 /*
52 * To prevent lock ordering problems with the kernel lock, we need to
53 * make sure we block all interrupts that can grab the kernel lock.
54 * The simplest way to achieve this is to make sure mutexes always
55 * raise the interrupt priority level to the highest level that has
56 * interrupts that grab the kernel lock.
57 */
#ifdef MULTIPROCESSOR
/*
 * MP: lift any IPL strictly between IPL_NONE and IPL_MPFLOOR up to
 * IPL_MPFLOOR, so interrupts that can take the kernel lock stay blocked
 * while the mutex is held (see the lock-ordering comment above).
 */
#define __MUTEX_IPL(ipl) \
	(((ipl) > IPL_NONE && (ipl) < IPL_MPFLOOR) ? IPL_MPFLOOR : (ipl))
#else
/* UP: no kernel-lock ordering hazard, use the requested IPL unchanged. */
#define __MUTEX_IPL(ipl) (ipl)
#endif
64
/*
 * Static initializer: no owner, clamped wanted IPL, old IPL of IPL_NONE.
 * The WITNESS build carries an extra lock_object initializer for the
 * mtx_lock_obj member.
 */
#ifdef WITNESS
#define MUTEX_INITIALIZER_FLAGS(ipl, name, flags) \
	{ NULL, __MUTEX_IPL((ipl)), IPL_NONE, MTX_LO_INITIALIZER(name, flags) }
#else
#define MUTEX_INITIALIZER_FLAGS(ipl, name, flags) \
	{ NULL, __MUTEX_IPL((ipl)), IPL_NONE }
#endif

void __mtx_init(struct mutex *, int);
/* Runtime init: clamp the requested IPL, then defer to __mtx_init(). */
#define _mtx_init(mtx, ipl) __mtx_init((mtx), __MUTEX_IPL((ipl)))
75
#ifdef DIAGNOSTIC
/*
 * Ownership assertions. Both are suppressed once the kernel has panicked
 * (panicstr) or the debugger is active (db_active), so a diagnostic
 * cannot trigger a second, cascading panic.
 */
#define MUTEX_ASSERT_LOCKED(mtx) do {					\
	if (((mtx)->mtx_owner != curcpu()) && !(panicstr || db_active))	\
		panic("mutex %p not held in %s", (mtx), __func__);	\
} while (0)

#define MUTEX_ASSERT_UNLOCKED(mtx) do {					\
	if (((mtx)->mtx_owner == curcpu()) && !(panicstr || db_active))	\
		panic("mutex %p held in %s", (mtx), __func__);		\
} while (0)
#else
/* Non-DIAGNOSTIC kernels compile the assertions away entirely. */
#define MUTEX_ASSERT_LOCKED(mtx) do { } while (0)
#define MUTEX_ASSERT_UNLOCKED(mtx) do { } while (0)
#endif
90
/* Convenience accessors for struct mutex members. */
#define MUTEX_LOCK_OBJECT(mtx)	(&(mtx)->mtx_lock_obj)
#define MUTEX_OLDIPL(mtx)	(mtx)->mtx_oldipl
93
94 #endif /* __USE_MI_MUTEX */
95
96
/*
 * Translate MTX_* flags into lock_object flags: WITNESS checking is on
 * unless MTX_NOWITNESS is given, MTX_DUPOK maps to LO_DUPOK, and every
 * lock is marked initialized and classed as a mutex.
 */
#define MTX_LO_FLAGS(flags) \
	((!((flags) & MTX_NOWITNESS) ? LO_WITNESS : 0) | \
	 ((flags) & MTX_DUPOK ? LO_DUPOK : 0) | \
	 LO_INITIALIZED | (LO_CLASS_MUTEX << LO_CLASSSHIFT))

/* Two-step stringification so __LINE__ expands before being quoted. */
#define __MTX_STRING(x) #x
#define __MTX_S(x) __MTX_STRING(x)
/* Default lock-type name: "file.c:123" — the site of the definition. */
#define __MTX_NAME __FILE__ ":" __MTX_S(__LINE__)

/* lock_object initializer: a per-site anonymous lock_type plus flags. */
#define MTX_LO_INITIALIZER(name, flags) \
	{ .lo_type = &(const struct lock_type){ .lt_name = __MTX_NAME }, \
	  .lo_name = (name), \
	  .lo_flags = MTX_LO_FLAGS(flags) }

#define MTX_NOWITNESS	0x01	/* exempt from WITNESS checking (no LO_WITNESS) */
#define MTX_DUPOK	0x02	/* allow same-type nesting (sets LO_DUPOK) */

/* Common case: default flags, name derived from the definition site. */
#define MUTEX_INITIALIZER(ipl) \
	MUTEX_INITIALIZER_FLAGS(ipl, __MTX_NAME, 0)
116
/*
 * Some architectures need to do magic for the ipl, so they need a macro.
 * (Those define _mtx_init in <machine/mutex.h>; otherwise the generic
 * prototype below applies.)
 */
#ifndef _mtx_init
void _mtx_init(struct mutex *, int);
#endif

void mtx_enter(struct mutex *);		/* spins until acquired (see header comment) */
int mtx_enter_try(struct mutex *);	/* single attempt; int result signals success
					 * — NOTE(review): presumably nonzero on
					 * success, confirm in kern implementation */
void mtx_leave(struct mutex *);		/* release the mutex */

/* Default init: no name, no flags. */
#define mtx_init(m, ipl) mtx_init_flags(m, ipl, NULL, 0)
129
#ifdef WITNESS

void _mtx_init_flags(struct mutex *, int, const char *, int,
	const struct lock_type *);

/*
 * Under WITNESS every init site gets its own static lock_type, named
 * after the mutex expression itself, so the checker can classify locks
 * by where they were initialized.
 */
#define mtx_init_flags(m, ipl, name, flags) do {			\
	static const struct lock_type __lock_type = { .lt_name = #m };	\
	_mtx_init_flags(m, ipl, name, flags, &__lock_type);		\
} while (0)

#else /* WITNESS */

/* Without WITNESS, name/flags are evaluated but unused; plain init. */
#define mtx_init_flags(m, ipl, name, flags) do {			\
	(void)(name); (void)(flags);					\
	_mtx_init(m, ipl);						\
} while (0)

#define _mtx_init_flags(m,i,n,f,t) _mtx_init(m,i)

#endif /* WITNESS */
150
#if defined(_KERNEL) && defined(DDB)

/*
 * Minimal spinlock for the in-kernel debugger (DDB). It records raw
 * interrupt state itself rather than using the IPL machinery of
 * struct mutex above.
 */
struct db_mutex {
	struct cpu_info *mtx_owner;	/* CPU holding the lock, or NULL */
	unsigned long mtx_intr_state;	/* saved interrupt state — presumably
					 * restored by db_mtx_leave(); confirm */
};

#define DB_MUTEX_INITIALIZER	{ NULL, 0 }

void db_mtx_enter(struct db_mutex *);
void db_mtx_leave(struct db_mutex *);

#endif /* _KERNEL && DDB */

#endif /* _SYS_MUTEX_H_ */
Cache object: dca23233e0d93553fbe6640bbd10a666
|