/*
 * FreeBSD kernel source: sys/geom/geom_event.c
 * (captured via a kernel cross-reference site; scrape artifacts repaired)
 */
1 /*-
2 * Copyright (c) 2002 Poul-Henning Kamp
3 * Copyright (c) 2002 Networks Associates Technology, Inc.
4 * All rights reserved.
5 *
6 * This software was developed for the FreeBSD Project by Poul-Henning Kamp
7 * and NAI Labs, the Security Research Division of Network Associates, Inc.
8 * under DARPA/SPAWAR contract N66001-01-C-8035 ("CBOSS"), as part of the
9 * DARPA CHATS research program.
10 *
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
13 * are met:
14 * 1. Redistributions of source code must retain the above copyright
15 * notice, this list of conditions and the following disclaimer.
16 * 2. Redistributions in binary form must reproduce the above copyright
17 * notice, this list of conditions and the following disclaimer in the
18 * documentation and/or other materials provided with the distribution.
19 * 3. The names of the authors may not be used to endorse or promote
20 * products derived from this software without specific prior written
21 * permission.
22 *
23 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
24 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
25 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
26 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
27 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
28 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
29 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
30 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
31 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
32 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
33 * SUCH DAMAGE.
34 *
35 * $FreeBSD: releng/5.1/sys/geom/geom_event.c 114490 2003-05-02 05:26:19Z phk $
36 */
37
38 /*
39 * XXX: How do we in general know that objects referenced in events
40 * have not been destroyed before we get around to handle the event ?
41 */
42
43 #include <sys/param.h>
44 #include <sys/malloc.h>
45 #include <sys/systm.h>
46 #include <sys/kernel.h>
47 #include <sys/lock.h>
48 #include <sys/mutex.h>
49 #include <machine/stdarg.h>
50 #include <sys/errno.h>
51 #include <sys/time.h>
52 #include <geom/geom.h>
53 #include <geom/geom_int.h>
54
TAILQ_HEAD(event_tailq_head, g_event);

/* Queue of posted-but-unprocessed events.  Protected by g_eventlock. */
static struct event_tailq_head g_events = TAILQ_HEAD_INITIALIZER(g_events);
/* Count of queued events; g_waitidle() sleeps until this reaches zero. */
static u_int g_pending_events;
/* Orphaned providers awaiting g_orphan_register().  Protected by g_eventlock. */
static TAILQ_HEAD(,g_provider) g_doorstep = TAILQ_HEAD_INITIALIZER(g_doorstep);
/* Protects g_events, g_pending_events and g_doorstep. */
static struct mtx g_eventlock;
/* Taken exclusively by g_stall_events() to hold off event processing. */
static struct sx g_eventstall;

/* Maximum number of object references one event can carry. */
#define G_N_EVENTREFS		20

struct g_event {
	TAILQ_ENTRY(g_event)	events;	/* Linkage on g_events. */
	g_event_t		*func;	/* Handler invoked by one_event(). */
	void			*arg;	/* Opaque argument for func. */
	int			flag;	/* Malloc flag + EV_* state bits. */
	void			*ref[G_N_EVENTREFS];	/* NULL-terminated refs
							 * matched by
							 * g_cancel_event(). */
};

/* EV_* state bits OR'ed into g_event.flag (disjoint from the M_* flags). */
#define EV_DONE		0x80000		/* Handler has run (or was canceled). */
#define EV_WAKEUP	0x40000		/* A poster sleeps on the event. */
#define EV_CANCELED	0x20000		/* Event canceled, handler got EV_CANCEL. */
76
77 void
78 g_waitidle(void)
79 {
80
81 while (g_pending_events)
82 tsleep(&g_pending_events, PPAUSE, "g_waitidle", hz/5);
83 }
84
/*
 * Stall event processing: one_event() holds g_eventstall across each
 * pass, so once this exclusive lock is acquired no further events or
 * orphan registrations are handled until g_release_events().
 */
void
g_stall_events(void)
{

	sx_xlock(&g_eventstall);
}
91
/*
 * Resume event processing by dropping the stall lock taken in
 * g_stall_events().
 */
void
g_release_events(void)
{

	sx_xunlock(&g_eventstall);
}
98
/*
 * Orphan a provider: record the (mandatory, non-zero) error on it,
 * queue it on the doorstep list and wake the event thread, which will
 * later call g_orphan_register() on it.
 * NOTE(review): only g_eventlock is taken here, so this looks safe to
 * call without the topology lock — confirm against callers.
 */
void
g_orphan_provider(struct g_provider *pp, int error)
{

	g_trace(G_T_TOPOLOGY, "g_orphan_provider(%p(%s), %d)",
	    pp, pp->name, error);
	KASSERT(error != 0,
	    ("g_orphan_provider(%p(%s), 0) error must be non-zero\n",
	     pp, pp->name));
	pp->error = error;
	mtx_lock(&g_eventlock);
	TAILQ_INSERT_TAIL(&g_doorstep, pp, orphan);
	mtx_unlock(&g_eventlock);
	wakeup(&g_wait_event);
}
114
115 /*
116 * This function is called once on each provider which the event handler
117 * finds on its g_doorstep.
118 */
119
120 static void
121 g_orphan_register(struct g_provider *pp)
122 {
123 struct g_consumer *cp, *cp2;
124
125 g_trace(G_T_TOPOLOGY, "g_orphan_register(%s)", pp->name);
126 g_topology_assert();
127
128 /*
129 * Tell all consumers the bad news.
130 * Don't be surprised if they self-destruct.
131 */
132 cp = LIST_FIRST(&pp->consumers);
133 while (cp != NULL) {
134 cp2 = LIST_NEXT(cp, consumers);
135 KASSERT(cp->geom->orphan != NULL,
136 ("geom %s has no orphan, class %s",
137 cp->geom->name, cp->geom->class->name));
138 cp->geom->orphan(cp);
139 cp = cp2;
140 }
141 #ifdef notyet
142 cp = LIST_FIRST(&pp->consumers);
143 if (cp != NULL)
144 return;
145 if (pp->geom->flags & G_GEOM_WITHER)
146 g_destroy_provider(pp);
147 #endif
148 }
149
/*
 * Process all queued orphaned providers, then at most one queued event.
 * Returns 1 after handling an event, 0 when the event queue was empty.
 * Runs with the topology lock held and the stall lock taken so
 * g_stall_events() can block us between passes.
 */
static int
one_event(void)
{
	struct g_event *ep;
	struct g_provider *pp;

	sx_xlock(&g_eventstall);
	g_topology_lock();
	/* Drain the doorstep first; each pop is done under g_eventlock. */
	for (;;) {
		mtx_lock(&g_eventlock);
		pp = TAILQ_FIRST(&g_doorstep);
		if (pp != NULL)
			TAILQ_REMOVE(&g_doorstep, pp, orphan);
		mtx_unlock(&g_eventlock);
		if (pp == NULL)
			break;
		g_orphan_register(pp);
	}
	mtx_lock(&g_eventlock);
	ep = TAILQ_FIRST(&g_events);
	if (ep == NULL) {
		/* Nothing to do; drop all locks and report idle. */
		mtx_unlock(&g_eventlock);
		g_topology_unlock();
		sx_xunlock(&g_eventstall);
		return (0);
	}
	TAILQ_REMOVE(&g_events, ep, events);
	mtx_unlock(&g_eventlock);
	g_topology_assert();
	/* Run the handler; second argument 0 means "not canceled". */
	ep->func(ep->arg, 0);
	g_topology_assert();
	if (ep->flag & EV_WAKEUP) {
		/*
		 * A thread sleeps on ep in g_waitfor_event(); mark the
		 * event done and let that thread free it.
		 */
		ep->flag |= EV_DONE;
		wakeup(ep);
	} else {
		g_free(ep);
	}
	g_pending_events--;
	if (g_pending_events == 0)
		wakeup(&g_pending_events);	/* Release g_waitidle(). */
	g_topology_unlock();
	sx_xunlock(&g_eventstall);
	return (1);
}
194
/*
 * Process events until both the doorstep and the event queue are empty.
 * Called from the GEOM event thread.
 * (Fixed: the definition used pre-ANSI empty parentheses; a proper
 * (void) prototype lets the compiler check calls.)
 */
void
g_run_events(void)
{

	while (one_event())
		;
}
202
203 void
204 g_cancel_event(void *ref)
205 {
206 struct g_event *ep, *epn;
207 struct g_provider *pp;
208 u_int n;
209
210 mtx_lock(&g_eventlock);
211 TAILQ_FOREACH(pp, &g_doorstep, orphan) {
212 if (pp != ref)
213 continue;
214 TAILQ_REMOVE(&g_doorstep, pp, orphan);
215 break;
216 }
217 for (ep = TAILQ_FIRST(&g_events); ep != NULL; ep = epn) {
218 epn = TAILQ_NEXT(ep, events);
219 for (n = 0; n < G_N_EVENTREFS; n++) {
220 if (ep->ref[n] == NULL)
221 break;
222 if (ep->ref[n] == ref) {
223 TAILQ_REMOVE(&g_events, ep, events);
224 ep->func(ep->arg, EV_CANCEL);
225 if (ep->flag & EV_WAKEUP) {
226 ep->flag |= EV_DONE;
227 ep->flag |= EV_CANCELED;
228 wakeup(ep);
229 } else {
230 g_free(ep);
231 }
232 break;
233 }
234 }
235 }
236 mtx_unlock(&g_eventlock);
237 }
238
239 static int
240 g_post_event_x(g_event_t *func, void *arg, int flag, struct g_event **epp, va_list ap)
241 {
242 struct g_event *ep;
243 void *p;
244 u_int n;
245
246 g_trace(G_T_TOPOLOGY, "g_post_event_x(%p, %p, %d", func, arg, flag);
247 ep = g_malloc(sizeof *ep, flag | M_ZERO);
248 if (ep == NULL)
249 return (ENOMEM);
250 ep->flag = flag;
251 for (n = 0; n < G_N_EVENTREFS; n++) {
252 p = va_arg(ap, void *);
253 if (p == NULL)
254 break;
255 g_trace(G_T_TOPOLOGY, " ref %p", p);
256 ep->ref[n++] = p;
257 }
258 va_end(ap);
259 KASSERT(p == NULL, ("Too many references to event"));
260 ep->func = func;
261 ep->arg = arg;
262 mtx_lock(&g_eventlock);
263 g_pending_events++;
264 TAILQ_INSERT_TAIL(&g_events, ep, events);
265 mtx_unlock(&g_eventlock);
266 wakeup(&g_wait_event);
267 if (epp != NULL)
268 *epp = ep;
269 return (0);
270 }
271
/*
 * Post an event for asynchronous processing by the event thread.
 * 'flag' must be M_WAITOK or M_NOWAIT (it controls the event
 * allocation); the variable arguments are a NULL-terminated list of
 * object references that g_cancel_event() uses to match and remove
 * pending events.  Returns 0 or ENOMEM.
 */
int
g_post_event(g_event_t *func, void *arg, int flag, ...)
{
	va_list ap;

	va_start(ap, flag);
	KASSERT(flag == M_WAITOK || flag == M_NOWAIT,
	    ("Wrong flag to g_post_event"));
	/* g_post_event_x() consumes 'ap' and performs the va_end(). */
	return (g_post_event_x(func, arg, flag, NULL, ap));
}
282
283
/*
 * XXX: It might actually be useful to call this function with topology held.
 * XXX: This would ensure that the event gets created before anything else
 * XXX: changes. At present all users have a handle on things in some other
 * XXX: way, so this remains an XXX for now.
 */

/*
 * Post an event and sleep until it has been processed or canceled.
 * Returns 0 on success, EAGAIN if the event was canceled before it
 * ran, or ENOMEM from the allocation.  Because EV_WAKEUP is set, both
 * one_event() and g_cancel_event() leave the event for us to free.
 */
int
g_waitfor_event(g_event_t *func, void *arg, int flag, ...)
{
	va_list ap;
	struct g_event *ep;
	int error;

	/* g_topology_assert_not(); */
	va_start(ap, flag);
	KASSERT(flag == M_WAITOK || flag == M_NOWAIT,
	    ("Wrong flag to g_post_event"));
	error = g_post_event_x(func, arg, flag | EV_WAKEUP, &ep, ap);
	if (error)
		return (error);
	/* Re-check EV_DONE with a 1 second timeout in case of a lost wakeup. */
	do
		tsleep(ep, PRIBIO, "g_waitfor_event", hz);
	while (!(ep->flag & EV_DONE));
	if (ep->flag & EV_CANCELED)
		error = EAGAIN;
	g_free(ep);
	return (error);
}
313
314 void
315 g_event_init()
316 {
317
318 mtx_init(&g_eventlock, "GEOM orphanage", NULL, MTX_DEF);
319 sx_init(&g_eventstall, "GEOM event stalling");
320 }