FreeBSD/Linux Kernel Cross Reference
sys/geom/geom_event.c
1 /*-
2 * Copyright (c) 2002 Poul-Henning Kamp
3 * Copyright (c) 2002 Networks Associates Technology, Inc.
4 * All rights reserved.
5 *
6 * This software was developed for the FreeBSD Project by Poul-Henning Kamp
7 * and NAI Labs, the Security Research Division of Network Associates, Inc.
8 * under DARPA/SPAWAR contract N66001-01-C-8035 ("CBOSS"), as part of the
9 * DARPA CHATS research program.
10 *
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
13 * are met:
14 * 1. Redistributions of source code must retain the above copyright
15 * notice, this list of conditions and the following disclaimer.
16 * 2. Redistributions in binary form must reproduce the above copyright
17 * notice, this list of conditions and the following disclaimer in the
18 * documentation and/or other materials provided with the distribution.
19 * 3. The names of the authors may not be used to endorse or promote
20 * products derived from this software without specific prior written
21 * permission.
22 *
23 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
24 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
25 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
26 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
27 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
28 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
29 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
30 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
31 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
32 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
33 * SUCH DAMAGE.
34 *
35 * $FreeBSD: releng/5.0/sys/geom/geom_event.c 106408 2002-11-04 09:31:02Z phk $
36 */
37
38 /*
39 * XXX: How do we in general know that objects referenced in events
40 * have not been destroyed before we get around to handle the event ?
41 */
42
43 #include <sys/param.h>
44 #ifndef _KERNEL
45 #include <stdio.h>
46 #include <unistd.h>
47 #include <string.h>
48 #include <stdlib.h>
49 #include <signal.h>
50 #include <err.h>
51 #else
52 #include <sys/malloc.h>
53 #include <sys/systm.h>
54 #include <sys/kernel.h>
55 #include <sys/lock.h>
56 #include <sys/mutex.h>
57 #include <sys/eventhandler.h>
58 #endif
59 #include <sys/errno.h>
60 #include <sys/time.h>
61 #include <geom/geom.h>
62 #include <geom/geom_int.h>
63
/* Queue of posted-but-unprocessed events, in posting order; protected by g_eventlock. */
static struct event_tailq_head g_events = TAILQ_HEAD_INITIALIZER(g_events);
/*
 * g_pending_events: count of queued events, used by g_waitidle() to sleep
 * until the queue drains.  g_silence_events: set by g_silence(), cleared by
 * g_waitidle(); consulted elsewhere (not in this file) to quiet reporting.
 */
static u_int g_pending_events, g_silence_events;
static void g_do_event(struct g_event *ep);
/* Providers orphaned via g_orphan_provider(), awaiting g_orphan_register(). */
static TAILQ_HEAD(,g_provider) g_doorstep = TAILQ_HEAD_INITIALIZER(g_doorstep);
/* Protects g_events and g_doorstep; initialized in g_event_init(). */
static struct mtx g_eventlock;
/* Set by geom_shutdown(); suppresses tasting in g_do_event() during shutdown. */
static int g_shutdown;
70
/*
 * Raise the silence flag.  It stays set until the next g_waitidle() call
 * clears it; this file only stores the flag, consumers of it live elsewhere.
 */
void
g_silence(void)
{

	g_silence_events = 1;
}
77
/*
 * Block the caller until the event queue has drained (g_pending_events
 * reaches zero).  Also clears the silence flag and wakes anybody sleeping
 * on it.  Sleeps under Giant with a hz/5 timeout so the count is re-checked
 * periodically even if the wakeup in one_event() is missed.
 */
void
g_waitidle(void)
{

	g_silence_events = 0;
	mtx_lock(&Giant);
	/* Wake sleepers waiting for silence to end. */
	wakeup(&g_silence_events);
	while (g_pending_events)
		tsleep(&g_pending_events, PPAUSE, "g_waitidle", hz/5);
	mtx_unlock(&Giant);
}
89
/*
 * Mark a provider as orphaned: record the (mandatory non-zero) error on it,
 * queue it on the g_doorstep list under g_eventlock, and wake the event
 * thread, which will hand it to g_orphan_register() from one_event().
 * May be called from any context that may not hold the topology lock, hence
 * the doorstep indirection instead of notifying consumers directly.
 */
void
g_orphan_provider(struct g_provider *pp, int error)
{

	g_trace(G_T_TOPOLOGY, "g_orphan_provider(%p(%s), %d)",
	    pp, pp->name, error);
	KASSERT(error != 0,
	    ("g_orphan_provider(%p(%s), 0) error must be non-zero\n",
	     pp, pp->name));
	pp->error = error;
	mtx_lock(&g_eventlock);
	TAILQ_INSERT_TAIL(&g_doorstep, pp, orphan);
	mtx_unlock(&g_eventlock);
	wakeup(&g_wait_event);
}
105
106 /*
107 * This function is called once on each provider which the event handler
108 * finds on its g_doorstep.
109 */
110
111 static void
112 g_orphan_register(struct g_provider *pp)
113 {
114 struct g_consumer *cp, *cp2;
115
116 g_trace(G_T_TOPOLOGY, "g_orphan_register(%s)", pp->name);
117 g_topology_assert();
118
119 /*
120 * Tell all consumers the bad news.
121 * Don't get surprised if they self-destruct.
122 */
123 cp = LIST_FIRST(&pp->consumers);
124 while (cp != NULL) {
125 cp2 = LIST_NEXT(cp, consumers);
126 KASSERT(cp->geom->orphan != NULL,
127 ("geom %s has no orphan, class %s",
128 cp->geom->name, cp->geom->class->name));
129 cp->geom->orphan(cp);
130 cp = cp2;
131 }
132 }
133
/* Release an event's storage; the event must already be off all queues. */
static void
g_destroy_event(struct g_event *ep)
{

	g_free(ep);
}
140
141 static void
142 g_do_event(struct g_event *ep)
143 {
144 struct g_class *mp, *mp2;
145 struct g_geom *gp;
146 struct g_consumer *cp, *cp2;
147 struct g_provider *pp;
148 int i;
149
150 g_trace(G_T_TOPOLOGY, "g_do_event(%p) %d m:%p g:%p p:%p c:%p - ",
151 ep, ep->event, ep->class, ep->geom, ep->provider, ep->consumer);
152 g_topology_assert();
153 switch (ep->event) {
154 case EV_CALL_ME:
155 ep->func(ep->arg);
156 g_topology_assert();
157 break;
158 case EV_NEW_CLASS:
159 mp2 = ep->class;
160 if (g_shutdown)
161 break;
162 if (mp2->taste == NULL)
163 break;
164 if (g_shutdown)
165 break;
166 LIST_FOREACH(mp, &g_classes, class) {
167 if (mp2 == mp)
168 continue;
169 LIST_FOREACH(gp, &mp->geom, geom) {
170 LIST_FOREACH(pp, &gp->provider, provider) {
171 mp2->taste(ep->class, pp, 0);
172 g_topology_assert();
173 }
174 }
175 }
176 break;
177 case EV_NEW_PROVIDER:
178 if (g_shutdown)
179 break;
180 g_trace(G_T_TOPOLOGY, "EV_NEW_PROVIDER(%s)",
181 ep->provider->name);
182 LIST_FOREACH(mp, &g_classes, class) {
183 if (mp->taste == NULL)
184 continue;
185 if (!strcmp(ep->provider->name, "geom.ctl") &&
186 strcmp(mp->name, "DEV"))
187 continue;
188 i = 1;
189 LIST_FOREACH(cp, &ep->provider->consumers, consumers)
190 if(cp->geom->class == mp)
191 i = 0;
192 if (i) {
193 mp->taste(mp, ep->provider, 0);
194 g_topology_assert();
195 }
196 }
197 break;
198 case EV_SPOILED:
199 g_trace(G_T_TOPOLOGY, "EV_SPOILED(%p(%s),%p)",
200 ep->provider, ep->provider->name, ep->consumer);
201 cp = LIST_FIRST(&ep->provider->consumers);
202 while (cp != NULL) {
203 cp2 = LIST_NEXT(cp, consumers);
204 if (cp->spoiled) {
205 g_trace(G_T_TOPOLOGY, "spoiling %p (%s) (%p)",
206 cp, cp->geom->name, cp->geom->spoiled);
207 if (cp->geom->spoiled != NULL)
208 cp->geom->spoiled(cp);
209 else
210 cp->spoiled = 0;
211 }
212 cp = cp2;
213 }
214 break;
215 case EV_LAST:
216 default:
217 KASSERT(1 == 0, ("Unknown event %d", ep->event));
218 }
219 }
220
/*
 * Process all queued orphaned providers, then at most one event.
 * Returns 1 if an event was processed, 0 if the event queue was empty
 * (so the caller can loop until drained).
 *
 * Lock order: the topology lock is taken first and held across the
 * callbacks; g_eventlock is taken only briefly to manipulate the
 * g_doorstep and g_events queues, never across a callback.
 */
static int
one_event(void)
{
	struct g_event *ep;
	struct g_provider *pp;

	g_topology_lock();
	/* Drain the orphan doorstep completely before touching events. */
	for (;;) {
		mtx_lock(&g_eventlock);
		pp = TAILQ_FIRST(&g_doorstep);
		if (pp != NULL)
			TAILQ_REMOVE(&g_doorstep, pp, orphan);
		mtx_unlock(&g_eventlock);
		if (pp == NULL)
			break;
		g_orphan_register(pp);
	}
	mtx_lock(&g_eventlock);
	ep = TAILQ_FIRST(&g_events);
	if (ep == NULL) {
		mtx_unlock(&g_eventlock);
		g_topology_unlock();
		return (0);
	}
	TAILQ_REMOVE(&g_events, ep, events);
	mtx_unlock(&g_eventlock);
	/*
	 * Clear the back pointers g_post_event() installed, so the
	 * objects may carry a new event again.
	 */
	if (ep->class != NULL)
		ep->class->event = NULL;
	if (ep->geom != NULL)
		ep->geom->event = NULL;
	if (ep->provider != NULL)
		ep->provider->event = NULL;
	if (ep->consumer != NULL)
		ep->consumer->event = NULL;
	g_do_event(ep);
	g_destroy_event(ep);
	g_pending_events--;
	/* Last event out: wake g_waitidle() sleepers. */
	if (g_pending_events == 0)
		wakeup(&g_pending_events);
	g_topology_unlock();
	return (1);
}
263
/*
 * Drain the orphan doorstep and the event queue completely by running
 * one_event() until it reports an empty queue.
 */
void
g_run_events(void)
{

	while (one_event())
		;
}
271
/*
 * Queue an event for the event thread.  Each of the (optional) class,
 * geom, provider and consumer arguments gets a back pointer to the event,
 * and each object may carry at most one pending event at a time — the
 * KASSERTs fire on a double post.  one_event() clears the back pointers
 * when the event is dequeued.  Requires the topology lock; sleeps for
 * memory (M_WAITOK), so this cannot fail.
 */
void
g_post_event(enum g_events ev, struct g_class *mp, struct g_geom *gp, struct g_provider *pp, struct g_consumer *cp)
{
	struct g_event *ep;

	g_trace(G_T_TOPOLOGY, "g_post_event(%d, %p, %p, %p, %p)",
	    ev, mp, gp, pp, cp);
	g_topology_assert();
	ep = g_malloc(sizeof *ep, M_WAITOK | M_ZERO);
	ep->event = ev;
	if (mp != NULL) {
		ep->class = mp;
		KASSERT(mp->event == NULL, ("Double event on class %d %d",
		    ep->event, mp->event->event));
		mp->event = ep;
	}
	if (gp != NULL) {
		ep->geom = gp;
		KASSERT(gp->event == NULL, ("Double event on geom %d %d",
		    ep->event, gp->event->event));
		gp->event = ep;
	}
	if (pp != NULL) {
		ep->provider = pp;
		KASSERT(pp->event == NULL, ("Double event on provider %s %d %d",
		    pp->name, ep->event, pp->event->event));
		pp->event = ep;
	}
	if (cp != NULL) {
		ep->consumer = cp;
		KASSERT(cp->event == NULL, ("Double event on consumer %d %d",
		    ep->event, cp->event->event));
		cp->event = ep;
	}
	/* Enqueue under the event lock and wake the event thread. */
	mtx_lock(&g_eventlock);
	g_pending_events++;
	TAILQ_INSERT_TAIL(&g_events, ep, events);
	mtx_unlock(&g_eventlock);
	wakeup(&g_wait_event);
}
312
313 int
314 g_call_me(g_call_me_t *func, void *arg)
315 {
316 struct g_event *ep;
317
318 g_trace(G_T_TOPOLOGY, "g_call_me(%p, %p", func, arg);
319 ep = g_malloc(sizeof *ep, M_NOWAIT | M_ZERO);
320 if (ep == NULL)
321 return (ENOMEM);
322 ep->event = EV_CALL_ME;
323 ep->func = func;
324 ep->arg = arg;
325 mtx_lock(&g_eventlock);
326 g_pending_events++;
327 TAILQ_INSERT_TAIL(&g_events, ep, events);
328 mtx_unlock(&g_eventlock);
329 wakeup(&g_wait_event);
330 return (0);
331 }
332
#ifdef _KERNEL
/*
 * shutdown_pre_sync event handler (registered in g_event_init()):
 * raise the flag that makes g_do_event() skip tasting during shutdown.
 */
static void
geom_shutdown(void *foo __unused)
{

	g_shutdown = 1;
}
#endif
341
342 void
343 g_event_init()
344 {
345
346 #ifdef _KERNEL
347
348 EVENTHANDLER_REGISTER(shutdown_pre_sync, geom_shutdown, NULL,
349 SHUTDOWN_PRI_FIRST);
350 #endif
351 mtx_init(&g_eventlock, "GEOM orphanage", NULL, MTX_DEF);
352 }
Cache object: a9befa03da9b81c744096cd6a57b13d0
|