FreeBSD/Linux Kernel Cross Reference
sys/geom/geom_dev.c
1 /*-
2 * Copyright (c) 2002 Poul-Henning Kamp
3 * Copyright (c) 2002 Networks Associates Technology, Inc.
4 * All rights reserved.
5 *
6 * This software was developed for the FreeBSD Project by Poul-Henning Kamp
7 * and NAI Labs, the Security Research Division of Network Associates, Inc.
8 * under DARPA/SPAWAR contract N66001-01-C-8035 ("CBOSS"), as part of the
9 * DARPA CHATS research program.
10 *
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
13 * are met:
14 * 1. Redistributions of source code must retain the above copyright
15 * notice, this list of conditions and the following disclaimer.
16 * 2. Redistributions in binary form must reproduce the above copyright
17 * notice, this list of conditions and the following disclaimer in the
18 * documentation and/or other materials provided with the distribution.
19 * 3. The names of the authors may not be used to endorse or promote
20 * products derived from this software without specific prior written
21 * permission.
22 *
23 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
24 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
25 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
26 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
27 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
28 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
29 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
30 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
31 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
32 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
33 * SUCH DAMAGE.
34 */
35
36 #include <sys/cdefs.h>
37 __FBSDID("$FreeBSD: releng/5.4/sys/geom/geom_dev.c 145335 2005-04-20 19:11:07Z cvs2svn $");
38
39 #include <sys/param.h>
40 #include <sys/systm.h>
41 #include <sys/malloc.h>
42 #include <sys/kernel.h>
43 #include <sys/conf.h>
44 #include <sys/bio.h>
45 #include <sys/lock.h>
46 #include <sys/mutex.h>
47 #include <sys/proc.h>
48 #include <sys/errno.h>
49 #include <sys/time.h>
50 #include <sys/disk.h>
51 #include <sys/fcntl.h>
52 #include <sys/limits.h>
53 #include <geom/geom.h>
54 #include <geom/geom_int.h>
55
/* Character device entry points provided by this class. */
static d_open_t g_dev_open;
static d_close_t g_dev_close;
static d_strategy_t g_dev_strategy;
static d_ioctl_t g_dev_ioctl;

/*
 * cdevsw for the per-provider device nodes.  Read and write are handled
 * by physio, which ends up calling g_dev_strategy().  D_TRACKCLOSE makes
 * devfs call g_dev_close() on every close so the access counts taken in
 * g_dev_open() are always balanced.
 */
static struct cdevsw g_dev_cdevsw = {
	.d_version = D_VERSION,
	.d_open = g_dev_open,
	.d_close = g_dev_close,
	.d_read = physread,
	.d_write = physwrite,
	.d_ioctl = g_dev_ioctl,
	.d_strategy = g_dev_strategy,
	.d_name = "g_dev",
	.d_maj = GEOM_MAJOR,
	.d_flags = D_DISK | D_TRACKCLOSE,
};

static g_taste_t g_dev_taste;
static g_orphan_t g_dev_orphan;

/*
 * The DEV class tastes every provider and hangs a device node off it;
 * it stores no metadata of its own.
 */
static struct g_class g_dev_class = {
	.name = "DEV",
	.version = G_VERSION,
	.taste = g_dev_taste,
	.orphan = g_dev_orphan,
};
83
84 void
85 g_dev_print(void)
86 {
87 struct g_geom *gp;
88 char const *p = "";
89
90 LIST_FOREACH(gp, &g_dev_class.geom, geom) {
91 printf("%s%s", p, gp->name);
92 p = " ";
93 }
94 printf("\n");
95 }
96
97 struct g_provider *
98 g_dev_getprovider(struct cdev *dev)
99 {
100 struct g_consumer *cp;
101
102 if (dev == NULL)
103 return (NULL);
104 if (devsw(dev) != &g_dev_cdevsw)
105 return (NULL);
106 cp = dev->si_drv2;
107 return (cp->provider);
108 }
109
110
/*
 * Taste callback: give every provider a /dev node.  If this class
 * already has a geom on the provider, return NULL; otherwise create a
 * geom + consumer pair, attach, and make the device node.
 */
static struct g_geom *
g_dev_taste(struct g_class *mp, struct g_provider *pp, int insist __unused)
{
	struct g_geom *gp;
	struct g_consumer *cp;
	static int unit = GEOM_MINOR_PROVIDERS;	/* next minor number to hand out */
	int error;
	struct cdev *dev;

	g_trace(G_T_TOPOLOGY, "dev_taste(%s,%s)", mp->name, pp->name);
	g_topology_assert();
	/* Only ever one DEV geom per provider. */
	LIST_FOREACH(cp, &pp->consumers, consumers)
		if (cp->geom->class == mp)
			return (NULL);
	gp = g_new_geomf(mp, pp->name);
	cp = g_new_consumer(gp);
	error = g_attach(cp, pp);
	/* Attach cannot fail: we verified above that nobody else is here. */
	KASSERT(error == 0,
	    ("g_dev_taste(%s) failed to g_attach, err=%d", pp->name, error));
	/*
	 * XXX: I'm not 100% sure we can call make_dev(9) without Giant
	 * yet.  Once we can, we don't need to drop topology here either.
	 */
	g_topology_unlock();
	mtx_lock(&Giant);
	dev = make_dev(&g_dev_cdevsw, unit2minor(unit++),
	    UID_ROOT, GID_OPERATOR, 0640, gp->name);
	if (pp->flags & G_PF_CANDELETE)
		dev->si_flags |= SI_CANDELETE;
	mtx_unlock(&Giant);
	g_topology_lock();
	/* Propagate provider geometry to the device node. */
	dev->si_iosize_max = MAXPHYS;
	dev->si_stripesize = pp->stripesize;
	dev->si_stripeoffset = pp->stripeoffset;
	/* Cross-link geom, consumer and cdev for the other entry points. */
	gp->softc = dev;
	dev->si_drv1 = gp;
	dev->si_drv2 = cp;
	return (gp);
}
150
/*
 * Open the device node: translate FREAD/FWRITE into GEOM access count
 * increments on our consumer.  Returns ENXIO if the node is not fully
 * set up or has been orphaned, EPERM at securelevel >= 2 for writes.
 */
static int
g_dev_open(struct cdev *dev, int flags, int fmt, struct thread *td)
{
	struct g_geom *gp;
	struct g_consumer *cp;
	int error, r, w, e;

	gp = dev->si_drv1;
	cp = dev->si_drv2;
	if (gp == NULL || cp == NULL || gp->softc != dev)
		return(ENXIO);		/* g_dev_taste() not done yet */

	g_trace(G_T_ACCESS, "g_dev_open(%s, %d, %d, %p)",
	    gp->name, flags, fmt, td);

	/* Each open adds one to the relevant access counts. */
	r = flags & FREAD ? 1 : 0;
	w = flags & FWRITE ? 1 : 0;
#ifdef notyet
	e = flags & O_EXCL ? 1 : 0;	/* exclusive opens not wired up yet */
#else
	e = 0;
#endif
	if (w) {
		/*
		 * When running in very secure mode, do not allow
		 * opens for writing of any disks.
		 */
		error = securelevel_ge(td->td_ucred, 2);
		if (error)
			return (error);
	}
	g_topology_lock();
	if (dev->si_devsw == NULL)
		error = ENXIO;		/* We were orphaned */
	else
		error = g_access(cp, r, w, e);
	g_topology_unlock();
	if (!error)
		dev->si_bsize_phys = cp->provider->sectorsize;
	return(error);
}
192
/*
 * Close the device node: release the access counts taken in
 * g_dev_open().  On the final close, wait up to 10 seconds for any
 * in-flight I/O on the consumer to drain before completing.
 */
static int
g_dev_close(struct cdev *dev, int flags, int fmt, struct thread *td)
{
	struct g_geom *gp;
	struct g_consumer *cp;
	int error, r, w, e, i;

	gp = dev->si_drv1;
	cp = dev->si_drv2;
	if (gp == NULL || cp == NULL)
		return(ENXIO);
	g_trace(G_T_ACCESS, "g_dev_close(%s, %d, %d, %p)",
	    gp->name, flags, fmt, td);
	/* Negative deltas: this close gives back what the open took. */
	r = flags & FREAD ? -1 : 0;
	w = flags & FWRITE ? -1 : 0;
#ifdef notyet
	e = flags & O_EXCL ? -1 : 0;
#else
	e = 0;
#endif
	g_topology_lock();
	if (dev->si_devsw == NULL)
		error = ENXIO;		/* We were orphaned */
	else
		error = g_access(cp, r, w, e);
	/*
	 * If this was the last open (acr == acw == 0), poll in hz/10
	 * steps, for at most 10 seconds total, until every issued
	 * request has completed (nstart == nend).
	 */
	for (i = 0; i < 10 * hz;) {
		if (cp->acr != 0 || cp->acw != 0)
			break;
		if (cp->nstart == cp->nend)
			break;
		tsleep(&i, PRIBIO, "gdevwclose", hz / 10);
		i += hz / 10;
	}
	/* Timed out with I/O still pending: warn, but complete anyway. */
	if (cp->acr == 0 && cp->acw == 0 && cp->nstart != cp->nend) {
		printf("WARNING: Final close of geom_dev(%s) %s %s\n",
		    gp->name,
		    "still has outstanding I/O after 10 seconds.",
		    "Completing close anyway, panic may happen later.");
	}
	g_topology_unlock();
	return (error);
}
235
236 /*
237 * XXX: Until we have unmessed the ioctl situation, there is a race against
238 * XXX: a concurrent orphanization. We cannot close it by holding topology
239 * XXX: since that would prevent us from doing our job, and stalling events
240 * XXX: will break (actually: stall) the BSD disklabel hacks.
241 */
/*
 * ioctl handler: answer the generic DIOC* disk queries from provider
 * state or GEOM attributes, and pass anything unknown down to the
 * provider's geom.  See the race caveat in the comment above.
 */
static int
g_dev_ioctl(struct cdev *dev, u_long cmd, caddr_t data, int fflag, struct thread *td)
{
	struct g_geom *gp;
	struct g_consumer *cp;
	struct g_kerneldump kd;
	int i, error;
	u_int u;

	gp = dev->si_drv1;
	cp = dev->si_drv2;

	error = 0;
	/* An ioctl implies an open, so the consumer must hold access. */
	KASSERT(cp->acr || cp->acw,
	    ("Consumer with zero access count in g_dev_ioctl"));

	i = IOCPARM_LEN(cmd);	/* in/out buffer size for g_io_getattr() */
	switch (cmd) {
	case DIOCGSECTORSIZE:
		*(u_int *)data = cp->provider->sectorsize;
		if (*(u_int *)data == 0)
			error = ENOENT;
		break;
	case DIOCGMEDIASIZE:
		*(off_t *)data = cp->provider->mediasize;
		if (*(off_t *)data == 0)
			error = ENOENT;
		break;
	case DIOCGFWSECTORS:
		error = g_io_getattr("GEOM::fwsectors", cp, &i, data);
		if (error == 0 && *(u_int *)data == 0)
			error = ENOENT;
		break;
	case DIOCGFWHEADS:
		error = g_io_getattr("GEOM::fwheads", cp, &i, data);
		if (error == 0 && *(u_int *)data == 0)
			error = ENOENT;
		break;
	case DIOCGFRONTSTUFF:
		error = g_io_getattr("GEOM::frontstuff", cp, &i, data);
		break;
	case DIOCSKERNELDUMP:
		/* Zero argument clears the dump device, nonzero sets it. */
		u = *((u_int *)data);
		if (!u) {
			set_dumper(NULL);
			error = 0;
			break;
		}
		kd.offset = 0;
		kd.length = OFF_MAX;
		i = sizeof kd;
		/*
		 * NOTE(review): set_dumper() for the enable case is
		 * presumably done by whoever answers this attribute;
		 * only the SI_DUMPDEV flag is set here — confirm.
		 */
		error = g_io_getattr("GEOM::kerneldump", cp, &i, &kd);
		if (!error)
			dev->si_flags |= SI_DUMPDEV;
		break;

	default:
		/* Unknown command: let the provider's geom have a look. */
		if (cp->provider->geom->ioctl != NULL) {
			error = cp->provider->geom->ioctl(cp->provider, cmd, data, fflag, td);
		} else {
			error = ENOIOCTL;
		}
	}

	return (error);
}
308
309 static void
310 g_dev_done(struct bio *bp2)
311 {
312 struct bio *bp;
313
314 bp = bp2->bio_parent;
315 bp->bio_error = bp2->bio_error;
316 if (bp->bio_error != 0) {
317 g_trace(G_T_BIO, "g_dev_done(%p) had error %d",
318 bp2, bp->bio_error);
319 bp->bio_flags |= BIO_ERROR;
320 } else {
321 g_trace(G_T_BIO, "g_dev_done(%p/%p) resid %ld completed %jd",
322 bp2, bp, bp->bio_resid, (intmax_t)bp2->bio_completed);
323 }
324 bp->bio_resid = bp->bio_bcount - bp2->bio_completed;
325 g_destroy_bio(bp2);
326 biodone(bp);
327 }
328
/*
 * devfs strategy routine: validate sector alignment, clone the bio and
 * ship the clone down to our consumer.  Completion comes back through
 * g_dev_done().
 */
static void
g_dev_strategy(struct bio *bp)
{
	struct g_consumer *cp;
	struct bio *bp2;
	struct cdev *dev;

	KASSERT(bp->bio_cmd == BIO_READ ||
	    bp->bio_cmd == BIO_WRITE ||
	    bp->bio_cmd == BIO_DELETE,
	    ("Wrong bio_cmd bio=%p cmd=%d", bp, bp->bio_cmd));
	dev = bp->bio_dev;
	cp = dev->si_drv2;
	KASSERT(cp->acr || cp->acw,
	    ("Consumer with zero access count in g_dev_strategy"));

	/* Reject transfers that are not whole multiples of the sectorsize. */
	if ((bp->bio_offset % cp->provider->sectorsize) != 0 ||
	    (bp->bio_bcount % cp->provider->sectorsize) != 0) {
		biofinish(bp, NULL, EINVAL);
		return;
	}

	for (;;) {
		/*
		 * XXX: This is not an ideal solution, but I believe it to
		 * XXX: deadlock safe, all things considered.
		 */
		bp2 = g_clone_bio(bp);
		if (bp2 != NULL)
			break;
		/* Out of bios; back off for hz/10 and retry the clone. */
		tsleep(&bp, PRIBIO, "gdstrat", hz / 10);
	}
	KASSERT(bp2 != NULL, ("XXX: ENOMEM in a bad place"));
	bp2->bio_length = (off_t)bp->bio_bcount;
	bp2->bio_done = g_dev_done;
	g_trace(G_T_BIO,
	    "g_dev_strategy(%p/%p) offset %jd length %jd data %p cmd %d",
	    bp, bp2, (intmax_t)bp->bio_offset, (intmax_t)bp2->bio_length,
	    bp2->bio_data, bp2->bio_cmd);
	g_io_request(bp2, cp);
	KASSERT(cp->acr || cp->acw,
	    ("g_dev_strategy raced with g_dev_close and lost"));

}
373
374 /*
375 * g_dev_orphan()
376 *
377 * Called from below when the provider orphaned us.
378 * - Clear any dump settings.
379 * - Destroy the struct cdev to prevent any more requests from coming in. The
380 * provider is already marked with an error, so anything which comes in
381 * in the interim will be returned immediately.
382 * - Wait for any outstanding I/O to finish.
383 * - Set our access counts to zero, whatever they were.
384 * - Detach and self-destruct.
385 */
386
/*
 * Orphan callback: the provider below us went away; tear down the
 * device node, drain I/O, drop access counts and self-destruct.  See
 * the step-by-step description in the comment above.
 */
static void
g_dev_orphan(struct g_consumer *cp)
{
	struct g_geom *gp;
	struct cdev *dev;

	g_topology_assert();
	gp = cp->geom;
	dev = gp->softc;
	g_trace(G_T_TOPOLOGY, "g_dev_orphan(%p(%s))", cp, gp->name);

	/* Reset any dump-area set on this device */
	if (dev->si_flags & SI_DUMPDEV)
		set_dumper(NULL);

	/* Destroy the struct cdev so we get no more requests */
	destroy_dev(dev);

	/* Wait for the cows to come home: poll until I/O has drained. */
	while (cp->nstart != cp->nend)
		msleep(&dev, NULL, PRIBIO, "gdevorphan", hz / 10);

	/* Zero out any remaining access counts so g_detach() is legal. */
	if (cp->acr > 0 || cp->acw > 0 || cp->ace > 0)
		g_access(cp, -cp->acr, -cp->acw, -cp->ace);

	g_detach(cp);
	g_destroy_consumer(cp);
	g_destroy_geom(gp);
}
416
417 DECLARE_GEOM_CLASS(g_dev_class, g_dev);
Cache object: 07e60a8775f4c8363a34e8fdc3145547
|