FreeBSD/Linux Kernel Cross Reference
sys/geom/geom_dev.c
1 /*-
2 * Copyright (c) 2002 Poul-Henning Kamp
3 * Copyright (c) 2002 Networks Associates Technology, Inc.
4 * All rights reserved.
5 *
6 * This software was developed for the FreeBSD Project by Poul-Henning Kamp
7 * and NAI Labs, the Security Research Division of Network Associates, Inc.
8 * under DARPA/SPAWAR contract N66001-01-C-8035 ("CBOSS"), as part of the
9 * DARPA CHATS research program.
10 *
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
13 * are met:
14 * 1. Redistributions of source code must retain the above copyright
15 * notice, this list of conditions and the following disclaimer.
16 * 2. Redistributions in binary form must reproduce the above copyright
17 * notice, this list of conditions and the following disclaimer in the
18 * documentation and/or other materials provided with the distribution.
19 * 3. The names of the authors may not be used to endorse or promote
20 * products derived from this software without specific prior written
21 * permission.
22 *
23 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
24 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
25 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
26 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
27 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
28 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
29 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
30 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
31 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
32 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
33 * SUCH DAMAGE.
34 */
35
36 #include <sys/cdefs.h>
37 __FBSDID("$FreeBSD: releng/8.0/sys/geom/geom_dev.c 195436 2009-07-08 05:56:14Z marcel $");
38
39 #include <sys/param.h>
40 #include <sys/systm.h>
41 #include <sys/malloc.h>
42 #include <sys/kernel.h>
43 #include <sys/conf.h>
44 #include <sys/bio.h>
45 #include <sys/lock.h>
46 #include <sys/mutex.h>
47 #include <sys/proc.h>
48 #include <sys/errno.h>
49 #include <sys/time.h>
50 #include <sys/disk.h>
51 #include <sys/fcntl.h>
52 #include <sys/limits.h>
53 #include <geom/geom.h>
54 #include <geom/geom_int.h>
55
/*
 * Character-device entry points through which userland reaches a GEOM
 * provider via its /dev node.  Reads and writes go through the generic
 * physio helpers, which end up in g_dev_strategy().
 */
static d_open_t g_dev_open;
static d_close_t g_dev_close;
static d_strategy_t g_dev_strategy;
static d_ioctl_t g_dev_ioctl;

static struct cdevsw g_dev_cdevsw = {
	.d_version = D_VERSION,
	.d_open = g_dev_open,
	.d_close = g_dev_close,
	.d_read = physread,
	.d_write = physwrite,
	.d_ioctl = g_dev_ioctl,
	.d_strategy = g_dev_strategy,
	.d_name = "g_dev",
	/* D_TRACKCLOSE: d_close is called for every close(2), not only the last. */
	.d_flags = D_DISK | D_TRACKCLOSE,
};

static g_taste_t g_dev_taste;
static g_orphan_t g_dev_orphan;

/*
 * The DEV class tastes every provider in the system and publishes each
 * one as a device node (see g_dev_taste() below).
 */
static struct g_class g_dev_class = {
	.name = "DEV",
	.version = G_VERSION,
	.taste = g_dev_taste,
	.orphan = g_dev_orphan,
};
82
83 void
84 g_dev_print(void)
85 {
86 struct g_geom *gp;
87 char const *p = "";
88
89 LIST_FOREACH(gp, &g_dev_class.geom, geom) {
90 printf("%s%s", p, gp->name);
91 p = " ";
92 }
93 printf("\n");
94 }
95
96 struct g_provider *
97 g_dev_getprovider(struct cdev *dev)
98 {
99 struct g_consumer *cp;
100
101 g_topology_assert();
102 if (dev == NULL)
103 return (NULL);
104 if (dev->si_devsw != &g_dev_cdevsw)
105 return (NULL);
106 cp = dev->si_drv2;
107 return (cp->provider);
108 }
109
110
/*
 * Taste method, called by GEOM for every provider.  Creates a DEV geom
 * with one consumer attached to the provider and a matching /dev node,
 * making the provider reachable from userland.
 */
static struct g_geom *
g_dev_taste(struct g_class *mp, struct g_provider *pp, int insist __unused)
{
	struct g_geom *gp;
	struct g_consumer *cp;
	int error;
	struct cdev *dev;

	g_trace(G_T_TOPOLOGY, "dev_taste(%s,%s)", mp->name, pp->name);
	g_topology_assert();
	/* Decline if this class already has a consumer on the provider. */
	LIST_FOREACH(cp, &pp->consumers, consumers)
		if (cp->geom->class == mp)
			return (NULL);
	gp = g_new_geomf(mp, pp->name);
	cp = g_new_consumer(gp);
	error = g_attach(cp, pp);
	KASSERT(error == 0,
	    ("g_dev_taste(%s) failed to g_attach, err=%d", pp->name, error));
	/*
	 * The device node is named after the provider.
	 * NOTE(review): gp->name is passed where make_dev() expects a
	 * printf-style format; a '%' in a provider name would misbehave —
	 * confirm whether names are constrained upstream.
	 */
	dev = make_dev(&g_dev_cdevsw, 0,
	    UID_ROOT, GID_OPERATOR, 0640, gp->name);
	if (pp->flags & G_PF_CANDELETE)
		dev->si_flags |= SI_CANDELETE;
	dev->si_iosize_max = MAXPHYS;
	/* Cross-link geom, cdev and consumer for the other cdev methods. */
	gp->softc = dev;
	dev->si_drv1 = gp;
	dev->si_drv2 = cp;
	return (gp);
}
139
/*
 * open(2) on the device node: translate the open flags into GEOM access
 * count increments and apply them to our consumer.
 */
static int
g_dev_open(struct cdev *dev, int flags, int fmt, struct thread *td)
{
	struct g_geom *gp;
	struct g_consumer *cp;
	int error, r, w, e;

	gp = dev->si_drv1;
	cp = dev->si_drv2;
	if (gp == NULL || cp == NULL || gp->softc != dev)
		return(ENXIO);		/* g_dev_taste() not done yet */

	g_trace(G_T_ACCESS, "g_dev_open(%s, %d, %d, %p)",
	    gp->name, flags, fmt, td);

	/* +1 deltas for the access counts; exclusive bit not wired up yet. */
	r = flags & FREAD ? 1 : 0;
	w = flags & FWRITE ? 1 : 0;
#ifdef notyet
	e = flags & O_EXCL ? 1 : 0;
#else
	e = 0;
#endif
	if (w) {
		/*
		 * When running in very secure mode, do not allow
		 * opens for writing of any disks.
		 */
		error = securelevel_ge(td->td_ucred, 2);
		if (error)
			return (error);
	}
	g_topology_lock();
	/* si_devsw is cleared by destroy_dev() in g_dev_orphan(). */
	if (dev->si_devsw == NULL)
		error = ENXIO;		/* We were orphaned */
	else
		error = g_access(cp, r, w, e);
	g_topology_unlock();
	return(error);
}
179
/*
 * close(2) on the device node: drop the access counts acquired in
 * g_dev_open().  On the final close, wait up to 10 seconds for any
 * in-flight I/O on the consumer to drain before returning.
 */
static int
g_dev_close(struct cdev *dev, int flags, int fmt, struct thread *td)
{
	struct g_geom *gp;
	struct g_consumer *cp;
	int error, r, w, e, i;

	gp = dev->si_drv1;
	cp = dev->si_drv2;
	if (gp == NULL || cp == NULL)
		return(ENXIO);
	g_trace(G_T_ACCESS, "g_dev_close(%s, %d, %d, %p)",
	    gp->name, flags, fmt, td);
	/* -1 deltas, mirroring the +1 deltas applied in g_dev_open(). */
	r = flags & FREAD ? -1 : 0;
	w = flags & FWRITE ? -1 : 0;
#ifdef notyet
	e = flags & O_EXCL ? -1 : 0;
#else
	e = 0;
#endif
	g_topology_lock();
	if (dev->si_devsw == NULL)
		error = ENXIO;		/* We were orphaned */
	else
		error = g_access(cp, r, w, e);
	/*
	 * Final close (acr == acw == 0): poll every hz/10 ticks, up to
	 * 10 * hz ticks total, for nstart to catch up with nend.
	 * NOTE(review): assumes hz >= 10; with a smaller hz, hz/10 is 0
	 * and neither pause() nor the loop counter would advance — confirm.
	 */
	for (i = 0; i < 10 * hz;) {
		if (cp->acr != 0 || cp->acw != 0)
			break;
		if (cp->nstart == cp->nend)
			break;
		pause("gdevwclose", hz / 10);
		i += hz / 10;
	}
	if (cp->acr == 0 && cp->acw == 0 && cp->nstart != cp->nend) {
		printf("WARNING: Final close of geom_dev(%s) %s %s\n",
		    gp->name,
		    "still has outstanding I/O after 10 seconds.",
		    "Completing close anyway, panic may happen later.");
	}
	g_topology_unlock();
	return (error);
}
222
/*
 * ioctl(2) handler for disk device nodes.
 *
 * XXX: Until we have unmessed the ioctl situation, there is a race against
 * XXX: a concurrent orphanization.  We cannot close it by holding topology
 * XXX: since that would prevent us from doing our job, and stalling events
 * XXX: will break (actually: stall) the BSD disklabel hacks.
 */
static int
g_dev_ioctl(struct cdev *dev, u_long cmd, caddr_t data, int fflag, struct thread *td)
{
	struct g_geom *gp;
	struct g_consumer *cp;
	struct g_provider *pp;
	struct g_kerneldump kd;
	off_t offset, length, chunk;
	int i, error;
	u_int u;

	gp = dev->si_drv1;
	cp = dev->si_drv2;
	pp = cp->provider;

	error = 0;
	KASSERT(cp->acr || cp->acw,
	    ("Consumer with zero access count in g_dev_ioctl"));

	/* Size of the ioctl argument buffer; passed to g_io_getattr(). */
	i = IOCPARM_LEN(cmd);
	switch (cmd) {
	case DIOCGSECTORSIZE:
		*(u_int *)data = cp->provider->sectorsize;
		if (*(u_int *)data == 0)
			error = ENOENT;
		break;
	case DIOCGMEDIASIZE:
		*(off_t *)data = cp->provider->mediasize;
		if (*(off_t *)data == 0)
			error = ENOENT;
		break;
	case DIOCGFWSECTORS:
		/* Firmware geometry is an attribute; 0 means "not known". */
		error = g_io_getattr("GEOM::fwsectors", cp, &i, data);
		if (error == 0 && *(u_int *)data == 0)
			error = ENOENT;
		break;
	case DIOCGFWHEADS:
		error = g_io_getattr("GEOM::fwheads", cp, &i, data);
		if (error == 0 && *(u_int *)data == 0)
			error = ENOENT;
		break;
	case DIOCGFRONTSTUFF:
		error = g_io_getattr("GEOM::frontstuff", cp, &i, data);
		break;
	case DIOCSKERNELDUMP:
		/* Zero argument clears the dump device; nonzero sets it. */
		u = *((u_int *)data);
		if (!u) {
			set_dumper(NULL);
			error = 0;
			break;
		}
		kd.offset = 0;
		kd.length = OFF_MAX;
		i = sizeof kd;
		/* The provider configures the dumper through this attribute. */
		error = g_io_getattr("GEOM::kerneldump", cp, &i, &kd);
		if (!error)
			dev->si_flags |= SI_DUMPDEV;
		break;
	case DIOCGFLUSH:
		error = g_io_flush(cp);
		break;
	case DIOCGDELETE:
		/* arg is { offset, length }; both must be sector-aligned. */
		offset = ((off_t *)data)[0];
		length = ((off_t *)data)[1];
		if ((offset % cp->provider->sectorsize) != 0 ||
		    (length % cp->provider->sectorsize) != 0 || length <= 0) {
			printf("%s: offset=%jd length=%jd\n", __func__, offset,
			    length);
			error = EINVAL;
			break;
		}
		/* Issue BIO_DELETEs in chunks of at most 1024 sectors. */
		while (length > 0) {
			chunk = length;
			if (chunk > 1024 * cp->provider->sectorsize)
				chunk = 1024 * cp->provider->sectorsize;
			error = g_delete_data(cp, offset, chunk);
			length -= chunk;
			offset += chunk;
			if (error)
				break;
			/*
			 * Since the request size is unbounded, the service
			 * time is likewise.  We make this ioctl interruptible
			 * by checking for signals for each bio.
			 */
			if (SIGPENDING(td))
				break;
		}
		break;
	case DIOCGIDENT:
		error = g_io_getattr("GEOM::ident", cp, &i, data);
		break;
	case DIOCGPROVIDERNAME:
		if (pp == NULL)
			return (ENOENT);
		strlcpy(data, pp->name, i);
		break;

	default:
		/* Unknown commands are offered to the provider's geom. */
		if (cp->provider->geom->ioctl != NULL) {
			error = cp->provider->geom->ioctl(cp->provider, cmd, data, fflag, td);
		} else {
			error = ENOIOCTL;
		}
	}

	return (error);
}
337
338 static void
339 g_dev_done(struct bio *bp2)
340 {
341 struct bio *bp;
342
343 bp = bp2->bio_parent;
344 bp->bio_error = bp2->bio_error;
345 if (bp->bio_error != 0) {
346 g_trace(G_T_BIO, "g_dev_done(%p) had error %d",
347 bp2, bp->bio_error);
348 bp->bio_flags |= BIO_ERROR;
349 } else {
350 g_trace(G_T_BIO, "g_dev_done(%p/%p) resid %ld completed %jd",
351 bp2, bp, bp->bio_resid, (intmax_t)bp2->bio_completed);
352 }
353 bp->bio_resid = bp->bio_length - bp2->bio_completed;
354 bp->bio_completed = bp2->bio_completed;
355 g_destroy_bio(bp2);
356 biodone(bp);
357 }
358
/*
 * Strategy routine: validate alignment, clone the incoming bio and hand
 * the clone down to our consumer.  Completion is reported via g_dev_done().
 */
static void
g_dev_strategy(struct bio *bp)
{
	struct g_consumer *cp;
	struct bio *bp2;
	struct cdev *dev;

	KASSERT(bp->bio_cmd == BIO_READ ||
	    bp->bio_cmd == BIO_WRITE ||
	    bp->bio_cmd == BIO_DELETE,
	    ("Wrong bio_cmd bio=%p cmd=%d", bp, bp->bio_cmd));
	dev = bp->bio_dev;
	cp = dev->si_drv2;
	KASSERT(cp->acr || cp->acw,
	    ("Consumer with zero access count in g_dev_strategy"));

	/* Offset and length must both be multiples of the sector size. */
	if ((bp->bio_offset % cp->provider->sectorsize) != 0 ||
	    (bp->bio_bcount % cp->provider->sectorsize) != 0) {
		bp->bio_resid = bp->bio_bcount;
		biofinish(bp, NULL, EINVAL);
		return;
	}

	/* Retry the clone until memory is available. */
	for (;;) {
		/*
		 * XXX: This is not an ideal solution, but I believe it to be
		 * XXX: deadlock safe, all things considered.
		 */
		bp2 = g_clone_bio(bp);
		if (bp2 != NULL)
			break;
		pause("gdstrat", hz / 10);
	}
	KASSERT(bp2 != NULL, ("XXX: ENOMEM in a bad place"));
	bp2->bio_done = g_dev_done;
	g_trace(G_T_BIO,
	    "g_dev_strategy(%p/%p) offset %jd length %jd data %p cmd %d",
	    bp, bp2, (intmax_t)bp->bio_offset, (intmax_t)bp2->bio_length,
	    bp2->bio_data, bp2->bio_cmd);
	g_io_request(bp2, cp);
	/* Re-check after issuing: a concurrent close would be a bug here. */
	KASSERT(cp->acr || cp->acw,
	    ("g_dev_strategy raced with g_dev_close and lost"));

}
403
/*
 * g_dev_orphan()
 *
 * Called from below when the provider orphaned us.
 * - Clear any dump settings.
 * - Destroy the struct cdev *to prevent any more request from coming in.  The
 *   provider is already marked with an error, so anything which comes in
 *   in the interim will be returned immediately.
 * - Wait for any outstanding I/O to finish.
 * - Set our access counts to zero, whatever they were.
 * - Detach and self-destruct.
 */

static void
g_dev_orphan(struct g_consumer *cp)
{
	struct g_geom *gp;
	struct cdev *dev;

	g_topology_assert();
	gp = cp->geom;
	dev = gp->softc;
	g_trace(G_T_TOPOLOGY, "g_dev_orphan(%p(%s))", cp, gp->name);

	/* Reset any dump-area set on this device */
	if (dev->si_flags & SI_DUMPDEV)
		set_dumper(NULL);

	/* Destroy the struct cdev *so we get no more requests */
	destroy_dev(dev);

	/* Wait for the cows to come home */
	while (cp->nstart != cp->nend)
		pause("gdevorphan", hz / 10);

	/* Drop whatever access counts are still held on the consumer. */
	if (cp->acr > 0 || cp->acw > 0 || cp->ace > 0)
		g_access(cp, -cp->acr, -cp->acw, -cp->ace);

	g_detach(cp);
	g_destroy_consumer(cp);
	g_destroy_geom(gp);
}
446
/* Register the DEV class with GEOM. */
DECLARE_GEOM_CLASS(g_dev_class, g_dev);
Cache object: 00b2a66256fed87d04a239c2e6babc1d
|