FreeBSD/Linux Kernel Cross Reference
sys/geom/geom_dev.c
1 /*-
2 * Copyright (c) 2002 Poul-Henning Kamp
3 * Copyright (c) 2002 Networks Associates Technology, Inc.
4 * All rights reserved.
5 *
6 * This software was developed for the FreeBSD Project by Poul-Henning Kamp
7 * and NAI Labs, the Security Research Division of Network Associates, Inc.
8 * under DARPA/SPAWAR contract N66001-01-C-8035 ("CBOSS"), as part of the
9 * DARPA CHATS research program.
10 *
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
13 * are met:
14 * 1. Redistributions of source code must retain the above copyright
15 * notice, this list of conditions and the following disclaimer.
16 * 2. Redistributions in binary form must reproduce the above copyright
17 * notice, this list of conditions and the following disclaimer in the
18 * documentation and/or other materials provided with the distribution.
19 * 3. The names of the authors may not be used to endorse or promote
20 * products derived from this software without specific prior written
21 * permission.
22 *
23 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
24 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
25 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
26 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
27 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
28 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
29 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
30 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
31 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
32 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
33 * SUCH DAMAGE.
34 */
35
36 #include <sys/cdefs.h>
37 __FBSDID("$FreeBSD: releng/8.3/sys/geom/geom_dev.c 223172 2011-06-17 05:55:41Z mav $");
38
39 #include <sys/param.h>
40 #include <sys/systm.h>
41 #include <sys/malloc.h>
42 #include <sys/kernel.h>
43 #include <sys/conf.h>
44 #include <sys/bio.h>
45 #include <sys/lock.h>
46 #include <sys/mutex.h>
47 #include <sys/proc.h>
48 #include <sys/errno.h>
49 #include <sys/time.h>
50 #include <sys/disk.h>
51 #include <sys/fcntl.h>
52 #include <sys/limits.h>
53 #include <geom/geom.h>
54 #include <geom/geom_int.h>
55
56 static d_open_t g_dev_open;
57 static d_close_t g_dev_close;
58 static d_strategy_t g_dev_strategy;
59 static d_ioctl_t g_dev_ioctl;
60
61 static struct cdevsw g_dev_cdevsw = {
62 .d_version = D_VERSION,
63 .d_open = g_dev_open,
64 .d_close = g_dev_close,
65 .d_read = physread,
66 .d_write = physwrite,
67 .d_ioctl = g_dev_ioctl,
68 .d_strategy = g_dev_strategy,
69 .d_name = "g_dev",
70 .d_flags = D_DISK | D_TRACKCLOSE,
71 };
72
73 static g_taste_t g_dev_taste;
74 static g_orphan_t g_dev_orphan;
75
76 static struct g_class g_dev_class = {
77 .name = "DEV",
78 .version = G_VERSION,
79 .taste = g_dev_taste,
80 .orphan = g_dev_orphan,
81 };
82
83 void
84 g_dev_print(void)
85 {
86 struct g_geom *gp;
87 char const *p = "";
88
89 LIST_FOREACH(gp, &g_dev_class.geom, geom) {
90 printf("%s%s", p, gp->name);
91 p = " ";
92 }
93 printf("\n");
94 }
95
96 struct g_provider *
97 g_dev_getprovider(struct cdev *dev)
98 {
99 struct g_consumer *cp;
100
101 g_topology_assert();
102 if (dev == NULL)
103 return (NULL);
104 if (dev->si_devsw != &g_dev_cdevsw)
105 return (NULL);
106 cp = dev->si_drv2;
107 return (cp->provider);
108 }
109
110
/*
 * Taste routine for the DEV class: attach a consumer to the new provider
 * and create a matching /dev node named after it.  Called with the
 * topology lock held whenever a provider appears.
 */
static struct g_geom *
g_dev_taste(struct g_class *mp, struct g_provider *pp, int insist __unused)
{
	struct g_geom *gp;
	struct g_consumer *cp;
	int error;
	struct cdev *dev;

	g_trace(G_T_TOPOLOGY, "dev_taste(%s,%s)", mp->name, pp->name);
	g_topology_assert();
	/* Decline if we already have a consumer on this provider. */
	LIST_FOREACH(cp, &pp->consumers, consumers)
		if (cp->geom->class == mp)
			return (NULL);
	gp = g_new_geomf(mp, pp->name);
	cp = g_new_consumer(gp);
	error = g_attach(cp, pp);
	KASSERT(error == 0,
	    ("g_dev_taste(%s) failed to g_attach, err=%d", pp->name, error));
	/* Node is named after the provider; root/operator 0640. */
	dev = make_dev(&g_dev_cdevsw, 0,
	    UID_ROOT, GID_OPERATOR, 0640, "%s", gp->name);
	if (pp->flags & G_PF_CANDELETE)
		dev->si_flags |= SI_CANDELETE;
	dev->si_iosize_max = MAXPHYS;
	/*
	 * Cross-link geom, consumer and cdev; the other entry points use
	 * si_drv1/si_drv2 to find their way back, and g_dev_open() checks
	 * gp->softc == dev to know tasting has finished.
	 */
	gp->softc = dev;
	dev->si_drv1 = gp;
	dev->si_drv2 = cp;
	return (gp);
}
139
/*
 * Open routine: translate the open mode into GEOM access count deltas
 * and apply them to our consumer with g_access().
 *
 * Returns ENXIO if tasting has not finished or the device was orphaned,
 * an error from the securelevel policy for write opens, or whatever
 * g_access() returns.
 */
static int
g_dev_open(struct cdev *dev, int flags, int fmt, struct thread *td)
{
	struct g_geom *gp;
	struct g_consumer *cp;
	int error, r, w, e;

	gp = dev->si_drv1;
	cp = dev->si_drv2;
	if (gp == NULL || cp == NULL || gp->softc != dev)
		return(ENXIO);		/* g_dev_taste() not done yet */

	g_trace(G_T_ACCESS, "g_dev_open(%s, %d, %d, %p)",
	    gp->name, flags, fmt, td);

	r = flags & FREAD ? 1 : 0;
	w = flags & FWRITE ? 1 : 0;
#ifdef notyet
	/* Exclusive access (O_EXCL) is not wired up yet. */
	e = flags & O_EXCL ? 1 : 0;
#else
	e = 0;
#endif
	if (w) {
		/*
		 * When running in very secure mode, do not allow
		 * opens for writing of any disks.
		 */
		error = securelevel_ge(td->td_ucred, 2);
		if (error)
			return (error);
	}
	/* si_devsw must be re-checked under the topology lock. */
	g_topology_lock();
	if (dev->si_devsw == NULL)
		error = ENXIO;		/* We were orphaned */
	else
		error = g_access(cp, r, w, e);
	g_topology_unlock();
	return(error);
}
179
/*
 * Close routine: drop the access counts acquired in g_dev_open() and,
 * on the final close, give outstanding I/O up to ten seconds to drain
 * before returning.
 */
static int
g_dev_close(struct cdev *dev, int flags, int fmt, struct thread *td)
{
	struct g_geom *gp;
	struct g_consumer *cp;
	int error, r, w, e, i;

	gp = dev->si_drv1;
	cp = dev->si_drv2;
	if (gp == NULL || cp == NULL)
		return(ENXIO);
	g_trace(G_T_ACCESS, "g_dev_close(%s, %d, %d, %p)",
	    gp->name, flags, fmt, td);
	/* Negative deltas: we are releasing access. */
	r = flags & FREAD ? -1 : 0;
	w = flags & FWRITE ? -1 : 0;
#ifdef notyet
	e = flags & O_EXCL ? -1 : 0;
#else
	e = 0;
#endif
	g_topology_lock();
	if (dev->si_devsw == NULL)
		error = ENXIO;		/* We were orphaned */
	else
		error = g_access(cp, r, w, e);
	/*
	 * If this was the last close (acr == acw == 0), poll at 10 Hz for
	 * at most 10 seconds until the consumer has no in-flight bios
	 * (nstart == nend).
	 */
	for (i = 0; i < 10 * hz;) {
		if (cp->acr != 0 || cp->acw != 0)
			break;
		if (cp->nstart == cp->nend)
			break;
		pause("gdevwclose", hz / 10);
		i += hz / 10;
	}
	/* I/O still outstanding after the timeout: warn, but complete. */
	if (cp->acr == 0 && cp->acw == 0 && cp->nstart != cp->nend) {
		printf("WARNING: Final close of geom_dev(%s) %s %s\n",
		    gp->name,
		    "still has outstanding I/O after 10 seconds.",
		    "Completing close anyway, panic may happen later.");
	}
	g_topology_unlock();
	return (error);
}
222
223 /*
224 * XXX: Until we have unmessed the ioctl situation, there is a race against
225 * XXX: a concurrent orphanization. We cannot close it by holding topology
226 * XXX: since that would prevent us from doing our job, and stalling events
227 * XXX: will break (actually: stall) the BSD disklabel hacks.
228 */
229 static int
230 g_dev_ioctl(struct cdev *dev, u_long cmd, caddr_t data, int fflag, struct thread *td)
231 {
232 struct g_geom *gp;
233 struct g_consumer *cp;
234 struct g_provider *pp;
235 struct g_kerneldump kd;
236 off_t offset, length, chunk;
237 int i, error;
238 u_int u;
239
240 gp = dev->si_drv1;
241 cp = dev->si_drv2;
242 pp = cp->provider;
243
244 error = 0;
245 KASSERT(cp->acr || cp->acw,
246 ("Consumer with zero access count in g_dev_ioctl"));
247
248 i = IOCPARM_LEN(cmd);
249 switch (cmd) {
250 case DIOCGSECTORSIZE:
251 *(u_int *)data = cp->provider->sectorsize;
252 if (*(u_int *)data == 0)
253 error = ENOENT;
254 break;
255 case DIOCGMEDIASIZE:
256 *(off_t *)data = cp->provider->mediasize;
257 if (*(off_t *)data == 0)
258 error = ENOENT;
259 break;
260 case DIOCGFWSECTORS:
261 error = g_io_getattr("GEOM::fwsectors", cp, &i, data);
262 if (error == 0 && *(u_int *)data == 0)
263 error = ENOENT;
264 break;
265 case DIOCGFWHEADS:
266 error = g_io_getattr("GEOM::fwheads", cp, &i, data);
267 if (error == 0 && *(u_int *)data == 0)
268 error = ENOENT;
269 break;
270 case DIOCGFRONTSTUFF:
271 error = g_io_getattr("GEOM::frontstuff", cp, &i, data);
272 break;
273 case DIOCSKERNELDUMP:
274 u = *((u_int *)data);
275 if (!u) {
276 set_dumper(NULL);
277 error = 0;
278 break;
279 }
280 kd.offset = 0;
281 kd.length = OFF_MAX;
282 i = sizeof kd;
283 error = g_io_getattr("GEOM::kerneldump", cp, &i, &kd);
284 if (!error) {
285 error = set_dumper(&kd.di);
286 if (!error)
287 dev->si_flags |= SI_DUMPDEV;
288 }
289 break;
290 case DIOCGFLUSH:
291 error = g_io_flush(cp);
292 break;
293 case DIOCGDELETE:
294 offset = ((off_t *)data)[0];
295 length = ((off_t *)data)[1];
296 if ((offset % cp->provider->sectorsize) != 0 ||
297 (length % cp->provider->sectorsize) != 0 || length <= 0) {
298 printf("%s: offset=%jd length=%jd\n", __func__, offset,
299 length);
300 error = EINVAL;
301 break;
302 }
303 while (length > 0) {
304 chunk = length;
305 if (chunk > 65536 * cp->provider->sectorsize)
306 chunk = 65536 * cp->provider->sectorsize;
307 error = g_delete_data(cp, offset, chunk);
308 length -= chunk;
309 offset += chunk;
310 if (error)
311 break;
312 /*
313 * Since the request size is unbounded, the service
314 * time is likewise. We make this ioctl interruptible
315 * by checking for signals for each bio.
316 */
317 if (SIGPENDING(td))
318 break;
319 }
320 break;
321 case DIOCGIDENT:
322 error = g_io_getattr("GEOM::ident", cp, &i, data);
323 break;
324 case DIOCGPROVIDERNAME:
325 if (pp == NULL)
326 return (ENOENT);
327 strlcpy(data, pp->name, i);
328 break;
329 case DIOCGSTRIPESIZE:
330 *(off_t *)data = cp->provider->stripesize;
331 break;
332 case DIOCGSTRIPEOFFSET:
333 *(off_t *)data = cp->provider->stripeoffset;
334 break;
335 default:
336 if (cp->provider->geom->ioctl != NULL) {
337 error = cp->provider->geom->ioctl(cp->provider, cmd, data, fflag, td);
338 } else {
339 error = ENOIOCTL;
340 }
341 }
342
343 return (error);
344 }
345
346 static void
347 g_dev_done(struct bio *bp2)
348 {
349 struct bio *bp;
350
351 bp = bp2->bio_parent;
352 bp->bio_error = bp2->bio_error;
353 if (bp->bio_error != 0) {
354 g_trace(G_T_BIO, "g_dev_done(%p) had error %d",
355 bp2, bp->bio_error);
356 bp->bio_flags |= BIO_ERROR;
357 } else {
358 g_trace(G_T_BIO, "g_dev_done(%p/%p) resid %ld completed %jd",
359 bp2, bp, bp->bio_resid, (intmax_t)bp2->bio_completed);
360 }
361 bp->bio_resid = bp->bio_length - bp2->bio_completed;
362 bp->bio_completed = bp2->bio_completed;
363 g_destroy_bio(bp2);
364 biodone(bp);
365 }
366
/*
 * Strategy routine: clone the incoming bio and hand the clone to our
 * consumer.  The original bio is completed from g_dev_done() once the
 * clone finishes.
 */
static void
g_dev_strategy(struct bio *bp)
{
	struct g_consumer *cp;
	struct bio *bp2;
	struct cdev *dev;

	KASSERT(bp->bio_cmd == BIO_READ ||
	    bp->bio_cmd == BIO_WRITE ||
	    bp->bio_cmd == BIO_DELETE,
	    ("Wrong bio_cmd bio=%p cmd=%d", bp, bp->bio_cmd));
	dev = bp->bio_dev;
	cp = dev->si_drv2;
	KASSERT(cp->acr || cp->acw,
	    ("Consumer with zero access count in g_dev_strategy"));
#ifdef INVARIANTS
	/* Fail requests that are not sector-aligned in offset or size. */
	if ((bp->bio_offset % cp->provider->sectorsize) != 0 ||
	    (bp->bio_bcount % cp->provider->sectorsize) != 0) {
		bp->bio_resid = bp->bio_bcount;
		biofinish(bp, NULL, EINVAL);
		return;
	}
#endif
	/* Retry the clone until bio memory becomes available. */
	for (;;) {
		/*
		 * XXX: This is not an ideal solution, but I belive it to
		 * XXX: deadlock safe, all things considered.
		 */
		bp2 = g_clone_bio(bp);
		if (bp2 != NULL)
			break;
		pause("gdstrat", hz / 10);
	}
	KASSERT(bp2 != NULL, ("XXX: ENOMEM in a bad place"));
	bp2->bio_done = g_dev_done;
	g_trace(G_T_BIO,
	    "g_dev_strategy(%p/%p) offset %jd length %jd data %p cmd %d",
	    bp, bp2, (intmax_t)bp->bio_offset, (intmax_t)bp2->bio_length,
	    bp2->bio_data, bp2->bio_cmd);
	g_io_request(bp2, cp);
	/*
	 * NOTE(review): this re-checks the access counts after the request
	 * has been issued; it documents that a close racing past us here
	 * would be a bug in the caller.
	 */
	KASSERT(cp->acr || cp->acw,
	    ("g_dev_strategy raced with g_dev_close and lost"));

}
411
412 /*
413 * g_dev_orphan()
414 *
415 * Called from below when the provider orphaned us.
416 * - Clear any dump settings.
417 * - Destroy the struct cdev *to prevent any more request from coming in. The
418 * provider is already marked with an error, so anything which comes in
419 * in the interrim will be returned immediately.
420 * - Wait for any outstanding I/O to finish.
421 * - Set our access counts to zero, whatever they were.
422 * - Detach and self-destruct.
423 */
424
static void
g_dev_orphan(struct g_consumer *cp)
{
	struct g_geom *gp;
	struct cdev *dev;

	g_topology_assert();
	gp = cp->geom;
	dev = gp->softc;	/* The cdev created by g_dev_taste(). */
	g_trace(G_T_TOPOLOGY, "g_dev_orphan(%p(%s))", cp, gp->name);

	/* Reset any dump-area set on this device */
	if (dev->si_flags & SI_DUMPDEV)
		set_dumper(NULL);

	/* Destroy the struct cdev *so we get no more requests */
	destroy_dev(dev);

	/* Wait for the cows to come home */
	while (cp->nstart != cp->nend)
		pause("gdevorphan", hz / 10);

	/* Release whatever access counts are still open on the consumer. */
	if (cp->acr > 0 || cp->acw > 0 || cp->ace > 0)
		g_access(cp, -cp->acr, -cp->acw, -cp->ace);

	/* Detach from the (dying) provider and self-destruct. */
	g_detach(cp);
	g_destroy_consumer(cp);
	g_destroy_geom(gp);
}
454
455 DECLARE_GEOM_CLASS(g_dev_class, g_dev);
Cache object: 7cf998739b9fb70d181776fffe68defb
|