FreeBSD/Linux Kernel Cross Reference
sys/geom/geom_dev.c
1 /*-
2 * Copyright (c) 2002 Poul-Henning Kamp
3 * Copyright (c) 2002 Networks Associates Technology, Inc.
4 * All rights reserved.
5 *
6 * This software was developed for the FreeBSD Project by Poul-Henning Kamp
7 * and NAI Labs, the Security Research Division of Network Associates, Inc.
8 * under DARPA/SPAWAR contract N66001-01-C-8035 ("CBOSS"), as part of the
9 * DARPA CHATS research program.
10 *
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
13 * are met:
14 * 1. Redistributions of source code must retain the above copyright
15 * notice, this list of conditions and the following disclaimer.
16 * 2. Redistributions in binary form must reproduce the above copyright
17 * notice, this list of conditions and the following disclaimer in the
18 * documentation and/or other materials provided with the distribution.
19 * 3. The names of the authors may not be used to endorse or promote
20 * products derived from this software without specific prior written
21 * permission.
22 *
23 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
24 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
25 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
26 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
27 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
28 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
29 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
30 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
31 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
32 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
33 * SUCH DAMAGE.
34 */
35
36 #include <sys/cdefs.h>
37 __FBSDID("$FreeBSD$");
38
39 #include <sys/param.h>
40 #include <sys/systm.h>
41 #include <sys/malloc.h>
42 #include <sys/kernel.h>
43 #include <sys/conf.h>
44 #include <sys/bio.h>
45 #include <sys/lock.h>
46 #include <sys/mutex.h>
47 #include <sys/proc.h>
48 #include <sys/errno.h>
49 #include <sys/time.h>
50 #include <sys/disk.h>
51 #include <sys/fcntl.h>
52 #include <sys/limits.h>
53 #include <sys/sysctl.h>
54 #include <geom/geom.h>
55 #include <geom/geom_int.h>
56
/* Character-device entry points implemented below. */
static d_open_t g_dev_open;
static d_close_t g_dev_close;
static d_strategy_t g_dev_strategy;
static d_ioctl_t g_dev_ioctl;

/*
 * Device switch for every /dev node created on top of a GEOM provider.
 * Reads and writes go through physio (physread/physwrite), which turns
 * them into strategy calls.  D_TRACKCLOSE requests a d_close call for
 * every close so the access counts taken in g_dev_open() stay balanced.
 */
static struct cdevsw g_dev_cdevsw = {
	.d_version = D_VERSION,
	.d_open = g_dev_open,
	.d_close = g_dev_close,
	.d_read = physread,
	.d_write = physwrite,
	.d_ioctl = g_dev_ioctl,
	.d_strategy = g_dev_strategy,
	.d_name = "g_dev",
	.d_flags = D_DISK | D_TRACKCLOSE,
};

static g_taste_t g_dev_taste;
static g_orphan_t g_dev_orphan;

/*
 * The DEV class tastes every provider and hangs a /dev node off it; it
 * is how userland gets at GEOM providers at all.
 */
static struct g_class g_dev_class = {
	.name = "DEV",
	.version = G_VERSION,
	.taste = g_dev_taste,
	.orphan = g_dev_orphan,
};

/*
 * We target 262144 (8 x 32768) sectors by default as this significantly
 * increases the throughput on commonly used SSD's with a marginal
 * increase in non-interruptible request latency.
 * Tunable via the kern.geom.dev.delete_max_sectors sysctl below.
 */
static uint64_t g_dev_del_max_sectors = 262144;
SYSCTL_DECL(_kern_geom);
SYSCTL_NODE(_kern_geom, OID_AUTO, dev, CTLFLAG_RW, 0, "GEOM_DEV stuff");
SYSCTL_QUAD(_kern_geom_dev, OID_AUTO, delete_max_sectors, CTLFLAG_RW,
    &g_dev_del_max_sectors, 0, "Maximum number of sectors in a single "
    "delete request sent to the provider. Larger requests are chunked "
    "so they can be interrupted. (0 = disable chunking)");
96
97 void
98 g_dev_print(void)
99 {
100 struct g_geom *gp;
101 char const *p = "";
102
103 LIST_FOREACH(gp, &g_dev_class.geom, geom) {
104 printf("%s%s", p, gp->name);
105 p = " ";
106 }
107 printf("\n");
108 }
109
110 struct g_provider *
111 g_dev_getprovider(struct cdev *dev)
112 {
113 struct g_consumer *cp;
114
115 g_topology_assert();
116 if (dev == NULL)
117 return (NULL);
118 if (dev->si_devsw != &g_dev_cdevsw)
119 return (NULL);
120 cp = dev->si_drv2;
121 return (cp->provider);
122 }
123
124
/*
 * Taste function for the DEV class: called for every provider that
 * appears.  Creates a geom/consumer pair attached to the provider and a
 * /dev node named after it, plus an optional alias node derived from a
 * kern.devalias.<name-prefix> kernel environment variable.
 */
static struct g_geom *
g_dev_taste(struct g_class *mp, struct g_provider *pp, int insist __unused)
{
	struct g_geom *gp;
	struct g_consumer *cp;
	int error, len;
	struct cdev *dev, *adev;
	char buf[64], *val;

	g_trace(G_T_TOPOLOGY, "dev_taste(%s,%s)", mp->name, pp->name);
	g_topology_assert();
	/* If this provider already has a DEV consumer, nothing to do. */
	LIST_FOREACH(cp, &pp->consumers, consumers)
		if (cp->geom->class == mp)
			return (NULL);
	gp = g_new_geomf(mp, pp->name);
	cp = g_new_consumer(gp);
	error = g_attach(cp, pp);
	/* A freshly created consumer can always attach. */
	KASSERT(error == 0,
	    ("g_dev_taste(%s) failed to g_attach, err=%d", pp->name, error));
	dev = make_dev(&g_dev_cdevsw, 0,
	    UID_ROOT, GID_OPERATOR, 0640, "%s", gp->name);

	/* Search for device alias name and create it if found. */
	adev = NULL;
	/*
	 * Try progressively shorter prefixes of the provider name; the 15
	 * reserves room in buf for "kern.devalias." (14 chars) plus NUL.
	 */
	for (len = MIN(strlen(gp->name), sizeof(buf) - 15); len > 0; len--) {
		snprintf(buf, sizeof(buf), "kern.devalias.%s", gp->name);
		buf[14 + len] = 0;	/* truncate to the current prefix */
		val = getenv(buf);
		if (val != NULL) {
			/* Alias = tunable value + unmatched name suffix. */
			snprintf(buf, sizeof(buf), "%s%s",
			    val, gp->name + len);
			freeenv(val);
			adev = make_dev_alias(dev, buf);
			break;
		}
	}

	if (pp->flags & G_PF_CANDELETE)
		dev->si_flags |= SI_CANDELETE;
	dev->si_iosize_max = MAXPHYS;
	/* Cross-link geom, consumer and cdev for the other entry points. */
	gp->softc = dev;
	dev->si_drv1 = gp;
	dev->si_drv2 = cp;
	/* Mirror the settings on the alias node, if one was created. */
	if (adev != NULL) {
		if (pp->flags & G_PF_CANDELETE)
			adev->si_flags |= SI_CANDELETE;
		adev->si_iosize_max = MAXPHYS;
		adev->si_drv1 = gp;
		adev->si_drv2 = cp;
	}
	return (gp);
}
177
/*
 * Open handler: translate the open into a g_access() call on the
 * consumer, incrementing the read/write access counts requested by the
 * open flags.
 */
static int
g_dev_open(struct cdev *dev, int flags, int fmt, struct thread *td)
{
	struct g_geom *gp;
	struct g_consumer *cp;
	int error, r, w, e;

	gp = dev->si_drv1;
	cp = dev->si_drv2;
	if (gp == NULL || cp == NULL || gp->softc != dev)
		return(ENXIO);		/* g_dev_taste() not done yet */

	g_trace(G_T_ACCESS, "g_dev_open(%s, %d, %d, %p)",
	    gp->name, flags, fmt, td);

	r = flags & FREAD ? 1 : 0;
	w = flags & FWRITE ? 1 : 0;
#ifdef notyet
	/* Exclusive access via O_EXCL is not wired up yet. */
	e = flags & O_EXCL ? 1 : 0;
#else
	e = 0;
#endif
	if (w) {
		/*
		 * When running in very secure mode, do not allow
		 * opens for writing of any disks.
		 */
		error = securelevel_ge(td->td_ucred, 2);
		if (error)
			return (error);
	}
	/* g_access() requires the topology lock. */
	g_topology_lock();
	if (dev->si_devsw == NULL)
		error = ENXIO;		/* We were orphaned */
	else
		error = g_access(cp, r, w, e);
	g_topology_unlock();
	return(error);
}
217
/*
 * Close handler: drop the access counts taken in g_dev_open().  On the
 * last close, wait up to ten seconds for outstanding I/O to drain
 * before returning; D_TRACKCLOSE guarantees we are called once per
 * close so the counts balance.
 */
static int
g_dev_close(struct cdev *dev, int flags, int fmt, struct thread *td)
{
	struct g_geom *gp;
	struct g_consumer *cp;
	int error, r, w, e, i;

	gp = dev->si_drv1;
	cp = dev->si_drv2;
	if (gp == NULL || cp == NULL)
		return(ENXIO);
	g_trace(G_T_ACCESS, "g_dev_close(%s, %d, %d, %p)",
	    gp->name, flags, fmt, td);
	r = flags & FREAD ? -1 : 0;
	w = flags & FWRITE ? -1 : 0;
#ifdef notyet
	e = flags & O_EXCL ? -1 : 0;
#else
	e = 0;
#endif
	g_topology_lock();
	if (dev->si_devsw == NULL)
		error = ENXIO;		/* We were orphaned */
	else
		error = g_access(cp, r, w, e);
	/*
	 * If this was the final close, poll (in hz/10 steps, for at most
	 * 10 seconds) until all in-flight requests have completed;
	 * requests finishing against a fully closed consumer can panic.
	 */
	for (i = 0; i < 10 * hz;) {
		if (cp->acr != 0 || cp->acw != 0)
			break;		/* not the final close after all */
		if (cp->nstart == cp->nend)
			break;		/* all I/O has drained */
		pause("gdevwclose", hz / 10);
		i += hz / 10;
	}
	if (cp->acr == 0 && cp->acw == 0 && cp->nstart != cp->nend) {
		printf("WARNING: Final close of geom_dev(%s) %s %s\n",
		    gp->name,
		    "still has outstanding I/O after 10 seconds.",
		    "Completing close anyway, panic may happen later.");
	}
	g_topology_unlock();
	return (error);
}
260
/*
 * XXX: Until we have unmessed the ioctl situation, there is a race against
 * XXX: a concurrent orphanization. We cannot close it by holding topology
 * XXX: since that would prevent us from doing our job, and stalling events
 * XXX: will break (actually: stall) the BSD disklabel hacks.
 */
/*
 * Ioctl handler: implements the generic DIOC* disk ioctls in terms of
 * the GEOM consumer/provider, and forwards anything unrecognized to the
 * provider's geom-specific ioctl method (if any).
 */
static int
g_dev_ioctl(struct cdev *dev, u_long cmd, caddr_t data, int fflag, struct thread *td)
{
	struct g_geom *gp;
	struct g_consumer *cp;
	struct g_provider *pp;
	struct g_kerneldump kd;
	off_t offset, length, chunk;
	int i, error;
	u_int u;

	gp = dev->si_drv1;
	cp = dev->si_drv2;
	pp = cp->provider;

	error = 0;
	/* Ioctls only arrive between open and close. */
	KASSERT(cp->acr || cp->acw,
	    ("Consumer with zero access count in g_dev_ioctl"));

	i = IOCPARM_LEN(cmd);	/* size of the ioctl argument buffer */
	switch (cmd) {
	case DIOCGSECTORSIZE:
		*(u_int *)data = cp->provider->sectorsize;
		if (*(u_int *)data == 0)
			error = ENOENT;
		break;
	case DIOCGMEDIASIZE:
		*(off_t *)data = cp->provider->mediasize;
		if (*(off_t *)data == 0)
			error = ENOENT;
		break;
	case DIOCGFWSECTORS:
		error = g_io_getattr("GEOM::fwsectors", cp, &i, data);
		if (error == 0 && *(u_int *)data == 0)
			error = ENOENT;
		break;
	case DIOCGFWHEADS:
		error = g_io_getattr("GEOM::fwheads", cp, &i, data);
		if (error == 0 && *(u_int *)data == 0)
			error = ENOENT;
		break;
	case DIOCGFRONTSTUFF:
		error = g_io_getattr("GEOM::frontstuff", cp, &i, data);
		break;
	case DIOCSKERNELDUMP:
		/* Argument 0 disables crash dumps to this device. */
		u = *((u_int *)data);
		if (!u) {
			set_dumper(NULL);
			error = 0;
			break;
		}
		/* Ask the provider for its kernel-dump parameters. */
		kd.offset = 0;
		kd.length = OFF_MAX;
		i = sizeof kd;
		error = g_io_getattr("GEOM::kerneldump", cp, &i, &kd);
		if (!error) {
			error = set_dumper(&kd.di);
			if (!error)
				dev->si_flags |= SI_DUMPDEV;
		}
		break;
	case DIOCGFLUSH:
		error = g_io_flush(cp);
		break;
	case DIOCGDELETE:
		/* data is { offset, length }, both in bytes. */
		offset = ((off_t *)data)[0];
		length = ((off_t *)data)[1];
		if ((offset % cp->provider->sectorsize) != 0 ||
		    (length % cp->provider->sectorsize) != 0 || length <= 0) {
			printf("%s: offset=%jd length=%jd\n", __func__, offset,
			    length);
			error = EINVAL;
			break;
		}
		while (length > 0) {
			/*
			 * Chunk the request at g_dev_del_max_sectors
			 * (0 disables chunking) so large deletes stay
			 * interruptible.
			 */
			chunk = length;
			if (g_dev_del_max_sectors != 0 && chunk >
			    g_dev_del_max_sectors * cp->provider->sectorsize) {
				chunk = g_dev_del_max_sectors *
				    cp->provider->sectorsize;
			}
			error = g_delete_data(cp, offset, chunk);
			length -= chunk;
			offset += chunk;
			if (error)
				break;
			/*
			 * Since the request size can be large, the service
			 * time can be long as well.  We make this ioctl
			 * interruptible by checking for signals for each bio.
			 */
			if (SIGPENDING(td))
				break;
		}
		break;
	case DIOCGIDENT:
		error = g_io_getattr("GEOM::ident", cp, &i, data);
		break;
	case DIOCGPROVIDERNAME:
		if (pp == NULL)
			return (ENOENT);
		strlcpy(data, pp->name, i);
		break;
	case DIOCGSTRIPESIZE:
		*(off_t *)data = cp->provider->stripesize;
		break;
	case DIOCGSTRIPEOFFSET:
		*(off_t *)data = cp->provider->stripeoffset;
		break;
	default:
		/* Give the provider's geom a chance at class-specific ioctls. */
		if (cp->provider->geom->ioctl != NULL) {
			error = cp->provider->geom->ioctl(cp->provider, cmd, data, fflag, td);
		} else {
			error = ENOIOCTL;
		}
	}

	return (error);
}
386
387 static void
388 g_dev_done(struct bio *bp2)
389 {
390 struct bio *bp;
391
392 bp = bp2->bio_parent;
393 bp->bio_error = bp2->bio_error;
394 if (bp->bio_error != 0) {
395 g_trace(G_T_BIO, "g_dev_done(%p) had error %d",
396 bp2, bp->bio_error);
397 bp->bio_flags |= BIO_ERROR;
398 } else {
399 g_trace(G_T_BIO, "g_dev_done(%p/%p) resid %ld completed %jd",
400 bp2, bp, bp->bio_resid, (intmax_t)bp2->bio_completed);
401 }
402 bp->bio_resid = bp->bio_length - bp2->bio_completed;
403 bp->bio_completed = bp2->bio_completed;
404 g_destroy_bio(bp2);
405 biodone(bp);
406 }
407
/*
 * Strategy routine: clone the incoming bio and hand the clone to the
 * GEOM consumer.  Completion is reported back via g_dev_done(), which
 * finishes the original bio.
 */
static void
g_dev_strategy(struct bio *bp)
{
	struct g_consumer *cp;
	struct bio *bp2;
	struct cdev *dev;

	KASSERT(bp->bio_cmd == BIO_READ ||
	    bp->bio_cmd == BIO_WRITE ||
	    bp->bio_cmd == BIO_DELETE,
	    ("Wrong bio_cmd bio=%p cmd=%d", bp, bp->bio_cmd));
	dev = bp->bio_dev;
	cp = dev->si_drv2;
	KASSERT(cp->acr || cp->acw,
	    ("Consumer with zero access count in g_dev_strategy"));
#ifdef INVARIANTS
	/* Reject transfers that are not sector-size multiples. */
	if ((bp->bio_offset % cp->provider->sectorsize) != 0 ||
	    (bp->bio_bcount % cp->provider->sectorsize) != 0) {
		bp->bio_resid = bp->bio_bcount;
		biofinish(bp, NULL, EINVAL);
		return;
	}
#endif
	/* Retry the clone until memory becomes available. */
	for (;;) {
		/*
		 * XXX: This is not an ideal solution, but I believe it to
		 * XXX: be deadlock safe, all things considered.
		 */
		bp2 = g_clone_bio(bp);
		if (bp2 != NULL)
			break;
		pause("gdstrat", hz / 10);
	}
	KASSERT(bp2 != NULL, ("XXX: ENOMEM in a bad place"));
	bp2->bio_done = g_dev_done;
	g_trace(G_T_BIO,
	    "g_dev_strategy(%p/%p) offset %jd length %jd data %p cmd %d",
	    bp, bp2, (intmax_t)bp->bio_offset, (intmax_t)bp2->bio_length,
	    bp2->bio_data, bp2->bio_cmd);
	g_io_request(bp2, cp);
	KASSERT(cp->acr || cp->acw,
	    ("g_dev_strategy raced with g_dev_close and lost"));

}
452
/*
 * g_dev_orphan()
 *
 * Called from below when the provider orphaned us.
 * - Clear any dump settings.
 * - Destroy the struct cdev * to prevent any more requests from coming
 *   in.  The provider is already marked with an error, so anything which
 *   comes in in the interim will be returned immediately.
 * - Wait for any outstanding I/O to finish.
 * - Set our access counts to zero, whatever they were.
 * - Detach and self-destruct.
 */

static void
g_dev_orphan(struct g_consumer *cp)
{
	struct g_geom *gp;
	struct cdev *dev;

	g_topology_assert();
	gp = cp->geom;
	dev = gp->softc;
	g_trace(G_T_TOPOLOGY, "g_dev_orphan(%p(%s))", cp, gp->name);

	/* Reset any dump-area set on this device */
	if (dev->si_flags & SI_DUMPDEV)
		set_dumper(NULL);

	/* Destroy the struct cdev * so we get no more requests */
	destroy_dev(dev);

	/* Wait for the cows to come home (all I/O to complete). */
	while (cp->nstart != cp->nend)
		pause("gdevorphan", hz / 10);

	/* Drop whatever access counts remain so detach is legal. */
	if (cp->acr > 0 || cp->acw > 0 || cp->ace > 0)
		g_access(cp, -cp->acr, -cp->acw, -cp->ace);

	g_detach(cp);
	g_destroy_consumer(cp);
	g_destroy_geom(gp);
}
495
/* Register the DEV class with GEOM at module load time. */
DECLARE_GEOM_CLASS(g_dev_class, g_dev);
Cache object: 381f97d4b4adbdb48cf16f702845885b
|