sys/geom/geom_dev.c
/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2002 Poul-Henning Kamp
 * Copyright (c) 2002 Networks Associates Technology, Inc.
 * All rights reserved.
 *
 * This software was developed for the FreeBSD Project by Poul-Henning Kamp
 * and NAI Labs, the Security Research Division of Network Associates, Inc.
 * under DARPA/SPAWAR contract N66001-01-C-8035 ("CBOSS"), as part of the
 * DARPA CHATS research program.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The names of the authors may not be used to endorse or promote
 *    products derived from this software without specific prior written
 *    permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/conf.h>
#include <sys/ctype.h>
#include <sys/bio.h>
#include <sys/bus.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/errno.h>
#include <sys/time.h>
#include <sys/disk.h>
#include <sys/fcntl.h>
#include <sys/limits.h>
#include <sys/sysctl.h>
#include <geom/geom.h>
#include <geom/geom_int.h>
#include <machine/stdarg.h>

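/*
 * Per-cdev softc.  sc_mtx protects the sc_open bookkeeping.  sc_active packs
 * the number of in-flight requests into its low 30 bits (SC_A_ACTIVE)
 * together with the SC_A_OPEN and SC_A_DESTROY state bits, so g_dev_done()
 * can decide with a single atomic update whether to wake a closing thread or
 * schedule destruction.
 */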
struct g_dev_softc {
	struct mtx	 sc_mtx;
	struct cdev	*sc_dev;
	struct cdev	*sc_alias;
	int		 sc_open;
	u_int		 sc_active;
#define	SC_A_DESTROY	(1 << 31)
#define	SC_A_OPEN	(1 << 30)
#define	SC_A_ACTIVE	(SC_A_OPEN - 1)
};

static d_open_t		g_dev_open;
static d_close_t	g_dev_close;
static d_strategy_t	g_dev_strategy;
static d_ioctl_t	g_dev_ioctl;

static struct cdevsw g_dev_cdevsw = {
	.d_version =	D_VERSION,
	.d_open =	g_dev_open,
	.d_close =	g_dev_close,
	.d_read =	physread,
	.d_write =	physwrite,
	.d_ioctl =	g_dev_ioctl,
	.d_strategy =	g_dev_strategy,
	.d_name =	"g_dev",
	.d_flags =	D_DISK | D_TRACKCLOSE,
};

static g_init_t g_dev_init;
static g_fini_t g_dev_fini;
static g_taste_t g_dev_taste;
static g_orphan_t g_dev_orphan;
static g_attrchanged_t g_dev_attrchanged;

static struct g_class g_dev_class = {
	.name = "DEV",
	.version = G_VERSION,
	.init = g_dev_init,
	.fini = g_dev_fini,
	.taste = g_dev_taste,
	.orphan = g_dev_orphan,
	.attrchanged = g_dev_attrchanged
};

/*
 * We target 262144 (8 x 32768) sectors by default as this significantly
 * increases the throughput on commonly used SSDs with a marginal
 * increase in non-interruptible request latency.
 */
static uint64_t g_dev_del_max_sectors = 262144;
SYSCTL_DECL(_kern_geom);
SYSCTL_NODE(_kern_geom, OID_AUTO, dev, CTLFLAG_RW, 0, "GEOM_DEV stuff");
SYSCTL_QUAD(_kern_geom_dev, OID_AUTO, delete_max_sectors, CTLFLAG_RW,
    &g_dev_del_max_sectors, 0, "Maximum number of sectors in a single "
    "delete request sent to the provider. Larger requests are chunked "
    "so they can be interrupted. (0 = disable chunking)");

static char *dumpdev = NULL;
static void
g_dev_init(struct g_class *mp)
{

	dumpdev = kern_getenv("dumpdev");
}

static void
g_dev_fini(struct g_class *mp)
{

	freeenv(dumpdev);
	dumpdev = NULL;
}

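/*
 * Configure (or clear) the kernel dump device.  The provider is queried
 * through the GEOM::kerneldump attribute and the result is handed to
 * set_dumper(); on success the cdev is flagged with SI_DUMPDEV.  A NULL
 * dev/kda clears the current dumper.
 */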
static int
g_dev_setdumpdev(struct cdev *dev, struct diocskerneldump_arg *kda,
    struct thread *td)
{
	struct g_kerneldump kd;
	struct g_consumer *cp;
	int error, len;

	if (dev == NULL || kda == NULL)
		return (clear_dumper(td));

	cp = dev->si_drv2;
	len = sizeof(kd);
	memset(&kd, 0, len);
	kd.offset = 0;
	kd.length = OFF_MAX;
	error = g_io_getattr("GEOM::kerneldump", cp, &len, &kd);
	if (error != 0)
		return (error);

	error = set_dumper(&kd.di, devtoname(dev), td, kda->kda_compression,
	    kda->kda_encryption, kda->kda_key, kda->kda_encryptedkeysize,
	    kda->kda_encryptedkey);
	if (error == 0)
		dev->si_flags |= SI_DUMPDEV;

	return (error);
}

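/*
 * If the "dumpdev" kernel environment variable names this device (with or
 * without a "/dev/" prefix), open the consumer once and make the device the
 * kernel dump target.  Called when the cdev is created in g_dev_taste().
 */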
static int
init_dumpdev(struct cdev *dev)
{
	struct diocskerneldump_arg kda;
	struct g_consumer *cp;
	const char *devprefix = "/dev/", *devname;
	int error;
	size_t len;

	bzero(&kda, sizeof(kda));
	kda.kda_enable = 1;

	if (dumpdev == NULL)
		return (0);

	len = strlen(devprefix);
	devname = devtoname(dev);
	if (strcmp(devname, dumpdev) != 0 &&
	    (strncmp(dumpdev, devprefix, len) != 0 ||
	    strcmp(devname, dumpdev + len) != 0))
		return (0);

	cp = (struct g_consumer *)dev->si_drv2;
	error = g_access(cp, 1, 0, 0);
	if (error != 0)
		return (error);

	error = g_dev_setdumpdev(dev, &kda, curthread);
	if (error == 0) {
		freeenv(dumpdev);
		dumpdev = NULL;
	}

	(void)g_access(cp, -1, 0, 0);

	return (error);
}

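/*
 * Event-queue callback that finally tears down the geom once the cdev has
 * been destroyed and all in-flight requests have drained: drop any remaining
 * access counts, detach and destroy the consumer and geom, and free the
 * softc.
 */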
static void
g_dev_destroy(void *arg, int flags __unused)
{
	struct g_consumer *cp;
	struct g_geom *gp;
	struct g_dev_softc *sc;
	char buf[SPECNAMELEN + 6];

	g_topology_assert();
	cp = arg;
	gp = cp->geom;
	sc = cp->private;
	g_trace(G_T_TOPOLOGY, "g_dev_destroy(%p(%s))", cp, gp->name);
	snprintf(buf, sizeof(buf), "cdev=%s", gp->name);
	devctl_notify_f("GEOM", "DEV", "DESTROY", buf, M_WAITOK);
	if (cp->acr > 0 || cp->acw > 0 || cp->ace > 0)
		g_access(cp, -cp->acr, -cp->acw, -cp->ace);
	g_detach(cp);
	g_destroy_consumer(cp);
	g_destroy_geom(gp);
	mtx_destroy(&sc->sc_mtx);
	g_free(sc);
}

void
g_dev_print(void)
{
	struct g_geom *gp;
	char const *p = "";

	LIST_FOREACH(gp, &g_dev_class.geom, geom) {
		printf("%s%s", p, gp->name);
		p = " ";
	}
	printf("\n");
}

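/*
 * Refresh the physical-path alias for this device: if the provider reports a
 * non-empty GEOM::physpath attribute, create or move the alias cdev to match
 * it, otherwise destroy any stale alias.
 */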
static void
g_dev_set_physpath(struct g_consumer *cp)
{
	struct g_dev_softc *sc;
	char *physpath;
	int error, physpath_len;

	if (g_access(cp, 1, 0, 0) != 0)
		return;

	sc = cp->private;
	physpath_len = MAXPATHLEN;
	physpath = g_malloc(physpath_len, M_WAITOK|M_ZERO);
	error = g_io_getattr("GEOM::physpath", cp, &physpath_len, physpath);
	g_access(cp, -1, 0, 0);
	if (error == 0 && strlen(physpath) != 0) {
		struct cdev *dev, *old_alias_dev;
		struct cdev **alias_devp;

		dev = sc->sc_dev;
		old_alias_dev = sc->sc_alias;
		alias_devp = (struct cdev **)&sc->sc_alias;
		make_dev_physpath_alias(MAKEDEV_WAITOK | MAKEDEV_CHECKNAME,
		    alias_devp, dev, old_alias_dev, physpath);
	} else if (sc->sc_alias) {
		destroy_dev((struct cdev *)sc->sc_alias);
		sc->sc_alias = NULL;
	}
	g_free(physpath);
}

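/*
 * Broadcast a MEDIACHANGE devctl notification for the primary cdev and, if
 * present, for its physical-path alias.
 */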
static void
g_dev_set_media(struct g_consumer *cp)
{
	struct g_dev_softc *sc;
	struct cdev *dev;
	char buf[SPECNAMELEN + 6];

	sc = cp->private;
	dev = sc->sc_dev;
	snprintf(buf, sizeof(buf), "cdev=%s", dev->si_name);
	devctl_notify_f("DEVFS", "CDEV", "MEDIACHANGE", buf, M_WAITOK);
	devctl_notify_f("GEOM", "DEV", "MEDIACHANGE", buf, M_WAITOK);
	dev = sc->sc_alias;
	if (dev != NULL) {
		snprintf(buf, sizeof(buf), "cdev=%s", dev->si_name);
		devctl_notify_f("DEVFS", "CDEV", "MEDIACHANGE", buf, M_WAITOK);
		devctl_notify_f("GEOM", "DEV", "MEDIACHANGE", buf, M_WAITOK);
	}
}

static void
g_dev_attrchanged(struct g_consumer *cp, const char *attr)
{

	if (strcmp(attr, "GEOM::media") == 0) {
		g_dev_set_media(cp);
		return;
	}

	if (strcmp(attr, "GEOM::physpath") == 0) {
		g_dev_set_physpath(cp);
		return;
	}
}

struct g_provider *
g_dev_getprovider(struct cdev *dev)
{
	struct g_consumer *cp;

	g_topology_assert();
	if (dev == NULL)
		return (NULL);
	if (dev->si_devsw != &g_dev_cdevsw)
		return (NULL);
	cp = dev->si_drv2;
	return (cp->provider);
}

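/*
 * Taste method: for every provider, create a matching geom and consumer,
 * attach to the provider, and expose it as a character device (plus any
 * provider aliases) under /dev.
 */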
static struct g_geom *
g_dev_taste(struct g_class *mp, struct g_provider *pp, int insist __unused)
{
	struct g_geom *gp;
	struct g_geom_alias *gap;
	struct g_consumer *cp;
	struct g_dev_softc *sc;
	int error;
	struct cdev *dev, *adev;
	char buf[SPECNAMELEN + 6];
	struct make_dev_args args;

	g_trace(G_T_TOPOLOGY, "dev_taste(%s,%s)", mp->name, pp->name);
	g_topology_assert();
	gp = g_new_geomf(mp, "%s", pp->name);
	sc = g_malloc(sizeof(*sc), M_WAITOK | M_ZERO);
	mtx_init(&sc->sc_mtx, "g_dev", NULL, MTX_DEF);
	cp = g_new_consumer(gp);
	cp->private = sc;
	cp->flags |= G_CF_DIRECT_SEND | G_CF_DIRECT_RECEIVE;
	error = g_attach(cp, pp);
	KASSERT(error == 0,
	    ("g_dev_taste(%s) failed to g_attach, err=%d", pp->name, error));

	make_dev_args_init(&args);
	args.mda_flags = MAKEDEV_CHECKNAME | MAKEDEV_WAITOK;
	args.mda_devsw = &g_dev_cdevsw;
	args.mda_cr = NULL;
	args.mda_uid = UID_ROOT;
	args.mda_gid = GID_OPERATOR;
	args.mda_mode = 0640;
	args.mda_si_drv1 = sc;
	args.mda_si_drv2 = cp;
	error = make_dev_s(&args, &sc->sc_dev, "%s", gp->name);
	if (error != 0) {
		printf("%s: make_dev_s() failed (gp->name=%s, error=%d)\n",
		    __func__, gp->name, error);
		g_detach(cp);
		g_destroy_consumer(cp);
		g_destroy_geom(gp);
		mtx_destroy(&sc->sc_mtx);
		g_free(sc);
		return (NULL);
	}
	dev = sc->sc_dev;
	dev->si_flags |= SI_UNMAPPED;
	dev->si_iosize_max = MAXPHYS;
	error = init_dumpdev(dev);
	if (error != 0)
		printf("%s: init_dumpdev() failed (gp->name=%s, error=%d)\n",
		    __func__, gp->name, error);

	g_dev_attrchanged(cp, "GEOM::physpath");
	snprintf(buf, sizeof(buf), "cdev=%s", gp->name);
	devctl_notify_f("GEOM", "DEV", "CREATE", buf, M_WAITOK);
	/*
	 * Now add all the aliases for this drive.
	 */
	LIST_FOREACH(gap, &pp->geom->aliases, ga_next) {
		error = make_dev_alias_p(MAKEDEV_CHECKNAME | MAKEDEV_WAITOK,
		    &adev, dev, "%s", gap->ga_alias);
		if (error) {
			printf("%s: make_dev_alias_p() failed (name=%s, error=%d)\n",
			    __func__, gap->ga_alias, error);
			continue;
		}
		snprintf(buf, sizeof(buf), "cdev=%s", gap->ga_alias);
		devctl_notify_f("GEOM", "DEV", "CREATE", buf, M_WAITOK);
	}

	return (gp);
}

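/*
 * cdevsw open: translate FREAD/FWRITE into GEOM access counts (exclusive
 * access is not wired up yet), enforce the securelevel restriction on
 * writes, and track the resulting open count in the softc.
 */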
static int
g_dev_open(struct cdev *dev, int flags, int fmt, struct thread *td)
{
	struct g_consumer *cp;
	struct g_dev_softc *sc;
	int error, r, w, e;

	cp = dev->si_drv2;
	g_trace(G_T_ACCESS, "g_dev_open(%s, %d, %d, %p)",
	    cp->geom->name, flags, fmt, td);

	r = flags & FREAD ? 1 : 0;
	w = flags & FWRITE ? 1 : 0;
#ifdef notyet
	e = flags & O_EXCL ? 1 : 0;
#else
	e = 0;
#endif

	/*
	 * This happens on attempt to open a device node with O_EXEC.
	 */
	if (r + w + e == 0)
		return (EINVAL);

	if (w) {
		/*
		 * When running in very secure mode, do not allow
		 * opens for writing of any disks.
		 */
		error = securelevel_ge(td->td_ucred, 2);
		if (error)
			return (error);
	}
	g_topology_lock();
	error = g_access(cp, r, w, e);
	g_topology_unlock();
	if (error == 0) {
		sc = dev->si_drv1;
		mtx_lock(&sc->sc_mtx);
		if (sc->sc_open == 0 && (sc->sc_active & SC_A_ACTIVE) != 0)
			wakeup(&sc->sc_active);
		sc->sc_open += r + w + e;
		if (sc->sc_open == 0)
			atomic_clear_int(&sc->sc_active, SC_A_OPEN);
		else
			atomic_set_int(&sc->sc_active, SC_A_OPEN);
		mtx_unlock(&sc->sc_mtx);
	}
	return (error);
}

static int
g_dev_close(struct cdev *dev, int flags, int fmt, struct thread *td)
{
	struct g_consumer *cp;
	struct g_dev_softc *sc;
	int error, r, w, e;

	cp = dev->si_drv2;
	g_trace(G_T_ACCESS, "g_dev_close(%s, %d, %d, %p)",
	    cp->geom->name, flags, fmt, td);

	r = flags & FREAD ? -1 : 0;
	w = flags & FWRITE ? -1 : 0;
#ifdef notyet
	e = flags & O_EXCL ? -1 : 0;
#else
	e = 0;
#endif

	/*
	 * vgonel(9) - caused by e.g. a forced unmount of devfs - calls
	 * VOP_CLOSE(9) on the devfs vnode without any FREAD or FWRITE
	 * flags, which would result in zero deltas and, in turn, a panic
	 * in g_access(9).
	 *
	 * Note that we cannot zero the counters (i.e. do "r = cp->acr"
	 * etc.) instead, because the consumer might be opened in another
	 * devfs instance.
	 */
	if (r + w + e == 0)
		return (EINVAL);

	sc = dev->si_drv1;
	mtx_lock(&sc->sc_mtx);
	sc->sc_open += r + w + e;
	if (sc->sc_open == 0)
		atomic_clear_int(&sc->sc_active, SC_A_OPEN);
	else
		atomic_set_int(&sc->sc_active, SC_A_OPEN);
	while (sc->sc_open == 0 && (sc->sc_active & SC_A_ACTIVE) != 0)
		msleep(&sc->sc_active, &sc->sc_mtx, 0, "g_dev_close", hz / 10);
	mtx_unlock(&sc->sc_mtx);
	g_topology_lock();
	error = g_access(cp, r, w, e);
	g_topology_unlock();
	return (error);
}

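/*
 * cdevsw ioctl: service the generic DIOC* disk ioctls (geometry, dump
 * configuration, flush, delete, zone commands, ...) and hand anything we do
 * not recognize to the provider's geom ioctl method.
 */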
static int
g_dev_ioctl(struct cdev *dev, u_long cmd, caddr_t data, int fflag, struct thread *td)
{
	struct g_consumer *cp;
	struct g_provider *pp;
	off_t offset, length, chunk, odd;
	int i, error;

	cp = dev->si_drv2;
	pp = cp->provider;

	/* If consumer or provider is dying, don't disturb. */
	if (cp->flags & G_CF_ORPHAN)
		return (ENXIO);
	if (pp->error)
		return (pp->error);

	error = 0;
	KASSERT(cp->acr || cp->acw,
	    ("Consumer with zero access count in g_dev_ioctl"));

	i = IOCPARM_LEN(cmd);
	switch (cmd) {
	case DIOCGSECTORSIZE:
		*(u_int *)data = pp->sectorsize;
		if (*(u_int *)data == 0)
			error = ENOENT;
		break;
	case DIOCGMEDIASIZE:
		*(off_t *)data = pp->mediasize;
		if (*(off_t *)data == 0)
			error = ENOENT;
		break;
	case DIOCGFWSECTORS:
		error = g_io_getattr("GEOM::fwsectors", cp, &i, data);
		if (error == 0 && *(u_int *)data == 0)
			error = ENOENT;
		break;
	case DIOCGFWHEADS:
		error = g_io_getattr("GEOM::fwheads", cp, &i, data);
		if (error == 0 && *(u_int *)data == 0)
			error = ENOENT;
		break;
	case DIOCGFRONTSTUFF:
		error = g_io_getattr("GEOM::frontstuff", cp, &i, data);
		break;
#ifdef COMPAT_FREEBSD11
	case DIOCSKERNELDUMP_FREEBSD11:
	{
		struct diocskerneldump_arg kda;

		bzero(&kda, sizeof(kda));
		kda.kda_encryption = KERNELDUMP_ENC_NONE;
		kda.kda_enable = (uint8_t)*(u_int *)data;
		if (kda.kda_enable == 0)
			error = g_dev_setdumpdev(NULL, NULL, td);
		else
			error = g_dev_setdumpdev(dev, &kda, td);
		break;
	}
#endif
	case DIOCSKERNELDUMP:
	{
		struct diocskerneldump_arg *kda;
		uint8_t *encryptedkey;

		kda = (struct diocskerneldump_arg *)data;
		if (kda->kda_enable == 0) {
			error = g_dev_setdumpdev(NULL, NULL, td);
			break;
		}

		if (kda->kda_encryption != KERNELDUMP_ENC_NONE) {
			if (kda->kda_encryptedkeysize <= 0 ||
			    kda->kda_encryptedkeysize >
			    KERNELDUMP_ENCKEY_MAX_SIZE) {
				return (EINVAL);
			}
			encryptedkey = malloc(kda->kda_encryptedkeysize, M_TEMP,
			    M_WAITOK);
			error = copyin(kda->kda_encryptedkey, encryptedkey,
			    kda->kda_encryptedkeysize);
		} else {
			encryptedkey = NULL;
		}
		if (error == 0) {
			kda->kda_encryptedkey = encryptedkey;
			error = g_dev_setdumpdev(dev, kda, td);
		}
		if (encryptedkey != NULL) {
			explicit_bzero(encryptedkey, kda->kda_encryptedkeysize);
			free(encryptedkey, M_TEMP);
		}
		explicit_bzero(kda, sizeof(*kda));
		break;
	}
	case DIOCGFLUSH:
		error = g_io_flush(cp);
		break;
	case DIOCGDELETE:
		offset = ((off_t *)data)[0];
		length = ((off_t *)data)[1];
		if ((offset % pp->sectorsize) != 0 ||
		    (length % pp->sectorsize) != 0 || length <= 0) {
			printf("%s: offset=%jd length=%jd\n", __func__, offset,
			    length);
			error = EINVAL;
			break;
		}
		while (length > 0) {
			chunk = length;
			if (g_dev_del_max_sectors != 0 &&
			    chunk > g_dev_del_max_sectors * pp->sectorsize) {
				chunk = g_dev_del_max_sectors * pp->sectorsize;
				if (pp->stripesize > 0) {
					odd = (offset + chunk +
					    pp->stripeoffset) % pp->stripesize;
					if (chunk > odd)
						chunk -= odd;
				}
			}
			error = g_delete_data(cp, offset, chunk);
			length -= chunk;
			offset += chunk;
			if (error)
				break;
			/*
			 * Since the request size can be large, the service
			 * time can be likewise.  We make this ioctl
			 * interruptible by checking for signals for each bio.
			 */
			if (SIGPENDING(td))
				break;
		}
		break;
	case DIOCGIDENT:
		error = g_io_getattr("GEOM::ident", cp, &i, data);
		break;
	case DIOCGPROVIDERNAME:
		strlcpy(data, pp->name, i);
		break;
	case DIOCGSTRIPESIZE:
		*(off_t *)data = pp->stripesize;
		break;
	case DIOCGSTRIPEOFFSET:
		*(off_t *)data = pp->stripeoffset;
		break;
	case DIOCGPHYSPATH:
		error = g_io_getattr("GEOM::physpath", cp, &i, data);
		if (error == 0 && *(char *)data == '\0')
			error = ENOENT;
		break;
	case DIOCGATTR: {
		struct diocgattr_arg *arg = (struct diocgattr_arg *)data;

		if (arg->len > sizeof(arg->value)) {
			error = EINVAL;
			break;
		}
		error = g_io_getattr(arg->name, cp, &arg->len, &arg->value);
		break;
	}
	case DIOCZONECMD: {
		struct disk_zone_args *zone_args = (struct disk_zone_args *)data;
		struct disk_zone_rep_entry *new_entries, *old_entries;
		struct disk_zone_report *rep;
		size_t alloc_size;

		old_entries = NULL;
		new_entries = NULL;
		rep = NULL;
		alloc_size = 0;

		if (zone_args->zone_cmd == DISK_ZONE_REPORT_ZONES) {
			rep = &zone_args->zone_params.report;
#define	MAXENTRIES	(MAXPHYS / sizeof(struct disk_zone_rep_entry))
			if (rep->entries_allocated > MAXENTRIES)
				rep->entries_allocated = MAXENTRIES;
			alloc_size = rep->entries_allocated *
			    sizeof(struct disk_zone_rep_entry);
			if (alloc_size != 0)
				new_entries = g_malloc(alloc_size,
				    M_WAITOK | M_ZERO);
			old_entries = rep->entries;
			rep->entries = new_entries;
		}
		error = g_io_zonecmd(zone_args, cp);
		if (zone_args->zone_cmd == DISK_ZONE_REPORT_ZONES &&
		    alloc_size != 0 && error == 0)
			error = copyout(new_entries, old_entries, alloc_size);
		if (old_entries != NULL && rep != NULL)
			rep->entries = old_entries;
		if (new_entries != NULL)
			g_free(new_entries);
		break;
	}
	default:
		if (pp->geom->ioctl != NULL) {
			error = pp->geom->ioctl(pp, cmd, data, fflag, td);
		} else {
			error = ENOIOCTL;
		}
	}

	return (error);
}

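/*
 * Completion handler for the cloned bio: propagate status, residual and zone
 * results back to the original bio, drop the in-flight count in sc_active,
 * and when the last request drains either wake a thread sleeping in
 * g_dev_close() or schedule g_dev_destroy() if the cdev is already gone.
 */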
static void
g_dev_done(struct bio *bp2)
{
	struct g_consumer *cp;
	struct g_dev_softc *sc;
	struct bio *bp;
	int active;

	cp = bp2->bio_from;
	sc = cp->private;
	bp = bp2->bio_parent;
	bp->bio_error = bp2->bio_error;
	bp->bio_completed = bp2->bio_completed;
	bp->bio_resid = bp->bio_length - bp2->bio_completed;
	if (bp2->bio_cmd == BIO_ZONE)
		bcopy(&bp2->bio_zone, &bp->bio_zone, sizeof(bp->bio_zone));

	if (bp2->bio_error != 0) {
		g_trace(G_T_BIO, "g_dev_done(%p) had error %d",
		    bp2, bp2->bio_error);
		bp->bio_flags |= BIO_ERROR;
	} else {
		g_trace(G_T_BIO, "g_dev_done(%p/%p) resid %ld completed %jd",
		    bp2, bp, bp2->bio_resid, (intmax_t)bp2->bio_completed);
	}
	g_destroy_bio(bp2);
	active = atomic_fetchadd_int(&sc->sc_active, -1) - 1;
	if ((active & SC_A_ACTIVE) == 0) {
		if ((active & SC_A_OPEN) == 0)
			wakeup(&sc->sc_active);
		if (active & SC_A_DESTROY)
			g_post_event(g_dev_destroy, cp, M_NOWAIT, NULL);
	}
	biodone(bp);
}

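/*
 * cdevsw strategy: account the request in sc_active, clone the bio (retrying
 * until the clone succeeds) and pass the clone down to the consumer with
 * g_dev_done() as its completion callback.
 */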
static void
g_dev_strategy(struct bio *bp)
{
	struct g_consumer *cp;
	struct bio *bp2;
	struct cdev *dev;
	struct g_dev_softc *sc;

	KASSERT(bp->bio_cmd == BIO_READ ||
	    bp->bio_cmd == BIO_WRITE ||
	    bp->bio_cmd == BIO_DELETE ||
	    bp->bio_cmd == BIO_FLUSH ||
	    bp->bio_cmd == BIO_ZONE,
	    ("Wrong bio_cmd bio=%p cmd=%d", bp, bp->bio_cmd));
	dev = bp->bio_dev;
	cp = dev->si_drv2;
	KASSERT(cp->acr || cp->acw,
	    ("Consumer with zero access count in g_dev_strategy"));
	biotrack(bp, __func__);
#ifdef INVARIANTS
	if ((bp->bio_offset % cp->provider->sectorsize) != 0 ||
	    (bp->bio_bcount % cp->provider->sectorsize) != 0) {
		bp->bio_resid = bp->bio_bcount;
		biofinish(bp, NULL, EINVAL);
		return;
	}
#endif
	sc = dev->si_drv1;
	KASSERT(sc->sc_open > 0, ("Closed device in g_dev_strategy"));
	atomic_add_int(&sc->sc_active, 1);

	for (;;) {
		/*
		 * XXX: This is not an ideal solution, but I believe it to
		 * XXX: be deadlock-safe, all things considered.
		 */
		bp2 = g_clone_bio(bp);
		if (bp2 != NULL)
			break;
		pause("gdstrat", hz / 10);
	}
	KASSERT(bp2 != NULL, ("XXX: ENOMEM in a bad place"));
	bp2->bio_done = g_dev_done;
	g_trace(G_T_BIO,
	    "g_dev_strategy(%p/%p) offset %jd length %jd data %p cmd %d",
	    bp, bp2, (intmax_t)bp->bio_offset, (intmax_t)bp2->bio_length,
	    bp2->bio_data, bp2->bio_cmd);
	g_io_request(bp2, cp);
	KASSERT(cp->acr || cp->acw,
	    ("g_dev_strategy raced with g_dev_close and lost"));
}

/*
 * g_dev_callback()
 *
 * Called by devfs when asynchronous device destruction is completed.
 * - Mark that we have no attached device any more.
 * - If there are no outstanding requests, schedule geom destruction.
 *   Otherwise destruction will be scheduled later by g_dev_done().
 */

static void
g_dev_callback(void *arg)
{
	struct g_consumer *cp;
	struct g_dev_softc *sc;
	int active;

	cp = arg;
	sc = cp->private;
	g_trace(G_T_TOPOLOGY, "g_dev_callback(%p(%s))", cp, cp->geom->name);

	sc->sc_dev = NULL;
	sc->sc_alias = NULL;
	active = atomic_fetchadd_int(&sc->sc_active, SC_A_DESTROY);
	if ((active & SC_A_ACTIVE) == 0)
		g_post_event(g_dev_destroy, cp, M_WAITOK, NULL);
}

/*
 * g_dev_orphan()
 *
 * Called from below when the provider orphaned us.
 * - Clear any dump settings.
 * - Request asynchronous device destruction to prevent any more requests
 *   from coming in.  The provider is already marked with an error, so
 *   anything which comes in the interim will be returned immediately.
 */

static void
g_dev_orphan(struct g_consumer *cp)
{
	struct cdev *dev;
	struct g_dev_softc *sc;

	g_topology_assert();
	sc = cp->private;
	dev = sc->sc_dev;
	g_trace(G_T_TOPOLOGY, "g_dev_orphan(%p(%s))", cp, cp->geom->name);

	/* Reset any dump-area set on this device */
	if (dev->si_flags & SI_DUMPDEV)
		(void)clear_dumper(curthread);

	/* Destroy the struct cdev * so we get no more requests. */
	delist_dev(dev);
	destroy_dev_sched_cb(dev, g_dev_callback, cp);
}

DECLARE_GEOM_CLASS(g_dev_class, g_dev);