1 /*-
2 * Copyright (c) 2005 Pawel Jakub Dawidek <pjd@FreeBSD.org>
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 *
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24 * SUCH DAMAGE.
25 */
26
27 #include <sys/cdefs.h>
28 __FBSDID("$FreeBSD: releng/9.0/sys/geom/shsec/g_shsec.c 223921 2011-07-11 05:22:31Z ae $");
29
30 #include <sys/param.h>
31 #include <sys/systm.h>
32 #include <sys/kernel.h>
33 #include <sys/module.h>
34 #include <sys/lock.h>
35 #include <sys/mutex.h>
36 #include <sys/bio.h>
37 #include <sys/sbuf.h>
38 #include <sys/sysctl.h>
39 #include <sys/malloc.h>
40 #include <vm/uma.h>
41 #include <geom/geom.h>
42 #include <geom/shsec/g_shsec.h>
43
/* Advertise the class as a kernel feature for feature_present(3). */
FEATURE(geom_shsec, "GEOM shared secret device support");

/* malloc(9) type used for the softc and the disk-pointer array. */
static MALLOC_DEFINE(M_SHSEC, "shsec_data", "GEOM_SHSEC Data");

/* UMA zone providing MAXPHYS-sized buffers for component I/O. */
static uma_zone_t g_shsec_zone;

/* Forward declarations for routines referenced before definition. */
static int g_shsec_destroy(struct g_shsec_softc *sc, boolean_t force);
static int g_shsec_destroy_geom(struct gctl_req *req, struct g_class *mp,
    struct g_geom *gp);

/* GEOM class method implementations (wired up in g_shsec_class below). */
static g_taste_t g_shsec_taste;
static g_ctl_req_t g_shsec_config;
static g_dumpconf_t g_shsec_dumpconf;
static g_init_t g_shsec_init;
static g_fini_t g_shsec_fini;
59
/* GEOM class definition for the SHSEC (shared-secret) transform. */
struct g_class g_shsec_class = {
	.name = G_SHSEC_CLASS_NAME,
	.version = G_VERSION,
	.ctlreq = g_shsec_config,
	.taste = g_shsec_taste,
	.destroy_geom = g_shsec_destroy_geom,
	.init = g_shsec_init,
	.fini = g_shsec_fini
};
69
/* Sysctl/tunable knobs under kern.geom.shsec. */
SYSCTL_DECL(_kern_geom);
SYSCTL_NODE(_kern_geom, OID_AUTO, shsec, CTLFLAG_RW, 0, "GEOM_SHSEC stuff");
/* Debug verbosity; 0 logs errors/events only. */
static u_int g_shsec_debug = 0;
TUNABLE_INT("kern.geom.shsec.debug", &g_shsec_debug);
SYSCTL_UINT(_kern_geom_shsec, OID_AUTO, debug, CTLFLAG_RW, &g_shsec_debug, 0,
    "Debug level");
/* Cap on memory used for component I/O buffers (boot-time tunable). */
static u_int g_shsec_maxmem = MAXPHYS * 100;
TUNABLE_INT("kern.geom.shsec.maxmem", &g_shsec_maxmem);
SYSCTL_UINT(_kern_geom_shsec, OID_AUTO, maxmem, CTLFLAG_RD, &g_shsec_maxmem,
    0, "Maximum memory that can be allocated for I/O (in bytes)");
/* Counter of failed uma_zalloc() attempts in the I/O path. */
static u_int g_shsec_alloc_failed = 0;
SYSCTL_UINT(_kern_geom_shsec, OID_AUTO, alloc_failed, CTLFLAG_RD,
    &g_shsec_alloc_failed, 0, "How many times I/O allocation failed");
83
84 /*
85 * Greatest Common Divisor.
86 */
87 static u_int
88 gcd(u_int a, u_int b)
89 {
90 u_int c;
91
92 while (b != 0) {
93 c = a;
94 a = b;
95 b = (c % b);
96 }
97 return (a);
98 }
99
100 /*
101 * Least Common Multiple.
102 */
103 static u_int
104 lcm(u_int a, u_int b)
105 {
106
107 return ((a * b) / gcd(a, b));
108 }
109
110 static void
111 g_shsec_init(struct g_class *mp __unused)
112 {
113
114 g_shsec_zone = uma_zcreate("g_shsec_zone", MAXPHYS, NULL, NULL, NULL,
115 NULL, 0, 0);
116 g_shsec_maxmem -= g_shsec_maxmem % MAXPHYS;
117 uma_zone_set_max(g_shsec_zone, g_shsec_maxmem / MAXPHYS);
118 }
119
/*
 * Class fini method: release the UMA zone created in g_shsec_init().
 */
static void
g_shsec_fini(struct g_class *mp __unused)
{

	uma_zdestroy(g_shsec_zone);
}
126
127 /*
128 * Return the number of valid disks.
129 */
130 static u_int
131 g_shsec_nvalid(struct g_shsec_softc *sc)
132 {
133 u_int i, no;
134
135 no = 0;
136 for (i = 0; i < sc->sc_ndisks; i++) {
137 if (sc->sc_disks[i] != NULL)
138 no++;
139 }
140
141 return (no);
142 }
143
/*
 * Detach and destroy a single component consumer.
 *
 * Clears the disk's slot in the softc.  Since every share is required
 * to reconstruct the data, a single missing component makes the device
 * unusable, so the provider (if any) is orphaned immediately.  Any
 * access counts held by the consumer are dropped before it is detached
 * and destroyed.
 */
static void
g_shsec_remove_disk(struct g_consumer *cp)
{
	struct g_shsec_softc *sc;
	u_int no;

	KASSERT(cp != NULL, ("Non-valid disk in %s.", __func__));
	sc = (struct g_shsec_softc *)cp->private;
	KASSERT(sc != NULL, ("NULL sc in %s.", __func__));
	/* cp->index holds the disk's slot number (set in g_shsec_add_disk). */
	no = cp->index;

	G_SHSEC_DEBUG(0, "Disk %s removed from %s.", cp->provider->name,
	    sc->sc_name);

	sc->sc_disks[no] = NULL;
	if (sc->sc_provider != NULL) {
		/* One missing share makes the secret unrecoverable. */
		g_orphan_provider(sc->sc_provider, ENXIO);
		sc->sc_provider = NULL;
		G_SHSEC_DEBUG(0, "Device %s removed.", sc->sc_name);
	}

	/* Drop whatever access counts we still hold before detaching. */
	if (cp->acr > 0 || cp->acw > 0 || cp->ace > 0)
		g_access(cp, -cp->acr, -cp->acw, -cp->ace);
	g_detach(cp);
	g_destroy_consumer(cp);
}
170
171 static void
172 g_shsec_orphan(struct g_consumer *cp)
173 {
174 struct g_shsec_softc *sc;
175 struct g_geom *gp;
176
177 g_topology_assert();
178 gp = cp->geom;
179 sc = gp->softc;
180 if (sc == NULL)
181 return;
182
183 g_shsec_remove_disk(cp);
184 /* If there are no valid disks anymore, remove device. */
185 if (g_shsec_nvalid(sc) == 0)
186 g_shsec_destroy(sc, 1);
187 }
188
/*
 * Provider access method.  Propagates the requested access-count deltas
 * (dr, dw, de) to every component consumer; if any component refuses,
 * all changes already applied are backed out and the error returned.
 *
 * While the provider is open at all, an extra exclusive bit is held on
 * each component (taken on first open, released on last close) so the
 * components cannot be opened for writing behind our back.
 */
static int
g_shsec_access(struct g_provider *pp, int dr, int dw, int de)
{
	struct g_consumer *cp1, *cp2;
	struct g_shsec_softc *sc;
	struct g_geom *gp;
	int error;

	gp = pp->geom;
	sc = gp->softc;

	if (sc == NULL) {
		/*
		 * It looks like geom is being withered.
		 * In that case we allow only negative requests.
		 */
		KASSERT(dr <= 0 && dw <= 0 && de <= 0,
		    ("Positive access request (device=%s).", pp->name));
		if ((pp->acr + dr) == 0 && (pp->acw + dw) == 0 &&
		    (pp->ace + de) == 0) {
			G_SHSEC_DEBUG(0, "Device %s definitely destroyed.",
			    gp->name);
		}
		return (0);
	}

	/* On first open, grab an extra "exclusive" bit */
	if (pp->acr == 0 && pp->acw == 0 && pp->ace == 0)
		de++;
	/* ... and let go of it on last close */
	if ((pp->acr + dr) == 0 && (pp->acw + dw) == 0 && (pp->ace + de) == 0)
		de--;

	error = ENXIO;
	LIST_FOREACH(cp1, &gp->consumer, consumer) {
		error = g_access(cp1, dr, dw, de);
		if (error == 0)
			continue;
		/*
		 * If we fail here, backout all previous changes.
		 * Walk the list again up to (but excluding) the consumer
		 * that failed, undoing the delta on each, then return.
		 */
		LIST_FOREACH(cp2, &gp->consumer, consumer) {
			if (cp1 == cp2)
				return (error);
			g_access(cp2, -dr, -dw, -de);
		}
		/* NOTREACHED */
	}

	return (error);
}
240
/*
 * XOR `len' bytes of *src into *dst, a 32-bit word at a time.
 * len must be a multiple of sizeof(uint32_t).
 */
static void
g_shsec_xor1(uint32_t *src, uint32_t *dst, ssize_t len)
{

	while (len > 0) {
		*dst++ ^= *src++;
		len -= sizeof(uint32_t);
	}
	KASSERT(len == 0, ("len != 0 (len=%zd)", len));
}
249
/*
 * Completion handler for component bios.
 *
 * READ: the parent's buffer is reconstructed by combining the shares.
 * The first child to complete is copied in verbatim (signalled by the
 * G_SHSEC_BFLAG_FIRST parent flag, which it clears); every later child
 * is XORed on top.  Each share buffer is zeroed before being returned
 * to the zone so secret material does not linger in freed memory.  The
 * parent request is delivered once all children have come back.
 */
static void
g_shsec_done(struct bio *bp)
{
	struct g_shsec_softc *sc;
	struct bio *pbp;

	pbp = bp->bio_parent;
	sc = pbp->bio_to->geom->softc;
	if (bp->bio_error == 0)
		G_SHSEC_LOGREQ(2, bp, "Request done.");
	else {
		G_SHSEC_LOGREQ(0, bp, "Request failed (error=%d).",
		    bp->bio_error);
		/* Only the first error is reported to the parent. */
		if (pbp->bio_error == 0)
			pbp->bio_error = bp->bio_error;
	}
	if (pbp->bio_cmd == BIO_READ) {
		if ((pbp->bio_pflags & G_SHSEC_BFLAG_FIRST) != 0) {
			/* First share arriving: straight copy. */
			bcopy(bp->bio_data, pbp->bio_data, pbp->bio_length);
			pbp->bio_pflags = 0;
		} else {
			/* Later shares: XOR into what is already there. */
			g_shsec_xor1((uint32_t *)bp->bio_data,
			    (uint32_t *)pbp->bio_data,
			    (ssize_t)pbp->bio_length);
		}
	}
	/* Scrub the share before handing the buffer back to the zone. */
	bzero(bp->bio_data, bp->bio_length);
	uma_zfree(g_shsec_zone, bp->bio_data);
	g_destroy_bio(bp);
	pbp->bio_inbed++;
	if (pbp->bio_children == pbp->bio_inbed) {
		pbp->bio_completed = pbp->bio_length;
		g_io_deliver(pbp, pbp->bio_error);
	}
}
285
/*
 * Generate a fresh random share while folding it into the data share.
 *
 * For each 32-bit word: store a random word into *rand (the component
 * buffer being prepared) and XOR that same word into *dst (component
 * 0's buffer, which accumulates data ^ all random shares).  After all
 * components are processed, the XOR of every component reconstructs
 * the original data.  len must be a multiple of sizeof(uint32_t).
 */
static void
g_shsec_xor2(uint32_t *rand, uint32_t *dst, ssize_t len)
{

	for (; len > 0; len -= sizeof(uint32_t), dst++) {
		*rand = arc4random();
		*dst = *dst ^ *rand++;
	}
	KASSERT(len == 0, ("len != 0 (len=%zd)", len));
}
296
/*
 * Provider start method: split the parent request into one child bio
 * per component disk.
 *
 * WRITE: component 0's buffer starts as a copy of the data; for each
 * further component, g_shsec_xor2() fills that component's buffer with
 * random words while XORing them into component 0's buffer, so the XOR
 * of all written components equals the original data.
 * READ: every component is read into its own buffer and recombined in
 * g_shsec_done().
 *
 * All clones and buffers are allocated up front; on any allocation
 * failure everything is undone and the parent fails with ENOMEM.
 */
static void
g_shsec_start(struct bio *bp)
{
	TAILQ_HEAD(, bio) queue = TAILQ_HEAD_INITIALIZER(queue);
	struct g_shsec_softc *sc;
	struct bio *cbp;
	uint32_t *dst;
	ssize_t len;
	u_int no;
	int error;

	sc = bp->bio_to->geom->softc;
	/*
	 * If sc == NULL, provider's error should be set and g_shsec_start()
	 * should not be called at all.
	 */
	KASSERT(sc != NULL,
	    ("Provider's error should be set (error=%d)(device=%s).",
	    bp->bio_to->error, bp->bio_to->name));

	G_SHSEC_LOGREQ(2, bp, "Request received.");

	switch (bp->bio_cmd) {
	case BIO_READ:
	case BIO_WRITE:
	case BIO_FLUSH:
		/*
		 * Only those requests are supported.
		 */
		break;
	case BIO_DELETE:
	case BIO_GETATTR:
		/* To which provider it should be delivered? */
	default:
		g_io_deliver(bp, EOPNOTSUPP);
		return;
	}

	/*
	 * Allocate all bios first and calculate XOR.
	 */
	dst = NULL;
	len = bp->bio_length;
	/* Tell g_shsec_done() to copy (not XOR) the first completed read. */
	if (bp->bio_cmd == BIO_READ)
		bp->bio_pflags = G_SHSEC_BFLAG_FIRST;
	for (no = 0; no < sc->sc_ndisks; no++) {
		cbp = g_clone_bio(bp);
		if (cbp == NULL) {
			error = ENOMEM;
			goto failure;
		}
		TAILQ_INSERT_TAIL(&queue, cbp, bio_queue);

		/*
		 * Fill in the component buf structure.
		 */
		cbp->bio_done = g_shsec_done;
		cbp->bio_data = uma_zalloc(g_shsec_zone, M_NOWAIT);
		if (cbp->bio_data == NULL) {
			g_shsec_alloc_failed++;
			error = ENOMEM;
			goto failure;
		}
		/* Stash the target consumer until the bio is fired below. */
		cbp->bio_caller2 = sc->sc_disks[no];
		if (bp->bio_cmd == BIO_WRITE) {
			if (no == 0) {
				/* Component 0 starts as the plain data... */
				dst = (uint32_t *)cbp->bio_data;
				bcopy(bp->bio_data, dst, len);
			} else {
				/* ...then absorbs each random share. */
				g_shsec_xor2((uint32_t *)cbp->bio_data, dst,
				    len);
			}
		}
	}
	/*
	 * Fire off all allocated requests!
	 */
	while ((cbp = TAILQ_FIRST(&queue)) != NULL) {
		struct g_consumer *cp;

		TAILQ_REMOVE(&queue, cbp, bio_queue);
		cp = cbp->bio_caller2;
		cbp->bio_caller2 = NULL;
		cbp->bio_to = cp->provider;
		G_SHSEC_LOGREQ(2, cbp, "Sending request.");
		g_io_request(cbp, cp);
	}
	return;
failure:
	/* Undo every clone; zero buffers so random shares don't leak. */
	while ((cbp = TAILQ_FIRST(&queue)) != NULL) {
		TAILQ_REMOVE(&queue, cbp, bio_queue);
		bp->bio_children--;
		if (cbp->bio_data != NULL) {
			bzero(cbp->bio_data, cbp->bio_length);
			uma_zfree(g_shsec_zone, cbp->bio_data);
		}
		g_destroy_bio(cbp);
	}
	if (bp->bio_error == 0)
		bp->bio_error = error;
	g_io_deliver(bp, bp->bio_error);
}
399
400 static void
401 g_shsec_check_and_run(struct g_shsec_softc *sc)
402 {
403 off_t mediasize, ms;
404 u_int no, sectorsize = 0;
405
406 if (g_shsec_nvalid(sc) != sc->sc_ndisks)
407 return;
408
409 sc->sc_provider = g_new_providerf(sc->sc_geom, "shsec/%s", sc->sc_name);
410 /*
411 * Find the smallest disk.
412 */
413 mediasize = sc->sc_disks[0]->provider->mediasize;
414 mediasize -= sc->sc_disks[0]->provider->sectorsize;
415 sectorsize = sc->sc_disks[0]->provider->sectorsize;
416 for (no = 1; no < sc->sc_ndisks; no++) {
417 ms = sc->sc_disks[no]->provider->mediasize;
418 ms -= sc->sc_disks[no]->provider->sectorsize;
419 if (ms < mediasize)
420 mediasize = ms;
421 sectorsize = lcm(sectorsize,
422 sc->sc_disks[no]->provider->sectorsize);
423 }
424 sc->sc_provider->sectorsize = sectorsize;
425 sc->sc_provider->mediasize = mediasize;
426 g_error_provider(sc->sc_provider, 0);
427
428 G_SHSEC_DEBUG(0, "Device %s activated.", sc->sc_name);
429 }
430
/*
 * Read and decode the shsec metadata stored in the last sector of the
 * consumer's provider.  The consumer is opened for reading for the
 * duration of the call.  Returns 0 on success or an errno value from
 * g_access()/g_read_data().
 */
static int
g_shsec_read_metadata(struct g_consumer *cp, struct g_shsec_metadata *md)
{
	struct g_provider *pp;
	u_char *buf;
	int error;

	g_topology_assert();

	error = g_access(cp, 1, 0, 0);
	if (error != 0)
		return (error);
	pp = cp->provider;
	/* g_read_data() may sleep; the topology lock must not be held. */
	g_topology_unlock();
	buf = g_read_data(cp, pp->mediasize - pp->sectorsize, pp->sectorsize,
	    &error);
	g_topology_lock();
	g_access(cp, -1, 0, 0);
	if (buf == NULL)
		return (error);

	/* Decode metadata. */
	shsec_metadata_decode(buf, md);
	g_free(buf);

	return (0);
}
458
459 /*
460 * Add disk to given device.
461 */
462 static int
463 g_shsec_add_disk(struct g_shsec_softc *sc, struct g_provider *pp, u_int no)
464 {
465 struct g_consumer *cp, *fcp;
466 struct g_geom *gp;
467 struct g_shsec_metadata md;
468 int error;
469
470 /* Metadata corrupted? */
471 if (no >= sc->sc_ndisks)
472 return (EINVAL);
473
474 /* Check if disk is not already attached. */
475 if (sc->sc_disks[no] != NULL)
476 return (EEXIST);
477
478 gp = sc->sc_geom;
479 fcp = LIST_FIRST(&gp->consumer);
480
481 cp = g_new_consumer(gp);
482 error = g_attach(cp, pp);
483 if (error != 0) {
484 g_destroy_consumer(cp);
485 return (error);
486 }
487
488 if (fcp != NULL && (fcp->acr > 0 || fcp->acw > 0 || fcp->ace > 0)) {
489 error = g_access(cp, fcp->acr, fcp->acw, fcp->ace);
490 if (error != 0) {
491 g_detach(cp);
492 g_destroy_consumer(cp);
493 return (error);
494 }
495 }
496
497 /* Reread metadata. */
498 error = g_shsec_read_metadata(cp, &md);
499 if (error != 0)
500 goto fail;
501
502 if (strcmp(md.md_magic, G_SHSEC_MAGIC) != 0 ||
503 strcmp(md.md_name, sc->sc_name) != 0 || md.md_id != sc->sc_id) {
504 G_SHSEC_DEBUG(0, "Metadata on %s changed.", pp->name);
505 goto fail;
506 }
507
508 cp->private = sc;
509 cp->index = no;
510 sc->sc_disks[no] = cp;
511
512 G_SHSEC_DEBUG(0, "Disk %s attached to %s.", pp->name, sc->sc_name);
513
514 g_shsec_check_and_run(sc);
515
516 return (0);
517 fail:
518 if (fcp != NULL && (fcp->acr > 0 || fcp->acw > 0 || fcp->ace > 0))
519 g_access(cp, -fcp->acr, -fcp->acw, -fcp->ace);
520 g_detach(cp);
521 g_destroy_consumer(cp);
522 return (error);
523 }
524
525 static struct g_geom *
526 g_shsec_create(struct g_class *mp, const struct g_shsec_metadata *md)
527 {
528 struct g_shsec_softc *sc;
529 struct g_geom *gp;
530 u_int no;
531
532 G_SHSEC_DEBUG(1, "Creating device %s (id=%u).", md->md_name, md->md_id);
533
534 /* Two disks is minimum. */
535 if (md->md_all < 2) {
536 G_SHSEC_DEBUG(0, "Too few disks defined for %s.", md->md_name);
537 return (NULL);
538 }
539
540 /* Check for duplicate unit */
541 LIST_FOREACH(gp, &mp->geom, geom) {
542 sc = gp->softc;
543 if (sc != NULL && strcmp(sc->sc_name, md->md_name) == 0) {
544 G_SHSEC_DEBUG(0, "Device %s already configured.",
545 sc->sc_name);
546 return (NULL);
547 }
548 }
549 gp = g_new_geomf(mp, "%s", md->md_name);
550 sc = malloc(sizeof(*sc), M_SHSEC, M_WAITOK | M_ZERO);
551 gp->start = g_shsec_start;
552 gp->spoiled = g_shsec_orphan;
553 gp->orphan = g_shsec_orphan;
554 gp->access = g_shsec_access;
555 gp->dumpconf = g_shsec_dumpconf;
556
557 sc->sc_id = md->md_id;
558 sc->sc_ndisks = md->md_all;
559 sc->sc_disks = malloc(sizeof(struct g_consumer *) * sc->sc_ndisks,
560 M_SHSEC, M_WAITOK | M_ZERO);
561 for (no = 0; no < sc->sc_ndisks; no++)
562 sc->sc_disks[no] = NULL;
563
564 gp->softc = sc;
565 sc->sc_geom = gp;
566 sc->sc_provider = NULL;
567
568 G_SHSEC_DEBUG(0, "Device %s created (id=%u).", sc->sc_name, sc->sc_id);
569
570 return (gp);
571 }
572
/*
 * Tear down a device: remove all component disks, free the softc and
 * wither the geom.  If the provider is still open the call fails with
 * EBUSY unless `force' is set, in which case destruction proceeds
 * anyway.  Returns 0 or an errno value.
 */
static int
g_shsec_destroy(struct g_shsec_softc *sc, boolean_t force)
{
	struct g_provider *pp;
	struct g_geom *gp;
	u_int no;

	g_topology_assert();

	if (sc == NULL)
		return (ENXIO);

	pp = sc->sc_provider;
	if (pp != NULL && (pp->acr != 0 || pp->acw != 0 || pp->ace != 0)) {
		if (force) {
			G_SHSEC_DEBUG(0, "Device %s is still open, so it "
			    "can't be definitely removed.", pp->name);
		} else {
			G_SHSEC_DEBUG(1,
			    "Device %s is still open (r%dw%de%d).", pp->name,
			    pp->acr, pp->acw, pp->ace);
			return (EBUSY);
		}
	}

	/* Removing the first disk also orphans the provider. */
	for (no = 0; no < sc->sc_ndisks; no++) {
		if (sc->sc_disks[no] != NULL)
			g_shsec_remove_disk(sc->sc_disks[no]);
	}

	gp = sc->sc_geom;
	/* A NULL softc tells g_shsec_access() the geom is withering. */
	gp->softc = NULL;
	KASSERT(sc->sc_provider == NULL, ("Provider still exists? (device=%s)",
	    gp->name));
	free(sc->sc_disks, M_SHSEC);
	free(sc, M_SHSEC);

	pp = LIST_FIRST(&gp->provider);
	if (pp == NULL || (pp->acr == 0 && pp->acw == 0 && pp->ace == 0))
		G_SHSEC_DEBUG(0, "Device %s destroyed.", gp->name);

	g_wither_geom(gp, ENXIO);

	return (0);
}
618
619 static int
620 g_shsec_destroy_geom(struct gctl_req *req __unused, struct g_class *mp __unused,
621 struct g_geom *gp)
622 {
623 struct g_shsec_softc *sc;
624
625 sc = gp->softc;
626 return (g_shsec_destroy(sc, 0));
627 }
628
/*
 * Class taste method: probe a provider for shsec metadata and, when it
 * matches, attach the disk to an existing device or create a new one.
 * Returns the device's geom on success, NULL otherwise.
 */
static struct g_geom *
g_shsec_taste(struct g_class *mp, struct g_provider *pp, int flags __unused)
{
	struct g_shsec_metadata md;
	struct g_shsec_softc *sc;
	struct g_consumer *cp;
	struct g_geom *gp;
	int error;

	g_trace(G_T_TOPOLOGY, "%s(%s, %s)", __func__, mp->name, pp->name);
	g_topology_assert();

	/* Skip providers that are already open for writing. */
	if (pp->acw > 0)
		return (NULL);

	G_SHSEC_DEBUG(3, "Tasting %s.", pp->name);

	/* Use a throw-away geom/consumer just to read the metadata. */
	gp = g_new_geomf(mp, "shsec:taste");
	gp->start = g_shsec_start;
	gp->access = g_shsec_access;
	gp->orphan = g_shsec_orphan;
	cp = g_new_consumer(gp);
	g_attach(cp, pp);
	error = g_shsec_read_metadata(cp, &md);
	g_detach(cp);
	g_destroy_consumer(cp);
	g_destroy_geom(gp);
	if (error != 0)
		return (NULL);
	gp = NULL;

	if (strcmp(md.md_magic, G_SHSEC_MAGIC) != 0)
		return (NULL);
	if (md.md_version > G_SHSEC_VERSION) {
		G_SHSEC_DEBUG(0, "Kernel module is too old to handle %s.\n",
		    pp->name);
		return (NULL);
	}
	/*
	 * Backward compatibility:
	 */
	/* There was no md_provsize field in earlier versions of metadata. */
	if (md.md_version < 1)
		md.md_provsize = pp->mediasize;

	/* Reject metadata recorded for (or copied from) another provider. */
	if (md.md_provider[0] != '\0' &&
	    !g_compare_names(md.md_provider, pp->name))
		return (NULL);
	if (md.md_provsize != pp->mediasize)
		return (NULL);

	/*
	 * Let's check if device already exists.
	 */
	sc = NULL;
	LIST_FOREACH(gp, &mp->geom, geom) {
		sc = gp->softc;
		if (sc == NULL)
			continue;
		if (strcmp(md.md_name, sc->sc_name) != 0)
			continue;
		if (md.md_id != sc->sc_id)
			continue;
		break;
	}
	if (gp != NULL) {
		/* Existing device: attach this disk into its slot. */
		G_SHSEC_DEBUG(1, "Adding disk %s to %s.", pp->name, gp->name);
		error = g_shsec_add_disk(sc, pp, md.md_no);
		if (error != 0) {
			G_SHSEC_DEBUG(0, "Cannot add disk %s to %s (error=%d).",
			    pp->name, gp->name, error);
			return (NULL);
		}
	} else {
		/* First disk seen for this device: create it, then attach. */
		gp = g_shsec_create(mp, &md);
		if (gp == NULL) {
			G_SHSEC_DEBUG(0, "Cannot create device %s.", md.md_name);
			return (NULL);
		}
		sc = gp->softc;
		G_SHSEC_DEBUG(1, "Adding disk %s to %s.", pp->name, gp->name);
		error = g_shsec_add_disk(sc, pp, md.md_no);
		if (error != 0) {
			G_SHSEC_DEBUG(0, "Cannot add disk %s to %s (error=%d).",
			    pp->name, gp->name, error);
			/* The very first disk failed - dismantle the device. */
			g_shsec_destroy(sc, 1);
			return (NULL);
		}
	}
	return (gp);
}
721
722 static struct g_shsec_softc *
723 g_shsec_find_device(struct g_class *mp, const char *name)
724 {
725 struct g_shsec_softc *sc;
726 struct g_geom *gp;
727
728 LIST_FOREACH(gp, &mp->geom, geom) {
729 sc = gp->softc;
730 if (sc == NULL)
731 continue;
732 if (strcmp(sc->sc_name, name) == 0)
733 return (sc);
734 }
735 return (NULL);
736 }
737
738 static void
739 g_shsec_ctl_destroy(struct gctl_req *req, struct g_class *mp)
740 {
741 struct g_shsec_softc *sc;
742 int *force, *nargs, error;
743 const char *name;
744 char param[16];
745 u_int i;
746
747 g_topology_assert();
748
749 nargs = gctl_get_paraml(req, "nargs", sizeof(*nargs));
750 if (nargs == NULL) {
751 gctl_error(req, "No '%s' argument.", "nargs");
752 return;
753 }
754 if (*nargs <= 0) {
755 gctl_error(req, "Missing device(s).");
756 return;
757 }
758 force = gctl_get_paraml(req, "force", sizeof(*force));
759 if (force == NULL) {
760 gctl_error(req, "No '%s' argument.", "force");
761 return;
762 }
763
764 for (i = 0; i < (u_int)*nargs; i++) {
765 snprintf(param, sizeof(param), "arg%u", i);
766 name = gctl_get_asciiparam(req, param);
767 if (name == NULL) {
768 gctl_error(req, "No 'arg%u' argument.", i);
769 return;
770 }
771 sc = g_shsec_find_device(mp, name);
772 if (sc == NULL) {
773 gctl_error(req, "No such device: %s.", name);
774 return;
775 }
776 error = g_shsec_destroy(sc, *force);
777 if (error != 0) {
778 gctl_error(req, "Cannot destroy device %s (error=%d).",
779 sc->sc_name, error);
780 return;
781 }
782 }
783 }
784
785 static void
786 g_shsec_config(struct gctl_req *req, struct g_class *mp, const char *verb)
787 {
788 uint32_t *version;
789
790 g_topology_assert();
791
792 version = gctl_get_paraml(req, "version", sizeof(*version));
793 if (version == NULL) {
794 gctl_error(req, "No '%s' argument.", "version");
795 return;
796 }
797 if (*version != G_SHSEC_VERSION) {
798 gctl_error(req, "Userland and kernel parts are out of sync.");
799 return;
800 }
801
802 if (strcmp(verb, "stop") == 0) {
803 g_shsec_ctl_destroy(req, mp);
804 return;
805 }
806
807 gctl_error(req, "Unknown verb.");
808 }
809
810 static void
811 g_shsec_dumpconf(struct sbuf *sb, const char *indent, struct g_geom *gp,
812 struct g_consumer *cp, struct g_provider *pp)
813 {
814 struct g_shsec_softc *sc;
815
816 sc = gp->softc;
817 if (sc == NULL)
818 return;
819 if (pp != NULL) {
820 /* Nothing here. */
821 } else if (cp != NULL) {
822 sbuf_printf(sb, "%s<Number>%u</Number>\n", indent,
823 (u_int)cp->index);
824 } else {
825 sbuf_printf(sb, "%s<ID>%u</ID>\n", indent, (u_int)sc->sc_id);
826 sbuf_printf(sb, "%s<Status>Total=%u, Online=%u</Status>\n",
827 indent, sc->sc_ndisks, g_shsec_nvalid(sc));
828 sbuf_printf(sb, "%s<State>", indent);
829 if (sc->sc_provider != NULL && sc->sc_provider->error == 0)
830 sbuf_printf(sb, "UP");
831 else
832 sbuf_printf(sb, "DOWN");
833 sbuf_printf(sb, "</State>\n");
834 }
835 }
836
837 DECLARE_GEOM_CLASS(g_shsec_class, g_shsec);
/* Cache object: ca2b1ca66f2cfdbea7d62904bcb36515 */