/*-
 * Copyright (c) 2004-2006 Pawel Jakub Dawidek <pjd@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: releng/8.3/sys/geom/mirror/g_mirror.c 222920 2011-06-10 09:12:09Z mav $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/bio.h>
#include <sys/sysctl.h>
#include <sys/malloc.h>
#include <sys/eventhandler.h>
#include <vm/uma.h>
#include <geom/geom.h>
#include <sys/proc.h>
#include <sys/kthread.h>
#include <sys/sched.h>
#include <geom/mirror/g_mirror.h>

static MALLOC_DEFINE(M_MIRROR, "mirror_data", "GEOM_MIRROR Data");

SYSCTL_DECL(_kern_geom);
SYSCTL_NODE(_kern_geom, OID_AUTO, mirror, CTLFLAG_RW, 0, "GEOM_MIRROR stuff");
u_int g_mirror_debug = 0;
TUNABLE_INT("kern.geom.mirror.debug", &g_mirror_debug);
SYSCTL_UINT(_kern_geom_mirror, OID_AUTO, debug, CTLFLAG_RW, &g_mirror_debug, 0,
    "Debug level");
static u_int g_mirror_timeout = 4;
TUNABLE_INT("kern.geom.mirror.timeout", &g_mirror_timeout);
SYSCTL_UINT(_kern_geom_mirror, OID_AUTO, timeout, CTLFLAG_RW, &g_mirror_timeout,
    0, "Time to wait on all mirror components");
static u_int g_mirror_idletime = 5;
TUNABLE_INT("kern.geom.mirror.idletime", &g_mirror_idletime);
SYSCTL_UINT(_kern_geom_mirror, OID_AUTO, idletime, CTLFLAG_RW,
    &g_mirror_idletime, 0, "Mark components as clean when idling");
static u_int g_mirror_disconnect_on_failure = 1;
TUNABLE_INT("kern.geom.mirror.disconnect_on_failure",
    &g_mirror_disconnect_on_failure);
SYSCTL_UINT(_kern_geom_mirror, OID_AUTO, disconnect_on_failure, CTLFLAG_RW,
    &g_mirror_disconnect_on_failure, 0, "Disconnect component on I/O failure.");
static u_int g_mirror_syncreqs = 2;
TUNABLE_INT("kern.geom.mirror.sync_requests", &g_mirror_syncreqs);
SYSCTL_UINT(_kern_geom_mirror, OID_AUTO, sync_requests, CTLFLAG_RDTUN,
    &g_mirror_syncreqs, 0, "Parallel synchronization I/O requests.");

#define	MSLEEP(ident, mtx, priority, wmesg, timeout)	do {		\
	G_MIRROR_DEBUG(4, "%s: Sleeping %p.", __func__, (ident));	\
	msleep((ident), (mtx), (priority), (wmesg), (timeout));		\
	G_MIRROR_DEBUG(4, "%s: Woken up %p.", __func__, (ident));	\
} while (0)

static eventhandler_tag g_mirror_pre_sync = NULL;

static int g_mirror_destroy_geom(struct gctl_req *req, struct g_class *mp,
    struct g_geom *gp);
static g_taste_t g_mirror_taste;
static void g_mirror_init(struct g_class *mp);
static void g_mirror_fini(struct g_class *mp);

struct g_class g_mirror_class = {
	.name = G_MIRROR_CLASS_NAME,
	.version = G_VERSION,
	.ctlreq = g_mirror_config,
	.taste = g_mirror_taste,
	.destroy_geom = g_mirror_destroy_geom,
	.init = g_mirror_init,
	.fini = g_mirror_fini
};

static void g_mirror_destroy_provider(struct g_mirror_softc *sc);
static int g_mirror_update_disk(struct g_mirror_disk *disk, u_int state);
static void g_mirror_update_device(struct g_mirror_softc *sc, boolean_t force);
static void g_mirror_dumpconf(struct sbuf *sb, const char *indent,
    struct g_geom *gp, struct g_consumer *cp, struct g_provider *pp);
static void g_mirror_sync_stop(struct g_mirror_disk *disk, int type);
static void g_mirror_register_request(struct bio *bp);
static void g_mirror_sync_release(struct g_mirror_softc *sc);

static const char *
g_mirror_disk_state2str(int state)
{

	switch (state) {
	case G_MIRROR_DISK_STATE_NONE:
		return ("NONE");
	case G_MIRROR_DISK_STATE_NEW:
		return ("NEW");
	case G_MIRROR_DISK_STATE_ACTIVE:
		return ("ACTIVE");
	case G_MIRROR_DISK_STATE_STALE:
		return ("STALE");
	case G_MIRROR_DISK_STATE_SYNCHRONIZING:
		return ("SYNCHRONIZING");
	case G_MIRROR_DISK_STATE_DISCONNECTED:
		return ("DISCONNECTED");
	case G_MIRROR_DISK_STATE_DESTROY:
		return ("DESTROY");
	default:
		return ("INVALID");
	}
}

static const char *
g_mirror_device_state2str(int state)
{

	switch (state) {
	case G_MIRROR_DEVICE_STATE_STARTING:
		return ("STARTING");
	case G_MIRROR_DEVICE_STATE_RUNNING:
		return ("RUNNING");
	default:
		return ("INVALID");
	}
}

static const char *
g_mirror_get_diskname(struct g_mirror_disk *disk)
{

	if (disk->d_consumer == NULL || disk->d_consumer->provider == NULL)
		return ("[unknown]");
	return (disk->d_name);
}

/*
 * --- Event handling functions ---
 * Events in geom_mirror are used to maintain disk and device status
 * from a single thread, which simplifies locking.
 */
static void
g_mirror_event_free(struct g_mirror_event *ep)
{

	free(ep, M_MIRROR);
}

int
g_mirror_event_send(void *arg, int state, int flags)
{
	struct g_mirror_softc *sc;
	struct g_mirror_disk *disk;
	struct g_mirror_event *ep;
	int error;

	ep = malloc(sizeof(*ep), M_MIRROR, M_WAITOK);
	G_MIRROR_DEBUG(4, "%s: Sending event %p.", __func__, ep);
	if ((flags & G_MIRROR_EVENT_DEVICE) != 0) {
		disk = NULL;
		sc = arg;
	} else {
		disk = arg;
		sc = disk->d_softc;
	}
	ep->e_disk = disk;
	ep->e_state = state;
	ep->e_flags = flags;
	ep->e_error = 0;
	mtx_lock(&sc->sc_events_mtx);
	TAILQ_INSERT_TAIL(&sc->sc_events, ep, e_next);
	mtx_unlock(&sc->sc_events_mtx);
	G_MIRROR_DEBUG(4, "%s: Waking up %p.", __func__, sc);
	mtx_lock(&sc->sc_queue_mtx);
	wakeup(sc);
	mtx_unlock(&sc->sc_queue_mtx);
	if ((flags & G_MIRROR_EVENT_DONTWAIT) != 0)
		return (0);
	sx_assert(&sc->sc_lock, SX_XLOCKED);
	G_MIRROR_DEBUG(4, "%s: Sleeping %p.", __func__, ep);
	sx_xunlock(&sc->sc_lock);
	while ((ep->e_flags & G_MIRROR_EVENT_DONE) == 0) {
		mtx_lock(&sc->sc_events_mtx);
		MSLEEP(ep, &sc->sc_events_mtx, PRIBIO | PDROP, "m:event",
		    hz * 5);
	}
	error = ep->e_error;
	g_mirror_event_free(ep);
	sx_xlock(&sc->sc_lock);
	return (error);
}

static struct g_mirror_event *
g_mirror_event_get(struct g_mirror_softc *sc)
{
	struct g_mirror_event *ep;

	mtx_lock(&sc->sc_events_mtx);
	ep = TAILQ_FIRST(&sc->sc_events);
	mtx_unlock(&sc->sc_events_mtx);
	return (ep);
}

static void
g_mirror_event_remove(struct g_mirror_softc *sc, struct g_mirror_event *ep)
{

	mtx_lock(&sc->sc_events_mtx);
	TAILQ_REMOVE(&sc->sc_events, ep, e_next);
	mtx_unlock(&sc->sc_events_mtx);
}

static void
g_mirror_event_cancel(struct g_mirror_disk *disk)
{
	struct g_mirror_softc *sc;
	struct g_mirror_event *ep, *tmpep;

	sc = disk->d_softc;
	sx_assert(&sc->sc_lock, SX_XLOCKED);

	mtx_lock(&sc->sc_events_mtx);
	TAILQ_FOREACH_SAFE(ep, &sc->sc_events, e_next, tmpep) {
		if ((ep->e_flags & G_MIRROR_EVENT_DEVICE) != 0)
			continue;
		if (ep->e_disk != disk)
			continue;
		TAILQ_REMOVE(&sc->sc_events, ep, e_next);
		if ((ep->e_flags & G_MIRROR_EVENT_DONTWAIT) != 0)
			g_mirror_event_free(ep);
		else {
			ep->e_error = ECANCELED;
			wakeup(ep);
		}
	}
	mtx_unlock(&sc->sc_events_mtx);
}

/*
 * Return the number of disks in the given state.
 * If state is equal to -1, count all connected disks.
 */
u_int
g_mirror_ndisks(struct g_mirror_softc *sc, int state)
{
	struct g_mirror_disk *disk;
	u_int n = 0;

	sx_assert(&sc->sc_lock, SX_LOCKED);

	LIST_FOREACH(disk, &sc->sc_disks, d_next) {
		if (state == -1 || disk->d_state == state)
			n++;
	}
	return (n);
}

/*
 * Find a disk in the mirror by its disk ID.
 */
static struct g_mirror_disk *
g_mirror_id2disk(struct g_mirror_softc *sc, uint32_t id)
{
	struct g_mirror_disk *disk;

	sx_assert(&sc->sc_lock, SX_XLOCKED);

	LIST_FOREACH(disk, &sc->sc_disks, d_next) {
		if (disk->d_id == id)
			return (disk);
	}
	return (NULL);
}

static u_int
g_mirror_nrequests(struct g_mirror_softc *sc, struct g_consumer *cp)
{
	struct bio *bp;
	u_int nreqs = 0;

	mtx_lock(&sc->sc_queue_mtx);
	TAILQ_FOREACH(bp, &sc->sc_queue.queue, bio_queue) {
		if (bp->bio_from == cp)
			nreqs++;
	}
	mtx_unlock(&sc->sc_queue_mtx);
	return (nreqs);
}

static int
g_mirror_is_busy(struct g_mirror_softc *sc, struct g_consumer *cp)
{

	if (cp->index > 0) {
		G_MIRROR_DEBUG(2,
		    "I/O requests for %s exist, can't destroy it now.",
		    cp->provider->name);
		return (1);
	}
	if (g_mirror_nrequests(sc, cp) > 0) {
		G_MIRROR_DEBUG(2,
		    "I/O requests for %s in queue, can't destroy it now.",
		    cp->provider->name);
		return (1);
	}
	return (0);
}

static void
g_mirror_destroy_consumer(void *arg, int flags __unused)
{
	struct g_consumer *cp;

	g_topology_assert();

	cp = arg;
	G_MIRROR_DEBUG(1, "Consumer %s destroyed.", cp->provider->name);
	g_detach(cp);
	g_destroy_consumer(cp);
}

static void
g_mirror_kill_consumer(struct g_mirror_softc *sc, struct g_consumer *cp)
{
	struct g_provider *pp;
	int retaste_wait;

	g_topology_assert();

	cp->private = NULL;
	if (g_mirror_is_busy(sc, cp))
		return;
	pp = cp->provider;
	retaste_wait = 0;
	if (cp->acw == 1) {
		if ((pp->geom->flags & G_GEOM_WITHER) == 0)
			retaste_wait = 1;
	}
	G_MIRROR_DEBUG(2, "Access %s r%dw%de%d = %d", pp->name, -cp->acr,
	    -cp->acw, -cp->ace, 0);
	if (cp->acr > 0 || cp->acw > 0 || cp->ace > 0)
		g_access(cp, -cp->acr, -cp->acw, -cp->ace);
	if (retaste_wait) {
		/*
		 * After the retaste event was sent (inside g_access()), we
		 * can send an event to detach and destroy the consumer.
		 * A class which still has a consumer attached to the given
		 * provider will not receive a retaste event for that
		 * provider.
		 * This is how we ignore retaste events when closing
		 * consumers opened for writing: the consumer is detached
		 * and destroyed only after the retaste event is sent.
		 */
		g_post_event(g_mirror_destroy_consumer, cp, M_WAITOK, NULL);
		return;
	}
	G_MIRROR_DEBUG(1, "Consumer %s destroyed.", pp->name);
	g_detach(cp);
	g_destroy_consumer(cp);
}

static int
g_mirror_connect_disk(struct g_mirror_disk *disk, struct g_provider *pp)
{
	struct g_consumer *cp;
	int error;

	g_topology_assert_not();
	KASSERT(disk->d_consumer == NULL,
	    ("Disk already connected (device %s).", disk->d_softc->sc_name));

	g_topology_lock();
	cp = g_new_consumer(disk->d_softc->sc_geom);
	error = g_attach(cp, pp);
	if (error != 0) {
		g_destroy_consumer(cp);
		g_topology_unlock();
		return (error);
	}
	error = g_access(cp, 1, 1, 1);
	if (error != 0) {
		g_detach(cp);
		g_destroy_consumer(cp);
		g_topology_unlock();
		G_MIRROR_DEBUG(0, "Cannot open consumer %s (error=%d).",
		    pp->name, error);
		return (error);
	}
	g_topology_unlock();
	disk->d_consumer = cp;
	disk->d_consumer->private = disk;
	disk->d_consumer->index = 0;

	G_MIRROR_DEBUG(2, "Disk %s connected.", g_mirror_get_diskname(disk));
	return (0);
}

static void
g_mirror_disconnect_consumer(struct g_mirror_softc *sc, struct g_consumer *cp)
{

	g_topology_assert();

	if (cp == NULL)
		return;
	if (cp->provider != NULL)
		g_mirror_kill_consumer(sc, cp);
	else
		g_destroy_consumer(cp);
}

/*
 * Initialize a disk: allocate memory, create a consumer, attach it to the
 * provider and open access (r1w1e1) to it.
 */
static struct g_mirror_disk *
g_mirror_init_disk(struct g_mirror_softc *sc, struct g_provider *pp,
    struct g_mirror_metadata *md, int *errorp)
{
	struct g_mirror_disk *disk;
	int error;

	disk = malloc(sizeof(*disk), M_MIRROR, M_NOWAIT | M_ZERO);
	if (disk == NULL) {
		error = ENOMEM;
		goto fail;
	}
	disk->d_softc = sc;
	error = g_mirror_connect_disk(disk, pp);
	if (error != 0)
		goto fail;
	disk->d_id = md->md_did;
	disk->d_state = G_MIRROR_DISK_STATE_NONE;
	disk->d_priority = md->md_priority;
	disk->d_flags = md->md_dflags;
	if (md->md_provider[0] != '\0')
		disk->d_flags |= G_MIRROR_DISK_FLAG_HARDCODED;
	disk->d_sync.ds_consumer = NULL;
	disk->d_sync.ds_offset = md->md_sync_offset;
	disk->d_sync.ds_offset_done = md->md_sync_offset;
	disk->d_genid = md->md_genid;
	disk->d_sync.ds_syncid = md->md_syncid;
	if (errorp != NULL)
		*errorp = 0;
	return (disk);
fail:
	if (errorp != NULL)
		*errorp = error;
	if (disk != NULL)
		free(disk, M_MIRROR);
	return (NULL);
}

static void
g_mirror_destroy_disk(struct g_mirror_disk *disk)
{
	struct g_mirror_softc *sc;

	g_topology_assert_not();
	sc = disk->d_softc;
	sx_assert(&sc->sc_lock, SX_XLOCKED);

	LIST_REMOVE(disk, d_next);
	g_mirror_event_cancel(disk);
	if (sc->sc_hint == disk)
		sc->sc_hint = NULL;
	switch (disk->d_state) {
	case G_MIRROR_DISK_STATE_SYNCHRONIZING:
		g_mirror_sync_stop(disk, 1);
		/* FALLTHROUGH */
	case G_MIRROR_DISK_STATE_NEW:
	case G_MIRROR_DISK_STATE_STALE:
	case G_MIRROR_DISK_STATE_ACTIVE:
		g_topology_lock();
		g_mirror_disconnect_consumer(sc, disk->d_consumer);
		g_topology_unlock();
		free(disk, M_MIRROR);
		break;
	default:
		KASSERT(0 == 1, ("Wrong disk state (%s, %s).",
		    g_mirror_get_diskname(disk),
		    g_mirror_disk_state2str(disk->d_state)));
	}
}

static void
g_mirror_destroy_device(struct g_mirror_softc *sc)
{
	struct g_mirror_disk *disk;
	struct g_mirror_event *ep;
	struct g_geom *gp;
	struct g_consumer *cp, *tmpcp;

	g_topology_assert_not();
	sx_assert(&sc->sc_lock, SX_XLOCKED);

	gp = sc->sc_geom;
	if (sc->sc_provider != NULL)
		g_mirror_destroy_provider(sc);
	for (disk = LIST_FIRST(&sc->sc_disks); disk != NULL;
	    disk = LIST_FIRST(&sc->sc_disks)) {
		disk->d_flags &= ~G_MIRROR_DISK_FLAG_DIRTY;
		g_mirror_update_metadata(disk);
		g_mirror_destroy_disk(disk);
	}
	while ((ep = g_mirror_event_get(sc)) != NULL) {
		g_mirror_event_remove(sc, ep);
		if ((ep->e_flags & G_MIRROR_EVENT_DONTWAIT) != 0)
			g_mirror_event_free(ep);
		else {
			ep->e_error = ECANCELED;
			ep->e_flags |= G_MIRROR_EVENT_DONE;
			G_MIRROR_DEBUG(4, "%s: Waking up %p.", __func__, ep);
			mtx_lock(&sc->sc_events_mtx);
			wakeup(ep);
			mtx_unlock(&sc->sc_events_mtx);
		}
	}
	callout_drain(&sc->sc_callout);

	g_topology_lock();
	LIST_FOREACH_SAFE(cp, &sc->sc_sync.ds_geom->consumer, consumer, tmpcp) {
		g_mirror_disconnect_consumer(sc, cp);
	}
	g_wither_geom(sc->sc_sync.ds_geom, ENXIO);
	G_MIRROR_DEBUG(0, "Device %s destroyed.", gp->name);
	g_wither_geom(gp, ENXIO);
	g_topology_unlock();
	mtx_destroy(&sc->sc_queue_mtx);
	mtx_destroy(&sc->sc_events_mtx);
	sx_xunlock(&sc->sc_lock);
	sx_destroy(&sc->sc_lock);
}

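/*
 * Orphan handler, called by GEOM when a component's underlying provider
 * goes away.  Request disconnection of the disk from the worker thread
 * and ask for a syncid bump, since the surviving components now become
 * more recent than the lost one.
 */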
static void
g_mirror_orphan(struct g_consumer *cp)
{
	struct g_mirror_disk *disk;

	g_topology_assert();

	disk = cp->private;
	if (disk == NULL)
		return;
	disk->d_softc->sc_bump_id |= G_MIRROR_BUMP_SYNCID;
	g_mirror_event_send(disk, G_MIRROR_DISK_STATE_DISCONNECTED,
	    G_MIRROR_EVENT_DONTWAIT);
}

/*
 * Return the next active disk on the list.
 * It is possible that it will be the same disk as the given one.
 * If there are no active disks on the list, NULL is returned.
 */
static __inline struct g_mirror_disk *
g_mirror_find_next(struct g_mirror_softc *sc, struct g_mirror_disk *disk)
{
	struct g_mirror_disk *dp;

	for (dp = LIST_NEXT(disk, d_next); dp != disk;
	    dp = LIST_NEXT(dp, d_next)) {
		if (dp == NULL)
			dp = LIST_FIRST(&sc->sc_disks);
		if (dp->d_state == G_MIRROR_DISK_STATE_ACTIVE)
			break;
	}
	if (dp->d_state != G_MIRROR_DISK_STATE_ACTIVE)
		return (NULL);
	return (dp);
}

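/*
 * Pick a disk for a round-robin read, using sc_hint to remember where
 * the previous request went.  Returns NULL if no disk is ACTIVE.
 */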
static struct g_mirror_disk *
g_mirror_get_disk(struct g_mirror_softc *sc)
{
	struct g_mirror_disk *disk;

	if (sc->sc_hint == NULL) {
		sc->sc_hint = LIST_FIRST(&sc->sc_disks);
		if (sc->sc_hint == NULL)
			return (NULL);
	}
	disk = sc->sc_hint;
	if (disk->d_state != G_MIRROR_DISK_STATE_ACTIVE) {
		disk = g_mirror_find_next(sc, disk);
		if (disk == NULL)
			return (NULL);
	}
	sc->sc_hint = g_mirror_find_next(sc, disk);
	return (disk);
}

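/*
 * Write the given metadata (or zeroes, if md is NULL) into the last
 * sector of the component.  On failure the disk is marked broken and,
 * if configured, disconnected from the mirror with a genid bump.
 */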
static int
g_mirror_write_metadata(struct g_mirror_disk *disk,
    struct g_mirror_metadata *md)
{
	struct g_mirror_softc *sc;
	struct g_consumer *cp;
	off_t offset, length;
	u_char *sector;
	int error = 0;

	g_topology_assert_not();
	sc = disk->d_softc;
	sx_assert(&sc->sc_lock, SX_LOCKED);

	cp = disk->d_consumer;
	KASSERT(cp != NULL, ("NULL consumer (%s).", sc->sc_name));
	KASSERT(cp->provider != NULL, ("NULL provider (%s).", sc->sc_name));
	KASSERT(cp->acr >= 1 && cp->acw >= 1 && cp->ace >= 1,
	    ("Consumer %s closed? (r%dw%de%d).", cp->provider->name, cp->acr,
	    cp->acw, cp->ace));
	length = cp->provider->sectorsize;
	offset = cp->provider->mediasize - length;
	sector = malloc((size_t)length, M_MIRROR, M_WAITOK | M_ZERO);
	if (md != NULL)
		mirror_metadata_encode(md, sector);
	error = g_write_data(cp, offset, sector, length);
	free(sector, M_MIRROR);
	if (error != 0) {
		if ((disk->d_flags & G_MIRROR_DISK_FLAG_BROKEN) == 0) {
			disk->d_flags |= G_MIRROR_DISK_FLAG_BROKEN;
			G_MIRROR_DEBUG(0, "Cannot write metadata on %s "
			    "(device=%s, error=%d).",
			    g_mirror_get_diskname(disk), sc->sc_name, error);
		} else {
			G_MIRROR_DEBUG(1, "Cannot write metadata on %s "
			    "(device=%s, error=%d).",
			    g_mirror_get_diskname(disk), sc->sc_name, error);
		}
		if (g_mirror_disconnect_on_failure &&
		    g_mirror_ndisks(sc, G_MIRROR_DISK_STATE_ACTIVE) > 1) {
			sc->sc_bump_id |= G_MIRROR_BUMP_GENID;
			g_mirror_event_send(disk,
			    G_MIRROR_DISK_STATE_DISCONNECTED,
			    G_MIRROR_EVENT_DONTWAIT);
		}
	}
	return (error);
}

static int
g_mirror_clear_metadata(struct g_mirror_disk *disk)
{
	int error;

	g_topology_assert_not();
	sx_assert(&disk->d_softc->sc_lock, SX_LOCKED);

	error = g_mirror_write_metadata(disk, NULL);
	if (error == 0) {
		G_MIRROR_DEBUG(2, "Metadata on %s cleared.",
		    g_mirror_get_diskname(disk));
	} else {
		G_MIRROR_DEBUG(0,
		    "Cannot clear metadata on disk %s (error=%d).",
		    g_mirror_get_diskname(disk), error);
	}
	return (error);
}

void
g_mirror_fill_metadata(struct g_mirror_softc *sc, struct g_mirror_disk *disk,
    struct g_mirror_metadata *md)
{

	strlcpy(md->md_magic, G_MIRROR_MAGIC, sizeof(md->md_magic));
	md->md_version = G_MIRROR_VERSION;
	strlcpy(md->md_name, sc->sc_name, sizeof(md->md_name));
	md->md_mid = sc->sc_id;
	md->md_all = sc->sc_ndisks;
	md->md_slice = sc->sc_slice;
	md->md_balance = sc->sc_balance;
	md->md_genid = sc->sc_genid;
	md->md_mediasize = sc->sc_mediasize;
	md->md_sectorsize = sc->sc_sectorsize;
	md->md_mflags = (sc->sc_flags & G_MIRROR_DEVICE_FLAG_MASK);
	bzero(md->md_provider, sizeof(md->md_provider));
	if (disk == NULL) {
		md->md_did = arc4random();
		md->md_priority = 0;
		md->md_syncid = 0;
		md->md_dflags = 0;
		md->md_sync_offset = 0;
		md->md_provsize = 0;
	} else {
		md->md_did = disk->d_id;
		md->md_priority = disk->d_priority;
		md->md_syncid = disk->d_sync.ds_syncid;
		md->md_dflags = (disk->d_flags & G_MIRROR_DISK_FLAG_MASK);
		if (disk->d_state == G_MIRROR_DISK_STATE_SYNCHRONIZING)
			md->md_sync_offset = disk->d_sync.ds_offset_done;
		else
			md->md_sync_offset = 0;
		if ((disk->d_flags & G_MIRROR_DISK_FLAG_HARDCODED) != 0) {
			strlcpy(md->md_provider,
			    disk->d_consumer->provider->name,
			    sizeof(md->md_provider));
		}
		md->md_provsize = disk->d_consumer->provider->mediasize;
	}
}

void
g_mirror_update_metadata(struct g_mirror_disk *disk)
{
	struct g_mirror_softc *sc;
	struct g_mirror_metadata md;
	int error;

	g_topology_assert_not();
	sc = disk->d_softc;
	sx_assert(&sc->sc_lock, SX_LOCKED);

	g_mirror_fill_metadata(sc, disk, &md);
	error = g_mirror_write_metadata(disk, &md);
	if (error == 0) {
		G_MIRROR_DEBUG(2, "Metadata on %s updated.",
		    g_mirror_get_diskname(disk));
	} else {
		G_MIRROR_DEBUG(0,
		    "Cannot update metadata on disk %s (error=%d).",
		    g_mirror_get_diskname(disk), error);
	}
}

static void
g_mirror_bump_syncid(struct g_mirror_softc *sc)
{
	struct g_mirror_disk *disk;

	g_topology_assert_not();
	sx_assert(&sc->sc_lock, SX_XLOCKED);
	KASSERT(g_mirror_ndisks(sc, G_MIRROR_DISK_STATE_ACTIVE) > 0,
	    ("%s called with no active disks (device=%s).", __func__,
	    sc->sc_name));

	sc->sc_syncid++;
	G_MIRROR_DEBUG(1, "Device %s: syncid bumped to %u.", sc->sc_name,
	    sc->sc_syncid);
	LIST_FOREACH(disk, &sc->sc_disks, d_next) {
		if (disk->d_state == G_MIRROR_DISK_STATE_ACTIVE ||
		    disk->d_state == G_MIRROR_DISK_STATE_SYNCHRONIZING) {
			disk->d_sync.ds_syncid = sc->sc_syncid;
			g_mirror_update_metadata(disk);
		}
	}
}

static void
g_mirror_bump_genid(struct g_mirror_softc *sc)
{
	struct g_mirror_disk *disk;

	g_topology_assert_not();
	sx_assert(&sc->sc_lock, SX_XLOCKED);
	KASSERT(g_mirror_ndisks(sc, G_MIRROR_DISK_STATE_ACTIVE) > 0,
	    ("%s called with no active disks (device=%s).", __func__,
	    sc->sc_name));

	sc->sc_genid++;
	G_MIRROR_DEBUG(1, "Device %s: genid bumped to %u.", sc->sc_name,
	    sc->sc_genid);
	LIST_FOREACH(disk, &sc->sc_disks, d_next) {
		if (disk->d_state == G_MIRROR_DISK_STATE_ACTIVE ||
		    disk->d_state == G_MIRROR_DISK_STATE_SYNCHRONIZING) {
			disk->d_genid = sc->sc_genid;
			g_mirror_update_metadata(disk);
		}
	}
}

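/*
 * Mark all active components as clean if the mirror has seen no writes
 * for g_mirror_idletime seconds.  Returns the number of seconds left
 * before the components can be marked clean, or 0 if there is nothing
 * (more) to do.
 */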
static int
g_mirror_idle(struct g_mirror_softc *sc, int acw)
{
	struct g_mirror_disk *disk;
	int timeout;

	g_topology_assert_not();
	sx_assert(&sc->sc_lock, SX_XLOCKED);

	if (sc->sc_provider == NULL)
		return (0);
	if ((sc->sc_flags & G_MIRROR_DEVICE_FLAG_NOFAILSYNC) != 0)
		return (0);
	if (sc->sc_idle)
		return (0);
	if (sc->sc_writes > 0)
		return (0);
	if (acw > 0 || (acw == -1 && sc->sc_provider->acw > 0)) {
		timeout = g_mirror_idletime - (time_uptime - sc->sc_last_write);
		if (timeout > 0)
			return (timeout);
	}
	sc->sc_idle = 1;
	LIST_FOREACH(disk, &sc->sc_disks, d_next) {
		if (disk->d_state != G_MIRROR_DISK_STATE_ACTIVE)
			continue;
		G_MIRROR_DEBUG(1, "Disk %s (device %s) marked as clean.",
		    g_mirror_get_diskname(disk), sc->sc_name);
		disk->d_flags &= ~G_MIRROR_DISK_FLAG_DIRTY;
		g_mirror_update_metadata(disk);
	}
	return (0);
}

static void
g_mirror_unidle(struct g_mirror_softc *sc)
{
	struct g_mirror_disk *disk;

	g_topology_assert_not();
	sx_assert(&sc->sc_lock, SX_XLOCKED);

	if ((sc->sc_flags & G_MIRROR_DEVICE_FLAG_NOFAILSYNC) != 0)
		return;
	sc->sc_idle = 0;
	sc->sc_last_write = time_uptime;
	LIST_FOREACH(disk, &sc->sc_disks, d_next) {
		if (disk->d_state != G_MIRROR_DISK_STATE_ACTIVE)
			continue;
		G_MIRROR_DEBUG(1, "Disk %s (device %s) marked as dirty.",
		    g_mirror_get_diskname(disk), sc->sc_name);
		disk->d_flags |= G_MIRROR_DISK_FLAG_DIRTY;
		g_mirror_update_metadata(disk);
	}
}

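/*
 * Completion routine for regular requests cloned to the components:
 * tag the bio as a finished regular request and queue it back for the
 * worker thread.
 */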
static void
g_mirror_done(struct bio *bp)
{
	struct g_mirror_softc *sc;

	sc = bp->bio_from->geom->softc;
	bp->bio_cflags = G_MIRROR_BIO_FLAG_REGULAR;
	mtx_lock(&sc->sc_queue_mtx);
	bioq_disksort(&sc->sc_queue, bp);
	mtx_unlock(&sc->sc_queue_mtx);
	wakeup(sc);
}

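/*
 * Finish a regular request in the worker thread: account for the
 * completed child bio, deliver the parent once all children are in,
 * and retry failed reads on another component.
 */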
static void
g_mirror_regular_request(struct bio *bp)
{
	struct g_mirror_softc *sc;
	struct g_mirror_disk *disk;
	struct bio *pbp;

	g_topology_assert_not();

	pbp = bp->bio_parent;
	sc = pbp->bio_to->geom->softc;
	bp->bio_from->index--;
	if (bp->bio_cmd == BIO_WRITE)
		sc->sc_writes--;
	disk = bp->bio_from->private;
	if (disk == NULL) {
		g_topology_lock();
		g_mirror_kill_consumer(sc, bp->bio_from);
		g_topology_unlock();
	}

	pbp->bio_inbed++;
	KASSERT(pbp->bio_inbed <= pbp->bio_children,
	    ("bio_inbed (%u) is bigger than bio_children (%u).", pbp->bio_inbed,
	    pbp->bio_children));
	if (bp->bio_error == 0 && pbp->bio_error == 0) {
		G_MIRROR_LOGREQ(3, bp, "Request delivered.");
		g_destroy_bio(bp);
		if (pbp->bio_children == pbp->bio_inbed) {
			G_MIRROR_LOGREQ(3, pbp, "Request delivered.");
			pbp->bio_completed = pbp->bio_length;
			if (pbp->bio_cmd == BIO_WRITE) {
				bioq_remove(&sc->sc_inflight, pbp);
				/* Release delayed sync requests if possible. */
				g_mirror_sync_release(sc);
			}
			g_io_deliver(pbp, pbp->bio_error);
		}
		return;
	} else if (bp->bio_error != 0) {
		if (pbp->bio_error == 0)
			pbp->bio_error = bp->bio_error;
		if (disk != NULL) {
			if ((disk->d_flags & G_MIRROR_DISK_FLAG_BROKEN) == 0) {
				disk->d_flags |= G_MIRROR_DISK_FLAG_BROKEN;
				G_MIRROR_LOGREQ(0, bp,
				    "Request failed (error=%d).",
				    bp->bio_error);
			} else {
				G_MIRROR_LOGREQ(1, bp,
				    "Request failed (error=%d).",
				    bp->bio_error);
			}
			if (g_mirror_disconnect_on_failure &&
			    g_mirror_ndisks(sc, G_MIRROR_DISK_STATE_ACTIVE) > 1)
			{
				sc->sc_bump_id |= G_MIRROR_BUMP_GENID;
				g_mirror_event_send(disk,
				    G_MIRROR_DISK_STATE_DISCONNECTED,
				    G_MIRROR_EVENT_DONTWAIT);
			}
		}
		switch (pbp->bio_cmd) {
		case BIO_DELETE:
		case BIO_WRITE:
			pbp->bio_inbed--;
			pbp->bio_children--;
			break;
		}
	}
	g_destroy_bio(bp);

	switch (pbp->bio_cmd) {
	case BIO_READ:
		if (pbp->bio_inbed < pbp->bio_children)
			break;
		if (g_mirror_ndisks(sc, G_MIRROR_DISK_STATE_ACTIVE) == 1)
			g_io_deliver(pbp, pbp->bio_error);
		else {
			pbp->bio_error = 0;
			mtx_lock(&sc->sc_queue_mtx);
			bioq_disksort(&sc->sc_queue, pbp);
			mtx_unlock(&sc->sc_queue_mtx);
			G_MIRROR_DEBUG(4, "%s: Waking up %p.", __func__, sc);
			wakeup(sc);
		}
		break;
	case BIO_DELETE:
	case BIO_WRITE:
		if (pbp->bio_children == 0) {
			/*
			 * All requests failed.
			 */
		} else if (pbp->bio_inbed < pbp->bio_children) {
			/* Do nothing. */
			break;
		} else if (pbp->bio_children == pbp->bio_inbed) {
			/* Some requests succeeded. */
			pbp->bio_error = 0;
			pbp->bio_completed = pbp->bio_length;
		}
		bioq_remove(&sc->sc_inflight, pbp);
		/* Release delayed sync requests if possible. */
		g_mirror_sync_release(sc);
		g_io_deliver(pbp, pbp->bio_error);
		break;
	default:
		KASSERT(1 == 0, ("Invalid request: %u.", pbp->bio_cmd));
		break;
	}
}

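/*
 * Completion routine for synchronization requests: tag the bio as a
 * finished sync request and pass it back to the worker thread.
 */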
static void
g_mirror_sync_done(struct bio *bp)
{
	struct g_mirror_softc *sc;

	G_MIRROR_LOGREQ(3, bp, "Synchronization request delivered.");
	sc = bp->bio_from->geom->softc;
	bp->bio_cflags = G_MIRROR_BIO_FLAG_SYNC;
	mtx_lock(&sc->sc_queue_mtx);
	bioq_disksort(&sc->sc_queue, bp);
	mtx_unlock(&sc->sc_queue_mtx);
	wakeup(sc);
}

static void
g_mirror_kernel_dump(struct bio *bp)
{
	struct g_mirror_softc *sc;
	struct g_mirror_disk *disk;
	struct bio *cbp;
	struct g_kerneldump *gkd;

	/*
	 * We configure dumping to the first component, because this component
	 * will be used for reading with the 'prefer' balance algorithm.
	 * If the component with the highest priority is currently disconnected,
	 * we will not be able to read the dump after the reboot if it is
	 * connected and synchronized later. Can we do something better?
	 */
	sc = bp->bio_to->geom->softc;
	disk = LIST_FIRST(&sc->sc_disks);

	gkd = (struct g_kerneldump *)bp->bio_data;
	if (gkd->length > bp->bio_to->mediasize)
		gkd->length = bp->bio_to->mediasize;
	cbp = g_clone_bio(bp);
	if (cbp == NULL) {
		g_io_deliver(bp, ENOMEM);
		return;
	}
	cbp->bio_done = g_std_done;
	g_io_request(cbp, disk->d_consumer);
	G_MIRROR_DEBUG(1, "Kernel dump will go to %s.",
	    g_mirror_get_diskname(disk));
}

static void
g_mirror_flush(struct g_mirror_softc *sc, struct bio *bp)
{
	struct bio_queue_head queue;
	struct g_mirror_disk *disk;
	struct g_consumer *cp;
	struct bio *cbp;

	bioq_init(&queue);
	LIST_FOREACH(disk, &sc->sc_disks, d_next) {
		if (disk->d_state != G_MIRROR_DISK_STATE_ACTIVE)
			continue;
		cbp = g_clone_bio(bp);
		if (cbp == NULL) {
			for (cbp = bioq_first(&queue); cbp != NULL;
			    cbp = bioq_first(&queue)) {
				bioq_remove(&queue, cbp);
				g_destroy_bio(cbp);
			}
			if (bp->bio_error == 0)
				bp->bio_error = ENOMEM;
			g_io_deliver(bp, bp->bio_error);
			return;
		}
		bioq_insert_tail(&queue, cbp);
		cbp->bio_done = g_std_done;
		cbp->bio_caller1 = disk;
		cbp->bio_to = disk->d_consumer->provider;
	}
	for (cbp = bioq_first(&queue); cbp != NULL; cbp = bioq_first(&queue)) {
		bioq_remove(&queue, cbp);
		G_MIRROR_LOGREQ(3, cbp, "Sending request.");
		disk = cbp->bio_caller1;
		cbp->bio_caller1 = NULL;
		cp = disk->d_consumer;
		KASSERT(cp->acr >= 1 && cp->acw >= 1 && cp->ace >= 1,
		    ("Consumer %s not opened (r%dw%de%d).", cp->provider->name,
		    cp->acr, cp->acw, cp->ace));
		g_io_request(cbp, disk->d_consumer);
	}
}

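/*
 * Entry point for I/O requests coming from the mirror provider.
 * BIO_FLUSH and the kernel dump attribute are handled in place;
 * everything else is queued for the worker thread.
 */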
static void
g_mirror_start(struct bio *bp)
{
	struct g_mirror_softc *sc;

	sc = bp->bio_to->geom->softc;
	/*
	 * If sc == NULL or there are no valid disks, the provider's error
	 * should be set and g_mirror_start() should not be called at all.
	 */
	KASSERT(sc != NULL && sc->sc_state == G_MIRROR_DEVICE_STATE_RUNNING,
	    ("Provider's error should be set (error=%d)(mirror=%s).",
	    bp->bio_to->error, bp->bio_to->name));
	G_MIRROR_LOGREQ(3, bp, "Request received.");

	switch (bp->bio_cmd) {
	case BIO_READ:
	case BIO_WRITE:
	case BIO_DELETE:
		break;
	case BIO_FLUSH:
		g_mirror_flush(sc, bp);
		return;
	case BIO_GETATTR:
		if (strcmp("GEOM::kerneldump", bp->bio_attribute) == 0) {
			g_mirror_kernel_dump(bp);
			return;
		}
		/* FALLTHROUGH */
	default:
		g_io_deliver(bp, EOPNOTSUPP);
		return;
	}
	mtx_lock(&sc->sc_queue_mtx);
	bioq_disksort(&sc->sc_queue, bp);
	mtx_unlock(&sc->sc_queue_mtx);
	G_MIRROR_DEBUG(4, "%s: Waking up %p.", __func__, sc);
	wakeup(sc);
}

/*
 * Return TRUE if the given request is colliding with an in-progress
 * synchronization request.
 */
static int
g_mirror_sync_collision(struct g_mirror_softc *sc, struct bio *bp)
{
	struct g_mirror_disk *disk;
	struct bio *sbp;
	off_t rstart, rend, sstart, send;
	int i;

	if (sc->sc_sync.ds_ndisks == 0)
		return (0);
	rstart = bp->bio_offset;
	rend = bp->bio_offset + bp->bio_length;
	LIST_FOREACH(disk, &sc->sc_disks, d_next) {
		if (disk->d_state != G_MIRROR_DISK_STATE_SYNCHRONIZING)
			continue;
		for (i = 0; i < g_mirror_syncreqs; i++) {
			sbp = disk->d_sync.ds_bios[i];
			if (sbp == NULL)
				continue;
			sstart = sbp->bio_offset;
			send = sbp->bio_offset + sbp->bio_length;
			if (rend > sstart && rstart < send)
				return (1);
		}
	}
	return (0);
}

/*
 * Return TRUE if the given sync request is colliding with an in-progress
 * regular request.
 */
static int
g_mirror_regular_collision(struct g_mirror_softc *sc, struct bio *sbp)
{
	off_t rstart, rend, sstart, send;
	struct bio *bp;

	if (sc->sc_sync.ds_ndisks == 0)
		return (0);
	sstart = sbp->bio_offset;
	send = sbp->bio_offset + sbp->bio_length;
	TAILQ_FOREACH(bp, &sc->sc_inflight.queue, bio_queue) {
		rstart = bp->bio_offset;
		rend = bp->bio_offset + bp->bio_length;
		if (rend > sstart && rstart < send)
			return (1);
	}
	return (0);
}

/*
 * Put the request onto the delayed queue.
 */
static void
g_mirror_regular_delay(struct g_mirror_softc *sc, struct bio *bp)
{

	G_MIRROR_LOGREQ(2, bp, "Delaying request.");
	bioq_insert_head(&sc->sc_regular_delayed, bp);
}

/*
 * Put the synchronization request onto the delayed queue.
 */
static void
g_mirror_sync_delay(struct g_mirror_softc *sc, struct bio *bp)
{

	G_MIRROR_LOGREQ(2, bp, "Delaying synchronization request.");
	bioq_insert_tail(&sc->sc_sync_delayed, bp);
}

/*
 * Release delayed regular requests which no longer collide with
 * synchronization requests.
 */
static void
g_mirror_regular_release(struct g_mirror_softc *sc)
{
	struct bio *bp, *bp2;

	TAILQ_FOREACH_SAFE(bp, &sc->sc_regular_delayed.queue, bio_queue, bp2) {
		if (g_mirror_sync_collision(sc, bp))
			continue;
		bioq_remove(&sc->sc_regular_delayed, bp);
		G_MIRROR_LOGREQ(2, bp, "Releasing delayed request (%p).", bp);
		mtx_lock(&sc->sc_queue_mtx);
		bioq_insert_head(&sc->sc_queue, bp);
#if 0
		/*
		 * wakeup() is not needed, because this function is called from
		 * the worker thread.
		 */
		wakeup(&sc->sc_queue);
#endif
		mtx_unlock(&sc->sc_queue_mtx);
	}
}

/*
 * Release delayed sync requests which no longer collide with regular
 * requests.
 */
static void
g_mirror_sync_release(struct g_mirror_softc *sc)
{
	struct bio *bp, *bp2;

	TAILQ_FOREACH_SAFE(bp, &sc->sc_sync_delayed.queue, bio_queue, bp2) {
		if (g_mirror_regular_collision(sc, bp))
			continue;
		bioq_remove(&sc->sc_sync_delayed, bp);
		G_MIRROR_LOGREQ(2, bp,
		    "Releasing delayed synchronization request.");
		g_io_request(bp, bp->bio_from);
	}
}

/*
 * Handle synchronization requests.
 * Every synchronization request is a two-step process: first, a READ
 * request is sent to the active provider, and then a WRITE request (with
 * the data that was read) is sent to the provider being synchronized.
 * When the WRITE is finished, a new synchronization request is sent.
 */
static void
g_mirror_sync_request(struct bio *bp)
{
	struct g_mirror_softc *sc;
	struct g_mirror_disk *disk;

	bp->bio_from->index--;
	sc = bp->bio_from->geom->softc;
	disk = bp->bio_from->private;
	if (disk == NULL) {
		sx_xunlock(&sc->sc_lock); /* Avoid recursion on sc_lock. */
		g_topology_lock();
		g_mirror_kill_consumer(sc, bp->bio_from);
		g_topology_unlock();
		free(bp->bio_data, M_MIRROR);
		g_destroy_bio(bp);
		sx_xlock(&sc->sc_lock);
		return;
	}

	/*
	 * Synchronization request.
	 */
	switch (bp->bio_cmd) {
	case BIO_READ:
	    {
		struct g_consumer *cp;

		if (bp->bio_error != 0) {
			G_MIRROR_LOGREQ(0, bp,
			    "Synchronization request failed (error=%d).",
			    bp->bio_error);
			g_destroy_bio(bp);
			return;
		}
		G_MIRROR_LOGREQ(3, bp,
		    "Synchronization request half-finished.");
		bp->bio_cmd = BIO_WRITE;
		bp->bio_cflags = 0;
		cp = disk->d_consumer;
		KASSERT(cp->acr >= 1 && cp->acw >= 1 && cp->ace >= 1,
		    ("Consumer %s not opened (r%dw%de%d).", cp->provider->name,
		    cp->acr, cp->acw, cp->ace));
		cp->index++;
		g_io_request(bp, cp);
		return;
	    }
	case BIO_WRITE:
	    {
		struct g_mirror_disk_sync *sync;
		off_t offset;
		void *data;
		int i;

		if (bp->bio_error != 0) {
			G_MIRROR_LOGREQ(0, bp,
			    "Synchronization request failed (error=%d).",
			    bp->bio_error);
			g_destroy_bio(bp);
			sc->sc_bump_id |= G_MIRROR_BUMP_GENID;
			g_mirror_event_send(disk,
			    G_MIRROR_DISK_STATE_DISCONNECTED,
			    G_MIRROR_EVENT_DONTWAIT);
			return;
		}
		G_MIRROR_LOGREQ(3, bp, "Synchronization request finished.");
		sync = &disk->d_sync;
		if (sync->ds_offset == sc->sc_mediasize ||
		    sync->ds_consumer == NULL ||
		    (sc->sc_flags & G_MIRROR_DEVICE_FLAG_DESTROY) != 0) {
			/* Don't send more synchronization requests. */
			sync->ds_inflight--;
			if (sync->ds_bios != NULL) {
				i = (int)(uintptr_t)bp->bio_caller1;
				sync->ds_bios[i] = NULL;
			}
			free(bp->bio_data, M_MIRROR);
			g_destroy_bio(bp);
			if (sync->ds_inflight > 0)
				return;
			if (sync->ds_consumer == NULL ||
			    (sc->sc_flags & G_MIRROR_DEVICE_FLAG_DESTROY) != 0) {
				return;
			}
			/* Disk up-to-date, activate it. */
			g_mirror_event_send(disk, G_MIRROR_DISK_STATE_ACTIVE,
			    G_MIRROR_EVENT_DONTWAIT);
			return;
		}

		/* Send next synchronization request. */
		data = bp->bio_data;
		bzero(bp, sizeof(*bp));
		bp->bio_cmd = BIO_READ;
		bp->bio_offset = sync->ds_offset;
		bp->bio_length = MIN(MAXPHYS, sc->sc_mediasize - bp->bio_offset);
		sync->ds_offset += bp->bio_length;
		bp->bio_done = g_mirror_sync_done;
		bp->bio_data = data;
		bp->bio_from = sync->ds_consumer;
		bp->bio_to = sc->sc_provider;
		G_MIRROR_LOGREQ(3, bp, "Sending synchronization request.");
		sync->ds_consumer->index++;
		/*
		 * Delay the request if it is colliding with a regular request.
		 */
		if (g_mirror_regular_collision(sc, bp))
			g_mirror_sync_delay(sc, bp);
		else
			g_io_request(bp, sync->ds_consumer);

		/* Release delayed requests if possible. */
		g_mirror_regular_release(sc);

		/* Find the smallest offset. */
		offset = sc->sc_mediasize;
		for (i = 0; i < g_mirror_syncreqs; i++) {
			bp = sync->ds_bios[i];
			if (bp->bio_offset < offset)
				offset = bp->bio_offset;
		}
		if (sync->ds_offset_done + (MAXPHYS * 100) < offset) {
			/* Update offset_done on every 100 MAXPHYS-sized blocks. */
			sync->ds_offset_done = offset;
			g_mirror_update_metadata(disk);
		}
		return;
	    }
	default:
		KASSERT(1 == 0, ("Invalid command here: %u (device=%s)",
		    bp->bio_cmd, sc->sc_name));
		break;
	}
}

static void
g_mirror_request_prefer(struct g_mirror_softc *sc, struct bio *bp)
{
	struct g_mirror_disk *disk;
	struct g_consumer *cp;
	struct bio *cbp;

	LIST_FOREACH(disk, &sc->sc_disks, d_next) {
		if (disk->d_state == G_MIRROR_DISK_STATE_ACTIVE)
			break;
	}
	if (disk == NULL) {
		if (bp->bio_error == 0)
			bp->bio_error = ENXIO;
		g_io_deliver(bp, bp->bio_error);
		return;
	}
	cbp = g_clone_bio(bp);
	if (cbp == NULL) {
		if (bp->bio_error == 0)
			bp->bio_error = ENOMEM;
		g_io_deliver(bp, bp->bio_error);
		return;
	}
	/*
	 * Fill in the component buf structure.
	 */
	cp = disk->d_consumer;
	cbp->bio_done = g_mirror_done;
	cbp->bio_to = cp->provider;
	G_MIRROR_LOGREQ(3, cbp, "Sending request.");
	KASSERT(cp->acr >= 1 && cp->acw >= 1 && cp->ace >= 1,
	    ("Consumer %s not opened (r%dw%de%d).", cp->provider->name, cp->acr,
	    cp->acw, cp->ace));
	cp->index++;
	g_io_request(cbp, cp);
}

static void
g_mirror_request_round_robin(struct g_mirror_softc *sc, struct bio *bp)
{
	struct g_mirror_disk *disk;
	struct g_consumer *cp;
	struct bio *cbp;

	disk = g_mirror_get_disk(sc);
	if (disk == NULL) {
		if (bp->bio_error == 0)
			bp->bio_error = ENXIO;
		g_io_deliver(bp, bp->bio_error);
		return;
	}
	cbp = g_clone_bio(bp);
	if (cbp == NULL) {
		if (bp->bio_error == 0)
			bp->bio_error = ENOMEM;
		g_io_deliver(bp, bp->bio_error);
		return;
	}
	/*
	 * Fill in the component buf structure.
	 */
	cp = disk->d_consumer;
	cbp->bio_done = g_mirror_done;
	cbp->bio_to = cp->provider;
	G_MIRROR_LOGREQ(3, cbp, "Sending request.");
	KASSERT(cp->acr >= 1 && cp->acw >= 1 && cp->ace >= 1,
	    ("Consumer %s not opened (r%dw%de%d).", cp->provider->name, cp->acr,
	    cp->acw, cp->ace));
	cp->index++;
	g_io_request(cbp, cp);
}

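/*
 * Constants for the 'load' balance algorithm: requests within TRACK_SIZE
 * of a disk's last known head position are preferred, and the per-disk
 * load is a fixed-point average scaled by LOAD_SCALE.
 */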
#define	TRACK_SIZE	(1 * 1024 * 1024)
#define	LOAD_SCALE	256
#define	ABS(x)		(((x) >= 0) ? (x) : (-(x)))

static void
g_mirror_request_load(struct g_mirror_softc *sc, struct bio *bp)
{
	struct g_mirror_disk *disk, *dp;
	struct g_consumer *cp;
	struct bio *cbp;
	int prio, best;

	/* Find a disk with the smallest load. */
	disk = NULL;
	best = INT_MAX;
	LIST_FOREACH(dp, &sc->sc_disks, d_next) {
		if (dp->d_state != G_MIRROR_DISK_STATE_ACTIVE)
			continue;
		prio = dp->load;
		/* If disk head is precisely in position - highly prefer it. */
		if (dp->d_last_offset == bp->bio_offset)
			prio -= 2 * LOAD_SCALE;
		else
		/* If disk head is close to position - prefer it. */
		if (ABS(dp->d_last_offset - bp->bio_offset) < TRACK_SIZE)
			prio -= 1 * LOAD_SCALE;
		if (prio <= best) {
			disk = dp;
			best = prio;
		}
	}
	KASSERT(disk != NULL, ("NULL disk for %s.", sc->sc_name));
	cbp = g_clone_bio(bp);
	if (cbp == NULL) {
		if (bp->bio_error == 0)
			bp->bio_error = ENOMEM;
		g_io_deliver(bp, bp->bio_error);
		return;
	}
	/*
	 * Fill in the component buf structure.
	 */
	cp = disk->d_consumer;
	cbp->bio_done = g_mirror_done;
	cbp->bio_to = cp->provider;
	G_MIRROR_LOGREQ(3, cbp, "Sending request.");
	KASSERT(cp->acr >= 1 && cp->acw >= 1 && cp->ace >= 1,
	    ("Consumer %s not opened (r%dw%de%d).", cp->provider->name, cp->acr,
	    cp->acw, cp->ace));
	cp->index++;
	/* Remember the last head position. */
	disk->d_last_offset = bp->bio_offset + bp->bio_length;
	/* Update loads. */
	LIST_FOREACH(dp, &sc->sc_disks, d_next) {
		dp->load = (dp->d_consumer->index * LOAD_SCALE +
		    dp->load * 7) / 8;
	}
	g_io_request(cbp, cp);
}

static void
g_mirror_request_split(struct g_mirror_softc *sc, struct bio *bp)
{
	struct bio_queue_head queue;
	struct g_mirror_disk *disk;
	struct g_consumer *cp;
	struct bio *cbp;
	off_t left, mod, offset, slice;
	u_char *data;
	u_int ndisks;

	if (bp->bio_length <= sc->sc_slice) {
		g_mirror_request_round_robin(sc, bp);
		return;
	}
	ndisks = g_mirror_ndisks(sc, G_MIRROR_DISK_STATE_ACTIVE);
	slice = bp->bio_length / ndisks;
	mod = slice % sc->sc_provider->sectorsize;
	if (mod != 0)
		slice += sc->sc_provider->sectorsize - mod;
	/*
	 * Allocate all bios before sending any request, so we can
	 * return ENOMEM in a nice and clean way.
	 */
	left = bp->bio_length;
	offset = bp->bio_offset;
	data = bp->bio_data;
	bioq_init(&queue);
	LIST_FOREACH(disk, &sc->sc_disks, d_next) {
		if (disk->d_state != G_MIRROR_DISK_STATE_ACTIVE)
			continue;
		cbp = g_clone_bio(bp);
		if (cbp == NULL) {
			for (cbp = bioq_first(&queue); cbp != NULL;
			    cbp = bioq_first(&queue)) {
				bioq_remove(&queue, cbp);
				g_destroy_bio(cbp);
			}
			if (bp->bio_error == 0)
				bp->bio_error = ENOMEM;
			g_io_deliver(bp, bp->bio_error);
			return;
		}
		bioq_insert_tail(&queue, cbp);
		cbp->bio_done = g_mirror_done;
		cbp->bio_caller1 = disk;
		cbp->bio_to = disk->d_consumer->provider;
		cbp->bio_offset = offset;
		cbp->bio_data = data;
		cbp->bio_length = MIN(left, slice);
		left -= cbp->bio_length;
		if (left == 0)
			break;
		offset += cbp->bio_length;
		data += cbp->bio_length;
	}
	for (cbp = bioq_first(&queue); cbp != NULL; cbp = bioq_first(&queue)) {
		bioq_remove(&queue, cbp);
		G_MIRROR_LOGREQ(3, cbp, "Sending request.");
		disk = cbp->bio_caller1;
		cbp->bio_caller1 = NULL;
		cp = disk->d_consumer;
		KASSERT(cp->acr >= 1 && cp->acw >= 1 && cp->ace >= 1,
		    ("Consumer %s not opened (r%dw%de%d).", cp->provider->name,
		    cp->acr, cp->acw, cp->ace));
		disk->d_consumer->index++;
		g_io_request(cbp, disk->d_consumer);
	}
}

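/*
 * Dispatch a regular request: reads go to a single component chosen by
 * the configured balance algorithm, while writes and deletes are cloned
 * to every ACTIVE component and to SYNCHRONIZING components within their
 * already-synchronized region.
 */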
static void
g_mirror_register_request(struct bio *bp)
{
	struct g_mirror_softc *sc;

	sc = bp->bio_to->geom->softc;
	switch (bp->bio_cmd) {
	case BIO_READ:
		switch (sc->sc_balance) {
		case G_MIRROR_BALANCE_LOAD:
			g_mirror_request_load(sc, bp);
			break;
		case G_MIRROR_BALANCE_PREFER:
			g_mirror_request_prefer(sc, bp);
			break;
		case G_MIRROR_BALANCE_ROUND_ROBIN:
			g_mirror_request_round_robin(sc, bp);
			break;
		case G_MIRROR_BALANCE_SPLIT:
			g_mirror_request_split(sc, bp);
			break;
		}
		return;
	case BIO_WRITE:
	case BIO_DELETE:
	    {
		struct g_mirror_disk *disk;
		struct g_mirror_disk_sync *sync;
		struct bio_queue_head queue;
		struct g_consumer *cp;
		struct bio *cbp;

		/*
		 * Delay the request if it is colliding with a synchronization
		 * request.
		 */
		if (g_mirror_sync_collision(sc, bp)) {
			g_mirror_regular_delay(sc, bp);
			return;
		}

		if (sc->sc_idle)
			g_mirror_unidle(sc);
		else
			sc->sc_last_write = time_uptime;

		/*
		 * Allocate all bios before sending any request, so we can
		 * return ENOMEM in a nice and clean way.
		 */
		bioq_init(&queue);
		LIST_FOREACH(disk, &sc->sc_disks, d_next) {
			sync = &disk->d_sync;
			switch (disk->d_state) {
			case G_MIRROR_DISK_STATE_ACTIVE:
				break;
			case G_MIRROR_DISK_STATE_SYNCHRONIZING:
				if (bp->bio_offset >= sync->ds_offset)
					continue;
				break;
			default:
				continue;
			}
			cbp = g_clone_bio(bp);
			if (cbp == NULL) {
				for (cbp = bioq_first(&queue); cbp != NULL;
				    cbp = bioq_first(&queue)) {
					bioq_remove(&queue, cbp);
					g_destroy_bio(cbp);
				}
				if (bp->bio_error == 0)
					bp->bio_error = ENOMEM;
				g_io_deliver(bp, bp->bio_error);
				return;
			}
			bioq_insert_tail(&queue, cbp);
			cbp->bio_done = g_mirror_done;
			cp = disk->d_consumer;
			cbp->bio_caller1 = cp;
			cbp->bio_to = cp->provider;
			KASSERT(cp->acr >= 1 && cp->acw >= 1 && cp->ace >= 1,
			    ("Consumer %s not opened (r%dw%de%d).",
			    cp->provider->name, cp->acr, cp->acw, cp->ace));
		}
		for (cbp = bioq_first(&queue); cbp != NULL;
		    cbp = bioq_first(&queue)) {
			bioq_remove(&queue, cbp);
			G_MIRROR_LOGREQ(3, cbp, "Sending request.");
			cp = cbp->bio_caller1;
			cbp->bio_caller1 = NULL;
			cp->index++;
			sc->sc_writes++;
			g_io_request(cbp, cp);
		}
		/*
		 * Put the request onto the inflight queue, so we can check
		 * whether new synchronization requests collide with it.
		 */
		bioq_insert_tail(&sc->sc_inflight, bp);
		/*
		 * Bump syncid on first write.
		 */
		if ((sc->sc_bump_id & G_MIRROR_BUMP_SYNCID) != 0) {
			sc->sc_bump_id &= ~G_MIRROR_BUMP_SYNCID;
			g_mirror_bump_syncid(sc);
		}
		return;
	    }
	default:
		KASSERT(1 == 0, ("Invalid command here: %u (device=%s)",
		    bp->bio_cmd, sc->sc_name));
		break;
	}
}

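/*
 * Return 1 if no consumer of the mirror geom or of its synchronization
 * geom has I/O in progress, i.e. if it is safe to destroy the device.
 */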
static int
g_mirror_can_destroy(struct g_mirror_softc *sc)
{
	struct g_geom *gp;
	struct g_consumer *cp;

	g_topology_assert();
	gp = sc->sc_geom;
	if (gp->softc == NULL)
		return (1);
	LIST_FOREACH(cp, &gp->consumer, consumer) {
		if (g_mirror_is_busy(sc, cp))
			return (0);
	}
	gp = sc->sc_sync.ds_geom;
	LIST_FOREACH(cp, &gp->consumer, consumer) {
		if (g_mirror_is_busy(sc, cp))
			return (0);
	}
	G_MIRROR_DEBUG(2, "No I/O requests for %s, it can be destroyed.",
	    sc->sc_name);
	return (1);
}

static int
g_mirror_try_destroy(struct g_mirror_softc *sc)
{

	if (sc->sc_rootmount != NULL) {
		G_MIRROR_DEBUG(1, "root_mount_rel[%u] %p", __LINE__,
		    sc->sc_rootmount);
		root_mount_rel(sc->sc_rootmount);
		sc->sc_rootmount = NULL;
	}
	g_topology_lock();
	if (!g_mirror_can_destroy(sc)) {
		g_topology_unlock();
		return (0);
	}
	sc->sc_geom->softc = NULL;
	sc->sc_sync.ds_geom->softc = NULL;
	if ((sc->sc_flags & G_MIRROR_DEVICE_FLAG_WAIT) != 0) {
		g_topology_unlock();
		G_MIRROR_DEBUG(4, "%s: Waking up %p.", __func__,
		    &sc->sc_worker);
		/* Unlock sc_lock here, as it can be destroyed after wakeup. */
		sx_xunlock(&sc->sc_lock);
		wakeup(&sc->sc_worker);
		sc->sc_worker = NULL;
	} else {
		g_topology_unlock();
		g_mirror_destroy_device(sc);
		free(sc, M_MIRROR);
	}
	return (1);
}

/*
 * Worker thread.
 */
static void
g_mirror_worker(void *arg)
{
	struct g_mirror_softc *sc;
	struct g_mirror_event *ep;
	struct bio *bp;
	int timeout;

	sc = arg;
	thread_lock(curthread);
	sched_prio(curthread, PRIBIO);
	thread_unlock(curthread);

	sx_xlock(&sc->sc_lock);
	for (;;) {
		G_MIRROR_DEBUG(5, "%s: Let's see...", __func__);
		/*
		 * First take a look at events.
		 * It is important to handle events before any I/O requests.
		 */
		ep = g_mirror_event_get(sc);
		if (ep != NULL) {
			g_mirror_event_remove(sc, ep);
			if ((ep->e_flags & G_MIRROR_EVENT_DEVICE) != 0) {
				/* Update only device status. */
				G_MIRROR_DEBUG(3,
				    "Running event for device %s.",
				    sc->sc_name);
				ep->e_error = 0;
				g_mirror_update_device(sc, 1);
			} else {
				/* Update disk status. */
				G_MIRROR_DEBUG(3, "Running event for disk %s.",
				    g_mirror_get_diskname(ep->e_disk));
				ep->e_error = g_mirror_update_disk(ep->e_disk,
				    ep->e_state);
				if (ep->e_error == 0)
					g_mirror_update_device(sc, 0);
			}
			if ((ep->e_flags & G_MIRROR_EVENT_DONTWAIT) != 0) {
				KASSERT(ep->e_error == 0,
				    ("Error cannot be handled."));
				g_mirror_event_free(ep);
			} else {
				ep->e_flags |= G_MIRROR_EVENT_DONE;
				G_MIRROR_DEBUG(4, "%s: Waking up %p.", __func__,
				    ep);
				mtx_lock(&sc->sc_events_mtx);
				wakeup(ep);
				mtx_unlock(&sc->sc_events_mtx);
			}
			if ((sc->sc_flags &
			    G_MIRROR_DEVICE_FLAG_DESTROY) != 0) {
				if (g_mirror_try_destroy(sc)) {
					curthread->td_pflags &= ~TDP_GEOM;
					G_MIRROR_DEBUG(1, "Thread exiting.");
					kproc_exit(0);
				}
			}
			G_MIRROR_DEBUG(5, "%s: I'm here 1.", __func__);
			continue;
		}
		/*
		 * Check if we can mark the array as CLEAN and, if we cannot,
		 * how many seconds we should wait.
		 */
		timeout = g_mirror_idle(sc, -1);
		/*
		 * Now I/O requests.
		 */
		/* Get first request from the queue. */
		mtx_lock(&sc->sc_queue_mtx);
		bp = bioq_first(&sc->sc_queue);
		if (bp == NULL) {
			if ((sc->sc_flags &
			    G_MIRROR_DEVICE_FLAG_DESTROY) != 0) {
				mtx_unlock(&sc->sc_queue_mtx);
				if (g_mirror_try_destroy(sc)) {
					curthread->td_pflags &= ~TDP_GEOM;
					G_MIRROR_DEBUG(1, "Thread exiting.");
					kproc_exit(0);
				}
				mtx_lock(&sc->sc_queue_mtx);
			}
			sx_xunlock(&sc->sc_lock);
			/*
			 * XXX: We can miss an event here, because an event
			 * can be added without the sx device lock and without
			 * the queue mutex.  Maybe we should just stop using a
			 * dedicated mutex for event synchronization and stick
			 * with the queue lock?  Such an event would hang here
			 * until the next I/O request or event arrives.
			 */
			MSLEEP(sc, &sc->sc_queue_mtx, PRIBIO | PDROP, "m:w1",
			    timeout * hz);
			sx_xlock(&sc->sc_lock);
			G_MIRROR_DEBUG(5, "%s: I'm here 4.", __func__);
			continue;
		}
		bioq_remove(&sc->sc_queue, bp);
		mtx_unlock(&sc->sc_queue_mtx);

		if (bp->bio_from->geom == sc->sc_sync.ds_geom &&
		    (bp->bio_cflags & G_MIRROR_BIO_FLAG_SYNC) != 0) {
			g_mirror_sync_request(bp);	/* READ */
		} else if (bp->bio_to != sc->sc_provider) {
			if ((bp->bio_cflags & G_MIRROR_BIO_FLAG_REGULAR) != 0)
				g_mirror_regular_request(bp);
			else if ((bp->bio_cflags & G_MIRROR_BIO_FLAG_SYNC) != 0)
				g_mirror_sync_request(bp);	/* WRITE */
			else {
				KASSERT(0,
				    ("Invalid request cflags=0x%hhx to=%s.",
				    bp->bio_cflags, bp->bio_to->name));
			}
		} else {
			g_mirror_register_request(bp);
		}
		G_MIRROR_DEBUG(5, "%s: I'm here 9.", __func__);
	}
}

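/*
 * Bring the disk's dirty flag in sync with the current idle state of
 * the device.
 */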
static void
g_mirror_update_idle(struct g_mirror_softc *sc, struct g_mirror_disk *disk)
{

	sx_assert(&sc->sc_lock, SX_LOCKED);

	if ((sc->sc_flags & G_MIRROR_DEVICE_FLAG_NOFAILSYNC) != 0)
		return;
	if (!sc->sc_idle && (disk->d_flags & G_MIRROR_DISK_FLAG_DIRTY) == 0) {
		G_MIRROR_DEBUG(1, "Disk %s (device %s) marked as dirty.",
		    g_mirror_get_diskname(disk), sc->sc_name);
		disk->d_flags |= G_MIRROR_DISK_FLAG_DIRTY;
	} else if (sc->sc_idle &&
	    (disk->d_flags & G_MIRROR_DISK_FLAG_DIRTY) != 0) {
		G_MIRROR_DEBUG(1, "Disk %s (device %s) marked as clean.",
		    g_mirror_get_diskname(disk), sc->sc_name);
		disk->d_flags &= ~G_MIRROR_DISK_FLAG_DIRTY;
	}
}

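/*
 * Start synchronization of a disk: open a dedicated consumer to the
 * mirror provider, allocate g_mirror_syncreqs parallel bios and fire
 * off the initial read requests.
 */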
static void
g_mirror_sync_start(struct g_mirror_disk *disk)
{
	struct g_mirror_softc *sc;
	struct g_consumer *cp;
	struct bio *bp;
	int error, i;

	g_topology_assert_not();
	sc = disk->d_softc;
	sx_assert(&sc->sc_lock, SX_LOCKED);

	KASSERT(disk->d_state == G_MIRROR_DISK_STATE_SYNCHRONIZING,
	    ("Disk %s is not marked for synchronization.",
	    g_mirror_get_diskname(disk)));
	KASSERT(sc->sc_state == G_MIRROR_DEVICE_STATE_RUNNING,
	    ("Device not in RUNNING state (%s, %u).", sc->sc_name,
	    sc->sc_state));

	sx_xunlock(&sc->sc_lock);
	g_topology_lock();
	cp = g_new_consumer(sc->sc_sync.ds_geom);
	error = g_attach(cp, sc->sc_provider);
	KASSERT(error == 0,
	    ("Cannot attach to %s (error=%d).", sc->sc_name, error));
	error = g_access(cp, 1, 0, 0);
	KASSERT(error == 0, ("Cannot open %s (error=%d).", sc->sc_name, error));
	g_topology_unlock();
	sx_xlock(&sc->sc_lock);

	G_MIRROR_DEBUG(0, "Device %s: rebuilding provider %s.", sc->sc_name,
	    g_mirror_get_diskname(disk));
	if ((sc->sc_flags & G_MIRROR_DEVICE_FLAG_NOFAILSYNC) == 0)
		disk->d_flags |= G_MIRROR_DISK_FLAG_DIRTY;
	KASSERT(disk->d_sync.ds_consumer == NULL,
	    ("Sync consumer already exists (device=%s, disk=%s).",
	    sc->sc_name, g_mirror_get_diskname(disk)));

	disk->d_sync.ds_consumer = cp;
	disk->d_sync.ds_consumer->private = disk;
	disk->d_sync.ds_consumer->index = 0;

	/*
	 * Allocate memory for synchronization bios and initialize them.
	 */
	disk->d_sync.ds_bios = malloc(sizeof(struct bio *) * g_mirror_syncreqs,
	    M_MIRROR, M_WAITOK);
	for (i = 0; i < g_mirror_syncreqs; i++) {
		bp = g_alloc_bio();
		disk->d_sync.ds_bios[i] = bp;
		bp->bio_parent = NULL;
		bp->bio_cmd = BIO_READ;
		bp->bio_data = malloc(MAXPHYS, M_MIRROR, M_WAITOK);
		bp->bio_cflags = 0;
		bp->bio_offset = disk->d_sync.ds_offset;
		bp->bio_length = MIN(MAXPHYS,
		    sc->sc_mediasize - bp->bio_offset);
		disk->d_sync.ds_offset += bp->bio_length;
		bp->bio_done = g_mirror_sync_done;
		bp->bio_from = disk->d_sync.ds_consumer;
		bp->bio_to = sc->sc_provider;
		bp->bio_caller1 = (void *)(uintptr_t)i;
	}

	/* Increase the number of disks in SYNCHRONIZING state. */
	sc->sc_sync.ds_ndisks++;
	/* Set the number of in-flight synchronization requests. */
	disk->d_sync.ds_inflight = g_mirror_syncreqs;

	/*
	 * Fire off first synchronization requests.
	 */
	for (i = 0; i < g_mirror_syncreqs; i++) {
		bp = disk->d_sync.ds_bios[i];
		G_MIRROR_LOGREQ(3, bp, "Sending synchronization request.");
		disk->d_sync.ds_consumer->index++;
		/*
		 * Delay the request if it is colliding with a regular request.
		 */
		if (g_mirror_regular_collision(sc, bp))
			g_mirror_sync_delay(sc, bp);
		else
			g_io_request(bp, disk->d_sync.ds_consumer);
	}
}

/*
 * Stop synchronization process.
 * type: 0 - synchronization finished
 *       1 - synchronization stopped
 */
static void
g_mirror_sync_stop(struct g_mirror_disk *disk, int type)
{
	struct g_mirror_softc *sc;
	struct g_consumer *cp;

	g_topology_assert_not();
	sc = disk->d_softc;
	sx_assert(&sc->sc_lock, SX_LOCKED);

	KASSERT(disk->d_state == G_MIRROR_DISK_STATE_SYNCHRONIZING,
	    ("Wrong disk state (%s, %s).", g_mirror_get_diskname(disk),
	    g_mirror_disk_state2str(disk->d_state)));
	if (disk->d_sync.ds_consumer == NULL)
		return;

	if (type == 0) {
		G_MIRROR_DEBUG(0, "Device %s: rebuilding provider %s finished.",
		    sc->sc_name, g_mirror_get_diskname(disk));
	} else /* if (type == 1) */ {
		G_MIRROR_DEBUG(0, "Device %s: rebuilding provider %s stopped.",
		    sc->sc_name, g_mirror_get_diskname(disk));
	}
	free(disk->d_sync.ds_bios, M_MIRROR);
	disk->d_sync.ds_bios = NULL;
	cp = disk->d_sync.ds_consumer;
	disk->d_sync.ds_consumer = NULL;
	disk->d_flags &= ~G_MIRROR_DISK_FLAG_DIRTY;
	sc->sc_sync.ds_ndisks--;
	sx_xunlock(&sc->sc_lock); /* Avoid recursion on sc_lock. */
	g_topology_lock();
	g_mirror_kill_consumer(sc, cp);
	g_topology_unlock();
	sx_xlock(&sc->sc_lock);
}

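/*
 * Create and announce the mirror/<name> provider.  The largest stripe
 * size found among the components is propagated to the new provider,
 * presumably as an alignment hint for upper layers, and synchronization
 * is started for any disks still in the SYNCHRONIZING state.
 */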
static void
g_mirror_launch_provider(struct g_mirror_softc *sc)
{
	struct g_mirror_disk *disk;
	struct g_provider *pp;

	sx_assert(&sc->sc_lock, SX_LOCKED);

	g_topology_lock();
	pp = g_new_providerf(sc->sc_geom, "mirror/%s", sc->sc_name);
	pp->mediasize = sc->sc_mediasize;
	pp->sectorsize = sc->sc_sectorsize;
	pp->stripesize = 0;
	pp->stripeoffset = 0;
	LIST_FOREACH(disk, &sc->sc_disks, d_next) {
		if (disk->d_consumer && disk->d_consumer->provider &&
		    disk->d_consumer->provider->stripesize > pp->stripesize) {
			pp->stripesize = disk->d_consumer->provider->stripesize;
			pp->stripeoffset =
			    disk->d_consumer->provider->stripeoffset;
		}
	}
	sc->sc_provider = pp;
	g_error_provider(pp, 0);
	g_topology_unlock();
	G_MIRROR_DEBUG(0, "Device %s launched (%u/%u).", pp->name,
	    g_mirror_ndisks(sc, G_MIRROR_DISK_STATE_ACTIVE), sc->sc_ndisks);
	LIST_FOREACH(disk, &sc->sc_disks, d_next) {
		if (disk->d_state == G_MIRROR_DISK_STATE_SYNCHRONIZING)
			g_mirror_sync_start(disk);
	}
}

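/*
 * Destroy the mirror provider: mark it with ENXIO so new requests are
 * refused, fail all bios still sitting in the queue with ENXIO, wither
 * the provider and stop any synchronization that is in progress.
 */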
static void
g_mirror_destroy_provider(struct g_mirror_softc *sc)
{
	struct g_mirror_disk *disk;
	struct bio *bp;

	g_topology_assert_not();
	KASSERT(sc->sc_provider != NULL, ("NULL provider (device=%s).",
	    sc->sc_name));

	g_topology_lock();
	g_error_provider(sc->sc_provider, ENXIO);
	mtx_lock(&sc->sc_queue_mtx);
	while ((bp = bioq_first(&sc->sc_queue)) != NULL) {
		bioq_remove(&sc->sc_queue, bp);
		g_io_deliver(bp, ENXIO);
	}
	mtx_unlock(&sc->sc_queue_mtx);
	G_MIRROR_DEBUG(0, "Device %s: provider %s destroyed.", sc->sc_name,
	    sc->sc_provider->name);
	sc->sc_provider->flags |= G_PF_WITHER;
	g_orphan_provider(sc->sc_provider, ENXIO);
	g_topology_unlock();
	sc->sc_provider = NULL;
	LIST_FOREACH(disk, &sc->sc_disks, d_next) {
		if (disk->d_state == G_MIRROR_DISK_STATE_SYNCHRONIZING)
			g_mirror_sync_stop(disk, 1);
	}
}

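/*
 * Callout handler armed in g_mirror_create().  If not all components
 * show up within the configured timeout, force the device to start
 * with whatever disks are present.
 */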
static void
g_mirror_go(void *arg)
{
	struct g_mirror_softc *sc;

	sc = arg;
	G_MIRROR_DEBUG(0, "Force device %s start due to timeout.", sc->sc_name);
	g_mirror_event_send(sc, 0,
	    G_MIRROR_EVENT_DONTWAIT | G_MIRROR_EVENT_DEVICE);
}

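/*
 * Decide the initial state for a disk by comparing its on-disk syncid
 * with the device syncid: an equal syncid means the disk is ACTIVE (or
 * SYNCHRONIZING/STALE if it was marked for synchronization), a smaller
 * syncid means the disk is out of date and must be rebuilt from offset
 * zero, and a bigger syncid means the mirror was started from stale
 * components, in which case the disk is destroyed rather than attached.
 */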
static u_int
g_mirror_determine_state(struct g_mirror_disk *disk)
{
	struct g_mirror_softc *sc;
	u_int state;

	sc = disk->d_softc;
	if (sc->sc_syncid == disk->d_sync.ds_syncid) {
		if ((disk->d_flags &
		    G_MIRROR_DISK_FLAG_SYNCHRONIZING) == 0) {
			/* Disk does not need synchronization. */
			state = G_MIRROR_DISK_STATE_ACTIVE;
		} else {
			if ((sc->sc_flags &
			    G_MIRROR_DEVICE_FLAG_NOAUTOSYNC) == 0 ||
			    (disk->d_flags &
			    G_MIRROR_DISK_FLAG_FORCE_SYNC) != 0) {
				/*
				 * We can start synchronization from
				 * the stored offset.
				 */
				state = G_MIRROR_DISK_STATE_SYNCHRONIZING;
			} else {
				state = G_MIRROR_DISK_STATE_STALE;
			}
		}
	} else if (disk->d_sync.ds_syncid < sc->sc_syncid) {
		/*
		 * Reset all synchronization data for this disk, because even
		 * if it was synchronized, it was synchronized against disks
		 * with a different syncid.
		 */
		disk->d_flags |= G_MIRROR_DISK_FLAG_SYNCHRONIZING;
		disk->d_sync.ds_offset = 0;
		disk->d_sync.ds_offset_done = 0;
		disk->d_sync.ds_syncid = sc->sc_syncid;
		if ((sc->sc_flags & G_MIRROR_DEVICE_FLAG_NOAUTOSYNC) == 0 ||
		    (disk->d_flags & G_MIRROR_DISK_FLAG_FORCE_SYNC) != 0) {
			state = G_MIRROR_DISK_STATE_SYNCHRONIZING;
		} else {
			state = G_MIRROR_DISK_STATE_STALE;
		}
	} else /* if (sc->sc_syncid < disk->d_sync.ds_syncid) */ {
		/*
		 * This is bad: the mirror was started on stale disks and a
		 * fresher disk has just arrived.  If there were writes in
		 * the meantime, the mirror is now inconsistent.  The safest
		 * choice is to leave this disk untouched and inform the
		 * user loudly.
		 */
		G_MIRROR_DEBUG(0, "Device %s was started before the freshest "
		    "disk (%s) arrived! It will not be connected to the "
		    "running device.", sc->sc_name,
		    g_mirror_get_diskname(disk));
		g_mirror_destroy_disk(disk);
		state = G_MIRROR_DISK_STATE_NONE;
		/* Return immediately, because disk was destroyed. */
		return (state);
	}
	G_MIRROR_DEBUG(3, "State for %s disk: %s.",
	    g_mirror_get_diskname(disk), g_mirror_disk_state2str(state));
	return (state);
}

/*
 * Update device state.
 */
static void
g_mirror_update_device(struct g_mirror_softc *sc, boolean_t force)
{
	struct g_mirror_disk *disk;
	u_int state;

	sx_assert(&sc->sc_lock, SX_XLOCKED);

	switch (sc->sc_state) {
	case G_MIRROR_DEVICE_STATE_STARTING:
	    {
		struct g_mirror_disk *pdisk, *tdisk;
		u_int dirty, ndisks, genid, syncid;

		KASSERT(sc->sc_provider == NULL,
		    ("Non-NULL provider in STARTING state (%s).", sc->sc_name));
		/*
		 * Are we ready? We are, if all disks are connected or
		 * if we have any disks and 'force' is true.
		 */
		ndisks = g_mirror_ndisks(sc, -1);
		if (sc->sc_ndisks == ndisks || (force && ndisks > 0)) {
			;
		} else if (ndisks == 0) {
			/*
			 * Disks went down in starting phase, so destroy
			 * device.
			 */
			callout_drain(&sc->sc_callout);
			sc->sc_flags |= G_MIRROR_DEVICE_FLAG_DESTROY;
			G_MIRROR_DEBUG(1, "root_mount_rel[%u] %p", __LINE__,
			    sc->sc_rootmount);
			root_mount_rel(sc->sc_rootmount);
			sc->sc_rootmount = NULL;
			return;
		} else {
			return;
		}

		/*
		 * Activate all disks with the biggest syncid.
		 */
		if (force) {
			/*
			 * If 'force' is true, we have been called due to
			 * timeout, so don't bother canceling timeout.
			 */
			ndisks = 0;
			LIST_FOREACH(disk, &sc->sc_disks, d_next) {
				if ((disk->d_flags &
				    G_MIRROR_DISK_FLAG_SYNCHRONIZING) == 0) {
					ndisks++;
				}
			}
			if (ndisks == 0) {
				/* No valid disks found, destroy device. */
				sc->sc_flags |= G_MIRROR_DEVICE_FLAG_DESTROY;
				G_MIRROR_DEBUG(1, "root_mount_rel[%u] %p",
				    __LINE__, sc->sc_rootmount);
				root_mount_rel(sc->sc_rootmount);
				sc->sc_rootmount = NULL;
				return;
			}
		} else {
			/* Cancel timeout. */
			callout_drain(&sc->sc_callout);
		}

		/*
		 * Find the biggest genid.
		 */
		genid = 0;
		LIST_FOREACH(disk, &sc->sc_disks, d_next) {
			if (disk->d_genid > genid)
				genid = disk->d_genid;
		}
		sc->sc_genid = genid;
		/*
		 * Remove all disks without the biggest genid.
		 */
		LIST_FOREACH_SAFE(disk, &sc->sc_disks, d_next, tdisk) {
			if (disk->d_genid < genid) {
				G_MIRROR_DEBUG(0,
				    "Component %s (device %s) broken, skipping.",
				    g_mirror_get_diskname(disk), sc->sc_name);
				g_mirror_destroy_disk(disk);
			}
		}

		/*
		 * Find the biggest syncid.
		 */
		syncid = 0;
		LIST_FOREACH(disk, &sc->sc_disks, d_next) {
			if (disk->d_sync.ds_syncid > syncid)
				syncid = disk->d_sync.ds_syncid;
		}

		/*
		 * Look for dirty disks: if all disks with the biggest syncid
		 * are dirty, choose the one with the biggest priority as the
		 * master and rebuild the rest.  Count the disks with the
		 * biggest syncid, count how many of them are dirty and,
		 * while here, remember the dirty disk with the biggest
		 * priority.
		 */
		dirty = ndisks = 0;
		pdisk = NULL;
		LIST_FOREACH(disk, &sc->sc_disks, d_next) {
			if (disk->d_sync.ds_syncid != syncid)
				continue;
			if ((disk->d_flags &
			    G_MIRROR_DISK_FLAG_SYNCHRONIZING) != 0) {
				continue;
			}
			ndisks++;
			if ((disk->d_flags & G_MIRROR_DISK_FLAG_DIRTY) != 0) {
				dirty++;
				if (pdisk == NULL ||
				    pdisk->d_priority < disk->d_priority) {
					pdisk = disk;
				}
			}
		}
		if (dirty == 0) {
			/* No dirty disks at all, great. */
		} else if (dirty == ndisks) {
			/*
			 * Force synchronization for all dirty disks except one
			 * with the biggest priority.
			 */
			KASSERT(pdisk != NULL, ("pdisk == NULL"));
			G_MIRROR_DEBUG(1, "Using disk %s (device %s) as a "
			    "master disk for synchronization.",
			    g_mirror_get_diskname(pdisk), sc->sc_name);
			LIST_FOREACH(disk, &sc->sc_disks, d_next) {
				if (disk->d_sync.ds_syncid != syncid)
					continue;
				if ((disk->d_flags &
				    G_MIRROR_DISK_FLAG_SYNCHRONIZING) != 0) {
					continue;
				}
				KASSERT((disk->d_flags &
				    G_MIRROR_DISK_FLAG_DIRTY) != 0,
				    ("Disk %s isn't marked as dirty.",
				    g_mirror_get_diskname(disk)));
				/* Skip the disk with the biggest priority. */
				if (disk == pdisk)
					continue;
				disk->d_sync.ds_syncid = 0;
			}
		} else if (dirty < ndisks) {
			/*
			 * Force synchronization for all dirty disks.
			 * We have some non-dirty disks.
			 */
			LIST_FOREACH(disk, &sc->sc_disks, d_next) {
				if (disk->d_sync.ds_syncid != syncid)
					continue;
				if ((disk->d_flags &
				    G_MIRROR_DISK_FLAG_SYNCHRONIZING) != 0) {
					continue;
				}
				if ((disk->d_flags &
				    G_MIRROR_DISK_FLAG_DIRTY) == 0) {
					continue;
				}
				disk->d_sync.ds_syncid = 0;
			}
		}

		/* Reset hint. */
		sc->sc_hint = NULL;
		sc->sc_syncid = syncid;
		if (force) {
			/* Remember to bump syncid on first write. */
			sc->sc_bump_id |= G_MIRROR_BUMP_SYNCID;
		}
		state = G_MIRROR_DEVICE_STATE_RUNNING;
		G_MIRROR_DEBUG(1, "Device %s state changed from %s to %s.",
		    sc->sc_name, g_mirror_device_state2str(sc->sc_state),
		    g_mirror_device_state2str(state));
		sc->sc_state = state;
		LIST_FOREACH(disk, &sc->sc_disks, d_next) {
			state = g_mirror_determine_state(disk);
			g_mirror_event_send(disk, state,
			    G_MIRROR_EVENT_DONTWAIT);
			if (state == G_MIRROR_DISK_STATE_STALE)
				sc->sc_bump_id |= G_MIRROR_BUMP_SYNCID;
		}
		break;
	    }
	case G_MIRROR_DEVICE_STATE_RUNNING:
		if (g_mirror_ndisks(sc, G_MIRROR_DISK_STATE_ACTIVE) == 0 &&
		    g_mirror_ndisks(sc, G_MIRROR_DISK_STATE_NEW) == 0) {
			/*
			 * No active disks or no disks at all,
			 * so destroy device.
			 */
			if (sc->sc_provider != NULL)
				g_mirror_destroy_provider(sc);
			sc->sc_flags |= G_MIRROR_DEVICE_FLAG_DESTROY;
			break;
		} else if (g_mirror_ndisks(sc,
		    G_MIRROR_DISK_STATE_ACTIVE) > 0 &&
		    g_mirror_ndisks(sc, G_MIRROR_DISK_STATE_NEW) == 0) {
			/*
			 * We have active disks, launch provider if it doesn't
			 * exist.
			 */
			if (sc->sc_provider == NULL)
				g_mirror_launch_provider(sc);
			if (sc->sc_rootmount != NULL) {
				G_MIRROR_DEBUG(1, "root_mount_rel[%u] %p",
				    __LINE__, sc->sc_rootmount);
				root_mount_rel(sc->sc_rootmount);
				sc->sc_rootmount = NULL;
			}
		}
		/*
		 * Genid should be bumped immediately, so do it here.
		 */
		if ((sc->sc_bump_id & G_MIRROR_BUMP_GENID) != 0) {
			sc->sc_bump_id &= ~G_MIRROR_BUMP_GENID;
			g_mirror_bump_genid(sc);
		}
		break;
	default:
		KASSERT(1 == 0, ("Wrong device state (%s, %s).",
		    sc->sc_name, g_mirror_device_state2str(sc->sc_state)));
		break;
	}
}

/*
 * Update disk state and device state if needed.
 */
#define	DISK_STATE_CHANGED()	G_MIRROR_DEBUG(1,			\
	"Disk %s state changed from %s to %s (device %s).",		\
	g_mirror_get_diskname(disk),					\
	g_mirror_disk_state2str(disk->d_state),				\
	g_mirror_disk_state2str(state), sc->sc_name)
static int
g_mirror_update_disk(struct g_mirror_disk *disk, u_int state)
{
	struct g_mirror_softc *sc;

	sc = disk->d_softc;
	sx_assert(&sc->sc_lock, SX_XLOCKED);

again:
	G_MIRROR_DEBUG(3, "Changing disk %s state from %s to %s.",
	    g_mirror_get_diskname(disk), g_mirror_disk_state2str(disk->d_state),
	    g_mirror_disk_state2str(state));
	switch (state) {
	case G_MIRROR_DISK_STATE_NEW:
		/*
		 * Possible scenarios:
		 * 1. A new disk arrives.
		 */
		/* Previous state should be NONE. */
		KASSERT(disk->d_state == G_MIRROR_DISK_STATE_NONE,
		    ("Wrong disk state (%s, %s).", g_mirror_get_diskname(disk),
		    g_mirror_disk_state2str(disk->d_state)));
		DISK_STATE_CHANGED();

		disk->d_state = state;
		if (LIST_EMPTY(&sc->sc_disks))
			LIST_INSERT_HEAD(&sc->sc_disks, disk, d_next);
		else {
			struct g_mirror_disk *dp;

			LIST_FOREACH(dp, &sc->sc_disks, d_next) {
				if (disk->d_priority >= dp->d_priority) {
					LIST_INSERT_BEFORE(dp, disk, d_next);
					dp = NULL;
					break;
				}
				if (LIST_NEXT(dp, d_next) == NULL)
					break;
			}
			if (dp != NULL)
				LIST_INSERT_AFTER(dp, disk, d_next);
		}
		G_MIRROR_DEBUG(1, "Device %s: provider %s detected.",
		    sc->sc_name, g_mirror_get_diskname(disk));
		if (sc->sc_state == G_MIRROR_DEVICE_STATE_STARTING)
			break;
		KASSERT(sc->sc_state == G_MIRROR_DEVICE_STATE_RUNNING,
		    ("Wrong device state (%s, %s, %s, %s).", sc->sc_name,
		    g_mirror_device_state2str(sc->sc_state),
		    g_mirror_get_diskname(disk),
		    g_mirror_disk_state2str(disk->d_state)));
		state = g_mirror_determine_state(disk);
		if (state != G_MIRROR_DISK_STATE_NONE)
			goto again;
		break;
	case G_MIRROR_DISK_STATE_ACTIVE:
		/*
		 * Possible scenarios:
		 * 1. New disk does not need synchronization.
		 * 2. Synchronization process finished successfully.
		 */
		KASSERT(sc->sc_state == G_MIRROR_DEVICE_STATE_RUNNING,
		    ("Wrong device state (%s, %s, %s, %s).", sc->sc_name,
		    g_mirror_device_state2str(sc->sc_state),
		    g_mirror_get_diskname(disk),
		    g_mirror_disk_state2str(disk->d_state)));
		/* Previous state should be NEW or SYNCHRONIZING. */
		KASSERT(disk->d_state == G_MIRROR_DISK_STATE_NEW ||
		    disk->d_state == G_MIRROR_DISK_STATE_SYNCHRONIZING,
		    ("Wrong disk state (%s, %s).", g_mirror_get_diskname(disk),
		    g_mirror_disk_state2str(disk->d_state)));
		DISK_STATE_CHANGED();

		if (disk->d_state == G_MIRROR_DISK_STATE_SYNCHRONIZING) {
			disk->d_flags &= ~G_MIRROR_DISK_FLAG_SYNCHRONIZING;
			disk->d_flags &= ~G_MIRROR_DISK_FLAG_FORCE_SYNC;
			g_mirror_sync_stop(disk, 0);
		}
		disk->d_state = state;
		disk->d_sync.ds_offset = 0;
		disk->d_sync.ds_offset_done = 0;
		g_mirror_update_idle(sc, disk);
		g_mirror_update_metadata(disk);
		G_MIRROR_DEBUG(1, "Device %s: provider %s activated.",
		    sc->sc_name, g_mirror_get_diskname(disk));
		break;
	case G_MIRROR_DISK_STATE_STALE:
		/*
		 * Possible scenarios:
		 * 1. A stale disk was connected.
		 */
		/* Previous state should be NEW. */
		KASSERT(disk->d_state == G_MIRROR_DISK_STATE_NEW,
		    ("Wrong disk state (%s, %s).", g_mirror_get_diskname(disk),
		    g_mirror_disk_state2str(disk->d_state)));
		KASSERT(sc->sc_state == G_MIRROR_DEVICE_STATE_RUNNING,
		    ("Wrong device state (%s, %s, %s, %s).", sc->sc_name,
		    g_mirror_device_state2str(sc->sc_state),
		    g_mirror_get_diskname(disk),
		    g_mirror_disk_state2str(disk->d_state)));
		/*
		 * STALE state is only possible if device is marked
		 * NOAUTOSYNC.
		 */
		KASSERT((sc->sc_flags & G_MIRROR_DEVICE_FLAG_NOAUTOSYNC) != 0,
		    ("Wrong device state (%s, %s, %s, %s).", sc->sc_name,
		    g_mirror_device_state2str(sc->sc_state),
		    g_mirror_get_diskname(disk),
		    g_mirror_disk_state2str(disk->d_state)));
		DISK_STATE_CHANGED();

		disk->d_flags &= ~G_MIRROR_DISK_FLAG_DIRTY;
		disk->d_state = state;
		g_mirror_update_metadata(disk);
		G_MIRROR_DEBUG(0, "Device %s: provider %s is stale.",
		    sc->sc_name, g_mirror_get_diskname(disk));
		break;
	case G_MIRROR_DISK_STATE_SYNCHRONIZING:
		/*
		 * Possible scenarios:
		 * 1. A disk that needs synchronization was connected.
		 */
		/* Previous state should be NEW. */
		KASSERT(disk->d_state == G_MIRROR_DISK_STATE_NEW,
		    ("Wrong disk state (%s, %s).", g_mirror_get_diskname(disk),
		    g_mirror_disk_state2str(disk->d_state)));
		KASSERT(sc->sc_state == G_MIRROR_DEVICE_STATE_RUNNING,
		    ("Wrong device state (%s, %s, %s, %s).", sc->sc_name,
		    g_mirror_device_state2str(sc->sc_state),
		    g_mirror_get_diskname(disk),
		    g_mirror_disk_state2str(disk->d_state)));
		DISK_STATE_CHANGED();

		if (disk->d_state == G_MIRROR_DISK_STATE_NEW)
			disk->d_flags &= ~G_MIRROR_DISK_FLAG_DIRTY;
		disk->d_state = state;
		if (sc->sc_provider != NULL) {
			g_mirror_sync_start(disk);
			g_mirror_update_metadata(disk);
		}
		break;
	case G_MIRROR_DISK_STATE_DISCONNECTED:
		/*
		 * Possible scenarios:
		 * 1. The device wasn't running yet, but a disk disappeared.
		 * 2. A disk was active and disappeared.
		 * 3. A disk disappeared during the synchronization process.
		 */
		if (sc->sc_state == G_MIRROR_DEVICE_STATE_RUNNING) {
			/*
			 * Previous state should be ACTIVE, STALE or
			 * SYNCHRONIZING.
			 */
			KASSERT(disk->d_state == G_MIRROR_DISK_STATE_ACTIVE ||
			    disk->d_state == G_MIRROR_DISK_STATE_STALE ||
			    disk->d_state == G_MIRROR_DISK_STATE_SYNCHRONIZING,
			    ("Wrong disk state (%s, %s).",
			    g_mirror_get_diskname(disk),
			    g_mirror_disk_state2str(disk->d_state)));
		} else if (sc->sc_state == G_MIRROR_DEVICE_STATE_STARTING) {
			/* Previous state should be NEW. */
			KASSERT(disk->d_state == G_MIRROR_DISK_STATE_NEW,
			    ("Wrong disk state (%s, %s).",
			    g_mirror_get_diskname(disk),
			    g_mirror_disk_state2str(disk->d_state)));
			/*
			 * Cancel the pending syncid bump if the disk
			 * disappeared while in STARTING state.
			 */
			if ((sc->sc_bump_id & G_MIRROR_BUMP_SYNCID) != 0)
				sc->sc_bump_id &= ~G_MIRROR_BUMP_SYNCID;
#ifdef	INVARIANTS
		} else {
			KASSERT(1 == 0, ("Wrong device state (%s, %s, %s, %s).",
			    sc->sc_name,
			    g_mirror_device_state2str(sc->sc_state),
			    g_mirror_get_diskname(disk),
			    g_mirror_disk_state2str(disk->d_state)));
#endif
		}
		DISK_STATE_CHANGED();
		G_MIRROR_DEBUG(0, "Device %s: provider %s disconnected.",
		    sc->sc_name, g_mirror_get_diskname(disk));

		g_mirror_destroy_disk(disk);
		break;
	case G_MIRROR_DISK_STATE_DESTROY:
	    {
		int error;

		error = g_mirror_clear_metadata(disk);
		if (error != 0)
			return (error);
		DISK_STATE_CHANGED();
		G_MIRROR_DEBUG(0, "Device %s: provider %s destroyed.",
		    sc->sc_name, g_mirror_get_diskname(disk));

		g_mirror_destroy_disk(disk);
		sc->sc_ndisks--;
		LIST_FOREACH(disk, &sc->sc_disks, d_next) {
			g_mirror_update_metadata(disk);
		}
		break;
	    }
	default:
		KASSERT(1 == 0, ("Unknown state (%u).", state));
		break;
	}
	return (0);
}
#undef	DISK_STATE_CHANGED

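/*
 * Read and decode gmirror metadata from the last sector of a component.
 * The consumer is opened for reading around the I/O, and the decoded
 * metadata is validated (magic string, version, MD5 hash) before use.
 */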
int
g_mirror_read_metadata(struct g_consumer *cp, struct g_mirror_metadata *md)
{
	struct g_provider *pp;
	u_char *buf;
	int error;

	g_topology_assert();

	error = g_access(cp, 1, 0, 0);
	if (error != 0)
		return (error);
	pp = cp->provider;
	g_topology_unlock();
	/* Metadata is stored in the last sector. */
	buf = g_read_data(cp, pp->mediasize - pp->sectorsize, pp->sectorsize,
	    &error);
	g_topology_lock();
	g_access(cp, -1, 0, 0);
	if (buf == NULL) {
		G_MIRROR_DEBUG(1, "Cannot read metadata from %s (error=%d).",
		    cp->provider->name, error);
		return (error);
	}

	/* Decode metadata. */
	error = mirror_metadata_decode(buf, md);
	g_free(buf);
	if (strcmp(md->md_magic, G_MIRROR_MAGIC) != 0)
		return (EINVAL);
	if (md->md_version > G_MIRROR_VERSION) {
		G_MIRROR_DEBUG(0,
		    "Kernel module is too old to handle metadata from %s.",
		    cp->provider->name);
		return (EINVAL);
	}
	if (error != 0) {
		G_MIRROR_DEBUG(1, "MD5 metadata hash mismatch for provider %s.",
		    cp->provider->name);
		return (error);
	}

	return (0);
}

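/*
 * Verify that the metadata read from a component is consistent with the
 * already-configured device: same number of components, slice size,
 * balance algorithm, media size and sector size, and no unknown flags.
 */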
static int
g_mirror_check_metadata(struct g_mirror_softc *sc, struct g_provider *pp,
    struct g_mirror_metadata *md)
{

	if (g_mirror_id2disk(sc, md->md_did) != NULL) {
		G_MIRROR_DEBUG(1, "Disk %s (id=%u) already exists, skipping.",
		    pp->name, md->md_did);
		return (EEXIST);
	}
	if (md->md_all != sc->sc_ndisks) {
		G_MIRROR_DEBUG(1,
		    "Invalid '%s' field on disk %s (device %s), skipping.",
		    "md_all", pp->name, sc->sc_name);
		return (EINVAL);
	}
	if (md->md_slice != sc->sc_slice) {
		G_MIRROR_DEBUG(1,
		    "Invalid '%s' field on disk %s (device %s), skipping.",
		    "md_slice", pp->name, sc->sc_name);
		return (EINVAL);
	}
	if (md->md_balance != sc->sc_balance) {
		G_MIRROR_DEBUG(1,
		    "Invalid '%s' field on disk %s (device %s), skipping.",
		    "md_balance", pp->name, sc->sc_name);
		return (EINVAL);
	}
	if (md->md_mediasize != sc->sc_mediasize) {
		G_MIRROR_DEBUG(1,
		    "Invalid '%s' field on disk %s (device %s), skipping.",
		    "md_mediasize", pp->name, sc->sc_name);
		return (EINVAL);
	}
	if (sc->sc_mediasize > pp->mediasize) {
		G_MIRROR_DEBUG(1,
		    "Invalid size of disk %s (device %s), skipping.", pp->name,
		    sc->sc_name);
		return (EINVAL);
	}
	if (md->md_sectorsize != sc->sc_sectorsize) {
		G_MIRROR_DEBUG(1,
		    "Invalid '%s' field on disk %s (device %s), skipping.",
		    "md_sectorsize", pp->name, sc->sc_name);
		return (EINVAL);
	}
	if ((sc->sc_sectorsize % pp->sectorsize) != 0) {
		G_MIRROR_DEBUG(1,
		    "Invalid sector size of disk %s (device %s), skipping.",
		    pp->name, sc->sc_name);
		return (EINVAL);
	}
	if ((md->md_mflags & ~G_MIRROR_DEVICE_FLAG_MASK) != 0) {
		G_MIRROR_DEBUG(1,
		    "Invalid device flags on disk %s (device %s), skipping.",
		    pp->name, sc->sc_name);
		return (EINVAL);
	}
	if ((md->md_dflags & ~G_MIRROR_DISK_FLAG_MASK) != 0) {
		G_MIRROR_DEBUG(1,
		    "Invalid disk flags on disk %s (device %s), skipping.",
		    pp->name, sc->sc_name);
		return (EINVAL);
	}
	return (0);
}

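/*
 * Attach a new component to an existing device.  The metadata is
 * validated first, components with a stale generation id are rejected
 * as broken, and the disk is announced to the worker via a NEW event.
 * Metadata written by an older module version is upgraded in place.
 */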
int
g_mirror_add_disk(struct g_mirror_softc *sc, struct g_provider *pp,
    struct g_mirror_metadata *md)
{
	struct g_mirror_disk *disk;
	int error;

	g_topology_assert_not();
	G_MIRROR_DEBUG(2, "Adding disk %s.", pp->name);

	error = g_mirror_check_metadata(sc, pp, md);
	if (error != 0)
		return (error);
	if (sc->sc_state == G_MIRROR_DEVICE_STATE_RUNNING &&
	    md->md_genid < sc->sc_genid) {
		G_MIRROR_DEBUG(0, "Component %s (device %s) broken, skipping.",
		    pp->name, sc->sc_name);
		return (EINVAL);
	}
	disk = g_mirror_init_disk(sc, pp, md, &error);
	if (disk == NULL)
		return (error);
	error = g_mirror_event_send(disk, G_MIRROR_DISK_STATE_NEW,
	    G_MIRROR_EVENT_WAIT);
	if (error != 0)
		return (error);
	if (md->md_version < G_MIRROR_VERSION) {
		G_MIRROR_DEBUG(0, "Upgrading metadata on %s (v%d->v%d).",
		    pp->name, md->md_version, G_MIRROR_VERSION);
		g_mirror_update_metadata(disk);
	}
	return (0);
}

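/*
 * GEOM event handler posted from g_mirror_access() on the last close of
 * a device marked DESTROYING.  It temporarily drops the topology lock
 * and performs the deferred (soft) destruction under the softc lock.
 */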
static void
g_mirror_destroy_delayed(void *arg, int flag)
{
	struct g_mirror_softc *sc;
	int error;

	if (flag == EV_CANCEL) {
		G_MIRROR_DEBUG(1, "Destroying canceled.");
		return;
	}
	sc = arg;
	g_topology_unlock();
	sx_xlock(&sc->sc_lock);
	KASSERT((sc->sc_flags & G_MIRROR_DEVICE_FLAG_DESTROY) == 0,
	    ("DESTROY flag set on %s.", sc->sc_name));
	KASSERT((sc->sc_flags & G_MIRROR_DEVICE_FLAG_DESTROYING) != 0,
	    ("DESTROYING flag not set on %s.", sc->sc_name));
	G_MIRROR_DEBUG(1, "Destroying %s (delayed).", sc->sc_name);
	error = g_mirror_destroy(sc, G_MIRROR_DESTROY_SOFT);
	if (error != 0) {
		G_MIRROR_DEBUG(0, "Cannot destroy %s.", sc->sc_name);
		sx_xunlock(&sc->sc_lock);
	}
	g_topology_lock();
}

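/*
 * Access method for the mirror provider.  The dc* variables hold the
 * resulting access counts after this request is applied.  Opening a
 * device that is being destroyed fails with ENXIO; dropping the last
 * write reference marks the components clean via g_mirror_idle(), and
 * the last close of a DESTROYING device schedules the delayed
 * destruction event.
 */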
static int
g_mirror_access(struct g_provider *pp, int acr, int acw, int ace)
{
	struct g_mirror_softc *sc;
	int dcr, dcw, dce, error = 0;

	g_topology_assert();
	G_MIRROR_DEBUG(2, "Access request for %s: r%dw%de%d.", pp->name, acr,
	    acw, ace);

	sc = pp->geom->softc;
	if (sc == NULL && acr <= 0 && acw <= 0 && ace <= 0)
		return (0);
	KASSERT(sc != NULL, ("NULL softc (provider=%s).", pp->name));

	dcr = pp->acr + acr;
	dcw = pp->acw + acw;
	dce = pp->ace + ace;

	g_topology_unlock();
	sx_xlock(&sc->sc_lock);
	if ((sc->sc_flags & G_MIRROR_DEVICE_FLAG_DESTROY) != 0 ||
	    LIST_EMPTY(&sc->sc_disks)) {
		if (acr > 0 || acw > 0 || ace > 0)
			error = ENXIO;
		goto end;
	}
	if (dcw == 0 && !sc->sc_idle)
		g_mirror_idle(sc, dcw);
	if ((sc->sc_flags & G_MIRROR_DEVICE_FLAG_DESTROYING) != 0) {
		if (acr > 0 || acw > 0 || ace > 0) {
			error = ENXIO;
			goto end;
		}
		if (dcr == 0 && dcw == 0 && dce == 0) {
			g_post_event(g_mirror_destroy_delayed, sc, M_WAITOK,
			    sc, NULL);
		}
	}
end:
	sx_xunlock(&sc->sc_lock);
	g_topology_lock();
	return (error);
}

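/*
 * Create a new mirror device from tasted metadata: one "action" geom
 * that will carry the provider and one ".sync" geom used by the
 * synchronization consumers, plus the worker thread and the startup
 * timeout that forces the device to run even if some components never
 * appear.
 */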
static struct g_geom *
g_mirror_create(struct g_class *mp, const struct g_mirror_metadata *md)
{
	struct g_mirror_softc *sc;
	struct g_geom *gp;
	int error, timeout;

	g_topology_assert();
	G_MIRROR_DEBUG(1, "Creating device %s (id=%u).", md->md_name,
	    md->md_mid);

	/* One disk is minimum. */
	if (md->md_all < 1)
		return (NULL);
	/*
	 * Action geom.
	 */
	gp = g_new_geomf(mp, "%s", md->md_name);
	sc = malloc(sizeof(*sc), M_MIRROR, M_WAITOK | M_ZERO);
	gp->start = g_mirror_start;
	gp->orphan = g_mirror_orphan;
	gp->access = g_mirror_access;
	gp->dumpconf = g_mirror_dumpconf;

	sc->sc_id = md->md_mid;
	sc->sc_slice = md->md_slice;
	sc->sc_balance = md->md_balance;
	sc->sc_mediasize = md->md_mediasize;
	sc->sc_sectorsize = md->md_sectorsize;
	sc->sc_ndisks = md->md_all;
	sc->sc_flags = md->md_mflags;
	sc->sc_bump_id = 0;
	sc->sc_idle = 1;
	sc->sc_last_write = time_uptime;
	sc->sc_writes = 0;
	sx_init(&sc->sc_lock, "gmirror:lock");
	bioq_init(&sc->sc_queue);
	mtx_init(&sc->sc_queue_mtx, "gmirror:queue", NULL, MTX_DEF);
	bioq_init(&sc->sc_regular_delayed);
	bioq_init(&sc->sc_inflight);
	bioq_init(&sc->sc_sync_delayed);
	LIST_INIT(&sc->sc_disks);
	TAILQ_INIT(&sc->sc_events);
	mtx_init(&sc->sc_events_mtx, "gmirror:events", NULL, MTX_DEF);
	callout_init(&sc->sc_callout, CALLOUT_MPSAFE);
	sc->sc_state = G_MIRROR_DEVICE_STATE_STARTING;
	gp->softc = sc;
	sc->sc_geom = gp;
	sc->sc_provider = NULL;
	/*
	 * Synchronization geom.
	 */
	gp = g_new_geomf(mp, "%s.sync", md->md_name);
	gp->softc = sc;
	gp->orphan = g_mirror_orphan;
	sc->sc_sync.ds_geom = gp;
	sc->sc_sync.ds_ndisks = 0;
	error = kproc_create(g_mirror_worker, sc, &sc->sc_worker, 0, 0,
	    "g_mirror %s", md->md_name);
	if (error != 0) {
		G_MIRROR_DEBUG(1, "Cannot create kernel thread for %s.",
		    sc->sc_name);
		g_destroy_geom(sc->sc_sync.ds_geom);
		mtx_destroy(&sc->sc_events_mtx);
		mtx_destroy(&sc->sc_queue_mtx);
		sx_destroy(&sc->sc_lock);
		g_destroy_geom(sc->sc_geom);
		free(sc, M_MIRROR);
		return (NULL);
	}

	G_MIRROR_DEBUG(1, "Device %s created (%u components, id=%u).",
	    sc->sc_name, sc->sc_ndisks, sc->sc_id);

	sc->sc_rootmount = root_mount_hold("GMIRROR");
	G_MIRROR_DEBUG(1, "root_mount_hold %p", sc->sc_rootmount);
	/*
	 * Run timeout.
	 */
	timeout = g_mirror_timeout * hz;
	callout_reset(&sc->sc_callout, timeout, g_mirror_go, sc);
	return (sc->sc_geom);
}

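/*
 * Destroy the device.  With G_MIRROR_DESTROY_SOFT an open provider makes
 * the call fail with EBUSY; G_MIRROR_DESTROY_DELAYED stops synchronization
 * and defers destruction until the last close; G_MIRROR_DESTROY_HARD
 * proceeds even if the provider is still open.  The worker thread is
 * woken up and waited for before the softc is freed.
 */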
int
g_mirror_destroy(struct g_mirror_softc *sc, int how)
{
	struct g_mirror_disk *disk;
	struct g_provider *pp;

	g_topology_assert_not();
	if (sc == NULL)
		return (ENXIO);
	sx_assert(&sc->sc_lock, SX_XLOCKED);

	pp = sc->sc_provider;
	if (pp != NULL && (pp->acr != 0 || pp->acw != 0 || pp->ace != 0)) {
		switch (how) {
		case G_MIRROR_DESTROY_SOFT:
			G_MIRROR_DEBUG(1,
			    "Device %s is still open (r%dw%de%d).", pp->name,
			    pp->acr, pp->acw, pp->ace);
			return (EBUSY);
		case G_MIRROR_DESTROY_DELAYED:
			G_MIRROR_DEBUG(1,
			    "Device %s will be destroyed on last close.",
			    pp->name);
			LIST_FOREACH(disk, &sc->sc_disks, d_next) {
				if (disk->d_state ==
				    G_MIRROR_DISK_STATE_SYNCHRONIZING) {
					g_mirror_sync_stop(disk, 1);
				}
			}
			sc->sc_flags |= G_MIRROR_DEVICE_FLAG_DESTROYING;
			return (EBUSY);
		case G_MIRROR_DESTROY_HARD:
			G_MIRROR_DEBUG(1, "Device %s is still open, so it "
			    "can't be definitely removed.", pp->name);
		}
	}

	g_topology_lock();
	if (sc->sc_geom->softc == NULL) {
		g_topology_unlock();
		return (0);
	}
	sc->sc_geom->softc = NULL;
	sc->sc_sync.ds_geom->softc = NULL;
	g_topology_unlock();

	sc->sc_flags |= G_MIRROR_DEVICE_FLAG_DESTROY;
	sc->sc_flags |= G_MIRROR_DEVICE_FLAG_WAIT;
	G_MIRROR_DEBUG(4, "%s: Waking up %p.", __func__, sc);
	sx_xunlock(&sc->sc_lock);
	mtx_lock(&sc->sc_queue_mtx);
	wakeup(sc);
	mtx_unlock(&sc->sc_queue_mtx);
	G_MIRROR_DEBUG(4, "%s: Sleeping %p.", __func__, &sc->sc_worker);
	while (sc->sc_worker != NULL)
		tsleep(&sc->sc_worker, PRIBIO, "m:destroy", hz / 5);
	G_MIRROR_DEBUG(4, "%s: Woken up %p.", __func__, &sc->sc_worker);
	sx_xlock(&sc->sc_lock);
	g_mirror_destroy_device(sc);
	free(sc, M_MIRROR);
	return (0);
}

static void
g_mirror_taste_orphan(struct g_consumer *cp)
{

	KASSERT(1 == 0, ("%s called while tasting %s.", __func__,
	    cp->provider->name));
}

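/*
 * Taste method: read metadata from the offered provider and, if it is a
 * gmirror component, add it to the matching existing device or create a
 * new one.  The temporary geom and consumer used for reading the
 * metadata are destroyed before the device is looked up.
 */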
static struct g_geom *
g_mirror_taste(struct g_class *mp, struct g_provider *pp, int flags __unused)
{
	struct g_mirror_metadata md;
	struct g_mirror_softc *sc;
	struct g_consumer *cp;
	struct g_geom *gp;
	int error;

	g_topology_assert();
	g_trace(G_T_TOPOLOGY, "%s(%s, %s)", __func__, mp->name, pp->name);
	G_MIRROR_DEBUG(2, "Tasting %s.", pp->name);

	gp = g_new_geomf(mp, "mirror:taste");
	/*
	 * This orphan function should never be called.
	 */
	gp->orphan = g_mirror_taste_orphan;
	cp = g_new_consumer(gp);
	g_attach(cp, pp);
	error = g_mirror_read_metadata(cp, &md);
	g_detach(cp);
	g_destroy_consumer(cp);
	g_destroy_geom(gp);
	if (error != 0)
		return (NULL);
	gp = NULL;

	if (md.md_provider[0] != '\0' &&
	    !g_compare_names(md.md_provider, pp->name))
		return (NULL);
	if (md.md_provsize != 0 && md.md_provsize != pp->mediasize)
		return (NULL);
	if ((md.md_dflags & G_MIRROR_DISK_FLAG_INACTIVE) != 0) {
		G_MIRROR_DEBUG(0,
		    "Device %s: provider %s marked as inactive, skipping.",
		    md.md_name, pp->name);
		return (NULL);
	}
	if (g_mirror_debug >= 2)
		mirror_metadata_dump(&md);

	/*
	 * Let's check if device already exists.
	 */
	sc = NULL;
	LIST_FOREACH(gp, &mp->geom, geom) {
		sc = gp->softc;
		if (sc == NULL)
			continue;
		if (sc->sc_sync.ds_geom == gp)
			continue;
		if (strcmp(md.md_name, sc->sc_name) != 0)
			continue;
		if (md.md_mid != sc->sc_id) {
			G_MIRROR_DEBUG(0, "Device %s already configured.",
			    sc->sc_name);
			return (NULL);
		}
		break;
	}
	if (gp == NULL) {
		gp = g_mirror_create(mp, &md);
		if (gp == NULL) {
			G_MIRROR_DEBUG(0, "Cannot create device %s.",
			    md.md_name);
			return (NULL);
		}
		sc = gp->softc;
	}
	G_MIRROR_DEBUG(1, "Adding disk %s to %s.", pp->name, gp->name);
	g_topology_unlock();
	sx_xlock(&sc->sc_lock);
	error = g_mirror_add_disk(sc, pp, &md);
	if (error != 0) {
		G_MIRROR_DEBUG(0, "Cannot add disk %s to %s (error=%d).",
		    pp->name, gp->name, error);
		if (LIST_EMPTY(&sc->sc_disks)) {
			g_cancel_event(sc);
			g_mirror_destroy(sc, G_MIRROR_DESTROY_HARD);
			g_topology_lock();
			return (NULL);
		}
		gp = NULL;
	}
	sx_xunlock(&sc->sc_lock);
	g_topology_lock();
	return (gp);
}

static int
g_mirror_destroy_geom(struct gctl_req *req __unused,
    struct g_class *mp __unused, struct g_geom *gp)
{
	struct g_mirror_softc *sc;
	int error;

	g_topology_unlock();
	sc = gp->softc;
	sx_xlock(&sc->sc_lock);
	g_cancel_event(sc);
	error = g_mirror_destroy(gp->softc, G_MIRROR_DESTROY_SOFT);
	if (error != 0)
		sx_xunlock(&sc->sc_lock);
	g_topology_lock();
	return (error);
}

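/*
 * Dump device and per-consumer state as XML for the GEOM configuration
 * tree (this is what userland tools such as gmirror(8) parse).  The
 * topology lock is dropped while the softc lock is held, matching the
 * lock ordering used elsewhere in this file.
 */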
static void
g_mirror_dumpconf(struct sbuf *sb, const char *indent, struct g_geom *gp,
    struct g_consumer *cp, struct g_provider *pp)
{
	struct g_mirror_softc *sc;

	g_topology_assert();

	sc = gp->softc;
	if (sc == NULL)
		return;
	/* Skip synchronization geom. */
	if (gp == sc->sc_sync.ds_geom)
		return;
	if (pp != NULL) {
		/* Nothing here. */
	} else if (cp != NULL) {
		struct g_mirror_disk *disk;

		disk = cp->private;
		if (disk == NULL)
			return;
		g_topology_unlock();
		sx_xlock(&sc->sc_lock);
		sbuf_printf(sb, "%s<ID>%u</ID>\n", indent, (u_int)disk->d_id);
		if (disk->d_state == G_MIRROR_DISK_STATE_SYNCHRONIZING) {
			sbuf_printf(sb, "%s<Synchronized>", indent);
			if (disk->d_sync.ds_offset == 0)
				sbuf_printf(sb, "0%%");
			else {
				sbuf_printf(sb, "%u%%",
				    (u_int)((disk->d_sync.ds_offset * 100) /
				    sc->sc_provider->mediasize));
			}
			sbuf_printf(sb, "</Synchronized>\n");
		}
		sbuf_printf(sb, "%s<SyncID>%u</SyncID>\n", indent,
		    disk->d_sync.ds_syncid);
		sbuf_printf(sb, "%s<GenID>%u</GenID>\n", indent,
		    disk->d_genid);
		sbuf_printf(sb, "%s<Flags>", indent);
		if (disk->d_flags == 0)
			sbuf_printf(sb, "NONE");
		else {
			int first = 1;

#define	ADD_FLAG(flag, name)	do {					\
	if ((disk->d_flags & (flag)) != 0) {				\
		if (!first)						\
			sbuf_printf(sb, ", ");				\
		else							\
			first = 0;					\
		sbuf_printf(sb, name);					\
	}								\
} while (0)
			ADD_FLAG(G_MIRROR_DISK_FLAG_DIRTY, "DIRTY");
			ADD_FLAG(G_MIRROR_DISK_FLAG_HARDCODED, "HARDCODED");
			ADD_FLAG(G_MIRROR_DISK_FLAG_INACTIVE, "INACTIVE");
			ADD_FLAG(G_MIRROR_DISK_FLAG_SYNCHRONIZING,
			    "SYNCHRONIZING");
			ADD_FLAG(G_MIRROR_DISK_FLAG_FORCE_SYNC, "FORCE_SYNC");
			ADD_FLAG(G_MIRROR_DISK_FLAG_BROKEN, "BROKEN");
#undef	ADD_FLAG
		}
		sbuf_printf(sb, "</Flags>\n");
		sbuf_printf(sb, "%s<Priority>%u</Priority>\n", indent,
		    disk->d_priority);
		sbuf_printf(sb, "%s<State>%s</State>\n", indent,
		    g_mirror_disk_state2str(disk->d_state));
		sx_xunlock(&sc->sc_lock);
		g_topology_lock();
	} else {
		g_topology_unlock();
		sx_xlock(&sc->sc_lock);
		sbuf_printf(sb, "%s<ID>%u</ID>\n", indent, (u_int)sc->sc_id);
		sbuf_printf(sb, "%s<SyncID>%u</SyncID>\n", indent, sc->sc_syncid);
		sbuf_printf(sb, "%s<GenID>%u</GenID>\n", indent, sc->sc_genid);
		sbuf_printf(sb, "%s<Flags>", indent);
		if (sc->sc_flags == 0)
			sbuf_printf(sb, "NONE");
		else {
			int first = 1;

#define	ADD_FLAG(flag, name)	do {					\
	if ((sc->sc_flags & (flag)) != 0) {				\
		if (!first)						\
			sbuf_printf(sb, ", ");				\
		else							\
			first = 0;					\
		sbuf_printf(sb, name);					\
	}								\
} while (0)
			ADD_FLAG(G_MIRROR_DEVICE_FLAG_NOFAILSYNC, "NOFAILSYNC");
			ADD_FLAG(G_MIRROR_DEVICE_FLAG_NOAUTOSYNC, "NOAUTOSYNC");
#undef	ADD_FLAG
		}
		sbuf_printf(sb, "</Flags>\n");
		sbuf_printf(sb, "%s<Slice>%u</Slice>\n", indent,
		    (u_int)sc->sc_slice);
		sbuf_printf(sb, "%s<Balance>%s</Balance>\n", indent,
		    balance_name(sc->sc_balance));
		sbuf_printf(sb, "%s<Components>%u</Components>\n", indent,
		    sc->sc_ndisks);
		sbuf_printf(sb, "%s<State>", indent);
		if (sc->sc_state == G_MIRROR_DEVICE_STATE_STARTING)
			sbuf_printf(sb, "%s", "STARTING");
		else if (sc->sc_ndisks ==
		    g_mirror_ndisks(sc, G_MIRROR_DISK_STATE_ACTIVE))
			sbuf_printf(sb, "%s", "COMPLETE");
		else
			sbuf_printf(sb, "%s", "DEGRADED");
		sbuf_printf(sb, "</State>\n");
		sx_xunlock(&sc->sc_lock);
		g_topology_lock();
	}
}

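/*
 * Shutdown hook registered in g_mirror_init().  Before the final sync
 * of buffers, request delayed destruction of every mirror so that each
 * device is torn down cleanly once its provider sees its last close.
 */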
static void
g_mirror_shutdown_pre_sync(void *arg, int howto)
{
	struct g_class *mp;
	struct g_geom *gp, *gp2;
	struct g_mirror_softc *sc;
	int error;

	mp = arg;
	DROP_GIANT();
	g_topology_lock();
	LIST_FOREACH_SAFE(gp, &mp->geom, geom, gp2) {
		if ((sc = gp->softc) == NULL)
			continue;
		/* Skip synchronization geom. */
		if (gp == sc->sc_sync.ds_geom)
			continue;
		g_topology_unlock();
		sx_xlock(&sc->sc_lock);
		g_cancel_event(sc);
		error = g_mirror_destroy(sc, G_MIRROR_DESTROY_DELAYED);
		if (error != 0)
			sx_xunlock(&sc->sc_lock);
		g_topology_lock();
	}
	g_topology_unlock();
	PICKUP_GIANT();
}

static void
g_mirror_init(struct g_class *mp)
{

	g_mirror_pre_sync = EVENTHANDLER_REGISTER(shutdown_pre_sync,
	    g_mirror_shutdown_pre_sync, mp, SHUTDOWN_PRI_FIRST);
	if (g_mirror_pre_sync == NULL)
		G_MIRROR_DEBUG(0, "Warning! Cannot register shutdown event.");
}

static void
g_mirror_fini(struct g_class *mp)
{

	if (g_mirror_pre_sync != NULL)
		EVENTHANDLER_DEREGISTER(shutdown_pre_sync, g_mirror_pre_sync);
}

DECLARE_GEOM_CLASS(g_mirror_class, g_mirror);