1 /*-
2 * Copyright (c) 2004-2006 Pawel Jakub Dawidek <pjd@FreeBSD.org>
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 *
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24 * SUCH DAMAGE.
25 */
26
27 #include <sys/cdefs.h>
28 __FBSDID("$FreeBSD: releng/8.3/sys/geom/raid3/g_raid3.c 222920 2011-06-10 09:12:09Z mav $");
29
30 #include <sys/param.h>
31 #include <sys/systm.h>
32 #include <sys/kernel.h>
33 #include <sys/module.h>
34 #include <sys/limits.h>
35 #include <sys/lock.h>
36 #include <sys/mutex.h>
37 #include <sys/bio.h>
38 #include <sys/sysctl.h>
39 #include <sys/malloc.h>
40 #include <sys/eventhandler.h>
41 #include <vm/uma.h>
42 #include <geom/geom.h>
43 #include <sys/proc.h>
44 #include <sys/kthread.h>
45 #include <sys/sched.h>
46 #include <geom/raid3/g_raid3.h>
47
48
49 static MALLOC_DEFINE(M_RAID3, "raid3_data", "GEOM_RAID3 Data");
50
51 SYSCTL_DECL(_kern_geom);
52 SYSCTL_NODE(_kern_geom, OID_AUTO, raid3, CTLFLAG_RW, 0, "GEOM_RAID3 stuff");
53 u_int g_raid3_debug = 0;
54 TUNABLE_INT("kern.geom.raid3.debug", &g_raid3_debug);
55 SYSCTL_UINT(_kern_geom_raid3, OID_AUTO, debug, CTLFLAG_RW, &g_raid3_debug, 0,
56 "Debug level");
57 static u_int g_raid3_timeout = 4;
58 TUNABLE_INT("kern.geom.raid3.timeout", &g_raid3_timeout);
59 SYSCTL_UINT(_kern_geom_raid3, OID_AUTO, timeout, CTLFLAG_RW, &g_raid3_timeout,
60 0, "Time to wait on all raid3 components");
61 static u_int g_raid3_idletime = 5;
62 TUNABLE_INT("kern.geom.raid3.idletime", &g_raid3_idletime);
63 SYSCTL_UINT(_kern_geom_raid3, OID_AUTO, idletime, CTLFLAG_RW,
64 &g_raid3_idletime, 0, "Mark components as clean when idling");
65 static u_int g_raid3_disconnect_on_failure = 1;
66 TUNABLE_INT("kern.geom.raid3.disconnect_on_failure",
67 &g_raid3_disconnect_on_failure);
68 SYSCTL_UINT(_kern_geom_raid3, OID_AUTO, disconnect_on_failure, CTLFLAG_RW,
69 &g_raid3_disconnect_on_failure, 0, "Disconnect component on I/O failure.");
70 static u_int g_raid3_syncreqs = 2;
71 TUNABLE_INT("kern.geom.raid3.sync_requests", &g_raid3_syncreqs);
72 SYSCTL_UINT(_kern_geom_raid3, OID_AUTO, sync_requests, CTLFLAG_RDTUN,
73 &g_raid3_syncreqs, 0, "Parallel synchronization I/O requests.");
74 static u_int g_raid3_use_malloc = 0;
75 TUNABLE_INT("kern.geom.raid3.use_malloc", &g_raid3_use_malloc);
76 SYSCTL_UINT(_kern_geom_raid3, OID_AUTO, use_malloc, CTLFLAG_RDTUN,
77 &g_raid3_use_malloc, 0, "Use malloc(9) instead of uma(9).");
78
79 static u_int g_raid3_n64k = 50;
80 TUNABLE_INT("kern.geom.raid3.n64k", &g_raid3_n64k);
81 SYSCTL_UINT(_kern_geom_raid3, OID_AUTO, n64k, CTLFLAG_RD, &g_raid3_n64k, 0,
82 "Maximum number of 64kB allocations");
83 static u_int g_raid3_n16k = 200;
84 TUNABLE_INT("kern.geom.raid3.n16k", &g_raid3_n16k);
85 SYSCTL_UINT(_kern_geom_raid3, OID_AUTO, n16k, CTLFLAG_RD, &g_raid3_n16k, 0,
86 "Maximum number of 16kB allocations");
87 static u_int g_raid3_n4k = 1200;
88 TUNABLE_INT("kern.geom.raid3.n4k", &g_raid3_n4k);
89 SYSCTL_UINT(_kern_geom_raid3, OID_AUTO, n4k, CTLFLAG_RD, &g_raid3_n4k, 0,
90 "Maximum number of 4kB allocations");
91
92 SYSCTL_NODE(_kern_geom_raid3, OID_AUTO, stat, CTLFLAG_RW, 0,
93 "GEOM_RAID3 statistics");
94 static u_int g_raid3_parity_mismatch = 0;
95 SYSCTL_UINT(_kern_geom_raid3_stat, OID_AUTO, parity_mismatch, CTLFLAG_RD,
96 &g_raid3_parity_mismatch, 0, "Number of failures in VERIFY mode");
97
98 #define MSLEEP(ident, mtx, priority, wmesg, timeout) do { \
99 G_RAID3_DEBUG(4, "%s: Sleeping %p.", __func__, (ident)); \
100 msleep((ident), (mtx), (priority), (wmesg), (timeout)); \
101 G_RAID3_DEBUG(4, "%s: Woken up %p.", __func__, (ident)); \
102 } while (0)
103
104 static eventhandler_tag g_raid3_pre_sync = NULL;
105
106 static int g_raid3_destroy_geom(struct gctl_req *req, struct g_class *mp,
107 struct g_geom *gp);
108 static g_taste_t g_raid3_taste;
109 static void g_raid3_init(struct g_class *mp);
110 static void g_raid3_fini(struct g_class *mp);
111
112 struct g_class g_raid3_class = {
113 .name = G_RAID3_CLASS_NAME,
114 .version = G_VERSION,
115 .ctlreq = g_raid3_config,
116 .taste = g_raid3_taste,
117 .destroy_geom = g_raid3_destroy_geom,
118 .init = g_raid3_init,
119 .fini = g_raid3_fini
120 };
121
122
123 static void g_raid3_destroy_provider(struct g_raid3_softc *sc);
124 static int g_raid3_update_disk(struct g_raid3_disk *disk, u_int state);
125 static void g_raid3_update_device(struct g_raid3_softc *sc, boolean_t force);
126 static void g_raid3_dumpconf(struct sbuf *sb, const char *indent,
127 struct g_geom *gp, struct g_consumer *cp, struct g_provider *pp);
128 static void g_raid3_sync_stop(struct g_raid3_softc *sc, int type);
129 static int g_raid3_register_request(struct bio *pbp);
130 static void g_raid3_sync_release(struct g_raid3_softc *sc);
131
132
133 static const char *
134 g_raid3_disk_state2str(int state)
135 {
136
137 switch (state) {
138 case G_RAID3_DISK_STATE_NODISK:
139 return ("NODISK");
140 case G_RAID3_DISK_STATE_NONE:
141 return ("NONE");
142 case G_RAID3_DISK_STATE_NEW:
143 return ("NEW");
144 case G_RAID3_DISK_STATE_ACTIVE:
145 return ("ACTIVE");
146 case G_RAID3_DISK_STATE_STALE:
147 return ("STALE");
148 case G_RAID3_DISK_STATE_SYNCHRONIZING:
149 return ("SYNCHRONIZING");
150 case G_RAID3_DISK_STATE_DISCONNECTED:
151 return ("DISCONNECTED");
152 default:
153 return ("INVALID");
154 }
155 }
156
157 static const char *
158 g_raid3_device_state2str(int state)
159 {
160
161 switch (state) {
162 case G_RAID3_DEVICE_STATE_STARTING:
163 return ("STARTING");
164 case G_RAID3_DEVICE_STATE_DEGRADED:
165 return ("DEGRADED");
166 case G_RAID3_DEVICE_STATE_COMPLETE:
167 return ("COMPLETE");
168 default:
169 return ("INVALID");
170 }
171 }
172
173 const char *
174 g_raid3_get_diskname(struct g_raid3_disk *disk)
175 {
176
177 if (disk->d_consumer == NULL || disk->d_consumer->provider == NULL)
178 return ("[unknown]");
179 return (disk->d_name);
180 }
181
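/*
 * Allocate a data buffer for a component request.  Depending on the
 * kern.geom.raid3.use_malloc tunable and the request size, the buffer
 * comes either from malloc(9) or from one of the per-size UMA zones
 * (64kB, 16kB, 4kB); g_raid3_free() is the matching release path.
 */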
182 static void *
183 g_raid3_alloc(struct g_raid3_softc *sc, size_t size, int flags)
184 {
185 void *ptr;
186 enum g_raid3_zones zone;
187
188 if (g_raid3_use_malloc ||
189 (zone = g_raid3_zone(size)) == G_RAID3_NUM_ZONES)
190 ptr = malloc(size, M_RAID3, flags);
191 else {
192 ptr = uma_zalloc_arg(sc->sc_zones[zone].sz_zone,
193 &sc->sc_zones[zone], flags);
194 sc->sc_zones[zone].sz_requested++;
195 if (ptr == NULL)
196 sc->sc_zones[zone].sz_failed++;
197 }
198 return (ptr);
199 }
200
201 static void
202 g_raid3_free(struct g_raid3_softc *sc, void *ptr, size_t size)
203 {
204 enum g_raid3_zones zone;
205
206 if (g_raid3_use_malloc ||
207 (zone = g_raid3_zone(size)) == G_RAID3_NUM_ZONES)
208 free(ptr, M_RAID3);
209 else {
210 uma_zfree_arg(sc->sc_zones[zone].sz_zone,
211 ptr, &sc->sc_zones[zone]);
212 }
213 }
214
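/*
 * UMA constructor/destructor pair for the per-size zones.  The
 * constructor enforces the per-zone item limit (sz_max) and both
 * keep the sz_inuse counter up to date.
 */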
215 static int
216 g_raid3_uma_ctor(void *mem, int size, void *arg, int flags)
217 {
218 struct g_raid3_zone *sz = arg;
219
220 if (sz->sz_max > 0 && sz->sz_inuse == sz->sz_max)
221 return (ENOMEM);
222 sz->sz_inuse++;
223 return (0);
224 }
225
226 static void
227 g_raid3_uma_dtor(void *mem, int size, void *arg)
228 {
229 struct g_raid3_zone *sz = arg;
230
231 sz->sz_inuse--;
232 }
233
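/*
 * XOR 'size' bytes from 'src' into 'dst'.  The loop is unrolled to
 * process 128 bytes (sixteen 64-bit words) per iteration, hence the
 * KASSERT() that the size is a multiple of 128.
 */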
234 #define g_raid3_xor(src, dst, size) \
235 _g_raid3_xor((uint64_t *)(src), \
236 (uint64_t *)(dst), (size_t)size)
237 static void
238 _g_raid3_xor(uint64_t *src, uint64_t *dst, size_t size)
239 {
240
241 KASSERT((size % 128) == 0, ("Invalid size: %zu.", size));
242 for (; size > 0; size -= 128) {
243 *dst++ ^= (*src++);
244 *dst++ ^= (*src++);
245 *dst++ ^= (*src++);
246 *dst++ ^= (*src++);
247 *dst++ ^= (*src++);
248 *dst++ ^= (*src++);
249 *dst++ ^= (*src++);
250 *dst++ ^= (*src++);
251 *dst++ ^= (*src++);
252 *dst++ ^= (*src++);
253 *dst++ ^= (*src++);
254 *dst++ ^= (*src++);
255 *dst++ ^= (*src++);
256 *dst++ ^= (*src++);
257 *dst++ ^= (*src++);
258 *dst++ ^= (*src++);
259 }
260 }
261
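/*
 * Return non-zero if the bio's data buffer contains only zeroes.
 * Used in VERIFY mode, where XOR-ing all components together should
 * yield zero if the parity is consistent.
 */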
262 static int
263 g_raid3_is_zero(struct bio *bp)
264 {
265 static const uint64_t zeros[] = {
266 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
267 };
268 u_char *addr;
269 ssize_t size;
270
271 size = bp->bio_length;
272 addr = (u_char *)bp->bio_data;
273 for (; size > 0; size -= sizeof(zeros), addr += sizeof(zeros)) {
274 if (bcmp(addr, zeros, sizeof(zeros)) != 0)
275 return (0);
276 }
277 return (1);
278 }
279
280 /*
281 * --- Event handling functions ---
282 * Events in geom_raid3 are used to maintain disk and device status
283 * from a single thread, which simplifies locking.
284 */
285 static void
286 g_raid3_event_free(struct g_raid3_event *ep)
287 {
288
289 free(ep, M_RAID3);
290 }
291
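/*
 * Queue an event for the worker thread.  With G_RAID3_EVENT_DEVICE the
 * argument is the softc, otherwise it is the disk the event refers to.
 * Unless G_RAID3_EVENT_DONTWAIT is given, sleep (dropping sc_lock) until
 * the worker marks the event as done and return the resulting error.
 */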
292 int
293 g_raid3_event_send(void *arg, int state, int flags)
294 {
295 struct g_raid3_softc *sc;
296 struct g_raid3_disk *disk;
297 struct g_raid3_event *ep;
298 int error;
299
300 ep = malloc(sizeof(*ep), M_RAID3, M_WAITOK);
301 G_RAID3_DEBUG(4, "%s: Sending event %p.", __func__, ep);
302 if ((flags & G_RAID3_EVENT_DEVICE) != 0) {
303 disk = NULL;
304 sc = arg;
305 } else {
306 disk = arg;
307 sc = disk->d_softc;
308 }
309 ep->e_disk = disk;
310 ep->e_state = state;
311 ep->e_flags = flags;
312 ep->e_error = 0;
313 mtx_lock(&sc->sc_events_mtx);
314 TAILQ_INSERT_TAIL(&sc->sc_events, ep, e_next);
315 mtx_unlock(&sc->sc_events_mtx);
316 G_RAID3_DEBUG(4, "%s: Waking up %p.", __func__, sc);
317 mtx_lock(&sc->sc_queue_mtx);
318 wakeup(sc);
319 wakeup(&sc->sc_queue);
320 mtx_unlock(&sc->sc_queue_mtx);
321 if ((flags & G_RAID3_EVENT_DONTWAIT) != 0)
322 return (0);
323 sx_assert(&sc->sc_lock, SX_XLOCKED);
324 G_RAID3_DEBUG(4, "%s: Sleeping %p.", __func__, ep);
325 sx_xunlock(&sc->sc_lock);
326 while ((ep->e_flags & G_RAID3_EVENT_DONE) == 0) {
327 mtx_lock(&sc->sc_events_mtx);
328 MSLEEP(ep, &sc->sc_events_mtx, PRIBIO | PDROP, "r3:event",
329 hz * 5);
330 }
331 error = ep->e_error;
332 g_raid3_event_free(ep);
333 sx_xlock(&sc->sc_lock);
334 return (error);
335 }
336
337 static struct g_raid3_event *
338 g_raid3_event_get(struct g_raid3_softc *sc)
339 {
340 struct g_raid3_event *ep;
341
342 mtx_lock(&sc->sc_events_mtx);
343 ep = TAILQ_FIRST(&sc->sc_events);
344 mtx_unlock(&sc->sc_events_mtx);
345 return (ep);
346 }
347
348 static void
349 g_raid3_event_remove(struct g_raid3_softc *sc, struct g_raid3_event *ep)
350 {
351
352 mtx_lock(&sc->sc_events_mtx);
353 TAILQ_REMOVE(&sc->sc_events, ep, e_next);
354 mtx_unlock(&sc->sc_events_mtx);
355 }
356
357 static void
358 g_raid3_event_cancel(struct g_raid3_disk *disk)
359 {
360 struct g_raid3_softc *sc;
361 struct g_raid3_event *ep, *tmpep;
362
363 sc = disk->d_softc;
364 sx_assert(&sc->sc_lock, SX_XLOCKED);
365
366 mtx_lock(&sc->sc_events_mtx);
367 TAILQ_FOREACH_SAFE(ep, &sc->sc_events, e_next, tmpep) {
368 if ((ep->e_flags & G_RAID3_EVENT_DEVICE) != 0)
369 continue;
370 if (ep->e_disk != disk)
371 continue;
372 TAILQ_REMOVE(&sc->sc_events, ep, e_next);
373 if ((ep->e_flags & G_RAID3_EVENT_DONTWAIT) != 0)
374 g_raid3_event_free(ep);
375 else {
376 ep->e_error = ECANCELED;
377 wakeup(ep);
378 }
379 }
380 mtx_unlock(&sc->sc_events_mtx);
381 }
382
383 /*
384 * Return the number of disks in the given state.
385 * If state is equal to -1, count all connected disks.
386 */
387 u_int
388 g_raid3_ndisks(struct g_raid3_softc *sc, int state)
389 {
390 struct g_raid3_disk *disk;
391 u_int n, ndisks;
392
393 sx_assert(&sc->sc_lock, SX_LOCKED);
394
395 for (n = ndisks = 0; n < sc->sc_ndisks; n++) {
396 disk = &sc->sc_disks[n];
397 if (disk->d_state == G_RAID3_DISK_STATE_NODISK)
398 continue;
399 if (state == -1 || disk->d_state == state)
400 ndisks++;
401 }
402 return (ndisks);
403 }
404
405 static u_int
406 g_raid3_nrequests(struct g_raid3_softc *sc, struct g_consumer *cp)
407 {
408 struct bio *bp;
409 u_int nreqs = 0;
410
411 mtx_lock(&sc->sc_queue_mtx);
412 TAILQ_FOREACH(bp, &sc->sc_queue.queue, bio_queue) {
413 if (bp->bio_from == cp)
414 nreqs++;
415 }
416 mtx_unlock(&sc->sc_queue_mtx);
417 return (nreqs);
418 }
419
420 static int
421 g_raid3_is_busy(struct g_raid3_softc *sc, struct g_consumer *cp)
422 {
423
424 if (cp->index > 0) {
425 G_RAID3_DEBUG(2,
426 "I/O requests for %s exist, can't destroy it now.",
427 cp->provider->name);
428 return (1);
429 }
430 if (g_raid3_nrequests(sc, cp) > 0) {
431 G_RAID3_DEBUG(2,
432 "I/O requests for %s in queue, can't destroy it now.",
433 cp->provider->name);
434 return (1);
435 }
436 return (0);
437 }
438
439 static void
440 g_raid3_destroy_consumer(void *arg, int flags __unused)
441 {
442 struct g_consumer *cp;
443
444 g_topology_assert();
445
446 cp = arg;
447 G_RAID3_DEBUG(1, "Consumer %s destroyed.", cp->provider->name);
448 g_detach(cp);
449 g_destroy_consumer(cp);
450 }
451
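/*
 * Close the consumer's provider and get rid of the consumer.  If the
 * consumer was open for writing, g_access() triggers a retaste of the
 * provider; in that case the detach and destroy are deferred to an
 * event, so that our class (still attached at retaste time) is not
 * offered the provider it has just released.
 */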
452 static void
453 g_raid3_kill_consumer(struct g_raid3_softc *sc, struct g_consumer *cp)
454 {
455 struct g_provider *pp;
456 int retaste_wait;
457
458 g_topology_assert();
459
460 cp->private = NULL;
461 if (g_raid3_is_busy(sc, cp))
462 return;
463 G_RAID3_DEBUG(2, "Consumer %s destroyed.", cp->provider->name);
464 pp = cp->provider;
465 retaste_wait = 0;
466 if (cp->acw == 1) {
467 if ((pp->geom->flags & G_GEOM_WITHER) == 0)
468 retaste_wait = 1;
469 }
470 G_RAID3_DEBUG(2, "Access %s r%dw%de%d = %d", pp->name, -cp->acr,
471 -cp->acw, -cp->ace, 0);
472 if (cp->acr > 0 || cp->acw > 0 || cp->ace > 0)
473 g_access(cp, -cp->acr, -cp->acw, -cp->ace);
474 if (retaste_wait) {
475 /*
476 * After the retaste event has been sent (inside g_access()), we can
477 * send an event to detach and destroy the consumer.
478 * A class which already has a consumer attached to the given provider
479 * will not receive a retaste event for that provider.
480 * This is how we ignore retaste events when closing consumers opened
481 * for write: the consumer is detached and destroyed only after the
482 * retaste event has been sent.
483 */
484 g_post_event(g_raid3_destroy_consumer, cp, M_WAITOK, NULL);
485 return;
486 }
487 G_RAID3_DEBUG(1, "Consumer %s destroyed.", pp->name);
488 g_detach(cp);
489 g_destroy_consumer(cp);
490 }
491
492 static int
493 g_raid3_connect_disk(struct g_raid3_disk *disk, struct g_provider *pp)
494 {
495 struct g_consumer *cp;
496 int error;
497
498 g_topology_assert_not();
499 KASSERT(disk->d_consumer == NULL,
500 ("Disk already connected (device %s).", disk->d_softc->sc_name));
501
502 g_topology_lock();
503 cp = g_new_consumer(disk->d_softc->sc_geom);
504 error = g_attach(cp, pp);
505 if (error != 0) {
506 g_destroy_consumer(cp);
507 g_topology_unlock();
508 return (error);
509 }
510 error = g_access(cp, 1, 1, 1);
511 g_topology_unlock();
512 if (error != 0) {
513 g_detach(cp);
514 g_destroy_consumer(cp);
515 G_RAID3_DEBUG(0, "Cannot open consumer %s (error=%d).",
516 pp->name, error);
517 return (error);
518 }
519 disk->d_consumer = cp;
520 disk->d_consumer->private = disk;
521 disk->d_consumer->index = 0;
522 G_RAID3_DEBUG(2, "Disk %s connected.", g_raid3_get_diskname(disk));
523 return (0);
524 }
525
526 static void
527 g_raid3_disconnect_consumer(struct g_raid3_softc *sc, struct g_consumer *cp)
528 {
529
530 g_topology_assert();
531
532 if (cp == NULL)
533 return;
534 if (cp->provider != NULL)
535 g_raid3_kill_consumer(sc, cp);
536 else
537 g_destroy_consumer(cp);
538 }
539
540 /*
541 * Initialize a disk: create a consumer, attach it to the provider and
542 * open access (r1w1e1) to it.
543 */
544 static struct g_raid3_disk *
545 g_raid3_init_disk(struct g_raid3_softc *sc, struct g_provider *pp,
546 struct g_raid3_metadata *md, int *errorp)
547 {
548 struct g_raid3_disk *disk;
549 int error;
550
551 disk = &sc->sc_disks[md->md_no];
552 error = g_raid3_connect_disk(disk, pp);
553 if (error != 0) {
554 if (errorp != NULL)
555 *errorp = error;
556 return (NULL);
557 }
558 disk->d_state = G_RAID3_DISK_STATE_NONE;
559 disk->d_flags = md->md_dflags;
560 if (md->md_provider[0] != '\0')
561 disk->d_flags |= G_RAID3_DISK_FLAG_HARDCODED;
562 disk->d_sync.ds_consumer = NULL;
563 disk->d_sync.ds_offset = md->md_sync_offset;
564 disk->d_sync.ds_offset_done = md->md_sync_offset;
565 disk->d_genid = md->md_genid;
566 disk->d_sync.ds_syncid = md->md_syncid;
567 if (errorp != NULL)
568 *errorp = 0;
569 return (disk);
570 }
571
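/*
 * Disconnect the disk from its provider: stop synchronization if this
 * is the disk being synchronized, drop the consumer and mark the slot
 * as NODISK.
 */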
572 static void
573 g_raid3_destroy_disk(struct g_raid3_disk *disk)
574 {
575 struct g_raid3_softc *sc;
576
577 g_topology_assert_not();
578 sc = disk->d_softc;
579 sx_assert(&sc->sc_lock, SX_XLOCKED);
580
581 if (disk->d_state == G_RAID3_DISK_STATE_NODISK)
582 return;
583 g_raid3_event_cancel(disk);
584 switch (disk->d_state) {
585 case G_RAID3_DISK_STATE_SYNCHRONIZING:
586 if (sc->sc_syncdisk != NULL)
587 g_raid3_sync_stop(sc, 1);
588 /* FALLTHROUGH */
589 case G_RAID3_DISK_STATE_NEW:
590 case G_RAID3_DISK_STATE_STALE:
591 case G_RAID3_DISK_STATE_ACTIVE:
592 g_topology_lock();
593 g_raid3_disconnect_consumer(sc, disk->d_consumer);
594 g_topology_unlock();
595 disk->d_consumer = NULL;
596 break;
597 default:
598 KASSERT(0 == 1, ("Wrong disk state (%s, %s).",
599 g_raid3_get_diskname(disk),
600 g_raid3_disk_state2str(disk->d_state)));
601 }
602 disk->d_state = G_RAID3_DISK_STATE_NODISK;
603 }
604
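/*
 * Tear down the whole device: destroy the provider, write final metadata
 * and disconnect the remaining disks, cancel pending events, wither both
 * geoms and release per-device resources (zones, mutexes, locks).
 */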
605 static void
606 g_raid3_destroy_device(struct g_raid3_softc *sc)
607 {
608 struct g_raid3_event *ep;
609 struct g_raid3_disk *disk;
610 struct g_geom *gp;
611 struct g_consumer *cp;
612 u_int n;
613
614 g_topology_assert_not();
615 sx_assert(&sc->sc_lock, SX_XLOCKED);
616
617 gp = sc->sc_geom;
618 if (sc->sc_provider != NULL)
619 g_raid3_destroy_provider(sc);
620 for (n = 0; n < sc->sc_ndisks; n++) {
621 disk = &sc->sc_disks[n];
622 if (disk->d_state != G_RAID3_DISK_STATE_NODISK) {
623 disk->d_flags &= ~G_RAID3_DISK_FLAG_DIRTY;
624 g_raid3_update_metadata(disk);
625 g_raid3_destroy_disk(disk);
626 }
627 }
628 while ((ep = g_raid3_event_get(sc)) != NULL) {
629 g_raid3_event_remove(sc, ep);
630 if ((ep->e_flags & G_RAID3_EVENT_DONTWAIT) != 0)
631 g_raid3_event_free(ep);
632 else {
633 ep->e_error = ECANCELED;
634 ep->e_flags |= G_RAID3_EVENT_DONE;
635 G_RAID3_DEBUG(4, "%s: Waking up %p.", __func__, ep);
636 mtx_lock(&sc->sc_events_mtx);
637 wakeup(ep);
638 mtx_unlock(&sc->sc_events_mtx);
639 }
640 }
641 callout_drain(&sc->sc_callout);
642 cp = LIST_FIRST(&sc->sc_sync.ds_geom->consumer);
643 g_topology_lock();
644 if (cp != NULL)
645 g_raid3_disconnect_consumer(sc, cp);
646 g_wither_geom(sc->sc_sync.ds_geom, ENXIO);
647 G_RAID3_DEBUG(0, "Device %s destroyed.", gp->name);
648 g_wither_geom(gp, ENXIO);
649 g_topology_unlock();
650 if (!g_raid3_use_malloc) {
651 uma_zdestroy(sc->sc_zones[G_RAID3_ZONE_64K].sz_zone);
652 uma_zdestroy(sc->sc_zones[G_RAID3_ZONE_16K].sz_zone);
653 uma_zdestroy(sc->sc_zones[G_RAID3_ZONE_4K].sz_zone);
654 }
655 mtx_destroy(&sc->sc_queue_mtx);
656 mtx_destroy(&sc->sc_events_mtx);
657 sx_xunlock(&sc->sc_lock);
658 sx_destroy(&sc->sc_lock);
659 }
660
661 static void
662 g_raid3_orphan(struct g_consumer *cp)
663 {
664 struct g_raid3_disk *disk;
665
666 g_topology_assert();
667
668 disk = cp->private;
669 if (disk == NULL)
670 return;
671 disk->d_softc->sc_bump_id = G_RAID3_BUMP_SYNCID;
672 g_raid3_event_send(disk, G_RAID3_DISK_STATE_DISCONNECTED,
673 G_RAID3_EVENT_DONTWAIT);
674 }
675
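/*
 * Write the metadata (or an all-zero sector if md is NULL) into the last
 * sector of the disk's provider.  On failure the disk is marked as broken
 * and, when configured to do so, disconnected from a COMPLETE array.
 */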
676 static int
677 g_raid3_write_metadata(struct g_raid3_disk *disk, struct g_raid3_metadata *md)
678 {
679 struct g_raid3_softc *sc;
680 struct g_consumer *cp;
681 off_t offset, length;
682 u_char *sector;
683 int error = 0;
684
685 g_topology_assert_not();
686 sc = disk->d_softc;
687 sx_assert(&sc->sc_lock, SX_LOCKED);
688
689 cp = disk->d_consumer;
690 KASSERT(cp != NULL, ("NULL consumer (%s).", sc->sc_name));
691 KASSERT(cp->provider != NULL, ("NULL provider (%s).", sc->sc_name));
692 KASSERT(cp->acr >= 1 && cp->acw >= 1 && cp->ace >= 1,
693 ("Consumer %s closed? (r%dw%de%d).", cp->provider->name, cp->acr,
694 cp->acw, cp->ace));
695 length = cp->provider->sectorsize;
696 offset = cp->provider->mediasize - length;
697 sector = malloc((size_t)length, M_RAID3, M_WAITOK | M_ZERO);
698 if (md != NULL)
699 raid3_metadata_encode(md, sector);
700 error = g_write_data(cp, offset, sector, length);
701 free(sector, M_RAID3);
702 if (error != 0) {
703 if ((disk->d_flags & G_RAID3_DISK_FLAG_BROKEN) == 0) {
704 G_RAID3_DEBUG(0, "Cannot write metadata on %s "
705 "(device=%s, error=%d).",
706 g_raid3_get_diskname(disk), sc->sc_name, error);
707 disk->d_flags |= G_RAID3_DISK_FLAG_BROKEN;
708 } else {
709 G_RAID3_DEBUG(1, "Cannot write metadata on %s "
710 "(device=%s, error=%d).",
711 g_raid3_get_diskname(disk), sc->sc_name, error);
712 }
713 if (g_raid3_disconnect_on_failure &&
714 sc->sc_state == G_RAID3_DEVICE_STATE_COMPLETE) {
715 sc->sc_bump_id |= G_RAID3_BUMP_GENID;
716 g_raid3_event_send(disk,
717 G_RAID3_DISK_STATE_DISCONNECTED,
718 G_RAID3_EVENT_DONTWAIT);
719 }
720 }
721 return (error);
722 }
723
724 int
725 g_raid3_clear_metadata(struct g_raid3_disk *disk)
726 {
727 int error;
728
729 g_topology_assert_not();
730 sx_assert(&disk->d_softc->sc_lock, SX_LOCKED);
731
732 error = g_raid3_write_metadata(disk, NULL);
733 if (error == 0) {
734 G_RAID3_DEBUG(2, "Metadata on %s cleared.",
735 g_raid3_get_diskname(disk));
736 } else {
737 G_RAID3_DEBUG(0,
738 "Cannot clear metadata on disk %s (error=%d).",
739 g_raid3_get_diskname(disk), error);
740 }
741 return (error);
742 }
743
744 void
745 g_raid3_fill_metadata(struct g_raid3_disk *disk, struct g_raid3_metadata *md)
746 {
747 struct g_raid3_softc *sc;
748 struct g_provider *pp;
749
750 sc = disk->d_softc;
751 strlcpy(md->md_magic, G_RAID3_MAGIC, sizeof(md->md_magic));
752 md->md_version = G_RAID3_VERSION;
753 strlcpy(md->md_name, sc->sc_name, sizeof(md->md_name));
754 md->md_id = sc->sc_id;
755 md->md_all = sc->sc_ndisks;
756 md->md_genid = sc->sc_genid;
757 md->md_mediasize = sc->sc_mediasize;
758 md->md_sectorsize = sc->sc_sectorsize;
759 md->md_mflags = (sc->sc_flags & G_RAID3_DEVICE_FLAG_MASK);
760 md->md_no = disk->d_no;
761 md->md_syncid = disk->d_sync.ds_syncid;
762 md->md_dflags = (disk->d_flags & G_RAID3_DISK_FLAG_MASK);
763 if (disk->d_state != G_RAID3_DISK_STATE_SYNCHRONIZING)
764 md->md_sync_offset = 0;
765 else {
766 md->md_sync_offset =
767 disk->d_sync.ds_offset_done / (sc->sc_ndisks - 1);
768 }
769 if (disk->d_consumer != NULL && disk->d_consumer->provider != NULL)
770 pp = disk->d_consumer->provider;
771 else
772 pp = NULL;
773 if ((disk->d_flags & G_RAID3_DISK_FLAG_HARDCODED) != 0 && pp != NULL)
774 strlcpy(md->md_provider, pp->name, sizeof(md->md_provider));
775 else
776 bzero(md->md_provider, sizeof(md->md_provider));
777 if (pp != NULL)
778 md->md_provsize = pp->mediasize;
779 else
780 md->md_provsize = 0;
781 }
782
783 void
784 g_raid3_update_metadata(struct g_raid3_disk *disk)
785 {
786 struct g_raid3_softc *sc;
787 struct g_raid3_metadata md;
788 int error;
789
790 g_topology_assert_not();
791 sc = disk->d_softc;
792 sx_assert(&sc->sc_lock, SX_LOCKED);
793
794 g_raid3_fill_metadata(disk, &md);
795 error = g_raid3_write_metadata(disk, &md);
796 if (error == 0) {
797 G_RAID3_DEBUG(2, "Metadata on %s updated.",
798 g_raid3_get_diskname(disk));
799 } else {
800 G_RAID3_DEBUG(0,
801 "Cannot update metadata on disk %s (error=%d).",
802 g_raid3_get_diskname(disk), error);
803 }
804 }
805
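/*
 * Increase the synchronization ID and store it in the metadata of all
 * ACTIVE and SYNCHRONIZING disks.  g_raid3_bump_genid() below does the
 * same for the generation ID.
 */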
806 static void
807 g_raid3_bump_syncid(struct g_raid3_softc *sc)
808 {
809 struct g_raid3_disk *disk;
810 u_int n;
811
812 g_topology_assert_not();
813 sx_assert(&sc->sc_lock, SX_XLOCKED);
814 KASSERT(g_raid3_ndisks(sc, G_RAID3_DISK_STATE_ACTIVE) > 0,
815 ("%s called with no active disks (device=%s).", __func__,
816 sc->sc_name));
817
818 sc->sc_syncid++;
819 G_RAID3_DEBUG(1, "Device %s: syncid bumped to %u.", sc->sc_name,
820 sc->sc_syncid);
821 for (n = 0; n < sc->sc_ndisks; n++) {
822 disk = &sc->sc_disks[n];
823 if (disk->d_state == G_RAID3_DISK_STATE_ACTIVE ||
824 disk->d_state == G_RAID3_DISK_STATE_SYNCHRONIZING) {
825 disk->d_sync.ds_syncid = sc->sc_syncid;
826 g_raid3_update_metadata(disk);
827 }
828 }
829 }
830
831 static void
832 g_raid3_bump_genid(struct g_raid3_softc *sc)
833 {
834 struct g_raid3_disk *disk;
835 u_int n;
836
837 g_topology_assert_not();
838 sx_assert(&sc->sc_lock, SX_XLOCKED);
839 KASSERT(g_raid3_ndisks(sc, G_RAID3_DISK_STATE_ACTIVE) > 0,
840 ("%s called with no active disks (device=%s).", __func__,
841 sc->sc_name));
842
843 sc->sc_genid++;
844 G_RAID3_DEBUG(1, "Device %s: genid bumped to %u.", sc->sc_name,
845 sc->sc_genid);
846 for (n = 0; n < sc->sc_ndisks; n++) {
847 disk = &sc->sc_disks[n];
848 if (disk->d_state == G_RAID3_DISK_STATE_ACTIVE ||
849 disk->d_state == G_RAID3_DISK_STATE_SYNCHRONIZING) {
850 disk->d_genid = sc->sc_genid;
851 g_raid3_update_metadata(disk);
852 }
853 }
854 }
855
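/*
 * Mark all active components as clean once the device has seen no writes
 * for kern.geom.raid3.idletime seconds.  Returns the number of seconds
 * left before the array may be marked clean, or 0.
 */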
856 static int
857 g_raid3_idle(struct g_raid3_softc *sc, int acw)
858 {
859 struct g_raid3_disk *disk;
860 u_int i;
861 int timeout;
862
863 g_topology_assert_not();
864 sx_assert(&sc->sc_lock, SX_XLOCKED);
865
866 if (sc->sc_provider == NULL)
867 return (0);
868 if ((sc->sc_flags & G_RAID3_DEVICE_FLAG_NOFAILSYNC) != 0)
869 return (0);
870 if (sc->sc_idle)
871 return (0);
872 if (sc->sc_writes > 0)
873 return (0);
874 if (acw > 0 || (acw == -1 && sc->sc_provider->acw > 0)) {
875 timeout = g_raid3_idletime - (time_uptime - sc->sc_last_write);
876 if (timeout > 0)
877 return (timeout);
878 }
879 sc->sc_idle = 1;
880 for (i = 0; i < sc->sc_ndisks; i++) {
881 disk = &sc->sc_disks[i];
882 if (disk->d_state != G_RAID3_DISK_STATE_ACTIVE)
883 continue;
884 G_RAID3_DEBUG(1, "Disk %s (device %s) marked as clean.",
885 g_raid3_get_diskname(disk), sc->sc_name);
886 disk->d_flags &= ~G_RAID3_DISK_FLAG_DIRTY;
887 g_raid3_update_metadata(disk);
888 }
889 return (0);
890 }
891
892 static void
893 g_raid3_unidle(struct g_raid3_softc *sc)
894 {
895 struct g_raid3_disk *disk;
896 u_int i;
897
898 g_topology_assert_not();
899 sx_assert(&sc->sc_lock, SX_XLOCKED);
900
901 if ((sc->sc_flags & G_RAID3_DEVICE_FLAG_NOFAILSYNC) != 0)
902 return;
903 sc->sc_idle = 0;
904 sc->sc_last_write = time_uptime;
905 for (i = 0; i < sc->sc_ndisks; i++) {
906 disk = &sc->sc_disks[i];
907 if (disk->d_state != G_RAID3_DISK_STATE_ACTIVE)
908 continue;
909 G_RAID3_DEBUG(1, "Disk %s (device %s) marked as dirty.",
910 g_raid3_get_diskname(disk), sc->sc_name);
911 disk->d_flags |= G_RAID3_DISK_FLAG_DIRTY;
912 g_raid3_update_metadata(disk);
913 }
914 }
915
916 /*
917 * Treat the bio_driver1 field in the parent bio as the list head and the
918 * bio_caller1 field in each child bio as the pointer to the next element.
919 */
920 #define G_RAID3_HEAD_BIO(pbp) (pbp)->bio_driver1
921
922 #define G_RAID3_NEXT_BIO(cbp) (cbp)->bio_caller1
923
924 #define G_RAID3_FOREACH_BIO(pbp, bp) \
925 for ((bp) = G_RAID3_HEAD_BIO(pbp); (bp) != NULL; \
926 (bp) = G_RAID3_NEXT_BIO(bp))
927
928 #define G_RAID3_FOREACH_SAFE_BIO(pbp, bp, tmpbp) \
929 for ((bp) = G_RAID3_HEAD_BIO(pbp); \
930 (bp) != NULL && ((tmpbp) = G_RAID3_NEXT_BIO(bp), 1); \
931 (bp) = (tmpbp))
932
933 static void
934 g_raid3_init_bio(struct bio *pbp)
935 {
936
937 G_RAID3_HEAD_BIO(pbp) = NULL;
938 }
939
940 static void
941 g_raid3_remove_bio(struct bio *cbp)
942 {
943 struct bio *pbp, *bp;
944
945 pbp = cbp->bio_parent;
946 if (G_RAID3_HEAD_BIO(pbp) == cbp)
947 G_RAID3_HEAD_BIO(pbp) = G_RAID3_NEXT_BIO(cbp);
948 else {
949 G_RAID3_FOREACH_BIO(pbp, bp) {
950 if (G_RAID3_NEXT_BIO(bp) == cbp) {
951 G_RAID3_NEXT_BIO(bp) = G_RAID3_NEXT_BIO(cbp);
952 break;
953 }
954 }
955 }
956 G_RAID3_NEXT_BIO(cbp) = NULL;
957 }
958
959 static void
960 g_raid3_replace_bio(struct bio *sbp, struct bio *dbp)
961 {
962 struct bio *pbp, *bp;
963
964 g_raid3_remove_bio(sbp);
965 pbp = dbp->bio_parent;
966 G_RAID3_NEXT_BIO(sbp) = G_RAID3_NEXT_BIO(dbp);
967 if (G_RAID3_HEAD_BIO(pbp) == dbp)
968 G_RAID3_HEAD_BIO(pbp) = sbp;
969 else {
970 G_RAID3_FOREACH_BIO(pbp, bp) {
971 if (G_RAID3_NEXT_BIO(bp) == dbp) {
972 G_RAID3_NEXT_BIO(bp) = sbp;
973 break;
974 }
975 }
976 }
977 G_RAID3_NEXT_BIO(dbp) = NULL;
978 }
979
980 static void
981 g_raid3_destroy_bio(struct g_raid3_softc *sc, struct bio *cbp)
982 {
983 struct bio *bp, *pbp;
984 size_t size;
985
986 pbp = cbp->bio_parent;
987 pbp->bio_children--;
988 KASSERT(cbp->bio_data != NULL, ("NULL bio_data"));
989 size = pbp->bio_length / (sc->sc_ndisks - 1);
990 g_raid3_free(sc, cbp->bio_data, size);
991 if (G_RAID3_HEAD_BIO(pbp) == cbp) {
992 G_RAID3_HEAD_BIO(pbp) = G_RAID3_NEXT_BIO(cbp);
993 G_RAID3_NEXT_BIO(cbp) = NULL;
994 g_destroy_bio(cbp);
995 } else {
996 G_RAID3_FOREACH_BIO(pbp, bp) {
997 if (G_RAID3_NEXT_BIO(bp) == cbp)
998 break;
999 }
1000 if (bp != NULL) {
1001 KASSERT(G_RAID3_NEXT_BIO(bp) != NULL,
1002 ("NULL bp->bio_driver1"));
1003 G_RAID3_NEXT_BIO(bp) = G_RAID3_NEXT_BIO(cbp);
1004 G_RAID3_NEXT_BIO(cbp) = NULL;
1005 }
1006 g_destroy_bio(cbp);
1007 }
1008 }
1009
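/*
 * Clone the parent bio for one component and give it a data buffer of
 * 1/(ndisks - 1) of the parent's length.  The clone is linked at the
 * tail of the parent's list of child bios.
 */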
1010 static struct bio *
1011 g_raid3_clone_bio(struct g_raid3_softc *sc, struct bio *pbp)
1012 {
1013 struct bio *bp, *cbp;
1014 size_t size;
1015 int memflag;
1016
1017 cbp = g_clone_bio(pbp);
1018 if (cbp == NULL)
1019 return (NULL);
1020 size = pbp->bio_length / (sc->sc_ndisks - 1);
1021 if ((pbp->bio_cflags & G_RAID3_BIO_CFLAG_REGULAR) != 0)
1022 memflag = M_WAITOK;
1023 else
1024 memflag = M_NOWAIT;
1025 cbp->bio_data = g_raid3_alloc(sc, size, memflag);
1026 if (cbp->bio_data == NULL) {
1027 pbp->bio_children--;
1028 g_destroy_bio(cbp);
1029 return (NULL);
1030 }
1031 G_RAID3_NEXT_BIO(cbp) = NULL;
1032 if (G_RAID3_HEAD_BIO(pbp) == NULL)
1033 G_RAID3_HEAD_BIO(pbp) = cbp;
1034 else {
1035 G_RAID3_FOREACH_BIO(pbp, bp) {
1036 if (G_RAID3_NEXT_BIO(bp) == NULL) {
1037 G_RAID3_NEXT_BIO(bp) = cbp;
1038 break;
1039 }
1040 }
1041 }
1042 return (cbp);
1043 }
1044
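/*
 * Split a write request across the components: copy the payload into the
 * data components in 'atom'-sized interleaved chunks, compute the parity
 * component by XOR-ing the data components together (unless NOPARITY is
 * set) and hand all child bios to their consumers.
 */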
1045 static void
1046 g_raid3_scatter(struct bio *pbp)
1047 {
1048 struct g_raid3_softc *sc;
1049 struct g_raid3_disk *disk;
1050 struct bio *bp, *cbp, *tmpbp;
1051 off_t atom, cadd, padd, left;
1052 int first;
1053
1054 sc = pbp->bio_to->geom->softc;
1055 bp = NULL;
1056 if ((pbp->bio_pflags & G_RAID3_BIO_PFLAG_NOPARITY) == 0) {
1057 /*
1058 * Find the bio for which we should calculate the parity data.
1059 */
1060 G_RAID3_FOREACH_BIO(pbp, cbp) {
1061 if ((cbp->bio_cflags & G_RAID3_BIO_CFLAG_PARITY) != 0) {
1062 bp = cbp;
1063 break;
1064 }
1065 }
1066 KASSERT(bp != NULL, ("NULL parity bio."));
1067 }
1068 atom = sc->sc_sectorsize / (sc->sc_ndisks - 1);
1069 cadd = padd = 0;
1070 for (left = pbp->bio_length; left > 0; left -= sc->sc_sectorsize) {
1071 G_RAID3_FOREACH_BIO(pbp, cbp) {
1072 if (cbp == bp)
1073 continue;
1074 bcopy(pbp->bio_data + padd, cbp->bio_data + cadd, atom);
1075 padd += atom;
1076 }
1077 cadd += atom;
1078 }
1079 if ((pbp->bio_pflags & G_RAID3_BIO_PFLAG_NOPARITY) == 0) {
1080 /*
1081 * Calculate parity.
1082 */
1083 first = 1;
1084 G_RAID3_FOREACH_SAFE_BIO(pbp, cbp, tmpbp) {
1085 if (cbp == bp)
1086 continue;
1087 if (first) {
1088 bcopy(cbp->bio_data, bp->bio_data,
1089 bp->bio_length);
1090 first = 0;
1091 } else {
1092 g_raid3_xor(cbp->bio_data, bp->bio_data,
1093 bp->bio_length);
1094 }
1095 if ((cbp->bio_cflags & G_RAID3_BIO_CFLAG_NODISK) != 0)
1096 g_raid3_destroy_bio(sc, cbp);
1097 }
1098 }
1099 G_RAID3_FOREACH_SAFE_BIO(pbp, cbp, tmpbp) {
1100 struct g_consumer *cp;
1101
1102 disk = cbp->bio_caller2;
1103 cp = disk->d_consumer;
1104 cbp->bio_to = cp->provider;
1105 G_RAID3_LOGREQ(3, cbp, "Sending request.");
1106 KASSERT(cp->acr >= 1 && cp->acw >= 1 && cp->ace >= 1,
1107 ("Consumer %s not opened (r%dw%de%d).", cp->provider->name,
1108 cp->acr, cp->acw, cp->ace));
1109 cp->index++;
1110 sc->sc_writes++;
1111 g_io_request(cbp, cp);
1112 }
1113 }
1114
1115 static void
1116 g_raid3_gather(struct bio *pbp)
1117 {
1118 struct g_raid3_softc *sc;
1119 struct g_raid3_disk *disk;
1120 struct bio *xbp, *fbp, *cbp;
1121 off_t atom, cadd, padd, left;
1122
1123 sc = pbp->bio_to->geom->softc;
1124 /*
1125 * Find the bio for which we have to calculate the data.
1126 * While walking the list, check whether all requests
1127 * succeeded; if not, fail the whole request.
1128 * In COMPLETE mode we allow one request to fail; if we
1129 * find one, it is re-sent to the parity consumer.
1130 * If more requests have failed, the whole request fails.
1131 */
1132 xbp = fbp = NULL;
1133 G_RAID3_FOREACH_BIO(pbp, cbp) {
1134 if ((cbp->bio_cflags & G_RAID3_BIO_CFLAG_PARITY) != 0) {
1135 KASSERT(xbp == NULL, ("More than one parity bio."));
1136 xbp = cbp;
1137 }
1138 if (cbp->bio_error == 0)
1139 continue;
1140 /*
1141 * Found failed request.
1142 */
1143 if (fbp == NULL) {
1144 if ((pbp->bio_pflags & G_RAID3_BIO_PFLAG_DEGRADED) != 0) {
1145 /*
1146 * We are already in degraded mode, so we can't
1147 * accept any failures.
1148 */
1149 if (pbp->bio_error == 0)
1150 pbp->bio_error = cbp->bio_error;
1151 } else {
1152 fbp = cbp;
1153 }
1154 } else {
1155 /*
1156 * Next failed request, that's too many.
1157 */
1158 if (pbp->bio_error == 0)
1159 pbp->bio_error = fbp->bio_error;
1160 }
1161 disk = cbp->bio_caller2;
1162 if (disk == NULL)
1163 continue;
1164 if ((disk->d_flags & G_RAID3_DISK_FLAG_BROKEN) == 0) {
1165 disk->d_flags |= G_RAID3_DISK_FLAG_BROKEN;
1166 G_RAID3_LOGREQ(0, cbp, "Request failed (error=%d).",
1167 cbp->bio_error);
1168 } else {
1169 G_RAID3_LOGREQ(1, cbp, "Request failed (error=%d).",
1170 cbp->bio_error);
1171 }
1172 if (g_raid3_disconnect_on_failure &&
1173 sc->sc_state == G_RAID3_DEVICE_STATE_COMPLETE) {
1174 sc->sc_bump_id |= G_RAID3_BUMP_GENID;
1175 g_raid3_event_send(disk,
1176 G_RAID3_DISK_STATE_DISCONNECTED,
1177 G_RAID3_EVENT_DONTWAIT);
1178 }
1179 }
1180 if (pbp->bio_error != 0)
1181 goto finish;
1182 if (fbp != NULL && (pbp->bio_pflags & G_RAID3_BIO_PFLAG_VERIFY) != 0) {
1183 pbp->bio_pflags &= ~G_RAID3_BIO_PFLAG_VERIFY;
1184 if (xbp != fbp)
1185 g_raid3_replace_bio(xbp, fbp);
1186 g_raid3_destroy_bio(sc, fbp);
1187 } else if (fbp != NULL) {
1188 struct g_consumer *cp;
1189
1190 /*
1191 * One request failed, so send the same request to
1192 * the parity consumer.
1193 */
1194 disk = pbp->bio_driver2;
1195 if (disk->d_state != G_RAID3_DISK_STATE_ACTIVE) {
1196 pbp->bio_error = fbp->bio_error;
1197 goto finish;
1198 }
1199 pbp->bio_pflags |= G_RAID3_BIO_PFLAG_DEGRADED;
1200 pbp->bio_inbed--;
1201 fbp->bio_flags &= ~(BIO_DONE | BIO_ERROR);
1202 if (disk->d_no == sc->sc_ndisks - 1)
1203 fbp->bio_cflags |= G_RAID3_BIO_CFLAG_PARITY;
1204 fbp->bio_error = 0;
1205 fbp->bio_completed = 0;
1206 fbp->bio_children = 0;
1207 fbp->bio_inbed = 0;
1208 cp = disk->d_consumer;
1209 fbp->bio_caller2 = disk;
1210 fbp->bio_to = cp->provider;
1211 G_RAID3_LOGREQ(3, fbp, "Sending request (recover).");
1212 KASSERT(cp->acr >= 1 && cp->acw >= 1 && cp->ace >= 1,
1213 ("Consumer %s not opened (r%dw%de%d).", cp->provider->name,
1214 cp->acr, cp->acw, cp->ace));
1215 cp->index++;
1216 g_io_request(fbp, cp);
1217 return;
1218 }
1219 if (xbp != NULL) {
1220 /*
1221 * Calculate parity.
1222 */
1223 G_RAID3_FOREACH_BIO(pbp, cbp) {
1224 if ((cbp->bio_cflags & G_RAID3_BIO_CFLAG_PARITY) != 0)
1225 continue;
1226 g_raid3_xor(cbp->bio_data, xbp->bio_data,
1227 xbp->bio_length);
1228 }
1229 xbp->bio_cflags &= ~G_RAID3_BIO_CFLAG_PARITY;
1230 if ((pbp->bio_pflags & G_RAID3_BIO_PFLAG_VERIFY) != 0) {
1231 if (!g_raid3_is_zero(xbp)) {
1232 g_raid3_parity_mismatch++;
1233 pbp->bio_error = EIO;
1234 goto finish;
1235 }
1236 g_raid3_destroy_bio(sc, xbp);
1237 }
1238 }
1239 atom = sc->sc_sectorsize / (sc->sc_ndisks - 1);
1240 cadd = padd = 0;
1241 for (left = pbp->bio_length; left > 0; left -= sc->sc_sectorsize) {
1242 G_RAID3_FOREACH_BIO(pbp, cbp) {
1243 bcopy(cbp->bio_data + cadd, pbp->bio_data + padd, atom);
1244 pbp->bio_completed += atom;
1245 padd += atom;
1246 }
1247 cadd += atom;
1248 }
1249 finish:
1250 if (pbp->bio_error == 0)
1251 G_RAID3_LOGREQ(3, pbp, "Request finished.");
1252 else {
1253 if ((pbp->bio_pflags & G_RAID3_BIO_PFLAG_VERIFY) != 0)
1254 G_RAID3_LOGREQ(1, pbp, "Verification error.");
1255 else
1256 G_RAID3_LOGREQ(0, pbp, "Request failed.");
1257 }
1258 pbp->bio_pflags &= ~G_RAID3_BIO_PFLAG_MASK;
1259 while ((cbp = G_RAID3_HEAD_BIO(pbp)) != NULL)
1260 g_raid3_destroy_bio(sc, cbp);
1261 g_io_deliver(pbp, pbp->bio_error);
1262 }
1263
1264 static void
1265 g_raid3_done(struct bio *bp)
1266 {
1267 struct g_raid3_softc *sc;
1268
1269 sc = bp->bio_from->geom->softc;
1270 bp->bio_cflags |= G_RAID3_BIO_CFLAG_REGULAR;
1271 G_RAID3_LOGREQ(3, bp, "Regular request done (error=%d).", bp->bio_error);
1272 mtx_lock(&sc->sc_queue_mtx);
1273 bioq_insert_head(&sc->sc_queue, bp);
1274 mtx_unlock(&sc->sc_queue_mtx);
1275 wakeup(sc);
1276 wakeup(&sc->sc_queue);
1277 }
1278
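/*
 * Handle completion of a child (component) request.  Once all children of
 * the parent bio have come back, reads are reassembled in g_raid3_gather()
 * and writes/deletes are finalized and delivered here.
 */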
1279 static void
1280 g_raid3_regular_request(struct bio *cbp)
1281 {
1282 struct g_raid3_softc *sc;
1283 struct g_raid3_disk *disk;
1284 struct bio *pbp;
1285
1286 g_topology_assert_not();
1287
1288 pbp = cbp->bio_parent;
1289 sc = pbp->bio_to->geom->softc;
1290 cbp->bio_from->index--;
1291 if (cbp->bio_cmd == BIO_WRITE)
1292 sc->sc_writes--;
1293 disk = cbp->bio_from->private;
1294 if (disk == NULL) {
1295 g_topology_lock();
1296 g_raid3_kill_consumer(sc, cbp->bio_from);
1297 g_topology_unlock();
1298 }
1299
1300 G_RAID3_LOGREQ(3, cbp, "Request finished.");
1301 pbp->bio_inbed++;
1302 KASSERT(pbp->bio_inbed <= pbp->bio_children,
1303 ("bio_inbed (%u) is bigger than bio_children (%u).", pbp->bio_inbed,
1304 pbp->bio_children));
1305 if (pbp->bio_inbed != pbp->bio_children)
1306 return;
1307 switch (pbp->bio_cmd) {
1308 case BIO_READ:
1309 g_raid3_gather(pbp);
1310 break;
1311 case BIO_WRITE:
1312 case BIO_DELETE:
1313 {
1314 int error = 0;
1315
1316 pbp->bio_completed = pbp->bio_length;
1317 while ((cbp = G_RAID3_HEAD_BIO(pbp)) != NULL) {
1318 if (cbp->bio_error == 0) {
1319 g_raid3_destroy_bio(sc, cbp);
1320 continue;
1321 }
1322
1323 if (error == 0)
1324 error = cbp->bio_error;
1325 else if (pbp->bio_error == 0) {
1326 /*
1327 * Next failed request, that's too many.
1328 */
1329 pbp->bio_error = error;
1330 }
1331
1332 disk = cbp->bio_caller2;
1333 if (disk == NULL) {
1334 g_raid3_destroy_bio(sc, cbp);
1335 continue;
1336 }
1337
1338 if ((disk->d_flags & G_RAID3_DISK_FLAG_BROKEN) == 0) {
1339 disk->d_flags |= G_RAID3_DISK_FLAG_BROKEN;
1340 G_RAID3_LOGREQ(0, cbp,
1341 "Request failed (error=%d).",
1342 cbp->bio_error);
1343 } else {
1344 G_RAID3_LOGREQ(1, cbp,
1345 "Request failed (error=%d).",
1346 cbp->bio_error);
1347 }
1348 if (g_raid3_disconnect_on_failure &&
1349 sc->sc_state == G_RAID3_DEVICE_STATE_COMPLETE) {
1350 sc->sc_bump_id |= G_RAID3_BUMP_GENID;
1351 g_raid3_event_send(disk,
1352 G_RAID3_DISK_STATE_DISCONNECTED,
1353 G_RAID3_EVENT_DONTWAIT);
1354 }
1355 g_raid3_destroy_bio(sc, cbp);
1356 }
1357 if (pbp->bio_error == 0)
1358 G_RAID3_LOGREQ(3, pbp, "Request finished.");
1359 else
1360 G_RAID3_LOGREQ(0, pbp, "Request failed.");
1361 pbp->bio_pflags &= ~G_RAID3_BIO_PFLAG_DEGRADED;
1362 pbp->bio_pflags &= ~G_RAID3_BIO_PFLAG_NOPARITY;
1363 bioq_remove(&sc->sc_inflight, pbp);
1364 /* Release delayed sync requests if possible. */
1365 g_raid3_sync_release(sc);
1366 g_io_deliver(pbp, pbp->bio_error);
1367 break;
1368 }
1369 }
1370 }
1371
1372 static void
1373 g_raid3_sync_done(struct bio *bp)
1374 {
1375 struct g_raid3_softc *sc;
1376
1377 G_RAID3_LOGREQ(3, bp, "Synchronization request delivered.");
1378 sc = bp->bio_from->geom->softc;
1379 bp->bio_cflags |= G_RAID3_BIO_CFLAG_SYNC;
1380 mtx_lock(&sc->sc_queue_mtx);
1381 bioq_insert_head(&sc->sc_queue, bp);
1382 mtx_unlock(&sc->sc_queue_mtx);
1383 wakeup(sc);
1384 wakeup(&sc->sc_queue);
1385 }
1386
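/*
 * Fan a BIO_FLUSH request out to every active component.
 */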
1387 static void
1388 g_raid3_flush(struct g_raid3_softc *sc, struct bio *bp)
1389 {
1390 struct bio_queue_head queue;
1391 struct g_raid3_disk *disk;
1392 struct g_consumer *cp;
1393 struct bio *cbp;
1394 u_int i;
1395
1396 bioq_init(&queue);
1397 for (i = 0; i < sc->sc_ndisks; i++) {
1398 disk = &sc->sc_disks[i];
1399 if (disk->d_state != G_RAID3_DISK_STATE_ACTIVE)
1400 continue;
1401 cbp = g_clone_bio(bp);
1402 if (cbp == NULL) {
1403 for (cbp = bioq_first(&queue); cbp != NULL;
1404 cbp = bioq_first(&queue)) {
1405 bioq_remove(&queue, cbp);
1406 g_destroy_bio(cbp);
1407 }
1408 if (bp->bio_error == 0)
1409 bp->bio_error = ENOMEM;
1410 g_io_deliver(bp, bp->bio_error);
1411 return;
1412 }
1413 bioq_insert_tail(&queue, cbp);
1414 cbp->bio_done = g_std_done;
1415 cbp->bio_caller1 = disk;
1416 cbp->bio_to = disk->d_consumer->provider;
1417 }
1418 for (cbp = bioq_first(&queue); cbp != NULL; cbp = bioq_first(&queue)) {
1419 bioq_remove(&queue, cbp);
1420 G_RAID3_LOGREQ(3, cbp, "Sending request.");
1421 disk = cbp->bio_caller1;
1422 cbp->bio_caller1 = NULL;
1423 cp = disk->d_consumer;
1424 KASSERT(cp->acr >= 1 && cp->acw >= 1 && cp->ace >= 1,
1425 ("Consumer %s not opened (r%dw%de%d).", cp->provider->name,
1426 cp->acr, cp->acw, cp->ace));
1427 g_io_request(cbp, disk->d_consumer);
1428 }
1429 }
1430
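/*
 * GEOM start routine: READ, WRITE and DELETE requests are queued for the
 * worker thread, BIO_FLUSH is handled directly and everything else is
 * rejected with EOPNOTSUPP.
 */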
1431 static void
1432 g_raid3_start(struct bio *bp)
1433 {
1434 struct g_raid3_softc *sc;
1435
1436 sc = bp->bio_to->geom->softc;
1437 /*
1438 * If sc == NULL or there are no valid disks, the provider's error
1439 * should be set and g_raid3_start() should not be called at all.
1440 */
1441 KASSERT(sc != NULL && (sc->sc_state == G_RAID3_DEVICE_STATE_DEGRADED ||
1442 sc->sc_state == G_RAID3_DEVICE_STATE_COMPLETE),
1443 ("Provider's error should be set (error=%d)(device=%s).",
1444 bp->bio_to->error, bp->bio_to->name));
1445 G_RAID3_LOGREQ(3, bp, "Request received.");
1446
1447 switch (bp->bio_cmd) {
1448 case BIO_READ:
1449 case BIO_WRITE:
1450 case BIO_DELETE:
1451 break;
1452 case BIO_FLUSH:
1453 g_raid3_flush(sc, bp);
1454 return;
1455 case BIO_GETATTR:
1456 default:
1457 g_io_deliver(bp, EOPNOTSUPP);
1458 return;
1459 }
1460 mtx_lock(&sc->sc_queue_mtx);
1461 bioq_insert_tail(&sc->sc_queue, bp);
1462 mtx_unlock(&sc->sc_queue_mtx);
1463 G_RAID3_DEBUG(4, "%s: Waking up %p.", __func__, sc);
1464 wakeup(sc);
1465 }
1466
1467 /*
1468 * Return TRUE if the given request is colliding with an in-progress
1469 * synchronization request.
1470 */
1471 static int
1472 g_raid3_sync_collision(struct g_raid3_softc *sc, struct bio *bp)
1473 {
1474 struct g_raid3_disk *disk;
1475 struct bio *sbp;
1476 off_t rstart, rend, sstart, send;
1477 int i;
1478
1479 disk = sc->sc_syncdisk;
1480 if (disk == NULL)
1481 return (0);
1482 rstart = bp->bio_offset;
1483 rend = bp->bio_offset + bp->bio_length;
1484 for (i = 0; i < g_raid3_syncreqs; i++) {
1485 sbp = disk->d_sync.ds_bios[i];
1486 if (sbp == NULL)
1487 continue;
1488 sstart = sbp->bio_offset;
1489 send = sbp->bio_length;
1490 if (sbp->bio_cmd == BIO_WRITE) {
1491 sstart *= sc->sc_ndisks - 1;
1492 send *= sc->sc_ndisks - 1;
1493 }
1494 send += sstart;
1495 if (rend > sstart && rstart < send)
1496 return (1);
1497 }
1498 return (0);
1499 }
1500
1501 /*
1502 * Return TRUE if the given sync request is colliding with an in-progress regular
1503 * request.
1504 */
1505 static int
1506 g_raid3_regular_collision(struct g_raid3_softc *sc, struct bio *sbp)
1507 {
1508 off_t rstart, rend, sstart, send;
1509 struct bio *bp;
1510
1511 if (sc->sc_syncdisk == NULL)
1512 return (0);
1513 sstart = sbp->bio_offset;
1514 send = sstart + sbp->bio_length;
1515 TAILQ_FOREACH(bp, &sc->sc_inflight.queue, bio_queue) {
1516 rstart = bp->bio_offset;
1517 rend = bp->bio_offset + bp->bio_length;
1518 if (rend > sstart && rstart < send)
1519 return (1);
1520 }
1521 return (0);
1522 }
1523
1524 /*
1525 * Puts the request onto the delayed queue.
1526 */
1527 static void
1528 g_raid3_regular_delay(struct g_raid3_softc *sc, struct bio *bp)
1529 {
1530
1531 G_RAID3_LOGREQ(2, bp, "Delaying request.");
1532 bioq_insert_head(&sc->sc_regular_delayed, bp);
1533 }
1534
1535 /*
1536 * Puts the synchronization request onto the delayed queue.
1537 */
1538 static void
1539 g_raid3_sync_delay(struct g_raid3_softc *sc, struct bio *bp)
1540 {
1541
1542 G_RAID3_LOGREQ(2, bp, "Delaying synchronization request.");
1543 bioq_insert_tail(&sc->sc_sync_delayed, bp);
1544 }
1545
1546 /*
1547 * Releases delayed regular requests which no longer collide with sync
1548 * requests.
1549 */
1550 static void
1551 g_raid3_regular_release(struct g_raid3_softc *sc)
1552 {
1553 struct bio *bp, *bp2;
1554
1555 TAILQ_FOREACH_SAFE(bp, &sc->sc_regular_delayed.queue, bio_queue, bp2) {
1556 if (g_raid3_sync_collision(sc, bp))
1557 continue;
1558 bioq_remove(&sc->sc_regular_delayed, bp);
1559 G_RAID3_LOGREQ(2, bp, "Releasing delayed request (%p).", bp);
1560 mtx_lock(&sc->sc_queue_mtx);
1561 bioq_insert_head(&sc->sc_queue, bp);
1562 #if 0
1563 /*
1564 * wakeup() is not needed, because this function is called from
1565 * the worker thread.
1566 */
1567 wakeup(&sc->sc_queue);
1568 #endif
1569 mtx_unlock(&sc->sc_queue_mtx);
1570 }
1571 }
1572
1573 /*
1574 * Releases delayed sync requests which no longer collide with regular
1575 * requests.
1576 */
1577 static void
1578 g_raid3_sync_release(struct g_raid3_softc *sc)
1579 {
1580 struct bio *bp, *bp2;
1581
1582 TAILQ_FOREACH_SAFE(bp, &sc->sc_sync_delayed.queue, bio_queue, bp2) {
1583 if (g_raid3_regular_collision(sc, bp))
1584 continue;
1585 bioq_remove(&sc->sc_sync_delayed, bp);
1586 G_RAID3_LOGREQ(2, bp,
1587 "Releasing delayed synchronization request.");
1588 g_io_request(bp, bp->bio_from);
1589 }
1590 }
1591
1592 /*
1593 * Handle synchronization requests.
1594 * Every synchronization request is a two-step process: first a READ
1595 * request is sent to the active provider, then a WRITE request (with the
1596 * data just read) is sent to the provider being synchronized. When the
1597 * WRITE is finished, a new synchronization request is sent.
1598 */
1599 static void
1600 g_raid3_sync_request(struct bio *bp)
1601 {
1602 struct g_raid3_softc *sc;
1603 struct g_raid3_disk *disk;
1604
1605 bp->bio_from->index--;
1606 sc = bp->bio_from->geom->softc;
1607 disk = bp->bio_from->private;
1608 if (disk == NULL) {
1609 sx_xunlock(&sc->sc_lock); /* Avoid recursion on sc_lock. */
1610 g_topology_lock();
1611 g_raid3_kill_consumer(sc, bp->bio_from);
1612 g_topology_unlock();
1613 free(bp->bio_data, M_RAID3);
1614 g_destroy_bio(bp);
1615 sx_xlock(&sc->sc_lock);
1616 return;
1617 }
1618
1619 /*
1620 * Synchronization request.
1621 */
1622 switch (bp->bio_cmd) {
1623 case BIO_READ:
1624 {
1625 struct g_consumer *cp;
1626 u_char *dst, *src;
1627 off_t left;
1628 u_int atom;
1629
1630 if (bp->bio_error != 0) {
1631 G_RAID3_LOGREQ(0, bp,
1632 "Synchronization request failed (error=%d).",
1633 bp->bio_error);
1634 g_destroy_bio(bp);
1635 return;
1636 }
1637 G_RAID3_LOGREQ(3, bp, "Synchronization request finished.");
1638 atom = sc->sc_sectorsize / (sc->sc_ndisks - 1);
1639 dst = src = bp->bio_data;
1640 if (disk->d_no == sc->sc_ndisks - 1) {
1641 u_int n;
1642
1643 /* Parity component. */
1644 for (left = bp->bio_length; left > 0;
1645 left -= sc->sc_sectorsize) {
1646 bcopy(src, dst, atom);
1647 src += atom;
1648 for (n = 1; n < sc->sc_ndisks - 1; n++) {
1649 g_raid3_xor(src, dst, atom);
1650 src += atom;
1651 }
1652 dst += atom;
1653 }
1654 } else {
1655 /* Regular component. */
1656 src += atom * disk->d_no;
1657 for (left = bp->bio_length; left > 0;
1658 left -= sc->sc_sectorsize) {
1659 bcopy(src, dst, atom);
1660 src += sc->sc_sectorsize;
1661 dst += atom;
1662 }
1663 }
1664 bp->bio_driver1 = bp->bio_driver2 = NULL;
1665 bp->bio_pflags = 0;
1666 bp->bio_offset /= sc->sc_ndisks - 1;
1667 bp->bio_length /= sc->sc_ndisks - 1;
1668 bp->bio_cmd = BIO_WRITE;
1669 bp->bio_cflags = 0;
1670 bp->bio_children = bp->bio_inbed = 0;
1671 cp = disk->d_consumer;
1672 KASSERT(cp->acr >= 1 && cp->acw >= 1 && cp->ace >= 1,
1673 ("Consumer %s not opened (r%dw%de%d).", cp->provider->name,
1674 cp->acr, cp->acw, cp->ace));
1675 cp->index++;
1676 g_io_request(bp, cp);
1677 return;
1678 }
1679 case BIO_WRITE:
1680 {
1681 struct g_raid3_disk_sync *sync;
1682 off_t boffset, moffset;
1683 void *data;
1684 int i;
1685
1686 if (bp->bio_error != 0) {
1687 G_RAID3_LOGREQ(0, bp,
1688 "Synchronization request failed (error=%d).",
1689 bp->bio_error);
1690 g_destroy_bio(bp);
1691 sc->sc_bump_id |= G_RAID3_BUMP_GENID;
1692 g_raid3_event_send(disk,
1693 G_RAID3_DISK_STATE_DISCONNECTED,
1694 G_RAID3_EVENT_DONTWAIT);
1695 return;
1696 }
1697 G_RAID3_LOGREQ(3, bp, "Synchronization request finished.");
1698 sync = &disk->d_sync;
1699 if (sync->ds_offset == sc->sc_mediasize / (sc->sc_ndisks - 1) ||
1700 sync->ds_consumer == NULL ||
1701 (sc->sc_flags & G_RAID3_DEVICE_FLAG_DESTROY) != 0) {
1702 /* Don't send more synchronization requests. */
1703 sync->ds_inflight--;
1704 if (sync->ds_bios != NULL) {
1705 i = (int)(uintptr_t)bp->bio_caller1;
1706 sync->ds_bios[i] = NULL;
1707 }
1708 free(bp->bio_data, M_RAID3);
1709 g_destroy_bio(bp);
1710 if (sync->ds_inflight > 0)
1711 return;
1712 if (sync->ds_consumer == NULL ||
1713 (sc->sc_flags & G_RAID3_DEVICE_FLAG_DESTROY) != 0) {
1714 return;
1715 }
1716 /*
1717 * Disk up-to-date, activate it.
1718 */
1719 g_raid3_event_send(disk, G_RAID3_DISK_STATE_ACTIVE,
1720 G_RAID3_EVENT_DONTWAIT);
1721 return;
1722 }
1723
1724 /* Send next synchronization request. */
1725 data = bp->bio_data;
1726 bzero(bp, sizeof(*bp));
1727 bp->bio_cmd = BIO_READ;
1728 bp->bio_offset = sync->ds_offset * (sc->sc_ndisks - 1);
1729 bp->bio_length = MIN(MAXPHYS, sc->sc_mediasize - bp->bio_offset);
1730 sync->ds_offset += bp->bio_length / (sc->sc_ndisks - 1);
1731 bp->bio_done = g_raid3_sync_done;
1732 bp->bio_data = data;
1733 bp->bio_from = sync->ds_consumer;
1734 bp->bio_to = sc->sc_provider;
1735 G_RAID3_LOGREQ(3, bp, "Sending synchronization request.");
1736 sync->ds_consumer->index++;
1737 /*
1738 * Delay the request if it is colliding with a regular request.
1739 */
1740 if (g_raid3_regular_collision(sc, bp))
1741 g_raid3_sync_delay(sc, bp);
1742 else
1743 g_io_request(bp, sync->ds_consumer);
1744
1745 /* Release delayed requests if possible. */
1746 g_raid3_regular_release(sc);
1747
1748 /* Find the smallest offset. */
1749 moffset = sc->sc_mediasize;
1750 for (i = 0; i < g_raid3_syncreqs; i++) {
1751 bp = sync->ds_bios[i];
1752 boffset = bp->bio_offset;
1753 if (bp->bio_cmd == BIO_WRITE)
1754 boffset *= sc->sc_ndisks - 1;
1755 if (boffset < moffset)
1756 moffset = boffset;
1757 }
1758 if (sync->ds_offset_done + (MAXPHYS * 100) < moffset) {
1759 /* Update offset_done on every 100 blocks. */
1760 sync->ds_offset_done = moffset;
1761 g_raid3_update_metadata(disk);
1762 }
1763 return;
1764 }
1765 default:
1766 KASSERT(1 == 0, ("Invalid command here: %u (device=%s)",
1767 bp->bio_cmd, sc->sc_name));
1768 break;
1769 }
1770 }
1771
1772 static int
1773 g_raid3_register_request(struct bio *pbp)
1774 {
1775 struct g_raid3_softc *sc;
1776 struct g_raid3_disk *disk;
1777 struct g_consumer *cp;
1778 struct bio *cbp, *tmpbp;
1779 off_t offset, length;
1780 u_int n, ndisks;
1781 int round_robin, verify;
1782
1783 ndisks = 0;
1784 sc = pbp->bio_to->geom->softc;
1785 if ((pbp->bio_cflags & G_RAID3_BIO_CFLAG_REGSYNC) != 0 &&
1786 sc->sc_syncdisk == NULL) {
1787 g_io_deliver(pbp, EIO);
1788 return (0);
1789 }
1790 g_raid3_init_bio(pbp);
1791 length = pbp->bio_length / (sc->sc_ndisks - 1);
1792 offset = pbp->bio_offset / (sc->sc_ndisks - 1);
1793 round_robin = verify = 0;
1794 switch (pbp->bio_cmd) {
1795 case BIO_READ:
1796 if ((sc->sc_flags & G_RAID3_DEVICE_FLAG_VERIFY) != 0 &&
1797 sc->sc_state == G_RAID3_DEVICE_STATE_COMPLETE) {
1798 pbp->bio_pflags |= G_RAID3_BIO_PFLAG_VERIFY;
1799 verify = 1;
1800 ndisks = sc->sc_ndisks;
1801 } else {
1802 verify = 0;
1803 ndisks = sc->sc_ndisks - 1;
1804 }
1805 if ((sc->sc_flags & G_RAID3_DEVICE_FLAG_ROUND_ROBIN) != 0 &&
1806 sc->sc_state == G_RAID3_DEVICE_STATE_COMPLETE) {
1807 round_robin = 1;
1808 } else {
1809 round_robin = 0;
1810 }
1811 KASSERT(!round_robin || !verify,
1812 ("ROUND-ROBIN and VERIFY are mutually exclusive."));
1813 pbp->bio_driver2 = &sc->sc_disks[sc->sc_ndisks - 1];
1814 break;
1815 case BIO_WRITE:
1816 case BIO_DELETE:
1817 /*
1818 * Delay the request if it is colliding with a synchronization
1819 * request.
1820 */
1821 if (g_raid3_sync_collision(sc, pbp)) {
1822 g_raid3_regular_delay(sc, pbp);
1823 return (0);
1824 }
1825
1826 if (sc->sc_idle)
1827 g_raid3_unidle(sc);
1828 else
1829 sc->sc_last_write = time_uptime;
1830
1831 ndisks = sc->sc_ndisks;
1832 break;
1833 }
1834 for (n = 0; n < ndisks; n++) {
1835 disk = &sc->sc_disks[n];
1836 cbp = g_raid3_clone_bio(sc, pbp);
1837 if (cbp == NULL) {
1838 while ((cbp = G_RAID3_HEAD_BIO(pbp)) != NULL)
1839 g_raid3_destroy_bio(sc, cbp);
1840 /*
1841 * To prevent deadlock, we must pass the ENOMEM back
1842 * up for failed requests that came from any of our
1843 * consumers. Our own sync requests can stick
1844 * around, as they are finite.
1845 */
1846 if ((pbp->bio_cflags &
1847 G_RAID3_BIO_CFLAG_REGULAR) != 0) {
1848 g_io_deliver(pbp, ENOMEM);
1849 return (0);
1850 }
1851 return (ENOMEM);
1852 }
1853 cbp->bio_offset = offset;
1854 cbp->bio_length = length;
1855 cbp->bio_done = g_raid3_done;
1856 switch (pbp->bio_cmd) {
1857 case BIO_READ:
1858 if (disk->d_state != G_RAID3_DISK_STATE_ACTIVE) {
1859 /*
1860 * Replace invalid component with the parity
1861 * component.
1862 */
1863 disk = &sc->sc_disks[sc->sc_ndisks - 1];
1864 cbp->bio_cflags |= G_RAID3_BIO_CFLAG_PARITY;
1865 pbp->bio_pflags |= G_RAID3_BIO_PFLAG_DEGRADED;
1866 } else if (round_robin &&
1867 disk->d_no == sc->sc_round_robin) {
1868 /*
1869 * In round-robin mode skip one data component
1870 * and use parity component when reading.
1871 */
1872 pbp->bio_driver2 = disk;
1873 disk = &sc->sc_disks[sc->sc_ndisks - 1];
1874 cbp->bio_cflags |= G_RAID3_BIO_CFLAG_PARITY;
1875 sc->sc_round_robin++;
1876 round_robin = 0;
1877 } else if (verify && disk->d_no == sc->sc_ndisks - 1) {
1878 cbp->bio_cflags |= G_RAID3_BIO_CFLAG_PARITY;
1879 }
1880 break;
1881 case BIO_WRITE:
1882 case BIO_DELETE:
1883 if (disk->d_state == G_RAID3_DISK_STATE_ACTIVE ||
1884 disk->d_state == G_RAID3_DISK_STATE_SYNCHRONIZING) {
1885 if (n == ndisks - 1) {
1886 /*
1887 * Active parity component, mark it as such.
1888 */
1889 cbp->bio_cflags |=
1890 G_RAID3_BIO_CFLAG_PARITY;
1891 }
1892 } else {
1893 pbp->bio_pflags |= G_RAID3_BIO_PFLAG_DEGRADED;
1894 if (n == ndisks - 1) {
1895 /*
1896 * Parity component is not connected,
1897 * so destroy its request.
1898 */
1899 pbp->bio_pflags |=
1900 G_RAID3_BIO_PFLAG_NOPARITY;
1901 g_raid3_destroy_bio(sc, cbp);
1902 cbp = NULL;
1903 } else {
1904 cbp->bio_cflags |=
1905 G_RAID3_BIO_CFLAG_NODISK;
1906 disk = NULL;
1907 }
1908 }
1909 break;
1910 }
1911 if (cbp != NULL)
1912 cbp->bio_caller2 = disk;
1913 }
1914 switch (pbp->bio_cmd) {
1915 case BIO_READ:
1916 if (round_robin) {
1917 /*
1918 * If we are in round-robin mode and 'round_robin' is
1919 * still 1, it means that we skipped the parity component
1920 * for this read and must reset the sc_round_robin field.
1921 */
1922 sc->sc_round_robin = 0;
1923 }
1924 G_RAID3_FOREACH_SAFE_BIO(pbp, cbp, tmpbp) {
1925 disk = cbp->bio_caller2;
1926 cp = disk->d_consumer;
1927 cbp->bio_to = cp->provider;
1928 G_RAID3_LOGREQ(3, cbp, "Sending request.");
1929 KASSERT(cp->acr >= 1 && cp->acw >= 1 && cp->ace >= 1,
1930 ("Consumer %s not opened (r%dw%de%d).",
1931 cp->provider->name, cp->acr, cp->acw, cp->ace));
1932 cp->index++;
1933 g_io_request(cbp, cp);
1934 }
1935 break;
1936 case BIO_WRITE:
1937 case BIO_DELETE:
1938 /*
1939 * Put the request onto the inflight queue, so we can check
1940 * whether new synchronization requests collide with it.
1941 */
1942 bioq_insert_tail(&sc->sc_inflight, pbp);
1943
1944 /*
1945 * Bump syncid on first write.
1946 */
1947 if ((sc->sc_bump_id & G_RAID3_BUMP_SYNCID) != 0) {
1948 sc->sc_bump_id &= ~G_RAID3_BUMP_SYNCID;
1949 g_raid3_bump_syncid(sc);
1950 }
1951 g_raid3_scatter(pbp);
1952 break;
1953 }
1954 return (0);
1955 }
1956
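/*
 * Return non-zero when no consumer of the device geom or of its
 * synchronization geom is busy, i.e. the geoms can be safely destroyed.
 */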
1957 static int
1958 g_raid3_can_destroy(struct g_raid3_softc *sc)
1959 {
1960 struct g_geom *gp;
1961 struct g_consumer *cp;
1962
1963 g_topology_assert();
1964 gp = sc->sc_geom;
1965 if (gp->softc == NULL)
1966 return (1);
1967 LIST_FOREACH(cp, &gp->consumer, consumer) {
1968 if (g_raid3_is_busy(sc, cp))
1969 return (0);
1970 }
1971 gp = sc->sc_sync.ds_geom;
1972 LIST_FOREACH(cp, &gp->consumer, consumer) {
1973 if (g_raid3_is_busy(sc, cp))
1974 return (0);
1975 }
1976 G_RAID3_DEBUG(2, "No I/O requests for %s, it can be destroyed.",
1977 sc->sc_name);
1978 return (1);
1979 }
1980
1981 static int
1982 g_raid3_try_destroy(struct g_raid3_softc *sc)
1983 {
1984
1985 g_topology_assert_not();
1986 sx_assert(&sc->sc_lock, SX_XLOCKED);
1987
1988 if (sc->sc_rootmount != NULL) {
1989 G_RAID3_DEBUG(1, "root_mount_rel[%u] %p", __LINE__,
1990 sc->sc_rootmount);
1991 root_mount_rel(sc->sc_rootmount);
1992 sc->sc_rootmount = NULL;
1993 }
1994
1995 g_topology_lock();
1996 if (!g_raid3_can_destroy(sc)) {
1997 g_topology_unlock();
1998 return (0);
1999 }
2000 sc->sc_geom->softc = NULL;
2001 sc->sc_sync.ds_geom->softc = NULL;
2002 if ((sc->sc_flags & G_RAID3_DEVICE_FLAG_WAIT) != 0) {
2003 g_topology_unlock();
2004 G_RAID3_DEBUG(4, "%s: Waking up %p.", __func__,
2005 &sc->sc_worker);
2006 /* Unlock sc_lock here, as it can be destroyed after wakeup. */
2007 sx_xunlock(&sc->sc_lock);
2008 wakeup(&sc->sc_worker);
2009 sc->sc_worker = NULL;
2010 } else {
2011 g_topology_unlock();
2012 g_raid3_destroy_device(sc);
2013 free(sc->sc_disks, M_RAID3);
2014 free(sc, M_RAID3);
2015 }
2016 return (1);
2017 }
2018
2019 /*
2020 * Worker thread.
2021 */
2022 static void
2023 g_raid3_worker(void *arg)
2024 {
2025 struct g_raid3_softc *sc;
2026 struct g_raid3_event *ep;
2027 struct bio *bp;
2028 int timeout;
2029
2030 sc = arg;
2031 thread_lock(curthread);
2032 sched_prio(curthread, PRIBIO);
2033 thread_unlock(curthread);
2034
2035 sx_xlock(&sc->sc_lock);
2036 for (;;) {
2037 G_RAID3_DEBUG(5, "%s: Let's see...", __func__);
2038 /*
2039 * First take a look at events.
2040 * It is important to handle events before any I/O requests.
2041 */
2042 ep = g_raid3_event_get(sc);
2043 if (ep != NULL) {
2044 g_raid3_event_remove(sc, ep);
2045 if ((ep->e_flags & G_RAID3_EVENT_DEVICE) != 0) {
2046 /* Update only device status. */
2047 G_RAID3_DEBUG(3,
2048 "Running event for device %s.",
2049 sc->sc_name);
2050 ep->e_error = 0;
2051 g_raid3_update_device(sc, 1);
2052 } else {
2053 /* Update disk status. */
2054 G_RAID3_DEBUG(3, "Running event for disk %s.",
2055 g_raid3_get_diskname(ep->e_disk));
2056 ep->e_error = g_raid3_update_disk(ep->e_disk,
2057 ep->e_state);
2058 if (ep->e_error == 0)
2059 g_raid3_update_device(sc, 0);
2060 }
2061 if ((ep->e_flags & G_RAID3_EVENT_DONTWAIT) != 0) {
2062 KASSERT(ep->e_error == 0,
2063 ("Error cannot be handled."));
2064 g_raid3_event_free(ep);
2065 } else {
2066 ep->e_flags |= G_RAID3_EVENT_DONE;
2067 G_RAID3_DEBUG(4, "%s: Waking up %p.", __func__,
2068 ep);
2069 mtx_lock(&sc->sc_events_mtx);
2070 wakeup(ep);
2071 mtx_unlock(&sc->sc_events_mtx);
2072 }
2073 if ((sc->sc_flags &
2074 G_RAID3_DEVICE_FLAG_DESTROY) != 0) {
2075 if (g_raid3_try_destroy(sc)) {
2076 curthread->td_pflags &= ~TDP_GEOM;
2077 G_RAID3_DEBUG(1, "Thread exiting.");
2078 kproc_exit(0);
2079 }
2080 }
2081 G_RAID3_DEBUG(5, "%s: I'm here 1.", __func__);
2082 continue;
2083 }
2084 /*
2085 * Check whether we can mark the array as CLEAN and, if we
2086 * cannot, how many seconds we should wait before trying again.
2087 */
2088 timeout = g_raid3_idle(sc, -1);
2089 /*
2090 * Now I/O requests.
2091 */
2092 /* Get first request from the queue. */
2093 mtx_lock(&sc->sc_queue_mtx);
2094 bp = bioq_first(&sc->sc_queue);
2095 if (bp == NULL) {
2096 if ((sc->sc_flags &
2097 G_RAID3_DEVICE_FLAG_DESTROY) != 0) {
2098 mtx_unlock(&sc->sc_queue_mtx);
2099 if (g_raid3_try_destroy(sc)) {
2100 curthread->td_pflags &= ~TDP_GEOM;
2101 G_RAID3_DEBUG(1, "Thread exiting.");
2102 kproc_exit(0);
2103 }
2104 mtx_lock(&sc->sc_queue_mtx);
2105 }
2106 sx_xunlock(&sc->sc_lock);
2107 /*
2108 * XXX: We can miss an event here, because an event
2109 * can be added without the sx device lock and without
2110 * the mtx queue lock. Maybe I should just stop using a
2111 * dedicated mutex for event synchronization and
2112 * stick with the queue lock?
2113 * The event will hang here until the next I/O request
2114 * or the next event is received.
2115 */
2116 MSLEEP(sc, &sc->sc_queue_mtx, PRIBIO | PDROP, "r3:w1",
2117 timeout * hz);
2118 sx_xlock(&sc->sc_lock);
2119 G_RAID3_DEBUG(5, "%s: I'm here 4.", __func__);
2120 continue;
2121 }
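/*
 * The 'process' label below is reached either with the first bio taken
 * from the queue, or from the low-memory path further down with a
 * previously completed regular or synchronization bio whose processing
 * will release resources.
 */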
2122 process:
2123 bioq_remove(&sc->sc_queue, bp);
2124 mtx_unlock(&sc->sc_queue_mtx);
2125
2126 if (bp->bio_from->geom == sc->sc_sync.ds_geom &&
2127 (bp->bio_cflags & G_RAID3_BIO_CFLAG_SYNC) != 0) {
2128 g_raid3_sync_request(bp); /* READ */
2129 } else if (bp->bio_to != sc->sc_provider) {
2130 if ((bp->bio_cflags & G_RAID3_BIO_CFLAG_REGULAR) != 0)
2131 g_raid3_regular_request(bp);
2132 else if ((bp->bio_cflags & G_RAID3_BIO_CFLAG_SYNC) != 0)
2133 g_raid3_sync_request(bp); /* WRITE */
2134 else {
2135 KASSERT(0,
2136 ("Invalid request cflags=0x%hhx to=%s.",
2137 bp->bio_cflags, bp->bio_to->name));
2138 }
2139 } else if (g_raid3_register_request(bp) != 0) {
2140 mtx_lock(&sc->sc_queue_mtx);
2141 bioq_insert_head(&sc->sc_queue, bp);
2142 /*
2143 * We are short on memory; let's see if there are any
2144 * finished requests we can free.
2145 */
2146 TAILQ_FOREACH(bp, &sc->sc_queue.queue, bio_queue) {
2147 if (bp->bio_cflags & G_RAID3_BIO_CFLAG_REGULAR)
2148 goto process;
2149 }
2150 /*
2151 * No finished regular requests, so at least keep
2152 * synchronization running.
2153 */
2154 TAILQ_FOREACH(bp, &sc->sc_queue.queue, bio_queue) {
2155 if (bp->bio_cflags & G_RAID3_BIO_CFLAG_SYNC)
2156 goto process;
2157 }
2158 sx_xunlock(&sc->sc_lock);
2159 MSLEEP(&sc->sc_queue, &sc->sc_queue_mtx, PRIBIO | PDROP,
2160 "r3:lowmem", hz / 10);
2161 sx_xlock(&sc->sc_lock);
2162 }
2163 G_RAID3_DEBUG(5, "%s: I'm here 9.", __func__);
2164 }
2165 }
2166
2167 static void
2168 g_raid3_update_idle(struct g_raid3_softc *sc, struct g_raid3_disk *disk)
2169 {
2170
2171 sx_assert(&sc->sc_lock, SX_LOCKED);
2172 if ((sc->sc_flags & G_RAID3_DEVICE_FLAG_NOFAILSYNC) != 0)
2173 return;
2174 if (!sc->sc_idle && (disk->d_flags & G_RAID3_DISK_FLAG_DIRTY) == 0) {
2175 G_RAID3_DEBUG(1, "Disk %s (device %s) marked as dirty.",
2176 g_raid3_get_diskname(disk), sc->sc_name);
2177 disk->d_flags |= G_RAID3_DISK_FLAG_DIRTY;
2178 } else if (sc->sc_idle &&
2179 (disk->d_flags & G_RAID3_DISK_FLAG_DIRTY) != 0) {
2180 G_RAID3_DEBUG(1, "Disk %s (device %s) marked as clean.",
2181 g_raid3_get_diskname(disk), sc->sc_name);
2182 disk->d_flags &= ~G_RAID3_DISK_FLAG_DIRTY;
2183 }
2184 }
2185
2186 static void
2187 g_raid3_sync_start(struct g_raid3_softc *sc)
2188 {
2189 struct g_raid3_disk *disk;
2190 struct g_consumer *cp;
2191 struct bio *bp;
2192 int error;
2193 u_int n;
2194
2195 g_topology_assert_not();
2196 sx_assert(&sc->sc_lock, SX_XLOCKED);
2197
2198 KASSERT(sc->sc_state == G_RAID3_DEVICE_STATE_DEGRADED,
2199 ("Device not in DEGRADED state (%s, %u).", sc->sc_name,
2200 sc->sc_state));
2201 KASSERT(sc->sc_syncdisk == NULL, ("Syncdisk is not NULL (%s, %u).",
2202 sc->sc_name, sc->sc_state));
2203 disk = NULL;
2204 for (n = 0; n < sc->sc_ndisks; n++) {
2205 if (sc->sc_disks[n].d_state != G_RAID3_DISK_STATE_SYNCHRONIZING)
2206 continue;
2207 disk = &sc->sc_disks[n];
2208 break;
2209 }
2210 if (disk == NULL)
2211 return;
2212
2213 sx_xunlock(&sc->sc_lock);
2214 g_topology_lock();
2215 cp = g_new_consumer(sc->sc_sync.ds_geom);
2216 error = g_attach(cp, sc->sc_provider);
2217 KASSERT(error == 0,
2218 ("Cannot attach to %s (error=%d).", sc->sc_name, error));
2219 error = g_access(cp, 1, 0, 0);
2220 KASSERT(error == 0, ("Cannot open %s (error=%d).", sc->sc_name, error));
2221 g_topology_unlock();
2222 sx_xlock(&sc->sc_lock);
2223
2224 G_RAID3_DEBUG(0, "Device %s: rebuilding provider %s.", sc->sc_name,
2225 g_raid3_get_diskname(disk));
2226 if ((sc->sc_flags & G_RAID3_DEVICE_FLAG_NOFAILSYNC) == 0)
2227 disk->d_flags |= G_RAID3_DISK_FLAG_DIRTY;
2228 KASSERT(disk->d_sync.ds_consumer == NULL,
2229 ("Sync consumer already exists (device=%s, disk=%s).",
2230 sc->sc_name, g_raid3_get_diskname(disk)));
2231
2232 disk->d_sync.ds_consumer = cp;
2233 disk->d_sync.ds_consumer->private = disk;
2234 disk->d_sync.ds_consumer->index = 0;
2235 sc->sc_syncdisk = disk;
2236
2237 /*
2238 * Allocate memory for synchronization bios and initialize them.
2239 */
2240 disk->d_sync.ds_bios = malloc(sizeof(struct bio *) * g_raid3_syncreqs,
2241 M_RAID3, M_WAITOK);
2242 for (n = 0; n < g_raid3_syncreqs; n++) {
2243 bp = g_alloc_bio();
2244 disk->d_sync.ds_bios[n] = bp;
2245 bp->bio_parent = NULL;
2246 bp->bio_cmd = BIO_READ;
2247 bp->bio_data = malloc(MAXPHYS, M_RAID3, M_WAITOK);
2248 bp->bio_cflags = 0;
2249 bp->bio_offset = disk->d_sync.ds_offset * (sc->sc_ndisks - 1);
2250 bp->bio_length = MIN(MAXPHYS, sc->sc_mediasize - bp->bio_offset);
2251 disk->d_sync.ds_offset += bp->bio_length / (sc->sc_ndisks - 1);
2252 bp->bio_done = g_raid3_sync_done;
2253 bp->bio_from = disk->d_sync.ds_consumer;
2254 bp->bio_to = sc->sc_provider;
2255 bp->bio_caller1 = (void *)(uintptr_t)n;
2256 }
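/*
 * Illustration of the offset arithmetic above (assuming MAXPHYS is
 * 128 kB): ds_offset is a per-component offset, while bio_offset is
 * expressed in the array's address space, which is (sc_ndisks - 1)
 * times larger.  With 5 components (4 data + 1 parity), each 128 kB
 * read covers 128 kB of array space and advances ds_offset by
 * 128 kB / 4 = 32 kB.
 */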
2257
2258 /* Set the number of in-flight synchronization requests. */
2259 disk->d_sync.ds_inflight = g_raid3_syncreqs;
2260
2261 /*
2262 * Fire off first synchronization requests.
2263 */
2264 for (n = 0; n < g_raid3_syncreqs; n++) {
2265 bp = disk->d_sync.ds_bios[n];
2266 G_RAID3_LOGREQ(3, bp, "Sending synchronization request.");
2267 disk->d_sync.ds_consumer->index++;
2268 /*
2269 * Delay the request if it is colliding with a regular request.
2270 */
2271 if (g_raid3_regular_collision(sc, bp))
2272 g_raid3_sync_delay(sc, bp);
2273 else
2274 g_io_request(bp, disk->d_sync.ds_consumer);
2275 }
2276 }
2277
2278 /*
2279 * Stop synchronization process.
2280 * type: 0 - synchronization finished
2281 * 1 - synchronization stopped
2282 */
2283 static void
2284 g_raid3_sync_stop(struct g_raid3_softc *sc, int type)
2285 {
2286 struct g_raid3_disk *disk;
2287 struct g_consumer *cp;
2288
2289 g_topology_assert_not();
2290 sx_assert(&sc->sc_lock, SX_LOCKED);
2291
2292 KASSERT(sc->sc_state == G_RAID3_DEVICE_STATE_DEGRADED,
2293 ("Device not in DEGRADED state (%s, %u).", sc->sc_name,
2294 sc->sc_state));
2295 disk = sc->sc_syncdisk;
2296 sc->sc_syncdisk = NULL;
2297 KASSERT(disk != NULL, ("No disk was synchronized (%s).", sc->sc_name));
2298 KASSERT(disk->d_state == G_RAID3_DISK_STATE_SYNCHRONIZING,
2299 ("Wrong disk state (%s, %s).", g_raid3_get_diskname(disk),
2300 g_raid3_disk_state2str(disk->d_state)));
2301 if (disk->d_sync.ds_consumer == NULL)
2302 return;
2303
2304 if (type == 0) {
2305 G_RAID3_DEBUG(0, "Device %s: rebuilding provider %s finished.",
2306 sc->sc_name, g_raid3_get_diskname(disk));
2307 } else /* if (type == 1) */ {
2308 G_RAID3_DEBUG(0, "Device %s: rebuilding provider %s stopped.",
2309 sc->sc_name, g_raid3_get_diskname(disk));
2310 }
2311 free(disk->d_sync.ds_bios, M_RAID3);
2312 disk->d_sync.ds_bios = NULL;
2313 cp = disk->d_sync.ds_consumer;
2314 disk->d_sync.ds_consumer = NULL;
2315 disk->d_flags &= ~G_RAID3_DISK_FLAG_DIRTY;
2316 sx_xunlock(&sc->sc_lock); /* Avoid recursion on sc_lock. */
2317 g_topology_lock();
2318 g_raid3_kill_consumer(sc, cp);
2319 g_topology_unlock();
2320 sx_xlock(&sc->sc_lock);
2321 }
2322
2323 static void
2324 g_raid3_launch_provider(struct g_raid3_softc *sc)
2325 {
2326 struct g_provider *pp;
2327 struct g_raid3_disk *disk;
2328 int n;
2329
2330 sx_assert(&sc->sc_lock, SX_LOCKED);
2331
2332 g_topology_lock();
2333 pp = g_new_providerf(sc->sc_geom, "raid3/%s", sc->sc_name);
2334 pp->mediasize = sc->sc_mediasize;
2335 pp->sectorsize = sc->sc_sectorsize;
2336 pp->stripesize = 0;
2337 pp->stripeoffset = 0;
2338 for (n = 0; n < sc->sc_ndisks; n++) {
2339 disk = &sc->sc_disks[n];
2340 if (disk->d_consumer && disk->d_consumer->provider &&
2341 disk->d_consumer->provider->stripesize > pp->stripesize) {
2342 pp->stripesize = disk->d_consumer->provider->stripesize;
2343 pp->stripeoffset = disk->d_consumer->provider->stripeoffset;
2344 }
2345 }
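/*
 * Scale by the number of data components: with hypothetical numbers,
 * if the widest component reports a 64 kB stripesize, a 5-component
 * array (4 data + 1 parity) advertises 4 * 64 kB = 256 kB, since a
 * stripesize-sized region on one component corresponds to
 * (sc_ndisks - 1) times as much contiguous array address space.
 */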
2346 pp->stripesize *= sc->sc_ndisks - 1;
2347 pp->stripeoffset *= sc->sc_ndisks - 1;
2348 sc->sc_provider = pp;
2349 g_error_provider(pp, 0);
2350 g_topology_unlock();
2351 G_RAID3_DEBUG(0, "Device %s launched (%u/%u).", pp->name,
2352 g_raid3_ndisks(sc, G_RAID3_DISK_STATE_ACTIVE), sc->sc_ndisks);
2353
2354 if (sc->sc_state == G_RAID3_DEVICE_STATE_DEGRADED)
2355 g_raid3_sync_start(sc);
2356 }
2357
2358 static void
2359 g_raid3_destroy_provider(struct g_raid3_softc *sc)
2360 {
2361 struct bio *bp;
2362
2363 g_topology_assert_not();
2364 KASSERT(sc->sc_provider != NULL, ("NULL provider (device=%s).",
2365 sc->sc_name));
2366
2367 g_topology_lock();
2368 g_error_provider(sc->sc_provider, ENXIO);
2369 mtx_lock(&sc->sc_queue_mtx);
2370 while ((bp = bioq_first(&sc->sc_queue)) != NULL) {
2371 bioq_remove(&sc->sc_queue, bp);
2372 g_io_deliver(bp, ENXIO);
2373 }
2374 mtx_unlock(&sc->sc_queue_mtx);
2375 G_RAID3_DEBUG(0, "Device %s: provider %s destroyed.", sc->sc_name,
2376 sc->sc_provider->name);
2377 sc->sc_provider->flags |= G_PF_WITHER;
2378 g_orphan_provider(sc->sc_provider, ENXIO);
2379 g_topology_unlock();
2380 sc->sc_provider = NULL;
2381 if (sc->sc_syncdisk != NULL)
2382 g_raid3_sync_stop(sc, 1);
2383 }
2384
2385 static void
2386 g_raid3_go(void *arg)
2387 {
2388 struct g_raid3_softc *sc;
2389
2390 sc = arg;
2391 G_RAID3_DEBUG(0, "Force device %s start due to timeout.", sc->sc_name);
2392 g_raid3_event_send(sc, 0,
2393 G_RAID3_EVENT_DONTWAIT | G_RAID3_EVENT_DEVICE);
2394 }
2395
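/*
 * Decide the initial state of a newly arrived disk by comparing its
 * on-disk syncid with the device's: an equal syncid gives ACTIVE (or
 * SYNCHRONIZING/STALE if the SYNCHRONIZING flag is set), an older
 * syncid means the disk must be resynchronized from scratch (or left
 * STALE when autosynchronization is disabled), and a newer syncid
 * means the running device is stale, so the disk is refused.
 */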
2396 static u_int
2397 g_raid3_determine_state(struct g_raid3_disk *disk)
2398 {
2399 struct g_raid3_softc *sc;
2400 u_int state;
2401
2402 sc = disk->d_softc;
2403 if (sc->sc_syncid == disk->d_sync.ds_syncid) {
2404 if ((disk->d_flags &
2405 G_RAID3_DISK_FLAG_SYNCHRONIZING) == 0) {
2406 /* Disk does not need synchronization. */
2407 state = G_RAID3_DISK_STATE_ACTIVE;
2408 } else {
2409 if ((sc->sc_flags &
2410 G_RAID3_DEVICE_FLAG_NOAUTOSYNC) == 0 ||
2411 (disk->d_flags &
2412 G_RAID3_DISK_FLAG_FORCE_SYNC) != 0) {
2413 /*
2414 * We can start synchronization from
2415 * the stored offset.
2416 */
2417 state = G_RAID3_DISK_STATE_SYNCHRONIZING;
2418 } else {
2419 state = G_RAID3_DISK_STATE_STALE;
2420 }
2421 }
2422 } else if (disk->d_sync.ds_syncid < sc->sc_syncid) {
2423 /*
2424 * Reset all synchronization data for this disk,
2425 * because even if it was synchronized, it was
2426 * synchronized against disks with a different syncid.
2427 */
2428 disk->d_flags |= G_RAID3_DISK_FLAG_SYNCHRONIZING;
2429 disk->d_sync.ds_offset = 0;
2430 disk->d_sync.ds_offset_done = 0;
2431 disk->d_sync.ds_syncid = sc->sc_syncid;
2432 if ((sc->sc_flags & G_RAID3_DEVICE_FLAG_NOAUTOSYNC) == 0 ||
2433 (disk->d_flags & G_RAID3_DISK_FLAG_FORCE_SYNC) != 0) {
2434 state = G_RAID3_DISK_STATE_SYNCHRONIZING;
2435 } else {
2436 state = G_RAID3_DISK_STATE_STALE;
2437 }
2438 } else /* if (sc->sc_syncid < disk->d_sync.ds_syncid) */ {
2439 /*
2440 * Not good, NOT GOOD!
2441 * It means that the device was started on stale disks
2442 * and a fresher disk has just arrived.
2443 * If there were writes, the device is broken, sorry.
2444 * The best choice here is to leave this disk untouched
2445 * and inform the user loudly.
2446 */
2447 G_RAID3_DEBUG(0, "Device %s was started before the freshest "
2448 "disk (%s) arrives!! It will not be connected to the "
2449 "running device.", sc->sc_name,
2450 g_raid3_get_diskname(disk));
2451 g_raid3_destroy_disk(disk);
2452 state = G_RAID3_DISK_STATE_NONE;
2453 /* Return immediately, because disk was destroyed. */
2454 return (state);
2455 }
2456 G_RAID3_DEBUG(3, "State for %s disk: %s.",
2457 g_raid3_get_diskname(disk), g_raid3_disk_state2str(state));
2458 return (state);
2459 }
2460
2461 /*
2462 * Update device state.
2463 */
2464 static void
2465 g_raid3_update_device(struct g_raid3_softc *sc, boolean_t force)
2466 {
2467 struct g_raid3_disk *disk;
2468 u_int state;
2469
2470 sx_assert(&sc->sc_lock, SX_XLOCKED);
2471
2472 switch (sc->sc_state) {
2473 case G_RAID3_DEVICE_STATE_STARTING:
2474 {
2475 u_int n, ndirty, ndisks, genid, syncid;
2476
2477 KASSERT(sc->sc_provider == NULL,
2478 ("Non-NULL provider in STARTING state (%s).", sc->sc_name));
2479 /*
2480 * Are we ready? We are ready if all disks are connected or
2481 * if one disk is missing and 'force' is true.
2482 */
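/*
 * g_raid3_ndisks(sc, -1) counts the connected components regardless
 * of their state; since 'force' is 0 or 1, the sum below equals
 * sc_ndisks exactly when all components are connected (force == 0)
 * or exactly one is missing (force != 0).
 */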
2483 if (g_raid3_ndisks(sc, -1) + force == sc->sc_ndisks) {
2484 if (!force)
2485 callout_drain(&sc->sc_callout);
2486 } else {
2487 if (force) {
2488 /*
2489 * Timeout expired, so destroy device.
2490 */
2491 sc->sc_flags |= G_RAID3_DEVICE_FLAG_DESTROY;
2492 G_RAID3_DEBUG(1, "root_mount_rel[%u] %p",
2493 __LINE__, sc->sc_rootmount);
2494 root_mount_rel(sc->sc_rootmount);
2495 sc->sc_rootmount = NULL;
2496 }
2497 return;
2498 }
2499
2500 /*
2501 * Find the biggest genid.
2502 */
2503 genid = 0;
2504 for (n = 0; n < sc->sc_ndisks; n++) {
2505 disk = &sc->sc_disks[n];
2506 if (disk->d_state == G_RAID3_DISK_STATE_NODISK)
2507 continue;
2508 if (disk->d_genid > genid)
2509 genid = disk->d_genid;
2510 }
2511 sc->sc_genid = genid;
2512 /*
2513 * Remove all disks without the biggest genid.
2514 */
2515 for (n = 0; n < sc->sc_ndisks; n++) {
2516 disk = &sc->sc_disks[n];
2517 if (disk->d_state == G_RAID3_DISK_STATE_NODISK)
2518 continue;
2519 if (disk->d_genid < genid) {
2520 G_RAID3_DEBUG(0,
2521 "Component %s (device %s) broken, skipping.",
2522 g_raid3_get_diskname(disk), sc->sc_name);
2523 g_raid3_destroy_disk(disk);
2524 }
2525 }
2526
2527 /*
2528 * There must be at least 'sc->sc_ndisks - 1' components
2529 * with the same syncid and without SYNCHRONIZING flag.
2530 */
2531
2532 /*
2533 * Find the biggest syncid, number of valid components and
2534 * number of dirty components.
2535 */
2536 ndirty = ndisks = syncid = 0;
2537 for (n = 0; n < sc->sc_ndisks; n++) {
2538 disk = &sc->sc_disks[n];
2539 if (disk->d_state == G_RAID3_DISK_STATE_NODISK)
2540 continue;
2541 if ((disk->d_flags & G_RAID3_DISK_FLAG_DIRTY) != 0)
2542 ndirty++;
2543 if (disk->d_sync.ds_syncid > syncid) {
2544 syncid = disk->d_sync.ds_syncid;
2545 ndisks = 0;
2546 } else if (disk->d_sync.ds_syncid < syncid) {
2547 continue;
2548 }
2549 if ((disk->d_flags &
2550 G_RAID3_DISK_FLAG_SYNCHRONIZING) != 0) {
2551 continue;
2552 }
2553 ndisks++;
2554 }
2555 /*
2556 * Do we have enough valid components?
2557 */
2558 if (ndisks + 1 < sc->sc_ndisks) {
2559 G_RAID3_DEBUG(0,
2560 "Device %s is broken, too few valid components.",
2561 sc->sc_name);
2562 sc->sc_flags |= G_RAID3_DEVICE_FLAG_DESTROY;
2563 return;
2564 }
2565 /*
2566 * If there is one DIRTY component and all disks are present,
2567 * mark it for synchronization. If there is more than one DIRTY
2568 * component, mark parity component for synchronization.
2569 */
2570 if (ndisks == sc->sc_ndisks && ndirty == 1) {
2571 for (n = 0; n < sc->sc_ndisks; n++) {
2572 disk = &sc->sc_disks[n];
2573 if ((disk->d_flags &
2574 G_RAID3_DISK_FLAG_DIRTY) == 0) {
2575 continue;
2576 }
2577 disk->d_flags |=
2578 G_RAID3_DISK_FLAG_SYNCHRONIZING;
2579 }
2580 } else if (ndisks == sc->sc_ndisks && ndirty > 1) {
2581 disk = &sc->sc_disks[sc->sc_ndisks - 1];
2582 disk->d_flags |= G_RAID3_DISK_FLAG_SYNCHRONIZING;
2583 }
2584
2585 sc->sc_syncid = syncid;
2586 if (force) {
2587 /* Remember to bump syncid on first write. */
2588 sc->sc_bump_id |= G_RAID3_BUMP_SYNCID;
2589 }
2590 if (ndisks == sc->sc_ndisks)
2591 state = G_RAID3_DEVICE_STATE_COMPLETE;
2592 else /* if (ndisks == sc->sc_ndisks - 1) */
2593 state = G_RAID3_DEVICE_STATE_DEGRADED;
2594 G_RAID3_DEBUG(1, "Device %s state changed from %s to %s.",
2595 sc->sc_name, g_raid3_device_state2str(sc->sc_state),
2596 g_raid3_device_state2str(state));
2597 sc->sc_state = state;
2598 for (n = 0; n < sc->sc_ndisks; n++) {
2599 disk = &sc->sc_disks[n];
2600 if (disk->d_state == G_RAID3_DISK_STATE_NODISK)
2601 continue;
2602 state = g_raid3_determine_state(disk);
2603 g_raid3_event_send(disk, state, G_RAID3_EVENT_DONTWAIT);
2604 if (state == G_RAID3_DISK_STATE_STALE)
2605 sc->sc_bump_id |= G_RAID3_BUMP_SYNCID;
2606 }
2607 break;
2608 }
2609 case G_RAID3_DEVICE_STATE_DEGRADED:
2610 /*
2611 * Genid needs to be bumped immediately, so do it here.
2612 */
2613 if ((sc->sc_bump_id & G_RAID3_BUMP_GENID) != 0) {
2614 sc->sc_bump_id &= ~G_RAID3_BUMP_GENID;
2615 g_raid3_bump_genid(sc);
2616 }
2617
2618 if (g_raid3_ndisks(sc, G_RAID3_DISK_STATE_NEW) > 0)
2619 return;
2620 if (g_raid3_ndisks(sc, G_RAID3_DISK_STATE_ACTIVE) <
2621 sc->sc_ndisks - 1) {
2622 if (sc->sc_provider != NULL)
2623 g_raid3_destroy_provider(sc);
2624 sc->sc_flags |= G_RAID3_DEVICE_FLAG_DESTROY;
2625 return;
2626 }
2627 if (g_raid3_ndisks(sc, G_RAID3_DISK_STATE_ACTIVE) ==
2628 sc->sc_ndisks) {
2629 state = G_RAID3_DEVICE_STATE_COMPLETE;
2630 G_RAID3_DEBUG(1,
2631 "Device %s state changed from %s to %s.",
2632 sc->sc_name, g_raid3_device_state2str(sc->sc_state),
2633 g_raid3_device_state2str(state));
2634 sc->sc_state = state;
2635 }
2636 if (sc->sc_provider == NULL)
2637 g_raid3_launch_provider(sc);
2638 if (sc->sc_rootmount != NULL) {
2639 G_RAID3_DEBUG(1, "root_mount_rel[%u] %p", __LINE__,
2640 sc->sc_rootmount);
2641 root_mount_rel(sc->sc_rootmount);
2642 sc->sc_rootmount = NULL;
2643 }
2644 break;
2645 case G_RAID3_DEVICE_STATE_COMPLETE:
2646 /*
2647 * Genid needs to be bumped immediately, so do it here.
2648 */
2649 if ((sc->sc_bump_id & G_RAID3_BUMP_GENID) != 0) {
2650 sc->sc_bump_id &= ~G_RAID3_BUMP_GENID;
2651 g_raid3_bump_genid(sc);
2652 }
2653
2654 if (g_raid3_ndisks(sc, G_RAID3_DISK_STATE_NEW) > 0)
2655 return;
2656 KASSERT(g_raid3_ndisks(sc, G_RAID3_DISK_STATE_ACTIVE) >=
2657 sc->sc_ndisks - 1,
2658 ("Too few ACTIVE components in COMPLETE state (device %s).",
2659 sc->sc_name));
2660 if (g_raid3_ndisks(sc, G_RAID3_DISK_STATE_ACTIVE) ==
2661 sc->sc_ndisks - 1) {
2662 state = G_RAID3_DEVICE_STATE_DEGRADED;
2663 G_RAID3_DEBUG(1,
2664 "Device %s state changed from %s to %s.",
2665 sc->sc_name, g_raid3_device_state2str(sc->sc_state),
2666 g_raid3_device_state2str(state));
2667 sc->sc_state = state;
2668 }
2669 if (sc->sc_provider == NULL)
2670 g_raid3_launch_provider(sc);
2671 if (sc->sc_rootmount != NULL) {
2672 G_RAID3_DEBUG(1, "root_mount_rel[%u] %p", __LINE__,
2673 sc->sc_rootmount);
2674 root_mount_rel(sc->sc_rootmount);
2675 sc->sc_rootmount = NULL;
2676 }
2677 break;
2678 default:
2679 KASSERT(1 == 0, ("Wrong device state (%s, %s).", sc->sc_name,
2680 g_raid3_device_state2str(sc->sc_state)));
2681 break;
2682 }
2683 }
2684
2685 /*
2686 * Update disk state and device state if needed.
2687 */
2688 #define DISK_STATE_CHANGED() G_RAID3_DEBUG(1, \
2689 "Disk %s state changed from %s to %s (device %s).", \
2690 g_raid3_get_diskname(disk), \
2691 g_raid3_disk_state2str(disk->d_state), \
2692 g_raid3_disk_state2str(state), sc->sc_name)
2693 static int
2694 g_raid3_update_disk(struct g_raid3_disk *disk, u_int state)
2695 {
2696 struct g_raid3_softc *sc;
2697
2698 sc = disk->d_softc;
2699 sx_assert(&sc->sc_lock, SX_XLOCKED);
2700
2701 again:
2702 G_RAID3_DEBUG(3, "Changing disk %s state from %s to %s.",
2703 g_raid3_get_diskname(disk), g_raid3_disk_state2str(disk->d_state),
2704 g_raid3_disk_state2str(state));
2705 switch (state) {
2706 case G_RAID3_DISK_STATE_NEW:
2707 /*
2708 * Possible scenarios:
2709 * 1. A new disk arrives.
2710 */
2711 /* Previous state should be NONE. */
2712 KASSERT(disk->d_state == G_RAID3_DISK_STATE_NONE,
2713 ("Wrong disk state (%s, %s).", g_raid3_get_diskname(disk),
2714 g_raid3_disk_state2str(disk->d_state)));
2715 DISK_STATE_CHANGED();
2716
2717 disk->d_state = state;
2718 G_RAID3_DEBUG(1, "Device %s: provider %s detected.",
2719 sc->sc_name, g_raid3_get_diskname(disk));
2720 if (sc->sc_state == G_RAID3_DEVICE_STATE_STARTING)
2721 break;
2722 KASSERT(sc->sc_state == G_RAID3_DEVICE_STATE_DEGRADED ||
2723 sc->sc_state == G_RAID3_DEVICE_STATE_COMPLETE,
2724 ("Wrong device state (%s, %s, %s, %s).", sc->sc_name,
2725 g_raid3_device_state2str(sc->sc_state),
2726 g_raid3_get_diskname(disk),
2727 g_raid3_disk_state2str(disk->d_state)));
2728 state = g_raid3_determine_state(disk);
2729 if (state != G_RAID3_DISK_STATE_NONE)
2730 goto again;
2731 break;
2732 case G_RAID3_DISK_STATE_ACTIVE:
2733 /*
2734 * Possible scenarios:
2735 * 1. A new disk does not need synchronization.
2736 * 2. The synchronization process finished successfully.
2737 */
2738 KASSERT(sc->sc_state == G_RAID3_DEVICE_STATE_DEGRADED ||
2739 sc->sc_state == G_RAID3_DEVICE_STATE_COMPLETE,
2740 ("Wrong device state (%s, %s, %s, %s).", sc->sc_name,
2741 g_raid3_device_state2str(sc->sc_state),
2742 g_raid3_get_diskname(disk),
2743 g_raid3_disk_state2str(disk->d_state)));
2744 /* Previous state should be NEW or SYNCHRONIZING. */
2745 KASSERT(disk->d_state == G_RAID3_DISK_STATE_NEW ||
2746 disk->d_state == G_RAID3_DISK_STATE_SYNCHRONIZING,
2747 ("Wrong disk state (%s, %s).", g_raid3_get_diskname(disk),
2748 g_raid3_disk_state2str(disk->d_state)));
2749 DISK_STATE_CHANGED();
2750
2751 if (disk->d_state == G_RAID3_DISK_STATE_SYNCHRONIZING) {
2752 disk->d_flags &= ~G_RAID3_DISK_FLAG_SYNCHRONIZING;
2753 disk->d_flags &= ~G_RAID3_DISK_FLAG_FORCE_SYNC;
2754 g_raid3_sync_stop(sc, 0);
2755 }
2756 disk->d_state = state;
2757 disk->d_sync.ds_offset = 0;
2758 disk->d_sync.ds_offset_done = 0;
2759 g_raid3_update_idle(sc, disk);
2760 g_raid3_update_metadata(disk);
2761 G_RAID3_DEBUG(1, "Device %s: provider %s activated.",
2762 sc->sc_name, g_raid3_get_diskname(disk));
2763 break;
2764 case G_RAID3_DISK_STATE_STALE:
2765 /*
2766 * Possible scenarios:
2767 * 1. A stale disk was connected.
2768 */
2769 /* Previous state should be NEW. */
2770 KASSERT(disk->d_state == G_RAID3_DISK_STATE_NEW,
2771 ("Wrong disk state (%s, %s).", g_raid3_get_diskname(disk),
2772 g_raid3_disk_state2str(disk->d_state)));
2773 KASSERT(sc->sc_state == G_RAID3_DEVICE_STATE_DEGRADED ||
2774 sc->sc_state == G_RAID3_DEVICE_STATE_COMPLETE,
2775 ("Wrong device state (%s, %s, %s, %s).", sc->sc_name,
2776 g_raid3_device_state2str(sc->sc_state),
2777 g_raid3_get_diskname(disk),
2778 g_raid3_disk_state2str(disk->d_state)));
2779 /*
2780 * The STALE state is only possible if the device is marked
2781 * NOAUTOSYNC.
2782 */
2783 KASSERT((sc->sc_flags & G_RAID3_DEVICE_FLAG_NOAUTOSYNC) != 0,
2784 ("Wrong device state (%s, %s, %s, %s).", sc->sc_name,
2785 g_raid3_device_state2str(sc->sc_state),
2786 g_raid3_get_diskname(disk),
2787 g_raid3_disk_state2str(disk->d_state)));
2788 DISK_STATE_CHANGED();
2789
2790 disk->d_flags &= ~G_RAID3_DISK_FLAG_DIRTY;
2791 disk->d_state = state;
2792 g_raid3_update_metadata(disk);
2793 G_RAID3_DEBUG(0, "Device %s: provider %s is stale.",
2794 sc->sc_name, g_raid3_get_diskname(disk));
2795 break;
2796 case G_RAID3_DISK_STATE_SYNCHRONIZING:
2797 /*
2798 * Possible scenarios:
2799 * 1. A disk which needs synchronization was connected.
2800 */
2801 /* Previous state should be NEW. */
2802 KASSERT(disk->d_state == G_RAID3_DISK_STATE_NEW,
2803 ("Wrong disk state (%s, %s).", g_raid3_get_diskname(disk),
2804 g_raid3_disk_state2str(disk->d_state)));
2805 KASSERT(sc->sc_state == G_RAID3_DEVICE_STATE_DEGRADED ||
2806 sc->sc_state == G_RAID3_DEVICE_STATE_COMPLETE,
2807 ("Wrong device state (%s, %s, %s, %s).", sc->sc_name,
2808 g_raid3_device_state2str(sc->sc_state),
2809 g_raid3_get_diskname(disk),
2810 g_raid3_disk_state2str(disk->d_state)));
2811 DISK_STATE_CHANGED();
2812
2813 if (disk->d_state == G_RAID3_DISK_STATE_NEW)
2814 disk->d_flags &= ~G_RAID3_DISK_FLAG_DIRTY;
2815 disk->d_state = state;
2816 if (sc->sc_provider != NULL) {
2817 g_raid3_sync_start(sc);
2818 g_raid3_update_metadata(disk);
2819 }
2820 break;
2821 case G_RAID3_DISK_STATE_DISCONNECTED:
2822 /*
2823 * Possible scenarios:
2824 * 1. The device wasn't running yet, but a disk disappeared.
2825 * 2. A disk was active and disappeared.
2826 * 3. A disk disappeared during the synchronization process.
2827 */
2828 if (sc->sc_state == G_RAID3_DEVICE_STATE_DEGRADED ||
2829 sc->sc_state == G_RAID3_DEVICE_STATE_COMPLETE) {
2830 /*
2831 * Previous state should be ACTIVE, STALE or
2832 * SYNCHRONIZING.
2833 */
2834 KASSERT(disk->d_state == G_RAID3_DISK_STATE_ACTIVE ||
2835 disk->d_state == G_RAID3_DISK_STATE_STALE ||
2836 disk->d_state == G_RAID3_DISK_STATE_SYNCHRONIZING,
2837 ("Wrong disk state (%s, %s).",
2838 g_raid3_get_diskname(disk),
2839 g_raid3_disk_state2str(disk->d_state)));
2840 } else if (sc->sc_state == G_RAID3_DEVICE_STATE_STARTING) {
2841 /* Previous state should be NEW. */
2842 KASSERT(disk->d_state == G_RAID3_DISK_STATE_NEW,
2843 ("Wrong disk state (%s, %s).",
2844 g_raid3_get_diskname(disk),
2845 g_raid3_disk_state2str(disk->d_state)));
2846 /*
2847 * Cancel the pending syncid bump if a disk disappeared in
2848 * the STARTING state.
2849 */
2850 if ((sc->sc_bump_id & G_RAID3_BUMP_SYNCID) != 0)
2851 sc->sc_bump_id &= ~G_RAID3_BUMP_SYNCID;
2852 #ifdef INVARIANTS
2853 } else {
2854 KASSERT(1 == 0, ("Wrong device state (%s, %s, %s, %s).",
2855 sc->sc_name,
2856 g_raid3_device_state2str(sc->sc_state),
2857 g_raid3_get_diskname(disk),
2858 g_raid3_disk_state2str(disk->d_state)));
2859 #endif
2860 }
2861 DISK_STATE_CHANGED();
2862 G_RAID3_DEBUG(0, "Device %s: provider %s disconnected.",
2863 sc->sc_name, g_raid3_get_diskname(disk));
2864
2865 g_raid3_destroy_disk(disk);
2866 break;
2867 default:
2868 KASSERT(1 == 0, ("Unknown state (%u).", state));
2869 break;
2870 }
2871 return (0);
2872 }
2873 #undef DISK_STATE_CHANGED
2874
2875 int
2876 g_raid3_read_metadata(struct g_consumer *cp, struct g_raid3_metadata *md)
2877 {
2878 struct g_provider *pp;
2879 u_char *buf;
2880 int error;
2881
2882 g_topology_assert();
2883
2884 error = g_access(cp, 1, 0, 0);
2885 if (error != 0)
2886 return (error);
2887 pp = cp->provider;
2888 g_topology_unlock();
2889 /* Metadata is stored in the last sector. */
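/*
 * For example, a component provider of 1 GB (1073741824 bytes) with
 * 512-byte sectors keeps its metadata block at byte offset
 * 1073741824 - 512 = 1073741312.
 */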
2890 buf = g_read_data(cp, pp->mediasize - pp->sectorsize, pp->sectorsize,
2891 &error);
2892 g_topology_lock();
2893 g_access(cp, -1, 0, 0);
2894 if (buf == NULL) {
2895 G_RAID3_DEBUG(1, "Cannot read metadata from %s (error=%d).",
2896 cp->provider->name, error);
2897 return (error);
2898 }
2899
2900 /* Decode metadata. */
2901 error = raid3_metadata_decode(buf, md);
2902 g_free(buf);
2903 if (strcmp(md->md_magic, G_RAID3_MAGIC) != 0)
2904 return (EINVAL);
2905 if (md->md_version > G_RAID3_VERSION) {
2906 G_RAID3_DEBUG(0,
2907 "Kernel module is too old to handle metadata from %s.",
2908 cp->provider->name);
2909 return (EINVAL);
2910 }
2911 if (error != 0) {
2912 G_RAID3_DEBUG(1, "MD5 metadata hash mismatch for provider %s.",
2913 cp->provider->name);
2914 return (error);
2915 }
2916 if (md->md_sectorsize > MAXPHYS) {
2917 G_RAID3_DEBUG(0, "The blocksize is too big.");
2918 return (EINVAL);
2919 }
2920
2921 return (0);
2922 }
2923
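/*
 * Sanity-check metadata read from a component against the device it is
 * being added to.  As a rough illustration (ignoring the metadata
 * sector itself), a 3-component device built from providers with
 * 512-byte sectors advertises a sector size of 2 * 512 = 1024 bytes
 * and a media size of roughly twice the usable size of the smallest
 * component, and every component must agree on those values.
 */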
2924 static int
2925 g_raid3_check_metadata(struct g_raid3_softc *sc, struct g_provider *pp,
2926 struct g_raid3_metadata *md)
2927 {
2928
2929 if (md->md_no >= sc->sc_ndisks) {
2930 G_RAID3_DEBUG(1, "Invalid disk %s number (no=%u), skipping.",
2931 pp->name, md->md_no);
2932 return (EINVAL);
2933 }
2934 if (sc->sc_disks[md->md_no].d_state != G_RAID3_DISK_STATE_NODISK) {
2935 G_RAID3_DEBUG(1, "Disk %s (no=%u) already exists, skipping.",
2936 pp->name, md->md_no);
2937 return (EEXIST);
2938 }
2939 if (md->md_all != sc->sc_ndisks) {
2940 G_RAID3_DEBUG(1,
2941 "Invalid '%s' field on disk %s (device %s), skipping.",
2942 "md_all", pp->name, sc->sc_name);
2943 return (EINVAL);
2944 }
2945 if ((md->md_mediasize % md->md_sectorsize) != 0) {
2946 G_RAID3_DEBUG(1, "Invalid metadata (mediasize %% sectorsize != "
2947 "0) on disk %s (device %s), skipping.", pp->name,
2948 sc->sc_name);
2949 return (EINVAL);
2950 }
2951 if (md->md_mediasize != sc->sc_mediasize) {
2952 G_RAID3_DEBUG(1,
2953 "Invalid '%s' field on disk %s (device %s), skipping.",
2954 "md_mediasize", pp->name, sc->sc_name);
2955 return (EINVAL);
2956 }
2957 if ((md->md_mediasize % (sc->sc_ndisks - 1)) != 0) {
2958 G_RAID3_DEBUG(1,
2959 "Invalid '%s' field on disk %s (device %s), skipping.",
2960 "md_mediasize", pp->name, sc->sc_name);
2961 return (EINVAL);
2962 }
2963 if ((sc->sc_mediasize / (sc->sc_ndisks - 1)) > pp->mediasize) {
2964 G_RAID3_DEBUG(1,
2965 "Invalid size of disk %s (device %s), skipping.", pp->name,
2966 sc->sc_name);
2967 return (EINVAL);
2968 }
2969 if ((md->md_sectorsize / pp->sectorsize) < sc->sc_ndisks - 1) {
2970 G_RAID3_DEBUG(1,
2971 "Invalid '%s' field on disk %s (device %s), skipping.",
2972 "md_sectorsize", pp->name, sc->sc_name);
2973 return (EINVAL);
2974 }
2975 if (md->md_sectorsize != sc->sc_sectorsize) {
2976 G_RAID3_DEBUG(1,
2977 "Invalid '%s' field on disk %s (device %s), skipping.",
2978 "md_sectorsize", pp->name, sc->sc_name);
2979 return (EINVAL);
2980 }
2981 if ((sc->sc_sectorsize % pp->sectorsize) != 0) {
2982 G_RAID3_DEBUG(1,
2983 "Invalid sector size of disk %s (device %s), skipping.",
2984 pp->name, sc->sc_name);
2985 return (EINVAL);
2986 }
2987 if ((md->md_mflags & ~G_RAID3_DEVICE_FLAG_MASK) != 0) {
2988 G_RAID3_DEBUG(1,
2989 "Invalid device flags on disk %s (device %s), skipping.",
2990 pp->name, sc->sc_name);
2991 return (EINVAL);
2992 }
2993 if ((md->md_mflags & G_RAID3_DEVICE_FLAG_VERIFY) != 0 &&
2994 (md->md_mflags & G_RAID3_DEVICE_FLAG_ROUND_ROBIN) != 0) {
2995 /*
2996 * VERIFY and ROUND-ROBIN options are mutually exclusive.
2997 */
2998 G_RAID3_DEBUG(1, "Both VERIFY and ROUND-ROBIN flags exist on "
2999 "disk %s (device %s), skipping.", pp->name, sc->sc_name);
3000 return (EINVAL);
3001 }
3002 if ((md->md_dflags & ~G_RAID3_DISK_FLAG_MASK) != 0) {
3003 G_RAID3_DEBUG(1,
3004 "Invalid disk flags on disk %s (device %s), skipping.",
3005 pp->name, sc->sc_name);
3006 return (EINVAL);
3007 }
3008 return (0);
3009 }
3010
3011 int
3012 g_raid3_add_disk(struct g_raid3_softc *sc, struct g_provider *pp,
3013 struct g_raid3_metadata *md)
3014 {
3015 struct g_raid3_disk *disk;
3016 int error;
3017
3018 g_topology_assert_not();
3019 G_RAID3_DEBUG(2, "Adding disk %s.", pp->name);
3020
3021 error = g_raid3_check_metadata(sc, pp, md);
3022 if (error != 0)
3023 return (error);
3024 if (sc->sc_state != G_RAID3_DEVICE_STATE_STARTING &&
3025 md->md_genid < sc->sc_genid) {
3026 G_RAID3_DEBUG(0, "Component %s (device %s) broken, skipping.",
3027 pp->name, sc->sc_name);
3028 return (EINVAL);
3029 }
3030 disk = g_raid3_init_disk(sc, pp, md, &error);
3031 if (disk == NULL)
3032 return (error);
3033 error = g_raid3_event_send(disk, G_RAID3_DISK_STATE_NEW,
3034 G_RAID3_EVENT_WAIT);
3035 if (error != 0)
3036 return (error);
3037 if (md->md_version < G_RAID3_VERSION) {
3038 G_RAID3_DEBUG(0, "Upgrading metadata on %s (v%d->v%d).",
3039 pp->name, md->md_version, G_RAID3_VERSION);
3040 g_raid3_update_metadata(disk);
3041 }
3042 return (0);
3043 }
3044
3045 static void
3046 g_raid3_destroy_delayed(void *arg, int flag)
3047 {
3048 struct g_raid3_softc *sc;
3049 int error;
3050
3051 if (flag == EV_CANCEL) {
3052 G_RAID3_DEBUG(1, "Destroying canceled.");
3053 return;
3054 }
3055 sc = arg;
3056 g_topology_unlock();
3057 sx_xlock(&sc->sc_lock);
3058 KASSERT((sc->sc_flags & G_RAID3_DEVICE_FLAG_DESTROY) == 0,
3059 ("DESTROY flag set on %s.", sc->sc_name));
3060 KASSERT((sc->sc_flags & G_RAID3_DEVICE_FLAG_DESTROYING) != 0,
3061 ("DESTROYING flag not set on %s.", sc->sc_name));
3062 G_RAID3_DEBUG(0, "Destroying %s (delayed).", sc->sc_name);
3063 error = g_raid3_destroy(sc, G_RAID3_DESTROY_SOFT);
3064 if (error != 0) {
3065 G_RAID3_DEBUG(0, "Cannot destroy %s.", sc->sc_name);
3066 sx_xunlock(&sc->sc_lock);
3067 }
3068 g_topology_lock();
3069 }
3070
3071 static int
3072 g_raid3_access(struct g_provider *pp, int acr, int acw, int ace)
3073 {
3074 struct g_raid3_softc *sc;
3075 int dcr, dcw, dce, error = 0;
3076
3077 g_topology_assert();
3078 G_RAID3_DEBUG(2, "Access request for %s: r%dw%de%d.", pp->name, acr,
3079 acw, ace);
3080
3081 sc = pp->geom->softc;
3082 if (sc == NULL && acr <= 0 && acw <= 0 && ace <= 0)
3083 return (0);
3084 KASSERT(sc != NULL, ("NULL softc (provider=%s).", pp->name));
3085
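/*
 * dcr, dcw and dce are the provider's access counts after this request
 * is applied; they are used below to detect the last writer close and
 * the last close of the provider.
 */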
3086 dcr = pp->acr + acr;
3087 dcw = pp->acw + acw;
3088 dce = pp->ace + ace;
3089
3090 g_topology_unlock();
3091 sx_xlock(&sc->sc_lock);
3092 if ((sc->sc_flags & G_RAID3_DEVICE_FLAG_DESTROY) != 0 ||
3093 g_raid3_ndisks(sc, G_RAID3_DISK_STATE_ACTIVE) < sc->sc_ndisks - 1) {
3094 if (acr > 0 || acw > 0 || ace > 0)
3095 error = ENXIO;
3096 goto end;
3097 }
3098 if (dcw == 0 && !sc->sc_idle)
3099 g_raid3_idle(sc, dcw);
3100 if ((sc->sc_flags & G_RAID3_DEVICE_FLAG_DESTROYING) != 0) {
3101 if (acr > 0 || acw > 0 || ace > 0) {
3102 error = ENXIO;
3103 goto end;
3104 }
3105 if (dcr == 0 && dcw == 0 && dce == 0) {
3106 g_post_event(g_raid3_destroy_delayed, sc, M_WAITOK,
3107 sc, NULL);
3108 }
3109 }
3110 end:
3111 sx_xunlock(&sc->sc_lock);
3112 g_topology_lock();
3113 return (error);
3114 }
3115
3116 static struct g_geom *
3117 g_raid3_create(struct g_class *mp, const struct g_raid3_metadata *md)
3118 {
3119 struct g_raid3_softc *sc;
3120 struct g_geom *gp;
3121 int error, timeout;
3122 u_int n;
3123
3124 g_topology_assert();
3125 G_RAID3_DEBUG(1, "Creating device %s (id=%u).", md->md_name, md->md_id);
3126
3127 /* One disk is the minimum. */
3128 if (md->md_all < 1)
3129 return (NULL);
3130 /*
3131 * Action geom.
3132 */
3133 gp = g_new_geomf(mp, "%s", md->md_name);
3134 sc = malloc(sizeof(*sc), M_RAID3, M_WAITOK | M_ZERO);
3135 sc->sc_disks = malloc(sizeof(struct g_raid3_disk) * md->md_all, M_RAID3,
3136 M_WAITOK | M_ZERO);
3137 gp->start = g_raid3_start;
3138 gp->orphan = g_raid3_orphan;
3139 gp->access = g_raid3_access;
3140 gp->dumpconf = g_raid3_dumpconf;
3141
3142 sc->sc_id = md->md_id;
3143 sc->sc_mediasize = md->md_mediasize;
3144 sc->sc_sectorsize = md->md_sectorsize;
3145 sc->sc_ndisks = md->md_all;
3146 sc->sc_round_robin = 0;
3147 sc->sc_flags = md->md_mflags;
3148 sc->sc_bump_id = 0;
3149 sc->sc_idle = 1;
3150 sc->sc_last_write = time_uptime;
3151 sc->sc_writes = 0;
3152 for (n = 0; n < sc->sc_ndisks; n++) {
3153 sc->sc_disks[n].d_softc = sc;
3154 sc->sc_disks[n].d_no = n;
3155 sc->sc_disks[n].d_state = G_RAID3_DISK_STATE_NODISK;
3156 }
3157 sx_init(&sc->sc_lock, "graid3:lock");
3158 bioq_init(&sc->sc_queue);
3159 mtx_init(&sc->sc_queue_mtx, "graid3:queue", NULL, MTX_DEF);
3160 bioq_init(&sc->sc_regular_delayed);
3161 bioq_init(&sc->sc_inflight);
3162 bioq_init(&sc->sc_sync_delayed);
3163 TAILQ_INIT(&sc->sc_events);
3164 mtx_init(&sc->sc_events_mtx, "graid3:events", NULL, MTX_DEF);
3165 callout_init(&sc->sc_callout, CALLOUT_MPSAFE);
3166 sc->sc_state = G_RAID3_DEVICE_STATE_STARTING;
3167 gp->softc = sc;
3168 sc->sc_geom = gp;
3169 sc->sc_provider = NULL;
3170 /*
3171 * Synchronization geom.
3172 */
3173 gp = g_new_geomf(mp, "%s.sync", md->md_name);
3174 gp->softc = sc;
3175 gp->orphan = g_raid3_orphan;
3176 sc->sc_sync.ds_geom = gp;
3177
3178 if (!g_raid3_use_malloc) {
3179 sc->sc_zones[G_RAID3_ZONE_64K].sz_zone = uma_zcreate("gr3:64k",
3180 65536, g_raid3_uma_ctor, g_raid3_uma_dtor, NULL, NULL,
3181 UMA_ALIGN_PTR, 0);
3182 sc->sc_zones[G_RAID3_ZONE_64K].sz_inuse = 0;
3183 sc->sc_zones[G_RAID3_ZONE_64K].sz_max = g_raid3_n64k;
3184 sc->sc_zones[G_RAID3_ZONE_64K].sz_requested =
3185 sc->sc_zones[G_RAID3_ZONE_64K].sz_failed = 0;
3186 sc->sc_zones[G_RAID3_ZONE_16K].sz_zone = uma_zcreate("gr3:16k",
3187 16384, g_raid3_uma_ctor, g_raid3_uma_dtor, NULL, NULL,
3188 UMA_ALIGN_PTR, 0);
3189 sc->sc_zones[G_RAID3_ZONE_16K].sz_inuse = 0;
3190 sc->sc_zones[G_RAID3_ZONE_16K].sz_max = g_raid3_n16k;
3191 sc->sc_zones[G_RAID3_ZONE_16K].sz_requested =
3192 sc->sc_zones[G_RAID3_ZONE_16K].sz_failed = 0;
3193 sc->sc_zones[G_RAID3_ZONE_4K].sz_zone = uma_zcreate("gr3:4k",
3194 4096, g_raid3_uma_ctor, g_raid3_uma_dtor, NULL, NULL,
3195 UMA_ALIGN_PTR, 0);
3196 sc->sc_zones[G_RAID3_ZONE_4K].sz_inuse = 0;
3197 sc->sc_zones[G_RAID3_ZONE_4K].sz_max = g_raid3_n4k;
3198 sc->sc_zones[G_RAID3_ZONE_4K].sz_requested =
3199 sc->sc_zones[G_RAID3_ZONE_4K].sz_failed = 0;
3200 }
3201
3202 error = kproc_create(g_raid3_worker, sc, &sc->sc_worker, 0, 0,
3203 "g_raid3 %s", md->md_name);
3204 if (error != 0) {
3205 G_RAID3_DEBUG(1, "Cannot create kernel thread for %s.",
3206 sc->sc_name);
3207 if (!g_raid3_use_malloc) {
3208 uma_zdestroy(sc->sc_zones[G_RAID3_ZONE_64K].sz_zone);
3209 uma_zdestroy(sc->sc_zones[G_RAID3_ZONE_16K].sz_zone);
3210 uma_zdestroy(sc->sc_zones[G_RAID3_ZONE_4K].sz_zone);
3211 }
3212 g_destroy_geom(sc->sc_sync.ds_geom);
3213 mtx_destroy(&sc->sc_events_mtx);
3214 mtx_destroy(&sc->sc_queue_mtx);
3215 sx_destroy(&sc->sc_lock);
3216 g_destroy_geom(sc->sc_geom);
3217 free(sc->sc_disks, M_RAID3);
3218 free(sc, M_RAID3);
3219 return (NULL);
3220 }
3221
3222 G_RAID3_DEBUG(1, "Device %s created (%u components, id=%u).",
3223 sc->sc_name, sc->sc_ndisks, sc->sc_id);
3224
3225 sc->sc_rootmount = root_mount_hold("GRAID3");
3226 G_RAID3_DEBUG(1, "root_mount_hold %p", sc->sc_rootmount);
3227
3228 /*
3229 * Run timeout.
3230 */
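/*
 * If some components have not shown up when the timeout fires,
 * g_raid3_go() forces the device to start anyway: with exactly one
 * component missing it comes up DEGRADED, with more missing it is
 * destroyed (see the STARTING case in g_raid3_update_device()).
 */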
3231 timeout = atomic_load_acq_int(&g_raid3_timeout);
3232 callout_reset(&sc->sc_callout, timeout * hz, g_raid3_go, sc);
3233 return (sc->sc_geom);
3234 }
3235
3236 int
3237 g_raid3_destroy(struct g_raid3_softc *sc, int how)
3238 {
3239 struct g_provider *pp;
3240
3241 g_topology_assert_not();
3242 if (sc == NULL)
3243 return (ENXIO);
3244 sx_assert(&sc->sc_lock, SX_XLOCKED);
3245
3246 pp = sc->sc_provider;
3247 if (pp != NULL && (pp->acr != 0 || pp->acw != 0 || pp->ace != 0)) {
3248 switch (how) {
3249 case G_RAID3_DESTROY_SOFT:
3250 G_RAID3_DEBUG(1,
3251 "Device %s is still open (r%dw%de%d).", pp->name,
3252 pp->acr, pp->acw, pp->ace);
3253 return (EBUSY);
3254 case G_RAID3_DESTROY_DELAYED:
3255 G_RAID3_DEBUG(1,
3256 "Device %s will be destroyed on last close.",
3257 pp->name);
3258 if (sc->sc_syncdisk != NULL)
3259 g_raid3_sync_stop(sc, 1);
3260 sc->sc_flags |= G_RAID3_DEVICE_FLAG_DESTROYING;
3261 return (EBUSY);
3262 case G_RAID3_DESTROY_HARD:
3263 G_RAID3_DEBUG(1, "Device %s is still open, so it "
3264 "can't be definitely removed.", pp->name);
3265 break;
3266 }
3267 }
3268
3269 g_topology_lock();
3270 if (sc->sc_geom->softc == NULL) {
3271 g_topology_unlock();
3272 return (0);
3273 }
3274 sc->sc_geom->softc = NULL;
3275 sc->sc_sync.ds_geom->softc = NULL;
3276 g_topology_unlock();
3277
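/*
 * Ask the worker thread to finish the teardown: with DESTROY and WAIT
 * set below, the worker's g_raid3_try_destroy() wakes us up and clears
 * sc_worker before the thread exits; we then wait for sc_worker to
 * become NULL and free the device here.
 */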
3278 sc->sc_flags |= G_RAID3_DEVICE_FLAG_DESTROY;
3279 sc->sc_flags |= G_RAID3_DEVICE_FLAG_WAIT;
3280 G_RAID3_DEBUG(4, "%s: Waking up %p.", __func__, sc);
3281 sx_xunlock(&sc->sc_lock);
3282 mtx_lock(&sc->sc_queue_mtx);
3283 wakeup(sc);
3284 wakeup(&sc->sc_queue);
3285 mtx_unlock(&sc->sc_queue_mtx);
3286 G_RAID3_DEBUG(4, "%s: Sleeping %p.", __func__, &sc->sc_worker);
3287 while (sc->sc_worker != NULL)
3288 tsleep(&sc->sc_worker, PRIBIO, "r3:destroy", hz / 5);
3289 G_RAID3_DEBUG(4, "%s: Woken up %p.", __func__, &sc->sc_worker);
3290 sx_xlock(&sc->sc_lock);
3291 g_raid3_destroy_device(sc);
3292 free(sc->sc_disks, M_RAID3);
3293 free(sc, M_RAID3);
3294 return (0);
3295 }
3296
3297 static void
3298 g_raid3_taste_orphan(struct g_consumer *cp)
3299 {
3300
3301 KASSERT(1 == 0, ("%s called while tasting %s.", __func__,
3302 cp->provider->name));
3303 }
3304
3305 static struct g_geom *
3306 g_raid3_taste(struct g_class *mp, struct g_provider *pp, int flags __unused)
3307 {
3308 struct g_raid3_metadata md;
3309 struct g_raid3_softc *sc;
3310 struct g_consumer *cp;
3311 struct g_geom *gp;
3312 int error;
3313
3314 g_topology_assert();
3315 g_trace(G_T_TOPOLOGY, "%s(%s, %s)", __func__, mp->name, pp->name);
3316 G_RAID3_DEBUG(2, "Tasting %s.", pp->name);
3317
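/*
 * Create a throw-away geom and consumer just to read the metadata
 * block from the tasted provider, then tear them down again before
 * deciding whether the provider belongs to an existing device or a
 * new one has to be created.
 */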
3318 gp = g_new_geomf(mp, "raid3:taste");
3319 /* This orphan function should never be called. */
3320 gp->orphan = g_raid3_taste_orphan;
3321 cp = g_new_consumer(gp);
3322 g_attach(cp, pp);
3323 error = g_raid3_read_metadata(cp, &md);
3324 g_detach(cp);
3325 g_destroy_consumer(cp);
3326 g_destroy_geom(gp);
3327 if (error != 0)
3328 return (NULL);
3329 gp = NULL;
3330
3331 if (md.md_provider[0] != '\0' &&
3332 !g_compare_names(md.md_provider, pp->name))
3333 return (NULL);
3334 if (md.md_provsize != 0 && md.md_provsize != pp->mediasize)
3335 return (NULL);
3336 if (g_raid3_debug >= 2)
3337 raid3_metadata_dump(&md);
3338
3339 /*
3340 * Let's check if the device already exists.
3341 */
3342 sc = NULL;
3343 LIST_FOREACH(gp, &mp->geom, geom) {
3344 sc = gp->softc;
3345 if (sc == NULL)
3346 continue;
3347 if (sc->sc_sync.ds_geom == gp)
3348 continue;
3349 if (strcmp(md.md_name, sc->sc_name) != 0)
3350 continue;
3351 if (md.md_id != sc->sc_id) {
3352 G_RAID3_DEBUG(0, "Device %s already configured.",
3353 sc->sc_name);
3354 return (NULL);
3355 }
3356 break;
3357 }
3358 if (gp == NULL) {
3359 gp = g_raid3_create(mp, &md);
3360 if (gp == NULL) {
3361 G_RAID3_DEBUG(0, "Cannot create device %s.",
3362 md.md_name);
3363 return (NULL);
3364 }
3365 sc = gp->softc;
3366 }
3367 G_RAID3_DEBUG(1, "Adding disk %s to %s.", pp->name, gp->name);
3368 g_topology_unlock();
3369 sx_xlock(&sc->sc_lock);
3370 error = g_raid3_add_disk(sc, pp, &md);
3371 if (error != 0) {
3372 G_RAID3_DEBUG(0, "Cannot add disk %s to %s (error=%d).",
3373 pp->name, gp->name, error);
3374 if (g_raid3_ndisks(sc, G_RAID3_DISK_STATE_NODISK) ==
3375 sc->sc_ndisks) {
3376 g_cancel_event(sc);
3377 g_raid3_destroy(sc, G_RAID3_DESTROY_HARD);
3378 g_topology_lock();
3379 return (NULL);
3380 }
3381 gp = NULL;
3382 }
3383 sx_xunlock(&sc->sc_lock);
3384 g_topology_lock();
3385 return (gp);
3386 }
3387
3388 static int
3389 g_raid3_destroy_geom(struct gctl_req *req __unused, struct g_class *mp __unused,
3390 struct g_geom *gp)
3391 {
3392 struct g_raid3_softc *sc;
3393 int error;
3394
3395 g_topology_unlock();
3396 sc = gp->softc;
3397 sx_xlock(&sc->sc_lock);
3398 g_cancel_event(sc);
3399 error = g_raid3_destroy(gp->softc, G_RAID3_DESTROY_SOFT);
3400 if (error != 0)
3401 sx_xunlock(&sc->sc_lock);
3402 g_topology_lock();
3403 return (error);
3404 }
3405
3406 static void
3407 g_raid3_dumpconf(struct sbuf *sb, const char *indent, struct g_geom *gp,
3408 struct g_consumer *cp, struct g_provider *pp)
3409 {
3410 struct g_raid3_softc *sc;
3411
3412 g_topology_assert();
3413
3414 sc = gp->softc;
3415 if (sc == NULL)
3416 return;
3417 /* Skip synchronization geom. */
3418 if (gp == sc->sc_sync.ds_geom)
3419 return;
3420 if (pp != NULL) {
3421 /* Nothing here. */
3422 } else if (cp != NULL) {
3423 struct g_raid3_disk *disk;
3424
3425 disk = cp->private;
3426 if (disk == NULL)
3427 return;
3428 g_topology_unlock();
3429 sx_xlock(&sc->sc_lock);
3430 sbuf_printf(sb, "%s<Type>", indent);
3431 if (disk->d_no == sc->sc_ndisks - 1)
3432 sbuf_printf(sb, "PARITY");
3433 else
3434 sbuf_printf(sb, "DATA");
3435 sbuf_printf(sb, "</Type>\n");
3436 sbuf_printf(sb, "%s<Number>%u</Number>\n", indent,
3437 (u_int)disk->d_no);
3438 if (disk->d_state == G_RAID3_DISK_STATE_SYNCHRONIZING) {
3439 sbuf_printf(sb, "%s<Synchronized>", indent);
3440 if (disk->d_sync.ds_offset == 0)
3441 sbuf_printf(sb, "0%%");
3442 else {
3443 sbuf_printf(sb, "%u%%",
3444 (u_int)((disk->d_sync.ds_offset * 100) /
3445 (sc->sc_mediasize / (sc->sc_ndisks - 1))));
3446 }
3447 sbuf_printf(sb, "</Synchronized>\n");
3448 }
3449 sbuf_printf(sb, "%s<SyncID>%u</SyncID>\n", indent,
3450 disk->d_sync.ds_syncid);
3451 sbuf_printf(sb, "%s<GenID>%u</GenID>\n", indent, disk->d_genid);
3452 sbuf_printf(sb, "%s<Flags>", indent);
3453 if (disk->d_flags == 0)
3454 sbuf_printf(sb, "NONE");
3455 else {
3456 int first = 1;
3457
3458 #define ADD_FLAG(flag, name) do { \
3459 if ((disk->d_flags & (flag)) != 0) { \
3460 if (!first) \
3461 sbuf_printf(sb, ", "); \
3462 else \
3463 first = 0; \
3464 sbuf_printf(sb, name); \
3465 } \
3466 } while (0)
3467 ADD_FLAG(G_RAID3_DISK_FLAG_DIRTY, "DIRTY");
3468 ADD_FLAG(G_RAID3_DISK_FLAG_HARDCODED, "HARDCODED");
3469 ADD_FLAG(G_RAID3_DISK_FLAG_SYNCHRONIZING,
3470 "SYNCHRONIZING");
3471 ADD_FLAG(G_RAID3_DISK_FLAG_FORCE_SYNC, "FORCE_SYNC");
3472 ADD_FLAG(G_RAID3_DISK_FLAG_BROKEN, "BROKEN");
3473 #undef ADD_FLAG
3474 }
3475 sbuf_printf(sb, "</Flags>\n");
3476 sbuf_printf(sb, "%s<State>%s</State>\n", indent,
3477 g_raid3_disk_state2str(disk->d_state));
3478 sx_xunlock(&sc->sc_lock);
3479 g_topology_lock();
3480 } else {
3481 g_topology_unlock();
3482 sx_xlock(&sc->sc_lock);
3483 if (!g_raid3_use_malloc) {
3484 sbuf_printf(sb,
3485 "%s<Zone4kRequested>%u</Zone4kRequested>\n", indent,
3486 sc->sc_zones[G_RAID3_ZONE_4K].sz_requested);
3487 sbuf_printf(sb,
3488 "%s<Zone4kFailed>%u</Zone4kFailed>\n", indent,
3489 sc->sc_zones[G_RAID3_ZONE_4K].sz_failed);
3490 sbuf_printf(sb,
3491 "%s<Zone16kRequested>%u</Zone16kRequested>\n", indent,
3492 sc->sc_zones[G_RAID3_ZONE_16K].sz_requested);
3493 sbuf_printf(sb,
3494 "%s<Zone16kFailed>%u</Zone16kFailed>\n", indent,
3495 sc->sc_zones[G_RAID3_ZONE_16K].sz_failed);
3496 sbuf_printf(sb,
3497 "%s<Zone64kRequested>%u</Zone64kRequested>\n", indent,
3498 sc->sc_zones[G_RAID3_ZONE_64K].sz_requested);
3499 sbuf_printf(sb,
3500 "%s<Zone64kFailed>%u</Zone64kFailed>\n", indent,
3501 sc->sc_zones[G_RAID3_ZONE_64K].sz_failed);
3502 }
3503 sbuf_printf(sb, "%s<ID>%u</ID>\n", indent, (u_int)sc->sc_id);
3504 sbuf_printf(sb, "%s<SyncID>%u</SyncID>\n", indent, sc->sc_syncid);
3505 sbuf_printf(sb, "%s<GenID>%u</GenID>\n", indent, sc->sc_genid);
3506 sbuf_printf(sb, "%s<Flags>", indent);
3507 if (sc->sc_flags == 0)
3508 sbuf_printf(sb, "NONE");
3509 else {
3510 int first = 1;
3511
3512 #define ADD_FLAG(flag, name) do { \
3513 if ((sc->sc_flags & (flag)) != 0) { \
3514 if (!first) \
3515 sbuf_printf(sb, ", "); \
3516 else \
3517 first = 0; \
3518 sbuf_printf(sb, name); \
3519 } \
3520 } while (0)
3521 ADD_FLAG(G_RAID3_DEVICE_FLAG_NOFAILSYNC, "NOFAILSYNC");
3522 ADD_FLAG(G_RAID3_DEVICE_FLAG_NOAUTOSYNC, "NOAUTOSYNC");
3523 ADD_FLAG(G_RAID3_DEVICE_FLAG_ROUND_ROBIN,
3524 "ROUND-ROBIN");
3525 ADD_FLAG(G_RAID3_DEVICE_FLAG_VERIFY, "VERIFY");
3526 #undef ADD_FLAG
3527 }
3528 sbuf_printf(sb, "</Flags>\n");
3529 sbuf_printf(sb, "%s<Components>%u</Components>\n", indent,
3530 sc->sc_ndisks);
3531 sbuf_printf(sb, "%s<State>%s</State>\n", indent,
3532 g_raid3_device_state2str(sc->sc_state));
3533 sx_xunlock(&sc->sc_lock);
3534 g_topology_lock();
3535 }
3536 }
3537
3538 static void
3539 g_raid3_shutdown_pre_sync(void *arg, int howto)
3540 {
3541 struct g_class *mp;
3542 struct g_geom *gp, *gp2;
3543 struct g_raid3_softc *sc;
3544 int error;
3545
3546 mp = arg;
3547 DROP_GIANT();
3548 g_topology_lock();
3549 LIST_FOREACH_SAFE(gp, &mp->geom, geom, gp2) {
3550 if ((sc = gp->softc) == NULL)
3551 continue;
3552 /* Skip synchronization geom. */
3553 if (gp == sc->sc_sync.ds_geom)
3554 continue;
3555 g_topology_unlock();
3556 sx_xlock(&sc->sc_lock);
3557 g_cancel_event(sc);
3558 error = g_raid3_destroy(sc, G_RAID3_DESTROY_DELAYED);
3559 if (error != 0)
3560 sx_xunlock(&sc->sc_lock);
3561 g_topology_lock();
3562 }
3563 g_topology_unlock();
3564 PICKUP_GIANT();
3565 }
3566
3567 static void
3568 g_raid3_init(struct g_class *mp)
3569 {
3570
3571 g_raid3_pre_sync = EVENTHANDLER_REGISTER(shutdown_pre_sync,
3572 g_raid3_shutdown_pre_sync, mp, SHUTDOWN_PRI_FIRST);
3573 if (g_raid3_pre_sync == NULL)
3574 G_RAID3_DEBUG(0, "Warning! Cannot register shutdown event.");
3575 }
3576
3577 static void
3578 g_raid3_fini(struct g_class *mp)
3579 {
3580
3581 if (g_raid3_pre_sync != NULL)
3582 EVENTHANDLER_DEREGISTER(shutdown_pre_sync, g_raid3_pre_sync);
3583 }
3584
3585 DECLARE_GEOM_CLASS(g_raid3_class, g_raid3);