/*-
 * Copyright (c) 2004-2006 Pawel Jakub Dawidek <pjd@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: releng/10.0/sys/geom/raid3/g_raid3.c 245444 2013-01-15 01:27:04Z mav $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/bio.h>
#include <sys/sbuf.h>
#include <sys/sysctl.h>
#include <sys/malloc.h>
#include <sys/eventhandler.h>
#include <vm/uma.h>
#include <geom/geom.h>
#include <sys/proc.h>
#include <sys/kthread.h>
#include <sys/sched.h>
#include <geom/raid3/g_raid3.h>

FEATURE(geom_raid3, "GEOM RAID-3 functionality");

static MALLOC_DEFINE(M_RAID3, "raid3_data", "GEOM_RAID3 Data");

SYSCTL_DECL(_kern_geom);
static SYSCTL_NODE(_kern_geom, OID_AUTO, raid3, CTLFLAG_RW, 0,
    "GEOM_RAID3 stuff");
u_int g_raid3_debug = 0;
TUNABLE_INT("kern.geom.raid3.debug", &g_raid3_debug);
SYSCTL_UINT(_kern_geom_raid3, OID_AUTO, debug, CTLFLAG_RW, &g_raid3_debug, 0,
    "Debug level");
static u_int g_raid3_timeout = 4;
TUNABLE_INT("kern.geom.raid3.timeout", &g_raid3_timeout);
SYSCTL_UINT(_kern_geom_raid3, OID_AUTO, timeout, CTLFLAG_RW, &g_raid3_timeout,
    0, "Time to wait on all raid3 components");
static u_int g_raid3_idletime = 5;
TUNABLE_INT("kern.geom.raid3.idletime", &g_raid3_idletime);
SYSCTL_UINT(_kern_geom_raid3, OID_AUTO, idletime, CTLFLAG_RW,
    &g_raid3_idletime, 0, "Mark components as clean when idling");
static u_int g_raid3_disconnect_on_failure = 1;
TUNABLE_INT("kern.geom.raid3.disconnect_on_failure",
    &g_raid3_disconnect_on_failure);
SYSCTL_UINT(_kern_geom_raid3, OID_AUTO, disconnect_on_failure, CTLFLAG_RW,
    &g_raid3_disconnect_on_failure, 0, "Disconnect component on I/O failure.");
static u_int g_raid3_syncreqs = 2;
TUNABLE_INT("kern.geom.raid3.sync_requests", &g_raid3_syncreqs);
SYSCTL_UINT(_kern_geom_raid3, OID_AUTO, sync_requests, CTLFLAG_RDTUN,
    &g_raid3_syncreqs, 0, "Parallel synchronization I/O requests.");
static u_int g_raid3_use_malloc = 0;
TUNABLE_INT("kern.geom.raid3.use_malloc", &g_raid3_use_malloc);
SYSCTL_UINT(_kern_geom_raid3, OID_AUTO, use_malloc, CTLFLAG_RDTUN,
    &g_raid3_use_malloc, 0, "Use malloc(9) instead of uma(9).");

static u_int g_raid3_n64k = 50;
TUNABLE_INT("kern.geom.raid3.n64k", &g_raid3_n64k);
SYSCTL_UINT(_kern_geom_raid3, OID_AUTO, n64k, CTLFLAG_RD, &g_raid3_n64k, 0,
    "Maximum number of 64kB allocations");
static u_int g_raid3_n16k = 200;
TUNABLE_INT("kern.geom.raid3.n16k", &g_raid3_n16k);
SYSCTL_UINT(_kern_geom_raid3, OID_AUTO, n16k, CTLFLAG_RD, &g_raid3_n16k, 0,
    "Maximum number of 16kB allocations");
static u_int g_raid3_n4k = 1200;
TUNABLE_INT("kern.geom.raid3.n4k", &g_raid3_n4k);
SYSCTL_UINT(_kern_geom_raid3, OID_AUTO, n4k, CTLFLAG_RD, &g_raid3_n4k, 0,
    "Maximum number of 4kB allocations");

static SYSCTL_NODE(_kern_geom_raid3, OID_AUTO, stat, CTLFLAG_RW, 0,
    "GEOM_RAID3 statistics");
static u_int g_raid3_parity_mismatch = 0;
SYSCTL_UINT(_kern_geom_raid3_stat, OID_AUTO, parity_mismatch, CTLFLAG_RD,
    &g_raid3_parity_mismatch, 0, "Number of failures in VERIFY mode");

#define	MSLEEP(ident, mtx, priority, wmesg, timeout)	do {		\
	G_RAID3_DEBUG(4, "%s: Sleeping %p.", __func__, (ident));	\
	msleep((ident), (mtx), (priority), (wmesg), (timeout));		\
	G_RAID3_DEBUG(4, "%s: Woken up %p.", __func__, (ident));	\
} while (0)

static eventhandler_tag g_raid3_post_sync = NULL;
static int g_raid3_shutdown = 0;

static int g_raid3_destroy_geom(struct gctl_req *req, struct g_class *mp,
    struct g_geom *gp);
static g_taste_t g_raid3_taste;
static void g_raid3_init(struct g_class *mp);
static void g_raid3_fini(struct g_class *mp);

struct g_class g_raid3_class = {
	.name = G_RAID3_CLASS_NAME,
	.version = G_VERSION,
	.ctlreq = g_raid3_config,
	.taste = g_raid3_taste,
	.destroy_geom = g_raid3_destroy_geom,
	.init = g_raid3_init,
	.fini = g_raid3_fini
};


static void g_raid3_destroy_provider(struct g_raid3_softc *sc);
static int g_raid3_update_disk(struct g_raid3_disk *disk, u_int state);
static void g_raid3_update_device(struct g_raid3_softc *sc, boolean_t force);
static void g_raid3_dumpconf(struct sbuf *sb, const char *indent,
    struct g_geom *gp, struct g_consumer *cp, struct g_provider *pp);
static void g_raid3_sync_stop(struct g_raid3_softc *sc, int type);
static int g_raid3_register_request(struct bio *pbp);
static void g_raid3_sync_release(struct g_raid3_softc *sc);


static const char *
g_raid3_disk_state2str(int state)
{

	switch (state) {
	case G_RAID3_DISK_STATE_NODISK:
		return ("NODISK");
	case G_RAID3_DISK_STATE_NONE:
		return ("NONE");
	case G_RAID3_DISK_STATE_NEW:
		return ("NEW");
	case G_RAID3_DISK_STATE_ACTIVE:
		return ("ACTIVE");
	case G_RAID3_DISK_STATE_STALE:
		return ("STALE");
	case G_RAID3_DISK_STATE_SYNCHRONIZING:
		return ("SYNCHRONIZING");
	case G_RAID3_DISK_STATE_DISCONNECTED:
		return ("DISCONNECTED");
	default:
		return ("INVALID");
	}
}

static const char *
g_raid3_device_state2str(int state)
{

	switch (state) {
	case G_RAID3_DEVICE_STATE_STARTING:
		return ("STARTING");
	case G_RAID3_DEVICE_STATE_DEGRADED:
		return ("DEGRADED");
	case G_RAID3_DEVICE_STATE_COMPLETE:
		return ("COMPLETE");
	default:
		return ("INVALID");
	}
}

const char *
g_raid3_get_diskname(struct g_raid3_disk *disk)
{

	if (disk->d_consumer == NULL || disk->d_consumer->provider == NULL)
		return ("[unknown]");
	return (disk->d_name);
}

static void *
g_raid3_alloc(struct g_raid3_softc *sc, size_t size, int flags)
{
	void *ptr;
	enum g_raid3_zones zone;

	if (g_raid3_use_malloc ||
	    (zone = g_raid3_zone(size)) == G_RAID3_NUM_ZONES)
		ptr = malloc(size, M_RAID3, flags);
	else {
		ptr = uma_zalloc_arg(sc->sc_zones[zone].sz_zone,
		    &sc->sc_zones[zone], flags);
		sc->sc_zones[zone].sz_requested++;
		if (ptr == NULL)
			sc->sc_zones[zone].sz_failed++;
	}
	return (ptr);
}

static void
g_raid3_free(struct g_raid3_softc *sc, void *ptr, size_t size)
{
	enum g_raid3_zones zone;

	if (g_raid3_use_malloc ||
	    (zone = g_raid3_zone(size)) == G_RAID3_NUM_ZONES)
		free(ptr, M_RAID3);
	else {
		uma_zfree_arg(sc->sc_zones[zone].sz_zone,
		    ptr, &sc->sc_zones[zone]);
	}
}

static int
g_raid3_uma_ctor(void *mem, int size, void *arg, int flags)
{
	struct g_raid3_zone *sz = arg;

	if (sz->sz_max > 0 && sz->sz_inuse == sz->sz_max)
		return (ENOMEM);
	sz->sz_inuse++;
	return (0);
}

static void
g_raid3_uma_dtor(void *mem, int size, void *arg)
{
	struct g_raid3_zone *sz = arg;

	sz->sz_inuse--;
}

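/*
 * XOR the source buffer into the destination buffer 64 bits at a time.
 * The loop is unrolled sixteen times, so sizes must be multiples of
 * 128 bytes (asserted below).
 */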
#define	g_raid3_xor(src, dst, size)					\
	_g_raid3_xor((uint64_t *)(src),					\
	    (uint64_t *)(dst), (size_t)size)
static void
_g_raid3_xor(uint64_t *src, uint64_t *dst, size_t size)
{

	KASSERT((size % 128) == 0, ("Invalid size: %zu.", size));
	for (; size > 0; size -= 128) {
		*dst++ ^= (*src++);
		*dst++ ^= (*src++);
		*dst++ ^= (*src++);
		*dst++ ^= (*src++);
		*dst++ ^= (*src++);
		*dst++ ^= (*src++);
		*dst++ ^= (*src++);
		*dst++ ^= (*src++);
		*dst++ ^= (*src++);
		*dst++ ^= (*src++);
		*dst++ ^= (*src++);
		*dst++ ^= (*src++);
		*dst++ ^= (*src++);
		*dst++ ^= (*src++);
		*dst++ ^= (*src++);
		*dst++ ^= (*src++);
	}
}

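/*
 * Return true if the bio's data buffer contains only zeros.  Used in
 * VERIFY mode, where the XOR of all components (parity included) must
 * be zero.
 */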
static int
g_raid3_is_zero(struct bio *bp)
{
	static const uint64_t zeros[] = {
	    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
	};
	u_char *addr;
	ssize_t size;

	size = bp->bio_length;
	addr = (u_char *)bp->bio_data;
	for (; size > 0; size -= sizeof(zeros), addr += sizeof(zeros)) {
		if (bcmp(addr, zeros, sizeof(zeros)) != 0)
			return (0);
	}
	return (1);
}

/*
 * --- Event handling functions ---
 * Events in geom_raid3 are used to maintain disk and device state
 * from a single thread, which simplifies locking.
 */
static void
g_raid3_event_free(struct g_raid3_event *ep)
{

	free(ep, M_RAID3);
}

int
g_raid3_event_send(void *arg, int state, int flags)
{
	struct g_raid3_softc *sc;
	struct g_raid3_disk *disk;
	struct g_raid3_event *ep;
	int error;

	ep = malloc(sizeof(*ep), M_RAID3, M_WAITOK);
	G_RAID3_DEBUG(4, "%s: Sending event %p.", __func__, ep);
	if ((flags & G_RAID3_EVENT_DEVICE) != 0) {
		disk = NULL;
		sc = arg;
	} else {
		disk = arg;
		sc = disk->d_softc;
	}
	ep->e_disk = disk;
	ep->e_state = state;
	ep->e_flags = flags;
	ep->e_error = 0;
	mtx_lock(&sc->sc_events_mtx);
	TAILQ_INSERT_TAIL(&sc->sc_events, ep, e_next);
	mtx_unlock(&sc->sc_events_mtx);
	G_RAID3_DEBUG(4, "%s: Waking up %p.", __func__, sc);
	mtx_lock(&sc->sc_queue_mtx);
	wakeup(sc);
	wakeup(&sc->sc_queue);
	mtx_unlock(&sc->sc_queue_mtx);
	if ((flags & G_RAID3_EVENT_DONTWAIT) != 0)
		return (0);
	sx_assert(&sc->sc_lock, SX_XLOCKED);
	G_RAID3_DEBUG(4, "%s: Sleeping %p.", __func__, ep);
	sx_xunlock(&sc->sc_lock);
	while ((ep->e_flags & G_RAID3_EVENT_DONE) == 0) {
		mtx_lock(&sc->sc_events_mtx);
		MSLEEP(ep, &sc->sc_events_mtx, PRIBIO | PDROP, "r3:event",
		    hz * 5);
	}
	error = ep->e_error;
	g_raid3_event_free(ep);
	sx_xlock(&sc->sc_lock);
	return (error);
}

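/* Return the first queued event, if any, without removing it from the queue. */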
static struct g_raid3_event *
g_raid3_event_get(struct g_raid3_softc *sc)
{
	struct g_raid3_event *ep;

	mtx_lock(&sc->sc_events_mtx);
	ep = TAILQ_FIRST(&sc->sc_events);
	mtx_unlock(&sc->sc_events_mtx);
	return (ep);
}

static void
g_raid3_event_remove(struct g_raid3_softc *sc, struct g_raid3_event *ep)
{

	mtx_lock(&sc->sc_events_mtx);
	TAILQ_REMOVE(&sc->sc_events, ep, e_next);
	mtx_unlock(&sc->sc_events_mtx);
}

static void
g_raid3_event_cancel(struct g_raid3_disk *disk)
{
	struct g_raid3_softc *sc;
	struct g_raid3_event *ep, *tmpep;

	sc = disk->d_softc;
	sx_assert(&sc->sc_lock, SX_XLOCKED);

	mtx_lock(&sc->sc_events_mtx);
	TAILQ_FOREACH_SAFE(ep, &sc->sc_events, e_next, tmpep) {
		if ((ep->e_flags & G_RAID3_EVENT_DEVICE) != 0)
			continue;
		if (ep->e_disk != disk)
			continue;
		TAILQ_REMOVE(&sc->sc_events, ep, e_next);
		if ((ep->e_flags & G_RAID3_EVENT_DONTWAIT) != 0)
			g_raid3_event_free(ep);
		else {
			ep->e_error = ECANCELED;
			wakeup(ep);
		}
	}
	mtx_unlock(&sc->sc_events_mtx);
}

/*
 * Return the number of disks in the given state.
 * If state is equal to -1, count all connected disks.
 */
u_int
g_raid3_ndisks(struct g_raid3_softc *sc, int state)
{
	struct g_raid3_disk *disk;
	u_int n, ndisks;

	sx_assert(&sc->sc_lock, SX_LOCKED);

	for (n = ndisks = 0; n < sc->sc_ndisks; n++) {
		disk = &sc->sc_disks[n];
		if (disk->d_state == G_RAID3_DISK_STATE_NODISK)
			continue;
		if (state == -1 || disk->d_state == state)
			ndisks++;
	}
	return (ndisks);
}

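/* Count the queued requests which originated from the given consumer. */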
static u_int
g_raid3_nrequests(struct g_raid3_softc *sc, struct g_consumer *cp)
{
	struct bio *bp;
	u_int nreqs = 0;

	mtx_lock(&sc->sc_queue_mtx);
	TAILQ_FOREACH(bp, &sc->sc_queue.queue, bio_queue) {
		if (bp->bio_from == cp)
			nreqs++;
	}
	mtx_unlock(&sc->sc_queue_mtx);
	return (nreqs);
}

static int
g_raid3_is_busy(struct g_raid3_softc *sc, struct g_consumer *cp)
{

	if (cp->index > 0) {
		G_RAID3_DEBUG(2,
		    "I/O requests for %s exist, can't destroy it now.",
		    cp->provider->name);
		return (1);
	}
	if (g_raid3_nrequests(sc, cp) > 0) {
		G_RAID3_DEBUG(2,
		    "I/O requests for %s in queue, can't destroy it now.",
		    cp->provider->name);
		return (1);
	}
	return (0);
}

static void
g_raid3_destroy_consumer(void *arg, int flags __unused)
{
	struct g_consumer *cp;

	g_topology_assert();

	cp = arg;
	G_RAID3_DEBUG(1, "Consumer %s destroyed.", cp->provider->name);
	g_detach(cp);
	g_destroy_consumer(cp);
}

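/*
 * Unless the consumer is still busy, drop all access to it and detach
 * and destroy it, possibly deferring the destruction via an event (see
 * the comment below).
 */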
static void
g_raid3_kill_consumer(struct g_raid3_softc *sc, struct g_consumer *cp)
{
	struct g_provider *pp;
	int retaste_wait;

	g_topology_assert();

	cp->private = NULL;
	if (g_raid3_is_busy(sc, cp))
		return;
	G_RAID3_DEBUG(2, "Consumer %s destroyed.", cp->provider->name);
	pp = cp->provider;
	retaste_wait = 0;
	if (cp->acw == 1) {
		if ((pp->geom->flags & G_GEOM_WITHER) == 0)
			retaste_wait = 1;
	}
	G_RAID3_DEBUG(2, "Access %s r%dw%de%d = %d", pp->name, -cp->acr,
	    -cp->acw, -cp->ace, 0);
	if (cp->acr > 0 || cp->acw > 0 || cp->ace > 0)
		g_access(cp, -cp->acr, -cp->acw, -cp->ace);
	if (retaste_wait) {
		/*
		 * After the retaste event has been sent (inside g_access()),
		 * we can post an event to detach and destroy the consumer.
		 * A class which has a consumer attached to the given provider
		 * will not receive a retaste event for that provider.
		 * This is how we ignore retaste events when closing consumers
		 * opened for write: the consumer is detached and destroyed
		 * only after the retaste event has been sent.
		 */
		g_post_event(g_raid3_destroy_consumer, cp, M_WAITOK, NULL);
		return;
	}
	G_RAID3_DEBUG(1, "Consumer %s destroyed.", pp->name);
	g_detach(cp);
	g_destroy_consumer(cp);
}

static int
g_raid3_connect_disk(struct g_raid3_disk *disk, struct g_provider *pp)
{
	struct g_consumer *cp;
	int error;

	g_topology_assert_not();
	KASSERT(disk->d_consumer == NULL,
	    ("Disk already connected (device %s).", disk->d_softc->sc_name));

	g_topology_lock();
	cp = g_new_consumer(disk->d_softc->sc_geom);
	error = g_attach(cp, pp);
	if (error != 0) {
		g_destroy_consumer(cp);
		g_topology_unlock();
		return (error);
	}
	error = g_access(cp, 1, 1, 1);
	g_topology_unlock();
	if (error != 0) {
		g_detach(cp);
		g_destroy_consumer(cp);
		G_RAID3_DEBUG(0, "Cannot open consumer %s (error=%d).",
		    pp->name, error);
		return (error);
	}
	disk->d_consumer = cp;
	disk->d_consumer->private = disk;
	disk->d_consumer->index = 0;
	G_RAID3_DEBUG(2, "Disk %s connected.", g_raid3_get_diskname(disk));
	return (0);
}

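/*
 * Disconnect the consumer: kill it if it is still attached to a provider,
 * otherwise just destroy it.
 */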
static void
g_raid3_disconnect_consumer(struct g_raid3_softc *sc, struct g_consumer *cp)
{

	g_topology_assert();

	if (cp == NULL)
		return;
	if (cp->provider != NULL)
		g_raid3_kill_consumer(sc, cp);
	else
		g_destroy_consumer(cp);
}

/*
 * Initialize the disk. This means create a consumer, attach it to the
 * provider and open access (r1w1e1) to it.
 */
static struct g_raid3_disk *
g_raid3_init_disk(struct g_raid3_softc *sc, struct g_provider *pp,
    struct g_raid3_metadata *md, int *errorp)
{
	struct g_raid3_disk *disk;
	int error;

	disk = &sc->sc_disks[md->md_no];
	error = g_raid3_connect_disk(disk, pp);
	if (error != 0) {
		if (errorp != NULL)
			*errorp = error;
		return (NULL);
	}
	disk->d_state = G_RAID3_DISK_STATE_NONE;
	disk->d_flags = md->md_dflags;
	if (md->md_provider[0] != '\0')
		disk->d_flags |= G_RAID3_DISK_FLAG_HARDCODED;
	disk->d_sync.ds_consumer = NULL;
	disk->d_sync.ds_offset = md->md_sync_offset;
	disk->d_sync.ds_offset_done = md->md_sync_offset;
	disk->d_genid = md->md_genid;
	disk->d_sync.ds_syncid = md->md_syncid;
	if (errorp != NULL)
		*errorp = 0;
	return (disk);
}

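/*
 * Detach the disk from its consumer and mark it as NODISK, stopping
 * synchronization first if the disk was being synchronized.
 */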
static void
g_raid3_destroy_disk(struct g_raid3_disk *disk)
{
	struct g_raid3_softc *sc;

	g_topology_assert_not();
	sc = disk->d_softc;
	sx_assert(&sc->sc_lock, SX_XLOCKED);

	if (disk->d_state == G_RAID3_DISK_STATE_NODISK)
		return;
	g_raid3_event_cancel(disk);
	switch (disk->d_state) {
	case G_RAID3_DISK_STATE_SYNCHRONIZING:
		if (sc->sc_syncdisk != NULL)
			g_raid3_sync_stop(sc, 1);
		/* FALLTHROUGH */
	case G_RAID3_DISK_STATE_NEW:
	case G_RAID3_DISK_STATE_STALE:
	case G_RAID3_DISK_STATE_ACTIVE:
		g_topology_lock();
		g_raid3_disconnect_consumer(sc, disk->d_consumer);
		g_topology_unlock();
		disk->d_consumer = NULL;
		break;
	default:
		KASSERT(0 == 1, ("Wrong disk state (%s, %s).",
		    g_raid3_get_diskname(disk),
		    g_raid3_disk_state2str(disk->d_state)));
	}
	disk->d_state = G_RAID3_DISK_STATE_NODISK;
}

static void
g_raid3_destroy_device(struct g_raid3_softc *sc)
{
	struct g_raid3_event *ep;
	struct g_raid3_disk *disk;
	struct g_geom *gp;
	struct g_consumer *cp;
	u_int n;

	g_topology_assert_not();
	sx_assert(&sc->sc_lock, SX_XLOCKED);

	gp = sc->sc_geom;
	if (sc->sc_provider != NULL)
		g_raid3_destroy_provider(sc);
	for (n = 0; n < sc->sc_ndisks; n++) {
		disk = &sc->sc_disks[n];
		if (disk->d_state != G_RAID3_DISK_STATE_NODISK) {
			disk->d_flags &= ~G_RAID3_DISK_FLAG_DIRTY;
			g_raid3_update_metadata(disk);
			g_raid3_destroy_disk(disk);
		}
	}
	while ((ep = g_raid3_event_get(sc)) != NULL) {
		g_raid3_event_remove(sc, ep);
		if ((ep->e_flags & G_RAID3_EVENT_DONTWAIT) != 0)
			g_raid3_event_free(ep);
		else {
			ep->e_error = ECANCELED;
			ep->e_flags |= G_RAID3_EVENT_DONE;
			G_RAID3_DEBUG(4, "%s: Waking up %p.", __func__, ep);
			mtx_lock(&sc->sc_events_mtx);
			wakeup(ep);
			mtx_unlock(&sc->sc_events_mtx);
		}
	}
	callout_drain(&sc->sc_callout);
	cp = LIST_FIRST(&sc->sc_sync.ds_geom->consumer);
	g_topology_lock();
	if (cp != NULL)
		g_raid3_disconnect_consumer(sc, cp);
	g_wither_geom(sc->sc_sync.ds_geom, ENXIO);
	G_RAID3_DEBUG(0, "Device %s destroyed.", gp->name);
	g_wither_geom(gp, ENXIO);
	g_topology_unlock();
	if (!g_raid3_use_malloc) {
		uma_zdestroy(sc->sc_zones[G_RAID3_ZONE_64K].sz_zone);
		uma_zdestroy(sc->sc_zones[G_RAID3_ZONE_16K].sz_zone);
		uma_zdestroy(sc->sc_zones[G_RAID3_ZONE_4K].sz_zone);
	}
	mtx_destroy(&sc->sc_queue_mtx);
	mtx_destroy(&sc->sc_events_mtx);
	sx_xunlock(&sc->sc_lock);
	sx_destroy(&sc->sc_lock);
}

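/*
 * Orphan method: the underlying provider has gone away, so schedule
 * disconnection of the disk and request a syncid bump.
 */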
static void
g_raid3_orphan(struct g_consumer *cp)
{
	struct g_raid3_disk *disk;

	g_topology_assert();

	disk = cp->private;
	if (disk == NULL)
		return;
	disk->d_softc->sc_bump_id = G_RAID3_BUMP_SYNCID;
	g_raid3_event_send(disk, G_RAID3_DISK_STATE_DISCONNECTED,
	    G_RAID3_EVENT_DONTWAIT);
}

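/*
 * Write the given metadata into the last sector of the component, or
 * clear that sector when md is NULL.
 */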
static int
g_raid3_write_metadata(struct g_raid3_disk *disk, struct g_raid3_metadata *md)
{
	struct g_raid3_softc *sc;
	struct g_consumer *cp;
	off_t offset, length;
	u_char *sector;
	int error = 0;

	g_topology_assert_not();
	sc = disk->d_softc;
	sx_assert(&sc->sc_lock, SX_LOCKED);

	cp = disk->d_consumer;
	KASSERT(cp != NULL, ("NULL consumer (%s).", sc->sc_name));
	KASSERT(cp->provider != NULL, ("NULL provider (%s).", sc->sc_name));
	KASSERT(cp->acr >= 1 && cp->acw >= 1 && cp->ace >= 1,
	    ("Consumer %s closed? (r%dw%de%d).", cp->provider->name, cp->acr,
	    cp->acw, cp->ace));
	length = cp->provider->sectorsize;
	offset = cp->provider->mediasize - length;
	sector = malloc((size_t)length, M_RAID3, M_WAITOK | M_ZERO);
	if (md != NULL)
		raid3_metadata_encode(md, sector);
	error = g_write_data(cp, offset, sector, length);
	free(sector, M_RAID3);
	if (error != 0) {
		if ((disk->d_flags & G_RAID3_DISK_FLAG_BROKEN) == 0) {
			G_RAID3_DEBUG(0, "Cannot write metadata on %s "
			    "(device=%s, error=%d).",
			    g_raid3_get_diskname(disk), sc->sc_name, error);
			disk->d_flags |= G_RAID3_DISK_FLAG_BROKEN;
		} else {
			G_RAID3_DEBUG(1, "Cannot write metadata on %s "
			    "(device=%s, error=%d).",
			    g_raid3_get_diskname(disk), sc->sc_name, error);
		}
		if (g_raid3_disconnect_on_failure &&
		    sc->sc_state == G_RAID3_DEVICE_STATE_COMPLETE) {
			sc->sc_bump_id |= G_RAID3_BUMP_GENID;
			g_raid3_event_send(disk,
			    G_RAID3_DISK_STATE_DISCONNECTED,
			    G_RAID3_EVENT_DONTWAIT);
		}
	}
	return (error);
}

int
g_raid3_clear_metadata(struct g_raid3_disk *disk)
{
	int error;

	g_topology_assert_not();
	sx_assert(&disk->d_softc->sc_lock, SX_LOCKED);

	error = g_raid3_write_metadata(disk, NULL);
	if (error == 0) {
		G_RAID3_DEBUG(2, "Metadata on %s cleared.",
		    g_raid3_get_diskname(disk));
	} else {
		G_RAID3_DEBUG(0,
		    "Cannot clear metadata on disk %s (error=%d).",
		    g_raid3_get_diskname(disk), error);
	}
	return (error);
}

void
g_raid3_fill_metadata(struct g_raid3_disk *disk, struct g_raid3_metadata *md)
{
	struct g_raid3_softc *sc;
	struct g_provider *pp;

	sc = disk->d_softc;
	strlcpy(md->md_magic, G_RAID3_MAGIC, sizeof(md->md_magic));
	md->md_version = G_RAID3_VERSION;
	strlcpy(md->md_name, sc->sc_name, sizeof(md->md_name));
	md->md_id = sc->sc_id;
	md->md_all = sc->sc_ndisks;
	md->md_genid = sc->sc_genid;
	md->md_mediasize = sc->sc_mediasize;
	md->md_sectorsize = sc->sc_sectorsize;
	md->md_mflags = (sc->sc_flags & G_RAID3_DEVICE_FLAG_MASK);
	md->md_no = disk->d_no;
	md->md_syncid = disk->d_sync.ds_syncid;
	md->md_dflags = (disk->d_flags & G_RAID3_DISK_FLAG_MASK);
	if (disk->d_state != G_RAID3_DISK_STATE_SYNCHRONIZING)
		md->md_sync_offset = 0;
	else {
		md->md_sync_offset =
		    disk->d_sync.ds_offset_done / (sc->sc_ndisks - 1);
	}
	if (disk->d_consumer != NULL && disk->d_consumer->provider != NULL)
		pp = disk->d_consumer->provider;
	else
		pp = NULL;
	if ((disk->d_flags & G_RAID3_DISK_FLAG_HARDCODED) != 0 && pp != NULL)
		strlcpy(md->md_provider, pp->name, sizeof(md->md_provider));
	else
		bzero(md->md_provider, sizeof(md->md_provider));
	if (pp != NULL)
		md->md_provsize = pp->mediasize;
	else
		md->md_provsize = 0;
}

void
g_raid3_update_metadata(struct g_raid3_disk *disk)
{
	struct g_raid3_softc *sc;
	struct g_raid3_metadata md;
	int error;

	g_topology_assert_not();
	sc = disk->d_softc;
	sx_assert(&sc->sc_lock, SX_LOCKED);

	g_raid3_fill_metadata(disk, &md);
	error = g_raid3_write_metadata(disk, &md);
	if (error == 0) {
		G_RAID3_DEBUG(2, "Metadata on %s updated.",
		    g_raid3_get_diskname(disk));
	} else {
		G_RAID3_DEBUG(0,
		    "Cannot update metadata on disk %s (error=%d).",
		    g_raid3_get_diskname(disk), error);
	}
}

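/*
 * Bump the synchronization id and write it into the metadata of all
 * active and synchronizing disks.
 */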
static void
g_raid3_bump_syncid(struct g_raid3_softc *sc)
{
	struct g_raid3_disk *disk;
	u_int n;

	g_topology_assert_not();
	sx_assert(&sc->sc_lock, SX_XLOCKED);
	KASSERT(g_raid3_ndisks(sc, G_RAID3_DISK_STATE_ACTIVE) > 0,
	    ("%s called with no active disks (device=%s).", __func__,
	    sc->sc_name));

	sc->sc_syncid++;
	G_RAID3_DEBUG(1, "Device %s: syncid bumped to %u.", sc->sc_name,
	    sc->sc_syncid);
	for (n = 0; n < sc->sc_ndisks; n++) {
		disk = &sc->sc_disks[n];
		if (disk->d_state == G_RAID3_DISK_STATE_ACTIVE ||
		    disk->d_state == G_RAID3_DISK_STATE_SYNCHRONIZING) {
			disk->d_sync.ds_syncid = sc->sc_syncid;
			g_raid3_update_metadata(disk);
		}
	}
}

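/*
 * Bump the generation id and write it into the metadata of all active
 * and synchronizing disks.
 */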
static void
g_raid3_bump_genid(struct g_raid3_softc *sc)
{
	struct g_raid3_disk *disk;
	u_int n;

	g_topology_assert_not();
	sx_assert(&sc->sc_lock, SX_XLOCKED);
	KASSERT(g_raid3_ndisks(sc, G_RAID3_DISK_STATE_ACTIVE) > 0,
	    ("%s called with no active disks (device=%s).", __func__,
	    sc->sc_name));

	sc->sc_genid++;
	G_RAID3_DEBUG(1, "Device %s: genid bumped to %u.", sc->sc_name,
	    sc->sc_genid);
	for (n = 0; n < sc->sc_ndisks; n++) {
		disk = &sc->sc_disks[n];
		if (disk->d_state == G_RAID3_DISK_STATE_ACTIVE ||
		    disk->d_state == G_RAID3_DISK_STATE_SYNCHRONIZING) {
			disk->d_genid = sc->sc_genid;
			g_raid3_update_metadata(disk);
		}
	}
}

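/*
 * If the device has been write-idle long enough, mark all active
 * components as clean.  Return the number of seconds left until the
 * idle timeout expires, or 0 if there is nothing more to do.
 */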
static int
g_raid3_idle(struct g_raid3_softc *sc, int acw)
{
	struct g_raid3_disk *disk;
	u_int i;
	int timeout;

	g_topology_assert_not();
	sx_assert(&sc->sc_lock, SX_XLOCKED);

	if (sc->sc_provider == NULL)
		return (0);
	if ((sc->sc_flags & G_RAID3_DEVICE_FLAG_NOFAILSYNC) != 0)
		return (0);
	if (sc->sc_idle)
		return (0);
	if (sc->sc_writes > 0)
		return (0);
	if (acw > 0 || (acw == -1 && sc->sc_provider->acw > 0)) {
		timeout = g_raid3_idletime - (time_uptime - sc->sc_last_write);
		if (!g_raid3_shutdown && timeout > 0)
			return (timeout);
	}
	sc->sc_idle = 1;
	for (i = 0; i < sc->sc_ndisks; i++) {
		disk = &sc->sc_disks[i];
		if (disk->d_state != G_RAID3_DISK_STATE_ACTIVE)
			continue;
		G_RAID3_DEBUG(1, "Disk %s (device %s) marked as clean.",
		    g_raid3_get_diskname(disk), sc->sc_name);
		disk->d_flags &= ~G_RAID3_DISK_FLAG_DIRTY;
		g_raid3_update_metadata(disk);
	}
	return (0);
}

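/* Leave the idle state: mark all active components as dirty again. */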
static void
g_raid3_unidle(struct g_raid3_softc *sc)
{
	struct g_raid3_disk *disk;
	u_int i;

	g_topology_assert_not();
	sx_assert(&sc->sc_lock, SX_XLOCKED);

	if ((sc->sc_flags & G_RAID3_DEVICE_FLAG_NOFAILSYNC) != 0)
		return;
	sc->sc_idle = 0;
	sc->sc_last_write = time_uptime;
	for (i = 0; i < sc->sc_ndisks; i++) {
		disk = &sc->sc_disks[i];
		if (disk->d_state != G_RAID3_DISK_STATE_ACTIVE)
			continue;
		G_RAID3_DEBUG(1, "Disk %s (device %s) marked as dirty.",
		    g_raid3_get_diskname(disk), sc->sc_name);
		disk->d_flags |= G_RAID3_DISK_FLAG_DIRTY;
		g_raid3_update_metadata(disk);
	}
}

/*
 * Treat the bio_driver1 field in the parent bio as the list head and the
 * bio_caller1 field in each child bio as a pointer to the next element
 * on the list.
 */
#define	G_RAID3_HEAD_BIO(pbp)	(pbp)->bio_driver1

#define	G_RAID3_NEXT_BIO(cbp)	(cbp)->bio_caller1

#define	G_RAID3_FOREACH_BIO(pbp, bp)					\
	for ((bp) = G_RAID3_HEAD_BIO(pbp); (bp) != NULL;		\
	    (bp) = G_RAID3_NEXT_BIO(bp))

#define	G_RAID3_FOREACH_SAFE_BIO(pbp, bp, tmpbp)			\
	for ((bp) = G_RAID3_HEAD_BIO(pbp);				\
	    (bp) != NULL && ((tmpbp) = G_RAID3_NEXT_BIO(bp), 1);	\
	    (bp) = (tmpbp))

static void
g_raid3_init_bio(struct bio *pbp)
{

	G_RAID3_HEAD_BIO(pbp) = NULL;
}

static void
g_raid3_remove_bio(struct bio *cbp)
{
	struct bio *pbp, *bp;

	pbp = cbp->bio_parent;
	if (G_RAID3_HEAD_BIO(pbp) == cbp)
		G_RAID3_HEAD_BIO(pbp) = G_RAID3_NEXT_BIO(cbp);
	else {
		G_RAID3_FOREACH_BIO(pbp, bp) {
			if (G_RAID3_NEXT_BIO(bp) == cbp) {
				G_RAID3_NEXT_BIO(bp) = G_RAID3_NEXT_BIO(cbp);
				break;
			}
		}
	}
	G_RAID3_NEXT_BIO(cbp) = NULL;
}

static void
g_raid3_replace_bio(struct bio *sbp, struct bio *dbp)
{
	struct bio *pbp, *bp;

	g_raid3_remove_bio(sbp);
	pbp = dbp->bio_parent;
	G_RAID3_NEXT_BIO(sbp) = G_RAID3_NEXT_BIO(dbp);
	if (G_RAID3_HEAD_BIO(pbp) == dbp)
		G_RAID3_HEAD_BIO(pbp) = sbp;
	else {
		G_RAID3_FOREACH_BIO(pbp, bp) {
			if (G_RAID3_NEXT_BIO(bp) == dbp) {
				G_RAID3_NEXT_BIO(bp) = sbp;
				break;
			}
		}
	}
	G_RAID3_NEXT_BIO(dbp) = NULL;
}

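/*
 * Unlink the cloned bio from its parent's list, free its data buffer
 * and destroy it.
 */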
static void
g_raid3_destroy_bio(struct g_raid3_softc *sc, struct bio *cbp)
{
	struct bio *bp, *pbp;
	size_t size;

	pbp = cbp->bio_parent;
	pbp->bio_children--;
	KASSERT(cbp->bio_data != NULL, ("NULL bio_data"));
	size = pbp->bio_length / (sc->sc_ndisks - 1);
	g_raid3_free(sc, cbp->bio_data, size);
	if (G_RAID3_HEAD_BIO(pbp) == cbp) {
		G_RAID3_HEAD_BIO(pbp) = G_RAID3_NEXT_BIO(cbp);
		G_RAID3_NEXT_BIO(cbp) = NULL;
		g_destroy_bio(cbp);
	} else {
		G_RAID3_FOREACH_BIO(pbp, bp) {
			if (G_RAID3_NEXT_BIO(bp) == cbp)
				break;
		}
		if (bp != NULL) {
			KASSERT(G_RAID3_NEXT_BIO(bp) != NULL,
			    ("NULL bp->bio_driver1"));
			G_RAID3_NEXT_BIO(bp) = G_RAID3_NEXT_BIO(cbp);
			G_RAID3_NEXT_BIO(cbp) = NULL;
		}
		g_destroy_bio(cbp);
	}
}

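/*
 * Clone the parent bio, give the clone a data buffer of 1/(ndisks - 1)
 * of the parent's length and append it to the parent's list.
 */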
static struct bio *
g_raid3_clone_bio(struct g_raid3_softc *sc, struct bio *pbp)
{
	struct bio *bp, *cbp;
	size_t size;
	int memflag;

	cbp = g_clone_bio(pbp);
	if (cbp == NULL)
		return (NULL);
	size = pbp->bio_length / (sc->sc_ndisks - 1);
	if ((pbp->bio_cflags & G_RAID3_BIO_CFLAG_REGULAR) != 0)
		memflag = M_WAITOK;
	else
		memflag = M_NOWAIT;
	cbp->bio_data = g_raid3_alloc(sc, size, memflag);
	if (cbp->bio_data == NULL) {
		pbp->bio_children--;
		g_destroy_bio(cbp);
		return (NULL);
	}
	G_RAID3_NEXT_BIO(cbp) = NULL;
	if (G_RAID3_HEAD_BIO(pbp) == NULL)
		G_RAID3_HEAD_BIO(pbp) = cbp;
	else {
		G_RAID3_FOREACH_BIO(pbp, bp) {
			if (G_RAID3_NEXT_BIO(bp) == NULL) {
				G_RAID3_NEXT_BIO(bp) = cbp;
				break;
			}
		}
	}
	return (cbp);
}

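/*
 * Split a write request across the components: distribute the parent's
 * data among the data clones in 'atom'-sized pieces, compute the parity
 * clone as the XOR of the data clones and send everything down.
 */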
static void
g_raid3_scatter(struct bio *pbp)
{
	struct g_raid3_softc *sc;
	struct g_raid3_disk *disk;
	struct bio *bp, *cbp, *tmpbp;
	off_t atom, cadd, padd, left;
	int first;

	sc = pbp->bio_to->geom->softc;
	bp = NULL;
	if ((pbp->bio_pflags & G_RAID3_BIO_PFLAG_NOPARITY) == 0) {
		/*
		 * Find the bio for which we should calculate the parity data.
		 */
		G_RAID3_FOREACH_BIO(pbp, cbp) {
			if ((cbp->bio_cflags & G_RAID3_BIO_CFLAG_PARITY) != 0) {
				bp = cbp;
				break;
			}
		}
		KASSERT(bp != NULL, ("NULL parity bio."));
	}
	atom = sc->sc_sectorsize / (sc->sc_ndisks - 1);
	cadd = padd = 0;
	for (left = pbp->bio_length; left > 0; left -= sc->sc_sectorsize) {
		G_RAID3_FOREACH_BIO(pbp, cbp) {
			if (cbp == bp)
				continue;
			bcopy(pbp->bio_data + padd, cbp->bio_data + cadd, atom);
			padd += atom;
		}
		cadd += atom;
	}
	if ((pbp->bio_pflags & G_RAID3_BIO_PFLAG_NOPARITY) == 0) {
		/*
		 * Calculate parity.
		 */
		first = 1;
		G_RAID3_FOREACH_SAFE_BIO(pbp, cbp, tmpbp) {
			if (cbp == bp)
				continue;
			if (first) {
				bcopy(cbp->bio_data, bp->bio_data,
				    bp->bio_length);
				first = 0;
			} else {
				g_raid3_xor(cbp->bio_data, bp->bio_data,
				    bp->bio_length);
			}
			if ((cbp->bio_cflags & G_RAID3_BIO_CFLAG_NODISK) != 0)
				g_raid3_destroy_bio(sc, cbp);
		}
	}
	G_RAID3_FOREACH_SAFE_BIO(pbp, cbp, tmpbp) {
		struct g_consumer *cp;

		disk = cbp->bio_caller2;
		cp = disk->d_consumer;
		cbp->bio_to = cp->provider;
		G_RAID3_LOGREQ(3, cbp, "Sending request.");
		KASSERT(cp->acr >= 1 && cp->acw >= 1 && cp->ace >= 1,
		    ("Consumer %s not opened (r%dw%de%d).", cp->provider->name,
		    cp->acr, cp->acw, cp->ace));
		cp->index++;
		sc->sc_writes++;
		g_io_request(cbp, cp);
	}
}

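/*
 * Complete a read request: handle component failures, reconstruct
 * missing data from parity if needed and copy the gathered data back
 * into the parent bio.
 */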
static void
g_raid3_gather(struct bio *pbp)
{
	struct g_raid3_softc *sc;
	struct g_raid3_disk *disk;
	struct bio *xbp, *fbp, *cbp;
	off_t atom, cadd, padd, left;

	sc = pbp->bio_to->geom->softc;
	/*
	 * Find the bio for which we have to calculate data.
	 * While walking the list, check whether all requests succeeded;
	 * if not, fail the whole request.
	 * If we're in COMPLETE mode, we allow one request to fail, and if
	 * we find one, we redirect it to the parity consumer.
	 * If more than one request failed, we fail the whole request.
	 */
	xbp = fbp = NULL;
	G_RAID3_FOREACH_BIO(pbp, cbp) {
		if ((cbp->bio_cflags & G_RAID3_BIO_CFLAG_PARITY) != 0) {
			KASSERT(xbp == NULL, ("More than one parity bio."));
			xbp = cbp;
		}
		if (cbp->bio_error == 0)
			continue;
		/*
		 * Found failed request.
		 */
		if (fbp == NULL) {
			if ((pbp->bio_pflags & G_RAID3_BIO_PFLAG_DEGRADED) != 0) {
				/*
				 * We are already in degraded mode, so we can't
				 * accept any failures.
				 */
				if (pbp->bio_error == 0)
					pbp->bio_error = cbp->bio_error;
			} else {
				fbp = cbp;
			}
		} else {
			/*
			 * Next failed request, that's too many.
			 */
			if (pbp->bio_error == 0)
				pbp->bio_error = fbp->bio_error;
		}
		disk = cbp->bio_caller2;
		if (disk == NULL)
			continue;
		if ((disk->d_flags & G_RAID3_DISK_FLAG_BROKEN) == 0) {
			disk->d_flags |= G_RAID3_DISK_FLAG_BROKEN;
			G_RAID3_LOGREQ(0, cbp, "Request failed (error=%d).",
			    cbp->bio_error);
		} else {
			G_RAID3_LOGREQ(1, cbp, "Request failed (error=%d).",
			    cbp->bio_error);
		}
		if (g_raid3_disconnect_on_failure &&
		    sc->sc_state == G_RAID3_DEVICE_STATE_COMPLETE) {
			sc->sc_bump_id |= G_RAID3_BUMP_GENID;
			g_raid3_event_send(disk,
			    G_RAID3_DISK_STATE_DISCONNECTED,
			    G_RAID3_EVENT_DONTWAIT);
		}
	}
	if (pbp->bio_error != 0)
		goto finish;
	if (fbp != NULL && (pbp->bio_pflags & G_RAID3_BIO_PFLAG_VERIFY) != 0) {
		pbp->bio_pflags &= ~G_RAID3_BIO_PFLAG_VERIFY;
		if (xbp != fbp)
			g_raid3_replace_bio(xbp, fbp);
		g_raid3_destroy_bio(sc, fbp);
	} else if (fbp != NULL) {
		struct g_consumer *cp;

		/*
		 * One request failed, so send the same request to
		 * the parity consumer.
		 */
		disk = pbp->bio_driver2;
		if (disk->d_state != G_RAID3_DISK_STATE_ACTIVE) {
			pbp->bio_error = fbp->bio_error;
			goto finish;
		}
		pbp->bio_pflags |= G_RAID3_BIO_PFLAG_DEGRADED;
		pbp->bio_inbed--;
		fbp->bio_flags &= ~(BIO_DONE | BIO_ERROR);
		if (disk->d_no == sc->sc_ndisks - 1)
			fbp->bio_cflags |= G_RAID3_BIO_CFLAG_PARITY;
		fbp->bio_error = 0;
		fbp->bio_completed = 0;
		fbp->bio_children = 0;
		fbp->bio_inbed = 0;
		cp = disk->d_consumer;
		fbp->bio_caller2 = disk;
		fbp->bio_to = cp->provider;
		G_RAID3_LOGREQ(3, fbp, "Sending request (recover).");
		KASSERT(cp->acr >= 1 && cp->acw >= 1 && cp->ace >= 1,
		    ("Consumer %s not opened (r%dw%de%d).", cp->provider->name,
		    cp->acr, cp->acw, cp->ace));
		cp->index++;
		g_io_request(fbp, cp);
		return;
	}
	if (xbp != NULL) {
		/*
		 * Calculate parity.
		 */
		G_RAID3_FOREACH_BIO(pbp, cbp) {
			if ((cbp->bio_cflags & G_RAID3_BIO_CFLAG_PARITY) != 0)
				continue;
			g_raid3_xor(cbp->bio_data, xbp->bio_data,
			    xbp->bio_length);
		}
		xbp->bio_cflags &= ~G_RAID3_BIO_CFLAG_PARITY;
		if ((pbp->bio_pflags & G_RAID3_BIO_PFLAG_VERIFY) != 0) {
			if (!g_raid3_is_zero(xbp)) {
				g_raid3_parity_mismatch++;
				pbp->bio_error = EIO;
				goto finish;
			}
			g_raid3_destroy_bio(sc, xbp);
		}
	}
	atom = sc->sc_sectorsize / (sc->sc_ndisks - 1);
	cadd = padd = 0;
	for (left = pbp->bio_length; left > 0; left -= sc->sc_sectorsize) {
		G_RAID3_FOREACH_BIO(pbp, cbp) {
			bcopy(cbp->bio_data + cadd, pbp->bio_data + padd, atom);
			pbp->bio_completed += atom;
			padd += atom;
		}
		cadd += atom;
	}
finish:
	if (pbp->bio_error == 0)
		G_RAID3_LOGREQ(3, pbp, "Request finished.");
	else {
		if ((pbp->bio_pflags & G_RAID3_BIO_PFLAG_VERIFY) != 0)
			G_RAID3_LOGREQ(1, pbp, "Verification error.");
		else
			G_RAID3_LOGREQ(0, pbp, "Request failed.");
	}
	pbp->bio_pflags &= ~G_RAID3_BIO_PFLAG_MASK;
	while ((cbp = G_RAID3_HEAD_BIO(pbp)) != NULL)
		g_raid3_destroy_bio(sc, cbp);
	g_io_deliver(pbp, pbp->bio_error);
}

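/*
 * I/O completion routine for regular requests: hand the bio back to the
 * worker thread.
 */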
static void
g_raid3_done(struct bio *bp)
{
	struct g_raid3_softc *sc;

	sc = bp->bio_from->geom->softc;
	bp->bio_cflags |= G_RAID3_BIO_CFLAG_REGULAR;
	G_RAID3_LOGREQ(3, bp, "Regular request done (error=%d).", bp->bio_error);
	mtx_lock(&sc->sc_queue_mtx);
	bioq_insert_head(&sc->sc_queue, bp);
	mtx_unlock(&sc->sc_queue_mtx);
	wakeup(sc);
	wakeup(&sc->sc_queue);
}

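/*
 * Process one completed child of a regular request; once all children
 * have come back, finish the parent request.
 */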
static void
g_raid3_regular_request(struct bio *cbp)
{
	struct g_raid3_softc *sc;
	struct g_raid3_disk *disk;
	struct bio *pbp;

	g_topology_assert_not();

	pbp = cbp->bio_parent;
	sc = pbp->bio_to->geom->softc;
	cbp->bio_from->index--;
	if (cbp->bio_cmd == BIO_WRITE)
		sc->sc_writes--;
	disk = cbp->bio_from->private;
	if (disk == NULL) {
		g_topology_lock();
		g_raid3_kill_consumer(sc, cbp->bio_from);
		g_topology_unlock();
	}

	G_RAID3_LOGREQ(3, cbp, "Request finished.");
	pbp->bio_inbed++;
	KASSERT(pbp->bio_inbed <= pbp->bio_children,
	    ("bio_inbed (%u) is bigger than bio_children (%u).", pbp->bio_inbed,
	    pbp->bio_children));
	if (pbp->bio_inbed != pbp->bio_children)
		return;
	switch (pbp->bio_cmd) {
	case BIO_READ:
		g_raid3_gather(pbp);
		break;
	case BIO_WRITE:
	case BIO_DELETE:
	    {
		int error = 0;

		pbp->bio_completed = pbp->bio_length;
		while ((cbp = G_RAID3_HEAD_BIO(pbp)) != NULL) {
			if (cbp->bio_error == 0) {
				g_raid3_destroy_bio(sc, cbp);
				continue;
			}

			if (error == 0)
				error = cbp->bio_error;
			else if (pbp->bio_error == 0) {
				/*
				 * Next failed request, that's too many.
				 */
				pbp->bio_error = error;
			}

			disk = cbp->bio_caller2;
			if (disk == NULL) {
				g_raid3_destroy_bio(sc, cbp);
				continue;
			}

			if ((disk->d_flags & G_RAID3_DISK_FLAG_BROKEN) == 0) {
				disk->d_flags |= G_RAID3_DISK_FLAG_BROKEN;
				G_RAID3_LOGREQ(0, cbp,
				    "Request failed (error=%d).",
				    cbp->bio_error);
			} else {
				G_RAID3_LOGREQ(1, cbp,
				    "Request failed (error=%d).",
				    cbp->bio_error);
			}
			if (g_raid3_disconnect_on_failure &&
			    sc->sc_state == G_RAID3_DEVICE_STATE_COMPLETE) {
				sc->sc_bump_id |= G_RAID3_BUMP_GENID;
				g_raid3_event_send(disk,
				    G_RAID3_DISK_STATE_DISCONNECTED,
				    G_RAID3_EVENT_DONTWAIT);
			}
			g_raid3_destroy_bio(sc, cbp);
		}
		if (pbp->bio_error == 0)
			G_RAID3_LOGREQ(3, pbp, "Request finished.");
		else
			G_RAID3_LOGREQ(0, pbp, "Request failed.");
		pbp->bio_pflags &= ~G_RAID3_BIO_PFLAG_DEGRADED;
		pbp->bio_pflags &= ~G_RAID3_BIO_PFLAG_NOPARITY;
		bioq_remove(&sc->sc_inflight, pbp);
		/* Release delayed sync requests if possible. */
		g_raid3_sync_release(sc);
		g_io_deliver(pbp, pbp->bio_error);
		break;
	    }
	}
}

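/*
 * I/O completion routine for synchronization requests: pass the bio to
 * the worker thread.
 */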
static void
g_raid3_sync_done(struct bio *bp)
{
	struct g_raid3_softc *sc;

	G_RAID3_LOGREQ(3, bp, "Synchronization request delivered.");
	sc = bp->bio_from->geom->softc;
	bp->bio_cflags |= G_RAID3_BIO_CFLAG_SYNC;
	mtx_lock(&sc->sc_queue_mtx);
	bioq_insert_head(&sc->sc_queue, bp);
	mtx_unlock(&sc->sc_queue_mtx);
	wakeup(sc);
	wakeup(&sc->sc_queue);
}

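/* Broadcast a BIO_FLUSH to every active component. */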
static void
g_raid3_flush(struct g_raid3_softc *sc, struct bio *bp)
{
	struct bio_queue_head queue;
	struct g_raid3_disk *disk;
	struct g_consumer *cp;
	struct bio *cbp;
	u_int i;

	bioq_init(&queue);
	for (i = 0; i < sc->sc_ndisks; i++) {
		disk = &sc->sc_disks[i];
		if (disk->d_state != G_RAID3_DISK_STATE_ACTIVE)
			continue;
		cbp = g_clone_bio(bp);
		if (cbp == NULL) {
			for (cbp = bioq_first(&queue); cbp != NULL;
			    cbp = bioq_first(&queue)) {
				bioq_remove(&queue, cbp);
				g_destroy_bio(cbp);
			}
			if (bp->bio_error == 0)
				bp->bio_error = ENOMEM;
			g_io_deliver(bp, bp->bio_error);
			return;
		}
		bioq_insert_tail(&queue, cbp);
		cbp->bio_done = g_std_done;
		cbp->bio_caller1 = disk;
		cbp->bio_to = disk->d_consumer->provider;
	}
	for (cbp = bioq_first(&queue); cbp != NULL; cbp = bioq_first(&queue)) {
		bioq_remove(&queue, cbp);
		G_RAID3_LOGREQ(3, cbp, "Sending request.");
		disk = cbp->bio_caller1;
		cbp->bio_caller1 = NULL;
		cp = disk->d_consumer;
		KASSERT(cp->acr >= 1 && cp->acw >= 1 && cp->ace >= 1,
		    ("Consumer %s not opened (r%dw%de%d).", cp->provider->name,
		    cp->acr, cp->acw, cp->ace));
		g_io_request(cbp, disk->d_consumer);
	}
}

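/*
 * GEOM start method: queue BIO_READ, BIO_WRITE and BIO_DELETE requests
 * for the worker thread, handle BIO_FLUSH inline and reject the rest.
 */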
static void
g_raid3_start(struct bio *bp)
{
	struct g_raid3_softc *sc;

	sc = bp->bio_to->geom->softc;
	/*
	 * If sc == NULL or there are no valid disks, the provider's error
	 * should be set and g_raid3_start() should not be called at all.
	 */
	KASSERT(sc != NULL && (sc->sc_state == G_RAID3_DEVICE_STATE_DEGRADED ||
	    sc->sc_state == G_RAID3_DEVICE_STATE_COMPLETE),
	    ("Provider's error should be set (error=%d)(device=%s).",
	    bp->bio_to->error, bp->bio_to->name));
	G_RAID3_LOGREQ(3, bp, "Request received.");

	switch (bp->bio_cmd) {
	case BIO_READ:
	case BIO_WRITE:
	case BIO_DELETE:
		break;
	case BIO_FLUSH:
		g_raid3_flush(sc, bp);
		return;
	case BIO_GETATTR:
	default:
		g_io_deliver(bp, EOPNOTSUPP);
		return;
	}
	mtx_lock(&sc->sc_queue_mtx);
	bioq_insert_tail(&sc->sc_queue, bp);
	mtx_unlock(&sc->sc_queue_mtx);
	G_RAID3_DEBUG(4, "%s: Waking up %p.", __func__, sc);
	wakeup(sc);
}

/*
 * Return TRUE if the given request collides with an in-progress
 * synchronization request.
 */
static int
g_raid3_sync_collision(struct g_raid3_softc *sc, struct bio *bp)
{
	struct g_raid3_disk *disk;
	struct bio *sbp;
	off_t rstart, rend, sstart, send;
	int i;

	disk = sc->sc_syncdisk;
	if (disk == NULL)
		return (0);
	rstart = bp->bio_offset;
	rend = bp->bio_offset + bp->bio_length;
	for (i = 0; i < g_raid3_syncreqs; i++) {
		sbp = disk->d_sync.ds_bios[i];
		if (sbp == NULL)
			continue;
		sstart = sbp->bio_offset;
		send = sbp->bio_length;
		if (sbp->bio_cmd == BIO_WRITE) {
			sstart *= sc->sc_ndisks - 1;
			send *= sc->sc_ndisks - 1;
		}
		send += sstart;
		if (rend > sstart && rstart < send)
			return (1);
	}
	return (0);
}

/*
 * Return TRUE if the given sync request collides with an in-progress
 * regular request.
 */
static int
g_raid3_regular_collision(struct g_raid3_softc *sc, struct bio *sbp)
{
	off_t rstart, rend, sstart, send;
	struct bio *bp;

	if (sc->sc_syncdisk == NULL)
		return (0);
	sstart = sbp->bio_offset;
	send = sstart + sbp->bio_length;
	TAILQ_FOREACH(bp, &sc->sc_inflight.queue, bio_queue) {
		rstart = bp->bio_offset;
		rend = bp->bio_offset + bp->bio_length;
		if (rend > sstart && rstart < send)
			return (1);
	}
	return (0);
}

/*
 * Puts a regular request onto the delayed queue.
 */
static void
g_raid3_regular_delay(struct g_raid3_softc *sc, struct bio *bp)
{

	G_RAID3_LOGREQ(2, bp, "Delaying request.");
	bioq_insert_head(&sc->sc_regular_delayed, bp);
}

/*
 * Puts a synchronization request onto the delayed queue.
 */
static void
g_raid3_sync_delay(struct g_raid3_softc *sc, struct bio *bp)
{

	G_RAID3_LOGREQ(2, bp, "Delaying synchronization request.");
	bioq_insert_tail(&sc->sc_sync_delayed, bp);
}

/*
 * Releases delayed regular requests which no longer collide with
 * synchronization requests.
 */
static void
g_raid3_regular_release(struct g_raid3_softc *sc)
{
	struct bio *bp, *bp2;

	TAILQ_FOREACH_SAFE(bp, &sc->sc_regular_delayed.queue, bio_queue, bp2) {
		if (g_raid3_sync_collision(sc, bp))
			continue;
		bioq_remove(&sc->sc_regular_delayed, bp);
		G_RAID3_LOGREQ(2, bp, "Releasing delayed request (%p).", bp);
		mtx_lock(&sc->sc_queue_mtx);
		bioq_insert_head(&sc->sc_queue, bp);
#if 0
		/*
		 * wakeup() is not needed, because this function is called from
		 * the worker thread.
		 */
		wakeup(&sc->sc_queue);
#endif
		mtx_unlock(&sc->sc_queue_mtx);
	}
}

/*
 * Releases delayed synchronization requests which no longer collide with
 * regular requests.
 */
static void
g_raid3_sync_release(struct g_raid3_softc *sc)
{
	struct bio *bp, *bp2;

	TAILQ_FOREACH_SAFE(bp, &sc->sc_sync_delayed.queue, bio_queue, bp2) {
		if (g_raid3_regular_collision(sc, bp))
			continue;
		bioq_remove(&sc->sc_sync_delayed, bp);
		G_RAID3_LOGREQ(2, bp,
		    "Releasing delayed synchronization request.");
		g_io_request(bp, bp->bio_from);
	}
}

/*
 * Handle synchronization requests.
 * Every synchronization request is a two-step process: first, a READ
 * request is sent to the active provider, and then a WRITE request (with
 * the data just read) is sent to the provider being synchronized. When
 * the WRITE is finished, a new synchronization request is sent.
 */
static void
g_raid3_sync_request(struct bio *bp)
{
	struct g_raid3_softc *sc;
	struct g_raid3_disk *disk;

	bp->bio_from->index--;
	sc = bp->bio_from->geom->softc;
	disk = bp->bio_from->private;
	if (disk == NULL) {
		sx_xunlock(&sc->sc_lock); /* Avoid recursion on sc_lock. */
		g_topology_lock();
		g_raid3_kill_consumer(sc, bp->bio_from);
		g_topology_unlock();
		free(bp->bio_data, M_RAID3);
		g_destroy_bio(bp);
		sx_xlock(&sc->sc_lock);
		return;
	}

	/*
	 * Synchronization request.
	 */
	switch (bp->bio_cmd) {
	case BIO_READ:
	    {
		struct g_consumer *cp;
		u_char *dst, *src;
		off_t left;
		u_int atom;

		if (bp->bio_error != 0) {
			G_RAID3_LOGREQ(0, bp,
			    "Synchronization request failed (error=%d).",
			    bp->bio_error);
			g_destroy_bio(bp);
			return;
		}
		G_RAID3_LOGREQ(3, bp, "Synchronization request finished.");
		atom = sc->sc_sectorsize / (sc->sc_ndisks - 1);
		dst = src = bp->bio_data;
		if (disk->d_no == sc->sc_ndisks - 1) {
			u_int n;

			/* Parity component. */
			for (left = bp->bio_length; left > 0;
			    left -= sc->sc_sectorsize) {
				bcopy(src, dst, atom);
				src += atom;
				for (n = 1; n < sc->sc_ndisks - 1; n++) {
					g_raid3_xor(src, dst, atom);
					src += atom;
				}
				dst += atom;
			}
		} else {
			/* Regular component. */
			src += atom * disk->d_no;
			for (left = bp->bio_length; left > 0;
			    left -= sc->sc_sectorsize) {
				bcopy(src, dst, atom);
				src += sc->sc_sectorsize;
				dst += atom;
			}
		}
		bp->bio_driver1 = bp->bio_driver2 = NULL;
		bp->bio_pflags = 0;
		bp->bio_offset /= sc->sc_ndisks - 1;
		bp->bio_length /= sc->sc_ndisks - 1;
		bp->bio_cmd = BIO_WRITE;
		bp->bio_cflags = 0;
		bp->bio_children = bp->bio_inbed = 0;
		cp = disk->d_consumer;
		KASSERT(cp->acr >= 1 && cp->acw >= 1 && cp->ace >= 1,
		    ("Consumer %s not opened (r%dw%de%d).", cp->provider->name,
		    cp->acr, cp->acw, cp->ace));
		cp->index++;
		g_io_request(bp, cp);
		return;
	    }
	case BIO_WRITE:
	    {
		struct g_raid3_disk_sync *sync;
		off_t boffset, moffset;
		void *data;
		int i;

		if (bp->bio_error != 0) {
			G_RAID3_LOGREQ(0, bp,
			    "Synchronization request failed (error=%d).",
			    bp->bio_error);
			g_destroy_bio(bp);
			sc->sc_bump_id |= G_RAID3_BUMP_GENID;
			g_raid3_event_send(disk,
			    G_RAID3_DISK_STATE_DISCONNECTED,
			    G_RAID3_EVENT_DONTWAIT);
			return;
		}
		G_RAID3_LOGREQ(3, bp, "Synchronization request finished.");
		sync = &disk->d_sync;
		if (sync->ds_offset == sc->sc_mediasize / (sc->sc_ndisks - 1) ||
		    sync->ds_consumer == NULL ||
		    (sc->sc_flags & G_RAID3_DEVICE_FLAG_DESTROY) != 0) {
			/* Don't send more synchronization requests. */
			sync->ds_inflight--;
			if (sync->ds_bios != NULL) {
				i = (int)(uintptr_t)bp->bio_caller1;
				sync->ds_bios[i] = NULL;
			}
			free(bp->bio_data, M_RAID3);
			g_destroy_bio(bp);
			if (sync->ds_inflight > 0)
				return;
			if (sync->ds_consumer == NULL ||
			    (sc->sc_flags & G_RAID3_DEVICE_FLAG_DESTROY) != 0) {
				return;
			}
			/*
			 * Disk up-to-date, activate it.
			 */
			g_raid3_event_send(disk, G_RAID3_DISK_STATE_ACTIVE,
			    G_RAID3_EVENT_DONTWAIT);
			return;
		}

		/* Send next synchronization request. */
		data = bp->bio_data;
		bzero(bp, sizeof(*bp));
		bp->bio_cmd = BIO_READ;
		bp->bio_offset = sync->ds_offset * (sc->sc_ndisks - 1);
		bp->bio_length = MIN(MAXPHYS, sc->sc_mediasize - bp->bio_offset);
		sync->ds_offset += bp->bio_length / (sc->sc_ndisks - 1);
		bp->bio_done = g_raid3_sync_done;
		bp->bio_data = data;
		bp->bio_from = sync->ds_consumer;
		bp->bio_to = sc->sc_provider;
		G_RAID3_LOGREQ(3, bp, "Sending synchronization request.");
		sync->ds_consumer->index++;
		/*
		 * Delay the request if it is colliding with a regular request.
		 */
		if (g_raid3_regular_collision(sc, bp))
			g_raid3_sync_delay(sc, bp);
		else
			g_io_request(bp, sync->ds_consumer);

		/* Release delayed requests if possible. */
		g_raid3_regular_release(sc);

		/* Find the smallest offset. */
		moffset = sc->sc_mediasize;
		for (i = 0; i < g_raid3_syncreqs; i++) {
			bp = sync->ds_bios[i];
			boffset = bp->bio_offset;
			if (bp->bio_cmd == BIO_WRITE)
				boffset *= sc->sc_ndisks - 1;
			if (boffset < moffset)
				moffset = boffset;
		}
		if (sync->ds_offset_done + (MAXPHYS * 100) < moffset) {
			/* Update offset_done on every 100 blocks. */
			sync->ds_offset_done = moffset;
			g_raid3_update_metadata(disk);
		}
		return;
	    }
	default:
		KASSERT(1 == 0, ("Invalid command here: %u (device=%s)",
		    bp->bio_cmd, sc->sc_name));
		break;
	}
}

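/*
 * Split a regular request into per-component requests and send them to
 * the components.
 */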
static int
g_raid3_register_request(struct bio *pbp)
{
	struct g_raid3_softc *sc;
	struct g_raid3_disk *disk;
	struct g_consumer *cp;
	struct bio *cbp, *tmpbp;
	off_t offset, length;
	u_int n, ndisks;
	int round_robin, verify;

	ndisks = 0;
	sc = pbp->bio_to->geom->softc;
	if ((pbp->bio_cflags & G_RAID3_BIO_CFLAG_REGSYNC) != 0 &&
	    sc->sc_syncdisk == NULL) {
		g_io_deliver(pbp, EIO);
		return (0);
	}
	g_raid3_init_bio(pbp);
	length = pbp->bio_length / (sc->sc_ndisks - 1);
	offset = pbp->bio_offset / (sc->sc_ndisks - 1);
	round_robin = verify = 0;
	switch (pbp->bio_cmd) {
	case BIO_READ:
		if ((sc->sc_flags & G_RAID3_DEVICE_FLAG_VERIFY) != 0 &&
		    sc->sc_state == G_RAID3_DEVICE_STATE_COMPLETE) {
			pbp->bio_pflags |= G_RAID3_BIO_PFLAG_VERIFY;
			verify = 1;
			ndisks = sc->sc_ndisks;
		} else {
			verify = 0;
			ndisks = sc->sc_ndisks - 1;
		}
		if ((sc->sc_flags & G_RAID3_DEVICE_FLAG_ROUND_ROBIN) != 0 &&
		    sc->sc_state == G_RAID3_DEVICE_STATE_COMPLETE) {
			round_robin = 1;
		} else {
			round_robin = 0;
		}
		KASSERT(!round_robin || !verify,
		    ("ROUND-ROBIN and VERIFY are mutually exclusive."));
		pbp->bio_driver2 = &sc->sc_disks[sc->sc_ndisks - 1];
		break;
	case BIO_WRITE:
	case BIO_DELETE:
		/*
		 * Delay the request if it is colliding with a synchronization
		 * request.
		 */
		if (g_raid3_sync_collision(sc, pbp)) {
			g_raid3_regular_delay(sc, pbp);
			return (0);
		}

		if (sc->sc_idle)
			g_raid3_unidle(sc);
		else
			sc->sc_last_write = time_uptime;

		ndisks = sc->sc_ndisks;
		break;
	}
	for (n = 0; n < ndisks; n++) {
		disk = &sc->sc_disks[n];
		cbp = g_raid3_clone_bio(sc, pbp);
		if (cbp == NULL) {
			while ((cbp = G_RAID3_HEAD_BIO(pbp)) != NULL)
				g_raid3_destroy_bio(sc, cbp);
			/*
			 * To prevent deadlock, we must report ENOMEM back up
			 * for failed requests from any of our consumers.
			 * Our own sync requests can stick around, as they are
			 * finite.
			 */
			if ((pbp->bio_cflags &
			    G_RAID3_BIO_CFLAG_REGULAR) != 0) {
				g_io_deliver(pbp, ENOMEM);
				return (0);
			}
			return (ENOMEM);
		}
		cbp->bio_offset = offset;
		cbp->bio_length = length;
		cbp->bio_done = g_raid3_done;
		switch (pbp->bio_cmd) {
		case BIO_READ:
			if (disk->d_state != G_RAID3_DISK_STATE_ACTIVE) {
				/*
				 * Replace invalid component with the parity
				 * component.
				 */
				disk = &sc->sc_disks[sc->sc_ndisks - 1];
				cbp->bio_cflags |= G_RAID3_BIO_CFLAG_PARITY;
				pbp->bio_pflags |= G_RAID3_BIO_PFLAG_DEGRADED;
			} else if (round_robin &&
			    disk->d_no == sc->sc_round_robin) {
				/*
				 * In round-robin mode skip one data component
				 * and use parity component when reading.
				 */
				pbp->bio_driver2 = disk;
				disk = &sc->sc_disks[sc->sc_ndisks - 1];
				cbp->bio_cflags |= G_RAID3_BIO_CFLAG_PARITY;
				sc->sc_round_robin++;
				round_robin = 0;
			} else if (verify && disk->d_no == sc->sc_ndisks - 1) {
				cbp->bio_cflags |= G_RAID3_BIO_CFLAG_PARITY;
			}
			break;
		case BIO_WRITE:
		case BIO_DELETE:
			if (disk->d_state == G_RAID3_DISK_STATE_ACTIVE ||
			    disk->d_state == G_RAID3_DISK_STATE_SYNCHRONIZING) {
				if (n == ndisks - 1) {
					/*
					 * Active parity component, mark it as such.
					 */
					cbp->bio_cflags |=
					    G_RAID3_BIO_CFLAG_PARITY;
				}
			} else {
				pbp->bio_pflags |= G_RAID3_BIO_PFLAG_DEGRADED;
				if (n == ndisks - 1) {
					/*
					 * Parity component is not connected,
					 * so destroy its request.
					 */
					pbp->bio_pflags |=
					    G_RAID3_BIO_PFLAG_NOPARITY;
					g_raid3_destroy_bio(sc, cbp);
					cbp = NULL;
				} else {
					cbp->bio_cflags |=
					    G_RAID3_BIO_CFLAG_NODISK;
					disk = NULL;
				}
			}
			break;
		}
		if (cbp != NULL)
			cbp->bio_caller2 = disk;
	}
	switch (pbp->bio_cmd) {
	case BIO_READ:
		if (round_robin) {
			/*
			 * If we are in round-robin mode and 'round_robin' is
			 * still 1, it means we skipped the parity component
			 * for this read and must reset the sc_round_robin
			 * field.
			 */
			sc->sc_round_robin = 0;
		}
		G_RAID3_FOREACH_SAFE_BIO(pbp, cbp, tmpbp) {
			disk = cbp->bio_caller2;
			cp = disk->d_consumer;
			cbp->bio_to = cp->provider;
			G_RAID3_LOGREQ(3, cbp, "Sending request.");
			KASSERT(cp->acr >= 1 && cp->acw >= 1 && cp->ace >= 1,
			    ("Consumer %s not opened (r%dw%de%d).",
			    cp->provider->name, cp->acr, cp->acw, cp->ace));
			cp->index++;
			g_io_request(cbp, cp);
		}
		break;
	case BIO_WRITE:
	case BIO_DELETE:
		/*
		 * Put the request onto the inflight queue, so we can check
		 * whether new synchronization requests collide with it.
		 */
		bioq_insert_tail(&sc->sc_inflight, pbp);

		/*
		 * Bump syncid on first write.
		 */
		if ((sc->sc_bump_id & G_RAID3_BUMP_SYNCID) != 0) {
			sc->sc_bump_id &= ~G_RAID3_BUMP_SYNCID;
			g_raid3_bump_syncid(sc);
		}
		g_raid3_scatter(pbp);
		break;
	}
	return (0);
}

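/*
 * Return true when no consumer of the device geom or of its
 * synchronization geom is busy.
 */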
static int
g_raid3_can_destroy(struct g_raid3_softc *sc)
{
	struct g_geom *gp;
	struct g_consumer *cp;

	g_topology_assert();
	gp = sc->sc_geom;
	if (gp->softc == NULL)
		return (1);
	LIST_FOREACH(cp, &gp->consumer, consumer) {
		if (g_raid3_is_busy(sc, cp))
			return (0);
	}
	gp = sc->sc_sync.ds_geom;
	LIST_FOREACH(cp, &gp->consumer, consumer) {
		if (g_raid3_is_busy(sc, cp))
			return (0);
	}
	G_RAID3_DEBUG(2, "No I/O requests for %s, it can be destroyed.",
	    sc->sc_name);
	return (1);
}

1985 static int
1986 g_raid3_try_destroy(struct g_raid3_softc *sc)
1987 {
1988
1989 g_topology_assert_not();
1990 sx_assert(&sc->sc_lock, SX_XLOCKED);
1991
1992 if (sc->sc_rootmount != NULL) {
1993 G_RAID3_DEBUG(1, "root_mount_rel[%u] %p", __LINE__,
1994 sc->sc_rootmount);
1995 root_mount_rel(sc->sc_rootmount);
1996 sc->sc_rootmount = NULL;
1997 }
1998
1999 g_topology_lock();
2000 if (!g_raid3_can_destroy(sc)) {
2001 g_topology_unlock();
2002 return (0);
2003 }
2004 sc->sc_geom->softc = NULL;
2005 sc->sc_sync.ds_geom->softc = NULL;
2006 if ((sc->sc_flags & G_RAID3_DEVICE_FLAG_WAIT) != 0) {
2007 g_topology_unlock();
2008 G_RAID3_DEBUG(4, "%s: Waking up %p.", __func__,
2009 &sc->sc_worker);
2010 /* Unlock sc_lock here, as it can be destroyed after wakeup. */
2011 sx_xunlock(&sc->sc_lock);
2012 wakeup(&sc->sc_worker);
2013 sc->sc_worker = NULL;
2014 } else {
2015 g_topology_unlock();
2016 g_raid3_destroy_device(sc);
2017 free(sc->sc_disks, M_RAID3);
2018 free(sc, M_RAID3);
2019 }
2020 return (1);
2021 }
2022
2023 /*
2024 * Worker thread.
2025 */
2026 static void
2027 g_raid3_worker(void *arg)
2028 {
2029 struct g_raid3_softc *sc;
2030 struct g_raid3_event *ep;
2031 struct bio *bp;
2032 int timeout;
2033
2034 sc = arg;
2035 thread_lock(curthread);
2036 sched_prio(curthread, PRIBIO);
2037 thread_unlock(curthread);
2038
2039 sx_xlock(&sc->sc_lock);
2040 for (;;) {
2041 G_RAID3_DEBUG(5, "%s: Let's see...", __func__);
2042 /*
2043 * First take a look at events.
2044 * This is important to handle events before any I/O requests.
2045 */
2046 ep = g_raid3_event_get(sc);
2047 if (ep != NULL) {
2048 g_raid3_event_remove(sc, ep);
2049 if ((ep->e_flags & G_RAID3_EVENT_DEVICE) != 0) {
2050 /* Update only device status. */
2051 G_RAID3_DEBUG(3,
2052 "Running event for device %s.",
2053 sc->sc_name);
2054 ep->e_error = 0;
2055 g_raid3_update_device(sc, 1);
2056 } else {
2057 /* Update disk status. */
2058 G_RAID3_DEBUG(3, "Running event for disk %s.",
2059 g_raid3_get_diskname(ep->e_disk));
2060 ep->e_error = g_raid3_update_disk(ep->e_disk,
2061 ep->e_state);
2062 if (ep->e_error == 0)
2063 g_raid3_update_device(sc, 0);
2064 }
2065 if ((ep->e_flags & G_RAID3_EVENT_DONTWAIT) != 0) {
2066 KASSERT(ep->e_error == 0,
2067 ("Error cannot be handled."));
2068 g_raid3_event_free(ep);
2069 } else {
2070 ep->e_flags |= G_RAID3_EVENT_DONE;
2071 G_RAID3_DEBUG(4, "%s: Waking up %p.", __func__,
2072 ep);
2073 mtx_lock(&sc->sc_events_mtx);
2074 wakeup(ep);
2075 mtx_unlock(&sc->sc_events_mtx);
2076 }
2077 if ((sc->sc_flags &
2078 G_RAID3_DEVICE_FLAG_DESTROY) != 0) {
2079 if (g_raid3_try_destroy(sc)) {
2080 curthread->td_pflags &= ~TDP_GEOM;
2081 G_RAID3_DEBUG(1, "Thread exiting.");
2082 kproc_exit(0);
2083 }
2084 }
2085 G_RAID3_DEBUG(5, "%s: I'm here 1.", __func__);
2086 continue;
2087 }
2088 /*
2089 		 * Check whether we can mark the array as CLEAN and, if we
2090 		 * cannot, how many seconds we should wait.
2091 */
2092 timeout = g_raid3_idle(sc, -1);
2093 /*
2094 * Now I/O requests.
2095 */
2096 /* Get first request from the queue. */
2097 mtx_lock(&sc->sc_queue_mtx);
2098 bp = bioq_first(&sc->sc_queue);
2099 if (bp == NULL) {
2100 if ((sc->sc_flags &
2101 G_RAID3_DEVICE_FLAG_DESTROY) != 0) {
2102 mtx_unlock(&sc->sc_queue_mtx);
2103 if (g_raid3_try_destroy(sc)) {
2104 curthread->td_pflags &= ~TDP_GEOM;
2105 G_RAID3_DEBUG(1, "Thread exiting.");
2106 kproc_exit(0);
2107 }
2108 mtx_lock(&sc->sc_queue_mtx);
2109 }
2110 sx_xunlock(&sc->sc_lock);
2111 /*
2112 			 * XXX: We can miss an event here, because an event
2113 			 * can be added without the sx-device-lock and without
2114 			 * the mtx-queue-lock. Maybe I should just stop using a
2115 			 * dedicated mutex for event synchronization and
2116 			 * stick with the queue lock?
2117 			 * The event will hang here until the next I/O request
2118 			 * or the next event is received.
2119 */
2120 MSLEEP(sc, &sc->sc_queue_mtx, PRIBIO | PDROP, "r3:w1",
2121 timeout * hz);
2122 sx_xlock(&sc->sc_lock);
2123 G_RAID3_DEBUG(5, "%s: I'm here 4.", __func__);
2124 continue;
2125 }
2126 process:
2127 bioq_remove(&sc->sc_queue, bp);
2128 mtx_unlock(&sc->sc_queue_mtx);
2129
2130 if (bp->bio_from->geom == sc->sc_sync.ds_geom &&
2131 (bp->bio_cflags & G_RAID3_BIO_CFLAG_SYNC) != 0) {
2132 g_raid3_sync_request(bp); /* READ */
2133 } else if (bp->bio_to != sc->sc_provider) {
2134 if ((bp->bio_cflags & G_RAID3_BIO_CFLAG_REGULAR) != 0)
2135 g_raid3_regular_request(bp);
2136 else if ((bp->bio_cflags & G_RAID3_BIO_CFLAG_SYNC) != 0)
2137 g_raid3_sync_request(bp); /* WRITE */
2138 else {
2139 KASSERT(0,
2140 ("Invalid request cflags=0x%hhx to=%s.",
2141 bp->bio_cflags, bp->bio_to->name));
2142 }
2143 } else if (g_raid3_register_request(bp) != 0) {
2144 mtx_lock(&sc->sc_queue_mtx);
2145 bioq_insert_head(&sc->sc_queue, bp);
2146 /*
2147 			 * We are short on memory, so let's see if there are
2148 			 * finished requests we can free.
2149 */
2150 TAILQ_FOREACH(bp, &sc->sc_queue.queue, bio_queue) {
2151 if (bp->bio_cflags & G_RAID3_BIO_CFLAG_REGULAR)
2152 goto process;
2153 }
2154 /*
2155 			 * No finished regular requests, so at least keep
2156 * synchronization running.
2157 */
2158 TAILQ_FOREACH(bp, &sc->sc_queue.queue, bio_queue) {
2159 if (bp->bio_cflags & G_RAID3_BIO_CFLAG_SYNC)
2160 goto process;
2161 }
2162 sx_xunlock(&sc->sc_lock);
2163 MSLEEP(&sc->sc_queue, &sc->sc_queue_mtx, PRIBIO | PDROP,
2164 "r3:lowmem", hz / 10);
2165 sx_xlock(&sc->sc_lock);
2166 }
2167 G_RAID3_DEBUG(5, "%s: I'm here 9.", __func__);
2168 }
2169 }
2170
2171 static void
2172 g_raid3_update_idle(struct g_raid3_softc *sc, struct g_raid3_disk *disk)
2173 {
2174
2175 sx_assert(&sc->sc_lock, SX_LOCKED);
2176 if ((sc->sc_flags & G_RAID3_DEVICE_FLAG_NOFAILSYNC) != 0)
2177 return;
2178 if (!sc->sc_idle && (disk->d_flags & G_RAID3_DISK_FLAG_DIRTY) == 0) {
2179 G_RAID3_DEBUG(1, "Disk %s (device %s) marked as dirty.",
2180 g_raid3_get_diskname(disk), sc->sc_name);
2181 disk->d_flags |= G_RAID3_DISK_FLAG_DIRTY;
2182 } else if (sc->sc_idle &&
2183 (disk->d_flags & G_RAID3_DISK_FLAG_DIRTY) != 0) {
2184 G_RAID3_DEBUG(1, "Disk %s (device %s) marked as clean.",
2185 g_raid3_get_diskname(disk), sc->sc_name);
2186 disk->d_flags &= ~G_RAID3_DISK_FLAG_DIRTY;
2187 }
2188 }
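
/*
 * g_raid3_update_idle() reconciles a component's DIRTY flag with the
 * device's idle state: components are marked dirty while the device is
 * not idle and clean again once it is, unless NOFAILSYNC is set.
 */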
2189
2190 static void
2191 g_raid3_sync_start(struct g_raid3_softc *sc)
2192 {
2193 struct g_raid3_disk *disk;
2194 struct g_consumer *cp;
2195 struct bio *bp;
2196 int error;
2197 u_int n;
2198
2199 g_topology_assert_not();
2200 sx_assert(&sc->sc_lock, SX_XLOCKED);
2201
2202 KASSERT(sc->sc_state == G_RAID3_DEVICE_STATE_DEGRADED,
2203 ("Device not in DEGRADED state (%s, %u).", sc->sc_name,
2204 sc->sc_state));
2205 KASSERT(sc->sc_syncdisk == NULL, ("Syncdisk is not NULL (%s, %u).",
2206 sc->sc_name, sc->sc_state));
2207 disk = NULL;
2208 for (n = 0; n < sc->sc_ndisks; n++) {
2209 if (sc->sc_disks[n].d_state != G_RAID3_DISK_STATE_SYNCHRONIZING)
2210 continue;
2211 disk = &sc->sc_disks[n];
2212 break;
2213 }
2214 if (disk == NULL)
2215 return;
2216
2217 sx_xunlock(&sc->sc_lock);
2218 g_topology_lock();
2219 cp = g_new_consumer(sc->sc_sync.ds_geom);
2220 error = g_attach(cp, sc->sc_provider);
2221 KASSERT(error == 0,
2222 ("Cannot attach to %s (error=%d).", sc->sc_name, error));
2223 error = g_access(cp, 1, 0, 0);
2224 KASSERT(error == 0, ("Cannot open %s (error=%d).", sc->sc_name, error));
2225 g_topology_unlock();
2226 sx_xlock(&sc->sc_lock);
2227
2228 G_RAID3_DEBUG(0, "Device %s: rebuilding provider %s.", sc->sc_name,
2229 g_raid3_get_diskname(disk));
2230 if ((sc->sc_flags & G_RAID3_DEVICE_FLAG_NOFAILSYNC) == 0)
2231 disk->d_flags |= G_RAID3_DISK_FLAG_DIRTY;
2232 KASSERT(disk->d_sync.ds_consumer == NULL,
2233 ("Sync consumer already exists (device=%s, disk=%s).",
2234 sc->sc_name, g_raid3_get_diskname(disk)));
2235
2236 disk->d_sync.ds_consumer = cp;
2237 disk->d_sync.ds_consumer->private = disk;
2238 disk->d_sync.ds_consumer->index = 0;
2239 sc->sc_syncdisk = disk;
2240
2241 /*
2242 * Allocate memory for synchronization bios and initialize them.
2243 */
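	/*
	 * Note that ds_offset counts bytes on a single component, while
	 * bio_offset and bio_length are in provider byte space, which is
	 * sc_ndisks - 1 times larger.  Assuming the historical 128kB
	 * MAXPHYS and a 3-component array, each request covers 128kB of
	 * the provider and advances ds_offset by 64kB.
	 */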
2244 disk->d_sync.ds_bios = malloc(sizeof(struct bio *) * g_raid3_syncreqs,
2245 M_RAID3, M_WAITOK);
2246 for (n = 0; n < g_raid3_syncreqs; n++) {
2247 bp = g_alloc_bio();
2248 disk->d_sync.ds_bios[n] = bp;
2249 bp->bio_parent = NULL;
2250 bp->bio_cmd = BIO_READ;
2251 bp->bio_data = malloc(MAXPHYS, M_RAID3, M_WAITOK);
2252 bp->bio_cflags = 0;
2253 bp->bio_offset = disk->d_sync.ds_offset * (sc->sc_ndisks - 1);
2254 bp->bio_length = MIN(MAXPHYS, sc->sc_mediasize - bp->bio_offset);
2255 disk->d_sync.ds_offset += bp->bio_length / (sc->sc_ndisks - 1);
2256 bp->bio_done = g_raid3_sync_done;
2257 bp->bio_from = disk->d_sync.ds_consumer;
2258 bp->bio_to = sc->sc_provider;
2259 bp->bio_caller1 = (void *)(uintptr_t)n;
2260 }
2261
2262 /* Set the number of in-flight synchronization requests. */
2263 disk->d_sync.ds_inflight = g_raid3_syncreqs;
2264
2265 /*
2266 * Fire off first synchronization requests.
2267 */
2268 for (n = 0; n < g_raid3_syncreqs; n++) {
2269 bp = disk->d_sync.ds_bios[n];
2270 G_RAID3_LOGREQ(3, bp, "Sending synchronization request.");
2271 disk->d_sync.ds_consumer->index++;
2272 /*
2273 * Delay the request if it is colliding with a regular request.
2274 */
2275 if (g_raid3_regular_collision(sc, bp))
2276 g_raid3_sync_delay(sc, bp);
2277 else
2278 g_io_request(bp, disk->d_sync.ds_consumer);
2279 }
2280 }
2281
2282 /*
2283 * Stop synchronization process.
2284 * type: 0 - synchronization finished
2285 * 1 - synchronization stopped
2286 */
2287 static void
2288 g_raid3_sync_stop(struct g_raid3_softc *sc, int type)
2289 {
2290 struct g_raid3_disk *disk;
2291 struct g_consumer *cp;
2292
2293 g_topology_assert_not();
2294 sx_assert(&sc->sc_lock, SX_LOCKED);
2295
2296 KASSERT(sc->sc_state == G_RAID3_DEVICE_STATE_DEGRADED,
2297 ("Device not in DEGRADED state (%s, %u).", sc->sc_name,
2298 sc->sc_state));
2299 disk = sc->sc_syncdisk;
2300 sc->sc_syncdisk = NULL;
2301 KASSERT(disk != NULL, ("No disk was synchronized (%s).", sc->sc_name));
2302 KASSERT(disk->d_state == G_RAID3_DISK_STATE_SYNCHRONIZING,
2303 ("Wrong disk state (%s, %s).", g_raid3_get_diskname(disk),
2304 g_raid3_disk_state2str(disk->d_state)));
2305 if (disk->d_sync.ds_consumer == NULL)
2306 return;
2307
2308 if (type == 0) {
2309 G_RAID3_DEBUG(0, "Device %s: rebuilding provider %s finished.",
2310 sc->sc_name, g_raid3_get_diskname(disk));
2311 } else /* if (type == 1) */ {
2312 G_RAID3_DEBUG(0, "Device %s: rebuilding provider %s stopped.",
2313 sc->sc_name, g_raid3_get_diskname(disk));
2314 }
2315 free(disk->d_sync.ds_bios, M_RAID3);
2316 disk->d_sync.ds_bios = NULL;
2317 cp = disk->d_sync.ds_consumer;
2318 disk->d_sync.ds_consumer = NULL;
2319 disk->d_flags &= ~G_RAID3_DISK_FLAG_DIRTY;
2320 sx_xunlock(&sc->sc_lock); /* Avoid recursion on sc_lock. */
2321 g_topology_lock();
2322 g_raid3_kill_consumer(sc, cp);
2323 g_topology_unlock();
2324 sx_xlock(&sc->sc_lock);
2325 }
2326
2327 static void
2328 g_raid3_launch_provider(struct g_raid3_softc *sc)
2329 {
2330 struct g_provider *pp;
2331 struct g_raid3_disk *disk;
2332 int n;
2333
2334 sx_assert(&sc->sc_lock, SX_LOCKED);
2335
2336 g_topology_lock();
2337 pp = g_new_providerf(sc->sc_geom, "raid3/%s", sc->sc_name);
2338 pp->mediasize = sc->sc_mediasize;
2339 pp->sectorsize = sc->sc_sectorsize;
2340 pp->stripesize = 0;
2341 pp->stripeoffset = 0;
2342 for (n = 0; n < sc->sc_ndisks; n++) {
2343 disk = &sc->sc_disks[n];
2344 if (disk->d_consumer && disk->d_consumer->provider &&
2345 disk->d_consumer->provider->stripesize > pp->stripesize) {
2346 pp->stripesize = disk->d_consumer->provider->stripesize;
2347 pp->stripeoffset = disk->d_consumer->provider->stripeoffset;
2348 }
2349 }
2350 pp->stripesize *= sc->sc_ndisks - 1;
2351 pp->stripeoffset *= sc->sc_ndisks - 1;
2352 sc->sc_provider = pp;
2353 g_error_provider(pp, 0);
2354 g_topology_unlock();
2355 G_RAID3_DEBUG(0, "Device %s launched (%u/%u).", pp->name,
2356 g_raid3_ndisks(sc, G_RAID3_DISK_STATE_ACTIVE), sc->sc_ndisks);
2357
2358 if (sc->sc_state == G_RAID3_DEVICE_STATE_DEGRADED)
2359 g_raid3_sync_start(sc);
2360 }
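
/*
 * An illustrative example of the stripesize math in
 * g_raid3_launch_provider(): a 5-component array (4 data + 1 parity)
 * whose members report a 64kB stripesize exports a provider with a
 * 256kB stripesize, since one provider-level stripe spans the
 * sc_ndisks - 1 data components.
 */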
2361
2362 static void
2363 g_raid3_destroy_provider(struct g_raid3_softc *sc)
2364 {
2365 struct bio *bp;
2366
2367 g_topology_assert_not();
2368 KASSERT(sc->sc_provider != NULL, ("NULL provider (device=%s).",
2369 sc->sc_name));
2370
2371 g_topology_lock();
2372 g_error_provider(sc->sc_provider, ENXIO);
2373 mtx_lock(&sc->sc_queue_mtx);
2374 while ((bp = bioq_first(&sc->sc_queue)) != NULL) {
2375 bioq_remove(&sc->sc_queue, bp);
2376 g_io_deliver(bp, ENXIO);
2377 }
2378 mtx_unlock(&sc->sc_queue_mtx);
2379 G_RAID3_DEBUG(0, "Device %s: provider %s destroyed.", sc->sc_name,
2380 sc->sc_provider->name);
2381 sc->sc_provider->flags |= G_PF_WITHER;
2382 g_orphan_provider(sc->sc_provider, ENXIO);
2383 g_topology_unlock();
2384 sc->sc_provider = NULL;
2385 if (sc->sc_syncdisk != NULL)
2386 g_raid3_sync_stop(sc, 1);
2387 }
2388
2389 static void
2390 g_raid3_go(void *arg)
2391 {
2392 struct g_raid3_softc *sc;
2393
2394 sc = arg;
2395 G_RAID3_DEBUG(0, "Force device %s start due to timeout.", sc->sc_name);
2396 g_raid3_event_send(sc, 0,
2397 G_RAID3_EVENT_DONTWAIT | G_RAID3_EVENT_DEVICE);
2398 }
2399
2400 static u_int
2401 g_raid3_determine_state(struct g_raid3_disk *disk)
2402 {
2403 struct g_raid3_softc *sc;
2404 u_int state;
2405
2406 sc = disk->d_softc;
2407 if (sc->sc_syncid == disk->d_sync.ds_syncid) {
2408 if ((disk->d_flags &
2409 G_RAID3_DISK_FLAG_SYNCHRONIZING) == 0) {
2410 /* Disk does not need synchronization. */
2411 state = G_RAID3_DISK_STATE_ACTIVE;
2412 } else {
2413 if ((sc->sc_flags &
2414 G_RAID3_DEVICE_FLAG_NOAUTOSYNC) == 0 ||
2415 (disk->d_flags &
2416 G_RAID3_DISK_FLAG_FORCE_SYNC) != 0) {
2417 /*
2418 * We can start synchronization from
2419 * the stored offset.
2420 */
2421 state = G_RAID3_DISK_STATE_SYNCHRONIZING;
2422 } else {
2423 state = G_RAID3_DISK_STATE_STALE;
2424 }
2425 }
2426 } else if (disk->d_sync.ds_syncid < sc->sc_syncid) {
2427 /*
2428 		 * Reset all synchronization data for this disk,
2429 		 * because even if it was synchronized, it was
2430 		 * synchronized against disks with a different syncid.
2431 */
2432 disk->d_flags |= G_RAID3_DISK_FLAG_SYNCHRONIZING;
2433 disk->d_sync.ds_offset = 0;
2434 disk->d_sync.ds_offset_done = 0;
2435 disk->d_sync.ds_syncid = sc->sc_syncid;
2436 if ((sc->sc_flags & G_RAID3_DEVICE_FLAG_NOAUTOSYNC) == 0 ||
2437 (disk->d_flags & G_RAID3_DISK_FLAG_FORCE_SYNC) != 0) {
2438 state = G_RAID3_DISK_STATE_SYNCHRONIZING;
2439 } else {
2440 state = G_RAID3_DISK_STATE_STALE;
2441 }
2442 } else /* if (sc->sc_syncid < disk->d_sync.ds_syncid) */ {
2443 /*
2444 		 * Not good, NOT GOOD!
2445 		 * It means that the device was started on stale disks
2446 		 * and a fresher disk has just arrived.
2447 		 * If there were writes, the device is broken, sorry.
2448 		 * The best choice here is to leave this disk alone
2449 		 * and inform the user loudly.
2450 */
2451 G_RAID3_DEBUG(0, "Device %s was started before the freshest "
2452 		    "disk (%s) arrived! It will not be connected to the "
2453 "running device.", sc->sc_name,
2454 g_raid3_get_diskname(disk));
2455 g_raid3_destroy_disk(disk);
2456 state = G_RAID3_DISK_STATE_NONE;
2457 /* Return immediately, because disk was destroyed. */
2458 return (state);
2459 }
2460 G_RAID3_DEBUG(3, "State for %s disk: %s.",
2461 g_raid3_get_diskname(disk), g_raid3_disk_state2str(state));
2462 return (state);
2463 }
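
/*
 * In short, the state chosen by g_raid3_determine_state() depends on how
 * the component's syncid compares with the device's:
 *
 *	ds_syncid == sc_syncid: ACTIVE, or SYNCHRONIZING/STALE when the
 *		component is still marked for synchronization.
 *	ds_syncid <  sc_syncid: the component is out of date; restart its
 *		synchronization from offset 0 (SYNCHRONIZING, or STALE for
 *		NOAUTOSYNC devices without FORCE_SYNC).
 *	ds_syncid >  sc_syncid: the running device is staler than the
 *		component, so the disk is dropped.
 */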
2464
2465 /*
2466 * Update device state.
2467 */
2468 static void
2469 g_raid3_update_device(struct g_raid3_softc *sc, boolean_t force)
2470 {
2471 struct g_raid3_disk *disk;
2472 u_int state;
2473
2474 sx_assert(&sc->sc_lock, SX_XLOCKED);
2475
2476 switch (sc->sc_state) {
2477 case G_RAID3_DEVICE_STATE_STARTING:
2478 {
2479 u_int n, ndirty, ndisks, genid, syncid;
2480
2481 KASSERT(sc->sc_provider == NULL,
2482 ("Non-NULL provider in STARTING state (%s).", sc->sc_name));
2483 /*
2484 		 * Are we ready? We are if all disks are connected, or if
2485 		 * one disk is missing and 'force' is true.
2486 */
2487 if (g_raid3_ndisks(sc, -1) + force == sc->sc_ndisks) {
2488 if (!force)
2489 callout_drain(&sc->sc_callout);
2490 } else {
2491 if (force) {
2492 /*
2493 * Timeout expired, so destroy device.
2494 */
2495 sc->sc_flags |= G_RAID3_DEVICE_FLAG_DESTROY;
2496 G_RAID3_DEBUG(1, "root_mount_rel[%u] %p",
2497 __LINE__, sc->sc_rootmount);
2498 root_mount_rel(sc->sc_rootmount);
2499 sc->sc_rootmount = NULL;
2500 }
2501 return;
2502 }
2503
2504 /*
2505 * Find the biggest genid.
2506 */
2507 genid = 0;
2508 for (n = 0; n < sc->sc_ndisks; n++) {
2509 disk = &sc->sc_disks[n];
2510 if (disk->d_state == G_RAID3_DISK_STATE_NODISK)
2511 continue;
2512 if (disk->d_genid > genid)
2513 genid = disk->d_genid;
2514 }
2515 sc->sc_genid = genid;
2516 /*
2517 * Remove all disks without the biggest genid.
2518 */
2519 for (n = 0; n < sc->sc_ndisks; n++) {
2520 disk = &sc->sc_disks[n];
2521 if (disk->d_state == G_RAID3_DISK_STATE_NODISK)
2522 continue;
2523 if (disk->d_genid < genid) {
2524 G_RAID3_DEBUG(0,
2525 "Component %s (device %s) broken, skipping.",
2526 g_raid3_get_diskname(disk), sc->sc_name);
2527 g_raid3_destroy_disk(disk);
2528 }
2529 }
2530
2531 /*
2532 * There must be at least 'sc->sc_ndisks - 1' components
2533 * with the same syncid and without SYNCHRONIZING flag.
2534 */
2535
2536 /*
2537 * Find the biggest syncid, number of valid components and
2538 * number of dirty components.
2539 */
2540 ndirty = ndisks = syncid = 0;
2541 for (n = 0; n < sc->sc_ndisks; n++) {
2542 disk = &sc->sc_disks[n];
2543 if (disk->d_state == G_RAID3_DISK_STATE_NODISK)
2544 continue;
2545 if ((disk->d_flags & G_RAID3_DISK_FLAG_DIRTY) != 0)
2546 ndirty++;
2547 if (disk->d_sync.ds_syncid > syncid) {
2548 syncid = disk->d_sync.ds_syncid;
2549 ndisks = 0;
2550 } else if (disk->d_sync.ds_syncid < syncid) {
2551 continue;
2552 }
2553 if ((disk->d_flags &
2554 G_RAID3_DISK_FLAG_SYNCHRONIZING) != 0) {
2555 continue;
2556 }
2557 ndisks++;
2558 }
2559 /*
2560 * Do we have enough valid components?
2561 */
2562 if (ndisks + 1 < sc->sc_ndisks) {
2563 G_RAID3_DEBUG(0,
2564 "Device %s is broken, too few valid components.",
2565 sc->sc_name);
2566 sc->sc_flags |= G_RAID3_DEVICE_FLAG_DESTROY;
2567 return;
2568 }
2569 /*
2570 * If there is one DIRTY component and all disks are present,
2571 * mark it for synchronization. If there is more than one DIRTY
2572 * component, mark parity component for synchronization.
2573 */
2574 if (ndisks == sc->sc_ndisks && ndirty == 1) {
2575 for (n = 0; n < sc->sc_ndisks; n++) {
2576 disk = &sc->sc_disks[n];
2577 if ((disk->d_flags &
2578 G_RAID3_DISK_FLAG_DIRTY) == 0) {
2579 continue;
2580 }
2581 disk->d_flags |=
2582 G_RAID3_DISK_FLAG_SYNCHRONIZING;
2583 }
2584 } else if (ndisks == sc->sc_ndisks && ndirty > 1) {
2585 disk = &sc->sc_disks[sc->sc_ndisks - 1];
2586 disk->d_flags |= G_RAID3_DISK_FLAG_SYNCHRONIZING;
2587 }
2588
2589 sc->sc_syncid = syncid;
2590 if (force) {
2591 /* Remember to bump syncid on first write. */
2592 sc->sc_bump_id |= G_RAID3_BUMP_SYNCID;
2593 }
2594 if (ndisks == sc->sc_ndisks)
2595 state = G_RAID3_DEVICE_STATE_COMPLETE;
2596 else /* if (ndisks == sc->sc_ndisks - 1) */
2597 state = G_RAID3_DEVICE_STATE_DEGRADED;
2598 G_RAID3_DEBUG(1, "Device %s state changed from %s to %s.",
2599 sc->sc_name, g_raid3_device_state2str(sc->sc_state),
2600 g_raid3_device_state2str(state));
2601 sc->sc_state = state;
2602 for (n = 0; n < sc->sc_ndisks; n++) {
2603 disk = &sc->sc_disks[n];
2604 if (disk->d_state == G_RAID3_DISK_STATE_NODISK)
2605 continue;
2606 state = g_raid3_determine_state(disk);
2607 g_raid3_event_send(disk, state, G_RAID3_EVENT_DONTWAIT);
2608 if (state == G_RAID3_DISK_STATE_STALE)
2609 sc->sc_bump_id |= G_RAID3_BUMP_SYNCID;
2610 }
2611 break;
2612 }
2613 case G_RAID3_DEVICE_STATE_DEGRADED:
2614 /*
2615 		 * Genid needs to be bumped immediately, so do it here.
2616 */
2617 if ((sc->sc_bump_id & G_RAID3_BUMP_GENID) != 0) {
2618 sc->sc_bump_id &= ~G_RAID3_BUMP_GENID;
2619 g_raid3_bump_genid(sc);
2620 }
2621
2622 if (g_raid3_ndisks(sc, G_RAID3_DISK_STATE_NEW) > 0)
2623 return;
2624 if (g_raid3_ndisks(sc, G_RAID3_DISK_STATE_ACTIVE) <
2625 sc->sc_ndisks - 1) {
2626 if (sc->sc_provider != NULL)
2627 g_raid3_destroy_provider(sc);
2628 sc->sc_flags |= G_RAID3_DEVICE_FLAG_DESTROY;
2629 return;
2630 }
2631 if (g_raid3_ndisks(sc, G_RAID3_DISK_STATE_ACTIVE) ==
2632 sc->sc_ndisks) {
2633 state = G_RAID3_DEVICE_STATE_COMPLETE;
2634 G_RAID3_DEBUG(1,
2635 "Device %s state changed from %s to %s.",
2636 sc->sc_name, g_raid3_device_state2str(sc->sc_state),
2637 g_raid3_device_state2str(state));
2638 sc->sc_state = state;
2639 }
2640 if (sc->sc_provider == NULL)
2641 g_raid3_launch_provider(sc);
2642 if (sc->sc_rootmount != NULL) {
2643 G_RAID3_DEBUG(1, "root_mount_rel[%u] %p", __LINE__,
2644 sc->sc_rootmount);
2645 root_mount_rel(sc->sc_rootmount);
2646 sc->sc_rootmount = NULL;
2647 }
2648 break;
2649 case G_RAID3_DEVICE_STATE_COMPLETE:
2650 /*
2651 		 * Genid needs to be bumped immediately, so do it here.
2652 */
2653 if ((sc->sc_bump_id & G_RAID3_BUMP_GENID) != 0) {
2654 sc->sc_bump_id &= ~G_RAID3_BUMP_GENID;
2655 g_raid3_bump_genid(sc);
2656 }
2657
2658 if (g_raid3_ndisks(sc, G_RAID3_DISK_STATE_NEW) > 0)
2659 return;
2660 KASSERT(g_raid3_ndisks(sc, G_RAID3_DISK_STATE_ACTIVE) >=
2661 sc->sc_ndisks - 1,
2662 ("Too few ACTIVE components in COMPLETE state (device %s).",
2663 sc->sc_name));
2664 if (g_raid3_ndisks(sc, G_RAID3_DISK_STATE_ACTIVE) ==
2665 sc->sc_ndisks - 1) {
2666 state = G_RAID3_DEVICE_STATE_DEGRADED;
2667 G_RAID3_DEBUG(1,
2668 "Device %s state changed from %s to %s.",
2669 sc->sc_name, g_raid3_device_state2str(sc->sc_state),
2670 g_raid3_device_state2str(state));
2671 sc->sc_state = state;
2672 }
2673 if (sc->sc_provider == NULL)
2674 g_raid3_launch_provider(sc);
2675 if (sc->sc_rootmount != NULL) {
2676 G_RAID3_DEBUG(1, "root_mount_rel[%u] %p", __LINE__,
2677 sc->sc_rootmount);
2678 root_mount_rel(sc->sc_rootmount);
2679 sc->sc_rootmount = NULL;
2680 }
2681 break;
2682 default:
2683 KASSERT(1 == 0, ("Wrong device state (%s, %s).", sc->sc_name,
2684 g_raid3_device_state2str(sc->sc_state)));
2685 break;
2686 }
2687 }
2688
2689 /*
2690 * Update disk state and device state if needed.
2691 */
2692 #define DISK_STATE_CHANGED() G_RAID3_DEBUG(1, \
2693 "Disk %s state changed from %s to %s (device %s).", \
2694 g_raid3_get_diskname(disk), \
2695 g_raid3_disk_state2str(disk->d_state), \
2696 g_raid3_disk_state2str(state), sc->sc_name)
2697 static int
2698 g_raid3_update_disk(struct g_raid3_disk *disk, u_int state)
2699 {
2700 struct g_raid3_softc *sc;
2701
2702 sc = disk->d_softc;
2703 sx_assert(&sc->sc_lock, SX_XLOCKED);
2704
2705 again:
2706 G_RAID3_DEBUG(3, "Changing disk %s state from %s to %s.",
2707 g_raid3_get_diskname(disk), g_raid3_disk_state2str(disk->d_state),
2708 g_raid3_disk_state2str(state));
2709 switch (state) {
2710 case G_RAID3_DISK_STATE_NEW:
2711 /*
2712 * Possible scenarios:
2713 		 * 1. A new disk arrives.
2714 */
2715 /* Previous state should be NONE. */
2716 KASSERT(disk->d_state == G_RAID3_DISK_STATE_NONE,
2717 ("Wrong disk state (%s, %s).", g_raid3_get_diskname(disk),
2718 g_raid3_disk_state2str(disk->d_state)));
2719 DISK_STATE_CHANGED();
2720
2721 disk->d_state = state;
2722 G_RAID3_DEBUG(1, "Device %s: provider %s detected.",
2723 sc->sc_name, g_raid3_get_diskname(disk));
2724 if (sc->sc_state == G_RAID3_DEVICE_STATE_STARTING)
2725 break;
2726 KASSERT(sc->sc_state == G_RAID3_DEVICE_STATE_DEGRADED ||
2727 sc->sc_state == G_RAID3_DEVICE_STATE_COMPLETE,
2728 ("Wrong device state (%s, %s, %s, %s).", sc->sc_name,
2729 g_raid3_device_state2str(sc->sc_state),
2730 g_raid3_get_diskname(disk),
2731 g_raid3_disk_state2str(disk->d_state)));
2732 state = g_raid3_determine_state(disk);
2733 if (state != G_RAID3_DISK_STATE_NONE)
2734 goto again;
2735 break;
2736 case G_RAID3_DISK_STATE_ACTIVE:
2737 /*
2738 * Possible scenarios:
2739 * 1. New disk does not need synchronization.
2740 * 2. Synchronization process finished successfully.
2741 */
2742 KASSERT(sc->sc_state == G_RAID3_DEVICE_STATE_DEGRADED ||
2743 sc->sc_state == G_RAID3_DEVICE_STATE_COMPLETE,
2744 ("Wrong device state (%s, %s, %s, %s).", sc->sc_name,
2745 g_raid3_device_state2str(sc->sc_state),
2746 g_raid3_get_diskname(disk),
2747 g_raid3_disk_state2str(disk->d_state)));
2748 /* Previous state should be NEW or SYNCHRONIZING. */
2749 KASSERT(disk->d_state == G_RAID3_DISK_STATE_NEW ||
2750 disk->d_state == G_RAID3_DISK_STATE_SYNCHRONIZING,
2751 ("Wrong disk state (%s, %s).", g_raid3_get_diskname(disk),
2752 g_raid3_disk_state2str(disk->d_state)));
2753 DISK_STATE_CHANGED();
2754
2755 if (disk->d_state == G_RAID3_DISK_STATE_SYNCHRONIZING) {
2756 disk->d_flags &= ~G_RAID3_DISK_FLAG_SYNCHRONIZING;
2757 disk->d_flags &= ~G_RAID3_DISK_FLAG_FORCE_SYNC;
2758 g_raid3_sync_stop(sc, 0);
2759 }
2760 disk->d_state = state;
2761 disk->d_sync.ds_offset = 0;
2762 disk->d_sync.ds_offset_done = 0;
2763 g_raid3_update_idle(sc, disk);
2764 g_raid3_update_metadata(disk);
2765 G_RAID3_DEBUG(1, "Device %s: provider %s activated.",
2766 sc->sc_name, g_raid3_get_diskname(disk));
2767 break;
2768 case G_RAID3_DISK_STATE_STALE:
2769 /*
2770 * Possible scenarios:
2771 * 1. Stale disk was connected.
2772 */
2773 /* Previous state should be NEW. */
2774 KASSERT(disk->d_state == G_RAID3_DISK_STATE_NEW,
2775 ("Wrong disk state (%s, %s).", g_raid3_get_diskname(disk),
2776 g_raid3_disk_state2str(disk->d_state)));
2777 KASSERT(sc->sc_state == G_RAID3_DEVICE_STATE_DEGRADED ||
2778 sc->sc_state == G_RAID3_DEVICE_STATE_COMPLETE,
2779 ("Wrong device state (%s, %s, %s, %s).", sc->sc_name,
2780 g_raid3_device_state2str(sc->sc_state),
2781 g_raid3_get_diskname(disk),
2782 g_raid3_disk_state2str(disk->d_state)));
2783 /*
2784 		 * STALE state is only possible if the device is marked
2785 * NOAUTOSYNC.
2786 */
2787 KASSERT((sc->sc_flags & G_RAID3_DEVICE_FLAG_NOAUTOSYNC) != 0,
2788 ("Wrong device state (%s, %s, %s, %s).", sc->sc_name,
2789 g_raid3_device_state2str(sc->sc_state),
2790 g_raid3_get_diskname(disk),
2791 g_raid3_disk_state2str(disk->d_state)));
2792 DISK_STATE_CHANGED();
2793
2794 disk->d_flags &= ~G_RAID3_DISK_FLAG_DIRTY;
2795 disk->d_state = state;
2796 g_raid3_update_metadata(disk);
2797 G_RAID3_DEBUG(0, "Device %s: provider %s is stale.",
2798 sc->sc_name, g_raid3_get_diskname(disk));
2799 break;
2800 case G_RAID3_DISK_STATE_SYNCHRONIZING:
2801 /*
2802 * Possible scenarios:
2803 * 1. Disk which needs synchronization was connected.
2804 */
2805 /* Previous state should be NEW. */
2806 KASSERT(disk->d_state == G_RAID3_DISK_STATE_NEW,
2807 ("Wrong disk state (%s, %s).", g_raid3_get_diskname(disk),
2808 g_raid3_disk_state2str(disk->d_state)));
2809 KASSERT(sc->sc_state == G_RAID3_DEVICE_STATE_DEGRADED ||
2810 sc->sc_state == G_RAID3_DEVICE_STATE_COMPLETE,
2811 ("Wrong device state (%s, %s, %s, %s).", sc->sc_name,
2812 g_raid3_device_state2str(sc->sc_state),
2813 g_raid3_get_diskname(disk),
2814 g_raid3_disk_state2str(disk->d_state)));
2815 DISK_STATE_CHANGED();
2816
2817 if (disk->d_state == G_RAID3_DISK_STATE_NEW)
2818 disk->d_flags &= ~G_RAID3_DISK_FLAG_DIRTY;
2819 disk->d_state = state;
2820 if (sc->sc_provider != NULL) {
2821 g_raid3_sync_start(sc);
2822 g_raid3_update_metadata(disk);
2823 }
2824 break;
2825 case G_RAID3_DISK_STATE_DISCONNECTED:
2826 /*
2827 * Possible scenarios:
2828 		 * 1. Device wasn't running yet, but a disk disappeared.
2829 		 * 2. Disk was active and disappeared.
2830 		 * 3. Disk disappeared during the synchronization process.
2831 */
2832 if (sc->sc_state == G_RAID3_DEVICE_STATE_DEGRADED ||
2833 sc->sc_state == G_RAID3_DEVICE_STATE_COMPLETE) {
2834 /*
2835 * Previous state should be ACTIVE, STALE or
2836 * SYNCHRONIZING.
2837 */
2838 KASSERT(disk->d_state == G_RAID3_DISK_STATE_ACTIVE ||
2839 disk->d_state == G_RAID3_DISK_STATE_STALE ||
2840 disk->d_state == G_RAID3_DISK_STATE_SYNCHRONIZING,
2841 ("Wrong disk state (%s, %s).",
2842 g_raid3_get_diskname(disk),
2843 g_raid3_disk_state2str(disk->d_state)));
2844 } else if (sc->sc_state == G_RAID3_DEVICE_STATE_STARTING) {
2845 /* Previous state should be NEW. */
2846 KASSERT(disk->d_state == G_RAID3_DISK_STATE_NEW,
2847 ("Wrong disk state (%s, %s).",
2848 g_raid3_get_diskname(disk),
2849 g_raid3_disk_state2str(disk->d_state)));
2850 /*
2851 * Reset bumping syncid if disk disappeared in STARTING
2852 * state.
2853 */
2854 if ((sc->sc_bump_id & G_RAID3_BUMP_SYNCID) != 0)
2855 sc->sc_bump_id &= ~G_RAID3_BUMP_SYNCID;
2856 #ifdef INVARIANTS
2857 } else {
2858 KASSERT(1 == 0, ("Wrong device state (%s, %s, %s, %s).",
2859 sc->sc_name,
2860 g_raid3_device_state2str(sc->sc_state),
2861 g_raid3_get_diskname(disk),
2862 g_raid3_disk_state2str(disk->d_state)));
2863 #endif
2864 }
2865 DISK_STATE_CHANGED();
2866 G_RAID3_DEBUG(0, "Device %s: provider %s disconnected.",
2867 sc->sc_name, g_raid3_get_diskname(disk));
2868
2869 g_raid3_destroy_disk(disk);
2870 break;
2871 default:
2872 KASSERT(1 == 0, ("Unknown state (%u).", state));
2873 break;
2874 }
2875 return (0);
2876 }
2877 #undef DISK_STATE_CHANGED
2878
2879 int
2880 g_raid3_read_metadata(struct g_consumer *cp, struct g_raid3_metadata *md)
2881 {
2882 struct g_provider *pp;
2883 u_char *buf;
2884 int error;
2885
2886 g_topology_assert();
2887
2888 error = g_access(cp, 1, 0, 0);
2889 if (error != 0)
2890 return (error);
2891 pp = cp->provider;
2892 g_topology_unlock();
2893 	/* Metadata are stored in the last sector. */
2894 buf = g_read_data(cp, pp->mediasize - pp->sectorsize, pp->sectorsize,
2895 &error);
2896 g_topology_lock();
2897 g_access(cp, -1, 0, 0);
2898 if (buf == NULL) {
2899 G_RAID3_DEBUG(1, "Cannot read metadata from %s (error=%d).",
2900 cp->provider->name, error);
2901 return (error);
2902 }
2903
2904 /* Decode metadata. */
2905 error = raid3_metadata_decode(buf, md);
2906 g_free(buf);
2907 if (strcmp(md->md_magic, G_RAID3_MAGIC) != 0)
2908 return (EINVAL);
2909 if (md->md_version > G_RAID3_VERSION) {
2910 G_RAID3_DEBUG(0,
2911 "Kernel module is too old to handle metadata from %s.",
2912 cp->provider->name);
2913 return (EINVAL);
2914 }
2915 if (error != 0) {
2916 G_RAID3_DEBUG(1, "MD5 metadata hash mismatch for provider %s.",
2917 cp->provider->name);
2918 return (error);
2919 }
2920 if (md->md_sectorsize > MAXPHYS) {
2921 G_RAID3_DEBUG(0, "The blocksize is too big.");
2922 return (EINVAL);
2923 }
2924
2925 return (0);
2926 }
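
/*
 * For example, on a component with a mediasize of 1073741824 bytes and a
 * sectorsize of 512, g_raid3_read_metadata() reads the metadata sector at
 * byte offset 1073741824 - 512 = 1073741312.
 */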
2927
2928 static int
2929 g_raid3_check_metadata(struct g_raid3_softc *sc, struct g_provider *pp,
2930 struct g_raid3_metadata *md)
2931 {
2932
2933 if (md->md_no >= sc->sc_ndisks) {
2934 G_RAID3_DEBUG(1, "Invalid disk %s number (no=%u), skipping.",
2935 pp->name, md->md_no);
2936 return (EINVAL);
2937 }
2938 if (sc->sc_disks[md->md_no].d_state != G_RAID3_DISK_STATE_NODISK) {
2939 G_RAID3_DEBUG(1, "Disk %s (no=%u) already exists, skipping.",
2940 pp->name, md->md_no);
2941 return (EEXIST);
2942 }
2943 if (md->md_all != sc->sc_ndisks) {
2944 G_RAID3_DEBUG(1,
2945 "Invalid '%s' field on disk %s (device %s), skipping.",
2946 "md_all", pp->name, sc->sc_name);
2947 return (EINVAL);
2948 }
2949 if ((md->md_mediasize % md->md_sectorsize) != 0) {
2950 G_RAID3_DEBUG(1, "Invalid metadata (mediasize %% sectorsize != "
2951 "0) on disk %s (device %s), skipping.", pp->name,
2952 sc->sc_name);
2953 return (EINVAL);
2954 }
2955 if (md->md_mediasize != sc->sc_mediasize) {
2956 G_RAID3_DEBUG(1,
2957 "Invalid '%s' field on disk %s (device %s), skipping.",
2958 "md_mediasize", pp->name, sc->sc_name);
2959 return (EINVAL);
2960 }
2961 if ((md->md_mediasize % (sc->sc_ndisks - 1)) != 0) {
2962 G_RAID3_DEBUG(1,
2963 "Invalid '%s' field on disk %s (device %s), skipping.",
2964 "md_mediasize", pp->name, sc->sc_name);
2965 return (EINVAL);
2966 }
2967 if ((sc->sc_mediasize / (sc->sc_ndisks - 1)) > pp->mediasize) {
2968 G_RAID3_DEBUG(1,
2969 "Invalid size of disk %s (device %s), skipping.", pp->name,
2970 sc->sc_name);
2971 return (EINVAL);
2972 }
2973 if ((md->md_sectorsize / pp->sectorsize) < sc->sc_ndisks - 1) {
2974 G_RAID3_DEBUG(1,
2975 "Invalid '%s' field on disk %s (device %s), skipping.",
2976 "md_sectorsize", pp->name, sc->sc_name);
2977 return (EINVAL);
2978 }
2979 if (md->md_sectorsize != sc->sc_sectorsize) {
2980 G_RAID3_DEBUG(1,
2981 "Invalid '%s' field on disk %s (device %s), skipping.",
2982 "md_sectorsize", pp->name, sc->sc_name);
2983 return (EINVAL);
2984 }
2985 if ((sc->sc_sectorsize % pp->sectorsize) != 0) {
2986 G_RAID3_DEBUG(1,
2987 "Invalid sector size of disk %s (device %s), skipping.",
2988 pp->name, sc->sc_name);
2989 return (EINVAL);
2990 }
2991 if ((md->md_mflags & ~G_RAID3_DEVICE_FLAG_MASK) != 0) {
2992 G_RAID3_DEBUG(1,
2993 "Invalid device flags on disk %s (device %s), skipping.",
2994 pp->name, sc->sc_name);
2995 return (EINVAL);
2996 }
2997 if ((md->md_mflags & G_RAID3_DEVICE_FLAG_VERIFY) != 0 &&
2998 (md->md_mflags & G_RAID3_DEVICE_FLAG_ROUND_ROBIN) != 0) {
2999 /*
3000 		 * VERIFY and ROUND-ROBIN options are mutually exclusive.
3001 */
3002 G_RAID3_DEBUG(1, "Both VERIFY and ROUND-ROBIN flags exist on "
3003 "disk %s (device %s), skipping.", pp->name, sc->sc_name);
3004 return (EINVAL);
3005 }
3006 if ((md->md_dflags & ~G_RAID3_DISK_FLAG_MASK) != 0) {
3007 G_RAID3_DEBUG(1,
3008 "Invalid disk flags on disk %s (device %s), skipping.",
3009 pp->name, sc->sc_name);
3010 return (EINVAL);
3011 }
3012 return (0);
3013 }
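
/*
 * Taken together, the checks in g_raid3_check_metadata() pin down the
 * geometry.  As an illustrative example, a 3-component device built from
 * 512-byte-sector disks must have md_all = 3, an md_sectorsize that is a
 * multiple of 512 and at least (3 - 1) * 512 = 1024, and an md_mediasize
 * divisible by both md_sectorsize and 2.
 */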
3014
3015 int
3016 g_raid3_add_disk(struct g_raid3_softc *sc, struct g_provider *pp,
3017 struct g_raid3_metadata *md)
3018 {
3019 struct g_raid3_disk *disk;
3020 int error;
3021
3022 g_topology_assert_not();
3023 G_RAID3_DEBUG(2, "Adding disk %s.", pp->name);
3024
3025 error = g_raid3_check_metadata(sc, pp, md);
3026 if (error != 0)
3027 return (error);
3028 if (sc->sc_state != G_RAID3_DEVICE_STATE_STARTING &&
3029 md->md_genid < sc->sc_genid) {
3030 G_RAID3_DEBUG(0, "Component %s (device %s) broken, skipping.",
3031 pp->name, sc->sc_name);
3032 return (EINVAL);
3033 }
3034 disk = g_raid3_init_disk(sc, pp, md, &error);
3035 if (disk == NULL)
3036 return (error);
3037 error = g_raid3_event_send(disk, G_RAID3_DISK_STATE_NEW,
3038 G_RAID3_EVENT_WAIT);
3039 if (error != 0)
3040 return (error);
3041 if (md->md_version < G_RAID3_VERSION) {
3042 G_RAID3_DEBUG(0, "Upgrading metadata on %s (v%d->v%d).",
3043 pp->name, md->md_version, G_RAID3_VERSION);
3044 g_raid3_update_metadata(disk);
3045 }
3046 return (0);
3047 }
3048
3049 static void
3050 g_raid3_destroy_delayed(void *arg, int flag)
3051 {
3052 struct g_raid3_softc *sc;
3053 int error;
3054
3055 if (flag == EV_CANCEL) {
3056 G_RAID3_DEBUG(1, "Destroying canceled.");
3057 return;
3058 }
3059 sc = arg;
3060 g_topology_unlock();
3061 sx_xlock(&sc->sc_lock);
3062 KASSERT((sc->sc_flags & G_RAID3_DEVICE_FLAG_DESTROY) == 0,
3063 ("DESTROY flag set on %s.", sc->sc_name));
3064 KASSERT((sc->sc_flags & G_RAID3_DEVICE_FLAG_DESTROYING) != 0,
3065 ("DESTROYING flag not set on %s.", sc->sc_name));
3066 G_RAID3_DEBUG(0, "Destroying %s (delayed).", sc->sc_name);
3067 error = g_raid3_destroy(sc, G_RAID3_DESTROY_SOFT);
3068 if (error != 0) {
3069 G_RAID3_DEBUG(0, "Cannot destroy %s.", sc->sc_name);
3070 sx_xunlock(&sc->sc_lock);
3071 }
3072 g_topology_lock();
3073 }
3074
3075 static int
3076 g_raid3_access(struct g_provider *pp, int acr, int acw, int ace)
3077 {
3078 struct g_raid3_softc *sc;
3079 int dcr, dcw, dce, error = 0;
3080
3081 g_topology_assert();
3082 G_RAID3_DEBUG(2, "Access request for %s: r%dw%de%d.", pp->name, acr,
3083 acw, ace);
3084
3085 sc = pp->geom->softc;
3086 if (sc == NULL && acr <= 0 && acw <= 0 && ace <= 0)
3087 return (0);
3088 KASSERT(sc != NULL, ("NULL softc (provider=%s).", pp->name));
3089
3090 dcr = pp->acr + acr;
3091 dcw = pp->acw + acw;
3092 dce = pp->ace + ace;
3093
3094 g_topology_unlock();
3095 sx_xlock(&sc->sc_lock);
3096 if ((sc->sc_flags & G_RAID3_DEVICE_FLAG_DESTROY) != 0 ||
3097 g_raid3_ndisks(sc, G_RAID3_DISK_STATE_ACTIVE) < sc->sc_ndisks - 1) {
3098 if (acr > 0 || acw > 0 || ace > 0)
3099 error = ENXIO;
3100 goto end;
3101 }
3102 if (dcw == 0)
3103 g_raid3_idle(sc, dcw);
3104 if ((sc->sc_flags & G_RAID3_DEVICE_FLAG_DESTROYING) != 0) {
3105 if (acr > 0 || acw > 0 || ace > 0) {
3106 error = ENXIO;
3107 goto end;
3108 }
3109 if (dcr == 0 && dcw == 0 && dce == 0) {
3110 g_post_event(g_raid3_destroy_delayed, sc, M_WAITOK,
3111 sc, NULL);
3112 }
3113 }
3114 end:
3115 sx_xunlock(&sc->sc_lock);
3116 g_topology_lock();
3117 return (error);
3118 }
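
/*
 * Note that acr, acw and ace are deltas, so dcr/dcw/dce above are the
 * access counts the request would result in; e.g. the last writer closing
 * (acw == -1 with pp->acw == 1) gives dcw == 0, which lets the device go
 * idle, and dcr == dcw == dce == 0 on a DESTROYING device schedules the
 * delayed destruction.
 */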
3119
3120 static struct g_geom *
3121 g_raid3_create(struct g_class *mp, const struct g_raid3_metadata *md)
3122 {
3123 struct g_raid3_softc *sc;
3124 struct g_geom *gp;
3125 int error, timeout;
3126 u_int n;
3127
3128 g_topology_assert();
3129 G_RAID3_DEBUG(1, "Creating device %s (id=%u).", md->md_name, md->md_id);
3130
3131 	/* One disk is the minimum. */
3132 if (md->md_all < 1)
3133 return (NULL);
3134 /*
3135 * Action geom.
3136 */
3137 gp = g_new_geomf(mp, "%s", md->md_name);
3138 sc = malloc(sizeof(*sc), M_RAID3, M_WAITOK | M_ZERO);
3139 sc->sc_disks = malloc(sizeof(struct g_raid3_disk) * md->md_all, M_RAID3,
3140 M_WAITOK | M_ZERO);
3141 gp->start = g_raid3_start;
3142 gp->orphan = g_raid3_orphan;
3143 gp->access = g_raid3_access;
3144 gp->dumpconf = g_raid3_dumpconf;
3145
3146 sc->sc_id = md->md_id;
3147 sc->sc_mediasize = md->md_mediasize;
3148 sc->sc_sectorsize = md->md_sectorsize;
3149 sc->sc_ndisks = md->md_all;
3150 sc->sc_round_robin = 0;
3151 sc->sc_flags = md->md_mflags;
3152 sc->sc_bump_id = 0;
3153 sc->sc_idle = 1;
3154 sc->sc_last_write = time_uptime;
3155 sc->sc_writes = 0;
3156 for (n = 0; n < sc->sc_ndisks; n++) {
3157 sc->sc_disks[n].d_softc = sc;
3158 sc->sc_disks[n].d_no = n;
3159 sc->sc_disks[n].d_state = G_RAID3_DISK_STATE_NODISK;
3160 }
3161 sx_init(&sc->sc_lock, "graid3:lock");
3162 bioq_init(&sc->sc_queue);
3163 mtx_init(&sc->sc_queue_mtx, "graid3:queue", NULL, MTX_DEF);
3164 bioq_init(&sc->sc_regular_delayed);
3165 bioq_init(&sc->sc_inflight);
3166 bioq_init(&sc->sc_sync_delayed);
3167 TAILQ_INIT(&sc->sc_events);
3168 mtx_init(&sc->sc_events_mtx, "graid3:events", NULL, MTX_DEF);
3169 callout_init(&sc->sc_callout, CALLOUT_MPSAFE);
3170 sc->sc_state = G_RAID3_DEVICE_STATE_STARTING;
3171 gp->softc = sc;
3172 sc->sc_geom = gp;
3173 sc->sc_provider = NULL;
3174 /*
3175 * Synchronization geom.
3176 */
3177 gp = g_new_geomf(mp, "%s.sync", md->md_name);
3178 gp->softc = sc;
3179 gp->orphan = g_raid3_orphan;
3180 sc->sc_sync.ds_geom = gp;
3181
3182 if (!g_raid3_use_malloc) {
3183 sc->sc_zones[G_RAID3_ZONE_64K].sz_zone = uma_zcreate("gr3:64k",
3184 65536, g_raid3_uma_ctor, g_raid3_uma_dtor, NULL, NULL,
3185 UMA_ALIGN_PTR, 0);
3186 sc->sc_zones[G_RAID3_ZONE_64K].sz_inuse = 0;
3187 sc->sc_zones[G_RAID3_ZONE_64K].sz_max = g_raid3_n64k;
3188 sc->sc_zones[G_RAID3_ZONE_64K].sz_requested =
3189 sc->sc_zones[G_RAID3_ZONE_64K].sz_failed = 0;
3190 sc->sc_zones[G_RAID3_ZONE_16K].sz_zone = uma_zcreate("gr3:16k",
3191 16384, g_raid3_uma_ctor, g_raid3_uma_dtor, NULL, NULL,
3192 UMA_ALIGN_PTR, 0);
3193 sc->sc_zones[G_RAID3_ZONE_16K].sz_inuse = 0;
3194 sc->sc_zones[G_RAID3_ZONE_16K].sz_max = g_raid3_n16k;
3195 sc->sc_zones[G_RAID3_ZONE_16K].sz_requested =
3196 sc->sc_zones[G_RAID3_ZONE_16K].sz_failed = 0;
3197 sc->sc_zones[G_RAID3_ZONE_4K].sz_zone = uma_zcreate("gr3:4k",
3198 4096, g_raid3_uma_ctor, g_raid3_uma_dtor, NULL, NULL,
3199 UMA_ALIGN_PTR, 0);
3200 sc->sc_zones[G_RAID3_ZONE_4K].sz_inuse = 0;
3201 sc->sc_zones[G_RAID3_ZONE_4K].sz_max = g_raid3_n4k;
3202 sc->sc_zones[G_RAID3_ZONE_4K].sz_requested =
3203 sc->sc_zones[G_RAID3_ZONE_4K].sz_failed = 0;
3204 }
3205
3206 error = kproc_create(g_raid3_worker, sc, &sc->sc_worker, 0, 0,
3207 "g_raid3 %s", md->md_name);
3208 if (error != 0) {
3209 G_RAID3_DEBUG(1, "Cannot create kernel thread for %s.",
3210 sc->sc_name);
3211 if (!g_raid3_use_malloc) {
3212 uma_zdestroy(sc->sc_zones[G_RAID3_ZONE_64K].sz_zone);
3213 uma_zdestroy(sc->sc_zones[G_RAID3_ZONE_16K].sz_zone);
3214 uma_zdestroy(sc->sc_zones[G_RAID3_ZONE_4K].sz_zone);
3215 }
3216 g_destroy_geom(sc->sc_sync.ds_geom);
3217 mtx_destroy(&sc->sc_events_mtx);
3218 mtx_destroy(&sc->sc_queue_mtx);
3219 sx_destroy(&sc->sc_lock);
3220 g_destroy_geom(sc->sc_geom);
3221 free(sc->sc_disks, M_RAID3);
3222 free(sc, M_RAID3);
3223 return (NULL);
3224 }
3225
3226 G_RAID3_DEBUG(1, "Device %s created (%u components, id=%u).",
3227 sc->sc_name, sc->sc_ndisks, sc->sc_id);
3228
3229 sc->sc_rootmount = root_mount_hold("GRAID3");
3230 G_RAID3_DEBUG(1, "root_mount_hold %p", sc->sc_rootmount);
3231
3232 /*
3233 * Run timeout.
3234 */
3235 timeout = atomic_load_acq_int(&g_raid3_timeout);
3236 callout_reset(&sc->sc_callout, timeout * hz, g_raid3_go, sc);
3237 return (sc->sc_geom);
3238 }
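
/*
 * Each device thus owns two geoms: the action geom, which serves the
 * raid3/<name> provider and regular I/O, and the <name>.sync geom, whose
 * consumers (created in g_raid3_sync_start()) are used only to feed the
 * synchronization process.
 */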
3239
3240 int
3241 g_raid3_destroy(struct g_raid3_softc *sc, int how)
3242 {
3243 struct g_provider *pp;
3244
3245 g_topology_assert_not();
3246 if (sc == NULL)
3247 return (ENXIO);
3248 sx_assert(&sc->sc_lock, SX_XLOCKED);
3249
3250 pp = sc->sc_provider;
3251 if (pp != NULL && (pp->acr != 0 || pp->acw != 0 || pp->ace != 0)) {
3252 switch (how) {
3253 case G_RAID3_DESTROY_SOFT:
3254 G_RAID3_DEBUG(1,
3255 "Device %s is still open (r%dw%de%d).", pp->name,
3256 pp->acr, pp->acw, pp->ace);
3257 return (EBUSY);
3258 case G_RAID3_DESTROY_DELAYED:
3259 G_RAID3_DEBUG(1,
3260 "Device %s will be destroyed on last close.",
3261 pp->name);
3262 if (sc->sc_syncdisk != NULL)
3263 g_raid3_sync_stop(sc, 1);
3264 sc->sc_flags |= G_RAID3_DEVICE_FLAG_DESTROYING;
3265 return (EBUSY);
3266 case G_RAID3_DESTROY_HARD:
3267 G_RAID3_DEBUG(1, "Device %s is still open, so it "
3268 			    "can't be definitively removed.", pp->name);
3269 break;
3270 }
3271 }
3272
3273 g_topology_lock();
3274 if (sc->sc_geom->softc == NULL) {
3275 g_topology_unlock();
3276 return (0);
3277 }
3278 sc->sc_geom->softc = NULL;
3279 sc->sc_sync.ds_geom->softc = NULL;
3280 g_topology_unlock();
3281
3282 sc->sc_flags |= G_RAID3_DEVICE_FLAG_DESTROY;
3283 sc->sc_flags |= G_RAID3_DEVICE_FLAG_WAIT;
3284 G_RAID3_DEBUG(4, "%s: Waking up %p.", __func__, sc);
3285 sx_xunlock(&sc->sc_lock);
3286 mtx_lock(&sc->sc_queue_mtx);
3287 wakeup(sc);
3288 wakeup(&sc->sc_queue);
3289 mtx_unlock(&sc->sc_queue_mtx);
3290 G_RAID3_DEBUG(4, "%s: Sleeping %p.", __func__, &sc->sc_worker);
3291 while (sc->sc_worker != NULL)
3292 tsleep(&sc->sc_worker, PRIBIO, "r3:destroy", hz / 5);
3293 G_RAID3_DEBUG(4, "%s: Woken up %p.", __func__, &sc->sc_worker);
3294 sx_xlock(&sc->sc_lock);
3295 g_raid3_destroy_device(sc);
3296 free(sc->sc_disks, M_RAID3);
3297 free(sc, M_RAID3);
3298 return (0);
3299 }
3300
3301 static void
3302 g_raid3_taste_orphan(struct g_consumer *cp)
3303 {
3304
3305 KASSERT(1 == 0, ("%s called while tasting %s.", __func__,
3306 cp->provider->name));
3307 }
3308
3309 static struct g_geom *
3310 g_raid3_taste(struct g_class *mp, struct g_provider *pp, int flags __unused)
3311 {
3312 struct g_raid3_metadata md;
3313 struct g_raid3_softc *sc;
3314 struct g_consumer *cp;
3315 struct g_geom *gp;
3316 int error;
3317
3318 g_topology_assert();
3319 g_trace(G_T_TOPOLOGY, "%s(%s, %s)", __func__, mp->name, pp->name);
3320 G_RAID3_DEBUG(2, "Tasting %s.", pp->name);
3321
3322 gp = g_new_geomf(mp, "raid3:taste");
3323 	/* This orphan function should never be called. */
3324 gp->orphan = g_raid3_taste_orphan;
3325 cp = g_new_consumer(gp);
3326 g_attach(cp, pp);
3327 error = g_raid3_read_metadata(cp, &md);
3328 g_detach(cp);
3329 g_destroy_consumer(cp);
3330 g_destroy_geom(gp);
3331 if (error != 0)
3332 return (NULL);
3333 gp = NULL;
3334
3335 if (md.md_provider[0] != '\0' &&
3336 !g_compare_names(md.md_provider, pp->name))
3337 return (NULL);
3338 if (md.md_provsize != 0 && md.md_provsize != pp->mediasize)
3339 return (NULL);
3340 if (g_raid3_debug >= 2)
3341 raid3_metadata_dump(&md);
3342
3343 /*
3344 	 * Let's check if the device already exists.
3345 */
3346 sc = NULL;
3347 LIST_FOREACH(gp, &mp->geom, geom) {
3348 sc = gp->softc;
3349 if (sc == NULL)
3350 continue;
3351 if (sc->sc_sync.ds_geom == gp)
3352 continue;
3353 if (strcmp(md.md_name, sc->sc_name) != 0)
3354 continue;
3355 if (md.md_id != sc->sc_id) {
3356 G_RAID3_DEBUG(0, "Device %s already configured.",
3357 sc->sc_name);
3358 return (NULL);
3359 }
3360 break;
3361 }
3362 if (gp == NULL) {
3363 gp = g_raid3_create(mp, &md);
3364 if (gp == NULL) {
3365 G_RAID3_DEBUG(0, "Cannot create device %s.",
3366 md.md_name);
3367 return (NULL);
3368 }
3369 sc = gp->softc;
3370 }
3371 G_RAID3_DEBUG(1, "Adding disk %s to %s.", pp->name, gp->name);
3372 g_topology_unlock();
3373 sx_xlock(&sc->sc_lock);
3374 error = g_raid3_add_disk(sc, pp, &md);
3375 if (error != 0) {
3376 G_RAID3_DEBUG(0, "Cannot add disk %s to %s (error=%d).",
3377 pp->name, gp->name, error);
3378 if (g_raid3_ndisks(sc, G_RAID3_DISK_STATE_NODISK) ==
3379 sc->sc_ndisks) {
3380 g_cancel_event(sc);
3381 g_raid3_destroy(sc, G_RAID3_DESTROY_HARD);
3382 g_topology_lock();
3383 return (NULL);
3384 }
3385 gp = NULL;
3386 }
3387 sx_xunlock(&sc->sc_lock);
3388 g_topology_lock();
3389 return (gp);
3390 }
3391
3392 static int
3393 g_raid3_destroy_geom(struct gctl_req *req __unused, struct g_class *mp __unused,
3394 struct g_geom *gp)
3395 {
3396 struct g_raid3_softc *sc;
3397 int error;
3398
3399 g_topology_unlock();
3400 sc = gp->softc;
3401 sx_xlock(&sc->sc_lock);
3402 g_cancel_event(sc);
3403 error = g_raid3_destroy(gp->softc, G_RAID3_DESTROY_SOFT);
3404 if (error != 0)
3405 sx_xunlock(&sc->sc_lock);
3406 g_topology_lock();
3407 return (error);
3408 }
3409
3410 static void
3411 g_raid3_dumpconf(struct sbuf *sb, const char *indent, struct g_geom *gp,
3412 struct g_consumer *cp, struct g_provider *pp)
3413 {
3414 struct g_raid3_softc *sc;
3415
3416 g_topology_assert();
3417
3418 sc = gp->softc;
3419 if (sc == NULL)
3420 return;
3421 /* Skip synchronization geom. */
3422 if (gp == sc->sc_sync.ds_geom)
3423 return;
3424 if (pp != NULL) {
3425 /* Nothing here. */
3426 } else if (cp != NULL) {
3427 struct g_raid3_disk *disk;
3428
3429 disk = cp->private;
3430 if (disk == NULL)
3431 return;
3432 g_topology_unlock();
3433 sx_xlock(&sc->sc_lock);
3434 sbuf_printf(sb, "%s<Type>", indent);
3435 if (disk->d_no == sc->sc_ndisks - 1)
3436 sbuf_printf(sb, "PARITY");
3437 else
3438 sbuf_printf(sb, "DATA");
3439 sbuf_printf(sb, "</Type>\n");
3440 sbuf_printf(sb, "%s<Number>%u</Number>\n", indent,
3441 (u_int)disk->d_no);
3442 if (disk->d_state == G_RAID3_DISK_STATE_SYNCHRONIZING) {
3443 sbuf_printf(sb, "%s<Synchronized>", indent);
3444 if (disk->d_sync.ds_offset == 0)
3445 sbuf_printf(sb, "0%%");
3446 else {
3447 sbuf_printf(sb, "%u%%",
3448 (u_int)((disk->d_sync.ds_offset * 100) /
3449 (sc->sc_mediasize / (sc->sc_ndisks - 1))));
3450 }
3451 sbuf_printf(sb, "</Synchronized>\n");
3452 if (disk->d_sync.ds_offset > 0) {
3453 sbuf_printf(sb, "%s<BytesSynced>%jd"
3454 "</BytesSynced>\n", indent,
3455 (intmax_t)disk->d_sync.ds_offset);
3456 }
3457 }
3458 sbuf_printf(sb, "%s<SyncID>%u</SyncID>\n", indent,
3459 disk->d_sync.ds_syncid);
3460 sbuf_printf(sb, "%s<GenID>%u</GenID>\n", indent, disk->d_genid);
3461 sbuf_printf(sb, "%s<Flags>", indent);
3462 if (disk->d_flags == 0)
3463 sbuf_printf(sb, "NONE");
3464 else {
3465 int first = 1;
3466
3467 #define ADD_FLAG(flag, name) do { \
3468 if ((disk->d_flags & (flag)) != 0) { \
3469 if (!first) \
3470 sbuf_printf(sb, ", "); \
3471 else \
3472 first = 0; \
3473 sbuf_printf(sb, name); \
3474 } \
3475 } while (0)
3476 ADD_FLAG(G_RAID3_DISK_FLAG_DIRTY, "DIRTY");
3477 ADD_FLAG(G_RAID3_DISK_FLAG_HARDCODED, "HARDCODED");
3478 ADD_FLAG(G_RAID3_DISK_FLAG_SYNCHRONIZING,
3479 "SYNCHRONIZING");
3480 ADD_FLAG(G_RAID3_DISK_FLAG_FORCE_SYNC, "FORCE_SYNC");
3481 ADD_FLAG(G_RAID3_DISK_FLAG_BROKEN, "BROKEN");
3482 #undef ADD_FLAG
3483 }
3484 sbuf_printf(sb, "</Flags>\n");
3485 sbuf_printf(sb, "%s<State>%s</State>\n", indent,
3486 g_raid3_disk_state2str(disk->d_state));
3487 sx_xunlock(&sc->sc_lock);
3488 g_topology_lock();
3489 } else {
3490 g_topology_unlock();
3491 sx_xlock(&sc->sc_lock);
3492 if (!g_raid3_use_malloc) {
3493 sbuf_printf(sb,
3494 "%s<Zone4kRequested>%u</Zone4kRequested>\n", indent,
3495 sc->sc_zones[G_RAID3_ZONE_4K].sz_requested);
3496 sbuf_printf(sb,
3497 "%s<Zone4kFailed>%u</Zone4kFailed>\n", indent,
3498 sc->sc_zones[G_RAID3_ZONE_4K].sz_failed);
3499 sbuf_printf(sb,
3500 "%s<Zone16kRequested>%u</Zone16kRequested>\n", indent,
3501 sc->sc_zones[G_RAID3_ZONE_16K].sz_requested);
3502 sbuf_printf(sb,
3503 "%s<Zone16kFailed>%u</Zone16kFailed>\n", indent,
3504 sc->sc_zones[G_RAID3_ZONE_16K].sz_failed);
3505 sbuf_printf(sb,
3506 "%s<Zone64kRequested>%u</Zone64kRequested>\n", indent,
3507 sc->sc_zones[G_RAID3_ZONE_64K].sz_requested);
3508 sbuf_printf(sb,
3509 "%s<Zone64kFailed>%u</Zone64kFailed>\n", indent,
3510 sc->sc_zones[G_RAID3_ZONE_64K].sz_failed);
3511 }
3512 sbuf_printf(sb, "%s<ID>%u</ID>\n", indent, (u_int)sc->sc_id);
3513 sbuf_printf(sb, "%s<SyncID>%u</SyncID>\n", indent, sc->sc_syncid);
3514 sbuf_printf(sb, "%s<GenID>%u</GenID>\n", indent, sc->sc_genid);
3515 sbuf_printf(sb, "%s<Flags>", indent);
3516 if (sc->sc_flags == 0)
3517 sbuf_printf(sb, "NONE");
3518 else {
3519 int first = 1;
3520
3521 #define ADD_FLAG(flag, name) do { \
3522 if ((sc->sc_flags & (flag)) != 0) { \
3523 if (!first) \
3524 sbuf_printf(sb, ", "); \
3525 else \
3526 first = 0; \
3527 sbuf_printf(sb, name); \
3528 } \
3529 } while (0)
3530 ADD_FLAG(G_RAID3_DEVICE_FLAG_NOFAILSYNC, "NOFAILSYNC");
3531 ADD_FLAG(G_RAID3_DEVICE_FLAG_NOAUTOSYNC, "NOAUTOSYNC");
3532 ADD_FLAG(G_RAID3_DEVICE_FLAG_ROUND_ROBIN,
3533 "ROUND-ROBIN");
3534 ADD_FLAG(G_RAID3_DEVICE_FLAG_VERIFY, "VERIFY");
3535 #undef ADD_FLAG
3536 }
3537 sbuf_printf(sb, "</Flags>\n");
3538 sbuf_printf(sb, "%s<Components>%u</Components>\n", indent,
3539 sc->sc_ndisks);
3540 sbuf_printf(sb, "%s<State>%s</State>\n", indent,
3541 g_raid3_device_state2str(sc->sc_state));
3542 sx_xunlock(&sc->sc_lock);
3543 g_topology_lock();
3544 }
3545 }
3546
3547 static void
3548 g_raid3_shutdown_post_sync(void *arg, int howto)
3549 {
3550 struct g_class *mp;
3551 struct g_geom *gp, *gp2;
3552 struct g_raid3_softc *sc;
3553 int error;
3554
3555 mp = arg;
3556 DROP_GIANT();
3557 g_topology_lock();
3558 g_raid3_shutdown = 1;
3559 LIST_FOREACH_SAFE(gp, &mp->geom, geom, gp2) {
3560 if ((sc = gp->softc) == NULL)
3561 continue;
3562 /* Skip synchronization geom. */
3563 if (gp == sc->sc_sync.ds_geom)
3564 continue;
3565 g_topology_unlock();
3566 sx_xlock(&sc->sc_lock);
3567 g_raid3_idle(sc, -1);
3568 g_cancel_event(sc);
3569 error = g_raid3_destroy(sc, G_RAID3_DESTROY_DELAYED);
3570 if (error != 0)
3571 sx_xunlock(&sc->sc_lock);
3572 g_topology_lock();
3573 }
3574 g_topology_unlock();
3575 PICKUP_GIANT();
3576 }
3577
3578 static void
3579 g_raid3_init(struct g_class *mp)
3580 {
3581
3582 g_raid3_post_sync = EVENTHANDLER_REGISTER(shutdown_post_sync,
3583 g_raid3_shutdown_post_sync, mp, SHUTDOWN_PRI_FIRST);
3584 if (g_raid3_post_sync == NULL)
3585 G_RAID3_DEBUG(0, "Warning! Cannot register shutdown event.");
3586 }
3587
3588 static void
3589 g_raid3_fini(struct g_class *mp)
3590 {
3591
3592 if (g_raid3_post_sync != NULL)
3593 EVENTHANDLER_DEREGISTER(shutdown_post_sync, g_raid3_post_sync);
3594 }
3595
3596 DECLARE_GEOM_CLASS(g_raid3_class, g_raid3);