/*-
 * Copyright (c) 2010 Alexander Motin <mav@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: releng/10.0/sys/geom/raid/tr_raid1.c 242328 2012-10-29 21:08:06Z mav $");

#include <sys/param.h>
#include <sys/bio.h>
#include <sys/endian.h>
#include <sys/kernel.h>
#include <sys/kobj.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <geom/geom.h>
#include "geom/raid/g_raid.h"
#include "g_raid_tr_if.h"

SYSCTL_DECL(_kern_geom_raid_raid1);

#define RAID1_REBUILD_SLAB	(1 << 20) /* One transaction in a rebuild */
static int g_raid1_rebuild_slab = RAID1_REBUILD_SLAB;
TUNABLE_INT("kern.geom.raid.raid1.rebuild_slab_size",
    &g_raid1_rebuild_slab);
SYSCTL_UINT(_kern_geom_raid_raid1, OID_AUTO, rebuild_slab_size, CTLFLAG_RW,
    &g_raid1_rebuild_slab, 0,
    "Amount of the disk to rebuild each read/write cycle of the rebuild.");

#define RAID1_REBUILD_FAIR_IO	20 /* use 1/x of the available I/O */
static int g_raid1_rebuild_fair_io = RAID1_REBUILD_FAIR_IO;
TUNABLE_INT("kern.geom.raid.raid1.rebuild_fair_io",
    &g_raid1_rebuild_fair_io);
SYSCTL_UINT(_kern_geom_raid_raid1, OID_AUTO, rebuild_fair_io, CTLFLAG_RW,
    &g_raid1_rebuild_fair_io, 0,
    "Fraction of the I/O bandwidth to use for rebuild when the disk is busy.");

#define RAID1_REBUILD_CLUSTER_IDLE	100
static int g_raid1_rebuild_cluster_idle = RAID1_REBUILD_CLUSTER_IDLE;
TUNABLE_INT("kern.geom.raid.raid1.rebuild_cluster_idle",
    &g_raid1_rebuild_cluster_idle);
SYSCTL_UINT(_kern_geom_raid_raid1, OID_AUTO, rebuild_cluster_idle, CTLFLAG_RW,
    &g_raid1_rebuild_cluster_idle, 0,
    "Number of slabs to do each time we trigger a rebuild cycle.");

#define RAID1_REBUILD_META_UPDATE	1024 /* update metadata every 1GB or so */
static int g_raid1_rebuild_meta_update = RAID1_REBUILD_META_UPDATE;
TUNABLE_INT("kern.geom.raid.raid1.rebuild_meta_update",
    &g_raid1_rebuild_meta_update);
SYSCTL_UINT(_kern_geom_raid_raid1, OID_AUTO, rebuild_meta_update, CTLFLAG_RW,
    &g_raid1_rebuild_meta_update, 0,
    "When to update the metadata.");

static MALLOC_DEFINE(M_TR_RAID1, "tr_raid1_data", "GEOM_RAID RAID1 data");

#define TR_RAID1_NONE 0
#define TR_RAID1_REBUILD 1
#define TR_RAID1_RESYNC 2

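/*
 * Flag lifecycle (descriptive summary): DOING_SOME is set while one rebuild
 * read/write round is in flight, LOCKED while the corresponding disk range
 * is locked, and ABORT asks the in-flight round to stop; the abort completes
 * once DOING_SOME is clear again.
 */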
#define TR_RAID1_F_DOING_SOME	0x1
#define TR_RAID1_F_LOCKED	0x2
#define TR_RAID1_F_ABORT	0x4

struct g_raid_tr_raid1_object {
	struct g_raid_tr_object	 trso_base;
	int			 trso_starting;
	int			 trso_stopping;
	int			 trso_type;
	int			 trso_recover_slabs;	/* slabs left before resting */
	int			 trso_fair_io;
	int			 trso_meta_update;
	int			 trso_flags;
	struct g_raid_subdisk	*trso_failed_sd;	/* failed subdisk; one per volume */
	void			*trso_buffer;		/* Buffer space */
	struct bio		 trso_bio;
};

static g_raid_tr_taste_t g_raid_tr_taste_raid1;
static g_raid_tr_event_t g_raid_tr_event_raid1;
static g_raid_tr_start_t g_raid_tr_start_raid1;
static g_raid_tr_stop_t g_raid_tr_stop_raid1;
static g_raid_tr_iostart_t g_raid_tr_iostart_raid1;
static g_raid_tr_iodone_t g_raid_tr_iodone_raid1;
static g_raid_tr_kerneldump_t g_raid_tr_kerneldump_raid1;
static g_raid_tr_locked_t g_raid_tr_locked_raid1;
static g_raid_tr_idle_t g_raid_tr_idle_raid1;
static g_raid_tr_free_t g_raid_tr_free_raid1;

static kobj_method_t g_raid_tr_raid1_methods[] = {
	KOBJMETHOD(g_raid_tr_taste,	g_raid_tr_taste_raid1),
	KOBJMETHOD(g_raid_tr_event,	g_raid_tr_event_raid1),
	KOBJMETHOD(g_raid_tr_start,	g_raid_tr_start_raid1),
	KOBJMETHOD(g_raid_tr_stop,	g_raid_tr_stop_raid1),
	KOBJMETHOD(g_raid_tr_iostart,	g_raid_tr_iostart_raid1),
	KOBJMETHOD(g_raid_tr_iodone,	g_raid_tr_iodone_raid1),
	KOBJMETHOD(g_raid_tr_kerneldump, g_raid_tr_kerneldump_raid1),
	KOBJMETHOD(g_raid_tr_locked,	g_raid_tr_locked_raid1),
	KOBJMETHOD(g_raid_tr_idle,	g_raid_tr_idle_raid1),
	KOBJMETHOD(g_raid_tr_free,	g_raid_tr_free_raid1),
	{ 0, 0 }
};

static struct g_raid_tr_class g_raid_tr_raid1_class = {
	"RAID1",
	g_raid_tr_raid1_methods,
	sizeof(struct g_raid_tr_raid1_object),
	.trc_enable = 1,
	.trc_priority = 100
};

static void g_raid_tr_raid1_rebuild_abort(struct g_raid_tr_object *tr);
static void g_raid_tr_raid1_maybe_rebuild(struct g_raid_tr_object *tr,
    struct g_raid_subdisk *sd);

static int
g_raid_tr_taste_raid1(struct g_raid_tr_object *tr, struct g_raid_volume *vol)
{
	struct g_raid_tr_raid1_object *trs;

	trs = (struct g_raid_tr_raid1_object *)tr;
	if (tr->tro_volume->v_raid_level != G_RAID_VOLUME_RL_RAID1 ||
	    (tr->tro_volume->v_raid_level_qualifier != G_RAID_VOLUME_RLQ_R1SM &&
	     tr->tro_volume->v_raid_level_qualifier != G_RAID_VOLUME_RLQ_R1MM))
		return (G_RAID_TR_TASTE_FAIL);
	trs->trso_starting = 1;
	return (G_RAID_TR_TASTE_SUCCEED);
}

static int
g_raid_tr_update_state_raid1(struct g_raid_volume *vol,
    struct g_raid_subdisk *sd)
{
	struct g_raid_tr_raid1_object *trs;
	struct g_raid_softc *sc;
	struct g_raid_subdisk *tsd, *bestsd;
	u_int s;
	int i, na, ns;

	sc = vol->v_softc;
	trs = (struct g_raid_tr_raid1_object *)vol->v_tr;
	if (trs->trso_stopping &&
	    (trs->trso_flags & TR_RAID1_F_DOING_SOME) == 0)
		s = G_RAID_VOLUME_S_STOPPED;
	else if (trs->trso_starting)
		s = G_RAID_VOLUME_S_STARTING;
	else {
		/* Make sure we have at least one ACTIVE disk. */
		na = g_raid_nsubdisks(vol, G_RAID_SUBDISK_S_ACTIVE);
		if (na == 0) {
			/*
			 * Critical situation!  We have no active disks!
			 * Choose the best disk we have to make it active.
			 */
			bestsd = &vol->v_subdisks[0];
			for (i = 1; i < vol->v_disks_count; i++) {
				tsd = &vol->v_subdisks[i];
				if (tsd->sd_state > bestsd->sd_state)
					bestsd = tsd;
				else if (tsd->sd_state == bestsd->sd_state &&
				    (tsd->sd_state == G_RAID_SUBDISK_S_REBUILD ||
				     tsd->sd_state == G_RAID_SUBDISK_S_RESYNC) &&
				    tsd->sd_rebuild_pos > bestsd->sd_rebuild_pos)
					bestsd = tsd;
			}
			if (bestsd->sd_state >= G_RAID_SUBDISK_S_UNINITIALIZED) {
				/* We found a reasonable candidate. */
				G_RAID_DEBUG1(1, sc,
				    "Promote subdisk %s:%d from %s to ACTIVE.",
				    vol->v_name, bestsd->sd_pos,
				    g_raid_subdisk_state2str(bestsd->sd_state));
				g_raid_change_subdisk_state(bestsd,
				    G_RAID_SUBDISK_S_ACTIVE);
				g_raid_write_metadata(sc,
				    vol, bestsd, bestsd->sd_disk);
			}
		}
		na = g_raid_nsubdisks(vol, G_RAID_SUBDISK_S_ACTIVE);
		ns = g_raid_nsubdisks(vol, G_RAID_SUBDISK_S_STALE) +
		    g_raid_nsubdisks(vol, G_RAID_SUBDISK_S_RESYNC);
		if (na == vol->v_disks_count)
			s = G_RAID_VOLUME_S_OPTIMAL;
		else if (na + ns == vol->v_disks_count)
			s = G_RAID_VOLUME_S_SUBOPTIMAL;
		else if (na > 0)
			s = G_RAID_VOLUME_S_DEGRADED;
		else
			s = G_RAID_VOLUME_S_BROKEN;
		g_raid_tr_raid1_maybe_rebuild(vol->v_tr, sd);
	}
	if (s != vol->v_state) {
		g_raid_event_send(vol, G_RAID_VOLUME_S_ALIVE(s) ?
		    G_RAID_VOLUME_E_UP : G_RAID_VOLUME_E_DOWN,
		    G_RAID_EVENT_VOLUME);
		g_raid_change_volume_state(vol, s);
		if (!trs->trso_starting && !trs->trso_stopping)
			g_raid_write_metadata(sc, vol, NULL, NULL);
	}
	return (0);
}

static void
g_raid_tr_raid1_fail_disk(struct g_raid_softc *sc, struct g_raid_subdisk *sd,
    struct g_raid_disk *disk)
{
	/*
	 * We don't fail the last disk in the pack, since it still has decent
	 * data on it and that's better than failing the disk if it is the
	 * root file system.
	 *
	 * XXX should this be controlled via a tunable?  It makes sense for
	 * the volume that has / on it.  I can't think of a case where we'd
	 * want the volume to go away on this kind of event.
	 */
	if (g_raid_nsubdisks(sd->sd_volume, G_RAID_SUBDISK_S_ACTIVE) == 1 &&
	    g_raid_get_subdisk(sd->sd_volume, G_RAID_SUBDISK_S_ACTIVE) == sd)
		return;
	g_raid_fail_disk(sc, sd, disk);
}

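/*
 * One rebuild round (descriptive summary): lock the slab-sized range on the
 * volume, read it from an ACTIVE subdisk, rewrite the same range on the
 * failed subdisk, unlock, advance sd_rebuild_pos, and repeat until sd_size
 * is reached.  The write half of the round is issued from
 * g_raid_tr_iodone_raid1() once the read completes.
 */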
static void
g_raid_tr_raid1_rebuild_some(struct g_raid_tr_object *tr)
{
	struct g_raid_tr_raid1_object *trs;
	struct g_raid_subdisk *sd, *good_sd;
	struct bio *bp;

	trs = (struct g_raid_tr_raid1_object *)tr;
	if (trs->trso_flags & TR_RAID1_F_DOING_SOME)
		return;
	sd = trs->trso_failed_sd;
	good_sd = g_raid_get_subdisk(sd->sd_volume, G_RAID_SUBDISK_S_ACTIVE);
	if (good_sd == NULL) {
		g_raid_tr_raid1_rebuild_abort(tr);
		return;
	}
	bp = &trs->trso_bio;
	memset(bp, 0, sizeof(*bp));
	bp->bio_offset = sd->sd_rebuild_pos;
	bp->bio_length = MIN(g_raid1_rebuild_slab,
	    sd->sd_size - sd->sd_rebuild_pos);
	bp->bio_data = trs->trso_buffer;
	bp->bio_cmd = BIO_READ;
	bp->bio_cflags = G_RAID_BIO_FLAG_SYNC;
	bp->bio_caller1 = good_sd;
	trs->trso_flags |= TR_RAID1_F_DOING_SOME;
	trs->trso_flags |= TR_RAID1_F_LOCKED;
	g_raid_lock_range(sd->sd_volume,	/* Lock callback starts I/O */
	    bp->bio_offset, bp->bio_length, NULL, bp);
}

static void
g_raid_tr_raid1_rebuild_done(struct g_raid_tr_raid1_object *trs)
{
	struct g_raid_volume *vol;
	struct g_raid_subdisk *sd;

	vol = trs->trso_base.tro_volume;
	sd = trs->trso_failed_sd;
	g_raid_write_metadata(vol->v_softc, vol, sd, sd->sd_disk);
	free(trs->trso_buffer, M_TR_RAID1);
	trs->trso_buffer = NULL;
	trs->trso_flags &= ~TR_RAID1_F_DOING_SOME;
	trs->trso_type = TR_RAID1_NONE;
	trs->trso_recover_slabs = 0;
	trs->trso_failed_sd = NULL;
	g_raid_tr_update_state_raid1(vol, NULL);
}

static void
g_raid_tr_raid1_rebuild_finish(struct g_raid_tr_object *tr)
{
	struct g_raid_tr_raid1_object *trs;
	struct g_raid_subdisk *sd;

	trs = (struct g_raid_tr_raid1_object *)tr;
	sd = trs->trso_failed_sd;
	G_RAID_DEBUG1(0, tr->tro_volume->v_softc,
	    "Subdisk %s:%d-%s rebuild completed.",
	    sd->sd_volume->v_name, sd->sd_pos,
	    sd->sd_disk ? g_raid_get_diskname(sd->sd_disk) : "[none]");
	g_raid_change_subdisk_state(sd, G_RAID_SUBDISK_S_ACTIVE);
	sd->sd_rebuild_pos = 0;
	g_raid_tr_raid1_rebuild_done(trs);
}

static void
g_raid_tr_raid1_rebuild_abort(struct g_raid_tr_object *tr)
{
	struct g_raid_tr_raid1_object *trs;
	struct g_raid_subdisk *sd;
	struct g_raid_volume *vol;
	off_t len;

	vol = tr->tro_volume;
	trs = (struct g_raid_tr_raid1_object *)tr;
	sd = trs->trso_failed_sd;
	if (trs->trso_flags & TR_RAID1_F_DOING_SOME) {
		G_RAID_DEBUG1(1, vol->v_softc,
		    "Subdisk %s:%d-%s rebuild is aborting.",
		    sd->sd_volume->v_name, sd->sd_pos,
		    sd->sd_disk ? g_raid_get_diskname(sd->sd_disk) : "[none]");
		trs->trso_flags |= TR_RAID1_F_ABORT;
	} else {
		G_RAID_DEBUG1(0, vol->v_softc,
		    "Subdisk %s:%d-%s rebuild aborted.",
		    sd->sd_volume->v_name, sd->sd_pos,
		    sd->sd_disk ? g_raid_get_diskname(sd->sd_disk) : "[none]");
		trs->trso_flags &= ~TR_RAID1_F_ABORT;
		if (trs->trso_flags & TR_RAID1_F_LOCKED) {
			trs->trso_flags &= ~TR_RAID1_F_LOCKED;
			len = MIN(g_raid1_rebuild_slab,
			    sd->sd_size - sd->sd_rebuild_pos);
			g_raid_unlock_range(tr->tro_volume,
			    sd->sd_rebuild_pos, len);
		}
		g_raid_tr_raid1_rebuild_done(trs);
	}
}

static void
g_raid_tr_raid1_rebuild_start(struct g_raid_tr_object *tr)
{
	struct g_raid_volume *vol;
	struct g_raid_tr_raid1_object *trs;
	struct g_raid_subdisk *sd, *fsd;

	vol = tr->tro_volume;
	trs = (struct g_raid_tr_raid1_object *)tr;
	if (trs->trso_failed_sd) {
		G_RAID_DEBUG1(1, vol->v_softc,
		    "Rebuild already in progress. pos %jd\n",
		    (intmax_t)trs->trso_failed_sd->sd_rebuild_pos);
		return;
	}
	sd = g_raid_get_subdisk(vol, G_RAID_SUBDISK_S_ACTIVE);
	if (sd == NULL) {
		G_RAID_DEBUG1(1, vol->v_softc,
		    "No active disk to rebuild. night night.");
		return;
	}
	fsd = g_raid_get_subdisk(vol, G_RAID_SUBDISK_S_RESYNC);
	if (fsd == NULL)
		fsd = g_raid_get_subdisk(vol, G_RAID_SUBDISK_S_REBUILD);
	if (fsd == NULL) {
		fsd = g_raid_get_subdisk(vol, G_RAID_SUBDISK_S_STALE);
		if (fsd != NULL) {
			fsd->sd_rebuild_pos = 0;
			g_raid_change_subdisk_state(fsd,
			    G_RAID_SUBDISK_S_RESYNC);
			g_raid_write_metadata(vol->v_softc, vol, fsd, NULL);
		} else {
			fsd = g_raid_get_subdisk(vol,
			    G_RAID_SUBDISK_S_UNINITIALIZED);
			if (fsd == NULL)
				fsd = g_raid_get_subdisk(vol,
				    G_RAID_SUBDISK_S_NEW);
			if (fsd != NULL) {
				fsd->sd_rebuild_pos = 0;
				g_raid_change_subdisk_state(fsd,
				    G_RAID_SUBDISK_S_REBUILD);
				g_raid_write_metadata(vol->v_softc,
				    vol, fsd, NULL);
			}
		}
	}
	if (fsd == NULL) {
		G_RAID_DEBUG1(1, vol->v_softc,
		    "No failed disk to rebuild. night night.");
		return;
	}
	trs->trso_failed_sd = fsd;
	G_RAID_DEBUG1(0, vol->v_softc,
	    "Subdisk %s:%d-%s rebuild start at %jd.",
	    fsd->sd_volume->v_name, fsd->sd_pos,
	    fsd->sd_disk ? g_raid_get_diskname(fsd->sd_disk) : "[none]",
	    (intmax_t)trs->trso_failed_sd->sd_rebuild_pos);
	trs->trso_type = TR_RAID1_REBUILD;
	trs->trso_buffer = malloc(g_raid1_rebuild_slab, M_TR_RAID1, M_WAITOK);
	trs->trso_meta_update = g_raid1_rebuild_meta_update;
	g_raid_tr_raid1_rebuild_some(tr);
}

static void
g_raid_tr_raid1_maybe_rebuild(struct g_raid_tr_object *tr,
    struct g_raid_subdisk *sd)
{
	struct g_raid_volume *vol;
	struct g_raid_tr_raid1_object *trs;
	int na, nr;

	/*
	 * If we're stopping, don't do anything.  If we don't have at least
	 * one good disk and one bad disk, we don't do anything.  And if
	 * there's a 'good disk' stored in the trs, then we're in progress
	 * and we punt.  If we make it past all these checks, we need to
	 * rebuild.
	 */
	vol = tr->tro_volume;
	trs = (struct g_raid_tr_raid1_object *)tr;
	if (trs->trso_stopping)
		return;
	na = g_raid_nsubdisks(vol, G_RAID_SUBDISK_S_ACTIVE);
	nr = g_raid_nsubdisks(vol, G_RAID_SUBDISK_S_REBUILD) +
	    g_raid_nsubdisks(vol, G_RAID_SUBDISK_S_RESYNC);
	switch (trs->trso_type) {
	case TR_RAID1_NONE:
		if (na == 0)
			return;
		if (nr == 0) {
			nr = g_raid_nsubdisks(vol, G_RAID_SUBDISK_S_NEW) +
			    g_raid_nsubdisks(vol, G_RAID_SUBDISK_S_STALE) +
			    g_raid_nsubdisks(vol, G_RAID_SUBDISK_S_UNINITIALIZED);
			if (nr == 0)
				return;
		}
		g_raid_tr_raid1_rebuild_start(tr);
		break;
	case TR_RAID1_REBUILD:
		if (na == 0 || nr == 0 || trs->trso_failed_sd == sd)
			g_raid_tr_raid1_rebuild_abort(tr);
		break;
	case TR_RAID1_RESYNC:
		break;
	}
}

static int
g_raid_tr_event_raid1(struct g_raid_tr_object *tr,
    struct g_raid_subdisk *sd, u_int event)
{

	g_raid_tr_update_state_raid1(tr->tro_volume, sd);
	return (0);
}

static int
g_raid_tr_start_raid1(struct g_raid_tr_object *tr)
{
	struct g_raid_tr_raid1_object *trs;
	struct g_raid_volume *vol;

	trs = (struct g_raid_tr_raid1_object *)tr;
	vol = tr->tro_volume;
	trs->trso_starting = 0;
	g_raid_tr_update_state_raid1(vol, NULL);
	return (0);
}

static int
g_raid_tr_stop_raid1(struct g_raid_tr_object *tr)
{
	struct g_raid_tr_raid1_object *trs;
	struct g_raid_volume *vol;

	trs = (struct g_raid_tr_raid1_object *)tr;
	vol = tr->tro_volume;
	trs->trso_starting = 0;
	trs->trso_stopping = 1;
	g_raid_tr_update_state_raid1(vol, NULL);
	return (0);
}

/*
 * Select the disk to read from.  Take into account: subdisk state, running
 * error recovery, average disk load, head position and possible cache hits.
 */
#define ABS(x)		(((x) >= 0) ? (x) : (-(x)))
static struct g_raid_subdisk *
g_raid_tr_raid1_select_read_disk(struct g_raid_volume *vol, struct bio *bp,
    u_int mask)
{
	struct g_raid_subdisk *sd, *best;
	int i, prio, bestprio;

	best = NULL;
	bestprio = INT_MAX;
	for (i = 0; i < vol->v_disks_count; i++) {
		sd = &vol->v_subdisks[i];
		if (sd->sd_state != G_RAID_SUBDISK_S_ACTIVE &&
		    ((sd->sd_state != G_RAID_SUBDISK_S_REBUILD &&
		      sd->sd_state != G_RAID_SUBDISK_S_RESYNC) ||
		     bp->bio_offset + bp->bio_length > sd->sd_rebuild_pos))
			continue;
		if ((mask & (1 << i)) != 0)
			continue;
		prio = G_RAID_SUBDISK_LOAD(sd);
		prio += min(sd->sd_recovery, 255) << 22;
		prio += (G_RAID_SUBDISK_S_ACTIVE - sd->sd_state) << 16;
		/* If disk head is precisely in position - highly prefer it. */
		if (G_RAID_SUBDISK_POS(sd) == bp->bio_offset)
			prio -= 2 * G_RAID_SUBDISK_LOAD_SCALE;
		else
		/* If disk head is close to position - prefer it. */
		if (ABS(G_RAID_SUBDISK_POS(sd) - bp->bio_offset) <
		    G_RAID_SUBDISK_TRACK_SIZE)
			prio -= 1 * G_RAID_SUBDISK_LOAD_SCALE;
		if (prio < bestprio) {
			best = sd;
			bestprio = prio;
		}
	}
	return (best);
}
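
/*
 * Worked example of the priority arithmetic above (illustrative): an idle
 * ACTIVE subdisk whose head sits exactly at bio_offset scores
 * 0 + 0 + 0 - 2 * G_RAID_SUBDISK_LOAD_SCALE, the best possible priority.
 * A rebuilding subdisk is penalized both by its recovery count (<< 22) and
 * by its state distance from ACTIVE (<< 16), so it is read from only when
 * no healthier subdisk qualifies.
 */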

static void
g_raid_tr_iostart_raid1_read(struct g_raid_tr_object *tr, struct bio *bp)
{
	struct g_raid_subdisk *sd;
	struct bio *cbp;

	sd = g_raid_tr_raid1_select_read_disk(tr->tro_volume, bp, 0);
	KASSERT(sd != NULL, ("No active disks in volume %s.",
	    tr->tro_volume->v_name));

	cbp = g_clone_bio(bp);
	if (cbp == NULL) {
		g_raid_iodone(bp, ENOMEM);
		return;
	}

	g_raid_subdisk_iostart(sd, cbp);
}

static void
g_raid_tr_iostart_raid1_write(struct g_raid_tr_object *tr, struct bio *bp)
{
	struct g_raid_volume *vol;
	struct g_raid_subdisk *sd;
	struct bio_queue_head queue;
	struct bio *cbp;
	int i;

	vol = tr->tro_volume;

	/*
	 * Allocate all bios before sending any request, so we can return
	 * ENOMEM in a nice and clean way.
	 */
	bioq_init(&queue);
	for (i = 0; i < vol->v_disks_count; i++) {
		sd = &vol->v_subdisks[i];
		switch (sd->sd_state) {
		case G_RAID_SUBDISK_S_ACTIVE:
			break;
		case G_RAID_SUBDISK_S_REBUILD:
			/*
			 * When rebuilding, only part of this subdisk is
			 * writable, the rest will be written as part of
			 * that process.
			 */
			if (bp->bio_offset >= sd->sd_rebuild_pos)
				continue;
			break;
		case G_RAID_SUBDISK_S_STALE:
		case G_RAID_SUBDISK_S_RESYNC:
			/*
			 * Resyncing still writes on the theory that the
			 * resync'd disk is very close and writing it will
			 * keep it that way better if we keep up while
			 * resyncing.
			 */
			break;
		default:
			continue;
		}
		cbp = g_clone_bio(bp);
		if (cbp == NULL)
			goto failure;
		cbp->bio_caller1 = sd;
		bioq_insert_tail(&queue, cbp);
	}
	for (cbp = bioq_first(&queue); cbp != NULL;
	    cbp = bioq_first(&queue)) {
		bioq_remove(&queue, cbp);
		sd = cbp->bio_caller1;
		cbp->bio_caller1 = NULL;
		g_raid_subdisk_iostart(sd, cbp);
	}
	return;
failure:
	for (cbp = bioq_first(&queue); cbp != NULL;
	    cbp = bioq_first(&queue)) {
		bioq_remove(&queue, cbp);
		g_destroy_bio(cbp);
	}
	if (bp->bio_error == 0)
		bp->bio_error = ENOMEM;
	g_raid_iodone(bp, bp->bio_error);
}

static void
g_raid_tr_iostart_raid1(struct g_raid_tr_object *tr, struct bio *bp)
{
	struct g_raid_volume *vol;
	struct g_raid_tr_raid1_object *trs;

	vol = tr->tro_volume;
	trs = (struct g_raid_tr_raid1_object *)tr;
	if (vol->v_state != G_RAID_VOLUME_S_OPTIMAL &&
	    vol->v_state != G_RAID_VOLUME_S_SUBOPTIMAL &&
	    vol->v_state != G_RAID_VOLUME_S_DEGRADED) {
		g_raid_iodone(bp, EIO);
		return;
	}
	/*
	 * If we're rebuilding, squeeze in rebuild activity every so often,
	 * even when the disk is busy.  Be sure to only count real I/O
	 * to the disk.  All 'SPECIAL' I/O is traffic generated to the disk
	 * by this module.
	 */
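	/*
	 * For example, with the default rebuild_fair_io of 20, roughly one
	 * rebuild round is squeezed in for every 20 regular I/Os while the
	 * volume is busy.
	 */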
	if (trs->trso_failed_sd != NULL &&
	    !(bp->bio_cflags & G_RAID_BIO_FLAG_SPECIAL)) {
		/* Cut the current rebuild round short. */
		trs->trso_recover_slabs = 0;
		if (--trs->trso_fair_io <= 0) {
			trs->trso_fair_io = g_raid1_rebuild_fair_io;
			g_raid_tr_raid1_rebuild_some(tr);
		}
	}
	switch (bp->bio_cmd) {
	case BIO_READ:
		g_raid_tr_iostart_raid1_read(tr, bp);
		break;
	case BIO_WRITE:
	case BIO_DELETE:
		g_raid_tr_iostart_raid1_write(tr, bp);
		break;
	case BIO_FLUSH:
		g_raid_tr_flush_common(tr, bp);
		break;
	default:
		KASSERT(1 == 0, ("Invalid command here: %u (volume=%s)",
		    bp->bio_cmd, vol->v_name));
		break;
	}
}

static void
g_raid_tr_iodone_raid1(struct g_raid_tr_object *tr,
    struct g_raid_subdisk *sd, struct bio *bp)
{
	struct bio *cbp;
	struct g_raid_subdisk *nsd;
	struct g_raid_volume *vol;
	struct bio *pbp;
	struct g_raid_tr_raid1_object *trs;
	uintptr_t *mask;
	int error, do_write;

	trs = (struct g_raid_tr_raid1_object *)tr;
	vol = tr->tro_volume;
	if (bp->bio_cflags & G_RAID_BIO_FLAG_SYNC) {
		/*
		 * This operation is part of a rebuild or resync operation.
		 * See what work just got done, then schedule the next bit of
		 * work, if any.  Rebuild/resync is done a little bit at a
		 * time.  Either when a timeout happens, or after we get a
		 * bunch of I/Os to the disk (to make sure an active system
		 * will complete in a sane amount of time).
		 *
		 * We are set up to do differing amounts of work for each of
		 * these cases.  So long as the slabs are smallish (less than
		 * 50 or so, I'd guess, but that's just a WAG), we shouldn't
		 * have any bio starvation issues.  For active disks, we do
		 * 5MB of data, for inactive ones, we do 50MB.
		 */
		if (trs->trso_type == TR_RAID1_REBUILD) {
			if (bp->bio_cmd == BIO_READ) {
				/* Immediately abort rebuild, if requested. */
				if (trs->trso_flags & TR_RAID1_F_ABORT) {
					trs->trso_flags &= ~TR_RAID1_F_DOING_SOME;
					g_raid_tr_raid1_rebuild_abort(tr);
					return;
				}

				/* On read error, skip and cross fingers. */
				if (bp->bio_error != 0) {
					G_RAID_LOGREQ(0, bp,
					    "Read error during rebuild (%d), "
					    "possible data loss!",
					    bp->bio_error);
					goto rebuild_round_done;
				}

				/*
				 * The read operation finished, queue the
				 * write and get out.
				 */
				G_RAID_LOGREQ(4, bp, "rebuild read done. %d",
				    bp->bio_error);
				bp->bio_cmd = BIO_WRITE;
				bp->bio_cflags = G_RAID_BIO_FLAG_SYNC;
				G_RAID_LOGREQ(4, bp, "Queueing rebuild write.");
				g_raid_subdisk_iostart(trs->trso_failed_sd, bp);
			} else {
				/*
				 * The write operation just finished.  Do
				 * another.  We keep cloning the master bio
				 * since it has the right buffers allocated to
				 * it.
				 */
				G_RAID_LOGREQ(4, bp,
				    "rebuild write done. Error %d",
				    bp->bio_error);
				nsd = trs->trso_failed_sd;
				if (bp->bio_error != 0 ||
				    trs->trso_flags & TR_RAID1_F_ABORT) {
					if ((trs->trso_flags &
					    TR_RAID1_F_ABORT) == 0) {
						g_raid_tr_raid1_fail_disk(sd->sd_softc,
						    nsd, nsd->sd_disk);
					}
					trs->trso_flags &= ~TR_RAID1_F_DOING_SOME;
					g_raid_tr_raid1_rebuild_abort(tr);
					return;
				}
rebuild_round_done:
				nsd = trs->trso_failed_sd;
				trs->trso_flags &= ~TR_RAID1_F_LOCKED;
				g_raid_unlock_range(sd->sd_volume,
				    bp->bio_offset, bp->bio_length);
				nsd->sd_rebuild_pos += bp->bio_length;
				if (nsd->sd_rebuild_pos >= nsd->sd_size) {
					g_raid_tr_raid1_rebuild_finish(tr);
					return;
				}

				/* Abort rebuild if we are stopping. */
				if (trs->trso_stopping) {
					trs->trso_flags &= ~TR_RAID1_F_DOING_SOME;
					g_raid_tr_raid1_rebuild_abort(tr);
					return;
				}

				if (--trs->trso_meta_update <= 0) {
					g_raid_write_metadata(vol->v_softc,
					    vol, nsd, nsd->sd_disk);
					trs->trso_meta_update =
					    g_raid1_rebuild_meta_update;
				}
				trs->trso_flags &= ~TR_RAID1_F_DOING_SOME;
				if (--trs->trso_recover_slabs <= 0)
					return;
				g_raid_tr_raid1_rebuild_some(tr);
			}
		} else if (trs->trso_type == TR_RAID1_RESYNC) {
			/*
			 * Read the good sd and the bad sd in parallel.  When
			 * both are done, compare the buffers and write the
			 * good one to the bad one if they differ.  Then do
			 * the next bit of work.
			 */
			panic("Somehow, we think we're doing a resync");
		}
		return;
	}
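	/* Regular (non-rebuild) I/O completion handling starts here. */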
	pbp = bp->bio_parent;
	pbp->bio_inbed++;
	if (bp->bio_cmd == BIO_READ && bp->bio_error != 0) {
		/*
		 * Read failed on first drive.  Retry the read error on
		 * another disk drive, if available, before erroring out the
		 * read.
		 */
		sd->sd_disk->d_read_errs++;
		G_RAID_LOGREQ(0, bp,
		    "Read error (%d), %d read errors total",
		    bp->bio_error, sd->sd_disk->d_read_errs);

		/*
		 * If there are too many read errors, we move to degraded.
		 * XXX Do we want to FAIL the drive (e.g., make the user redo
		 * everything to get it back in sync), or just degrade the
		 * drive, which kicks off a resync?
		 */
		do_write = 1;
		if (sd->sd_disk->d_read_errs > g_raid_read_err_thresh) {
			g_raid_tr_raid1_fail_disk(sd->sd_softc, sd, sd->sd_disk);
			if (pbp->bio_children == 1)
				do_write = 0;
		}

		/*
		 * Find the other disk, and try to do the I/O to it.
		 */
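		/*
		 * The parent bio's bio_driver2 pointer is reused as a
		 * bitmask of subdisks already tried, so each retry picks
		 * a disk that has not been attempted before.
		 */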
		mask = (uintptr_t *)(&pbp->bio_driver2);
		if (pbp->bio_children == 1) {
			/* Save original subdisk. */
			pbp->bio_driver1 = do_write ? sd : NULL;
			*mask = 0;
		}
		*mask |= 1 << sd->sd_pos;
		nsd = g_raid_tr_raid1_select_read_disk(vol, pbp, *mask);
		if (nsd != NULL && (cbp = g_clone_bio(pbp)) != NULL) {
			g_destroy_bio(bp);
			G_RAID_LOGREQ(2, cbp, "Retrying read from %d",
			    nsd->sd_pos);
			if (pbp->bio_children == 2 && do_write) {
				sd->sd_recovery++;
				cbp->bio_caller1 = nsd;
				pbp->bio_pflags = G_RAID_BIO_FLAG_LOCKED;
				/* Lock callback starts I/O */
				g_raid_lock_range(sd->sd_volume,
				    cbp->bio_offset, cbp->bio_length, pbp, cbp);
			} else {
				g_raid_subdisk_iostart(nsd, cbp);
			}
			return;
		}
		/*
		 * We can't retry.  Return the original error by falling
		 * through.  This will happen when there's only one good disk.
		 * We don't need to fail the raid, since its actual state is
		 * based on the state of the subdisks.
		 */
		G_RAID_LOGREQ(2, bp, "Couldn't retry read, failing it");
	}
	if (bp->bio_cmd == BIO_READ &&
	    bp->bio_error == 0 &&
	    pbp->bio_children > 1 &&
	    pbp->bio_driver1 != NULL) {
		/*
		 * If it was a read, and bio_children is >1, then we just
		 * recovered the data from the second drive.  We should try to
		 * write that data to the first drive if sector remapping is
		 * enabled.  A write should put the data in a new place on the
		 * disk, remapping the bad sector.  Do we need to do that by
		 * queueing a request to the main worker thread?  It doesn't
		 * affect the return code of this current read, and can be
		 * done at our leisure.  However, to make the code simpler, it
		 * is done synchronously.
		 */
		G_RAID_LOGREQ(3, bp, "Recovered data from other drive");
		cbp = g_clone_bio(pbp);
		if (cbp != NULL) {
			g_destroy_bio(bp);
			cbp->bio_cmd = BIO_WRITE;
			cbp->bio_cflags = G_RAID_BIO_FLAG_REMAP;
			G_RAID_LOGREQ(2, cbp,
			    "Attempting bad sector remap on failing drive.");
			g_raid_subdisk_iostart(pbp->bio_driver1, cbp);
			return;
		}
	}
	if (pbp->bio_pflags & G_RAID_BIO_FLAG_LOCKED) {
		/*
		 * We're done with a recovery, mark the range as unlocked.
		 * For any write errors, we aggressively fail the disk since
		 * there was both a READ and a WRITE error at this location.
		 * Both types of errors generally indicate the drive is on
		 * the verge of total failure anyway.  Better to stop trusting
		 * it now.  However, we need to reset error to 0 in that case
		 * because we're not failing the original I/O which succeeded.
		 */
		if (bp->bio_cmd == BIO_WRITE && bp->bio_error) {
			G_RAID_LOGREQ(0, bp, "Remap write failed: "
			    "failing subdisk.");
			g_raid_tr_raid1_fail_disk(sd->sd_softc, sd, sd->sd_disk);
			bp->bio_error = 0;
		}
		if (pbp->bio_driver1 != NULL) {
			((struct g_raid_subdisk *)pbp->bio_driver1)
			    ->sd_recovery--;
		}
		G_RAID_LOGREQ(2, bp, "REMAP done %d.", bp->bio_error);
		g_raid_unlock_range(sd->sd_volume, bp->bio_offset,
		    bp->bio_length);
	}
	if (pbp->bio_cmd != BIO_READ) {
		if (pbp->bio_inbed == 1 || pbp->bio_error != 0)
			pbp->bio_error = bp->bio_error;
		if (pbp->bio_cmd == BIO_WRITE && bp->bio_error != 0) {
			G_RAID_LOGREQ(0, bp, "Write failed: failing subdisk.");
			g_raid_tr_raid1_fail_disk(sd->sd_softc, sd, sd->sd_disk);
		}
		error = pbp->bio_error;
	} else
		error = bp->bio_error;
	g_destroy_bio(bp);
	if (pbp->bio_children == pbp->bio_inbed) {
		pbp->bio_completed = pbp->bio_length;
		g_raid_iodone(pbp, error);
	}
}

static int
g_raid_tr_kerneldump_raid1(struct g_raid_tr_object *tr,
    void *virtual, vm_offset_t physical, off_t offset, size_t length)
{
	struct g_raid_volume *vol;
	struct g_raid_subdisk *sd;
	int error, i, ok;

	vol = tr->tro_volume;
	error = 0;
	ok = 0;
	for (i = 0; i < vol->v_disks_count; i++) {
		sd = &vol->v_subdisks[i];
		switch (sd->sd_state) {
		case G_RAID_SUBDISK_S_ACTIVE:
			break;
		case G_RAID_SUBDISK_S_REBUILD:
			/*
			 * When rebuilding, only part of this subdisk is
			 * writable, the rest will be written as part of
			 * that process.
			 */
			if (offset >= sd->sd_rebuild_pos)
				continue;
			break;
		case G_RAID_SUBDISK_S_STALE:
		case G_RAID_SUBDISK_S_RESYNC:
			/*
			 * Resyncing still writes on the theory that the
			 * resync'd disk is very close and writing it will
			 * keep it that way better if we keep up while
			 * resyncing.
			 */
			break;
		default:
			continue;
		}
		error = g_raid_subdisk_kerneldump(sd,
		    virtual, physical, offset, length);
		if (error == 0)
			ok++;
	}
	return (ok > 0 ? 0 : error);
}

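/*
 * Range-lock callback: called by the core once a g_raid_lock_range()
 * request is granted; it starts the I/O that was deferred until the
 * range lock was held.
 */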
static int
g_raid_tr_locked_raid1(struct g_raid_tr_object *tr, void *argp)
{
	struct bio *bp;
	struct g_raid_subdisk *sd;

	bp = (struct bio *)argp;
	sd = (struct g_raid_subdisk *)bp->bio_caller1;
	g_raid_subdisk_iostart(sd, bp);

	return (0);
}

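/*
 * Called when the volume has been idle for a while: refresh the rebuild
 * pacing budgets and, if a rebuild is in progress, resume it right away.
 */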
static int
g_raid_tr_idle_raid1(struct g_raid_tr_object *tr)
{
	struct g_raid_tr_raid1_object *trs;

	trs = (struct g_raid_tr_raid1_object *)tr;
	trs->trso_fair_io = g_raid1_rebuild_fair_io;
	trs->trso_recover_slabs = g_raid1_rebuild_cluster_idle;
	if (trs->trso_type == TR_RAID1_REBUILD)
		g_raid_tr_raid1_rebuild_some(tr);
	return (0);
}

static int
g_raid_tr_free_raid1(struct g_raid_tr_object *tr)
{
	struct g_raid_tr_raid1_object *trs;

	trs = (struct g_raid_tr_raid1_object *)tr;

	if (trs->trso_buffer != NULL) {
		free(trs->trso_buffer, M_TR_RAID1);
		trs->trso_buffer = NULL;
	}
	return (0);
}

G_RAID_TR_DECLARE(raid1, "RAID1");